input
stringlengths
53
297k
output
stringclasses
604 values
repo_name
stringclasses
376 values
test_path
stringclasses
583 values
code_path
stringlengths
7
116
"""Support for an Intergas heater via an InComfort/InTouch Lan2RF gateway.""" from typing import Any, Dict, Optional from homeassistant.components.sensor import DOMAIN as SENSOR_DOMAIN from homeassistant.const import ( DEVICE_CLASS_PRESSURE, DEVICE_CLASS_TEMPERATURE, PRESSURE_BAR, TEMP_CELSIUS, ) from homeassistant.util import slugify from . import DOMAIN, IncomfortChild INCOMFORT_HEATER_TEMP = "CV Temp" INCOMFORT_PRESSURE = "CV Pressure" INCOMFORT_TAP_TEMP = "Tap Temp" INCOMFORT_MAP_ATTRS = { INCOMFORT_HEATER_TEMP: ["heater_temp", "is_pumping"], INCOMFORT_PRESSURE: ["pressure", None], INCOMFORT_TAP_TEMP: ["tap_temp", "is_tapping"], } async def async_setup_platform(hass, config, async_add_entities, discovery_info=None): """Set up an InComfort/InTouch sensor device.""" if discovery_info is None: return client = hass.data[DOMAIN]["client"] heaters = hass.data[DOMAIN]["heaters"] async_add_entities( [IncomfortPressure(client, h, INCOMFORT_PRESSURE) for h in heaters] + [IncomfortTemperature(client, h, INCOMFORT_HEATER_TEMP) for h in heaters] + [IncomfortTemperature(client, h, INCOMFORT_TAP_TEMP) for h in heaters] ) class IncomfortSensor(IncomfortChild): """Representation of an InComfort/InTouch sensor device.""" def __init__(self, client, heater, name) -> None: """Initialize the sensor.""" super().__init__() self._client = client self._heater = heater self._unique_id = f"{heater.serial_no}_{slugify(name)}" self.entity_id = f"{SENSOR_DOMAIN}.{DOMAIN}_{slugify(name)}" self._name = f"Boiler {name}" self._device_class = None self._state_attr = INCOMFORT_MAP_ATTRS[name][0] self._unit_of_measurement = None @property def state(self) -> Optional[str]: """Return the state of the sensor.""" return self._heater.status[self._state_attr] @property def device_class(self) -> Optional[str]: """Return the device class of the sensor.""" return self._device_class @property def unit_of_measurement(self) -> Optional[str]: """Return the unit of measurement of the sensor.""" return 
self._unit_of_measurement class IncomfortPressure(IncomfortSensor): """Representation of an InTouch CV Pressure sensor.""" def __init__(self, client, heater, name) -> None: """Initialize the sensor.""" super().__init__(client, heater, name) self._device_class = DEVICE_CLASS_PRESSURE self._unit_of_measurement = PRESSURE_BAR class IncomfortTemperature(IncomfortSensor): """Representation of an InTouch Temperature sensor.""" def __init__(self, client, heater, name) -> None: """Initialize the signal strength sensor.""" super().__init__(client, heater, name) self._attr = INCOMFORT_MAP_ATTRS[name][1] self._device_class = DEVICE_CLASS_TEMPERATURE self._unit_of_measurement = TEMP_CELSIUS @property def device_state_attributes(self) -> Optional[Dict[str, Any]]: """Return the device state attributes.""" return {self._attr: self._heater.status[self._attr]}
"""The tests for the Alexa component.""" # pylint: disable=protected-access import json import pytest from homeassistant.components import alexa from homeassistant.components.alexa import intent from homeassistant.core import callback from homeassistant.setup import async_setup_component SESSION_ID = "amzn1.echo-api.session.0000000-0000-0000-0000-00000000000" APPLICATION_ID = "amzn1.echo-sdk-ams.app.000000-d0ed-0000-ad00-000000d00ebe" REQUEST_ID = "amzn1.echo-api.request.0000000-0000-0000-0000-00000000000" AUTHORITY_ID = "amzn1.er-authority.000000-d0ed-0000-ad00-000000d00ebe.ZODIAC" BUILTIN_AUTH_ID = "amzn1.er-authority.000000-d0ed-0000-ad00-000000d00ebe.TEST" # pylint: disable=invalid-name calls = [] NPR_NEWS_MP3_URL = "https://pd.npr.org/anon.npr-mp3/npr/news/newscast.mp3" @pytest.fixture def alexa_client(loop, hass, hass_client): """Initialize a Home Assistant server for testing this module.""" @callback def mock_service(call): calls.append(call) hass.services.async_register("test", "alexa", mock_service) assert loop.run_until_complete( async_setup_component( hass, alexa.DOMAIN, { # Key is here to verify we allow other keys in config too "homeassistant": {}, "alexa": {}, }, ) ) assert loop.run_until_complete( async_setup_component( hass, "intent_script", { "intent_script": { "WhereAreWeIntent": { "speech": { "type": "plain", "text": """ {%- if is_state("device_tracker.paulus", "home") and is_state("device_tracker.anne_therese", "home") -%} You are both home, you silly {%- else -%} Anne Therese is at {{ states("device_tracker.anne_therese") }} and Paulus is at {{ states("device_tracker.paulus") }} {% endif %} """, } }, "GetZodiacHoroscopeIntent": { "speech": { "type": "plain", "text": "You told us your sign is {{ ZodiacSign }}.", } }, "AMAZON.PlaybackAction<object@MusicCreativeWork>": { "speech": { "type": "plain", "text": "Playing {{ object_byArtist_name }}.", } }, "CallServiceIntent": { "speech": { "type": "plain", "text": "Service called for {{ ZodiacSign }}", 
}, "card": { "type": "simple", "title": "Card title for {{ ZodiacSign }}", "content": "Card content: {{ ZodiacSign }}", }, "action": { "service": "test.alexa", "data_template": {"hello": "{{ ZodiacSign }}"}, "entity_id": "switch.test", }, }, APPLICATION_ID: { "speech": { "type": "plain", "text": "LaunchRequest has been received.", } }, } }, ) ) return loop.run_until_complete(hass_client()) def _intent_req(client, data=None): return client.post( intent.INTENTS_API_ENDPOINT, data=json.dumps(data or {}), headers={"content-type": "application/json"}, ) async def test_intent_launch_request(alexa_client): """Test the launch of a request.""" data = { "version": "1.0", "session": { "new": True, "sessionId": SESSION_ID, "application": {"applicationId": APPLICATION_ID}, "attributes": {}, "user": {"userId": "amzn1.account.AM3B00000000000000000000000"}, }, "request": { "type": "LaunchRequest", "requestId": REQUEST_ID, "timestamp": "2015-05-13T12:34:56Z", }, } req = await _intent_req(alexa_client, data) assert req.status == 200 data = await req.json() text = data.get("response", {}).get("outputSpeech", {}).get("text") assert text == "LaunchRequest has been received." async def test_intent_launch_request_not_configured(alexa_client): """Test the launch of a request.""" data = { "version": "1.0", "session": { "new": True, "sessionId": SESSION_ID, "application": { "applicationId": "amzn1.echo-sdk-ams.app.000000-d0ed-0000-ad00-000000d00000" }, "attributes": {}, "user": {"userId": "amzn1.account.AM3B00000000000000000000000"}, }, "request": { "type": "LaunchRequest", "requestId": REQUEST_ID, "timestamp": "2015-05-13T12:34:56Z", }, } req = await _intent_req(alexa_client, data) assert req.status == 200 data = await req.json() text = data.get("response", {}).get("outputSpeech", {}).get("text") assert text == "This intent is not yet configured within Home Assistant." 
async def test_intent_request_with_slots(alexa_client): """Test a request with slots.""" data = { "version": "1.0", "session": { "new": False, "sessionId": SESSION_ID, "application": {"applicationId": APPLICATION_ID}, "attributes": { "supportedHoroscopePeriods": { "daily": True, "weekly": False, "monthly": False, } }, "user": {"userId": "amzn1.account.AM3B00000000000000000000000"}, }, "request": { "type": "IntentRequest", "requestId": REQUEST_ID, "timestamp": "2015-05-13T12:34:56Z", "intent": { "name": "GetZodiacHoroscopeIntent", "slots": {"ZodiacSign": {"name": "ZodiacSign", "value": "virgo"}}, }, }, } req = await _intent_req(alexa_client, data) assert req.status == 200 data = await req.json() text = data.get("response", {}).get("outputSpeech", {}).get("text") assert text == "You told us your sign is virgo." async def test_intent_request_with_slots_and_synonym_resolution(alexa_client): """Test a request with slots and a name synonym.""" data = { "version": "1.0", "session": { "new": False, "sessionId": SESSION_ID, "application": {"applicationId": APPLICATION_ID}, "attributes": { "supportedHoroscopePeriods": { "daily": True, "weekly": False, "monthly": False, } }, "user": {"userId": "amzn1.account.AM3B00000000000000000000000"}, }, "request": { "type": "IntentRequest", "requestId": REQUEST_ID, "timestamp": "2015-05-13T12:34:56Z", "intent": { "name": "GetZodiacHoroscopeIntent", "slots": { "ZodiacSign": { "name": "ZodiacSign", "value": "V zodiac", "resolutions": { "resolutionsPerAuthority": [ { "authority": AUTHORITY_ID, "status": {"code": "ER_SUCCESS_MATCH"}, "values": [{"value": {"name": "Virgo"}}], }, { "authority": BUILTIN_AUTH_ID, "status": {"code": "ER_SUCCESS_NO_MATCH"}, "values": [{"value": {"name": "Test"}}], }, ] }, } }, }, }, } req = await _intent_req(alexa_client, data) assert req.status == 200 data = await req.json() text = data.get("response", {}).get("outputSpeech", {}).get("text") assert text == "You told us your sign is Virgo." 
async def test_intent_request_with_slots_and_multi_synonym_resolution(alexa_client): """Test a request with slots and multiple name synonyms.""" data = { "version": "1.0", "session": { "new": False, "sessionId": SESSION_ID, "application": {"applicationId": APPLICATION_ID}, "attributes": { "supportedHoroscopePeriods": { "daily": True, "weekly": False, "monthly": False, } }, "user": {"userId": "amzn1.account.AM3B00000000000000000000000"}, }, "request": { "type": "IntentRequest", "requestId": REQUEST_ID, "timestamp": "2015-05-13T12:34:56Z", "intent": { "name": "GetZodiacHoroscopeIntent", "slots": { "ZodiacSign": { "name": "ZodiacSign", "value": "V zodiac", "resolutions": { "resolutionsPerAuthority": [ { "authority": AUTHORITY_ID, "status": {"code": "ER_SUCCESS_MATCH"}, "values": [{"value": {"name": "Virgo"}}], }, { "authority": BUILTIN_AUTH_ID, "status": {"code": "ER_SUCCESS_MATCH"}, "values": [{"value": {"name": "Test"}}], }, ] }, } }, }, }, } req = await _intent_req(alexa_client, data) assert req.status == 200 data = await req.json() text = data.get("response", {}).get("outputSpeech", {}).get("text") assert text == "You told us your sign is V zodiac." async def test_intent_request_with_slots_but_no_value(alexa_client): """Test a request with slots but no value.""" data = { "version": "1.0", "session": { "new": False, "sessionId": SESSION_ID, "application": {"applicationId": APPLICATION_ID}, "attributes": { "supportedHoroscopePeriods": { "daily": True, "weekly": False, "monthly": False, } }, "user": {"userId": "amzn1.account.AM3B00000000000000000000000"}, }, "request": { "type": "IntentRequest", "requestId": REQUEST_ID, "timestamp": "2015-05-13T12:34:56Z", "intent": { "name": "GetZodiacHoroscopeIntent", "slots": {"ZodiacSign": {"name": "ZodiacSign"}}, }, }, } req = await _intent_req(alexa_client, data) assert req.status == 200 data = await req.json() text = data.get("response", {}).get("outputSpeech", {}).get("text") assert text == "You told us your sign is ." 
async def test_intent_request_without_slots(hass, alexa_client): """Test a request without slots.""" data = { "version": "1.0", "session": { "new": False, "sessionId": SESSION_ID, "application": {"applicationId": APPLICATION_ID}, "attributes": { "supportedHoroscopePeriods": { "daily": True, "weekly": False, "monthly": False, } }, "user": {"userId": "amzn1.account.AM3B00000000000000000000000"}, }, "request": { "type": "IntentRequest", "requestId": REQUEST_ID, "timestamp": "2015-05-13T12:34:56Z", "intent": {"name": "WhereAreWeIntent"}, }, } req = await _intent_req(alexa_client, data) assert req.status == 200 json = await req.json() text = json.get("response", {}).get("outputSpeech", {}).get("text") assert text == "Anne Therese is at unknown and Paulus is at unknown" hass.states.async_set("device_tracker.paulus", "home") hass.states.async_set("device_tracker.anne_therese", "home") req = await _intent_req(alexa_client, data) assert req.status == 200 json = await req.json() text = json.get("response", {}).get("outputSpeech", {}).get("text") assert text == "You are both home, you silly" async def test_intent_request_calling_service(alexa_client): """Test a request for calling a service.""" data = { "version": "1.0", "session": { "new": False, "sessionId": SESSION_ID, "application": {"applicationId": APPLICATION_ID}, "attributes": {}, "user": {"userId": "amzn1.account.AM3B00000000000000000000000"}, }, "request": { "type": "IntentRequest", "requestId": REQUEST_ID, "timestamp": "2015-05-13T12:34:56Z", "intent": { "name": "CallServiceIntent", "slots": {"ZodiacSign": {"name": "ZodiacSign", "value": "virgo"}}, }, }, } call_count = len(calls) req = await _intent_req(alexa_client, data) assert req.status == 200 assert call_count + 1 == len(calls) call = calls[-1] assert call.domain == "test" assert call.service == "alexa" assert call.data.get("entity_id") == ["switch.test"] assert call.data.get("hello") == "virgo" data = await req.json() assert data["response"]["card"]["title"] 
== "Card title for virgo" assert data["response"]["card"]["content"] == "Card content: virgo" assert data["response"]["outputSpeech"]["type"] == "PlainText" assert data["response"]["outputSpeech"]["text"] == "Service called for virgo" async def test_intent_session_ended_request(alexa_client): """Test the request for ending the session.""" data = { "version": "1.0", "session": { "new": False, "sessionId": SESSION_ID, "application": {"applicationId": APPLICATION_ID}, "attributes": { "supportedHoroscopePeriods": { "daily": True, "weekly": False, "monthly": False, } }, "user": {"userId": "amzn1.account.AM3B00000000000000000000000"}, }, "request": { "type": "SessionEndedRequest", "requestId": REQUEST_ID, "timestamp": "2015-05-13T12:34:56Z", "reason": "USER_INITIATED", }, } req = await _intent_req(alexa_client, data) assert req.status == 200 text = await req.text() assert text == "" async def test_intent_from_built_in_intent_library(alexa_client): """Test intents from the Built-in Intent Library.""" data = { "request": { "intent": { "name": "AMAZON.PlaybackAction<object@MusicCreativeWork>", "slots": { "object.byArtist.name": { "name": "object.byArtist.name", "value": "the shins", }, "object.composer.name": {"name": "object.composer.name"}, "object.contentSource": {"name": "object.contentSource"}, "object.era": {"name": "object.era"}, "object.genre": {"name": "object.genre"}, "object.name": {"name": "object.name"}, "object.owner.name": {"name": "object.owner.name"}, "object.select": {"name": "object.select"}, "object.sort": {"name": "object.sort"}, "object.type": {"name": "object.type", "value": "music"}, }, }, "timestamp": "2016-12-14T23:23:37Z", "type": "IntentRequest", "requestId": REQUEST_ID, }, "session": { "sessionId": SESSION_ID, "application": {"applicationId": APPLICATION_ID}, }, } req = await _intent_req(alexa_client, data) assert req.status == 200 data = await req.json() text = data.get("response", {}).get("outputSpeech", {}).get("text") assert text == "Playing 
the shins."
tchellomello/home-assistant
tests/components/alexa/test_intent.py
homeassistant/components/incomfort/sensor.py
"""Support for Satel Integra devices.""" import collections import logging from satel_integra.satel_integra import AsyncSatel import voluptuous as vol from homeassistant.const import CONF_HOST, CONF_PORT, EVENT_HOMEASSISTANT_STOP from homeassistant.core import callback from homeassistant.helpers import config_validation as cv from homeassistant.helpers.discovery import async_load_platform from homeassistant.helpers.dispatcher import async_dispatcher_send DEFAULT_ALARM_NAME = "satel_integra" DEFAULT_PORT = 7094 DEFAULT_CONF_ARM_HOME_MODE = 1 DEFAULT_DEVICE_PARTITION = 1 DEFAULT_ZONE_TYPE = "motion" _LOGGER = logging.getLogger(__name__) DOMAIN = "satel_integra" DATA_SATEL = "satel_integra" CONF_DEVICE_CODE = "code" CONF_DEVICE_PARTITIONS = "partitions" CONF_ARM_HOME_MODE = "arm_home_mode" CONF_ZONE_NAME = "name" CONF_ZONE_TYPE = "type" CONF_ZONES = "zones" CONF_OUTPUTS = "outputs" CONF_SWITCHABLE_OUTPUTS = "switchable_outputs" ZONES = "zones" SIGNAL_PANEL_MESSAGE = "satel_integra.panel_message" SIGNAL_PANEL_ARM_AWAY = "satel_integra.panel_arm_away" SIGNAL_PANEL_ARM_HOME = "satel_integra.panel_arm_home" SIGNAL_PANEL_DISARM = "satel_integra.panel_disarm" SIGNAL_ZONES_UPDATED = "satel_integra.zones_updated" SIGNAL_OUTPUTS_UPDATED = "satel_integra.outputs_updated" ZONE_SCHEMA = vol.Schema( { vol.Required(CONF_ZONE_NAME): cv.string, vol.Optional(CONF_ZONE_TYPE, default=DEFAULT_ZONE_TYPE): cv.string, } ) EDITABLE_OUTPUT_SCHEMA = vol.Schema({vol.Required(CONF_ZONE_NAME): cv.string}) PARTITION_SCHEMA = vol.Schema( { vol.Required(CONF_ZONE_NAME): cv.string, vol.Optional(CONF_ARM_HOME_MODE, default=DEFAULT_CONF_ARM_HOME_MODE): vol.In( [1, 2, 3] ), } ) def is_alarm_code_necessary(value): """Check if alarm code must be configured.""" if value.get(CONF_SWITCHABLE_OUTPUTS) and CONF_DEVICE_CODE not in value: raise vol.Invalid("You need to specify alarm code to use switchable_outputs") return value CONFIG_SCHEMA = vol.Schema( { DOMAIN: vol.All( { vol.Required(CONF_HOST): cv.string, 
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port, vol.Optional(CONF_DEVICE_CODE): cv.string, vol.Optional(CONF_DEVICE_PARTITIONS, default={}): { vol.Coerce(int): PARTITION_SCHEMA }, vol.Optional(CONF_ZONES, default={}): {vol.Coerce(int): ZONE_SCHEMA}, vol.Optional(CONF_OUTPUTS, default={}): {vol.Coerce(int): ZONE_SCHEMA}, vol.Optional(CONF_SWITCHABLE_OUTPUTS, default={}): { vol.Coerce(int): EDITABLE_OUTPUT_SCHEMA }, }, is_alarm_code_necessary, ) }, extra=vol.ALLOW_EXTRA, ) async def async_setup(hass, config): """Set up the Satel Integra component.""" conf = config.get(DOMAIN) zones = conf.get(CONF_ZONES) outputs = conf.get(CONF_OUTPUTS) switchable_outputs = conf.get(CONF_SWITCHABLE_OUTPUTS) host = conf.get(CONF_HOST) port = conf.get(CONF_PORT) partitions = conf.get(CONF_DEVICE_PARTITIONS) monitored_outputs = collections.OrderedDict( list(outputs.items()) + list(switchable_outputs.items()) ) controller = AsyncSatel(host, port, hass.loop, zones, monitored_outputs, partitions) hass.data[DATA_SATEL] = controller result = await controller.connect() if not result: return False async def _close(): controller.close() hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, _close()) _LOGGER.debug("Arm home config: %s, mode: %s ", conf, conf.get(CONF_ARM_HOME_MODE)) hass.async_create_task( async_load_platform(hass, "alarm_control_panel", DOMAIN, conf, config) ) hass.async_create_task( async_load_platform( hass, "binary_sensor", DOMAIN, {CONF_ZONES: zones, CONF_OUTPUTS: outputs}, config, ) ) hass.async_create_task( async_load_platform( hass, "switch", DOMAIN, { CONF_SWITCHABLE_OUTPUTS: switchable_outputs, CONF_DEVICE_CODE: conf.get(CONF_DEVICE_CODE), }, config, ) ) @callback def alarm_status_update_callback(): """Send status update received from alarm to Home Assistant.""" _LOGGER.debug("Sending request to update panel state") async_dispatcher_send(hass, SIGNAL_PANEL_MESSAGE) @callback def zones_update_callback(status): """Update zone objects as per notification from the 
alarm.""" _LOGGER.debug("Zones callback, status: %s", status) async_dispatcher_send(hass, SIGNAL_ZONES_UPDATED, status[ZONES]) @callback def outputs_update_callback(status): """Update zone objects as per notification from the alarm.""" _LOGGER.debug("Outputs updated callback , status: %s", status) async_dispatcher_send(hass, SIGNAL_OUTPUTS_UPDATED, status["outputs"]) # Create a task instead of adding a tracking job, since this task will # run until the connection to satel_integra is closed. hass.loop.create_task(controller.keep_alive()) hass.loop.create_task( controller.monitor_status( alarm_status_update_callback, zones_update_callback, outputs_update_callback ) ) return True
"""The tests for the Alexa component.""" # pylint: disable=protected-access import json import pytest from homeassistant.components import alexa from homeassistant.components.alexa import intent from homeassistant.core import callback from homeassistant.setup import async_setup_component SESSION_ID = "amzn1.echo-api.session.0000000-0000-0000-0000-00000000000" APPLICATION_ID = "amzn1.echo-sdk-ams.app.000000-d0ed-0000-ad00-000000d00ebe" REQUEST_ID = "amzn1.echo-api.request.0000000-0000-0000-0000-00000000000" AUTHORITY_ID = "amzn1.er-authority.000000-d0ed-0000-ad00-000000d00ebe.ZODIAC" BUILTIN_AUTH_ID = "amzn1.er-authority.000000-d0ed-0000-ad00-000000d00ebe.TEST" # pylint: disable=invalid-name calls = [] NPR_NEWS_MP3_URL = "https://pd.npr.org/anon.npr-mp3/npr/news/newscast.mp3" @pytest.fixture def alexa_client(loop, hass, hass_client): """Initialize a Home Assistant server for testing this module.""" @callback def mock_service(call): calls.append(call) hass.services.async_register("test", "alexa", mock_service) assert loop.run_until_complete( async_setup_component( hass, alexa.DOMAIN, { # Key is here to verify we allow other keys in config too "homeassistant": {}, "alexa": {}, }, ) ) assert loop.run_until_complete( async_setup_component( hass, "intent_script", { "intent_script": { "WhereAreWeIntent": { "speech": { "type": "plain", "text": """ {%- if is_state("device_tracker.paulus", "home") and is_state("device_tracker.anne_therese", "home") -%} You are both home, you silly {%- else -%} Anne Therese is at {{ states("device_tracker.anne_therese") }} and Paulus is at {{ states("device_tracker.paulus") }} {% endif %} """, } }, "GetZodiacHoroscopeIntent": { "speech": { "type": "plain", "text": "You told us your sign is {{ ZodiacSign }}.", } }, "AMAZON.PlaybackAction<object@MusicCreativeWork>": { "speech": { "type": "plain", "text": "Playing {{ object_byArtist_name }}.", } }, "CallServiceIntent": { "speech": { "type": "plain", "text": "Service called for {{ ZodiacSign }}", 
}, "card": { "type": "simple", "title": "Card title for {{ ZodiacSign }}", "content": "Card content: {{ ZodiacSign }}", }, "action": { "service": "test.alexa", "data_template": {"hello": "{{ ZodiacSign }}"}, "entity_id": "switch.test", }, }, APPLICATION_ID: { "speech": { "type": "plain", "text": "LaunchRequest has been received.", } }, } }, ) ) return loop.run_until_complete(hass_client()) def _intent_req(client, data=None): return client.post( intent.INTENTS_API_ENDPOINT, data=json.dumps(data or {}), headers={"content-type": "application/json"}, ) async def test_intent_launch_request(alexa_client): """Test the launch of a request.""" data = { "version": "1.0", "session": { "new": True, "sessionId": SESSION_ID, "application": {"applicationId": APPLICATION_ID}, "attributes": {}, "user": {"userId": "amzn1.account.AM3B00000000000000000000000"}, }, "request": { "type": "LaunchRequest", "requestId": REQUEST_ID, "timestamp": "2015-05-13T12:34:56Z", }, } req = await _intent_req(alexa_client, data) assert req.status == 200 data = await req.json() text = data.get("response", {}).get("outputSpeech", {}).get("text") assert text == "LaunchRequest has been received." async def test_intent_launch_request_not_configured(alexa_client): """Test the launch of a request.""" data = { "version": "1.0", "session": { "new": True, "sessionId": SESSION_ID, "application": { "applicationId": "amzn1.echo-sdk-ams.app.000000-d0ed-0000-ad00-000000d00000" }, "attributes": {}, "user": {"userId": "amzn1.account.AM3B00000000000000000000000"}, }, "request": { "type": "LaunchRequest", "requestId": REQUEST_ID, "timestamp": "2015-05-13T12:34:56Z", }, } req = await _intent_req(alexa_client, data) assert req.status == 200 data = await req.json() text = data.get("response", {}).get("outputSpeech", {}).get("text") assert text == "This intent is not yet configured within Home Assistant." 
async def test_intent_request_with_slots(alexa_client): """Test a request with slots.""" data = { "version": "1.0", "session": { "new": False, "sessionId": SESSION_ID, "application": {"applicationId": APPLICATION_ID}, "attributes": { "supportedHoroscopePeriods": { "daily": True, "weekly": False, "monthly": False, } }, "user": {"userId": "amzn1.account.AM3B00000000000000000000000"}, }, "request": { "type": "IntentRequest", "requestId": REQUEST_ID, "timestamp": "2015-05-13T12:34:56Z", "intent": { "name": "GetZodiacHoroscopeIntent", "slots": {"ZodiacSign": {"name": "ZodiacSign", "value": "virgo"}}, }, }, } req = await _intent_req(alexa_client, data) assert req.status == 200 data = await req.json() text = data.get("response", {}).get("outputSpeech", {}).get("text") assert text == "You told us your sign is virgo." async def test_intent_request_with_slots_and_synonym_resolution(alexa_client): """Test a request with slots and a name synonym.""" data = { "version": "1.0", "session": { "new": False, "sessionId": SESSION_ID, "application": {"applicationId": APPLICATION_ID}, "attributes": { "supportedHoroscopePeriods": { "daily": True, "weekly": False, "monthly": False, } }, "user": {"userId": "amzn1.account.AM3B00000000000000000000000"}, }, "request": { "type": "IntentRequest", "requestId": REQUEST_ID, "timestamp": "2015-05-13T12:34:56Z", "intent": { "name": "GetZodiacHoroscopeIntent", "slots": { "ZodiacSign": { "name": "ZodiacSign", "value": "V zodiac", "resolutions": { "resolutionsPerAuthority": [ { "authority": AUTHORITY_ID, "status": {"code": "ER_SUCCESS_MATCH"}, "values": [{"value": {"name": "Virgo"}}], }, { "authority": BUILTIN_AUTH_ID, "status": {"code": "ER_SUCCESS_NO_MATCH"}, "values": [{"value": {"name": "Test"}}], }, ] }, } }, }, }, } req = await _intent_req(alexa_client, data) assert req.status == 200 data = await req.json() text = data.get("response", {}).get("outputSpeech", {}).get("text") assert text == "You told us your sign is Virgo." 
async def test_intent_request_with_slots_and_multi_synonym_resolution(alexa_client): """Test a request with slots and multiple name synonyms.""" data = { "version": "1.0", "session": { "new": False, "sessionId": SESSION_ID, "application": {"applicationId": APPLICATION_ID}, "attributes": { "supportedHoroscopePeriods": { "daily": True, "weekly": False, "monthly": False, } }, "user": {"userId": "amzn1.account.AM3B00000000000000000000000"}, }, "request": { "type": "IntentRequest", "requestId": REQUEST_ID, "timestamp": "2015-05-13T12:34:56Z", "intent": { "name": "GetZodiacHoroscopeIntent", "slots": { "ZodiacSign": { "name": "ZodiacSign", "value": "V zodiac", "resolutions": { "resolutionsPerAuthority": [ { "authority": AUTHORITY_ID, "status": {"code": "ER_SUCCESS_MATCH"}, "values": [{"value": {"name": "Virgo"}}], }, { "authority": BUILTIN_AUTH_ID, "status": {"code": "ER_SUCCESS_MATCH"}, "values": [{"value": {"name": "Test"}}], }, ] }, } }, }, }, } req = await _intent_req(alexa_client, data) assert req.status == 200 data = await req.json() text = data.get("response", {}).get("outputSpeech", {}).get("text") assert text == "You told us your sign is V zodiac." async def test_intent_request_with_slots_but_no_value(alexa_client): """Test a request with slots but no value.""" data = { "version": "1.0", "session": { "new": False, "sessionId": SESSION_ID, "application": {"applicationId": APPLICATION_ID}, "attributes": { "supportedHoroscopePeriods": { "daily": True, "weekly": False, "monthly": False, } }, "user": {"userId": "amzn1.account.AM3B00000000000000000000000"}, }, "request": { "type": "IntentRequest", "requestId": REQUEST_ID, "timestamp": "2015-05-13T12:34:56Z", "intent": { "name": "GetZodiacHoroscopeIntent", "slots": {"ZodiacSign": {"name": "ZodiacSign"}}, }, }, } req = await _intent_req(alexa_client, data) assert req.status == 200 data = await req.json() text = data.get("response", {}).get("outputSpeech", {}).get("text") assert text == "You told us your sign is ." 
async def test_intent_request_without_slots(hass, alexa_client): """Test a request without slots.""" data = { "version": "1.0", "session": { "new": False, "sessionId": SESSION_ID, "application": {"applicationId": APPLICATION_ID}, "attributes": { "supportedHoroscopePeriods": { "daily": True, "weekly": False, "monthly": False, } }, "user": {"userId": "amzn1.account.AM3B00000000000000000000000"}, }, "request": { "type": "IntentRequest", "requestId": REQUEST_ID, "timestamp": "2015-05-13T12:34:56Z", "intent": {"name": "WhereAreWeIntent"}, }, } req = await _intent_req(alexa_client, data) assert req.status == 200 json = await req.json() text = json.get("response", {}).get("outputSpeech", {}).get("text") assert text == "Anne Therese is at unknown and Paulus is at unknown" hass.states.async_set("device_tracker.paulus", "home") hass.states.async_set("device_tracker.anne_therese", "home") req = await _intent_req(alexa_client, data) assert req.status == 200 json = await req.json() text = json.get("response", {}).get("outputSpeech", {}).get("text") assert text == "You are both home, you silly" async def test_intent_request_calling_service(alexa_client): """Test a request for calling a service.""" data = { "version": "1.0", "session": { "new": False, "sessionId": SESSION_ID, "application": {"applicationId": APPLICATION_ID}, "attributes": {}, "user": {"userId": "amzn1.account.AM3B00000000000000000000000"}, }, "request": { "type": "IntentRequest", "requestId": REQUEST_ID, "timestamp": "2015-05-13T12:34:56Z", "intent": { "name": "CallServiceIntent", "slots": {"ZodiacSign": {"name": "ZodiacSign", "value": "virgo"}}, }, }, } call_count = len(calls) req = await _intent_req(alexa_client, data) assert req.status == 200 assert call_count + 1 == len(calls) call = calls[-1] assert call.domain == "test" assert call.service == "alexa" assert call.data.get("entity_id") == ["switch.test"] assert call.data.get("hello") == "virgo" data = await req.json() assert data["response"]["card"]["title"] 
== "Card title for virgo" assert data["response"]["card"]["content"] == "Card content: virgo" assert data["response"]["outputSpeech"]["type"] == "PlainText" assert data["response"]["outputSpeech"]["text"] == "Service called for virgo" async def test_intent_session_ended_request(alexa_client): """Test the request for ending the session.""" data = { "version": "1.0", "session": { "new": False, "sessionId": SESSION_ID, "application": {"applicationId": APPLICATION_ID}, "attributes": { "supportedHoroscopePeriods": { "daily": True, "weekly": False, "monthly": False, } }, "user": {"userId": "amzn1.account.AM3B00000000000000000000000"}, }, "request": { "type": "SessionEndedRequest", "requestId": REQUEST_ID, "timestamp": "2015-05-13T12:34:56Z", "reason": "USER_INITIATED", }, } req = await _intent_req(alexa_client, data) assert req.status == 200 text = await req.text() assert text == "" async def test_intent_from_built_in_intent_library(alexa_client): """Test intents from the Built-in Intent Library.""" data = { "request": { "intent": { "name": "AMAZON.PlaybackAction<object@MusicCreativeWork>", "slots": { "object.byArtist.name": { "name": "object.byArtist.name", "value": "the shins", }, "object.composer.name": {"name": "object.composer.name"}, "object.contentSource": {"name": "object.contentSource"}, "object.era": {"name": "object.era"}, "object.genre": {"name": "object.genre"}, "object.name": {"name": "object.name"}, "object.owner.name": {"name": "object.owner.name"}, "object.select": {"name": "object.select"}, "object.sort": {"name": "object.sort"}, "object.type": {"name": "object.type", "value": "music"}, }, }, "timestamp": "2016-12-14T23:23:37Z", "type": "IntentRequest", "requestId": REQUEST_ID, }, "session": { "sessionId": SESSION_ID, "application": {"applicationId": APPLICATION_ID}, }, } req = await _intent_req(alexa_client, data) assert req.status == 200 data = await req.json() text = data.get("response", {}).get("outputSpeech", {}).get("text") assert text == "Playing 
the shins."
tchellomello/home-assistant
tests/components/alexa/test_intent.py
homeassistant/components/satel_integra/__init__.py
"""Support for controlling projector via the PJLink protocol.""" import logging from pypjlink import MUTE_AUDIO, Projector from pypjlink.projector import ProjectorError import voluptuous as vol from homeassistant.components.media_player import PLATFORM_SCHEMA, MediaPlayerEntity from homeassistant.components.media_player.const import ( SUPPORT_SELECT_SOURCE, SUPPORT_TURN_OFF, SUPPORT_TURN_ON, SUPPORT_VOLUME_MUTE, ) from homeassistant.const import ( CONF_HOST, CONF_NAME, CONF_PASSWORD, CONF_PORT, STATE_OFF, STATE_ON, ) import homeassistant.helpers.config_validation as cv _LOGGER = logging.getLogger(__name__) CONF_ENCODING = "encoding" DEFAULT_PORT = 4352 DEFAULT_ENCODING = "utf-8" DEFAULT_TIMEOUT = 10 PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend( { vol.Required(CONF_HOST): cv.string, vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port, vol.Optional(CONF_NAME): cv.string, vol.Optional(CONF_ENCODING, default=DEFAULT_ENCODING): cv.string, vol.Optional(CONF_PASSWORD): cv.string, } ) SUPPORT_PJLINK = ( SUPPORT_VOLUME_MUTE | SUPPORT_TURN_ON | SUPPORT_TURN_OFF | SUPPORT_SELECT_SOURCE ) def setup_platform(hass, config, add_entities, discovery_info=None): """Set up the PJLink platform.""" host = config.get(CONF_HOST) port = config.get(CONF_PORT) name = config.get(CONF_NAME) encoding = config.get(CONF_ENCODING) password = config.get(CONF_PASSWORD) if "pjlink" not in hass.data: hass.data["pjlink"] = {} hass_data = hass.data["pjlink"] device_label = f"{host}:{port}" if device_label in hass_data: return device = PjLinkDevice(host, port, name, encoding, password) hass_data[device_label] = device add_entities([device], True) def format_input_source(input_source_name, input_source_number): """Format input source for display in UI.""" return f"{input_source_name} {input_source_number}" class PjLinkDevice(MediaPlayerEntity): """Representation of a PJLink device.""" def __init__(self, host, port, name, encoding, password): """Iinitialize the PJLink device.""" self._host = host 
self._port = port self._name = name self._password = password self._encoding = encoding self._muted = False self._pwstate = STATE_OFF self._current_source = None with self.projector() as projector: if not self._name: self._name = projector.get_name() inputs = projector.get_inputs() self._source_name_mapping = {format_input_source(*x): x for x in inputs} self._source_list = sorted(self._source_name_mapping.keys()) def projector(self): """Create PJLink Projector instance.""" projector = Projector.from_address( self._host, self._port, self._encoding, DEFAULT_TIMEOUT ) projector.authenticate(self._password) return projector def update(self): """Get the latest state from the device.""" with self.projector() as projector: try: pwstate = projector.get_power() if pwstate in ("on", "warm-up"): self._pwstate = STATE_ON self._muted = projector.get_mute()[1] self._current_source = format_input_source(*projector.get_input()) else: self._pwstate = STATE_OFF self._muted = False self._current_source = None except KeyError as err: if str(err) == "'OK'": self._pwstate = STATE_OFF self._muted = False self._current_source = None else: raise except ProjectorError as err: if str(err) == "unavailable time": self._pwstate = STATE_OFF self._muted = False self._current_source = None else: raise @property def name(self): """Return the name of the device.""" return self._name @property def state(self): """Return the state of the device.""" return self._pwstate @property def is_volume_muted(self): """Return boolean indicating mute status.""" return self._muted @property def source(self): """Return current input source.""" return self._current_source @property def source_list(self): """Return all available input sources.""" return self._source_list @property def supported_features(self): """Return projector supported features.""" return SUPPORT_PJLINK def turn_off(self): """Turn projector off.""" if self._pwstate == STATE_ON: with self.projector() as projector: projector.set_power("off") def 
turn_on(self): """Turn projector on.""" if self._pwstate == STATE_OFF: with self.projector() as projector: projector.set_power("on") def mute_volume(self, mute): """Mute (true) of unmute (false) media player.""" with self.projector() as projector: projector.set_mute(MUTE_AUDIO, mute) def select_source(self, source): """Set the input source.""" source = self._source_name_mapping[source] with self.projector() as projector: projector.set_input(*source)
"""The tests for the Alexa component.""" # pylint: disable=protected-access import json import pytest from homeassistant.components import alexa from homeassistant.components.alexa import intent from homeassistant.core import callback from homeassistant.setup import async_setup_component SESSION_ID = "amzn1.echo-api.session.0000000-0000-0000-0000-00000000000" APPLICATION_ID = "amzn1.echo-sdk-ams.app.000000-d0ed-0000-ad00-000000d00ebe" REQUEST_ID = "amzn1.echo-api.request.0000000-0000-0000-0000-00000000000" AUTHORITY_ID = "amzn1.er-authority.000000-d0ed-0000-ad00-000000d00ebe.ZODIAC" BUILTIN_AUTH_ID = "amzn1.er-authority.000000-d0ed-0000-ad00-000000d00ebe.TEST" # pylint: disable=invalid-name calls = [] NPR_NEWS_MP3_URL = "https://pd.npr.org/anon.npr-mp3/npr/news/newscast.mp3" @pytest.fixture def alexa_client(loop, hass, hass_client): """Initialize a Home Assistant server for testing this module.""" @callback def mock_service(call): calls.append(call) hass.services.async_register("test", "alexa", mock_service) assert loop.run_until_complete( async_setup_component( hass, alexa.DOMAIN, { # Key is here to verify we allow other keys in config too "homeassistant": {}, "alexa": {}, }, ) ) assert loop.run_until_complete( async_setup_component( hass, "intent_script", { "intent_script": { "WhereAreWeIntent": { "speech": { "type": "plain", "text": """ {%- if is_state("device_tracker.paulus", "home") and is_state("device_tracker.anne_therese", "home") -%} You are both home, you silly {%- else -%} Anne Therese is at {{ states("device_tracker.anne_therese") }} and Paulus is at {{ states("device_tracker.paulus") }} {% endif %} """, } }, "GetZodiacHoroscopeIntent": { "speech": { "type": "plain", "text": "You told us your sign is {{ ZodiacSign }}.", } }, "AMAZON.PlaybackAction<object@MusicCreativeWork>": { "speech": { "type": "plain", "text": "Playing {{ object_byArtist_name }}.", } }, "CallServiceIntent": { "speech": { "type": "plain", "text": "Service called for {{ ZodiacSign }}", 
}, "card": { "type": "simple", "title": "Card title for {{ ZodiacSign }}", "content": "Card content: {{ ZodiacSign }}", }, "action": { "service": "test.alexa", "data_template": {"hello": "{{ ZodiacSign }}"}, "entity_id": "switch.test", }, }, APPLICATION_ID: { "speech": { "type": "plain", "text": "LaunchRequest has been received.", } }, } }, ) ) return loop.run_until_complete(hass_client()) def _intent_req(client, data=None): return client.post( intent.INTENTS_API_ENDPOINT, data=json.dumps(data or {}), headers={"content-type": "application/json"}, ) async def test_intent_launch_request(alexa_client): """Test the launch of a request.""" data = { "version": "1.0", "session": { "new": True, "sessionId": SESSION_ID, "application": {"applicationId": APPLICATION_ID}, "attributes": {}, "user": {"userId": "amzn1.account.AM3B00000000000000000000000"}, }, "request": { "type": "LaunchRequest", "requestId": REQUEST_ID, "timestamp": "2015-05-13T12:34:56Z", }, } req = await _intent_req(alexa_client, data) assert req.status == 200 data = await req.json() text = data.get("response", {}).get("outputSpeech", {}).get("text") assert text == "LaunchRequest has been received." async def test_intent_launch_request_not_configured(alexa_client): """Test the launch of a request.""" data = { "version": "1.0", "session": { "new": True, "sessionId": SESSION_ID, "application": { "applicationId": "amzn1.echo-sdk-ams.app.000000-d0ed-0000-ad00-000000d00000" }, "attributes": {}, "user": {"userId": "amzn1.account.AM3B00000000000000000000000"}, }, "request": { "type": "LaunchRequest", "requestId": REQUEST_ID, "timestamp": "2015-05-13T12:34:56Z", }, } req = await _intent_req(alexa_client, data) assert req.status == 200 data = await req.json() text = data.get("response", {}).get("outputSpeech", {}).get("text") assert text == "This intent is not yet configured within Home Assistant." 
async def test_intent_request_with_slots(alexa_client): """Test a request with slots.""" data = { "version": "1.0", "session": { "new": False, "sessionId": SESSION_ID, "application": {"applicationId": APPLICATION_ID}, "attributes": { "supportedHoroscopePeriods": { "daily": True, "weekly": False, "monthly": False, } }, "user": {"userId": "amzn1.account.AM3B00000000000000000000000"}, }, "request": { "type": "IntentRequest", "requestId": REQUEST_ID, "timestamp": "2015-05-13T12:34:56Z", "intent": { "name": "GetZodiacHoroscopeIntent", "slots": {"ZodiacSign": {"name": "ZodiacSign", "value": "virgo"}}, }, }, } req = await _intent_req(alexa_client, data) assert req.status == 200 data = await req.json() text = data.get("response", {}).get("outputSpeech", {}).get("text") assert text == "You told us your sign is virgo." async def test_intent_request_with_slots_and_synonym_resolution(alexa_client): """Test a request with slots and a name synonym.""" data = { "version": "1.0", "session": { "new": False, "sessionId": SESSION_ID, "application": {"applicationId": APPLICATION_ID}, "attributes": { "supportedHoroscopePeriods": { "daily": True, "weekly": False, "monthly": False, } }, "user": {"userId": "amzn1.account.AM3B00000000000000000000000"}, }, "request": { "type": "IntentRequest", "requestId": REQUEST_ID, "timestamp": "2015-05-13T12:34:56Z", "intent": { "name": "GetZodiacHoroscopeIntent", "slots": { "ZodiacSign": { "name": "ZodiacSign", "value": "V zodiac", "resolutions": { "resolutionsPerAuthority": [ { "authority": AUTHORITY_ID, "status": {"code": "ER_SUCCESS_MATCH"}, "values": [{"value": {"name": "Virgo"}}], }, { "authority": BUILTIN_AUTH_ID, "status": {"code": "ER_SUCCESS_NO_MATCH"}, "values": [{"value": {"name": "Test"}}], }, ] }, } }, }, }, } req = await _intent_req(alexa_client, data) assert req.status == 200 data = await req.json() text = data.get("response", {}).get("outputSpeech", {}).get("text") assert text == "You told us your sign is Virgo." 
async def test_intent_request_with_slots_and_multi_synonym_resolution(alexa_client): """Test a request with slots and multiple name synonyms.""" data = { "version": "1.0", "session": { "new": False, "sessionId": SESSION_ID, "application": {"applicationId": APPLICATION_ID}, "attributes": { "supportedHoroscopePeriods": { "daily": True, "weekly": False, "monthly": False, } }, "user": {"userId": "amzn1.account.AM3B00000000000000000000000"}, }, "request": { "type": "IntentRequest", "requestId": REQUEST_ID, "timestamp": "2015-05-13T12:34:56Z", "intent": { "name": "GetZodiacHoroscopeIntent", "slots": { "ZodiacSign": { "name": "ZodiacSign", "value": "V zodiac", "resolutions": { "resolutionsPerAuthority": [ { "authority": AUTHORITY_ID, "status": {"code": "ER_SUCCESS_MATCH"}, "values": [{"value": {"name": "Virgo"}}], }, { "authority": BUILTIN_AUTH_ID, "status": {"code": "ER_SUCCESS_MATCH"}, "values": [{"value": {"name": "Test"}}], }, ] }, } }, }, }, } req = await _intent_req(alexa_client, data) assert req.status == 200 data = await req.json() text = data.get("response", {}).get("outputSpeech", {}).get("text") assert text == "You told us your sign is V zodiac." async def test_intent_request_with_slots_but_no_value(alexa_client): """Test a request with slots but no value.""" data = { "version": "1.0", "session": { "new": False, "sessionId": SESSION_ID, "application": {"applicationId": APPLICATION_ID}, "attributes": { "supportedHoroscopePeriods": { "daily": True, "weekly": False, "monthly": False, } }, "user": {"userId": "amzn1.account.AM3B00000000000000000000000"}, }, "request": { "type": "IntentRequest", "requestId": REQUEST_ID, "timestamp": "2015-05-13T12:34:56Z", "intent": { "name": "GetZodiacHoroscopeIntent", "slots": {"ZodiacSign": {"name": "ZodiacSign"}}, }, }, } req = await _intent_req(alexa_client, data) assert req.status == 200 data = await req.json() text = data.get("response", {}).get("outputSpeech", {}).get("text") assert text == "You told us your sign is ." 
async def test_intent_request_without_slots(hass, alexa_client): """Test a request without slots.""" data = { "version": "1.0", "session": { "new": False, "sessionId": SESSION_ID, "application": {"applicationId": APPLICATION_ID}, "attributes": { "supportedHoroscopePeriods": { "daily": True, "weekly": False, "monthly": False, } }, "user": {"userId": "amzn1.account.AM3B00000000000000000000000"}, }, "request": { "type": "IntentRequest", "requestId": REQUEST_ID, "timestamp": "2015-05-13T12:34:56Z", "intent": {"name": "WhereAreWeIntent"}, }, } req = await _intent_req(alexa_client, data) assert req.status == 200 json = await req.json() text = json.get("response", {}).get("outputSpeech", {}).get("text") assert text == "Anne Therese is at unknown and Paulus is at unknown" hass.states.async_set("device_tracker.paulus", "home") hass.states.async_set("device_tracker.anne_therese", "home") req = await _intent_req(alexa_client, data) assert req.status == 200 json = await req.json() text = json.get("response", {}).get("outputSpeech", {}).get("text") assert text == "You are both home, you silly" async def test_intent_request_calling_service(alexa_client): """Test a request for calling a service.""" data = { "version": "1.0", "session": { "new": False, "sessionId": SESSION_ID, "application": {"applicationId": APPLICATION_ID}, "attributes": {}, "user": {"userId": "amzn1.account.AM3B00000000000000000000000"}, }, "request": { "type": "IntentRequest", "requestId": REQUEST_ID, "timestamp": "2015-05-13T12:34:56Z", "intent": { "name": "CallServiceIntent", "slots": {"ZodiacSign": {"name": "ZodiacSign", "value": "virgo"}}, }, }, } call_count = len(calls) req = await _intent_req(alexa_client, data) assert req.status == 200 assert call_count + 1 == len(calls) call = calls[-1] assert call.domain == "test" assert call.service == "alexa" assert call.data.get("entity_id") == ["switch.test"] assert call.data.get("hello") == "virgo" data = await req.json() assert data["response"]["card"]["title"] 
== "Card title for virgo" assert data["response"]["card"]["content"] == "Card content: virgo" assert data["response"]["outputSpeech"]["type"] == "PlainText" assert data["response"]["outputSpeech"]["text"] == "Service called for virgo" async def test_intent_session_ended_request(alexa_client): """Test the request for ending the session.""" data = { "version": "1.0", "session": { "new": False, "sessionId": SESSION_ID, "application": {"applicationId": APPLICATION_ID}, "attributes": { "supportedHoroscopePeriods": { "daily": True, "weekly": False, "monthly": False, } }, "user": {"userId": "amzn1.account.AM3B00000000000000000000000"}, }, "request": { "type": "SessionEndedRequest", "requestId": REQUEST_ID, "timestamp": "2015-05-13T12:34:56Z", "reason": "USER_INITIATED", }, } req = await _intent_req(alexa_client, data) assert req.status == 200 text = await req.text() assert text == "" async def test_intent_from_built_in_intent_library(alexa_client): """Test intents from the Built-in Intent Library.""" data = { "request": { "intent": { "name": "AMAZON.PlaybackAction<object@MusicCreativeWork>", "slots": { "object.byArtist.name": { "name": "object.byArtist.name", "value": "the shins", }, "object.composer.name": {"name": "object.composer.name"}, "object.contentSource": {"name": "object.contentSource"}, "object.era": {"name": "object.era"}, "object.genre": {"name": "object.genre"}, "object.name": {"name": "object.name"}, "object.owner.name": {"name": "object.owner.name"}, "object.select": {"name": "object.select"}, "object.sort": {"name": "object.sort"}, "object.type": {"name": "object.type", "value": "music"}, }, }, "timestamp": "2016-12-14T23:23:37Z", "type": "IntentRequest", "requestId": REQUEST_ID, }, "session": { "sessionId": SESSION_ID, "application": {"applicationId": APPLICATION_ID}, }, } req = await _intent_req(alexa_client, data) assert req.status == 200 data = await req.json() text = data.get("response", {}).get("outputSpeech", {}).get("text") assert text == "Playing 
the shins."
tchellomello/home-assistant
tests/components/alexa/test_intent.py
homeassistant/components/pjlink/media_player.py
"""Support for ADS covers.""" import logging import voluptuous as vol from homeassistant.components.cover import ( ATTR_POSITION, DEVICE_CLASSES_SCHEMA, PLATFORM_SCHEMA, SUPPORT_CLOSE, SUPPORT_OPEN, SUPPORT_SET_POSITION, SUPPORT_STOP, CoverEntity, ) from homeassistant.const import CONF_DEVICE_CLASS, CONF_NAME import homeassistant.helpers.config_validation as cv from . import ( CONF_ADS_VAR, CONF_ADS_VAR_POSITION, DATA_ADS, STATE_KEY_POSITION, STATE_KEY_STATE, AdsEntity, ) _LOGGER = logging.getLogger(__name__) DEFAULT_NAME = "ADS Cover" CONF_ADS_VAR_SET_POS = "adsvar_set_position" CONF_ADS_VAR_OPEN = "adsvar_open" CONF_ADS_VAR_CLOSE = "adsvar_close" CONF_ADS_VAR_STOP = "adsvar_stop" PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend( { vol.Optional(CONF_ADS_VAR): cv.string, vol.Optional(CONF_ADS_VAR_POSITION): cv.string, vol.Optional(CONF_ADS_VAR_SET_POS): cv.string, vol.Optional(CONF_ADS_VAR_CLOSE): cv.string, vol.Optional(CONF_ADS_VAR_OPEN): cv.string, vol.Optional(CONF_ADS_VAR_STOP): cv.string, vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string, vol.Optional(CONF_DEVICE_CLASS): DEVICE_CLASSES_SCHEMA, } ) def setup_platform(hass, config, add_entities, discovery_info=None): """Set up the cover platform for ADS.""" ads_hub = hass.data[DATA_ADS] ads_var_is_closed = config.get(CONF_ADS_VAR) ads_var_position = config.get(CONF_ADS_VAR_POSITION) ads_var_pos_set = config.get(CONF_ADS_VAR_SET_POS) ads_var_open = config.get(CONF_ADS_VAR_OPEN) ads_var_close = config.get(CONF_ADS_VAR_CLOSE) ads_var_stop = config.get(CONF_ADS_VAR_STOP) name = config[CONF_NAME] device_class = config.get(CONF_DEVICE_CLASS) add_entities( [ AdsCover( ads_hub, ads_var_is_closed, ads_var_position, ads_var_pos_set, ads_var_open, ads_var_close, ads_var_stop, name, device_class, ) ] ) class AdsCover(AdsEntity, CoverEntity): """Representation of ADS cover.""" def __init__( self, ads_hub, ads_var_is_closed, ads_var_position, ads_var_pos_set, ads_var_open, ads_var_close, ads_var_stop, name, device_class, ): 
"""Initialize AdsCover entity.""" super().__init__(ads_hub, name, ads_var_is_closed) if self._ads_var is None: if ads_var_position is not None: self._unique_id = ads_var_position elif ads_var_pos_set is not None: self._unique_id = ads_var_pos_set elif ads_var_open is not None: self._unique_id = ads_var_open self._state_dict[STATE_KEY_POSITION] = None self._ads_var_position = ads_var_position self._ads_var_pos_set = ads_var_pos_set self._ads_var_open = ads_var_open self._ads_var_close = ads_var_close self._ads_var_stop = ads_var_stop self._device_class = device_class async def async_added_to_hass(self): """Register device notification.""" if self._ads_var is not None: await self.async_initialize_device( self._ads_var, self._ads_hub.PLCTYPE_BOOL ) if self._ads_var_position is not None: await self.async_initialize_device( self._ads_var_position, self._ads_hub.PLCTYPE_BYTE, STATE_KEY_POSITION ) @property def device_class(self): """Return the class of this cover.""" return self._device_class @property def is_closed(self): """Return if the cover is closed.""" if self._ads_var is not None: return self._state_dict[STATE_KEY_STATE] if self._ads_var_position is not None: return self._state_dict[STATE_KEY_POSITION] == 0 return None @property def current_cover_position(self): """Return current position of cover.""" return self._state_dict[STATE_KEY_POSITION] @property def supported_features(self): """Flag supported features.""" supported_features = SUPPORT_OPEN | SUPPORT_CLOSE if self._ads_var_stop is not None: supported_features |= SUPPORT_STOP if self._ads_var_pos_set is not None: supported_features |= SUPPORT_SET_POSITION return supported_features def stop_cover(self, **kwargs): """Fire the stop action.""" if self._ads_var_stop: self._ads_hub.write_by_name( self._ads_var_stop, True, self._ads_hub.PLCTYPE_BOOL ) def set_cover_position(self, **kwargs): """Set cover position.""" position = kwargs[ATTR_POSITION] if self._ads_var_pos_set is not None: self._ads_hub.write_by_name( 
self._ads_var_pos_set, position, self._ads_hub.PLCTYPE_BYTE ) def open_cover(self, **kwargs): """Move the cover up.""" if self._ads_var_open is not None: self._ads_hub.write_by_name( self._ads_var_open, True, self._ads_hub.PLCTYPE_BOOL ) elif self._ads_var_pos_set is not None: self.set_cover_position(position=100) def close_cover(self, **kwargs): """Move the cover down.""" if self._ads_var_close is not None: self._ads_hub.write_by_name( self._ads_var_close, True, self._ads_hub.PLCTYPE_BOOL ) elif self._ads_var_pos_set is not None: self.set_cover_position(position=0) @property def available(self): """Return False if state has not been updated yet.""" if self._ads_var is not None or self._ads_var_position is not None: return ( self._state_dict[STATE_KEY_STATE] is not None or self._state_dict[STATE_KEY_POSITION] is not None ) return True
"""The tests for the Alexa component.""" # pylint: disable=protected-access import json import pytest from homeassistant.components import alexa from homeassistant.components.alexa import intent from homeassistant.core import callback from homeassistant.setup import async_setup_component SESSION_ID = "amzn1.echo-api.session.0000000-0000-0000-0000-00000000000" APPLICATION_ID = "amzn1.echo-sdk-ams.app.000000-d0ed-0000-ad00-000000d00ebe" REQUEST_ID = "amzn1.echo-api.request.0000000-0000-0000-0000-00000000000" AUTHORITY_ID = "amzn1.er-authority.000000-d0ed-0000-ad00-000000d00ebe.ZODIAC" BUILTIN_AUTH_ID = "amzn1.er-authority.000000-d0ed-0000-ad00-000000d00ebe.TEST" # pylint: disable=invalid-name calls = [] NPR_NEWS_MP3_URL = "https://pd.npr.org/anon.npr-mp3/npr/news/newscast.mp3" @pytest.fixture def alexa_client(loop, hass, hass_client): """Initialize a Home Assistant server for testing this module.""" @callback def mock_service(call): calls.append(call) hass.services.async_register("test", "alexa", mock_service) assert loop.run_until_complete( async_setup_component( hass, alexa.DOMAIN, { # Key is here to verify we allow other keys in config too "homeassistant": {}, "alexa": {}, }, ) ) assert loop.run_until_complete( async_setup_component( hass, "intent_script", { "intent_script": { "WhereAreWeIntent": { "speech": { "type": "plain", "text": """ {%- if is_state("device_tracker.paulus", "home") and is_state("device_tracker.anne_therese", "home") -%} You are both home, you silly {%- else -%} Anne Therese is at {{ states("device_tracker.anne_therese") }} and Paulus is at {{ states("device_tracker.paulus") }} {% endif %} """, } }, "GetZodiacHoroscopeIntent": { "speech": { "type": "plain", "text": "You told us your sign is {{ ZodiacSign }}.", } }, "AMAZON.PlaybackAction<object@MusicCreativeWork>": { "speech": { "type": "plain", "text": "Playing {{ object_byArtist_name }}.", } }, "CallServiceIntent": { "speech": { "type": "plain", "text": "Service called for {{ ZodiacSign }}", 
}, "card": { "type": "simple", "title": "Card title for {{ ZodiacSign }}", "content": "Card content: {{ ZodiacSign }}", }, "action": { "service": "test.alexa", "data_template": {"hello": "{{ ZodiacSign }}"}, "entity_id": "switch.test", }, }, APPLICATION_ID: { "speech": { "type": "plain", "text": "LaunchRequest has been received.", } }, } }, ) ) return loop.run_until_complete(hass_client()) def _intent_req(client, data=None): return client.post( intent.INTENTS_API_ENDPOINT, data=json.dumps(data or {}), headers={"content-type": "application/json"}, ) async def test_intent_launch_request(alexa_client): """Test the launch of a request.""" data = { "version": "1.0", "session": { "new": True, "sessionId": SESSION_ID, "application": {"applicationId": APPLICATION_ID}, "attributes": {}, "user": {"userId": "amzn1.account.AM3B00000000000000000000000"}, }, "request": { "type": "LaunchRequest", "requestId": REQUEST_ID, "timestamp": "2015-05-13T12:34:56Z", }, } req = await _intent_req(alexa_client, data) assert req.status == 200 data = await req.json() text = data.get("response", {}).get("outputSpeech", {}).get("text") assert text == "LaunchRequest has been received." async def test_intent_launch_request_not_configured(alexa_client): """Test the launch of a request.""" data = { "version": "1.0", "session": { "new": True, "sessionId": SESSION_ID, "application": { "applicationId": "amzn1.echo-sdk-ams.app.000000-d0ed-0000-ad00-000000d00000" }, "attributes": {}, "user": {"userId": "amzn1.account.AM3B00000000000000000000000"}, }, "request": { "type": "LaunchRequest", "requestId": REQUEST_ID, "timestamp": "2015-05-13T12:34:56Z", }, } req = await _intent_req(alexa_client, data) assert req.status == 200 data = await req.json() text = data.get("response", {}).get("outputSpeech", {}).get("text") assert text == "This intent is not yet configured within Home Assistant." 
async def test_intent_request_with_slots(alexa_client): """Test a request with slots.""" data = { "version": "1.0", "session": { "new": False, "sessionId": SESSION_ID, "application": {"applicationId": APPLICATION_ID}, "attributes": { "supportedHoroscopePeriods": { "daily": True, "weekly": False, "monthly": False, } }, "user": {"userId": "amzn1.account.AM3B00000000000000000000000"}, }, "request": { "type": "IntentRequest", "requestId": REQUEST_ID, "timestamp": "2015-05-13T12:34:56Z", "intent": { "name": "GetZodiacHoroscopeIntent", "slots": {"ZodiacSign": {"name": "ZodiacSign", "value": "virgo"}}, }, }, } req = await _intent_req(alexa_client, data) assert req.status == 200 data = await req.json() text = data.get("response", {}).get("outputSpeech", {}).get("text") assert text == "You told us your sign is virgo." async def test_intent_request_with_slots_and_synonym_resolution(alexa_client): """Test a request with slots and a name synonym.""" data = { "version": "1.0", "session": { "new": False, "sessionId": SESSION_ID, "application": {"applicationId": APPLICATION_ID}, "attributes": { "supportedHoroscopePeriods": { "daily": True, "weekly": False, "monthly": False, } }, "user": {"userId": "amzn1.account.AM3B00000000000000000000000"}, }, "request": { "type": "IntentRequest", "requestId": REQUEST_ID, "timestamp": "2015-05-13T12:34:56Z", "intent": { "name": "GetZodiacHoroscopeIntent", "slots": { "ZodiacSign": { "name": "ZodiacSign", "value": "V zodiac", "resolutions": { "resolutionsPerAuthority": [ { "authority": AUTHORITY_ID, "status": {"code": "ER_SUCCESS_MATCH"}, "values": [{"value": {"name": "Virgo"}}], }, { "authority": BUILTIN_AUTH_ID, "status": {"code": "ER_SUCCESS_NO_MATCH"}, "values": [{"value": {"name": "Test"}}], }, ] }, } }, }, }, } req = await _intent_req(alexa_client, data) assert req.status == 200 data = await req.json() text = data.get("response", {}).get("outputSpeech", {}).get("text") assert text == "You told us your sign is Virgo." 
async def test_intent_request_with_slots_and_multi_synonym_resolution(alexa_client): """Test a request with slots and multiple name synonyms.""" data = { "version": "1.0", "session": { "new": False, "sessionId": SESSION_ID, "application": {"applicationId": APPLICATION_ID}, "attributes": { "supportedHoroscopePeriods": { "daily": True, "weekly": False, "monthly": False, } }, "user": {"userId": "amzn1.account.AM3B00000000000000000000000"}, }, "request": { "type": "IntentRequest", "requestId": REQUEST_ID, "timestamp": "2015-05-13T12:34:56Z", "intent": { "name": "GetZodiacHoroscopeIntent", "slots": { "ZodiacSign": { "name": "ZodiacSign", "value": "V zodiac", "resolutions": { "resolutionsPerAuthority": [ { "authority": AUTHORITY_ID, "status": {"code": "ER_SUCCESS_MATCH"}, "values": [{"value": {"name": "Virgo"}}], }, { "authority": BUILTIN_AUTH_ID, "status": {"code": "ER_SUCCESS_MATCH"}, "values": [{"value": {"name": "Test"}}], }, ] }, } }, }, }, } req = await _intent_req(alexa_client, data) assert req.status == 200 data = await req.json() text = data.get("response", {}).get("outputSpeech", {}).get("text") assert text == "You told us your sign is V zodiac." async def test_intent_request_with_slots_but_no_value(alexa_client): """Test a request with slots but no value.""" data = { "version": "1.0", "session": { "new": False, "sessionId": SESSION_ID, "application": {"applicationId": APPLICATION_ID}, "attributes": { "supportedHoroscopePeriods": { "daily": True, "weekly": False, "monthly": False, } }, "user": {"userId": "amzn1.account.AM3B00000000000000000000000"}, }, "request": { "type": "IntentRequest", "requestId": REQUEST_ID, "timestamp": "2015-05-13T12:34:56Z", "intent": { "name": "GetZodiacHoroscopeIntent", "slots": {"ZodiacSign": {"name": "ZodiacSign"}}, }, }, } req = await _intent_req(alexa_client, data) assert req.status == 200 data = await req.json() text = data.get("response", {}).get("outputSpeech", {}).get("text") assert text == "You told us your sign is ." 
async def test_intent_request_without_slots(hass, alexa_client): """Test a request without slots.""" data = { "version": "1.0", "session": { "new": False, "sessionId": SESSION_ID, "application": {"applicationId": APPLICATION_ID}, "attributes": { "supportedHoroscopePeriods": { "daily": True, "weekly": False, "monthly": False, } }, "user": {"userId": "amzn1.account.AM3B00000000000000000000000"}, }, "request": { "type": "IntentRequest", "requestId": REQUEST_ID, "timestamp": "2015-05-13T12:34:56Z", "intent": {"name": "WhereAreWeIntent"}, }, } req = await _intent_req(alexa_client, data) assert req.status == 200 json = await req.json() text = json.get("response", {}).get("outputSpeech", {}).get("text") assert text == "Anne Therese is at unknown and Paulus is at unknown" hass.states.async_set("device_tracker.paulus", "home") hass.states.async_set("device_tracker.anne_therese", "home") req = await _intent_req(alexa_client, data) assert req.status == 200 json = await req.json() text = json.get("response", {}).get("outputSpeech", {}).get("text") assert text == "You are both home, you silly" async def test_intent_request_calling_service(alexa_client): """Test a request for calling a service.""" data = { "version": "1.0", "session": { "new": False, "sessionId": SESSION_ID, "application": {"applicationId": APPLICATION_ID}, "attributes": {}, "user": {"userId": "amzn1.account.AM3B00000000000000000000000"}, }, "request": { "type": "IntentRequest", "requestId": REQUEST_ID, "timestamp": "2015-05-13T12:34:56Z", "intent": { "name": "CallServiceIntent", "slots": {"ZodiacSign": {"name": "ZodiacSign", "value": "virgo"}}, }, }, } call_count = len(calls) req = await _intent_req(alexa_client, data) assert req.status == 200 assert call_count + 1 == len(calls) call = calls[-1] assert call.domain == "test" assert call.service == "alexa" assert call.data.get("entity_id") == ["switch.test"] assert call.data.get("hello") == "virgo" data = await req.json() assert data["response"]["card"]["title"] 
== "Card title for virgo" assert data["response"]["card"]["content"] == "Card content: virgo" assert data["response"]["outputSpeech"]["type"] == "PlainText" assert data["response"]["outputSpeech"]["text"] == "Service called for virgo" async def test_intent_session_ended_request(alexa_client): """Test the request for ending the session.""" data = { "version": "1.0", "session": { "new": False, "sessionId": SESSION_ID, "application": {"applicationId": APPLICATION_ID}, "attributes": { "supportedHoroscopePeriods": { "daily": True, "weekly": False, "monthly": False, } }, "user": {"userId": "amzn1.account.AM3B00000000000000000000000"}, }, "request": { "type": "SessionEndedRequest", "requestId": REQUEST_ID, "timestamp": "2015-05-13T12:34:56Z", "reason": "USER_INITIATED", }, } req = await _intent_req(alexa_client, data) assert req.status == 200 text = await req.text() assert text == "" async def test_intent_from_built_in_intent_library(alexa_client): """Test intents from the Built-in Intent Library.""" data = { "request": { "intent": { "name": "AMAZON.PlaybackAction<object@MusicCreativeWork>", "slots": { "object.byArtist.name": { "name": "object.byArtist.name", "value": "the shins", }, "object.composer.name": {"name": "object.composer.name"}, "object.contentSource": {"name": "object.contentSource"}, "object.era": {"name": "object.era"}, "object.genre": {"name": "object.genre"}, "object.name": {"name": "object.name"}, "object.owner.name": {"name": "object.owner.name"}, "object.select": {"name": "object.select"}, "object.sort": {"name": "object.sort"}, "object.type": {"name": "object.type", "value": "music"}, }, }, "timestamp": "2016-12-14T23:23:37Z", "type": "IntentRequest", "requestId": REQUEST_ID, }, "session": { "sessionId": SESSION_ID, "application": {"applicationId": APPLICATION_ID}, }, } req = await _intent_req(alexa_client, data) assert req.status == 200 data = await req.json() text = data.get("response", {}).get("outputSpeech", {}).get("text") assert text == "Playing 
the shins."
tchellomello/home-assistant
tests/components/alexa/test_intent.py
homeassistant/components/ads/cover.py
"""Support for ESPHome switches.""" import logging from typing import Optional from aioesphomeapi import SwitchInfo, SwitchState from homeassistant.components.switch import SwitchEntity from homeassistant.config_entries import ConfigEntry from homeassistant.helpers.typing import HomeAssistantType from . import EsphomeEntity, esphome_state_property, platform_async_setup_entry _LOGGER = logging.getLogger(__name__) async def async_setup_entry( hass: HomeAssistantType, entry: ConfigEntry, async_add_entities ) -> None: """Set up ESPHome switches based on a config entry.""" await platform_async_setup_entry( hass, entry, async_add_entities, component_key="switch", info_type=SwitchInfo, entity_type=EsphomeSwitch, state_type=SwitchState, ) class EsphomeSwitch(EsphomeEntity, SwitchEntity): """A switch implementation for ESPHome.""" @property def _static_info(self) -> SwitchInfo: return super()._static_info @property def _state(self) -> Optional[SwitchState]: return super()._state @property def icon(self) -> str: """Return the icon.""" return self._static_info.icon @property def assumed_state(self) -> bool: """Return true if we do optimistic updates.""" return self._static_info.assumed_state # https://github.com/PyCQA/pylint/issues/3150 for @esphome_state_property # pylint: disable=invalid-overridden-method @esphome_state_property def is_on(self) -> Optional[bool]: """Return true if the switch is on.""" return self._state.state async def async_turn_on(self, **kwargs) -> None: """Turn the entity on.""" await self._client.switch_command(self._static_info.key, True) async def async_turn_off(self, **kwargs) -> None: """Turn the entity off.""" await self._client.switch_command(self._static_info.key, False)
"""The tests for the Alexa component.""" # pylint: disable=protected-access import json import pytest from homeassistant.components import alexa from homeassistant.components.alexa import intent from homeassistant.core import callback from homeassistant.setup import async_setup_component SESSION_ID = "amzn1.echo-api.session.0000000-0000-0000-0000-00000000000" APPLICATION_ID = "amzn1.echo-sdk-ams.app.000000-d0ed-0000-ad00-000000d00ebe" REQUEST_ID = "amzn1.echo-api.request.0000000-0000-0000-0000-00000000000" AUTHORITY_ID = "amzn1.er-authority.000000-d0ed-0000-ad00-000000d00ebe.ZODIAC" BUILTIN_AUTH_ID = "amzn1.er-authority.000000-d0ed-0000-ad00-000000d00ebe.TEST" # pylint: disable=invalid-name calls = [] NPR_NEWS_MP3_URL = "https://pd.npr.org/anon.npr-mp3/npr/news/newscast.mp3" @pytest.fixture def alexa_client(loop, hass, hass_client): """Initialize a Home Assistant server for testing this module.""" @callback def mock_service(call): calls.append(call) hass.services.async_register("test", "alexa", mock_service) assert loop.run_until_complete( async_setup_component( hass, alexa.DOMAIN, { # Key is here to verify we allow other keys in config too "homeassistant": {}, "alexa": {}, }, ) ) assert loop.run_until_complete( async_setup_component( hass, "intent_script", { "intent_script": { "WhereAreWeIntent": { "speech": { "type": "plain", "text": """ {%- if is_state("device_tracker.paulus", "home") and is_state("device_tracker.anne_therese", "home") -%} You are both home, you silly {%- else -%} Anne Therese is at {{ states("device_tracker.anne_therese") }} and Paulus is at {{ states("device_tracker.paulus") }} {% endif %} """, } }, "GetZodiacHoroscopeIntent": { "speech": { "type": "plain", "text": "You told us your sign is {{ ZodiacSign }}.", } }, "AMAZON.PlaybackAction<object@MusicCreativeWork>": { "speech": { "type": "plain", "text": "Playing {{ object_byArtist_name }}.", } }, "CallServiceIntent": { "speech": { "type": "plain", "text": "Service called for {{ ZodiacSign }}", 
}, "card": { "type": "simple", "title": "Card title for {{ ZodiacSign }}", "content": "Card content: {{ ZodiacSign }}", }, "action": { "service": "test.alexa", "data_template": {"hello": "{{ ZodiacSign }}"}, "entity_id": "switch.test", }, }, APPLICATION_ID: { "speech": { "type": "plain", "text": "LaunchRequest has been received.", } }, } }, ) ) return loop.run_until_complete(hass_client()) def _intent_req(client, data=None): return client.post( intent.INTENTS_API_ENDPOINT, data=json.dumps(data or {}), headers={"content-type": "application/json"}, ) async def test_intent_launch_request(alexa_client): """Test the launch of a request.""" data = { "version": "1.0", "session": { "new": True, "sessionId": SESSION_ID, "application": {"applicationId": APPLICATION_ID}, "attributes": {}, "user": {"userId": "amzn1.account.AM3B00000000000000000000000"}, }, "request": { "type": "LaunchRequest", "requestId": REQUEST_ID, "timestamp": "2015-05-13T12:34:56Z", }, } req = await _intent_req(alexa_client, data) assert req.status == 200 data = await req.json() text = data.get("response", {}).get("outputSpeech", {}).get("text") assert text == "LaunchRequest has been received." async def test_intent_launch_request_not_configured(alexa_client): """Test the launch of a request.""" data = { "version": "1.0", "session": { "new": True, "sessionId": SESSION_ID, "application": { "applicationId": "amzn1.echo-sdk-ams.app.000000-d0ed-0000-ad00-000000d00000" }, "attributes": {}, "user": {"userId": "amzn1.account.AM3B00000000000000000000000"}, }, "request": { "type": "LaunchRequest", "requestId": REQUEST_ID, "timestamp": "2015-05-13T12:34:56Z", }, } req = await _intent_req(alexa_client, data) assert req.status == 200 data = await req.json() text = data.get("response", {}).get("outputSpeech", {}).get("text") assert text == "This intent is not yet configured within Home Assistant." 
async def test_intent_request_with_slots(alexa_client): """Test a request with slots.""" data = { "version": "1.0", "session": { "new": False, "sessionId": SESSION_ID, "application": {"applicationId": APPLICATION_ID}, "attributes": { "supportedHoroscopePeriods": { "daily": True, "weekly": False, "monthly": False, } }, "user": {"userId": "amzn1.account.AM3B00000000000000000000000"}, }, "request": { "type": "IntentRequest", "requestId": REQUEST_ID, "timestamp": "2015-05-13T12:34:56Z", "intent": { "name": "GetZodiacHoroscopeIntent", "slots": {"ZodiacSign": {"name": "ZodiacSign", "value": "virgo"}}, }, }, } req = await _intent_req(alexa_client, data) assert req.status == 200 data = await req.json() text = data.get("response", {}).get("outputSpeech", {}).get("text") assert text == "You told us your sign is virgo." async def test_intent_request_with_slots_and_synonym_resolution(alexa_client): """Test a request with slots and a name synonym.""" data = { "version": "1.0", "session": { "new": False, "sessionId": SESSION_ID, "application": {"applicationId": APPLICATION_ID}, "attributes": { "supportedHoroscopePeriods": { "daily": True, "weekly": False, "monthly": False, } }, "user": {"userId": "amzn1.account.AM3B00000000000000000000000"}, }, "request": { "type": "IntentRequest", "requestId": REQUEST_ID, "timestamp": "2015-05-13T12:34:56Z", "intent": { "name": "GetZodiacHoroscopeIntent", "slots": { "ZodiacSign": { "name": "ZodiacSign", "value": "V zodiac", "resolutions": { "resolutionsPerAuthority": [ { "authority": AUTHORITY_ID, "status": {"code": "ER_SUCCESS_MATCH"}, "values": [{"value": {"name": "Virgo"}}], }, { "authority": BUILTIN_AUTH_ID, "status": {"code": "ER_SUCCESS_NO_MATCH"}, "values": [{"value": {"name": "Test"}}], }, ] }, } }, }, }, } req = await _intent_req(alexa_client, data) assert req.status == 200 data = await req.json() text = data.get("response", {}).get("outputSpeech", {}).get("text") assert text == "You told us your sign is Virgo." 
async def test_intent_request_with_slots_and_multi_synonym_resolution(alexa_client): """Test a request with slots and multiple name synonyms.""" data = { "version": "1.0", "session": { "new": False, "sessionId": SESSION_ID, "application": {"applicationId": APPLICATION_ID}, "attributes": { "supportedHoroscopePeriods": { "daily": True, "weekly": False, "monthly": False, } }, "user": {"userId": "amzn1.account.AM3B00000000000000000000000"}, }, "request": { "type": "IntentRequest", "requestId": REQUEST_ID, "timestamp": "2015-05-13T12:34:56Z", "intent": { "name": "GetZodiacHoroscopeIntent", "slots": { "ZodiacSign": { "name": "ZodiacSign", "value": "V zodiac", "resolutions": { "resolutionsPerAuthority": [ { "authority": AUTHORITY_ID, "status": {"code": "ER_SUCCESS_MATCH"}, "values": [{"value": {"name": "Virgo"}}], }, { "authority": BUILTIN_AUTH_ID, "status": {"code": "ER_SUCCESS_MATCH"}, "values": [{"value": {"name": "Test"}}], }, ] }, } }, }, }, } req = await _intent_req(alexa_client, data) assert req.status == 200 data = await req.json() text = data.get("response", {}).get("outputSpeech", {}).get("text") assert text == "You told us your sign is V zodiac." async def test_intent_request_with_slots_but_no_value(alexa_client): """Test a request with slots but no value.""" data = { "version": "1.0", "session": { "new": False, "sessionId": SESSION_ID, "application": {"applicationId": APPLICATION_ID}, "attributes": { "supportedHoroscopePeriods": { "daily": True, "weekly": False, "monthly": False, } }, "user": {"userId": "amzn1.account.AM3B00000000000000000000000"}, }, "request": { "type": "IntentRequest", "requestId": REQUEST_ID, "timestamp": "2015-05-13T12:34:56Z", "intent": { "name": "GetZodiacHoroscopeIntent", "slots": {"ZodiacSign": {"name": "ZodiacSign"}}, }, }, } req = await _intent_req(alexa_client, data) assert req.status == 200 data = await req.json() text = data.get("response", {}).get("outputSpeech", {}).get("text") assert text == "You told us your sign is ." 
async def test_intent_request_without_slots(hass, alexa_client): """Test a request without slots.""" data = { "version": "1.0", "session": { "new": False, "sessionId": SESSION_ID, "application": {"applicationId": APPLICATION_ID}, "attributes": { "supportedHoroscopePeriods": { "daily": True, "weekly": False, "monthly": False, } }, "user": {"userId": "amzn1.account.AM3B00000000000000000000000"}, }, "request": { "type": "IntentRequest", "requestId": REQUEST_ID, "timestamp": "2015-05-13T12:34:56Z", "intent": {"name": "WhereAreWeIntent"}, }, } req = await _intent_req(alexa_client, data) assert req.status == 200 json = await req.json() text = json.get("response", {}).get("outputSpeech", {}).get("text") assert text == "Anne Therese is at unknown and Paulus is at unknown" hass.states.async_set("device_tracker.paulus", "home") hass.states.async_set("device_tracker.anne_therese", "home") req = await _intent_req(alexa_client, data) assert req.status == 200 json = await req.json() text = json.get("response", {}).get("outputSpeech", {}).get("text") assert text == "You are both home, you silly" async def test_intent_request_calling_service(alexa_client): """Test a request for calling a service.""" data = { "version": "1.0", "session": { "new": False, "sessionId": SESSION_ID, "application": {"applicationId": APPLICATION_ID}, "attributes": {}, "user": {"userId": "amzn1.account.AM3B00000000000000000000000"}, }, "request": { "type": "IntentRequest", "requestId": REQUEST_ID, "timestamp": "2015-05-13T12:34:56Z", "intent": { "name": "CallServiceIntent", "slots": {"ZodiacSign": {"name": "ZodiacSign", "value": "virgo"}}, }, }, } call_count = len(calls) req = await _intent_req(alexa_client, data) assert req.status == 200 assert call_count + 1 == len(calls) call = calls[-1] assert call.domain == "test" assert call.service == "alexa" assert call.data.get("entity_id") == ["switch.test"] assert call.data.get("hello") == "virgo" data = await req.json() assert data["response"]["card"]["title"] 
== "Card title for virgo" assert data["response"]["card"]["content"] == "Card content: virgo" assert data["response"]["outputSpeech"]["type"] == "PlainText" assert data["response"]["outputSpeech"]["text"] == "Service called for virgo" async def test_intent_session_ended_request(alexa_client): """Test the request for ending the session.""" data = { "version": "1.0", "session": { "new": False, "sessionId": SESSION_ID, "application": {"applicationId": APPLICATION_ID}, "attributes": { "supportedHoroscopePeriods": { "daily": True, "weekly": False, "monthly": False, } }, "user": {"userId": "amzn1.account.AM3B00000000000000000000000"}, }, "request": { "type": "SessionEndedRequest", "requestId": REQUEST_ID, "timestamp": "2015-05-13T12:34:56Z", "reason": "USER_INITIATED", }, } req = await _intent_req(alexa_client, data) assert req.status == 200 text = await req.text() assert text == "" async def test_intent_from_built_in_intent_library(alexa_client): """Test intents from the Built-in Intent Library.""" data = { "request": { "intent": { "name": "AMAZON.PlaybackAction<object@MusicCreativeWork>", "slots": { "object.byArtist.name": { "name": "object.byArtist.name", "value": "the shins", }, "object.composer.name": {"name": "object.composer.name"}, "object.contentSource": {"name": "object.contentSource"}, "object.era": {"name": "object.era"}, "object.genre": {"name": "object.genre"}, "object.name": {"name": "object.name"}, "object.owner.name": {"name": "object.owner.name"}, "object.select": {"name": "object.select"}, "object.sort": {"name": "object.sort"}, "object.type": {"name": "object.type", "value": "music"}, }, }, "timestamp": "2016-12-14T23:23:37Z", "type": "IntentRequest", "requestId": REQUEST_ID, }, "session": { "sessionId": SESSION_ID, "application": {"applicationId": APPLICATION_ID}, }, } req = await _intent_req(alexa_client, data) assert req.status == 200 data = await req.json() text = data.get("response", {}).get("outputSpeech", {}).get("text") assert text == "Playing 
the shins."
tchellomello/home-assistant
tests/components/alexa/test_intent.py
homeassistant/components/esphome/switch.py
"""Support for IOTA wallets.""" from datetime import timedelta import logging from iota import Iota import voluptuous as vol import homeassistant.helpers.config_validation as cv from homeassistant.helpers.discovery import load_platform from homeassistant.helpers.entity import Entity _LOGGER = logging.getLogger(__name__) CONF_IRI = "iri" CONF_TESTNET = "testnet" CONF_WALLET_NAME = "name" CONF_WALLET_SEED = "seed" CONF_WALLETS = "wallets" DOMAIN = "iota" IOTA_PLATFORMS = ["sensor"] SCAN_INTERVAL = timedelta(minutes=10) WALLET_CONFIG = vol.Schema( { vol.Required(CONF_WALLET_NAME): cv.string, vol.Required(CONF_WALLET_SEED): cv.string, } ) CONFIG_SCHEMA = vol.Schema( { DOMAIN: vol.Schema( { vol.Required(CONF_IRI): cv.string, vol.Optional(CONF_TESTNET, default=False): cv.boolean, vol.Required(CONF_WALLETS): vol.All(cv.ensure_list, [WALLET_CONFIG]), } ) }, extra=vol.ALLOW_EXTRA, ) def setup(hass, config): """Set up the IOTA component.""" iota_config = config[DOMAIN] for platform in IOTA_PLATFORMS: load_platform(hass, platform, DOMAIN, iota_config, config) return True class IotaDevice(Entity): """Representation of a IOTA device.""" def __init__(self, name, seed, iri, is_testnet=False): """Initialise the IOTA device.""" self._name = name self._seed = seed self.iri = iri self.is_testnet = is_testnet @property def name(self): """Return the default name of the device.""" return self._name @property def device_state_attributes(self): """Return the state attributes of the device.""" attr = {CONF_WALLET_NAME: self._name} return attr @property def api(self): """Construct API object for interaction with the IRI node.""" return Iota(adapter=self.iri, seed=self._seed)
"""The tests for the Alexa component.""" # pylint: disable=protected-access import json import pytest from homeassistant.components import alexa from homeassistant.components.alexa import intent from homeassistant.core import callback from homeassistant.setup import async_setup_component SESSION_ID = "amzn1.echo-api.session.0000000-0000-0000-0000-00000000000" APPLICATION_ID = "amzn1.echo-sdk-ams.app.000000-d0ed-0000-ad00-000000d00ebe" REQUEST_ID = "amzn1.echo-api.request.0000000-0000-0000-0000-00000000000" AUTHORITY_ID = "amzn1.er-authority.000000-d0ed-0000-ad00-000000d00ebe.ZODIAC" BUILTIN_AUTH_ID = "amzn1.er-authority.000000-d0ed-0000-ad00-000000d00ebe.TEST" # pylint: disable=invalid-name calls = [] NPR_NEWS_MP3_URL = "https://pd.npr.org/anon.npr-mp3/npr/news/newscast.mp3" @pytest.fixture def alexa_client(loop, hass, hass_client): """Initialize a Home Assistant server for testing this module.""" @callback def mock_service(call): calls.append(call) hass.services.async_register("test", "alexa", mock_service) assert loop.run_until_complete( async_setup_component( hass, alexa.DOMAIN, { # Key is here to verify we allow other keys in config too "homeassistant": {}, "alexa": {}, }, ) ) assert loop.run_until_complete( async_setup_component( hass, "intent_script", { "intent_script": { "WhereAreWeIntent": { "speech": { "type": "plain", "text": """ {%- if is_state("device_tracker.paulus", "home") and is_state("device_tracker.anne_therese", "home") -%} You are both home, you silly {%- else -%} Anne Therese is at {{ states("device_tracker.anne_therese") }} and Paulus is at {{ states("device_tracker.paulus") }} {% endif %} """, } }, "GetZodiacHoroscopeIntent": { "speech": { "type": "plain", "text": "You told us your sign is {{ ZodiacSign }}.", } }, "AMAZON.PlaybackAction<object@MusicCreativeWork>": { "speech": { "type": "plain", "text": "Playing {{ object_byArtist_name }}.", } }, "CallServiceIntent": { "speech": { "type": "plain", "text": "Service called for {{ ZodiacSign }}", 
}, "card": { "type": "simple", "title": "Card title for {{ ZodiacSign }}", "content": "Card content: {{ ZodiacSign }}", }, "action": { "service": "test.alexa", "data_template": {"hello": "{{ ZodiacSign }}"}, "entity_id": "switch.test", }, }, APPLICATION_ID: { "speech": { "type": "plain", "text": "LaunchRequest has been received.", } }, } }, ) ) return loop.run_until_complete(hass_client()) def _intent_req(client, data=None): return client.post( intent.INTENTS_API_ENDPOINT, data=json.dumps(data or {}), headers={"content-type": "application/json"}, ) async def test_intent_launch_request(alexa_client): """Test the launch of a request.""" data = { "version": "1.0", "session": { "new": True, "sessionId": SESSION_ID, "application": {"applicationId": APPLICATION_ID}, "attributes": {}, "user": {"userId": "amzn1.account.AM3B00000000000000000000000"}, }, "request": { "type": "LaunchRequest", "requestId": REQUEST_ID, "timestamp": "2015-05-13T12:34:56Z", }, } req = await _intent_req(alexa_client, data) assert req.status == 200 data = await req.json() text = data.get("response", {}).get("outputSpeech", {}).get("text") assert text == "LaunchRequest has been received." async def test_intent_launch_request_not_configured(alexa_client): """Test the launch of a request.""" data = { "version": "1.0", "session": { "new": True, "sessionId": SESSION_ID, "application": { "applicationId": "amzn1.echo-sdk-ams.app.000000-d0ed-0000-ad00-000000d00000" }, "attributes": {}, "user": {"userId": "amzn1.account.AM3B00000000000000000000000"}, }, "request": { "type": "LaunchRequest", "requestId": REQUEST_ID, "timestamp": "2015-05-13T12:34:56Z", }, } req = await _intent_req(alexa_client, data) assert req.status == 200 data = await req.json() text = data.get("response", {}).get("outputSpeech", {}).get("text") assert text == "This intent is not yet configured within Home Assistant." 
async def test_intent_request_with_slots(alexa_client): """Test a request with slots.""" data = { "version": "1.0", "session": { "new": False, "sessionId": SESSION_ID, "application": {"applicationId": APPLICATION_ID}, "attributes": { "supportedHoroscopePeriods": { "daily": True, "weekly": False, "monthly": False, } }, "user": {"userId": "amzn1.account.AM3B00000000000000000000000"}, }, "request": { "type": "IntentRequest", "requestId": REQUEST_ID, "timestamp": "2015-05-13T12:34:56Z", "intent": { "name": "GetZodiacHoroscopeIntent", "slots": {"ZodiacSign": {"name": "ZodiacSign", "value": "virgo"}}, }, }, } req = await _intent_req(alexa_client, data) assert req.status == 200 data = await req.json() text = data.get("response", {}).get("outputSpeech", {}).get("text") assert text == "You told us your sign is virgo." async def test_intent_request_with_slots_and_synonym_resolution(alexa_client): """Test a request with slots and a name synonym.""" data = { "version": "1.0", "session": { "new": False, "sessionId": SESSION_ID, "application": {"applicationId": APPLICATION_ID}, "attributes": { "supportedHoroscopePeriods": { "daily": True, "weekly": False, "monthly": False, } }, "user": {"userId": "amzn1.account.AM3B00000000000000000000000"}, }, "request": { "type": "IntentRequest", "requestId": REQUEST_ID, "timestamp": "2015-05-13T12:34:56Z", "intent": { "name": "GetZodiacHoroscopeIntent", "slots": { "ZodiacSign": { "name": "ZodiacSign", "value": "V zodiac", "resolutions": { "resolutionsPerAuthority": [ { "authority": AUTHORITY_ID, "status": {"code": "ER_SUCCESS_MATCH"}, "values": [{"value": {"name": "Virgo"}}], }, { "authority": BUILTIN_AUTH_ID, "status": {"code": "ER_SUCCESS_NO_MATCH"}, "values": [{"value": {"name": "Test"}}], }, ] }, } }, }, }, } req = await _intent_req(alexa_client, data) assert req.status == 200 data = await req.json() text = data.get("response", {}).get("outputSpeech", {}).get("text") assert text == "You told us your sign is Virgo." 
async def test_intent_request_with_slots_and_multi_synonym_resolution(alexa_client): """Test a request with slots and multiple name synonyms.""" data = { "version": "1.0", "session": { "new": False, "sessionId": SESSION_ID, "application": {"applicationId": APPLICATION_ID}, "attributes": { "supportedHoroscopePeriods": { "daily": True, "weekly": False, "monthly": False, } }, "user": {"userId": "amzn1.account.AM3B00000000000000000000000"}, }, "request": { "type": "IntentRequest", "requestId": REQUEST_ID, "timestamp": "2015-05-13T12:34:56Z", "intent": { "name": "GetZodiacHoroscopeIntent", "slots": { "ZodiacSign": { "name": "ZodiacSign", "value": "V zodiac", "resolutions": { "resolutionsPerAuthority": [ { "authority": AUTHORITY_ID, "status": {"code": "ER_SUCCESS_MATCH"}, "values": [{"value": {"name": "Virgo"}}], }, { "authority": BUILTIN_AUTH_ID, "status": {"code": "ER_SUCCESS_MATCH"}, "values": [{"value": {"name": "Test"}}], }, ] }, } }, }, }, } req = await _intent_req(alexa_client, data) assert req.status == 200 data = await req.json() text = data.get("response", {}).get("outputSpeech", {}).get("text") assert text == "You told us your sign is V zodiac." async def test_intent_request_with_slots_but_no_value(alexa_client): """Test a request with slots but no value.""" data = { "version": "1.0", "session": { "new": False, "sessionId": SESSION_ID, "application": {"applicationId": APPLICATION_ID}, "attributes": { "supportedHoroscopePeriods": { "daily": True, "weekly": False, "monthly": False, } }, "user": {"userId": "amzn1.account.AM3B00000000000000000000000"}, }, "request": { "type": "IntentRequest", "requestId": REQUEST_ID, "timestamp": "2015-05-13T12:34:56Z", "intent": { "name": "GetZodiacHoroscopeIntent", "slots": {"ZodiacSign": {"name": "ZodiacSign"}}, }, }, } req = await _intent_req(alexa_client, data) assert req.status == 200 data = await req.json() text = data.get("response", {}).get("outputSpeech", {}).get("text") assert text == "You told us your sign is ." 
async def test_intent_request_without_slots(hass, alexa_client): """Test a request without slots.""" data = { "version": "1.0", "session": { "new": False, "sessionId": SESSION_ID, "application": {"applicationId": APPLICATION_ID}, "attributes": { "supportedHoroscopePeriods": { "daily": True, "weekly": False, "monthly": False, } }, "user": {"userId": "amzn1.account.AM3B00000000000000000000000"}, }, "request": { "type": "IntentRequest", "requestId": REQUEST_ID, "timestamp": "2015-05-13T12:34:56Z", "intent": {"name": "WhereAreWeIntent"}, }, } req = await _intent_req(alexa_client, data) assert req.status == 200 json = await req.json() text = json.get("response", {}).get("outputSpeech", {}).get("text") assert text == "Anne Therese is at unknown and Paulus is at unknown" hass.states.async_set("device_tracker.paulus", "home") hass.states.async_set("device_tracker.anne_therese", "home") req = await _intent_req(alexa_client, data) assert req.status == 200 json = await req.json() text = json.get("response", {}).get("outputSpeech", {}).get("text") assert text == "You are both home, you silly" async def test_intent_request_calling_service(alexa_client): """Test a request for calling a service.""" data = { "version": "1.0", "session": { "new": False, "sessionId": SESSION_ID, "application": {"applicationId": APPLICATION_ID}, "attributes": {}, "user": {"userId": "amzn1.account.AM3B00000000000000000000000"}, }, "request": { "type": "IntentRequest", "requestId": REQUEST_ID, "timestamp": "2015-05-13T12:34:56Z", "intent": { "name": "CallServiceIntent", "slots": {"ZodiacSign": {"name": "ZodiacSign", "value": "virgo"}}, }, }, } call_count = len(calls) req = await _intent_req(alexa_client, data) assert req.status == 200 assert call_count + 1 == len(calls) call = calls[-1] assert call.domain == "test" assert call.service == "alexa" assert call.data.get("entity_id") == ["switch.test"] assert call.data.get("hello") == "virgo" data = await req.json() assert data["response"]["card"]["title"] 
== "Card title for virgo" assert data["response"]["card"]["content"] == "Card content: virgo" assert data["response"]["outputSpeech"]["type"] == "PlainText" assert data["response"]["outputSpeech"]["text"] == "Service called for virgo" async def test_intent_session_ended_request(alexa_client): """Test the request for ending the session.""" data = { "version": "1.0", "session": { "new": False, "sessionId": SESSION_ID, "application": {"applicationId": APPLICATION_ID}, "attributes": { "supportedHoroscopePeriods": { "daily": True, "weekly": False, "monthly": False, } }, "user": {"userId": "amzn1.account.AM3B00000000000000000000000"}, }, "request": { "type": "SessionEndedRequest", "requestId": REQUEST_ID, "timestamp": "2015-05-13T12:34:56Z", "reason": "USER_INITIATED", }, } req = await _intent_req(alexa_client, data) assert req.status == 200 text = await req.text() assert text == "" async def test_intent_from_built_in_intent_library(alexa_client): """Test intents from the Built-in Intent Library.""" data = { "request": { "intent": { "name": "AMAZON.PlaybackAction<object@MusicCreativeWork>", "slots": { "object.byArtist.name": { "name": "object.byArtist.name", "value": "the shins", }, "object.composer.name": {"name": "object.composer.name"}, "object.contentSource": {"name": "object.contentSource"}, "object.era": {"name": "object.era"}, "object.genre": {"name": "object.genre"}, "object.name": {"name": "object.name"}, "object.owner.name": {"name": "object.owner.name"}, "object.select": {"name": "object.select"}, "object.sort": {"name": "object.sort"}, "object.type": {"name": "object.type", "value": "music"}, }, }, "timestamp": "2016-12-14T23:23:37Z", "type": "IntentRequest", "requestId": REQUEST_ID, }, "session": { "sessionId": SESSION_ID, "application": {"applicationId": APPLICATION_ID}, }, } req = await _intent_req(alexa_client, data) assert req.status == 200 data = await req.json() text = data.get("response", {}).get("outputSpeech", {}).get("text") assert text == "Playing 
the shins."
tchellomello/home-assistant
tests/components/alexa/test_intent.py
homeassistant/components/iota/__init__.py
"""Provides device automations for Fan.""" from typing import List import voluptuous as vol from homeassistant.components.automation import AutomationActionType from homeassistant.components.device_automation import TRIGGER_BASE_SCHEMA from homeassistant.components.homeassistant.triggers import state as state_trigger from homeassistant.const import ( CONF_DEVICE_ID, CONF_DOMAIN, CONF_ENTITY_ID, CONF_PLATFORM, CONF_TYPE, STATE_OFF, STATE_ON, ) from homeassistant.core import CALLBACK_TYPE, HomeAssistant from homeassistant.helpers import config_validation as cv, entity_registry from homeassistant.helpers.typing import ConfigType from . import DOMAIN TRIGGER_TYPES = {"turned_on", "turned_off"} TRIGGER_SCHEMA = TRIGGER_BASE_SCHEMA.extend( { vol.Required(CONF_ENTITY_ID): cv.entity_id, vol.Required(CONF_TYPE): vol.In(TRIGGER_TYPES), } ) async def async_get_triggers(hass: HomeAssistant, device_id: str) -> List[dict]: """List device triggers for Fan devices.""" registry = await entity_registry.async_get_registry(hass) triggers = [] # Get all the integrations entities for this device for entry in entity_registry.async_entries_for_device(registry, device_id): if entry.domain != DOMAIN: continue # Add triggers for each entity that belongs to this integration triggers.append( { CONF_PLATFORM: "device", CONF_DEVICE_ID: device_id, CONF_DOMAIN: DOMAIN, CONF_ENTITY_ID: entry.entity_id, CONF_TYPE: "turned_on", } ) triggers.append( { CONF_PLATFORM: "device", CONF_DEVICE_ID: device_id, CONF_DOMAIN: DOMAIN, CONF_ENTITY_ID: entry.entity_id, CONF_TYPE: "turned_off", } ) return triggers async def async_attach_trigger( hass: HomeAssistant, config: ConfigType, action: AutomationActionType, automation_info: dict, ) -> CALLBACK_TYPE: """Attach a trigger.""" config = TRIGGER_SCHEMA(config) if config[CONF_TYPE] == "turned_on": from_state = STATE_OFF to_state = STATE_ON else: from_state = STATE_ON to_state = STATE_OFF state_config = { state_trigger.CONF_PLATFORM: "state", CONF_ENTITY_ID: 
config[CONF_ENTITY_ID], state_trigger.CONF_FROM: from_state, state_trigger.CONF_TO: to_state, } state_config = state_trigger.TRIGGER_SCHEMA(state_config) return await state_trigger.async_attach_trigger( hass, state_config, action, automation_info, platform_type="device" )
"""The tests for the Alexa component.""" # pylint: disable=protected-access import json import pytest from homeassistant.components import alexa from homeassistant.components.alexa import intent from homeassistant.core import callback from homeassistant.setup import async_setup_component SESSION_ID = "amzn1.echo-api.session.0000000-0000-0000-0000-00000000000" APPLICATION_ID = "amzn1.echo-sdk-ams.app.000000-d0ed-0000-ad00-000000d00ebe" REQUEST_ID = "amzn1.echo-api.request.0000000-0000-0000-0000-00000000000" AUTHORITY_ID = "amzn1.er-authority.000000-d0ed-0000-ad00-000000d00ebe.ZODIAC" BUILTIN_AUTH_ID = "amzn1.er-authority.000000-d0ed-0000-ad00-000000d00ebe.TEST" # pylint: disable=invalid-name calls = [] NPR_NEWS_MP3_URL = "https://pd.npr.org/anon.npr-mp3/npr/news/newscast.mp3" @pytest.fixture def alexa_client(loop, hass, hass_client): """Initialize a Home Assistant server for testing this module.""" @callback def mock_service(call): calls.append(call) hass.services.async_register("test", "alexa", mock_service) assert loop.run_until_complete( async_setup_component( hass, alexa.DOMAIN, { # Key is here to verify we allow other keys in config too "homeassistant": {}, "alexa": {}, }, ) ) assert loop.run_until_complete( async_setup_component( hass, "intent_script", { "intent_script": { "WhereAreWeIntent": { "speech": { "type": "plain", "text": """ {%- if is_state("device_tracker.paulus", "home") and is_state("device_tracker.anne_therese", "home") -%} You are both home, you silly {%- else -%} Anne Therese is at {{ states("device_tracker.anne_therese") }} and Paulus is at {{ states("device_tracker.paulus") }} {% endif %} """, } }, "GetZodiacHoroscopeIntent": { "speech": { "type": "plain", "text": "You told us your sign is {{ ZodiacSign }}.", } }, "AMAZON.PlaybackAction<object@MusicCreativeWork>": { "speech": { "type": "plain", "text": "Playing {{ object_byArtist_name }}.", } }, "CallServiceIntent": { "speech": { "type": "plain", "text": "Service called for {{ ZodiacSign }}", 
}, "card": { "type": "simple", "title": "Card title for {{ ZodiacSign }}", "content": "Card content: {{ ZodiacSign }}", }, "action": { "service": "test.alexa", "data_template": {"hello": "{{ ZodiacSign }}"}, "entity_id": "switch.test", }, }, APPLICATION_ID: { "speech": { "type": "plain", "text": "LaunchRequest has been received.", } }, } }, ) ) return loop.run_until_complete(hass_client()) def _intent_req(client, data=None): return client.post( intent.INTENTS_API_ENDPOINT, data=json.dumps(data or {}), headers={"content-type": "application/json"}, ) async def test_intent_launch_request(alexa_client): """Test the launch of a request.""" data = { "version": "1.0", "session": { "new": True, "sessionId": SESSION_ID, "application": {"applicationId": APPLICATION_ID}, "attributes": {}, "user": {"userId": "amzn1.account.AM3B00000000000000000000000"}, }, "request": { "type": "LaunchRequest", "requestId": REQUEST_ID, "timestamp": "2015-05-13T12:34:56Z", }, } req = await _intent_req(alexa_client, data) assert req.status == 200 data = await req.json() text = data.get("response", {}).get("outputSpeech", {}).get("text") assert text == "LaunchRequest has been received." async def test_intent_launch_request_not_configured(alexa_client): """Test the launch of a request.""" data = { "version": "1.0", "session": { "new": True, "sessionId": SESSION_ID, "application": { "applicationId": "amzn1.echo-sdk-ams.app.000000-d0ed-0000-ad00-000000d00000" }, "attributes": {}, "user": {"userId": "amzn1.account.AM3B00000000000000000000000"}, }, "request": { "type": "LaunchRequest", "requestId": REQUEST_ID, "timestamp": "2015-05-13T12:34:56Z", }, } req = await _intent_req(alexa_client, data) assert req.status == 200 data = await req.json() text = data.get("response", {}).get("outputSpeech", {}).get("text") assert text == "This intent is not yet configured within Home Assistant." 
async def test_intent_request_with_slots(alexa_client): """Test a request with slots.""" data = { "version": "1.0", "session": { "new": False, "sessionId": SESSION_ID, "application": {"applicationId": APPLICATION_ID}, "attributes": { "supportedHoroscopePeriods": { "daily": True, "weekly": False, "monthly": False, } }, "user": {"userId": "amzn1.account.AM3B00000000000000000000000"}, }, "request": { "type": "IntentRequest", "requestId": REQUEST_ID, "timestamp": "2015-05-13T12:34:56Z", "intent": { "name": "GetZodiacHoroscopeIntent", "slots": {"ZodiacSign": {"name": "ZodiacSign", "value": "virgo"}}, }, }, } req = await _intent_req(alexa_client, data) assert req.status == 200 data = await req.json() text = data.get("response", {}).get("outputSpeech", {}).get("text") assert text == "You told us your sign is virgo." async def test_intent_request_with_slots_and_synonym_resolution(alexa_client): """Test a request with slots and a name synonym.""" data = { "version": "1.0", "session": { "new": False, "sessionId": SESSION_ID, "application": {"applicationId": APPLICATION_ID}, "attributes": { "supportedHoroscopePeriods": { "daily": True, "weekly": False, "monthly": False, } }, "user": {"userId": "amzn1.account.AM3B00000000000000000000000"}, }, "request": { "type": "IntentRequest", "requestId": REQUEST_ID, "timestamp": "2015-05-13T12:34:56Z", "intent": { "name": "GetZodiacHoroscopeIntent", "slots": { "ZodiacSign": { "name": "ZodiacSign", "value": "V zodiac", "resolutions": { "resolutionsPerAuthority": [ { "authority": AUTHORITY_ID, "status": {"code": "ER_SUCCESS_MATCH"}, "values": [{"value": {"name": "Virgo"}}], }, { "authority": BUILTIN_AUTH_ID, "status": {"code": "ER_SUCCESS_NO_MATCH"}, "values": [{"value": {"name": "Test"}}], }, ] }, } }, }, }, } req = await _intent_req(alexa_client, data) assert req.status == 200 data = await req.json() text = data.get("response", {}).get("outputSpeech", {}).get("text") assert text == "You told us your sign is Virgo." 
async def test_intent_request_with_slots_and_multi_synonym_resolution(alexa_client): """Test a request with slots and multiple name synonyms.""" data = { "version": "1.0", "session": { "new": False, "sessionId": SESSION_ID, "application": {"applicationId": APPLICATION_ID}, "attributes": { "supportedHoroscopePeriods": { "daily": True, "weekly": False, "monthly": False, } }, "user": {"userId": "amzn1.account.AM3B00000000000000000000000"}, }, "request": { "type": "IntentRequest", "requestId": REQUEST_ID, "timestamp": "2015-05-13T12:34:56Z", "intent": { "name": "GetZodiacHoroscopeIntent", "slots": { "ZodiacSign": { "name": "ZodiacSign", "value": "V zodiac", "resolutions": { "resolutionsPerAuthority": [ { "authority": AUTHORITY_ID, "status": {"code": "ER_SUCCESS_MATCH"}, "values": [{"value": {"name": "Virgo"}}], }, { "authority": BUILTIN_AUTH_ID, "status": {"code": "ER_SUCCESS_MATCH"}, "values": [{"value": {"name": "Test"}}], }, ] }, } }, }, }, } req = await _intent_req(alexa_client, data) assert req.status == 200 data = await req.json() text = data.get("response", {}).get("outputSpeech", {}).get("text") assert text == "You told us your sign is V zodiac." async def test_intent_request_with_slots_but_no_value(alexa_client): """Test a request with slots but no value.""" data = { "version": "1.0", "session": { "new": False, "sessionId": SESSION_ID, "application": {"applicationId": APPLICATION_ID}, "attributes": { "supportedHoroscopePeriods": { "daily": True, "weekly": False, "monthly": False, } }, "user": {"userId": "amzn1.account.AM3B00000000000000000000000"}, }, "request": { "type": "IntentRequest", "requestId": REQUEST_ID, "timestamp": "2015-05-13T12:34:56Z", "intent": { "name": "GetZodiacHoroscopeIntent", "slots": {"ZodiacSign": {"name": "ZodiacSign"}}, }, }, } req = await _intent_req(alexa_client, data) assert req.status == 200 data = await req.json() text = data.get("response", {}).get("outputSpeech", {}).get("text") assert text == "You told us your sign is ." 
async def test_intent_request_without_slots(hass, alexa_client): """Test a request without slots.""" data = { "version": "1.0", "session": { "new": False, "sessionId": SESSION_ID, "application": {"applicationId": APPLICATION_ID}, "attributes": { "supportedHoroscopePeriods": { "daily": True, "weekly": False, "monthly": False, } }, "user": {"userId": "amzn1.account.AM3B00000000000000000000000"}, }, "request": { "type": "IntentRequest", "requestId": REQUEST_ID, "timestamp": "2015-05-13T12:34:56Z", "intent": {"name": "WhereAreWeIntent"}, }, } req = await _intent_req(alexa_client, data) assert req.status == 200 json = await req.json() text = json.get("response", {}).get("outputSpeech", {}).get("text") assert text == "Anne Therese is at unknown and Paulus is at unknown" hass.states.async_set("device_tracker.paulus", "home") hass.states.async_set("device_tracker.anne_therese", "home") req = await _intent_req(alexa_client, data) assert req.status == 200 json = await req.json() text = json.get("response", {}).get("outputSpeech", {}).get("text") assert text == "You are both home, you silly" async def test_intent_request_calling_service(alexa_client): """Test a request for calling a service.""" data = { "version": "1.0", "session": { "new": False, "sessionId": SESSION_ID, "application": {"applicationId": APPLICATION_ID}, "attributes": {}, "user": {"userId": "amzn1.account.AM3B00000000000000000000000"}, }, "request": { "type": "IntentRequest", "requestId": REQUEST_ID, "timestamp": "2015-05-13T12:34:56Z", "intent": { "name": "CallServiceIntent", "slots": {"ZodiacSign": {"name": "ZodiacSign", "value": "virgo"}}, }, }, } call_count = len(calls) req = await _intent_req(alexa_client, data) assert req.status == 200 assert call_count + 1 == len(calls) call = calls[-1] assert call.domain == "test" assert call.service == "alexa" assert call.data.get("entity_id") == ["switch.test"] assert call.data.get("hello") == "virgo" data = await req.json() assert data["response"]["card"]["title"] 
== "Card title for virgo" assert data["response"]["card"]["content"] == "Card content: virgo" assert data["response"]["outputSpeech"]["type"] == "PlainText" assert data["response"]["outputSpeech"]["text"] == "Service called for virgo" async def test_intent_session_ended_request(alexa_client): """Test the request for ending the session.""" data = { "version": "1.0", "session": { "new": False, "sessionId": SESSION_ID, "application": {"applicationId": APPLICATION_ID}, "attributes": { "supportedHoroscopePeriods": { "daily": True, "weekly": False, "monthly": False, } }, "user": {"userId": "amzn1.account.AM3B00000000000000000000000"}, }, "request": { "type": "SessionEndedRequest", "requestId": REQUEST_ID, "timestamp": "2015-05-13T12:34:56Z", "reason": "USER_INITIATED", }, } req = await _intent_req(alexa_client, data) assert req.status == 200 text = await req.text() assert text == "" async def test_intent_from_built_in_intent_library(alexa_client): """Test intents from the Built-in Intent Library.""" data = { "request": { "intent": { "name": "AMAZON.PlaybackAction<object@MusicCreativeWork>", "slots": { "object.byArtist.name": { "name": "object.byArtist.name", "value": "the shins", }, "object.composer.name": {"name": "object.composer.name"}, "object.contentSource": {"name": "object.contentSource"}, "object.era": {"name": "object.era"}, "object.genre": {"name": "object.genre"}, "object.name": {"name": "object.name"}, "object.owner.name": {"name": "object.owner.name"}, "object.select": {"name": "object.select"}, "object.sort": {"name": "object.sort"}, "object.type": {"name": "object.type", "value": "music"}, }, }, "timestamp": "2016-12-14T23:23:37Z", "type": "IntentRequest", "requestId": REQUEST_ID, }, "session": { "sessionId": SESSION_ID, "application": {"applicationId": APPLICATION_ID}, }, } req = await _intent_req(alexa_client, data) assert req.status == 200 data = await req.json() text = data.get("response", {}).get("outputSpeech", {}).get("text") assert text == "Playing 
the shins."
tchellomello/home-assistant
tests/components/alexa/test_intent.py
homeassistant/components/fan/device_trigger.py
"""Support for SleepIQ sensors.""" from homeassistant.components import sleepiq ICON = "mdi:hotel" def setup_platform(hass, config, add_entities, discovery_info=None): """Set up the SleepIQ sensors.""" if discovery_info is None: return data = sleepiq.DATA data.update() dev = list() for bed_id, bed in data.beds.items(): for side in sleepiq.SIDES: if getattr(bed, side) is not None: dev.append(SleepNumberSensor(data, bed_id, side)) add_entities(dev) class SleepNumberSensor(sleepiq.SleepIQSensor): """Implementation of a SleepIQ sensor.""" def __init__(self, sleepiq_data, bed_id, side): """Initialize the sensor.""" sleepiq.SleepIQSensor.__init__(self, sleepiq_data, bed_id, side) self._state = None self.type = sleepiq.SLEEP_NUMBER self._name = sleepiq.SENSOR_TYPES[self.type] self.update() @property def state(self): """Return the state of the sensor.""" return self._state @property def icon(self): """Icon to use in the frontend, if any.""" return ICON def update(self): """Get the latest data from SleepIQ and updates the states.""" sleepiq.SleepIQSensor.update(self) self._state = self.side.sleep_number
"""The tests the for GPSLogger device tracker platform.""" from unittest.mock import Mock, patch import pytest from homeassistant import data_entry_flow from homeassistant.components import gpslogger, zone from homeassistant.components.device_tracker import DOMAIN as DEVICE_TRACKER_DOMAIN from homeassistant.components.gpslogger import DOMAIN, TRACKER_UPDATE from homeassistant.const import ( HTTP_OK, HTTP_UNPROCESSABLE_ENTITY, STATE_HOME, STATE_NOT_HOME, ) from homeassistant.helpers.dispatcher import DATA_DISPATCHER from homeassistant.setup import async_setup_component HOME_LATITUDE = 37.239622 HOME_LONGITUDE = -115.815811 # pylint: disable=redefined-outer-name @pytest.fixture(autouse=True) def mock_dev_track(mock_device_tracker_conf): """Mock device tracker config loading.""" pass @pytest.fixture async def gpslogger_client(loop, hass, aiohttp_client): """Mock client for GPSLogger (unauthenticated).""" assert await async_setup_component(hass, "persistent_notification", {}) assert await async_setup_component(hass, DOMAIN, {DOMAIN: {}}) await hass.async_block_till_done() with patch("homeassistant.components.device_tracker.legacy.update_config"): return await aiohttp_client(hass.http.app) @pytest.fixture(autouse=True) async def setup_zones(loop, hass): """Set up Zone config in HA.""" assert await async_setup_component( hass, zone.DOMAIN, { "zone": { "name": "Home", "latitude": HOME_LATITUDE, "longitude": HOME_LONGITUDE, "radius": 100, } }, ) await hass.async_block_till_done() @pytest.fixture async def webhook_id(hass, gpslogger_client): """Initialize the GPSLogger component and get the webhook_id.""" hass.config.api = Mock(base_url="http://example.com") result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": "user"} ) assert result["type"] == data_entry_flow.RESULT_TYPE_FORM, result result = await hass.config_entries.flow.async_configure(result["flow_id"], {}) assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY await 
hass.async_block_till_done() return result["result"].data["webhook_id"] async def test_missing_data(hass, gpslogger_client, webhook_id): """Test missing data.""" url = "/api/webhook/{}".format(webhook_id) data = {"latitude": 1.0, "longitude": 1.1, "device": "123"} # No data req = await gpslogger_client.post(url) await hass.async_block_till_done() assert req.status == HTTP_UNPROCESSABLE_ENTITY # No latitude copy = data.copy() del copy["latitude"] req = await gpslogger_client.post(url, data=copy) await hass.async_block_till_done() assert req.status == HTTP_UNPROCESSABLE_ENTITY # No device copy = data.copy() del copy["device"] req = await gpslogger_client.post(url, data=copy) await hass.async_block_till_done() assert req.status == HTTP_UNPROCESSABLE_ENTITY async def test_enter_and_exit(hass, gpslogger_client, webhook_id): """Test when there is a known zone.""" url = "/api/webhook/{}".format(webhook_id) data = {"latitude": HOME_LATITUDE, "longitude": HOME_LONGITUDE, "device": "123"} # Enter the Home req = await gpslogger_client.post(url, data=data) await hass.async_block_till_done() assert req.status == HTTP_OK state_name = hass.states.get( "{}.{}".format(DEVICE_TRACKER_DOMAIN, data["device"]) ).state assert STATE_HOME == state_name # Enter Home again req = await gpslogger_client.post(url, data=data) await hass.async_block_till_done() assert req.status == HTTP_OK state_name = hass.states.get( "{}.{}".format(DEVICE_TRACKER_DOMAIN, data["device"]) ).state assert STATE_HOME == state_name data["longitude"] = 0 data["latitude"] = 0 # Enter Somewhere else req = await gpslogger_client.post(url, data=data) await hass.async_block_till_done() assert req.status == HTTP_OK state_name = hass.states.get( "{}.{}".format(DEVICE_TRACKER_DOMAIN, data["device"]) ).state assert STATE_NOT_HOME == state_name dev_reg = await hass.helpers.device_registry.async_get_registry() assert len(dev_reg.devices) == 1 ent_reg = await hass.helpers.entity_registry.async_get_registry() assert 
len(ent_reg.entities) == 1 async def test_enter_with_attrs(hass, gpslogger_client, webhook_id): """Test when additional attributes are present.""" url = "/api/webhook/{}".format(webhook_id) data = { "latitude": 1.0, "longitude": 1.1, "device": "123", "accuracy": 10.5, "battery": 10, "speed": 100, "direction": 105.32, "altitude": 102, "provider": "gps", "activity": "running", } req = await gpslogger_client.post(url, data=data) await hass.async_block_till_done() assert req.status == HTTP_OK state = hass.states.get("{}.{}".format(DEVICE_TRACKER_DOMAIN, data["device"])) assert state.state == STATE_NOT_HOME assert state.attributes["gps_accuracy"] == 10.5 assert state.attributes["battery_level"] == 10.0 assert state.attributes["speed"] == 100.0 assert state.attributes["direction"] == 105.32 assert state.attributes["altitude"] == 102.0 assert state.attributes["provider"] == "gps" assert state.attributes["activity"] == "running" data = { "latitude": HOME_LATITUDE, "longitude": HOME_LONGITUDE, "device": "123", "accuracy": 123, "battery": 23, "speed": 23, "direction": 123, "altitude": 123, "provider": "gps", "activity": "idle", } req = await gpslogger_client.post(url, data=data) await hass.async_block_till_done() assert req.status == HTTP_OK state = hass.states.get("{}.{}".format(DEVICE_TRACKER_DOMAIN, data["device"])) assert state.state == STATE_HOME assert state.attributes["gps_accuracy"] == 123 assert state.attributes["battery_level"] == 23 assert state.attributes["speed"] == 23 assert state.attributes["direction"] == 123 assert state.attributes["altitude"] == 123 assert state.attributes["provider"] == "gps" assert state.attributes["activity"] == "idle" @pytest.mark.xfail( reason="The device_tracker component does not support unloading yet." 
) async def test_load_unload_entry(hass, gpslogger_client, webhook_id): """Test that the appropriate dispatch signals are added and removed.""" url = "/api/webhook/{}".format(webhook_id) data = {"latitude": HOME_LATITUDE, "longitude": HOME_LONGITUDE, "device": "123"} # Enter the Home req = await gpslogger_client.post(url, data=data) await hass.async_block_till_done() assert req.status == HTTP_OK state_name = hass.states.get( "{}.{}".format(DEVICE_TRACKER_DOMAIN, data["device"]) ).state assert STATE_HOME == state_name assert len(hass.data[DATA_DISPATCHER][TRACKER_UPDATE]) == 1 entry = hass.config_entries.async_entries(DOMAIN)[0] assert await gpslogger.async_unload_entry(hass, entry) await hass.async_block_till_done() assert not hass.data[DATA_DISPATCHER][TRACKER_UPDATE]
Cinntax/home-assistant
tests/components/gpslogger/test_init.py
homeassistant/components/sleepiq/sensor.py
"""Real-time information about public transport departures in Norway.""" from datetime import datetime, timedelta import logging import voluptuous as vol from homeassistant.components.sensor import PLATFORM_SCHEMA from homeassistant.const import ( ATTR_ATTRIBUTION, CONF_LATITUDE, CONF_LONGITUDE, CONF_NAME, CONF_SHOW_ON_MAP, ) from homeassistant.helpers.aiohttp_client import async_get_clientsession import homeassistant.helpers.config_validation as cv from homeassistant.helpers.entity import Entity from homeassistant.util import Throttle import homeassistant.util.dt as dt_util _LOGGER = logging.getLogger(__name__) API_CLIENT_NAME = "homeassistant-homeassistant" ATTRIBUTION = "Data provided by entur.org under NLOD" CONF_STOP_IDS = "stop_ids" CONF_EXPAND_PLATFORMS = "expand_platforms" CONF_WHITELIST_LINES = "line_whitelist" CONF_OMIT_NON_BOARDING = "omit_non_boarding" CONF_NUMBER_OF_DEPARTURES = "number_of_departures" DEFAULT_NAME = "Entur" DEFAULT_ICON_KEY = "bus" ICONS = { "air": "mdi:airplane", "bus": "mdi:bus", "metro": "mdi:subway", "rail": "mdi:train", "tram": "mdi:tram", "water": "mdi:ferry", } SCAN_INTERVAL = timedelta(seconds=45) PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend( { vol.Required(CONF_STOP_IDS): vol.All(cv.ensure_list, [cv.string]), vol.Optional(CONF_EXPAND_PLATFORMS, default=True): cv.boolean, vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string, vol.Optional(CONF_SHOW_ON_MAP, default=False): cv.boolean, vol.Optional(CONF_WHITELIST_LINES, default=[]): cv.ensure_list, vol.Optional(CONF_OMIT_NON_BOARDING, default=True): cv.boolean, vol.Optional(CONF_NUMBER_OF_DEPARTURES, default=2): vol.All( cv.positive_int, vol.Range(min=2, max=10) ), } ) ATTR_STOP_ID = "stop_id" ATTR_ROUTE = "route" ATTR_ROUTE_ID = "route_id" ATTR_EXPECTED_AT = "due_at" ATTR_DELAY = "delay" ATTR_REALTIME = "real_time" ATTR_NEXT_UP_IN = "next_due_in" ATTR_NEXT_UP_ROUTE = "next_route" ATTR_NEXT_UP_ROUTE_ID = "next_route_id" ATTR_NEXT_UP_AT = "next_due_at" ATTR_NEXT_UP_DELAY = 
"next_delay" ATTR_NEXT_UP_REALTIME = "next_real_time" ATTR_TRANSPORT_MODE = "transport_mode" def due_in_minutes(timestamp: datetime) -> int: """Get the time in minutes from a timestamp.""" if timestamp is None: return None diff = timestamp - dt_util.now() return int(diff.total_seconds() / 60) async def async_setup_platform(hass, config, async_add_entities, discovery_info=None): """Set up the Entur public transport sensor.""" from enturclient import EnturPublicTransportData expand = config.get(CONF_EXPAND_PLATFORMS) line_whitelist = config.get(CONF_WHITELIST_LINES) name = config.get(CONF_NAME) show_on_map = config.get(CONF_SHOW_ON_MAP) stop_ids = config.get(CONF_STOP_IDS) omit_non_boarding = config.get(CONF_OMIT_NON_BOARDING) number_of_departures = config.get(CONF_NUMBER_OF_DEPARTURES) stops = [s for s in stop_ids if "StopPlace" in s] quays = [s for s in stop_ids if "Quay" in s] data = EnturPublicTransportData( API_CLIENT_NAME, stops=stops, quays=quays, line_whitelist=line_whitelist, omit_non_boarding=omit_non_boarding, number_of_departures=number_of_departures, web_session=async_get_clientsession(hass), ) if expand: await data.expand_all_quays() await data.update() proxy = EnturProxy(data) entities = [] for place in data.all_stop_places_quays(): try: given_name = "{} {}".format(name, data.get_stop_info(place).name) except KeyError: given_name = f"{name} {place}" entities.append( EnturPublicTransportSensor(proxy, given_name, place, show_on_map) ) async_add_entities(entities, True) class EnturProxy: """Proxy for the Entur client. Ensure throttle to not hit rate limiting on the API. 
""" def __init__(self, api): """Initialize the proxy.""" self._api = api @Throttle(timedelta(seconds=15)) async def async_update(self) -> None: """Update data in client.""" await self._api.update() def get_stop_info(self, stop_id: str) -> dict: """Get info about specific stop place.""" return self._api.get_stop_info(stop_id) class EnturPublicTransportSensor(Entity): """Implementation of a Entur public transport sensor.""" def __init__(self, api: EnturProxy, name: str, stop: str, show_on_map: bool): """Initialize the sensor.""" self.api = api self._stop = stop self._show_on_map = show_on_map self._name = name self._state = None self._icon = ICONS[DEFAULT_ICON_KEY] self._attributes = {} @property def name(self) -> str: """Return the name of the sensor.""" return self._name @property def state(self) -> str: """Return the state of the sensor.""" return self._state @property def device_state_attributes(self) -> dict: """Return the state attributes.""" self._attributes[ATTR_ATTRIBUTION] = ATTRIBUTION self._attributes[ATTR_STOP_ID] = self._stop return self._attributes @property def unit_of_measurement(self) -> str: """Return the unit this state is expressed in.""" return "min" @property def icon(self) -> str: """Icon to use in the frontend.""" return self._icon async def async_update(self) -> None: """Get the latest data and update the states.""" await self.api.async_update() self._attributes = {} data = self.api.get_stop_info(self._stop) if data is None: self._state = None return if self._show_on_map and data.latitude and data.longitude: self._attributes[CONF_LATITUDE] = data.latitude self._attributes[CONF_LONGITUDE] = data.longitude calls = data.estimated_calls if not calls: self._state = None return self._state = due_in_minutes(calls[0].expected_departure_time) self._icon = ICONS.get(calls[0].transport_mode, ICONS[DEFAULT_ICON_KEY]) self._attributes[ATTR_ROUTE] = calls[0].front_display self._attributes[ATTR_ROUTE_ID] = calls[0].line_id 
self._attributes[ATTR_EXPECTED_AT] = calls[0].expected_departure_time.strftime( "%H:%M" ) self._attributes[ATTR_REALTIME] = calls[0].is_realtime self._attributes[ATTR_DELAY] = calls[0].delay_in_min number_of_calls = len(calls) if number_of_calls < 2: return self._attributes[ATTR_NEXT_UP_ROUTE] = calls[1].front_display self._attributes[ATTR_NEXT_UP_ROUTE_ID] = calls[1].line_id self._attributes[ATTR_NEXT_UP_AT] = calls[1].expected_departure_time.strftime( "%H:%M" ) self._attributes[ATTR_NEXT_UP_IN] = "{} min".format( due_in_minutes(calls[1].expected_departure_time) ) self._attributes[ATTR_NEXT_UP_REALTIME] = calls[1].is_realtime self._attributes[ATTR_NEXT_UP_DELAY] = calls[1].delay_in_min if number_of_calls < 3: return for i, call in enumerate(calls[2:]): key_name = "departure_#" + str(i + 3) self._attributes[key_name] = "{}{} {}".format( "" if bool(call.is_realtime) else "ca. ", call.expected_departure_time.strftime("%H:%M"), call.front_display, )
"""The tests the for GPSLogger device tracker platform.""" from unittest.mock import Mock, patch import pytest from homeassistant import data_entry_flow from homeassistant.components import gpslogger, zone from homeassistant.components.device_tracker import DOMAIN as DEVICE_TRACKER_DOMAIN from homeassistant.components.gpslogger import DOMAIN, TRACKER_UPDATE from homeassistant.const import ( HTTP_OK, HTTP_UNPROCESSABLE_ENTITY, STATE_HOME, STATE_NOT_HOME, ) from homeassistant.helpers.dispatcher import DATA_DISPATCHER from homeassistant.setup import async_setup_component HOME_LATITUDE = 37.239622 HOME_LONGITUDE = -115.815811 # pylint: disable=redefined-outer-name @pytest.fixture(autouse=True) def mock_dev_track(mock_device_tracker_conf): """Mock device tracker config loading.""" pass @pytest.fixture async def gpslogger_client(loop, hass, aiohttp_client): """Mock client for GPSLogger (unauthenticated).""" assert await async_setup_component(hass, "persistent_notification", {}) assert await async_setup_component(hass, DOMAIN, {DOMAIN: {}}) await hass.async_block_till_done() with patch("homeassistant.components.device_tracker.legacy.update_config"): return await aiohttp_client(hass.http.app) @pytest.fixture(autouse=True) async def setup_zones(loop, hass): """Set up Zone config in HA.""" assert await async_setup_component( hass, zone.DOMAIN, { "zone": { "name": "Home", "latitude": HOME_LATITUDE, "longitude": HOME_LONGITUDE, "radius": 100, } }, ) await hass.async_block_till_done() @pytest.fixture async def webhook_id(hass, gpslogger_client): """Initialize the GPSLogger component and get the webhook_id.""" hass.config.api = Mock(base_url="http://example.com") result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": "user"} ) assert result["type"] == data_entry_flow.RESULT_TYPE_FORM, result result = await hass.config_entries.flow.async_configure(result["flow_id"], {}) assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY await 
hass.async_block_till_done() return result["result"].data["webhook_id"] async def test_missing_data(hass, gpslogger_client, webhook_id): """Test missing data.""" url = "/api/webhook/{}".format(webhook_id) data = {"latitude": 1.0, "longitude": 1.1, "device": "123"} # No data req = await gpslogger_client.post(url) await hass.async_block_till_done() assert req.status == HTTP_UNPROCESSABLE_ENTITY # No latitude copy = data.copy() del copy["latitude"] req = await gpslogger_client.post(url, data=copy) await hass.async_block_till_done() assert req.status == HTTP_UNPROCESSABLE_ENTITY # No device copy = data.copy() del copy["device"] req = await gpslogger_client.post(url, data=copy) await hass.async_block_till_done() assert req.status == HTTP_UNPROCESSABLE_ENTITY async def test_enter_and_exit(hass, gpslogger_client, webhook_id): """Test when there is a known zone.""" url = "/api/webhook/{}".format(webhook_id) data = {"latitude": HOME_LATITUDE, "longitude": HOME_LONGITUDE, "device": "123"} # Enter the Home req = await gpslogger_client.post(url, data=data) await hass.async_block_till_done() assert req.status == HTTP_OK state_name = hass.states.get( "{}.{}".format(DEVICE_TRACKER_DOMAIN, data["device"]) ).state assert STATE_HOME == state_name # Enter Home again req = await gpslogger_client.post(url, data=data) await hass.async_block_till_done() assert req.status == HTTP_OK state_name = hass.states.get( "{}.{}".format(DEVICE_TRACKER_DOMAIN, data["device"]) ).state assert STATE_HOME == state_name data["longitude"] = 0 data["latitude"] = 0 # Enter Somewhere else req = await gpslogger_client.post(url, data=data) await hass.async_block_till_done() assert req.status == HTTP_OK state_name = hass.states.get( "{}.{}".format(DEVICE_TRACKER_DOMAIN, data["device"]) ).state assert STATE_NOT_HOME == state_name dev_reg = await hass.helpers.device_registry.async_get_registry() assert len(dev_reg.devices) == 1 ent_reg = await hass.helpers.entity_registry.async_get_registry() assert 
len(ent_reg.entities) == 1 async def test_enter_with_attrs(hass, gpslogger_client, webhook_id): """Test when additional attributes are present.""" url = "/api/webhook/{}".format(webhook_id) data = { "latitude": 1.0, "longitude": 1.1, "device": "123", "accuracy": 10.5, "battery": 10, "speed": 100, "direction": 105.32, "altitude": 102, "provider": "gps", "activity": "running", } req = await gpslogger_client.post(url, data=data) await hass.async_block_till_done() assert req.status == HTTP_OK state = hass.states.get("{}.{}".format(DEVICE_TRACKER_DOMAIN, data["device"])) assert state.state == STATE_NOT_HOME assert state.attributes["gps_accuracy"] == 10.5 assert state.attributes["battery_level"] == 10.0 assert state.attributes["speed"] == 100.0 assert state.attributes["direction"] == 105.32 assert state.attributes["altitude"] == 102.0 assert state.attributes["provider"] == "gps" assert state.attributes["activity"] == "running" data = { "latitude": HOME_LATITUDE, "longitude": HOME_LONGITUDE, "device": "123", "accuracy": 123, "battery": 23, "speed": 23, "direction": 123, "altitude": 123, "provider": "gps", "activity": "idle", } req = await gpslogger_client.post(url, data=data) await hass.async_block_till_done() assert req.status == HTTP_OK state = hass.states.get("{}.{}".format(DEVICE_TRACKER_DOMAIN, data["device"])) assert state.state == STATE_HOME assert state.attributes["gps_accuracy"] == 123 assert state.attributes["battery_level"] == 23 assert state.attributes["speed"] == 23 assert state.attributes["direction"] == 123 assert state.attributes["altitude"] == 123 assert state.attributes["provider"] == "gps" assert state.attributes["activity"] == "idle" @pytest.mark.xfail( reason="The device_tracker component does not support unloading yet." 
) async def test_load_unload_entry(hass, gpslogger_client, webhook_id): """Test that the appropriate dispatch signals are added and removed.""" url = "/api/webhook/{}".format(webhook_id) data = {"latitude": HOME_LATITUDE, "longitude": HOME_LONGITUDE, "device": "123"} # Enter the Home req = await gpslogger_client.post(url, data=data) await hass.async_block_till_done() assert req.status == HTTP_OK state_name = hass.states.get( "{}.{}".format(DEVICE_TRACKER_DOMAIN, data["device"]) ).state assert STATE_HOME == state_name assert len(hass.data[DATA_DISPATCHER][TRACKER_UPDATE]) == 1 entry = hass.config_entries.async_entries(DOMAIN)[0] assert await gpslogger.async_unload_entry(hass, entry) await hass.async_block_till_done() assert not hass.data[DATA_DISPATCHER][TRACKER_UPDATE]
Cinntax/home-assistant
tests/components/gpslogger/test_init.py
homeassistant/components/entur_public_transport/sensor.py
"""Support for ISY994 covers.""" import logging from typing import Callable from homeassistant.components.cover import DOMAIN, CoverDevice from homeassistant.const import ( STATE_CLOSED, STATE_CLOSING, STATE_OPEN, STATE_OPENING, STATE_UNKNOWN, ) from homeassistant.helpers.typing import ConfigType from . import ISY994_NODES, ISY994_PROGRAMS, ISYDevice _LOGGER = logging.getLogger(__name__) VALUE_TO_STATE = { 0: STATE_CLOSED, 101: STATE_UNKNOWN, 102: "stopped", 103: STATE_CLOSING, 104: STATE_OPENING, } def setup_platform( hass, config: ConfigType, add_entities: Callable[[list], None], discovery_info=None ): """Set up the ISY994 cover platform.""" devices = [] for node in hass.data[ISY994_NODES][DOMAIN]: devices.append(ISYCoverDevice(node)) for name, status, actions in hass.data[ISY994_PROGRAMS][DOMAIN]: devices.append(ISYCoverProgram(name, status, actions)) add_entities(devices) class ISYCoverDevice(ISYDevice, CoverDevice): """Representation of an ISY994 cover device.""" @property def current_cover_position(self) -> int: """Return the current cover position.""" if self.is_unknown() or self.value is None: return None return sorted((0, self.value, 100))[1] @property def is_closed(self) -> bool: """Get whether the ISY994 cover device is closed.""" return self.state == STATE_CLOSED @property def state(self) -> str: """Get the state of the ISY994 cover device.""" if self.is_unknown(): return None return VALUE_TO_STATE.get(self.value, STATE_OPEN) def open_cover(self, **kwargs) -> None: """Send the open cover command to the ISY994 cover device.""" if not self._node.on(val=100): _LOGGER.error("Unable to open the cover") def close_cover(self, **kwargs) -> None: """Send the close cover command to the ISY994 cover device.""" if not self._node.off(): _LOGGER.error("Unable to close the cover") class ISYCoverProgram(ISYCoverDevice): """Representation of an ISY994 cover program.""" def __init__(self, name: str, node: object, actions: object) -> None: """Initialize the ISY994 cover 
program.""" super().__init__(node) self._name = name self._actions = actions @property def state(self) -> str: """Get the state of the ISY994 cover program.""" return STATE_CLOSED if bool(self.value) else STATE_OPEN def open_cover(self, **kwargs) -> None: """Send the open cover command to the ISY994 cover program.""" if not self._actions.runThen(): _LOGGER.error("Unable to open the cover") def close_cover(self, **kwargs) -> None: """Send the close cover command to the ISY994 cover program.""" if not self._actions.runElse(): _LOGGER.error("Unable to close the cover")
"""The tests the for GPSLogger device tracker platform.""" from unittest.mock import Mock, patch import pytest from homeassistant import data_entry_flow from homeassistant.components import gpslogger, zone from homeassistant.components.device_tracker import DOMAIN as DEVICE_TRACKER_DOMAIN from homeassistant.components.gpslogger import DOMAIN, TRACKER_UPDATE from homeassistant.const import ( HTTP_OK, HTTP_UNPROCESSABLE_ENTITY, STATE_HOME, STATE_NOT_HOME, ) from homeassistant.helpers.dispatcher import DATA_DISPATCHER from homeassistant.setup import async_setup_component HOME_LATITUDE = 37.239622 HOME_LONGITUDE = -115.815811 # pylint: disable=redefined-outer-name @pytest.fixture(autouse=True) def mock_dev_track(mock_device_tracker_conf): """Mock device tracker config loading.""" pass @pytest.fixture async def gpslogger_client(loop, hass, aiohttp_client): """Mock client for GPSLogger (unauthenticated).""" assert await async_setup_component(hass, "persistent_notification", {}) assert await async_setup_component(hass, DOMAIN, {DOMAIN: {}}) await hass.async_block_till_done() with patch("homeassistant.components.device_tracker.legacy.update_config"): return await aiohttp_client(hass.http.app) @pytest.fixture(autouse=True) async def setup_zones(loop, hass): """Set up Zone config in HA.""" assert await async_setup_component( hass, zone.DOMAIN, { "zone": { "name": "Home", "latitude": HOME_LATITUDE, "longitude": HOME_LONGITUDE, "radius": 100, } }, ) await hass.async_block_till_done() @pytest.fixture async def webhook_id(hass, gpslogger_client): """Initialize the GPSLogger component and get the webhook_id.""" hass.config.api = Mock(base_url="http://example.com") result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": "user"} ) assert result["type"] == data_entry_flow.RESULT_TYPE_FORM, result result = await hass.config_entries.flow.async_configure(result["flow_id"], {}) assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY await 
hass.async_block_till_done() return result["result"].data["webhook_id"] async def test_missing_data(hass, gpslogger_client, webhook_id): """Test missing data.""" url = "/api/webhook/{}".format(webhook_id) data = {"latitude": 1.0, "longitude": 1.1, "device": "123"} # No data req = await gpslogger_client.post(url) await hass.async_block_till_done() assert req.status == HTTP_UNPROCESSABLE_ENTITY # No latitude copy = data.copy() del copy["latitude"] req = await gpslogger_client.post(url, data=copy) await hass.async_block_till_done() assert req.status == HTTP_UNPROCESSABLE_ENTITY # No device copy = data.copy() del copy["device"] req = await gpslogger_client.post(url, data=copy) await hass.async_block_till_done() assert req.status == HTTP_UNPROCESSABLE_ENTITY async def test_enter_and_exit(hass, gpslogger_client, webhook_id): """Test when there is a known zone.""" url = "/api/webhook/{}".format(webhook_id) data = {"latitude": HOME_LATITUDE, "longitude": HOME_LONGITUDE, "device": "123"} # Enter the Home req = await gpslogger_client.post(url, data=data) await hass.async_block_till_done() assert req.status == HTTP_OK state_name = hass.states.get( "{}.{}".format(DEVICE_TRACKER_DOMAIN, data["device"]) ).state assert STATE_HOME == state_name # Enter Home again req = await gpslogger_client.post(url, data=data) await hass.async_block_till_done() assert req.status == HTTP_OK state_name = hass.states.get( "{}.{}".format(DEVICE_TRACKER_DOMAIN, data["device"]) ).state assert STATE_HOME == state_name data["longitude"] = 0 data["latitude"] = 0 # Enter Somewhere else req = await gpslogger_client.post(url, data=data) await hass.async_block_till_done() assert req.status == HTTP_OK state_name = hass.states.get( "{}.{}".format(DEVICE_TRACKER_DOMAIN, data["device"]) ).state assert STATE_NOT_HOME == state_name dev_reg = await hass.helpers.device_registry.async_get_registry() assert len(dev_reg.devices) == 1 ent_reg = await hass.helpers.entity_registry.async_get_registry() assert 
len(ent_reg.entities) == 1 async def test_enter_with_attrs(hass, gpslogger_client, webhook_id): """Test when additional attributes are present.""" url = "/api/webhook/{}".format(webhook_id) data = { "latitude": 1.0, "longitude": 1.1, "device": "123", "accuracy": 10.5, "battery": 10, "speed": 100, "direction": 105.32, "altitude": 102, "provider": "gps", "activity": "running", } req = await gpslogger_client.post(url, data=data) await hass.async_block_till_done() assert req.status == HTTP_OK state = hass.states.get("{}.{}".format(DEVICE_TRACKER_DOMAIN, data["device"])) assert state.state == STATE_NOT_HOME assert state.attributes["gps_accuracy"] == 10.5 assert state.attributes["battery_level"] == 10.0 assert state.attributes["speed"] == 100.0 assert state.attributes["direction"] == 105.32 assert state.attributes["altitude"] == 102.0 assert state.attributes["provider"] == "gps" assert state.attributes["activity"] == "running" data = { "latitude": HOME_LATITUDE, "longitude": HOME_LONGITUDE, "device": "123", "accuracy": 123, "battery": 23, "speed": 23, "direction": 123, "altitude": 123, "provider": "gps", "activity": "idle", } req = await gpslogger_client.post(url, data=data) await hass.async_block_till_done() assert req.status == HTTP_OK state = hass.states.get("{}.{}".format(DEVICE_TRACKER_DOMAIN, data["device"])) assert state.state == STATE_HOME assert state.attributes["gps_accuracy"] == 123 assert state.attributes["battery_level"] == 23 assert state.attributes["speed"] == 23 assert state.attributes["direction"] == 123 assert state.attributes["altitude"] == 123 assert state.attributes["provider"] == "gps" assert state.attributes["activity"] == "idle" @pytest.mark.xfail( reason="The device_tracker component does not support unloading yet." 
) async def test_load_unload_entry(hass, gpslogger_client, webhook_id): """Test that the appropriate dispatch signals are added and removed.""" url = "/api/webhook/{}".format(webhook_id) data = {"latitude": HOME_LATITUDE, "longitude": HOME_LONGITUDE, "device": "123"} # Enter the Home req = await gpslogger_client.post(url, data=data) await hass.async_block_till_done() assert req.status == HTTP_OK state_name = hass.states.get( "{}.{}".format(DEVICE_TRACKER_DOMAIN, data["device"]) ).state assert STATE_HOME == state_name assert len(hass.data[DATA_DISPATCHER][TRACKER_UPDATE]) == 1 entry = hass.config_entries.async_entries(DOMAIN)[0] assert await gpslogger.async_unload_entry(hass, entry) await hass.async_block_till_done() assert not hass.data[DATA_DISPATCHER][TRACKER_UPDATE]
Cinntax/home-assistant
tests/components/gpslogger/test_init.py
homeassistant/components/isy994/cover.py
"""Register a custom front end panel.""" import logging import os import voluptuous as vol import homeassistant.helpers.config_validation as cv from homeassistant.loader import bind_hass _LOGGER = logging.getLogger(__name__) DOMAIN = "panel_custom" CONF_COMPONENT_NAME = "name" CONF_SIDEBAR_TITLE = "sidebar_title" CONF_SIDEBAR_ICON = "sidebar_icon" CONF_URL_PATH = "url_path" CONF_CONFIG = "config" CONF_WEBCOMPONENT_PATH = "webcomponent_path" CONF_JS_URL = "js_url" CONF_MODULE_URL = "module_url" CONF_EMBED_IFRAME = "embed_iframe" CONF_TRUST_EXTERNAL_SCRIPT = "trust_external_script" CONF_URL_EXCLUSIVE_GROUP = "url_exclusive_group" CONF_REQUIRE_ADMIN = "require_admin" MSG_URL_CONFLICT = "Pass in only one of webcomponent_path, module_url or js_url" DEFAULT_EMBED_IFRAME = False DEFAULT_TRUST_EXTERNAL = False DEFAULT_ICON = "mdi:bookmark" LEGACY_URL = "/api/panel_custom/{}" PANEL_DIR = "panels" CONFIG_SCHEMA = vol.Schema( { DOMAIN: vol.All( cv.ensure_list, [ vol.Schema( { vol.Required(CONF_COMPONENT_NAME): cv.string, vol.Optional(CONF_SIDEBAR_TITLE): cv.string, vol.Optional(CONF_SIDEBAR_ICON, default=DEFAULT_ICON): cv.icon, vol.Optional(CONF_URL_PATH): cv.string, vol.Optional(CONF_CONFIG): dict, vol.Exclusive( CONF_WEBCOMPONENT_PATH, CONF_URL_EXCLUSIVE_GROUP, msg=MSG_URL_CONFLICT, ): cv.string, vol.Exclusive( CONF_JS_URL, CONF_URL_EXCLUSIVE_GROUP, msg=MSG_URL_CONFLICT ): cv.string, vol.Exclusive( CONF_MODULE_URL, CONF_URL_EXCLUSIVE_GROUP, msg=MSG_URL_CONFLICT, ): cv.string, vol.Optional( CONF_EMBED_IFRAME, default=DEFAULT_EMBED_IFRAME ): cv.boolean, vol.Optional( CONF_TRUST_EXTERNAL_SCRIPT, default=DEFAULT_TRUST_EXTERNAL ): cv.boolean, vol.Optional(CONF_REQUIRE_ADMIN, default=False): cv.boolean, } ) ], ) }, extra=vol.ALLOW_EXTRA, ) @bind_hass async def async_register_panel( hass, # The url to serve the panel frontend_url_path, # The webcomponent name that loads your panel webcomponent_name, # Title/icon for sidebar sidebar_title=None, sidebar_icon=None, # HTML source of 
your panel html_url=None, # JS source of your panel js_url=None, # JS module of your panel module_url=None, # If your panel should be run inside an iframe embed_iframe=DEFAULT_EMBED_IFRAME, # Should user be asked for confirmation when loading external source trust_external=DEFAULT_TRUST_EXTERNAL, # Configuration to be passed to the panel config=None, # If your panel should only be shown to admin users require_admin=False, ): """Register a new custom panel.""" if js_url is None and html_url is None and module_url is None: raise ValueError("Either js_url, module_url or html_url is required.") if (js_url and html_url) or (module_url and html_url): raise ValueError("Pass in only one of JS url, Module url or HTML url.") if config is not None and not isinstance(config, dict): raise ValueError("Config needs to be a dictionary.") custom_panel_config = { "name": webcomponent_name, "embed_iframe": embed_iframe, "trust_external": trust_external, } if js_url is not None: custom_panel_config["js_url"] = js_url if module_url is not None: custom_panel_config["module_url"] = module_url if html_url is not None: custom_panel_config["html_url"] = html_url if config is not None: # Make copy because we're mutating it config = dict(config) else: config = {} config["_panel_custom"] = custom_panel_config hass.components.frontend.async_register_built_in_panel( component_name="custom", sidebar_title=sidebar_title, sidebar_icon=sidebar_icon, frontend_url_path=frontend_url_path, config=config, require_admin=require_admin, ) async def async_setup(hass, config): """Initialize custom panel.""" if DOMAIN not in config: return True success = False for panel in config[DOMAIN]: name = panel[CONF_COMPONENT_NAME] kwargs = { "webcomponent_name": panel[CONF_COMPONENT_NAME], "frontend_url_path": panel.get(CONF_URL_PATH, name), "sidebar_title": panel.get(CONF_SIDEBAR_TITLE), "sidebar_icon": panel.get(CONF_SIDEBAR_ICON), "config": panel.get(CONF_CONFIG), "trust_external": panel[CONF_TRUST_EXTERNAL_SCRIPT], 
"embed_iframe": panel[CONF_EMBED_IFRAME], "require_admin": panel[CONF_REQUIRE_ADMIN], } panel_path = panel.get(CONF_WEBCOMPONENT_PATH) if panel_path is None: panel_path = hass.config.path(PANEL_DIR, f"{name}.html") if CONF_JS_URL in panel: kwargs["js_url"] = panel[CONF_JS_URL] elif CONF_MODULE_URL in panel: kwargs["module_url"] = panel[CONF_MODULE_URL] elif not await hass.async_add_job(os.path.isfile, panel_path): _LOGGER.error("Unable to find webcomponent for %s: %s", name, panel_path) continue else: url = LEGACY_URL.format(name) hass.http.register_static_path(url, panel_path) kwargs["html_url"] = url await async_register_panel(hass, **kwargs) success = True return success
"""The tests the for GPSLogger device tracker platform.""" from unittest.mock import Mock, patch import pytest from homeassistant import data_entry_flow from homeassistant.components import gpslogger, zone from homeassistant.components.device_tracker import DOMAIN as DEVICE_TRACKER_DOMAIN from homeassistant.components.gpslogger import DOMAIN, TRACKER_UPDATE from homeassistant.const import ( HTTP_OK, HTTP_UNPROCESSABLE_ENTITY, STATE_HOME, STATE_NOT_HOME, ) from homeassistant.helpers.dispatcher import DATA_DISPATCHER from homeassistant.setup import async_setup_component HOME_LATITUDE = 37.239622 HOME_LONGITUDE = -115.815811 # pylint: disable=redefined-outer-name @pytest.fixture(autouse=True) def mock_dev_track(mock_device_tracker_conf): """Mock device tracker config loading.""" pass @pytest.fixture async def gpslogger_client(loop, hass, aiohttp_client): """Mock client for GPSLogger (unauthenticated).""" assert await async_setup_component(hass, "persistent_notification", {}) assert await async_setup_component(hass, DOMAIN, {DOMAIN: {}}) await hass.async_block_till_done() with patch("homeassistant.components.device_tracker.legacy.update_config"): return await aiohttp_client(hass.http.app) @pytest.fixture(autouse=True) async def setup_zones(loop, hass): """Set up Zone config in HA.""" assert await async_setup_component( hass, zone.DOMAIN, { "zone": { "name": "Home", "latitude": HOME_LATITUDE, "longitude": HOME_LONGITUDE, "radius": 100, } }, ) await hass.async_block_till_done() @pytest.fixture async def webhook_id(hass, gpslogger_client): """Initialize the GPSLogger component and get the webhook_id.""" hass.config.api = Mock(base_url="http://example.com") result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": "user"} ) assert result["type"] == data_entry_flow.RESULT_TYPE_FORM, result result = await hass.config_entries.flow.async_configure(result["flow_id"], {}) assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY await 
hass.async_block_till_done() return result["result"].data["webhook_id"] async def test_missing_data(hass, gpslogger_client, webhook_id): """Test missing data.""" url = "/api/webhook/{}".format(webhook_id) data = {"latitude": 1.0, "longitude": 1.1, "device": "123"} # No data req = await gpslogger_client.post(url) await hass.async_block_till_done() assert req.status == HTTP_UNPROCESSABLE_ENTITY # No latitude copy = data.copy() del copy["latitude"] req = await gpslogger_client.post(url, data=copy) await hass.async_block_till_done() assert req.status == HTTP_UNPROCESSABLE_ENTITY # No device copy = data.copy() del copy["device"] req = await gpslogger_client.post(url, data=copy) await hass.async_block_till_done() assert req.status == HTTP_UNPROCESSABLE_ENTITY async def test_enter_and_exit(hass, gpslogger_client, webhook_id): """Test when there is a known zone.""" url = "/api/webhook/{}".format(webhook_id) data = {"latitude": HOME_LATITUDE, "longitude": HOME_LONGITUDE, "device": "123"} # Enter the Home req = await gpslogger_client.post(url, data=data) await hass.async_block_till_done() assert req.status == HTTP_OK state_name = hass.states.get( "{}.{}".format(DEVICE_TRACKER_DOMAIN, data["device"]) ).state assert STATE_HOME == state_name # Enter Home again req = await gpslogger_client.post(url, data=data) await hass.async_block_till_done() assert req.status == HTTP_OK state_name = hass.states.get( "{}.{}".format(DEVICE_TRACKER_DOMAIN, data["device"]) ).state assert STATE_HOME == state_name data["longitude"] = 0 data["latitude"] = 0 # Enter Somewhere else req = await gpslogger_client.post(url, data=data) await hass.async_block_till_done() assert req.status == HTTP_OK state_name = hass.states.get( "{}.{}".format(DEVICE_TRACKER_DOMAIN, data["device"]) ).state assert STATE_NOT_HOME == state_name dev_reg = await hass.helpers.device_registry.async_get_registry() assert len(dev_reg.devices) == 1 ent_reg = await hass.helpers.entity_registry.async_get_registry() assert 
len(ent_reg.entities) == 1 async def test_enter_with_attrs(hass, gpslogger_client, webhook_id): """Test when additional attributes are present.""" url = "/api/webhook/{}".format(webhook_id) data = { "latitude": 1.0, "longitude": 1.1, "device": "123", "accuracy": 10.5, "battery": 10, "speed": 100, "direction": 105.32, "altitude": 102, "provider": "gps", "activity": "running", } req = await gpslogger_client.post(url, data=data) await hass.async_block_till_done() assert req.status == HTTP_OK state = hass.states.get("{}.{}".format(DEVICE_TRACKER_DOMAIN, data["device"])) assert state.state == STATE_NOT_HOME assert state.attributes["gps_accuracy"] == 10.5 assert state.attributes["battery_level"] == 10.0 assert state.attributes["speed"] == 100.0 assert state.attributes["direction"] == 105.32 assert state.attributes["altitude"] == 102.0 assert state.attributes["provider"] == "gps" assert state.attributes["activity"] == "running" data = { "latitude": HOME_LATITUDE, "longitude": HOME_LONGITUDE, "device": "123", "accuracy": 123, "battery": 23, "speed": 23, "direction": 123, "altitude": 123, "provider": "gps", "activity": "idle", } req = await gpslogger_client.post(url, data=data) await hass.async_block_till_done() assert req.status == HTTP_OK state = hass.states.get("{}.{}".format(DEVICE_TRACKER_DOMAIN, data["device"])) assert state.state == STATE_HOME assert state.attributes["gps_accuracy"] == 123 assert state.attributes["battery_level"] == 23 assert state.attributes["speed"] == 23 assert state.attributes["direction"] == 123 assert state.attributes["altitude"] == 123 assert state.attributes["provider"] == "gps" assert state.attributes["activity"] == "idle" @pytest.mark.xfail( reason="The device_tracker component does not support unloading yet." 
) async def test_load_unload_entry(hass, gpslogger_client, webhook_id): """Test that the appropriate dispatch signals are added and removed.""" url = "/api/webhook/{}".format(webhook_id) data = {"latitude": HOME_LATITUDE, "longitude": HOME_LONGITUDE, "device": "123"} # Enter the Home req = await gpslogger_client.post(url, data=data) await hass.async_block_till_done() assert req.status == HTTP_OK state_name = hass.states.get( "{}.{}".format(DEVICE_TRACKER_DOMAIN, data["device"]) ).state assert STATE_HOME == state_name assert len(hass.data[DATA_DISPATCHER][TRACKER_UPDATE]) == 1 entry = hass.config_entries.async_entries(DOMAIN)[0] assert await gpslogger.async_unload_entry(hass, entry) await hass.async_block_till_done() assert not hass.data[DATA_DISPATCHER][TRACKER_UPDATE]
Cinntax/home-assistant
tests/components/gpslogger/test_init.py
homeassistant/components/panel_custom/__init__.py
"""Support for Danfoss Air HRV.""" from datetime import timedelta import logging import voluptuous as vol from homeassistant.const import CONF_HOST from homeassistant.helpers import discovery import homeassistant.helpers.config_validation as cv from homeassistant.util import Throttle _LOGGER = logging.getLogger(__name__) DANFOSS_AIR_PLATFORMS = ["sensor", "binary_sensor", "switch"] DOMAIN = "danfoss_air" MIN_TIME_BETWEEN_UPDATES = timedelta(seconds=60) CONFIG_SCHEMA = vol.Schema( {DOMAIN: vol.Schema({vol.Required(CONF_HOST): cv.string})}, extra=vol.ALLOW_EXTRA ) def setup(hass, config): """Set up the Danfoss Air component.""" conf = config[DOMAIN] hass.data[DOMAIN] = DanfossAir(conf[CONF_HOST]) for platform in DANFOSS_AIR_PLATFORMS: discovery.load_platform(hass, platform, DOMAIN, {}, config) return True class DanfossAir: """Handle all communication with Danfoss Air CCM unit.""" def __init__(self, host): """Initialize the Danfoss Air CCM connection.""" self._data = {} from pydanfossair.danfossclient import DanfossClient self._client = DanfossClient(host) def get_value(self, item): """Get value for sensor.""" return self._data.get(item) def update_state(self, command, state_command): """Send update command to Danfoss Air CCM.""" self._data[state_command] = self._client.command(command) @Throttle(MIN_TIME_BETWEEN_UPDATES) def update(self): """Use the data from Danfoss Air API.""" _LOGGER.debug("Fetching data from Danfoss Air CCM module") from pydanfossair.commands import ReadCommand self._data[ReadCommand.exhaustTemperature] = self._client.command( ReadCommand.exhaustTemperature ) self._data[ReadCommand.outdoorTemperature] = self._client.command( ReadCommand.outdoorTemperature ) self._data[ReadCommand.supplyTemperature] = self._client.command( ReadCommand.supplyTemperature ) self._data[ReadCommand.extractTemperature] = self._client.command( ReadCommand.extractTemperature ) self._data[ReadCommand.humidity] = round( self._client.command(ReadCommand.humidity), 2 ) 
self._data[ReadCommand.filterPercent] = round( self._client.command(ReadCommand.filterPercent), 2 ) self._data[ReadCommand.bypass] = self._client.command(ReadCommand.bypass) self._data[ReadCommand.fan_step] = self._client.command(ReadCommand.fan_step) self._data[ReadCommand.supply_fan_speed] = self._client.command( ReadCommand.supply_fan_speed ) self._data[ReadCommand.exhaust_fan_speed] = self._client.command( ReadCommand.exhaust_fan_speed ) self._data[ReadCommand.away_mode] = self._client.command(ReadCommand.away_mode) self._data[ReadCommand.boost] = self._client.command(ReadCommand.boost) self._data[ReadCommand.battery_percent] = self._client.command( ReadCommand.battery_percent ) self._data[ReadCommand.bypass] = self._client.command(ReadCommand.bypass) self._data[ReadCommand.automatic_bypass] = self._client.command( ReadCommand.automatic_bypass ) _LOGGER.debug("Done fetching data from Danfoss Air CCM module")
"""The tests the for GPSLogger device tracker platform.""" from unittest.mock import Mock, patch import pytest from homeassistant import data_entry_flow from homeassistant.components import gpslogger, zone from homeassistant.components.device_tracker import DOMAIN as DEVICE_TRACKER_DOMAIN from homeassistant.components.gpslogger import DOMAIN, TRACKER_UPDATE from homeassistant.const import ( HTTP_OK, HTTP_UNPROCESSABLE_ENTITY, STATE_HOME, STATE_NOT_HOME, ) from homeassistant.helpers.dispatcher import DATA_DISPATCHER from homeassistant.setup import async_setup_component HOME_LATITUDE = 37.239622 HOME_LONGITUDE = -115.815811 # pylint: disable=redefined-outer-name @pytest.fixture(autouse=True) def mock_dev_track(mock_device_tracker_conf): """Mock device tracker config loading.""" pass @pytest.fixture async def gpslogger_client(loop, hass, aiohttp_client): """Mock client for GPSLogger (unauthenticated).""" assert await async_setup_component(hass, "persistent_notification", {}) assert await async_setup_component(hass, DOMAIN, {DOMAIN: {}}) await hass.async_block_till_done() with patch("homeassistant.components.device_tracker.legacy.update_config"): return await aiohttp_client(hass.http.app) @pytest.fixture(autouse=True) async def setup_zones(loop, hass): """Set up Zone config in HA.""" assert await async_setup_component( hass, zone.DOMAIN, { "zone": { "name": "Home", "latitude": HOME_LATITUDE, "longitude": HOME_LONGITUDE, "radius": 100, } }, ) await hass.async_block_till_done() @pytest.fixture async def webhook_id(hass, gpslogger_client): """Initialize the GPSLogger component and get the webhook_id.""" hass.config.api = Mock(base_url="http://example.com") result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": "user"} ) assert result["type"] == data_entry_flow.RESULT_TYPE_FORM, result result = await hass.config_entries.flow.async_configure(result["flow_id"], {}) assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY await 
hass.async_block_till_done() return result["result"].data["webhook_id"] async def test_missing_data(hass, gpslogger_client, webhook_id): """Test missing data.""" url = "/api/webhook/{}".format(webhook_id) data = {"latitude": 1.0, "longitude": 1.1, "device": "123"} # No data req = await gpslogger_client.post(url) await hass.async_block_till_done() assert req.status == HTTP_UNPROCESSABLE_ENTITY # No latitude copy = data.copy() del copy["latitude"] req = await gpslogger_client.post(url, data=copy) await hass.async_block_till_done() assert req.status == HTTP_UNPROCESSABLE_ENTITY # No device copy = data.copy() del copy["device"] req = await gpslogger_client.post(url, data=copy) await hass.async_block_till_done() assert req.status == HTTP_UNPROCESSABLE_ENTITY async def test_enter_and_exit(hass, gpslogger_client, webhook_id): """Test when there is a known zone.""" url = "/api/webhook/{}".format(webhook_id) data = {"latitude": HOME_LATITUDE, "longitude": HOME_LONGITUDE, "device": "123"} # Enter the Home req = await gpslogger_client.post(url, data=data) await hass.async_block_till_done() assert req.status == HTTP_OK state_name = hass.states.get( "{}.{}".format(DEVICE_TRACKER_DOMAIN, data["device"]) ).state assert STATE_HOME == state_name # Enter Home again req = await gpslogger_client.post(url, data=data) await hass.async_block_till_done() assert req.status == HTTP_OK state_name = hass.states.get( "{}.{}".format(DEVICE_TRACKER_DOMAIN, data["device"]) ).state assert STATE_HOME == state_name data["longitude"] = 0 data["latitude"] = 0 # Enter Somewhere else req = await gpslogger_client.post(url, data=data) await hass.async_block_till_done() assert req.status == HTTP_OK state_name = hass.states.get( "{}.{}".format(DEVICE_TRACKER_DOMAIN, data["device"]) ).state assert STATE_NOT_HOME == state_name dev_reg = await hass.helpers.device_registry.async_get_registry() assert len(dev_reg.devices) == 1 ent_reg = await hass.helpers.entity_registry.async_get_registry() assert 
len(ent_reg.entities) == 1 async def test_enter_with_attrs(hass, gpslogger_client, webhook_id): """Test when additional attributes are present.""" url = "/api/webhook/{}".format(webhook_id) data = { "latitude": 1.0, "longitude": 1.1, "device": "123", "accuracy": 10.5, "battery": 10, "speed": 100, "direction": 105.32, "altitude": 102, "provider": "gps", "activity": "running", } req = await gpslogger_client.post(url, data=data) await hass.async_block_till_done() assert req.status == HTTP_OK state = hass.states.get("{}.{}".format(DEVICE_TRACKER_DOMAIN, data["device"])) assert state.state == STATE_NOT_HOME assert state.attributes["gps_accuracy"] == 10.5 assert state.attributes["battery_level"] == 10.0 assert state.attributes["speed"] == 100.0 assert state.attributes["direction"] == 105.32 assert state.attributes["altitude"] == 102.0 assert state.attributes["provider"] == "gps" assert state.attributes["activity"] == "running" data = { "latitude": HOME_LATITUDE, "longitude": HOME_LONGITUDE, "device": "123", "accuracy": 123, "battery": 23, "speed": 23, "direction": 123, "altitude": 123, "provider": "gps", "activity": "idle", } req = await gpslogger_client.post(url, data=data) await hass.async_block_till_done() assert req.status == HTTP_OK state = hass.states.get("{}.{}".format(DEVICE_TRACKER_DOMAIN, data["device"])) assert state.state == STATE_HOME assert state.attributes["gps_accuracy"] == 123 assert state.attributes["battery_level"] == 23 assert state.attributes["speed"] == 23 assert state.attributes["direction"] == 123 assert state.attributes["altitude"] == 123 assert state.attributes["provider"] == "gps" assert state.attributes["activity"] == "idle" @pytest.mark.xfail( reason="The device_tracker component does not support unloading yet." 
) async def test_load_unload_entry(hass, gpslogger_client, webhook_id): """Test that the appropriate dispatch signals are added and removed.""" url = "/api/webhook/{}".format(webhook_id) data = {"latitude": HOME_LATITUDE, "longitude": HOME_LONGITUDE, "device": "123"} # Enter the Home req = await gpslogger_client.post(url, data=data) await hass.async_block_till_done() assert req.status == HTTP_OK state_name = hass.states.get( "{}.{}".format(DEVICE_TRACKER_DOMAIN, data["device"]) ).state assert STATE_HOME == state_name assert len(hass.data[DATA_DISPATCHER][TRACKER_UPDATE]) == 1 entry = hass.config_entries.async_entries(DOMAIN)[0] assert await gpslogger.async_unload_entry(hass, entry) await hass.async_block_till_done() assert not hass.data[DATA_DISPATCHER][TRACKER_UPDATE]
Cinntax/home-assistant
tests/components/gpslogger/test_init.py
homeassistant/components/danfoss_air/__init__.py
"""Config flow to configure the OpenUV component.""" import voluptuous as vol from homeassistant import config_entries from homeassistant.const import ( CONF_API_KEY, CONF_ELEVATION, CONF_LATITUDE, CONF_LONGITUDE, ) from homeassistant.core import callback from homeassistant.helpers import aiohttp_client, config_validation as cv from .const import DOMAIN @callback def configured_instances(hass): """Return a set of configured OpenUV instances.""" return set( "{0}, {1}".format( entry.data.get(CONF_LATITUDE, hass.config.latitude), entry.data.get(CONF_LONGITUDE, hass.config.longitude), ) for entry in hass.config_entries.async_entries(DOMAIN) ) @config_entries.HANDLERS.register(DOMAIN) class OpenUvFlowHandler(config_entries.ConfigFlow): """Handle an OpenUV config flow.""" VERSION = 1 CONNECTION_CLASS = config_entries.CONN_CLASS_CLOUD_POLL def __init__(self): """Initialize the config flow.""" pass async def _show_form(self, errors=None): """Show the form to the user.""" data_schema = vol.Schema( { vol.Required(CONF_API_KEY): str, vol.Optional(CONF_LATITUDE): cv.latitude, vol.Optional(CONF_LONGITUDE): cv.longitude, vol.Optional(CONF_ELEVATION): vol.Coerce(float), } ) return self.async_show_form( step_id="user", data_schema=data_schema, errors=errors if errors else {} ) async def async_step_import(self, import_config): """Import a config entry from configuration.yaml.""" return await self.async_step_user(import_config) async def async_step_user(self, user_input=None): """Handle the start of the config flow.""" from pyopenuv import Client from pyopenuv.errors import OpenUvError if not user_input: return await self._show_form() identifier = "{0}, {1}".format( user_input.get(CONF_LATITUDE, self.hass.config.latitude), user_input.get(CONF_LONGITUDE, self.hass.config.longitude), ) if identifier in configured_instances(self.hass): return await self._show_form({CONF_LATITUDE: "identifier_exists"}) websession = aiohttp_client.async_get_clientsession(self.hass) client = 
Client(user_input[CONF_API_KEY], 0, 0, websession) try: await client.uv_index() except OpenUvError: return await self._show_form({CONF_API_KEY: "invalid_api_key"}) return self.async_create_entry(title=identifier, data=user_input)
"""The tests the for GPSLogger device tracker platform.""" from unittest.mock import Mock, patch import pytest from homeassistant import data_entry_flow from homeassistant.components import gpslogger, zone from homeassistant.components.device_tracker import DOMAIN as DEVICE_TRACKER_DOMAIN from homeassistant.components.gpslogger import DOMAIN, TRACKER_UPDATE from homeassistant.const import ( HTTP_OK, HTTP_UNPROCESSABLE_ENTITY, STATE_HOME, STATE_NOT_HOME, ) from homeassistant.helpers.dispatcher import DATA_DISPATCHER from homeassistant.setup import async_setup_component HOME_LATITUDE = 37.239622 HOME_LONGITUDE = -115.815811 # pylint: disable=redefined-outer-name @pytest.fixture(autouse=True) def mock_dev_track(mock_device_tracker_conf): """Mock device tracker config loading.""" pass @pytest.fixture async def gpslogger_client(loop, hass, aiohttp_client): """Mock client for GPSLogger (unauthenticated).""" assert await async_setup_component(hass, "persistent_notification", {}) assert await async_setup_component(hass, DOMAIN, {DOMAIN: {}}) await hass.async_block_till_done() with patch("homeassistant.components.device_tracker.legacy.update_config"): return await aiohttp_client(hass.http.app) @pytest.fixture(autouse=True) async def setup_zones(loop, hass): """Set up Zone config in HA.""" assert await async_setup_component( hass, zone.DOMAIN, { "zone": { "name": "Home", "latitude": HOME_LATITUDE, "longitude": HOME_LONGITUDE, "radius": 100, } }, ) await hass.async_block_till_done() @pytest.fixture async def webhook_id(hass, gpslogger_client): """Initialize the GPSLogger component and get the webhook_id.""" hass.config.api = Mock(base_url="http://example.com") result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": "user"} ) assert result["type"] == data_entry_flow.RESULT_TYPE_FORM, result result = await hass.config_entries.flow.async_configure(result["flow_id"], {}) assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY await 
hass.async_block_till_done() return result["result"].data["webhook_id"] async def test_missing_data(hass, gpslogger_client, webhook_id): """Test missing data.""" url = "/api/webhook/{}".format(webhook_id) data = {"latitude": 1.0, "longitude": 1.1, "device": "123"} # No data req = await gpslogger_client.post(url) await hass.async_block_till_done() assert req.status == HTTP_UNPROCESSABLE_ENTITY # No latitude copy = data.copy() del copy["latitude"] req = await gpslogger_client.post(url, data=copy) await hass.async_block_till_done() assert req.status == HTTP_UNPROCESSABLE_ENTITY # No device copy = data.copy() del copy["device"] req = await gpslogger_client.post(url, data=copy) await hass.async_block_till_done() assert req.status == HTTP_UNPROCESSABLE_ENTITY async def test_enter_and_exit(hass, gpslogger_client, webhook_id): """Test when there is a known zone.""" url = "/api/webhook/{}".format(webhook_id) data = {"latitude": HOME_LATITUDE, "longitude": HOME_LONGITUDE, "device": "123"} # Enter the Home req = await gpslogger_client.post(url, data=data) await hass.async_block_till_done() assert req.status == HTTP_OK state_name = hass.states.get( "{}.{}".format(DEVICE_TRACKER_DOMAIN, data["device"]) ).state assert STATE_HOME == state_name # Enter Home again req = await gpslogger_client.post(url, data=data) await hass.async_block_till_done() assert req.status == HTTP_OK state_name = hass.states.get( "{}.{}".format(DEVICE_TRACKER_DOMAIN, data["device"]) ).state assert STATE_HOME == state_name data["longitude"] = 0 data["latitude"] = 0 # Enter Somewhere else req = await gpslogger_client.post(url, data=data) await hass.async_block_till_done() assert req.status == HTTP_OK state_name = hass.states.get( "{}.{}".format(DEVICE_TRACKER_DOMAIN, data["device"]) ).state assert STATE_NOT_HOME == state_name dev_reg = await hass.helpers.device_registry.async_get_registry() assert len(dev_reg.devices) == 1 ent_reg = await hass.helpers.entity_registry.async_get_registry() assert 
len(ent_reg.entities) == 1 async def test_enter_with_attrs(hass, gpslogger_client, webhook_id): """Test when additional attributes are present.""" url = "/api/webhook/{}".format(webhook_id) data = { "latitude": 1.0, "longitude": 1.1, "device": "123", "accuracy": 10.5, "battery": 10, "speed": 100, "direction": 105.32, "altitude": 102, "provider": "gps", "activity": "running", } req = await gpslogger_client.post(url, data=data) await hass.async_block_till_done() assert req.status == HTTP_OK state = hass.states.get("{}.{}".format(DEVICE_TRACKER_DOMAIN, data["device"])) assert state.state == STATE_NOT_HOME assert state.attributes["gps_accuracy"] == 10.5 assert state.attributes["battery_level"] == 10.0 assert state.attributes["speed"] == 100.0 assert state.attributes["direction"] == 105.32 assert state.attributes["altitude"] == 102.0 assert state.attributes["provider"] == "gps" assert state.attributes["activity"] == "running" data = { "latitude": HOME_LATITUDE, "longitude": HOME_LONGITUDE, "device": "123", "accuracy": 123, "battery": 23, "speed": 23, "direction": 123, "altitude": 123, "provider": "gps", "activity": "idle", } req = await gpslogger_client.post(url, data=data) await hass.async_block_till_done() assert req.status == HTTP_OK state = hass.states.get("{}.{}".format(DEVICE_TRACKER_DOMAIN, data["device"])) assert state.state == STATE_HOME assert state.attributes["gps_accuracy"] == 123 assert state.attributes["battery_level"] == 23 assert state.attributes["speed"] == 23 assert state.attributes["direction"] == 123 assert state.attributes["altitude"] == 123 assert state.attributes["provider"] == "gps" assert state.attributes["activity"] == "idle" @pytest.mark.xfail( reason="The device_tracker component does not support unloading yet." 
) async def test_load_unload_entry(hass, gpslogger_client, webhook_id): """Test that the appropriate dispatch signals are added and removed.""" url = "/api/webhook/{}".format(webhook_id) data = {"latitude": HOME_LATITUDE, "longitude": HOME_LONGITUDE, "device": "123"} # Enter the Home req = await gpslogger_client.post(url, data=data) await hass.async_block_till_done() assert req.status == HTTP_OK state_name = hass.states.get( "{}.{}".format(DEVICE_TRACKER_DOMAIN, data["device"]) ).state assert STATE_HOME == state_name assert len(hass.data[DATA_DISPATCHER][TRACKER_UPDATE]) == 1 entry = hass.config_entries.async_entries(DOMAIN)[0] assert await gpslogger.async_unload_entry(hass, entry) await hass.async_block_till_done() assert not hass.data[DATA_DISPATCHER][TRACKER_UPDATE]
Cinntax/home-assistant
tests/components/gpslogger/test_init.py
homeassistant/components/openuv/config_flow.py
"""Support for MQTT room presence detection.""" import logging import json from datetime import timedelta import voluptuous as vol from homeassistant.components import mqtt import homeassistant.helpers.config_validation as cv from homeassistant.components.mqtt import CONF_STATE_TOPIC from homeassistant.components.sensor import PLATFORM_SCHEMA from homeassistant.const import CONF_NAME, CONF_TIMEOUT, STATE_NOT_HOME, ATTR_ID from homeassistant.core import callback from homeassistant.helpers.entity import Entity from homeassistant.util import dt, slugify _LOGGER = logging.getLogger(__name__) ATTR_DEVICE_ID = "device_id" ATTR_DISTANCE = "distance" ATTR_ROOM = "room" CONF_DEVICE_ID = "device_id" CONF_AWAY_TIMEOUT = "away_timeout" DEFAULT_AWAY_TIMEOUT = 0 DEFAULT_NAME = "Room Sensor" DEFAULT_TIMEOUT = 5 DEFAULT_TOPIC = "room_presence" PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend( { vol.Required(CONF_DEVICE_ID): cv.string, vol.Required(CONF_TIMEOUT, default=DEFAULT_TIMEOUT): cv.positive_int, vol.Optional(CONF_AWAY_TIMEOUT, default=DEFAULT_AWAY_TIMEOUT): cv.positive_int, vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string, } ).extend(mqtt.MQTT_RO_PLATFORM_SCHEMA.schema) MQTT_PAYLOAD = vol.Schema( vol.All( json.loads, vol.Schema( { vol.Required(ATTR_ID): cv.string, vol.Required(ATTR_DISTANCE): vol.Coerce(float), }, extra=vol.ALLOW_EXTRA, ), ) ) async def async_setup_platform(hass, config, async_add_entities, discovery_info=None): """Set up MQTT room Sensor.""" async_add_entities( [ MQTTRoomSensor( config.get(CONF_NAME), config.get(CONF_STATE_TOPIC), config.get(CONF_DEVICE_ID), config.get(CONF_TIMEOUT), config.get(CONF_AWAY_TIMEOUT), ) ] ) class MQTTRoomSensor(Entity): """Representation of a room sensor that is updated via MQTT.""" def __init__(self, name, state_topic, device_id, timeout, consider_home): """Initialize the sensor.""" self._state = STATE_NOT_HOME self._name = name self._state_topic = "{}{}".format(state_topic, "/+") self._device_id = slugify(device_id).upper() 
self._timeout = timeout self._consider_home = ( timedelta(seconds=consider_home) if consider_home else None ) self._distance = None self._updated = None async def async_added_to_hass(self): """Subscribe to MQTT events.""" @callback def update_state(device_id, room, distance): """Update the sensor state.""" self._state = room self._distance = distance self._updated = dt.utcnow() self.async_schedule_update_ha_state() @callback def message_received(msg): """Handle new MQTT messages.""" try: data = MQTT_PAYLOAD(msg.payload) except vol.MultipleInvalid as error: _LOGGER.debug("Skipping update because of malformatted data: %s", error) return device = _parse_update_data(msg.topic, data) if device.get(CONF_DEVICE_ID) == self._device_id: if self._distance is None or self._updated is None: update_state(**device) else: # update if: # device is in the same room OR # device is closer to another room OR # last update from other room was too long ago timediff = dt.utcnow() - self._updated if ( device.get(ATTR_ROOM) == self._state or device.get(ATTR_DISTANCE) < self._distance or timediff.seconds >= self._timeout ): update_state(**device) return await mqtt.async_subscribe( self.hass, self._state_topic, message_received, 1 ) @property def name(self): """Return the name of the sensor.""" return self._name @property def device_state_attributes(self): """Return the state attributes.""" return {ATTR_DISTANCE: self._distance} @property def state(self): """Return the current room of the entity.""" return self._state def update(self): """Update the state for absent devices.""" if ( self._updated and self._consider_home and dt.utcnow() - self._updated > self._consider_home ): self._state = STATE_NOT_HOME def _parse_update_data(topic, data): """Parse the room presence update.""" parts = topic.split("/") room = parts[-1] device_id = slugify(data.get(ATTR_ID)).upper() distance = data.get("distance") parsed_data = {ATTR_DEVICE_ID: device_id, ATTR_ROOM: room, ATTR_DISTANCE: distance} return 
parsed_data
"""The tests the for GPSLogger device tracker platform.""" from unittest.mock import Mock, patch import pytest from homeassistant import data_entry_flow from homeassistant.components import gpslogger, zone from homeassistant.components.device_tracker import DOMAIN as DEVICE_TRACKER_DOMAIN from homeassistant.components.gpslogger import DOMAIN, TRACKER_UPDATE from homeassistant.const import ( HTTP_OK, HTTP_UNPROCESSABLE_ENTITY, STATE_HOME, STATE_NOT_HOME, ) from homeassistant.helpers.dispatcher import DATA_DISPATCHER from homeassistant.setup import async_setup_component HOME_LATITUDE = 37.239622 HOME_LONGITUDE = -115.815811 # pylint: disable=redefined-outer-name @pytest.fixture(autouse=True) def mock_dev_track(mock_device_tracker_conf): """Mock device tracker config loading.""" pass @pytest.fixture async def gpslogger_client(loop, hass, aiohttp_client): """Mock client for GPSLogger (unauthenticated).""" assert await async_setup_component(hass, "persistent_notification", {}) assert await async_setup_component(hass, DOMAIN, {DOMAIN: {}}) await hass.async_block_till_done() with patch("homeassistant.components.device_tracker.legacy.update_config"): return await aiohttp_client(hass.http.app) @pytest.fixture(autouse=True) async def setup_zones(loop, hass): """Set up Zone config in HA.""" assert await async_setup_component( hass, zone.DOMAIN, { "zone": { "name": "Home", "latitude": HOME_LATITUDE, "longitude": HOME_LONGITUDE, "radius": 100, } }, ) await hass.async_block_till_done() @pytest.fixture async def webhook_id(hass, gpslogger_client): """Initialize the GPSLogger component and get the webhook_id.""" hass.config.api = Mock(base_url="http://example.com") result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": "user"} ) assert result["type"] == data_entry_flow.RESULT_TYPE_FORM, result result = await hass.config_entries.flow.async_configure(result["flow_id"], {}) assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY await 
hass.async_block_till_done() return result["result"].data["webhook_id"] async def test_missing_data(hass, gpslogger_client, webhook_id): """Test missing data.""" url = "/api/webhook/{}".format(webhook_id) data = {"latitude": 1.0, "longitude": 1.1, "device": "123"} # No data req = await gpslogger_client.post(url) await hass.async_block_till_done() assert req.status == HTTP_UNPROCESSABLE_ENTITY # No latitude copy = data.copy() del copy["latitude"] req = await gpslogger_client.post(url, data=copy) await hass.async_block_till_done() assert req.status == HTTP_UNPROCESSABLE_ENTITY # No device copy = data.copy() del copy["device"] req = await gpslogger_client.post(url, data=copy) await hass.async_block_till_done() assert req.status == HTTP_UNPROCESSABLE_ENTITY async def test_enter_and_exit(hass, gpslogger_client, webhook_id): """Test when there is a known zone.""" url = "/api/webhook/{}".format(webhook_id) data = {"latitude": HOME_LATITUDE, "longitude": HOME_LONGITUDE, "device": "123"} # Enter the Home req = await gpslogger_client.post(url, data=data) await hass.async_block_till_done() assert req.status == HTTP_OK state_name = hass.states.get( "{}.{}".format(DEVICE_TRACKER_DOMAIN, data["device"]) ).state assert STATE_HOME == state_name # Enter Home again req = await gpslogger_client.post(url, data=data) await hass.async_block_till_done() assert req.status == HTTP_OK state_name = hass.states.get( "{}.{}".format(DEVICE_TRACKER_DOMAIN, data["device"]) ).state assert STATE_HOME == state_name data["longitude"] = 0 data["latitude"] = 0 # Enter Somewhere else req = await gpslogger_client.post(url, data=data) await hass.async_block_till_done() assert req.status == HTTP_OK state_name = hass.states.get( "{}.{}".format(DEVICE_TRACKER_DOMAIN, data["device"]) ).state assert STATE_NOT_HOME == state_name dev_reg = await hass.helpers.device_registry.async_get_registry() assert len(dev_reg.devices) == 1 ent_reg = await hass.helpers.entity_registry.async_get_registry() assert 
len(ent_reg.entities) == 1 async def test_enter_with_attrs(hass, gpslogger_client, webhook_id): """Test when additional attributes are present.""" url = "/api/webhook/{}".format(webhook_id) data = { "latitude": 1.0, "longitude": 1.1, "device": "123", "accuracy": 10.5, "battery": 10, "speed": 100, "direction": 105.32, "altitude": 102, "provider": "gps", "activity": "running", } req = await gpslogger_client.post(url, data=data) await hass.async_block_till_done() assert req.status == HTTP_OK state = hass.states.get("{}.{}".format(DEVICE_TRACKER_DOMAIN, data["device"])) assert state.state == STATE_NOT_HOME assert state.attributes["gps_accuracy"] == 10.5 assert state.attributes["battery_level"] == 10.0 assert state.attributes["speed"] == 100.0 assert state.attributes["direction"] == 105.32 assert state.attributes["altitude"] == 102.0 assert state.attributes["provider"] == "gps" assert state.attributes["activity"] == "running" data = { "latitude": HOME_LATITUDE, "longitude": HOME_LONGITUDE, "device": "123", "accuracy": 123, "battery": 23, "speed": 23, "direction": 123, "altitude": 123, "provider": "gps", "activity": "idle", } req = await gpslogger_client.post(url, data=data) await hass.async_block_till_done() assert req.status == HTTP_OK state = hass.states.get("{}.{}".format(DEVICE_TRACKER_DOMAIN, data["device"])) assert state.state == STATE_HOME assert state.attributes["gps_accuracy"] == 123 assert state.attributes["battery_level"] == 23 assert state.attributes["speed"] == 23 assert state.attributes["direction"] == 123 assert state.attributes["altitude"] == 123 assert state.attributes["provider"] == "gps" assert state.attributes["activity"] == "idle" @pytest.mark.xfail( reason="The device_tracker component does not support unloading yet." 
) async def test_load_unload_entry(hass, gpslogger_client, webhook_id): """Test that the appropriate dispatch signals are added and removed.""" url = "/api/webhook/{}".format(webhook_id) data = {"latitude": HOME_LATITUDE, "longitude": HOME_LONGITUDE, "device": "123"} # Enter the Home req = await gpslogger_client.post(url, data=data) await hass.async_block_till_done() assert req.status == HTTP_OK state_name = hass.states.get( "{}.{}".format(DEVICE_TRACKER_DOMAIN, data["device"]) ).state assert STATE_HOME == state_name assert len(hass.data[DATA_DISPATCHER][TRACKER_UPDATE]) == 1 entry = hass.config_entries.async_entries(DOMAIN)[0] assert await gpslogger.async_unload_entry(hass, entry) await hass.async_block_till_done() assert not hass.data[DATA_DISPATCHER][TRACKER_UPDATE]
Cinntax/home-assistant
tests/components/gpslogger/test_init.py
homeassistant/components/mqtt_room/sensor.py
"""Support for functionality to have conversations with Home Assistant.""" import logging import re import voluptuous as vol from homeassistant import core from homeassistant.components import http from homeassistant.components.cover import INTENT_CLOSE_COVER, INTENT_OPEN_COVER from homeassistant.components.http.data_validator import RequestDataValidator from homeassistant.const import EVENT_COMPONENT_LOADED from homeassistant.core import callback from homeassistant.helpers import config_validation as cv, intent from homeassistant.loader import bind_hass from homeassistant.setup import ATTR_COMPONENT from .util import create_matcher _LOGGER = logging.getLogger(__name__) ATTR_TEXT = "text" DOMAIN = "conversation" REGEX_TURN_COMMAND = re.compile(r"turn (?P<name>(?: |\w)+) (?P<command>\w+)") REGEX_TYPE = type(re.compile("")) UTTERANCES = { "cover": { INTENT_OPEN_COVER: ["Open [the] [a] [an] {name}[s]"], INTENT_CLOSE_COVER: ["Close [the] [a] [an] {name}[s]"], } } SERVICE_PROCESS = "process" SERVICE_PROCESS_SCHEMA = vol.Schema({vol.Required(ATTR_TEXT): cv.string}) CONFIG_SCHEMA = vol.Schema( { DOMAIN: vol.Schema( { vol.Optional("intents"): vol.Schema( {cv.string: vol.All(cv.ensure_list, [cv.string])} ) } ) }, extra=vol.ALLOW_EXTRA, ) @core.callback @bind_hass def async_register(hass, intent_type, utterances): """Register utterances and any custom intents. Registrations don't require conversations to be loaded. They will become active once the conversation component is loaded. 
""" intents = hass.data.get(DOMAIN) if intents is None: intents = hass.data[DOMAIN] = {} conf = intents.get(intent_type) if conf is None: conf = intents[intent_type] = [] for utterance in utterances: if isinstance(utterance, REGEX_TYPE): conf.append(utterance) else: conf.append(create_matcher(utterance)) async def async_setup(hass, config): """Register the process service.""" config = config.get(DOMAIN, {}) intents = hass.data.get(DOMAIN) if intents is None: intents = hass.data[DOMAIN] = {} for intent_type, utterances in config.get("intents", {}).items(): conf = intents.get(intent_type) if conf is None: conf = intents[intent_type] = [] conf.extend(create_matcher(utterance) for utterance in utterances) async def process(service): """Parse text into commands.""" text = service.data[ATTR_TEXT] _LOGGER.debug("Processing: <%s>", text) try: await _process(hass, text) except intent.IntentHandleError as err: _LOGGER.error("Error processing %s: %s", text, err) hass.services.async_register( DOMAIN, SERVICE_PROCESS, process, schema=SERVICE_PROCESS_SCHEMA ) hass.http.register_view(ConversationProcessView) # We strip trailing 's' from name because our state matcher will fail # if a letter is not there. By removing 's' we can match singular and # plural names. 
async_register( hass, intent.INTENT_TURN_ON, ["Turn [the] [a] {name}[s] on", "Turn on [the] [a] [an] {name}[s]"], ) async_register( hass, intent.INTENT_TURN_OFF, ["Turn [the] [a] [an] {name}[s] off", "Turn off [the] [a] [an] {name}[s]"], ) async_register( hass, intent.INTENT_TOGGLE, ["Toggle [the] [a] [an] {name}[s]", "[the] [a] [an] {name}[s] toggle"], ) @callback def register_utterances(component): """Register utterances for a component.""" if component not in UTTERANCES: return for intent_type, sentences in UTTERANCES[component].items(): async_register(hass, intent_type, sentences) @callback def component_loaded(event): """Handle a new component loaded.""" register_utterances(event.data[ATTR_COMPONENT]) hass.bus.async_listen(EVENT_COMPONENT_LOADED, component_loaded) # Check already loaded components. for component in hass.config.components: register_utterances(component) return True async def _process(hass, text): """Process a line of text.""" intents = hass.data.get(DOMAIN, {}) for intent_type, matchers in intents.items(): for matcher in matchers: match = matcher.match(text) if not match: continue response = await hass.helpers.intent.async_handle( DOMAIN, intent_type, {key: {"value": value} for key, value in match.groupdict().items()}, text, ) return response class ConversationProcessView(http.HomeAssistantView): """View to retrieve shopping list content.""" url = "/api/conversation/process" name = "api:conversation:process" @RequestDataValidator(vol.Schema({vol.Required("text"): str})) async def post(self, request, data): """Send a request for processing.""" hass = request.app["hass"] try: intent_result = await _process(hass, data["text"]) except intent.IntentHandleError as err: intent_result = intent.IntentResponse() intent_result.async_set_speech(str(err)) if intent_result is None: intent_result = intent.IntentResponse() intent_result.async_set_speech("Sorry, I didn't understand that") return self.json(intent_result)
"""The tests the for GPSLogger device tracker platform.""" from unittest.mock import Mock, patch import pytest from homeassistant import data_entry_flow from homeassistant.components import gpslogger, zone from homeassistant.components.device_tracker import DOMAIN as DEVICE_TRACKER_DOMAIN from homeassistant.components.gpslogger import DOMAIN, TRACKER_UPDATE from homeassistant.const import ( HTTP_OK, HTTP_UNPROCESSABLE_ENTITY, STATE_HOME, STATE_NOT_HOME, ) from homeassistant.helpers.dispatcher import DATA_DISPATCHER from homeassistant.setup import async_setup_component HOME_LATITUDE = 37.239622 HOME_LONGITUDE = -115.815811 # pylint: disable=redefined-outer-name @pytest.fixture(autouse=True) def mock_dev_track(mock_device_tracker_conf): """Mock device tracker config loading.""" pass @pytest.fixture async def gpslogger_client(loop, hass, aiohttp_client): """Mock client for GPSLogger (unauthenticated).""" assert await async_setup_component(hass, "persistent_notification", {}) assert await async_setup_component(hass, DOMAIN, {DOMAIN: {}}) await hass.async_block_till_done() with patch("homeassistant.components.device_tracker.legacy.update_config"): return await aiohttp_client(hass.http.app) @pytest.fixture(autouse=True) async def setup_zones(loop, hass): """Set up Zone config in HA.""" assert await async_setup_component( hass, zone.DOMAIN, { "zone": { "name": "Home", "latitude": HOME_LATITUDE, "longitude": HOME_LONGITUDE, "radius": 100, } }, ) await hass.async_block_till_done() @pytest.fixture async def webhook_id(hass, gpslogger_client): """Initialize the GPSLogger component and get the webhook_id.""" hass.config.api = Mock(base_url="http://example.com") result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": "user"} ) assert result["type"] == data_entry_flow.RESULT_TYPE_FORM, result result = await hass.config_entries.flow.async_configure(result["flow_id"], {}) assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY await 
hass.async_block_till_done() return result["result"].data["webhook_id"] async def test_missing_data(hass, gpslogger_client, webhook_id): """Test missing data.""" url = "/api/webhook/{}".format(webhook_id) data = {"latitude": 1.0, "longitude": 1.1, "device": "123"} # No data req = await gpslogger_client.post(url) await hass.async_block_till_done() assert req.status == HTTP_UNPROCESSABLE_ENTITY # No latitude copy = data.copy() del copy["latitude"] req = await gpslogger_client.post(url, data=copy) await hass.async_block_till_done() assert req.status == HTTP_UNPROCESSABLE_ENTITY # No device copy = data.copy() del copy["device"] req = await gpslogger_client.post(url, data=copy) await hass.async_block_till_done() assert req.status == HTTP_UNPROCESSABLE_ENTITY async def test_enter_and_exit(hass, gpslogger_client, webhook_id): """Test when there is a known zone.""" url = "/api/webhook/{}".format(webhook_id) data = {"latitude": HOME_LATITUDE, "longitude": HOME_LONGITUDE, "device": "123"} # Enter the Home req = await gpslogger_client.post(url, data=data) await hass.async_block_till_done() assert req.status == HTTP_OK state_name = hass.states.get( "{}.{}".format(DEVICE_TRACKER_DOMAIN, data["device"]) ).state assert STATE_HOME == state_name # Enter Home again req = await gpslogger_client.post(url, data=data) await hass.async_block_till_done() assert req.status == HTTP_OK state_name = hass.states.get( "{}.{}".format(DEVICE_TRACKER_DOMAIN, data["device"]) ).state assert STATE_HOME == state_name data["longitude"] = 0 data["latitude"] = 0 # Enter Somewhere else req = await gpslogger_client.post(url, data=data) await hass.async_block_till_done() assert req.status == HTTP_OK state_name = hass.states.get( "{}.{}".format(DEVICE_TRACKER_DOMAIN, data["device"]) ).state assert STATE_NOT_HOME == state_name dev_reg = await hass.helpers.device_registry.async_get_registry() assert len(dev_reg.devices) == 1 ent_reg = await hass.helpers.entity_registry.async_get_registry() assert 
len(ent_reg.entities) == 1 async def test_enter_with_attrs(hass, gpslogger_client, webhook_id): """Test when additional attributes are present.""" url = "/api/webhook/{}".format(webhook_id) data = { "latitude": 1.0, "longitude": 1.1, "device": "123", "accuracy": 10.5, "battery": 10, "speed": 100, "direction": 105.32, "altitude": 102, "provider": "gps", "activity": "running", } req = await gpslogger_client.post(url, data=data) await hass.async_block_till_done() assert req.status == HTTP_OK state = hass.states.get("{}.{}".format(DEVICE_TRACKER_DOMAIN, data["device"])) assert state.state == STATE_NOT_HOME assert state.attributes["gps_accuracy"] == 10.5 assert state.attributes["battery_level"] == 10.0 assert state.attributes["speed"] == 100.0 assert state.attributes["direction"] == 105.32 assert state.attributes["altitude"] == 102.0 assert state.attributes["provider"] == "gps" assert state.attributes["activity"] == "running" data = { "latitude": HOME_LATITUDE, "longitude": HOME_LONGITUDE, "device": "123", "accuracy": 123, "battery": 23, "speed": 23, "direction": 123, "altitude": 123, "provider": "gps", "activity": "idle", } req = await gpslogger_client.post(url, data=data) await hass.async_block_till_done() assert req.status == HTTP_OK state = hass.states.get("{}.{}".format(DEVICE_TRACKER_DOMAIN, data["device"])) assert state.state == STATE_HOME assert state.attributes["gps_accuracy"] == 123 assert state.attributes["battery_level"] == 23 assert state.attributes["speed"] == 23 assert state.attributes["direction"] == 123 assert state.attributes["altitude"] == 123 assert state.attributes["provider"] == "gps" assert state.attributes["activity"] == "idle" @pytest.mark.xfail( reason="The device_tracker component does not support unloading yet." 
) async def test_load_unload_entry(hass, gpslogger_client, webhook_id): """Test that the appropriate dispatch signals are added and removed.""" url = "/api/webhook/{}".format(webhook_id) data = {"latitude": HOME_LATITUDE, "longitude": HOME_LONGITUDE, "device": "123"} # Enter the Home req = await gpslogger_client.post(url, data=data) await hass.async_block_till_done() assert req.status == HTTP_OK state_name = hass.states.get( "{}.{}".format(DEVICE_TRACKER_DOMAIN, data["device"]) ).state assert STATE_HOME == state_name assert len(hass.data[DATA_DISPATCHER][TRACKER_UPDATE]) == 1 entry = hass.config_entries.async_entries(DOMAIN)[0] assert await gpslogger.async_unload_entry(hass, entry) await hass.async_block_till_done() assert not hass.data[DATA_DISPATCHER][TRACKER_UPDATE]
Cinntax/home-assistant
tests/components/gpslogger/test_init.py
homeassistant/components/conversation/__init__.py
"""Support for Generic Modbus Thermostats.""" import logging import struct import voluptuous as vol from homeassistant.components.climate import PLATFORM_SCHEMA, ClimateDevice from homeassistant.components.climate.const import ( SUPPORT_TARGET_TEMPERATURE, HVAC_MODE_HEAT, ) from homeassistant.const import ATTR_TEMPERATURE, CONF_NAME, CONF_SLAVE import homeassistant.helpers.config_validation as cv from . import CONF_HUB, DEFAULT_HUB, DOMAIN as MODBUS_DOMAIN _LOGGER = logging.getLogger(__name__) CONF_TARGET_TEMP = "target_temp_register" CONF_CURRENT_TEMP = "current_temp_register" CONF_DATA_TYPE = "data_type" CONF_COUNT = "data_count" CONF_PRECISION = "precision" DATA_TYPE_INT = "int" DATA_TYPE_UINT = "uint" DATA_TYPE_FLOAT = "float" SUPPORT_FLAGS = SUPPORT_TARGET_TEMPERATURE HVAC_MODES = [HVAC_MODE_HEAT] PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend( { vol.Required(CONF_CURRENT_TEMP): cv.positive_int, vol.Required(CONF_NAME): cv.string, vol.Required(CONF_SLAVE): cv.positive_int, vol.Required(CONF_TARGET_TEMP): cv.positive_int, vol.Optional(CONF_COUNT, default=2): cv.positive_int, vol.Optional(CONF_DATA_TYPE, default=DATA_TYPE_FLOAT): vol.In( [DATA_TYPE_INT, DATA_TYPE_UINT, DATA_TYPE_FLOAT] ), vol.Optional(CONF_HUB, default=DEFAULT_HUB): cv.string, vol.Optional(CONF_PRECISION, default=1): cv.positive_int, } ) def setup_platform(hass, config, add_entities, discovery_info=None): """Set up the Modbus Thermostat Platform.""" name = config.get(CONF_NAME) modbus_slave = config.get(CONF_SLAVE) target_temp_register = config.get(CONF_TARGET_TEMP) current_temp_register = config.get(CONF_CURRENT_TEMP) data_type = config.get(CONF_DATA_TYPE) count = config.get(CONF_COUNT) precision = config.get(CONF_PRECISION) hub_name = config.get(CONF_HUB) hub = hass.data[MODBUS_DOMAIN][hub_name] add_entities( [ ModbusThermostat( hub, name, modbus_slave, target_temp_register, current_temp_register, data_type, count, precision, ) ], True, ) class ModbusThermostat(ClimateDevice): """Representation of a 
Modbus Thermostat.""" def __init__( self, hub, name, modbus_slave, target_temp_register, current_temp_register, data_type, count, precision, ): """Initialize the unit.""" self._hub = hub self._name = name self._slave = modbus_slave self._target_temperature_register = target_temp_register self._current_temperature_register = current_temp_register self._target_temperature = None self._current_temperature = None self._data_type = data_type self._count = int(count) self._precision = precision self._structure = ">f" data_types = { DATA_TYPE_INT: {1: "h", 2: "i", 4: "q"}, DATA_TYPE_UINT: {1: "H", 2: "I", 4: "Q"}, DATA_TYPE_FLOAT: {1: "e", 2: "f", 4: "d"}, } self._structure = ">{}".format(data_types[self._data_type][self._count]) @property def supported_features(self): """Return the list of supported features.""" return SUPPORT_FLAGS def update(self): """Update Target & Current Temperature.""" self._target_temperature = self.read_register(self._target_temperature_register) self._current_temperature = self.read_register( self._current_temperature_register ) @property def hvac_mode(self): """Return the current HVAC mode.""" return HVAC_MODE_HEAT @property def hvac_modes(self): """Return the possible HVAC modes.""" return HVAC_MODES @property def name(self): """Return the name of the climate device.""" return self._name @property def current_temperature(self): """Return the current temperature.""" return self._current_temperature @property def target_temperature(self): """Return the target temperature.""" return self._target_temperature def set_temperature(self, **kwargs): """Set new target temperature.""" target_temperature = kwargs.get(ATTR_TEMPERATURE) if target_temperature is None: return byte_string = struct.pack(self._structure, target_temperature) register_value = struct.unpack(">h", byte_string[0:2])[0] try: self.write_register(self._target_temperature_register, register_value) except AttributeError as ex: _LOGGER.error(ex) def read_register(self, register): """Read 
holding register using the Modbus hub slave.""" try: result = self._hub.read_holding_registers( self._slave, register, self._count ) except AttributeError as ex: _LOGGER.error(ex) byte_string = b"".join( [x.to_bytes(2, byteorder="big") for x in result.registers] ) val = struct.unpack(self._structure, byte_string)[0] register_value = format(val, f".{self._precision}f") return register_value def write_register(self, register, value): """Write register using the Modbus hub slave.""" self._hub.write_registers(self._slave, register, [value, 0])
"""The tests the for GPSLogger device tracker platform.""" from unittest.mock import Mock, patch import pytest from homeassistant import data_entry_flow from homeassistant.components import gpslogger, zone from homeassistant.components.device_tracker import DOMAIN as DEVICE_TRACKER_DOMAIN from homeassistant.components.gpslogger import DOMAIN, TRACKER_UPDATE from homeassistant.const import ( HTTP_OK, HTTP_UNPROCESSABLE_ENTITY, STATE_HOME, STATE_NOT_HOME, ) from homeassistant.helpers.dispatcher import DATA_DISPATCHER from homeassistant.setup import async_setup_component HOME_LATITUDE = 37.239622 HOME_LONGITUDE = -115.815811 # pylint: disable=redefined-outer-name @pytest.fixture(autouse=True) def mock_dev_track(mock_device_tracker_conf): """Mock device tracker config loading.""" pass @pytest.fixture async def gpslogger_client(loop, hass, aiohttp_client): """Mock client for GPSLogger (unauthenticated).""" assert await async_setup_component(hass, "persistent_notification", {}) assert await async_setup_component(hass, DOMAIN, {DOMAIN: {}}) await hass.async_block_till_done() with patch("homeassistant.components.device_tracker.legacy.update_config"): return await aiohttp_client(hass.http.app) @pytest.fixture(autouse=True) async def setup_zones(loop, hass): """Set up Zone config in HA.""" assert await async_setup_component( hass, zone.DOMAIN, { "zone": { "name": "Home", "latitude": HOME_LATITUDE, "longitude": HOME_LONGITUDE, "radius": 100, } }, ) await hass.async_block_till_done() @pytest.fixture async def webhook_id(hass, gpslogger_client): """Initialize the GPSLogger component and get the webhook_id.""" hass.config.api = Mock(base_url="http://example.com") result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": "user"} ) assert result["type"] == data_entry_flow.RESULT_TYPE_FORM, result result = await hass.config_entries.flow.async_configure(result["flow_id"], {}) assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY await 
hass.async_block_till_done() return result["result"].data["webhook_id"] async def test_missing_data(hass, gpslogger_client, webhook_id): """Test missing data.""" url = "/api/webhook/{}".format(webhook_id) data = {"latitude": 1.0, "longitude": 1.1, "device": "123"} # No data req = await gpslogger_client.post(url) await hass.async_block_till_done() assert req.status == HTTP_UNPROCESSABLE_ENTITY # No latitude copy = data.copy() del copy["latitude"] req = await gpslogger_client.post(url, data=copy) await hass.async_block_till_done() assert req.status == HTTP_UNPROCESSABLE_ENTITY # No device copy = data.copy() del copy["device"] req = await gpslogger_client.post(url, data=copy) await hass.async_block_till_done() assert req.status == HTTP_UNPROCESSABLE_ENTITY async def test_enter_and_exit(hass, gpslogger_client, webhook_id): """Test when there is a known zone.""" url = "/api/webhook/{}".format(webhook_id) data = {"latitude": HOME_LATITUDE, "longitude": HOME_LONGITUDE, "device": "123"} # Enter the Home req = await gpslogger_client.post(url, data=data) await hass.async_block_till_done() assert req.status == HTTP_OK state_name = hass.states.get( "{}.{}".format(DEVICE_TRACKER_DOMAIN, data["device"]) ).state assert STATE_HOME == state_name # Enter Home again req = await gpslogger_client.post(url, data=data) await hass.async_block_till_done() assert req.status == HTTP_OK state_name = hass.states.get( "{}.{}".format(DEVICE_TRACKER_DOMAIN, data["device"]) ).state assert STATE_HOME == state_name data["longitude"] = 0 data["latitude"] = 0 # Enter Somewhere else req = await gpslogger_client.post(url, data=data) await hass.async_block_till_done() assert req.status == HTTP_OK state_name = hass.states.get( "{}.{}".format(DEVICE_TRACKER_DOMAIN, data["device"]) ).state assert STATE_NOT_HOME == state_name dev_reg = await hass.helpers.device_registry.async_get_registry() assert len(dev_reg.devices) == 1 ent_reg = await hass.helpers.entity_registry.async_get_registry() assert 
len(ent_reg.entities) == 1 async def test_enter_with_attrs(hass, gpslogger_client, webhook_id): """Test when additional attributes are present.""" url = "/api/webhook/{}".format(webhook_id) data = { "latitude": 1.0, "longitude": 1.1, "device": "123", "accuracy": 10.5, "battery": 10, "speed": 100, "direction": 105.32, "altitude": 102, "provider": "gps", "activity": "running", } req = await gpslogger_client.post(url, data=data) await hass.async_block_till_done() assert req.status == HTTP_OK state = hass.states.get("{}.{}".format(DEVICE_TRACKER_DOMAIN, data["device"])) assert state.state == STATE_NOT_HOME assert state.attributes["gps_accuracy"] == 10.5 assert state.attributes["battery_level"] == 10.0 assert state.attributes["speed"] == 100.0 assert state.attributes["direction"] == 105.32 assert state.attributes["altitude"] == 102.0 assert state.attributes["provider"] == "gps" assert state.attributes["activity"] == "running" data = { "latitude": HOME_LATITUDE, "longitude": HOME_LONGITUDE, "device": "123", "accuracy": 123, "battery": 23, "speed": 23, "direction": 123, "altitude": 123, "provider": "gps", "activity": "idle", } req = await gpslogger_client.post(url, data=data) await hass.async_block_till_done() assert req.status == HTTP_OK state = hass.states.get("{}.{}".format(DEVICE_TRACKER_DOMAIN, data["device"])) assert state.state == STATE_HOME assert state.attributes["gps_accuracy"] == 123 assert state.attributes["battery_level"] == 23 assert state.attributes["speed"] == 23 assert state.attributes["direction"] == 123 assert state.attributes["altitude"] == 123 assert state.attributes["provider"] == "gps" assert state.attributes["activity"] == "idle" @pytest.mark.xfail( reason="The device_tracker component does not support unloading yet." 
) async def test_load_unload_entry(hass, gpslogger_client, webhook_id): """Test that the appropriate dispatch signals are added and removed.""" url = "/api/webhook/{}".format(webhook_id) data = {"latitude": HOME_LATITUDE, "longitude": HOME_LONGITUDE, "device": "123"} # Enter the Home req = await gpslogger_client.post(url, data=data) await hass.async_block_till_done() assert req.status == HTTP_OK state_name = hass.states.get( "{}.{}".format(DEVICE_TRACKER_DOMAIN, data["device"]) ).state assert STATE_HOME == state_name assert len(hass.data[DATA_DISPATCHER][TRACKER_UPDATE]) == 1 entry = hass.config_entries.async_entries(DOMAIN)[0] assert await gpslogger.async_unload_entry(hass, entry) await hass.async_block_till_done() assert not hass.data[DATA_DISPATCHER][TRACKER_UPDATE]
Cinntax/home-assistant
tests/components/gpslogger/test_init.py
homeassistant/components/modbus/climate.py
"""Offer time listening automation rules.""" import logging import voluptuous as vol from homeassistant.core import callback from homeassistant.const import CONF_AT, CONF_PLATFORM from homeassistant.helpers import config_validation as cv from homeassistant.helpers.event import async_track_time_change # mypy: allow-untyped-defs, no-check-untyped-defs _LOGGER = logging.getLogger(__name__) TRIGGER_SCHEMA = vol.Schema( {vol.Required(CONF_PLATFORM): "time", vol.Required(CONF_AT): cv.time} ) async def async_attach_trigger(hass, config, action, automation_info): """Listen for state changes based on configuration.""" at_time = config.get(CONF_AT) hours, minutes, seconds = at_time.hour, at_time.minute, at_time.second @callback def time_automation_listener(now): """Listen for time changes and calls action.""" hass.async_run_job(action, {"trigger": {"platform": "time", "now": now}}) return async_track_time_change( hass, time_automation_listener, hour=hours, minute=minutes, second=seconds )
"""The tests the for GPSLogger device tracker platform.""" from unittest.mock import Mock, patch import pytest from homeassistant import data_entry_flow from homeassistant.components import gpslogger, zone from homeassistant.components.device_tracker import DOMAIN as DEVICE_TRACKER_DOMAIN from homeassistant.components.gpslogger import DOMAIN, TRACKER_UPDATE from homeassistant.const import ( HTTP_OK, HTTP_UNPROCESSABLE_ENTITY, STATE_HOME, STATE_NOT_HOME, ) from homeassistant.helpers.dispatcher import DATA_DISPATCHER from homeassistant.setup import async_setup_component HOME_LATITUDE = 37.239622 HOME_LONGITUDE = -115.815811 # pylint: disable=redefined-outer-name @pytest.fixture(autouse=True) def mock_dev_track(mock_device_tracker_conf): """Mock device tracker config loading.""" pass @pytest.fixture async def gpslogger_client(loop, hass, aiohttp_client): """Mock client for GPSLogger (unauthenticated).""" assert await async_setup_component(hass, "persistent_notification", {}) assert await async_setup_component(hass, DOMAIN, {DOMAIN: {}}) await hass.async_block_till_done() with patch("homeassistant.components.device_tracker.legacy.update_config"): return await aiohttp_client(hass.http.app) @pytest.fixture(autouse=True) async def setup_zones(loop, hass): """Set up Zone config in HA.""" assert await async_setup_component( hass, zone.DOMAIN, { "zone": { "name": "Home", "latitude": HOME_LATITUDE, "longitude": HOME_LONGITUDE, "radius": 100, } }, ) await hass.async_block_till_done() @pytest.fixture async def webhook_id(hass, gpslogger_client): """Initialize the GPSLogger component and get the webhook_id.""" hass.config.api = Mock(base_url="http://example.com") result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": "user"} ) assert result["type"] == data_entry_flow.RESULT_TYPE_FORM, result result = await hass.config_entries.flow.async_configure(result["flow_id"], {}) assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY await 
hass.async_block_till_done() return result["result"].data["webhook_id"] async def test_missing_data(hass, gpslogger_client, webhook_id): """Test missing data.""" url = "/api/webhook/{}".format(webhook_id) data = {"latitude": 1.0, "longitude": 1.1, "device": "123"} # No data req = await gpslogger_client.post(url) await hass.async_block_till_done() assert req.status == HTTP_UNPROCESSABLE_ENTITY # No latitude copy = data.copy() del copy["latitude"] req = await gpslogger_client.post(url, data=copy) await hass.async_block_till_done() assert req.status == HTTP_UNPROCESSABLE_ENTITY # No device copy = data.copy() del copy["device"] req = await gpslogger_client.post(url, data=copy) await hass.async_block_till_done() assert req.status == HTTP_UNPROCESSABLE_ENTITY async def test_enter_and_exit(hass, gpslogger_client, webhook_id): """Test when there is a known zone.""" url = "/api/webhook/{}".format(webhook_id) data = {"latitude": HOME_LATITUDE, "longitude": HOME_LONGITUDE, "device": "123"} # Enter the Home req = await gpslogger_client.post(url, data=data) await hass.async_block_till_done() assert req.status == HTTP_OK state_name = hass.states.get( "{}.{}".format(DEVICE_TRACKER_DOMAIN, data["device"]) ).state assert STATE_HOME == state_name # Enter Home again req = await gpslogger_client.post(url, data=data) await hass.async_block_till_done() assert req.status == HTTP_OK state_name = hass.states.get( "{}.{}".format(DEVICE_TRACKER_DOMAIN, data["device"]) ).state assert STATE_HOME == state_name data["longitude"] = 0 data["latitude"] = 0 # Enter Somewhere else req = await gpslogger_client.post(url, data=data) await hass.async_block_till_done() assert req.status == HTTP_OK state_name = hass.states.get( "{}.{}".format(DEVICE_TRACKER_DOMAIN, data["device"]) ).state assert STATE_NOT_HOME == state_name dev_reg = await hass.helpers.device_registry.async_get_registry() assert len(dev_reg.devices) == 1 ent_reg = await hass.helpers.entity_registry.async_get_registry() assert 
len(ent_reg.entities) == 1 async def test_enter_with_attrs(hass, gpslogger_client, webhook_id): """Test when additional attributes are present.""" url = "/api/webhook/{}".format(webhook_id) data = { "latitude": 1.0, "longitude": 1.1, "device": "123", "accuracy": 10.5, "battery": 10, "speed": 100, "direction": 105.32, "altitude": 102, "provider": "gps", "activity": "running", } req = await gpslogger_client.post(url, data=data) await hass.async_block_till_done() assert req.status == HTTP_OK state = hass.states.get("{}.{}".format(DEVICE_TRACKER_DOMAIN, data["device"])) assert state.state == STATE_NOT_HOME assert state.attributes["gps_accuracy"] == 10.5 assert state.attributes["battery_level"] == 10.0 assert state.attributes["speed"] == 100.0 assert state.attributes["direction"] == 105.32 assert state.attributes["altitude"] == 102.0 assert state.attributes["provider"] == "gps" assert state.attributes["activity"] == "running" data = { "latitude": HOME_LATITUDE, "longitude": HOME_LONGITUDE, "device": "123", "accuracy": 123, "battery": 23, "speed": 23, "direction": 123, "altitude": 123, "provider": "gps", "activity": "idle", } req = await gpslogger_client.post(url, data=data) await hass.async_block_till_done() assert req.status == HTTP_OK state = hass.states.get("{}.{}".format(DEVICE_TRACKER_DOMAIN, data["device"])) assert state.state == STATE_HOME assert state.attributes["gps_accuracy"] == 123 assert state.attributes["battery_level"] == 23 assert state.attributes["speed"] == 23 assert state.attributes["direction"] == 123 assert state.attributes["altitude"] == 123 assert state.attributes["provider"] == "gps" assert state.attributes["activity"] == "idle" @pytest.mark.xfail( reason="The device_tracker component does not support unloading yet." 
) async def test_load_unload_entry(hass, gpslogger_client, webhook_id): """Test that the appropriate dispatch signals are added and removed.""" url = "/api/webhook/{}".format(webhook_id) data = {"latitude": HOME_LATITUDE, "longitude": HOME_LONGITUDE, "device": "123"} # Enter the Home req = await gpslogger_client.post(url, data=data) await hass.async_block_till_done() assert req.status == HTTP_OK state_name = hass.states.get( "{}.{}".format(DEVICE_TRACKER_DOMAIN, data["device"]) ).state assert STATE_HOME == state_name assert len(hass.data[DATA_DISPATCHER][TRACKER_UPDATE]) == 1 entry = hass.config_entries.async_entries(DOMAIN)[0] assert await gpslogger.async_unload_entry(hass, entry) await hass.async_block_till_done() assert not hass.data[DATA_DISPATCHER][TRACKER_UPDATE]
Cinntax/home-assistant
tests/components/gpslogger/test_init.py
homeassistant/components/automation/time.py
"""Support for interacting with and controlling the cmus music player.""" import logging import voluptuous as vol from homeassistant.components.media_player import MediaPlayerDevice, PLATFORM_SCHEMA from homeassistant.components.media_player.const import ( MEDIA_TYPE_MUSIC, MEDIA_TYPE_PLAYLIST, SUPPORT_NEXT_TRACK, SUPPORT_PAUSE, SUPPORT_PLAY, SUPPORT_PLAY_MEDIA, SUPPORT_PREVIOUS_TRACK, SUPPORT_SEEK, SUPPORT_TURN_OFF, SUPPORT_TURN_ON, SUPPORT_VOLUME_SET, ) from homeassistant.const import ( CONF_HOST, CONF_NAME, CONF_PASSWORD, CONF_PORT, STATE_OFF, STATE_PAUSED, STATE_PLAYING, ) import homeassistant.helpers.config_validation as cv _LOGGER = logging.getLogger(__name__) DEFAULT_NAME = "cmus" DEFAULT_PORT = 3000 SUPPORT_CMUS = ( SUPPORT_PAUSE | SUPPORT_VOLUME_SET | SUPPORT_TURN_OFF | SUPPORT_TURN_ON | SUPPORT_PREVIOUS_TRACK | SUPPORT_NEXT_TRACK | SUPPORT_PLAY_MEDIA | SUPPORT_SEEK | SUPPORT_PLAY ) PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend( { vol.Inclusive(CONF_HOST, "remote"): cv.string, vol.Inclusive(CONF_PASSWORD, "remote"): cv.string, vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port, vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string, } ) def setup_platform(hass, config, add_entities, discover_info=None): """Set up the CMUS platform.""" from pycmus import exceptions host = config.get(CONF_HOST) password = config.get(CONF_PASSWORD) port = config.get(CONF_PORT) name = config.get(CONF_NAME) try: cmus_remote = CmusDevice(host, password, port, name) except exceptions.InvalidPassword: _LOGGER.error("The provided password was rejected by cmus") return False add_entities([cmus_remote], True) class CmusDevice(MediaPlayerDevice): """Representation of a running cmus.""" # pylint: disable=no-member def __init__(self, server, password, port, name): """Initialize the CMUS device.""" from pycmus import remote if server: self.cmus = remote.PyCmus(server=server, password=password, port=port) auto_name = f"cmus-{server}" else: self.cmus = remote.PyCmus() auto_name = 
"cmus-local" self._name = name or auto_name self.status = {} def update(self): """Get the latest data and update the state.""" status = self.cmus.get_status_dict() if not status: _LOGGER.warning("Received no status from cmus") else: self.status = status @property def name(self): """Return the name of the device.""" return self._name @property def state(self): """Return the media state.""" if self.status.get("status") == "playing": return STATE_PLAYING if self.status.get("status") == "paused": return STATE_PAUSED return STATE_OFF @property def media_content_id(self): """Content ID of current playing media.""" return self.status.get("file") @property def content_type(self): """Content type of the current playing media.""" return MEDIA_TYPE_MUSIC @property def media_duration(self): """Duration of current playing media in seconds.""" return self.status.get("duration") @property def media_title(self): """Title of current playing media.""" return self.status["tag"].get("title") @property def media_artist(self): """Artist of current playing media, music track only.""" return self.status["tag"].get("artist") @property def media_track(self): """Track number of current playing media, music track only.""" return self.status["tag"].get("tracknumber") @property def media_album_name(self): """Album name of current playing media, music track only.""" return self.status["tag"].get("album") @property def media_album_artist(self): """Album artist of current playing media, music track only.""" return self.status["tag"].get("albumartist") @property def volume_level(self): """Return the volume level.""" left = self.status["set"].get("vol_left")[0] right = self.status["set"].get("vol_right")[0] if left != right: volume = float(left + right) / 2 else: volume = left return int(volume) / 100 @property def supported_features(self): """Flag media player features that are supported.""" return SUPPORT_CMUS def turn_off(self): """Service to send the CMUS the command to stop playing.""" 
self.cmus.player_stop() def turn_on(self): """Service to send the CMUS the command to start playing.""" self.cmus.player_play() def set_volume_level(self, volume): """Set volume level, range 0..1.""" self.cmus.set_volume(int(volume * 100)) def volume_up(self): """Set the volume up.""" left = self.status["set"].get("vol_left") right = self.status["set"].get("vol_right") if left != right: current_volume = float(left + right) / 2 else: current_volume = left if current_volume <= 100: self.cmus.set_volume(int(current_volume) + 5) def volume_down(self): """Set the volume down.""" left = self.status["set"].get("vol_left") right = self.status["set"].get("vol_right") if left != right: current_volume = float(left + right) / 2 else: current_volume = left if current_volume <= 100: self.cmus.set_volume(int(current_volume) - 5) def play_media(self, media_type, media_id, **kwargs): """Send the play command.""" if media_type in [MEDIA_TYPE_MUSIC, MEDIA_TYPE_PLAYLIST]: self.cmus.player_play_file(media_id) else: _LOGGER.error( "Invalid media type %s. Only %s and %s are supported", media_type, MEDIA_TYPE_MUSIC, MEDIA_TYPE_PLAYLIST, ) def media_pause(self): """Send the pause command.""" self.cmus.player_pause() def media_next_track(self): """Send next track command.""" self.cmus.player_next() def media_previous_track(self): """Send next track command.""" self.cmus.player_prev() def media_seek(self, position): """Send seek command.""" self.cmus.seek(position) def media_play(self): """Send the play command.""" self.cmus.player_play() def media_stop(self): """Send the stop command.""" self.cmus.stop()
"""The tests the for GPSLogger device tracker platform.""" from unittest.mock import Mock, patch import pytest from homeassistant import data_entry_flow from homeassistant.components import gpslogger, zone from homeassistant.components.device_tracker import DOMAIN as DEVICE_TRACKER_DOMAIN from homeassistant.components.gpslogger import DOMAIN, TRACKER_UPDATE from homeassistant.const import ( HTTP_OK, HTTP_UNPROCESSABLE_ENTITY, STATE_HOME, STATE_NOT_HOME, ) from homeassistant.helpers.dispatcher import DATA_DISPATCHER from homeassistant.setup import async_setup_component HOME_LATITUDE = 37.239622 HOME_LONGITUDE = -115.815811 # pylint: disable=redefined-outer-name @pytest.fixture(autouse=True) def mock_dev_track(mock_device_tracker_conf): """Mock device tracker config loading.""" pass @pytest.fixture async def gpslogger_client(loop, hass, aiohttp_client): """Mock client for GPSLogger (unauthenticated).""" assert await async_setup_component(hass, "persistent_notification", {}) assert await async_setup_component(hass, DOMAIN, {DOMAIN: {}}) await hass.async_block_till_done() with patch("homeassistant.components.device_tracker.legacy.update_config"): return await aiohttp_client(hass.http.app) @pytest.fixture(autouse=True) async def setup_zones(loop, hass): """Set up Zone config in HA.""" assert await async_setup_component( hass, zone.DOMAIN, { "zone": { "name": "Home", "latitude": HOME_LATITUDE, "longitude": HOME_LONGITUDE, "radius": 100, } }, ) await hass.async_block_till_done() @pytest.fixture async def webhook_id(hass, gpslogger_client): """Initialize the GPSLogger component and get the webhook_id.""" hass.config.api = Mock(base_url="http://example.com") result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": "user"} ) assert result["type"] == data_entry_flow.RESULT_TYPE_FORM, result result = await hass.config_entries.flow.async_configure(result["flow_id"], {}) assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY await 
hass.async_block_till_done() return result["result"].data["webhook_id"] async def test_missing_data(hass, gpslogger_client, webhook_id): """Test missing data.""" url = "/api/webhook/{}".format(webhook_id) data = {"latitude": 1.0, "longitude": 1.1, "device": "123"} # No data req = await gpslogger_client.post(url) await hass.async_block_till_done() assert req.status == HTTP_UNPROCESSABLE_ENTITY # No latitude copy = data.copy() del copy["latitude"] req = await gpslogger_client.post(url, data=copy) await hass.async_block_till_done() assert req.status == HTTP_UNPROCESSABLE_ENTITY # No device copy = data.copy() del copy["device"] req = await gpslogger_client.post(url, data=copy) await hass.async_block_till_done() assert req.status == HTTP_UNPROCESSABLE_ENTITY async def test_enter_and_exit(hass, gpslogger_client, webhook_id): """Test when there is a known zone.""" url = "/api/webhook/{}".format(webhook_id) data = {"latitude": HOME_LATITUDE, "longitude": HOME_LONGITUDE, "device": "123"} # Enter the Home req = await gpslogger_client.post(url, data=data) await hass.async_block_till_done() assert req.status == HTTP_OK state_name = hass.states.get( "{}.{}".format(DEVICE_TRACKER_DOMAIN, data["device"]) ).state assert STATE_HOME == state_name # Enter Home again req = await gpslogger_client.post(url, data=data) await hass.async_block_till_done() assert req.status == HTTP_OK state_name = hass.states.get( "{}.{}".format(DEVICE_TRACKER_DOMAIN, data["device"]) ).state assert STATE_HOME == state_name data["longitude"] = 0 data["latitude"] = 0 # Enter Somewhere else req = await gpslogger_client.post(url, data=data) await hass.async_block_till_done() assert req.status == HTTP_OK state_name = hass.states.get( "{}.{}".format(DEVICE_TRACKER_DOMAIN, data["device"]) ).state assert STATE_NOT_HOME == state_name dev_reg = await hass.helpers.device_registry.async_get_registry() assert len(dev_reg.devices) == 1 ent_reg = await hass.helpers.entity_registry.async_get_registry() assert 
len(ent_reg.entities) == 1 async def test_enter_with_attrs(hass, gpslogger_client, webhook_id): """Test when additional attributes are present.""" url = "/api/webhook/{}".format(webhook_id) data = { "latitude": 1.0, "longitude": 1.1, "device": "123", "accuracy": 10.5, "battery": 10, "speed": 100, "direction": 105.32, "altitude": 102, "provider": "gps", "activity": "running", } req = await gpslogger_client.post(url, data=data) await hass.async_block_till_done() assert req.status == HTTP_OK state = hass.states.get("{}.{}".format(DEVICE_TRACKER_DOMAIN, data["device"])) assert state.state == STATE_NOT_HOME assert state.attributes["gps_accuracy"] == 10.5 assert state.attributes["battery_level"] == 10.0 assert state.attributes["speed"] == 100.0 assert state.attributes["direction"] == 105.32 assert state.attributes["altitude"] == 102.0 assert state.attributes["provider"] == "gps" assert state.attributes["activity"] == "running" data = { "latitude": HOME_LATITUDE, "longitude": HOME_LONGITUDE, "device": "123", "accuracy": 123, "battery": 23, "speed": 23, "direction": 123, "altitude": 123, "provider": "gps", "activity": "idle", } req = await gpslogger_client.post(url, data=data) await hass.async_block_till_done() assert req.status == HTTP_OK state = hass.states.get("{}.{}".format(DEVICE_TRACKER_DOMAIN, data["device"])) assert state.state == STATE_HOME assert state.attributes["gps_accuracy"] == 123 assert state.attributes["battery_level"] == 23 assert state.attributes["speed"] == 23 assert state.attributes["direction"] == 123 assert state.attributes["altitude"] == 123 assert state.attributes["provider"] == "gps" assert state.attributes["activity"] == "idle" @pytest.mark.xfail( reason="The device_tracker component does not support unloading yet." 
) async def test_load_unload_entry(hass, gpslogger_client, webhook_id): """Test that the appropriate dispatch signals are added and removed.""" url = "/api/webhook/{}".format(webhook_id) data = {"latitude": HOME_LATITUDE, "longitude": HOME_LONGITUDE, "device": "123"} # Enter the Home req = await gpslogger_client.post(url, data=data) await hass.async_block_till_done() assert req.status == HTTP_OK state_name = hass.states.get( "{}.{}".format(DEVICE_TRACKER_DOMAIN, data["device"]) ).state assert STATE_HOME == state_name assert len(hass.data[DATA_DISPATCHER][TRACKER_UPDATE]) == 1 entry = hass.config_entries.async_entries(DOMAIN)[0] assert await gpslogger.async_unload_entry(hass, entry) await hass.async_block_till_done() assert not hass.data[DATA_DISPATCHER][TRACKER_UPDATE]
Cinntax/home-assistant
tests/components/gpslogger/test_init.py
homeassistant/components/cmus/media_player.py
"""Support for Rheem EcoNet water heaters.""" import datetime import logging import voluptuous as vol from homeassistant.components.water_heater import ( DOMAIN, PLATFORM_SCHEMA, STATE_ECO, STATE_ELECTRIC, STATE_GAS, STATE_HEAT_PUMP, STATE_HIGH_DEMAND, STATE_OFF, STATE_PERFORMANCE, SUPPORT_OPERATION_MODE, SUPPORT_TARGET_TEMPERATURE, WaterHeaterDevice, ) from homeassistant.const import ( ATTR_ENTITY_ID, ATTR_TEMPERATURE, CONF_PASSWORD, CONF_USERNAME, TEMP_FAHRENHEIT, ) import homeassistant.helpers.config_validation as cv _LOGGER = logging.getLogger(__name__) ATTR_VACATION_START = "next_vacation_start_date" ATTR_VACATION_END = "next_vacation_end_date" ATTR_ON_VACATION = "on_vacation" ATTR_TODAYS_ENERGY_USAGE = "todays_energy_usage" ATTR_IN_USE = "in_use" ATTR_START_DATE = "start_date" ATTR_END_DATE = "end_date" SUPPORT_FLAGS_HEATER = SUPPORT_TARGET_TEMPERATURE | SUPPORT_OPERATION_MODE SERVICE_ADD_VACATION = "econet_add_vacation" SERVICE_DELETE_VACATION = "econet_delete_vacation" ADD_VACATION_SCHEMA = vol.Schema( { vol.Optional(ATTR_ENTITY_ID): cv.entity_ids, vol.Optional(ATTR_START_DATE): cv.positive_int, vol.Required(ATTR_END_DATE): cv.positive_int, } ) DELETE_VACATION_SCHEMA = vol.Schema({vol.Optional(ATTR_ENTITY_ID): cv.entity_ids}) ECONET_DATA = "econet" ECONET_STATE_TO_HA = { "Energy Saver": STATE_ECO, "gas": STATE_GAS, "High Demand": STATE_HIGH_DEMAND, "Off": STATE_OFF, "Performance": STATE_PERFORMANCE, "Heat Pump Only": STATE_HEAT_PUMP, "Electric-Only": STATE_ELECTRIC, "Electric": STATE_ELECTRIC, "Heat Pump": STATE_HEAT_PUMP, } PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend( {vol.Required(CONF_USERNAME): cv.string, vol.Required(CONF_PASSWORD): cv.string} ) def setup_platform(hass, config, add_entities, discovery_info=None): """Set up the EcoNet water heaters.""" from pyeconet.api import PyEcoNet hass.data[ECONET_DATA] = {} hass.data[ECONET_DATA]["water_heaters"] = [] username = config.get(CONF_USERNAME) password = config.get(CONF_PASSWORD) econet = 
PyEcoNet(username, password) water_heaters = econet.get_water_heaters() hass_water_heaters = [ EcoNetWaterHeater(water_heater) for water_heater in water_heaters ] add_entities(hass_water_heaters) hass.data[ECONET_DATA]["water_heaters"].extend(hass_water_heaters) def service_handle(service): """Handle the service calls.""" entity_ids = service.data.get("entity_id") all_heaters = hass.data[ECONET_DATA]["water_heaters"] _heaters = [ x for x in all_heaters if not entity_ids or x.entity_id in entity_ids ] for _water_heater in _heaters: if service.service == SERVICE_ADD_VACATION: start = service.data.get(ATTR_START_DATE) end = service.data.get(ATTR_END_DATE) _water_heater.add_vacation(start, end) if service.service == SERVICE_DELETE_VACATION: for vacation in _water_heater.water_heater.vacations: vacation.delete() _water_heater.schedule_update_ha_state(True) hass.services.register( DOMAIN, SERVICE_ADD_VACATION, service_handle, schema=ADD_VACATION_SCHEMA ) hass.services.register( DOMAIN, SERVICE_DELETE_VACATION, service_handle, schema=DELETE_VACATION_SCHEMA ) class EcoNetWaterHeater(WaterHeaterDevice): """Representation of an EcoNet water heater.""" def __init__(self, water_heater): """Initialize the water heater.""" self.water_heater = water_heater self.supported_modes = self.water_heater.supported_modes self.econet_state_to_ha = {} self.ha_state_to_econet = {} for mode in ECONET_STATE_TO_HA: if mode in self.supported_modes: self.econet_state_to_ha[mode] = ECONET_STATE_TO_HA.get(mode) for key, value in self.econet_state_to_ha.items(): self.ha_state_to_econet[value] = key for mode in self.supported_modes: if mode not in ECONET_STATE_TO_HA: error = ( "Invalid operation mode mapping. " + mode + " doesn't map. Please report this." 
) _LOGGER.error(error) @property def name(self): """Return the device name.""" return self.water_heater.name @property def available(self): """Return if the the device is online or not.""" return self.water_heater.is_connected @property def temperature_unit(self): """Return the unit of measurement.""" return TEMP_FAHRENHEIT @property def device_state_attributes(self): """Return the optional device state attributes.""" data = {} vacations = self.water_heater.get_vacations() if vacations: data[ATTR_VACATION_START] = vacations[0].start_date data[ATTR_VACATION_END] = vacations[0].end_date data[ATTR_ON_VACATION] = self.water_heater.is_on_vacation todays_usage = self.water_heater.total_usage_for_today if todays_usage: data[ATTR_TODAYS_ENERGY_USAGE] = todays_usage data[ATTR_IN_USE] = self.water_heater.in_use return data @property def current_operation(self): """ Return current operation as one of the following. ["eco", "heat_pump", "high_demand", "electric_only"] """ current_op = self.econet_state_to_ha.get(self.water_heater.mode) return current_op @property def operation_list(self): """List of available operation modes.""" op_list = [] for mode in self.supported_modes: ha_mode = self.econet_state_to_ha.get(mode) if ha_mode is not None: op_list.append(ha_mode) return op_list @property def supported_features(self): """Return the list of supported features.""" return SUPPORT_FLAGS_HEATER def set_temperature(self, **kwargs): """Set new target temperature.""" target_temp = kwargs.get(ATTR_TEMPERATURE) if target_temp is not None: self.water_heater.set_target_set_point(target_temp) else: _LOGGER.error("A target temperature must be provided") def set_operation_mode(self, operation_mode): """Set operation mode.""" op_mode_to_set = self.ha_state_to_econet.get(operation_mode) if op_mode_to_set is not None: self.water_heater.set_mode(op_mode_to_set) else: _LOGGER.error("An operation mode must be provided") def add_vacation(self, start, end): """Add a vacation to this water 
heater.""" if not start: start = datetime.datetime.now() else: start = datetime.datetime.fromtimestamp(start) end = datetime.datetime.fromtimestamp(end) self.water_heater.set_vacation_mode(start, end) def update(self): """Get the latest date.""" self.water_heater.update_state() @property def target_temperature(self): """Return the temperature we try to reach.""" return self.water_heater.set_point @property def min_temp(self): """Return the minimum temperature.""" return self.water_heater.min_set_point @property def max_temp(self): """Return the maximum temperature.""" return self.water_heater.max_set_point
"""The tests the for GPSLogger device tracker platform.""" from unittest.mock import Mock, patch import pytest from homeassistant import data_entry_flow from homeassistant.components import gpslogger, zone from homeassistant.components.device_tracker import DOMAIN as DEVICE_TRACKER_DOMAIN from homeassistant.components.gpslogger import DOMAIN, TRACKER_UPDATE from homeassistant.const import ( HTTP_OK, HTTP_UNPROCESSABLE_ENTITY, STATE_HOME, STATE_NOT_HOME, ) from homeassistant.helpers.dispatcher import DATA_DISPATCHER from homeassistant.setup import async_setup_component HOME_LATITUDE = 37.239622 HOME_LONGITUDE = -115.815811 # pylint: disable=redefined-outer-name @pytest.fixture(autouse=True) def mock_dev_track(mock_device_tracker_conf): """Mock device tracker config loading.""" pass @pytest.fixture async def gpslogger_client(loop, hass, aiohttp_client): """Mock client for GPSLogger (unauthenticated).""" assert await async_setup_component(hass, "persistent_notification", {}) assert await async_setup_component(hass, DOMAIN, {DOMAIN: {}}) await hass.async_block_till_done() with patch("homeassistant.components.device_tracker.legacy.update_config"): return await aiohttp_client(hass.http.app) @pytest.fixture(autouse=True) async def setup_zones(loop, hass): """Set up Zone config in HA.""" assert await async_setup_component( hass, zone.DOMAIN, { "zone": { "name": "Home", "latitude": HOME_LATITUDE, "longitude": HOME_LONGITUDE, "radius": 100, } }, ) await hass.async_block_till_done() @pytest.fixture async def webhook_id(hass, gpslogger_client): """Initialize the GPSLogger component and get the webhook_id.""" hass.config.api = Mock(base_url="http://example.com") result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": "user"} ) assert result["type"] == data_entry_flow.RESULT_TYPE_FORM, result result = await hass.config_entries.flow.async_configure(result["flow_id"], {}) assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY await 
hass.async_block_till_done() return result["result"].data["webhook_id"] async def test_missing_data(hass, gpslogger_client, webhook_id): """Test missing data.""" url = "/api/webhook/{}".format(webhook_id) data = {"latitude": 1.0, "longitude": 1.1, "device": "123"} # No data req = await gpslogger_client.post(url) await hass.async_block_till_done() assert req.status == HTTP_UNPROCESSABLE_ENTITY # No latitude copy = data.copy() del copy["latitude"] req = await gpslogger_client.post(url, data=copy) await hass.async_block_till_done() assert req.status == HTTP_UNPROCESSABLE_ENTITY # No device copy = data.copy() del copy["device"] req = await gpslogger_client.post(url, data=copy) await hass.async_block_till_done() assert req.status == HTTP_UNPROCESSABLE_ENTITY async def test_enter_and_exit(hass, gpslogger_client, webhook_id): """Test when there is a known zone.""" url = "/api/webhook/{}".format(webhook_id) data = {"latitude": HOME_LATITUDE, "longitude": HOME_LONGITUDE, "device": "123"} # Enter the Home req = await gpslogger_client.post(url, data=data) await hass.async_block_till_done() assert req.status == HTTP_OK state_name = hass.states.get( "{}.{}".format(DEVICE_TRACKER_DOMAIN, data["device"]) ).state assert STATE_HOME == state_name # Enter Home again req = await gpslogger_client.post(url, data=data) await hass.async_block_till_done() assert req.status == HTTP_OK state_name = hass.states.get( "{}.{}".format(DEVICE_TRACKER_DOMAIN, data["device"]) ).state assert STATE_HOME == state_name data["longitude"] = 0 data["latitude"] = 0 # Enter Somewhere else req = await gpslogger_client.post(url, data=data) await hass.async_block_till_done() assert req.status == HTTP_OK state_name = hass.states.get( "{}.{}".format(DEVICE_TRACKER_DOMAIN, data["device"]) ).state assert STATE_NOT_HOME == state_name dev_reg = await hass.helpers.device_registry.async_get_registry() assert len(dev_reg.devices) == 1 ent_reg = await hass.helpers.entity_registry.async_get_registry() assert 
len(ent_reg.entities) == 1 async def test_enter_with_attrs(hass, gpslogger_client, webhook_id): """Test when additional attributes are present.""" url = "/api/webhook/{}".format(webhook_id) data = { "latitude": 1.0, "longitude": 1.1, "device": "123", "accuracy": 10.5, "battery": 10, "speed": 100, "direction": 105.32, "altitude": 102, "provider": "gps", "activity": "running", } req = await gpslogger_client.post(url, data=data) await hass.async_block_till_done() assert req.status == HTTP_OK state = hass.states.get("{}.{}".format(DEVICE_TRACKER_DOMAIN, data["device"])) assert state.state == STATE_NOT_HOME assert state.attributes["gps_accuracy"] == 10.5 assert state.attributes["battery_level"] == 10.0 assert state.attributes["speed"] == 100.0 assert state.attributes["direction"] == 105.32 assert state.attributes["altitude"] == 102.0 assert state.attributes["provider"] == "gps" assert state.attributes["activity"] == "running" data = { "latitude": HOME_LATITUDE, "longitude": HOME_LONGITUDE, "device": "123", "accuracy": 123, "battery": 23, "speed": 23, "direction": 123, "altitude": 123, "provider": "gps", "activity": "idle", } req = await gpslogger_client.post(url, data=data) await hass.async_block_till_done() assert req.status == HTTP_OK state = hass.states.get("{}.{}".format(DEVICE_TRACKER_DOMAIN, data["device"])) assert state.state == STATE_HOME assert state.attributes["gps_accuracy"] == 123 assert state.attributes["battery_level"] == 23 assert state.attributes["speed"] == 23 assert state.attributes["direction"] == 123 assert state.attributes["altitude"] == 123 assert state.attributes["provider"] == "gps" assert state.attributes["activity"] == "idle" @pytest.mark.xfail( reason="The device_tracker component does not support unloading yet." 
) async def test_load_unload_entry(hass, gpslogger_client, webhook_id): """Test that the appropriate dispatch signals are added and removed.""" url = "/api/webhook/{}".format(webhook_id) data = {"latitude": HOME_LATITUDE, "longitude": HOME_LONGITUDE, "device": "123"} # Enter the Home req = await gpslogger_client.post(url, data=data) await hass.async_block_till_done() assert req.status == HTTP_OK state_name = hass.states.get( "{}.{}".format(DEVICE_TRACKER_DOMAIN, data["device"]) ).state assert STATE_HOME == state_name assert len(hass.data[DATA_DISPATCHER][TRACKER_UPDATE]) == 1 entry = hass.config_entries.async_entries(DOMAIN)[0] assert await gpslogger.async_unload_entry(hass, entry) await hass.async_block_till_done() assert not hass.data[DATA_DISPATCHER][TRACKER_UPDATE]
Cinntax/home-assistant
tests/components/gpslogger/test_init.py
homeassistant/components/econet/water_heater.py
"""Support for AlarmDecoder devices.""" import logging from datetime import timedelta import voluptuous as vol import homeassistant.helpers.config_validation as cv from homeassistant.const import EVENT_HOMEASSISTANT_STOP, CONF_HOST from homeassistant.helpers.discovery import load_platform from homeassistant.util import dt as dt_util from homeassistant.components.binary_sensor import DEVICE_CLASSES_SCHEMA _LOGGER = logging.getLogger(__name__) DOMAIN = "alarmdecoder" DATA_AD = "alarmdecoder" CONF_DEVICE = "device" CONF_DEVICE_BAUD = "baudrate" CONF_DEVICE_PATH = "path" CONF_DEVICE_PORT = "port" CONF_DEVICE_TYPE = "type" CONF_PANEL_DISPLAY = "panel_display" CONF_ZONE_NAME = "name" CONF_ZONE_TYPE = "type" CONF_ZONE_LOOP = "loop" CONF_ZONE_RFID = "rfid" CONF_ZONES = "zones" CONF_RELAY_ADDR = "relayaddr" CONF_RELAY_CHAN = "relaychan" DEFAULT_DEVICE_TYPE = "socket" DEFAULT_DEVICE_HOST = "localhost" DEFAULT_DEVICE_PORT = 10000 DEFAULT_DEVICE_PATH = "/dev/ttyUSB0" DEFAULT_DEVICE_BAUD = 115200 DEFAULT_PANEL_DISPLAY = False DEFAULT_ZONE_TYPE = "opening" SIGNAL_PANEL_MESSAGE = "alarmdecoder.panel_message" SIGNAL_PANEL_ARM_AWAY = "alarmdecoder.panel_arm_away" SIGNAL_PANEL_ARM_HOME = "alarmdecoder.panel_arm_home" SIGNAL_PANEL_DISARM = "alarmdecoder.panel_disarm" SIGNAL_ZONE_FAULT = "alarmdecoder.zone_fault" SIGNAL_ZONE_RESTORE = "alarmdecoder.zone_restore" SIGNAL_RFX_MESSAGE = "alarmdecoder.rfx_message" SIGNAL_REL_MESSAGE = "alarmdecoder.rel_message" DEVICE_SOCKET_SCHEMA = vol.Schema( { vol.Required(CONF_DEVICE_TYPE): "socket", vol.Optional(CONF_HOST, default=DEFAULT_DEVICE_HOST): cv.string, vol.Optional(CONF_DEVICE_PORT, default=DEFAULT_DEVICE_PORT): cv.port, } ) DEVICE_SERIAL_SCHEMA = vol.Schema( { vol.Required(CONF_DEVICE_TYPE): "serial", vol.Optional(CONF_DEVICE_PATH, default=DEFAULT_DEVICE_PATH): cv.string, vol.Optional(CONF_DEVICE_BAUD, default=DEFAULT_DEVICE_BAUD): cv.string, } ) DEVICE_USB_SCHEMA = vol.Schema({vol.Required(CONF_DEVICE_TYPE): "usb"}) ZONE_SCHEMA = 
vol.Schema( { vol.Required(CONF_ZONE_NAME): cv.string, vol.Optional(CONF_ZONE_TYPE, default=DEFAULT_ZONE_TYPE): vol.Any( DEVICE_CLASSES_SCHEMA ), vol.Optional(CONF_ZONE_RFID): cv.string, vol.Optional(CONF_ZONE_LOOP): vol.All(vol.Coerce(int), vol.Range(min=1, max=4)), vol.Inclusive( CONF_RELAY_ADDR, "relaylocation", "Relay address and channel must exist together", ): cv.byte, vol.Inclusive( CONF_RELAY_CHAN, "relaylocation", "Relay address and channel must exist together", ): cv.byte, } ) CONFIG_SCHEMA = vol.Schema( { DOMAIN: vol.Schema( { vol.Required(CONF_DEVICE): vol.Any( DEVICE_SOCKET_SCHEMA, DEVICE_SERIAL_SCHEMA, DEVICE_USB_SCHEMA ), vol.Optional( CONF_PANEL_DISPLAY, default=DEFAULT_PANEL_DISPLAY ): cv.boolean, vol.Optional(CONF_ZONES): {vol.Coerce(int): ZONE_SCHEMA}, } ) }, extra=vol.ALLOW_EXTRA, ) def setup(hass, config): """Set up for the AlarmDecoder devices.""" from alarmdecoder import AlarmDecoder from alarmdecoder.devices import SocketDevice, SerialDevice, USBDevice conf = config.get(DOMAIN) restart = False device = conf.get(CONF_DEVICE) display = conf.get(CONF_PANEL_DISPLAY) zones = conf.get(CONF_ZONES) device_type = device.get(CONF_DEVICE_TYPE) host = DEFAULT_DEVICE_HOST port = DEFAULT_DEVICE_PORT path = DEFAULT_DEVICE_PATH baud = DEFAULT_DEVICE_BAUD def stop_alarmdecoder(event): """Handle the shutdown of AlarmDecoder.""" _LOGGER.debug("Shutting down alarmdecoder") nonlocal restart restart = False controller.close() def open_connection(now=None): """Open a connection to AlarmDecoder.""" from alarmdecoder.util import NoDeviceError nonlocal restart try: controller.open(baud) except NoDeviceError: _LOGGER.debug("Failed to connect. 
Retrying in 5 seconds") hass.helpers.event.track_point_in_time( open_connection, dt_util.utcnow() + timedelta(seconds=5) ) return _LOGGER.debug("Established a connection with the alarmdecoder") restart = True def handle_closed_connection(event): """Restart after unexpected loss of connection.""" nonlocal restart if not restart: return restart = False _LOGGER.warning("AlarmDecoder unexpectedly lost connection.") hass.add_job(open_connection) def handle_message(sender, message): """Handle message from AlarmDecoder.""" hass.helpers.dispatcher.dispatcher_send(SIGNAL_PANEL_MESSAGE, message) def handle_rfx_message(sender, message): """Handle RFX message from AlarmDecoder.""" hass.helpers.dispatcher.dispatcher_send(SIGNAL_RFX_MESSAGE, message) def zone_fault_callback(sender, zone): """Handle zone fault from AlarmDecoder.""" hass.helpers.dispatcher.dispatcher_send(SIGNAL_ZONE_FAULT, zone) def zone_restore_callback(sender, zone): """Handle zone restore from AlarmDecoder.""" hass.helpers.dispatcher.dispatcher_send(SIGNAL_ZONE_RESTORE, zone) def handle_rel_message(sender, message): """Handle relay message from AlarmDecoder.""" hass.helpers.dispatcher.dispatcher_send(SIGNAL_REL_MESSAGE, message) controller = False if device_type == "socket": host = device.get(CONF_HOST) port = device.get(CONF_DEVICE_PORT) controller = AlarmDecoder(SocketDevice(interface=(host, port))) elif device_type == "serial": path = device.get(CONF_DEVICE_PATH) baud = device.get(CONF_DEVICE_BAUD) controller = AlarmDecoder(SerialDevice(interface=path)) elif device_type == "usb": AlarmDecoder(USBDevice.find()) return False controller.on_message += handle_message controller.on_rfx_message += handle_rfx_message controller.on_zone_fault += zone_fault_callback controller.on_zone_restore += zone_restore_callback controller.on_close += handle_closed_connection controller.on_relay_changed += handle_rel_message hass.data[DATA_AD] = controller open_connection() hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, 
stop_alarmdecoder) load_platform(hass, "alarm_control_panel", DOMAIN, conf, config) if zones: load_platform(hass, "binary_sensor", DOMAIN, {CONF_ZONES: zones}, config) if display: load_platform(hass, "sensor", DOMAIN, conf, config) return True
"""The tests the for GPSLogger device tracker platform.""" from unittest.mock import Mock, patch import pytest from homeassistant import data_entry_flow from homeassistant.components import gpslogger, zone from homeassistant.components.device_tracker import DOMAIN as DEVICE_TRACKER_DOMAIN from homeassistant.components.gpslogger import DOMAIN, TRACKER_UPDATE from homeassistant.const import ( HTTP_OK, HTTP_UNPROCESSABLE_ENTITY, STATE_HOME, STATE_NOT_HOME, ) from homeassistant.helpers.dispatcher import DATA_DISPATCHER from homeassistant.setup import async_setup_component HOME_LATITUDE = 37.239622 HOME_LONGITUDE = -115.815811 # pylint: disable=redefined-outer-name @pytest.fixture(autouse=True) def mock_dev_track(mock_device_tracker_conf): """Mock device tracker config loading.""" pass @pytest.fixture async def gpslogger_client(loop, hass, aiohttp_client): """Mock client for GPSLogger (unauthenticated).""" assert await async_setup_component(hass, "persistent_notification", {}) assert await async_setup_component(hass, DOMAIN, {DOMAIN: {}}) await hass.async_block_till_done() with patch("homeassistant.components.device_tracker.legacy.update_config"): return await aiohttp_client(hass.http.app) @pytest.fixture(autouse=True) async def setup_zones(loop, hass): """Set up Zone config in HA.""" assert await async_setup_component( hass, zone.DOMAIN, { "zone": { "name": "Home", "latitude": HOME_LATITUDE, "longitude": HOME_LONGITUDE, "radius": 100, } }, ) await hass.async_block_till_done() @pytest.fixture async def webhook_id(hass, gpslogger_client): """Initialize the GPSLogger component and get the webhook_id.""" hass.config.api = Mock(base_url="http://example.com") result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": "user"} ) assert result["type"] == data_entry_flow.RESULT_TYPE_FORM, result result = await hass.config_entries.flow.async_configure(result["flow_id"], {}) assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY await 
hass.async_block_till_done() return result["result"].data["webhook_id"] async def test_missing_data(hass, gpslogger_client, webhook_id): """Test missing data.""" url = "/api/webhook/{}".format(webhook_id) data = {"latitude": 1.0, "longitude": 1.1, "device": "123"} # No data req = await gpslogger_client.post(url) await hass.async_block_till_done() assert req.status == HTTP_UNPROCESSABLE_ENTITY # No latitude copy = data.copy() del copy["latitude"] req = await gpslogger_client.post(url, data=copy) await hass.async_block_till_done() assert req.status == HTTP_UNPROCESSABLE_ENTITY # No device copy = data.copy() del copy["device"] req = await gpslogger_client.post(url, data=copy) await hass.async_block_till_done() assert req.status == HTTP_UNPROCESSABLE_ENTITY async def test_enter_and_exit(hass, gpslogger_client, webhook_id): """Test when there is a known zone.""" url = "/api/webhook/{}".format(webhook_id) data = {"latitude": HOME_LATITUDE, "longitude": HOME_LONGITUDE, "device": "123"} # Enter the Home req = await gpslogger_client.post(url, data=data) await hass.async_block_till_done() assert req.status == HTTP_OK state_name = hass.states.get( "{}.{}".format(DEVICE_TRACKER_DOMAIN, data["device"]) ).state assert STATE_HOME == state_name # Enter Home again req = await gpslogger_client.post(url, data=data) await hass.async_block_till_done() assert req.status == HTTP_OK state_name = hass.states.get( "{}.{}".format(DEVICE_TRACKER_DOMAIN, data["device"]) ).state assert STATE_HOME == state_name data["longitude"] = 0 data["latitude"] = 0 # Enter Somewhere else req = await gpslogger_client.post(url, data=data) await hass.async_block_till_done() assert req.status == HTTP_OK state_name = hass.states.get( "{}.{}".format(DEVICE_TRACKER_DOMAIN, data["device"]) ).state assert STATE_NOT_HOME == state_name dev_reg = await hass.helpers.device_registry.async_get_registry() assert len(dev_reg.devices) == 1 ent_reg = await hass.helpers.entity_registry.async_get_registry() assert 
len(ent_reg.entities) == 1 async def test_enter_with_attrs(hass, gpslogger_client, webhook_id): """Test when additional attributes are present.""" url = "/api/webhook/{}".format(webhook_id) data = { "latitude": 1.0, "longitude": 1.1, "device": "123", "accuracy": 10.5, "battery": 10, "speed": 100, "direction": 105.32, "altitude": 102, "provider": "gps", "activity": "running", } req = await gpslogger_client.post(url, data=data) await hass.async_block_till_done() assert req.status == HTTP_OK state = hass.states.get("{}.{}".format(DEVICE_TRACKER_DOMAIN, data["device"])) assert state.state == STATE_NOT_HOME assert state.attributes["gps_accuracy"] == 10.5 assert state.attributes["battery_level"] == 10.0 assert state.attributes["speed"] == 100.0 assert state.attributes["direction"] == 105.32 assert state.attributes["altitude"] == 102.0 assert state.attributes["provider"] == "gps" assert state.attributes["activity"] == "running" data = { "latitude": HOME_LATITUDE, "longitude": HOME_LONGITUDE, "device": "123", "accuracy": 123, "battery": 23, "speed": 23, "direction": 123, "altitude": 123, "provider": "gps", "activity": "idle", } req = await gpslogger_client.post(url, data=data) await hass.async_block_till_done() assert req.status == HTTP_OK state = hass.states.get("{}.{}".format(DEVICE_TRACKER_DOMAIN, data["device"])) assert state.state == STATE_HOME assert state.attributes["gps_accuracy"] == 123 assert state.attributes["battery_level"] == 23 assert state.attributes["speed"] == 23 assert state.attributes["direction"] == 123 assert state.attributes["altitude"] == 123 assert state.attributes["provider"] == "gps" assert state.attributes["activity"] == "idle" @pytest.mark.xfail( reason="The device_tracker component does not support unloading yet." 
) async def test_load_unload_entry(hass, gpslogger_client, webhook_id): """Test that the appropriate dispatch signals are added and removed.""" url = "/api/webhook/{}".format(webhook_id) data = {"latitude": HOME_LATITUDE, "longitude": HOME_LONGITUDE, "device": "123"} # Enter the Home req = await gpslogger_client.post(url, data=data) await hass.async_block_till_done() assert req.status == HTTP_OK state_name = hass.states.get( "{}.{}".format(DEVICE_TRACKER_DOMAIN, data["device"]) ).state assert STATE_HOME == state_name assert len(hass.data[DATA_DISPATCHER][TRACKER_UPDATE]) == 1 entry = hass.config_entries.async_entries(DOMAIN)[0] assert await gpslogger.async_unload_entry(hass, entry) await hass.async_block_till_done() assert not hass.data[DATA_DISPATCHER][TRACKER_UPDATE]
Cinntax/home-assistant
tests/components/gpslogger/test_init.py
homeassistant/components/alarmdecoder/__init__.py
"""Simplepush notification service.""" import logging import voluptuous as vol from homeassistant.const import CONF_PASSWORD import homeassistant.helpers.config_validation as cv from homeassistant.components.notify import ( ATTR_TITLE, ATTR_TITLE_DEFAULT, PLATFORM_SCHEMA, BaseNotificationService, ) _LOGGER = logging.getLogger(__name__) ATTR_ENCRYPTED = "encrypted" CONF_DEVICE_KEY = "device_key" CONF_EVENT = "event" CONF_SALT = "salt" PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend( { vol.Required(CONF_DEVICE_KEY): cv.string, vol.Optional(CONF_EVENT): cv.string, vol.Inclusive(CONF_PASSWORD, ATTR_ENCRYPTED): cv.string, vol.Inclusive(CONF_SALT, ATTR_ENCRYPTED): cv.string, } ) def get_service(hass, config, discovery_info=None): """Get the Simplepush notification service.""" return SimplePushNotificationService(config) class SimplePushNotificationService(BaseNotificationService): """Implementation of the notification service for Simplepush.""" def __init__(self, config): """Initialize the Simplepush notification service.""" self._device_key = config.get(CONF_DEVICE_KEY) self._event = config.get(CONF_EVENT) self._password = config.get(CONF_PASSWORD) self._salt = config.get(CONF_SALT) def send_message(self, message="", **kwargs): """Send a message to a Simplepush user.""" from simplepush import send, send_encrypted title = kwargs.get(ATTR_TITLE, ATTR_TITLE_DEFAULT) if self._password: send_encrypted( self._device_key, self._password, self._salt, title, message, event=self._event, ) else: send(self._device_key, title, message, event=self._event)
"""The tests the for GPSLogger device tracker platform.""" from unittest.mock import Mock, patch import pytest from homeassistant import data_entry_flow from homeassistant.components import gpslogger, zone from homeassistant.components.device_tracker import DOMAIN as DEVICE_TRACKER_DOMAIN from homeassistant.components.gpslogger import DOMAIN, TRACKER_UPDATE from homeassistant.const import ( HTTP_OK, HTTP_UNPROCESSABLE_ENTITY, STATE_HOME, STATE_NOT_HOME, ) from homeassistant.helpers.dispatcher import DATA_DISPATCHER from homeassistant.setup import async_setup_component HOME_LATITUDE = 37.239622 HOME_LONGITUDE = -115.815811 # pylint: disable=redefined-outer-name @pytest.fixture(autouse=True) def mock_dev_track(mock_device_tracker_conf): """Mock device tracker config loading.""" pass @pytest.fixture async def gpslogger_client(loop, hass, aiohttp_client): """Mock client for GPSLogger (unauthenticated).""" assert await async_setup_component(hass, "persistent_notification", {}) assert await async_setup_component(hass, DOMAIN, {DOMAIN: {}}) await hass.async_block_till_done() with patch("homeassistant.components.device_tracker.legacy.update_config"): return await aiohttp_client(hass.http.app) @pytest.fixture(autouse=True) async def setup_zones(loop, hass): """Set up Zone config in HA.""" assert await async_setup_component( hass, zone.DOMAIN, { "zone": { "name": "Home", "latitude": HOME_LATITUDE, "longitude": HOME_LONGITUDE, "radius": 100, } }, ) await hass.async_block_till_done() @pytest.fixture async def webhook_id(hass, gpslogger_client): """Initialize the GPSLogger component and get the webhook_id.""" hass.config.api = Mock(base_url="http://example.com") result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": "user"} ) assert result["type"] == data_entry_flow.RESULT_TYPE_FORM, result result = await hass.config_entries.flow.async_configure(result["flow_id"], {}) assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY await 
hass.async_block_till_done() return result["result"].data["webhook_id"] async def test_missing_data(hass, gpslogger_client, webhook_id): """Test missing data.""" url = "/api/webhook/{}".format(webhook_id) data = {"latitude": 1.0, "longitude": 1.1, "device": "123"} # No data req = await gpslogger_client.post(url) await hass.async_block_till_done() assert req.status == HTTP_UNPROCESSABLE_ENTITY # No latitude copy = data.copy() del copy["latitude"] req = await gpslogger_client.post(url, data=copy) await hass.async_block_till_done() assert req.status == HTTP_UNPROCESSABLE_ENTITY # No device copy = data.copy() del copy["device"] req = await gpslogger_client.post(url, data=copy) await hass.async_block_till_done() assert req.status == HTTP_UNPROCESSABLE_ENTITY async def test_enter_and_exit(hass, gpslogger_client, webhook_id): """Test when there is a known zone.""" url = "/api/webhook/{}".format(webhook_id) data = {"latitude": HOME_LATITUDE, "longitude": HOME_LONGITUDE, "device": "123"} # Enter the Home req = await gpslogger_client.post(url, data=data) await hass.async_block_till_done() assert req.status == HTTP_OK state_name = hass.states.get( "{}.{}".format(DEVICE_TRACKER_DOMAIN, data["device"]) ).state assert STATE_HOME == state_name # Enter Home again req = await gpslogger_client.post(url, data=data) await hass.async_block_till_done() assert req.status == HTTP_OK state_name = hass.states.get( "{}.{}".format(DEVICE_TRACKER_DOMAIN, data["device"]) ).state assert STATE_HOME == state_name data["longitude"] = 0 data["latitude"] = 0 # Enter Somewhere else req = await gpslogger_client.post(url, data=data) await hass.async_block_till_done() assert req.status == HTTP_OK state_name = hass.states.get( "{}.{}".format(DEVICE_TRACKER_DOMAIN, data["device"]) ).state assert STATE_NOT_HOME == state_name dev_reg = await hass.helpers.device_registry.async_get_registry() assert len(dev_reg.devices) == 1 ent_reg = await hass.helpers.entity_registry.async_get_registry() assert 
len(ent_reg.entities) == 1 async def test_enter_with_attrs(hass, gpslogger_client, webhook_id): """Test when additional attributes are present.""" url = "/api/webhook/{}".format(webhook_id) data = { "latitude": 1.0, "longitude": 1.1, "device": "123", "accuracy": 10.5, "battery": 10, "speed": 100, "direction": 105.32, "altitude": 102, "provider": "gps", "activity": "running", } req = await gpslogger_client.post(url, data=data) await hass.async_block_till_done() assert req.status == HTTP_OK state = hass.states.get("{}.{}".format(DEVICE_TRACKER_DOMAIN, data["device"])) assert state.state == STATE_NOT_HOME assert state.attributes["gps_accuracy"] == 10.5 assert state.attributes["battery_level"] == 10.0 assert state.attributes["speed"] == 100.0 assert state.attributes["direction"] == 105.32 assert state.attributes["altitude"] == 102.0 assert state.attributes["provider"] == "gps" assert state.attributes["activity"] == "running" data = { "latitude": HOME_LATITUDE, "longitude": HOME_LONGITUDE, "device": "123", "accuracy": 123, "battery": 23, "speed": 23, "direction": 123, "altitude": 123, "provider": "gps", "activity": "idle", } req = await gpslogger_client.post(url, data=data) await hass.async_block_till_done() assert req.status == HTTP_OK state = hass.states.get("{}.{}".format(DEVICE_TRACKER_DOMAIN, data["device"])) assert state.state == STATE_HOME assert state.attributes["gps_accuracy"] == 123 assert state.attributes["battery_level"] == 23 assert state.attributes["speed"] == 23 assert state.attributes["direction"] == 123 assert state.attributes["altitude"] == 123 assert state.attributes["provider"] == "gps" assert state.attributes["activity"] == "idle" @pytest.mark.xfail( reason="The device_tracker component does not support unloading yet." 
) async def test_load_unload_entry(hass, gpslogger_client, webhook_id): """Test that the appropriate dispatch signals are added and removed.""" url = "/api/webhook/{}".format(webhook_id) data = {"latitude": HOME_LATITUDE, "longitude": HOME_LONGITUDE, "device": "123"} # Enter the Home req = await gpslogger_client.post(url, data=data) await hass.async_block_till_done() assert req.status == HTTP_OK state_name = hass.states.get( "{}.{}".format(DEVICE_TRACKER_DOMAIN, data["device"]) ).state assert STATE_HOME == state_name assert len(hass.data[DATA_DISPATCHER][TRACKER_UPDATE]) == 1 entry = hass.config_entries.async_entries(DOMAIN)[0] assert await gpslogger.async_unload_entry(hass, entry) await hass.async_block_till_done() assert not hass.data[DATA_DISPATCHER][TRACKER_UPDATE]
Cinntax/home-assistant
tests/components/gpslogger/test_init.py
homeassistant/components/simplepush/notify.py
"""Support for the Opple light.""" import logging import voluptuous as vol from homeassistant.components.light import ( ATTR_BRIGHTNESS, ATTR_COLOR_TEMP, PLATFORM_SCHEMA, SUPPORT_BRIGHTNESS, SUPPORT_COLOR_TEMP, Light, ) from homeassistant.const import CONF_HOST, CONF_NAME import homeassistant.helpers.config_validation as cv from homeassistant.util.color import ( color_temperature_kelvin_to_mired as kelvin_to_mired, color_temperature_mired_to_kelvin as mired_to_kelvin, ) _LOGGER = logging.getLogger(__name__) DEFAULT_NAME = "opple light" PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend( { vol.Required(CONF_HOST): cv.string, vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string, } ) def setup_platform(hass, config, add_entities, discovery_info=None): """Set up the Opple light platform.""" name = config[CONF_NAME] host = config[CONF_HOST] entity = OppleLight(name, host) add_entities([entity]) _LOGGER.debug("Init light %s %s", host, entity.unique_id) class OppleLight(Light): """Opple light device.""" def __init__(self, name, host): """Initialize an Opple light.""" from pyoppleio.OppleLightDevice import OppleLightDevice self._device = OppleLightDevice(host) self._name = name self._is_on = None self._brightness = None self._color_temp = None @property def available(self): """Return True if light is available.""" return self._device.is_online @property def unique_id(self): """Return unique ID for light.""" return self._device.mac @property def name(self): """Return the display name of this light.""" return self._name @property def is_on(self): """Return true if light is on.""" return self._is_on @property def brightness(self): """Return the brightness of the light.""" return self._brightness @property def color_temp(self): """Return the color temperature of this light.""" return kelvin_to_mired(self._color_temp) @property def min_mireds(self): """Return minimum supported color temperature.""" return 175 @property def max_mireds(self): """Return maximum supported color 
temperature.""" return 333 @property def supported_features(self): """Flag supported features.""" return SUPPORT_BRIGHTNESS | SUPPORT_COLOR_TEMP def turn_on(self, **kwargs): """Instruct the light to turn on.""" _LOGGER.debug("Turn on light %s %s", self._device.ip, kwargs) if not self.is_on: self._device.power_on = True if ATTR_BRIGHTNESS in kwargs and self.brightness != kwargs[ATTR_BRIGHTNESS]: self._device.brightness = kwargs[ATTR_BRIGHTNESS] if ATTR_COLOR_TEMP in kwargs and self.color_temp != kwargs[ATTR_COLOR_TEMP]: color_temp = mired_to_kelvin(kwargs[ATTR_COLOR_TEMP]) self._device.color_temperature = color_temp def turn_off(self, **kwargs): """Instruct the light to turn off.""" self._device.power_on = False _LOGGER.debug("Turn off light %s", self._device.ip) def update(self): """Synchronize state with light.""" prev_available = self.available self._device.update() if ( prev_available == self.available and self._is_on == self._device.power_on and self._brightness == self._device.brightness and self._color_temp == self._device.color_temperature ): return if not self.available: _LOGGER.debug("Light %s is offline", self._device.ip) return self._is_on = self._device.power_on self._brightness = self._device.brightness self._color_temp = self._device.color_temperature if not self.is_on: _LOGGER.debug("Update light %s success: power off", self._device.ip) else: _LOGGER.debug( "Update light %s success: power on brightness %s " "color temperature %s", self._device.ip, self._brightness, self._color_temp, )
"""The tests the for GPSLogger device tracker platform.""" from unittest.mock import Mock, patch import pytest from homeassistant import data_entry_flow from homeassistant.components import gpslogger, zone from homeassistant.components.device_tracker import DOMAIN as DEVICE_TRACKER_DOMAIN from homeassistant.components.gpslogger import DOMAIN, TRACKER_UPDATE from homeassistant.const import ( HTTP_OK, HTTP_UNPROCESSABLE_ENTITY, STATE_HOME, STATE_NOT_HOME, ) from homeassistant.helpers.dispatcher import DATA_DISPATCHER from homeassistant.setup import async_setup_component HOME_LATITUDE = 37.239622 HOME_LONGITUDE = -115.815811 # pylint: disable=redefined-outer-name @pytest.fixture(autouse=True) def mock_dev_track(mock_device_tracker_conf): """Mock device tracker config loading.""" pass @pytest.fixture async def gpslogger_client(loop, hass, aiohttp_client): """Mock client for GPSLogger (unauthenticated).""" assert await async_setup_component(hass, "persistent_notification", {}) assert await async_setup_component(hass, DOMAIN, {DOMAIN: {}}) await hass.async_block_till_done() with patch("homeassistant.components.device_tracker.legacy.update_config"): return await aiohttp_client(hass.http.app) @pytest.fixture(autouse=True) async def setup_zones(loop, hass): """Set up Zone config in HA.""" assert await async_setup_component( hass, zone.DOMAIN, { "zone": { "name": "Home", "latitude": HOME_LATITUDE, "longitude": HOME_LONGITUDE, "radius": 100, } }, ) await hass.async_block_till_done() @pytest.fixture async def webhook_id(hass, gpslogger_client): """Initialize the GPSLogger component and get the webhook_id.""" hass.config.api = Mock(base_url="http://example.com") result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": "user"} ) assert result["type"] == data_entry_flow.RESULT_TYPE_FORM, result result = await hass.config_entries.flow.async_configure(result["flow_id"], {}) assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY await 
hass.async_block_till_done() return result["result"].data["webhook_id"] async def test_missing_data(hass, gpslogger_client, webhook_id): """Test missing data.""" url = "/api/webhook/{}".format(webhook_id) data = {"latitude": 1.0, "longitude": 1.1, "device": "123"} # No data req = await gpslogger_client.post(url) await hass.async_block_till_done() assert req.status == HTTP_UNPROCESSABLE_ENTITY # No latitude copy = data.copy() del copy["latitude"] req = await gpslogger_client.post(url, data=copy) await hass.async_block_till_done() assert req.status == HTTP_UNPROCESSABLE_ENTITY # No device copy = data.copy() del copy["device"] req = await gpslogger_client.post(url, data=copy) await hass.async_block_till_done() assert req.status == HTTP_UNPROCESSABLE_ENTITY async def test_enter_and_exit(hass, gpslogger_client, webhook_id): """Test when there is a known zone.""" url = "/api/webhook/{}".format(webhook_id) data = {"latitude": HOME_LATITUDE, "longitude": HOME_LONGITUDE, "device": "123"} # Enter the Home req = await gpslogger_client.post(url, data=data) await hass.async_block_till_done() assert req.status == HTTP_OK state_name = hass.states.get( "{}.{}".format(DEVICE_TRACKER_DOMAIN, data["device"]) ).state assert STATE_HOME == state_name # Enter Home again req = await gpslogger_client.post(url, data=data) await hass.async_block_till_done() assert req.status == HTTP_OK state_name = hass.states.get( "{}.{}".format(DEVICE_TRACKER_DOMAIN, data["device"]) ).state assert STATE_HOME == state_name data["longitude"] = 0 data["latitude"] = 0 # Enter Somewhere else req = await gpslogger_client.post(url, data=data) await hass.async_block_till_done() assert req.status == HTTP_OK state_name = hass.states.get( "{}.{}".format(DEVICE_TRACKER_DOMAIN, data["device"]) ).state assert STATE_NOT_HOME == state_name dev_reg = await hass.helpers.device_registry.async_get_registry() assert len(dev_reg.devices) == 1 ent_reg = await hass.helpers.entity_registry.async_get_registry() assert 
len(ent_reg.entities) == 1 async def test_enter_with_attrs(hass, gpslogger_client, webhook_id): """Test when additional attributes are present.""" url = "/api/webhook/{}".format(webhook_id) data = { "latitude": 1.0, "longitude": 1.1, "device": "123", "accuracy": 10.5, "battery": 10, "speed": 100, "direction": 105.32, "altitude": 102, "provider": "gps", "activity": "running", } req = await gpslogger_client.post(url, data=data) await hass.async_block_till_done() assert req.status == HTTP_OK state = hass.states.get("{}.{}".format(DEVICE_TRACKER_DOMAIN, data["device"])) assert state.state == STATE_NOT_HOME assert state.attributes["gps_accuracy"] == 10.5 assert state.attributes["battery_level"] == 10.0 assert state.attributes["speed"] == 100.0 assert state.attributes["direction"] == 105.32 assert state.attributes["altitude"] == 102.0 assert state.attributes["provider"] == "gps" assert state.attributes["activity"] == "running" data = { "latitude": HOME_LATITUDE, "longitude": HOME_LONGITUDE, "device": "123", "accuracy": 123, "battery": 23, "speed": 23, "direction": 123, "altitude": 123, "provider": "gps", "activity": "idle", } req = await gpslogger_client.post(url, data=data) await hass.async_block_till_done() assert req.status == HTTP_OK state = hass.states.get("{}.{}".format(DEVICE_TRACKER_DOMAIN, data["device"])) assert state.state == STATE_HOME assert state.attributes["gps_accuracy"] == 123 assert state.attributes["battery_level"] == 23 assert state.attributes["speed"] == 23 assert state.attributes["direction"] == 123 assert state.attributes["altitude"] == 123 assert state.attributes["provider"] == "gps" assert state.attributes["activity"] == "idle" @pytest.mark.xfail( reason="The device_tracker component does not support unloading yet." 
) async def test_load_unload_entry(hass, gpslogger_client, webhook_id): """Test that the appropriate dispatch signals are added and removed.""" url = "/api/webhook/{}".format(webhook_id) data = {"latitude": HOME_LATITUDE, "longitude": HOME_LONGITUDE, "device": "123"} # Enter the Home req = await gpslogger_client.post(url, data=data) await hass.async_block_till_done() assert req.status == HTTP_OK state_name = hass.states.get( "{}.{}".format(DEVICE_TRACKER_DOMAIN, data["device"]) ).state assert STATE_HOME == state_name assert len(hass.data[DATA_DISPATCHER][TRACKER_UPDATE]) == 1 entry = hass.config_entries.async_entries(DOMAIN)[0] assert await gpslogger.async_unload_entry(hass, entry) await hass.async_block_till_done() assert not hass.data[DATA_DISPATCHER][TRACKER_UPDATE]
Cinntax/home-assistant
tests/components/gpslogger/test_init.py
homeassistant/components/opple/light.py
"""Support for sending Wake-On-LAN magic packets.""" from functools import partial import logging import voluptuous as vol from homeassistant.const import CONF_MAC import homeassistant.helpers.config_validation as cv _LOGGER = logging.getLogger(__name__) DOMAIN = "wake_on_lan" CONF_BROADCAST_ADDRESS = "broadcast_address" SERVICE_SEND_MAGIC_PACKET = "send_magic_packet" WAKE_ON_LAN_SEND_MAGIC_PACKET_SCHEMA = vol.Schema( {vol.Required(CONF_MAC): cv.string, vol.Optional(CONF_BROADCAST_ADDRESS): cv.string} ) async def async_setup(hass, config): """Set up the wake on LAN component.""" import wakeonlan async def send_magic_packet(call): """Send magic packet to wake up a device.""" mac_address = call.data.get(CONF_MAC) broadcast_address = call.data.get(CONF_BROADCAST_ADDRESS) _LOGGER.info( "Send magic packet to mac %s (broadcast: %s)", mac_address, broadcast_address, ) if broadcast_address is not None: await hass.async_add_job( partial( wakeonlan.send_magic_packet, mac_address, ip_address=broadcast_address, ) ) else: await hass.async_add_job(partial(wakeonlan.send_magic_packet, mac_address)) hass.services.async_register( DOMAIN, SERVICE_SEND_MAGIC_PACKET, send_magic_packet, schema=WAKE_ON_LAN_SEND_MAGIC_PACKET_SCHEMA, ) return True
"""The tests the for GPSLogger device tracker platform.""" from unittest.mock import Mock, patch import pytest from homeassistant import data_entry_flow from homeassistant.components import gpslogger, zone from homeassistant.components.device_tracker import DOMAIN as DEVICE_TRACKER_DOMAIN from homeassistant.components.gpslogger import DOMAIN, TRACKER_UPDATE from homeassistant.const import ( HTTP_OK, HTTP_UNPROCESSABLE_ENTITY, STATE_HOME, STATE_NOT_HOME, ) from homeassistant.helpers.dispatcher import DATA_DISPATCHER from homeassistant.setup import async_setup_component HOME_LATITUDE = 37.239622 HOME_LONGITUDE = -115.815811 # pylint: disable=redefined-outer-name @pytest.fixture(autouse=True) def mock_dev_track(mock_device_tracker_conf): """Mock device tracker config loading.""" pass @pytest.fixture async def gpslogger_client(loop, hass, aiohttp_client): """Mock client for GPSLogger (unauthenticated).""" assert await async_setup_component(hass, "persistent_notification", {}) assert await async_setup_component(hass, DOMAIN, {DOMAIN: {}}) await hass.async_block_till_done() with patch("homeassistant.components.device_tracker.legacy.update_config"): return await aiohttp_client(hass.http.app) @pytest.fixture(autouse=True) async def setup_zones(loop, hass): """Set up Zone config in HA.""" assert await async_setup_component( hass, zone.DOMAIN, { "zone": { "name": "Home", "latitude": HOME_LATITUDE, "longitude": HOME_LONGITUDE, "radius": 100, } }, ) await hass.async_block_till_done() @pytest.fixture async def webhook_id(hass, gpslogger_client): """Initialize the GPSLogger component and get the webhook_id.""" hass.config.api = Mock(base_url="http://example.com") result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": "user"} ) assert result["type"] == data_entry_flow.RESULT_TYPE_FORM, result result = await hass.config_entries.flow.async_configure(result["flow_id"], {}) assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY await 
hass.async_block_till_done() return result["result"].data["webhook_id"] async def test_missing_data(hass, gpslogger_client, webhook_id): """Test missing data.""" url = "/api/webhook/{}".format(webhook_id) data = {"latitude": 1.0, "longitude": 1.1, "device": "123"} # No data req = await gpslogger_client.post(url) await hass.async_block_till_done() assert req.status == HTTP_UNPROCESSABLE_ENTITY # No latitude copy = data.copy() del copy["latitude"] req = await gpslogger_client.post(url, data=copy) await hass.async_block_till_done() assert req.status == HTTP_UNPROCESSABLE_ENTITY # No device copy = data.copy() del copy["device"] req = await gpslogger_client.post(url, data=copy) await hass.async_block_till_done() assert req.status == HTTP_UNPROCESSABLE_ENTITY async def test_enter_and_exit(hass, gpslogger_client, webhook_id): """Test when there is a known zone.""" url = "/api/webhook/{}".format(webhook_id) data = {"latitude": HOME_LATITUDE, "longitude": HOME_LONGITUDE, "device": "123"} # Enter the Home req = await gpslogger_client.post(url, data=data) await hass.async_block_till_done() assert req.status == HTTP_OK state_name = hass.states.get( "{}.{}".format(DEVICE_TRACKER_DOMAIN, data["device"]) ).state assert STATE_HOME == state_name # Enter Home again req = await gpslogger_client.post(url, data=data) await hass.async_block_till_done() assert req.status == HTTP_OK state_name = hass.states.get( "{}.{}".format(DEVICE_TRACKER_DOMAIN, data["device"]) ).state assert STATE_HOME == state_name data["longitude"] = 0 data["latitude"] = 0 # Enter Somewhere else req = await gpslogger_client.post(url, data=data) await hass.async_block_till_done() assert req.status == HTTP_OK state_name = hass.states.get( "{}.{}".format(DEVICE_TRACKER_DOMAIN, data["device"]) ).state assert STATE_NOT_HOME == state_name dev_reg = await hass.helpers.device_registry.async_get_registry() assert len(dev_reg.devices) == 1 ent_reg = await hass.helpers.entity_registry.async_get_registry() assert 
len(ent_reg.entities) == 1 async def test_enter_with_attrs(hass, gpslogger_client, webhook_id): """Test when additional attributes are present.""" url = "/api/webhook/{}".format(webhook_id) data = { "latitude": 1.0, "longitude": 1.1, "device": "123", "accuracy": 10.5, "battery": 10, "speed": 100, "direction": 105.32, "altitude": 102, "provider": "gps", "activity": "running", } req = await gpslogger_client.post(url, data=data) await hass.async_block_till_done() assert req.status == HTTP_OK state = hass.states.get("{}.{}".format(DEVICE_TRACKER_DOMAIN, data["device"])) assert state.state == STATE_NOT_HOME assert state.attributes["gps_accuracy"] == 10.5 assert state.attributes["battery_level"] == 10.0 assert state.attributes["speed"] == 100.0 assert state.attributes["direction"] == 105.32 assert state.attributes["altitude"] == 102.0 assert state.attributes["provider"] == "gps" assert state.attributes["activity"] == "running" data = { "latitude": HOME_LATITUDE, "longitude": HOME_LONGITUDE, "device": "123", "accuracy": 123, "battery": 23, "speed": 23, "direction": 123, "altitude": 123, "provider": "gps", "activity": "idle", } req = await gpslogger_client.post(url, data=data) await hass.async_block_till_done() assert req.status == HTTP_OK state = hass.states.get("{}.{}".format(DEVICE_TRACKER_DOMAIN, data["device"])) assert state.state == STATE_HOME assert state.attributes["gps_accuracy"] == 123 assert state.attributes["battery_level"] == 23 assert state.attributes["speed"] == 23 assert state.attributes["direction"] == 123 assert state.attributes["altitude"] == 123 assert state.attributes["provider"] == "gps" assert state.attributes["activity"] == "idle" @pytest.mark.xfail( reason="The device_tracker component does not support unloading yet." 
) async def test_load_unload_entry(hass, gpslogger_client, webhook_id): """Test that the appropriate dispatch signals are added and removed.""" url = "/api/webhook/{}".format(webhook_id) data = {"latitude": HOME_LATITUDE, "longitude": HOME_LONGITUDE, "device": "123"} # Enter the Home req = await gpslogger_client.post(url, data=data) await hass.async_block_till_done() assert req.status == HTTP_OK state_name = hass.states.get( "{}.{}".format(DEVICE_TRACKER_DOMAIN, data["device"]) ).state assert STATE_HOME == state_name assert len(hass.data[DATA_DISPATCHER][TRACKER_UPDATE]) == 1 entry = hass.config_entries.async_entries(DOMAIN)[0] assert await gpslogger.async_unload_entry(hass, entry) await hass.async_block_till_done() assert not hass.data[DATA_DISPATCHER][TRACKER_UPDATE]
Cinntax/home-assistant
tests/components/gpslogger/test_init.py
homeassistant/components/wake_on_lan/__init__.py
"""Support for XS1 switches.""" import logging from xs1_api_client.api_constants import ActuatorType from homeassistant.helpers.entity import ToggleEntity from . import ACTUATORS, DOMAIN as COMPONENT_DOMAIN, XS1DeviceEntity _LOGGER = logging.getLogger(__name__) def setup_platform(hass, config, add_entities, discovery_info=None): """Set up the XS1 switch platform.""" actuators = hass.data[COMPONENT_DOMAIN][ACTUATORS] switch_entities = [] for actuator in actuators: if (actuator.type() == ActuatorType.SWITCH) or ( actuator.type() == ActuatorType.DIMMER ): switch_entities.append(XS1SwitchEntity(actuator)) add_entities(switch_entities) class XS1SwitchEntity(XS1DeviceEntity, ToggleEntity): """Representation of a XS1 switch actuator.""" @property def name(self): """Return the name of the device if any.""" return self.device.name() @property def is_on(self): """Return true if switch is on.""" return self.device.value() == 100 def turn_on(self, **kwargs): """Turn the device on.""" self.device.turn_on() def turn_off(self, **kwargs): """Turn the device off.""" self.device.turn_off()
"""The tests the for GPSLogger device tracker platform.""" from unittest.mock import Mock, patch import pytest from homeassistant import data_entry_flow from homeassistant.components import gpslogger, zone from homeassistant.components.device_tracker import DOMAIN as DEVICE_TRACKER_DOMAIN from homeassistant.components.gpslogger import DOMAIN, TRACKER_UPDATE from homeassistant.const import ( HTTP_OK, HTTP_UNPROCESSABLE_ENTITY, STATE_HOME, STATE_NOT_HOME, ) from homeassistant.helpers.dispatcher import DATA_DISPATCHER from homeassistant.setup import async_setup_component HOME_LATITUDE = 37.239622 HOME_LONGITUDE = -115.815811 # pylint: disable=redefined-outer-name @pytest.fixture(autouse=True) def mock_dev_track(mock_device_tracker_conf): """Mock device tracker config loading.""" pass @pytest.fixture async def gpslogger_client(loop, hass, aiohttp_client): """Mock client for GPSLogger (unauthenticated).""" assert await async_setup_component(hass, "persistent_notification", {}) assert await async_setup_component(hass, DOMAIN, {DOMAIN: {}}) await hass.async_block_till_done() with patch("homeassistant.components.device_tracker.legacy.update_config"): return await aiohttp_client(hass.http.app) @pytest.fixture(autouse=True) async def setup_zones(loop, hass): """Set up Zone config in HA.""" assert await async_setup_component( hass, zone.DOMAIN, { "zone": { "name": "Home", "latitude": HOME_LATITUDE, "longitude": HOME_LONGITUDE, "radius": 100, } }, ) await hass.async_block_till_done() @pytest.fixture async def webhook_id(hass, gpslogger_client): """Initialize the GPSLogger component and get the webhook_id.""" hass.config.api = Mock(base_url="http://example.com") result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": "user"} ) assert result["type"] == data_entry_flow.RESULT_TYPE_FORM, result result = await hass.config_entries.flow.async_configure(result["flow_id"], {}) assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY await 
hass.async_block_till_done() return result["result"].data["webhook_id"] async def test_missing_data(hass, gpslogger_client, webhook_id): """Test missing data.""" url = "/api/webhook/{}".format(webhook_id) data = {"latitude": 1.0, "longitude": 1.1, "device": "123"} # No data req = await gpslogger_client.post(url) await hass.async_block_till_done() assert req.status == HTTP_UNPROCESSABLE_ENTITY # No latitude copy = data.copy() del copy["latitude"] req = await gpslogger_client.post(url, data=copy) await hass.async_block_till_done() assert req.status == HTTP_UNPROCESSABLE_ENTITY # No device copy = data.copy() del copy["device"] req = await gpslogger_client.post(url, data=copy) await hass.async_block_till_done() assert req.status == HTTP_UNPROCESSABLE_ENTITY async def test_enter_and_exit(hass, gpslogger_client, webhook_id): """Test when there is a known zone.""" url = "/api/webhook/{}".format(webhook_id) data = {"latitude": HOME_LATITUDE, "longitude": HOME_LONGITUDE, "device": "123"} # Enter the Home req = await gpslogger_client.post(url, data=data) await hass.async_block_till_done() assert req.status == HTTP_OK state_name = hass.states.get( "{}.{}".format(DEVICE_TRACKER_DOMAIN, data["device"]) ).state assert STATE_HOME == state_name # Enter Home again req = await gpslogger_client.post(url, data=data) await hass.async_block_till_done() assert req.status == HTTP_OK state_name = hass.states.get( "{}.{}".format(DEVICE_TRACKER_DOMAIN, data["device"]) ).state assert STATE_HOME == state_name data["longitude"] = 0 data["latitude"] = 0 # Enter Somewhere else req = await gpslogger_client.post(url, data=data) await hass.async_block_till_done() assert req.status == HTTP_OK state_name = hass.states.get( "{}.{}".format(DEVICE_TRACKER_DOMAIN, data["device"]) ).state assert STATE_NOT_HOME == state_name dev_reg = await hass.helpers.device_registry.async_get_registry() assert len(dev_reg.devices) == 1 ent_reg = await hass.helpers.entity_registry.async_get_registry() assert 
len(ent_reg.entities) == 1 async def test_enter_with_attrs(hass, gpslogger_client, webhook_id): """Test when additional attributes are present.""" url = "/api/webhook/{}".format(webhook_id) data = { "latitude": 1.0, "longitude": 1.1, "device": "123", "accuracy": 10.5, "battery": 10, "speed": 100, "direction": 105.32, "altitude": 102, "provider": "gps", "activity": "running", } req = await gpslogger_client.post(url, data=data) await hass.async_block_till_done() assert req.status == HTTP_OK state = hass.states.get("{}.{}".format(DEVICE_TRACKER_DOMAIN, data["device"])) assert state.state == STATE_NOT_HOME assert state.attributes["gps_accuracy"] == 10.5 assert state.attributes["battery_level"] == 10.0 assert state.attributes["speed"] == 100.0 assert state.attributes["direction"] == 105.32 assert state.attributes["altitude"] == 102.0 assert state.attributes["provider"] == "gps" assert state.attributes["activity"] == "running" data = { "latitude": HOME_LATITUDE, "longitude": HOME_LONGITUDE, "device": "123", "accuracy": 123, "battery": 23, "speed": 23, "direction": 123, "altitude": 123, "provider": "gps", "activity": "idle", } req = await gpslogger_client.post(url, data=data) await hass.async_block_till_done() assert req.status == HTTP_OK state = hass.states.get("{}.{}".format(DEVICE_TRACKER_DOMAIN, data["device"])) assert state.state == STATE_HOME assert state.attributes["gps_accuracy"] == 123 assert state.attributes["battery_level"] == 23 assert state.attributes["speed"] == 23 assert state.attributes["direction"] == 123 assert state.attributes["altitude"] == 123 assert state.attributes["provider"] == "gps" assert state.attributes["activity"] == "idle" @pytest.mark.xfail( reason="The device_tracker component does not support unloading yet." 
) async def test_load_unload_entry(hass, gpslogger_client, webhook_id): """Test that the appropriate dispatch signals are added and removed.""" url = "/api/webhook/{}".format(webhook_id) data = {"latitude": HOME_LATITUDE, "longitude": HOME_LONGITUDE, "device": "123"} # Enter the Home req = await gpslogger_client.post(url, data=data) await hass.async_block_till_done() assert req.status == HTTP_OK state_name = hass.states.get( "{}.{}".format(DEVICE_TRACKER_DOMAIN, data["device"]) ).state assert STATE_HOME == state_name assert len(hass.data[DATA_DISPATCHER][TRACKER_UPDATE]) == 1 entry = hass.config_entries.async_entries(DOMAIN)[0] assert await gpslogger.async_unload_entry(hass, entry) await hass.async_block_till_done() assert not hass.data[DATA_DISPATCHER][TRACKER_UPDATE]
Cinntax/home-assistant
tests/components/gpslogger/test_init.py
homeassistant/components/xs1/switch.py
"""Device tracker helpers.""" import asyncio from typing import Dict, Any, Callable, Optional from types import ModuleType import attr from homeassistant.core import callback from homeassistant.setup import async_prepare_setup_platform from homeassistant.helpers import config_per_platform from homeassistant.exceptions import HomeAssistantError from homeassistant.helpers.typing import ConfigType, HomeAssistantType from homeassistant.helpers.event import async_track_time_interval from homeassistant.util import dt as dt_util from homeassistant.const import ATTR_LATITUDE, ATTR_LONGITUDE from .const import ( DOMAIN, PLATFORM_TYPE_LEGACY, CONF_SCAN_INTERVAL, SCAN_INTERVAL, SOURCE_TYPE_ROUTER, LOGGER, ) @attr.s class DeviceTrackerPlatform: """Class to hold platform information.""" LEGACY_SETUP = ( "async_get_scanner", "get_scanner", "async_setup_scanner", "setup_scanner", ) name = attr.ib(type=str) platform = attr.ib(type=ModuleType) config = attr.ib(type=Dict) @property def type(self): """Return platform type.""" for methods, platform_type in ((self.LEGACY_SETUP, PLATFORM_TYPE_LEGACY),): for meth in methods: if hasattr(self.platform, meth): return platform_type return None async def async_setup_legacy(self, hass, tracker, discovery_info=None): """Set up a legacy platform.""" LOGGER.info("Setting up %s.%s", DOMAIN, self.type) try: scanner = None setup = None if hasattr(self.platform, "async_get_scanner"): scanner = await self.platform.async_get_scanner( hass, {DOMAIN: self.config} ) elif hasattr(self.platform, "get_scanner"): scanner = await hass.async_add_job( self.platform.get_scanner, hass, {DOMAIN: self.config} ) elif hasattr(self.platform, "async_setup_scanner"): setup = await self.platform.async_setup_scanner( hass, self.config, tracker.async_see, discovery_info ) elif hasattr(self.platform, "setup_scanner"): setup = await hass.async_add_job( self.platform.setup_scanner, hass, self.config, tracker.see, discovery_info, ) else: raise HomeAssistantError("Invalid legacy 
device_tracker platform.") if scanner: async_setup_scanner_platform( hass, self.config, scanner, tracker.async_see, self.type ) return if not setup: LOGGER.error("Error setting up platform %s", self.type) return except Exception: # pylint: disable=broad-except LOGGER.exception("Error setting up platform %s", self.type) async def async_extract_config(hass, config): """Extract device tracker config and split between legacy and modern.""" legacy = [] for platform in await asyncio.gather( *( async_create_platform_type(hass, config, p_type, p_config) for p_type, p_config in config_per_platform(config, DOMAIN) ) ): if platform is None: continue if platform.type == PLATFORM_TYPE_LEGACY: legacy.append(platform) else: raise ValueError( "Unable to determine type for {}: {}".format( platform.name, platform.type ) ) return legacy async def async_create_platform_type( hass, config, p_type, p_config ) -> Optional[DeviceTrackerPlatform]: """Determine type of platform.""" platform = await async_prepare_setup_platform(hass, config, DOMAIN, p_type) if platform is None: return None return DeviceTrackerPlatform(p_type, platform, p_config) @callback def async_setup_scanner_platform( hass: HomeAssistantType, config: ConfigType, scanner: Any, async_see_device: Callable, platform: str, ): """Set up the connect scanner-based platform to device tracker. This method must be run in the event loop. 
""" interval = config.get(CONF_SCAN_INTERVAL, SCAN_INTERVAL) update_lock = asyncio.Lock() scanner.hass = hass # Initial scan of each mac we also tell about host name for config seen: Any = set() async def async_device_tracker_scan(now: dt_util.dt.datetime): """Handle interval matches.""" if update_lock.locked(): LOGGER.warning( "Updating device list from %s took longer than the scheduled " "scan interval %s", platform, interval, ) return async with update_lock: found_devices = await scanner.async_scan_devices() for mac in found_devices: if mac in seen: host_name = None else: host_name = await scanner.async_get_device_name(mac) seen.add(mac) try: extra_attributes = await scanner.async_get_extra_attributes(mac) except NotImplementedError: extra_attributes = dict() kwargs = { "mac": mac, "host_name": host_name, "source_type": SOURCE_TYPE_ROUTER, "attributes": { "scanner": scanner.__class__.__name__, **extra_attributes, }, } zone_home = hass.states.get(hass.components.zone.ENTITY_ID_HOME) if zone_home: kwargs["gps"] = [ zone_home.attributes[ATTR_LATITUDE], zone_home.attributes[ATTR_LONGITUDE], ] kwargs["gps_accuracy"] = 0 hass.async_create_task(async_see_device(**kwargs)) async_track_time_interval(hass, async_device_tracker_scan, interval) hass.async_create_task(async_device_tracker_scan(None))
"""The tests the for GPSLogger device tracker platform.""" from unittest.mock import Mock, patch import pytest from homeassistant import data_entry_flow from homeassistant.components import gpslogger, zone from homeassistant.components.device_tracker import DOMAIN as DEVICE_TRACKER_DOMAIN from homeassistant.components.gpslogger import DOMAIN, TRACKER_UPDATE from homeassistant.const import ( HTTP_OK, HTTP_UNPROCESSABLE_ENTITY, STATE_HOME, STATE_NOT_HOME, ) from homeassistant.helpers.dispatcher import DATA_DISPATCHER from homeassistant.setup import async_setup_component HOME_LATITUDE = 37.239622 HOME_LONGITUDE = -115.815811 # pylint: disable=redefined-outer-name @pytest.fixture(autouse=True) def mock_dev_track(mock_device_tracker_conf): """Mock device tracker config loading.""" pass @pytest.fixture async def gpslogger_client(loop, hass, aiohttp_client): """Mock client for GPSLogger (unauthenticated).""" assert await async_setup_component(hass, "persistent_notification", {}) assert await async_setup_component(hass, DOMAIN, {DOMAIN: {}}) await hass.async_block_till_done() with patch("homeassistant.components.device_tracker.legacy.update_config"): return await aiohttp_client(hass.http.app) @pytest.fixture(autouse=True) async def setup_zones(loop, hass): """Set up Zone config in HA.""" assert await async_setup_component( hass, zone.DOMAIN, { "zone": { "name": "Home", "latitude": HOME_LATITUDE, "longitude": HOME_LONGITUDE, "radius": 100, } }, ) await hass.async_block_till_done() @pytest.fixture async def webhook_id(hass, gpslogger_client): """Initialize the GPSLogger component and get the webhook_id.""" hass.config.api = Mock(base_url="http://example.com") result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": "user"} ) assert result["type"] == data_entry_flow.RESULT_TYPE_FORM, result result = await hass.config_entries.flow.async_configure(result["flow_id"], {}) assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY await 
hass.async_block_till_done() return result["result"].data["webhook_id"] async def test_missing_data(hass, gpslogger_client, webhook_id): """Test missing data.""" url = "/api/webhook/{}".format(webhook_id) data = {"latitude": 1.0, "longitude": 1.1, "device": "123"} # No data req = await gpslogger_client.post(url) await hass.async_block_till_done() assert req.status == HTTP_UNPROCESSABLE_ENTITY # No latitude copy = data.copy() del copy["latitude"] req = await gpslogger_client.post(url, data=copy) await hass.async_block_till_done() assert req.status == HTTP_UNPROCESSABLE_ENTITY # No device copy = data.copy() del copy["device"] req = await gpslogger_client.post(url, data=copy) await hass.async_block_till_done() assert req.status == HTTP_UNPROCESSABLE_ENTITY async def test_enter_and_exit(hass, gpslogger_client, webhook_id): """Test when there is a known zone.""" url = "/api/webhook/{}".format(webhook_id) data = {"latitude": HOME_LATITUDE, "longitude": HOME_LONGITUDE, "device": "123"} # Enter the Home req = await gpslogger_client.post(url, data=data) await hass.async_block_till_done() assert req.status == HTTP_OK state_name = hass.states.get( "{}.{}".format(DEVICE_TRACKER_DOMAIN, data["device"]) ).state assert STATE_HOME == state_name # Enter Home again req = await gpslogger_client.post(url, data=data) await hass.async_block_till_done() assert req.status == HTTP_OK state_name = hass.states.get( "{}.{}".format(DEVICE_TRACKER_DOMAIN, data["device"]) ).state assert STATE_HOME == state_name data["longitude"] = 0 data["latitude"] = 0 # Enter Somewhere else req = await gpslogger_client.post(url, data=data) await hass.async_block_till_done() assert req.status == HTTP_OK state_name = hass.states.get( "{}.{}".format(DEVICE_TRACKER_DOMAIN, data["device"]) ).state assert STATE_NOT_HOME == state_name dev_reg = await hass.helpers.device_registry.async_get_registry() assert len(dev_reg.devices) == 1 ent_reg = await hass.helpers.entity_registry.async_get_registry() assert 
len(ent_reg.entities) == 1 async def test_enter_with_attrs(hass, gpslogger_client, webhook_id): """Test when additional attributes are present.""" url = "/api/webhook/{}".format(webhook_id) data = { "latitude": 1.0, "longitude": 1.1, "device": "123", "accuracy": 10.5, "battery": 10, "speed": 100, "direction": 105.32, "altitude": 102, "provider": "gps", "activity": "running", } req = await gpslogger_client.post(url, data=data) await hass.async_block_till_done() assert req.status == HTTP_OK state = hass.states.get("{}.{}".format(DEVICE_TRACKER_DOMAIN, data["device"])) assert state.state == STATE_NOT_HOME assert state.attributes["gps_accuracy"] == 10.5 assert state.attributes["battery_level"] == 10.0 assert state.attributes["speed"] == 100.0 assert state.attributes["direction"] == 105.32 assert state.attributes["altitude"] == 102.0 assert state.attributes["provider"] == "gps" assert state.attributes["activity"] == "running" data = { "latitude": HOME_LATITUDE, "longitude": HOME_LONGITUDE, "device": "123", "accuracy": 123, "battery": 23, "speed": 23, "direction": 123, "altitude": 123, "provider": "gps", "activity": "idle", } req = await gpslogger_client.post(url, data=data) await hass.async_block_till_done() assert req.status == HTTP_OK state = hass.states.get("{}.{}".format(DEVICE_TRACKER_DOMAIN, data["device"])) assert state.state == STATE_HOME assert state.attributes["gps_accuracy"] == 123 assert state.attributes["battery_level"] == 23 assert state.attributes["speed"] == 23 assert state.attributes["direction"] == 123 assert state.attributes["altitude"] == 123 assert state.attributes["provider"] == "gps" assert state.attributes["activity"] == "idle" @pytest.mark.xfail( reason="The device_tracker component does not support unloading yet." 
) async def test_load_unload_entry(hass, gpslogger_client, webhook_id): """Test that the appropriate dispatch signals are added and removed.""" url = "/api/webhook/{}".format(webhook_id) data = {"latitude": HOME_LATITUDE, "longitude": HOME_LONGITUDE, "device": "123"} # Enter the Home req = await gpslogger_client.post(url, data=data) await hass.async_block_till_done() assert req.status == HTTP_OK state_name = hass.states.get( "{}.{}".format(DEVICE_TRACKER_DOMAIN, data["device"]) ).state assert STATE_HOME == state_name assert len(hass.data[DATA_DISPATCHER][TRACKER_UPDATE]) == 1 entry = hass.config_entries.async_entries(DOMAIN)[0] assert await gpslogger.async_unload_entry(hass, entry) await hass.async_block_till_done() assert not hass.data[DATA_DISPATCHER][TRACKER_UPDATE]
Cinntax/home-assistant
tests/components/gpslogger/test_init.py
homeassistant/components/device_tracker/setup.py
"""Constants for the IGD component.""" import logging CONF_ENABLE_PORT_MAPPING = "port_mapping" CONF_ENABLE_SENSORS = "sensors" CONF_HASS = "hass" CONF_LOCAL_IP = "local_ip" CONF_PORTS = "ports" DOMAIN = "upnp" LOGGER = logging.getLogger(__package__) SIGNAL_REMOVE_SENSOR = "upnp_remove_sensor"
"""The tests the for GPSLogger device tracker platform.""" from unittest.mock import Mock, patch import pytest from homeassistant import data_entry_flow from homeassistant.components import gpslogger, zone from homeassistant.components.device_tracker import DOMAIN as DEVICE_TRACKER_DOMAIN from homeassistant.components.gpslogger import DOMAIN, TRACKER_UPDATE from homeassistant.const import ( HTTP_OK, HTTP_UNPROCESSABLE_ENTITY, STATE_HOME, STATE_NOT_HOME, ) from homeassistant.helpers.dispatcher import DATA_DISPATCHER from homeassistant.setup import async_setup_component HOME_LATITUDE = 37.239622 HOME_LONGITUDE = -115.815811 # pylint: disable=redefined-outer-name @pytest.fixture(autouse=True) def mock_dev_track(mock_device_tracker_conf): """Mock device tracker config loading.""" pass @pytest.fixture async def gpslogger_client(loop, hass, aiohttp_client): """Mock client for GPSLogger (unauthenticated).""" assert await async_setup_component(hass, "persistent_notification", {}) assert await async_setup_component(hass, DOMAIN, {DOMAIN: {}}) await hass.async_block_till_done() with patch("homeassistant.components.device_tracker.legacy.update_config"): return await aiohttp_client(hass.http.app) @pytest.fixture(autouse=True) async def setup_zones(loop, hass): """Set up Zone config in HA.""" assert await async_setup_component( hass, zone.DOMAIN, { "zone": { "name": "Home", "latitude": HOME_LATITUDE, "longitude": HOME_LONGITUDE, "radius": 100, } }, ) await hass.async_block_till_done() @pytest.fixture async def webhook_id(hass, gpslogger_client): """Initialize the GPSLogger component and get the webhook_id.""" hass.config.api = Mock(base_url="http://example.com") result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": "user"} ) assert result["type"] == data_entry_flow.RESULT_TYPE_FORM, result result = await hass.config_entries.flow.async_configure(result["flow_id"], {}) assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY await 
hass.async_block_till_done() return result["result"].data["webhook_id"] async def test_missing_data(hass, gpslogger_client, webhook_id): """Test missing data.""" url = "/api/webhook/{}".format(webhook_id) data = {"latitude": 1.0, "longitude": 1.1, "device": "123"} # No data req = await gpslogger_client.post(url) await hass.async_block_till_done() assert req.status == HTTP_UNPROCESSABLE_ENTITY # No latitude copy = data.copy() del copy["latitude"] req = await gpslogger_client.post(url, data=copy) await hass.async_block_till_done() assert req.status == HTTP_UNPROCESSABLE_ENTITY # No device copy = data.copy() del copy["device"] req = await gpslogger_client.post(url, data=copy) await hass.async_block_till_done() assert req.status == HTTP_UNPROCESSABLE_ENTITY async def test_enter_and_exit(hass, gpslogger_client, webhook_id): """Test when there is a known zone.""" url = "/api/webhook/{}".format(webhook_id) data = {"latitude": HOME_LATITUDE, "longitude": HOME_LONGITUDE, "device": "123"} # Enter the Home req = await gpslogger_client.post(url, data=data) await hass.async_block_till_done() assert req.status == HTTP_OK state_name = hass.states.get( "{}.{}".format(DEVICE_TRACKER_DOMAIN, data["device"]) ).state assert STATE_HOME == state_name # Enter Home again req = await gpslogger_client.post(url, data=data) await hass.async_block_till_done() assert req.status == HTTP_OK state_name = hass.states.get( "{}.{}".format(DEVICE_TRACKER_DOMAIN, data["device"]) ).state assert STATE_HOME == state_name data["longitude"] = 0 data["latitude"] = 0 # Enter Somewhere else req = await gpslogger_client.post(url, data=data) await hass.async_block_till_done() assert req.status == HTTP_OK state_name = hass.states.get( "{}.{}".format(DEVICE_TRACKER_DOMAIN, data["device"]) ).state assert STATE_NOT_HOME == state_name dev_reg = await hass.helpers.device_registry.async_get_registry() assert len(dev_reg.devices) == 1 ent_reg = await hass.helpers.entity_registry.async_get_registry() assert 
len(ent_reg.entities) == 1 async def test_enter_with_attrs(hass, gpslogger_client, webhook_id): """Test when additional attributes are present.""" url = "/api/webhook/{}".format(webhook_id) data = { "latitude": 1.0, "longitude": 1.1, "device": "123", "accuracy": 10.5, "battery": 10, "speed": 100, "direction": 105.32, "altitude": 102, "provider": "gps", "activity": "running", } req = await gpslogger_client.post(url, data=data) await hass.async_block_till_done() assert req.status == HTTP_OK state = hass.states.get("{}.{}".format(DEVICE_TRACKER_DOMAIN, data["device"])) assert state.state == STATE_NOT_HOME assert state.attributes["gps_accuracy"] == 10.5 assert state.attributes["battery_level"] == 10.0 assert state.attributes["speed"] == 100.0 assert state.attributes["direction"] == 105.32 assert state.attributes["altitude"] == 102.0 assert state.attributes["provider"] == "gps" assert state.attributes["activity"] == "running" data = { "latitude": HOME_LATITUDE, "longitude": HOME_LONGITUDE, "device": "123", "accuracy": 123, "battery": 23, "speed": 23, "direction": 123, "altitude": 123, "provider": "gps", "activity": "idle", } req = await gpslogger_client.post(url, data=data) await hass.async_block_till_done() assert req.status == HTTP_OK state = hass.states.get("{}.{}".format(DEVICE_TRACKER_DOMAIN, data["device"])) assert state.state == STATE_HOME assert state.attributes["gps_accuracy"] == 123 assert state.attributes["battery_level"] == 23 assert state.attributes["speed"] == 23 assert state.attributes["direction"] == 123 assert state.attributes["altitude"] == 123 assert state.attributes["provider"] == "gps" assert state.attributes["activity"] == "idle" @pytest.mark.xfail( reason="The device_tracker component does not support unloading yet." 
) async def test_load_unload_entry(hass, gpslogger_client, webhook_id): """Test that the appropriate dispatch signals are added and removed.""" url = "/api/webhook/{}".format(webhook_id) data = {"latitude": HOME_LATITUDE, "longitude": HOME_LONGITUDE, "device": "123"} # Enter the Home req = await gpslogger_client.post(url, data=data) await hass.async_block_till_done() assert req.status == HTTP_OK state_name = hass.states.get( "{}.{}".format(DEVICE_TRACKER_DOMAIN, data["device"]) ).state assert STATE_HOME == state_name assert len(hass.data[DATA_DISPATCHER][TRACKER_UPDATE]) == 1 entry = hass.config_entries.async_entries(DOMAIN)[0] assert await gpslogger.async_unload_entry(hass, entry) await hass.async_block_till_done() assert not hass.data[DATA_DISPATCHER][TRACKER_UPDATE]
Cinntax/home-assistant
tests/components/gpslogger/test_init.py
homeassistant/components/upnp/const.py
"""Support for the ZHA platform.""" import logging import time from homeassistant.components.device_tracker import DOMAIN, SOURCE_TYPE_ROUTER from homeassistant.components.device_tracker.config_entry import ScannerEntity from homeassistant.core import callback from homeassistant.helpers.dispatcher import async_dispatcher_connect from .core.const import ( CHANNEL_POWER_CONFIGURATION, DATA_ZHA, DATA_ZHA_DISPATCHERS, SIGNAL_ATTR_UPDATED, ZHA_DISCOVERY_NEW, ) from .entity import ZhaEntity from .sensor import battery_percentage_remaining_formatter _LOGGER = logging.getLogger(__name__) async def async_setup_entry(hass, config_entry, async_add_entities): """Set up the Zigbee Home Automation device tracker from config entry.""" async def async_discover(discovery_info): await _async_setup_entities( hass, config_entry, async_add_entities, [discovery_info] ) unsub = async_dispatcher_connect( hass, ZHA_DISCOVERY_NEW.format(DOMAIN), async_discover ) hass.data[DATA_ZHA][DATA_ZHA_DISPATCHERS].append(unsub) device_trackers = hass.data.get(DATA_ZHA, {}).get(DOMAIN) if device_trackers is not None: await _async_setup_entities( hass, config_entry, async_add_entities, device_trackers.values() ) del hass.data[DATA_ZHA][DOMAIN] async def _async_setup_entities( hass, config_entry, async_add_entities, discovery_infos ): """Set up the ZHA device trackers.""" entities = [] for discovery_info in discovery_infos: entities.append(ZHADeviceScannerEntity(**discovery_info)) async_add_entities(entities, update_before_add=True) class ZHADeviceScannerEntity(ScannerEntity, ZhaEntity): """Represent a tracked device.""" def __init__(self, **kwargs): """Initialize the ZHA device tracker.""" super().__init__(**kwargs) self._battery_channel = self.cluster_channels.get(CHANNEL_POWER_CONFIGURATION) self._connected = False self._keepalive_interval = 60 self._should_poll = True self._battery_level = None async def async_added_to_hass(self): """Run when about to be added to hass.""" await 
super().async_added_to_hass() if self._battery_channel: await self.async_accept_signal( self._battery_channel, SIGNAL_ATTR_UPDATED, self.async_battery_percentage_remaining_updated, ) async def async_update(self): """Handle polling.""" if self.zha_device.last_seen is None: self._connected = False else: difference = time.time() - self.zha_device.last_seen if difference > self._keepalive_interval: self._connected = False else: self._connected = True @property def is_connected(self): """Return true if the device is connected to the network.""" return self._connected @property def source_type(self): """Return the source type, eg gps or router, of the device.""" return SOURCE_TYPE_ROUTER @callback def async_battery_percentage_remaining_updated(self, value): """Handle tracking.""" self.debug("battery_percentage_remaining updated: %s", value) self._connected = True self._battery_level = battery_percentage_remaining_formatter(value) self.async_schedule_update_ha_state() @property def battery_level(self): """Return the battery level of the device. Percentage from 0-100. """ return self._battery_level
"""The tests the for GPSLogger device tracker platform.""" from unittest.mock import Mock, patch import pytest from homeassistant import data_entry_flow from homeassistant.components import gpslogger, zone from homeassistant.components.device_tracker import DOMAIN as DEVICE_TRACKER_DOMAIN from homeassistant.components.gpslogger import DOMAIN, TRACKER_UPDATE from homeassistant.const import ( HTTP_OK, HTTP_UNPROCESSABLE_ENTITY, STATE_HOME, STATE_NOT_HOME, ) from homeassistant.helpers.dispatcher import DATA_DISPATCHER from homeassistant.setup import async_setup_component HOME_LATITUDE = 37.239622 HOME_LONGITUDE = -115.815811 # pylint: disable=redefined-outer-name @pytest.fixture(autouse=True) def mock_dev_track(mock_device_tracker_conf): """Mock device tracker config loading.""" pass @pytest.fixture async def gpslogger_client(loop, hass, aiohttp_client): """Mock client for GPSLogger (unauthenticated).""" assert await async_setup_component(hass, "persistent_notification", {}) assert await async_setup_component(hass, DOMAIN, {DOMAIN: {}}) await hass.async_block_till_done() with patch("homeassistant.components.device_tracker.legacy.update_config"): return await aiohttp_client(hass.http.app) @pytest.fixture(autouse=True) async def setup_zones(loop, hass): """Set up Zone config in HA.""" assert await async_setup_component( hass, zone.DOMAIN, { "zone": { "name": "Home", "latitude": HOME_LATITUDE, "longitude": HOME_LONGITUDE, "radius": 100, } }, ) await hass.async_block_till_done() @pytest.fixture async def webhook_id(hass, gpslogger_client): """Initialize the GPSLogger component and get the webhook_id.""" hass.config.api = Mock(base_url="http://example.com") result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": "user"} ) assert result["type"] == data_entry_flow.RESULT_TYPE_FORM, result result = await hass.config_entries.flow.async_configure(result["flow_id"], {}) assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY await 
hass.async_block_till_done() return result["result"].data["webhook_id"] async def test_missing_data(hass, gpslogger_client, webhook_id): """Test missing data.""" url = "/api/webhook/{}".format(webhook_id) data = {"latitude": 1.0, "longitude": 1.1, "device": "123"} # No data req = await gpslogger_client.post(url) await hass.async_block_till_done() assert req.status == HTTP_UNPROCESSABLE_ENTITY # No latitude copy = data.copy() del copy["latitude"] req = await gpslogger_client.post(url, data=copy) await hass.async_block_till_done() assert req.status == HTTP_UNPROCESSABLE_ENTITY # No device copy = data.copy() del copy["device"] req = await gpslogger_client.post(url, data=copy) await hass.async_block_till_done() assert req.status == HTTP_UNPROCESSABLE_ENTITY async def test_enter_and_exit(hass, gpslogger_client, webhook_id): """Test when there is a known zone.""" url = "/api/webhook/{}".format(webhook_id) data = {"latitude": HOME_LATITUDE, "longitude": HOME_LONGITUDE, "device": "123"} # Enter the Home req = await gpslogger_client.post(url, data=data) await hass.async_block_till_done() assert req.status == HTTP_OK state_name = hass.states.get( "{}.{}".format(DEVICE_TRACKER_DOMAIN, data["device"]) ).state assert STATE_HOME == state_name # Enter Home again req = await gpslogger_client.post(url, data=data) await hass.async_block_till_done() assert req.status == HTTP_OK state_name = hass.states.get( "{}.{}".format(DEVICE_TRACKER_DOMAIN, data["device"]) ).state assert STATE_HOME == state_name data["longitude"] = 0 data["latitude"] = 0 # Enter Somewhere else req = await gpslogger_client.post(url, data=data) await hass.async_block_till_done() assert req.status == HTTP_OK state_name = hass.states.get( "{}.{}".format(DEVICE_TRACKER_DOMAIN, data["device"]) ).state assert STATE_NOT_HOME == state_name dev_reg = await hass.helpers.device_registry.async_get_registry() assert len(dev_reg.devices) == 1 ent_reg = await hass.helpers.entity_registry.async_get_registry() assert 
len(ent_reg.entities) == 1 async def test_enter_with_attrs(hass, gpslogger_client, webhook_id): """Test when additional attributes are present.""" url = "/api/webhook/{}".format(webhook_id) data = { "latitude": 1.0, "longitude": 1.1, "device": "123", "accuracy": 10.5, "battery": 10, "speed": 100, "direction": 105.32, "altitude": 102, "provider": "gps", "activity": "running", } req = await gpslogger_client.post(url, data=data) await hass.async_block_till_done() assert req.status == HTTP_OK state = hass.states.get("{}.{}".format(DEVICE_TRACKER_DOMAIN, data["device"])) assert state.state == STATE_NOT_HOME assert state.attributes["gps_accuracy"] == 10.5 assert state.attributes["battery_level"] == 10.0 assert state.attributes["speed"] == 100.0 assert state.attributes["direction"] == 105.32 assert state.attributes["altitude"] == 102.0 assert state.attributes["provider"] == "gps" assert state.attributes["activity"] == "running" data = { "latitude": HOME_LATITUDE, "longitude": HOME_LONGITUDE, "device": "123", "accuracy": 123, "battery": 23, "speed": 23, "direction": 123, "altitude": 123, "provider": "gps", "activity": "idle", } req = await gpslogger_client.post(url, data=data) await hass.async_block_till_done() assert req.status == HTTP_OK state = hass.states.get("{}.{}".format(DEVICE_TRACKER_DOMAIN, data["device"])) assert state.state == STATE_HOME assert state.attributes["gps_accuracy"] == 123 assert state.attributes["battery_level"] == 23 assert state.attributes["speed"] == 23 assert state.attributes["direction"] == 123 assert state.attributes["altitude"] == 123 assert state.attributes["provider"] == "gps" assert state.attributes["activity"] == "idle" @pytest.mark.xfail( reason="The device_tracker component does not support unloading yet." 
) async def test_load_unload_entry(hass, gpslogger_client, webhook_id): """Test that the appropriate dispatch signals are added and removed.""" url = "/api/webhook/{}".format(webhook_id) data = {"latitude": HOME_LATITUDE, "longitude": HOME_LONGITUDE, "device": "123"} # Enter the Home req = await gpslogger_client.post(url, data=data) await hass.async_block_till_done() assert req.status == HTTP_OK state_name = hass.states.get( "{}.{}".format(DEVICE_TRACKER_DOMAIN, data["device"]) ).state assert STATE_HOME == state_name assert len(hass.data[DATA_DISPATCHER][TRACKER_UPDATE]) == 1 entry = hass.config_entries.async_entries(DOMAIN)[0] assert await gpslogger.async_unload_entry(hass, entry) await hass.async_block_till_done() assert not hass.data[DATA_DISPATCHER][TRACKER_UPDATE]
Cinntax/home-assistant
tests/components/gpslogger/test_init.py
homeassistant/components/zha/device_tracker.py
"""Support for Rain Bird Irrigation system LNK WiFi Module.""" import logging from pyrainbird import AvailableStations, RainbirdController import voluptuous as vol from homeassistant.components.switch import SwitchDevice from homeassistant.const import ATTR_ENTITY_ID, CONF_FRIENDLY_NAME, CONF_TRIGGER_TIME from homeassistant.helpers import config_validation as cv from . import CONF_ZONES, DATA_RAINBIRD, DOMAIN, RAINBIRD_CONTROLLER _LOGGER = logging.getLogger(__name__) ATTR_DURATION = "duration" SERVICE_START_IRRIGATION = "start_irrigation" SERVICE_SCHEMA_IRRIGATION = vol.Schema( { vol.Required(ATTR_ENTITY_ID): cv.entity_id, vol.Required(ATTR_DURATION): vol.All(vol.Coerce(float), vol.Range(min=0)), } ) def setup_platform(hass, config, add_entities, discovery_info=None): """Set up Rain Bird switches over a Rain Bird controller.""" if discovery_info is None: return controller: RainbirdController = hass.data[DATA_RAINBIRD][ discovery_info[RAINBIRD_CONTROLLER] ] available_stations: AvailableStations = controller.get_available_stations() if not (available_stations and available_stations.stations): return devices = [] for zone in range(1, available_stations.stations.count + 1): if available_stations.stations.active(zone): zone_config = discovery_info.get(CONF_ZONES, {}).get(zone, {}) time = zone_config.get(CONF_TRIGGER_TIME, discovery_info[CONF_TRIGGER_TIME]) name = zone_config.get(CONF_FRIENDLY_NAME) devices.append( RainBirdSwitch( controller, zone, time, name if name else "Sprinkler {}".format(zone), ) ) add_entities(devices, True) def start_irrigation(service): entity_id = service.data[ATTR_ENTITY_ID] duration = service.data[ATTR_DURATION] for device in devices: if device.entity_id == entity_id: device.turn_on(duration=duration) hass.services.register( DOMAIN, SERVICE_START_IRRIGATION, start_irrigation, schema=SERVICE_SCHEMA_IRRIGATION, ) class RainBirdSwitch(SwitchDevice): """Representation of a Rain Bird switch.""" def __init__(self, controller: RainbirdController, 
zone, time, name): """Initialize a Rain Bird Switch Device.""" self._rainbird = controller self._zone = zone self._name = name self._state = None self._duration = time self._attributes = {ATTR_DURATION: self._duration, "zone": self._zone} @property def device_state_attributes(self): """Return state attributes.""" return self._attributes @property def name(self): """Get the name of the switch.""" return self._name def turn_on(self, **kwargs): """Turn the switch on.""" if self._rainbird.irrigate_zone( int(self._zone), int(kwargs[ATTR_DURATION] if ATTR_DURATION in kwargs else self._duration), ): self._state = True def turn_off(self, **kwargs): """Turn the switch off.""" if self._rainbird.stop_irrigation(): self._state = False def update(self): """Update switch status.""" self._state = self._rainbird.get_zone_state(self._zone) @property def is_on(self): """Return true if switch is on.""" return self._state
"""The tests the for GPSLogger device tracker platform.""" from unittest.mock import Mock, patch import pytest from homeassistant import data_entry_flow from homeassistant.components import gpslogger, zone from homeassistant.components.device_tracker import DOMAIN as DEVICE_TRACKER_DOMAIN from homeassistant.components.gpslogger import DOMAIN, TRACKER_UPDATE from homeassistant.const import ( HTTP_OK, HTTP_UNPROCESSABLE_ENTITY, STATE_HOME, STATE_NOT_HOME, ) from homeassistant.helpers.dispatcher import DATA_DISPATCHER from homeassistant.setup import async_setup_component HOME_LATITUDE = 37.239622 HOME_LONGITUDE = -115.815811 # pylint: disable=redefined-outer-name @pytest.fixture(autouse=True) def mock_dev_track(mock_device_tracker_conf): """Mock device tracker config loading.""" pass @pytest.fixture async def gpslogger_client(loop, hass, aiohttp_client): """Mock client for GPSLogger (unauthenticated).""" assert await async_setup_component(hass, "persistent_notification", {}) assert await async_setup_component(hass, DOMAIN, {DOMAIN: {}}) await hass.async_block_till_done() with patch("homeassistant.components.device_tracker.legacy.update_config"): return await aiohttp_client(hass.http.app) @pytest.fixture(autouse=True) async def setup_zones(loop, hass): """Set up Zone config in HA.""" assert await async_setup_component( hass, zone.DOMAIN, { "zone": { "name": "Home", "latitude": HOME_LATITUDE, "longitude": HOME_LONGITUDE, "radius": 100, } }, ) await hass.async_block_till_done() @pytest.fixture async def webhook_id(hass, gpslogger_client): """Initialize the GPSLogger component and get the webhook_id.""" hass.config.api = Mock(base_url="http://example.com") result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": "user"} ) assert result["type"] == data_entry_flow.RESULT_TYPE_FORM, result result = await hass.config_entries.flow.async_configure(result["flow_id"], {}) assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY await 
hass.async_block_till_done() return result["result"].data["webhook_id"] async def test_missing_data(hass, gpslogger_client, webhook_id): """Test missing data.""" url = "/api/webhook/{}".format(webhook_id) data = {"latitude": 1.0, "longitude": 1.1, "device": "123"} # No data req = await gpslogger_client.post(url) await hass.async_block_till_done() assert req.status == HTTP_UNPROCESSABLE_ENTITY # No latitude copy = data.copy() del copy["latitude"] req = await gpslogger_client.post(url, data=copy) await hass.async_block_till_done() assert req.status == HTTP_UNPROCESSABLE_ENTITY # No device copy = data.copy() del copy["device"] req = await gpslogger_client.post(url, data=copy) await hass.async_block_till_done() assert req.status == HTTP_UNPROCESSABLE_ENTITY async def test_enter_and_exit(hass, gpslogger_client, webhook_id): """Test when there is a known zone.""" url = "/api/webhook/{}".format(webhook_id) data = {"latitude": HOME_LATITUDE, "longitude": HOME_LONGITUDE, "device": "123"} # Enter the Home req = await gpslogger_client.post(url, data=data) await hass.async_block_till_done() assert req.status == HTTP_OK state_name = hass.states.get( "{}.{}".format(DEVICE_TRACKER_DOMAIN, data["device"]) ).state assert STATE_HOME == state_name # Enter Home again req = await gpslogger_client.post(url, data=data) await hass.async_block_till_done() assert req.status == HTTP_OK state_name = hass.states.get( "{}.{}".format(DEVICE_TRACKER_DOMAIN, data["device"]) ).state assert STATE_HOME == state_name data["longitude"] = 0 data["latitude"] = 0 # Enter Somewhere else req = await gpslogger_client.post(url, data=data) await hass.async_block_till_done() assert req.status == HTTP_OK state_name = hass.states.get( "{}.{}".format(DEVICE_TRACKER_DOMAIN, data["device"]) ).state assert STATE_NOT_HOME == state_name dev_reg = await hass.helpers.device_registry.async_get_registry() assert len(dev_reg.devices) == 1 ent_reg = await hass.helpers.entity_registry.async_get_registry() assert 
len(ent_reg.entities) == 1 async def test_enter_with_attrs(hass, gpslogger_client, webhook_id): """Test when additional attributes are present.""" url = "/api/webhook/{}".format(webhook_id) data = { "latitude": 1.0, "longitude": 1.1, "device": "123", "accuracy": 10.5, "battery": 10, "speed": 100, "direction": 105.32, "altitude": 102, "provider": "gps", "activity": "running", } req = await gpslogger_client.post(url, data=data) await hass.async_block_till_done() assert req.status == HTTP_OK state = hass.states.get("{}.{}".format(DEVICE_TRACKER_DOMAIN, data["device"])) assert state.state == STATE_NOT_HOME assert state.attributes["gps_accuracy"] == 10.5 assert state.attributes["battery_level"] == 10.0 assert state.attributes["speed"] == 100.0 assert state.attributes["direction"] == 105.32 assert state.attributes["altitude"] == 102.0 assert state.attributes["provider"] == "gps" assert state.attributes["activity"] == "running" data = { "latitude": HOME_LATITUDE, "longitude": HOME_LONGITUDE, "device": "123", "accuracy": 123, "battery": 23, "speed": 23, "direction": 123, "altitude": 123, "provider": "gps", "activity": "idle", } req = await gpslogger_client.post(url, data=data) await hass.async_block_till_done() assert req.status == HTTP_OK state = hass.states.get("{}.{}".format(DEVICE_TRACKER_DOMAIN, data["device"])) assert state.state == STATE_HOME assert state.attributes["gps_accuracy"] == 123 assert state.attributes["battery_level"] == 23 assert state.attributes["speed"] == 23 assert state.attributes["direction"] == 123 assert state.attributes["altitude"] == 123 assert state.attributes["provider"] == "gps" assert state.attributes["activity"] == "idle" @pytest.mark.xfail( reason="The device_tracker component does not support unloading yet." 
) async def test_load_unload_entry(hass, gpslogger_client, webhook_id): """Test that the appropriate dispatch signals are added and removed.""" url = "/api/webhook/{}".format(webhook_id) data = {"latitude": HOME_LATITUDE, "longitude": HOME_LONGITUDE, "device": "123"} # Enter the Home req = await gpslogger_client.post(url, data=data) await hass.async_block_till_done() assert req.status == HTTP_OK state_name = hass.states.get( "{}.{}".format(DEVICE_TRACKER_DOMAIN, data["device"]) ).state assert STATE_HOME == state_name assert len(hass.data[DATA_DISPATCHER][TRACKER_UPDATE]) == 1 entry = hass.config_entries.async_entries(DOMAIN)[0] assert await gpslogger.async_unload_entry(hass, entry) await hass.async_block_till_done() assert not hass.data[DATA_DISPATCHER][TRACKER_UPDATE]
Cinntax/home-assistant
tests/components/gpslogger/test_init.py
homeassistant/components/rainbird/switch.py
"""Manage config entries in Home Assistant.""" import asyncio import logging import functools import uuid from typing import Any, Callable, List, Optional, Set import weakref import attr from homeassistant import data_entry_flow, loader from homeassistant.core import callback, HomeAssistant from homeassistant.exceptions import HomeAssistantError, ConfigEntryNotReady from homeassistant.setup import async_setup_component, async_process_deps_reqs from homeassistant.util.decorator import Registry from homeassistant.helpers import entity_registry # mypy: allow-untyped-defs _LOGGER = logging.getLogger(__name__) _UNDEF = object() SOURCE_USER = "user" SOURCE_DISCOVERY = "discovery" SOURCE_IMPORT = "import" HANDLERS = Registry() STORAGE_KEY = "core.config_entries" STORAGE_VERSION = 1 # Deprecated since 0.73 PATH_CONFIG = ".config_entries.json" SAVE_DELAY = 1 # The config entry has been set up successfully ENTRY_STATE_LOADED = "loaded" # There was an error while trying to set up this config entry ENTRY_STATE_SETUP_ERROR = "setup_error" # There was an error while trying to migrate the config entry to a new version ENTRY_STATE_MIGRATION_ERROR = "migration_error" # The config entry was not ready to be set up yet, but might be later ENTRY_STATE_SETUP_RETRY = "setup_retry" # The config entry has not been loaded ENTRY_STATE_NOT_LOADED = "not_loaded" # An error occurred when trying to unload the entry ENTRY_STATE_FAILED_UNLOAD = "failed_unload" UNRECOVERABLE_STATES = (ENTRY_STATE_MIGRATION_ERROR, ENTRY_STATE_FAILED_UNLOAD) DISCOVERY_NOTIFICATION_ID = "config_entry_discovery" DISCOVERY_SOURCES = ("ssdp", "zeroconf", SOURCE_DISCOVERY, SOURCE_IMPORT) EVENT_FLOW_DISCOVERED = "config_entry_discovered" CONN_CLASS_CLOUD_PUSH = "cloud_push" CONN_CLASS_CLOUD_POLL = "cloud_poll" CONN_CLASS_LOCAL_PUSH = "local_push" CONN_CLASS_LOCAL_POLL = "local_poll" CONN_CLASS_ASSUMED = "assumed" CONN_CLASS_UNKNOWN = "unknown" class ConfigError(HomeAssistantError): """Error while configuring an account.""" 
class UnknownEntry(ConfigError): """Unknown entry specified.""" class OperationNotAllowed(ConfigError): """Raised when a config entry operation is not allowed.""" class ConfigEntry: """Hold a configuration entry.""" __slots__ = ( "entry_id", "version", "domain", "title", "data", "options", "system_options", "source", "connection_class", "state", "_setup_lock", "update_listeners", "_async_cancel_retry_setup", ) def __init__( self, version: int, domain: str, title: str, data: dict, source: str, connection_class: str, system_options: dict, options: Optional[dict] = None, entry_id: Optional[str] = None, state: str = ENTRY_STATE_NOT_LOADED, ) -> None: """Initialize a config entry.""" # Unique id of the config entry self.entry_id = entry_id or uuid.uuid4().hex # Version of the configuration. self.version = version # Domain the configuration belongs to self.domain = domain # Title of the configuration self.title = title # Config data self.data = data # Entry options self.options = options or {} # Entry system options self.system_options = SystemOptions(**system_options) # Source of the configuration (user, discovery, cloud) self.source = source # Connection class self.connection_class = connection_class # State of the entry (LOADED, NOT_LOADED) self.state = state # Listeners to call on update self.update_listeners: List = [] # Function to cancel a scheduled retry self._async_cancel_retry_setup: Optional[Callable[[], Any]] = None async def async_setup( self, hass: HomeAssistant, *, integration: Optional[loader.Integration] = None, tries: int = 0, ) -> None: """Set up an entry.""" if integration is None: integration = await loader.async_get_integration(hass, self.domain) try: component = integration.get_component() except ImportError as err: _LOGGER.error( "Error importing integration %s to set up %s config entry: %s", integration.domain, self.domain, err, ) if self.domain == integration.domain: self.state = ENTRY_STATE_SETUP_ERROR return if self.domain == 
integration.domain: try: integration.get_platform("config_flow") except ImportError as err: _LOGGER.error( "Error importing platform config_flow from integration %s to set up %s config entry: %s", integration.domain, self.domain, err, ) self.state = ENTRY_STATE_SETUP_ERROR return # Perform migration if not await self.async_migrate(hass): self.state = ENTRY_STATE_MIGRATION_ERROR return try: result = await component.async_setup_entry( # type: ignore hass, self ) if not isinstance(result, bool): _LOGGER.error( "%s.async_setup_entry did not return boolean", integration.domain ) result = False except ConfigEntryNotReady: self.state = ENTRY_STATE_SETUP_RETRY wait_time = 2 ** min(tries, 4) * 5 tries += 1 _LOGGER.warning( "Config entry for %s not ready yet. Retrying in %d seconds.", self.domain, wait_time, ) async def setup_again(now): """Run setup again.""" self._async_cancel_retry_setup = None await self.async_setup(hass, integration=integration, tries=tries) self._async_cancel_retry_setup = hass.helpers.event.async_call_later( wait_time, setup_again ) return except Exception: # pylint: disable=broad-except _LOGGER.exception( "Error setting up entry %s for %s", self.title, integration.domain ) result = False # Only store setup result as state if it was not forwarded. if self.domain != integration.domain: return if result: self.state = ENTRY_STATE_LOADED else: self.state = ENTRY_STATE_SETUP_ERROR async def async_unload( self, hass: HomeAssistant, *, integration: Optional[loader.Integration] = None ) -> bool: """Unload an entry. Returns if unload is possible and was successful. 
""" if integration is None: integration = await loader.async_get_integration(hass, self.domain) component = integration.get_component() if integration.domain == self.domain: if self.state in UNRECOVERABLE_STATES: return False if self.state != ENTRY_STATE_LOADED: if self._async_cancel_retry_setup is not None: self._async_cancel_retry_setup() self._async_cancel_retry_setup = None self.state = ENTRY_STATE_NOT_LOADED return True supports_unload = hasattr(component, "async_unload_entry") if not supports_unload: if integration.domain == self.domain: self.state = ENTRY_STATE_FAILED_UNLOAD return False try: result = await component.async_unload_entry( # type: ignore hass, self ) assert isinstance(result, bool) # Only adjust state if we unloaded the component if result and integration.domain == self.domain: self.state = ENTRY_STATE_NOT_LOADED return result except Exception: # pylint: disable=broad-except _LOGGER.exception( "Error unloading entry %s for %s", self.title, integration.domain ) if integration.domain == self.domain: self.state = ENTRY_STATE_FAILED_UNLOAD return False async def async_remove(self, hass: HomeAssistant) -> None: """Invoke remove callback on component.""" integration = await loader.async_get_integration(hass, self.domain) component = integration.get_component() if not hasattr(component, "async_remove_entry"): return try: await component.async_remove_entry( # type: ignore hass, self ) except Exception: # pylint: disable=broad-except _LOGGER.exception( "Error calling entry remove callback %s for %s", self.title, integration.domain, ) async def async_migrate(self, hass: HomeAssistant) -> bool: """Migrate an entry. Returns True if config entry is up-to-date or has been migrated. 
""" handler = HANDLERS.get(self.domain) if handler is None: _LOGGER.error( "Flow handler not found for entry %s for %s", self.title, self.domain ) return False # Handler may be a partial while isinstance(handler, functools.partial): handler = handler.func if self.version == handler.VERSION: return True integration = await loader.async_get_integration(hass, self.domain) component = integration.get_component() supports_migrate = hasattr(component, "async_migrate_entry") if not supports_migrate: _LOGGER.error( "Migration handler not found for entry %s for %s", self.title, self.domain, ) return False try: result = await component.async_migrate_entry( # type: ignore hass, self ) if not isinstance(result, bool): _LOGGER.error( "%s.async_migrate_entry did not return boolean", self.domain ) return False if result: # pylint: disable=protected-access hass.config_entries._async_schedule_save() # type: ignore return result except Exception: # pylint: disable=broad-except _LOGGER.exception( "Error migrating entry %s for %s", self.title, self.domain ) return False def add_update_listener(self, listener: Callable) -> Callable: """Listen for when entry is updated. Listener: Callback function(hass, entry) Returns function to unlisten. """ weak_listener = weakref.ref(listener) self.update_listeners.append(weak_listener) return lambda: self.update_listeners.remove(weak_listener) def as_dict(self): """Return dictionary version of this entry.""" return { "entry_id": self.entry_id, "version": self.version, "domain": self.domain, "title": self.title, "data": self.data, "options": self.options, "system_options": self.system_options.as_dict(), "source": self.source, "connection_class": self.connection_class, } class ConfigEntries: """Manage the configuration entries. An instance of this object is available via `hass.config_entries`. 
""" def __init__(self, hass: HomeAssistant, hass_config: dict) -> None: """Initialize the entry manager.""" self.hass = hass self.flow = data_entry_flow.FlowManager( hass, self._async_create_flow, self._async_finish_flow ) self.options = OptionsFlowManager(hass) self._hass_config = hass_config self._entries: List[ConfigEntry] = [] self._store = hass.helpers.storage.Store(STORAGE_VERSION, STORAGE_KEY) EntityRegistryDisabledHandler(hass).async_setup() @callback def async_domains(self) -> List[str]: """Return domains for which we have entries.""" seen: Set[str] = set() result = [] for entry in self._entries: if entry.domain not in seen: seen.add(entry.domain) result.append(entry.domain) return result @callback def async_get_entry(self, entry_id: str) -> Optional[ConfigEntry]: """Return entry with matching entry_id.""" for entry in self._entries: if entry_id == entry.entry_id: return entry return None @callback def async_entries(self, domain: Optional[str] = None) -> List[ConfigEntry]: """Return all entries or entries for a specific domain.""" if domain is None: return list(self._entries) return [entry for entry in self._entries if entry.domain == domain] async def async_remove(self, entry_id): """Remove an entry.""" entry = self.async_get_entry(entry_id) if entry is None: raise UnknownEntry if entry.state in UNRECOVERABLE_STATES: unload_success = entry.state != ENTRY_STATE_FAILED_UNLOAD else: unload_success = await self.async_unload(entry_id) await entry.async_remove(self.hass) self._entries.remove(entry) self._async_schedule_save() dev_reg, ent_reg = await asyncio.gather( self.hass.helpers.device_registry.async_get_registry(), self.hass.helpers.entity_registry.async_get_registry(), ) dev_reg.async_clear_config_entry(entry_id) ent_reg.async_clear_config_entry(entry_id) return {"require_restart": not unload_success} async def async_initialize(self) -> None: """Initialize config entry config.""" # Migrating for config entries stored before 0.73 config = await 
self.hass.helpers.storage.async_migrator( self.hass.config.path(PATH_CONFIG), self._store, old_conf_migrate_func=_old_conf_migrator, ) if config is None: self._entries = [] return self._entries = [ ConfigEntry( version=entry["version"], domain=entry["domain"], entry_id=entry["entry_id"], data=entry["data"], source=entry["source"], title=entry["title"], # New in 0.79 connection_class=entry.get("connection_class", CONN_CLASS_UNKNOWN), # New in 0.89 options=entry.get("options"), # New in 0.98 system_options=entry.get("system_options", {}), ) for entry in config["entries"] ] async def async_setup(self, entry_id: str) -> bool: """Set up a config entry. Return True if entry has been successfully loaded. """ entry = self.async_get_entry(entry_id) if entry is None: raise UnknownEntry if entry.state != ENTRY_STATE_NOT_LOADED: raise OperationNotAllowed # Setup Component if not set up yet if entry.domain in self.hass.config.components: await entry.async_setup(self.hass) else: # Setting up the component will set up all its config entries result = await async_setup_component( self.hass, entry.domain, self._hass_config ) if not result: return result return entry.state == ENTRY_STATE_LOADED async def async_unload(self, entry_id: str) -> bool: """Unload a config entry.""" entry = self.async_get_entry(entry_id) if entry is None: raise UnknownEntry if entry.state in UNRECOVERABLE_STATES: raise OperationNotAllowed return await entry.async_unload(self.hass) async def async_reload(self, entry_id: str) -> bool: """Reload an entry. If an entry was not loaded, will just load. 
""" unload_result = await self.async_unload(entry_id) if not unload_result: return unload_result return await self.async_setup(entry_id) @callback def async_update_entry( self, entry, *, data=_UNDEF, options=_UNDEF, system_options=_UNDEF ): """Update a config entry.""" if data is not _UNDEF: entry.data = data if options is not _UNDEF: entry.options = options if system_options is not _UNDEF: entry.system_options.update(**system_options) for listener_ref in entry.update_listeners: listener = listener_ref() self.hass.async_create_task(listener(self.hass, entry)) self._async_schedule_save() async def async_forward_entry_setup(self, entry, domain): """Forward the setup of an entry to a different component. By default an entry is setup with the component it belongs to. If that component also has related platforms, the component will have to forward the entry to be setup by that component. You don't want to await this coroutine if it is called as part of the setup of a component, because it can cause a deadlock. """ # Setup Component if not set up yet if domain not in self.hass.config.components: result = await async_setup_component(self.hass, domain, self._hass_config) if not result: return False integration = await loader.async_get_integration(self.hass, domain) await entry.async_setup(self.hass, integration=integration) async def async_forward_entry_unload(self, entry, domain): """Forward the unloading of an entry to a different component.""" # It was never loaded. 
if domain not in self.hass.config.components: return True integration = await loader.async_get_integration(self.hass, domain) return await entry.async_unload(self.hass, integration=integration) async def _async_finish_flow(self, flow, result): """Finish a config flow and add an entry.""" # Remove notification if no other discovery config entries in progress if not any( ent["context"]["source"] in DISCOVERY_SOURCES for ent in self.hass.config_entries.flow.async_progress() if ent["flow_id"] != flow.flow_id ): self.hass.components.persistent_notification.async_dismiss( DISCOVERY_NOTIFICATION_ID ) if result["type"] != data_entry_flow.RESULT_TYPE_CREATE_ENTRY: return result entry = ConfigEntry( version=result["version"], domain=result["handler"], title=result["title"], data=result["data"], options={}, system_options={}, source=flow.context["source"], connection_class=flow.CONNECTION_CLASS, ) self._entries.append(entry) self._async_schedule_save() await self.async_setup(entry.entry_id) result["result"] = entry return result async def _async_create_flow(self, handler_key, *, context, data): """Create a flow for specified handler. Handler key is the domain of the component that we want to set up. """ try: integration = await loader.async_get_integration(self.hass, handler_key) except loader.IntegrationNotFound: _LOGGER.error("Cannot find integration %s", handler_key) raise data_entry_flow.UnknownHandler # Make sure requirements and dependencies of component are resolved await async_process_deps_reqs(self.hass, self._hass_config, integration) try: integration.get_platform("config_flow") except ImportError as err: _LOGGER.error( "Error occurred loading config flow for integration %s: %s", handler_key, err, ) raise data_entry_flow.UnknownHandler handler = HANDLERS.get(handler_key) if handler is None: raise data_entry_flow.UnknownHandler source = context["source"] # Create notification. 
if source in DISCOVERY_SOURCES: self.hass.bus.async_fire(EVENT_FLOW_DISCOVERED) self.hass.components.persistent_notification.async_create( title="New devices discovered", message=( "We have discovered new devices on your network. " "[Check it out](/config/integrations)" ), notification_id=DISCOVERY_NOTIFICATION_ID, ) flow = handler() flow.init_step = source return flow def _async_schedule_save(self) -> None: """Save the entity registry to a file.""" self._store.async_delay_save(self._data_to_save, SAVE_DELAY) @callback def _data_to_save(self): """Return data to save.""" return {"entries": [entry.as_dict() for entry in self._entries]} async def _old_conf_migrator(old_config): """Migrate the pre-0.73 config format to the latest version.""" return {"entries": old_config} class ConfigFlow(data_entry_flow.FlowHandler): """Base class for config flows with some helpers.""" def __init_subclass__(cls, domain=None, **kwargs): """Initialize a subclass, register if possible.""" super().__init_subclass__(**kwargs) # type: ignore if domain is not None: HANDLERS.register(domain)(cls) CONNECTION_CLASS = CONN_CLASS_UNKNOWN @staticmethod @callback def async_get_options_flow(config_entry): """Get the options flow for this handler.""" raise data_entry_flow.UnknownHandler @callback def _async_current_entries(self): """Return current entries.""" return self.hass.config_entries.async_entries(self.handler) @callback def _async_in_progress(self): """Return other in progress flows for current domain.""" return [ flw for flw in self.hass.config_entries.flow.async_progress() if flw["handler"] == self.handler and flw["flow_id"] != self.flow_id ] class OptionsFlowManager: """Flow to set options for a configuration entry.""" def __init__(self, hass: HomeAssistant) -> None: """Initialize the options manager.""" self.hass = hass self.flow = data_entry_flow.FlowManager( hass, self._async_create_flow, self._async_finish_flow ) async def _async_create_flow(self, entry_id, *, context, data): """Create 
an options flow for a config entry. Entry_id and flow.handler is the same thing to map entry with flow. """ entry = self.hass.config_entries.async_get_entry(entry_id) if entry is None: return if entry.domain not in HANDLERS: raise data_entry_flow.UnknownHandler flow = HANDLERS[entry.domain].async_get_options_flow(entry) return flow async def _async_finish_flow(self, flow, result): """Finish an options flow and update options for configuration entry. Flow.handler and entry_id is the same thing to map flow with entry. """ entry = self.hass.config_entries.async_get_entry(flow.handler) if entry is None: return self.hass.config_entries.async_update_entry(entry, options=result["data"]) result["result"] = True return result class OptionsFlow(data_entry_flow.FlowHandler): """Base class for config option flows.""" pass @attr.s(slots=True) class SystemOptions: """Config entry system options.""" disable_new_entities = attr.ib(type=bool, default=False) def update(self, *, disable_new_entities): """Update properties.""" self.disable_new_entities = disable_new_entities def as_dict(self): """Return dictionary version of this config entrys system options.""" return {"disable_new_entities": self.disable_new_entities} class EntityRegistryDisabledHandler: """Handler to handle when entities related to config entries updating disabled_by.""" RELOAD_AFTER_UPDATE_DELAY = 30 def __init__(self, hass: HomeAssistant) -> None: """Initialize the handler.""" self.hass = hass self.registry: Optional[entity_registry.EntityRegistry] = None self.changed: Set[str] = set() self._remove_call_later: Optional[Callable[[], None]] = None @callback def async_setup(self) -> None: """Set up the disable handler.""" self.hass.bus.async_listen( entity_registry.EVENT_ENTITY_REGISTRY_UPDATED, self._handle_entry_updated ) async def _handle_entry_updated(self, event): """Handle entity registry entry update.""" if ( event.data["action"] != "update" or "disabled_by" not in event.data["changes"] ): return if 
self.registry is None: self.registry = await entity_registry.async_get_registry(self.hass) entity_entry = self.registry.async_get(event.data["entity_id"]) if ( # Stop if no entry found entity_entry is None # Stop if entry not connected to config entry or entity_entry.config_entry_id is None # Stop if the entry got disabled. In that case the entity handles it # themselves. or entity_entry.disabled_by ): return config_entry = self.hass.config_entries.async_get_entry( entity_entry.config_entry_id ) if config_entry.entry_id not in self.changed and await support_entry_unload( self.hass, config_entry.domain ): self.changed.add(config_entry.entry_id) if not self.changed: return # We are going to delay reloading on *every* entity registry change so that # if a user is happily clicking along, it will only reload at the end. if self._remove_call_later: self._remove_call_later() self._remove_call_later = self.hass.helpers.event.async_call_later( self.RELOAD_AFTER_UPDATE_DELAY, self._handle_reload ) async def _handle_reload(self, _now): """Handle a reload.""" self._remove_call_later = None to_reload = self.changed self.changed = set() _LOGGER.info( "Reloading config entries because disabled_by changed in entity registry: %s", ", ".join(self.changed), ) await asyncio.gather( *[self.hass.config_entries.async_reload(entry_id) for entry_id in to_reload] ) async def support_entry_unload(hass: HomeAssistant, domain: str) -> bool: """Test if a domain supports entry unloading.""" integration = await loader.async_get_integration(hass, domain) component = integration.get_component() return hasattr(component, "async_unload_entry")
"""The tests the for GPSLogger device tracker platform.""" from unittest.mock import Mock, patch import pytest from homeassistant import data_entry_flow from homeassistant.components import gpslogger, zone from homeassistant.components.device_tracker import DOMAIN as DEVICE_TRACKER_DOMAIN from homeassistant.components.gpslogger import DOMAIN, TRACKER_UPDATE from homeassistant.const import ( HTTP_OK, HTTP_UNPROCESSABLE_ENTITY, STATE_HOME, STATE_NOT_HOME, ) from homeassistant.helpers.dispatcher import DATA_DISPATCHER from homeassistant.setup import async_setup_component HOME_LATITUDE = 37.239622 HOME_LONGITUDE = -115.815811 # pylint: disable=redefined-outer-name @pytest.fixture(autouse=True) def mock_dev_track(mock_device_tracker_conf): """Mock device tracker config loading.""" pass @pytest.fixture async def gpslogger_client(loop, hass, aiohttp_client): """Mock client for GPSLogger (unauthenticated).""" assert await async_setup_component(hass, "persistent_notification", {}) assert await async_setup_component(hass, DOMAIN, {DOMAIN: {}}) await hass.async_block_till_done() with patch("homeassistant.components.device_tracker.legacy.update_config"): return await aiohttp_client(hass.http.app) @pytest.fixture(autouse=True) async def setup_zones(loop, hass): """Set up Zone config in HA.""" assert await async_setup_component( hass, zone.DOMAIN, { "zone": { "name": "Home", "latitude": HOME_LATITUDE, "longitude": HOME_LONGITUDE, "radius": 100, } }, ) await hass.async_block_till_done() @pytest.fixture async def webhook_id(hass, gpslogger_client): """Initialize the GPSLogger component and get the webhook_id.""" hass.config.api = Mock(base_url="http://example.com") result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": "user"} ) assert result["type"] == data_entry_flow.RESULT_TYPE_FORM, result result = await hass.config_entries.flow.async_configure(result["flow_id"], {}) assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY await 
hass.async_block_till_done() return result["result"].data["webhook_id"] async def test_missing_data(hass, gpslogger_client, webhook_id): """Test missing data.""" url = "/api/webhook/{}".format(webhook_id) data = {"latitude": 1.0, "longitude": 1.1, "device": "123"} # No data req = await gpslogger_client.post(url) await hass.async_block_till_done() assert req.status == HTTP_UNPROCESSABLE_ENTITY # No latitude copy = data.copy() del copy["latitude"] req = await gpslogger_client.post(url, data=copy) await hass.async_block_till_done() assert req.status == HTTP_UNPROCESSABLE_ENTITY # No device copy = data.copy() del copy["device"] req = await gpslogger_client.post(url, data=copy) await hass.async_block_till_done() assert req.status == HTTP_UNPROCESSABLE_ENTITY async def test_enter_and_exit(hass, gpslogger_client, webhook_id): """Test when there is a known zone.""" url = "/api/webhook/{}".format(webhook_id) data = {"latitude": HOME_LATITUDE, "longitude": HOME_LONGITUDE, "device": "123"} # Enter the Home req = await gpslogger_client.post(url, data=data) await hass.async_block_till_done() assert req.status == HTTP_OK state_name = hass.states.get( "{}.{}".format(DEVICE_TRACKER_DOMAIN, data["device"]) ).state assert STATE_HOME == state_name # Enter Home again req = await gpslogger_client.post(url, data=data) await hass.async_block_till_done() assert req.status == HTTP_OK state_name = hass.states.get( "{}.{}".format(DEVICE_TRACKER_DOMAIN, data["device"]) ).state assert STATE_HOME == state_name data["longitude"] = 0 data["latitude"] = 0 # Enter Somewhere else req = await gpslogger_client.post(url, data=data) await hass.async_block_till_done() assert req.status == HTTP_OK state_name = hass.states.get( "{}.{}".format(DEVICE_TRACKER_DOMAIN, data["device"]) ).state assert STATE_NOT_HOME == state_name dev_reg = await hass.helpers.device_registry.async_get_registry() assert len(dev_reg.devices) == 1 ent_reg = await hass.helpers.entity_registry.async_get_registry() assert 
len(ent_reg.entities) == 1 async def test_enter_with_attrs(hass, gpslogger_client, webhook_id): """Test when additional attributes are present.""" url = "/api/webhook/{}".format(webhook_id) data = { "latitude": 1.0, "longitude": 1.1, "device": "123", "accuracy": 10.5, "battery": 10, "speed": 100, "direction": 105.32, "altitude": 102, "provider": "gps", "activity": "running", } req = await gpslogger_client.post(url, data=data) await hass.async_block_till_done() assert req.status == HTTP_OK state = hass.states.get("{}.{}".format(DEVICE_TRACKER_DOMAIN, data["device"])) assert state.state == STATE_NOT_HOME assert state.attributes["gps_accuracy"] == 10.5 assert state.attributes["battery_level"] == 10.0 assert state.attributes["speed"] == 100.0 assert state.attributes["direction"] == 105.32 assert state.attributes["altitude"] == 102.0 assert state.attributes["provider"] == "gps" assert state.attributes["activity"] == "running" data = { "latitude": HOME_LATITUDE, "longitude": HOME_LONGITUDE, "device": "123", "accuracy": 123, "battery": 23, "speed": 23, "direction": 123, "altitude": 123, "provider": "gps", "activity": "idle", } req = await gpslogger_client.post(url, data=data) await hass.async_block_till_done() assert req.status == HTTP_OK state = hass.states.get("{}.{}".format(DEVICE_TRACKER_DOMAIN, data["device"])) assert state.state == STATE_HOME assert state.attributes["gps_accuracy"] == 123 assert state.attributes["battery_level"] == 23 assert state.attributes["speed"] == 23 assert state.attributes["direction"] == 123 assert state.attributes["altitude"] == 123 assert state.attributes["provider"] == "gps" assert state.attributes["activity"] == "idle" @pytest.mark.xfail( reason="The device_tracker component does not support unloading yet." 
) async def test_load_unload_entry(hass, gpslogger_client, webhook_id): """Test that the appropriate dispatch signals are added and removed.""" url = "/api/webhook/{}".format(webhook_id) data = {"latitude": HOME_LATITUDE, "longitude": HOME_LONGITUDE, "device": "123"} # Enter the Home req = await gpslogger_client.post(url, data=data) await hass.async_block_till_done() assert req.status == HTTP_OK state_name = hass.states.get( "{}.{}".format(DEVICE_TRACKER_DOMAIN, data["device"]) ).state assert STATE_HOME == state_name assert len(hass.data[DATA_DISPATCHER][TRACKER_UPDATE]) == 1 entry = hass.config_entries.async_entries(DOMAIN)[0] assert await gpslogger.async_unload_entry(hass, entry) await hass.async_block_till_done() assert not hass.data[DATA_DISPATCHER][TRACKER_UPDATE]
Cinntax/home-assistant
tests/components/gpslogger/test_init.py
homeassistant/config_entries.py
from __future__ import absolute_import import textwrap def _create_test_package_submodule(env): env.scratch_path.join("version_pkg_submodule").mkdir() submodule_path = env.scratch_path / 'version_pkg_submodule' env.run('touch', 'testfile', cwd=submodule_path) env.run('git', 'init', cwd=submodule_path) env.run('git', 'add', '.', cwd=submodule_path) env.run('git', 'commit', '-q', '--author', 'pip <pypa-dev@googlegroups.com>', '-am', 'initial version / submodule', cwd=submodule_path) return submodule_path def _change_test_package_submodule(env, submodule_path): submodule_path.join("testfile").write("this is a changed file") submodule_path.join("testfile2").write("this is an added file") env.run('git', 'add', '.', cwd=submodule_path) env.run('git', 'commit', '-q', '--author', 'pip <pypa-dev@googlegroups.com>', '-am', 'submodule change', cwd=submodule_path) def _pull_in_submodule_changes_to_module(env, module_path): env.run( 'git', 'pull', '-q', 'origin', 'master', cwd=module_path / 'testpkg/static/', ) env.run('git', 'commit', '-q', '--author', 'pip <pypa-dev@googlegroups.com>', '-am', 'submodule change', cwd=module_path) def _create_test_package_with_submodule(env): env.scratch_path.join("version_pkg").mkdir() version_pkg_path = env.scratch_path / 'version_pkg' version_pkg_path.join("testpkg").mkdir() pkg_path = version_pkg_path / 'testpkg' pkg_path.join("__init__.py").write("# hello there") pkg_path.join("version_pkg.py").write(textwrap.dedent('''\ def main(): print('0.1') ''')) version_pkg_path.join("setup.py").write(textwrap.dedent('''\ from setuptools import setup, find_packages setup(name='version_pkg', version='0.1', packages=find_packages(), ) ''')) env.run('git', 'init', cwd=version_pkg_path, expect_error=True) env.run('git', 'add', '.', cwd=version_pkg_path, expect_error=True) env.run('git', 'commit', '-q', '--author', 'pip <pypa-dev@googlegroups.com>', '-am', 'initial version', cwd=version_pkg_path, expect_error=True) submodule_path = 
_create_test_package_submodule(env) env.run( 'git', 'submodule', 'add', submodule_path, 'testpkg/static', cwd=version_pkg_path, expect_error=True, ) env.run('git', 'commit', '-q', '--author', 'pip <pypa-dev@googlegroups.com>', '-am', 'initial version w submodule', cwd=version_pkg_path, expect_error=True) return version_pkg_path, submodule_path
import os import textwrap import glob from os.path import join, curdir, pardir import pytest from pip.utils import rmtree from tests.lib import pyversion from tests.lib.local_repos import local_checkout from tests.lib.path import Path def test_without_setuptools(script): script.run("pip", "uninstall", "setuptools", "-y") result = script.run( "python", "-c", "import pip; pip.main(['install', 'INITools==0.2', '--no-use-wheel'])", expect_error=True, ) assert ( "setuptools must be installed to install from a source distribution" in result.stdout ) def test_pip_second_command_line_interface_works(script): """ Check if ``pip<PYVERSION>`` commands behaves equally """ args = ['pip%s' % pyversion] args.extend(['install', 'INITools==0.2']) result = script.run(*args) egg_info_folder = ( script.site_packages / 'INITools-0.2-py%s.egg-info' % pyversion ) initools_folder = script.site_packages / 'initools' assert egg_info_folder in result.files_created, str(result) assert initools_folder in result.files_created, str(result) def test_install_from_pypi(script): """ Test installing a package from PyPI. """ result = script.pip('install', '-vvv', 'INITools==0.2') egg_info_folder = ( script.site_packages / 'INITools-0.2-py%s.egg-info' % pyversion ) initools_folder = script.site_packages / 'initools' assert egg_info_folder in result.files_created, str(result) assert initools_folder in result.files_created, str(result) def test_editable_install(script): """ Test editable installation. """ result = script.pip('install', '-e', 'INITools==0.2', expect_error=True) assert ( "INITools==0.2 should either be a path to a local project or a VCS url" in result.stdout ) assert not result.files_created assert not result.files_updated def test_install_editable_from_svn(script, tmpdir): """ Test checking out from svn. 
""" result = script.pip( 'install', '-e', '%s#egg=initools-dev' % local_checkout( 'svn+http://svn.colorstudy.com/INITools/trunk', tmpdir.join("cache") ) ) result.assert_installed('INITools', with_files=['.svn']) def test_download_editable_to_custom_path(script, tmpdir): """ Test downloading an editable using a relative custom src folder. """ script.scratch_path.join("customdl").mkdir() result = script.pip( 'install', '-e', '%s#egg=initools-dev' % local_checkout( 'svn+http://svn.colorstudy.com/INITools/trunk', tmpdir.join("cache") ), '--src', 'customsrc', '--download', 'customdl', ) customsrc = Path('scratch') / 'customsrc' / 'initools' assert customsrc in result.files_created, ( sorted(result.files_created.keys()) ) assert customsrc / 'setup.py' in result.files_created, ( sorted(result.files_created.keys()) ) customdl = Path('scratch') / 'customdl' / 'initools' customdl_files_created = [ filename for filename in result.files_created if filename.startswith(customdl) ] assert customdl_files_created def test_editable_no_install_followed_by_no_download(script, tmpdir): """ Test installing an editable in two steps (first with --no-install, then with --no-download). """ result = script.pip( 'install', '-e', '%s#egg=initools-dev' % local_checkout( 'svn+http://svn.colorstudy.com/INITools/trunk', tmpdir.join("cache"), ), '--no-install', expect_error=True, ) result.assert_installed( 'INITools', without_egg_link=True, with_files=['.svn'], ) result = script.pip( 'install', '-e', '%s#egg=initools-dev' % local_checkout( 'svn+http://svn.colorstudy.com/INITools/trunk', tmpdir.join("cache"), ), '--no-download', expect_error=True, ) result.assert_installed('INITools', without_files=[curdir, '.svn']) def test_no_install_followed_by_no_download(script): """ Test installing in two steps (first with --no-install, then with --no-download). 
""" egg_info_folder = ( script.site_packages / 'INITools-0.2-py%s.egg-info' % pyversion ) initools_folder = script.site_packages / 'initools' build_dir = script.venv / 'build' / 'INITools' result1 = script.pip( 'install', 'INITools==0.2', '--no-install', expect_error=True, ) assert egg_info_folder not in result1.files_created, str(result1) assert initools_folder not in result1.files_created, ( sorted(result1.files_created) ) assert build_dir in result1.files_created, result1.files_created assert build_dir / 'INITools.egg-info' in result1.files_created result2 = script.pip( 'install', 'INITools==0.2', '--no-download', expect_error=True, ) assert egg_info_folder in result2.files_created, str(result2) assert initools_folder in result2.files_created, ( sorted(result2.files_created) ) assert build_dir not in result2.files_created assert build_dir / 'INITools.egg-info' not in result2.files_created def test_bad_install_with_no_download(script): """ Test that --no-download behaves sensibly if the package source can't be found. """ result = script.pip( 'install', 'INITools==0.2', '--no-download', expect_error=True, ) assert ( "perhaps --no-download was used without first running " "an equivalent install with --no-install?" in result.stdout ) def test_install_dev_version_from_pypi(script): """ Test using package==dev. """ result = script.pip( 'install', 'INITools==dev', '--allow-external', 'INITools', '--allow-unverified', 'INITools', expect_error=True, ) assert (script.site_packages / 'initools') in result.files_created, ( str(result.stdout) ) def test_install_editable_from_git(script, tmpdir): """ Test cloning from Git. 
""" args = ['install'] args.extend([ '-e', '%s#egg=pip-test-package' % local_checkout( 'git+http://github.com/pypa/pip-test-package.git', tmpdir.join("cache"), ), ]) result = script.pip(*args, **{"expect_error": True}) result.assert_installed('pip-test-package', with_files=['.git']) def test_install_editable_from_hg(script, tmpdir): """ Test cloning from Mercurial. """ result = script.pip( 'install', '-e', '%s#egg=ScriptTest' % local_checkout( 'hg+https://bitbucket.org/ianb/scripttest', tmpdir.join("cache"), ), expect_error=True, ) result.assert_installed('ScriptTest', with_files=['.hg']) def test_vcs_url_final_slash_normalization(script, tmpdir): """ Test that presence or absence of final slash in VCS URL is normalized. """ script.pip( 'install', '-e', '%s/#egg=ScriptTest' % local_checkout( 'hg+https://bitbucket.org/ianb/scripttest', tmpdir.join("cache"), ), ) def test_install_editable_from_bazaar(script, tmpdir): """ Test checking out from Bazaar. """ result = script.pip( 'install', '-e', '%s/@174#egg=django-wikiapp' % local_checkout( 'bzr+http://bazaar.launchpad.net/%7Edjango-wikiapp/django-wikiapp' '/release-0.1', tmpdir.join("cache"), ), expect_error=True, ) result.assert_installed('django-wikiapp', with_files=['.bzr']) def test_vcs_url_urlquote_normalization(script, tmpdir): """ Test that urlquoted characters are normalized for repo URL comparison. """ script.pip( 'install', '-e', '%s/#egg=django-wikiapp' % local_checkout( 'bzr+http://bazaar.launchpad.net/%7Edjango-wikiapp/django-wikiapp' '/release-0.1', tmpdir.join("cache"), ), ) def test_install_from_local_directory(script, data): """ Test installing from a local directory. 
""" to_install = data.packages.join("FSPkg") result = script.pip('install', to_install, expect_error=False) fspkg_folder = script.site_packages / 'fspkg' egg_info_folder = ( script.site_packages / 'FSPkg-0.1dev-py%s.egg-info' % pyversion ) assert fspkg_folder in result.files_created, str(result.stdout) assert egg_info_folder in result.files_created, str(result) def test_install_from_local_directory_with_symlinks_to_directories( script, data): """ Test installing from a local directory containing symlinks to directories. """ to_install = data.packages.join("symlinks") result = script.pip('install', to_install, expect_error=False) pkg_folder = script.site_packages / 'symlinks' egg_info_folder = ( script.site_packages / 'symlinks-0.1dev-py%s.egg-info' % pyversion ) assert pkg_folder in result.files_created, str(result.stdout) assert egg_info_folder in result.files_created, str(result) def test_install_from_local_directory_with_no_setup_py(script, data): """ Test installing from a local directory with no 'setup.py'. """ result = script.pip('install', data.root, expect_error=True) assert not result.files_created assert "is not installable. File 'setup.py' not found." in result.stdout def test_editable_install_from_local_directory_with_no_setup_py(script, data): """ Test installing from a local directory with no 'setup.py'. """ result = script.pip('install', '-e', data.root, expect_error=True) assert not result.files_created assert "is not installable. File 'setup.py' not found." in result.stdout def test_install_as_egg(script, data): """ Test installing as egg, instead of flat install. 
""" to_install = data.packages.join("FSPkg") result = script.pip('install', to_install, '--egg', expect_error=False) fspkg_folder = script.site_packages / 'fspkg' egg_folder = script.site_packages / 'FSPkg-0.1dev-py%s.egg' % pyversion assert fspkg_folder not in result.files_created, str(result.stdout) assert egg_folder in result.files_created, str(result) assert join(egg_folder, 'fspkg') in result.files_created, str(result) def test_install_curdir(script, data): """ Test installing current directory ('.'). """ run_from = data.packages.join("FSPkg") # Python 2.4 Windows balks if this exists already egg_info = join(run_from, "FSPkg.egg-info") if os.path.isdir(egg_info): rmtree(egg_info) result = script.pip('install', curdir, cwd=run_from, expect_error=False) fspkg_folder = script.site_packages / 'fspkg' egg_info_folder = ( script.site_packages / 'FSPkg-0.1dev-py%s.egg-info' % pyversion ) assert fspkg_folder in result.files_created, str(result.stdout) assert egg_info_folder in result.files_created, str(result) def test_install_pardir(script, data): """ Test installing parent directory ('..'). """ run_from = data.packages.join("FSPkg", "fspkg") result = script.pip('install', pardir, cwd=run_from, expect_error=False) fspkg_folder = script.site_packages / 'fspkg' egg_info_folder = ( script.site_packages / 'FSPkg-0.1dev-py%s.egg-info' % pyversion ) assert fspkg_folder in result.files_created, str(result.stdout) assert egg_info_folder in result.files_created, str(result) def test_install_global_option(script): """ Test using global distutils options. 
(In particular those that disable the actual install action) """ result = script.pip( 'install', '--global-option=--version', "INITools==0.1", ) assert '0.1\n' in result.stdout def test_install_with_pax_header(script, data): """ test installing from a tarball with pax header for python<2.6 """ script.pip('install', 'paxpkg.tar.bz2', cwd=data.packages) def test_install_with_hacked_egg_info(script, data): """ test installing a package which defines its own egg_info class """ run_from = data.packages.join("HackedEggInfo") result = script.pip('install', '.', cwd=run_from) assert 'Successfully installed hackedegginfo\n' in result.stdout def test_install_using_install_option_and_editable(script, tmpdir): """ Test installing a tool using -e and --install-option """ folder = 'script_folder' script.scratch_path.join(folder).mkdir() url = 'git+git://github.com/pypa/pip-test-package' result = script.pip( 'install', '-e', '%s#egg=pip-test-package' % local_checkout(url, tmpdir.join("cache")), '--install-option=--script-dir=%s' % folder ) script_file = ( script.venv / 'src' / 'pip-test-package' / folder / 'pip-test-package' + script.exe ) assert script_file in result.files_created def test_install_global_option_using_editable(script, tmpdir): """ Test using global distutils options, but in an editable installation """ url = 'hg+http://bitbucket.org/runeh/anyjson' result = script.pip( 'install', '--global-option=--version', '-e', '%s@0.2.5#egg=anyjson' % local_checkout(url, tmpdir.join("cache")) ) assert '0.2.5\n' in result.stdout def test_install_package_with_same_name_in_curdir(script): """ Test installing a package with the same name of a local folder """ script.scratch_path.join("mock==0.6").mkdir() result = script.pip('install', 'mock==0.6') egg_folder = script.site_packages / 'mock-0.6.0-py%s.egg-info' % pyversion assert egg_folder in result.files_created, str(result) mock100_setup_py = textwrap.dedent('''\ from setuptools import setup setup(name='mock', 
version='100.1')''') def test_install_folder_using_dot_slash(script): """ Test installing a folder using pip install ./foldername """ script.scratch_path.join("mock").mkdir() pkg_path = script.scratch_path / 'mock' pkg_path.join("setup.py").write(mock100_setup_py) result = script.pip('install', './mock') egg_folder = script.site_packages / 'mock-100.1-py%s.egg-info' % pyversion assert egg_folder in result.files_created, str(result) def test_install_folder_using_slash_in_the_end(script): r""" Test installing a folder using pip install foldername/ or foldername\ """ script.scratch_path.join("mock").mkdir() pkg_path = script.scratch_path / 'mock' pkg_path.join("setup.py").write(mock100_setup_py) result = script.pip('install', 'mock' + os.path.sep) egg_folder = script.site_packages / 'mock-100.1-py%s.egg-info' % pyversion assert egg_folder in result.files_created, str(result) def test_install_folder_using_relative_path(script): """ Test installing a folder using pip install folder1/folder2 """ script.scratch_path.join("initools").mkdir() script.scratch_path.join("initools", "mock").mkdir() pkg_path = script.scratch_path / 'initools' / 'mock' pkg_path.join("setup.py").write(mock100_setup_py) result = script.pip('install', Path('initools') / 'mock') egg_folder = script.site_packages / 'mock-100.1-py%s.egg-info' % pyversion assert egg_folder in result.files_created, str(result) def test_install_package_which_contains_dev_in_name(script): """ Test installing package from pypi which contains 'dev' in name """ result = script.pip('install', 'django-devserver==0.0.4') devserver_folder = script.site_packages / 'devserver' egg_info_folder = ( script.site_packages / 'django_devserver-0.0.4-py%s.egg-info' % pyversion ) assert devserver_folder in result.files_created, str(result.stdout) assert egg_info_folder in result.files_created, str(result) def test_install_package_with_target(script): """ Test installing a package using pip install --target """ target_dir = 
script.scratch_path / 'target' result = script.pip('install', '-t', target_dir, "initools==0.1") assert Path('scratch') / 'target' / 'initools' in result.files_created, ( str(result) ) # Test repeated call without --upgrade, no files should have changed result = script.pip('install', '-t', target_dir, "initools==0.1") assert not Path('scratch') / 'target' / 'initools' in result.files_updated # Test upgrade call, check that new version is installed result = script.pip('install', '--upgrade', '-t', target_dir, "initools==0.2") assert Path('scratch') / 'target' / 'initools' in result.files_updated, ( str(result) ) egg_folder = ( Path('scratch') / 'target' / 'INITools-0.2-py%s.egg-info' % pyversion) assert egg_folder in result.files_created, ( str(result) ) # Test install and upgrade of single-module package result = script.pip('install', '-t', target_dir, 'six') assert Path('scratch') / 'target' / 'six.py' in result.files_created, ( str(result) ) result = script.pip('install', '-t', target_dir, '--upgrade', 'six') assert Path('scratch') / 'target' / 'six.py' in result.files_updated, ( str(result) ) def test_install_package_with_root(script, data): """ Test installing a package using pip install --root """ root_dir = script.scratch_path / 'root' result = script.pip( 'install', '--root', root_dir, '-f', data.find_links, '--no-index', 'simple==1.0', ) normal_install_path = ( script.base_path / script.site_packages / 'simple-1.0-py%s.egg-info' % pyversion ) # use distutils to change the root exactly how the --root option does it from distutils.util import change_root root_path = change_root( os.path.join(script.scratch, 'root'), normal_install_path ) assert root_path in result.files_created, str(result) # skip on win/py3 for now, see issue #782 @pytest.mark.skipif("sys.platform == 'win32' and sys.version_info >= (3,)") def test_install_package_that_emits_unicode(script, data): """ Install a package with a setup.py that emits UTF-8 output and then fails. 
Refs https://github.com/pypa/pip/issues/326 """ to_install = data.packages.join("BrokenEmitsUTF8") result = script.pip( 'install', to_install, expect_error=True, expect_temp=True, quiet=True, ) assert ( 'FakeError: this package designed to fail on install' in result.stdout ) assert 'UnicodeDecodeError' not in result.stdout def test_install_package_with_utf8_setup(script, data): """Install a package with a setup.py that declares a utf-8 encoding.""" to_install = data.packages.join("SetupPyUTF8") script.pip('install', to_install) def test_install_package_with_latin1_setup(script, data): """Install a package with a setup.py that declares a latin-1 encoding.""" to_install = data.packages.join("SetupPyLatin1") script.pip('install', to_install) def test_url_req_case_mismatch_no_index(script, data): """ tar ball url requirements (with no egg fragment), that happen to have upper case project names, should be considered equal to later requirements that reference the project name using lower case. tests/packages contains Upper-1.0.tar.gz and Upper-2.0.tar.gz 'requiresupper' has install_requires = ['upper'] """ Upper = os.path.join(data.find_links, 'Upper-1.0.tar.gz') result = script.pip( 'install', '--no-index', '-f', data.find_links, Upper, 'requiresupper' ) # only Upper-1.0.tar.gz should get installed. egg_folder = script.site_packages / 'Upper-1.0-py%s.egg-info' % pyversion assert egg_folder in result.files_created, str(result) egg_folder = script.site_packages / 'Upper-2.0-py%s.egg-info' % pyversion assert egg_folder not in result.files_created, str(result) def test_url_req_case_mismatch_file_index(script, data): """ tar ball url requirements (with no egg fragment), that happen to have upper case project names, should be considered equal to later requirements that reference the project name using lower case. 
tests/packages3 contains Dinner-1.0.tar.gz and Dinner-2.0.tar.gz 'requiredinner' has install_requires = ['dinner'] This test is similar to test_url_req_case_mismatch_no_index; that test tests behaviour when using "--no-index -f", while this one does the same test when using "--index-url". Unfortunately this requires a different set of packages as it requires a prepared index.html file and subdirectory-per-package structure. """ Dinner = os.path.join(data.find_links3, 'Dinner', 'Dinner-1.0.tar.gz') result = script.pip( 'install', '--index-url', data.find_links3, Dinner, 'requiredinner' ) # only Upper-1.0.tar.gz should get installed. egg_folder = script.site_packages / 'Dinner-1.0-py%s.egg-info' % pyversion assert egg_folder in result.files_created, str(result) egg_folder = script.site_packages / 'Dinner-2.0-py%s.egg-info' % pyversion assert egg_folder not in result.files_created, str(result) def test_url_incorrect_case_no_index(script, data): """ Same as test_url_req_case_mismatch_no_index, except testing for the case where the incorrect case is given in the name of the package to install rather than in a requirements file. """ result = script.pip( 'install', '--no-index', '-f', data.find_links, "upper", ) # only Upper-2.0.tar.gz should get installed. egg_folder = script.site_packages / 'Upper-1.0-py%s.egg-info' % pyversion assert egg_folder not in result.files_created, str(result) egg_folder = script.site_packages / 'Upper-2.0-py%s.egg-info' % pyversion assert egg_folder in result.files_created, str(result) def test_url_incorrect_case_file_index(script, data): """ Same as test_url_req_case_mismatch_file_index, except testing for the case where the incorrect case is given in the name of the package to install rather than in a requirements file. """ result = script.pip( 'install', '--index-url', data.find_links3, "dinner", ) # only Upper-2.0.tar.gz should get installed. 
egg_folder = script.site_packages / 'Dinner-1.0-py%s.egg-info' % pyversion assert egg_folder not in result.files_created, str(result) egg_folder = script.site_packages / 'Dinner-2.0-py%s.egg-info' % pyversion assert egg_folder in result.files_created, str(result) def test_compiles_pyc(script): """ Test installing with --compile on """ del script.environ["PYTHONDONTWRITEBYTECODE"] script.pip("install", "--compile", "--no-use-wheel", "INITools==0.2") # There are many locations for the __init__.pyc file so attempt to find # any of them exists = [ os.path.exists(script.site_packages_path / "initools/__init__.pyc"), ] exists += glob.glob( script.site_packages_path / "initools/__pycache__/__init__*.pyc" ) assert any(exists) def test_no_compiles_pyc(script, data): """ Test installing from wheel with --compile on """ del script.environ["PYTHONDONTWRITEBYTECODE"] script.pip("install", "--no-compile", "--no-use-wheel", "INITools==0.2") # There are many locations for the __init__.pyc file so attempt to find # any of them exists = [ os.path.exists(script.site_packages_path / "initools/__init__.pyc"), ] exists += glob.glob( script.site_packages_path / "initools/__pycache__/__init__*.pyc" ) assert not any(exists)
cjerdonek/pip
tests/functional/test_install.py
tests/lib/git_submodule_helpers.py
# Copyright (c) 2008, Aldo Cortesi. All rights reserved. # Copyright (c) 2017, Dirk Hartmann. # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. from .base import _SimpleLayoutBase class Max(_SimpleLayoutBase): """Maximized layout A simple layout that only displays one window at a time, filling the screen. This is suitable for use on laptops and other devices with small screens. Conceptually, the windows are managed as a stack, with commands to switch to next and previous windows in the stack. 
""" defaults = [("name", "max", "Name of this layout.")] def __init__(self, **config): _SimpleLayoutBase.__init__(self, **config) self.add_defaults(Max.defaults) def clone(self, group): return _SimpleLayoutBase.clone(self, group) def add(self, client): return self.clients.add(client, 1) def configure(self, client, screen): if self.clients and client is self.clients.current_client: client.place( screen.x, screen.y, screen.width, screen.height, 0, None ) client.unhide() else: client.hide() cmd_previous = _SimpleLayoutBase.previous cmd_next = _SimpleLayoutBase.next cmd_up = cmd_previous cmd_down = cmd_next
# Copyright (c) 2011 Florian Mounier # Copyright (c) 2012 Tycho Andersen # Copyright (c) 2014 Sean Vig # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. 
import pytest import libqtile import libqtile.sh import libqtile.confreader import libqtile.layout import libqtile.manager import libqtile.config class ShConfig(object): keys = [] mouse = [] groups = [ libqtile.config.Group("a"), libqtile.config.Group("b"), ] layouts = [ libqtile.layout.Max(), ] floating_layout = libqtile.layout.floating.Floating() screens = [ libqtile.config.Screen() ] main = None sh_config = pytest.mark.parametrize("qtile", [ShConfig], indirect=True) @sh_config def test_columnize(qtile): qtile.sh = libqtile.sh.QSh(qtile.c) assert qtile.sh.columnize(["one", "two"]) == "one two" qtile.sh.termwidth = 1 assert qtile.sh.columnize(["one", "two"], update_termwidth=False) == "one\ntwo" qtile.sh.termwidth = 15 v = qtile.sh.columnize(["one", "two", "three", "four", "five"], update_termwidth=False) assert v == 'one two \nthree four \nfive ' @sh_config def test_ls(qtile): qtile.sh = libqtile.sh.QSh(qtile.c) qtile.sh.do_cd("layout") qtile.sh.do_ls("") @sh_config def test_findNode(qtile): qtile.sh = libqtile.sh.QSh(qtile.c) n = qtile.sh._findNode(qtile.sh.current, "layout") assert n.path == "layout" assert n.parent n = qtile.sh._findNode(n, "0") assert n.path == "layout[0]" n = qtile.sh._findNode(n, "..") assert n.path == "layout" n = qtile.sh._findNode(n, "0", "..") assert n.path == "layout" n = qtile.sh._findNode(n, "..", "layout", 0) assert n.path == "layout[0]" assert not qtile.sh._findNode(n, "wibble") assert not qtile.sh._findNode(n, "..", "0", "wibble") @sh_config def test_do_cd(qtile): qtile.sh = libqtile.sh.QSh(qtile.c) assert qtile.sh.do_cd("layout") == 'layout' assert qtile.sh.do_cd("0/wibble") == 'No such path.' 
assert qtile.sh.do_cd("0/") == 'layout[0]' @sh_config def test_call(qtile): qtile.sh = libqtile.sh.QSh(qtile.c) assert qtile.sh._call("status", []) == "OK" v = qtile.sh._call("nonexistent", "") assert "No such command" in v v = qtile.sh._call("status", "(((") assert "Syntax error" in v v = qtile.sh._call("status", "(1)") assert "Command exception" in v @sh_config def test_complete(qtile): qtile.sh = libqtile.sh.QSh(qtile.c) assert qtile.sh._complete("c", "c") == [ "cd", "commands", "critical", ] assert qtile.sh._complete("cd l", "l") == ["layout/"] assert qtile.sh._complete("cd layout/", "layout/") == [ "layout/" + x for x in ["group", "window", "screen", "0"] ] assert qtile.sh._complete("cd layout/", "layout/g") == ["layout/group/"] @sh_config def test_help(qtile): qtile.sh = libqtile.sh.QSh(qtile.c) assert qtile.sh.do_help("nonexistent").startswith("No such command") assert qtile.sh.do_help("help")
kynikos/qtile
test/test_sh.py
libqtile/layout/max.py
"""Monitor Memory on a CFME/Miq appliance and builds report&graphs displaying usage per process.""" import json import os import time import traceback from collections import OrderedDict from datetime import datetime from threading import Thread import yaml from yaycl import AttrDict from cfme.utils.conf import cfme_performance from cfme.utils.log import logger from cfme.utils.path import results_path from cfme.utils.version import current_version from cfme.utils.version import get_version miq_workers = [ 'MiqGenericWorker', 'MiqPriorityWorker', 'MiqScheduleWorker', 'MiqUiWorker', 'MiqWebServiceWorker', 'MiqWebsocketWorker', 'MiqReportingWorker', 'MiqReplicationWorker', 'MiqSmartProxyWorker', 'MiqVimBrokerWorker', 'MiqEmsRefreshCoreWorker', # Refresh Workers: 'ManageIQ::Providers::Microsoft::InfraManager::RefreshWorker', 'ManageIQ::Providers::Openstack::InfraManager::RefreshWorker', 'ManageIQ::Providers::Redhat::InfraManager::RefreshWorker', 'ManageIQ::Providers::Vmware::InfraManager::RefreshWorker', 'MiqEmsRefreshWorkerMicrosoft', # 5.4 'MiqEmsRefreshWorkerRedhat', # 5.4 'MiqEmsRefreshWorkerVmware', # 5.4 'ManageIQ::Providers::Amazon::CloudManager::RefreshWorker', 'ManageIQ::Providers::Azure::CloudManager::RefreshWorker', 'ManageIQ::Providers::Google::CloudManager::RefreshWorker', 'ManageIQ::Providers::Openstack::CloudManager::RefreshWorker', 'MiqEmsRefreshWorkerAmazon', # 5.4 'MiqEmsRefreshWorkerOpenstack', # 5.4 'ManageIQ::Providers::AnsibleTower::ConfigurationManager::RefreshWorker', 'ManageIQ::Providers::Foreman::ConfigurationManager::RefreshWorker', 'ManageIQ::Providers::Foreman::ProvisioningManager::RefreshWorker', 'MiqEmsRefreshWorkerForemanConfiguration', # 5.4 'MiqEmsRefreshWorkerForemanProvisioning', # 5.4 'ManageIQ::Providers::Atomic::ContainerManager::RefreshWorker', 'ManageIQ::Providers::AtomicEnterprise::ContainerManager::RefreshWorker', 'ManageIQ::Providers::Kubernetes::ContainerManager::RefreshWorker', 
'ManageIQ::Providers::Openshift::ContainerManager::RefreshWorker', 'ManageIQ::Providers::OpenshiftEnterprise::ContainerManager::RefreshWorker', 'ManageIQ::Providers::StorageManager::CinderManager::RefreshWorker', 'ManageIQ::Providers::StorageManager::SwiftManager::RefreshWorker', 'ManageIQ::Providers::Amazon::NetworkManager::RefreshWorker', 'ManageIQ::Providers::Azure::NetworkManager::RefreshWorker', 'ManageIQ::Providers::Google::NetworkManager::RefreshWorker', 'ManageIQ::Providers::Openstack::NetworkManager::RefreshWorker', 'MiqNetappRefreshWorker', 'MiqSmisRefreshWorker', # Event Workers: 'MiqEventHandler', 'ManageIQ::Providers::Openstack::InfraManager::EventCatcher', 'ManageIQ::Providers::StorageManager::CinderManager::EventCatcher', 'ManageIQ::Providers::Redhat::InfraManager::EventCatcher', 'ManageIQ::Providers::Vmware::InfraManager::EventCatcher', 'MiqEventCatcherRedhat', # 5.4 'MiqEventCatcherVmware', # 5.4 'ManageIQ::Providers::Amazon::CloudManager::EventCatcher', 'ManageIQ::Providers::Azure::CloudManager::EventCatcher', 'ManageIQ::Providers::Google::CloudManager::EventCatcher', 'ManageIQ::Providers::Openstack::CloudManager::EventCatcher', 'MiqEventCatcherAmazon', # 5.4 'MiqEventCatcherOpenstack', # 5.4 'ManageIQ::Providers::Atomic::ContainerManager::EventCatcher', 'ManageIQ::Providers::AtomicEnterprise::ContainerManager::EventCatcher', 'ManageIQ::Providers::Kubernetes::ContainerManager::EventCatcher', 'ManageIQ::Providers::Openshift::ContainerManager::EventCatcher', 'ManageIQ::Providers::OpenshiftEnterprise::ContainerManager::EventCatcher', 'ManageIQ::Providers::Openstack::NetworkManager::EventCatcher', # Metrics Processor/Collector Workers 'MiqEmsMetricsProcessorWorker', 'ManageIQ::Providers::Openstack::InfraManager::MetricsCollectorWorker', 'ManageIQ::Providers::Redhat::InfraManager::MetricsCollectorWorker', 'ManageIQ::Providers::Vmware::InfraManager::MetricsCollectorWorker', 'MiqEmsMetricsCollectorWorkerRedhat', # 5.4 
'MiqEmsMetricsCollectorWorkerVmware', # 5.4 'ManageIQ::Providers::Amazon::CloudManager::MetricsCollectorWorker', 'ManageIQ::Providers::Azure::CloudManager::MetricsCollectorWorker', 'ManageIQ::Providers::Openstack::CloudManager::MetricsCollectorWorker', 'MiqEmsMetricsCollectorWorkerAmazon', # 5.4 'MiqEmsMetricsCollectorWorkerOpenstack', # 5.4 'ManageIQ::Providers::Atomic::ContainerManager::MetricsCollectorWorker', 'ManageIQ::Providers::AtomicEnterprise::ContainerManager::MetricsCollectorWorker', 'ManageIQ::Providers::Kubernetes::ContainerManager::MetricsCollectorWorker', 'ManageIQ::Providers::Openshift::ContainerManager::MetricsCollectorWorker', 'ManageIQ::Providers::OpenshiftEnterprise::ContainerManager::MetricsCollectorWorker', 'ManageIQ::Providers::Openstack::NetworkManager::MetricsCollectorWorker', 'MiqStorageMetricsCollectorWorker', 'MiqVmdbStorageBridgeWorker'] ruby_processes = list(miq_workers) ruby_processes.extend(['evm:dbsync:replicate', 'MIQ Server (evm_server.rb)', 'evm_watchdog.rb', 'appliance_console.rb']) process_order = list(ruby_processes) process_order.extend(['memcached', 'postgres', 'httpd', 'collectd']) # Timestamp created at first import, thus grouping all reports of like workload test_ts = time.strftime('%Y%m%d%H%M%S') # 10s sample interval (occasionally sampling can take almost 4s on an appliance doing a lot of work) SAMPLE_INTERVAL = 10 class SmemMemoryMonitor(Thread): def __init__(self, ssh_client, scenario_data): super(SmemMemoryMonitor, self).__init__() self.ssh_client = ssh_client self.scenario_data = scenario_data self.grafana_urls = {} self.miq_server_id = '' self.use_slab = False self.signal = True def create_process_result(self, process_results, starttime, process_pid, process_name, memory_by_pid): if process_pid in list(memory_by_pid.keys()): if process_name not in process_results: process_results[process_name] = OrderedDict() process_results[process_name][process_pid] = OrderedDict() if process_pid not in 
process_results[process_name]: process_results[process_name][process_pid] = OrderedDict() process_results[process_name][process_pid][starttime] = {} rss_mem = memory_by_pid[process_pid]['rss'] pss_mem = memory_by_pid[process_pid]['pss'] uss_mem = memory_by_pid[process_pid]['uss'] vss_mem = memory_by_pid[process_pid]['vss'] swap_mem = memory_by_pid[process_pid]['swap'] process_results[process_name][process_pid][starttime]['rss'] = rss_mem process_results[process_name][process_pid][starttime]['pss'] = pss_mem process_results[process_name][process_pid][starttime]['uss'] = uss_mem process_results[process_name][process_pid][starttime]['vss'] = vss_mem process_results[process_name][process_pid][starttime]['swap'] = swap_mem del memory_by_pid[process_pid] else: logger.warning('Process {} PID, not found: {}'.format(process_name, process_pid)) def get_appliance_memory(self, appliance_results, plottime): # 5.5/5.6 - RHEL 7 / Centos 7 # Application Memory Used : MemTotal - (MemFree + Slab + Cached) # 5.4 - RHEL 6 / Centos 6 # Application Memory Used : MemTotal - (MemFree + Buffers + Cached) # Available memory could potentially be better metric appliance_results[plottime] = {} result = self.ssh_client.run_command('cat /proc/meminfo') if result.failed: logger.error('Exit_status nonzero in get_appliance_memory: {}, {}' .format(result.rc, result.output)) del appliance_results[plottime] else: meminfo_raw = result.output.replace('kB', '').strip() meminfo = OrderedDict((k.strip(), v.strip()) for k, v in (value.strip().split(':') for value in meminfo_raw.split('\n'))) appliance_results[plottime]['total'] = float(meminfo['MemTotal']) / 1024 appliance_results[plottime]['free'] = float(meminfo['MemFree']) / 1024 if 'MemAvailable' in meminfo: # 5.5, RHEL 7/Centos 7 self.use_slab = True mem_used = (float(meminfo['MemTotal']) - (float(meminfo['MemFree']) + float( meminfo['Slab']) + float(meminfo['Cached']))) / 1024 else: # 5.4, RHEL 6/Centos 6 mem_used = (float(meminfo['MemTotal']) - 
(float(meminfo['MemFree']) + float( meminfo['Buffers']) + float(meminfo['Cached']))) / 1024 appliance_results[plottime]['used'] = mem_used appliance_results[plottime]['buffers'] = float(meminfo['Buffers']) / 1024 appliance_results[plottime]['cached'] = float(meminfo['Cached']) / 1024 appliance_results[plottime]['slab'] = float(meminfo['Slab']) / 1024 appliance_results[plottime]['swap_total'] = float(meminfo['SwapTotal']) / 1024 appliance_results[plottime]['swap_free'] = float(meminfo['SwapFree']) / 1024 def get_evm_workers(self): result = self.ssh_client.run_command( 'psql -t -q -d vmdb_production -c ' '\"select pid,type from miq_workers where miq_server_id = \'{}\'\"'.format( self.miq_server_id)) if result.output.strip(): workers = {} for worker in result.output.strip().split('\n'): pid_worker = worker.strip().split('|') if len(pid_worker) == 2: workers[pid_worker[0].strip()] = pid_worker[1].strip() else: logger.error('Unexpected output from psql: {}'.format(worker)) return workers else: return {} # Old method of obtaining per process memory (Appliances without smem) # def get_pids_memory(self): # result = self.ssh_client.run_command( # 'ps -A -o pid,rss,vsz,comm,cmd | sed 1d') # pids_memory = result.output.strip().split('\n') # memory_by_pid = {} # for line in pids_memory: # values = [s for s in line.strip().split(' ') if s] # pid = values[0] # memory_by_pid[pid] = {} # memory_by_pid[pid]['rss'] = float(values[1]) / 1024 # memory_by_pid[pid]['vss'] = float(values[2]) / 1024 # memory_by_pid[pid]['name'] = values[3] # memory_by_pid[pid]['cmd'] = ' '.join(values[4:]) # return memory_by_pid def get_miq_server_id(self): # Obtain the Miq Server GUID: result = self.ssh_client.run_command('cat /var/www/miq/vmdb/GUID') logger.info('Obtained appliance GUID: {}'.format(result.output.strip())) # Get server id: result = self.ssh_client.run_command( 'psql -t -q -d vmdb_production -c "select id from miq_servers where guid = \'{}\'"' ''.format(result.output.strip())) 
logger.info('Obtained miq_server_id: {}'.format(result.output.strip())) self.miq_server_id = result.output.strip() def get_pids_memory(self): result = self.ssh_client.run_command( 'smem -c \'pid rss pss uss vss swap name command\' | sed 1d') pids_memory = result.output.strip().split('\n') memory_by_pid = {} for line in pids_memory: if line.strip(): try: values = [s for s in line.strip().split(' ') if s] pid = values[0] int(pid) memory_by_pid[pid] = {} memory_by_pid[pid]['rss'] = float(values[1]) / 1024 memory_by_pid[pid]['pss'] = float(values[2]) / 1024 memory_by_pid[pid]['uss'] = float(values[3]) / 1024 memory_by_pid[pid]['vss'] = float(values[4]) / 1024 memory_by_pid[pid]['swap'] = float(values[5]) / 1024 memory_by_pid[pid]['name'] = values[6] memory_by_pid[pid]['cmd'] = ' '.join(values[7:]) except Exception as e: logger.error('Processing smem output error: {}'.format(e.__class__.__name__, e)) logger.error('Issue with pid: {} line: {}'.format(pid, line)) logger.error('Complete smem output: {}'.format(result.output)) return memory_by_pid def _real_run(self): """ Result dictionaries: appliance_results[timestamp][measurement] = value appliance_results[timestamp]['total'] = value appliance_results[timestamp]['free'] = value appliance_results[timestamp]['used'] = value appliance_results[timestamp]['buffers'] = value appliance_results[timestamp]['cached'] = value appliance_results[timestamp]['slab'] = value appliance_results[timestamp]['swap_total'] = value appliance_results[timestamp]['swap_free'] = value appliance measurements: total/free/used/buffers/cached/slab/swap_total/swap_free process_results[name][pid][timestamp][measurement] = value process_results[name][pid][timestamp]['rss'] = value process_results[name][pid][timestamp]['pss'] = value process_results[name][pid][timestamp]['uss'] = value process_results[name][pid][timestamp]['vss'] = value process_results[name][pid][timestamp]['swap'] = value """ appliance_results = OrderedDict() process_results = 
OrderedDict() install_smem(self.ssh_client) self.get_miq_server_id() logger.info('Starting Monitoring Thread.') while self.signal: starttime = time.time() plottime = datetime.now() self.get_appliance_memory(appliance_results, plottime) workers = self.get_evm_workers() memory_by_pid = self.get_pids_memory() for worker_pid in workers: self.create_process_result(process_results, plottime, worker_pid, workers[worker_pid], memory_by_pid) for pid in sorted(memory_by_pid.keys()): if memory_by_pid[pid]['name'] == 'httpd': self.create_process_result(process_results, plottime, pid, 'httpd', memory_by_pid) elif memory_by_pid[pid]['name'] == 'postgres': self.create_process_result(process_results, plottime, pid, 'postgres', memory_by_pid) elif memory_by_pid[pid]['name'] == 'postmaster': self.create_process_result(process_results, plottime, pid, 'postgres', memory_by_pid) elif memory_by_pid[pid]['name'] == 'memcached': self.create_process_result(process_results, plottime, pid, 'memcached', memory_by_pid) elif memory_by_pid[pid]['name'] == 'collectd': self.create_process_result(process_results, plottime, pid, 'collectd', memory_by_pid) elif memory_by_pid[pid]['name'] == 'ruby': if 'evm_server.rb' in memory_by_pid[pid]['cmd']: self.create_process_result(process_results, plottime, pid, 'MIQ Server (evm_server.rb)', memory_by_pid) elif 'MIQ Server' in memory_by_pid[pid]['cmd']: self.create_process_result(process_results, plottime, pid, 'MIQ Server (evm_server.rb)', memory_by_pid) elif 'evm_watchdog.rb' in memory_by_pid[pid]['cmd']: self.create_process_result(process_results, plottime, pid, 'evm_watchdog.rb', memory_by_pid) elif 'appliance_console.rb' in memory_by_pid[pid]['cmd']: self.create_process_result(process_results, plottime, pid, 'appliance_console.rb', memory_by_pid) elif 'evm:dbsync:replicate' in memory_by_pid[pid]['cmd']: self.create_process_result(process_results, plottime, pid, 'evm:dbsync:replicate', memory_by_pid) else: logger.debug('Unaccounted for ruby pid: 
{}'.format(pid)) timediff = time.time() - starttime logger.debug('Monitoring sampled in {}s'.format(round(timediff, 4))) # Sleep Monitoring interval # Roughly 10s samples, accounts for collection of memory measurements time_to_sleep = abs(SAMPLE_INTERVAL - timediff) time.sleep(time_to_sleep) logger.info('Monitoring CFME Memory Terminating') create_report(self.scenario_data, appliance_results, process_results, self.use_slab, self.grafana_urls) def run(self): try: self._real_run() except Exception as e: logger.error('Error in Monitoring Thread: {}'.format(e)) logger.error('{}'.format(traceback.format_exc())) def install_smem(ssh_client): # smem is included by default in 5.6 appliances logger.info('Installing smem.') ver = get_version() if ver == '55': ssh_client.run_command('rpm -i {}'.format(cfme_performance['tools']['rpms']['epel7_rpm'])) ssh_client.run_command('yum install -y smem') # Patch smem to display longer command line names logger.info('Patching smem') ssh_client.run_command(r'sed -i s/\.27s/\.200s/g /usr/bin/smem') def create_report(scenario_data, appliance_results, process_results, use_slab, grafana_urls): logger.info('Creating Memory Monitoring Report.') ver = current_version() provider_names = 'No Providers' if 'providers' in scenario_data['scenario']: provider_names = ', '.join(scenario_data['scenario']['providers']) workload_path = results_path.join('{}-{}-{}'.format(test_ts, scenario_data['test_dir'], ver)) if not os.path.exists(str(workload_path)): os.makedirs(str(workload_path)) scenario_path = workload_path.join(scenario_data['scenario']['name']) if os.path.exists(str(scenario_path)): logger.warning('Duplicate Workload-Scenario Name: {}'.format(scenario_path)) scenario_path = workload_path.join('{}-{}'.format(time.strftime('%Y%m%d%H%M%S'), scenario_data['scenario']['name'])) logger.warning('Using: {}'.format(scenario_path)) os.mkdir(str(scenario_path)) mem_graphs_path = scenario_path.join('graphs') if not os.path.exists(str(mem_graphs_path)): 
os.mkdir(str(mem_graphs_path)) mem_rawdata_path = scenario_path.join('rawdata') if not os.path.exists(str(mem_rawdata_path)): os.mkdir(str(mem_rawdata_path)) graph_appliance_measurements(mem_graphs_path, ver, appliance_results, use_slab, provider_names) graph_individual_process_measurements(mem_graphs_path, process_results, provider_names) graph_same_miq_workers(mem_graphs_path, process_results, provider_names) graph_all_miq_workers(mem_graphs_path, process_results, provider_names) # Dump scenario Yaml: with open(str(scenario_path.join('scenario.yml')), 'w') as scenario_file: yaml.safe_dump(dict(scenario_data['scenario']), scenario_file, default_flow_style=False) generate_summary_csv(scenario_path.join('{}-summary.csv'.format(ver)), appliance_results, process_results, provider_names, ver) generate_raw_data_csv(mem_rawdata_path, appliance_results, process_results) generate_summary_html(scenario_path, ver, appliance_results, process_results, scenario_data, provider_names, grafana_urls) generate_workload_html(scenario_path, ver, scenario_data, provider_names, grafana_urls) logger.info('Finished Creating Report') def compile_per_process_results(procs_to_compile, process_results, ts_end): alive_pids = 0 recycled_pids = 0 total_running_rss = 0 total_running_pss = 0 total_running_uss = 0 total_running_vss = 0 total_running_swap = 0 for process in procs_to_compile: if process in process_results: for pid in process_results[process]: if ts_end in process_results[process][pid]: alive_pids += 1 total_running_rss += process_results[process][pid][ts_end]['rss'] total_running_pss += process_results[process][pid][ts_end]['pss'] total_running_uss += process_results[process][pid][ts_end]['uss'] total_running_vss += process_results[process][pid][ts_end]['vss'] total_running_swap += process_results[process][pid][ts_end]['swap'] else: recycled_pids += 1 return alive_pids, recycled_pids, total_running_rss, total_running_pss, total_running_uss, \ total_running_vss, total_running_swap def 
def generate_raw_data_csv(directory, appliance_results, process_results):
    """Write raw measurement CSVs: one appliance-wide file plus one file per tracked PID.

    NOTE(review): the ``def`` keyword of this function was truncated by file
    mangling in the original source and has been restored here.

    :param directory: py.path-like directory (must support ``.join``) for output files
    :param appliance_results: ordered mapping ``{timestamp: {metric: value}}``
    :param process_results: mapping ``{process_name: {pid: {timestamp: {metric: value}}}}``
    """
    starttime = time.time()
    file_name = str(directory.join('appliance.csv'))
    with open(file_name, 'w') as csv_file:
        csv_file.write('TimeStamp,Total,Free,Used,Buffers,Cached,Slab,Swap_Total,Swap_Free\n')
        for ts in appliance_results:
            csv_file.write('{},{},{},{},{},{},{},{},{}\n'.format(ts,
                appliance_results[ts]['total'], appliance_results[ts]['free'],
                appliance_results[ts]['used'], appliance_results[ts]['buffers'],
                appliance_results[ts]['cached'], appliance_results[ts]['slab'],
                appliance_results[ts]['swap_total'], appliance_results[ts]['swap_free']))
    # One CSV per (pid, process_name) with the per-process memory samples.
    for process_name in process_results:
        for process_pid in process_results[process_name]:
            file_name = str(directory.join('{}-{}.csv'.format(process_pid, process_name)))
            with open(file_name, 'w') as csv_file:
                csv_file.write('TimeStamp,RSS,PSS,USS,VSS,SWAP\n')
                for ts in process_results[process_name][process_pid]:
                    csv_file.write('{},{},{},{},{},{}\n'.format(ts,
                        process_results[process_name][process_pid][ts]['rss'],
                        process_results[process_name][process_pid][ts]['pss'],
                        process_results[process_name][process_pid][ts]['uss'],
                        process_results[process_name][process_pid][ts]['vss'],
                        process_results[process_name][process_pid][ts]['swap']))
    timediff = time.time() - starttime
    logger.info('Generated Raw Data CSVs in: {}'.format(timediff))


def generate_summary_csv(file_name, appliance_results, process_results, provider_names,
        version_string):
    """Write the summary CSV: appliance metrics at start/end of test plus per-process dumps."""
    starttime = time.time()
    with open(str(file_name), 'w') as csv_file:
        csv_file.write('Version: {}, Provider(s): {}\n'.format(version_string, provider_names))
        csv_file.write('Measurement,Start of test,End of test\n')
        # First and last sample timestamps (appliance_results preserves insertion order).
        start = list(appliance_results.keys())[0]
        end = list(appliance_results.keys())[-1]
        csv_file.write('Appliance Total Memory,{},{}\n'.format(
            round(appliance_results[start]['total'], 2),
            round(appliance_results[end]['total'], 2)))
        csv_file.write('Appliance Free Memory,{},{}\n'.format(
            round(appliance_results[start]['free'], 2),
            round(appliance_results[end]['free'], 2)))
        csv_file.write('Appliance Used Memory,{},{}\n'.format(
            round(appliance_results[start]['used'], 2),
            round(appliance_results[end]['used'], 2)))
        csv_file.write('Appliance Buffers,{},{}\n'.format(
            round(appliance_results[start]['buffers'], 2),
            round(appliance_results[end]['buffers'], 2)))
        csv_file.write('Appliance Cached,{},{}\n'.format(
            round(appliance_results[start]['cached'], 2),
            round(appliance_results[end]['cached'], 2)))
        csv_file.write('Appliance Slab,{},{}\n'.format(
            round(appliance_results[start]['slab'], 2),
            round(appliance_results[end]['slab'], 2)))
        csv_file.write('Appliance Total Swap,{},{}\n'.format(
            round(appliance_results[start]['swap_total'], 2),
            round(appliance_results[end]['swap_total'], 2)))
        csv_file.write('Appliance Free Swap,{},{}\n'.format(
            round(appliance_results[start]['swap_free'], 2),
            round(appliance_results[end]['swap_free'], 2)))
        # Per-process sections, one per measurement type.
        summary_csv_measurement_dump(csv_file, process_results, 'rss')
        summary_csv_measurement_dump(csv_file, process_results, 'pss')
        summary_csv_measurement_dump(csv_file, process_results, 'uss')
        summary_csv_measurement_dump(csv_file, process_results, 'vss')
        summary_csv_measurement_dump(csv_file, process_results, 'swap')
    timediff = time.time() - starttime
    logger.info('Generated Summary CSV in: {}'.format(timediff))


def generate_summary_html(directory, version_string, appliance_results, process_results,
        scenario_data, provider_names, grafana_urls):
    """Write ``index.html`` summarizing appliance-wide and per-process memory results."""
    starttime = time.time()
    file_name = str(directory.join('index.html'))
    with open(file_name, 'w') as html_file:
        html_file.write('<html>\n')
        html_file.write('<head><title>{} - {} Memory Usage Performance</title></head>'.format(
            version_string, provider_names))
        html_file.write('<body>\n')
        html_file.write('<b>CFME {} {} Test Results</b><br>\n'.format(version_string,
            scenario_data['test_name'].title()))
        html_file.write('<b>Appliance Roles:</b> {}<br>\n'.format(
            scenario_data['appliance_roles'].replace(',', ', ')))
        html_file.write('<b>Provider(s):</b> {}<br>\n'.format(provider_names))
        html_file.write('<b><a href=\'https://{}/\' target="_blank">{}</a></b>\n'.format(
            scenario_data['appliance_ip'], scenario_data['appliance_name']))
        if grafana_urls:
            for g_name in sorted(grafana_urls.keys()):
                html_file.write(
                    ' : <b><a href=\'{}\' target="_blank">{}</a></b>'.format(grafana_urls[g_name],
                        g_name))
        html_file.write('<br>\n')
        html_file.write('<b><a href=\'{}-summary.csv\'>Summary CSV</a></b>'.format(version_string))
        html_file.write(' : <b><a href=\'workload.html\'>Workload Info</a></b>')
        html_file.write(' : <b><a href=\'graphs/\'>Graphs directory</a></b>\n')
        html_file.write(' : <b><a href=\'rawdata/\'>CSVs directory</a></b><br>\n')
        start = list(appliance_results.keys())[0]
        end = list(appliance_results.keys())[-1]
        timediff = end - start
        total_proc_count = 0
        for proc_name in process_results:
            total_proc_count += len(list(process_results[proc_name].keys()))
        growth = appliance_results[end]['used'] - appliance_results[start]['used']
        max_used_memory = 0
        for ts in appliance_results:
            if appliance_results[ts]['used'] > max_used_memory:
                max_used_memory = appliance_results[ts]['used']
        html_file.write('<table border="1">\n')
        html_file.write('<tr><td>\n')
        # Appliance Wide Results
        html_file.write('<table style="width:100%" border="1">\n')
        html_file.write('<tr>\n')
        html_file.write('<td><b>Version</b></td>\n')
        html_file.write('<td><b>Start Time</b></td>\n')
        html_file.write('<td><b>End Time</b></td>\n')
        html_file.write('<td><b>Total Test Time</b></td>\n')
        html_file.write('<td><b>Total Memory</b></td>\n')
        html_file.write('<td><b>Start Used Memory</b></td>\n')
        html_file.write('<td><b>End Used Memory</b></td>\n')
        html_file.write('<td><b>Used Memory Growth</b></td>\n')
        html_file.write('<td><b>Max Used Memory</b></td>\n')
        html_file.write('<td><b>Total Tracked Processes</b></td>\n')
        html_file.write('</tr>\n')
        html_file.write('<td><a href=\'rawdata/appliance.csv\'>{}</a></td>\n'.format(
            version_string))
        html_file.write('<td>{}</td>\n'.format(start.replace(microsecond=0)))
        html_file.write('<td>{}</td>\n'.format(end.replace(microsecond=0)))
        html_file.write('<td>{}</td>\n'.format(str(timediff).partition('.')[0]))
        html_file.write('<td>{}</td>\n'.format(round(appliance_results[end]['total'], 2)))
        html_file.write('<td>{}</td>\n'.format(round(appliance_results[start]['used'], 2)))
        html_file.write('<td>{}</td>\n'.format(round(appliance_results[end]['used'], 2)))
        html_file.write('<td>{}</td>\n'.format(round(growth, 2)))
        html_file.write('<td>{}</td>\n'.format(round(max_used_memory, 2)))
        html_file.write('<td>{}</td>\n'.format(total_proc_count))
        html_file.write('</table>\n')
        # CFME/Miq Worker Results
        html_file.write('<table style="width:100%" border="1">\n')
        html_file.write('<tr>\n')
        html_file.write('<td><b>Total CFME/Miq Workers</b></td>\n')
        html_file.write('<td><b>End Running Workers</b></td>\n')
        html_file.write('<td><b>Recycled Workers</b></td>\n')
        html_file.write('<td><b>End Total Worker RSS</b></td>\n')
        html_file.write('<td><b>End Total Worker PSS</b></td>\n')
        html_file.write('<td><b>End Total Worker USS</b></td>\n')
        html_file.write('<td><b>End Total Worker VSS</b></td>\n')
        html_file.write('<td><b>End Total Worker SWAP</b></td>\n')
        html_file.write('</tr>\n')
        a_pids, r_pids, t_rss, t_pss, t_uss, t_vss, t_swap = compile_per_process_results(
            miq_workers, process_results, end)
        html_file.write('<tr>\n')
        html_file.write('<td>{}</td>\n'.format(a_pids + r_pids))
        html_file.write('<td>{}</td>\n'.format(a_pids))
        html_file.write('<td>{}</td>\n'.format(r_pids))
        html_file.write('<td>{}</td>\n'.format(round(t_rss, 2)))
        html_file.write('<td>{}</td>\n'.format(round(t_pss, 2)))
        html_file.write('<td>{}</td>\n'.format(round(t_uss, 2)))
        html_file.write('<td>{}</td>\n'.format(round(t_vss, 2)))
        html_file.write('<td>{}</td>\n'.format(round(t_swap, 2)))
        html_file.write('</tr>\n')
        html_file.write('</table>\n')
        # Per Process Summaries:
        html_file.write('<table style="width:100%" border="1">\n')
        html_file.write('<tr>\n')
        html_file.write('<td><b>Application/Process Group</b></td>\n')
        html_file.write('<td><b>Total Processes</b></td>\n')
        html_file.write('<td><b>End Running Processes</b></td>\n')
        html_file.write('<td><b>Recycled Processes</b></td>\n')
        html_file.write('<td><b>End Total Process RSS</b></td>\n')
        html_file.write('<td><b>End Total Process PSS</b></td>\n')
        html_file.write('<td><b>End Total Process USS</b></td>\n')
        html_file.write('<td><b>End Total Process VSS</b></td>\n')
        html_file.write('<td><b>End Total Process SWAP</b></td>\n')
        html_file.write('</tr>\n')
        # ruby row seeds the running totals (t_* / tt_* accumulators).
        a_pids, r_pids, t_rss, t_pss, t_uss, t_vss, t_swap = compile_per_process_results(
            ruby_processes, process_results, end)
        t_a_pids = a_pids
        t_r_pids = r_pids
        tt_rss = t_rss
        tt_pss = t_pss
        tt_uss = t_uss
        tt_vss = t_vss
        tt_swap = t_swap
        html_file.write('<tr>\n')
        html_file.write('<td>ruby</td>\n')
        html_file.write('<td>{}</td>\n'.format(a_pids + r_pids))
        html_file.write('<td>{}</td>\n'.format(a_pids))
        html_file.write('<td>{}</td>\n'.format(r_pids))
        html_file.write('<td>{}</td>\n'.format(round(t_rss, 2)))
        html_file.write('<td>{}</td>\n'.format(round(t_pss, 2)))
        html_file.write('<td>{}</td>\n'.format(round(t_uss, 2)))
        html_file.write('<td>{}</td>\n'.format(round(t_vss, 2)))
        html_file.write('<td>{}</td>\n'.format(round(t_swap, 2)))
        html_file.write('</tr>\n')
        # memcached Summary
        a_pids, r_pids, t_rss, t_pss, t_uss, t_vss, t_swap = compile_per_process_results(
            ['memcached'], process_results, end)
        t_a_pids += a_pids
        t_r_pids += r_pids
        tt_rss += t_rss
        tt_pss += t_pss
        tt_uss += t_uss
        tt_vss += t_vss
        tt_swap += t_swap
        html_file.write('<tr>\n')
        html_file.write('<td>memcached</td>\n')
        html_file.write('<td>{}</td>\n'.format(a_pids + r_pids))
        html_file.write('<td>{}</td>\n'.format(a_pids))
        html_file.write('<td>{}</td>\n'.format(r_pids))
        html_file.write('<td>{}</td>\n'.format(round(t_rss, 2)))
        html_file.write('<td>{}</td>\n'.format(round(t_pss, 2)))
        html_file.write('<td>{}</td>\n'.format(round(t_uss, 2)))
        html_file.write('<td>{}</td>\n'.format(round(t_vss, 2)))
        html_file.write('<td>{}</td>\n'.format(round(t_swap, 2)))
        html_file.write('</tr>\n')
        # Postgres Summary
        a_pids, r_pids, t_rss, t_pss, t_uss, t_vss, t_swap = compile_per_process_results(
            ['postgres'], process_results, end)
        t_a_pids += a_pids
        t_r_pids += r_pids
        tt_rss += t_rss
        tt_pss += t_pss
        tt_uss += t_uss
        tt_vss += t_vss
        tt_swap += t_swap
        html_file.write('<tr>\n')
        html_file.write('<td>postgres</td>\n')
        html_file.write('<td>{}</td>\n'.format(a_pids + r_pids))
        html_file.write('<td>{}</td>\n'.format(a_pids))
        html_file.write('<td>{}</td>\n'.format(r_pids))
        html_file.write('<td>{}</td>\n'.format(round(t_rss, 2)))
        html_file.write('<td>{}</td>\n'.format(round(t_pss, 2)))
        html_file.write('<td>{}</td>\n'.format(round(t_uss, 2)))
        html_file.write('<td>{}</td>\n'.format(round(t_vss, 2)))
        html_file.write('<td>{}</td>\n'.format(round(t_swap, 2)))
        html_file.write('</tr>\n')
        # httpd Summary
        a_pids, r_pids, t_rss, t_pss, t_uss, t_vss, t_swap = compile_per_process_results(['httpd'],
            process_results, end)
        t_a_pids += a_pids
        t_r_pids += r_pids
        tt_rss += t_rss
        tt_pss += t_pss
        tt_uss += t_uss
        tt_vss += t_vss
        tt_swap += t_swap
        html_file.write('<tr>\n')
        html_file.write('<td>httpd</td>\n')
        html_file.write('<td>{}</td>\n'.format(a_pids + r_pids))
        html_file.write('<td>{}</td>\n'.format(a_pids))
        html_file.write('<td>{}</td>\n'.format(r_pids))
        html_file.write('<td>{}</td>\n'.format(round(t_rss, 2)))
        html_file.write('<td>{}</td>\n'.format(round(t_pss, 2)))
        html_file.write('<td>{}</td>\n'.format(round(t_uss, 2)))
        html_file.write('<td>{}</td>\n'.format(round(t_vss, 2)))
        html_file.write('<td>{}</td>\n'.format(round(t_swap, 2)))
        html_file.write('</tr>\n')
        # collectd Summary
        a_pids, r_pids, t_rss, t_pss, t_uss, t_vss, t_swap = compile_per_process_results(
            ['collectd'], process_results, end)
        t_a_pids += a_pids
        t_r_pids += r_pids
        tt_rss += t_rss
        tt_pss += t_pss
        tt_uss += t_uss
        tt_vss += t_vss
        tt_swap += t_swap
        html_file.write('<tr>\n')
        html_file.write('<td>collectd</td>\n')
        html_file.write('<td>{}</td>\n'.format(a_pids + r_pids))
        html_file.write('<td>{}</td>\n'.format(a_pids))
        html_file.write('<td>{}</td>\n'.format(r_pids))
        html_file.write('<td>{}</td>\n'.format(round(t_rss, 2)))
        html_file.write('<td>{}</td>\n'.format(round(t_pss, 2)))
        html_file.write('<td>{}</td>\n'.format(round(t_uss, 2)))
        html_file.write('<td>{}</td>\n'.format(round(t_vss, 2)))
        html_file.write('<td>{}</td>\n'.format(round(t_swap, 2)))
        html_file.write('</tr>\n')
        # Grand-total row from the accumulated t_*/tt_* values.
        html_file.write('<tr>\n')
        html_file.write('<td>total</td>\n')
        html_file.write('<td>{}</td>\n'.format(t_a_pids + t_r_pids))
        html_file.write('<td>{}</td>\n'.format(t_a_pids))
        html_file.write('<td>{}</td>\n'.format(t_r_pids))
        html_file.write('<td>{}</td>\n'.format(round(tt_rss, 2)))
        html_file.write('<td>{}</td>\n'.format(round(tt_pss, 2)))
        html_file.write('<td>{}</td>\n'.format(round(tt_uss, 2)))
        html_file.write('<td>{}</td>\n'.format(round(tt_vss, 2)))
        html_file.write('<td>{}</td>\n'.format(round(tt_swap, 2)))
        html_file.write('</tr>\n')
        html_file.write('</table>\n')
        # Appliance Graph
        html_file.write('</td></tr><tr><td>\n')
        file_name = '{}-appliance_memory.png'.format(version_string)
        html_file.write('<img src=\'graphs/{}\'>\n'.format(file_name))
        file_name = '{}-appliance_swap.png'.format(version_string)
        # Check for swap usage through out time frame:
        max_swap_used = 0
        for ts in appliance_results:
            swap_used = appliance_results[ts]['swap_total'] - appliance_results[ts]['swap_free']
            if swap_used > max_swap_used:
                max_swap_used = swap_used
        if max_swap_used < 10:  # Less than 10MiB Max, then hide graph
            html_file.write('<br><a href=\'graphs/{}\'>Swap Graph '.format(file_name))
            html_file.write('(Hidden, max_swap_used < 10 MiB)</a>\n')
        else:
            html_file.write('<img src=\'graphs/{}\'>\n'.format(file_name))
        html_file.write('</td></tr><tr><td>\n')
        # Per Process Results
        html_file.write('<table style="width:100%" border="1"><tr>\n')
        html_file.write('<td><b>Process Name</b></td>\n')
        html_file.write('<td><b>Process Pid</b></td>\n')
        html_file.write('<td><b>Start Time</b></td>\n')
        html_file.write('<td><b>End Time</b></td>\n')
        html_file.write('<td><b>Time Alive</b></td>\n')
        html_file.write('<td><b>RSS Mem Start</b></td>\n')
        html_file.write('<td><b>RSS Mem End</b></td>\n')
        html_file.write('<td><b>RSS Mem Change</b></td>\n')
        html_file.write('<td><b>PSS Mem Start</b></td>\n')
        html_file.write('<td><b>PSS Mem End</b></td>\n')
        html_file.write('<td><b>PSS Mem Change</b></td>\n')
        html_file.write('<td><b>CSV</b></td>\n')
        html_file.write('</tr>\n')
        # By Worker Type Memory Used
        for ordered_name in process_order:
            if ordered_name in process_results:
                for pid in process_results[ordered_name]:
                    start = list(process_results[ordered_name][pid].keys())[0]
                    end = list(process_results[ordered_name][pid].keys())[-1]
                    timediff = end - start
                    html_file.write('<tr>\n')
                    if len(process_results[ordered_name]) > 1:
                        html_file.write('<td><a href=\'#{}\'>{}</a></td>\n'.format(ordered_name,
                            ordered_name))
                        html_file.write('<td><a href=\'graphs/{}-{}.png\'>{}</a></td>\n'.format(
                            ordered_name, pid, pid))
                    else:
                        html_file.write('<td>{}</td>\n'.format(ordered_name))
                        html_file.write('<td><a href=\'#{}-{}.png\'>{}</a></td>\n'.format(
                            ordered_name, pid, pid))
                    html_file.write('<td>{}</td>\n'.format(start.replace(microsecond=0)))
                    html_file.write('<td>{}</td>\n'.format(end.replace(microsecond=0)))
                    html_file.write('<td>{}</td>\n'.format(str(timediff).partition('.')[0]))
                    rss_change = process_results[ordered_name][pid][end]['rss'] - \
                        process_results[ordered_name][pid][start]['rss']
                    html_file.write('<td>{}</td>\n'.format(
                        round(process_results[ordered_name][pid][start]['rss'], 2)))
                    html_file.write('<td>{}</td>\n'.format(
                        round(process_results[ordered_name][pid][end]['rss'], 2)))
                    html_file.write('<td>{}</td>\n'.format(round(rss_change, 2)))
                    pss_change = process_results[ordered_name][pid][end]['pss'] - \
                        process_results[ordered_name][pid][start]['pss']
                    html_file.write('<td>{}</td>\n'.format(
                        round(process_results[ordered_name][pid][start]['pss'], 2)))
                    html_file.write('<td>{}</td>\n'.format(
                        round(process_results[ordered_name][pid][end]['pss'], 2)))
                    html_file.write('<td>{}</td>\n'.format(round(pss_change, 2)))
                    html_file.write('<td><a href=\'rawdata/{}-{}.csv\'>csv</a></td>\n'.format(
                        pid, ordered_name))
                    html_file.write('</tr>\n')
            else:
                logger.debug('Process/Worker not part of test: {}'.format(ordered_name))
        html_file.write('</table>\n')
        # Worker Graphs
        for ordered_name in process_order:
            if ordered_name in process_results:
                html_file.write('<tr><td>\n')
                html_file.write('<div id=\'{}\'>Process name: {}</div><br>\n'.format(
                    ordered_name, ordered_name))
                if len(process_results[ordered_name]) > 1:
                    file_name = '{}-all.png'.format(ordered_name)
                    html_file.write('<img id=\'{}\' src=\'graphs/{}\'><br>\n'.format(file_name,
                        file_name))
                else:
                    for pid in sorted(process_results[ordered_name]):
                        file_name = '{}-{}.png'.format(ordered_name, pid)
                        html_file.write('<img id=\'{}\' src=\'graphs/{}\'><br>\n'.format(
                            file_name, file_name))
                html_file.write('</td></tr>\n')
        html_file.write('</table>\n')
        html_file.write('</body>\n')
        html_file.write('</html>\n')
    timediff = time.time() - starttime
    logger.info('Generated Summary html in: {}'.format(timediff))


def generate_workload_html(directory, ver, scenario_data, provider_names, grafana_urls):
    """Write ``workload.html`` with scenario YAML and links to the version-info CSVs."""
    starttime = time.time()
    file_name = str(directory.join('workload.html'))
    with open(file_name, 'w') as html_file:
        html_file.write('<html>\n')
        html_file.write('<head><title>{} - {}</title></head>'.format(
            scenario_data['test_name'], provider_names))
        html_file.write('<body>\n')
        html_file.write('<b>CFME {} {} Test Results</b><br>\n'.format(ver,
            scenario_data['test_name'].title()))
        html_file.write('<b>Appliance Roles:</b> {}<br>\n'.format(
            scenario_data['appliance_roles'].replace(',', ', ')))
        html_file.write('<b>Provider(s):</b> {}<br>\n'.format(provider_names))
        html_file.write('<b><a href=\'https://{}/\' target="_blank">{}</a></b>\n'.format(
            scenario_data['appliance_ip'], scenario_data['appliance_name']))
        if grafana_urls:
            for g_name in sorted(grafana_urls.keys()):
                html_file.write(
                    ' : <b><a href=\'{}\' target="_blank">{}</a></b>'.format(grafana_urls[g_name],
                        g_name))
        html_file.write('<br>\n')
        html_file.write('<b><a href=\'{}-summary.csv\'>Summary CSV</a></b>'.format(ver))
        html_file.write(' : <b><a href=\'index.html\'>Memory Info</a></b>')
        html_file.write(' : <b><a href=\'graphs/\'>Graphs directory</a></b>\n')
        html_file.write(' : <b><a href=\'rawdata/\'>CSVs directory</a></b><br>\n')
        html_file.write('<br><b>Scenario Data: </b><br>\n')
        yaml_html = get_scenario_html(scenario_data['scenario'])
        html_file.write(yaml_html + '\n')
        # The 'Quantifier Data:' marker below is located later by
        # add_workload_quantifiers() to splice quantifier output in place.
        html_file.write('<br>\n<br>\n<br>\n<b>Quantifier Data: </b>\n<br>\n<br>\n<br>\n<br>\n')
        html_file.write('<table border="1">\n')
        html_file.write('<tr>\n')
        html_file.write('<td><b><font size="4"> System Information</font></b></td>\n')
        html_file.write('</tr>\n')
        html_file.write('<tr>\n')
        html_file.write('<td>\n')
        system_path = ('../version_info/system.csv')
        html_file.write('<a href="{}" download="System_Versions-{}-{}"> System Versions</a>'
            .format(system_path, test_ts, scenario_data['scenario']['name']))
        html_file.write('</td>\n')
        html_file.write('</tr>\n')
        html_file.write('<tr>\n')
        html_file.write('<td>&nbsp</td>\n')
        html_file.write('</tr>\n')
        html_file.write('<tr>\n')
        html_file.write('<td>&nbsp</td>\n')
        html_file.write('</tr>\n')
        html_file.write('<tr>\n')
        html_file.write('<td><b><font size="4"> Process Information</font></b></td>\n')
        html_file.write('</tr>\n')
        html_file.write('<tr>\n')
        html_file.write('<td>\n')
        process_path = ('../version_info/processes.csv')
        html_file.write('<a href="{}" download="Process_Versions-{}-{}"> Process Versions</a>'
            .format(process_path, test_ts, scenario_data['scenario']['name']))
        html_file.write('</td>\n')
        html_file.write('</tr>\n')
        html_file.write('<tr>\n')
        html_file.write('<td>&nbsp</td>\n')
        html_file.write('</tr>\n')
        html_file.write('<tr>\n')
        html_file.write('<td>&nbsp</td>\n')
        html_file.write('</tr>\n')
        html_file.write('<tr>\n')
        html_file.write('<td><b><font size="4"> Ruby Gem Information</font></b></td>\n')
        html_file.write('</tr>\n')
        html_file.write('<tr>\n')
        html_file.write('<td>\n')
        gems_path = ('../version_info/gems.csv')
        html_file.write('<a href="{}" download="Gem_Versions-{}-{}"> Ruby Gem Versions</a>'
            .format(gems_path, test_ts, scenario_data['scenario']['name']))
        html_file.write('</td>\n')
        html_file.write('</tr>\n')
        html_file.write('<tr>\n')
        html_file.write('<td>&nbsp</td>\n')
        html_file.write('</tr>\n')
        html_file.write('<tr>\n')
        html_file.write('<td>&nbsp</td>\n')
        html_file.write('</tr>\n')
        html_file.write('<tr>\n')
        html_file.write('<td><b><font size="4"> RPM Information</font></b></td>\n')
        html_file.write('</tr>\n')
        html_file.write('<tr>\n')
        html_file.write('<td>\n')
        rpms_path = ('../version_info/rpms.csv')
        html_file.write('<a href="{}" download="RPM_Versions-{}-{}"> RPM Versions</a>'
            .format(rpms_path, test_ts, scenario_data['scenario']['name']))
        html_file.write('</td>\n')
        html_file.write('</tr>\n')
        html_file.write('</table>\n')
        html_file.write('</body>\n')
        html_file.write('</html>\n')
    timediff = time.time() - starttime
    logger.info('Generated Workload html in: {}'.format(timediff))


def add_workload_quantifiers(quantifiers, scenario_data):
    """Splice JSON-rendered ``quantifiers`` into workload.html after its marker line."""
    starttime = time.time()
    ver = current_version()
    workload_path = results_path.join('{}-{}-{}'.format(test_ts, scenario_data['test_dir'], ver))
    directory = workload_path.join(scenario_data['scenario']['name'])
    file_name = str(directory.join('workload.html'))
    marker = '<b>Quantifier Data: </b>'
    yaml_dict = quantifiers
    yaml_string = str(json.dumps(yaml_dict, indent=4))
    yaml_html = yaml_string.replace('\n', '<br>\n')
    with open(file_name, 'r+') as html_file:
        # Scan to the marker, then rewrite everything after it with the
        # quantifier HTML followed by the original remainder.
        line = ''
        while marker not in line:
            line = html_file.readline()
        marker_pos = html_file.tell()
        remainder = html_file.read()
        html_file.seek(marker_pos)
        html_file.write('{} \n'.format(yaml_html))
        html_file.write(remainder)
    timediff = time.time() - starttime
    logger.info('Added quantifiers in: {}'.format(timediff))


def get_scenario_html(scenario_data):
    """Render the scenario as YAML and convert it to crude indented HTML."""
    scenario_dict = create_dict(scenario_data)
    scenario_yaml = yaml.safe_dump(scenario_dict)
    scenario_html = scenario_yaml.replace('\n', '<br>\n')
    scenario_html = scenario_html.replace(', ', '<br>\n &nbsp;&nbsp;&nbsp;&nbsp;-&nbsp;')
    scenario_html = scenario_html.replace(' ', '&nbsp;')
    scenario_html = scenario_html.replace('[', '<br>\n &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;-&nbsp;')
    scenario_html = scenario_html.replace(']', '\n')
    return scenario_html


def create_dict(attr_dict):
    """Recursively convert an AttrDict (and nested AttrDicts) into plain dicts."""
    main_dict = dict(attr_dict)
    for key, value in main_dict.items():
        # isinstance instead of type() ==: also handles AttrDict subclasses.
        if isinstance(value, AttrDict):
            main_dict[key] = create_dict(value)
    return main_dict


def graph_appliance_measurements(graphs_path, ver, appliance_results, use_slab, provider_names):
    """Plot appliance-wide memory and swap stack plots to PNG files under graphs_path."""
    import matplotlib as mpl
    mpl.use('Agg')  # headless backend; must be set before pyplot import
    import matplotlib.dates as mdates
    import matplotlib.pyplot as plt
    from cycler import cycler
    starttime = time.time()
    dates = list(appliance_results.keys())
    total_memory_list = list(appliance_results[ts]['total'] for ts in appliance_results.keys())
    free_memory_list = list(appliance_results[ts]['free'] for ts in appliance_results.keys())
    used_memory_list = list(appliance_results[ts]['used'] for ts in appliance_results.keys())
    buffers_memory_list = list(
        appliance_results[ts]['buffers'] for ts in appliance_results.keys())
    cache_memory_list = list(appliance_results[ts]['cached'] for ts in appliance_results.keys())
    slab_memory_list = list(appliance_results[ts]['slab'] for ts in appliance_results.keys())
    swap_total_list = list(appliance_results[ts]['swap_total'] for ts in appliance_results.keys())
    swap_free_list = list(appliance_results[ts]['swap_free'] for ts in appliance_results.keys())
    # Stack Plot Memory Usage
    file_name = graphs_path.join('{}-appliance_memory.png'.format(ver))
    mpl.rcParams['axes.prop_cycle'] = cycler('color', ['firebrick', 'coral', 'steelblue',
        'forestgreen'])
    fig, ax = plt.subplots()
    plt.title('Provider(s): {}\nAppliance Memory'.format(provider_names))
    plt.xlabel('Date / Time')
    plt.ylabel('Memory (MiB)')
    if use_slab:
        y = [used_memory_list, slab_memory_list, cache_memory_list, free_memory_list]
    else:
        y = [used_memory_list, buffers_memory_list, cache_memory_list, free_memory_list]
    plt.stackplot(dates, *y, baseline='zero')
    # Annotate first/last sample of each stacked layer with its value.
    ax.annotate(str(round(total_memory_list[0], 2)), xy=(dates[0], total_memory_list[0]),
        xytext=(4, 4), textcoords='offset points')
    ax.annotate(str(round(total_memory_list[-1], 2)), xy=(dates[-1], total_memory_list[-1]),
        xytext=(4, -4), textcoords='offset points')
    if use_slab:
        ax.annotate(str(round(slab_memory_list[0], 2)), xy=(dates[0], used_memory_list[0] +
            slab_memory_list[0]), xytext=(4, 4), textcoords='offset points')
        ax.annotate(str(round(slab_memory_list[-1], 2)), xy=(dates[-1], used_memory_list[-1] +
            slab_memory_list[-1]), xytext=(4, -4), textcoords='offset points')
        ax.annotate(str(round(cache_memory_list[0], 2)), xy=(dates[0], used_memory_list[0] +
            slab_memory_list[0] + cache_memory_list[0]), xytext=(4, 4),
            textcoords='offset points')
        ax.annotate(str(round(cache_memory_list[-1], 2)), xy=(
            dates[-1], used_memory_list[-1] + slab_memory_list[-1] + cache_memory_list[-1]),
            xytext=(4, -4), textcoords='offset points')
    else:
        ax.annotate(str(round(buffers_memory_list[0], 2)), xy=(
            dates[0], used_memory_list[0] + buffers_memory_list[0]), xytext=(4, 4),
            textcoords='offset points')
        ax.annotate(str(round(buffers_memory_list[-1], 2)), xy=(dates[-1],
            used_memory_list[-1] + buffers_memory_list[-1]), xytext=(4, -4),
            textcoords='offset points')
        ax.annotate(str(round(cache_memory_list[0], 2)), xy=(dates[0], used_memory_list[0] +
            buffers_memory_list[0] + cache_memory_list[0]), xytext=(4, 4),
            textcoords='offset points')
        ax.annotate(str(round(cache_memory_list[-1], 2)), xy=(
            dates[-1], used_memory_list[-1] + buffers_memory_list[-1] + cache_memory_list[-1]),
            xytext=(4, -4), textcoords='offset points')
    ax.annotate(str(round(used_memory_list[0], 2)), xy=(dates[0], used_memory_list[0]),
        xytext=(4, 4), textcoords='offset points')
    ax.annotate(str(round(used_memory_list[-1], 2)), xy=(dates[-1], used_memory_list[-1]),
        xytext=(4, -4), textcoords='offset points')
    datefmt = mdates.DateFormatter('%m-%d %H-%M')
    ax.xaxis.set_major_formatter(datefmt)
    ax.grid(True)
    # Proxy artists for the legend (stackplot has no direct legend handles).
    p1 = plt.Rectangle((0, 0), 1, 1, fc='firebrick')
    p2 = plt.Rectangle((0, 0), 1, 1, fc='coral')
    p3 = plt.Rectangle((0, 0), 1, 1, fc='steelblue')
    p4 = plt.Rectangle((0, 0), 1, 1, fc='forestgreen')
    if use_slab:
        ax.legend([p1, p2, p3, p4], ['Used', 'Slab', 'Cached', 'Free'],
            bbox_to_anchor=(1.45, 0.22), fancybox=True)
    else:
        ax.legend([p1, p2, p3, p4], ['Used', 'Buffers', 'Cached', 'Free'],
            bbox_to_anchor=(1.45, 0.22), fancybox=True)
    fig.autofmt_xdate()
    plt.savefig(str(file_name), bbox_inches='tight')
    plt.close()
    # Stack Plot Swap usage
    mpl.rcParams['axes.prop_cycle'] = cycler('color', ['firebrick', 'forestgreen'])
    file_name = graphs_path.join('{}-appliance_swap.png'.format(ver))
    fig, ax = plt.subplots()
    plt.title('Provider(s): {}\nAppliance Swap'.format(provider_names))
    plt.xlabel('Date / Time')
    plt.ylabel('Swap (MiB)')
    swap_used_list = [t - f for f, t in zip(swap_free_list, swap_total_list)]
    y = [swap_used_list, swap_free_list]
    plt.stackplot(dates, *y, baseline='zero')
    ax.annotate(str(round(swap_total_list[0], 2)), xy=(dates[0], swap_total_list[0]),
        xytext=(4, 4), textcoords='offset points')
    ax.annotate(str(round(swap_total_list[-1], 2)), xy=(dates[-1], swap_total_list[-1]),
        xytext=(4, -4), textcoords='offset points')
    ax.annotate(str(round(swap_used_list[0], 2)), xy=(dates[0], swap_used_list[0]),
        xytext=(4, 4), textcoords='offset points')
    ax.annotate(str(round(swap_used_list[-1], 2)), xy=(dates[-1], swap_used_list[-1]),
        xytext=(4, -4), textcoords='offset points')
    datefmt = mdates.DateFormatter('%m-%d %H-%M')
    ax.xaxis.set_major_formatter(datefmt)
    ax.grid(True)
    p1 = plt.Rectangle((0, 0), 1, 1, fc='firebrick')
    p2 = plt.Rectangle((0, 0), 1, 1, fc='forestgreen')
    ax.legend([p1, p2], ['Used Swap', 'Free Swap'], bbox_to_anchor=(1.45, 0.22), fancybox=True)
    fig.autofmt_xdate()
    plt.savefig(str(file_name), bbox_inches='tight')
    plt.close()
    # Reset Colors
    mpl.rcdefaults()
    timediff = time.time() - starttime
    logger.info('Plotted Appliance Memory in: {}'.format(timediff))


def graph_all_miq_workers(graph_file_path, process_results, provider_names):
    """Plot RSS/VSS for every Worker/Handler/Catcher process on one combined graph."""
    import matplotlib as mpl
    mpl.use('Agg')
    import matplotlib.dates as mdates
    import matplotlib.pyplot as plt
    starttime = time.time()
    file_name = graph_file_path.join('all-processes.png')
    fig, ax = plt.subplots()
    plt.title('Provider(s): {}\nAll Workers/Monitored Processes'.format(provider_names))
    plt.xlabel('Date / Time')
    plt.ylabel('Memory (MiB)')
    for process_name in process_results:
        if 'Worker' in process_name or 'Handler' in process_name or 'Catcher' in process_name:
            for process_pid in process_results[process_name]:
                dates = list(process_results[process_name][process_pid].keys())
                rss_samples = list(process_results[process_name][process_pid][ts]['rss']
                    for ts in process_results[process_name][process_pid].keys())
                vss_samples = list(process_results[process_name][process_pid][ts]['vss']
                    for ts in process_results[process_name][process_pid].keys())
                plt.plot(dates, rss_samples, linewidth=1, label='{} {} RSS'.format(process_pid,
                    process_name))
                plt.plot(dates, vss_samples, linewidth=1, label='{} {} VSS'.format(
                    process_pid, process_name))
    datefmt = mdates.DateFormatter('%m-%d %H-%M')
    ax.xaxis.set_major_formatter(datefmt)
    ax.grid(True)
    plt.legend(loc='upper center', bbox_to_anchor=(1.2, 0.1), fancybox=True)
    fig.autofmt_xdate()
    plt.savefig(str(file_name), bbox_inches='tight')
    plt.close()
    timediff = time.time() - starttime
    logger.info('Plotted All Type/Process Memory in: {}'.format(timediff))


def graph_individual_process_measurements(graph_file_path, process_results, provider_names):
    """Plot one PNG per (process, pid) with RSS/PSS/USS/VSS/Swap series."""
    import matplotlib as mpl
    mpl.use('Agg')
    import matplotlib.dates as mdates
    import matplotlib.pyplot as plt
    starttime = time.time()
    for process_name in process_results:
        for process_pid in process_results[process_name]:
            file_name = graph_file_path.join('{}-{}.png'.format(process_name, process_pid))
            dates = list(process_results[process_name][process_pid].keys())
            rss_samples = list(process_results[process_name][process_pid][ts]['rss']
                for ts in process_results[process_name][process_pid].keys())
            pss_samples = list(process_results[process_name][process_pid][ts]['pss']
                for ts in process_results[process_name][process_pid].keys())
            uss_samples = list(process_results[process_name][process_pid][ts]['uss']
                for ts in process_results[process_name][process_pid].keys())
            vss_samples = list(process_results[process_name][process_pid][ts]['vss']
                for ts in process_results[process_name][process_pid].keys())
            swap_samples = list(process_results[process_name][process_pid][ts]['swap']
                for ts in process_results[process_name][process_pid].keys())
            fig, ax = plt.subplots()
            plt.title('Provider(s)/Size: {}\nProcess/Worker: {}\nPID: {}'.format(provider_names,
                process_name, process_pid))
            plt.xlabel('Date / Time')
            plt.ylabel('Memory (MiB)')
            plt.plot(dates, rss_samples, linewidth=1, label='RSS')
            plt.plot(dates, pss_samples, linewidth=1, label='PSS')
            plt.plot(dates, uss_samples, linewidth=1, label='USS')
            plt.plot(dates, vss_samples, linewidth=1, label='VSS')
            plt.plot(dates, swap_samples, linewidth=1, label='Swap')
            # Annotate first/last sample of each non-empty series.
            if rss_samples:
                ax.annotate(str(round(rss_samples[0], 2)), xy=(dates[0], rss_samples[0]),
                    xytext=(4, 4), textcoords='offset points')
                ax.annotate(str(round(rss_samples[-1], 2)), xy=(dates[-1], rss_samples[-1]),
                    xytext=(4, -4), textcoords='offset points')
            if pss_samples:
                ax.annotate(str(round(pss_samples[0], 2)), xy=(dates[0], pss_samples[0]),
                    xytext=(4, 4), textcoords='offset points')
                ax.annotate(str(round(pss_samples[-1], 2)), xy=(dates[-1], pss_samples[-1]),
                    xytext=(4, -4), textcoords='offset points')
            if uss_samples:
                ax.annotate(str(round(uss_samples[0], 2)),
                    xy=(dates[0], uss_samples[0]), xytext=(4, 4), textcoords='offset points')
                ax.annotate(str(round(uss_samples[-1], 2)), xy=(dates[-1], uss_samples[-1]),
                    xytext=(4, -4), textcoords='offset points')
            if vss_samples:
                ax.annotate(str(round(vss_samples[0], 2)), xy=(dates[0], vss_samples[0]),
                    xytext=(4, 4), textcoords='offset points')
                ax.annotate(str(round(vss_samples[-1], 2)), xy=(dates[-1], vss_samples[-1]),
                    xytext=(4, -4), textcoords='offset points')
            if swap_samples:
                ax.annotate(str(round(swap_samples[0], 2)), xy=(dates[0], swap_samples[0]),
                    xytext=(4, 4), textcoords='offset points')
                ax.annotate(str(round(swap_samples[-1], 2)), xy=(dates[-1], swap_samples[-1]),
                    xytext=(4, -4), textcoords='offset points')
            datefmt = mdates.DateFormatter('%m-%d %H-%M')
            ax.xaxis.set_major_formatter(datefmt)
            ax.grid(True)
            plt.legend(loc='upper center', bbox_to_anchor=(1.2, 0.1), fancybox=True)
            fig.autofmt_xdate()
            plt.savefig(str(file_name), bbox_inches='tight')
            plt.close()
    timediff = time.time() - starttime
    logger.info('Plotted Individual Process Memory in: {}'.format(timediff))


def graph_same_miq_workers(graph_file_path, process_results, provider_names):
    """Plot all PIDs of any process type that has more than one PID on one shared graph."""
    import matplotlib as mpl
    mpl.use('Agg')
    import matplotlib.dates as mdates
    import matplotlib.pyplot as plt
    starttime = time.time()
    for process_name in process_results:
        if len(process_results[process_name]) > 1:
            logger.debug('Plotting {} {} processes on single graph.'.format(
                len(process_results[process_name]), process_name))
            file_name = graph_file_path.join('{}-all.png'.format(process_name))
            fig, ax = plt.subplots()
            # Build a PID list for the title, wrapping to a new line every 6 PIDs.
            pids = 'PIDs: '
            for i, pid in enumerate(process_results[process_name], 1):
                pids = '{}{}'.format(pids, '{},{}'.format(pid, [' ', '\n'][i % 6 == 0]))
            pids = pids[0:-2]
            plt.title('Provider: {}\nProcess/Worker: {}\n{}'.format(provider_names,
                process_name, pids))
            plt.xlabel('Date / Time')
            plt.ylabel('Memory (MiB)')
            for process_pid in process_results[process_name]:
                dates = list(process_results[process_name][process_pid].keys())
                rss_samples = list(process_results[process_name][process_pid][ts]['rss']
                    for ts in process_results[process_name][process_pid].keys())
                pss_samples = list(process_results[process_name][process_pid][ts]['pss']
                    for ts in process_results[process_name][process_pid].keys())
                uss_samples = list(process_results[process_name][process_pid][ts]['uss']
                    for ts in process_results[process_name][process_pid].keys())
                vss_samples = list(process_results[process_name][process_pid][ts]['vss']
                    for ts in process_results[process_name][process_pid].keys())
                swap_samples = list(process_results[process_name][process_pid][ts]['swap']
                    for ts in process_results[process_name][process_pid].keys())
                plt.plot(dates, rss_samples, linewidth=1, label='{} RSS'.format(process_pid))
                plt.plot(dates, pss_samples, linewidth=1, label='{} PSS'.format(process_pid))
                plt.plot(dates, uss_samples, linewidth=1, label='{} USS'.format(process_pid))
                plt.plot(dates, vss_samples, linewidth=1, label='{} VSS'.format(process_pid))
                plt.plot(dates, swap_samples, linewidth=1, label='{} SWAP'.format(process_pid))
                if rss_samples:
                    ax.annotate(str(round(rss_samples[0], 2)), xy=(dates[0], rss_samples[0]),
                        xytext=(4, 4), textcoords='offset points')
                    ax.annotate(str(round(rss_samples[-1], 2)), xy=(dates[-1], rss_samples[-1]),
                        xytext=(4, -4), textcoords='offset points')
                if pss_samples:
                    ax.annotate(str(round(pss_samples[0], 2)), xy=(dates[0], pss_samples[0]),
                        xytext=(4, 4), textcoords='offset points')
                    ax.annotate(str(round(pss_samples[-1], 2)), xy=(dates[-1], pss_samples[-1]),
                        xytext=(4, -4), textcoords='offset points')
                if uss_samples:
                    ax.annotate(str(round(uss_samples[0], 2)), xy=(dates[0], uss_samples[0]),
                        xytext=(4, 4), textcoords='offset points')
                    ax.annotate(str(round(uss_samples[-1], 2)), xy=(dates[-1], uss_samples[-1]),
                        xytext=(4, -4), textcoords='offset points')
                if vss_samples:
                    ax.annotate(str(round(vss_samples[0], 2)), xy=(dates[0], vss_samples[0]),
                        xytext=(4, 4), textcoords='offset points')
                    ax.annotate(str(round(vss_samples[-1],
                        2)), xy=(dates[-1], vss_samples[-1]), xytext=(4, -4),
                        textcoords='offset points')
                if swap_samples:
                    ax.annotate(str(round(swap_samples[0], 2)), xy=(dates[0], swap_samples[0]),
                        xytext=(4, 4), textcoords='offset points')
                    ax.annotate(str(round(swap_samples[-1], 2)), xy=(dates[-1], swap_samples[-1]),
                        xytext=(4, -4), textcoords='offset points')
            datefmt = mdates.DateFormatter('%m-%d %H-%M')
            ax.xaxis.set_major_formatter(datefmt)
            ax.grid(True)
            plt.legend(loc='upper center', bbox_to_anchor=(1.2, 0.1), fancybox=True)
            fig.autofmt_xdate()
            plt.savefig(str(file_name), bbox_inches='tight')
            plt.close()
    timediff = time.time() - starttime
    logger.info('Plotted Same Type/Process Memory in: {}'.format(timediff))


def summary_csv_measurement_dump(csv_file, process_results, measurement):
    """Append a per-process start/end section for one measurement to an open CSV file."""
    csv_file.write('---------------------------------------------\n')
    csv_file.write('Per Process {} Memory Usage\n'.format(measurement.upper()))
    csv_file.write('---------------------------------------------\n')
    csv_file.write('Process/Worker Type,PID,Start of test,End of test\n')
    for ordered_name in process_order:
        if ordered_name in process_results:
            for process_pid in sorted(process_results[ordered_name]):
                start = list(process_results[ordered_name][process_pid].keys())[0]
                end = list(process_results[ordered_name][process_pid].keys())[-1]
                csv_file.write('{},{},{},{}\n'.format(ordered_name, process_pid,
                    round(process_results[ordered_name][process_pid][start][measurement], 2),
                    round(process_results[ordered_name][process_pid][end][measurement], 2)))
from datetime import datetime from datetime import timedelta from functools import partial import fauxfactory import pytest from cfme import test_requirements from cfme.utils.browser import quit from cfme.utils.conf import cfme_data from cfme.utils.log import logger from cfme.utils.wait import wait_for pytestmark = [test_requirements.configuration] @pytest.fixture def ntp_servers_keys(appliance): return appliance.server.settings.ntp_servers_fields_keys @pytest.fixture def empty_ntp_dict(ntp_servers_keys): return dict.fromkeys(ntp_servers_keys, '') def appliance_date(appliance): result = appliance.ssh_client.run_command("date --iso-8601=hours") return datetime.strptime(result.output.rsplit('-', 1)[0], '%Y-%m-%dT%H') def check_ntp_grep(appliance, clock): result = appliance.ssh_client.run_command( "cat /etc/chrony.conf| grep {}".format(clock)) return not bool(result.rc) def clear_ntp_settings(appliance, empty_ntp_dict): ntp_file_date_stamp = appliance.ssh_client.run_command( "stat --format '%y' /etc/chrony.conf").output appliance.server.settings.update_ntp_servers(empty_ntp_dict) wait_for(lambda: ntp_file_date_stamp != appliance.ssh_client.run_command( "stat --format '%y' /etc/chrony.conf").output, num_sec=60, delay=10) @pytest.mark.tier(2) def test_ntp_crud(request, appliance, empty_ntp_dict, ntp_servers_keys): """ Polarion: assignee: tpapaioa casecomponent: Configuration caseimportance: low initialEstimate: 1/12h """ # Adding finalizer request.addfinalizer(lambda: appliance.server.settings.update_ntp_servers(empty_ntp_dict)) """ Insert, Update and Delete the NTP servers """ # set from yaml file appliance.server.settings.update_ntp_servers(dict(list(zip( ntp_servers_keys, [ntp_server for ntp_server in cfme_data['clock_servers']])))) # Set from random values appliance.server.settings.update_ntp_servers(dict(list(zip( ntp_servers_keys, [fauxfactory.gen_alphanumeric() for _ in range(3)])))) # Deleting the ntp values 
appliance.server.settings.update_ntp_servers(empty_ntp_dict) @pytest.mark.tier(3) def test_ntp_server_max_character(request, appliance, ntp_servers_keys, empty_ntp_dict): """ Polarion: assignee: tpapaioa casecomponent: Configuration caseimportance: low initialEstimate: 1/8h """ request.addfinalizer(partial(clear_ntp_settings, appliance, empty_ntp_dict)) ntp_file_date_stamp = appliance.ssh_client.run_command( "stat --format '%y' /etc/chrony.conf").output appliance.server.settings.update_ntp_servers(dict(list(zip( ntp_servers_keys, [fauxfactory.gen_alphanumeric() for _ in range(3)])))) wait_for(lambda: ntp_file_date_stamp != appliance.ssh_client.run_command( "stat --format '%y' /etc/chrony.conf").output, num_sec=60, delay=10) @pytest.mark.tier(3) def test_ntp_conf_file_update_check(request, appliance, empty_ntp_dict, ntp_servers_keys): """ Polarion: assignee: tpapaioa casecomponent: Configuration initialEstimate: 1/4h """ request.addfinalizer(lambda: appliance.server.settings.update_ntp_servers(empty_ntp_dict)) ntp_file_date_stamp = appliance.ssh_client.run_command( "stat --format '%y' /etc/chrony.conf").output # Adding the ntp server values appliance.server.settings.update_ntp_servers(dict(list(zip( ntp_servers_keys, [ntp_server for ntp_server in cfme_data['clock_servers']])))) wait_for(lambda: ntp_file_date_stamp != appliance.ssh_client.run_command( "stat --format '%y' /etc/chrony.conf").output, num_sec=60, delay=10) for clock in cfme_data['clock_servers']: status, wait_time = wait_for(lambda: check_ntp_grep(appliance, clock), fail_condition=False, num_sec=60, delay=5) assert status is True, "Clock value {} not update in /etc/chrony.conf file".format(clock) # Unsetting the ntp server values ntp_file_date_stamp = appliance.ssh_client.run_command( "stat --format '%y' /etc/chrony.conf").output appliance.server.settings.update_ntp_servers(empty_ntp_dict) wait_for(lambda: ntp_file_date_stamp != appliance.ssh_client.run_command( "stat --format '%y' 
/etc/chrony.conf").output, num_sec=60, delay=10) for clock in cfme_data['clock_servers']: status, wait_time = wait_for(lambda: check_ntp_grep(appliance, clock), fail_condition=True, num_sec=60, delay=5) assert status is False, "Found clock record '{}' in /etc/chrony.conf file".format(clock) @pytest.mark.tier(3) def test_ntp_server_check(request, appliance, ntp_servers_keys, empty_ntp_dict): """ Polarion: assignee: tpapaioa initialEstimate: 1/4h casecomponent: Configuration """ request.addfinalizer(lambda: appliance.server.settings.update_ntp_servers(empty_ntp_dict)) orig_date = appliance_date(appliance) past_date = orig_date - timedelta(days=10) logger.info("dates: orig_date - %s, past_date - %s", orig_date, past_date) appliance.ssh_client.run_command("date +%Y%m%d -s \"{}\"" .format(past_date.strftime('%Y%m%d'))) new_date = appliance_date(appliance) if new_date != orig_date: logger.info("Successfully modified the date in the appliance") # Configuring the ntp server and restarting the appliance # checking if ntp property is available, adding if it is not available appliance.server.settings.update_ntp_servers(dict(list(zip( ntp_servers_keys, [ntp_server for ntp_server in cfme_data['clock_servers']])))) # adding the ntp interval to 1 minute and updating the configuration ntp_settings = appliance.advanced_settings.get('ntp', {}) # should have the servers in it ntp_settings['interval'] = '1.minutes' # just modify interval appliance.update_advanced_settings({'ntp': ntp_settings}) # restarting the evmserverd for NTP to work appliance.restart_evm_rude() appliance.wait_for_web_ui(timeout=1200) # Incase if ntpd service is stopped appliance.ssh_client.run_command("service chronyd restart") # Providing two hour runtime for the test run to avoid day changing problem # (in case if the is triggerred in the midnight) wait_for( lambda: (orig_date - appliance_date(appliance)).total_seconds() <= 7200, num_sec=300) else: raise Exception("Failed modifying the system date") # Calling 
the browser quit() method to compensate the session after the evm service restart quit() @pytest.mark.tier(3) def test_clear_ntp_settings(request, appliance, empty_ntp_dict): """ Polarion: assignee: tpapaioa casecomponent: Configuration caseimportance: low initialEstimate: 1/30h """ request.addfinalizer(lambda: appliance.server.settings.update_ntp_servers(empty_ntp_dict))
Yadnyawalkya/integration_tests
cfme/tests/configure/test_ntp_server.py
cfme/utils/smem_memory_monitor.py
""" Fixtures for Capacity and Utilization """ import fauxfactory import pytest from cfme.utils import conf from cfme.utils.ssh import SSHClient @pytest.fixture(scope="module") def enable_candu(appliance): candu = appliance.collections.candus server_settings = appliance.server.settings original_roles = server_settings.server_roles_db server_settings.enable_server_roles( 'ems_metrics_coordinator', 'ems_metrics_collector', 'ems_metrics_processor' ) server_settings.disable_server_roles( 'automate', 'smartstate' ) candu.enable_all() yield candu.disable_all() server_settings.update_server_roles_db(original_roles) @pytest.fixture(scope="module") def collect_data(appliance, provider, interval='hourly', back='7.days'): """Collect hourly back data for vsphere provider""" vm_name = provider.data['cap_and_util']['chargeback_vm'] # Capture real-time C&U data ret = appliance.ssh_client.run_rails_command( "\"vm = Vm.where(:ems_id => {}).where(:name => {})[0];\ vm.perf_capture({}, {}.ago.utc, Time.now.utc)\"" .format(provider.id, repr(vm_name), repr(interval), back)) return ret.success @pytest.fixture(scope="module") def enable_candu_category(appliance): """Enable capture C&U Data for tag category location by navigating to the Configuration -> Region page. Click 'Tags' tab , select required company category under 'My Company Categories' and enable 'Capture C & U Data' for the category. 
""" collection = appliance.collections.categories location_category = collection.instantiate(name="location", display_name="Location") if not location_category.capture_candu: location_category.update(updates={"capture_candu": True}) return location_category @pytest.fixture(scope="function") def candu_tag_vm(provider, enable_candu_category): """Add location tag to VM if not available""" collection = provider.appliance.provider_based_collection(provider) vm = collection.instantiate('cu-24x7', provider) tag = enable_candu_category.collections.tags.instantiate(name="london", display_name="London") vm.add_tag(tag, exists_check=True) return vm @pytest.fixture(scope="module") def temp_appliance_extended_db(temp_appliance_preconfig): app = temp_appliance_preconfig app.evmserverd.stop() app.db.extend_partition() app.evmserverd.start() return app @pytest.fixture(scope="module") def candu_db_restore(temp_appliance_extended_db): app = temp_appliance_extended_db # get DB backup file db_storage_hostname = conf.cfme_data.bottlenecks.hostname db_storage_ssh = SSHClient(hostname=db_storage_hostname, **conf.credentials.bottlenecks) rand_filename = "/tmp/db.backup_{}".format(fauxfactory.gen_alphanumeric()) db_storage_ssh.get_file("{}/candu.db.backup".format( conf.cfme_data.bottlenecks.backup_path), rand_filename) app.ssh_client.put_file(rand_filename, "/tmp/evm_db.backup") app.evmserverd.stop() app.db.drop() app.db.create() app.db.restore() # When you load a database from an older version of the application, you always need to # run migrations. # https://bugzilla.redhat.com/show_bug.cgi?id=1643250 app.db.migrate() app.db.fix_auth_key() app.db.fix_auth_dbyml() app.evmserverd.start() app.wait_for_web_ui()
from datetime import datetime from datetime import timedelta from functools import partial import fauxfactory import pytest from cfme import test_requirements from cfme.utils.browser import quit from cfme.utils.conf import cfme_data from cfme.utils.log import logger from cfme.utils.wait import wait_for pytestmark = [test_requirements.configuration] @pytest.fixture def ntp_servers_keys(appliance): return appliance.server.settings.ntp_servers_fields_keys @pytest.fixture def empty_ntp_dict(ntp_servers_keys): return dict.fromkeys(ntp_servers_keys, '') def appliance_date(appliance): result = appliance.ssh_client.run_command("date --iso-8601=hours") return datetime.strptime(result.output.rsplit('-', 1)[0], '%Y-%m-%dT%H') def check_ntp_grep(appliance, clock): result = appliance.ssh_client.run_command( "cat /etc/chrony.conf| grep {}".format(clock)) return not bool(result.rc) def clear_ntp_settings(appliance, empty_ntp_dict): ntp_file_date_stamp = appliance.ssh_client.run_command( "stat --format '%y' /etc/chrony.conf").output appliance.server.settings.update_ntp_servers(empty_ntp_dict) wait_for(lambda: ntp_file_date_stamp != appliance.ssh_client.run_command( "stat --format '%y' /etc/chrony.conf").output, num_sec=60, delay=10) @pytest.mark.tier(2) def test_ntp_crud(request, appliance, empty_ntp_dict, ntp_servers_keys): """ Polarion: assignee: tpapaioa casecomponent: Configuration caseimportance: low initialEstimate: 1/12h """ # Adding finalizer request.addfinalizer(lambda: appliance.server.settings.update_ntp_servers(empty_ntp_dict)) """ Insert, Update and Delete the NTP servers """ # set from yaml file appliance.server.settings.update_ntp_servers(dict(list(zip( ntp_servers_keys, [ntp_server for ntp_server in cfme_data['clock_servers']])))) # Set from random values appliance.server.settings.update_ntp_servers(dict(list(zip( ntp_servers_keys, [fauxfactory.gen_alphanumeric() for _ in range(3)])))) # Deleting the ntp values 
appliance.server.settings.update_ntp_servers(empty_ntp_dict) @pytest.mark.tier(3) def test_ntp_server_max_character(request, appliance, ntp_servers_keys, empty_ntp_dict): """ Polarion: assignee: tpapaioa casecomponent: Configuration caseimportance: low initialEstimate: 1/8h """ request.addfinalizer(partial(clear_ntp_settings, appliance, empty_ntp_dict)) ntp_file_date_stamp = appliance.ssh_client.run_command( "stat --format '%y' /etc/chrony.conf").output appliance.server.settings.update_ntp_servers(dict(list(zip( ntp_servers_keys, [fauxfactory.gen_alphanumeric() for _ in range(3)])))) wait_for(lambda: ntp_file_date_stamp != appliance.ssh_client.run_command( "stat --format '%y' /etc/chrony.conf").output, num_sec=60, delay=10) @pytest.mark.tier(3) def test_ntp_conf_file_update_check(request, appliance, empty_ntp_dict, ntp_servers_keys): """ Polarion: assignee: tpapaioa casecomponent: Configuration initialEstimate: 1/4h """ request.addfinalizer(lambda: appliance.server.settings.update_ntp_servers(empty_ntp_dict)) ntp_file_date_stamp = appliance.ssh_client.run_command( "stat --format '%y' /etc/chrony.conf").output # Adding the ntp server values appliance.server.settings.update_ntp_servers(dict(list(zip( ntp_servers_keys, [ntp_server for ntp_server in cfme_data['clock_servers']])))) wait_for(lambda: ntp_file_date_stamp != appliance.ssh_client.run_command( "stat --format '%y' /etc/chrony.conf").output, num_sec=60, delay=10) for clock in cfme_data['clock_servers']: status, wait_time = wait_for(lambda: check_ntp_grep(appliance, clock), fail_condition=False, num_sec=60, delay=5) assert status is True, "Clock value {} not update in /etc/chrony.conf file".format(clock) # Unsetting the ntp server values ntp_file_date_stamp = appliance.ssh_client.run_command( "stat --format '%y' /etc/chrony.conf").output appliance.server.settings.update_ntp_servers(empty_ntp_dict) wait_for(lambda: ntp_file_date_stamp != appliance.ssh_client.run_command( "stat --format '%y' 
/etc/chrony.conf").output, num_sec=60, delay=10) for clock in cfme_data['clock_servers']: status, wait_time = wait_for(lambda: check_ntp_grep(appliance, clock), fail_condition=True, num_sec=60, delay=5) assert status is False, "Found clock record '{}' in /etc/chrony.conf file".format(clock) @pytest.mark.tier(3) def test_ntp_server_check(request, appliance, ntp_servers_keys, empty_ntp_dict): """ Polarion: assignee: tpapaioa initialEstimate: 1/4h casecomponent: Configuration """ request.addfinalizer(lambda: appliance.server.settings.update_ntp_servers(empty_ntp_dict)) orig_date = appliance_date(appliance) past_date = orig_date - timedelta(days=10) logger.info("dates: orig_date - %s, past_date - %s", orig_date, past_date) appliance.ssh_client.run_command("date +%Y%m%d -s \"{}\"" .format(past_date.strftime('%Y%m%d'))) new_date = appliance_date(appliance) if new_date != orig_date: logger.info("Successfully modified the date in the appliance") # Configuring the ntp server and restarting the appliance # checking if ntp property is available, adding if it is not available appliance.server.settings.update_ntp_servers(dict(list(zip( ntp_servers_keys, [ntp_server for ntp_server in cfme_data['clock_servers']])))) # adding the ntp interval to 1 minute and updating the configuration ntp_settings = appliance.advanced_settings.get('ntp', {}) # should have the servers in it ntp_settings['interval'] = '1.minutes' # just modify interval appliance.update_advanced_settings({'ntp': ntp_settings}) # restarting the evmserverd for NTP to work appliance.restart_evm_rude() appliance.wait_for_web_ui(timeout=1200) # Incase if ntpd service is stopped appliance.ssh_client.run_command("service chronyd restart") # Providing two hour runtime for the test run to avoid day changing problem # (in case if the is triggerred in the midnight) wait_for( lambda: (orig_date - appliance_date(appliance)).total_seconds() <= 7200, num_sec=300) else: raise Exception("Failed modifying the system date") # Calling 
the browser quit() method to compensate the session after the evm service restart quit() @pytest.mark.tier(3) def test_clear_ntp_settings(request, appliance, empty_ntp_dict): """ Polarion: assignee: tpapaioa casecomponent: Configuration caseimportance: low initialEstimate: 1/30h """ request.addfinalizer(lambda: appliance.server.settings.update_ntp_servers(empty_ntp_dict))
Yadnyawalkya/integration_tests
cfme/tests/configure/test_ntp_server.py
cfme/fixtures/candu.py
# Licensed under a 3-clause BSD style license - see LICENSE.rst import contextlib import re import sys import inspect import os from collections import OrderedDict from operator import itemgetter import numpy as np __all__ = ['register_reader', 'register_writer', 'register_identifier', 'identify_format', 'get_reader', 'get_writer', 'read', 'write', 'get_formats', 'IORegistryError', 'delay_doc_updates', 'UnifiedReadWriteMethod', 'UnifiedReadWrite'] __doctest_skip__ = ['register_identifier'] _readers = OrderedDict() _writers = OrderedDict() _identifiers = OrderedDict() PATH_TYPES = (str, os.PathLike) class IORegistryError(Exception): """Custom error for registry clashes. """ pass # If multiple formats are added to one class the update of the docs is quite # expensive. Classes for which the doc update is temporarly delayed are added # to this set. _delayed_docs_classes = set() @contextlib.contextmanager def delay_doc_updates(cls): """Contextmanager to disable documentation updates when registering reader and writer. The documentation is only built once when the contextmanager exits. .. versionadded:: 1.3 Parameters ---------- cls : class Class for which the documentation updates should be delayed. Notes ----- Registering multiple readers and writers can cause significant overhead because the documentation of the corresponding ``read`` and ``write`` methods are build every time. .. warning:: This contextmanager is experimental and may be replaced by a more general approach. Examples -------- see for example the source code of ``astropy.table.__init__``. """ _delayed_docs_classes.add(cls) yield _delayed_docs_classes.discard(cls) _update__doc__(cls, 'read') _update__doc__(cls, 'write') def get_formats(data_class=None, readwrite=None): """ Get the list of registered I/O formats as a Table. Parameters ---------- data_class : class, optional Filter readers/writer to match data class (default = all classes). 
readwrite : str or None, optional Search only for readers (``"Read"``) or writers (``"Write"``). If None search for both. Default is None. .. versionadded:: 1.3 Returns ------- format_table : :class:`~astropy.table.Table` Table of available I/O formats. """ from astropy.table import Table format_classes = sorted(set(_readers) | set(_writers), key=itemgetter(0)) rows = [] for format_class in format_classes: if (data_class is not None and not _is_best_match( data_class, format_class[1], format_classes)): continue has_read = 'Yes' if format_class in _readers else 'No' has_write = 'Yes' if format_class in _writers else 'No' has_identify = 'Yes' if format_class in _identifiers else 'No' # Check if this is a short name (e.g. 'rdb') which is deprecated in # favor of the full 'ascii.rdb'. ascii_format_class = ('ascii.' + format_class[0], format_class[1]) deprecated = 'Yes' if ascii_format_class in format_classes else '' rows.append((format_class[1].__name__, format_class[0], has_read, has_write, has_identify, deprecated)) if readwrite is not None: if readwrite == 'Read': rows = [row for row in rows if row[2] == 'Yes'] elif readwrite == 'Write': rows = [row for row in rows if row[3] == 'Yes'] else: raise ValueError('unrecognized value for "readwrite": {0}.\n' 'Allowed are "Read" and "Write" and None.') # Sorting the list of tuples is much faster than sorting it after the table # is created. (#5262) if rows: # Indices represent "Data Class", "Deprecated" and "Format". data = list(zip(*sorted(rows, key=itemgetter(0, 5, 1)))) else: data = None format_table = Table(data, names=('Data class', 'Format', 'Read', 'Write', 'Auto-identify', 'Deprecated')) if not np.any(format_table['Deprecated'] == 'Yes'): format_table.remove_column('Deprecated') return format_table def _update__doc__(data_class, readwrite): """ Update the docstring to include all the available readers / writers for the ``data_class.read`` or ``data_class.write`` functions (respectively). 
""" FORMATS_TEXT = 'The available built-in formats are:' # Get the existing read or write method and its docstring class_readwrite_func = getattr(data_class, readwrite) if not isinstance(class_readwrite_func.__doc__, str): # No docstring--could just be test code, or possibly code compiled # without docstrings return lines = class_readwrite_func.__doc__.splitlines() # Find the location of the existing formats table if it exists sep_indices = [ii for ii, line in enumerate(lines) if FORMATS_TEXT in line] if sep_indices: # Chop off the existing formats table, including the initial blank line chop_index = sep_indices[0] lines = lines[:chop_index] # Find the minimum indent, skipping the first line because it might be odd matches = [re.search(r'(\S)', line) for line in lines[1:]] left_indent = ' ' * min(match.start() for match in matches if match) # Get the available unified I/O formats for this class # Include only formats that have a reader, and drop the 'Data class' column format_table = get_formats(data_class, readwrite.capitalize()) format_table.remove_column('Data class') # Get the available formats as a table, then munge the output of pformat() # a bit and put it into the docstring. new_lines = format_table.pformat(max_lines=-1, max_width=80) table_rst_sep = re.sub('-', '=', new_lines[1]) new_lines[1] = table_rst_sep new_lines.insert(0, table_rst_sep) new_lines.append(table_rst_sep) # Check for deprecated names and include a warning at the end. if 'Deprecated' in format_table.colnames: new_lines.extend(['', 'Deprecated format names like ``aastex`` will be ' 'removed in a future version. Use the full ', 'name (e.g. ``ascii.aastex``) instead.']) new_lines = [FORMATS_TEXT, ''] + new_lines lines.extend([left_indent + line for line in new_lines]) # Depending on Python version and whether class_readwrite_func is # an instancemethod or classmethod, one of the following will work. 
if isinstance(class_readwrite_func, UnifiedReadWrite): class_readwrite_func.__class__.__doc__ = '\n'.join(lines) else: try: class_readwrite_func.__doc__ = '\n'.join(lines) except AttributeError: class_readwrite_func.__func__.__doc__ = '\n'.join(lines) def register_reader(data_format, data_class, function, force=False, priority=0): """ Register a reader function. Parameters ---------- data_format : str The data format identifier. This is the string that will be used to specify the data type when reading. data_class : class The class of the object that the reader produces. function : function The function to read in a data object. force : bool, optional Whether to override any existing function if already present. Default is ``False``. priority : int, optional The priority of the reader, used to compare possible formats when trying to determine the best reader to use. Higher priorities are preferred over lower priorities, with the default priority being 0 (negative numbers are allowed though). """ if not (data_format, data_class) in _readers or force: _readers[(data_format, data_class)] = function, priority else: raise IORegistryError("Reader for format '{}' and class '{}' is " 'already defined' ''.format(data_format, data_class.__name__)) if data_class not in _delayed_docs_classes: _update__doc__(data_class, 'read') def unregister_reader(data_format, data_class): """ Unregister a reader function Parameters ---------- data_format : str The data format identifier. data_class : class The class of the object that the reader produces. """ if (data_format, data_class) in _readers: _readers.pop((data_format, data_class)) else: raise IORegistryError("No reader defined for format '{}' and class '{}'" ''.format(data_format, data_class.__name__)) if data_class not in _delayed_docs_classes: _update__doc__(data_class, 'read') def register_writer(data_format, data_class, function, force=False, priority=0): """ Register a table writer function. 
Parameters ---------- data_format : str The data format identifier. This is the string that will be used to specify the data type when writing. data_class : class The class of the object that can be written. function : function The function to write out a data object. force : bool, optional Whether to override any existing function if already present. Default is ``False``. priority : int, optional The priority of the writer, used to compare possible formats when trying to determine the best writer to use. Higher priorities are preferred over lower priorities, with the default priority being 0 (negative numbers are allowed though). """ if not (data_format, data_class) in _writers or force: _writers[(data_format, data_class)] = function, priority else: raise IORegistryError("Writer for format '{}' and class '{}' is " 'already defined' ''.format(data_format, data_class.__name__)) if data_class not in _delayed_docs_classes: _update__doc__(data_class, 'write') def unregister_writer(data_format, data_class): """ Unregister a writer function Parameters ---------- data_format : str The data format identifier. data_class : class The class of the object that can be written. """ if (data_format, data_class) in _writers: _writers.pop((data_format, data_class)) else: raise IORegistryError("No writer defined for format '{}' and class '{}'" ''.format(data_format, data_class.__name__)) if data_class not in _delayed_docs_classes: _update__doc__(data_class, 'write') def register_identifier(data_format, data_class, identifier, force=False): """ Associate an identifier function with a specific data type. Parameters ---------- data_format : str The data format identifier. This is the string that is used to specify the data type when reading/writing. data_class : class The class of the object that can be written. identifier : function A function that checks the argument specified to `read` or `write` to determine whether the input can be interpreted as a table of type ``data_format``. 
This function should take the following arguments: - ``origin``: A string ``"read"`` or ``"write"`` identifying whether the file is to be opened for reading or writing. - ``path``: The path to the file. - ``fileobj``: An open file object to read the file's contents, or `None` if the file could not be opened. - ``*args``: Positional arguments for the `read` or `write` function. - ``**kwargs``: Keyword arguments for the `read` or `write` function. One or both of ``path`` or ``fileobj`` may be `None`. If they are both `None`, the identifier will need to work from ``args[0]``. The function should return True if the input can be identified as being of format ``data_format``, and False otherwise. force : bool, optional Whether to override any existing function if already present. Default is ``False``. Examples -------- To set the identifier based on extensions, for formats that take a filename as a first argument, you can do for example:: >>> def my_identifier(*args, **kwargs): ... return isinstance(args[0], str) and args[0].endswith('.tbl') >>> register_identifier('ipac', Table, my_identifier) """ if not (data_format, data_class) in _identifiers or force: _identifiers[(data_format, data_class)] = identifier else: raise IORegistryError("Identifier for format '{}' and class '{}' is " 'already defined'.format(data_format, data_class.__name__)) def unregister_identifier(data_format, data_class): """ Unregister an identifier function Parameters ---------- data_format : str The data format identifier. data_class : class The class of the object that can be read/written. """ if (data_format, data_class) in _identifiers: _identifiers.pop((data_format, data_class)) else: raise IORegistryError("No identifier defined for format '{}' and class" " '{}'".format(data_format, data_class.__name__)) def identify_format(origin, data_class_required, path, fileobj, args, kwargs): """Loop through identifiers to see which formats match. 
Parameters ---------- origin : str A string ``"read`` or ``"write"`` identifying whether the file is to be opened for reading or writing. data_class_required : object The specified class for the result of `read` or the class that is to be written. path : str or path-like or None The path to the file or None. fileobj : file-like or None. An open file object to read the file's contents, or ``None`` if the file could not be opened. args : sequence Positional arguments for the `read` or `write` function. Note that these must be provided as sequence. kwargs : dict-like Keyword arguments for the `read` or `write` function. Note that this parameter must be `dict`-like. Returns ------- valid_formats : list List of matching formats. """ valid_formats = [] for data_format, data_class in _identifiers: if _is_best_match(data_class_required, data_class, _identifiers): if _identifiers[(data_format, data_class)]( origin, path, fileobj, *args, **kwargs): valid_formats.append(data_format) return valid_formats def _get_format_table_str(data_class, readwrite): format_table = get_formats(data_class, readwrite=readwrite) format_table.remove_column('Data class') format_table_str = '\n'.join(format_table.pformat(max_lines=-1)) return format_table_str def get_reader(data_format, data_class): """Get reader for ``data_format``. Parameters ---------- data_format : str The data format identifier. This is the string that is used to specify the data type when reading/writing. data_class : class The class of the object that can be written. Returns ------- reader : callable The registered reader function for this format and class. 
""" readers = [(fmt, cls) for fmt, cls in _readers if fmt == data_format] for reader_format, reader_class in readers: if _is_best_match(data_class, reader_class, readers): return _readers[(reader_format, reader_class)][0] else: format_table_str = _get_format_table_str(data_class, 'Read') raise IORegistryError( "No reader defined for format '{}' and class '{}'.\n\nThe " "available formats are:\n\n{}".format( data_format, data_class.__name__, format_table_str)) def get_writer(data_format, data_class): """Get writer for ``data_format``. Parameters ---------- data_format : str The data format identifier. This is the string that is used to specify the data type when reading/writing. data_class : class The class of the object that can be written. Returns ------- writer : callable The registered writer function for this format and class. """ writers = [(fmt, cls) for fmt, cls in _writers if fmt == data_format] for writer_format, writer_class in writers: if _is_best_match(data_class, writer_class, writers): return _writers[(writer_format, writer_class)][0] else: format_table_str = _get_format_table_str(data_class, 'Write') raise IORegistryError( "No writer defined for format '{}' and class '{}'.\n\nThe " "available formats are:\n\n{}".format( data_format, data_class.__name__, format_table_str)) def read(cls, *args, format=None, cache=False, **kwargs): """ Read in data. The arguments passed to this method depend on the format. 
""" ctx = None try: if format is None: path = None fileobj = None if len(args): if isinstance(args[0], PATH_TYPES) and not os.path.isdir(args[0]): from astropy.utils.data import get_readable_fileobj # path might be a os.PathLike object if isinstance(args[0], os.PathLike): args = (os.fspath(args[0]),) + args[1:] path = args[0] try: ctx = get_readable_fileobj(args[0], encoding='binary', cache=cache) fileobj = ctx.__enter__() except OSError: raise except Exception: fileobj = None else: args = [fileobj] + list(args[1:]) elif hasattr(args[0], 'read'): path = None fileobj = args[0] format = _get_valid_format( 'read', cls, path, fileobj, args, kwargs) reader = get_reader(format, cls) data = reader(*args, **kwargs) if not isinstance(data, cls): # User has read with a subclass where only the parent class is # registered. This returns the parent class, so try coercing # to desired subclass. try: data = cls(data) except Exception: raise TypeError('could not convert reader output to {} ' 'class.'.format(cls.__name__)) finally: if ctx is not None: ctx.__exit__(*sys.exc_info()) return data def write(data, *args, format=None, **kwargs): """ Write out data. The arguments passed to this method depend on the format. """ if format is None: path = None fileobj = None if len(args): if isinstance(args[0], PATH_TYPES): # path might be a os.PathLike object if isinstance(args[0], os.PathLike): args = (os.fspath(args[0]),) + args[1:] path = args[0] fileobj = None elif hasattr(args[0], 'read'): path = None fileobj = args[0] format = _get_valid_format( 'write', data.__class__, path, fileobj, args, kwargs) writer = get_writer(format, data.__class__) writer(data, *args, **kwargs) def _is_best_match(class1, class2, format_classes): """ Determine if class2 is the "best" match for class1 in the list of classes. It is assumed that (class2 in classes) is True. 
class2 is the the best match if: - ``class1`` is a subclass of ``class2`` AND - ``class2`` is the nearest ancestor of ``class1`` that is in classes (which includes the case that ``class1 is class2``) """ if issubclass(class1, class2): classes = {cls for fmt, cls in format_classes} for parent in class1.__mro__: if parent is class2: # class2 is closest registered ancestor return True if parent in classes: # class2 was superceded return False return False def _get_valid_format(mode, cls, path, fileobj, args, kwargs): """ Returns the first valid format that can be used to read/write the data in question. Mode can be either 'read' or 'write'. """ valid_formats = identify_format(mode, cls, path, fileobj, args, kwargs) if len(valid_formats) == 0: format_table_str = _get_format_table_str(cls, mode.capitalize()) raise IORegistryError("Format could not be identified based on the" " file name or contents, please provide a" " 'format' argument.\n" "The available formats are:\n" "{}".format(format_table_str)) elif len(valid_formats) > 1: return _get_highest_priority_format(mode, cls, valid_formats) return valid_formats[0] def _get_highest_priority_format(mode, cls, valid_formats): """ Returns the reader or writer with the highest priority. If it is a tie, error. """ if mode == "read": format_dict = _readers mode_loader = "reader" elif mode == "write": format_dict = _writers mode_loader = "writer" best_formats = [] current_priority = - np.inf for format in valid_formats: try: _, priority = format_dict[(format, cls)] except KeyError: # We could throw an exception here, but get_reader/get_writer handle # this case better, instead maximally deprioritise the format. 
priority = - np.inf if priority == current_priority: best_formats.append(format) elif priority > current_priority: best_formats = [format] current_priority = priority if len(best_formats) > 1: raise IORegistryError("Format is ambiguous - options are: {}".format( ', '.join(sorted(valid_formats, key=itemgetter(0))) )) return best_formats[0] class UnifiedReadWrite: """Base class for the worker object used in unified read() or write() methods. This lightweight object is created for each `read()` or `write()` call via ``read`` / ``write`` descriptors on the data object class. The key driver is to allow complete format-specific documentation of available method options via a ``help()`` method, e.g. ``Table.read.help('fits')``. Subclasses must define a ``__call__`` method which is what actually gets called when the data object ``read()`` or ``write()`` method is called. For the canonical example see the `~astropy.table.Table` class implementation (in particular the ``connect.py`` module there). Parameters ---------- instance : object Descriptor calling instance or None if no instance cls : type Descriptor calling class (either owner class or instance class) method_name : str Method name, either 'read' or 'write' """ def __init__(self, instance, cls, method_name): self._instance = instance self._cls = cls self._method_name = method_name # 'read' or 'write' def help(self, format=None, out=None): """Output help documentation for the specified unified I/O ``format``. By default the help output is printed to the console via ``pydoc.pager``. Instead one can supplied a file handle object as ``out`` and the output will be written to that handle. Parameters ---------- format : str Unified I/O format name, e.g. 
'fits' or 'ascii.ecsv' out : None or path-like Output destination (default is stdout via a pager) """ cls = self._cls method_name = self._method_name # Get reader or writer function get_func = get_reader if method_name == 'read' else get_writer try: if format: read_write_func = get_func(format, cls) except IORegistryError as err: reader_doc = 'ERROR: ' + str(err) else: if format: # Format-specific header = ("{}.{}(format='{}') documentation\n" .format(cls.__name__, method_name, format)) doc = read_write_func.__doc__ else: # General docs header = f'{cls.__name__}.{method_name} general documentation\n' doc = getattr(cls, method_name).__doc__ reader_doc = re.sub('.', '=', header) reader_doc += header reader_doc += re.sub('.', '=', header) reader_doc += os.linesep if doc is not None: reader_doc += inspect.cleandoc(doc) if out is None: import pydoc pydoc.pager(reader_doc) else: out.write(reader_doc) def list_formats(self, out=None): """Print a list of available formats to console (or ``out`` filehandle) out : None or file handle object Output destination (default is stdout via a pager) """ tbl = get_formats(self._cls, self._method_name.capitalize()) del tbl['Data class'] if out is None: tbl.pprint(max_lines=-1, max_width=-1) else: out.write('\n'.join(tbl.pformat(max_lines=-1, max_width=-1))) return out class UnifiedReadWriteMethod(property): """Descriptor class for creating read() and write() methods in unified I/O. The canonical example is in the ``Table`` class, where the ``connect.py`` module creates subclasses of the ``UnifiedReadWrite`` class. These have custom ``__call__`` methods that do the setup work related to calling the registry read() or write() functions. 
With this, the ``Table`` class defines read and write methods as follows:: read = UnifiedReadWriteMethod(TableRead) write = UnifiedReadWriteMethod(TableWrite) Parameters ---------- func : `~astropy.io.registry.UnifiedReadWrite` subclass Class that defines read or write functionality """ # We subclass property to ensure that __set__ is defined and that, # therefore, we are a data descriptor, which cannot be overridden. # This also means we automatically inherit the __doc__ of fget (which will # be a UnifiedReadWrite subclass), and that this docstring gets recognized # and properly typeset by sphinx (which was previously an issue; see # gh-11554). # We override __get__ to pass both instance and class to UnifiedReadWrite. def __get__(self, instance, owner_cls): return self.fget(instance, owner_cls)
# Note that we test the main astropy.wcs.WCS class directly rather than testing
# the mix-in class on its own (since it's not functional without being used as
# a mix-in)

import warnings
from packaging.version import Version

import numpy as np
import pytest
from numpy.testing import assert_equal, assert_allclose
from itertools import product

from astropy import units as u
from astropy.time import Time
from astropy.tests.helper import assert_quantity_allclose
from astropy.units import Quantity
from astropy.coordinates import ICRS, FK5, Galactic, SkyCoord, SpectralCoord, ITRS, EarthLocation
from astropy.io.fits import Header
from astropy.io.fits.verify import VerifyWarning
from astropy.units.core import UnitsWarning
from astropy.utils.data import get_pkg_data_filename
from astropy.wcs.wcs import WCS, FITSFixedWarning
from astropy.wcs.wcsapi.fitswcs import custom_ctype_to_ucd_mapping, VELOCITY_FRAMES
from astropy.wcs._wcs import __version__ as wcsver
from astropy.utils import iers
from astropy.utils.exceptions import AstropyUserWarning

###############################################################################
# The following example is the simplest WCS with default values
###############################################################################

WCS_EMPTY = WCS(naxis=1)
WCS_EMPTY.wcs.crpix = [1]


def test_empty():
    """Default 1D WCS: identity transform, dimensionless world axis."""

    wcs = WCS_EMPTY

    # Low-level API

    assert wcs.pixel_n_dim == 1
    assert wcs.world_n_dim == 1
    assert wcs.array_shape is None
    assert wcs.pixel_shape is None
    assert wcs.world_axis_physical_types == [None]
    assert wcs.world_axis_units == ['']
    assert wcs.pixel_axis_names == ['']
    assert wcs.world_axis_names == ['']

    assert_equal(wcs.axis_correlation_matrix, True)

    assert wcs.world_axis_object_components == [('world', 0, 'value')]

    assert wcs.world_axis_object_classes['world'][0] is Quantity
    assert wcs.world_axis_object_classes['world'][1] == ()
    assert wcs.world_axis_object_classes['world'][2]['unit'] is u.one

    assert_allclose(wcs.pixel_to_world_values(29), 29)
    assert_allclose(wcs.array_index_to_world_values(29), 29)

    # Scalar in -> scalar out
    assert np.ndim(wcs.pixel_to_world_values(29)) == 0
    assert np.ndim(wcs.array_index_to_world_values(29)) == 0

    assert_allclose(wcs.world_to_pixel_values(29), 29)
    assert_equal(wcs.world_to_array_index_values(29), (29,))

    assert np.ndim(wcs.world_to_pixel_values(29)) == 0
    assert np.ndim(wcs.world_to_array_index_values(29)) == 0

    # High-level API

    coord = wcs.pixel_to_world(29)
    assert_quantity_allclose(coord, 29 * u.one)
    assert np.ndim(coord) == 0

    coord = wcs.array_index_to_world(29)
    assert_quantity_allclose(coord, 29 * u.one)
    assert np.ndim(coord) == 0

    coord = 15 * u.one

    x = wcs.world_to_pixel(coord)
    assert_allclose(x, 15.)
    assert np.ndim(x) == 0

    i = wcs.world_to_array_index(coord)
    assert_equal(i, 15)
    assert np.ndim(i) == 0


###############################################################################
# The following example is a simple 2D image with celestial coordinates
###############################################################################

HEADER_SIMPLE_CELESTIAL = """
WCSAXES = 2
CTYPE1 = RA---TAN
CTYPE2 = DEC--TAN
CRVAL1 = 10
CRVAL2 = 20
CRPIX1 = 30
CRPIX2 = 40
CDELT1 = -0.1
CDELT2 = 0.1
CROTA2 = 0.
CUNIT1 = deg
CUNIT2 = deg
"""

with warnings.catch_warnings():
    warnings.simplefilter('ignore', VerifyWarning)
    WCS_SIMPLE_CELESTIAL = WCS(Header.fromstring(
        HEADER_SIMPLE_CELESTIAL, sep='\n'))


def test_simple_celestial():
    """2D TAN image: RA/Dec round-trips through low- and high-level APIs."""

    wcs = WCS_SIMPLE_CELESTIAL

    # Low-level API

    assert wcs.pixel_n_dim == 2
    assert wcs.world_n_dim == 2
    assert wcs.array_shape is None
    assert wcs.pixel_shape is None
    assert wcs.world_axis_physical_types == ['pos.eq.ra', 'pos.eq.dec']
    assert wcs.world_axis_units == ['deg', 'deg']
    assert wcs.pixel_axis_names == ['', '']
    assert wcs.world_axis_names == ['', '']

    assert_equal(wcs.axis_correlation_matrix, True)

    assert wcs.world_axis_object_components == [('celestial', 0, 'spherical.lon.degree'),
                                                ('celestial', 1, 'spherical.lat.degree')]

    assert wcs.world_axis_object_classes['celestial'][0] is SkyCoord
    assert wcs.world_axis_object_classes['celestial'][1] == ()
    assert isinstance(wcs.world_axis_object_classes['celestial'][2]['frame'], ICRS)
    assert wcs.world_axis_object_classes['celestial'][2]['unit'] is u.deg

    assert_allclose(wcs.pixel_to_world_values(29, 39), (10, 20))
    assert_allclose(wcs.array_index_to_world_values(39, 29), (10, 20))

    assert_allclose(wcs.world_to_pixel_values(10, 20), (29., 39.))
    assert_equal(wcs.world_to_array_index_values(10, 20), (39, 29))

    # High-level API

    coord = wcs.pixel_to_world(29, 39)
    assert isinstance(coord, SkyCoord)
    assert isinstance(coord.frame, ICRS)
    assert_allclose(coord.ra.deg, 10)
    assert_allclose(coord.dec.deg, 20)

    coord = wcs.array_index_to_world(39, 29)
    assert isinstance(coord, SkyCoord)
    assert isinstance(coord.frame, ICRS)
    assert_allclose(coord.ra.deg, 10)
    assert_allclose(coord.dec.deg, 20)

    coord = SkyCoord(10, 20, unit='deg', frame='icrs')

    x, y = wcs.world_to_pixel(coord)
    assert_allclose(x, 29.)
    assert_allclose(y, 39.)

    i, j = wcs.world_to_array_index(coord)
    assert_equal(i, 39)
    assert_equal(j, 29)

    # Check that if the coordinates are passed in a different frame things still
    # work properly

    coord_galactic = coord.galactic

    x, y = wcs.world_to_pixel(coord_galactic)
    assert_allclose(x, 29.)
    assert_allclose(y, 39.)

    i, j = wcs.world_to_array_index(coord_galactic)
    assert_equal(i, 39)
    assert_equal(j, 29)

    # Check that we can actually index the array

    data = np.arange(3600).reshape((60, 60))

    coord = SkyCoord(10, 20, unit='deg', frame='icrs')
    index = wcs.world_to_array_index(coord)
    assert_equal(data[index], 2369)

    coord = SkyCoord([10, 12], [20, 22], unit='deg', frame='icrs')
    index = wcs.world_to_array_index(coord)
    assert_equal(data[index], [2369, 3550])


###############################################################################
# The following example is a spectral cube with axes in an unusual order
###############################################################################

HEADER_SPECTRAL_CUBE = """
WCSAXES = 3
CTYPE1 = GLAT-CAR
CTYPE2 = FREQ
CTYPE3 = GLON-CAR
CNAME1 = Latitude
CNAME2 = Frequency
CNAME3 = Longitude
CRVAL1 = 10
CRVAL2 = 20
CRVAL3 = 25
CRPIX1 = 30
CRPIX2 = 40
CRPIX3 = 45
CDELT1 = -0.1
CDELT2 = 0.5
CDELT3 = 0.1
CUNIT1 = deg
CUNIT2 = Hz
CUNIT3 = deg
"""

with warnings.catch_warnings():
    warnings.simplefilter('ignore', VerifyWarning)
    WCS_SPECTRAL_CUBE = WCS(Header.fromstring(HEADER_SPECTRAL_CUBE, sep='\n'))


def test_spectral_cube():
    """Spectral cube with (lat, freq, lon) axis order: API + round-trips."""

    # Spectral cube with a weird axis ordering

    wcs = WCS_SPECTRAL_CUBE

    # Low-level API

    assert wcs.pixel_n_dim == 3
    assert wcs.world_n_dim == 3
    assert wcs.array_shape is None
    assert wcs.pixel_shape is None
    assert wcs.world_axis_physical_types == ['pos.galactic.lat', 'em.freq', 'pos.galactic.lon']
    assert wcs.world_axis_units == ['deg', 'Hz', 'deg']
    assert wcs.pixel_axis_names == ['', '', '']
    assert wcs.world_axis_names == ['Latitude', 'Frequency', 'Longitude']

    # lat/lon pixel axes are correlated with each other; freq stands alone.
    assert_equal(wcs.axis_correlation_matrix,
                 [[True, False, True],
                  [False, True, False],
                  [True, False, True]])

    assert len(wcs.world_axis_object_components) == 3
    assert wcs.world_axis_object_components[0] == ('celestial', 1, 'spherical.lat.degree')
    assert wcs.world_axis_object_components[1][:2] == ('spectral', 0)
    assert wcs.world_axis_object_components[2] == ('celestial', 0, 'spherical.lon.degree')

    assert wcs.world_axis_object_classes['celestial'][0] is SkyCoord
    assert wcs.world_axis_object_classes['celestial'][1] == ()
    assert isinstance(wcs.world_axis_object_classes['celestial'][2]['frame'], Galactic)
    assert wcs.world_axis_object_classes['celestial'][2]['unit'] is u.deg

    assert wcs.world_axis_object_classes['spectral'][0] is Quantity
    assert wcs.world_axis_object_classes['spectral'][1] == ()
    assert wcs.world_axis_object_classes['spectral'][2] == {}

    assert_allclose(wcs.pixel_to_world_values(29, 39, 44), (10, 20, 25))
    assert_allclose(wcs.array_index_to_world_values(44, 39, 29), (10, 20, 25))

    assert_allclose(wcs.world_to_pixel_values(10, 20, 25), (29., 39., 44.))
    assert_equal(wcs.world_to_array_index_values(10, 20, 25), (44, 39, 29))

    # High-level API

    coord, spec = wcs.pixel_to_world(29, 39, 44)
    assert isinstance(coord, SkyCoord)
    assert isinstance(coord.frame, Galactic)
    assert_allclose(coord.l.deg, 25)
    assert_allclose(coord.b.deg, 10)
    assert isinstance(spec, SpectralCoord)
    assert_allclose(spec.to_value(u.Hz), 20)

    coord, spec = wcs.array_index_to_world(44, 39, 29)
    assert isinstance(coord, SkyCoord)
    assert isinstance(coord.frame, Galactic)
    assert_allclose(coord.l.deg, 25)
    assert_allclose(coord.b.deg, 10)
    assert isinstance(spec, SpectralCoord)
    assert_allclose(spec.to_value(u.Hz), 20)

    coord = SkyCoord(25, 10, unit='deg', frame='galactic')
    spec = 20 * u.Hz

    with pytest.warns(AstropyUserWarning, match='No observer defined on WCS'):
        x, y, z = wcs.world_to_pixel(coord, spec)
    assert_allclose(x, 29.)
    assert_allclose(y, 39.)
    assert_allclose(z, 44.)

    # Order of world coordinates shouldn't matter
    with pytest.warns(AstropyUserWarning, match='No observer defined on WCS'):
        x, y, z = wcs.world_to_pixel(spec, coord)
    assert_allclose(x, 29.)
    assert_allclose(y, 39.)
    assert_allclose(z, 44.)

    with pytest.warns(AstropyUserWarning, match='No observer defined on WCS'):
        i, j, k = wcs.world_to_array_index(coord, spec)
    assert_equal(i, 44)
    assert_equal(j, 39)
    assert_equal(k, 29)

    # Order of world coordinates shouldn't matter
    with pytest.warns(AstropyUserWarning, match='No observer defined on WCS'):
        i, j, k = wcs.world_to_array_index(spec, coord)
    assert_equal(i, 44)
    assert_equal(j, 39)
    assert_equal(k, 29)


HEADER_SPECTRAL_CUBE_NONALIGNED = HEADER_SPECTRAL_CUBE.strip() + '\n' + """
PC2_3 = -0.5
PC3_2 = +0.5
"""

with warnings.catch_warnings():
    warnings.simplefilter('ignore', VerifyWarning)
    WCS_SPECTRAL_CUBE_NONALIGNED = WCS(Header.fromstring(
        HEADER_SPECTRAL_CUBE_NONALIGNED, sep='\n'))


def test_spectral_cube_nonaligned():
    """Correlation matrix reflects non-identity PC (cross-term) elements."""

    # Make sure that correlation matrix gets adjusted if there are non-identity
    # CD matrix terms.

    wcs = WCS_SPECTRAL_CUBE_NONALIGNED

    assert wcs.world_axis_physical_types == ['pos.galactic.lat', 'em.freq', 'pos.galactic.lon']
    assert wcs.world_axis_units == ['deg', 'Hz', 'deg']
    assert wcs.pixel_axis_names == ['', '', '']
    assert wcs.world_axis_names == ['Latitude', 'Frequency', 'Longitude']

    assert_equal(wcs.axis_correlation_matrix,
                 [[True, True, True],
                  [False, True, True],
                  [True, True, True]])

    # NOTE: we check world_axis_object_components and world_axis_object_classes
    # again here because in the past this failed when non-aligned axes were
    # present, so this serves as a regression test.

    assert len(wcs.world_axis_object_components) == 3
    assert wcs.world_axis_object_components[0] == ('celestial', 1, 'spherical.lat.degree')
    assert wcs.world_axis_object_components[1][:2] == ('spectral', 0)
    assert wcs.world_axis_object_components[2] == ('celestial', 0, 'spherical.lon.degree')

    assert wcs.world_axis_object_classes['celestial'][0] is SkyCoord
    assert wcs.world_axis_object_classes['celestial'][1] == ()
    assert isinstance(wcs.world_axis_object_classes['celestial'][2]['frame'], Galactic)
    assert wcs.world_axis_object_classes['celestial'][2]['unit'] is u.deg

    assert wcs.world_axis_object_classes['spectral'][0] is Quantity
    assert wcs.world_axis_object_classes['spectral'][1] == ()
    assert wcs.world_axis_object_classes['spectral'][2] == {}


###############################################################################
# The following example is from Rots et al (2015), Table 5. It represents a
# cube with two spatial dimensions and one time dimension
###############################################################################

HEADER_TIME_CUBE = """
SIMPLE = T / Fits standard
BITPIX = -32 / Bits per pixel
NAXIS = 3 / Number of axes
NAXIS1 = 2048 / Axis length
NAXIS2 = 2048 / Axis length
NAXIS3 = 11 / Axis length
DATE = '2008-10-28T14:39:06' / Date FITS file was generated
OBJECT = '2008 TC3' / Name of the object observed
EXPTIME = 1.0011 / Integration time
MJD-OBS = 54746.02749237 / Obs start
DATE-OBS= '2008-10-07T00:39:35.3342' / Observing date
TELESCOP= 'VISTA' / ESO Telescope Name
INSTRUME= 'VIRCAM' / Instrument used.
TIMESYS = 'UTC' / From Observatory Time System
TREFPOS = 'TOPOCENT' / Topocentric
MJDREF = 54746.0 / Time reference point in MJD
RADESYS = 'ICRS' / Not equinoctal
CTYPE2 = 'RA---ZPN' / Zenithal Polynomial Projection
CRVAL2 = 2.01824372640628 / RA at ref pixel
CUNIT2 = 'deg' / Angles are degrees always
CRPIX2 = 2956.6 / Pixel coordinate at ref point
CTYPE1 = 'DEC--ZPN' / Zenithal Polynomial Projection
CRVAL1 = 14.8289418840003 / Dec at ref pixel
CUNIT1 = 'deg' / Angles are degrees always
CRPIX1 = -448.2 / Pixel coordinate at ref point
CTYPE3 = 'UTC' / linear time (UTC)
CRVAL3 = 2375.341 / Relative time of first frame
CUNIT3 = 's' / Time unit
CRPIX3 = 1.0 / Pixel coordinate at ref point
CTYPE3A = 'TT' / alternative linear time (TT)
CRVAL3A = 2440.525 / Relative time of first frame
CUNIT3A = 's' / Time unit
CRPIX3A = 1.0 / Pixel coordinate at ref point
OBSGEO-B= -24.6157 / [deg] Tel geodetic latitute (=North)+
OBSGEO-L= -70.3976 / [deg] Tel geodetic longitude (=East)+
OBSGEO-H= 2530.0000 / [m] Tel height above reference ellipsoid
CRDER3 = 0.0819 / random error in timings from fit
CSYER3 = 0.0100 / absolute time error
PC1_1 = 0.999999971570892 / WCS transform matrix element
PC1_2 = 0.000238449608932 / WCS transform matrix element
PC2_1 = -0.000621542859395 / WCS transform matrix element
PC2_2 = 0.999999806842218 / WCS transform matrix element
CDELT1 = -9.48575432499806E-5 / Axis scale at reference point
CDELT2 = 9.48683176211164E-5 / Axis scale at reference point
CDELT3 = 13.3629 / Axis scale at reference point
PV1_1 = 1. / ZPN linear term
PV1_3 = 42. / ZPN cubic term
"""

with warnings.catch_warnings():
    warnings.simplefilter('ignore', (VerifyWarning, FITSFixedWarning))
    WCS_TIME_CUBE = WCS(Header.fromstring(HEADER_TIME_CUBE, sep='\n'))


def test_time_cube():
    """Cube with two celestial axes plus a UTC time axis (Rots et al. 2015)."""

    # Spectral cube with a weird axis ordering

    wcs = WCS_TIME_CUBE

    assert wcs.pixel_n_dim == 3
    assert wcs.world_n_dim == 3
    assert wcs.array_shape == (11, 2048, 2048)
    assert wcs.pixel_shape == (2048, 2048, 11)
    assert wcs.world_axis_physical_types == ['pos.eq.dec', 'pos.eq.ra', 'time']
    assert wcs.world_axis_units == ['deg', 'deg', 's']
    assert wcs.pixel_axis_names == ['', '', '']
    assert wcs.world_axis_names == ['', '', '']

    assert_equal(wcs.axis_correlation_matrix,
                 [[True, True, False],
                  [True, True, False],
                  [False, False, True]])

    components = wcs.world_axis_object_components
    assert components[0] == ('celestial', 1, 'spherical.lat.degree')
    assert components[1] == ('celestial', 0, 'spherical.lon.degree')
    # Time component uses a callable converter rather than an attribute path.
    assert components[2][:2] == ('time', 0)
    assert callable(components[2][2])

    assert wcs.world_axis_object_classes['celestial'][0] is SkyCoord
    assert wcs.world_axis_object_classes['celestial'][1] == ()
    assert isinstance(wcs.world_axis_object_classes['celestial'][2]['frame'], ICRS)
    assert wcs.world_axis_object_classes['celestial'][2]['unit'] is u.deg

    assert wcs.world_axis_object_classes['time'][0] is Time
    assert wcs.world_axis_object_classes['time'][1] == ()
    assert wcs.world_axis_object_classes['time'][2] == {}
    assert callable(wcs.world_axis_object_classes['time'][3])

    assert_allclose(wcs.pixel_to_world_values(-449.2, 2955.6, 0),
                    (14.8289418840003, 2.01824372640628, 2375.341))

    assert_allclose(wcs.array_index_to_world_values(0, 2955.6, -449.2),
                    (14.8289418840003, 2.01824372640628, 2375.341))

    assert_allclose(wcs.world_to_pixel_values(14.8289418840003, 2.01824372640628, 2375.341),
                    (-449.2, 2955.6, 0))
    assert_equal(wcs.world_to_array_index_values(14.8289418840003, 2.01824372640628, 2375.341),
                 (0, 2956, -449))

    # High-level API

    coord, time = wcs.pixel_to_world(29, 39, 44)
    assert isinstance(coord, SkyCoord)
    assert isinstance(coord.frame, ICRS)
    assert_allclose(coord.ra.deg, 1.7323356692202325)
    assert_allclose(coord.dec.deg, 14.783516054817797)
    assert isinstance(time, Time)
    assert_allclose(time.mjd, 54746.03429755324)

    coord, time = wcs.array_index_to_world(44, 39, 29)
    assert isinstance(coord, SkyCoord)
    assert isinstance(coord.frame, ICRS)
    assert_allclose(coord.ra.deg, 1.7323356692202325)
    assert_allclose(coord.dec.deg, 14.783516054817797)
    assert isinstance(time, Time)
    assert_allclose(time.mjd, 54746.03429755324)

    x, y, z = wcs.world_to_pixel(coord, time)
    assert_allclose(x, 29.)
    assert_allclose(y, 39.)
    assert_allclose(z, 44.)

    # Order of world coordinates shouldn't matter
    x, y, z = wcs.world_to_pixel(time, coord)
    assert_allclose(x, 29.)
    assert_allclose(y, 39.)
    assert_allclose(z, 44.)

    i, j, k = wcs.world_to_array_index(coord, time)
    assert_equal(i, 44)
    assert_equal(j, 39)
    assert_equal(k, 29)

    # Order of world coordinates shouldn't matter
    i, j, k = wcs.world_to_array_index(time, coord)
    assert_equal(i, 44)
    assert_equal(j, 39)
    assert_equal(k, 29)


###############################################################################
# The following tests are to make sure that Time objects are constructed
# correctly for a variety of combinations of WCS keywords
###############################################################################

HEADER_TIME_1D = """
SIMPLE = T
BITPIX = -32
NAXIS = 1
NAXIS1 = 2048
TIMESYS = 'UTC'
TREFPOS = 'TOPOCENT'
MJDREF = 50002.6
CTYPE1 = 'UTC'
CRVAL1 = 5
CUNIT1 = 's'
CRPIX1 = 1.0
CDELT1 = 2
OBSGEO-L= -20
OBSGEO-B= -70
OBSGEO-H= 2530
"""

# DATEREF is only understood by wcslib >= 7.1.
if Version(wcsver) >= Version('7.1'):
    HEADER_TIME_1D += "DATEREF = '1995-10-12T14:24:00'\n"


@pytest.fixture
def header_time_1d():
    # Fresh Header per test so each test can mutate CTYPE1/TIMESYS freely.
    return Header.fromstring(HEADER_TIME_1D, sep='\n')


def assert_time_at(header, position, jd1, jd2, scale, format):
    """Assert the Time produced at ``position`` has the given jd/scale/format."""
    with warnings.catch_warnings():
        warnings.simplefilter('ignore', FITSFixedWarning)
        wcs = WCS(header)
    time = wcs.pixel_to_world(position)
    assert_allclose(time.jd1, jd1, rtol=1e-10)
    assert_allclose(time.jd2, jd2, rtol=1e-10)
    assert time.format == format
    assert time.scale == scale


@pytest.mark.parametrize('scale', ('tai', 'tcb', 'tcg', 'tdb', 'tt', 'ut1', 'utc', 'local'))
def test_time_1d_values(header_time_1d, scale):

    # Check that Time objects are instantiated with the correct values,
    # scales, and formats.

    header_time_1d['CTYPE1'] = scale.upper()
    assert_time_at(header_time_1d, 1, 2450003, 0.1 + 7 / 3600 / 24, scale, 'mjd')


def test_time_1d_values_gps(header_time_1d):
    # Special treatment for GPS scale
    header_time_1d['CTYPE1'] = 'GPS'
    # GPS = TAI - 19 s, hence the extra 19 seconds here.
    assert_time_at(header_time_1d, 1, 2450003, 0.1 + (7 + 19) / 3600 / 24, 'tai', 'mjd')


def test_time_1d_values_deprecated(header_time_1d):
    # Deprecated (in FITS) scales
    header_time_1d['CTYPE1'] = 'TDT'
    assert_time_at(header_time_1d, 1, 2450003, 0.1 + 7 / 3600 / 24, 'tt', 'mjd')
    header_time_1d['CTYPE1'] = 'IAT'
    assert_time_at(header_time_1d, 1, 2450003, 0.1 + 7 / 3600 / 24, 'tai', 'mjd')
    header_time_1d['CTYPE1'] = 'GMT'
    assert_time_at(header_time_1d, 1, 2450003, 0.1 + 7 / 3600 / 24, 'utc', 'mjd')
    header_time_1d['CTYPE1'] = 'ET'
    assert_time_at(header_time_1d, 1, 2450003, 0.1 + 7 / 3600 / 24, 'tt', 'mjd')


def test_time_1d_values_time(header_time_1d):
    # CTYPE1 = 'TIME' defers the scale to TIMESYS (default UTC here).
    header_time_1d['CTYPE1'] = 'TIME'
    assert_time_at(header_time_1d, 1, 2450003, 0.1 + 7 / 3600 / 24, 'utc', 'mjd')
    header_time_1d['TIMESYS'] = 'TAI'
    assert_time_at(header_time_1d, 1, 2450003, 0.1 + 7 / 3600 / 24, 'tai', 'mjd')


@pytest.mark.remote_data
@pytest.mark.parametrize('scale', ('tai', 'tcb', 'tcg', 'tdb', 'tt', 'ut1', 'utc'))
def test_time_1d_roundtrip(header_time_1d, scale):

    # Check that coordinates round-trip

    pixel_in = np.arange(3, 10)

    header_time_1d['CTYPE1'] = scale.upper()

    with warnings.catch_warnings():
        warnings.simplefilter('ignore', FITSFixedWarning)
        wcs = WCS(header_time_1d)

    # Simple test
    time = wcs.pixel_to_world(pixel_in)
    pixel_out = wcs.world_to_pixel(time)
    assert_allclose(pixel_in, pixel_out)

    # Test with an intermediate change to a different scale/format
    time = wcs.pixel_to_world(pixel_in).tdb
    time.format = 'isot'
    pixel_out = wcs.world_to_pixel(time)
    assert_allclose(pixel_in, pixel_out)


def test_time_1d_high_precision(header_time_1d):

    # Case where the MJDREF is split into two for high precision
    del header_time_1d['MJDREF']
    header_time_1d['MJDREFI'] = 52000.
    header_time_1d['MJDREFF'] = 1e-11

    with warnings.catch_warnings():
        warnings.simplefilter('ignore', FITSFixedWarning)
        wcs = WCS(header_time_1d)
    time = wcs.pixel_to_world(10)

    # Here we have to use a very small rtol to really test that MJDREFF is
    # taken into account
    assert_allclose(time.jd1, 2452001.0, rtol=1e-12)
    assert_allclose(time.jd2, -0.5 + 25 / 3600 / 24 + 1e-11, rtol=1e-13)


def test_time_1d_location_geodetic(header_time_1d):

    # Make sure that the location is correctly returned (geodetic case)

    with warnings.catch_warnings():
        warnings.simplefilter('ignore', FITSFixedWarning)
        wcs = WCS(header_time_1d)
    time = wcs.pixel_to_world(10)

    lon, lat, alt = time.location.to_geodetic()

    # FIXME: alt won't work for now because ERFA doesn't implement the IAU 1976
    # ellipsoid (https://github.com/astropy/astropy/issues/9420)
    assert_allclose(lon.degree, -20)
    assert_allclose(lat.degree, -70)
    # assert_allclose(alt.to_value(u.m), 2530.)
@pytest.fixture
def header_time_1d_no_obs():
    """Time-axis header with the geodetic OBSGEO-* observer cards removed."""
    header = Header.fromstring(HEADER_TIME_1D, sep='\n')
    del header['OBSGEO-L']
    del header['OBSGEO-B']
    del header['OBSGEO-H']
    return header


def test_time_1d_location_geocentric(header_time_1d_no_obs):
    # Make sure that the location is correctly returned (geocentric case),
    # i.e. when the observer is given via OBSGEO-X/Y/Z instead of L/B/H.
    header = header_time_1d_no_obs
    header['OBSGEO-X'] = 10
    header['OBSGEO-Y'] = -20
    header['OBSGEO-Z'] = 30
    with warnings.catch_warnings():
        warnings.simplefilter('ignore', FITSFixedWarning)
        wcs = WCS(header)
    time = wcs.pixel_to_world(10)
    x, y, z = time.location.to_geocentric()
    assert_allclose(x.to_value(u.m), 10)
    assert_allclose(y.to_value(u.m), -20)
    assert_allclose(z.to_value(u.m), 30)


def test_time_1d_location_geocenter(header_time_1d_no_obs):
    # TREFPOS = GEOCENTER should map to a location at the geocentric origin.
    header_time_1d_no_obs['TREFPOS'] = 'GEOCENTER'
    wcs = WCS(header_time_1d_no_obs)
    time = wcs.pixel_to_world(10)
    x, y, z = time.location.to_geocentric()
    assert_allclose(x.to_value(u.m), 0)
    assert_allclose(y.to_value(u.m), 0)
    assert_allclose(z.to_value(u.m), 0)


def test_time_1d_location_missing(header_time_1d_no_obs):
    # Check what happens when no location is present: a warning is emitted
    # and Time.location is None.
    wcs = WCS(header_time_1d_no_obs)
    with pytest.warns(UserWarning,
                      match='Missing or incomplete observer location '
                            'information, setting location in Time to None'):
        time = wcs.pixel_to_world(10)
    assert time.location is None


def test_time_1d_location_incomplete(header_time_1d_no_obs):
    # Check what happens when location information is incomplete
    # (only one of the three OBSGEO-L/B/H cards present).
    header_time_1d_no_obs['OBSGEO-L'] = 10.
    with warnings.catch_warnings():
        warnings.simplefilter('ignore', FITSFixedWarning)
        wcs = WCS(header_time_1d_no_obs)
    with pytest.warns(UserWarning,
                      match='Missing or incomplete observer location '
                            'information, setting location in Time to None'):
        time = wcs.pixel_to_world(10)
    assert time.location is None


def test_time_1d_location_unsupported(header_time_1d_no_obs):
    # Check what happens when TREFPOS is unsupported
    # (e.g. BARYCENTER cannot be expressed as an EarthLocation).
    header_time_1d_no_obs['TREFPOS'] = 'BARYCENTER'
    wcs = WCS(header_time_1d_no_obs)
    with pytest.warns(UserWarning,
                      match="Observation location 'barycenter' is not "
                            "supported, setting location in Time to None"):
        time = wcs.pixel_to_world(10)
    assert time.location is None


def test_time_1d_unsupported_ctype(header_time_1d_no_obs):
    # For cases that we don't support yet, e.g. UT(...), use Time and drop sub-scale
    # Case where the MJDREF is split into two for high precision
    header_time_1d_no_obs['CTYPE1'] = 'UT(WWV)'
    wcs = WCS(header_time_1d_no_obs)
    with pytest.warns(UserWarning,
                      match="Dropping unsupported sub-scale WWV from scale UT"):
        time = wcs.pixel_to_world(10)
    assert isinstance(time, Time)


###############################################################################
# Extra corner cases
###############################################################################


def test_unrecognized_unit():
    # TODO: Determine whether the following behavior is desirable
    # (an unparseable unit string is passed through verbatim).
    wcs = WCS(naxis=1)
    with pytest.warns(UnitsWarning):
        wcs.wcs.cunit = ['bananas // sekonds']
    assert wcs.world_axis_units == ['bananas // sekonds']


def test_distortion_correlations():
    """Axis correlation matrix must account for SIP distortions."""
    filename = get_pkg_data_filename('../../tests/data/sip.fits')
    with pytest.warns(FITSFixedWarning):
        w = WCS(filename)
    assert_equal(w.axis_correlation_matrix, True)
    # Changing PC to an identity matrix doesn't change anything since
    # distortions are still present.
    w.wcs.pc = [[1, 0], [0, 1]]
    assert_equal(w.axis_correlation_matrix, True)
    # Nor does changing the name of the axes to make them non-celestial
    w.wcs.ctype = ['X', 'Y']
    assert_equal(w.axis_correlation_matrix, True)
    # However once we turn off the distortions the matrix changes
    w.sip = None
    assert_equal(w.axis_correlation_matrix, [[True, False], [False, True]])
    # If we go back to celestial coordinates then the matrix is all True again
    w.wcs.ctype = ['RA---TAN', 'DEC--TAN']
    assert_equal(w.axis_correlation_matrix, True)
    # Or if we change to X/Y but have a non-identity PC
    w.wcs.pc = [[0.9, -0.1], [0.1, 0.9]]
    w.wcs.ctype = ['X', 'Y']
    assert_equal(w.axis_correlation_matrix, True)


def test_custom_ctype_to_ucd_mappings():
    """Custom CTYPE->UCD mappings apply, nest, and inner mappings win."""
    wcs = WCS(naxis=1)
    wcs.wcs.ctype = ['SPAM']
    assert wcs.world_axis_physical_types == [None]
    # Check simple behavior
    with custom_ctype_to_ucd_mapping({'APPLE': 'food.fruit'}):
        assert wcs.world_axis_physical_types == [None]
    with custom_ctype_to_ucd_mapping({'APPLE': 'food.fruit', 'SPAM': 'food.spam'}):
        assert wcs.world_axis_physical_types == ['food.spam']
    # Check nesting
    with custom_ctype_to_ucd_mapping({'SPAM': 'food.spam'}):
        with custom_ctype_to_ucd_mapping({'APPLE': 'food.fruit'}):
            assert wcs.world_axis_physical_types == ['food.spam']
    with custom_ctype_to_ucd_mapping({'APPLE': 'food.fruit'}):
        with custom_ctype_to_ucd_mapping({'SPAM': 'food.spam'}):
            assert wcs.world_axis_physical_types == ['food.spam']
    # Check priority in nesting
    with custom_ctype_to_ucd_mapping({'SPAM': 'notfood'}):
        with custom_ctype_to_ucd_mapping({'SPAM': 'food.spam'}):
            assert wcs.world_axis_physical_types == ['food.spam']
    with custom_ctype_to_ucd_mapping({'SPAM': 'food.spam'}):
        with custom_ctype_to_ucd_mapping({'SPAM': 'notfood'}):
            assert wcs.world_axis_physical_types == ['notfood']


def test_caching_components_and_classes():
    # Make sure that when we change the WCS object, the classes and components
    # are updated (we use a cache internally, so we need to make sure the cache
    # is invalidated if needed)
    wcs = WCS_SIMPLE_CELESTIAL.deepcopy()
    assert wcs.world_axis_object_components == [('celestial', 0, 'spherical.lon.degree'),
                                                ('celestial', 1, 'spherical.lat.degree')]
    assert wcs.world_axis_object_classes['celestial'][0] is SkyCoord
    assert wcs.world_axis_object_classes['celestial'][1] == ()
    assert isinstance(wcs.world_axis_object_classes['celestial'][2]['frame'], ICRS)
    assert wcs.world_axis_object_classes['celestial'][2]['unit'] is u.deg
    # Mutating the underlying wcslib struct must invalidate the cached frame.
    wcs.wcs.radesys = 'FK5'
    frame = wcs.world_axis_object_classes['celestial'][2]['frame']
    assert isinstance(frame, FK5)
    assert frame.equinox.jyear == 2000.
    wcs.wcs.equinox = 2010
    frame = wcs.world_axis_object_classes['celestial'][2]['frame']
    assert isinstance(frame, FK5)
    assert frame.equinox.jyear == 2010.


def test_sub_wcsapi_attributes():
    # Regression test for a bug that caused some of the WCS attributes to be
    # incorrect when using WCS.sub or WCS.celestial (which is an alias for sub
    # with lon/lat types).
    wcs = WCS_SPECTRAL_CUBE.deepcopy()
    wcs.pixel_shape = (30, 40, 50)
    wcs.pixel_bounds = [(-1, 11), (-2, 18), (5, 15)]
    # Use celestial shortcut
    wcs_sub1 = wcs.celestial
    assert wcs_sub1.pixel_n_dim == 2
    assert wcs_sub1.world_n_dim == 2
    assert wcs_sub1.array_shape == (50, 30)
    assert wcs_sub1.pixel_shape == (30, 50)
    assert wcs_sub1.pixel_bounds == [(-1, 11), (5, 15)]
    assert wcs_sub1.world_axis_physical_types == ['pos.galactic.lat', 'pos.galactic.lon']
    assert wcs_sub1.world_axis_units == ['deg', 'deg']
    assert wcs_sub1.world_axis_names == ['Latitude', 'Longitude']
    # Try adding axes (0 in the list inserts a new dummy axis)
    wcs_sub2 = wcs.sub([0, 2, 0])
    assert wcs_sub2.pixel_n_dim == 3
    assert wcs_sub2.world_n_dim == 3
    assert wcs_sub2.array_shape == (None, 40, None)
    assert wcs_sub2.pixel_shape == (None, 40, None)
    assert wcs_sub2.pixel_bounds == [None, (-2, 18), None]
    assert wcs_sub2.world_axis_physical_types == [None, 'em.freq', None]
    assert wcs_sub2.world_axis_units == ['', 'Hz', '']
    assert wcs_sub2.world_axis_names == ['', 'Frequency', '']
    # Use strings
    wcs_sub3 = wcs.sub(['longitude', 'latitude'])
    assert wcs_sub3.pixel_n_dim == 2
    assert wcs_sub3.world_n_dim == 2
    assert wcs_sub3.array_shape == (30, 50)
    assert wcs_sub3.pixel_shape == (50, 30)
    assert wcs_sub3.pixel_bounds == [(5, 15), (-1, 11)]
    assert wcs_sub3.world_axis_physical_types == ['pos.galactic.lon', 'pos.galactic.lat']
    assert wcs_sub3.world_axis_units == ['deg', 'deg']
    assert wcs_sub3.world_axis_names == ['Longitude', 'Latitude']
    # Now try without CNAME set
    wcs.wcs.cname = [''] * wcs.wcs.naxis
    wcs_sub4 = wcs.sub(['longitude', 'latitude'])
    assert wcs_sub4.pixel_n_dim == 2
    assert wcs_sub4.world_n_dim == 2
    assert wcs_sub4.array_shape == (30, 50)
    assert wcs_sub4.pixel_shape == (50, 30)
    assert wcs_sub4.pixel_bounds == [(5, 15), (-1, 11)]
    assert wcs_sub4.world_axis_physical_types == ['pos.galactic.lon', 'pos.galactic.lat']
    assert wcs_sub4.world_axis_units == ['deg', 'deg']
    assert wcs_sub4.world_axis_names == ['', '']


# Minimal header with a Stokes (polarization) third axis.
HEADER_POLARIZED = """
CTYPE1 = 'HPLT-TAN'
CTYPE2 = 'HPLN-TAN'
CTYPE3 = 'STOKES'
"""


@pytest.fixture
def header_polarized():
    """Header with a STOKES polarization axis."""
    return Header.fromstring(HEADER_POLARIZED, sep='\n')


def test_phys_type_polarization(header_polarized):
    # STOKES axes should map to the polarization UCD physical type.
    w = WCS(header_polarized)
    assert w.world_axis_physical_types[2] == 'phys.polarization.stokes'


###############################################################################
# Spectral transformations
###############################################################################

# Celestial + topocentric frequency cube used for velocity-frame tests.
HEADER_SPECTRAL_FRAMES = """
BUNIT = 'Jy/beam'
EQUINOX = 2.000000000E+03
CTYPE1 = 'RA---SIN'
CRVAL1 = 2.60108333333E+02
CDELT1 = -2.777777845E-04
CRPIX1 = 1.0
CUNIT1 = 'deg'
CTYPE2 = 'DEC--SIN'
CRVAL2 = -9.75000000000E-01
CDELT2 = 2.777777845E-04
CRPIX2 = 1.0
CUNIT2 = 'deg'
CTYPE3 = 'FREQ'
CRVAL3 = 1.37835117405E+09
CDELT3 = 9.765625000E+04
CRPIX3 = 32.0
CUNIT3 = 'Hz'
SPECSYS = 'TOPOCENT'
RESTFRQ = 1.420405752E+09 / [Hz]
RADESYS = 'FK5'
"""


@pytest.fixture
def header_spectral_frames():
    """Header for a spectral cube with a topocentric FREQ axis."""
    return Header.fromstring(HEADER_SPECTRAL_FRAMES, sep='\n')


def test_spectralcoord_frame(header_spectral_frames):
    # This is a test to check the numerical results of transformations between
    # different velocity frames. We simply make sure that the returned
    # SpectralCoords are in the right frame but don't check the transformations
    # since this is already done in test_spectralcoord_accuracy
    # in astropy.coordinates.
    with iers.conf.set_temp('auto_download', False):
        obstime = Time(f"2009-05-04T04:44:23", scale='utc')
        header = header_spectral_frames.copy()
        header['MJD-OBS'] = obstime.mjd
        header['CRVAL1'] = 16.33211
        header['CRVAL2'] = -34.2221
        header['OBSGEO-L'] = 144.2
        header['OBSGEO-B'] = -20.2
        header['OBSGEO-H'] = 0.
        # We start off with a WCS defined in topocentric frequency
        with pytest.warns(FITSFixedWarning):
            wcs_topo = WCS(header)
        # We convert a single pixel coordinate to world coordinates and keep only
        # the second high level object - a SpectralCoord:
        sc_topo = wcs_topo.pixel_to_world(0, 0, 31)[1]
        # We check that this is in topocentric frame with zero velocities
        assert isinstance(sc_topo, SpectralCoord)
        assert isinstance(sc_topo.observer, ITRS)
        assert sc_topo.observer.obstime.isot == obstime.isot
        assert_equal(sc_topo.observer.data.differentials['s'].d_xyz.value, 0)
        observatory = EarthLocation.from_geodetic(144.2, -20.2).get_itrs(obstime=obstime).transform_to(ICRS())
        assert observatory.separation_3d(sc_topo.observer.transform_to(ICRS())) < 1 * u.km
        for specsys, expected_frame in VELOCITY_FRAMES.items():
            header['SPECSYS'] = specsys
            with pytest.warns(FITSFixedWarning):
                wcs = WCS(header)
            sc = wcs.pixel_to_world(0, 0, 31)[1]
            # Now transform to the expected velocity frame, which should leave
            # the spectral coordinate unchanged
            sc_check = sc.with_observer_stationary_relative_to(expected_frame)
            assert_quantity_allclose(sc.quantity, sc_check.quantity)


@pytest.mark.parametrize(('ctype3', 'observer'),
                         product(['ZOPT', 'BETA', 'VELO', 'VRAD', 'VOPT'], [False, True]))
def test_different_ctypes(header_spectral_frames, ctype3, observer):
    """Spectral round-trips work for every velocity-like CTYPE3, with and without observer."""
    header = header_spectral_frames.copy()
    header['CTYPE3'] = ctype3
    header['CRVAL3'] = 0.1
    header['CDELT3'] = 0.001
    # Velocity CTYPEs (VELO/VRAD/VOPT) carry a velocity unit; ZOPT/BETA are dimensionless.
    if ctype3[0] == 'V':
        header['CUNIT3'] = 'm s-1'
    else:
        header['CUNIT3'] = ''
    header['RESTWAV'] = 1.420405752E+09
    header['MJD-OBS'] = 55197
    if observer:
        header['OBSGEO-L'] = 144.2
        header['OBSGEO-B'] = -20.2
        header['OBSGEO-H'] = 0.
        header['SPECSYS'] = 'BARYCENT'
    with warnings.catch_warnings():
        warnings.simplefilter('ignore', FITSFixedWarning)
        wcs = WCS(header)
    skycoord, spectralcoord = wcs.pixel_to_world(0, 0, 31)
    assert isinstance(spectralcoord, SpectralCoord)
    if observer:
        pix = wcs.world_to_pixel(skycoord, spectralcoord)
    else:
        # Without observer information a warning is expected on the way back.
        with pytest.warns(AstropyUserWarning,
                          match='No observer defined on WCS'):
            pix = wcs.world_to_pixel(skycoord, spectralcoord)
    assert_allclose(pix, [0, 0, 31], rtol=1e-6)
pllim/astropy
astropy/wcs/wcsapi/tests/test_fitswcs.py
astropy/io/registry.py
import os
import abc

import numpy as np

__all__ = ['BaseLowLevelWCS', 'validate_physical_types']


class BaseLowLevelWCS(metaclass=abc.ABCMeta):
    """
    Abstract base class for the low-level WCS interface.

    This is described in `APE 14: A shared Python interface for World
    Coordinate Systems <https://doi.org/10.5281/zenodo.1188875>`_.
    """

    @property
    @abc.abstractmethod
    def pixel_n_dim(self):
        """
        The number of axes in the pixel coordinate system.
        """

    @property
    @abc.abstractmethod
    def world_n_dim(self):
        """
        The number of axes in the world coordinate system.
        """

    @property
    @abc.abstractmethod
    def world_axis_physical_types(self):
        """
        An iterable of strings describing the physical type for each world axis.

        These should be names from the VO UCD1+ controlled Vocabulary
        (http://www.ivoa.net/documents/latest/UCDlist.html). If no matching UCD
        type exists, this can instead be ``"custom:xxx"``, where ``xxx`` is an
        arbitrary string. Alternatively, if the physical type is
        unknown/undefined, an element can be `None`.
        """

    @property
    @abc.abstractmethod
    def world_axis_units(self):
        """
        An iterable of strings given the units of the world coordinates for
        each axis.

        The strings should follow the `IVOA VOUnit standard
        <http://ivoa.net/documents/VOUnits/>`_ (though as noted in the VOUnit
        specification document, units that do not follow this standard are
        still allowed, but just not recommended).
        """

    @abc.abstractmethod
    def pixel_to_world_values(self, *pixel_arrays):
        """
        Convert pixel coordinates to world coordinates.

        This method takes `~astropy.wcs.wcsapi.BaseLowLevelWCS.pixel_n_dim`
        scalars or arrays as input, and pixel coordinates should be
        zero-based. Returns `~astropy.wcs.wcsapi.BaseLowLevelWCS.world_n_dim` scalars or
        arrays in units given by `~astropy.wcs.wcsapi.BaseLowLevelWCS.world_axis_units`.

        Note that pixel coordinates are assumed to be 0 at the center of the
        first pixel in each dimension. If a pixel is in a region where the WCS
        is not defined, NaN can be returned.

        The coordinates should be specified in the ``(x, y)`` order, where for
        an image, ``x`` is the horizontal coordinate and ``y`` is the vertical
        coordinate.

        If `~astropy.wcs.wcsapi.BaseLowLevelWCS.world_n_dim` is ``1``, this
        method returns a single scalar or array, otherwise a tuple of scalars or
        arrays is returned.
        """

    def array_index_to_world_values(self, *index_arrays):
        """
        Convert array indices to world coordinates.

        This is the same as `~astropy.wcs.wcsapi.BaseLowLevelWCS.pixel_to_world_values` except that
        the indices should be given in ``(i, j)`` order, where for an image
        ``i`` is the row and ``j`` is the column (i.e. the opposite order to
        `~astropy.wcs.wcsapi.BaseLowLevelWCS.pixel_to_world_values`).

        If `~astropy.wcs.wcsapi.BaseLowLevelWCS.world_n_dim` is ``1``, this
        method returns a single scalar or array, otherwise a tuple of scalars or
        arrays is returned.
        """
        # Array indices are simply pixel coordinates in reversed order.
        return self.pixel_to_world_values(*index_arrays[::-1])

    @abc.abstractmethod
    def world_to_pixel_values(self, *world_arrays):
        """
        Convert world coordinates to pixel coordinates.

        This method takes `~astropy.wcs.wcsapi.BaseLowLevelWCS.world_n_dim`
        scalars or arrays as input in units given by `~astropy.wcs.wcsapi.BaseLowLevelWCS.world_axis_units`.
        Returns `~astropy.wcs.wcsapi.BaseLowLevelWCS.pixel_n_dim` scalars or
        arrays. Note that pixel coordinates are assumed to be 0 at the center of
        the first pixel in each dimension. If a world coordinate does not have a
        matching pixel coordinate, NaN can be returned.

        The coordinates should be returned in the ``(x, y)`` order, where for
        an image, ``x`` is the horizontal coordinate and ``y`` is the vertical
        coordinate.

        If `~astropy.wcs.wcsapi.BaseLowLevelWCS.pixel_n_dim` is ``1``, this
        method returns a single scalar or array, otherwise a tuple of scalars or
        arrays is returned.
        """

    def world_to_array_index_values(self, *world_arrays):
        """
        Convert world coordinates to array indices.

        This is the same as `~astropy.wcs.wcsapi.BaseLowLevelWCS.world_to_pixel_values` except that
        the indices should be returned in ``(i, j)`` order, where for an image
        ``i`` is the row and ``j`` is the column (i.e. the opposite order to
        `~astropy.wcs.wcsapi.BaseLowLevelWCS.pixel_to_world_values`). The indices should be
        returned as rounded integers.

        If `~astropy.wcs.wcsapi.BaseLowLevelWCS.pixel_n_dim` is ``1``, this
        method returns a single scalar or array, otherwise a tuple of scalars or
        arrays is returned.
        """
        pixel_arrays = self.world_to_pixel_values(*world_arrays)
        if self.pixel_n_dim == 1:
            # world_to_pixel_values returns a bare scalar/array in 1D; wrap it
            # so the generic reversal/rounding below works uniformly.
            pixel_arrays = (pixel_arrays,)
        else:
            pixel_arrays = pixel_arrays[::-1]
        # Round to nearest integer (floor(x + 0.5) rounds halves up
        # consistently, unlike banker's rounding in np.round).
        array_indices = tuple(np.asarray(np.floor(pixel + 0.5), dtype=np.int_)
                              for pixel in pixel_arrays)
        return array_indices[0] if self.pixel_n_dim == 1 else array_indices

    @property
    @abc.abstractmethod
    def world_axis_object_components(self):
        """
        A list with `~astropy.wcs.wcsapi.BaseLowLevelWCS.world_n_dim` elements giving information
        on constructing high-level objects for the world coordinates.

        Each element of the list is a tuple with three items:

        * The first is a name for the world object this world array
          corresponds to, which *must* match the string names used in
          `~astropy.wcs.wcsapi.BaseLowLevelWCS.world_axis_object_classes`. Note that names might
          appear twice because two world arrays might correspond to a single
          world object (e.g. a celestial coordinate might have both “ra” and
          “dec” arrays, which correspond to a single sky coordinate object).

        * The second element is either a string keyword argument name or a
          positional index for the corresponding class from
          `~astropy.wcs.wcsapi.BaseLowLevelWCS.world_axis_object_classes`.

        * The third argument is a string giving the name of the property
          to access on the corresponding class from
          `~astropy.wcs.wcsapi.BaseLowLevelWCS.world_axis_object_classes` in
          order to get numerical values. Alternatively, this argument can be a
          callable Python object that takes a high-level coordinate object and
          returns the numerical values suitable for passing to the low-level
          WCS transformation methods.

        See the document
        `APE 14: A shared Python interface for World Coordinate Systems
        <https://doi.org/10.5281/zenodo.1188875>`_ for examples.
        """

    @property
    @abc.abstractmethod
    def world_axis_object_classes(self):
        """
        A dictionary giving information on constructing high-level objects for
        the world coordinates.

        Each key of the dictionary is a string key from
        `~astropy.wcs.wcsapi.BaseLowLevelWCS.world_axis_object_components`, and each value is a
        tuple with three elements or four elements:

        * The first element of the tuple must be a class or a string specifying
          the fully-qualified name of a class, which will specify the actual
          Python object to be created.

        * The second element, should be a tuple specifying the positional
          arguments required to initialize the class. If
          `~astropy.wcs.wcsapi.BaseLowLevelWCS.world_axis_object_components` specifies that the
          world coordinates should be passed as a positional argument, this
          tuple should include `None` placeholders for the world coordinates.

        * The third tuple element must be a dictionary with the keyword
          arguments required to initialize the class.

        * Optionally, for advanced use cases, the fourth element (if present)
          should be a callable Python object that gets called instead of the
          class and gets passed the positional and keyword arguments. It should
          return an object of the type of the first element in the tuple.

        Note that we don't require the classes to be Astropy classes since there
        is no guarantee that Astropy will have all the classes to represent all
        kinds of world coordinates. Furthermore, we recommend that the output be
        kept as human-readable as possible.

        The classes used here should have the ability to do conversions by
        passing an instance as the first argument to the same class with
        different arguments (e.g. ``Time(Time(...), scale='tai')``). This is
        a requirement for the implementation of the high-level interface.

        The second and third tuple elements for each value of this dictionary
        can in turn contain either instances of classes, or if necessary can
        contain serialized versions that should take the same form as the main
        classes described above (a tuple with three elements with the fully
        qualified name of the class, then the positional arguments and the
        keyword arguments). For low-level API objects implemented in Python, we
        recommend simply returning the actual objects (not the serialized form)
        for optimal performance. Implementations should either always or never
        use serialized classes to represent Python objects, and should indicate
        which of these they follow using the
        `~astropy.wcs.wcsapi.BaseLowLevelWCS.serialized_classes` attribute.

        See the document
        `APE 14: A shared Python interface for World Coordinate Systems
        <https://doi.org/10.5281/zenodo.1188875>`_ for examples.
        """

    # The following three properties have default fallback implementations, so
    # they are not abstract.

    @property
    def array_shape(self):
        """
        The shape of the data that the WCS applies to as a tuple of length
        `~astropy.wcs.wcsapi.BaseLowLevelWCS.pixel_n_dim` in ``(row, column)``
        order (the convention for arrays in Python).

        If the WCS is valid in the context of a dataset with a particular
        shape, then this property can be used to store the shape of the
        data. This can be used for example if implementing slicing of WCS
        objects. This is an optional property, and it should return `None`
        if a shape is not known or relevant.
        """
        if self.pixel_shape is None:
            return None
        else:
            # array (row, column) order is the reverse of pixel (x, y) order.
            return self.pixel_shape[::-1]

    @property
    def pixel_shape(self):
        """
        The shape of the data that the WCS applies to as a tuple of length
        `~astropy.wcs.wcsapi.BaseLowLevelWCS.pixel_n_dim` in ``(x, y)``
        order (where for an image, ``x`` is the horizontal coordinate and ``y``
        is the vertical coordinate).

        If the WCS is valid in the context of a dataset with a particular
        shape, then this property can be used to store the shape of the
        data. This can be used for example if implementing slicing of WCS
        objects. This is an optional property, and it should return `None`
        if a shape is not known or relevant.

        If you are interested in getting a shape that is comparable to that of
        a Numpy array, you should use
        `~astropy.wcs.wcsapi.BaseLowLevelWCS.array_shape` instead.
        """
        return None

    @property
    def pixel_bounds(self):
        """
        The bounds (in pixel coordinates) inside which the WCS is defined,
        as a list with `~astropy.wcs.wcsapi.BaseLowLevelWCS.pixel_n_dim`
        ``(min, max)`` tuples.

        The bounds should be given in ``[(xmin, xmax), (ymin, ymax)]``
        order. WCS solutions are sometimes only guaranteed to be accurate
        within a certain range of pixel values, for example when defining a
        WCS that includes fitted distortions. This is an optional property,
        and it should return `None` if a shape is not known or relevant.
        """
        return None

    @property
    def pixel_axis_names(self):
        """
        An iterable of strings describing the name for each pixel axis.

        If an axis does not have a name, an empty string should be returned
        (this is the default behavior for all axes if a subclass does not
        override this property). Note that these names are just for display
        purposes and are not standardized.
        """
        return [''] * self.pixel_n_dim

    @property
    def world_axis_names(self):
        """
        An iterable of strings describing the name for each world axis.

        If an axis does not have a name, an empty string should be returned
        (this is the default behavior for all axes if a subclass does not
        override this property). Note that these names are just for display
        purposes and are not standardized. For standardized axis types, see
        `~astropy.wcs.wcsapi.BaseLowLevelWCS.world_axis_physical_types`.
        """
        return [''] * self.world_n_dim

    @property
    def axis_correlation_matrix(self):
        """
        Returns an (`~astropy.wcs.wcsapi.BaseLowLevelWCS.world_n_dim`,
        `~astropy.wcs.wcsapi.BaseLowLevelWCS.pixel_n_dim`) matrix that
        indicates using booleans whether a given world coordinate depends on a
        given pixel coordinate.

        This defaults to a matrix where all elements are `True` in the absence
        of any further information. For completely independent axes, the
        diagonal would be `True` and all other entries `False`.
        """
        # Conservative default: assume every world axis depends on every
        # pixel axis.
        return np.ones((self.world_n_dim, self.pixel_n_dim), dtype=bool)

    @property
    def serialized_classes(self):
        """
        Indicates whether Python objects are given in serialized form or as
        actual Python objects.
        """
        return False

    def _as_mpl_axes(self):
        """
        Compatibility hook for Matplotlib and WCSAxes.

        With this method, one can do::

            from astropy.wcs import WCS
            import matplotlib.pyplot as plt

            wcs = WCS('filename.fits')
            fig = plt.figure()
            ax = fig.add_axes([0.15, 0.1, 0.8, 0.8], projection=wcs)
            ...

        and this will generate a plot with the correct WCS coordinates on the
        axes.
        """
        # Imported lazily to avoid a hard dependency on matplotlib/WCSAxes at
        # module import time.
        from astropy.visualization.wcsaxes import WCSAxes
        return WCSAxes, {'wcs': self}


UCDS_FILE = os.path.join(os.path.dirname(__file__), 'data', 'ucds.txt')
with open(UCDS_FILE) as f:
    # First line of the data file is a header; skip it.
    VALID_UCDS = {x.strip() for x in f.read().splitlines()[1:]}


def validate_physical_types(physical_types):
    """
    Validate a list of physical types against the UCD1+ standard.

    Each entry must be `None`, a valid IVOA UCD1+ word listed in
    ``data/ucds.txt``, or an arbitrary string prefixed with ``'custom:'``;
    otherwise a `ValueError` is raised.
    """
    for physical_type in physical_types:
        if (physical_type is not None and
                physical_type not in VALID_UCDS and
                not physical_type.startswith('custom:')):
            # Fixed typo in the error message: the standards body is the
            # IVOA (International Virtual Observatory Alliance), not "IOVA".
            raise ValueError(
                f"'{physical_type}' is not a valid IVOA UCD1+ physical type. "
                "It must be a string specified in the list (http://www.ivoa.net/documents/latest/UCDlist.html) "
                "or if no matching type exists it can be any string prepended with 'custom:'."
            )
# Note that we test the main astropy.wcs.WCS class directly rather than testing # the mix-in class on its own (since it's not functional without being used as # a mix-in) import warnings from packaging.version import Version import numpy as np import pytest from numpy.testing import assert_equal, assert_allclose from itertools import product from astropy import units as u from astropy.time import Time from astropy.tests.helper import assert_quantity_allclose from astropy.units import Quantity from astropy.coordinates import ICRS, FK5, Galactic, SkyCoord, SpectralCoord, ITRS, EarthLocation from astropy.io.fits import Header from astropy.io.fits.verify import VerifyWarning from astropy.units.core import UnitsWarning from astropy.utils.data import get_pkg_data_filename from astropy.wcs.wcs import WCS, FITSFixedWarning from astropy.wcs.wcsapi.fitswcs import custom_ctype_to_ucd_mapping, VELOCITY_FRAMES from astropy.wcs._wcs import __version__ as wcsver from astropy.utils import iers from astropy.utils.exceptions import AstropyUserWarning ############################################################################### # The following example is the simplest WCS with default values ############################################################################### WCS_EMPTY = WCS(naxis=1) WCS_EMPTY.wcs.crpix = [1] def test_empty(): wcs = WCS_EMPTY # Low-level API assert wcs.pixel_n_dim == 1 assert wcs.world_n_dim == 1 assert wcs.array_shape is None assert wcs.pixel_shape is None assert wcs.world_axis_physical_types == [None] assert wcs.world_axis_units == [''] assert wcs.pixel_axis_names == [''] assert wcs.world_axis_names == [''] assert_equal(wcs.axis_correlation_matrix, True) assert wcs.world_axis_object_components == [('world', 0, 'value')] assert wcs.world_axis_object_classes['world'][0] is Quantity assert wcs.world_axis_object_classes['world'][1] == () assert wcs.world_axis_object_classes['world'][2]['unit'] is u.one assert_allclose(wcs.pixel_to_world_values(29), 29) 
assert_allclose(wcs.array_index_to_world_values(29), 29) assert np.ndim(wcs.pixel_to_world_values(29)) == 0 assert np.ndim(wcs.array_index_to_world_values(29)) == 0 assert_allclose(wcs.world_to_pixel_values(29), 29) assert_equal(wcs.world_to_array_index_values(29), (29,)) assert np.ndim(wcs.world_to_pixel_values(29)) == 0 assert np.ndim(wcs.world_to_array_index_values(29)) == 0 # High-level API coord = wcs.pixel_to_world(29) assert_quantity_allclose(coord, 29 * u.one) assert np.ndim(coord) == 0 coord = wcs.array_index_to_world(29) assert_quantity_allclose(coord, 29 * u.one) assert np.ndim(coord) == 0 coord = 15 * u.one x = wcs.world_to_pixel(coord) assert_allclose(x, 15.) assert np.ndim(x) == 0 i = wcs.world_to_array_index(coord) assert_equal(i, 15) assert np.ndim(i) == 0 ############################################################################### # The following example is a simple 2D image with celestial coordinates ############################################################################### HEADER_SIMPLE_CELESTIAL = """ WCSAXES = 2 CTYPE1 = RA---TAN CTYPE2 = DEC--TAN CRVAL1 = 10 CRVAL2 = 20 CRPIX1 = 30 CRPIX2 = 40 CDELT1 = -0.1 CDELT2 = 0.1 CROTA2 = 0. 
CUNIT1 = deg CUNIT2 = deg """ with warnings.catch_warnings(): warnings.simplefilter('ignore', VerifyWarning) WCS_SIMPLE_CELESTIAL = WCS(Header.fromstring( HEADER_SIMPLE_CELESTIAL, sep='\n')) def test_simple_celestial(): wcs = WCS_SIMPLE_CELESTIAL # Low-level API assert wcs.pixel_n_dim == 2 assert wcs.world_n_dim == 2 assert wcs.array_shape is None assert wcs.pixel_shape is None assert wcs.world_axis_physical_types == ['pos.eq.ra', 'pos.eq.dec'] assert wcs.world_axis_units == ['deg', 'deg'] assert wcs.pixel_axis_names == ['', ''] assert wcs.world_axis_names == ['', ''] assert_equal(wcs.axis_correlation_matrix, True) assert wcs.world_axis_object_components == [('celestial', 0, 'spherical.lon.degree'), ('celestial', 1, 'spherical.lat.degree')] assert wcs.world_axis_object_classes['celestial'][0] is SkyCoord assert wcs.world_axis_object_classes['celestial'][1] == () assert isinstance(wcs.world_axis_object_classes['celestial'][2]['frame'], ICRS) assert wcs.world_axis_object_classes['celestial'][2]['unit'] is u.deg assert_allclose(wcs.pixel_to_world_values(29, 39), (10, 20)) assert_allclose(wcs.array_index_to_world_values(39, 29), (10, 20)) assert_allclose(wcs.world_to_pixel_values(10, 20), (29., 39.)) assert_equal(wcs.world_to_array_index_values(10, 20), (39, 29)) # High-level API coord = wcs.pixel_to_world(29, 39) assert isinstance(coord, SkyCoord) assert isinstance(coord.frame, ICRS) assert_allclose(coord.ra.deg, 10) assert_allclose(coord.dec.deg, 20) coord = wcs.array_index_to_world(39, 29) assert isinstance(coord, SkyCoord) assert isinstance(coord.frame, ICRS) assert_allclose(coord.ra.deg, 10) assert_allclose(coord.dec.deg, 20) coord = SkyCoord(10, 20, unit='deg', frame='icrs') x, y = wcs.world_to_pixel(coord) assert_allclose(x, 29.) assert_allclose(y, 39.) 
i, j = wcs.world_to_array_index(coord) assert_equal(i, 39) assert_equal(j, 29) # Check that if the coordinates are passed in a different frame things still # work properly coord_galactic = coord.galactic x, y = wcs.world_to_pixel(coord_galactic) assert_allclose(x, 29.) assert_allclose(y, 39.) i, j = wcs.world_to_array_index(coord_galactic) assert_equal(i, 39) assert_equal(j, 29) # Check that we can actually index the array data = np.arange(3600).reshape((60, 60)) coord = SkyCoord(10, 20, unit='deg', frame='icrs') index = wcs.world_to_array_index(coord) assert_equal(data[index], 2369) coord = SkyCoord([10, 12], [20, 22], unit='deg', frame='icrs') index = wcs.world_to_array_index(coord) assert_equal(data[index], [2369, 3550]) ############################################################################### # The following example is a spectral cube with axes in an unusual order ############################################################################### HEADER_SPECTRAL_CUBE = """ WCSAXES = 3 CTYPE1 = GLAT-CAR CTYPE2 = FREQ CTYPE3 = GLON-CAR CNAME1 = Latitude CNAME2 = Frequency CNAME3 = Longitude CRVAL1 = 10 CRVAL2 = 20 CRVAL3 = 25 CRPIX1 = 30 CRPIX2 = 40 CRPIX3 = 45 CDELT1 = -0.1 CDELT2 = 0.5 CDELT3 = 0.1 CUNIT1 = deg CUNIT2 = Hz CUNIT3 = deg """ with warnings.catch_warnings(): warnings.simplefilter('ignore', VerifyWarning) WCS_SPECTRAL_CUBE = WCS(Header.fromstring(HEADER_SPECTRAL_CUBE, sep='\n')) def test_spectral_cube(): # Spectral cube with a weird axis ordering wcs = WCS_SPECTRAL_CUBE # Low-level API assert wcs.pixel_n_dim == 3 assert wcs.world_n_dim == 3 assert wcs.array_shape is None assert wcs.pixel_shape is None assert wcs.world_axis_physical_types == ['pos.galactic.lat', 'em.freq', 'pos.galactic.lon'] assert wcs.world_axis_units == ['deg', 'Hz', 'deg'] assert wcs.pixel_axis_names == ['', '', ''] assert wcs.world_axis_names == ['Latitude', 'Frequency', 'Longitude'] assert_equal(wcs.axis_correlation_matrix, [[True, False, True], [False, True, False], [True, 
False, True]]) assert len(wcs.world_axis_object_components) == 3 assert wcs.world_axis_object_components[0] == ('celestial', 1, 'spherical.lat.degree') assert wcs.world_axis_object_components[1][:2] == ('spectral', 0) assert wcs.world_axis_object_components[2] == ('celestial', 0, 'spherical.lon.degree') assert wcs.world_axis_object_classes['celestial'][0] is SkyCoord assert wcs.world_axis_object_classes['celestial'][1] == () assert isinstance(wcs.world_axis_object_classes['celestial'][2]['frame'], Galactic) assert wcs.world_axis_object_classes['celestial'][2]['unit'] is u.deg assert wcs.world_axis_object_classes['spectral'][0] is Quantity assert wcs.world_axis_object_classes['spectral'][1] == () assert wcs.world_axis_object_classes['spectral'][2] == {} assert_allclose(wcs.pixel_to_world_values(29, 39, 44), (10, 20, 25)) assert_allclose(wcs.array_index_to_world_values(44, 39, 29), (10, 20, 25)) assert_allclose(wcs.world_to_pixel_values(10, 20, 25), (29., 39., 44.)) assert_equal(wcs.world_to_array_index_values(10, 20, 25), (44, 39, 29)) # High-level API coord, spec = wcs.pixel_to_world(29, 39, 44) assert isinstance(coord, SkyCoord) assert isinstance(coord.frame, Galactic) assert_allclose(coord.l.deg, 25) assert_allclose(coord.b.deg, 10) assert isinstance(spec, SpectralCoord) assert_allclose(spec.to_value(u.Hz), 20) coord, spec = wcs.array_index_to_world(44, 39, 29) assert isinstance(coord, SkyCoord) assert isinstance(coord.frame, Galactic) assert_allclose(coord.l.deg, 25) assert_allclose(coord.b.deg, 10) assert isinstance(spec, SpectralCoord) assert_allclose(spec.to_value(u.Hz), 20) coord = SkyCoord(25, 10, unit='deg', frame='galactic') spec = 20 * u.Hz with pytest.warns(AstropyUserWarning, match='No observer defined on WCS'): x, y, z = wcs.world_to_pixel(coord, spec) assert_allclose(x, 29.) assert_allclose(y, 39.) assert_allclose(z, 44.) 
# Order of world coordinates shouldn't matter with pytest.warns(AstropyUserWarning, match='No observer defined on WCS'): x, y, z = wcs.world_to_pixel(spec, coord) assert_allclose(x, 29.) assert_allclose(y, 39.) assert_allclose(z, 44.) with pytest.warns(AstropyUserWarning, match='No observer defined on WCS'): i, j, k = wcs.world_to_array_index(coord, spec) assert_equal(i, 44) assert_equal(j, 39) assert_equal(k, 29) # Order of world coordinates shouldn't matter with pytest.warns(AstropyUserWarning, match='No observer defined on WCS'): i, j, k = wcs.world_to_array_index(spec, coord) assert_equal(i, 44) assert_equal(j, 39) assert_equal(k, 29) HEADER_SPECTRAL_CUBE_NONALIGNED = HEADER_SPECTRAL_CUBE.strip() + '\n' + """ PC2_3 = -0.5 PC3_2 = +0.5 """ with warnings.catch_warnings(): warnings.simplefilter('ignore', VerifyWarning) WCS_SPECTRAL_CUBE_NONALIGNED = WCS(Header.fromstring( HEADER_SPECTRAL_CUBE_NONALIGNED, sep='\n')) def test_spectral_cube_nonaligned(): # Make sure that correlation matrix gets adjusted if there are non-identity # CD matrix terms. wcs = WCS_SPECTRAL_CUBE_NONALIGNED assert wcs.world_axis_physical_types == ['pos.galactic.lat', 'em.freq', 'pos.galactic.lon'] assert wcs.world_axis_units == ['deg', 'Hz', 'deg'] assert wcs.pixel_axis_names == ['', '', ''] assert wcs.world_axis_names == ['Latitude', 'Frequency', 'Longitude'] assert_equal(wcs.axis_correlation_matrix, [[True, True, True], [False, True, True], [True, True, True]]) # NOTE: we check world_axis_object_components and world_axis_object_classes # again here because in the past this failed when non-aligned axes were # present, so this serves as a regression test. 
assert len(wcs.world_axis_object_components) == 3 assert wcs.world_axis_object_components[0] == ('celestial', 1, 'spherical.lat.degree') assert wcs.world_axis_object_components[1][:2] == ('spectral', 0) assert wcs.world_axis_object_components[2] == ('celestial', 0, 'spherical.lon.degree') assert wcs.world_axis_object_classes['celestial'][0] is SkyCoord assert wcs.world_axis_object_classes['celestial'][1] == () assert isinstance(wcs.world_axis_object_classes['celestial'][2]['frame'], Galactic) assert wcs.world_axis_object_classes['celestial'][2]['unit'] is u.deg assert wcs.world_axis_object_classes['spectral'][0] is Quantity assert wcs.world_axis_object_classes['spectral'][1] == () assert wcs.world_axis_object_classes['spectral'][2] == {} ############################################################################### # The following example is from Rots et al (2015), Table 5. It represents a # cube with two spatial dimensions and one time dimension ############################################################################### HEADER_TIME_CUBE = """ SIMPLE = T / Fits standard BITPIX = -32 / Bits per pixel NAXIS = 3 / Number of axes NAXIS1 = 2048 / Axis length NAXIS2 = 2048 / Axis length NAXIS3 = 11 / Axis length DATE = '2008-10-28T14:39:06' / Date FITS file was generated OBJECT = '2008 TC3' / Name of the object observed EXPTIME = 1.0011 / Integration time MJD-OBS = 54746.02749237 / Obs start DATE-OBS= '2008-10-07T00:39:35.3342' / Observing date TELESCOP= 'VISTA' / ESO Telescope Name INSTRUME= 'VIRCAM' / Instrument used. 
TIMESYS = 'UTC' / From Observatory Time System TREFPOS = 'TOPOCENT' / Topocentric MJDREF = 54746.0 / Time reference point in MJD RADESYS = 'ICRS' / Not equinoctal CTYPE2 = 'RA---ZPN' / Zenithal Polynomial Projection CRVAL2 = 2.01824372640628 / RA at ref pixel CUNIT2 = 'deg' / Angles are degrees always CRPIX2 = 2956.6 / Pixel coordinate at ref point CTYPE1 = 'DEC--ZPN' / Zenithal Polynomial Projection CRVAL1 = 14.8289418840003 / Dec at ref pixel CUNIT1 = 'deg' / Angles are degrees always CRPIX1 = -448.2 / Pixel coordinate at ref point CTYPE3 = 'UTC' / linear time (UTC) CRVAL3 = 2375.341 / Relative time of first frame CUNIT3 = 's' / Time unit CRPIX3 = 1.0 / Pixel coordinate at ref point CTYPE3A = 'TT' / alternative linear time (TT) CRVAL3A = 2440.525 / Relative time of first frame CUNIT3A = 's' / Time unit CRPIX3A = 1.0 / Pixel coordinate at ref point OBSGEO-B= -24.6157 / [deg] Tel geodetic latitute (=North)+ OBSGEO-L= -70.3976 / [deg] Tel geodetic longitude (=East)+ OBSGEO-H= 2530.0000 / [m] Tel height above reference ellipsoid CRDER3 = 0.0819 / random error in timings from fit CSYER3 = 0.0100 / absolute time error PC1_1 = 0.999999971570892 / WCS transform matrix element PC1_2 = 0.000238449608932 / WCS transform matrix element PC2_1 = -0.000621542859395 / WCS transform matrix element PC2_2 = 0.999999806842218 / WCS transform matrix element CDELT1 = -9.48575432499806E-5 / Axis scale at reference point CDELT2 = 9.48683176211164E-5 / Axis scale at reference point CDELT3 = 13.3629 / Axis scale at reference point PV1_1 = 1. / ZPN linear term PV1_3 = 42. 
/ ZPN cubic term """ with warnings.catch_warnings(): warnings.simplefilter('ignore', (VerifyWarning, FITSFixedWarning)) WCS_TIME_CUBE = WCS(Header.fromstring(HEADER_TIME_CUBE, sep='\n')) def test_time_cube(): # Spectral cube with a weird axis ordering wcs = WCS_TIME_CUBE assert wcs.pixel_n_dim == 3 assert wcs.world_n_dim == 3 assert wcs.array_shape == (11, 2048, 2048) assert wcs.pixel_shape == (2048, 2048, 11) assert wcs.world_axis_physical_types == ['pos.eq.dec', 'pos.eq.ra', 'time'] assert wcs.world_axis_units == ['deg', 'deg', 's'] assert wcs.pixel_axis_names == ['', '', ''] assert wcs.world_axis_names == ['', '', ''] assert_equal(wcs.axis_correlation_matrix, [[True, True, False], [True, True, False], [False, False, True]]) components = wcs.world_axis_object_components assert components[0] == ('celestial', 1, 'spherical.lat.degree') assert components[1] == ('celestial', 0, 'spherical.lon.degree') assert components[2][:2] == ('time', 0) assert callable(components[2][2]) assert wcs.world_axis_object_classes['celestial'][0] is SkyCoord assert wcs.world_axis_object_classes['celestial'][1] == () assert isinstance(wcs.world_axis_object_classes['celestial'][2]['frame'], ICRS) assert wcs.world_axis_object_classes['celestial'][2]['unit'] is u.deg assert wcs.world_axis_object_classes['time'][0] is Time assert wcs.world_axis_object_classes['time'][1] == () assert wcs.world_axis_object_classes['time'][2] == {} assert callable(wcs.world_axis_object_classes['time'][3]) assert_allclose(wcs.pixel_to_world_values(-449.2, 2955.6, 0), (14.8289418840003, 2.01824372640628, 2375.341)) assert_allclose(wcs.array_index_to_world_values(0, 2955.6, -449.2), (14.8289418840003, 2.01824372640628, 2375.341)) assert_allclose(wcs.world_to_pixel_values(14.8289418840003, 2.01824372640628, 2375.341), (-449.2, 2955.6, 0)) assert_equal(wcs.world_to_array_index_values(14.8289418840003, 2.01824372640628, 2375.341), (0, 2956, -449)) # High-level API coord, time = wcs.pixel_to_world(29, 39, 44) assert 
isinstance(coord, SkyCoord) assert isinstance(coord.frame, ICRS) assert_allclose(coord.ra.deg, 1.7323356692202325) assert_allclose(coord.dec.deg, 14.783516054817797) assert isinstance(time, Time) assert_allclose(time.mjd, 54746.03429755324) coord, time = wcs.array_index_to_world(44, 39, 29) assert isinstance(coord, SkyCoord) assert isinstance(coord.frame, ICRS) assert_allclose(coord.ra.deg, 1.7323356692202325) assert_allclose(coord.dec.deg, 14.783516054817797) assert isinstance(time, Time) assert_allclose(time.mjd, 54746.03429755324) x, y, z = wcs.world_to_pixel(coord, time) assert_allclose(x, 29.) assert_allclose(y, 39.) assert_allclose(z, 44.) # Order of world coordinates shouldn't matter x, y, z = wcs.world_to_pixel(time, coord) assert_allclose(x, 29.) assert_allclose(y, 39.) assert_allclose(z, 44.) i, j, k = wcs.world_to_array_index(coord, time) assert_equal(i, 44) assert_equal(j, 39) assert_equal(k, 29) # Order of world coordinates shouldn't matter i, j, k = wcs.world_to_array_index(time, coord) assert_equal(i, 44) assert_equal(j, 39) assert_equal(k, 29) ############################################################################### # The following tests are to make sure that Time objects are constructed # correctly for a variety of combinations of WCS keywords ############################################################################### HEADER_TIME_1D = """ SIMPLE = T BITPIX = -32 NAXIS = 1 NAXIS1 = 2048 TIMESYS = 'UTC' TREFPOS = 'TOPOCENT' MJDREF = 50002.6 CTYPE1 = 'UTC' CRVAL1 = 5 CUNIT1 = 's' CRPIX1 = 1.0 CDELT1 = 2 OBSGEO-L= -20 OBSGEO-B= -70 OBSGEO-H= 2530 """ if Version(wcsver) >= Version('7.1'): HEADER_TIME_1D += "DATEREF = '1995-10-12T14:24:00'\n" @pytest.fixture def header_time_1d(): return Header.fromstring(HEADER_TIME_1D, sep='\n') def assert_time_at(header, position, jd1, jd2, scale, format): with warnings.catch_warnings(): warnings.simplefilter('ignore', FITSFixedWarning) wcs = WCS(header) time = wcs.pixel_to_world(position) 
assert_allclose(time.jd1, jd1, rtol=1e-10) assert_allclose(time.jd2, jd2, rtol=1e-10) assert time.format == format assert time.scale == scale @pytest.mark.parametrize('scale', ('tai', 'tcb', 'tcg', 'tdb', 'tt', 'ut1', 'utc', 'local')) def test_time_1d_values(header_time_1d, scale): # Check that Time objects are instantiated with the correct values, # scales, and formats. header_time_1d['CTYPE1'] = scale.upper() assert_time_at(header_time_1d, 1, 2450003, 0.1 + 7 / 3600 / 24, scale, 'mjd') def test_time_1d_values_gps(header_time_1d): # Special treatment for GPS scale header_time_1d['CTYPE1'] = 'GPS' assert_time_at(header_time_1d, 1, 2450003, 0.1 + (7 + 19) / 3600 / 24, 'tai', 'mjd') def test_time_1d_values_deprecated(header_time_1d): # Deprecated (in FITS) scales header_time_1d['CTYPE1'] = 'TDT' assert_time_at(header_time_1d, 1, 2450003, 0.1 + 7 / 3600 / 24, 'tt', 'mjd') header_time_1d['CTYPE1'] = 'IAT' assert_time_at(header_time_1d, 1, 2450003, 0.1 + 7 / 3600 / 24, 'tai', 'mjd') header_time_1d['CTYPE1'] = 'GMT' assert_time_at(header_time_1d, 1, 2450003, 0.1 + 7 / 3600 / 24, 'utc', 'mjd') header_time_1d['CTYPE1'] = 'ET' assert_time_at(header_time_1d, 1, 2450003, 0.1 + 7 / 3600 / 24, 'tt', 'mjd') def test_time_1d_values_time(header_time_1d): header_time_1d['CTYPE1'] = 'TIME' assert_time_at(header_time_1d, 1, 2450003, 0.1 + 7 / 3600 / 24, 'utc', 'mjd') header_time_1d['TIMESYS'] = 'TAI' assert_time_at(header_time_1d, 1, 2450003, 0.1 + 7 / 3600 / 24, 'tai', 'mjd') @pytest.mark.remote_data @pytest.mark.parametrize('scale', ('tai', 'tcb', 'tcg', 'tdb', 'tt', 'ut1', 'utc')) def test_time_1d_roundtrip(header_time_1d, scale): # Check that coordinates round-trip pixel_in = np.arange(3, 10) header_time_1d['CTYPE1'] = scale.upper() with warnings.catch_warnings(): warnings.simplefilter('ignore', FITSFixedWarning) wcs = WCS(header_time_1d) # Simple test time = wcs.pixel_to_world(pixel_in) pixel_out = wcs.world_to_pixel(time) assert_allclose(pixel_in, pixel_out) # Test with an 
intermediate change to a different scale/format time = wcs.pixel_to_world(pixel_in).tdb time.format = 'isot' pixel_out = wcs.world_to_pixel(time) assert_allclose(pixel_in, pixel_out) def test_time_1d_high_precision(header_time_1d): # Case where the MJDREF is split into two for high precision del header_time_1d['MJDREF'] header_time_1d['MJDREFI'] = 52000. header_time_1d['MJDREFF'] = 1e-11 with warnings.catch_warnings(): warnings.simplefilter('ignore', FITSFixedWarning) wcs = WCS(header_time_1d) time = wcs.pixel_to_world(10) # Here we have to use a very small rtol to really test that MJDREFF is # taken into account assert_allclose(time.jd1, 2452001.0, rtol=1e-12) assert_allclose(time.jd2, -0.5 + 25 / 3600 / 24 + 1e-11, rtol=1e-13) def test_time_1d_location_geodetic(header_time_1d): # Make sure that the location is correctly returned (geodetic case) with warnings.catch_warnings(): warnings.simplefilter('ignore', FITSFixedWarning) wcs = WCS(header_time_1d) time = wcs.pixel_to_world(10) lon, lat, alt = time.location.to_geodetic() # FIXME: alt won't work for now because ERFA doesn't implement the IAU 1976 # ellipsoid (https://github.com/astropy/astropy/issues/9420) assert_allclose(lon.degree, -20) assert_allclose(lat.degree, -70) # assert_allclose(alt.to_value(u.m), 2530.) 
# Fixture: same 1D time header as above, but with the OBSGEO-* observer
# location keywords removed, so tests can exercise the missing/partial
# location code paths.
@pytest.fixture
def header_time_1d_no_obs():
    header = Header.fromstring(HEADER_TIME_1D, sep='\n')
    del header['OBSGEO-L']
    del header['OBSGEO-B']
    del header['OBSGEO-H']
    return header


def test_time_1d_location_geocentric(header_time_1d_no_obs):
    # Make sure that the location is correctly returned (geocentric case)
    header = header_time_1d_no_obs
    header['OBSGEO-X'] = 10
    header['OBSGEO-Y'] = -20
    header['OBSGEO-Z'] = 30
    with warnings.catch_warnings():
        warnings.simplefilter('ignore', FITSFixedWarning)
        wcs = WCS(header)
    time = wcs.pixel_to_world(10)
    x, y, z = time.location.to_geocentric()
    assert_allclose(x.to_value(u.m), 10)
    assert_allclose(y.to_value(u.m), -20)
    assert_allclose(z.to_value(u.m), 30)


def test_time_1d_location_geocenter(header_time_1d_no_obs):
    # TREFPOS = GEOCENTER should map to a location at the geocenter (0, 0, 0)
    header_time_1d_no_obs['TREFPOS'] = 'GEOCENTER'
    wcs = WCS(header_time_1d_no_obs)
    time = wcs.pixel_to_world(10)
    x, y, z = time.location.to_geocentric()
    assert_allclose(x.to_value(u.m), 0)
    assert_allclose(y.to_value(u.m), 0)
    assert_allclose(z.to_value(u.m), 0)


def test_time_1d_location_missing(header_time_1d_no_obs):
    # Check what happens when no location is present
    wcs = WCS(header_time_1d_no_obs)
    with pytest.warns(UserWarning,
                      match='Missing or incomplete observer location '
                            'information, setting location in Time to None'):
        time = wcs.pixel_to_world(10)
    assert time.location is None


def test_time_1d_location_incomplete(header_time_1d_no_obs):
    # Check what happens when location information is incomplete
    # (only one of the three OBSGEO-* keywords present)
    header_time_1d_no_obs['OBSGEO-L'] = 10.
    with warnings.catch_warnings():
        warnings.simplefilter('ignore', FITSFixedWarning)
        wcs = WCS(header_time_1d_no_obs)
    with pytest.warns(UserWarning,
                      match='Missing or incomplete observer location '
                            'information, setting location in Time to None'):
        time = wcs.pixel_to_world(10)
    assert time.location is None


def test_time_1d_location_unsupported(header_time_1d_no_obs):
    # Check what happens when TREFPOS is unsupported
    header_time_1d_no_obs['TREFPOS'] = 'BARYCENTER'
    wcs = WCS(header_time_1d_no_obs)
    with pytest.warns(UserWarning,
                      match="Observation location 'barycenter' is not "
                            "supported, setting location in Time to None"):
        time = wcs.pixel_to_world(10)
    assert time.location is None


def test_time_1d_unsupported_ctype(header_time_1d_no_obs):
    # For cases that we don't support yet, e.g. UT(...), use Time and drop sub-scale
    # Case where the MJDREF is split into two for high precision
    header_time_1d_no_obs['CTYPE1'] = 'UT(WWV)'
    wcs = WCS(header_time_1d_no_obs)
    with pytest.warns(UserWarning,
                      match="Dropping unsupported sub-scale WWV from scale UT"):
        time = wcs.pixel_to_world(10)
    assert isinstance(time, Time)


###############################################################################
# Extra corner cases
###############################################################################


def test_unrecognized_unit():
    # TODO: Determine whether the following behavior is desirable
    # An invalid CUNIT is passed through verbatim rather than raising.
    wcs = WCS(naxis=1)
    with pytest.warns(UnitsWarning):
        wcs.wcs.cunit = ['bananas // sekonds']
    assert wcs.world_axis_units == ['bananas // sekonds']


def test_distortion_correlations():
    # With SIP distortions present, all pixel/world axes are correlated
    # regardless of the PC matrix or CTYPE values.
    filename = get_pkg_data_filename('../../tests/data/sip.fits')
    with pytest.warns(FITSFixedWarning):
        w = WCS(filename)
    assert_equal(w.axis_correlation_matrix, True)

    # Changing PC to an identity matrix doesn't change anything since
    # distortions are still present.
    w.wcs.pc = [[1, 0], [0, 1]]
    assert_equal(w.axis_correlation_matrix, True)

    # Nor does changing the name of the axes to make them non-celestial
    w.wcs.ctype = ['X', 'Y']
    assert_equal(w.axis_correlation_matrix, True)

    # However once we turn off the distortions the matrix changes
    w.sip = None
    assert_equal(w.axis_correlation_matrix, [[True, False], [False, True]])

    # If we go back to celestial coordinates then the matrix is all True again
    w.wcs.ctype = ['RA---TAN', 'DEC--TAN']
    assert_equal(w.axis_correlation_matrix, True)

    # Or if we change to X/Y but have a non-identity PC
    w.wcs.pc = [[0.9, -0.1], [0.1, 0.9]]
    w.wcs.ctype = ['X', 'Y']
    assert_equal(w.axis_correlation_matrix, True)


def test_custom_ctype_to_ucd_mappings():
    # custom_ctype_to_ucd_mapping is a context manager that temporarily adds
    # CTYPE -> UCD physical-type mappings; inner mappings take priority.
    wcs = WCS(naxis=1)
    wcs.wcs.ctype = ['SPAM']

    assert wcs.world_axis_physical_types == [None]

    # Check simple behavior

    with custom_ctype_to_ucd_mapping({'APPLE': 'food.fruit'}):
        assert wcs.world_axis_physical_types == [None]

    with custom_ctype_to_ucd_mapping({'APPLE': 'food.fruit', 'SPAM': 'food.spam'}):
        assert wcs.world_axis_physical_types == ['food.spam']

    # Check nesting

    with custom_ctype_to_ucd_mapping({'SPAM': 'food.spam'}):
        with custom_ctype_to_ucd_mapping({'APPLE': 'food.fruit'}):
            assert wcs.world_axis_physical_types == ['food.spam']

    with custom_ctype_to_ucd_mapping({'APPLE': 'food.fruit'}):
        with custom_ctype_to_ucd_mapping({'SPAM': 'food.spam'}):
            assert wcs.world_axis_physical_types == ['food.spam']

    # Check priority in nesting

    with custom_ctype_to_ucd_mapping({'SPAM': 'notfood'}):
        with custom_ctype_to_ucd_mapping({'SPAM': 'food.spam'}):
            assert wcs.world_axis_physical_types == ['food.spam']

    with custom_ctype_to_ucd_mapping({'SPAM': 'food.spam'}):
        with custom_ctype_to_ucd_mapping({'SPAM': 'notfood'}):
            assert wcs.world_axis_physical_types == ['notfood']


def test_caching_components_and_classes():
    # Make sure that when we change the WCS object, the classes and components
    # are updated (we use a cache internally, so we need to make sure the cache
    # is invalidated if needed)

    wcs = WCS_SIMPLE_CELESTIAL.deepcopy()

    assert wcs.world_axis_object_components == [('celestial', 0, 'spherical.lon.degree'),
                                                ('celestial', 1, 'spherical.lat.degree')]

    assert wcs.world_axis_object_classes['celestial'][0] is SkyCoord
    assert wcs.world_axis_object_classes['celestial'][1] == ()
    assert isinstance(wcs.world_axis_object_classes['celestial'][2]['frame'], ICRS)
    assert wcs.world_axis_object_classes['celestial'][2]['unit'] is u.deg

    # Mutating the underlying wcsprm must be reflected in the derived frame.
    wcs.wcs.radesys = 'FK5'

    frame = wcs.world_axis_object_classes['celestial'][2]['frame']
    assert isinstance(frame, FK5)
    assert frame.equinox.jyear == 2000.

    wcs.wcs.equinox = 2010

    frame = wcs.world_axis_object_classes['celestial'][2]['frame']
    assert isinstance(frame, FK5)
    assert frame.equinox.jyear == 2010.


def test_sub_wcsapi_attributes():
    # Regression test for a bug that caused some of the WCS attributes to be
    # incorrect when using WCS.sub or WCS.celestial (which is an alias for sub
    # with lon/lat types).

    wcs = WCS_SPECTRAL_CUBE.deepcopy()
    wcs.pixel_shape = (30, 40, 50)
    wcs.pixel_bounds = [(-1, 11), (-2, 18), (5, 15)]

    # Use celestial shortcut

    wcs_sub1 = wcs.celestial

    assert wcs_sub1.pixel_n_dim == 2
    assert wcs_sub1.world_n_dim == 2
    assert wcs_sub1.array_shape == (50, 30)
    assert wcs_sub1.pixel_shape == (30, 50)
    assert wcs_sub1.pixel_bounds == [(-1, 11), (5, 15)]
    assert wcs_sub1.world_axis_physical_types == ['pos.galactic.lat', 'pos.galactic.lon']
    assert wcs_sub1.world_axis_units == ['deg', 'deg']
    assert wcs_sub1.world_axis_names == ['Latitude', 'Longitude']

    # Try adding axes (0 in the sub() list inserts a new dummy axis)

    wcs_sub2 = wcs.sub([0, 2, 0])

    assert wcs_sub2.pixel_n_dim == 3
    assert wcs_sub2.world_n_dim == 3
    assert wcs_sub2.array_shape == (None, 40, None)
    assert wcs_sub2.pixel_shape == (None, 40, None)
    assert wcs_sub2.pixel_bounds == [None, (-2, 18), None]
    assert wcs_sub2.world_axis_physical_types == [None, 'em.freq', None]
    assert wcs_sub2.world_axis_units == ['', 'Hz', '']
    assert wcs_sub2.world_axis_names == ['', 'Frequency', '']

    # Use strings

    wcs_sub3 = wcs.sub(['longitude', 'latitude'])

    assert wcs_sub3.pixel_n_dim == 2
    assert wcs_sub3.world_n_dim == 2
    assert wcs_sub3.array_shape == (30, 50)
    assert wcs_sub3.pixel_shape == (50, 30)
    assert wcs_sub3.pixel_bounds == [(5, 15), (-1, 11)]
    assert wcs_sub3.world_axis_physical_types == ['pos.galactic.lon', 'pos.galactic.lat']
    assert wcs_sub3.world_axis_units == ['deg', 'deg']
    assert wcs_sub3.world_axis_names == ['Longitude', 'Latitude']

    # Now try without CNAME set

    wcs.wcs.cname = [''] * wcs.wcs.naxis
    wcs_sub4 = wcs.sub(['longitude', 'latitude'])

    assert wcs_sub4.pixel_n_dim == 2
    assert wcs_sub4.world_n_dim == 2
    assert wcs_sub4.array_shape == (30, 50)
    assert wcs_sub4.pixel_shape == (50, 30)
    assert wcs_sub4.pixel_bounds == [(5, 15), (-1, 11)]
    assert wcs_sub4.world_axis_physical_types == ['pos.galactic.lon', 'pos.galactic.lat']
    assert wcs_sub4.world_axis_units == ['deg', 'deg']
    assert wcs_sub4.world_axis_names == ['', '']


HEADER_POLARIZED = """
CTYPE1  = 'HPLT-TAN'
CTYPE2  = 'HPLN-TAN'
CTYPE3  = 'STOKES'
"""


@pytest.fixture
def header_polarized():
    return Header.fromstring(HEADER_POLARIZED, sep='\n')


def test_phys_type_polarization(header_polarized):
    # A STOKES axis should be identified as the Stokes polarization type.
    w = WCS(header_polarized)
    assert w.world_axis_physical_types[2] == 'phys.polarization.stokes'


###############################################################################
# Spectral transformations
###############################################################################

HEADER_SPECTRAL_FRAMES = """
BUNIT   = 'Jy/beam'
EQUINOX =      2.000000000E+03
CTYPE1  = 'RA---SIN'
CRVAL1  =      2.60108333333E+02
CDELT1  =     -2.777777845E-04
CRPIX1  = 1.0
CUNIT1  = 'deg'
CTYPE2  = 'DEC--SIN'
CRVAL2  =     -9.75000000000E-01
CDELT2  =      2.777777845E-04
CRPIX2  = 1.0
CUNIT2  = 'deg'
CTYPE3  = 'FREQ'
CRVAL3  =      1.37835117405E+09
CDELT3  =      9.765625000E+04
CRPIX3  = 32.0
CUNIT3  = 'Hz'
SPECSYS = 'TOPOCENT'
RESTFRQ =      1.420405752E+09 / [Hz]
RADESYS = 'FK5'
"""


@pytest.fixture
def header_spectral_frames():
    return Header.fromstring(HEADER_SPECTRAL_FRAMES, sep='\n')


def test_spectralcoord_frame(header_spectral_frames):

    # This is a test to check the numerical results of transformations between
    # different velocity frames. We simply make sure that the returned
    # SpectralCoords are in the right frame but don't check the transformations
    # since this is already done in test_spectralcoord_accuracy
    # in astropy.coordinates.

    with iers.conf.set_temp('auto_download', False):

        obstime = Time(f"2009-05-04T04:44:23", scale='utc')

        header = header_spectral_frames.copy()
        header['MJD-OBS'] = obstime.mjd
        header['CRVAL1'] = 16.33211
        header['CRVAL2'] = -34.2221
        header['OBSGEO-L'] = 144.2
        header['OBSGEO-B'] = -20.2
        header['OBSGEO-H'] = 0.

        # We start off with a WCS defined in topocentric frequency
        with pytest.warns(FITSFixedWarning):
            wcs_topo = WCS(header)

        # We convert a single pixel coordinate to world coordinates and keep only
        # the second high level object - a SpectralCoord:
        sc_topo = wcs_topo.pixel_to_world(0, 0, 31)[1]

        # We check that this is in topocentric frame with zero velocities
        assert isinstance(sc_topo, SpectralCoord)
        assert isinstance(sc_topo.observer, ITRS)
        assert sc_topo.observer.obstime.isot == obstime.isot
        assert_equal(sc_topo.observer.data.differentials['s'].d_xyz.value, 0)

        # Sanity check: the derived observer should be at the telescope site.
        observatory = EarthLocation.from_geodetic(144.2, -20.2).get_itrs(obstime=obstime).transform_to(ICRS())
        assert observatory.separation_3d(sc_topo.observer.transform_to(ICRS())) < 1 * u.km

        for specsys, expected_frame in VELOCITY_FRAMES.items():

            header['SPECSYS'] = specsys
            with pytest.warns(FITSFixedWarning):
                wcs = WCS(header)
            sc = wcs.pixel_to_world(0, 0, 31)[1]

            # Now transform to the expected velocity frame, which should leave
            # the spectral coordinate unchanged
            sc_check = sc.with_observer_stationary_relative_to(expected_frame)
            assert_quantity_allclose(sc.quantity, sc_check.quantity)


@pytest.mark.parametrize(('ctype3', 'observer'),
                         product(['ZOPT', 'BETA', 'VELO', 'VRAD', 'VOPT'],
                                 [False, True]))
def test_different_ctypes(header_spectral_frames, ctype3, observer):
    # Check that each spectral CTYPE round-trips through pixel_to_world /
    # world_to_pixel, with and without observer location information.
    header = header_spectral_frames.copy()
    header['CTYPE3'] = ctype3
    header['CRVAL3'] = 0.1
    header['CDELT3'] = 0.001

    if ctype3[0] == 'V':
        header['CUNIT3'] = 'm s-1'
    else:
        header['CUNIT3'] = ''

    header['RESTWAV'] = 1.420405752E+09
    header['MJD-OBS'] = 55197

    if observer:
        header['OBSGEO-L'] = 144.2
        header['OBSGEO-B'] = -20.2
        header['OBSGEO-H'] = 0.
        header['SPECSYS'] = 'BARYCENT'

    with warnings.catch_warnings():
        warnings.simplefilter('ignore', FITSFixedWarning)
        wcs = WCS(header)
    skycoord, spectralcoord = wcs.pixel_to_world(0, 0, 31)

    assert isinstance(spectralcoord, SpectralCoord)

    if observer:
        pix = wcs.world_to_pixel(skycoord, spectralcoord)
    else:
        # Without an observer, the round trip still works but warns.
        with pytest.warns(AstropyUserWarning, match='No observer defined on WCS'):
            pix = wcs.world_to_pixel(skycoord, spectralcoord)

    assert_allclose(pix, [0, 0, 31], rtol=1e-6)
pllim/astropy
astropy/wcs/wcsapi/tests/test_fitswcs.py
astropy/wcs/wcsapi/low_level_api.py
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Astronomical and physics constants for Astropy v4.0.

See :mod:`astropy.constants` for a complete listing of constants
defined in Astropy.
"""
import warnings

from astropy.utils import find_current_module

from . import utils as _utils
from . import codata2018, iau2015

codata = codata2018
iaudata = iau2015

_utils._set_c(codata, iaudata, find_current_module())

# Overwrite the following for consistency.
# https://github.com/astropy/astropy/issues/8920
with warnings.catch_warnings():
    warnings.filterwarnings('ignore', 'Constant .*already has a definition')

    def _mass_from_gm(name, doc, gm):
        # Derive a body's mass constant from its IAU 2015 mass parameter
        # (GM) and the CODATA 2018 gravitational constant G; the relative
        # uncertainty of G dominates, so it is propagated directly.
        return iau2015.IAU2015(
            name, doc, gm.value / codata2018.G.value, 'kg',
            ((codata2018.G.uncertainty / codata2018.G.value)
             * (gm.value / codata2018.G.value)),
            f"IAU 2015 Resolution B 3 + {codata2018.G.reference}", system='si')

    # Solar mass (derived from mass parameter and gravitational constant)
    M_sun = _mass_from_gm('M_sun', "Solar mass", iau2015.GM_sun)

    # Jupiter mass (derived from mass parameter and gravitational constant)
    M_jup = _mass_from_gm('M_jup', "Jupiter mass", iau2015.GM_jup)

    # Earth mass (derived from mass parameter and gravitational constant)
    M_earth = _mass_from_gm('M_earth', "Earth mass", iau2015.GM_earth)

    del _mass_from_gm


# Clean up namespace
del warnings
del find_current_module
del _utils
# Note that we test the main astropy.wcs.WCS class directly rather than testing # the mix-in class on its own (since it's not functional without being used as # a mix-in) import warnings from packaging.version import Version import numpy as np import pytest from numpy.testing import assert_equal, assert_allclose from itertools import product from astropy import units as u from astropy.time import Time from astropy.tests.helper import assert_quantity_allclose from astropy.units import Quantity from astropy.coordinates import ICRS, FK5, Galactic, SkyCoord, SpectralCoord, ITRS, EarthLocation from astropy.io.fits import Header from astropy.io.fits.verify import VerifyWarning from astropy.units.core import UnitsWarning from astropy.utils.data import get_pkg_data_filename from astropy.wcs.wcs import WCS, FITSFixedWarning from astropy.wcs.wcsapi.fitswcs import custom_ctype_to_ucd_mapping, VELOCITY_FRAMES from astropy.wcs._wcs import __version__ as wcsver from astropy.utils import iers from astropy.utils.exceptions import AstropyUserWarning ############################################################################### # The following example is the simplest WCS with default values ############################################################################### WCS_EMPTY = WCS(naxis=1) WCS_EMPTY.wcs.crpix = [1] def test_empty(): wcs = WCS_EMPTY # Low-level API assert wcs.pixel_n_dim == 1 assert wcs.world_n_dim == 1 assert wcs.array_shape is None assert wcs.pixel_shape is None assert wcs.world_axis_physical_types == [None] assert wcs.world_axis_units == [''] assert wcs.pixel_axis_names == [''] assert wcs.world_axis_names == [''] assert_equal(wcs.axis_correlation_matrix, True) assert wcs.world_axis_object_components == [('world', 0, 'value')] assert wcs.world_axis_object_classes['world'][0] is Quantity assert wcs.world_axis_object_classes['world'][1] == () assert wcs.world_axis_object_classes['world'][2]['unit'] is u.one assert_allclose(wcs.pixel_to_world_values(29), 29) 
assert_allclose(wcs.array_index_to_world_values(29), 29) assert np.ndim(wcs.pixel_to_world_values(29)) == 0 assert np.ndim(wcs.array_index_to_world_values(29)) == 0 assert_allclose(wcs.world_to_pixel_values(29), 29) assert_equal(wcs.world_to_array_index_values(29), (29,)) assert np.ndim(wcs.world_to_pixel_values(29)) == 0 assert np.ndim(wcs.world_to_array_index_values(29)) == 0 # High-level API coord = wcs.pixel_to_world(29) assert_quantity_allclose(coord, 29 * u.one) assert np.ndim(coord) == 0 coord = wcs.array_index_to_world(29) assert_quantity_allclose(coord, 29 * u.one) assert np.ndim(coord) == 0 coord = 15 * u.one x = wcs.world_to_pixel(coord) assert_allclose(x, 15.) assert np.ndim(x) == 0 i = wcs.world_to_array_index(coord) assert_equal(i, 15) assert np.ndim(i) == 0 ############################################################################### # The following example is a simple 2D image with celestial coordinates ############################################################################### HEADER_SIMPLE_CELESTIAL = """ WCSAXES = 2 CTYPE1 = RA---TAN CTYPE2 = DEC--TAN CRVAL1 = 10 CRVAL2 = 20 CRPIX1 = 30 CRPIX2 = 40 CDELT1 = -0.1 CDELT2 = 0.1 CROTA2 = 0. 
CUNIT1 = deg CUNIT2 = deg """ with warnings.catch_warnings(): warnings.simplefilter('ignore', VerifyWarning) WCS_SIMPLE_CELESTIAL = WCS(Header.fromstring( HEADER_SIMPLE_CELESTIAL, sep='\n')) def test_simple_celestial(): wcs = WCS_SIMPLE_CELESTIAL # Low-level API assert wcs.pixel_n_dim == 2 assert wcs.world_n_dim == 2 assert wcs.array_shape is None assert wcs.pixel_shape is None assert wcs.world_axis_physical_types == ['pos.eq.ra', 'pos.eq.dec'] assert wcs.world_axis_units == ['deg', 'deg'] assert wcs.pixel_axis_names == ['', ''] assert wcs.world_axis_names == ['', ''] assert_equal(wcs.axis_correlation_matrix, True) assert wcs.world_axis_object_components == [('celestial', 0, 'spherical.lon.degree'), ('celestial', 1, 'spherical.lat.degree')] assert wcs.world_axis_object_classes['celestial'][0] is SkyCoord assert wcs.world_axis_object_classes['celestial'][1] == () assert isinstance(wcs.world_axis_object_classes['celestial'][2]['frame'], ICRS) assert wcs.world_axis_object_classes['celestial'][2]['unit'] is u.deg assert_allclose(wcs.pixel_to_world_values(29, 39), (10, 20)) assert_allclose(wcs.array_index_to_world_values(39, 29), (10, 20)) assert_allclose(wcs.world_to_pixel_values(10, 20), (29., 39.)) assert_equal(wcs.world_to_array_index_values(10, 20), (39, 29)) # High-level API coord = wcs.pixel_to_world(29, 39) assert isinstance(coord, SkyCoord) assert isinstance(coord.frame, ICRS) assert_allclose(coord.ra.deg, 10) assert_allclose(coord.dec.deg, 20) coord = wcs.array_index_to_world(39, 29) assert isinstance(coord, SkyCoord) assert isinstance(coord.frame, ICRS) assert_allclose(coord.ra.deg, 10) assert_allclose(coord.dec.deg, 20) coord = SkyCoord(10, 20, unit='deg', frame='icrs') x, y = wcs.world_to_pixel(coord) assert_allclose(x, 29.) assert_allclose(y, 39.) 
i, j = wcs.world_to_array_index(coord) assert_equal(i, 39) assert_equal(j, 29) # Check that if the coordinates are passed in a different frame things still # work properly coord_galactic = coord.galactic x, y = wcs.world_to_pixel(coord_galactic) assert_allclose(x, 29.) assert_allclose(y, 39.) i, j = wcs.world_to_array_index(coord_galactic) assert_equal(i, 39) assert_equal(j, 29) # Check that we can actually index the array data = np.arange(3600).reshape((60, 60)) coord = SkyCoord(10, 20, unit='deg', frame='icrs') index = wcs.world_to_array_index(coord) assert_equal(data[index], 2369) coord = SkyCoord([10, 12], [20, 22], unit='deg', frame='icrs') index = wcs.world_to_array_index(coord) assert_equal(data[index], [2369, 3550]) ############################################################################### # The following example is a spectral cube with axes in an unusual order ############################################################################### HEADER_SPECTRAL_CUBE = """ WCSAXES = 3 CTYPE1 = GLAT-CAR CTYPE2 = FREQ CTYPE3 = GLON-CAR CNAME1 = Latitude CNAME2 = Frequency CNAME3 = Longitude CRVAL1 = 10 CRVAL2 = 20 CRVAL3 = 25 CRPIX1 = 30 CRPIX2 = 40 CRPIX3 = 45 CDELT1 = -0.1 CDELT2 = 0.5 CDELT3 = 0.1 CUNIT1 = deg CUNIT2 = Hz CUNIT3 = deg """ with warnings.catch_warnings(): warnings.simplefilter('ignore', VerifyWarning) WCS_SPECTRAL_CUBE = WCS(Header.fromstring(HEADER_SPECTRAL_CUBE, sep='\n')) def test_spectral_cube(): # Spectral cube with a weird axis ordering wcs = WCS_SPECTRAL_CUBE # Low-level API assert wcs.pixel_n_dim == 3 assert wcs.world_n_dim == 3 assert wcs.array_shape is None assert wcs.pixel_shape is None assert wcs.world_axis_physical_types == ['pos.galactic.lat', 'em.freq', 'pos.galactic.lon'] assert wcs.world_axis_units == ['deg', 'Hz', 'deg'] assert wcs.pixel_axis_names == ['', '', ''] assert wcs.world_axis_names == ['Latitude', 'Frequency', 'Longitude'] assert_equal(wcs.axis_correlation_matrix, [[True, False, True], [False, True, False], [True, 
False, True]]) assert len(wcs.world_axis_object_components) == 3 assert wcs.world_axis_object_components[0] == ('celestial', 1, 'spherical.lat.degree') assert wcs.world_axis_object_components[1][:2] == ('spectral', 0) assert wcs.world_axis_object_components[2] == ('celestial', 0, 'spherical.lon.degree') assert wcs.world_axis_object_classes['celestial'][0] is SkyCoord assert wcs.world_axis_object_classes['celestial'][1] == () assert isinstance(wcs.world_axis_object_classes['celestial'][2]['frame'], Galactic) assert wcs.world_axis_object_classes['celestial'][2]['unit'] is u.deg assert wcs.world_axis_object_classes['spectral'][0] is Quantity assert wcs.world_axis_object_classes['spectral'][1] == () assert wcs.world_axis_object_classes['spectral'][2] == {} assert_allclose(wcs.pixel_to_world_values(29, 39, 44), (10, 20, 25)) assert_allclose(wcs.array_index_to_world_values(44, 39, 29), (10, 20, 25)) assert_allclose(wcs.world_to_pixel_values(10, 20, 25), (29., 39., 44.)) assert_equal(wcs.world_to_array_index_values(10, 20, 25), (44, 39, 29)) # High-level API coord, spec = wcs.pixel_to_world(29, 39, 44) assert isinstance(coord, SkyCoord) assert isinstance(coord.frame, Galactic) assert_allclose(coord.l.deg, 25) assert_allclose(coord.b.deg, 10) assert isinstance(spec, SpectralCoord) assert_allclose(spec.to_value(u.Hz), 20) coord, spec = wcs.array_index_to_world(44, 39, 29) assert isinstance(coord, SkyCoord) assert isinstance(coord.frame, Galactic) assert_allclose(coord.l.deg, 25) assert_allclose(coord.b.deg, 10) assert isinstance(spec, SpectralCoord) assert_allclose(spec.to_value(u.Hz), 20) coord = SkyCoord(25, 10, unit='deg', frame='galactic') spec = 20 * u.Hz with pytest.warns(AstropyUserWarning, match='No observer defined on WCS'): x, y, z = wcs.world_to_pixel(coord, spec) assert_allclose(x, 29.) assert_allclose(y, 39.) assert_allclose(z, 44.) 
# Order of world coordinates shouldn't matter with pytest.warns(AstropyUserWarning, match='No observer defined on WCS'): x, y, z = wcs.world_to_pixel(spec, coord) assert_allclose(x, 29.) assert_allclose(y, 39.) assert_allclose(z, 44.) with pytest.warns(AstropyUserWarning, match='No observer defined on WCS'): i, j, k = wcs.world_to_array_index(coord, spec) assert_equal(i, 44) assert_equal(j, 39) assert_equal(k, 29) # Order of world coordinates shouldn't matter with pytest.warns(AstropyUserWarning, match='No observer defined on WCS'): i, j, k = wcs.world_to_array_index(spec, coord) assert_equal(i, 44) assert_equal(j, 39) assert_equal(k, 29) HEADER_SPECTRAL_CUBE_NONALIGNED = HEADER_SPECTRAL_CUBE.strip() + '\n' + """ PC2_3 = -0.5 PC3_2 = +0.5 """ with warnings.catch_warnings(): warnings.simplefilter('ignore', VerifyWarning) WCS_SPECTRAL_CUBE_NONALIGNED = WCS(Header.fromstring( HEADER_SPECTRAL_CUBE_NONALIGNED, sep='\n')) def test_spectral_cube_nonaligned(): # Make sure that correlation matrix gets adjusted if there are non-identity # CD matrix terms. wcs = WCS_SPECTRAL_CUBE_NONALIGNED assert wcs.world_axis_physical_types == ['pos.galactic.lat', 'em.freq', 'pos.galactic.lon'] assert wcs.world_axis_units == ['deg', 'Hz', 'deg'] assert wcs.pixel_axis_names == ['', '', ''] assert wcs.world_axis_names == ['Latitude', 'Frequency', 'Longitude'] assert_equal(wcs.axis_correlation_matrix, [[True, True, True], [False, True, True], [True, True, True]]) # NOTE: we check world_axis_object_components and world_axis_object_classes # again here because in the past this failed when non-aligned axes were # present, so this serves as a regression test. 
assert len(wcs.world_axis_object_components) == 3 assert wcs.world_axis_object_components[0] == ('celestial', 1, 'spherical.lat.degree') assert wcs.world_axis_object_components[1][:2] == ('spectral', 0) assert wcs.world_axis_object_components[2] == ('celestial', 0, 'spherical.lon.degree') assert wcs.world_axis_object_classes['celestial'][0] is SkyCoord assert wcs.world_axis_object_classes['celestial'][1] == () assert isinstance(wcs.world_axis_object_classes['celestial'][2]['frame'], Galactic) assert wcs.world_axis_object_classes['celestial'][2]['unit'] is u.deg assert wcs.world_axis_object_classes['spectral'][0] is Quantity assert wcs.world_axis_object_classes['spectral'][1] == () assert wcs.world_axis_object_classes['spectral'][2] == {} ############################################################################### # The following example is from Rots et al (2015), Table 5. It represents a # cube with two spatial dimensions and one time dimension ############################################################################### HEADER_TIME_CUBE = """ SIMPLE = T / Fits standard BITPIX = -32 / Bits per pixel NAXIS = 3 / Number of axes NAXIS1 = 2048 / Axis length NAXIS2 = 2048 / Axis length NAXIS3 = 11 / Axis length DATE = '2008-10-28T14:39:06' / Date FITS file was generated OBJECT = '2008 TC3' / Name of the object observed EXPTIME = 1.0011 / Integration time MJD-OBS = 54746.02749237 / Obs start DATE-OBS= '2008-10-07T00:39:35.3342' / Observing date TELESCOP= 'VISTA' / ESO Telescope Name INSTRUME= 'VIRCAM' / Instrument used. 
TIMESYS = 'UTC' / From Observatory Time System TREFPOS = 'TOPOCENT' / Topocentric MJDREF = 54746.0 / Time reference point in MJD RADESYS = 'ICRS' / Not equinoctal CTYPE2 = 'RA---ZPN' / Zenithal Polynomial Projection CRVAL2 = 2.01824372640628 / RA at ref pixel CUNIT2 = 'deg' / Angles are degrees always CRPIX2 = 2956.6 / Pixel coordinate at ref point CTYPE1 = 'DEC--ZPN' / Zenithal Polynomial Projection CRVAL1 = 14.8289418840003 / Dec at ref pixel CUNIT1 = 'deg' / Angles are degrees always CRPIX1 = -448.2 / Pixel coordinate at ref point CTYPE3 = 'UTC' / linear time (UTC) CRVAL3 = 2375.341 / Relative time of first frame CUNIT3 = 's' / Time unit CRPIX3 = 1.0 / Pixel coordinate at ref point CTYPE3A = 'TT' / alternative linear time (TT) CRVAL3A = 2440.525 / Relative time of first frame CUNIT3A = 's' / Time unit CRPIX3A = 1.0 / Pixel coordinate at ref point OBSGEO-B= -24.6157 / [deg] Tel geodetic latitute (=North)+ OBSGEO-L= -70.3976 / [deg] Tel geodetic longitude (=East)+ OBSGEO-H= 2530.0000 / [m] Tel height above reference ellipsoid CRDER3 = 0.0819 / random error in timings from fit CSYER3 = 0.0100 / absolute time error PC1_1 = 0.999999971570892 / WCS transform matrix element PC1_2 = 0.000238449608932 / WCS transform matrix element PC2_1 = -0.000621542859395 / WCS transform matrix element PC2_2 = 0.999999806842218 / WCS transform matrix element CDELT1 = -9.48575432499806E-5 / Axis scale at reference point CDELT2 = 9.48683176211164E-5 / Axis scale at reference point CDELT3 = 13.3629 / Axis scale at reference point PV1_1 = 1. / ZPN linear term PV1_3 = 42. 
/ ZPN cubic term """ with warnings.catch_warnings(): warnings.simplefilter('ignore', (VerifyWarning, FITSFixedWarning)) WCS_TIME_CUBE = WCS(Header.fromstring(HEADER_TIME_CUBE, sep='\n')) def test_time_cube(): # Spectral cube with a weird axis ordering wcs = WCS_TIME_CUBE assert wcs.pixel_n_dim == 3 assert wcs.world_n_dim == 3 assert wcs.array_shape == (11, 2048, 2048) assert wcs.pixel_shape == (2048, 2048, 11) assert wcs.world_axis_physical_types == ['pos.eq.dec', 'pos.eq.ra', 'time'] assert wcs.world_axis_units == ['deg', 'deg', 's'] assert wcs.pixel_axis_names == ['', '', ''] assert wcs.world_axis_names == ['', '', ''] assert_equal(wcs.axis_correlation_matrix, [[True, True, False], [True, True, False], [False, False, True]]) components = wcs.world_axis_object_components assert components[0] == ('celestial', 1, 'spherical.lat.degree') assert components[1] == ('celestial', 0, 'spherical.lon.degree') assert components[2][:2] == ('time', 0) assert callable(components[2][2]) assert wcs.world_axis_object_classes['celestial'][0] is SkyCoord assert wcs.world_axis_object_classes['celestial'][1] == () assert isinstance(wcs.world_axis_object_classes['celestial'][2]['frame'], ICRS) assert wcs.world_axis_object_classes['celestial'][2]['unit'] is u.deg assert wcs.world_axis_object_classes['time'][0] is Time assert wcs.world_axis_object_classes['time'][1] == () assert wcs.world_axis_object_classes['time'][2] == {} assert callable(wcs.world_axis_object_classes['time'][3]) assert_allclose(wcs.pixel_to_world_values(-449.2, 2955.6, 0), (14.8289418840003, 2.01824372640628, 2375.341)) assert_allclose(wcs.array_index_to_world_values(0, 2955.6, -449.2), (14.8289418840003, 2.01824372640628, 2375.341)) assert_allclose(wcs.world_to_pixel_values(14.8289418840003, 2.01824372640628, 2375.341), (-449.2, 2955.6, 0)) assert_equal(wcs.world_to_array_index_values(14.8289418840003, 2.01824372640628, 2375.341), (0, 2956, -449)) # High-level API coord, time = wcs.pixel_to_world(29, 39, 44) assert 
isinstance(coord, SkyCoord) assert isinstance(coord.frame, ICRS) assert_allclose(coord.ra.deg, 1.7323356692202325) assert_allclose(coord.dec.deg, 14.783516054817797) assert isinstance(time, Time) assert_allclose(time.mjd, 54746.03429755324) coord, time = wcs.array_index_to_world(44, 39, 29) assert isinstance(coord, SkyCoord) assert isinstance(coord.frame, ICRS) assert_allclose(coord.ra.deg, 1.7323356692202325) assert_allclose(coord.dec.deg, 14.783516054817797) assert isinstance(time, Time) assert_allclose(time.mjd, 54746.03429755324) x, y, z = wcs.world_to_pixel(coord, time) assert_allclose(x, 29.) assert_allclose(y, 39.) assert_allclose(z, 44.) # Order of world coordinates shouldn't matter x, y, z = wcs.world_to_pixel(time, coord) assert_allclose(x, 29.) assert_allclose(y, 39.) assert_allclose(z, 44.) i, j, k = wcs.world_to_array_index(coord, time) assert_equal(i, 44) assert_equal(j, 39) assert_equal(k, 29) # Order of world coordinates shouldn't matter i, j, k = wcs.world_to_array_index(time, coord) assert_equal(i, 44) assert_equal(j, 39) assert_equal(k, 29) ############################################################################### # The following tests are to make sure that Time objects are constructed # correctly for a variety of combinations of WCS keywords ############################################################################### HEADER_TIME_1D = """ SIMPLE = T BITPIX = -32 NAXIS = 1 NAXIS1 = 2048 TIMESYS = 'UTC' TREFPOS = 'TOPOCENT' MJDREF = 50002.6 CTYPE1 = 'UTC' CRVAL1 = 5 CUNIT1 = 's' CRPIX1 = 1.0 CDELT1 = 2 OBSGEO-L= -20 OBSGEO-B= -70 OBSGEO-H= 2530 """ if Version(wcsver) >= Version('7.1'): HEADER_TIME_1D += "DATEREF = '1995-10-12T14:24:00'\n" @pytest.fixture def header_time_1d(): return Header.fromstring(HEADER_TIME_1D, sep='\n') def assert_time_at(header, position, jd1, jd2, scale, format): with warnings.catch_warnings(): warnings.simplefilter('ignore', FITSFixedWarning) wcs = WCS(header) time = wcs.pixel_to_world(position) 
assert_allclose(time.jd1, jd1, rtol=1e-10) assert_allclose(time.jd2, jd2, rtol=1e-10) assert time.format == format assert time.scale == scale @pytest.mark.parametrize('scale', ('tai', 'tcb', 'tcg', 'tdb', 'tt', 'ut1', 'utc', 'local')) def test_time_1d_values(header_time_1d, scale): # Check that Time objects are instantiated with the correct values, # scales, and formats. header_time_1d['CTYPE1'] = scale.upper() assert_time_at(header_time_1d, 1, 2450003, 0.1 + 7 / 3600 / 24, scale, 'mjd') def test_time_1d_values_gps(header_time_1d): # Special treatment for GPS scale header_time_1d['CTYPE1'] = 'GPS' assert_time_at(header_time_1d, 1, 2450003, 0.1 + (7 + 19) / 3600 / 24, 'tai', 'mjd') def test_time_1d_values_deprecated(header_time_1d): # Deprecated (in FITS) scales header_time_1d['CTYPE1'] = 'TDT' assert_time_at(header_time_1d, 1, 2450003, 0.1 + 7 / 3600 / 24, 'tt', 'mjd') header_time_1d['CTYPE1'] = 'IAT' assert_time_at(header_time_1d, 1, 2450003, 0.1 + 7 / 3600 / 24, 'tai', 'mjd') header_time_1d['CTYPE1'] = 'GMT' assert_time_at(header_time_1d, 1, 2450003, 0.1 + 7 / 3600 / 24, 'utc', 'mjd') header_time_1d['CTYPE1'] = 'ET' assert_time_at(header_time_1d, 1, 2450003, 0.1 + 7 / 3600 / 24, 'tt', 'mjd') def test_time_1d_values_time(header_time_1d): header_time_1d['CTYPE1'] = 'TIME' assert_time_at(header_time_1d, 1, 2450003, 0.1 + 7 / 3600 / 24, 'utc', 'mjd') header_time_1d['TIMESYS'] = 'TAI' assert_time_at(header_time_1d, 1, 2450003, 0.1 + 7 / 3600 / 24, 'tai', 'mjd') @pytest.mark.remote_data @pytest.mark.parametrize('scale', ('tai', 'tcb', 'tcg', 'tdb', 'tt', 'ut1', 'utc')) def test_time_1d_roundtrip(header_time_1d, scale): # Check that coordinates round-trip pixel_in = np.arange(3, 10) header_time_1d['CTYPE1'] = scale.upper() with warnings.catch_warnings(): warnings.simplefilter('ignore', FITSFixedWarning) wcs = WCS(header_time_1d) # Simple test time = wcs.pixel_to_world(pixel_in) pixel_out = wcs.world_to_pixel(time) assert_allclose(pixel_in, pixel_out) # Test with an 
intermediate change to a different scale/format time = wcs.pixel_to_world(pixel_in).tdb time.format = 'isot' pixel_out = wcs.world_to_pixel(time) assert_allclose(pixel_in, pixel_out) def test_time_1d_high_precision(header_time_1d): # Case where the MJDREF is split into two for high precision del header_time_1d['MJDREF'] header_time_1d['MJDREFI'] = 52000. header_time_1d['MJDREFF'] = 1e-11 with warnings.catch_warnings(): warnings.simplefilter('ignore', FITSFixedWarning) wcs = WCS(header_time_1d) time = wcs.pixel_to_world(10) # Here we have to use a very small rtol to really test that MJDREFF is # taken into account assert_allclose(time.jd1, 2452001.0, rtol=1e-12) assert_allclose(time.jd2, -0.5 + 25 / 3600 / 24 + 1e-11, rtol=1e-13) def test_time_1d_location_geodetic(header_time_1d): # Make sure that the location is correctly returned (geodetic case) with warnings.catch_warnings(): warnings.simplefilter('ignore', FITSFixedWarning) wcs = WCS(header_time_1d) time = wcs.pixel_to_world(10) lon, lat, alt = time.location.to_geodetic() # FIXME: alt won't work for now because ERFA doesn't implement the IAU 1976 # ellipsoid (https://github.com/astropy/astropy/issues/9420) assert_allclose(lon.degree, -20) assert_allclose(lat.degree, -70) # assert_allclose(alt.to_value(u.m), 2530.) 
@pytest.fixture def header_time_1d_no_obs(): header = Header.fromstring(HEADER_TIME_1D, sep='\n') del header['OBSGEO-L'] del header['OBSGEO-B'] del header['OBSGEO-H'] return header def test_time_1d_location_geocentric(header_time_1d_no_obs): # Make sure that the location is correctly returned (geocentric case) header = header_time_1d_no_obs header['OBSGEO-X'] = 10 header['OBSGEO-Y'] = -20 header['OBSGEO-Z'] = 30 with warnings.catch_warnings(): warnings.simplefilter('ignore', FITSFixedWarning) wcs = WCS(header) time = wcs.pixel_to_world(10) x, y, z = time.location.to_geocentric() assert_allclose(x.to_value(u.m), 10) assert_allclose(y.to_value(u.m), -20) assert_allclose(z.to_value(u.m), 30) def test_time_1d_location_geocenter(header_time_1d_no_obs): header_time_1d_no_obs['TREFPOS'] = 'GEOCENTER' wcs = WCS(header_time_1d_no_obs) time = wcs.pixel_to_world(10) x, y, z = time.location.to_geocentric() assert_allclose(x.to_value(u.m), 0) assert_allclose(y.to_value(u.m), 0) assert_allclose(z.to_value(u.m), 0) def test_time_1d_location_missing(header_time_1d_no_obs): # Check what happens when no location is present wcs = WCS(header_time_1d_no_obs) with pytest.warns(UserWarning, match='Missing or incomplete observer location ' 'information, setting location in Time to None'): time = wcs.pixel_to_world(10) assert time.location is None def test_time_1d_location_incomplete(header_time_1d_no_obs): # Check what happens when location information is incomplete header_time_1d_no_obs['OBSGEO-L'] = 10. 
with warnings.catch_warnings(): warnings.simplefilter('ignore', FITSFixedWarning) wcs = WCS(header_time_1d_no_obs) with pytest.warns(UserWarning, match='Missing or incomplete observer location ' 'information, setting location in Time to None'): time = wcs.pixel_to_world(10) assert time.location is None def test_time_1d_location_unsupported(header_time_1d_no_obs): # Check what happens when TREFPOS is unsupported header_time_1d_no_obs['TREFPOS'] = 'BARYCENTER' wcs = WCS(header_time_1d_no_obs) with pytest.warns(UserWarning, match="Observation location 'barycenter' is not " "supported, setting location in Time to None"): time = wcs.pixel_to_world(10) assert time.location is None def test_time_1d_unsupported_ctype(header_time_1d_no_obs): # For cases that we don't support yet, e.g. UT(...), use Time and drop sub-scale # Case where the MJDREF is split into two for high precision header_time_1d_no_obs['CTYPE1'] = 'UT(WWV)' wcs = WCS(header_time_1d_no_obs) with pytest.warns(UserWarning, match="Dropping unsupported sub-scale WWV from scale UT"): time = wcs.pixel_to_world(10) assert isinstance(time, Time) ############################################################################### # Extra corner cases ############################################################################### def test_unrecognized_unit(): # TODO: Determine whether the following behavior is desirable wcs = WCS(naxis=1) with pytest.warns(UnitsWarning): wcs.wcs.cunit = ['bananas // sekonds'] assert wcs.world_axis_units == ['bananas // sekonds'] def test_distortion_correlations(): filename = get_pkg_data_filename('../../tests/data/sip.fits') with pytest.warns(FITSFixedWarning): w = WCS(filename) assert_equal(w.axis_correlation_matrix, True) # Changing PC to an identity matrix doesn't change anything since # distortions are still present. 
w.wcs.pc = [[1, 0], [0, 1]] assert_equal(w.axis_correlation_matrix, True) # Nor does changing the name of the axes to make them non-celestial w.wcs.ctype = ['X', 'Y'] assert_equal(w.axis_correlation_matrix, True) # However once we turn off the distortions the matrix changes w.sip = None assert_equal(w.axis_correlation_matrix, [[True, False], [False, True]]) # If we go back to celestial coordinates then the matrix is all True again w.wcs.ctype = ['RA---TAN', 'DEC--TAN'] assert_equal(w.axis_correlation_matrix, True) # Or if we change to X/Y but have a non-identity PC w.wcs.pc = [[0.9, -0.1], [0.1, 0.9]] w.wcs.ctype = ['X', 'Y'] assert_equal(w.axis_correlation_matrix, True) def test_custom_ctype_to_ucd_mappings(): wcs = WCS(naxis=1) wcs.wcs.ctype = ['SPAM'] assert wcs.world_axis_physical_types == [None] # Check simple behavior with custom_ctype_to_ucd_mapping({'APPLE': 'food.fruit'}): assert wcs.world_axis_physical_types == [None] with custom_ctype_to_ucd_mapping({'APPLE': 'food.fruit', 'SPAM': 'food.spam'}): assert wcs.world_axis_physical_types == ['food.spam'] # Check nesting with custom_ctype_to_ucd_mapping({'SPAM': 'food.spam'}): with custom_ctype_to_ucd_mapping({'APPLE': 'food.fruit'}): assert wcs.world_axis_physical_types == ['food.spam'] with custom_ctype_to_ucd_mapping({'APPLE': 'food.fruit'}): with custom_ctype_to_ucd_mapping({'SPAM': 'food.spam'}): assert wcs.world_axis_physical_types == ['food.spam'] # Check priority in nesting with custom_ctype_to_ucd_mapping({'SPAM': 'notfood'}): with custom_ctype_to_ucd_mapping({'SPAM': 'food.spam'}): assert wcs.world_axis_physical_types == ['food.spam'] with custom_ctype_to_ucd_mapping({'SPAM': 'food.spam'}): with custom_ctype_to_ucd_mapping({'SPAM': 'notfood'}): assert wcs.world_axis_physical_types == ['notfood'] def test_caching_components_and_classes(): # Make sure that when we change the WCS object, the classes and components # are updated (we use a cache internally, so we need to make sure the cache # is 
invalidated if needed) wcs = WCS_SIMPLE_CELESTIAL.deepcopy() assert wcs.world_axis_object_components == [('celestial', 0, 'spherical.lon.degree'), ('celestial', 1, 'spherical.lat.degree')] assert wcs.world_axis_object_classes['celestial'][0] is SkyCoord assert wcs.world_axis_object_classes['celestial'][1] == () assert isinstance(wcs.world_axis_object_classes['celestial'][2]['frame'], ICRS) assert wcs.world_axis_object_classes['celestial'][2]['unit'] is u.deg wcs.wcs.radesys = 'FK5' frame = wcs.world_axis_object_classes['celestial'][2]['frame'] assert isinstance(frame, FK5) assert frame.equinox.jyear == 2000. wcs.wcs.equinox = 2010 frame = wcs.world_axis_object_classes['celestial'][2]['frame'] assert isinstance(frame, FK5) assert frame.equinox.jyear == 2010. def test_sub_wcsapi_attributes(): # Regression test for a bug that caused some of the WCS attributes to be # incorrect when using WCS.sub or WCS.celestial (which is an alias for sub # with lon/lat types). wcs = WCS_SPECTRAL_CUBE.deepcopy() wcs.pixel_shape = (30, 40, 50) wcs.pixel_bounds = [(-1, 11), (-2, 18), (5, 15)] # Use celestial shortcut wcs_sub1 = wcs.celestial assert wcs_sub1.pixel_n_dim == 2 assert wcs_sub1.world_n_dim == 2 assert wcs_sub1.array_shape == (50, 30) assert wcs_sub1.pixel_shape == (30, 50) assert wcs_sub1.pixel_bounds == [(-1, 11), (5, 15)] assert wcs_sub1.world_axis_physical_types == ['pos.galactic.lat', 'pos.galactic.lon'] assert wcs_sub1.world_axis_units == ['deg', 'deg'] assert wcs_sub1.world_axis_names == ['Latitude', 'Longitude'] # Try adding axes wcs_sub2 = wcs.sub([0, 2, 0]) assert wcs_sub2.pixel_n_dim == 3 assert wcs_sub2.world_n_dim == 3 assert wcs_sub2.array_shape == (None, 40, None) assert wcs_sub2.pixel_shape == (None, 40, None) assert wcs_sub2.pixel_bounds == [None, (-2, 18), None] assert wcs_sub2.world_axis_physical_types == [None, 'em.freq', None] assert wcs_sub2.world_axis_units == ['', 'Hz', ''] assert wcs_sub2.world_axis_names == ['', 'Frequency', ''] # Use strings 
wcs_sub3 = wcs.sub(['longitude', 'latitude']) assert wcs_sub3.pixel_n_dim == 2 assert wcs_sub3.world_n_dim == 2 assert wcs_sub3.array_shape == (30, 50) assert wcs_sub3.pixel_shape == (50, 30) assert wcs_sub3.pixel_bounds == [(5, 15), (-1, 11)] assert wcs_sub3.world_axis_physical_types == ['pos.galactic.lon', 'pos.galactic.lat'] assert wcs_sub3.world_axis_units == ['deg', 'deg'] assert wcs_sub3.world_axis_names == ['Longitude', 'Latitude'] # Now try without CNAME set wcs.wcs.cname = [''] * wcs.wcs.naxis wcs_sub4 = wcs.sub(['longitude', 'latitude']) assert wcs_sub4.pixel_n_dim == 2 assert wcs_sub4.world_n_dim == 2 assert wcs_sub4.array_shape == (30, 50) assert wcs_sub4.pixel_shape == (50, 30) assert wcs_sub4.pixel_bounds == [(5, 15), (-1, 11)] assert wcs_sub4.world_axis_physical_types == ['pos.galactic.lon', 'pos.galactic.lat'] assert wcs_sub4.world_axis_units == ['deg', 'deg'] assert wcs_sub4.world_axis_names == ['', ''] HEADER_POLARIZED = """ CTYPE1 = 'HPLT-TAN' CTYPE2 = 'HPLN-TAN' CTYPE3 = 'STOKES' """ @pytest.fixture def header_polarized(): return Header.fromstring(HEADER_POLARIZED, sep='\n') def test_phys_type_polarization(header_polarized): w = WCS(header_polarized) assert w.world_axis_physical_types[2] == 'phys.polarization.stokes' ############################################################################### # Spectral transformations ############################################################################### HEADER_SPECTRAL_FRAMES = """ BUNIT = 'Jy/beam' EQUINOX = 2.000000000E+03 CTYPE1 = 'RA---SIN' CRVAL1 = 2.60108333333E+02 CDELT1 = -2.777777845E-04 CRPIX1 = 1.0 CUNIT1 = 'deg' CTYPE2 = 'DEC--SIN' CRVAL2 = -9.75000000000E-01 CDELT2 = 2.777777845E-04 CRPIX2 = 1.0 CUNIT2 = 'deg' CTYPE3 = 'FREQ' CRVAL3 = 1.37835117405E+09 CDELT3 = 9.765625000E+04 CRPIX3 = 32.0 CUNIT3 = 'Hz' SPECSYS = 'TOPOCENT' RESTFRQ = 1.420405752E+09 / [Hz] RADESYS = 'FK5' """ @pytest.fixture def header_spectral_frames(): return Header.fromstring(HEADER_SPECTRAL_FRAMES, sep='\n') def 
test_spectralcoord_frame(header_spectral_frames): # This is a test to check the numerical results of transformations between # different velocity frames. We simply make sure that the returned # SpectralCoords are in the right frame but don't check the transformations # since this is already done in test_spectralcoord_accuracy # in astropy.coordinates. with iers.conf.set_temp('auto_download', False): obstime = Time(f"2009-05-04T04:44:23", scale='utc') header = header_spectral_frames.copy() header['MJD-OBS'] = obstime.mjd header['CRVAL1'] = 16.33211 header['CRVAL2'] = -34.2221 header['OBSGEO-L'] = 144.2 header['OBSGEO-B'] = -20.2 header['OBSGEO-H'] = 0. # We start off with a WCS defined in topocentric frequency with pytest.warns(FITSFixedWarning): wcs_topo = WCS(header) # We convert a single pixel coordinate to world coordinates and keep only # the second high level object - a SpectralCoord: sc_topo = wcs_topo.pixel_to_world(0, 0, 31)[1] # We check that this is in topocentric frame with zero velocities assert isinstance(sc_topo, SpectralCoord) assert isinstance(sc_topo.observer, ITRS) assert sc_topo.observer.obstime.isot == obstime.isot assert_equal(sc_topo.observer.data.differentials['s'].d_xyz.value, 0) observatory = EarthLocation.from_geodetic(144.2, -20.2).get_itrs(obstime=obstime).transform_to(ICRS()) assert observatory.separation_3d(sc_topo.observer.transform_to(ICRS())) < 1 * u.km for specsys, expected_frame in VELOCITY_FRAMES.items(): header['SPECSYS'] = specsys with pytest.warns(FITSFixedWarning): wcs = WCS(header) sc = wcs.pixel_to_world(0, 0, 31)[1] # Now transform to the expected velocity frame, which should leave # the spectral coordinate unchanged sc_check = sc.with_observer_stationary_relative_to(expected_frame) assert_quantity_allclose(sc.quantity, sc_check.quantity) @pytest.mark.parametrize(('ctype3', 'observer'), product(['ZOPT', 'BETA', 'VELO', 'VRAD', 'VOPT'], [False, True])) def test_different_ctypes(header_spectral_frames, ctype3, observer): 
header = header_spectral_frames.copy() header['CTYPE3'] = ctype3 header['CRVAL3'] = 0.1 header['CDELT3'] = 0.001 if ctype3[0] == 'V': header['CUNIT3'] = 'm s-1' else: header['CUNIT3'] = '' header['RESTWAV'] = 1.420405752E+09 header['MJD-OBS'] = 55197 if observer: header['OBSGEO-L'] = 144.2 header['OBSGEO-B'] = -20.2 header['OBSGEO-H'] = 0. header['SPECSYS'] = 'BARYCENT' with warnings.catch_warnings(): warnings.simplefilter('ignore', FITSFixedWarning) wcs = WCS(header) skycoord, spectralcoord = wcs.pixel_to_world(0, 0, 31) assert isinstance(spectralcoord, SpectralCoord) if observer: pix = wcs.world_to_pixel(skycoord, spectralcoord) else: with pytest.warns(AstropyUserWarning, match='No observer defined on WCS'): pix = wcs.world_to_pixel(skycoord, spectralcoord) assert_allclose(pix, [0, 0, 31], rtol=1e-6)
pllim/astropy
astropy/wcs/wcsapi/tests/test_fitswcs.py
astropy/constants/astropyconst40.py
# Licensed under a 3-clause BSD style license - see LICENSE.rst """The ShapedLikeNDArray mixin class and shape-related functions.""" import abc from itertools import zip_longest import numpy as np __all__ = ['NDArrayShapeMethods', 'ShapedLikeNDArray', 'check_broadcast', 'IncompatibleShapeError', 'unbroadcast'] class NDArrayShapeMethods: """Mixin class to provide shape-changing methods. The class proper is assumed to have some underlying data, which are arrays or array-like structures. It must define a ``shape`` property, which gives the shape of those data, as well as an ``_apply`` method that creates a new instance in which a `~numpy.ndarray` method has been applied to those. Furthermore, for consistency with `~numpy.ndarray`, it is recommended to define a setter for the ``shape`` property, which, like the `~numpy.ndarray.shape` property allows in-place reshaping the internal data (and, unlike the ``reshape`` method raises an exception if this is not possible). This class only provides the shape-changing methods and is meant in particular for `~numpy.ndarray` subclasses that need to keep track of other arrays. For other classes, `~astropy.utils.shapes.ShapedLikeNDArray` is recommended. """ # Note to developers: if new methods are added here, be sure to check that # they work properly with the classes that use this, such as Time and # BaseRepresentation, i.e., look at their ``_apply`` methods and add # relevant tests. This is particularly important for methods that imply # copies rather than views of data (see the special-case treatment of # 'flatten' in Time). def __getitem__(self, item): return self._apply('__getitem__', item) def copy(self, *args, **kwargs): """Return an instance containing copies of the internal data. Parameters are as for :meth:`~numpy.ndarray.copy`. """ return self._apply('copy', *args, **kwargs) def reshape(self, *args, **kwargs): """Returns an instance containing the same data with a new shape. 
Parameters are as for :meth:`~numpy.ndarray.reshape`. Note that it is not always possible to change the shape of an array without copying the data (see :func:`~numpy.reshape` documentation). If you want an error to be raise if the data is copied, you should assign the new shape to the shape attribute (note: this may not be implemented for all classes using ``NDArrayShapeMethods``). """ return self._apply('reshape', *args, **kwargs) def ravel(self, *args, **kwargs): """Return an instance with the array collapsed into one dimension. Parameters are as for :meth:`~numpy.ndarray.ravel`. Note that it is not always possible to unravel an array without copying the data. If you want an error to be raise if the data is copied, you should should assign shape ``(-1,)`` to the shape attribute. """ return self._apply('ravel', *args, **kwargs) def flatten(self, *args, **kwargs): """Return a copy with the array collapsed into one dimension. Parameters are as for :meth:`~numpy.ndarray.flatten`. """ return self._apply('flatten', *args, **kwargs) def transpose(self, *args, **kwargs): """Return an instance with the data transposed. Parameters are as for :meth:`~numpy.ndarray.transpose`. All internal data are views of the data of the original. """ return self._apply('transpose', *args, **kwargs) @property def T(self): """Return an instance with the data transposed. Parameters are as for :attr:`~numpy.ndarray.T`. All internal data are views of the data of the original. """ if self.ndim < 2: return self else: return self.transpose() def swapaxes(self, *args, **kwargs): """Return an instance with the given axes interchanged. Parameters are as for :meth:`~numpy.ndarray.swapaxes`: ``axis1, axis2``. All internal data are views of the data of the original. """ return self._apply('swapaxes', *args, **kwargs) def diagonal(self, *args, **kwargs): """Return an instance with the specified diagonals. Parameters are as for :meth:`~numpy.ndarray.diagonal`. 
All internal data are views of the data of the original. """ return self._apply('diagonal', *args, **kwargs) def squeeze(self, *args, **kwargs): """Return an instance with single-dimensional shape entries removed Parameters are as for :meth:`~numpy.ndarray.squeeze`. All internal data are views of the data of the original. """ return self._apply('squeeze', *args, **kwargs) def take(self, indices, axis=None, out=None, mode='raise'): """Return a new instance formed from the elements at the given indices. Parameters are as for :meth:`~numpy.ndarray.take`, except that, obviously, no output array can be given. """ if out is not None: return NotImplementedError("cannot pass 'out' argument to 'take.") return self._apply('take', indices, axis=axis, mode=mode) class ShapedLikeNDArray(NDArrayShapeMethods, metaclass=abc.ABCMeta): """Mixin class to provide shape-changing methods. The class proper is assumed to have some underlying data, which are arrays or array-like structures. It must define a ``shape`` property, which gives the shape of those data, as well as an ``_apply`` method that creates a new instance in which a `~numpy.ndarray` method has been applied to those. Furthermore, for consistency with `~numpy.ndarray`, it is recommended to define a setter for the ``shape`` property, which, like the `~numpy.ndarray.shape` property allows in-place reshaping the internal data (and, unlike the ``reshape`` method raises an exception if this is not possible). This class also defines default implementations for ``ndim`` and ``size`` properties, calculating those from the ``shape``. These can be overridden by subclasses if there are faster ways to obtain those numbers. """ # Note to developers: if new methods are added here, be sure to check that # they work properly with the classes that use this, such as Time and # BaseRepresentation, i.e., look at their ``_apply`` methods and add # relevant tests. 
This is particularly important for methods that imply # copies rather than views of data (see the special-case treatment of # 'flatten' in Time). @property @abc.abstractmethod def shape(self): """The shape of the underlying data.""" @abc.abstractmethod def _apply(method, *args, **kwargs): """Create a new instance, with ``method`` applied to underlying data. The method is any of the shape-changing methods for `~numpy.ndarray` (``reshape``, ``swapaxes``, etc.), as well as those picking particular elements (``__getitem__``, ``take``, etc.). It will be applied to the underlying arrays (e.g., ``jd1`` and ``jd2`` in `~astropy.time.Time`), with the results used to create a new instance. Parameters ---------- method : str Method to be applied to the instance's internal data arrays. args : tuple Any positional arguments for ``method``. kwargs : dict Any keyword arguments for ``method``. """ @property def ndim(self): """The number of dimensions of the instance and underlying arrays.""" return len(self.shape) @property def size(self): """The size of the object, as calculated from its shape.""" size = 1 for sh in self.shape: size *= sh return size @property def isscalar(self): return self.shape == () def __len__(self): if self.isscalar: raise TypeError("Scalar {!r} object has no len()" .format(self.__class__.__name__)) return self.shape[0] def __bool__(self): """Any instance should evaluate to True, except when it is empty.""" return self.size > 0 def __getitem__(self, item): try: return self._apply('__getitem__', item) except IndexError: if self.isscalar: raise TypeError('scalar {!r} object is not subscriptable.' .format(self.__class__.__name__)) else: raise def __iter__(self): if self.isscalar: raise TypeError('scalar {!r} object is not iterable.' .format(self.__class__.__name__)) # We cannot just write a generator here, since then the above error # would only be raised once we try to use the iterator, rather than # upon its definition using iter(self). 
def self_iter(): for idx in range(len(self)): yield self[idx] return self_iter() # Functions that change shape or essentially do indexing. _APPLICABLE_FUNCTIONS = { np.moveaxis, np.rollaxis, np.atleast_1d, np.atleast_2d, np.atleast_3d, np.expand_dims, np.broadcast_to, np.flip, np.fliplr, np.flipud, np.rot90, np.roll, np.delete, } # Functions that themselves defer to a method. Those are all # defined in np.core.fromnumeric, but exclude alen as well as # sort and partition, which make copies before calling the method. _METHOD_FUNCTIONS = {getattr(np, name): {'amax': 'max', 'amin': 'min', 'around': 'round', 'round_': 'round', 'alltrue': 'all', 'sometrue': 'any'}.get(name, name) for name in np.core.fromnumeric.__all__ if name not in ['alen', 'sort', 'partition']} # Add np.copy, which we may as well let defer to our method. _METHOD_FUNCTIONS[np.copy] = 'copy' # Could be made to work with a bit of effort: # np.where, np.compress, np.extract, # np.diag_indices_from, np.triu_indices_from, np.tril_indices_from # np.tile, np.repeat (need .repeat method) # TODO: create a proper implementation. # Furthermore, some arithmetic functions such as np.mean, np.median, # could work for Time, and many more for TimeDelta, so those should # override __array_function__. def __array_function__(self, function, types, args, kwargs): """Wrap numpy functions that make sense.""" if function in self._APPLICABLE_FUNCTIONS: if function is np.broadcast_to: # Ensure that any ndarray subclasses used are # properly propagated. kwargs.setdefault('subok', True) elif (function in {np.atleast_1d, np.atleast_2d, np.atleast_3d} and len(args) > 1): return tuple(function(arg, **kwargs) for arg in args) if self is not args[0]: return NotImplemented return self._apply(function, *args[1:], **kwargs) # For functions that defer to methods, use the corresponding # method/attribute if we have it. Otherwise, fall through. 
if self is args[0] and function in self._METHOD_FUNCTIONS: method = getattr(self, self._METHOD_FUNCTIONS[function], None) if method is not None: if callable(method): return method(*args[1:], **kwargs) else: # For np.shape, etc., just return the attribute. return method # Fall-back, just pass the arguments on since perhaps the function # works already (see above). return function.__wrapped__(*args, **kwargs) class IncompatibleShapeError(ValueError): def __init__(self, shape_a, shape_a_idx, shape_b, shape_b_idx): super().__init__(shape_a, shape_a_idx, shape_b, shape_b_idx) def check_broadcast(*shapes): """ Determines whether two or more Numpy arrays can be broadcast with each other based on their shape tuple alone. Parameters ---------- *shapes : tuple All shapes to include in the comparison. If only one shape is given it is passed through unmodified. If no shapes are given returns an empty `tuple`. Returns ------- broadcast : `tuple` If all shapes are mutually broadcastable, returns a tuple of the full broadcast shape. """ if len(shapes) == 0: return () elif len(shapes) == 1: return shapes[0] reversed_shapes = (reversed(shape) for shape in shapes) full_shape = [] for dims in zip_longest(*reversed_shapes, fillvalue=1): max_dim = 1 max_dim_idx = None for idx, dim in enumerate(dims): if dim == 1: continue if max_dim == 1: # The first dimension of size greater than 1 max_dim = dim max_dim_idx = idx elif dim != max_dim: raise IncompatibleShapeError( shapes[max_dim_idx], max_dim_idx, shapes[idx], idx) full_shape.append(max_dim) return tuple(full_shape[::-1]) def unbroadcast(array): """ Given an array, return a new array that is the smallest subset of the original array that can be re-broadcasted back to the original array. See https://stackoverflow.com/questions/40845769/un-broadcasting-numpy-arrays for more details. 
""" if array.ndim == 0: return array array = array[tuple((slice(0, 1) if stride == 0 else slice(None)) for stride in array.strides)] # Remove leading ones, which are not needed in numpy broadcasting. first_not_unity = next((i for (i, s) in enumerate(array.shape) if s > 1), array.ndim) return array.reshape(array.shape[first_not_unity:])
# Note that we test the main astropy.wcs.WCS class directly rather than testing
# the mix-in class on its own (since it's not functional without being used as
# a mix-in)

import warnings

from packaging.version import Version

import numpy as np
import pytest
from numpy.testing import assert_equal, assert_allclose

from itertools import product

from astropy import units as u
from astropy.time import Time
from astropy.tests.helper import assert_quantity_allclose
from astropy.units import Quantity
from astropy.coordinates import ICRS, FK5, Galactic, SkyCoord, SpectralCoord, ITRS, EarthLocation
from astropy.io.fits import Header
from astropy.io.fits.verify import VerifyWarning
from astropy.units.core import UnitsWarning
from astropy.utils.data import get_pkg_data_filename
from astropy.wcs.wcs import WCS, FITSFixedWarning
from astropy.wcs.wcsapi.fitswcs import custom_ctype_to_ucd_mapping, VELOCITY_FRAMES
from astropy.wcs._wcs import __version__ as wcsver
from astropy.utils import iers
from astropy.utils.exceptions import AstropyUserWarning

###############################################################################
# The following example is the simplest WCS with default values
###############################################################################

WCS_EMPTY = WCS(naxis=1)
WCS_EMPTY.wcs.crpix = [1]


def test_empty():

    wcs = WCS_EMPTY

    # Low-level API

    assert wcs.pixel_n_dim == 1
    assert wcs.world_n_dim == 1
    assert wcs.array_shape is None
    assert wcs.pixel_shape is None
    assert wcs.world_axis_physical_types == [None]
    assert wcs.world_axis_units == ['']
    assert wcs.pixel_axis_names == ['']
    assert wcs.world_axis_names == ['']

    assert_equal(wcs.axis_correlation_matrix, True)

    assert wcs.world_axis_object_components == [('world', 0, 'value')]

    assert wcs.world_axis_object_classes['world'][0] is Quantity
    assert wcs.world_axis_object_classes['world'][1] == ()
    assert wcs.world_axis_object_classes['world'][2]['unit'] is u.one

    assert_allclose(wcs.pixel_to_world_values(29), 29)
    assert_allclose(wcs.array_index_to_world_values(29), 29)

    assert np.ndim(wcs.pixel_to_world_values(29)) == 0
    assert np.ndim(wcs.array_index_to_world_values(29)) == 0

    assert_allclose(wcs.world_to_pixel_values(29), 29)
    assert_equal(wcs.world_to_array_index_values(29), (29,))

    assert np.ndim(wcs.world_to_pixel_values(29)) == 0
    assert np.ndim(wcs.world_to_array_index_values(29)) == 0

    # High-level API

    coord = wcs.pixel_to_world(29)
    assert_quantity_allclose(coord, 29 * u.one)
    assert np.ndim(coord) == 0

    coord = wcs.array_index_to_world(29)
    assert_quantity_allclose(coord, 29 * u.one)
    assert np.ndim(coord) == 0

    coord = 15 * u.one

    x = wcs.world_to_pixel(coord)
    assert_allclose(x, 15.)
    assert np.ndim(x) == 0

    i = wcs.world_to_array_index(coord)
    assert_equal(i, 15)
    assert np.ndim(i) == 0


###############################################################################
# The following example is a simple 2D image with celestial coordinates
###############################################################################

HEADER_SIMPLE_CELESTIAL = """
WCSAXES = 2
CTYPE1 = RA---TAN
CTYPE2 = DEC--TAN
CRVAL1 = 10
CRVAL2 = 20
CRPIX1 = 30
CRPIX2 = 40
CDELT1 = -0.1
CDELT2 = 0.1
CROTA2 = 0.
CUNIT1 = deg
CUNIT2 = deg
"""

with warnings.catch_warnings():
    warnings.simplefilter('ignore', VerifyWarning)
    WCS_SIMPLE_CELESTIAL = WCS(Header.fromstring(
        HEADER_SIMPLE_CELESTIAL, sep='\n'))


def test_simple_celestial():

    wcs = WCS_SIMPLE_CELESTIAL

    # Low-level API

    assert wcs.pixel_n_dim == 2
    assert wcs.world_n_dim == 2
    assert wcs.array_shape is None
    assert wcs.pixel_shape is None
    assert wcs.world_axis_physical_types == ['pos.eq.ra', 'pos.eq.dec']
    assert wcs.world_axis_units == ['deg', 'deg']
    assert wcs.pixel_axis_names == ['', '']
    assert wcs.world_axis_names == ['', '']

    assert_equal(wcs.axis_correlation_matrix, True)

    assert wcs.world_axis_object_components == [('celestial', 0, 'spherical.lon.degree'),
                                                ('celestial', 1, 'spherical.lat.degree')]

    assert wcs.world_axis_object_classes['celestial'][0] is SkyCoord
    assert wcs.world_axis_object_classes['celestial'][1] == ()
    assert isinstance(wcs.world_axis_object_classes['celestial'][2]['frame'], ICRS)
    assert wcs.world_axis_object_classes['celestial'][2]['unit'] is u.deg

    assert_allclose(wcs.pixel_to_world_values(29, 39), (10, 20))
    assert_allclose(wcs.array_index_to_world_values(39, 29), (10, 20))

    assert_allclose(wcs.world_to_pixel_values(10, 20), (29., 39.))
    assert_equal(wcs.world_to_array_index_values(10, 20), (39, 29))

    # High-level API

    coord = wcs.pixel_to_world(29, 39)
    assert isinstance(coord, SkyCoord)
    assert isinstance(coord.frame, ICRS)
    assert_allclose(coord.ra.deg, 10)
    assert_allclose(coord.dec.deg, 20)

    coord = wcs.array_index_to_world(39, 29)
    assert isinstance(coord, SkyCoord)
    assert isinstance(coord.frame, ICRS)
    assert_allclose(coord.ra.deg, 10)
    assert_allclose(coord.dec.deg, 20)

    coord = SkyCoord(10, 20, unit='deg', frame='icrs')

    x, y = wcs.world_to_pixel(coord)
    assert_allclose(x, 29.)
    assert_allclose(y, 39.)

    i, j = wcs.world_to_array_index(coord)
    assert_equal(i, 39)
    assert_equal(j, 29)

    # Check that if the coordinates are passed in a different frame things still
    # work properly

    coord_galactic = coord.galactic

    x, y = wcs.world_to_pixel(coord_galactic)
    assert_allclose(x, 29.)
    assert_allclose(y, 39.)

    i, j = wcs.world_to_array_index(coord_galactic)
    assert_equal(i, 39)
    assert_equal(j, 29)

    # Check that we can actually index the array

    data = np.arange(3600).reshape((60, 60))

    coord = SkyCoord(10, 20, unit='deg', frame='icrs')
    index = wcs.world_to_array_index(coord)
    assert_equal(data[index], 2369)

    coord = SkyCoord([10, 12], [20, 22], unit='deg', frame='icrs')
    index = wcs.world_to_array_index(coord)
    assert_equal(data[index], [2369, 3550])


###############################################################################
# The following example is a spectral cube with axes in an unusual order
###############################################################################

HEADER_SPECTRAL_CUBE = """
WCSAXES = 3
CTYPE1 = GLAT-CAR
CTYPE2 = FREQ
CTYPE3 = GLON-CAR
CNAME1 = Latitude
CNAME2 = Frequency
CNAME3 = Longitude
CRVAL1 = 10
CRVAL2 = 20
CRVAL3 = 25
CRPIX1 = 30
CRPIX2 = 40
CRPIX3 = 45
CDELT1 = -0.1
CDELT2 = 0.5
CDELT3 = 0.1
CUNIT1 = deg
CUNIT2 = Hz
CUNIT3 = deg
"""

with warnings.catch_warnings():
    warnings.simplefilter('ignore', VerifyWarning)
    WCS_SPECTRAL_CUBE = WCS(Header.fromstring(HEADER_SPECTRAL_CUBE, sep='\n'))


def test_spectral_cube():

    # Spectral cube with a weird axis ordering

    wcs = WCS_SPECTRAL_CUBE

    # Low-level API

    assert wcs.pixel_n_dim == 3
    assert wcs.world_n_dim == 3
    assert wcs.array_shape is None
    assert wcs.pixel_shape is None
    assert wcs.world_axis_physical_types == ['pos.galactic.lat', 'em.freq', 'pos.galactic.lon']
    assert wcs.world_axis_units == ['deg', 'Hz', 'deg']
    assert wcs.pixel_axis_names == ['', '', '']
    assert wcs.world_axis_names == ['Latitude', 'Frequency', 'Longitude']

    assert_equal(wcs.axis_correlation_matrix, [[True, False, True],
                                               [False, True, False],
                                               [True, False, True]])

    assert len(wcs.world_axis_object_components) == 3
    assert wcs.world_axis_object_components[0] == ('celestial', 1, 'spherical.lat.degree')
    assert wcs.world_axis_object_components[1][:2] == ('spectral', 0)
    assert wcs.world_axis_object_components[2] == ('celestial', 0, 'spherical.lon.degree')

    assert wcs.world_axis_object_classes['celestial'][0] is SkyCoord
    assert wcs.world_axis_object_classes['celestial'][1] == ()
    assert isinstance(wcs.world_axis_object_classes['celestial'][2]['frame'], Galactic)
    assert wcs.world_axis_object_classes['celestial'][2]['unit'] is u.deg

    assert wcs.world_axis_object_classes['spectral'][0] is Quantity
    assert wcs.world_axis_object_classes['spectral'][1] == ()
    assert wcs.world_axis_object_classes['spectral'][2] == {}

    assert_allclose(wcs.pixel_to_world_values(29, 39, 44), (10, 20, 25))
    assert_allclose(wcs.array_index_to_world_values(44, 39, 29), (10, 20, 25))

    assert_allclose(wcs.world_to_pixel_values(10, 20, 25), (29., 39., 44.))
    assert_equal(wcs.world_to_array_index_values(10, 20, 25), (44, 39, 29))

    # High-level API

    coord, spec = wcs.pixel_to_world(29, 39, 44)
    assert isinstance(coord, SkyCoord)
    assert isinstance(coord.frame, Galactic)
    assert_allclose(coord.l.deg, 25)
    assert_allclose(coord.b.deg, 10)
    assert isinstance(spec, SpectralCoord)
    assert_allclose(spec.to_value(u.Hz), 20)

    coord, spec = wcs.array_index_to_world(44, 39, 29)
    assert isinstance(coord, SkyCoord)
    assert isinstance(coord.frame, Galactic)
    assert_allclose(coord.l.deg, 25)
    assert_allclose(coord.b.deg, 10)
    assert isinstance(spec, SpectralCoord)
    assert_allclose(spec.to_value(u.Hz), 20)

    coord = SkyCoord(25, 10, unit='deg', frame='galactic')
    spec = 20 * u.Hz

    with pytest.warns(AstropyUserWarning, match='No observer defined on WCS'):
        x, y, z = wcs.world_to_pixel(coord, spec)
    assert_allclose(x, 29.)
    assert_allclose(y, 39.)
    assert_allclose(z, 44.)

    # Order of world coordinates shouldn't matter
    with pytest.warns(AstropyUserWarning, match='No observer defined on WCS'):
        x, y, z = wcs.world_to_pixel(spec, coord)
    assert_allclose(x, 29.)
    assert_allclose(y, 39.)
    assert_allclose(z, 44.)

    with pytest.warns(AstropyUserWarning, match='No observer defined on WCS'):
        i, j, k = wcs.world_to_array_index(coord, spec)
    assert_equal(i, 44)
    assert_equal(j, 39)
    assert_equal(k, 29)

    # Order of world coordinates shouldn't matter
    with pytest.warns(AstropyUserWarning, match='No observer defined on WCS'):
        i, j, k = wcs.world_to_array_index(spec, coord)
    assert_equal(i, 44)
    assert_equal(j, 39)
    assert_equal(k, 29)


HEADER_SPECTRAL_CUBE_NONALIGNED = HEADER_SPECTRAL_CUBE.strip() + '\n' + """
PC2_3 = -0.5
PC3_2 = +0.5
"""

with warnings.catch_warnings():
    warnings.simplefilter('ignore', VerifyWarning)
    WCS_SPECTRAL_CUBE_NONALIGNED = WCS(Header.fromstring(
        HEADER_SPECTRAL_CUBE_NONALIGNED, sep='\n'))


def test_spectral_cube_nonaligned():

    # Make sure that correlation matrix gets adjusted if there are non-identity
    # CD matrix terms.

    wcs = WCS_SPECTRAL_CUBE_NONALIGNED

    assert wcs.world_axis_physical_types == ['pos.galactic.lat', 'em.freq', 'pos.galactic.lon']
    assert wcs.world_axis_units == ['deg', 'Hz', 'deg']
    assert wcs.pixel_axis_names == ['', '', '']
    assert wcs.world_axis_names == ['Latitude', 'Frequency', 'Longitude']

    assert_equal(wcs.axis_correlation_matrix, [[True, True, True],
                                               [False, True, True],
                                               [True, True, True]])

    # NOTE: we check world_axis_object_components and world_axis_object_classes
    # again here because in the past this failed when non-aligned axes were
    # present, so this serves as a regression test.

    assert len(wcs.world_axis_object_components) == 3
    assert wcs.world_axis_object_components[0] == ('celestial', 1, 'spherical.lat.degree')
    assert wcs.world_axis_object_components[1][:2] == ('spectral', 0)
    assert wcs.world_axis_object_components[2] == ('celestial', 0, 'spherical.lon.degree')

    assert wcs.world_axis_object_classes['celestial'][0] is SkyCoord
    assert wcs.world_axis_object_classes['celestial'][1] == ()
    assert isinstance(wcs.world_axis_object_classes['celestial'][2]['frame'], Galactic)
    assert wcs.world_axis_object_classes['celestial'][2]['unit'] is u.deg

    assert wcs.world_axis_object_classes['spectral'][0] is Quantity
    assert wcs.world_axis_object_classes['spectral'][1] == ()
    assert wcs.world_axis_object_classes['spectral'][2] == {}


###############################################################################
# The following example is from Rots et al (2015), Table 5. It represents a
# cube with two spatial dimensions and one time dimension
###############################################################################

HEADER_TIME_CUBE = """
SIMPLE = T / Fits standard
BITPIX = -32 / Bits per pixel
NAXIS = 3 / Number of axes
NAXIS1 = 2048 / Axis length
NAXIS2 = 2048 / Axis length
NAXIS3 = 11 / Axis length
DATE = '2008-10-28T14:39:06' / Date FITS file was generated
OBJECT = '2008 TC3' / Name of the object observed
EXPTIME = 1.0011 / Integration time
MJD-OBS = 54746.02749237 / Obs start
DATE-OBS= '2008-10-07T00:39:35.3342' / Observing date
TELESCOP= 'VISTA' / ESO Telescope Name
INSTRUME= 'VIRCAM' / Instrument used.
TIMESYS = 'UTC' / From Observatory Time System
TREFPOS = 'TOPOCENT' / Topocentric
MJDREF = 54746.0 / Time reference point in MJD
RADESYS = 'ICRS' / Not equinoctal
CTYPE2 = 'RA---ZPN' / Zenithal Polynomial Projection
CRVAL2 = 2.01824372640628 / RA at ref pixel
CUNIT2 = 'deg' / Angles are degrees always
CRPIX2 = 2956.6 / Pixel coordinate at ref point
CTYPE1 = 'DEC--ZPN' / Zenithal Polynomial Projection
CRVAL1 = 14.8289418840003 / Dec at ref pixel
CUNIT1 = 'deg' / Angles are degrees always
CRPIX1 = -448.2 / Pixel coordinate at ref point
CTYPE3 = 'UTC' / linear time (UTC)
CRVAL3 = 2375.341 / Relative time of first frame
CUNIT3 = 's' / Time unit
CRPIX3 = 1.0 / Pixel coordinate at ref point
CTYPE3A = 'TT' / alternative linear time (TT)
CRVAL3A = 2440.525 / Relative time of first frame
CUNIT3A = 's' / Time unit
CRPIX3A = 1.0 / Pixel coordinate at ref point
OBSGEO-B= -24.6157 / [deg] Tel geodetic latitute (=North)+
OBSGEO-L= -70.3976 / [deg] Tel geodetic longitude (=East)+
OBSGEO-H= 2530.0000 / [m] Tel height above reference ellipsoid
CRDER3 = 0.0819 / random error in timings from fit
CSYER3 = 0.0100 / absolute time error
PC1_1 = 0.999999971570892 / WCS transform matrix element
PC1_2 = 0.000238449608932 / WCS transform matrix element
PC2_1 = -0.000621542859395 / WCS transform matrix element
PC2_2 = 0.999999806842218 / WCS transform matrix element
CDELT1 = -9.48575432499806E-5 / Axis scale at reference point
CDELT2 = 9.48683176211164E-5 / Axis scale at reference point
CDELT3 = 13.3629 / Axis scale at reference point
PV1_1 = 1. / ZPN linear term
PV1_3 = 42. / ZPN cubic term
"""

with warnings.catch_warnings():
    warnings.simplefilter('ignore', (VerifyWarning, FITSFixedWarning))
    WCS_TIME_CUBE = WCS(Header.fromstring(HEADER_TIME_CUBE, sep='\n'))


def test_time_cube():

    # Spectral cube with a weird axis ordering

    wcs = WCS_TIME_CUBE

    assert wcs.pixel_n_dim == 3
    assert wcs.world_n_dim == 3
    assert wcs.array_shape == (11, 2048, 2048)
    assert wcs.pixel_shape == (2048, 2048, 11)
    assert wcs.world_axis_physical_types == ['pos.eq.dec', 'pos.eq.ra', 'time']
    assert wcs.world_axis_units == ['deg', 'deg', 's']
    assert wcs.pixel_axis_names == ['', '', '']
    assert wcs.world_axis_names == ['', '', '']

    assert_equal(wcs.axis_correlation_matrix, [[True, True, False],
                                               [True, True, False],
                                               [False, False, True]])

    components = wcs.world_axis_object_components
    assert components[0] == ('celestial', 1, 'spherical.lat.degree')
    assert components[1] == ('celestial', 0, 'spherical.lon.degree')
    assert components[2][:2] == ('time', 0)
    assert callable(components[2][2])

    assert wcs.world_axis_object_classes['celestial'][0] is SkyCoord
    assert wcs.world_axis_object_classes['celestial'][1] == ()
    assert isinstance(wcs.world_axis_object_classes['celestial'][2]['frame'], ICRS)
    assert wcs.world_axis_object_classes['celestial'][2]['unit'] is u.deg

    assert wcs.world_axis_object_classes['time'][0] is Time
    assert wcs.world_axis_object_classes['time'][1] == ()
    assert wcs.world_axis_object_classes['time'][2] == {}
    assert callable(wcs.world_axis_object_classes['time'][3])

    assert_allclose(wcs.pixel_to_world_values(-449.2, 2955.6, 0),
                    (14.8289418840003, 2.01824372640628, 2375.341))

    assert_allclose(wcs.array_index_to_world_values(0, 2955.6, -449.2),
                    (14.8289418840003, 2.01824372640628, 2375.341))

    assert_allclose(wcs.world_to_pixel_values(14.8289418840003, 2.01824372640628, 2375.341),
                    (-449.2, 2955.6, 0))
    assert_equal(wcs.world_to_array_index_values(14.8289418840003, 2.01824372640628, 2375.341),
                 (0, 2956, -449))

    # High-level API

    coord, time = wcs.pixel_to_world(29, 39, 44)
    assert isinstance(coord, SkyCoord)
    assert isinstance(coord.frame, ICRS)
    assert_allclose(coord.ra.deg, 1.7323356692202325)
    assert_allclose(coord.dec.deg, 14.783516054817797)
    assert isinstance(time, Time)
    assert_allclose(time.mjd, 54746.03429755324)

    coord, time = wcs.array_index_to_world(44, 39, 29)
    assert isinstance(coord, SkyCoord)
    assert isinstance(coord.frame, ICRS)
    assert_allclose(coord.ra.deg, 1.7323356692202325)
    assert_allclose(coord.dec.deg, 14.783516054817797)
    assert isinstance(time, Time)
    assert_allclose(time.mjd, 54746.03429755324)

    x, y, z = wcs.world_to_pixel(coord, time)
    assert_allclose(x, 29.)
    assert_allclose(y, 39.)
    assert_allclose(z, 44.)

    # Order of world coordinates shouldn't matter
    x, y, z = wcs.world_to_pixel(time, coord)
    assert_allclose(x, 29.)
    assert_allclose(y, 39.)
    assert_allclose(z, 44.)

    i, j, k = wcs.world_to_array_index(coord, time)
    assert_equal(i, 44)
    assert_equal(j, 39)
    assert_equal(k, 29)

    # Order of world coordinates shouldn't matter
    i, j, k = wcs.world_to_array_index(time, coord)
    assert_equal(i, 44)
    assert_equal(j, 39)
    assert_equal(k, 29)


###############################################################################
# The following tests are to make sure that Time objects are constructed
# correctly for a variety of combinations of WCS keywords
###############################################################################

HEADER_TIME_1D = """
SIMPLE = T
BITPIX = -32
NAXIS = 1
NAXIS1 = 2048
TIMESYS = 'UTC'
TREFPOS = 'TOPOCENT'
MJDREF = 50002.6
CTYPE1 = 'UTC'
CRVAL1 = 5
CUNIT1 = 's'
CRPIX1 = 1.0
CDELT1 = 2
OBSGEO-L= -20
OBSGEO-B= -70
OBSGEO-H= 2530
"""

# wcslib 7.1+ emits DATEREF alongside MJDREF, so the fixture header includes
# it there to keep round-tripping consistent.
if Version(wcsver) >= Version('7.1'):
    HEADER_TIME_1D += "DATEREF = '1995-10-12T14:24:00'\n"


@pytest.fixture
def header_time_1d():
    """Return a fresh 1D time `Header` parsed from ``HEADER_TIME_1D``."""
    return Header.fromstring(HEADER_TIME_1D, sep='\n')


def assert_time_at(header, position, jd1, jd2, scale, format):
    """Check that pixel ``position`` maps to a `Time` with the given
    ``jd1``/``jd2`` (to within rtol), ``scale`` and ``format``."""
    with warnings.catch_warnings():
        warnings.simplefilter('ignore', FITSFixedWarning)
        wcs = WCS(header)
    time = wcs.pixel_to_world(position)
    assert_allclose(time.jd1, jd1, rtol=1e-10)
    assert_allclose(time.jd2, jd2, rtol=1e-10)
    assert time.format == format
    assert time.scale == scale


@pytest.mark.parametrize('scale', ('tai', 'tcb', 'tcg', 'tdb', 'tt', 'ut1', 'utc', 'local'))
def test_time_1d_values(header_time_1d, scale):

    # Check that Time objects are instantiated with the correct values,
    # scales, and formats.

    header_time_1d['CTYPE1'] = scale.upper()

    assert_time_at(header_time_1d, 1, 2450003, 0.1 + 7 / 3600 / 24, scale, 'mjd')


def test_time_1d_values_gps(header_time_1d):
    # Special treatment for GPS scale
    header_time_1d['CTYPE1'] = 'GPS'
    assert_time_at(header_time_1d, 1, 2450003, 0.1 + (7 + 19) / 3600 / 24, 'tai', 'mjd')


def test_time_1d_values_deprecated(header_time_1d):
    # Deprecated (in FITS) scales
    header_time_1d['CTYPE1'] = 'TDT'
    assert_time_at(header_time_1d, 1, 2450003, 0.1 + 7 / 3600 / 24, 'tt', 'mjd')
    header_time_1d['CTYPE1'] = 'IAT'
    assert_time_at(header_time_1d, 1, 2450003, 0.1 + 7 / 3600 / 24, 'tai', 'mjd')
    header_time_1d['CTYPE1'] = 'GMT'
    assert_time_at(header_time_1d, 1, 2450003, 0.1 + 7 / 3600 / 24, 'utc', 'mjd')
    header_time_1d['CTYPE1'] = 'ET'
    assert_time_at(header_time_1d, 1, 2450003, 0.1 + 7 / 3600 / 24, 'tt', 'mjd')


def test_time_1d_values_time(header_time_1d):
    header_time_1d['CTYPE1'] = 'TIME'
    assert_time_at(header_time_1d, 1, 2450003, 0.1 + 7 / 3600 / 24, 'utc', 'mjd')
    header_time_1d['TIMESYS'] = 'TAI'
    assert_time_at(header_time_1d, 1, 2450003, 0.1 + 7 / 3600 / 24, 'tai', 'mjd')


@pytest.mark.remote_data
@pytest.mark.parametrize('scale', ('tai', 'tcb', 'tcg', 'tdb', 'tt', 'ut1', 'utc'))
def test_time_1d_roundtrip(header_time_1d, scale):

    # Check that coordinates round-trip

    pixel_in = np.arange(3, 10)

    header_time_1d['CTYPE1'] = scale.upper()

    with warnings.catch_warnings():
        warnings.simplefilter('ignore', FITSFixedWarning)
        wcs = WCS(header_time_1d)

    # Simple test
    time = wcs.pixel_to_world(pixel_in)
    pixel_out = wcs.world_to_pixel(time)
    assert_allclose(pixel_in, pixel_out)

    # Test with an intermediate change to a different scale/format
    time = wcs.pixel_to_world(pixel_in).tdb
    time.format = 'isot'
    pixel_out = wcs.world_to_pixel(time)
    assert_allclose(pixel_in, pixel_out)


def test_time_1d_high_precision(header_time_1d):

    # Case where the MJDREF is split into two for high precision
    del header_time_1d['MJDREF']
    header_time_1d['MJDREFI'] = 52000.
    header_time_1d['MJDREFF'] = 1e-11

    with warnings.catch_warnings():
        warnings.simplefilter('ignore', FITSFixedWarning)
        wcs = WCS(header_time_1d)
    time = wcs.pixel_to_world(10)

    # Here we have to use a very small rtol to really test that MJDREFF is
    # taken into account
    assert_allclose(time.jd1, 2452001.0, rtol=1e-12)
    assert_allclose(time.jd2, -0.5 + 25 / 3600 / 24 + 1e-11, rtol=1e-13)


def test_time_1d_location_geodetic(header_time_1d):

    # Make sure that the location is correctly returned (geodetic case)

    with warnings.catch_warnings():
        warnings.simplefilter('ignore', FITSFixedWarning)
        wcs = WCS(header_time_1d)
    time = wcs.pixel_to_world(10)

    lon, lat, alt = time.location.to_geodetic()

    # FIXME: alt won't work for now because ERFA doesn't implement the IAU 1976
    # ellipsoid (https://github.com/astropy/astropy/issues/9420)

    assert_allclose(lon.degree, -20)
    assert_allclose(lat.degree, -70)
    # assert_allclose(alt.to_value(u.m), 2530.)
@pytest.fixture def header_time_1d_no_obs(): header = Header.fromstring(HEADER_TIME_1D, sep='\n') del header['OBSGEO-L'] del header['OBSGEO-B'] del header['OBSGEO-H'] return header def test_time_1d_location_geocentric(header_time_1d_no_obs): # Make sure that the location is correctly returned (geocentric case) header = header_time_1d_no_obs header['OBSGEO-X'] = 10 header['OBSGEO-Y'] = -20 header['OBSGEO-Z'] = 30 with warnings.catch_warnings(): warnings.simplefilter('ignore', FITSFixedWarning) wcs = WCS(header) time = wcs.pixel_to_world(10) x, y, z = time.location.to_geocentric() assert_allclose(x.to_value(u.m), 10) assert_allclose(y.to_value(u.m), -20) assert_allclose(z.to_value(u.m), 30) def test_time_1d_location_geocenter(header_time_1d_no_obs): header_time_1d_no_obs['TREFPOS'] = 'GEOCENTER' wcs = WCS(header_time_1d_no_obs) time = wcs.pixel_to_world(10) x, y, z = time.location.to_geocentric() assert_allclose(x.to_value(u.m), 0) assert_allclose(y.to_value(u.m), 0) assert_allclose(z.to_value(u.m), 0) def test_time_1d_location_missing(header_time_1d_no_obs): # Check what happens when no location is present wcs = WCS(header_time_1d_no_obs) with pytest.warns(UserWarning, match='Missing or incomplete observer location ' 'information, setting location in Time to None'): time = wcs.pixel_to_world(10) assert time.location is None def test_time_1d_location_incomplete(header_time_1d_no_obs): # Check what happens when location information is incomplete header_time_1d_no_obs['OBSGEO-L'] = 10. 
with warnings.catch_warnings(): warnings.simplefilter('ignore', FITSFixedWarning) wcs = WCS(header_time_1d_no_obs) with pytest.warns(UserWarning, match='Missing or incomplete observer location ' 'information, setting location in Time to None'): time = wcs.pixel_to_world(10) assert time.location is None def test_time_1d_location_unsupported(header_time_1d_no_obs): # Check what happens when TREFPOS is unsupported header_time_1d_no_obs['TREFPOS'] = 'BARYCENTER' wcs = WCS(header_time_1d_no_obs) with pytest.warns(UserWarning, match="Observation location 'barycenter' is not " "supported, setting location in Time to None"): time = wcs.pixel_to_world(10) assert time.location is None def test_time_1d_unsupported_ctype(header_time_1d_no_obs): # For cases that we don't support yet, e.g. UT(...), use Time and drop sub-scale # Case where the MJDREF is split into two for high precision header_time_1d_no_obs['CTYPE1'] = 'UT(WWV)' wcs = WCS(header_time_1d_no_obs) with pytest.warns(UserWarning, match="Dropping unsupported sub-scale WWV from scale UT"): time = wcs.pixel_to_world(10) assert isinstance(time, Time) ############################################################################### # Extra corner cases ############################################################################### def test_unrecognized_unit(): # TODO: Determine whether the following behavior is desirable wcs = WCS(naxis=1) with pytest.warns(UnitsWarning): wcs.wcs.cunit = ['bananas // sekonds'] assert wcs.world_axis_units == ['bananas // sekonds'] def test_distortion_correlations(): filename = get_pkg_data_filename('../../tests/data/sip.fits') with pytest.warns(FITSFixedWarning): w = WCS(filename) assert_equal(w.axis_correlation_matrix, True) # Changing PC to an identity matrix doesn't change anything since # distortions are still present. 
w.wcs.pc = [[1, 0], [0, 1]] assert_equal(w.axis_correlation_matrix, True) # Nor does changing the name of the axes to make them non-celestial w.wcs.ctype = ['X', 'Y'] assert_equal(w.axis_correlation_matrix, True) # However once we turn off the distortions the matrix changes w.sip = None assert_equal(w.axis_correlation_matrix, [[True, False], [False, True]]) # If we go back to celestial coordinates then the matrix is all True again w.wcs.ctype = ['RA---TAN', 'DEC--TAN'] assert_equal(w.axis_correlation_matrix, True) # Or if we change to X/Y but have a non-identity PC w.wcs.pc = [[0.9, -0.1], [0.1, 0.9]] w.wcs.ctype = ['X', 'Y'] assert_equal(w.axis_correlation_matrix, True) def test_custom_ctype_to_ucd_mappings(): wcs = WCS(naxis=1) wcs.wcs.ctype = ['SPAM'] assert wcs.world_axis_physical_types == [None] # Check simple behavior with custom_ctype_to_ucd_mapping({'APPLE': 'food.fruit'}): assert wcs.world_axis_physical_types == [None] with custom_ctype_to_ucd_mapping({'APPLE': 'food.fruit', 'SPAM': 'food.spam'}): assert wcs.world_axis_physical_types == ['food.spam'] # Check nesting with custom_ctype_to_ucd_mapping({'SPAM': 'food.spam'}): with custom_ctype_to_ucd_mapping({'APPLE': 'food.fruit'}): assert wcs.world_axis_physical_types == ['food.spam'] with custom_ctype_to_ucd_mapping({'APPLE': 'food.fruit'}): with custom_ctype_to_ucd_mapping({'SPAM': 'food.spam'}): assert wcs.world_axis_physical_types == ['food.spam'] # Check priority in nesting with custom_ctype_to_ucd_mapping({'SPAM': 'notfood'}): with custom_ctype_to_ucd_mapping({'SPAM': 'food.spam'}): assert wcs.world_axis_physical_types == ['food.spam'] with custom_ctype_to_ucd_mapping({'SPAM': 'food.spam'}): with custom_ctype_to_ucd_mapping({'SPAM': 'notfood'}): assert wcs.world_axis_physical_types == ['notfood'] def test_caching_components_and_classes(): # Make sure that when we change the WCS object, the classes and components # are updated (we use a cache internally, so we need to make sure the cache # is 
invalidated if needed) wcs = WCS_SIMPLE_CELESTIAL.deepcopy() assert wcs.world_axis_object_components == [('celestial', 0, 'spherical.lon.degree'), ('celestial', 1, 'spherical.lat.degree')] assert wcs.world_axis_object_classes['celestial'][0] is SkyCoord assert wcs.world_axis_object_classes['celestial'][1] == () assert isinstance(wcs.world_axis_object_classes['celestial'][2]['frame'], ICRS) assert wcs.world_axis_object_classes['celestial'][2]['unit'] is u.deg wcs.wcs.radesys = 'FK5' frame = wcs.world_axis_object_classes['celestial'][2]['frame'] assert isinstance(frame, FK5) assert frame.equinox.jyear == 2000. wcs.wcs.equinox = 2010 frame = wcs.world_axis_object_classes['celestial'][2]['frame'] assert isinstance(frame, FK5) assert frame.equinox.jyear == 2010. def test_sub_wcsapi_attributes(): # Regression test for a bug that caused some of the WCS attributes to be # incorrect when using WCS.sub or WCS.celestial (which is an alias for sub # with lon/lat types). wcs = WCS_SPECTRAL_CUBE.deepcopy() wcs.pixel_shape = (30, 40, 50) wcs.pixel_bounds = [(-1, 11), (-2, 18), (5, 15)] # Use celestial shortcut wcs_sub1 = wcs.celestial assert wcs_sub1.pixel_n_dim == 2 assert wcs_sub1.world_n_dim == 2 assert wcs_sub1.array_shape == (50, 30) assert wcs_sub1.pixel_shape == (30, 50) assert wcs_sub1.pixel_bounds == [(-1, 11), (5, 15)] assert wcs_sub1.world_axis_physical_types == ['pos.galactic.lat', 'pos.galactic.lon'] assert wcs_sub1.world_axis_units == ['deg', 'deg'] assert wcs_sub1.world_axis_names == ['Latitude', 'Longitude'] # Try adding axes wcs_sub2 = wcs.sub([0, 2, 0]) assert wcs_sub2.pixel_n_dim == 3 assert wcs_sub2.world_n_dim == 3 assert wcs_sub2.array_shape == (None, 40, None) assert wcs_sub2.pixel_shape == (None, 40, None) assert wcs_sub2.pixel_bounds == [None, (-2, 18), None] assert wcs_sub2.world_axis_physical_types == [None, 'em.freq', None] assert wcs_sub2.world_axis_units == ['', 'Hz', ''] assert wcs_sub2.world_axis_names == ['', 'Frequency', ''] # Use strings 
wcs_sub3 = wcs.sub(['longitude', 'latitude']) assert wcs_sub3.pixel_n_dim == 2 assert wcs_sub3.world_n_dim == 2 assert wcs_sub3.array_shape == (30, 50) assert wcs_sub3.pixel_shape == (50, 30) assert wcs_sub3.pixel_bounds == [(5, 15), (-1, 11)] assert wcs_sub3.world_axis_physical_types == ['pos.galactic.lon', 'pos.galactic.lat'] assert wcs_sub3.world_axis_units == ['deg', 'deg'] assert wcs_sub3.world_axis_names == ['Longitude', 'Latitude'] # Now try without CNAME set wcs.wcs.cname = [''] * wcs.wcs.naxis wcs_sub4 = wcs.sub(['longitude', 'latitude']) assert wcs_sub4.pixel_n_dim == 2 assert wcs_sub4.world_n_dim == 2 assert wcs_sub4.array_shape == (30, 50) assert wcs_sub4.pixel_shape == (50, 30) assert wcs_sub4.pixel_bounds == [(5, 15), (-1, 11)] assert wcs_sub4.world_axis_physical_types == ['pos.galactic.lon', 'pos.galactic.lat'] assert wcs_sub4.world_axis_units == ['deg', 'deg'] assert wcs_sub4.world_axis_names == ['', ''] HEADER_POLARIZED = """ CTYPE1 = 'HPLT-TAN' CTYPE2 = 'HPLN-TAN' CTYPE3 = 'STOKES' """ @pytest.fixture def header_polarized(): return Header.fromstring(HEADER_POLARIZED, sep='\n') def test_phys_type_polarization(header_polarized): w = WCS(header_polarized) assert w.world_axis_physical_types[2] == 'phys.polarization.stokes' ############################################################################### # Spectral transformations ############################################################################### HEADER_SPECTRAL_FRAMES = """ BUNIT = 'Jy/beam' EQUINOX = 2.000000000E+03 CTYPE1 = 'RA---SIN' CRVAL1 = 2.60108333333E+02 CDELT1 = -2.777777845E-04 CRPIX1 = 1.0 CUNIT1 = 'deg' CTYPE2 = 'DEC--SIN' CRVAL2 = -9.75000000000E-01 CDELT2 = 2.777777845E-04 CRPIX2 = 1.0 CUNIT2 = 'deg' CTYPE3 = 'FREQ' CRVAL3 = 1.37835117405E+09 CDELT3 = 9.765625000E+04 CRPIX3 = 32.0 CUNIT3 = 'Hz' SPECSYS = 'TOPOCENT' RESTFRQ = 1.420405752E+09 / [Hz] RADESYS = 'FK5' """ @pytest.fixture def header_spectral_frames(): return Header.fromstring(HEADER_SPECTRAL_FRAMES, sep='\n') def 
test_spectralcoord_frame(header_spectral_frames): # This is a test to check the numerical results of transformations between # different velocity frames. We simply make sure that the returned # SpectralCoords are in the right frame but don't check the transformations # since this is already done in test_spectralcoord_accuracy # in astropy.coordinates. with iers.conf.set_temp('auto_download', False): obstime = Time(f"2009-05-04T04:44:23", scale='utc') header = header_spectral_frames.copy() header['MJD-OBS'] = obstime.mjd header['CRVAL1'] = 16.33211 header['CRVAL2'] = -34.2221 header['OBSGEO-L'] = 144.2 header['OBSGEO-B'] = -20.2 header['OBSGEO-H'] = 0. # We start off with a WCS defined in topocentric frequency with pytest.warns(FITSFixedWarning): wcs_topo = WCS(header) # We convert a single pixel coordinate to world coordinates and keep only # the second high level object - a SpectralCoord: sc_topo = wcs_topo.pixel_to_world(0, 0, 31)[1] # We check that this is in topocentric frame with zero velocities assert isinstance(sc_topo, SpectralCoord) assert isinstance(sc_topo.observer, ITRS) assert sc_topo.observer.obstime.isot == obstime.isot assert_equal(sc_topo.observer.data.differentials['s'].d_xyz.value, 0) observatory = EarthLocation.from_geodetic(144.2, -20.2).get_itrs(obstime=obstime).transform_to(ICRS()) assert observatory.separation_3d(sc_topo.observer.transform_to(ICRS())) < 1 * u.km for specsys, expected_frame in VELOCITY_FRAMES.items(): header['SPECSYS'] = specsys with pytest.warns(FITSFixedWarning): wcs = WCS(header) sc = wcs.pixel_to_world(0, 0, 31)[1] # Now transform to the expected velocity frame, which should leave # the spectral coordinate unchanged sc_check = sc.with_observer_stationary_relative_to(expected_frame) assert_quantity_allclose(sc.quantity, sc_check.quantity) @pytest.mark.parametrize(('ctype3', 'observer'), product(['ZOPT', 'BETA', 'VELO', 'VRAD', 'VOPT'], [False, True])) def test_different_ctypes(header_spectral_frames, ctype3, observer): 
header = header_spectral_frames.copy() header['CTYPE3'] = ctype3 header['CRVAL3'] = 0.1 header['CDELT3'] = 0.001 if ctype3[0] == 'V': header['CUNIT3'] = 'm s-1' else: header['CUNIT3'] = '' header['RESTWAV'] = 1.420405752E+09 header['MJD-OBS'] = 55197 if observer: header['OBSGEO-L'] = 144.2 header['OBSGEO-B'] = -20.2 header['OBSGEO-H'] = 0. header['SPECSYS'] = 'BARYCENT' with warnings.catch_warnings(): warnings.simplefilter('ignore', FITSFixedWarning) wcs = WCS(header) skycoord, spectralcoord = wcs.pixel_to_world(0, 0, 31) assert isinstance(spectralcoord, SpectralCoord) if observer: pix = wcs.world_to_pixel(skycoord, spectralcoord) else: with pytest.warns(AstropyUserWarning, match='No observer defined on WCS'): pix = wcs.world_to_pixel(skycoord, spectralcoord) assert_allclose(pix, [0, 0, 31], rtol=1e-6)
pllim/astropy
astropy/wcs/wcsapi/tests/test_fitswcs.py
astropy/utils/shapes.py
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This file contains the high-level functions to read a
VOTable file.
"""

# STDLIB
import io
import os
import sys
import textwrap
import warnings

# LOCAL
from . import exceptions
from . import tree
from astropy.utils.xml import iterparser
from astropy.utils import data
from astropy.utils.decorators import deprecated_renamed_argument
from astropy.utils.exceptions import AstropyDeprecationWarning


__all__ = ['parse', 'parse_single_table', 'from_table', 'writeto', 'validate',
           'reset_vo_warnings']

VERIFY_OPTIONS = ['ignore', 'warn', 'exception']


@deprecated_renamed_argument('pedantic', 'verify', pending=True, since='4.0')
def parse(source, columns=None, invalid='exception', verify=None,
          chunk_size=tree.DEFAULT_CHUNK_SIZE, table_number=None,
          table_id=None, filename=None, unit_format=None,
          datatype_mapping=None, _debug_python_based_parser=False):
    """
    Parses a VOTABLE_ xml file (or file-like object), and returns a
    `~astropy.io.votable.tree.VOTableFile` object.

    Parameters
    ----------
    source : path-like or file-like
        Path or file-like object containing a VOTABLE_ xml file.
        If file, must be readable.

    columns : sequence of str, optional
        List of field names to include in the output.  The default is
        to include all fields.

    invalid : str, optional
        One of the following values:

            - 'exception': throw an exception when an invalid value is
              encountered (default)

            - 'mask': mask out invalid values

    verify : {'ignore', 'warn', 'exception'}, optional
        When ``'exception'``, raise an error when the file violates the spec,
        otherwise either issue a warning (``'warn'``) or silently continue
        (``'ignore'``). Warnings may be controlled using the standard Python
        mechanisms.  See the `warnings` module in the Python standard library
        for more information. When not provided, uses the configuration setting
        ``astropy.io.votable.verify``, which defaults to 'ignore'.

        .. versionchanged:: 4.0
           ``verify`` replaces the ``pedantic`` argument, which will be
           deprecated in future.

    chunk_size : int, optional
        The number of rows to read before converting to an array.
        Higher numbers are likely to be faster, but will consume more
        memory.

    table_number : int, optional
        The number of table in the file to read in.  If `None`, all
        tables will be read.  If a number, 0 refers to the first table
        in the file, and only that numbered table will be parsed and
        read in.  Should not be used with ``table_id``.

    table_id : str, optional
        The ID of the table in the file to read in.  Should not be
        used with ``table_number``.

    filename : str, optional
        A filename, URL or other identifier to use in error messages.
        If *filename* is None and *source* is a string (i.e. a path),
        then *source* will be used as a filename for error messages.
        Therefore, *filename* is only required when source is a
        file-like object.

    unit_format : str, astropy.units.format.Base instance or None, optional
        The unit format to use when parsing unit attributes.  If a
        string, must be the name of a unit formatter. The built-in
        formats include ``generic``, ``fits``, ``cds``, and
        ``vounit``. A custom formatter may be provided by passing a
        `~astropy.units.UnitBase` instance.  If `None` (default),
        the unit format to use will be the one specified by the
        VOTable specification (which is ``cds`` up to version 1.3 of
        VOTable, and ``vounit`` in more recent versions of the spec).

    datatype_mapping : dict, optional
        A mapping of datatype names (`str`) to valid VOTable datatype names
        (str). For example, if the file being read contains the datatype
        "unsignedInt" (an invalid datatype in VOTable), include the mapping
        ``{"unsignedInt": "long"}``.

    Returns
    -------
    votable : `~astropy.io.votable.tree.VOTableFile` object

    See also
    --------
    astropy.io.votable.exceptions : The exceptions this function may raise.
    """
    from . import conf

    invalid = invalid.lower()
    if invalid not in ('exception', 'mask'):
        raise ValueError("accepted values of ``invalid`` are: "
                         "``'exception'`` or ``'mask'``.")

    if verify is None:
        # NOTE: since the pedantic argument isn't fully deprecated yet, we need
        # to catch the deprecation warning that occurs when accessing the
        # configuration item, but only if it is for the pedantic option in the
        # [io.votable] section.
        with warnings.catch_warnings():
            warnings.filterwarnings(
                "ignore",
                r"Config parameter \'pedantic\' in section \[io.votable\]",
                AstropyDeprecationWarning)
            conf_verify_lowercase = conf.verify.lower()

        # We need to allow verify to be booleans as strings since the
        # configuration framework doesn't make it easy/possible to have mixed
        # types.
        if conf_verify_lowercase in ['false', 'true']:
            verify = conf_verify_lowercase == 'true'
        else:
            verify = conf_verify_lowercase

    if isinstance(verify, bool):
        verify = 'exception' if verify else 'warn'
    elif verify not in VERIFY_OPTIONS:
        raise ValueError(f"verify should be one of {'/'.join(VERIFY_OPTIONS)}")

    if datatype_mapping is None:
        datatype_mapping = {}

    config = {
        'columns': columns,
        'invalid': invalid,
        'verify': verify,
        'chunk_size': chunk_size,
        'table_number': table_number,
        'filename': filename,
        'unit_format': unit_format,
        'datatype_mapping': datatype_mapping
    }

    if filename is None and isinstance(source, str):
        config['filename'] = source

    with iterparser.get_xml_iterator(
            source,
            _debug_python_based_parser=_debug_python_based_parser) as iterator:
        return tree.VOTableFile(
            config=config, pos=(1, 1)).parse(iterator, config)


def parse_single_table(source, **kwargs):
    """
    Parses a VOTABLE_ xml file (or file-like object), reading and
    returning only the first `~astropy.io.votable.tree.Table`
    instance.

    See `parse` for a description of the keyword arguments.

    Returns
    -------
    votable : `~astropy.io.votable.tree.Table` object
    """
    if kwargs.get('table_number') is None:
        kwargs['table_number'] = 0

    votable = parse(source, **kwargs)

    return votable.get_first_table()


def writeto(table, file, tabledata_format=None):
    """
    Writes a `~astropy.io.votable.tree.VOTableFile` to a VOTABLE_ xml file.

    Parameters
    ----------
    table : `~astropy.io.votable.tree.VOTableFile` or `~astropy.table.Table` instance.

    file : str or writable file-like
        Path or file object to write to

    tabledata_format : str, optional
        Override the format of the table(s) data to write.  Must be
        one of ``tabledata`` (text representation), ``binary`` or
        ``binary2``.  By default, use the format that was specified in
        each ``table`` object as it was created or read in.  See
        :ref:`astropy:votable-serialization`.
    """
    from astropy.table import Table
    if isinstance(table, Table):
        table = tree.VOTableFile.from_table(table)
    elif not isinstance(table, tree.VOTableFile):
        raise TypeError(
            "first argument must be astropy.io.vo.VOTableFile or "
            "astropy.table.Table instance")
    table.to_xml(file, tabledata_format=tabledata_format,
                 _debug_python_based_parser=True)


def validate(source, output=sys.stdout, xmllint=False, filename=None):
    """
    Prints a validation report for the given file.

    Parameters
    ----------
    source : path-like or file-like
        Path to a VOTABLE_ xml file or `~pathlib.Path`
        object having Path to a VOTABLE_ xml file.
        If file-like object, must be readable.

    output : file-like, optional
        Where to output the report.  Defaults to ``sys.stdout``.
        If `None`, the output will be returned as a string.
        Must be writable.

    xmllint : bool, optional
        When `True`, also send the file to ``xmllint`` for schema and
        DTD validation.  Requires that ``xmllint`` is installed.  The
        default is `False`.  ``source`` must be a file on the local
        filesystem in order for ``xmllint`` to work.

    filename : str, optional
        A filename to use in the error messages.  If not provided, one
        will be automatically determined from ``source``.

    Returns
    -------
    is_valid : bool or str
        Returns `True` if no warnings were found.  If ``output`` is
        `None`, the return value will be a string.
    """
    from astropy.utils.console import print_code_line, color_print

    # When no output stream is given, buffer the report in a StringIO so
    # it can be returned as a string, as documented above.  (Previously
    # ``None`` was remapped to sys.stdout first, which made this branch
    # unreachable and the string-return contract impossible to fulfil.)
    return_as_str = False
    if output is None:
        output = io.StringIO()
        return_as_str = True

    lines = []
    votable = None

    # Start from a clean warning state so every problem is reported on
    # each call, even across repeated validations of the same file.
    reset_vo_warnings()

    # Read the whole input once into memory: the buffer is parsed first
    # and then re-read to print the offending source lines.
    with data.get_readable_fileobj(source, encoding='binary') as fd:
        content = fd.read()
    content_buffer = io.BytesIO(content)
    content_buffer.seek(0)

    if filename is None:
        if isinstance(source, str):
            filename = source
        elif hasattr(source, 'name'):
            filename = source.name
        elif hasattr(source, 'url'):
            filename = source.url
        else:
            filename = "<unknown>"

    # Collect every VOWarning emitted during parsing (plus any fatal
    # ValueError) rather than letting them propagate to the user.
    with warnings.catch_warnings(record=True) as warning_lines:
        warnings.resetwarnings()
        warnings.simplefilter("always", exceptions.VOWarning, append=True)
        try:
            votable = parse(content_buffer, verify='warn',
                            filename=filename)
        except ValueError as e:
            lines.append(str(e))

    lines = [str(x.message) for x in warning_lines
             if issubclass(x.category, exceptions.VOWarning)] + lines

    content_buffer.seek(0)
    output.write(f"Validation report for {filename}\n\n")

    if len(lines):
        xml_lines = iterparser.xml_readlines(content_buffer)

        for warning in lines:
            w = exceptions.parse_vowarning(warning)

            if not w['is_something']:
                output.write(w['message'])
                output.write('\n\n')
            else:
                line = xml_lines[w['nline'] - 1]
                warning = w['warning']
                if w['is_warning']:
                    color = 'yellow'
                else:
                    color = 'red'
                color_print(
                    f"{w['nline']:d}: ", '',
                    warning or 'EXC', color,
                    ': ', '',
                    textwrap.fill(
                        w['message'],
                        initial_indent='          ',
                        subsequent_indent='  ').lstrip(),
                    file=output)
                print_code_line(line, w['nchar'], file=output)
            output.write('\n')
    else:
        output.write('astropy.io.votable found no violations.\n\n')

    success = 0
    if xmllint and os.path.exists(filename):
        from . import xmlutil

        if votable is None:
            version = "1.1"
        else:
            version = votable.version
        success, stdout, stderr = xmlutil.validate_schema(
            filename, version)

        if success != 0:
            output.write(
                'xmllint schema violations:\n\n')
            output.write(stderr.decode('utf-8'))
        else:
            output.write('xmllint passed\n')

    if return_as_str:
        return output.getvalue()
    return len(lines) == 0 and success == 0


def from_table(table, table_id=None):
    """
    Given an `~astropy.table.Table` object, return a
    `~astropy.io.votable.tree.VOTableFile` file structure containing
    just that single table.

    Parameters
    ----------
    table : `~astropy.table.Table` instance

    table_id : str, optional
        If not `None`, set the given id on the returned
        `~astropy.io.votable.tree.Table` instance.

    Returns
    -------
    votable : `~astropy.io.votable.tree.VOTableFile` instance
    """
    return tree.VOTableFile.from_table(table, table_id=table_id)


def is_votable(source):
    """
    Reads the header of a file to determine if it is a VOTable file.

    Parameters
    ----------
    source : path-like or file-like
        Path or file object containing a VOTABLE_ xml file.
        If file, must be readable.

    Returns
    -------
    is_votable : bool
        Returns `True` if the given file is a VOTable file.
    """
    try:
        with iterparser.get_xml_iterator(source) as iterator:
            # The first event must be the XML declaration...
            for start, tag, d, pos in iterator:
                if tag != 'xml':
                    return False
                break

            # ...immediately followed by the VOTABLE root element.
            for start, tag, d, pos in iterator:
                if tag != 'VOTABLE':
                    return False
                break

            return True
    except ValueError:
        return False


def reset_vo_warnings():
    """
    Resets all of the vo warning state so that warnings that
    have already been emitted will be emitted again. This is
    used, for example, by `validate` which must emit all
    warnings each time it is called.
    """
    from . import converters, xmlutil

    # -----------------------------------------------------------#
    #  This is a special variable used by the Python warnings    #
    #  infrastructure to keep track of warnings that have        #
    #  already been seen.  Since we want to get every single     #
    #  warning out of this, we have to delete all of them first. #
    # -----------------------------------------------------------#
    for module in (converters, exceptions, tree, xmlutil):
        if hasattr(module, '__warningregistry__'):
            del module.__warningregistry__
# Note that we test the main astropy.wcs.WCS class directly rather than testing # the mix-in class on its own (since it's not functional without being used as # a mix-in) import warnings from packaging.version import Version import numpy as np import pytest from numpy.testing import assert_equal, assert_allclose from itertools import product from astropy import units as u from astropy.time import Time from astropy.tests.helper import assert_quantity_allclose from astropy.units import Quantity from astropy.coordinates import ICRS, FK5, Galactic, SkyCoord, SpectralCoord, ITRS, EarthLocation from astropy.io.fits import Header from astropy.io.fits.verify import VerifyWarning from astropy.units.core import UnitsWarning from astropy.utils.data import get_pkg_data_filename from astropy.wcs.wcs import WCS, FITSFixedWarning from astropy.wcs.wcsapi.fitswcs import custom_ctype_to_ucd_mapping, VELOCITY_FRAMES from astropy.wcs._wcs import __version__ as wcsver from astropy.utils import iers from astropy.utils.exceptions import AstropyUserWarning ############################################################################### # The following example is the simplest WCS with default values ############################################################################### WCS_EMPTY = WCS(naxis=1) WCS_EMPTY.wcs.crpix = [1] def test_empty(): wcs = WCS_EMPTY # Low-level API assert wcs.pixel_n_dim == 1 assert wcs.world_n_dim == 1 assert wcs.array_shape is None assert wcs.pixel_shape is None assert wcs.world_axis_physical_types == [None] assert wcs.world_axis_units == [''] assert wcs.pixel_axis_names == [''] assert wcs.world_axis_names == [''] assert_equal(wcs.axis_correlation_matrix, True) assert wcs.world_axis_object_components == [('world', 0, 'value')] assert wcs.world_axis_object_classes['world'][0] is Quantity assert wcs.world_axis_object_classes['world'][1] == () assert wcs.world_axis_object_classes['world'][2]['unit'] is u.one assert_allclose(wcs.pixel_to_world_values(29), 29) 
assert_allclose(wcs.array_index_to_world_values(29), 29) assert np.ndim(wcs.pixel_to_world_values(29)) == 0 assert np.ndim(wcs.array_index_to_world_values(29)) == 0 assert_allclose(wcs.world_to_pixel_values(29), 29) assert_equal(wcs.world_to_array_index_values(29), (29,)) assert np.ndim(wcs.world_to_pixel_values(29)) == 0 assert np.ndim(wcs.world_to_array_index_values(29)) == 0 # High-level API coord = wcs.pixel_to_world(29) assert_quantity_allclose(coord, 29 * u.one) assert np.ndim(coord) == 0 coord = wcs.array_index_to_world(29) assert_quantity_allclose(coord, 29 * u.one) assert np.ndim(coord) == 0 coord = 15 * u.one x = wcs.world_to_pixel(coord) assert_allclose(x, 15.) assert np.ndim(x) == 0 i = wcs.world_to_array_index(coord) assert_equal(i, 15) assert np.ndim(i) == 0 ############################################################################### # The following example is a simple 2D image with celestial coordinates ############################################################################### HEADER_SIMPLE_CELESTIAL = """ WCSAXES = 2 CTYPE1 = RA---TAN CTYPE2 = DEC--TAN CRVAL1 = 10 CRVAL2 = 20 CRPIX1 = 30 CRPIX2 = 40 CDELT1 = -0.1 CDELT2 = 0.1 CROTA2 = 0. 
CUNIT1 = deg CUNIT2 = deg """ with warnings.catch_warnings(): warnings.simplefilter('ignore', VerifyWarning) WCS_SIMPLE_CELESTIAL = WCS(Header.fromstring( HEADER_SIMPLE_CELESTIAL, sep='\n')) def test_simple_celestial(): wcs = WCS_SIMPLE_CELESTIAL # Low-level API assert wcs.pixel_n_dim == 2 assert wcs.world_n_dim == 2 assert wcs.array_shape is None assert wcs.pixel_shape is None assert wcs.world_axis_physical_types == ['pos.eq.ra', 'pos.eq.dec'] assert wcs.world_axis_units == ['deg', 'deg'] assert wcs.pixel_axis_names == ['', ''] assert wcs.world_axis_names == ['', ''] assert_equal(wcs.axis_correlation_matrix, True) assert wcs.world_axis_object_components == [('celestial', 0, 'spherical.lon.degree'), ('celestial', 1, 'spherical.lat.degree')] assert wcs.world_axis_object_classes['celestial'][0] is SkyCoord assert wcs.world_axis_object_classes['celestial'][1] == () assert isinstance(wcs.world_axis_object_classes['celestial'][2]['frame'], ICRS) assert wcs.world_axis_object_classes['celestial'][2]['unit'] is u.deg assert_allclose(wcs.pixel_to_world_values(29, 39), (10, 20)) assert_allclose(wcs.array_index_to_world_values(39, 29), (10, 20)) assert_allclose(wcs.world_to_pixel_values(10, 20), (29., 39.)) assert_equal(wcs.world_to_array_index_values(10, 20), (39, 29)) # High-level API coord = wcs.pixel_to_world(29, 39) assert isinstance(coord, SkyCoord) assert isinstance(coord.frame, ICRS) assert_allclose(coord.ra.deg, 10) assert_allclose(coord.dec.deg, 20) coord = wcs.array_index_to_world(39, 29) assert isinstance(coord, SkyCoord) assert isinstance(coord.frame, ICRS) assert_allclose(coord.ra.deg, 10) assert_allclose(coord.dec.deg, 20) coord = SkyCoord(10, 20, unit='deg', frame='icrs') x, y = wcs.world_to_pixel(coord) assert_allclose(x, 29.) assert_allclose(y, 39.) 
i, j = wcs.world_to_array_index(coord) assert_equal(i, 39) assert_equal(j, 29) # Check that if the coordinates are passed in a different frame things still # work properly coord_galactic = coord.galactic x, y = wcs.world_to_pixel(coord_galactic) assert_allclose(x, 29.) assert_allclose(y, 39.) i, j = wcs.world_to_array_index(coord_galactic) assert_equal(i, 39) assert_equal(j, 29) # Check that we can actually index the array data = np.arange(3600).reshape((60, 60)) coord = SkyCoord(10, 20, unit='deg', frame='icrs') index = wcs.world_to_array_index(coord) assert_equal(data[index], 2369) coord = SkyCoord([10, 12], [20, 22], unit='deg', frame='icrs') index = wcs.world_to_array_index(coord) assert_equal(data[index], [2369, 3550]) ############################################################################### # The following example is a spectral cube with axes in an unusual order ############################################################################### HEADER_SPECTRAL_CUBE = """ WCSAXES = 3 CTYPE1 = GLAT-CAR CTYPE2 = FREQ CTYPE3 = GLON-CAR CNAME1 = Latitude CNAME2 = Frequency CNAME3 = Longitude CRVAL1 = 10 CRVAL2 = 20 CRVAL3 = 25 CRPIX1 = 30 CRPIX2 = 40 CRPIX3 = 45 CDELT1 = -0.1 CDELT2 = 0.5 CDELT3 = 0.1 CUNIT1 = deg CUNIT2 = Hz CUNIT3 = deg """ with warnings.catch_warnings(): warnings.simplefilter('ignore', VerifyWarning) WCS_SPECTRAL_CUBE = WCS(Header.fromstring(HEADER_SPECTRAL_CUBE, sep='\n')) def test_spectral_cube(): # Spectral cube with a weird axis ordering wcs = WCS_SPECTRAL_CUBE # Low-level API assert wcs.pixel_n_dim == 3 assert wcs.world_n_dim == 3 assert wcs.array_shape is None assert wcs.pixel_shape is None assert wcs.world_axis_physical_types == ['pos.galactic.lat', 'em.freq', 'pos.galactic.lon'] assert wcs.world_axis_units == ['deg', 'Hz', 'deg'] assert wcs.pixel_axis_names == ['', '', ''] assert wcs.world_axis_names == ['Latitude', 'Frequency', 'Longitude'] assert_equal(wcs.axis_correlation_matrix, [[True, False, True], [False, True, False], [True, 
False, True]]) assert len(wcs.world_axis_object_components) == 3 assert wcs.world_axis_object_components[0] == ('celestial', 1, 'spherical.lat.degree') assert wcs.world_axis_object_components[1][:2] == ('spectral', 0) assert wcs.world_axis_object_components[2] == ('celestial', 0, 'spherical.lon.degree') assert wcs.world_axis_object_classes['celestial'][0] is SkyCoord assert wcs.world_axis_object_classes['celestial'][1] == () assert isinstance(wcs.world_axis_object_classes['celestial'][2]['frame'], Galactic) assert wcs.world_axis_object_classes['celestial'][2]['unit'] is u.deg assert wcs.world_axis_object_classes['spectral'][0] is Quantity assert wcs.world_axis_object_classes['spectral'][1] == () assert wcs.world_axis_object_classes['spectral'][2] == {} assert_allclose(wcs.pixel_to_world_values(29, 39, 44), (10, 20, 25)) assert_allclose(wcs.array_index_to_world_values(44, 39, 29), (10, 20, 25)) assert_allclose(wcs.world_to_pixel_values(10, 20, 25), (29., 39., 44.)) assert_equal(wcs.world_to_array_index_values(10, 20, 25), (44, 39, 29)) # High-level API coord, spec = wcs.pixel_to_world(29, 39, 44) assert isinstance(coord, SkyCoord) assert isinstance(coord.frame, Galactic) assert_allclose(coord.l.deg, 25) assert_allclose(coord.b.deg, 10) assert isinstance(spec, SpectralCoord) assert_allclose(spec.to_value(u.Hz), 20) coord, spec = wcs.array_index_to_world(44, 39, 29) assert isinstance(coord, SkyCoord) assert isinstance(coord.frame, Galactic) assert_allclose(coord.l.deg, 25) assert_allclose(coord.b.deg, 10) assert isinstance(spec, SpectralCoord) assert_allclose(spec.to_value(u.Hz), 20) coord = SkyCoord(25, 10, unit='deg', frame='galactic') spec = 20 * u.Hz with pytest.warns(AstropyUserWarning, match='No observer defined on WCS'): x, y, z = wcs.world_to_pixel(coord, spec) assert_allclose(x, 29.) assert_allclose(y, 39.) assert_allclose(z, 44.) 
# Order of world coordinates shouldn't matter with pytest.warns(AstropyUserWarning, match='No observer defined on WCS'): x, y, z = wcs.world_to_pixel(spec, coord) assert_allclose(x, 29.) assert_allclose(y, 39.) assert_allclose(z, 44.) with pytest.warns(AstropyUserWarning, match='No observer defined on WCS'): i, j, k = wcs.world_to_array_index(coord, spec) assert_equal(i, 44) assert_equal(j, 39) assert_equal(k, 29) # Order of world coordinates shouldn't matter with pytest.warns(AstropyUserWarning, match='No observer defined on WCS'): i, j, k = wcs.world_to_array_index(spec, coord) assert_equal(i, 44) assert_equal(j, 39) assert_equal(k, 29) HEADER_SPECTRAL_CUBE_NONALIGNED = HEADER_SPECTRAL_CUBE.strip() + '\n' + """ PC2_3 = -0.5 PC3_2 = +0.5 """ with warnings.catch_warnings(): warnings.simplefilter('ignore', VerifyWarning) WCS_SPECTRAL_CUBE_NONALIGNED = WCS(Header.fromstring( HEADER_SPECTRAL_CUBE_NONALIGNED, sep='\n')) def test_spectral_cube_nonaligned(): # Make sure that correlation matrix gets adjusted if there are non-identity # CD matrix terms. wcs = WCS_SPECTRAL_CUBE_NONALIGNED assert wcs.world_axis_physical_types == ['pos.galactic.lat', 'em.freq', 'pos.galactic.lon'] assert wcs.world_axis_units == ['deg', 'Hz', 'deg'] assert wcs.pixel_axis_names == ['', '', ''] assert wcs.world_axis_names == ['Latitude', 'Frequency', 'Longitude'] assert_equal(wcs.axis_correlation_matrix, [[True, True, True], [False, True, True], [True, True, True]]) # NOTE: we check world_axis_object_components and world_axis_object_classes # again here because in the past this failed when non-aligned axes were # present, so this serves as a regression test. 
assert len(wcs.world_axis_object_components) == 3 assert wcs.world_axis_object_components[0] == ('celestial', 1, 'spherical.lat.degree') assert wcs.world_axis_object_components[1][:2] == ('spectral', 0) assert wcs.world_axis_object_components[2] == ('celestial', 0, 'spherical.lon.degree') assert wcs.world_axis_object_classes['celestial'][0] is SkyCoord assert wcs.world_axis_object_classes['celestial'][1] == () assert isinstance(wcs.world_axis_object_classes['celestial'][2]['frame'], Galactic) assert wcs.world_axis_object_classes['celestial'][2]['unit'] is u.deg assert wcs.world_axis_object_classes['spectral'][0] is Quantity assert wcs.world_axis_object_classes['spectral'][1] == () assert wcs.world_axis_object_classes['spectral'][2] == {} ############################################################################### # The following example is from Rots et al (2015), Table 5. It represents a # cube with two spatial dimensions and one time dimension ############################################################################### HEADER_TIME_CUBE = """ SIMPLE = T / Fits standard BITPIX = -32 / Bits per pixel NAXIS = 3 / Number of axes NAXIS1 = 2048 / Axis length NAXIS2 = 2048 / Axis length NAXIS3 = 11 / Axis length DATE = '2008-10-28T14:39:06' / Date FITS file was generated OBJECT = '2008 TC3' / Name of the object observed EXPTIME = 1.0011 / Integration time MJD-OBS = 54746.02749237 / Obs start DATE-OBS= '2008-10-07T00:39:35.3342' / Observing date TELESCOP= 'VISTA' / ESO Telescope Name INSTRUME= 'VIRCAM' / Instrument used. 
TIMESYS = 'UTC' / From Observatory Time System TREFPOS = 'TOPOCENT' / Topocentric MJDREF = 54746.0 / Time reference point in MJD RADESYS = 'ICRS' / Not equinoctal CTYPE2 = 'RA---ZPN' / Zenithal Polynomial Projection CRVAL2 = 2.01824372640628 / RA at ref pixel CUNIT2 = 'deg' / Angles are degrees always CRPIX2 = 2956.6 / Pixel coordinate at ref point CTYPE1 = 'DEC--ZPN' / Zenithal Polynomial Projection CRVAL1 = 14.8289418840003 / Dec at ref pixel CUNIT1 = 'deg' / Angles are degrees always CRPIX1 = -448.2 / Pixel coordinate at ref point CTYPE3 = 'UTC' / linear time (UTC) CRVAL3 = 2375.341 / Relative time of first frame CUNIT3 = 's' / Time unit CRPIX3 = 1.0 / Pixel coordinate at ref point CTYPE3A = 'TT' / alternative linear time (TT) CRVAL3A = 2440.525 / Relative time of first frame CUNIT3A = 's' / Time unit CRPIX3A = 1.0 / Pixel coordinate at ref point OBSGEO-B= -24.6157 / [deg] Tel geodetic latitute (=North)+ OBSGEO-L= -70.3976 / [deg] Tel geodetic longitude (=East)+ OBSGEO-H= 2530.0000 / [m] Tel height above reference ellipsoid CRDER3 = 0.0819 / random error in timings from fit CSYER3 = 0.0100 / absolute time error PC1_1 = 0.999999971570892 / WCS transform matrix element PC1_2 = 0.000238449608932 / WCS transform matrix element PC2_1 = -0.000621542859395 / WCS transform matrix element PC2_2 = 0.999999806842218 / WCS transform matrix element CDELT1 = -9.48575432499806E-5 / Axis scale at reference point CDELT2 = 9.48683176211164E-5 / Axis scale at reference point CDELT3 = 13.3629 / Axis scale at reference point PV1_1 = 1. / ZPN linear term PV1_3 = 42. 
/ ZPN cubic term """ with warnings.catch_warnings(): warnings.simplefilter('ignore', (VerifyWarning, FITSFixedWarning)) WCS_TIME_CUBE = WCS(Header.fromstring(HEADER_TIME_CUBE, sep='\n')) def test_time_cube(): # Spectral cube with a weird axis ordering wcs = WCS_TIME_CUBE assert wcs.pixel_n_dim == 3 assert wcs.world_n_dim == 3 assert wcs.array_shape == (11, 2048, 2048) assert wcs.pixel_shape == (2048, 2048, 11) assert wcs.world_axis_physical_types == ['pos.eq.dec', 'pos.eq.ra', 'time'] assert wcs.world_axis_units == ['deg', 'deg', 's'] assert wcs.pixel_axis_names == ['', '', ''] assert wcs.world_axis_names == ['', '', ''] assert_equal(wcs.axis_correlation_matrix, [[True, True, False], [True, True, False], [False, False, True]]) components = wcs.world_axis_object_components assert components[0] == ('celestial', 1, 'spherical.lat.degree') assert components[1] == ('celestial', 0, 'spherical.lon.degree') assert components[2][:2] == ('time', 0) assert callable(components[2][2]) assert wcs.world_axis_object_classes['celestial'][0] is SkyCoord assert wcs.world_axis_object_classes['celestial'][1] == () assert isinstance(wcs.world_axis_object_classes['celestial'][2]['frame'], ICRS) assert wcs.world_axis_object_classes['celestial'][2]['unit'] is u.deg assert wcs.world_axis_object_classes['time'][0] is Time assert wcs.world_axis_object_classes['time'][1] == () assert wcs.world_axis_object_classes['time'][2] == {} assert callable(wcs.world_axis_object_classes['time'][3]) assert_allclose(wcs.pixel_to_world_values(-449.2, 2955.6, 0), (14.8289418840003, 2.01824372640628, 2375.341)) assert_allclose(wcs.array_index_to_world_values(0, 2955.6, -449.2), (14.8289418840003, 2.01824372640628, 2375.341)) assert_allclose(wcs.world_to_pixel_values(14.8289418840003, 2.01824372640628, 2375.341), (-449.2, 2955.6, 0)) assert_equal(wcs.world_to_array_index_values(14.8289418840003, 2.01824372640628, 2375.341), (0, 2956, -449)) # High-level API coord, time = wcs.pixel_to_world(29, 39, 44) assert 
isinstance(coord, SkyCoord) assert isinstance(coord.frame, ICRS) assert_allclose(coord.ra.deg, 1.7323356692202325) assert_allclose(coord.dec.deg, 14.783516054817797) assert isinstance(time, Time) assert_allclose(time.mjd, 54746.03429755324) coord, time = wcs.array_index_to_world(44, 39, 29) assert isinstance(coord, SkyCoord) assert isinstance(coord.frame, ICRS) assert_allclose(coord.ra.deg, 1.7323356692202325) assert_allclose(coord.dec.deg, 14.783516054817797) assert isinstance(time, Time) assert_allclose(time.mjd, 54746.03429755324) x, y, z = wcs.world_to_pixel(coord, time) assert_allclose(x, 29.) assert_allclose(y, 39.) assert_allclose(z, 44.) # Order of world coordinates shouldn't matter x, y, z = wcs.world_to_pixel(time, coord) assert_allclose(x, 29.) assert_allclose(y, 39.) assert_allclose(z, 44.) i, j, k = wcs.world_to_array_index(coord, time) assert_equal(i, 44) assert_equal(j, 39) assert_equal(k, 29) # Order of world coordinates shouldn't matter i, j, k = wcs.world_to_array_index(time, coord) assert_equal(i, 44) assert_equal(j, 39) assert_equal(k, 29) ############################################################################### # The following tests are to make sure that Time objects are constructed # correctly for a variety of combinations of WCS keywords ############################################################################### HEADER_TIME_1D = """ SIMPLE = T BITPIX = -32 NAXIS = 1 NAXIS1 = 2048 TIMESYS = 'UTC' TREFPOS = 'TOPOCENT' MJDREF = 50002.6 CTYPE1 = 'UTC' CRVAL1 = 5 CUNIT1 = 's' CRPIX1 = 1.0 CDELT1 = 2 OBSGEO-L= -20 OBSGEO-B= -70 OBSGEO-H= 2530 """ if Version(wcsver) >= Version('7.1'): HEADER_TIME_1D += "DATEREF = '1995-10-12T14:24:00'\n" @pytest.fixture def header_time_1d(): return Header.fromstring(HEADER_TIME_1D, sep='\n') def assert_time_at(header, position, jd1, jd2, scale, format): with warnings.catch_warnings(): warnings.simplefilter('ignore', FITSFixedWarning) wcs = WCS(header) time = wcs.pixel_to_world(position) 
assert_allclose(time.jd1, jd1, rtol=1e-10) assert_allclose(time.jd2, jd2, rtol=1e-10) assert time.format == format assert time.scale == scale @pytest.mark.parametrize('scale', ('tai', 'tcb', 'tcg', 'tdb', 'tt', 'ut1', 'utc', 'local')) def test_time_1d_values(header_time_1d, scale): # Check that Time objects are instantiated with the correct values, # scales, and formats. header_time_1d['CTYPE1'] = scale.upper() assert_time_at(header_time_1d, 1, 2450003, 0.1 + 7 / 3600 / 24, scale, 'mjd') def test_time_1d_values_gps(header_time_1d): # Special treatment for GPS scale header_time_1d['CTYPE1'] = 'GPS' assert_time_at(header_time_1d, 1, 2450003, 0.1 + (7 + 19) / 3600 / 24, 'tai', 'mjd') def test_time_1d_values_deprecated(header_time_1d): # Deprecated (in FITS) scales header_time_1d['CTYPE1'] = 'TDT' assert_time_at(header_time_1d, 1, 2450003, 0.1 + 7 / 3600 / 24, 'tt', 'mjd') header_time_1d['CTYPE1'] = 'IAT' assert_time_at(header_time_1d, 1, 2450003, 0.1 + 7 / 3600 / 24, 'tai', 'mjd') header_time_1d['CTYPE1'] = 'GMT' assert_time_at(header_time_1d, 1, 2450003, 0.1 + 7 / 3600 / 24, 'utc', 'mjd') header_time_1d['CTYPE1'] = 'ET' assert_time_at(header_time_1d, 1, 2450003, 0.1 + 7 / 3600 / 24, 'tt', 'mjd') def test_time_1d_values_time(header_time_1d): header_time_1d['CTYPE1'] = 'TIME' assert_time_at(header_time_1d, 1, 2450003, 0.1 + 7 / 3600 / 24, 'utc', 'mjd') header_time_1d['TIMESYS'] = 'TAI' assert_time_at(header_time_1d, 1, 2450003, 0.1 + 7 / 3600 / 24, 'tai', 'mjd') @pytest.mark.remote_data @pytest.mark.parametrize('scale', ('tai', 'tcb', 'tcg', 'tdb', 'tt', 'ut1', 'utc')) def test_time_1d_roundtrip(header_time_1d, scale): # Check that coordinates round-trip pixel_in = np.arange(3, 10) header_time_1d['CTYPE1'] = scale.upper() with warnings.catch_warnings(): warnings.simplefilter('ignore', FITSFixedWarning) wcs = WCS(header_time_1d) # Simple test time = wcs.pixel_to_world(pixel_in) pixel_out = wcs.world_to_pixel(time) assert_allclose(pixel_in, pixel_out) # Test with an 
intermediate change to a different scale/format time = wcs.pixel_to_world(pixel_in).tdb time.format = 'isot' pixel_out = wcs.world_to_pixel(time) assert_allclose(pixel_in, pixel_out) def test_time_1d_high_precision(header_time_1d): # Case where the MJDREF is split into two for high precision del header_time_1d['MJDREF'] header_time_1d['MJDREFI'] = 52000. header_time_1d['MJDREFF'] = 1e-11 with warnings.catch_warnings(): warnings.simplefilter('ignore', FITSFixedWarning) wcs = WCS(header_time_1d) time = wcs.pixel_to_world(10) # Here we have to use a very small rtol to really test that MJDREFF is # taken into account assert_allclose(time.jd1, 2452001.0, rtol=1e-12) assert_allclose(time.jd2, -0.5 + 25 / 3600 / 24 + 1e-11, rtol=1e-13) def test_time_1d_location_geodetic(header_time_1d): # Make sure that the location is correctly returned (geodetic case) with warnings.catch_warnings(): warnings.simplefilter('ignore', FITSFixedWarning) wcs = WCS(header_time_1d) time = wcs.pixel_to_world(10) lon, lat, alt = time.location.to_geodetic() # FIXME: alt won't work for now because ERFA doesn't implement the IAU 1976 # ellipsoid (https://github.com/astropy/astropy/issues/9420) assert_allclose(lon.degree, -20) assert_allclose(lat.degree, -70) # assert_allclose(alt.to_value(u.m), 2530.) 
@pytest.fixture def header_time_1d_no_obs(): header = Header.fromstring(HEADER_TIME_1D, sep='\n') del header['OBSGEO-L'] del header['OBSGEO-B'] del header['OBSGEO-H'] return header def test_time_1d_location_geocentric(header_time_1d_no_obs): # Make sure that the location is correctly returned (geocentric case) header = header_time_1d_no_obs header['OBSGEO-X'] = 10 header['OBSGEO-Y'] = -20 header['OBSGEO-Z'] = 30 with warnings.catch_warnings(): warnings.simplefilter('ignore', FITSFixedWarning) wcs = WCS(header) time = wcs.pixel_to_world(10) x, y, z = time.location.to_geocentric() assert_allclose(x.to_value(u.m), 10) assert_allclose(y.to_value(u.m), -20) assert_allclose(z.to_value(u.m), 30) def test_time_1d_location_geocenter(header_time_1d_no_obs): header_time_1d_no_obs['TREFPOS'] = 'GEOCENTER' wcs = WCS(header_time_1d_no_obs) time = wcs.pixel_to_world(10) x, y, z = time.location.to_geocentric() assert_allclose(x.to_value(u.m), 0) assert_allclose(y.to_value(u.m), 0) assert_allclose(z.to_value(u.m), 0) def test_time_1d_location_missing(header_time_1d_no_obs): # Check what happens when no location is present wcs = WCS(header_time_1d_no_obs) with pytest.warns(UserWarning, match='Missing or incomplete observer location ' 'information, setting location in Time to None'): time = wcs.pixel_to_world(10) assert time.location is None def test_time_1d_location_incomplete(header_time_1d_no_obs): # Check what happens when location information is incomplete header_time_1d_no_obs['OBSGEO-L'] = 10. 
with warnings.catch_warnings(): warnings.simplefilter('ignore', FITSFixedWarning) wcs = WCS(header_time_1d_no_obs) with pytest.warns(UserWarning, match='Missing or incomplete observer location ' 'information, setting location in Time to None'): time = wcs.pixel_to_world(10) assert time.location is None def test_time_1d_location_unsupported(header_time_1d_no_obs): # Check what happens when TREFPOS is unsupported header_time_1d_no_obs['TREFPOS'] = 'BARYCENTER' wcs = WCS(header_time_1d_no_obs) with pytest.warns(UserWarning, match="Observation location 'barycenter' is not " "supported, setting location in Time to None"): time = wcs.pixel_to_world(10) assert time.location is None def test_time_1d_unsupported_ctype(header_time_1d_no_obs): # For cases that we don't support yet, e.g. UT(...), use Time and drop sub-scale # Case where the MJDREF is split into two for high precision header_time_1d_no_obs['CTYPE1'] = 'UT(WWV)' wcs = WCS(header_time_1d_no_obs) with pytest.warns(UserWarning, match="Dropping unsupported sub-scale WWV from scale UT"): time = wcs.pixel_to_world(10) assert isinstance(time, Time) ############################################################################### # Extra corner cases ############################################################################### def test_unrecognized_unit(): # TODO: Determine whether the following behavior is desirable wcs = WCS(naxis=1) with pytest.warns(UnitsWarning): wcs.wcs.cunit = ['bananas // sekonds'] assert wcs.world_axis_units == ['bananas // sekonds'] def test_distortion_correlations(): filename = get_pkg_data_filename('../../tests/data/sip.fits') with pytest.warns(FITSFixedWarning): w = WCS(filename) assert_equal(w.axis_correlation_matrix, True) # Changing PC to an identity matrix doesn't change anything since # distortions are still present. 
w.wcs.pc = [[1, 0], [0, 1]] assert_equal(w.axis_correlation_matrix, True) # Nor does changing the name of the axes to make them non-celestial w.wcs.ctype = ['X', 'Y'] assert_equal(w.axis_correlation_matrix, True) # However once we turn off the distortions the matrix changes w.sip = None assert_equal(w.axis_correlation_matrix, [[True, False], [False, True]]) # If we go back to celestial coordinates then the matrix is all True again w.wcs.ctype = ['RA---TAN', 'DEC--TAN'] assert_equal(w.axis_correlation_matrix, True) # Or if we change to X/Y but have a non-identity PC w.wcs.pc = [[0.9, -0.1], [0.1, 0.9]] w.wcs.ctype = ['X', 'Y'] assert_equal(w.axis_correlation_matrix, True) def test_custom_ctype_to_ucd_mappings(): wcs = WCS(naxis=1) wcs.wcs.ctype = ['SPAM'] assert wcs.world_axis_physical_types == [None] # Check simple behavior with custom_ctype_to_ucd_mapping({'APPLE': 'food.fruit'}): assert wcs.world_axis_physical_types == [None] with custom_ctype_to_ucd_mapping({'APPLE': 'food.fruit', 'SPAM': 'food.spam'}): assert wcs.world_axis_physical_types == ['food.spam'] # Check nesting with custom_ctype_to_ucd_mapping({'SPAM': 'food.spam'}): with custom_ctype_to_ucd_mapping({'APPLE': 'food.fruit'}): assert wcs.world_axis_physical_types == ['food.spam'] with custom_ctype_to_ucd_mapping({'APPLE': 'food.fruit'}): with custom_ctype_to_ucd_mapping({'SPAM': 'food.spam'}): assert wcs.world_axis_physical_types == ['food.spam'] # Check priority in nesting with custom_ctype_to_ucd_mapping({'SPAM': 'notfood'}): with custom_ctype_to_ucd_mapping({'SPAM': 'food.spam'}): assert wcs.world_axis_physical_types == ['food.spam'] with custom_ctype_to_ucd_mapping({'SPAM': 'food.spam'}): with custom_ctype_to_ucd_mapping({'SPAM': 'notfood'}): assert wcs.world_axis_physical_types == ['notfood'] def test_caching_components_and_classes(): # Make sure that when we change the WCS object, the classes and components # are updated (we use a cache internally, so we need to make sure the cache # is 
invalidated if needed) wcs = WCS_SIMPLE_CELESTIAL.deepcopy() assert wcs.world_axis_object_components == [('celestial', 0, 'spherical.lon.degree'), ('celestial', 1, 'spherical.lat.degree')] assert wcs.world_axis_object_classes['celestial'][0] is SkyCoord assert wcs.world_axis_object_classes['celestial'][1] == () assert isinstance(wcs.world_axis_object_classes['celestial'][2]['frame'], ICRS) assert wcs.world_axis_object_classes['celestial'][2]['unit'] is u.deg wcs.wcs.radesys = 'FK5' frame = wcs.world_axis_object_classes['celestial'][2]['frame'] assert isinstance(frame, FK5) assert frame.equinox.jyear == 2000. wcs.wcs.equinox = 2010 frame = wcs.world_axis_object_classes['celestial'][2]['frame'] assert isinstance(frame, FK5) assert frame.equinox.jyear == 2010. def test_sub_wcsapi_attributes(): # Regression test for a bug that caused some of the WCS attributes to be # incorrect when using WCS.sub or WCS.celestial (which is an alias for sub # with lon/lat types). wcs = WCS_SPECTRAL_CUBE.deepcopy() wcs.pixel_shape = (30, 40, 50) wcs.pixel_bounds = [(-1, 11), (-2, 18), (5, 15)] # Use celestial shortcut wcs_sub1 = wcs.celestial assert wcs_sub1.pixel_n_dim == 2 assert wcs_sub1.world_n_dim == 2 assert wcs_sub1.array_shape == (50, 30) assert wcs_sub1.pixel_shape == (30, 50) assert wcs_sub1.pixel_bounds == [(-1, 11), (5, 15)] assert wcs_sub1.world_axis_physical_types == ['pos.galactic.lat', 'pos.galactic.lon'] assert wcs_sub1.world_axis_units == ['deg', 'deg'] assert wcs_sub1.world_axis_names == ['Latitude', 'Longitude'] # Try adding axes wcs_sub2 = wcs.sub([0, 2, 0]) assert wcs_sub2.pixel_n_dim == 3 assert wcs_sub2.world_n_dim == 3 assert wcs_sub2.array_shape == (None, 40, None) assert wcs_sub2.pixel_shape == (None, 40, None) assert wcs_sub2.pixel_bounds == [None, (-2, 18), None] assert wcs_sub2.world_axis_physical_types == [None, 'em.freq', None] assert wcs_sub2.world_axis_units == ['', 'Hz', ''] assert wcs_sub2.world_axis_names == ['', 'Frequency', ''] # Use strings 
wcs_sub3 = wcs.sub(['longitude', 'latitude']) assert wcs_sub3.pixel_n_dim == 2 assert wcs_sub3.world_n_dim == 2 assert wcs_sub3.array_shape == (30, 50) assert wcs_sub3.pixel_shape == (50, 30) assert wcs_sub3.pixel_bounds == [(5, 15), (-1, 11)] assert wcs_sub3.world_axis_physical_types == ['pos.galactic.lon', 'pos.galactic.lat'] assert wcs_sub3.world_axis_units == ['deg', 'deg'] assert wcs_sub3.world_axis_names == ['Longitude', 'Latitude'] # Now try without CNAME set wcs.wcs.cname = [''] * wcs.wcs.naxis wcs_sub4 = wcs.sub(['longitude', 'latitude']) assert wcs_sub4.pixel_n_dim == 2 assert wcs_sub4.world_n_dim == 2 assert wcs_sub4.array_shape == (30, 50) assert wcs_sub4.pixel_shape == (50, 30) assert wcs_sub4.pixel_bounds == [(5, 15), (-1, 11)] assert wcs_sub4.world_axis_physical_types == ['pos.galactic.lon', 'pos.galactic.lat'] assert wcs_sub4.world_axis_units == ['deg', 'deg'] assert wcs_sub4.world_axis_names == ['', ''] HEADER_POLARIZED = """ CTYPE1 = 'HPLT-TAN' CTYPE2 = 'HPLN-TAN' CTYPE3 = 'STOKES' """ @pytest.fixture def header_polarized(): return Header.fromstring(HEADER_POLARIZED, sep='\n') def test_phys_type_polarization(header_polarized): w = WCS(header_polarized) assert w.world_axis_physical_types[2] == 'phys.polarization.stokes' ############################################################################### # Spectral transformations ############################################################################### HEADER_SPECTRAL_FRAMES = """ BUNIT = 'Jy/beam' EQUINOX = 2.000000000E+03 CTYPE1 = 'RA---SIN' CRVAL1 = 2.60108333333E+02 CDELT1 = -2.777777845E-04 CRPIX1 = 1.0 CUNIT1 = 'deg' CTYPE2 = 'DEC--SIN' CRVAL2 = -9.75000000000E-01 CDELT2 = 2.777777845E-04 CRPIX2 = 1.0 CUNIT2 = 'deg' CTYPE3 = 'FREQ' CRVAL3 = 1.37835117405E+09 CDELT3 = 9.765625000E+04 CRPIX3 = 32.0 CUNIT3 = 'Hz' SPECSYS = 'TOPOCENT' RESTFRQ = 1.420405752E+09 / [Hz] RADESYS = 'FK5' """ @pytest.fixture def header_spectral_frames(): return Header.fromstring(HEADER_SPECTRAL_FRAMES, sep='\n') def 
test_spectralcoord_frame(header_spectral_frames): # This is a test to check the numerical results of transformations between # different velocity frames. We simply make sure that the returned # SpectralCoords are in the right frame but don't check the transformations # since this is already done in test_spectralcoord_accuracy # in astropy.coordinates. with iers.conf.set_temp('auto_download', False): obstime = Time(f"2009-05-04T04:44:23", scale='utc') header = header_spectral_frames.copy() header['MJD-OBS'] = obstime.mjd header['CRVAL1'] = 16.33211 header['CRVAL2'] = -34.2221 header['OBSGEO-L'] = 144.2 header['OBSGEO-B'] = -20.2 header['OBSGEO-H'] = 0. # We start off with a WCS defined in topocentric frequency with pytest.warns(FITSFixedWarning): wcs_topo = WCS(header) # We convert a single pixel coordinate to world coordinates and keep only # the second high level object - a SpectralCoord: sc_topo = wcs_topo.pixel_to_world(0, 0, 31)[1] # We check that this is in topocentric frame with zero velocities assert isinstance(sc_topo, SpectralCoord) assert isinstance(sc_topo.observer, ITRS) assert sc_topo.observer.obstime.isot == obstime.isot assert_equal(sc_topo.observer.data.differentials['s'].d_xyz.value, 0) observatory = EarthLocation.from_geodetic(144.2, -20.2).get_itrs(obstime=obstime).transform_to(ICRS()) assert observatory.separation_3d(sc_topo.observer.transform_to(ICRS())) < 1 * u.km for specsys, expected_frame in VELOCITY_FRAMES.items(): header['SPECSYS'] = specsys with pytest.warns(FITSFixedWarning): wcs = WCS(header) sc = wcs.pixel_to_world(0, 0, 31)[1] # Now transform to the expected velocity frame, which should leave # the spectral coordinate unchanged sc_check = sc.with_observer_stationary_relative_to(expected_frame) assert_quantity_allclose(sc.quantity, sc_check.quantity) @pytest.mark.parametrize(('ctype3', 'observer'), product(['ZOPT', 'BETA', 'VELO', 'VRAD', 'VOPT'], [False, True])) def test_different_ctypes(header_spectral_frames, ctype3, observer): 
header = header_spectral_frames.copy() header['CTYPE3'] = ctype3 header['CRVAL3'] = 0.1 header['CDELT3'] = 0.001 if ctype3[0] == 'V': header['CUNIT3'] = 'm s-1' else: header['CUNIT3'] = '' header['RESTWAV'] = 1.420405752E+09 header['MJD-OBS'] = 55197 if observer: header['OBSGEO-L'] = 144.2 header['OBSGEO-B'] = -20.2 header['OBSGEO-H'] = 0. header['SPECSYS'] = 'BARYCENT' with warnings.catch_warnings(): warnings.simplefilter('ignore', FITSFixedWarning) wcs = WCS(header) skycoord, spectralcoord = wcs.pixel_to_world(0, 0, 31) assert isinstance(spectralcoord, SpectralCoord) if observer: pix = wcs.world_to_pixel(skycoord, spectralcoord) else: with pytest.warns(AstropyUserWarning, match='No observer defined on WCS'): pix = wcs.world_to_pixel(skycoord, spectralcoord) assert_allclose(pix, [0, 0, 31], rtol=1e-6)
pllim/astropy
astropy/wcs/wcsapi/tests/test_fitswcs.py
astropy/io/votable/table.py
import numbers from collections import defaultdict import numpy as np from astropy.utils import isiterable from astropy.utils.decorators import lazyproperty from ..low_level_api import BaseLowLevelWCS from .base import BaseWCSWrapper __all__ = ['sanitize_slices', 'SlicedLowLevelWCS'] def sanitize_slices(slices, ndim): """ Given a slice as input sanitise it to an easier to parse format.format This function returns a list ``ndim`` long containing slice objects (or ints). """ if not isinstance(slices, (tuple, list)): # We just have a single int slices = (slices,) if len(slices) > ndim: raise ValueError( f"The dimensionality of the specified slice {slices} can not be greater " f"than the dimensionality ({ndim}) of the wcs.") if any((isiterable(s) for s in slices)): raise IndexError("This slice is invalid, only integer or range slices are supported.") slices = list(slices) if Ellipsis in slices: if slices.count(Ellipsis) > 1: raise IndexError("an index can only have a single ellipsis ('...')") # Replace the Ellipsis with the correct number of slice(None)s e_ind = slices.index(Ellipsis) slices.remove(Ellipsis) n_e = ndim - len(slices) for i in range(n_e): ind = e_ind + i slices.insert(ind, slice(None)) for i in range(ndim): if i < len(slices): slc = slices[i] if isinstance(slc, slice): if slc.step and slc.step != 1: raise IndexError("Slicing WCS with a step is not supported.") elif not isinstance(slc, numbers.Integral): raise IndexError("Only integer or range slices are accepted.") else: slices.append(slice(None)) return slices def combine_slices(slice1, slice2): """ Given two slices that can be applied to a 1-d array, find the resulting slice that corresponds to the combination of both slices. We assume that slice2 can be an integer, but slice1 cannot. 
""" if isinstance(slice1, slice) and slice1.step is not None: raise ValueError('Only slices with steps of 1 are supported') if isinstance(slice2, slice) and slice2.step is not None: raise ValueError('Only slices with steps of 1 are supported') if isinstance(slice2, numbers.Integral): if slice1.start is None: return slice2 else: return slice2 + slice1.start if slice1.start is None: if slice1.stop is None: return slice2 else: if slice2.stop is None: return slice(slice2.start, slice1.stop) else: return slice(slice2.start, min(slice1.stop, slice2.stop)) else: if slice2.start is None: start = slice1.start else: start = slice1.start + slice2.start if slice2.stop is None: stop = slice1.stop else: if slice1.start is None: stop = slice2.stop else: stop = slice2.stop + slice1.start if slice1.stop is not None: stop = min(slice1.stop, stop) return slice(start, stop) class SlicedLowLevelWCS(BaseWCSWrapper): """ A Low Level WCS wrapper which applies an array slice to a WCS. This class does not modify the underlying WCS object and can therefore drop coupled dimensions as it stores which pixel and world dimensions have been sliced out (or modified) in the underlying WCS and returns the modified results on all the Low Level WCS methods. Parameters ---------- wcs : `~astropy.wcs.wcsapi.BaseLowLevelWCS` The WCS to slice. slices : `slice` or `tuple` or `int` A valid array slice to apply to the WCS. 
""" def __init__(self, wcs, slices): slices = sanitize_slices(slices, wcs.pixel_n_dim) if isinstance(wcs, SlicedLowLevelWCS): # Here we combine the current slices with the previous slices # to avoid ending up with many nested WCSes self._wcs = wcs._wcs slices_original = wcs._slices_array.copy() for ipixel in range(wcs.pixel_n_dim): ipixel_orig = wcs._wcs.pixel_n_dim - 1 - wcs._pixel_keep[ipixel] ipixel_new = wcs.pixel_n_dim - 1 - ipixel slices_original[ipixel_orig] = combine_slices(slices_original[ipixel_orig], slices[ipixel_new]) self._slices_array = slices_original else: self._wcs = wcs self._slices_array = slices self._slices_pixel = self._slices_array[::-1] # figure out which pixel dimensions have been kept, then use axis correlation # matrix to figure out which world dims are kept self._pixel_keep = np.nonzero([not isinstance(self._slices_pixel[ip], numbers.Integral) for ip in range(self._wcs.pixel_n_dim)])[0] # axis_correlation_matrix[world, pixel] self._world_keep = np.nonzero( self._wcs.axis_correlation_matrix[:, self._pixel_keep].any(axis=1))[0] if len(self._pixel_keep) == 0 or len(self._world_keep) == 0: raise ValueError("Cannot slice WCS: the resulting WCS should have " "at least one pixel and one world dimension.") @lazyproperty def dropped_world_dimensions(self): """ Information describing the dropped world dimensions. 
""" world_coords = self._pixel_to_world_values_all(*[0]*len(self._pixel_keep)) dropped_info = defaultdict(list) for i in range(self._wcs.world_n_dim): if i in self._world_keep: continue if "world_axis_object_classes" not in dropped_info: dropped_info["world_axis_object_classes"] = dict() wao_classes = self._wcs.world_axis_object_classes wao_components = self._wcs.world_axis_object_components dropped_info["value"].append(world_coords[i]) dropped_info["world_axis_names"].append(self._wcs.world_axis_names[i]) dropped_info["world_axis_physical_types"].append(self._wcs.world_axis_physical_types[i]) dropped_info["world_axis_units"].append(self._wcs.world_axis_units[i]) dropped_info["world_axis_object_components"].append(wao_components[i]) dropped_info["world_axis_object_classes"].update(dict( filter( lambda x: x[0] == wao_components[i][0], wao_classes.items() ) )) dropped_info["serialized_classes"] = self.serialized_classes return dict(dropped_info) @property def pixel_n_dim(self): return len(self._pixel_keep) @property def world_n_dim(self): return len(self._world_keep) @property def world_axis_physical_types(self): return [self._wcs.world_axis_physical_types[i] for i in self._world_keep] @property def world_axis_units(self): return [self._wcs.world_axis_units[i] for i in self._world_keep] @property def pixel_axis_names(self): return [self._wcs.pixel_axis_names[i] for i in self._pixel_keep] @property def world_axis_names(self): return [self._wcs.world_axis_names[i] for i in self._world_keep] def _pixel_to_world_values_all(self, *pixel_arrays): pixel_arrays = tuple(map(np.asanyarray, pixel_arrays)) pixel_arrays_new = [] ipix_curr = -1 for ipix in range(self._wcs.pixel_n_dim): if isinstance(self._slices_pixel[ipix], int): pixel_arrays_new.append(self._slices_pixel[ipix]) else: ipix_curr += 1 if self._slices_pixel[ipix].start is not None: pixel_arrays_new.append(pixel_arrays[ipix_curr] + self._slices_pixel[ipix].start) else: pixel_arrays_new.append(pixel_arrays[ipix_curr]) 
pixel_arrays_new = np.broadcast_arrays(*pixel_arrays_new) return self._wcs.pixel_to_world_values(*pixel_arrays_new) def pixel_to_world_values(self, *pixel_arrays): world_arrays = self._pixel_to_world_values_all(*pixel_arrays) # Detect the case of a length 0 array if isinstance(world_arrays, np.ndarray) and not world_arrays.shape: return world_arrays if self._wcs.world_n_dim > 1: # Select the dimensions of the original WCS we are keeping. world_arrays = [world_arrays[iw] for iw in self._world_keep] # If there is only one world dimension (after slicing) we shouldn't return a tuple. if self.world_n_dim == 1: world_arrays = world_arrays[0] return world_arrays def world_to_pixel_values(self, *world_arrays): world_arrays = tuple(map(np.asanyarray, world_arrays)) world_arrays_new = [] iworld_curr = -1 for iworld in range(self._wcs.world_n_dim): if iworld in self._world_keep: iworld_curr += 1 world_arrays_new.append(world_arrays[iworld_curr]) else: world_arrays_new.append(1.) world_arrays_new = np.broadcast_arrays(*world_arrays_new) pixel_arrays = list(self._wcs.world_to_pixel_values(*world_arrays_new)) for ipixel in range(self._wcs.pixel_n_dim): if isinstance(self._slices_pixel[ipixel], slice) and self._slices_pixel[ipixel].start is not None: pixel_arrays[ipixel] -= self._slices_pixel[ipixel].start # Detect the case of a length 0 array if isinstance(pixel_arrays, np.ndarray) and not pixel_arrays.shape: return pixel_arrays pixel = tuple(pixel_arrays[ip] for ip in self._pixel_keep) if self.pixel_n_dim == 1 and self._wcs.pixel_n_dim > 1: pixel = pixel[0] return pixel @property def world_axis_object_components(self): return [self._wcs.world_axis_object_components[idx] for idx in self._world_keep] @property def world_axis_object_classes(self): keys_keep = [item[0] for item in self.world_axis_object_components] return dict([item for item in self._wcs.world_axis_object_classes.items() if item[0] in keys_keep]) @property def array_shape(self): if self._wcs.array_shape: return 
np.broadcast_to(0, self._wcs.array_shape)[tuple(self._slices_array)].shape @property def pixel_shape(self): if self.array_shape: return tuple(self.array_shape[::-1]) @property def pixel_bounds(self): if self._wcs.pixel_bounds is None: return bounds = [] for idx in self._pixel_keep: if self._slices_pixel[idx].start is None: bounds.append(self._wcs.pixel_bounds[idx]) else: imin, imax = self._wcs.pixel_bounds[idx] start = self._slices_pixel[idx].start bounds.append((imin - start, imax - start)) return tuple(bounds) @property def axis_correlation_matrix(self): return self._wcs.axis_correlation_matrix[self._world_keep][:, self._pixel_keep]
# Note that we test the main astropy.wcs.WCS class directly rather than testing # the mix-in class on its own (since it's not functional without being used as # a mix-in) import warnings from packaging.version import Version import numpy as np import pytest from numpy.testing import assert_equal, assert_allclose from itertools import product from astropy import units as u from astropy.time import Time from astropy.tests.helper import assert_quantity_allclose from astropy.units import Quantity from astropy.coordinates import ICRS, FK5, Galactic, SkyCoord, SpectralCoord, ITRS, EarthLocation from astropy.io.fits import Header from astropy.io.fits.verify import VerifyWarning from astropy.units.core import UnitsWarning from astropy.utils.data import get_pkg_data_filename from astropy.wcs.wcs import WCS, FITSFixedWarning from astropy.wcs.wcsapi.fitswcs import custom_ctype_to_ucd_mapping, VELOCITY_FRAMES from astropy.wcs._wcs import __version__ as wcsver from astropy.utils import iers from astropy.utils.exceptions import AstropyUserWarning ############################################################################### # The following example is the simplest WCS with default values ############################################################################### WCS_EMPTY = WCS(naxis=1) WCS_EMPTY.wcs.crpix = [1] def test_empty(): wcs = WCS_EMPTY # Low-level API assert wcs.pixel_n_dim == 1 assert wcs.world_n_dim == 1 assert wcs.array_shape is None assert wcs.pixel_shape is None assert wcs.world_axis_physical_types == [None] assert wcs.world_axis_units == [''] assert wcs.pixel_axis_names == [''] assert wcs.world_axis_names == [''] assert_equal(wcs.axis_correlation_matrix, True) assert wcs.world_axis_object_components == [('world', 0, 'value')] assert wcs.world_axis_object_classes['world'][0] is Quantity assert wcs.world_axis_object_classes['world'][1] == () assert wcs.world_axis_object_classes['world'][2]['unit'] is u.one assert_allclose(wcs.pixel_to_world_values(29), 29) 
assert_allclose(wcs.array_index_to_world_values(29), 29) assert np.ndim(wcs.pixel_to_world_values(29)) == 0 assert np.ndim(wcs.array_index_to_world_values(29)) == 0 assert_allclose(wcs.world_to_pixel_values(29), 29) assert_equal(wcs.world_to_array_index_values(29), (29,)) assert np.ndim(wcs.world_to_pixel_values(29)) == 0 assert np.ndim(wcs.world_to_array_index_values(29)) == 0 # High-level API coord = wcs.pixel_to_world(29) assert_quantity_allclose(coord, 29 * u.one) assert np.ndim(coord) == 0 coord = wcs.array_index_to_world(29) assert_quantity_allclose(coord, 29 * u.one) assert np.ndim(coord) == 0 coord = 15 * u.one x = wcs.world_to_pixel(coord) assert_allclose(x, 15.) assert np.ndim(x) == 0 i = wcs.world_to_array_index(coord) assert_equal(i, 15) assert np.ndim(i) == 0 ############################################################################### # The following example is a simple 2D image with celestial coordinates ############################################################################### HEADER_SIMPLE_CELESTIAL = """ WCSAXES = 2 CTYPE1 = RA---TAN CTYPE2 = DEC--TAN CRVAL1 = 10 CRVAL2 = 20 CRPIX1 = 30 CRPIX2 = 40 CDELT1 = -0.1 CDELT2 = 0.1 CROTA2 = 0. 
CUNIT1 = deg CUNIT2 = deg """ with warnings.catch_warnings(): warnings.simplefilter('ignore', VerifyWarning) WCS_SIMPLE_CELESTIAL = WCS(Header.fromstring( HEADER_SIMPLE_CELESTIAL, sep='\n')) def test_simple_celestial(): wcs = WCS_SIMPLE_CELESTIAL # Low-level API assert wcs.pixel_n_dim == 2 assert wcs.world_n_dim == 2 assert wcs.array_shape is None assert wcs.pixel_shape is None assert wcs.world_axis_physical_types == ['pos.eq.ra', 'pos.eq.dec'] assert wcs.world_axis_units == ['deg', 'deg'] assert wcs.pixel_axis_names == ['', ''] assert wcs.world_axis_names == ['', ''] assert_equal(wcs.axis_correlation_matrix, True) assert wcs.world_axis_object_components == [('celestial', 0, 'spherical.lon.degree'), ('celestial', 1, 'spherical.lat.degree')] assert wcs.world_axis_object_classes['celestial'][0] is SkyCoord assert wcs.world_axis_object_classes['celestial'][1] == () assert isinstance(wcs.world_axis_object_classes['celestial'][2]['frame'], ICRS) assert wcs.world_axis_object_classes['celestial'][2]['unit'] is u.deg assert_allclose(wcs.pixel_to_world_values(29, 39), (10, 20)) assert_allclose(wcs.array_index_to_world_values(39, 29), (10, 20)) assert_allclose(wcs.world_to_pixel_values(10, 20), (29., 39.)) assert_equal(wcs.world_to_array_index_values(10, 20), (39, 29)) # High-level API coord = wcs.pixel_to_world(29, 39) assert isinstance(coord, SkyCoord) assert isinstance(coord.frame, ICRS) assert_allclose(coord.ra.deg, 10) assert_allclose(coord.dec.deg, 20) coord = wcs.array_index_to_world(39, 29) assert isinstance(coord, SkyCoord) assert isinstance(coord.frame, ICRS) assert_allclose(coord.ra.deg, 10) assert_allclose(coord.dec.deg, 20) coord = SkyCoord(10, 20, unit='deg', frame='icrs') x, y = wcs.world_to_pixel(coord) assert_allclose(x, 29.) assert_allclose(y, 39.) 
i, j = wcs.world_to_array_index(coord) assert_equal(i, 39) assert_equal(j, 29) # Check that if the coordinates are passed in a different frame things still # work properly coord_galactic = coord.galactic x, y = wcs.world_to_pixel(coord_galactic) assert_allclose(x, 29.) assert_allclose(y, 39.) i, j = wcs.world_to_array_index(coord_galactic) assert_equal(i, 39) assert_equal(j, 29) # Check that we can actually index the array data = np.arange(3600).reshape((60, 60)) coord = SkyCoord(10, 20, unit='deg', frame='icrs') index = wcs.world_to_array_index(coord) assert_equal(data[index], 2369) coord = SkyCoord([10, 12], [20, 22], unit='deg', frame='icrs') index = wcs.world_to_array_index(coord) assert_equal(data[index], [2369, 3550]) ############################################################################### # The following example is a spectral cube with axes in an unusual order ############################################################################### HEADER_SPECTRAL_CUBE = """ WCSAXES = 3 CTYPE1 = GLAT-CAR CTYPE2 = FREQ CTYPE3 = GLON-CAR CNAME1 = Latitude CNAME2 = Frequency CNAME3 = Longitude CRVAL1 = 10 CRVAL2 = 20 CRVAL3 = 25 CRPIX1 = 30 CRPIX2 = 40 CRPIX3 = 45 CDELT1 = -0.1 CDELT2 = 0.5 CDELT3 = 0.1 CUNIT1 = deg CUNIT2 = Hz CUNIT3 = deg """ with warnings.catch_warnings(): warnings.simplefilter('ignore', VerifyWarning) WCS_SPECTRAL_CUBE = WCS(Header.fromstring(HEADER_SPECTRAL_CUBE, sep='\n')) def test_spectral_cube(): # Spectral cube with a weird axis ordering wcs = WCS_SPECTRAL_CUBE # Low-level API assert wcs.pixel_n_dim == 3 assert wcs.world_n_dim == 3 assert wcs.array_shape is None assert wcs.pixel_shape is None assert wcs.world_axis_physical_types == ['pos.galactic.lat', 'em.freq', 'pos.galactic.lon'] assert wcs.world_axis_units == ['deg', 'Hz', 'deg'] assert wcs.pixel_axis_names == ['', '', ''] assert wcs.world_axis_names == ['Latitude', 'Frequency', 'Longitude'] assert_equal(wcs.axis_correlation_matrix, [[True, False, True], [False, True, False], [True, 
False, True]]) assert len(wcs.world_axis_object_components) == 3 assert wcs.world_axis_object_components[0] == ('celestial', 1, 'spherical.lat.degree') assert wcs.world_axis_object_components[1][:2] == ('spectral', 0) assert wcs.world_axis_object_components[2] == ('celestial', 0, 'spherical.lon.degree') assert wcs.world_axis_object_classes['celestial'][0] is SkyCoord assert wcs.world_axis_object_classes['celestial'][1] == () assert isinstance(wcs.world_axis_object_classes['celestial'][2]['frame'], Galactic) assert wcs.world_axis_object_classes['celestial'][2]['unit'] is u.deg assert wcs.world_axis_object_classes['spectral'][0] is Quantity assert wcs.world_axis_object_classes['spectral'][1] == () assert wcs.world_axis_object_classes['spectral'][2] == {} assert_allclose(wcs.pixel_to_world_values(29, 39, 44), (10, 20, 25)) assert_allclose(wcs.array_index_to_world_values(44, 39, 29), (10, 20, 25)) assert_allclose(wcs.world_to_pixel_values(10, 20, 25), (29., 39., 44.)) assert_equal(wcs.world_to_array_index_values(10, 20, 25), (44, 39, 29)) # High-level API coord, spec = wcs.pixel_to_world(29, 39, 44) assert isinstance(coord, SkyCoord) assert isinstance(coord.frame, Galactic) assert_allclose(coord.l.deg, 25) assert_allclose(coord.b.deg, 10) assert isinstance(spec, SpectralCoord) assert_allclose(spec.to_value(u.Hz), 20) coord, spec = wcs.array_index_to_world(44, 39, 29) assert isinstance(coord, SkyCoord) assert isinstance(coord.frame, Galactic) assert_allclose(coord.l.deg, 25) assert_allclose(coord.b.deg, 10) assert isinstance(spec, SpectralCoord) assert_allclose(spec.to_value(u.Hz), 20) coord = SkyCoord(25, 10, unit='deg', frame='galactic') spec = 20 * u.Hz with pytest.warns(AstropyUserWarning, match='No observer defined on WCS'): x, y, z = wcs.world_to_pixel(coord, spec) assert_allclose(x, 29.) assert_allclose(y, 39.) assert_allclose(z, 44.) 
# Order of world coordinates shouldn't matter with pytest.warns(AstropyUserWarning, match='No observer defined on WCS'): x, y, z = wcs.world_to_pixel(spec, coord) assert_allclose(x, 29.) assert_allclose(y, 39.) assert_allclose(z, 44.) with pytest.warns(AstropyUserWarning, match='No observer defined on WCS'): i, j, k = wcs.world_to_array_index(coord, spec) assert_equal(i, 44) assert_equal(j, 39) assert_equal(k, 29) # Order of world coordinates shouldn't matter with pytest.warns(AstropyUserWarning, match='No observer defined on WCS'): i, j, k = wcs.world_to_array_index(spec, coord) assert_equal(i, 44) assert_equal(j, 39) assert_equal(k, 29) HEADER_SPECTRAL_CUBE_NONALIGNED = HEADER_SPECTRAL_CUBE.strip() + '\n' + """ PC2_3 = -0.5 PC3_2 = +0.5 """ with warnings.catch_warnings(): warnings.simplefilter('ignore', VerifyWarning) WCS_SPECTRAL_CUBE_NONALIGNED = WCS(Header.fromstring( HEADER_SPECTRAL_CUBE_NONALIGNED, sep='\n')) def test_spectral_cube_nonaligned(): # Make sure that correlation matrix gets adjusted if there are non-identity # CD matrix terms. wcs = WCS_SPECTRAL_CUBE_NONALIGNED assert wcs.world_axis_physical_types == ['pos.galactic.lat', 'em.freq', 'pos.galactic.lon'] assert wcs.world_axis_units == ['deg', 'Hz', 'deg'] assert wcs.pixel_axis_names == ['', '', ''] assert wcs.world_axis_names == ['Latitude', 'Frequency', 'Longitude'] assert_equal(wcs.axis_correlation_matrix, [[True, True, True], [False, True, True], [True, True, True]]) # NOTE: we check world_axis_object_components and world_axis_object_classes # again here because in the past this failed when non-aligned axes were # present, so this serves as a regression test. 
assert len(wcs.world_axis_object_components) == 3 assert wcs.world_axis_object_components[0] == ('celestial', 1, 'spherical.lat.degree') assert wcs.world_axis_object_components[1][:2] == ('spectral', 0) assert wcs.world_axis_object_components[2] == ('celestial', 0, 'spherical.lon.degree') assert wcs.world_axis_object_classes['celestial'][0] is SkyCoord assert wcs.world_axis_object_classes['celestial'][1] == () assert isinstance(wcs.world_axis_object_classes['celestial'][2]['frame'], Galactic) assert wcs.world_axis_object_classes['celestial'][2]['unit'] is u.deg assert wcs.world_axis_object_classes['spectral'][0] is Quantity assert wcs.world_axis_object_classes['spectral'][1] == () assert wcs.world_axis_object_classes['spectral'][2] == {} ############################################################################### # The following example is from Rots et al (2015), Table 5. It represents a # cube with two spatial dimensions and one time dimension ############################################################################### HEADER_TIME_CUBE = """ SIMPLE = T / Fits standard BITPIX = -32 / Bits per pixel NAXIS = 3 / Number of axes NAXIS1 = 2048 / Axis length NAXIS2 = 2048 / Axis length NAXIS3 = 11 / Axis length DATE = '2008-10-28T14:39:06' / Date FITS file was generated OBJECT = '2008 TC3' / Name of the object observed EXPTIME = 1.0011 / Integration time MJD-OBS = 54746.02749237 / Obs start DATE-OBS= '2008-10-07T00:39:35.3342' / Observing date TELESCOP= 'VISTA' / ESO Telescope Name INSTRUME= 'VIRCAM' / Instrument used. 
TIMESYS = 'UTC' / From Observatory Time System TREFPOS = 'TOPOCENT' / Topocentric MJDREF = 54746.0 / Time reference point in MJD RADESYS = 'ICRS' / Not equinoctal CTYPE2 = 'RA---ZPN' / Zenithal Polynomial Projection CRVAL2 = 2.01824372640628 / RA at ref pixel CUNIT2 = 'deg' / Angles are degrees always CRPIX2 = 2956.6 / Pixel coordinate at ref point CTYPE1 = 'DEC--ZPN' / Zenithal Polynomial Projection CRVAL1 = 14.8289418840003 / Dec at ref pixel CUNIT1 = 'deg' / Angles are degrees always CRPIX1 = -448.2 / Pixel coordinate at ref point CTYPE3 = 'UTC' / linear time (UTC) CRVAL3 = 2375.341 / Relative time of first frame CUNIT3 = 's' / Time unit CRPIX3 = 1.0 / Pixel coordinate at ref point CTYPE3A = 'TT' / alternative linear time (TT) CRVAL3A = 2440.525 / Relative time of first frame CUNIT3A = 's' / Time unit CRPIX3A = 1.0 / Pixel coordinate at ref point OBSGEO-B= -24.6157 / [deg] Tel geodetic latitute (=North)+ OBSGEO-L= -70.3976 / [deg] Tel geodetic longitude (=East)+ OBSGEO-H= 2530.0000 / [m] Tel height above reference ellipsoid CRDER3 = 0.0819 / random error in timings from fit CSYER3 = 0.0100 / absolute time error PC1_1 = 0.999999971570892 / WCS transform matrix element PC1_2 = 0.000238449608932 / WCS transform matrix element PC2_1 = -0.000621542859395 / WCS transform matrix element PC2_2 = 0.999999806842218 / WCS transform matrix element CDELT1 = -9.48575432499806E-5 / Axis scale at reference point CDELT2 = 9.48683176211164E-5 / Axis scale at reference point CDELT3 = 13.3629 / Axis scale at reference point PV1_1 = 1. / ZPN linear term PV1_3 = 42. 
/ ZPN cubic term """ with warnings.catch_warnings(): warnings.simplefilter('ignore', (VerifyWarning, FITSFixedWarning)) WCS_TIME_CUBE = WCS(Header.fromstring(HEADER_TIME_CUBE, sep='\n')) def test_time_cube(): # Spectral cube with a weird axis ordering wcs = WCS_TIME_CUBE assert wcs.pixel_n_dim == 3 assert wcs.world_n_dim == 3 assert wcs.array_shape == (11, 2048, 2048) assert wcs.pixel_shape == (2048, 2048, 11) assert wcs.world_axis_physical_types == ['pos.eq.dec', 'pos.eq.ra', 'time'] assert wcs.world_axis_units == ['deg', 'deg', 's'] assert wcs.pixel_axis_names == ['', '', ''] assert wcs.world_axis_names == ['', '', ''] assert_equal(wcs.axis_correlation_matrix, [[True, True, False], [True, True, False], [False, False, True]]) components = wcs.world_axis_object_components assert components[0] == ('celestial', 1, 'spherical.lat.degree') assert components[1] == ('celestial', 0, 'spherical.lon.degree') assert components[2][:2] == ('time', 0) assert callable(components[2][2]) assert wcs.world_axis_object_classes['celestial'][0] is SkyCoord assert wcs.world_axis_object_classes['celestial'][1] == () assert isinstance(wcs.world_axis_object_classes['celestial'][2]['frame'], ICRS) assert wcs.world_axis_object_classes['celestial'][2]['unit'] is u.deg assert wcs.world_axis_object_classes['time'][0] is Time assert wcs.world_axis_object_classes['time'][1] == () assert wcs.world_axis_object_classes['time'][2] == {} assert callable(wcs.world_axis_object_classes['time'][3]) assert_allclose(wcs.pixel_to_world_values(-449.2, 2955.6, 0), (14.8289418840003, 2.01824372640628, 2375.341)) assert_allclose(wcs.array_index_to_world_values(0, 2955.6, -449.2), (14.8289418840003, 2.01824372640628, 2375.341)) assert_allclose(wcs.world_to_pixel_values(14.8289418840003, 2.01824372640628, 2375.341), (-449.2, 2955.6, 0)) assert_equal(wcs.world_to_array_index_values(14.8289418840003, 2.01824372640628, 2375.341), (0, 2956, -449)) # High-level API coord, time = wcs.pixel_to_world(29, 39, 44) assert 
isinstance(coord, SkyCoord) assert isinstance(coord.frame, ICRS) assert_allclose(coord.ra.deg, 1.7323356692202325) assert_allclose(coord.dec.deg, 14.783516054817797) assert isinstance(time, Time) assert_allclose(time.mjd, 54746.03429755324) coord, time = wcs.array_index_to_world(44, 39, 29) assert isinstance(coord, SkyCoord) assert isinstance(coord.frame, ICRS) assert_allclose(coord.ra.deg, 1.7323356692202325) assert_allclose(coord.dec.deg, 14.783516054817797) assert isinstance(time, Time) assert_allclose(time.mjd, 54746.03429755324) x, y, z = wcs.world_to_pixel(coord, time) assert_allclose(x, 29.) assert_allclose(y, 39.) assert_allclose(z, 44.) # Order of world coordinates shouldn't matter x, y, z = wcs.world_to_pixel(time, coord) assert_allclose(x, 29.) assert_allclose(y, 39.) assert_allclose(z, 44.) i, j, k = wcs.world_to_array_index(coord, time) assert_equal(i, 44) assert_equal(j, 39) assert_equal(k, 29) # Order of world coordinates shouldn't matter i, j, k = wcs.world_to_array_index(time, coord) assert_equal(i, 44) assert_equal(j, 39) assert_equal(k, 29) ############################################################################### # The following tests are to make sure that Time objects are constructed # correctly for a variety of combinations of WCS keywords ############################################################################### HEADER_TIME_1D = """ SIMPLE = T BITPIX = -32 NAXIS = 1 NAXIS1 = 2048 TIMESYS = 'UTC' TREFPOS = 'TOPOCENT' MJDREF = 50002.6 CTYPE1 = 'UTC' CRVAL1 = 5 CUNIT1 = 's' CRPIX1 = 1.0 CDELT1 = 2 OBSGEO-L= -20 OBSGEO-B= -70 OBSGEO-H= 2530 """ if Version(wcsver) >= Version('7.1'): HEADER_TIME_1D += "DATEREF = '1995-10-12T14:24:00'\n" @pytest.fixture def header_time_1d(): return Header.fromstring(HEADER_TIME_1D, sep='\n') def assert_time_at(header, position, jd1, jd2, scale, format): with warnings.catch_warnings(): warnings.simplefilter('ignore', FITSFixedWarning) wcs = WCS(header) time = wcs.pixel_to_world(position) 
assert_allclose(time.jd1, jd1, rtol=1e-10) assert_allclose(time.jd2, jd2, rtol=1e-10) assert time.format == format assert time.scale == scale @pytest.mark.parametrize('scale', ('tai', 'tcb', 'tcg', 'tdb', 'tt', 'ut1', 'utc', 'local')) def test_time_1d_values(header_time_1d, scale): # Check that Time objects are instantiated with the correct values, # scales, and formats. header_time_1d['CTYPE1'] = scale.upper() assert_time_at(header_time_1d, 1, 2450003, 0.1 + 7 / 3600 / 24, scale, 'mjd') def test_time_1d_values_gps(header_time_1d): # Special treatment for GPS scale header_time_1d['CTYPE1'] = 'GPS' assert_time_at(header_time_1d, 1, 2450003, 0.1 + (7 + 19) / 3600 / 24, 'tai', 'mjd') def test_time_1d_values_deprecated(header_time_1d): # Deprecated (in FITS) scales header_time_1d['CTYPE1'] = 'TDT' assert_time_at(header_time_1d, 1, 2450003, 0.1 + 7 / 3600 / 24, 'tt', 'mjd') header_time_1d['CTYPE1'] = 'IAT' assert_time_at(header_time_1d, 1, 2450003, 0.1 + 7 / 3600 / 24, 'tai', 'mjd') header_time_1d['CTYPE1'] = 'GMT' assert_time_at(header_time_1d, 1, 2450003, 0.1 + 7 / 3600 / 24, 'utc', 'mjd') header_time_1d['CTYPE1'] = 'ET' assert_time_at(header_time_1d, 1, 2450003, 0.1 + 7 / 3600 / 24, 'tt', 'mjd') def test_time_1d_values_time(header_time_1d): header_time_1d['CTYPE1'] = 'TIME' assert_time_at(header_time_1d, 1, 2450003, 0.1 + 7 / 3600 / 24, 'utc', 'mjd') header_time_1d['TIMESYS'] = 'TAI' assert_time_at(header_time_1d, 1, 2450003, 0.1 + 7 / 3600 / 24, 'tai', 'mjd') @pytest.mark.remote_data @pytest.mark.parametrize('scale', ('tai', 'tcb', 'tcg', 'tdb', 'tt', 'ut1', 'utc')) def test_time_1d_roundtrip(header_time_1d, scale): # Check that coordinates round-trip pixel_in = np.arange(3, 10) header_time_1d['CTYPE1'] = scale.upper() with warnings.catch_warnings(): warnings.simplefilter('ignore', FITSFixedWarning) wcs = WCS(header_time_1d) # Simple test time = wcs.pixel_to_world(pixel_in) pixel_out = wcs.world_to_pixel(time) assert_allclose(pixel_in, pixel_out) # Test with an 
intermediate change to a different scale/format time = wcs.pixel_to_world(pixel_in).tdb time.format = 'isot' pixel_out = wcs.world_to_pixel(time) assert_allclose(pixel_in, pixel_out) def test_time_1d_high_precision(header_time_1d): # Case where the MJDREF is split into two for high precision del header_time_1d['MJDREF'] header_time_1d['MJDREFI'] = 52000. header_time_1d['MJDREFF'] = 1e-11 with warnings.catch_warnings(): warnings.simplefilter('ignore', FITSFixedWarning) wcs = WCS(header_time_1d) time = wcs.pixel_to_world(10) # Here we have to use a very small rtol to really test that MJDREFF is # taken into account assert_allclose(time.jd1, 2452001.0, rtol=1e-12) assert_allclose(time.jd2, -0.5 + 25 / 3600 / 24 + 1e-11, rtol=1e-13) def test_time_1d_location_geodetic(header_time_1d): # Make sure that the location is correctly returned (geodetic case) with warnings.catch_warnings(): warnings.simplefilter('ignore', FITSFixedWarning) wcs = WCS(header_time_1d) time = wcs.pixel_to_world(10) lon, lat, alt = time.location.to_geodetic() # FIXME: alt won't work for now because ERFA doesn't implement the IAU 1976 # ellipsoid (https://github.com/astropy/astropy/issues/9420) assert_allclose(lon.degree, -20) assert_allclose(lat.degree, -70) # assert_allclose(alt.to_value(u.m), 2530.) 
@pytest.fixture def header_time_1d_no_obs(): header = Header.fromstring(HEADER_TIME_1D, sep='\n') del header['OBSGEO-L'] del header['OBSGEO-B'] del header['OBSGEO-H'] return header def test_time_1d_location_geocentric(header_time_1d_no_obs): # Make sure that the location is correctly returned (geocentric case) header = header_time_1d_no_obs header['OBSGEO-X'] = 10 header['OBSGEO-Y'] = -20 header['OBSGEO-Z'] = 30 with warnings.catch_warnings(): warnings.simplefilter('ignore', FITSFixedWarning) wcs = WCS(header) time = wcs.pixel_to_world(10) x, y, z = time.location.to_geocentric() assert_allclose(x.to_value(u.m), 10) assert_allclose(y.to_value(u.m), -20) assert_allclose(z.to_value(u.m), 30) def test_time_1d_location_geocenter(header_time_1d_no_obs): header_time_1d_no_obs['TREFPOS'] = 'GEOCENTER' wcs = WCS(header_time_1d_no_obs) time = wcs.pixel_to_world(10) x, y, z = time.location.to_geocentric() assert_allclose(x.to_value(u.m), 0) assert_allclose(y.to_value(u.m), 0) assert_allclose(z.to_value(u.m), 0) def test_time_1d_location_missing(header_time_1d_no_obs): # Check what happens when no location is present wcs = WCS(header_time_1d_no_obs) with pytest.warns(UserWarning, match='Missing or incomplete observer location ' 'information, setting location in Time to None'): time = wcs.pixel_to_world(10) assert time.location is None def test_time_1d_location_incomplete(header_time_1d_no_obs): # Check what happens when location information is incomplete header_time_1d_no_obs['OBSGEO-L'] = 10. 
with warnings.catch_warnings(): warnings.simplefilter('ignore', FITSFixedWarning) wcs = WCS(header_time_1d_no_obs) with pytest.warns(UserWarning, match='Missing or incomplete observer location ' 'information, setting location in Time to None'): time = wcs.pixel_to_world(10) assert time.location is None def test_time_1d_location_unsupported(header_time_1d_no_obs): # Check what happens when TREFPOS is unsupported header_time_1d_no_obs['TREFPOS'] = 'BARYCENTER' wcs = WCS(header_time_1d_no_obs) with pytest.warns(UserWarning, match="Observation location 'barycenter' is not " "supported, setting location in Time to None"): time = wcs.pixel_to_world(10) assert time.location is None def test_time_1d_unsupported_ctype(header_time_1d_no_obs): # For cases that we don't support yet, e.g. UT(...), use Time and drop sub-scale # Case where the MJDREF is split into two for high precision header_time_1d_no_obs['CTYPE1'] = 'UT(WWV)' wcs = WCS(header_time_1d_no_obs) with pytest.warns(UserWarning, match="Dropping unsupported sub-scale WWV from scale UT"): time = wcs.pixel_to_world(10) assert isinstance(time, Time) ############################################################################### # Extra corner cases ############################################################################### def test_unrecognized_unit(): # TODO: Determine whether the following behavior is desirable wcs = WCS(naxis=1) with pytest.warns(UnitsWarning): wcs.wcs.cunit = ['bananas // sekonds'] assert wcs.world_axis_units == ['bananas // sekonds'] def test_distortion_correlations(): filename = get_pkg_data_filename('../../tests/data/sip.fits') with pytest.warns(FITSFixedWarning): w = WCS(filename) assert_equal(w.axis_correlation_matrix, True) # Changing PC to an identity matrix doesn't change anything since # distortions are still present. 
w.wcs.pc = [[1, 0], [0, 1]] assert_equal(w.axis_correlation_matrix, True) # Nor does changing the name of the axes to make them non-celestial w.wcs.ctype = ['X', 'Y'] assert_equal(w.axis_correlation_matrix, True) # However once we turn off the distortions the matrix changes w.sip = None assert_equal(w.axis_correlation_matrix, [[True, False], [False, True]]) # If we go back to celestial coordinates then the matrix is all True again w.wcs.ctype = ['RA---TAN', 'DEC--TAN'] assert_equal(w.axis_correlation_matrix, True) # Or if we change to X/Y but have a non-identity PC w.wcs.pc = [[0.9, -0.1], [0.1, 0.9]] w.wcs.ctype = ['X', 'Y'] assert_equal(w.axis_correlation_matrix, True) def test_custom_ctype_to_ucd_mappings(): wcs = WCS(naxis=1) wcs.wcs.ctype = ['SPAM'] assert wcs.world_axis_physical_types == [None] # Check simple behavior with custom_ctype_to_ucd_mapping({'APPLE': 'food.fruit'}): assert wcs.world_axis_physical_types == [None] with custom_ctype_to_ucd_mapping({'APPLE': 'food.fruit', 'SPAM': 'food.spam'}): assert wcs.world_axis_physical_types == ['food.spam'] # Check nesting with custom_ctype_to_ucd_mapping({'SPAM': 'food.spam'}): with custom_ctype_to_ucd_mapping({'APPLE': 'food.fruit'}): assert wcs.world_axis_physical_types == ['food.spam'] with custom_ctype_to_ucd_mapping({'APPLE': 'food.fruit'}): with custom_ctype_to_ucd_mapping({'SPAM': 'food.spam'}): assert wcs.world_axis_physical_types == ['food.spam'] # Check priority in nesting with custom_ctype_to_ucd_mapping({'SPAM': 'notfood'}): with custom_ctype_to_ucd_mapping({'SPAM': 'food.spam'}): assert wcs.world_axis_physical_types == ['food.spam'] with custom_ctype_to_ucd_mapping({'SPAM': 'food.spam'}): with custom_ctype_to_ucd_mapping({'SPAM': 'notfood'}): assert wcs.world_axis_physical_types == ['notfood'] def test_caching_components_and_classes(): # Make sure that when we change the WCS object, the classes and components # are updated (we use a cache internally, so we need to make sure the cache # is 
invalidated if needed) wcs = WCS_SIMPLE_CELESTIAL.deepcopy() assert wcs.world_axis_object_components == [('celestial', 0, 'spherical.lon.degree'), ('celestial', 1, 'spherical.lat.degree')] assert wcs.world_axis_object_classes['celestial'][0] is SkyCoord assert wcs.world_axis_object_classes['celestial'][1] == () assert isinstance(wcs.world_axis_object_classes['celestial'][2]['frame'], ICRS) assert wcs.world_axis_object_classes['celestial'][2]['unit'] is u.deg wcs.wcs.radesys = 'FK5' frame = wcs.world_axis_object_classes['celestial'][2]['frame'] assert isinstance(frame, FK5) assert frame.equinox.jyear == 2000. wcs.wcs.equinox = 2010 frame = wcs.world_axis_object_classes['celestial'][2]['frame'] assert isinstance(frame, FK5) assert frame.equinox.jyear == 2010. def test_sub_wcsapi_attributes(): # Regression test for a bug that caused some of the WCS attributes to be # incorrect when using WCS.sub or WCS.celestial (which is an alias for sub # with lon/lat types). wcs = WCS_SPECTRAL_CUBE.deepcopy() wcs.pixel_shape = (30, 40, 50) wcs.pixel_bounds = [(-1, 11), (-2, 18), (5, 15)] # Use celestial shortcut wcs_sub1 = wcs.celestial assert wcs_sub1.pixel_n_dim == 2 assert wcs_sub1.world_n_dim == 2 assert wcs_sub1.array_shape == (50, 30) assert wcs_sub1.pixel_shape == (30, 50) assert wcs_sub1.pixel_bounds == [(-1, 11), (5, 15)] assert wcs_sub1.world_axis_physical_types == ['pos.galactic.lat', 'pos.galactic.lon'] assert wcs_sub1.world_axis_units == ['deg', 'deg'] assert wcs_sub1.world_axis_names == ['Latitude', 'Longitude'] # Try adding axes wcs_sub2 = wcs.sub([0, 2, 0]) assert wcs_sub2.pixel_n_dim == 3 assert wcs_sub2.world_n_dim == 3 assert wcs_sub2.array_shape == (None, 40, None) assert wcs_sub2.pixel_shape == (None, 40, None) assert wcs_sub2.pixel_bounds == [None, (-2, 18), None] assert wcs_sub2.world_axis_physical_types == [None, 'em.freq', None] assert wcs_sub2.world_axis_units == ['', 'Hz', ''] assert wcs_sub2.world_axis_names == ['', 'Frequency', ''] # Use strings 
wcs_sub3 = wcs.sub(['longitude', 'latitude']) assert wcs_sub3.pixel_n_dim == 2 assert wcs_sub3.world_n_dim == 2 assert wcs_sub3.array_shape == (30, 50) assert wcs_sub3.pixel_shape == (50, 30) assert wcs_sub3.pixel_bounds == [(5, 15), (-1, 11)] assert wcs_sub3.world_axis_physical_types == ['pos.galactic.lon', 'pos.galactic.lat'] assert wcs_sub3.world_axis_units == ['deg', 'deg'] assert wcs_sub3.world_axis_names == ['Longitude', 'Latitude'] # Now try without CNAME set wcs.wcs.cname = [''] * wcs.wcs.naxis wcs_sub4 = wcs.sub(['longitude', 'latitude']) assert wcs_sub4.pixel_n_dim == 2 assert wcs_sub4.world_n_dim == 2 assert wcs_sub4.array_shape == (30, 50) assert wcs_sub4.pixel_shape == (50, 30) assert wcs_sub4.pixel_bounds == [(5, 15), (-1, 11)] assert wcs_sub4.world_axis_physical_types == ['pos.galactic.lon', 'pos.galactic.lat'] assert wcs_sub4.world_axis_units == ['deg', 'deg'] assert wcs_sub4.world_axis_names == ['', ''] HEADER_POLARIZED = """ CTYPE1 = 'HPLT-TAN' CTYPE2 = 'HPLN-TAN' CTYPE3 = 'STOKES' """ @pytest.fixture def header_polarized(): return Header.fromstring(HEADER_POLARIZED, sep='\n') def test_phys_type_polarization(header_polarized): w = WCS(header_polarized) assert w.world_axis_physical_types[2] == 'phys.polarization.stokes' ############################################################################### # Spectral transformations ############################################################################### HEADER_SPECTRAL_FRAMES = """ BUNIT = 'Jy/beam' EQUINOX = 2.000000000E+03 CTYPE1 = 'RA---SIN' CRVAL1 = 2.60108333333E+02 CDELT1 = -2.777777845E-04 CRPIX1 = 1.0 CUNIT1 = 'deg' CTYPE2 = 'DEC--SIN' CRVAL2 = -9.75000000000E-01 CDELT2 = 2.777777845E-04 CRPIX2 = 1.0 CUNIT2 = 'deg' CTYPE3 = 'FREQ' CRVAL3 = 1.37835117405E+09 CDELT3 = 9.765625000E+04 CRPIX3 = 32.0 CUNIT3 = 'Hz' SPECSYS = 'TOPOCENT' RESTFRQ = 1.420405752E+09 / [Hz] RADESYS = 'FK5' """ @pytest.fixture def header_spectral_frames(): return Header.fromstring(HEADER_SPECTRAL_FRAMES, sep='\n') def 
test_spectralcoord_frame(header_spectral_frames): # This is a test to check the numerical results of transformations between # different velocity frames. We simply make sure that the returned # SpectralCoords are in the right frame but don't check the transformations # since this is already done in test_spectralcoord_accuracy # in astropy.coordinates. with iers.conf.set_temp('auto_download', False): obstime = Time(f"2009-05-04T04:44:23", scale='utc') header = header_spectral_frames.copy() header['MJD-OBS'] = obstime.mjd header['CRVAL1'] = 16.33211 header['CRVAL2'] = -34.2221 header['OBSGEO-L'] = 144.2 header['OBSGEO-B'] = -20.2 header['OBSGEO-H'] = 0. # We start off with a WCS defined in topocentric frequency with pytest.warns(FITSFixedWarning): wcs_topo = WCS(header) # We convert a single pixel coordinate to world coordinates and keep only # the second high level object - a SpectralCoord: sc_topo = wcs_topo.pixel_to_world(0, 0, 31)[1] # We check that this is in topocentric frame with zero velocities assert isinstance(sc_topo, SpectralCoord) assert isinstance(sc_topo.observer, ITRS) assert sc_topo.observer.obstime.isot == obstime.isot assert_equal(sc_topo.observer.data.differentials['s'].d_xyz.value, 0) observatory = EarthLocation.from_geodetic(144.2, -20.2).get_itrs(obstime=obstime).transform_to(ICRS()) assert observatory.separation_3d(sc_topo.observer.transform_to(ICRS())) < 1 * u.km for specsys, expected_frame in VELOCITY_FRAMES.items(): header['SPECSYS'] = specsys with pytest.warns(FITSFixedWarning): wcs = WCS(header) sc = wcs.pixel_to_world(0, 0, 31)[1] # Now transform to the expected velocity frame, which should leave # the spectral coordinate unchanged sc_check = sc.with_observer_stationary_relative_to(expected_frame) assert_quantity_allclose(sc.quantity, sc_check.quantity) @pytest.mark.parametrize(('ctype3', 'observer'), product(['ZOPT', 'BETA', 'VELO', 'VRAD', 'VOPT'], [False, True])) def test_different_ctypes(header_spectral_frames, ctype3, observer): 
header = header_spectral_frames.copy() header['CTYPE3'] = ctype3 header['CRVAL3'] = 0.1 header['CDELT3'] = 0.001 if ctype3[0] == 'V': header['CUNIT3'] = 'm s-1' else: header['CUNIT3'] = '' header['RESTWAV'] = 1.420405752E+09 header['MJD-OBS'] = 55197 if observer: header['OBSGEO-L'] = 144.2 header['OBSGEO-B'] = -20.2 header['OBSGEO-H'] = 0. header['SPECSYS'] = 'BARYCENT' with warnings.catch_warnings(): warnings.simplefilter('ignore', FITSFixedWarning) wcs = WCS(header) skycoord, spectralcoord = wcs.pixel_to_world(0, 0, 31) assert isinstance(spectralcoord, SpectralCoord) if observer: pix = wcs.world_to_pixel(skycoord, spectralcoord) else: with pytest.warns(AstropyUserWarning, match='No observer defined on WCS'): pix = wcs.world_to_pixel(skycoord, spectralcoord) assert_allclose(pix, [0, 0, 31], rtol=1e-6)
pllim/astropy
astropy/wcs/wcsapi/tests/test_fitswcs.py
astropy/wcs/wcsapi/wrappers/sliced_wcs.py
""" Docstrings for generated ufuncs The syntax is designed to look like the function add_newdoc is being called from numpy.lib, but in this file add_newdoc puts the docstrings in a dictionary. This dictionary is used in numpy/core/code_generators/generate_umath.py to generate the docstrings for the ufuncs in numpy.core at the C level when the ufuncs are created at compile time. """ import textwrap docdict = {} def get(name): return docdict.get(name) # common parameter text to all ufuncs subst = { 'PARAMS': textwrap.dedent(""" out : ndarray, None, or tuple of ndarray and None, optional A location into which the result is stored. If provided, it must have a shape that the inputs broadcast to. If not provided or None, a freshly-allocated array is returned. A tuple (possible only as a keyword argument) must have length equal to the number of outputs. where : array_like, optional This condition is broadcast over the input. At locations where the condition is True, the `out` array will be set to the ufunc result. Elsewhere, the `out` array will retain its original value. Note that if an uninitialized `out` array is created via the default ``out=None``, locations within it where the condition is False will remain uninitialized. **kwargs For other keyword-only arguments, see the :ref:`ufunc docs <ufuncs.kwargs>`. 
""").strip(), 'BROADCASTABLE_2': ("If ``x1.shape != x2.shape``, they must be " "broadcastable to a common\n shape (which becomes " "the shape of the output)."), 'OUT_SCALAR_1': "This is a scalar if `x` is a scalar.", 'OUT_SCALAR_2': "This is a scalar if both `x1` and `x2` are scalars.", } def add_newdoc(place, name, doc): doc = textwrap.dedent(doc).strip() skip = ( # gufuncs do not use the OUT_SCALAR replacement strings 'matmul', # clip has 3 inputs, which is not handled by this 'clip', ) if name[0] != '_' and name not in skip: if '\nx :' in doc: assert '$OUT_SCALAR_1' in doc, "in {}".format(name) elif '\nx2 :' in doc or '\nx1, x2 :' in doc: assert '$OUT_SCALAR_2' in doc, "in {}".format(name) else: assert False, "Could not detect number of inputs in {}".format(name) for k, v in subst.items(): doc = doc.replace('$' + k, v) docdict['.'.join((place, name))] = doc add_newdoc('numpy.core.umath', 'absolute', """ Calculate the absolute value element-wise. ``np.abs`` is a shorthand for this function. Parameters ---------- x : array_like Input array. $PARAMS Returns ------- absolute : ndarray An ndarray containing the absolute value of each element in `x`. For complex input, ``a + ib``, the absolute value is :math:`\\sqrt{ a^2 + b^2 }`. $OUT_SCALAR_1 Examples -------- >>> x = np.array([-1.2, 1.2]) >>> np.absolute(x) array([ 1.2, 1.2]) >>> np.absolute(1.2 + 1j) 1.5620499351813308 Plot the function over ``[-10, 10]``: >>> import matplotlib.pyplot as plt >>> x = np.linspace(start=-10, stop=10, num=101) >>> plt.plot(x, np.absolute(x)) >>> plt.show() Plot the function over the complex plane: >>> xx = x + 1j * x[:, np.newaxis] >>> plt.imshow(np.abs(xx), extent=[-10, 10, -10, 10], cmap='gray') >>> plt.show() The `abs` function can be used as a shorthand for ``np.absolute`` on ndarrays. >>> x = np.array([-1.2, 1.2]) >>> abs(x) array([1.2, 1.2]) """) add_newdoc('numpy.core.umath', 'add', """ Add arguments element-wise. 
Parameters ---------- x1, x2 : array_like The arrays to be added. $BROADCASTABLE_2 $PARAMS Returns ------- add : ndarray or scalar The sum of `x1` and `x2`, element-wise. $OUT_SCALAR_2 Notes ----- Equivalent to `x1` + `x2` in terms of array broadcasting. Examples -------- >>> np.add(1.0, 4.0) 5.0 >>> x1 = np.arange(9.0).reshape((3, 3)) >>> x2 = np.arange(3.0) >>> np.add(x1, x2) array([[ 0., 2., 4.], [ 3., 5., 7.], [ 6., 8., 10.]]) The ``+`` operator can be used as a shorthand for ``np.add`` on ndarrays. >>> x1 = np.arange(9.0).reshape((3, 3)) >>> x2 = np.arange(3.0) >>> x1 + x2 array([[ 0., 2., 4.], [ 3., 5., 7.], [ 6., 8., 10.]]) """) add_newdoc('numpy.core.umath', 'arccos', """ Trigonometric inverse cosine, element-wise. The inverse of `cos` so that, if ``y = cos(x)``, then ``x = arccos(y)``. Parameters ---------- x : array_like `x`-coordinate on the unit circle. For real arguments, the domain is [-1, 1]. $PARAMS Returns ------- angle : ndarray The angle of the ray intersecting the unit circle at the given `x`-coordinate in radians [0, pi]. $OUT_SCALAR_1 See Also -------- cos, arctan, arcsin, emath.arccos Notes ----- `arccos` is a multivalued function: for each `x` there are infinitely many numbers `z` such that ``cos(z) = x``. The convention is to return the angle `z` whose real part lies in `[0, pi]`. For real-valued input data types, `arccos` always returns real output. For each value that cannot be expressed as a real number or infinity, it yields ``nan`` and sets the `invalid` floating point error flag. For complex-valued input, `arccos` is a complex analytic function that has branch cuts ``[-inf, -1]`` and `[1, inf]` and is continuous from above on the former and from below on the latter. The inverse `cos` is also known as `acos` or cos^-1. References ---------- M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions", 10th printing, 1964, pp. 79. 
http://www.math.sfu.ca/~cbm/aands/ Examples -------- We expect the arccos of 1 to be 0, and of -1 to be pi: >>> np.arccos([1, -1]) array([ 0. , 3.14159265]) Plot arccos: >>> import matplotlib.pyplot as plt >>> x = np.linspace(-1, 1, num=100) >>> plt.plot(x, np.arccos(x)) >>> plt.axis('tight') >>> plt.show() """) add_newdoc('numpy.core.umath', 'arccosh', """ Inverse hyperbolic cosine, element-wise. Parameters ---------- x : array_like Input array. $PARAMS Returns ------- arccosh : ndarray Array of the same shape as `x`. $OUT_SCALAR_1 See Also -------- cosh, arcsinh, sinh, arctanh, tanh Notes ----- `arccosh` is a multivalued function: for each `x` there are infinitely many numbers `z` such that `cosh(z) = x`. The convention is to return the `z` whose imaginary part lies in ``[-pi, pi]`` and the real part in ``[0, inf]``. For real-valued input data types, `arccosh` always returns real output. For each value that cannot be expressed as a real number or infinity, it yields ``nan`` and sets the `invalid` floating point error flag. For complex-valued input, `arccosh` is a complex analytical function that has a branch cut `[-inf, 1]` and is continuous from above on it. References ---------- .. [1] M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions", 10th printing, 1964, pp. 86. http://www.math.sfu.ca/~cbm/aands/ .. [2] Wikipedia, "Inverse hyperbolic function", https://en.wikipedia.org/wiki/Arccosh Examples -------- >>> np.arccosh([np.e, 10.0]) array([ 1.65745445, 2.99322285]) >>> np.arccosh(1) 0.0 """) add_newdoc('numpy.core.umath', 'arcsin', """ Inverse sine, element-wise. Parameters ---------- x : array_like `y`-coordinate on the unit circle. $PARAMS Returns ------- angle : ndarray The inverse sine of each element in `x`, in radians and in the closed interval ``[-pi/2, pi/2]``. 
$OUT_SCALAR_1 See Also -------- sin, cos, arccos, tan, arctan, arctan2, emath.arcsin Notes ----- `arcsin` is a multivalued function: for each `x` there are infinitely many numbers `z` such that :math:`sin(z) = x`. The convention is to return the angle `z` whose real part lies in [-pi/2, pi/2]. For real-valued input data types, *arcsin* always returns real output. For each value that cannot be expressed as a real number or infinity, it yields ``nan`` and sets the `invalid` floating point error flag. For complex-valued input, `arcsin` is a complex analytic function that has, by convention, the branch cuts [-inf, -1] and [1, inf] and is continuous from above on the former and from below on the latter. The inverse sine is also known as `asin` or sin^{-1}. References ---------- Abramowitz, M. and Stegun, I. A., *Handbook of Mathematical Functions*, 10th printing, New York: Dover, 1964, pp. 79ff. http://www.math.sfu.ca/~cbm/aands/ Examples -------- >>> np.arcsin(1) # pi/2 1.5707963267948966 >>> np.arcsin(-1) # -pi/2 -1.5707963267948966 >>> np.arcsin(0) 0.0 """) add_newdoc('numpy.core.umath', 'arcsinh', """ Inverse hyperbolic sine element-wise. Parameters ---------- x : array_like Input array. $PARAMS Returns ------- out : ndarray or scalar Array of the same shape as `x`. $OUT_SCALAR_1 Notes ----- `arcsinh` is a multivalued function: for each `x` there are infinitely many numbers `z` such that `sinh(z) = x`. The convention is to return the `z` whose imaginary part lies in `[-pi/2, pi/2]`. For real-valued input data types, `arcsinh` always returns real output. For each value that cannot be expressed as a real number or infinity, it returns ``nan`` and sets the `invalid` floating point error flag. For complex-valued input, `arccos` is a complex analytical function that has branch cuts `[1j, infj]` and `[-1j, -infj]` and is continuous from the right on the former and from the left on the latter. The inverse hyperbolic sine is also known as `asinh` or ``sinh^-1``. 
References ---------- .. [1] M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions", 10th printing, 1964, pp. 86. http://www.math.sfu.ca/~cbm/aands/ .. [2] Wikipedia, "Inverse hyperbolic function", https://en.wikipedia.org/wiki/Arcsinh Examples -------- >>> np.arcsinh(np.array([np.e, 10.0])) array([ 1.72538256, 2.99822295]) """) add_newdoc('numpy.core.umath', 'arctan', """ Trigonometric inverse tangent, element-wise. The inverse of tan, so that if ``y = tan(x)`` then ``x = arctan(y)``. Parameters ---------- x : array_like $PARAMS Returns ------- out : ndarray or scalar Out has the same shape as `x`. Its real part is in ``[-pi/2, pi/2]`` (``arctan(+/-inf)`` returns ``+/-pi/2``). $OUT_SCALAR_1 See Also -------- arctan2 : The "four quadrant" arctan of the angle formed by (`x`, `y`) and the positive `x`-axis. angle : Argument of complex values. Notes ----- `arctan` is a multi-valued function: for each `x` there are infinitely many numbers `z` such that tan(`z`) = `x`. The convention is to return the angle `z` whose real part lies in [-pi/2, pi/2]. For real-valued input data types, `arctan` always returns real output. For each value that cannot be expressed as a real number or infinity, it yields ``nan`` and sets the `invalid` floating point error flag. For complex-valued input, `arctan` is a complex analytic function that has [``1j, infj``] and [``-1j, -infj``] as branch cuts, and is continuous from the left on the former and from the right on the latter. The inverse tangent is also known as `atan` or tan^{-1}. References ---------- Abramowitz, M. and Stegun, I. A., *Handbook of Mathematical Functions*, 10th printing, New York: Dover, 1964, pp. 79. http://www.math.sfu.ca/~cbm/aands/ Examples -------- We expect the arctan of 0 to be 0, and of 1 to be pi/4: >>> np.arctan([0, 1]) array([ 0. 
, 0.78539816]) >>> np.pi/4 0.78539816339744828 Plot arctan: >>> import matplotlib.pyplot as plt >>> x = np.linspace(-10, 10) >>> plt.plot(x, np.arctan(x)) >>> plt.axis('tight') >>> plt.show() """) add_newdoc('numpy.core.umath', 'arctan2', """ Element-wise arc tangent of ``x1/x2`` choosing the quadrant correctly. The quadrant (i.e., branch) is chosen so that ``arctan2(x1, x2)`` is the signed angle in radians between the ray ending at the origin and passing through the point (1,0), and the ray ending at the origin and passing through the point (`x2`, `x1`). (Note the role reversal: the "`y`-coordinate" is the first function parameter, the "`x`-coordinate" is the second.) By IEEE convention, this function is defined for `x2` = +/-0 and for either or both of `x1` and `x2` = +/-inf (see Notes for specific values). This function is not defined for complex-valued arguments; for the so-called argument of complex values, use `angle`. Parameters ---------- x1 : array_like, real-valued `y`-coordinates. x2 : array_like, real-valued `x`-coordinates. $BROADCASTABLE_2 $PARAMS Returns ------- angle : ndarray Array of angles in radians, in the range ``[-pi, pi]``. $OUT_SCALAR_2 See Also -------- arctan, tan, angle Notes ----- *arctan2* is identical to the `atan2` function of the underlying C library. The following special values are defined in the C standard: [1]_ ====== ====== ================ `x1` `x2` `arctan2(x1,x2)` ====== ====== ================ +/- 0 +0 +/- 0 +/- 0 -0 +/- pi > 0 +/-inf +0 / +pi < 0 +/-inf -0 / -pi +/-inf +inf +/- (pi/4) +/-inf -inf +/- (3*pi/4) ====== ====== ================ Note that +0 and -0 are distinct floating point numbers, as are +inf and -inf. References ---------- .. [1] ISO/IEC standard 9899:1999, "Programming language C." 
Examples -------- Consider four points in different quadrants: >>> x = np.array([-1, +1, +1, -1]) >>> y = np.array([-1, -1, +1, +1]) >>> np.arctan2(y, x) * 180 / np.pi array([-135., -45., 45., 135.]) Note the order of the parameters. `arctan2` is defined also when `x2` = 0 and at several other special points, obtaining values in the range ``[-pi, pi]``: >>> np.arctan2([1., -1.], [0., 0.]) array([ 1.57079633, -1.57079633]) >>> np.arctan2([0., 0., np.inf], [+0., -0., np.inf]) array([ 0. , 3.14159265, 0.78539816]) """) add_newdoc('numpy.core.umath', '_arg', """ DO NOT USE, ONLY FOR TESTING """) add_newdoc('numpy.core.umath', 'arctanh', """ Inverse hyperbolic tangent element-wise. Parameters ---------- x : array_like Input array. $PARAMS Returns ------- out : ndarray or scalar Array of the same shape as `x`. $OUT_SCALAR_1 See Also -------- emath.arctanh Notes ----- `arctanh` is a multivalued function: for each `x` there are infinitely many numbers `z` such that ``tanh(z) = x``. The convention is to return the `z` whose imaginary part lies in `[-pi/2, pi/2]`. For real-valued input data types, `arctanh` always returns real output. For each value that cannot be expressed as a real number or infinity, it yields ``nan`` and sets the `invalid` floating point error flag. For complex-valued input, `arctanh` is a complex analytical function that has branch cuts `[-1, -inf]` and `[1, inf]` and is continuous from above on the former and from below on the latter. The inverse hyperbolic tangent is also known as `atanh` or ``tanh^-1``. References ---------- .. [1] M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions", 10th printing, 1964, pp. 86. http://www.math.sfu.ca/~cbm/aands/ .. [2] Wikipedia, "Inverse hyperbolic function", https://en.wikipedia.org/wiki/Arctanh Examples -------- >>> np.arctanh([0, -0.5]) array([ 0. , -0.54930614]) """) add_newdoc('numpy.core.umath', 'bitwise_and', """ Compute the bit-wise AND of two arrays element-wise. 
Computes the bit-wise AND of the underlying binary representation of the integers in the input arrays. This ufunc implements the C/Python operator ``&``. Parameters ---------- x1, x2 : array_like Only integer and boolean types are handled. $BROADCASTABLE_2 $PARAMS Returns ------- out : ndarray or scalar Result. $OUT_SCALAR_2 See Also -------- logical_and bitwise_or bitwise_xor binary_repr : Return the binary representation of the input number as a string. Examples -------- The number 13 is represented by ``00001101``. Likewise, 17 is represented by ``00010001``. The bit-wise AND of 13 and 17 is therefore ``000000001``, or 1: >>> np.bitwise_and(13, 17) 1 >>> np.bitwise_and(14, 13) 12 >>> np.binary_repr(12) '1100' >>> np.bitwise_and([14,3], 13) array([12, 1]) >>> np.bitwise_and([11,7], [4,25]) array([0, 1]) >>> np.bitwise_and(np.array([2,5,255]), np.array([3,14,16])) array([ 2, 4, 16]) >>> np.bitwise_and([True, True], [False, True]) array([False, True]) The ``&`` operator can be used as a shorthand for ``np.bitwise_and`` on ndarrays. >>> x1 = np.array([2, 5, 255]) >>> x2 = np.array([3, 14, 16]) >>> x1 & x2 array([ 2, 4, 16]) """) add_newdoc('numpy.core.umath', 'bitwise_or', """ Compute the bit-wise OR of two arrays element-wise. Computes the bit-wise OR of the underlying binary representation of the integers in the input arrays. This ufunc implements the C/Python operator ``|``. Parameters ---------- x1, x2 : array_like Only integer and boolean types are handled. $BROADCASTABLE_2 $PARAMS Returns ------- out : ndarray or scalar Result. $OUT_SCALAR_2 See Also -------- logical_or bitwise_and bitwise_xor binary_repr : Return the binary representation of the input number as a string. Examples -------- The number 13 has the binaray representation ``00001101``. Likewise, 16 is represented by ``00010000``. 
The bit-wise OR of 13 and 16 is then ``000111011``, or 29: >>> np.bitwise_or(13, 16) 29 >>> np.binary_repr(29) '11101' >>> np.bitwise_or(32, 2) 34 >>> np.bitwise_or([33, 4], 1) array([33, 5]) >>> np.bitwise_or([33, 4], [1, 2]) array([33, 6]) >>> np.bitwise_or(np.array([2, 5, 255]), np.array([4, 4, 4])) array([ 6, 5, 255]) >>> np.array([2, 5, 255]) | np.array([4, 4, 4]) array([ 6, 5, 255]) >>> np.bitwise_or(np.array([2, 5, 255, 2147483647], dtype=np.int32), ... np.array([4, 4, 4, 2147483647], dtype=np.int32)) array([ 6, 5, 255, 2147483647]) >>> np.bitwise_or([True, True], [False, True]) array([ True, True]) The ``|`` operator can be used as a shorthand for ``np.bitwise_or`` on ndarrays. >>> x1 = np.array([2, 5, 255]) >>> x2 = np.array([4, 4, 4]) >>> x1 | x2 array([ 6, 5, 255]) """) add_newdoc('numpy.core.umath', 'bitwise_xor', """ Compute the bit-wise XOR of two arrays element-wise. Computes the bit-wise XOR of the underlying binary representation of the integers in the input arrays. This ufunc implements the C/Python operator ``^``. Parameters ---------- x1, x2 : array_like Only integer and boolean types are handled. $BROADCASTABLE_2 $PARAMS Returns ------- out : ndarray or scalar Result. $OUT_SCALAR_2 See Also -------- logical_xor bitwise_and bitwise_or binary_repr : Return the binary representation of the input number as a string. Examples -------- The number 13 is represented by ``00001101``. Likewise, 17 is represented by ``00010001``. The bit-wise XOR of 13 and 17 is therefore ``00011100``, or 28: >>> np.bitwise_xor(13, 17) 28 >>> np.binary_repr(28) '11100' >>> np.bitwise_xor(31, 5) 26 >>> np.bitwise_xor([31,3], 5) array([26, 6]) >>> np.bitwise_xor([31,3], [5,6]) array([26, 5]) >>> np.bitwise_xor([True, True], [False, True]) array([ True, False]) The ``^`` operator can be used as a shorthand for ``np.bitwise_xor`` on ndarrays. 
>>> x1 = np.array([True, True]) >>> x2 = np.array([False, True]) >>> x1 ^ x2 array([ True, False]) """) add_newdoc('numpy.core.umath', 'ceil', """ Return the ceiling of the input, element-wise. The ceil of the scalar `x` is the smallest integer `i`, such that ``i >= x``. It is often denoted as :math:`\\lceil x \\rceil`. Parameters ---------- x : array_like Input data. $PARAMS Returns ------- y : ndarray or scalar The ceiling of each element in `x`, with `float` dtype. $OUT_SCALAR_1 See Also -------- floor, trunc, rint, fix Examples -------- >>> a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0]) >>> np.ceil(a) array([-1., -1., -0., 1., 2., 2., 2.]) """) add_newdoc('numpy.core.umath', 'trunc', """ Return the truncated value of the input, element-wise. The truncated value of the scalar `x` is the nearest integer `i` which is closer to zero than `x` is. In short, the fractional part of the signed number `x` is discarded. Parameters ---------- x : array_like Input data. $PARAMS Returns ------- y : ndarray or scalar The truncated value of each element in `x`. $OUT_SCALAR_1 See Also -------- ceil, floor, rint, fix Notes ----- .. versionadded:: 1.3.0 Examples -------- >>> a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0]) >>> np.trunc(a) array([-1., -1., -0., 0., 1., 1., 2.]) """) add_newdoc('numpy.core.umath', 'conjugate', """ Return the complex conjugate, element-wise. The complex conjugate of a complex number is obtained by changing the sign of its imaginary part. Parameters ---------- x : array_like Input value. $PARAMS Returns ------- y : ndarray The complex conjugate of `x`, with same dtype as `y`. $OUT_SCALAR_1 Notes ----- `conj` is an alias for `conjugate`: >>> np.conj is np.conjugate True Examples -------- >>> np.conjugate(1+2j) (1-2j) >>> x = np.eye(2) + 1j * np.eye(2) >>> np.conjugate(x) array([[ 1.-1.j, 0.-0.j], [ 0.-0.j, 1.-1.j]]) """) add_newdoc('numpy.core.umath', 'cos', """ Cosine element-wise. 
Parameters ---------- x : array_like Input array in radians. $PARAMS Returns ------- y : ndarray The corresponding cosine values. $OUT_SCALAR_1 Notes ----- If `out` is provided, the function writes the result into it, and returns a reference to `out`. (See Examples) References ---------- M. Abramowitz and I. A. Stegun, Handbook of Mathematical Functions. New York, NY: Dover, 1972. Examples -------- >>> np.cos(np.array([0, np.pi/2, np.pi])) array([ 1.00000000e+00, 6.12303177e-17, -1.00000000e+00]) >>> >>> # Example of providing the optional output parameter >>> out1 = np.array([0], dtype='d') >>> out2 = np.cos([0.1], out1) >>> out2 is out1 True >>> >>> # Example of ValueError due to provision of shape mis-matched `out` >>> np.cos(np.zeros((3,3)),np.zeros((2,2))) Traceback (most recent call last): File "<stdin>", line 1, in <module> ValueError: operands could not be broadcast together with shapes (3,3) (2,2) """) add_newdoc('numpy.core.umath', 'cosh', """ Hyperbolic cosine, element-wise. Equivalent to ``1/2 * (np.exp(x) + np.exp(-x))`` and ``np.cos(1j*x)``. Parameters ---------- x : array_like Input array. $PARAMS Returns ------- out : ndarray or scalar Output array of same shape as `x`. $OUT_SCALAR_1 Examples -------- >>> np.cosh(0) 1.0 The hyperbolic cosine describes the shape of a hanging cable: >>> import matplotlib.pyplot as plt >>> x = np.linspace(-4, 4, 1000) >>> plt.plot(x, np.cosh(x)) >>> plt.show() """) add_newdoc('numpy.core.umath', 'degrees', """ Convert angles from radians to degrees. Parameters ---------- x : array_like Input array in radians. $PARAMS Returns ------- y : ndarray of floats The corresponding degree values; if `out` was supplied this is a reference to it. 
$OUT_SCALAR_1 See Also -------- rad2deg : equivalent function Examples -------- Convert a radian array to degrees >>> rad = np.arange(12.)*np.pi/6 >>> np.degrees(rad) array([ 0., 30., 60., 90., 120., 150., 180., 210., 240., 270., 300., 330.]) >>> out = np.zeros((rad.shape)) >>> r = np.degrees(rad, out) >>> np.all(r == out) True """) add_newdoc('numpy.core.umath', 'rad2deg', """ Convert angles from radians to degrees. Parameters ---------- x : array_like Angle in radians. $PARAMS Returns ------- y : ndarray The corresponding angle in degrees. $OUT_SCALAR_1 See Also -------- deg2rad : Convert angles from degrees to radians. unwrap : Remove large jumps in angle by wrapping. Notes ----- .. versionadded:: 1.3.0 rad2deg(x) is ``180 * x / pi``. Examples -------- >>> np.rad2deg(np.pi/2) 90.0 """) add_newdoc('numpy.core.umath', 'heaviside', """ Compute the Heaviside step function. The Heaviside step function is defined as:: 0 if x1 < 0 heaviside(x1, x2) = x2 if x1 == 0 1 if x1 > 0 where `x2` is often taken to be 0.5, but 0 and 1 are also sometimes used. Parameters ---------- x1 : array_like Input values. x2 : array_like The value of the function when x1 is 0. $BROADCASTABLE_2 $PARAMS Returns ------- out : ndarray or scalar The output array, element-wise Heaviside step function of `x1`. $OUT_SCALAR_2 Notes ----- .. versionadded:: 1.13.0 References ---------- .. Wikipedia, "Heaviside step function", https://en.wikipedia.org/wiki/Heaviside_step_function Examples -------- >>> np.heaviside([-1.5, 0, 2.0], 0.5) array([ 0. , 0.5, 1. ]) >>> np.heaviside([-1.5, 0, 2.0], 1) array([ 0., 1., 1.]) """) add_newdoc('numpy.core.umath', 'divide', """ Divide arguments element-wise. Parameters ---------- x1 : array_like Dividend array. x2 : array_like Divisor array. $BROADCASTABLE_2 $PARAMS Returns ------- y : ndarray or scalar The quotient ``x1/x2``, element-wise. $OUT_SCALAR_2 See Also -------- seterr : Set whether to raise or warn on overflow, underflow and division by zero. 
Notes ----- Equivalent to ``x1`` / ``x2`` in terms of array-broadcasting. Behavior on division by zero can be changed using ``seterr``. In Python 2, when both ``x1`` and ``x2`` are of an integer type, ``divide`` will behave like ``floor_divide``. In Python 3, it behaves like ``true_divide``. Examples -------- >>> np.divide(2.0, 4.0) 0.5 >>> x1 = np.arange(9.0).reshape((3, 3)) >>> x2 = np.arange(3.0) >>> np.divide(x1, x2) array([[ NaN, 1. , 1. ], [ Inf, 4. , 2.5], [ Inf, 7. , 4. ]]) Note the behavior with integer types (Python 2 only): >>> np.divide(2, 4) 0 >>> np.divide(2, 4.) 0.5 Division by zero always yields zero in integer arithmetic (again, Python 2 only), and does not raise an exception or a warning: >>> np.divide(np.array([0, 1], dtype=int), np.array([0, 0], dtype=int)) array([0, 0]) Division by zero can, however, be caught using ``seterr``: >>> old_err_state = np.seterr(divide='raise') >>> np.divide(1, 0) Traceback (most recent call last): File "<stdin>", line 1, in <module> FloatingPointError: divide by zero encountered in divide >>> ignored_states = np.seterr(**old_err_state) >>> np.divide(1, 0) 0 The ``/`` operator can be used as a shorthand for ``np.divide`` on ndarrays. >>> x1 = np.arange(9.0).reshape((3, 3)) >>> x2 = 2 * np.ones(3) >>> x1 / x2 array([[0. , 0.5, 1. ], [1.5, 2. , 2.5], [3. , 3.5, 4. ]]) """) add_newdoc('numpy.core.umath', 'equal', """ Return (x1 == x2) element-wise. Parameters ---------- x1, x2 : array_like Input arrays. $BROADCASTABLE_2 $PARAMS Returns ------- out : ndarray or scalar Output array, element-wise comparison of `x1` and `x2`. Typically of type bool, unless ``dtype=object`` is passed. $OUT_SCALAR_2 See Also -------- not_equal, greater_equal, less_equal, greater, less Examples -------- >>> np.equal([0, 1, 3], np.arange(3)) array([ True, True, False]) What is compared are values, not types. 
So an int (1) and an array of length one can evaluate as True: >>> np.equal(1, np.ones(1)) array([ True]) The ``==`` operator can be used as a shorthand for ``np.equal`` on ndarrays. >>> a = np.array([2, 4, 6]) >>> b = np.array([2, 4, 2]) >>> a == b array([ True, True, False]) """) add_newdoc('numpy.core.umath', 'exp', """ Calculate the exponential of all elements in the input array. Parameters ---------- x : array_like Input values. $PARAMS Returns ------- out : ndarray or scalar Output array, element-wise exponential of `x`. $OUT_SCALAR_1 See Also -------- expm1 : Calculate ``exp(x) - 1`` for all elements in the array. exp2 : Calculate ``2**x`` for all elements in the array. Notes ----- The irrational number ``e`` is also known as Euler's number. It is approximately 2.718281, and is the base of the natural logarithm, ``ln`` (this means that, if :math:`x = \\ln y = \\log_e y`, then :math:`e^x = y`. For real input, ``exp(x)`` is always positive. For complex arguments, ``x = a + ib``, we can write :math:`e^x = e^a e^{ib}`. The first term, :math:`e^a`, is already known (it is the real argument, described above). The second term, :math:`e^{ib}`, is :math:`\\cos b + i \\sin b`, a function with magnitude 1 and a periodic phase. References ---------- .. [1] Wikipedia, "Exponential function", https://en.wikipedia.org/wiki/Exponential_function .. [2] M. Abramovitz and I. A. Stegun, "Handbook of Mathematical Functions with Formulas, Graphs, and Mathematical Tables," Dover, 1964, p. 69, http://www.math.sfu.ca/~cbm/aands/page_69.htm Examples -------- Plot the magnitude and phase of ``exp(x)`` in the complex plane: >>> import matplotlib.pyplot as plt >>> x = np.linspace(-2*np.pi, 2*np.pi, 100) >>> xx = x + 1j * x[:, np.newaxis] # a + ib over complex plane >>> out = np.exp(xx) >>> plt.subplot(121) >>> plt.imshow(np.abs(out), ... extent=[-2*np.pi, 2*np.pi, -2*np.pi, 2*np.pi], cmap='gray') >>> plt.title('Magnitude of exp(x)') >>> plt.subplot(122) >>> plt.imshow(np.angle(out), ... 
extent=[-2*np.pi, 2*np.pi, -2*np.pi, 2*np.pi], cmap='hsv') >>> plt.title('Phase (angle) of exp(x)') >>> plt.show() """) add_newdoc('numpy.core.umath', 'exp2', """ Calculate `2**p` for all `p` in the input array. Parameters ---------- x : array_like Input values. $PARAMS Returns ------- out : ndarray or scalar Element-wise 2 to the power `x`. $OUT_SCALAR_1 See Also -------- power Notes ----- .. versionadded:: 1.3.0 Examples -------- >>> np.exp2([2, 3]) array([ 4., 8.]) """) add_newdoc('numpy.core.umath', 'expm1', """ Calculate ``exp(x) - 1`` for all elements in the array. Parameters ---------- x : array_like Input values. $PARAMS Returns ------- out : ndarray or scalar Element-wise exponential minus one: ``out = exp(x) - 1``. $OUT_SCALAR_1 See Also -------- log1p : ``log(1 + x)``, the inverse of expm1. Notes ----- This function provides greater precision than ``exp(x) - 1`` for small values of ``x``. Examples -------- The true value of ``exp(1e-10) - 1`` is ``1.00000000005e-10`` to about 32 significant digits. This example shows the superiority of expm1 in this case. >>> np.expm1(1e-10) 1.00000000005e-10 >>> np.exp(1e-10) - 1 1.000000082740371e-10 """) add_newdoc('numpy.core.umath', 'fabs', """ Compute the absolute values element-wise. This function returns the absolute values (positive magnitude) of the data in `x`. Complex values are not handled, use `absolute` to find the absolute values of complex data. Parameters ---------- x : array_like The array of numbers for which the absolute values are required. If `x` is a scalar, the result `y` will also be a scalar. $PARAMS Returns ------- y : ndarray or scalar The absolute values of `x`, the returned values are always floats. $OUT_SCALAR_1 See Also -------- absolute : Absolute values including `complex` types. Examples -------- >>> np.fabs(-1) 1.0 >>> np.fabs([-1.2, 1.2]) array([ 1.2, 1.2]) """) add_newdoc('numpy.core.umath', 'floor', """ Return the floor of the input, element-wise. 
The floor of the scalar `x` is the largest integer `i`, such that `i <= x`. It is often denoted as :math:`\\lfloor x \\rfloor`. Parameters ---------- x : array_like Input data. $PARAMS Returns ------- y : ndarray or scalar The floor of each element in `x`. $OUT_SCALAR_1 See Also -------- ceil, trunc, rint, fix Notes ----- Some spreadsheet programs calculate the "floor-towards-zero", where ``floor(-2.5) == -2``. NumPy instead uses the definition of `floor` where `floor(-2.5) == -3`. The "floor-towards-zero" function is called ``fix`` in NumPy. Examples -------- >>> a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0]) >>> np.floor(a) array([-2., -2., -1., 0., 1., 1., 2.]) """) add_newdoc('numpy.core.umath', 'floor_divide', """ Return the largest integer smaller or equal to the division of the inputs. It is equivalent to the Python ``//`` operator and pairs with the Python ``%`` (`remainder`), function so that ``a = a % b + b * (a // b)`` up to roundoff. Parameters ---------- x1 : array_like Numerator. x2 : array_like Denominator. $BROADCASTABLE_2 $PARAMS Returns ------- y : ndarray y = floor(`x1`/`x2`) $OUT_SCALAR_2 See Also -------- remainder : Remainder complementary to floor_divide. divmod : Simultaneous floor division and remainder. divide : Standard division. floor : Round a number to the nearest integer toward minus infinity. ceil : Round a number to the nearest integer toward infinity. Examples -------- >>> np.floor_divide(7,3) 2 >>> np.floor_divide([1., 2., 3., 4.], 2.5) array([ 0., 0., 1., 1.]) The ``//`` operator can be used as a shorthand for ``np.floor_divide`` on ndarrays. >>> x1 = np.array([1., 2., 3., 4.]) >>> x1 // 2.5 array([0., 0., 1., 1.]) """) add_newdoc('numpy.core.umath', 'fmod', """ Return the element-wise remainder of division. This is the NumPy implementation of the C library function fmod, the remainder has the same sign as the dividend `x1`. 
It is equivalent to the Matlab(TM) ``rem`` function and should not be confused with the Python modulus operator ``x1 % x2``. Parameters ---------- x1 : array_like Dividend. x2 : array_like Divisor. $BROADCASTABLE_2 $PARAMS Returns ------- y : array_like The remainder of the division of `x1` by `x2`. $OUT_SCALAR_2 See Also -------- remainder : Equivalent to the Python ``%`` operator. divide Notes ----- The result of the modulo operation for negative dividend and divisors is bound by conventions. For `fmod`, the sign of result is the sign of the dividend, while for `remainder` the sign of the result is the sign of the divisor. The `fmod` function is equivalent to the Matlab(TM) ``rem`` function. Examples -------- >>> np.fmod([-3, -2, -1, 1, 2, 3], 2) array([-1, 0, -1, 1, 0, 1]) >>> np.remainder([-3, -2, -1, 1, 2, 3], 2) array([1, 0, 1, 1, 0, 1]) >>> np.fmod([5, 3], [2, 2.]) array([ 1., 1.]) >>> a = np.arange(-3, 3).reshape(3, 2) >>> a array([[-3, -2], [-1, 0], [ 1, 2]]) >>> np.fmod(a, [2,2]) array([[-1, 0], [-1, 0], [ 1, 0]]) """) add_newdoc('numpy.core.umath', 'greater', """ Return the truth value of (x1 > x2) element-wise. Parameters ---------- x1, x2 : array_like Input arrays. $BROADCASTABLE_2 $PARAMS Returns ------- out : ndarray or scalar Output array, element-wise comparison of `x1` and `x2`. Typically of type bool, unless ``dtype=object`` is passed. $OUT_SCALAR_2 See Also -------- greater_equal, less, less_equal, equal, not_equal Examples -------- >>> np.greater([4,2],[2,2]) array([ True, False]) The ``>`` operator can be used as a shorthand for ``np.greater`` on ndarrays. >>> a = np.array([4, 2]) >>> b = np.array([2, 2]) >>> a > b array([ True, False]) """) add_newdoc('numpy.core.umath', 'greater_equal', """ Return the truth value of (x1 >= x2) element-wise. Parameters ---------- x1, x2 : array_like Input arrays. $BROADCASTABLE_2 $PARAMS Returns ------- out : bool or ndarray of bool Output array, element-wise comparison of `x1` and `x2`. 
Typically of type bool, unless ``dtype=object`` is passed. $OUT_SCALAR_2 See Also -------- greater, less, less_equal, equal, not_equal Examples -------- >>> np.greater_equal([4, 2, 1], [2, 2, 2]) array([ True, True, False]) The ``>=`` operator can be used as a shorthand for ``np.greater_equal`` on ndarrays. >>> a = np.array([4, 2, 1]) >>> b = np.array([2, 2, 2]) >>> a >= b array([ True, True, False]) """) add_newdoc('numpy.core.umath', 'hypot', """ Given the "legs" of a right triangle, return its hypotenuse. Equivalent to ``sqrt(x1**2 + x2**2)``, element-wise. If `x1` or `x2` is scalar_like (i.e., unambiguously cast-able to a scalar type), it is broadcast for use with each element of the other argument. (See Examples) Parameters ---------- x1, x2 : array_like Leg of the triangle(s). $BROADCASTABLE_2 $PARAMS Returns ------- z : ndarray The hypotenuse of the triangle(s). $OUT_SCALAR_2 Examples -------- >>> np.hypot(3*np.ones((3, 3)), 4*np.ones((3, 3))) array([[ 5., 5., 5.], [ 5., 5., 5.], [ 5., 5., 5.]]) Example showing broadcast of scalar_like argument: >>> np.hypot(3*np.ones((3, 3)), [4]) array([[ 5., 5., 5.], [ 5., 5., 5.], [ 5., 5., 5.]]) """) add_newdoc('numpy.core.umath', 'invert', """ Compute bit-wise inversion, or bit-wise NOT, element-wise. Computes the bit-wise NOT of the underlying binary representation of the integers in the input arrays. This ufunc implements the C/Python operator ``~``. For signed integer inputs, the two's complement is returned. In a two's-complement system negative numbers are represented by the two's complement of the absolute value. This is the most common method of representing signed integers on computers [1]_. A N-bit two's-complement system can represent every integer in the range :math:`-2^{N-1}` to :math:`+2^{N-1}-1`. Parameters ---------- x : array_like Only integer and boolean types are handled. $PARAMS Returns ------- out : ndarray or scalar Result. 
$OUT_SCALAR_1 See Also -------- bitwise_and, bitwise_or, bitwise_xor logical_not binary_repr : Return the binary representation of the input number as a string. Notes ----- `bitwise_not` is an alias for `invert`: >>> np.bitwise_not is np.invert True References ---------- .. [1] Wikipedia, "Two's complement", https://en.wikipedia.org/wiki/Two's_complement Examples -------- We've seen that 13 is represented by ``00001101``. The invert or bit-wise NOT of 13 is then: >>> x = np.invert(np.array(13, dtype=np.uint8)) >>> x 242 >>> np.binary_repr(x, width=8) '11110010' The result depends on the bit-width: >>> x = np.invert(np.array(13, dtype=np.uint16)) >>> x 65522 >>> np.binary_repr(x, width=16) '1111111111110010' When using signed integer types the result is the two's complement of the result for the unsigned type: >>> np.invert(np.array([13], dtype=np.int8)) array([-14], dtype=int8) >>> np.binary_repr(-14, width=8) '11110010' Booleans are accepted as well: >>> np.invert(np.array([True, False])) array([False, True]) The ``~`` operator can be used as a shorthand for ``np.invert`` on ndarrays. >>> x1 = np.array([True, False]) >>> ~x1 array([False, True]) """) add_newdoc('numpy.core.umath', 'isfinite', """ Test element-wise for finiteness (not infinity or not Not a Number). The result is returned as a boolean array. Parameters ---------- x : array_like Input values. $PARAMS Returns ------- y : ndarray, bool True where ``x`` is not positive infinity, negative infinity, or NaN; false otherwise. $OUT_SCALAR_1 See Also -------- isinf, isneginf, isposinf, isnan Notes ----- Not a Number, positive infinity and negative infinity are considered to be non-finite. NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic (IEEE 754). This means that Not a Number is not equivalent to infinity. Also that positive infinity is not equivalent to negative infinity. But infinity is equivalent to positive infinity. 
Errors result if the second argument is also supplied when `x` is a scalar input, or if first and second arguments have different shapes. Examples -------- >>> np.isfinite(1) True >>> np.isfinite(0) True >>> np.isfinite(np.nan) False >>> np.isfinite(np.inf) False >>> np.isfinite(np.NINF) False >>> np.isfinite([np.log(-1.),1.,np.log(0)]) array([False, True, False]) >>> x = np.array([-np.inf, 0., np.inf]) >>> y = np.array([2, 2, 2]) >>> np.isfinite(x, y) array([0, 1, 0]) >>> y array([0, 1, 0]) """) add_newdoc('numpy.core.umath', 'isinf', """ Test element-wise for positive or negative infinity. Returns a boolean array of the same shape as `x`, True where ``x == +/-inf``, otherwise False. Parameters ---------- x : array_like Input values $PARAMS Returns ------- y : bool (scalar) or boolean ndarray True where ``x`` is positive or negative infinity, false otherwise. $OUT_SCALAR_1 See Also -------- isneginf, isposinf, isnan, isfinite Notes ----- NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic (IEEE 754). Errors result if the second argument is supplied when the first argument is a scalar, or if the first and second arguments have different shapes. Examples -------- >>> np.isinf(np.inf) True >>> np.isinf(np.nan) False >>> np.isinf(np.NINF) True >>> np.isinf([np.inf, -np.inf, 1.0, np.nan]) array([ True, True, False, False]) >>> x = np.array([-np.inf, 0., np.inf]) >>> y = np.array([2, 2, 2]) >>> np.isinf(x, y) array([1, 0, 1]) >>> y array([1, 0, 1]) """) add_newdoc('numpy.core.umath', 'isnan', """ Test element-wise for NaN and return result as a boolean array. Parameters ---------- x : array_like Input array. $PARAMS Returns ------- y : ndarray or bool True where ``x`` is NaN, false otherwise. $OUT_SCALAR_1 See Also -------- isinf, isneginf, isposinf, isfinite, isnat Notes ----- NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic (IEEE 754). This means that Not a Number is not equivalent to infinity. 
Examples -------- >>> np.isnan(np.nan) True >>> np.isnan(np.inf) False >>> np.isnan([np.log(-1.),1.,np.log(0)]) array([ True, False, False]) """) add_newdoc('numpy.core.umath', 'isnat', """ Test element-wise for NaT (not a time) and return result as a boolean array. .. versionadded:: 1.13.0 Parameters ---------- x : array_like Input array with datetime or timedelta data type. $PARAMS Returns ------- y : ndarray or bool True where ``x`` is NaT, false otherwise. $OUT_SCALAR_1 See Also -------- isnan, isinf, isneginf, isposinf, isfinite Examples -------- >>> np.isnat(np.datetime64("NaT")) True >>> np.isnat(np.datetime64("2016-01-01")) False >>> np.isnat(np.array(["NaT", "2016-01-01"], dtype="datetime64[ns]")) array([ True, False]) """) add_newdoc('numpy.core.umath', 'left_shift', """ Shift the bits of an integer to the left. Bits are shifted to the left by appending `x2` 0s at the right of `x1`. Since the internal representation of numbers is in binary format, this operation is equivalent to multiplying `x1` by ``2**x2``. Parameters ---------- x1 : array_like of integer type Input values. x2 : array_like of integer type Number of zeros to append to `x1`. Has to be non-negative. $BROADCASTABLE_2 $PARAMS Returns ------- out : array of integer type Return `x1` with bits shifted `x2` times to the left. $OUT_SCALAR_2 See Also -------- right_shift : Shift the bits of an integer to the right. binary_repr : Return the binary representation of the input number as a string. 
Examples -------- >>> np.binary_repr(5) '101' >>> np.left_shift(5, 2) 20 >>> np.binary_repr(20) '10100' >>> np.left_shift(5, [1,2,3]) array([10, 20, 40]) Note that the dtype of the second argument may change the dtype of the result and can lead to unexpected results in some cases (see :ref:`Casting Rules <ufuncs.casting>`): >>> a = np.left_shift(np.uint8(255), 1) # Expect 254 >>> print(a, type(a)) # Unexpected result due to upcasting 510 <class 'numpy.int64'> >>> b = np.left_shift(np.uint8(255), np.uint8(1)) >>> print(b, type(b)) 254 <class 'numpy.uint8'> The ``<<`` operator can be used as a shorthand for ``np.left_shift`` on ndarrays. >>> x1 = 5 >>> x2 = np.array([1, 2, 3]) >>> x1 << x2 array([10, 20, 40]) """) add_newdoc('numpy.core.umath', 'less', """ Return the truth value of (x1 < x2) element-wise. Parameters ---------- x1, x2 : array_like Input arrays. $BROADCASTABLE_2 $PARAMS Returns ------- out : ndarray or scalar Output array, element-wise comparison of `x1` and `x2`. Typically of type bool, unless ``dtype=object`` is passed. $OUT_SCALAR_2 See Also -------- greater, less_equal, greater_equal, equal, not_equal Examples -------- >>> np.less([1, 2], [2, 2]) array([ True, False]) The ``<`` operator can be used as a shorthand for ``np.less`` on ndarrays. >>> a = np.array([1, 2]) >>> b = np.array([2, 2]) >>> a < b array([ True, False]) """) add_newdoc('numpy.core.umath', 'less_equal', """ Return the truth value of (x1 <= x2) element-wise. Parameters ---------- x1, x2 : array_like Input arrays. $BROADCASTABLE_2 $PARAMS Returns ------- out : ndarray or scalar Output array, element-wise comparison of `x1` and `x2`. Typically of type bool, unless ``dtype=object`` is passed. $OUT_SCALAR_2 See Also -------- greater, less, greater_equal, equal, not_equal Examples -------- >>> np.less_equal([4, 2, 1], [2, 2, 2]) array([False, True, True]) The ``<=`` operator can be used as a shorthand for ``np.less_equal`` on ndarrays. 
>>> a = np.array([4, 2, 1]) >>> b = np.array([2, 2, 2]) >>> a <= b array([False, True, True]) """) add_newdoc('numpy.core.umath', 'log', """ Natural logarithm, element-wise. The natural logarithm `log` is the inverse of the exponential function, so that `log(exp(x)) = x`. The natural logarithm is logarithm in base `e`. Parameters ---------- x : array_like Input value. $PARAMS Returns ------- y : ndarray The natural logarithm of `x`, element-wise. $OUT_SCALAR_1 See Also -------- log10, log2, log1p, emath.log Notes ----- Logarithm is a multivalued function: for each `x` there is an infinite number of `z` such that `exp(z) = x`. The convention is to return the `z` whose imaginary part lies in `[-pi, pi]`. For real-valued input data types, `log` always returns real output. For each value that cannot be expressed as a real number or infinity, it yields ``nan`` and sets the `invalid` floating point error flag. For complex-valued input, `log` is a complex analytical function that has a branch cut `[-inf, 0]` and is continuous from above on it. `log` handles the floating-point negative zero as an infinitesimal negative number, conforming to the C99 standard. References ---------- .. [1] M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions", 10th printing, 1964, pp. 67. http://www.math.sfu.ca/~cbm/aands/ .. [2] Wikipedia, "Logarithm". https://en.wikipedia.org/wiki/Logarithm Examples -------- >>> np.log([1, np.e, np.e**2, 0]) array([ 0., 1., 2., -Inf]) """) add_newdoc('numpy.core.umath', 'log10', """ Return the base 10 logarithm of the input array, element-wise. Parameters ---------- x : array_like Input values. $PARAMS Returns ------- y : ndarray The logarithm to the base 10 of `x`, element-wise. NaNs are returned where x is negative. $OUT_SCALAR_1 See Also -------- emath.log10 Notes ----- Logarithm is a multivalued function: for each `x` there is an infinite number of `z` such that `10**z = x`. 
The convention is to return the `z` whose imaginary part lies in `[-pi, pi]`. For real-valued input data types, `log10` always returns real output. For each value that cannot be expressed as a real number or infinity, it yields ``nan`` and sets the `invalid` floating point error flag. For complex-valued input, `log10` is a complex analytical function that has a branch cut `[-inf, 0]` and is continuous from above on it. `log10` handles the floating-point negative zero as an infinitesimal negative number, conforming to the C99 standard. References ---------- .. [1] M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions", 10th printing, 1964, pp. 67. http://www.math.sfu.ca/~cbm/aands/ .. [2] Wikipedia, "Logarithm". https://en.wikipedia.org/wiki/Logarithm Examples -------- >>> np.log10([1e-15, -3.]) array([-15., nan]) """) add_newdoc('numpy.core.umath', 'log2', """ Base-2 logarithm of `x`. Parameters ---------- x : array_like Input values. $PARAMS Returns ------- y : ndarray Base-2 logarithm of `x`. $OUT_SCALAR_1 See Also -------- log, log10, log1p, emath.log2 Notes ----- .. versionadded:: 1.3.0 Logarithm is a multivalued function: for each `x` there is an infinite number of `z` such that `2**z = x`. The convention is to return the `z` whose imaginary part lies in `[-pi, pi]`. For real-valued input data types, `log2` always returns real output. For each value that cannot be expressed as a real number or infinity, it yields ``nan`` and sets the `invalid` floating point error flag. For complex-valued input, `log2` is a complex analytical function that has a branch cut `[-inf, 0]` and is continuous from above on it. `log2` handles the floating-point negative zero as an infinitesimal negative number, conforming to the C99 standard. 
Examples -------- >>> x = np.array([0, 1, 2, 2**4]) >>> np.log2(x) array([-Inf, 0., 1., 4.]) >>> xi = np.array([0+1.j, 1, 2+0.j, 4.j]) >>> np.log2(xi) array([ 0.+2.26618007j, 0.+0.j , 1.+0.j , 2.+2.26618007j]) """) add_newdoc('numpy.core.umath', 'logaddexp', """ Logarithm of the sum of exponentiations of the inputs. Calculates ``log(exp(x1) + exp(x2))``. This function is useful in statistics where the calculated probabilities of events may be so small as to exceed the range of normal floating point numbers. In such cases the logarithm of the calculated probability is stored. This function allows adding probabilities stored in such a fashion. Parameters ---------- x1, x2 : array_like Input values. $BROADCASTABLE_2 $PARAMS Returns ------- result : ndarray Logarithm of ``exp(x1) + exp(x2)``. $OUT_SCALAR_2 See Also -------- logaddexp2: Logarithm of the sum of exponentiations of inputs in base 2. Notes ----- .. versionadded:: 1.3.0 Examples -------- >>> prob1 = np.log(1e-50) >>> prob2 = np.log(2.5e-50) >>> prob12 = np.logaddexp(prob1, prob2) >>> prob12 -113.87649168120691 >>> np.exp(prob12) 3.5000000000000057e-50 """) add_newdoc('numpy.core.umath', 'logaddexp2', """ Logarithm of the sum of exponentiations of the inputs in base-2. Calculates ``log2(2**x1 + 2**x2)``. This function is useful in machine learning when the calculated probabilities of events may be so small as to exceed the range of normal floating point numbers. In such cases the base-2 logarithm of the calculated probability can be used instead. This function allows adding probabilities stored in such a fashion. Parameters ---------- x1, x2 : array_like Input values. $BROADCASTABLE_2 $PARAMS Returns ------- result : ndarray Base-2 logarithm of ``2**x1 + 2**x2``. $OUT_SCALAR_2 See Also -------- logaddexp: Logarithm of the sum of exponentiations of the inputs. Notes ----- .. 
versionadded:: 1.3.0 Examples -------- >>> prob1 = np.log2(1e-50) >>> prob2 = np.log2(2.5e-50) >>> prob12 = np.logaddexp2(prob1, prob2) >>> prob1, prob2, prob12 (-166.09640474436813, -164.77447664948076, -164.28904982231052) >>> 2**prob12 3.4999999999999914e-50 """) add_newdoc('numpy.core.umath', 'log1p', """ Return the natural logarithm of one plus the input array, element-wise. Calculates ``log(1 + x)``. Parameters ---------- x : array_like Input values. $PARAMS Returns ------- y : ndarray Natural logarithm of `1 + x`, element-wise. $OUT_SCALAR_1 See Also -------- expm1 : ``exp(x) - 1``, the inverse of `log1p`. Notes ----- For real-valued input, `log1p` is accurate also for `x` so small that `1 + x == 1` in floating-point accuracy. Logarithm is a multivalued function: for each `x` there is an infinite number of `z` such that `exp(z) = 1 + x`. The convention is to return the `z` whose imaginary part lies in `[-pi, pi]`. For real-valued input data types, `log1p` always returns real output. For each value that cannot be expressed as a real number or infinity, it yields ``nan`` and sets the `invalid` floating point error flag. For complex-valued input, `log1p` is a complex analytical function that has a branch cut `[-inf, -1]` and is continuous from above on it. `log1p` handles the floating-point negative zero as an infinitesimal negative number, conforming to the C99 standard. References ---------- .. [1] M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions", 10th printing, 1964, pp. 67. http://www.math.sfu.ca/~cbm/aands/ .. [2] Wikipedia, "Logarithm". https://en.wikipedia.org/wiki/Logarithm Examples -------- >>> np.log1p(1e-99) 1e-99 >>> np.log(1 + 1e-99) 0.0 """) add_newdoc('numpy.core.umath', 'logical_and', """ Compute the truth value of x1 AND x2 element-wise. Parameters ---------- x1, x2 : array_like Input arrays. 
$BROADCASTABLE_2 $PARAMS Returns ------- y : ndarray or bool Boolean result of the logical AND operation applied to the elements of `x1` and `x2`; the shape is determined by broadcasting. $OUT_SCALAR_2 See Also -------- logical_or, logical_not, logical_xor bitwise_and Examples -------- >>> np.logical_and(True, False) False >>> np.logical_and([True, False], [False, False]) array([False, False]) >>> x = np.arange(5) >>> np.logical_and(x>1, x<4) array([False, False, True, True, False]) The ``&`` operator can be used as a shorthand for ``np.logical_and`` on boolean ndarrays. >>> a = np.array([True, False]) >>> b = np.array([False, False]) >>> a & b array([False, False]) """) add_newdoc('numpy.core.umath', 'logical_not', """ Compute the truth value of NOT x element-wise. Parameters ---------- x : array_like Logical NOT is applied to the elements of `x`. $PARAMS Returns ------- y : bool or ndarray of bool Boolean result with the same shape as `x` of the NOT operation on elements of `x`. $OUT_SCALAR_1 See Also -------- logical_and, logical_or, logical_xor Examples -------- >>> np.logical_not(3) False >>> np.logical_not([True, False, 0, 1]) array([False, True, True, False]) >>> x = np.arange(5) >>> np.logical_not(x<3) array([False, False, False, True, True]) """) add_newdoc('numpy.core.umath', 'logical_or', """ Compute the truth value of x1 OR x2 element-wise. Parameters ---------- x1, x2 : array_like Logical OR is applied to the elements of `x1` and `x2`. $BROADCASTABLE_2 $PARAMS Returns ------- y : ndarray or bool Boolean result of the logical OR operation applied to the elements of `x1` and `x2`; the shape is determined by broadcasting. 
$OUT_SCALAR_2 See Also -------- logical_and, logical_not, logical_xor bitwise_or Examples -------- >>> np.logical_or(True, False) True >>> np.logical_or([True, False], [False, False]) array([ True, False]) >>> x = np.arange(5) >>> np.logical_or(x < 1, x > 3) array([ True, False, False, False, True]) The ``|`` operator can be used as a shorthand for ``np.logical_or`` on boolean ndarrays. >>> a = np.array([True, False]) >>> b = np.array([False, False]) >>> a | b array([ True, False]) """) add_newdoc('numpy.core.umath', 'logical_xor', """ Compute the truth value of x1 XOR x2, element-wise. Parameters ---------- x1, x2 : array_like Logical XOR is applied to the elements of `x1` and `x2`. $BROADCASTABLE_2 $PARAMS Returns ------- y : bool or ndarray of bool Boolean result of the logical XOR operation applied to the elements of `x1` and `x2`; the shape is determined by broadcasting. $OUT_SCALAR_2 See Also -------- logical_and, logical_or, logical_not, bitwise_xor Examples -------- >>> np.logical_xor(True, False) True >>> np.logical_xor([True, True, False, False], [True, False, True, False]) array([False, True, True, False]) >>> x = np.arange(5) >>> np.logical_xor(x < 1, x > 3) array([ True, False, False, False, True]) Simple example showing support of broadcasting >>> np.logical_xor(0, np.eye(2)) array([[ True, False], [False, True]]) """) add_newdoc('numpy.core.umath', 'maximum', """ Element-wise maximum of array elements. Compare two arrays and returns a new array containing the element-wise maxima. If one of the elements being compared is a NaN, then that element is returned. If both elements are NaNs then the first is returned. The latter distinction is important for complex NaNs, which are defined as at least one of the real or imaginary parts being a NaN. The net effect is that NaNs are propagated. Parameters ---------- x1, x2 : array_like The arrays holding the elements to be compared. 
$BROADCASTABLE_2 $PARAMS Returns ------- y : ndarray or scalar The maximum of `x1` and `x2`, element-wise. $OUT_SCALAR_2 See Also -------- minimum : Element-wise minimum of two arrays, propagates NaNs. fmax : Element-wise maximum of two arrays, ignores NaNs. amax : The maximum value of an array along a given axis, propagates NaNs. nanmax : The maximum value of an array along a given axis, ignores NaNs. fmin, amin, nanmin Notes ----- The maximum is equivalent to ``np.where(x1 >= x2, x1, x2)`` when neither x1 nor x2 are nans, but it is faster and does proper broadcasting. Examples -------- >>> np.maximum([2, 3, 4], [1, 5, 2]) array([2, 5, 4]) >>> np.maximum(np.eye(2), [0.5, 2]) # broadcasting array([[ 1. , 2. ], [ 0.5, 2. ]]) >>> np.maximum([np.nan, 0, np.nan], [0, np.nan, np.nan]) array([nan, nan, nan]) >>> np.maximum(np.Inf, 1) inf """) add_newdoc('numpy.core.umath', 'minimum', """ Element-wise minimum of array elements. Compare two arrays and returns a new array containing the element-wise minima. If one of the elements being compared is a NaN, then that element is returned. If both elements are NaNs then the first is returned. The latter distinction is important for complex NaNs, which are defined as at least one of the real or imaginary parts being a NaN. The net effect is that NaNs are propagated. Parameters ---------- x1, x2 : array_like The arrays holding the elements to be compared. $BROADCASTABLE_2 $PARAMS Returns ------- y : ndarray or scalar The minimum of `x1` and `x2`, element-wise. $OUT_SCALAR_2 See Also -------- maximum : Element-wise maximum of two arrays, propagates NaNs. fmin : Element-wise minimum of two arrays, ignores NaNs. amin : The minimum value of an array along a given axis, propagates NaNs. nanmin : The minimum value of an array along a given axis, ignores NaNs. fmax, amax, nanmax Notes ----- The minimum is equivalent to ``np.where(x1 <= x2, x1, x2)`` when neither x1 nor x2 are NaNs, but it is faster and does proper broadcasting. 
Examples -------- >>> np.minimum([2, 3, 4], [1, 5, 2]) array([1, 3, 2]) >>> np.minimum(np.eye(2), [0.5, 2]) # broadcasting array([[ 0.5, 0. ], [ 0. , 1. ]]) >>> np.minimum([np.nan, 0, np.nan],[0, np.nan, np.nan]) array([nan, nan, nan]) >>> np.minimum(-np.Inf, 1) -inf """) add_newdoc('numpy.core.umath', 'fmax', """ Element-wise maximum of array elements. Compare two arrays and returns a new array containing the element-wise maxima. If one of the elements being compared is a NaN, then the non-nan element is returned. If both elements are NaNs then the first is returned. The latter distinction is important for complex NaNs, which are defined as at least one of the real or imaginary parts being a NaN. The net effect is that NaNs are ignored when possible. Parameters ---------- x1, x2 : array_like The arrays holding the elements to be compared. $BROADCASTABLE_2 $PARAMS Returns ------- y : ndarray or scalar The maximum of `x1` and `x2`, element-wise. $OUT_SCALAR_2 See Also -------- fmin : Element-wise minimum of two arrays, ignores NaNs. maximum : Element-wise maximum of two arrays, propagates NaNs. amax : The maximum value of an array along a given axis, propagates NaNs. nanmax : The maximum value of an array along a given axis, ignores NaNs. minimum, amin, nanmin Notes ----- .. versionadded:: 1.3.0 The fmax is equivalent to ``np.where(x1 >= x2, x1, x2)`` when neither x1 nor x2 are NaNs, but it is faster and does proper broadcasting. Examples -------- >>> np.fmax([2, 3, 4], [1, 5, 2]) array([ 2., 5., 4.]) >>> np.fmax(np.eye(2), [0.5, 2]) array([[ 1. , 2. ], [ 0.5, 2. ]]) >>> np.fmax([np.nan, 0, np.nan],[0, np.nan, np.nan]) array([ 0., 0., nan]) """) add_newdoc('numpy.core.umath', 'fmin', """ Element-wise minimum of array elements. Compare two arrays and returns a new array containing the element-wise minima. If one of the elements being compared is a NaN, then the non-nan element is returned. If both elements are NaNs then the first is returned. 
The latter distinction is important for complex NaNs, which are defined as at least one of the real or imaginary parts being a NaN. The net effect is that NaNs are ignored when possible. Parameters ---------- x1, x2 : array_like The arrays holding the elements to be compared. $BROADCASTABLE_2 $PARAMS Returns ------- y : ndarray or scalar The minimum of `x1` and `x2`, element-wise. $OUT_SCALAR_2 See Also -------- fmax : Element-wise maximum of two arrays, ignores NaNs. minimum : Element-wise minimum of two arrays, propagates NaNs. amin : The minimum value of an array along a given axis, propagates NaNs. nanmin : The minimum value of an array along a given axis, ignores NaNs. maximum, amax, nanmax Notes ----- .. versionadded:: 1.3.0 The fmin is equivalent to ``np.where(x1 <= x2, x1, x2)`` when neither x1 nor x2 are NaNs, but it is faster and does proper broadcasting. Examples -------- >>> np.fmin([2, 3, 4], [1, 5, 2]) array([1, 3, 2]) >>> np.fmin(np.eye(2), [0.5, 2]) array([[ 0.5, 0. ], [ 0. , 1. ]]) >>> np.fmin([np.nan, 0, np.nan],[0, np.nan, np.nan]) array([ 0., 0., nan]) """) add_newdoc('numpy.core.umath', 'clip', """ Clip (limit) the values in an array. Given an interval, values outside the interval are clipped to the interval edges. For example, if an interval of ``[0, 1]`` is specified, values smaller than 0 become 0, and values larger than 1 become 1. Equivalent to but faster than ``np.minimum(np.maximum(a, a_min), a_max)``. Parameters ---------- a : array_like Array containing elements to clip. a_min : array_like Minimum value. a_max : array_like Maximum value. out : ndarray, optional The results will be placed in this array. It may be the input array for in-place clipping. `out` must be of the right shape to hold the output. Its type is preserved. 
$PARAMS See Also -------- numpy.clip : Wrapper that makes the ``a_min`` and ``a_max`` arguments optional, dispatching to one of `~numpy.core.umath.clip`, `~numpy.core.umath.minimum`, and `~numpy.core.umath.maximum`. Returns ------- clipped_array : ndarray An array with the elements of `a`, but where values < `a_min` are replaced with `a_min`, and those > `a_max` with `a_max`. """) add_newdoc('numpy.core.umath', 'matmul', """ Matrix product of two arrays. Parameters ---------- x1, x2 : array_like Input arrays, scalars not allowed. out : ndarray, optional A location into which the result is stored. If provided, it must have a shape that matches the signature `(n,k),(k,m)->(n,m)`. If not provided or None, a freshly-allocated array is returned. **kwargs For other keyword-only arguments, see the :ref:`ufunc docs <ufuncs.kwargs>`. .. versionadded:: 1.16 Now handles ufunc kwargs Returns ------- y : ndarray The matrix product of the inputs. This is a scalar only when both x1, x2 are 1-d vectors. Raises ------ ValueError If the last dimension of `x1` is not the same size as the second-to-last dimension of `x2`. If a scalar value is passed in. See Also -------- vdot : Complex-conjugating dot product. tensordot : Sum products over arbitrary axes. einsum : Einstein summation convention. dot : alternative matrix product with different broadcasting rules. Notes ----- The behavior depends on the arguments in the following way. - If both arguments are 2-D they are multiplied like conventional matrices. - If either argument is N-D, N > 2, it is treated as a stack of matrices residing in the last two indexes and broadcast accordingly. - If the first argument is 1-D, it is promoted to a matrix by prepending a 1 to its dimensions. After matrix multiplication the prepended 1 is removed. - If the second argument is 1-D, it is promoted to a matrix by appending a 1 to its dimensions. After matrix multiplication the appended 1 is removed. 
``matmul`` differs from ``dot`` in two important ways: - Multiplication by scalars is not allowed, use ``*`` instead. - Stacks of matrices are broadcast together as if the matrices were elements, respecting the signature ``(n,k),(k,m)->(n,m)``: >>> a = np.ones([9, 5, 7, 4]) >>> c = np.ones([9, 5, 4, 3]) >>> np.dot(a, c).shape (9, 5, 7, 9, 5, 3) >>> np.matmul(a, c).shape (9, 5, 7, 3) >>> # n is 7, k is 4, m is 3 The matmul function implements the semantics of the ``@`` operator introduced in Python 3.5 following :pep:`465`. Examples -------- For 2-D arrays it is the matrix product: >>> a = np.array([[1, 0], ... [0, 1]]) >>> b = np.array([[4, 1], ... [2, 2]]) >>> np.matmul(a, b) array([[4, 1], [2, 2]]) For 2-D mixed with 1-D, the result is the usual. >>> a = np.array([[1, 0], ... [0, 1]]) >>> b = np.array([1, 2]) >>> np.matmul(a, b) array([1, 2]) >>> np.matmul(b, a) array([1, 2]) Broadcasting is conventional for stacks of arrays >>> a = np.arange(2 * 2 * 4).reshape((2, 2, 4)) >>> b = np.arange(2 * 2 * 4).reshape((2, 4, 2)) >>> np.matmul(a,b).shape (2, 2, 2) >>> np.matmul(a, b)[0, 1, 1] 98 >>> sum(a[0, 1, :] * b[0 , :, 1]) 98 Vector, vector returns the scalar inner product, but neither argument is complex-conjugated: >>> np.matmul([2j, 3j], [2j, 3j]) (-13+0j) Scalar multiplication raises an error. >>> np.matmul([1,2], 3) Traceback (most recent call last): ... ValueError: matmul: Input operand 1 does not have enough dimensions ... The ``@`` operator can be used as a shorthand for ``np.matmul`` on ndarrays. >>> x1 = np.array([2j, 3j]) >>> x2 = np.array([2j, 3j]) >>> x1 @ x2 (-13+0j) .. versionadded:: 1.10.0 """) add_newdoc('numpy.core.umath', 'modf', """ Return the fractional and integral parts of an array, element-wise. The fractional and integral parts are negative if the given number is negative. Parameters ---------- x : array_like Input array. $PARAMS Returns ------- y1 : ndarray Fractional part of `x`. $OUT_SCALAR_1 y2 : ndarray Integral part of `x`. 
$OUT_SCALAR_1 Notes ----- For integer input the return values are floats. See Also -------- divmod : ``divmod(x, 1)`` is equivalent to ``modf`` with the return values switched, except it always has a positive remainder. Examples -------- >>> np.modf([0, 3.5]) (array([ 0. , 0.5]), array([ 0., 3.])) >>> np.modf(-0.5) (-0.5, -0) """) add_newdoc('numpy.core.umath', 'multiply', """ Multiply arguments element-wise. Parameters ---------- x1, x2 : array_like Input arrays to be multiplied. $BROADCASTABLE_2 $PARAMS Returns ------- y : ndarray The product of `x1` and `x2`, element-wise. $OUT_SCALAR_2 Notes ----- Equivalent to `x1` * `x2` in terms of array broadcasting. Examples -------- >>> np.multiply(2.0, 4.0) 8.0 >>> x1 = np.arange(9.0).reshape((3, 3)) >>> x2 = np.arange(3.0) >>> np.multiply(x1, x2) array([[ 0., 1., 4.], [ 0., 4., 10.], [ 0., 7., 16.]]) The ``*`` operator can be used as a shorthand for ``np.multiply`` on ndarrays. >>> x1 = np.arange(9.0).reshape((3, 3)) >>> x2 = np.arange(3.0) >>> x1 * x2 array([[ 0., 1., 4.], [ 0., 4., 10.], [ 0., 7., 16.]]) """) add_newdoc('numpy.core.umath', 'negative', """ Numerical negative, element-wise. Parameters ---------- x : array_like or scalar Input array. $PARAMS Returns ------- y : ndarray or scalar Returned array or scalar: `y = -x`. $OUT_SCALAR_1 Examples -------- >>> np.negative([1.,-1.]) array([-1., 1.]) The unary ``-`` operator can be used as a shorthand for ``np.negative`` on ndarrays. >>> x1 = np.array(([1., -1.])) >>> -x1 array([-1., 1.]) """) add_newdoc('numpy.core.umath', 'positive', """ Numerical positive, element-wise. .. versionadded:: 1.13.0 Parameters ---------- x : array_like or scalar Input array. Returns ------- y : ndarray or scalar Returned array or scalar: `y = +x`. $OUT_SCALAR_1 Notes ----- Equivalent to `x.copy()`, but only defined for types that support arithmetic. 
Examples -------- >>> x1 = np.array(([1., -1.])) >>> np.positive(x1) array([ 1., -1.]) The unary ``+`` operator can be used as a shorthand for ``np.positive`` on ndarrays. >>> x1 = np.array(([1., -1.])) >>> +x1 array([ 1., -1.]) """) add_newdoc('numpy.core.umath', 'not_equal', """ Return (x1 != x2) element-wise. Parameters ---------- x1, x2 : array_like Input arrays. $BROADCASTABLE_2 $PARAMS Returns ------- out : ndarray or scalar Output array, element-wise comparison of `x1` and `x2`. Typically of type bool, unless ``dtype=object`` is passed. $OUT_SCALAR_2 See Also -------- equal, greater, greater_equal, less, less_equal Examples -------- >>> np.not_equal([1.,2.], [1., 3.]) array([False, True]) >>> np.not_equal([1, 2], [[1, 3],[1, 4]]) array([[False, True], [False, True]]) The ``!=`` operator can be used as a shorthand for ``np.not_equal`` on ndarrays. >>> a = np.array([1., 2.]) >>> b = np.array([1., 3.]) >>> a != b array([False, True]) """) add_newdoc('numpy.core.umath', '_ones_like', """ This function used to be the numpy.ones_like, but now a specific function for that has been written for consistency with the other *_like functions. It is only used internally in a limited fashion now. See Also -------- ones_like """) add_newdoc('numpy.core.umath', 'power', """ First array elements raised to powers from second array, element-wise. Raise each base in `x1` to the positionally-corresponding power in `x2`. `x1` and `x2` must be broadcastable to the same shape. Note that an integer type raised to a negative integer power will raise a ValueError. Parameters ---------- x1 : array_like The bases. x2 : array_like The exponents. $BROADCASTABLE_2 $PARAMS Returns ------- y : ndarray The bases in `x1` raised to the exponents in `x2`. $OUT_SCALAR_2 See Also -------- float_power : power function that promotes integers to float Examples -------- Cube each element in an array. 
>>> x1 = np.arange(6) >>> x1 [0, 1, 2, 3, 4, 5] >>> np.power(x1, 3) array([ 0, 1, 8, 27, 64, 125]) Raise the bases to different exponents. >>> x2 = [1.0, 2.0, 3.0, 3.0, 2.0, 1.0] >>> np.power(x1, x2) array([ 0., 1., 8., 27., 16., 5.]) The effect of broadcasting. >>> x2 = np.array([[1, 2, 3, 3, 2, 1], [1, 2, 3, 3, 2, 1]]) >>> x2 array([[1, 2, 3, 3, 2, 1], [1, 2, 3, 3, 2, 1]]) >>> np.power(x1, x2) array([[ 0, 1, 8, 27, 16, 5], [ 0, 1, 8, 27, 16, 5]]) The ``**`` operator can be used as a shorthand for ``np.power`` on ndarrays. >>> x2 = np.array([1, 2, 3, 3, 2, 1]) >>> x1 = np.arange(6) >>> x1 ** x2 array([ 0, 1, 8, 27, 16, 5]) """) add_newdoc('numpy.core.umath', 'float_power', """ First array elements raised to powers from second array, element-wise. Raise each base in `x1` to the positionally-corresponding power in `x2`. `x1` and `x2` must be broadcastable to the same shape. This differs from the power function in that integers, float16, and float32 are promoted to floats with a minimum precision of float64 so that the result is always inexact. The intent is that the function will return a usable result for negative powers and seldom overflow for positive powers. .. versionadded:: 1.12.0 Parameters ---------- x1 : array_like The bases. x2 : array_like The exponents. $BROADCASTABLE_2 $PARAMS Returns ------- y : ndarray The bases in `x1` raised to the exponents in `x2`. $OUT_SCALAR_2 See Also -------- power : power function that preserves type Examples -------- Cube each element in a list. >>> x1 = range(6) >>> x1 [0, 1, 2, 3, 4, 5] >>> np.float_power(x1, 3) array([ 0., 1., 8., 27., 64., 125.]) Raise the bases to different exponents. >>> x2 = [1.0, 2.0, 3.0, 3.0, 2.0, 1.0] >>> np.float_power(x1, x2) array([ 0., 1., 8., 27., 16., 5.]) The effect of broadcasting. 
>>> x2 = np.array([[1, 2, 3, 3, 2, 1], [1, 2, 3, 3, 2, 1]]) >>> x2 array([[1, 2, 3, 3, 2, 1], [1, 2, 3, 3, 2, 1]]) >>> np.float_power(x1, x2) array([[ 0., 1., 8., 27., 16., 5.], [ 0., 1., 8., 27., 16., 5.]]) """) add_newdoc('numpy.core.umath', 'radians', """ Convert angles from degrees to radians. Parameters ---------- x : array_like Input array in degrees. $PARAMS Returns ------- y : ndarray The corresponding radian values. $OUT_SCALAR_1 See Also -------- deg2rad : equivalent function Examples -------- Convert a degree array to radians >>> deg = np.arange(12.) * 30. >>> np.radians(deg) array([ 0. , 0.52359878, 1.04719755, 1.57079633, 2.0943951 , 2.61799388, 3.14159265, 3.66519143, 4.1887902 , 4.71238898, 5.23598776, 5.75958653]) >>> out = np.zeros((deg.shape)) >>> ret = np.radians(deg, out) >>> ret is out True """) add_newdoc('numpy.core.umath', 'deg2rad', """ Convert angles from degrees to radians. Parameters ---------- x : array_like Angles in degrees. $PARAMS Returns ------- y : ndarray The corresponding angle in radians. $OUT_SCALAR_1 See Also -------- rad2deg : Convert angles from radians to degrees. unwrap : Remove large jumps in angle by wrapping. Notes ----- .. versionadded:: 1.3.0 ``deg2rad(x)`` is ``x * pi / 180``. Examples -------- >>> np.deg2rad(180) 3.1415926535897931 """) add_newdoc('numpy.core.umath', 'reciprocal', """ Return the reciprocal of the argument, element-wise. Calculates ``1/x``. Parameters ---------- x : array_like Input array. $PARAMS Returns ------- y : ndarray Return array. $OUT_SCALAR_1 Notes ----- .. note:: This function is not designed to work with integers. For integer arguments with absolute value larger than 1 the result is always zero because of the way Python handles integer division. For integer zero the result is an overflow. Examples -------- >>> np.reciprocal(2.) 0.5 >>> np.reciprocal([1, 2., 3.33]) array([ 1. 
, 0.5 , 0.3003003]) """) add_newdoc('numpy.core.umath', 'remainder', """ Return element-wise remainder of division. Computes the remainder complementary to the `floor_divide` function. It is equivalent to the Python modulus operator``x1 % x2`` and has the same sign as the divisor `x2`. The MATLAB function equivalent to ``np.remainder`` is ``mod``. .. warning:: This should not be confused with: * Python 3.7's `math.remainder` and C's ``remainder``, which computes the IEEE remainder, which are the complement to ``round(x1 / x2)``. * The MATLAB ``rem`` function and or the C ``%`` operator which is the complement to ``int(x1 / x2)``. Parameters ---------- x1 : array_like Dividend array. x2 : array_like Divisor array. $BROADCASTABLE_2 $PARAMS Returns ------- y : ndarray The element-wise remainder of the quotient ``floor_divide(x1, x2)``. $OUT_SCALAR_2 See Also -------- floor_divide : Equivalent of Python ``//`` operator. divmod : Simultaneous floor division and remainder. fmod : Equivalent of the MATLAB ``rem`` function. divide, floor Notes ----- Returns 0 when `x2` is 0 and both `x1` and `x2` are (arrays of) integers. ``mod`` is an alias of ``remainder``. Examples -------- >>> np.remainder([4, 7], [2, 3]) array([0, 1]) >>> np.remainder(np.arange(7), 5) array([0, 1, 2, 3, 4, 0, 1]) The ``%`` operator can be used as a shorthand for ``np.remainder`` on ndarrays. >>> x1 = np.arange(7) >>> x1 % 5 array([0, 1, 2, 3, 4, 0, 1]) """) add_newdoc('numpy.core.umath', 'divmod', """ Return element-wise quotient and remainder simultaneously. .. versionadded:: 1.13.0 ``np.divmod(x, y)`` is equivalent to ``(x // y, x % y)``, but faster because it avoids redundant work. It is used to implement the Python built-in function ``divmod`` on NumPy arrays. Parameters ---------- x1 : array_like Dividend array. x2 : array_like Divisor array. $BROADCASTABLE_2 $PARAMS Returns ------- out1 : ndarray Element-wise quotient resulting from floor division. 
$OUT_SCALAR_2 out2 : ndarray Element-wise remainder from floor division. $OUT_SCALAR_2 See Also -------- floor_divide : Equivalent to Python's ``//`` operator. remainder : Equivalent to Python's ``%`` operator. modf : Equivalent to ``divmod(x, 1)`` for positive ``x`` with the return values switched. Examples -------- >>> np.divmod(np.arange(5), 3) (array([0, 0, 0, 1, 1]), array([0, 1, 2, 0, 1])) The `divmod` function can be used as a shorthand for ``np.divmod`` on ndarrays. >>> x = np.arange(5) >>> divmod(x, 3) (array([0, 0, 0, 1, 1]), array([0, 1, 2, 0, 1])) """) add_newdoc('numpy.core.umath', 'right_shift', """ Shift the bits of an integer to the right. Bits are shifted to the right `x2`. Because the internal representation of numbers is in binary format, this operation is equivalent to dividing `x1` by ``2**x2``. Parameters ---------- x1 : array_like, int Input values. x2 : array_like, int Number of bits to remove at the right of `x1`. $BROADCASTABLE_2 $PARAMS Returns ------- out : ndarray, int Return `x1` with bits shifted `x2` times to the right. $OUT_SCALAR_2 See Also -------- left_shift : Shift the bits of an integer to the left. binary_repr : Return the binary representation of the input number as a string. Examples -------- >>> np.binary_repr(10) '1010' >>> np.right_shift(10, 1) 5 >>> np.binary_repr(5) '101' >>> np.right_shift(10, [1,2,3]) array([5, 2, 1]) The ``>>`` operator can be used as a shorthand for ``np.right_shift`` on ndarrays. >>> x1 = 10 >>> x2 = np.array([1,2,3]) >>> x1 >> x2 array([5, 2, 1]) """) add_newdoc('numpy.core.umath', 'rint', """ Round elements of the array to the nearest integer. Parameters ---------- x : array_like Input array. $PARAMS Returns ------- out : ndarray or scalar Output array is same shape and type as `x`. $OUT_SCALAR_1 See Also -------- fix, ceil, floor, trunc Notes ----- For values exactly halfway between rounded decimal values, NumPy rounds to the nearest even value. 
Thus 1.5 and 2.5 round to 2.0, -0.5 and 0.5 round to 0.0, etc. Examples -------- >>> a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0]) >>> np.rint(a) array([-2., -2., -0., 0., 2., 2., 2.]) """) add_newdoc('numpy.core.umath', 'sign', """ Returns an element-wise indication of the sign of a number. The `sign` function returns ``-1 if x < 0, 0 if x==0, 1 if x > 0``. nan is returned for nan inputs. For complex inputs, the `sign` function returns ``sign(x.real) + 0j if x.real != 0 else sign(x.imag) + 0j``. complex(nan, 0) is returned for complex nan inputs. Parameters ---------- x : array_like Input values. $PARAMS Returns ------- y : ndarray The sign of `x`. $OUT_SCALAR_1 Notes ----- There is more than one definition of sign in common use for complex numbers. The definition used here is equivalent to :math:`x/\\sqrt{x*x}` which is different from a common alternative, :math:`x/|x|`. Examples -------- >>> np.sign([-5., 4.5]) array([-1., 1.]) >>> np.sign(0) 0 >>> np.sign(5-2j) (1+0j) """) add_newdoc('numpy.core.umath', 'signbit', """ Returns element-wise True where signbit is set (less than zero). Parameters ---------- x : array_like The input value(s). $PARAMS Returns ------- result : ndarray of bool Output array, or reference to `out` if that was supplied. $OUT_SCALAR_1 Examples -------- >>> np.signbit(-1.2) True >>> np.signbit(np.array([1, -2.3, 2.1])) array([False, True, False]) """) add_newdoc('numpy.core.umath', 'copysign', """ Change the sign of x1 to that of x2, element-wise. If `x2` is a scalar, its sign will be copied to all elements of `x1`. Parameters ---------- x1 : array_like Values to change the sign of. x2 : array_like The sign of `x2` is copied to `x1`. $BROADCASTABLE_2 $PARAMS Returns ------- out : ndarray or scalar The values of `x1` with the sign of `x2`. 
$OUT_SCALAR_2 Examples -------- >>> np.copysign(1.3, -1) -1.3 >>> 1/np.copysign(0, 1) inf >>> 1/np.copysign(0, -1) -inf >>> np.copysign([-1, 0, 1], -1.1) array([-1., -0., -1.]) >>> np.copysign([-1, 0, 1], np.arange(3)-1) array([-1., 0., 1.]) """) add_newdoc('numpy.core.umath', 'nextafter', """ Return the next floating-point value after x1 towards x2, element-wise. Parameters ---------- x1 : array_like Values to find the next representable value of. x2 : array_like The direction where to look for the next representable value of `x1`. $BROADCASTABLE_2 $PARAMS Returns ------- out : ndarray or scalar The next representable values of `x1` in the direction of `x2`. $OUT_SCALAR_2 Examples -------- >>> eps = np.finfo(np.float64).eps >>> np.nextafter(1, 2) == eps + 1 True >>> np.nextafter([1, 2], [2, 1]) == [eps + 1, 2 - eps] array([ True, True]) """) add_newdoc('numpy.core.umath', 'spacing', """ Return the distance between x and the nearest adjacent number. Parameters ---------- x : array_like Values to find the spacing of. $PARAMS Returns ------- out : ndarray or scalar The spacing of values of `x`. $OUT_SCALAR_1 Notes ----- It can be considered as a generalization of EPS: ``spacing(np.float64(1)) == np.finfo(np.float64).eps``, and there should not be any representable number between ``x + spacing(x)`` and x for any finite x. Spacing of +- inf and NaN is NaN. Examples -------- >>> np.spacing(1) == np.finfo(np.float64).eps True """) add_newdoc('numpy.core.umath', 'sin', """ Trigonometric sine, element-wise. Parameters ---------- x : array_like Angle, in radians (:math:`2 \\pi` rad equals 360 degrees). $PARAMS Returns ------- y : array_like The sine of each element of x. $OUT_SCALAR_1 See Also -------- arcsin, sinh, cos Notes ----- The sine is one of the fundamental functions of trigonometry (the mathematical study of triangles). Consider a circle of radius 1 centered on the origin. 
A ray comes in from the :math:`+x` axis, makes an angle at the origin (measured counter-clockwise from that axis), and departs from the origin. The :math:`y` coordinate of the outgoing ray's intersection with the unit circle is the sine of that angle. It ranges from -1 for :math:`x=3\\pi / 2` to +1 for :math:`\\pi / 2.` The function has zeroes where the angle is a multiple of :math:`\\pi`. Sines of angles between :math:`\\pi` and :math:`2\\pi` are negative. The numerous properties of the sine and related functions are included in any standard trigonometry text. Examples -------- Print sine of one angle: >>> np.sin(np.pi/2.) 1.0 Print sines of an array of angles given in degrees: >>> np.sin(np.array((0., 30., 45., 60., 90.)) * np.pi / 180. ) array([ 0. , 0.5 , 0.70710678, 0.8660254 , 1. ]) Plot the sine function: >>> import matplotlib.pylab as plt >>> x = np.linspace(-np.pi, np.pi, 201) >>> plt.plot(x, np.sin(x)) >>> plt.xlabel('Angle [rad]') >>> plt.ylabel('sin(x)') >>> plt.axis('tight') >>> plt.show() """) add_newdoc('numpy.core.umath', 'sinh', """ Hyperbolic sine, element-wise. Equivalent to ``1/2 * (np.exp(x) - np.exp(-x))`` or ``-1j * np.sin(1j*x)``. Parameters ---------- x : array_like Input array. $PARAMS Returns ------- y : ndarray The corresponding hyperbolic sine values. $OUT_SCALAR_1 Notes ----- If `out` is provided, the function writes the result into it, and returns a reference to `out`. (See Examples) References ---------- M. Abramowitz and I. A. Stegun, Handbook of Mathematical Functions. New York, NY: Dover, 1972, pg. 83. Examples -------- >>> np.sinh(0) 0.0 >>> np.sinh(np.pi*1j/2) 1j >>> np.sinh(np.pi*1j) # (exact value is 0) 1.2246063538223773e-016j >>> # Discrepancy due to vagaries of floating point arithmetic. 
>>> # Example of providing the optional output parameter >>> out1 = np.array([0], dtype='d') >>> out2 = np.sinh([0.1], out1) >>> out2 is out1 True >>> # Example of ValueError due to provision of shape mis-matched `out` >>> np.sinh(np.zeros((3,3)),np.zeros((2,2))) Traceback (most recent call last): File "<stdin>", line 1, in <module> ValueError: operands could not be broadcast together with shapes (3,3) (2,2) """) add_newdoc('numpy.core.umath', 'sqrt', """ Return the non-negative square-root of an array, element-wise. Parameters ---------- x : array_like The values whose square-roots are required. $PARAMS Returns ------- y : ndarray An array of the same shape as `x`, containing the positive square-root of each element in `x`. If any element in `x` is complex, a complex array is returned (and the square-roots of negative reals are calculated). If all of the elements in `x` are real, so is `y`, with negative elements returning ``nan``. If `out` was provided, `y` is a reference to it. $OUT_SCALAR_1 See Also -------- lib.scimath.sqrt A version which returns complex numbers when given negative reals. Notes ----- *sqrt* has--consistent with common convention--as its branch cut the real "interval" [`-inf`, 0), and is continuous from above on it. A branch cut is a curve in the complex plane across which a given complex function fails to be continuous. Examples -------- >>> np.sqrt([1,4,9]) array([ 1., 2., 3.]) >>> np.sqrt([4, -1, -3+4J]) array([ 2.+0.j, 0.+1.j, 1.+2.j]) >>> np.sqrt([4, -1, np.inf]) array([ 2., nan, inf]) """) add_newdoc('numpy.core.umath', 'cbrt', """ Return the cube-root of an array, element-wise. .. versionadded:: 1.10.0 Parameters ---------- x : array_like The values whose cube-roots are required. $PARAMS Returns ------- y : ndarray An array of the same shape as `x`, containing the cube cube-root of each element in `x`. If `out` was provided, `y` is a reference to it. 
$OUT_SCALAR_1 Examples -------- >>> np.cbrt([1,8,27]) array([ 1., 2., 3.]) """) add_newdoc('numpy.core.umath', 'square', """ Return the element-wise square of the input. Parameters ---------- x : array_like Input data. $PARAMS Returns ------- out : ndarray or scalar Element-wise `x*x`, of the same shape and dtype as `x`. $OUT_SCALAR_1 See Also -------- numpy.linalg.matrix_power sqrt power Examples -------- >>> np.square([-1j, 1]) array([-1.-0.j, 1.+0.j]) """) add_newdoc('numpy.core.umath', 'subtract', """ Subtract arguments, element-wise. Parameters ---------- x1, x2 : array_like The arrays to be subtracted from each other. $BROADCASTABLE_2 $PARAMS Returns ------- y : ndarray The difference of `x1` and `x2`, element-wise. $OUT_SCALAR_2 Notes ----- Equivalent to ``x1 - x2`` in terms of array broadcasting. Examples -------- >>> np.subtract(1.0, 4.0) -3.0 >>> x1 = np.arange(9.0).reshape((3, 3)) >>> x2 = np.arange(3.0) >>> np.subtract(x1, x2) array([[ 0., 0., 0.], [ 3., 3., 3.], [ 6., 6., 6.]]) The ``-`` operator can be used as a shorthand for ``np.subtract`` on ndarrays. >>> x1 = np.arange(9.0).reshape((3, 3)) >>> x2 = np.arange(3.0) >>> x1 - x2 array([[0., 0., 0.], [3., 3., 3.], [6., 6., 6.]]) """) add_newdoc('numpy.core.umath', 'tan', """ Compute tangent element-wise. Equivalent to ``np.sin(x)/np.cos(x)`` element-wise. Parameters ---------- x : array_like Input array. $PARAMS Returns ------- y : ndarray The corresponding tangent values. $OUT_SCALAR_1 Notes ----- If `out` is provided, the function writes the result into it, and returns a reference to `out`. (See Examples) References ---------- M. Abramowitz and I. A. Stegun, Handbook of Mathematical Functions. New York, NY: Dover, 1972. 
Examples -------- >>> from math import pi >>> np.tan(np.array([-pi,pi/2,pi])) array([ 1.22460635e-16, 1.63317787e+16, -1.22460635e-16]) >>> >>> # Example of providing the optional output parameter illustrating >>> # that what is returned is a reference to said parameter >>> out1 = np.array([0], dtype='d') >>> out2 = np.cos([0.1], out1) >>> out2 is out1 True >>> >>> # Example of ValueError due to provision of shape mis-matched `out` >>> np.cos(np.zeros((3,3)),np.zeros((2,2))) Traceback (most recent call last): File "<stdin>", line 1, in <module> ValueError: operands could not be broadcast together with shapes (3,3) (2,2) """) add_newdoc('numpy.core.umath', 'tanh', """ Compute hyperbolic tangent element-wise. Equivalent to ``np.sinh(x)/np.cosh(x)`` or ``-1j * np.tan(1j*x)``. Parameters ---------- x : array_like Input array. $PARAMS Returns ------- y : ndarray The corresponding hyperbolic tangent values. $OUT_SCALAR_1 Notes ----- If `out` is provided, the function writes the result into it, and returns a reference to `out`. (See Examples) References ---------- .. [1] M. Abramowitz and I. A. Stegun, Handbook of Mathematical Functions. New York, NY: Dover, 1972, pg. 83. http://www.math.sfu.ca/~cbm/aands/ .. [2] Wikipedia, "Hyperbolic function", https://en.wikipedia.org/wiki/Hyperbolic_function Examples -------- >>> np.tanh((0, np.pi*1j, np.pi*1j/2)) array([ 0. +0.00000000e+00j, 0. -1.22460635e-16j, 0. 
+1.63317787e+16j]) >>> # Example of providing the optional output parameter illustrating >>> # that what is returned is a reference to said parameter >>> out1 = np.array([0], dtype='d') >>> out2 = np.tanh([0.1], out1) >>> out2 is out1 True >>> # Example of ValueError due to provision of shape mis-matched `out` >>> np.tanh(np.zeros((3,3)),np.zeros((2,2))) Traceback (most recent call last): File "<stdin>", line 1, in <module> ValueError: operands could not be broadcast together with shapes (3,3) (2,2) """) add_newdoc('numpy.core.umath', 'true_divide', """ Returns a true division of the inputs, element-wise. Instead of the Python traditional 'floor division', this returns a true division. True division adjusts the output type to present the best answer, regardless of input types. Parameters ---------- x1 : array_like Dividend array. x2 : array_like Divisor array. $BROADCASTABLE_2 $PARAMS Returns ------- out : ndarray or scalar $OUT_SCALAR_2 Notes ----- In Python, ``//`` is the floor division operator and ``/`` the true division operator. The ``true_divide(x1, x2)`` function is equivalent to true division in Python. Examples -------- >>> x = np.arange(5) >>> np.true_divide(x, 4) array([ 0. , 0.25, 0.5 , 0.75, 1. ]) >>> x/4 array([ 0. , 0.25, 0.5 , 0.75, 1. ]) >>> x//4 array([0, 0, 0, 0, 1]) The ``/`` operator can be used as a shorthand for ``np.true_divide`` on ndarrays. >>> x = np.arange(5) >>> x / 4 array([0. , 0.25, 0.5 , 0.75, 1. ]) """) add_newdoc('numpy.core.umath', 'frexp', """ Decompose the elements of x into mantissa and twos exponent. Returns (`mantissa`, `exponent`), where `x = mantissa * 2**exponent``. The mantissa lies in the open interval(-1, 1), while the twos exponent is a signed integer. Parameters ---------- x : array_like Array of numbers to be decomposed. out1 : ndarray, optional Output array for the mantissa. Must have the same shape as `x`. out2 : ndarray, optional Output array for the exponent. Must have the same shape as `x`. 
$PARAMS Returns ------- mantissa : ndarray Floating values between -1 and 1. $OUT_SCALAR_1 exponent : ndarray Integer exponents of 2. $OUT_SCALAR_1 See Also -------- ldexp : Compute ``y = x1 * 2**x2``, the inverse of `frexp`. Notes ----- Complex dtypes are not supported, they will raise a TypeError. Examples -------- >>> x = np.arange(9) >>> y1, y2 = np.frexp(x) >>> y1 array([ 0. , 0.5 , 0.5 , 0.75 , 0.5 , 0.625, 0.75 , 0.875, 0.5 ]) >>> y2 array([0, 1, 2, 2, 3, 3, 3, 3, 4]) >>> y1 * 2**y2 array([ 0., 1., 2., 3., 4., 5., 6., 7., 8.]) """) add_newdoc('numpy.core.umath', 'ldexp', """ Returns x1 * 2**x2, element-wise. The mantissas `x1` and twos exponents `x2` are used to construct floating point numbers ``x1 * 2**x2``. Parameters ---------- x1 : array_like Array of multipliers. x2 : array_like, int Array of twos exponents. $BROADCASTABLE_2 $PARAMS Returns ------- y : ndarray or scalar The result of ``x1 * 2**x2``. $OUT_SCALAR_2 See Also -------- frexp : Return (y1, y2) from ``x = y1 * 2**y2``, inverse to `ldexp`. Notes ----- Complex dtypes are not supported, they will raise a TypeError. `ldexp` is useful as the inverse of `frexp`, if used by itself it is more clear to simply use the expression ``x1 * 2**x2``. Examples -------- >>> np.ldexp(5, np.arange(4)) array([ 5., 10., 20., 40.], dtype=float16) >>> x = np.arange(6) >>> np.ldexp(*np.frexp(x)) array([ 0., 1., 2., 3., 4., 5.]) """) add_newdoc('numpy.core.umath', 'gcd', """ Returns the greatest common divisor of ``|x1|`` and ``|x2|`` Parameters ---------- x1, x2 : array_like, int Arrays of values. 
$BROADCASTABLE_2 Returns ------- y : ndarray or scalar The greatest common divisor of the absolute value of the inputs $OUT_SCALAR_2 See Also -------- lcm : The lowest common multiple Examples -------- >>> np.gcd(12, 20) 4 >>> np.gcd.reduce([15, 25, 35]) 5 >>> np.gcd(np.arange(6), 20) array([20, 1, 2, 1, 4, 5]) """) add_newdoc('numpy.core.umath', 'lcm', """ Returns the lowest common multiple of ``|x1|`` and ``|x2|`` Parameters ---------- x1, x2 : array_like, int Arrays of values. $BROADCASTABLE_2 Returns ------- y : ndarray or scalar The lowest common multiple of the absolute value of the inputs $OUT_SCALAR_2 See Also -------- gcd : The greatest common divisor Examples -------- >>> np.lcm(12, 20) 60 >>> np.lcm.reduce([3, 12, 20]) 60 >>> np.lcm.reduce([40, 12, 20]) 120 >>> np.lcm(np.arange(6), 20) array([ 0, 20, 20, 60, 20, 20]) """)
import warnings
import sys
import os
import itertools
import pytest
import weakref

import numpy as np
from numpy.testing import (
    assert_equal, assert_array_equal, assert_almost_equal,
    assert_array_almost_equal, assert_array_less, build_err_msg, raises,
    assert_raises, assert_warns, assert_no_warnings, assert_allclose,
    assert_approx_equal, assert_array_almost_equal_nulp, assert_array_max_ulp,
    clear_and_catch_warnings, suppress_warnings,
    assert_string_equal, assert_, tempdir, temppath,
    assert_no_gc_cycles, HAS_REFCOUNT
    )
from numpy.core.overrides import ARRAY_FUNCTION_ENABLED


class _GenericTest:
    """Shared comparison tests; subclasses bind ``self._assert_func`` in
    ``setup`` to the assertion under test."""

    def _test_equal(self, a, b):
        # The bound assertion must accept equal inputs without raising.
        self._assert_func(a, b)

    def _test_not_equal(self, a, b):
        # The bound assertion must reject unequal inputs with AssertionError.
        with assert_raises(AssertionError):
            self._assert_func(a, b)

    def test_array_rank1_eq(self):
        """Test two equal array of rank 1 are found equal."""
        a = np.array([1, 2])
        b = np.array([1, 2])

        self._test_equal(a, b)

    def test_array_rank1_noteq(self):
        """Test two different array of rank 1 are found not equal."""
        a = np.array([1, 2])
        b = np.array([2, 2])

        self._test_not_equal(a, b)

    def test_array_rank2_eq(self):
        """Test two equal array of rank 2 are found equal."""
        a = np.array([[1, 2], [3, 4]])
        b = np.array([[1, 2], [3, 4]])

        self._test_equal(a, b)

    def test_array_diffshape(self):
        """Test two arrays with different shapes are found not equal."""
        a = np.array([1, 2])
        b = np.array([[1, 2], [1, 2]])

        self._test_not_equal(a, b)

    def test_objarray(self):
        """Test object arrays."""
        a = np.array([1, 1], dtype=object)
        self._test_equal(a, 1)

    def test_array_likes(self):
        self._test_equal([1, 2, 3], (1, 2, 3))


class TestArrayEqual(_GenericTest):
    """Tests for ``assert_array_equal``."""

    def setup(self):
        self._assert_func = assert_array_equal

    def test_generic_rank1(self):
        """Test rank 1 array for all dtypes."""
        def foo(t):
            a = np.empty(2, t)
            a.fill(1)
            b = a.copy()
            c = a.copy()
            c.fill(0)
            self._test_equal(a, b)
            self._test_not_equal(c, b)

        # Test numeric types and object
        for t in '?bhilqpBHILQPfdgFDG':
            foo(t)

        # Test strings
        for t in ['S1', 'U1']:
            foo(t)

    def test_0_ndim_array(self):
        # Values larger than 64 bits exercise the object-dtype path.
        x = np.array(473963742225900817127911193656584771)
        y = np.array(18535119325151578301457182298393896)
        assert_raises(AssertionError, self._assert_func, x, y)

        y = x
        self._assert_func(x, y)

        x = np.array(43)
        y = np.array(10)
        assert_raises(AssertionError, self._assert_func, x, y)

        y = x
        self._assert_func(x, y)

    def test_generic_rank3(self):
        """Test rank 3 array for all dtypes."""
        def foo(t):
            a = np.empty((4, 2, 3), t)
            a.fill(1)
            b = a.copy()
            c = a.copy()
            c.fill(0)
            self._test_equal(a, b)
            self._test_not_equal(c, b)

        # Test numeric types and object
        for t in '?bhilqpBHILQPfdgFDG':
            foo(t)

        # Test strings
        for t in ['S1', 'U1']:
            foo(t)

    def test_nan_array(self):
        """Test arrays with nan values in them."""
        a = np.array([1, 2, np.nan])
        b = np.array([1, 2, np.nan])

        self._test_equal(a, b)

        c = np.array([1, 2, 3])
        self._test_not_equal(c, b)

    def test_string_arrays(self):
        """Test equality/inequality of string arrays."""
        # (The original docstring was copy-pasted from test_array_diffshape
        # and described a different test.)
        a = np.array(['floupi', 'floupa'])
        b = np.array(['floupi', 'floupa'])

        self._test_equal(a, b)

        c = np.array(['floupipi', 'floupa'])

        self._test_not_equal(c, b)

    def test_recarrays(self):
        """Test record arrays."""
        a = np.empty(2, [('floupi', float), ('floupa', float)])
        a['floupi'] = [1, 2]
        a['floupa'] = [1, 2]
        b = a.copy()

        self._test_equal(a, b)

        c = np.empty(2, [('floupipi', float), ('floupa', float)])
        c['floupipi'] = a['floupi'].copy()
        c['floupa'] = a['floupa'].copy()

        with suppress_warnings() as sup:
            l = sup.record(FutureWarning, message="elementwise == ")
            self._test_not_equal(c, b)
            assert_equal(len(l), 1)

    def test_masked_nan_inf(self):
        # Regression test for gh-11121
        a = np.ma.MaskedArray([3., 4., 6.5], mask=[False, True, False])
        b = np.array([3., np.nan, 6.5])
        self._test_equal(a, b)
        self._test_equal(b, a)
        a = np.ma.MaskedArray([3., 4., 6.5], mask=[True, False, False])
        b = np.array([np.inf, 4., 6.5])
        self._test_equal(a, b)
        self._test_equal(b, a)

    def test_subclass_that_overrides_eq(self):
        # While we cannot guarantee testing functions will always work for
        # subclasses, the tests should ideally rely only on subclasses having
        # comparison operators, not on them being able to store booleans
        # (which, e.g., astropy Quantity cannot usefully do). See gh-8452.
        class MyArray(np.ndarray):
            def __eq__(self, other):
                return bool(np.equal(self, other).all())

            def __ne__(self, other):
                return not self == other

        a = np.array([1., 2.]).view(MyArray)
        b = np.array([2., 3.]).view(MyArray)
        # FIX: the original read ``assert_(type(a == a), bool)``, which
        # passed ``bool`` as the *msg* argument of ``assert_`` and therefore
        # could never fail.  Assert the intended property instead.
        assert_(isinstance(a == a, bool))
        assert_(a == a)
        assert_(a != b)
        self._test_equal(a, a)
        self._test_not_equal(a, b)
        self._test_not_equal(b, a)

    @pytest.mark.skipif(
        not ARRAY_FUNCTION_ENABLED, reason='requires __array_function__')
    def test_subclass_that_does_not_implement_npall(self):
        class MyArray(np.ndarray):
            def __array_function__(self, *args, **kwargs):
                return NotImplemented

        a = np.array([1., 2.]).view(MyArray)
        b = np.array([2., 3.]).view(MyArray)
        with assert_raises(TypeError):
            np.all(a)
        self._test_equal(a, a)
        self._test_not_equal(a, b)
        self._test_not_equal(b, a)
class TestBuildErrorMessage:
    """Exercise ``build_err_msg`` formatting options one at a time."""

    def test_build_err_msg_defaults(self):
        actual = np.array([1.00001, 2.00002, 3.00003])
        desired = np.array([1.00002, 2.00003, 3.00004])
        note = 'There is a mismatch'

        produced = build_err_msg([actual, desired], note)
        expected = ('\nItems are not equal: There is a mismatch\n ACTUAL: array(['
                    '1.00001, 2.00002, 3.00003])\n DESIRED: array([1.00002, '
                    '2.00003, 3.00004])')
        assert_equal(produced, expected)

    def test_build_err_msg_no_verbose(self):
        actual = np.array([1.00001, 2.00002, 3.00003])
        desired = np.array([1.00002, 2.00003, 3.00004])
        note = 'There is a mismatch'

        # verbose=False suppresses the array reprs entirely.
        produced = build_err_msg([actual, desired], note, verbose=False)
        expected = '\nItems are not equal: There is a mismatch'
        assert_equal(produced, expected)

    def test_build_err_msg_custom_names(self):
        actual = np.array([1.00001, 2.00002, 3.00003])
        desired = np.array([1.00002, 2.00003, 3.00004])
        note = 'There is a mismatch'

        produced = build_err_msg([actual, desired], note, names=('FOO', 'BAR'))
        expected = ('\nItems are not equal: There is a mismatch\n FOO: array(['
                    '1.00001, 2.00002, 3.00003])\n BAR: array([1.00002, '
                    '2.00003, 3.00004])')
        assert_equal(produced, expected)

    def test_build_err_msg_custom_precision(self):
        actual = np.array([1.000000001, 2.00002, 3.00003])
        desired = np.array([1.000000002, 2.00003, 3.00004])
        note = 'There is a mismatch'

        # precision=10 keeps the 1e-9 difference visible in the repr.
        produced = build_err_msg([actual, desired], note, precision=10)
        expected = ('\nItems are not equal: There is a mismatch\n ACTUAL: array(['
                    '1.000000001, 2.00002    , 3.00003    ])\n DESIRED: array(['
                    '1.000000002, 2.00003    , 3.00004    ])')
        assert_equal(produced, expected)
class TestEqual(TestArrayEqual):
    """Tests for ``assert_equal`` (scalar-aware variant of array equality)."""

    def setup(self):
        self._assert_func = assert_equal

    def test_nan_items(self):
        # NaN compares equal to NaN for this assertion, but a scalar NaN is
        # not equal to a list containing NaN.
        self._assert_func(np.nan, np.nan)
        self._assert_func([np.nan], [np.nan])
        self._test_not_equal(np.nan, [np.nan])
        self._test_not_equal(np.nan, 1)

    def test_inf_items(self):
        self._assert_func(np.inf, np.inf)
        self._assert_func([np.inf], [np.inf])
        self._test_not_equal(np.inf, [np.inf])

    def test_datetime(self):
        self._test_equal(
            np.datetime64("2017-01-01", "s"),
            np.datetime64("2017-01-01", "s")
        )
        self._test_equal(
            np.datetime64("2017-01-01", "s"),
            np.datetime64("2017-01-01", "m")
        )

        # gh-10081
        self._test_not_equal(
            np.datetime64("2017-01-01", "s"),
            np.datetime64("2017-01-02", "s")
        )
        self._test_not_equal(
            np.datetime64("2017-01-01", "s"),
            np.datetime64("2017-01-02", "m")
        )

    def test_nat_items(self):
        # not a datetime
        nadt_no_unit = np.datetime64("NaT")
        nadt_s = np.datetime64("NaT", "s")
        nadt_d = np.datetime64("NaT", "ns")
        # not a timedelta
        natd_no_unit = np.timedelta64("NaT")
        natd_s = np.timedelta64("NaT", "s")
        natd_d = np.timedelta64("NaT", "ns")

        dts = [nadt_no_unit, nadt_s, nadt_d]
        tds = [natd_no_unit, natd_s, natd_d]
        # NaT equals NaT within a kind (datetime or timedelta), regardless
        # of unit; it never equals a NaT of the other kind.
        for a, b in itertools.product(dts, dts):
            self._assert_func(a, b)
            self._assert_func([a], [b])
            self._test_not_equal([a], b)

        for a, b in itertools.product(tds, tds):
            self._assert_func(a, b)
            self._assert_func([a], [b])
            self._test_not_equal([a], b)

        for a, b in itertools.product(tds, dts):
            self._test_not_equal(a, b)
            self._test_not_equal(a, [b])
            self._test_not_equal([a], [b])
            self._test_not_equal([a], np.datetime64("2017-01-01", "s"))
            self._test_not_equal([b], np.datetime64("2017-01-01", "s"))
            self._test_not_equal([a], np.timedelta64(123, "s"))
            self._test_not_equal([b], np.timedelta64(123, "s"))

    def test_non_numeric(self):
        self._assert_func('ab', 'ab')
        self._test_not_equal('ab', 'abb')

    def test_complex_item(self):
        self._assert_func(complex(1, 2), complex(1, 2))
        self._assert_func(complex(1, np.nan), complex(1, np.nan))
        self._test_not_equal(complex(1, np.nan), complex(1, 2))
        self._test_not_equal(complex(np.nan, 1), complex(1, np.nan))
        self._test_not_equal(complex(np.nan, np.inf), complex(np.nan, 2))

    def test_negative_zero(self):
        # +0.0 and -0.0 are distinguished by assert_equal.
        self._test_not_equal(np.PZERO, np.NZERO)

    def test_complex(self):
        x = np.array([complex(1, 2), complex(1, np.nan)])
        y = np.array([complex(1, 2), complex(1, 2)])
        self._assert_func(x, x)
        self._test_not_equal(x, y)

    def test_object(self):
        #gh-12942
        import datetime
        a = np.array([datetime.datetime(2000, 1, 1),
                      datetime.datetime(2000, 1, 2)])
        self._test_not_equal(a, a[::-1])
class TestArrayAlmostEqual(_GenericTest):
    """Tests for ``assert_array_almost_equal`` (decimal-places comparison)."""

    def setup(self):
        self._assert_func = assert_array_almost_equal

    def test_closeness(self):
        # Note that in the course of time we ended up with
        #     `abs(x - y) < 1.5 * 10**(-decimal)`
        # instead of the previously documented
        #     `abs(x - y) < 0.5 * 10**(-decimal)`
        # so this check serves to preserve the wrongness.

        # test scalars
        self._assert_func(1.499999, 0.0, decimal=0)
        assert_raises(AssertionError,
                          lambda: self._assert_func(1.5, 0.0, decimal=0))

        # test arrays
        self._assert_func([1.499999], [0.0], decimal=0)
        assert_raises(AssertionError,
                          lambda: self._assert_func([1.5], [0.0], decimal=0))

    def test_simple(self):
        x = np.array([1234.2222])
        y = np.array([1234.2223])

        self._assert_func(x, y, decimal=3)
        self._assert_func(x, y, decimal=4)
        assert_raises(AssertionError,
                          lambda: self._assert_func(x, y, decimal=5))

    def test_nan(self):
        anan = np.array([np.nan])
        aone = np.array([1])
        ainf = np.array([np.inf])
        self._assert_func(anan, anan)
        assert_raises(AssertionError,
                          lambda: self._assert_func(anan, aone))
        assert_raises(AssertionError,
                          lambda: self._assert_func(anan, ainf))
        assert_raises(AssertionError,
                          lambda: self._assert_func(ainf, anan))

    def test_inf(self):
        a = np.array([[1., 2.], [3., 4.]])
        b = a.copy()
        a[0, 0] = np.inf
        assert_raises(AssertionError,
                          lambda: self._assert_func(a, b))
        b[0, 0] = -np.inf
        assert_raises(AssertionError,
                          lambda: self._assert_func(a, b))

    def test_subclass(self):
        # Masked entries are ignored by the comparison.
        a = np.array([[1., 2.], [3., 4.]])
        b = np.ma.masked_array([[1., 2.], [0., 4.]],
                               [[False, False], [True, False]])
        self._assert_func(a, b)
        self._assert_func(b, a)
        self._assert_func(b, b)

        # Test fully masked as well (see gh-11123).
        a = np.ma.MaskedArray(3.5, mask=True)
        b = np.array([3., 4., 6.5])
        self._test_equal(a, b)
        self._test_equal(b, a)
        a = np.ma.masked
        b = np.array([3., 4., 6.5])
        self._test_equal(a, b)
        self._test_equal(b, a)
        a = np.ma.MaskedArray([3., 4., 6.5], mask=[True, True, True])
        b = np.array([1., 2., 3.])
        self._test_equal(a, b)
        self._test_equal(b, a)
        a = np.ma.MaskedArray([3., 4., 6.5], mask=[True, True, True])
        b = np.array(1.)
        self._test_equal(a, b)
        self._test_equal(b, a)

    def test_subclass_that_cannot_be_bool(self):
        # While we cannot guarantee testing functions will always work for
        # subclasses, the tests should ideally rely only on subclasses having
        # comparison operators, not on them being able to store booleans
        # (which, e.g., astropy Quantity cannot usefully do). See gh-8452.
        class MyArray(np.ndarray):
            def __eq__(self, other):
                return super().__eq__(other).view(np.ndarray)

            def __lt__(self, other):
                return super().__lt__(other).view(np.ndarray)

            def all(self, *args, **kwargs):
                raise NotImplementedError

        a = np.array([1., 2.]).view(MyArray)
        self._assert_func(a, a)
class TestAlmostEqual(_GenericTest):
    """Tests for ``assert_almost_equal`` including its error-message format."""

    def setup(self):
        self._assert_func = assert_almost_equal

    def test_closeness(self):
        # Note that in the course of time we ended up with
        #     `abs(x - y) < 1.5 * 10**(-decimal)`
        # instead of the previously documented
        #     `abs(x - y) < 0.5 * 10**(-decimal)`
        # so this check serves to preserve the wrongness.

        # test scalars
        self._assert_func(1.499999, 0.0, decimal=0)
        assert_raises(AssertionError,
                      lambda: self._assert_func(1.5, 0.0, decimal=0))

        # test arrays
        self._assert_func([1.499999], [0.0], decimal=0)
        assert_raises(AssertionError,
                      lambda: self._assert_func([1.5], [0.0], decimal=0))

    def test_nan_item(self):
        self._assert_func(np.nan, np.nan)
        assert_raises(AssertionError,
                      lambda: self._assert_func(np.nan, 1))
        assert_raises(AssertionError,
                      lambda: self._assert_func(np.nan, np.inf))
        assert_raises(AssertionError,
                      lambda: self._assert_func(np.inf, np.nan))

    def test_inf_item(self):
        self._assert_func(np.inf, np.inf)
        self._assert_func(-np.inf, -np.inf)
        assert_raises(AssertionError,
                      lambda: self._assert_func(np.inf, 1))
        assert_raises(AssertionError,
                      lambda: self._assert_func(-np.inf, np.inf))

    def test_simple_item(self):
        self._test_not_equal(1, 2)

    def test_complex_item(self):
        self._assert_func(complex(1, 2), complex(1, 2))
        self._assert_func(complex(1, np.nan), complex(1, np.nan))
        self._assert_func(complex(np.inf, np.nan), complex(np.inf, np.nan))
        self._test_not_equal(complex(1, np.nan), complex(1, 2))
        self._test_not_equal(complex(np.nan, 1), complex(1, np.nan))
        self._test_not_equal(complex(np.nan, np.inf), complex(np.nan, 2))

    def test_complex(self):
        x = np.array([complex(1, 2), complex(1, np.nan)])
        z = np.array([complex(1, 2), complex(np.nan, 1)])
        y = np.array([complex(1, 2), complex(1, 2)])
        self._assert_func(x, x)
        self._test_not_equal(x, y)
        self._test_not_equal(x, z)

    def test_error_message(self):
        """Check the message is formatted correctly for the decimal value.
           Also check the message when input includes inf or nan (gh12200)"""
        # NOTE(review): the expected reprs below contain run-of-space padding
        # that numpy's array printer emits for column alignment.
        x = np.array([1.00000000001, 2.00000000002, 3.00003])
        y = np.array([1.00000000002, 2.00000000003, 3.00004])

        # Test with a different amount of decimal digits
        with pytest.raises(AssertionError) as exc_info:
            self._assert_func(x, y, decimal=12)
        msgs = str(exc_info.value).split('\n')
        assert_equal(msgs[3], 'Mismatched elements: 3 / 3 (100%)')
        assert_equal(msgs[4], 'Max absolute difference: 1.e-05')
        assert_equal(msgs[5], 'Max relative difference: 3.33328889e-06')
        assert_equal(
            msgs[6],
            ' x: array([1.00000000001, 2.00000000002, 3.00003      ])')
        assert_equal(
            msgs[7],
            ' y: array([1.00000000002, 2.00000000003, 3.00004      ])')

        # With the default value of decimal digits, only the 3rd element
        # differs. Note that we only check for the formatting of the arrays
        # themselves.
        with pytest.raises(AssertionError) as exc_info:
            self._assert_func(x, y)
        msgs = str(exc_info.value).split('\n')
        assert_equal(msgs[3], 'Mismatched elements: 1 / 3 (33.3%)')
        assert_equal(msgs[4], 'Max absolute difference: 1.e-05')
        assert_equal(msgs[5], 'Max relative difference: 3.33328889e-06')
        assert_equal(msgs[6], ' x: array([1.     , 2.     , 3.00003])')
        assert_equal(msgs[7], ' y: array([1.     , 2.     , 3.00004])')

        # Check the error message when input includes inf
        x = np.array([np.inf, 0])
        y = np.array([np.inf, 1])
        with pytest.raises(AssertionError) as exc_info:
            self._assert_func(x, y)
        msgs = str(exc_info.value).split('\n')
        assert_equal(msgs[3], 'Mismatched elements: 1 / 2 (50%)')
        assert_equal(msgs[4], 'Max absolute difference: 1.')
        assert_equal(msgs[5], 'Max relative difference: 1.')
        assert_equal(msgs[6], ' x: array([inf,  0.])')
        assert_equal(msgs[7], ' y: array([inf,  1.])')

        # Check the error message when dividing by zero
        x = np.array([1, 2])
        y = np.array([0, 0])
        with pytest.raises(AssertionError) as exc_info:
            self._assert_func(x, y)
        msgs = str(exc_info.value).split('\n')
        assert_equal(msgs[3], 'Mismatched elements: 2 / 2 (100%)')
        assert_equal(msgs[4], 'Max absolute difference: 2')
        assert_equal(msgs[5], 'Max relative difference: inf')

    def test_error_message_2(self):
        """Check the message is formatted correctly when either x or y is a scalar."""
        x = 2
        y = np.ones(20)
        with pytest.raises(AssertionError) as exc_info:
            self._assert_func(x, y)
        msgs = str(exc_info.value).split('\n')
        assert_equal(msgs[3], 'Mismatched elements: 20 / 20 (100%)')
        assert_equal(msgs[4], 'Max absolute difference: 1.')
        assert_equal(msgs[5], 'Max relative difference: 1.')

        y = 2
        x = np.ones(20)
        with pytest.raises(AssertionError) as exc_info:
            self._assert_func(x, y)
        msgs = str(exc_info.value).split('\n')
        assert_equal(msgs[3], 'Mismatched elements: 20 / 20 (100%)')
        assert_equal(msgs[4], 'Max absolute difference: 1.')
        assert_equal(msgs[5], 'Max relative difference: 0.5')

    def test_subclass_that_cannot_be_bool(self):
        # While we cannot guarantee testing functions will always work for
        # subclasses, the tests should ideally rely only on subclasses having
        # comparison operators, not on them being able to store booleans
        # (which, e.g., astropy Quantity cannot usefully do). See gh-8452.
        class MyArray(np.ndarray):
            def __eq__(self, other):
                return super().__eq__(other).view(np.ndarray)

            def __lt__(self, other):
                return super().__lt__(other).view(np.ndarray)

            def all(self, *args, **kwargs):
                raise NotImplementedError

        a = np.array([1., 2.]).view(MyArray)
        self._assert_func(a, a)
class TestApproxEqual:
    """Tests for ``assert_approx_equal`` (significant-digits comparison)."""

    def setup(self):
        self._assert_func = assert_approx_equal

    def test_simple_0d_arrays(self):
        x = np.array(1234.22)
        y = np.array(1234.23)

        self._assert_func(x, y, significant=5)
        self._assert_func(x, y, significant=6)
        assert_raises(AssertionError,
                      lambda: self._assert_func(x, y, significant=7))

    def test_simple_items(self):
        x = 1234.22
        y = 1234.23

        self._assert_func(x, y, significant=4)
        self._assert_func(x, y, significant=5)
        self._assert_func(x, y, significant=6)
        assert_raises(AssertionError,
                      lambda: self._assert_func(x, y, significant=7))

    def test_nan_array(self):
        anan = np.array(np.nan)
        aone = np.array(1)
        ainf = np.array(np.inf)
        self._assert_func(anan, anan)
        assert_raises(AssertionError, lambda: self._assert_func(anan, aone))
        assert_raises(AssertionError, lambda: self._assert_func(anan, ainf))
        assert_raises(AssertionError, lambda: self._assert_func(ainf, anan))

    def test_nan_items(self):
        anan = np.array(np.nan)
        aone = np.array(1)
        ainf = np.array(np.inf)
        self._assert_func(anan, anan)
        assert_raises(AssertionError, lambda: self._assert_func(anan, aone))
        assert_raises(AssertionError, lambda: self._assert_func(anan, ainf))
        assert_raises(AssertionError, lambda: self._assert_func(ainf, anan))


class TestArrayAssertLess:
    """Tests for ``assert_array_less`` (strict element-wise ordering)."""

    def setup(self):
        self._assert_func = assert_array_less

    def test_simple_arrays(self):
        x = np.array([1.1, 2.2])
        y = np.array([1.2, 2.3])

        self._assert_func(x, y)
        assert_raises(AssertionError, lambda: self._assert_func(y, x))

        y = np.array([1.0, 2.3])

        assert_raises(AssertionError, lambda: self._assert_func(x, y))
        assert_raises(AssertionError, lambda: self._assert_func(y, x))

    def test_rank2(self):
        x = np.array([[1.1, 2.2], [3.3, 4.4]])
        y = np.array([[1.2, 2.3], [3.4, 4.5]])

        self._assert_func(x, y)
        assert_raises(AssertionError, lambda: self._assert_func(y, x))

        y = np.array([[1.0, 2.3], [3.4, 4.5]])

        assert_raises(AssertionError, lambda: self._assert_func(x, y))
        assert_raises(AssertionError, lambda: self._assert_func(y, x))

    def test_rank3(self):
        x = np.ones(shape=(2, 2, 2))
        y = np.ones(shape=(2, 2, 2))+1

        self._assert_func(x, y)
        assert_raises(AssertionError, lambda: self._assert_func(y, x))

        y[0, 0, 0] = 0

        assert_raises(AssertionError, lambda: self._assert_func(x, y))
        assert_raises(AssertionError, lambda: self._assert_func(y, x))

    def test_simple_items(self):
        x = 1.1
        y = 2.2

        self._assert_func(x, y)
        assert_raises(AssertionError, lambda: self._assert_func(y, x))

        y = np.array([2.2, 3.3])

        self._assert_func(x, y)
        assert_raises(AssertionError, lambda: self._assert_func(y, x))

        y = np.array([1.0, 3.3])

        assert_raises(AssertionError, lambda: self._assert_func(x, y))

    def test_nan_noncompare(self):
        # NaN is incomparable: every ordering against NaN must fail,
        # except NaN vs NaN which is accepted.
        anan = np.array(np.nan)
        aone = np.array(1)
        ainf = np.array(np.inf)
        self._assert_func(anan, anan)
        assert_raises(AssertionError, lambda: self._assert_func(aone, anan))
        assert_raises(AssertionError, lambda: self._assert_func(anan, aone))
        assert_raises(AssertionError, lambda: self._assert_func(anan, ainf))
        assert_raises(AssertionError, lambda: self._assert_func(ainf, anan))

    def test_nan_noncompare_array(self):
        x = np.array([1.1, 2.2, 3.3])
        anan = np.array(np.nan)

        assert_raises(AssertionError, lambda: self._assert_func(x, anan))
        assert_raises(AssertionError, lambda: self._assert_func(anan, x))

        x = np.array([1.1, 2.2, np.nan])

        assert_raises(AssertionError, lambda: self._assert_func(x, anan))
        assert_raises(AssertionError, lambda: self._assert_func(anan, x))

        y = np.array([1.0, 2.0, np.nan])

        self._assert_func(y, x)
        assert_raises(AssertionError, lambda: self._assert_func(x, y))

    def test_inf_compare(self):
        aone = np.array(1)
        ainf = np.array(np.inf)

        self._assert_func(aone, ainf)
        self._assert_func(-ainf, aone)
        self._assert_func(-ainf, ainf)
        assert_raises(AssertionError, lambda: self._assert_func(ainf, aone))
        assert_raises(AssertionError, lambda: self._assert_func(aone, -ainf))
        assert_raises(AssertionError, lambda: self._assert_func(ainf, ainf))
        assert_raises(AssertionError, lambda: self._assert_func(ainf, -ainf))
        assert_raises(AssertionError, lambda: self._assert_func(-ainf, -ainf))

    def test_inf_compare_array(self):
        x = np.array([1.1, 2.2, np.inf])
        ainf = np.array(np.inf)

        assert_raises(AssertionError, lambda: self._assert_func(x, ainf))
        assert_raises(AssertionError, lambda: self._assert_func(ainf, x))
        assert_raises(AssertionError, lambda: self._assert_func(x, -ainf))
        assert_raises(AssertionError, lambda: self._assert_func(-x, -ainf))
        assert_raises(AssertionError, lambda: self._assert_func(-ainf, -x))
        self._assert_func(-ainf, x)
@pytest.mark.skip(reason="The raises decorator depends on Nose")
class TestRaises:
    """Legacy tests for the nose-based ``raises`` decorator (skipped)."""

    def setup(self):
        class MyException(Exception):
            pass

        self.e = MyException

    def raises_exception(self, e):
        raise e

    def does_not_raise_exception(self):
        pass

    def test_correct_catch(self):
        raises(self.e)(self.raises_exception)(self.e)  # raises?

    def test_wrong_exception(self):
        try:
            raises(self.e)(self.raises_exception)(RuntimeError)  # raises?
        except RuntimeError:
            return
        else:
            raise AssertionError("should have caught RuntimeError")

    def test_catch_no_raise(self):
        try:
            raises(self.e)(self.does_not_raise_exception)()  # raises?
        except AssertionError:
            return
        else:
            raise AssertionError("should have raised an AssertionError")
except AssertionError: return else: raise AssertionError("should have raised an AssertionError") class TestWarns: def test_warn(self): def f(): warnings.warn("yo") return 3 before_filters = sys.modules['warnings'].filters[:] assert_equal(assert_warns(UserWarning, f), 3) after_filters = sys.modules['warnings'].filters assert_raises(AssertionError, assert_no_warnings, f) assert_equal(assert_no_warnings(lambda x: x, 1), 1) # Check that the warnings state is unchanged assert_equal(before_filters, after_filters, "assert_warns does not preserver warnings state") def test_context_manager(self): before_filters = sys.modules['warnings'].filters[:] with assert_warns(UserWarning): warnings.warn("yo") after_filters = sys.modules['warnings'].filters def no_warnings(): with assert_no_warnings(): warnings.warn("yo") assert_raises(AssertionError, no_warnings) assert_equal(before_filters, after_filters, "assert_warns does not preserver warnings state") def test_warn_wrong_warning(self): def f(): warnings.warn("yo", DeprecationWarning) failed = False with warnings.catch_warnings(): warnings.simplefilter("error", DeprecationWarning) try: # Should raise a DeprecationWarning assert_warns(UserWarning, f) failed = True except DeprecationWarning: pass if failed: raise AssertionError("wrong warning caught by assert_warn") class TestAssertAllclose: def test_simple(self): x = 1e-3 y = 1e-9 assert_allclose(x, y, atol=1) assert_raises(AssertionError, assert_allclose, x, y) a = np.array([x, y, x, y]) b = np.array([x, y, x, x]) assert_allclose(a, b, atol=1) assert_raises(AssertionError, assert_allclose, a, b) b[-1] = y * (1 + 1e-8) assert_allclose(a, b) assert_raises(AssertionError, assert_allclose, a, b, rtol=1e-9) assert_allclose(6, 10, rtol=0.5) assert_raises(AssertionError, assert_allclose, 10, 6, rtol=0.5) def test_min_int(self): a = np.array([np.iinfo(np.int_).min], dtype=np.int_) # Should not raise: assert_allclose(a, a) def test_report_fail_percentage(self): a = np.array([1, 1, 1, 1]) b 
class TestArrayAlmostEqualNulp:
    """Tests for ``assert_array_almost_equal_nulp`` across float widths."""

    def test_float64_pass(self):
        # The number of units of least precision
        # In this case, use a few places above the lowest level (ie nulp=1)
        nulp = 5
        x = np.linspace(-20, 20, 50, dtype=np.float64)
        x = 10**x
        x = np.r_[-x, x]

        # Addition
        eps = np.finfo(x.dtype).eps
        y = x + x*eps*nulp/2.
        assert_array_almost_equal_nulp(x, y, nulp)

        # Subtraction
        epsneg = np.finfo(x.dtype).epsneg
        y = x - x*epsneg*nulp/2.
        assert_array_almost_equal_nulp(x, y, nulp)

    def test_float64_fail(self):
        nulp = 5
        x = np.linspace(-20, 20, 50, dtype=np.float64)
        x = 10**x
        x = np.r_[-x, x]

        eps = np.finfo(x.dtype).eps
        y = x + x*eps*nulp*2.
        assert_raises(AssertionError, assert_array_almost_equal_nulp,
                      x, y, nulp)

        epsneg = np.finfo(x.dtype).epsneg
        y = x - x*epsneg*nulp*2.
        assert_raises(AssertionError, assert_array_almost_equal_nulp,
                      x, y, nulp)

    def test_float64_ignore_nan(self):
        # Ignore ULP differences between various NAN's
        # Note that MIPS may reverse quiet and signaling nans
        # so we use the builtin version as a base.
        offset = np.uint64(0xffffffff)
        nan1_i64 = np.array(np.nan, dtype=np.float64).view(np.uint64)
        nan2_i64 = nan1_i64 ^ offset  # nan payload on MIPS is all ones.
        nan1_f64 = nan1_i64.view(np.float64)
        nan2_f64 = nan2_i64.view(np.float64)
        assert_array_max_ulp(nan1_f64, nan2_f64, 0)

    def test_float32_pass(self):
        nulp = 5
        x = np.linspace(-20, 20, 50, dtype=np.float32)
        x = 10**x
        x = np.r_[-x, x]

        eps = np.finfo(x.dtype).eps
        y = x + x*eps*nulp/2.
        assert_array_almost_equal_nulp(x, y, nulp)

        epsneg = np.finfo(x.dtype).epsneg
        y = x - x*epsneg*nulp/2.
        assert_array_almost_equal_nulp(x, y, nulp)

    def test_float32_fail(self):
        nulp = 5
        x = np.linspace(-20, 20, 50, dtype=np.float32)
        x = 10**x
        x = np.r_[-x, x]

        eps = np.finfo(x.dtype).eps
        y = x + x*eps*nulp*2.
        assert_raises(AssertionError, assert_array_almost_equal_nulp,
                      x, y, nulp)

        epsneg = np.finfo(x.dtype).epsneg
        y = x - x*epsneg*nulp*2.
        assert_raises(AssertionError, assert_array_almost_equal_nulp,
                      x, y, nulp)

    def test_float32_ignore_nan(self):
        # Ignore ULP differences between various NAN's
        # Note that MIPS may reverse quiet and signaling nans
        # so we use the builtin version as a base.
        offset = np.uint32(0xffff)
        nan1_i32 = np.array(np.nan, dtype=np.float32).view(np.uint32)
        nan2_i32 = nan1_i32 ^ offset  # nan payload on MIPS is all ones.
        nan1_f32 = nan1_i32.view(np.float32)
        nan2_f32 = nan2_i32.view(np.float32)
        assert_array_max_ulp(nan1_f32, nan2_f32, 0)

    def test_float16_pass(self):
        nulp = 5
        x = np.linspace(-4, 4, 10, dtype=np.float16)
        x = 10**x
        x = np.r_[-x, x]

        eps = np.finfo(x.dtype).eps
        y = x + x*eps*nulp/2.
        assert_array_almost_equal_nulp(x, y, nulp)

        epsneg = np.finfo(x.dtype).epsneg
        y = x - x*epsneg*nulp/2.
        assert_array_almost_equal_nulp(x, y, nulp)

    def test_float16_fail(self):
        nulp = 5
        x = np.linspace(-4, 4, 10, dtype=np.float16)
        x = 10**x
        x = np.r_[-x, x]

        eps = np.finfo(x.dtype).eps
        y = x + x*eps*nulp*2.
        assert_raises(AssertionError, assert_array_almost_equal_nulp,
                      x, y, nulp)

        epsneg = np.finfo(x.dtype).epsneg
        y = x - x*epsneg*nulp*2.
        assert_raises(AssertionError, assert_array_almost_equal_nulp,
                      x, y, nulp)

    def test_float16_ignore_nan(self):
        # Ignore ULP differences between various NAN's
        # Note that MIPS may reverse quiet and signaling nans
        # so we use the builtin version as a base.
        offset = np.uint16(0xff)
        nan1_i16 = np.array(np.nan, dtype=np.float16).view(np.uint16)
        nan2_i16 = nan1_i16 ^ offset  # nan payload on MIPS is all ones.
        nan1_f16 = nan1_i16.view(np.float16)
        nan2_f16 = nan2_i16.view(np.float16)
        assert_array_max_ulp(nan1_f16, nan2_f16, 0)

    def test_complex128_pass(self):
        nulp = 5
        x = np.linspace(-20, 20, 50, dtype=np.float64)
        x = 10**x
        x = np.r_[-x, x]
        xi = x + x*1j

        eps = np.finfo(x.dtype).eps
        y = x + x*eps*nulp/2.
        assert_array_almost_equal_nulp(xi, x + y*1j, nulp)
        assert_array_almost_equal_nulp(xi, y + x*1j, nulp)
        # The test condition needs to be at least a factor of sqrt(2) smaller
        # because the real and imaginary parts both change
        y = x + x*eps*nulp/4.
        assert_array_almost_equal_nulp(xi, y + y*1j, nulp)

        epsneg = np.finfo(x.dtype).epsneg
        y = x - x*epsneg*nulp/2.
        assert_array_almost_equal_nulp(xi, x + y*1j, nulp)
        assert_array_almost_equal_nulp(xi, y + x*1j, nulp)
        y = x - x*epsneg*nulp/4.
        assert_array_almost_equal_nulp(xi, y + y*1j, nulp)

    def test_complex128_fail(self):
        nulp = 5
        x = np.linspace(-20, 20, 50, dtype=np.float64)
        x = 10**x
        x = np.r_[-x, x]
        xi = x + x*1j

        eps = np.finfo(x.dtype).eps
        y = x + x*eps*nulp*2.
        assert_raises(AssertionError, assert_array_almost_equal_nulp,
                      xi, x + y*1j, nulp)
        assert_raises(AssertionError, assert_array_almost_equal_nulp,
                      xi, y + x*1j, nulp)
        # The test condition needs to be at least a factor of sqrt(2) smaller
        # because the real and imaginary parts both change
        y = x + x*eps*nulp
        assert_raises(AssertionError, assert_array_almost_equal_nulp,
                      xi, y + y*1j, nulp)

        epsneg = np.finfo(x.dtype).epsneg
        y = x - x*epsneg*nulp*2.
        assert_raises(AssertionError, assert_array_almost_equal_nulp,
                      xi, x + y*1j, nulp)
        assert_raises(AssertionError, assert_array_almost_equal_nulp,
                      xi, y + x*1j, nulp)
        y = x - x*epsneg*nulp
        assert_raises(AssertionError, assert_array_almost_equal_nulp,
                      xi, y + y*1j, nulp)

    def test_complex64_pass(self):
        nulp = 5
        x = np.linspace(-20, 20, 50, dtype=np.float32)
        x = 10**x
        x = np.r_[-x, x]
        xi = x + x*1j

        eps = np.finfo(x.dtype).eps
        y = x + x*eps*nulp/2.
        assert_array_almost_equal_nulp(xi, x + y*1j, nulp)
        assert_array_almost_equal_nulp(xi, y + x*1j, nulp)
        y = x + x*eps*nulp/4.
        assert_array_almost_equal_nulp(xi, y + y*1j, nulp)

        epsneg = np.finfo(x.dtype).epsneg
        y = x - x*epsneg*nulp/2.
        assert_array_almost_equal_nulp(xi, x + y*1j, nulp)
        assert_array_almost_equal_nulp(xi, y + x*1j, nulp)
        y = x - x*epsneg*nulp/4.
        assert_array_almost_equal_nulp(xi, y + y*1j, nulp)

    def test_complex64_fail(self):
        nulp = 5
        x = np.linspace(-20, 20, 50, dtype=np.float32)
        x = 10**x
        x = np.r_[-x, x]
        xi = x + x*1j

        eps = np.finfo(x.dtype).eps
        y = x + x*eps*nulp*2.
        assert_raises(AssertionError, assert_array_almost_equal_nulp,
                      xi, x + y*1j, nulp)
        assert_raises(AssertionError, assert_array_almost_equal_nulp,
                      xi, y + x*1j, nulp)
        y = x + x*eps*nulp
        assert_raises(AssertionError, assert_array_almost_equal_nulp,
                      xi, y + y*1j, nulp)

        epsneg = np.finfo(x.dtype).epsneg
        y = x - x*epsneg*nulp*2.
        assert_raises(AssertionError, assert_array_almost_equal_nulp,
                      xi, x + y*1j, nulp)
        assert_raises(AssertionError, assert_array_almost_equal_nulp,
                      xi, y + x*1j, nulp)
        y = x - x*epsneg*nulp
        assert_raises(AssertionError, assert_array_almost_equal_nulp,
                      xi, y + y*1j, nulp)
class TestULP:
    """Tests for ``assert_array_max_ulp``."""

    def test_equal(self):
        x = np.random.randn(10)
        assert_array_max_ulp(x, x, maxulp=0)

    def test_single(self):
        # Generate 1 + small deviation, check that adding eps gives a few UNL
        x = np.ones(10).astype(np.float32)
        x += 0.01 * np.random.randn(10).astype(np.float32)
        eps = np.finfo(np.float32).eps
        assert_array_max_ulp(x, x+eps, maxulp=20)

    def test_double(self):
        # Generate 1 + small deviation, check that adding eps gives a few UNL
        x = np.ones(10).astype(np.float64)
        x += 0.01 * np.random.randn(10).astype(np.float64)
        eps = np.finfo(np.float64).eps
        assert_array_max_ulp(x, x+eps, maxulp=200)

    def test_inf(self):
        for dt in [np.float32, np.float64]:
            inf = np.array([np.inf]).astype(dt)
            big = np.array([np.finfo(dt).max])
            assert_array_max_ulp(inf, big, maxulp=200)

    def test_nan(self):
        # Test that nan is 'far' from small, tiny, inf, max and min
        for dt in [np.float32, np.float64]:
            if dt == np.float32:
                maxulp = 1e6
            else:
                maxulp = 1e12
            inf = np.array([np.inf]).astype(dt)
            nan = np.array([np.nan]).astype(dt)
            big = np.array([np.finfo(dt).max])
            tiny = np.array([np.finfo(dt).tiny])
            zero = np.array([np.PZERO]).astype(dt)
            nzero = np.array([np.NZERO]).astype(dt)
            assert_raises(AssertionError,
                          lambda: assert_array_max_ulp(nan, inf,
                                                       maxulp=maxulp))
            assert_raises(AssertionError,
                          lambda: assert_array_max_ulp(nan, big,
                                                       maxulp=maxulp))
            assert_raises(AssertionError,
                          lambda: assert_array_max_ulp(nan, tiny,
                                                       maxulp=maxulp))
            assert_raises(AssertionError,
                          lambda: assert_array_max_ulp(nan, zero,
                                                       maxulp=maxulp))
            assert_raises(AssertionError,
                          lambda: assert_array_max_ulp(nan, nzero,
                                                       maxulp=maxulp))


class TestStringEqual:
    """Tests for ``assert_string_equal`` and its diff-style message."""

    def test_simple(self):
        assert_string_equal("hello", "hello")
        assert_string_equal("hello\nmultiline", "hello\nmultiline")

        with pytest.raises(AssertionError) as exc_info:
            assert_string_equal("foo\nbar", "hello\nbar")
        msg = str(exc_info.value)
        assert_equal(msg, "Differences in strings:\n- foo\n+ hello")

        assert_raises(AssertionError,
                      lambda: assert_string_equal("foo", "hello"))

    def test_regex(self):
        # Regex metacharacters in the inputs must be treated literally.
        assert_string_equal("a+*b", "a+*b")

        assert_raises(AssertionError,
                      lambda: assert_string_equal("aaa", "a+b"))
"hello\nmultiline") with pytest.raises(AssertionError) as exc_info: assert_string_equal("foo\nbar", "hello\nbar") msg = str(exc_info.value) assert_equal(msg, "Differences in strings:\n- foo\n+ hello") assert_raises(AssertionError, lambda: assert_string_equal("foo", "hello")) def test_regex(self): assert_string_equal("a+*b", "a+*b") assert_raises(AssertionError, lambda: assert_string_equal("aaa", "a+b")) def assert_warn_len_equal(mod, n_in_context, py34=None, py37=None): try: mod_warns = mod.__warningregistry__ except AttributeError: # the lack of a __warningregistry__ # attribute means that no warning has # occurred; this can be triggered in # a parallel test scenario, while in # a serial test scenario an initial # warning (and therefore the attribute) # are always created first mod_warns = {} num_warns = len(mod_warns) # Python 3.4 appears to clear any pre-existing warnings of the same type, # when raising warnings inside a catch_warnings block. So, there is a # warning generated by the tests within the context manager, but no # previous warnings. if 'version' in mod_warns: # Python 3 adds a 'version' entry to the registry, # do not count it. num_warns -= 1 # Behavior of warnings is Python version dependent. Adjust the # expected result to compensate. In particular, Python 3.7 does # not make an entry for ignored warnings. if sys.version_info[:2] >= (3, 7): if py37 is not None: n_in_context = py37 else: if py34 is not None: n_in_context = py34 assert_equal(num_warns, n_in_context) def test_warn_len_equal_call_scenarios(): # assert_warn_len_equal is called under # varying circumstances depending on serial # vs. 
parallel test scenarios; this test # simply aims to probe both code paths and # check that no assertion is uncaught # parallel scenario -- no warning issued yet class mod: pass mod_inst = mod() assert_warn_len_equal(mod=mod_inst, n_in_context=0) # serial test scenario -- the __warningregistry__ # attribute should be present class mod: def __init__(self): self.__warningregistry__ = {'warning1':1, 'warning2':2} mod_inst = mod() assert_warn_len_equal(mod=mod_inst, n_in_context=2) def _get_fresh_mod(): # Get this module, with warning registry empty my_mod = sys.modules[__name__] try: my_mod.__warningregistry__.clear() except AttributeError: # will not have a __warningregistry__ unless warning has been # raised in the module at some point pass return my_mod def test_clear_and_catch_warnings(): # Initial state of module, no warnings my_mod = _get_fresh_mod() assert_equal(getattr(my_mod, '__warningregistry__', {}), {}) with clear_and_catch_warnings(modules=[my_mod]): warnings.simplefilter('ignore') warnings.warn('Some warning') assert_equal(my_mod.__warningregistry__, {}) # Without specified modules, don't clear warnings during context # Python 3.7 catch_warnings doesn't make an entry for 'ignore'. with clear_and_catch_warnings(): warnings.simplefilter('ignore') warnings.warn('Some warning') assert_warn_len_equal(my_mod, 1, py37=0) # Confirm that specifying module keeps old warning, does not add new with clear_and_catch_warnings(modules=[my_mod]): warnings.simplefilter('ignore') warnings.warn('Another warning') assert_warn_len_equal(my_mod, 1, py37=0) # Another warning, no module spec does add to warnings dict, except on # Python 3.4 (see comments in `assert_warn_len_equal`) # Python 3.7 catch_warnings doesn't make an entry for 'ignore'. 
with clear_and_catch_warnings(): warnings.simplefilter('ignore') warnings.warn('Another warning') assert_warn_len_equal(my_mod, 2, py34=1, py37=0) def test_suppress_warnings_module(): # Initial state of module, no warnings my_mod = _get_fresh_mod() assert_equal(getattr(my_mod, '__warningregistry__', {}), {}) def warn_other_module(): # Apply along axis is implemented in python; stacklevel=2 means # we end up inside its module, not ours. def warn(arr): warnings.warn("Some warning 2", stacklevel=2) return arr np.apply_along_axis(warn, 0, [0]) # Test module based warning suppression: assert_warn_len_equal(my_mod, 0) with suppress_warnings() as sup: sup.record(UserWarning) # suppress warning from other module (may have .pyc ending), # if apply_along_axis is moved, had to be changed. sup.filter(module=np.lib.shape_base) warnings.warn("Some warning") warn_other_module() # Check that the suppression did test the file correctly (this module # got filtered) assert_equal(len(sup.log), 1) assert_equal(sup.log[0].message.args[0], "Some warning") assert_warn_len_equal(my_mod, 0, py37=0) sup = suppress_warnings() # Will have to be changed if apply_along_axis is moved: sup.filter(module=my_mod) with sup: warnings.warn('Some warning') assert_warn_len_equal(my_mod, 0) # And test repeat works: sup.filter(module=my_mod) with sup: warnings.warn('Some warning') assert_warn_len_equal(my_mod, 0) # Without specified modules, don't clear warnings during context # Python 3.7 does not add ignored warnings. 
with suppress_warnings(): warnings.simplefilter('ignore') warnings.warn('Some warning') assert_warn_len_equal(my_mod, 1, py37=0) def test_suppress_warnings_type(): # Initial state of module, no warnings my_mod = _get_fresh_mod() assert_equal(getattr(my_mod, '__warningregistry__', {}), {}) # Test module based warning suppression: with suppress_warnings() as sup: sup.filter(UserWarning) warnings.warn('Some warning') assert_warn_len_equal(my_mod, 0) sup = suppress_warnings() sup.filter(UserWarning) with sup: warnings.warn('Some warning') assert_warn_len_equal(my_mod, 0) # And test repeat works: sup.filter(module=my_mod) with sup: warnings.warn('Some warning') assert_warn_len_equal(my_mod, 0) # Without specified modules, don't clear warnings during context # Python 3.7 does not add ignored warnings. with suppress_warnings(): warnings.simplefilter('ignore') warnings.warn('Some warning') assert_warn_len_equal(my_mod, 1, py37=0) def test_suppress_warnings_decorate_no_record(): sup = suppress_warnings() sup.filter(UserWarning) @sup def warn(category): warnings.warn('Some warning', category) with warnings.catch_warnings(record=True) as w: warnings.simplefilter("always") warn(UserWarning) # should be supppressed warn(RuntimeWarning) assert_equal(len(w), 1) def test_suppress_warnings_record(): sup = suppress_warnings() log1 = sup.record() with sup: log2 = sup.record(message='Some other warning 2') sup.filter(message='Some warning') warnings.warn('Some warning') warnings.warn('Some other warning') warnings.warn('Some other warning 2') assert_equal(len(sup.log), 2) assert_equal(len(log1), 1) assert_equal(len(log2),1) assert_equal(log2[0].message.args[0], 'Some other warning 2') # Do it again, with the same context to see if some warnings survived: with sup: log2 = sup.record(message='Some other warning 2') sup.filter(message='Some warning') warnings.warn('Some warning') warnings.warn('Some other warning') warnings.warn('Some other warning 2') assert_equal(len(sup.log), 2) 
assert_equal(len(log1), 1) assert_equal(len(log2), 1) assert_equal(log2[0].message.args[0], 'Some other warning 2') # Test nested: with suppress_warnings() as sup: sup.record() with suppress_warnings() as sup2: sup2.record(message='Some warning') warnings.warn('Some warning') warnings.warn('Some other warning') assert_equal(len(sup2.log), 1) assert_equal(len(sup.log), 1) def test_suppress_warnings_forwarding(): def warn_other_module(): # Apply along axis is implemented in python; stacklevel=2 means # we end up inside its module, not ours. def warn(arr): warnings.warn("Some warning", stacklevel=2) return arr np.apply_along_axis(warn, 0, [0]) with suppress_warnings() as sup: sup.record() with suppress_warnings("always"): for i in range(2): warnings.warn("Some warning") assert_equal(len(sup.log), 2) with suppress_warnings() as sup: sup.record() with suppress_warnings("location"): for i in range(2): warnings.warn("Some warning") warnings.warn("Some warning") assert_equal(len(sup.log), 2) with suppress_warnings() as sup: sup.record() with suppress_warnings("module"): for i in range(2): warnings.warn("Some warning") warnings.warn("Some warning") warn_other_module() assert_equal(len(sup.log), 2) with suppress_warnings() as sup: sup.record() with suppress_warnings("once"): for i in range(2): warnings.warn("Some warning") warnings.warn("Some other warning") warn_other_module() assert_equal(len(sup.log), 2) def test_tempdir(): with tempdir() as tdir: fpath = os.path.join(tdir, 'tmp') with open(fpath, 'w'): pass assert_(not os.path.isdir(tdir)) raised = False try: with tempdir() as tdir: raise ValueError() except ValueError: raised = True assert_(raised) assert_(not os.path.isdir(tdir)) def test_temppath(): with temppath() as fpath: with open(fpath, 'w'): pass assert_(not os.path.isfile(fpath)) raised = False try: with temppath() as fpath: raise ValueError() except ValueError: raised = True assert_(raised) assert_(not os.path.isfile(fpath)) class 
my_cacw(clear_and_catch_warnings): class_modules = (sys.modules[__name__],) def test_clear_and_catch_warnings_inherit(): # Test can subclass and add default modules my_mod = _get_fresh_mod() with my_cacw(): warnings.simplefilter('ignore') warnings.warn('Some warning') assert_equal(my_mod.__warningregistry__, {}) @pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts") class TestAssertNoGcCycles: """ Test assert_no_gc_cycles """ def test_passes(self): def no_cycle(): b = [] b.append([]) return b with assert_no_gc_cycles(): no_cycle() assert_no_gc_cycles(no_cycle) def test_asserts(self): def make_cycle(): a = [] a.append(a) a.append(a) return a with assert_raises(AssertionError): with assert_no_gc_cycles(): make_cycle() with assert_raises(AssertionError): assert_no_gc_cycles(make_cycle) @pytest.mark.slow def test_fails(self): """ Test that in cases where the garbage cannot be collected, we raise an error, instead of hanging forever trying to clear it. """ class ReferenceCycleInDel: """ An object that not only contains a reference cycle, but creates new cycles whenever it's garbage-collected and its __del__ runs """ make_cycle = True def __init__(self): self.cycle = self def __del__(self): # break the current cycle so that `self` can be freed self.cycle = None if ReferenceCycleInDel.make_cycle: # but create a new one so that the garbage collector has more # work to do. ReferenceCycleInDel() try: w = weakref.ref(ReferenceCycleInDel()) try: with assert_raises(RuntimeError): # this will be unable to get a baseline empty garbage assert_no_gc_cycles(lambda: None) except AssertionError: # the above test is only necessary if the GC actually tried to free # our object anyway, which python 2.7 does not. if w() is not None: pytest.skip("GC does not call __del__ on cyclic objects") raise finally: # make sure that we stop creating reference cycles ReferenceCycleInDel.make_cycle = False
seberg/numpy
numpy/testing/tests/test_utils.py
numpy/core/code_generators/ufunc_docstrings.py
# flake8: noqa __docformat__ = "restructuredtext" # Let users know if they're missing any of our hard dependencies hard_dependencies = ("numpy", "pytz", "dateutil") missing_dependencies = [] for dependency in hard_dependencies: try: __import__(dependency) except ImportError as e: missing_dependencies.append(f"{dependency}: {e}") if missing_dependencies: raise ImportError( "Unable to import required dependencies:\n" + "\n".join(missing_dependencies) ) del hard_dependencies, dependency, missing_dependencies # numpy compat from pandas.compat import ( np_version_under1p18 as _np_version_under1p18, is_numpy_dev as _is_numpy_dev, ) try: from pandas._libs import hashtable as _hashtable, lib as _lib, tslib as _tslib except ImportError as e: # pragma: no cover # hack but overkill to use re module = str(e).replace("cannot import name ", "") raise ImportError( f"C extension: {module} not built. If you want to import " "pandas from the source directory, you may need to run " "'python setup.py build_ext --force' to build the C extensions first." 
) from e from pandas._config import ( get_option, set_option, reset_option, describe_option, option_context, options, ) # let init-time option registration happen import pandas.core.config_init from pandas.core.api import ( # dtype Int8Dtype, Int16Dtype, Int32Dtype, Int64Dtype, UInt8Dtype, UInt16Dtype, UInt32Dtype, UInt64Dtype, Float32Dtype, Float64Dtype, CategoricalDtype, PeriodDtype, IntervalDtype, DatetimeTZDtype, StringDtype, BooleanDtype, # missing NA, isna, isnull, notna, notnull, # indexes Index, CategoricalIndex, Int64Index, UInt64Index, RangeIndex, Float64Index, MultiIndex, IntervalIndex, TimedeltaIndex, DatetimeIndex, PeriodIndex, IndexSlice, # tseries NaT, Period, period_range, Timedelta, timedelta_range, Timestamp, date_range, bdate_range, Interval, interval_range, DateOffset, # conversion to_numeric, to_datetime, to_timedelta, # misc Flags, Grouper, factorize, unique, value_counts, NamedAgg, array, Categorical, set_eng_float_format, Series, DataFrame, ) from pandas.core.arrays.sparse import SparseDtype from pandas.tseries.api import infer_freq from pandas.tseries import offsets from pandas.core.computation.api import eval from pandas.core.reshape.api import ( concat, lreshape, melt, wide_to_long, merge, merge_asof, merge_ordered, crosstab, pivot, pivot_table, get_dummies, cut, qcut, ) import pandas.api from pandas.util._print_versions import show_versions from pandas.io.api import ( # excel ExcelFile, ExcelWriter, read_excel, # parsers read_csv, read_fwf, read_table, # pickle read_pickle, to_pickle, # pytables HDFStore, read_hdf, # sql read_sql, read_sql_query, read_sql_table, # misc read_clipboard, read_parquet, read_orc, read_feather, read_gbq, read_html, read_xml, read_json, read_stata, read_sas, read_spss, ) from pandas.io.json import _json_normalize as json_normalize from pandas.util._tester import test import pandas.testing import pandas.arrays # use the closest tagged version if possible from pandas._version import get_versions v = 
get_versions() __version__ = v.get("closest-tag", v["version"]) __git_version__ = v.get("full-revisionid") del get_versions, v # GH 27101 def __getattr__(name): import warnings if name == "datetime": warnings.warn( "The pandas.datetime class is deprecated " "and will be removed from pandas in a future version. " "Import from datetime module instead.", FutureWarning, stacklevel=2, ) from datetime import datetime as dt return dt elif name == "np": warnings.warn( "The pandas.np module is deprecated " "and will be removed from pandas in a future version. " "Import numpy directly instead", FutureWarning, stacklevel=2, ) import numpy as np return np elif name in {"SparseSeries", "SparseDataFrame"}: warnings.warn( f"The {name} class is removed from pandas. Accessing it from " "the top-level namespace will also be removed in the next version", FutureWarning, stacklevel=2, ) return type(name, (), {}) elif name == "SparseArray": warnings.warn( "The pandas.SparseArray class is deprecated " "and will be removed from pandas in a future version. " "Use pandas.arrays.SparseArray instead.", FutureWarning, stacklevel=2, ) from pandas.core.arrays.sparse import SparseArray as _SparseArray return _SparseArray raise AttributeError(f"module 'pandas' has no attribute '{name}'") # module level doc-string __doc__ = """ pandas - a powerful data analysis and manipulation library for Python ===================================================================== **pandas** is a Python package providing fast, flexible, and expressive data structures designed to make working with "relational" or "labeled" data both easy and intuitive. It aims to be the fundamental high-level building block for doing practical, **real world** data analysis in Python. Additionally, it has the broader goal of becoming **the most powerful and flexible open source data analysis / manipulation tool available in any language**. It is already well on its way toward this goal. 
Main Features ------------- Here are just a few of the things that pandas does well: - Easy handling of missing data in floating point as well as non-floating point data. - Size mutability: columns can be inserted and deleted from DataFrame and higher dimensional objects - Automatic and explicit data alignment: objects can be explicitly aligned to a set of labels, or the user can simply ignore the labels and let `Series`, `DataFrame`, etc. automatically align the data for you in computations. - Powerful, flexible group by functionality to perform split-apply-combine operations on data sets, for both aggregating and transforming data. - Make it easy to convert ragged, differently-indexed data in other Python and NumPy data structures into DataFrame objects. - Intelligent label-based slicing, fancy indexing, and subsetting of large data sets. - Intuitive merging and joining data sets. - Flexible reshaping and pivoting of data sets. - Hierarchical labeling of axes (possible to have multiple labels per tick). - Robust IO tools for loading data from flat files (CSV and delimited), Excel files, databases, and saving/loading data from the ultrafast HDF5 format. - Time series-specific functionality: date range generation and frequency conversion, moving window statistics, date shifting and lagging. """
import numpy as np import pytest from pandas import ( Categorical, Index, ) import pandas._testing as tm class TestCategoricalSort: def test_argsort(self): c = Categorical([5, 3, 1, 4, 2], ordered=True) expected = np.array([2, 4, 1, 3, 0]) tm.assert_numpy_array_equal( c.argsort(ascending=True), expected, check_dtype=False ) expected = expected[::-1] tm.assert_numpy_array_equal( c.argsort(ascending=False), expected, check_dtype=False ) def test_numpy_argsort(self): c = Categorical([5, 3, 1, 4, 2], ordered=True) expected = np.array([2, 4, 1, 3, 0]) tm.assert_numpy_array_equal(np.argsort(c), expected, check_dtype=False) tm.assert_numpy_array_equal( np.argsort(c, kind="mergesort"), expected, check_dtype=False ) msg = "the 'axis' parameter is not supported" with pytest.raises(ValueError, match=msg): np.argsort(c, axis=0) msg = "the 'order' parameter is not supported" with pytest.raises(ValueError, match=msg): np.argsort(c, order="C") def test_sort_values(self): # unordered cats are sortable cat = Categorical(["a", "b", "b", "a"], ordered=False) cat.sort_values() cat = Categorical(["a", "c", "b", "d"], ordered=True) # sort_values res = cat.sort_values() exp = np.array(["a", "b", "c", "d"], dtype=object) tm.assert_numpy_array_equal(res.__array__(), exp) tm.assert_index_equal(res.categories, cat.categories) cat = Categorical( ["a", "c", "b", "d"], categories=["a", "b", "c", "d"], ordered=True ) res = cat.sort_values() exp = np.array(["a", "b", "c", "d"], dtype=object) tm.assert_numpy_array_equal(res.__array__(), exp) tm.assert_index_equal(res.categories, cat.categories) res = cat.sort_values(ascending=False) exp = np.array(["d", "c", "b", "a"], dtype=object) tm.assert_numpy_array_equal(res.__array__(), exp) tm.assert_index_equal(res.categories, cat.categories) # sort (inplace order) cat1 = cat.copy() orig_codes = cat1._codes cat1.sort_values(inplace=True) assert cat1._codes is orig_codes exp = np.array(["a", "b", "c", "d"], dtype=object) 
tm.assert_numpy_array_equal(cat1.__array__(), exp) tm.assert_index_equal(res.categories, cat.categories) # reverse cat = Categorical(["a", "c", "c", "b", "d"], ordered=True) res = cat.sort_values(ascending=False) exp_val = np.array(["d", "c", "c", "b", "a"], dtype=object) exp_categories = Index(["a", "b", "c", "d"]) tm.assert_numpy_array_equal(res.__array__(), exp_val) tm.assert_index_equal(res.categories, exp_categories) def test_sort_values_na_position(self): # see gh-12882 cat = Categorical([5, 2, np.nan, 2, np.nan], ordered=True) exp_categories = Index([2, 5]) exp = np.array([2.0, 2.0, 5.0, np.nan, np.nan]) res = cat.sort_values() # default arguments tm.assert_numpy_array_equal(res.__array__(), exp) tm.assert_index_equal(res.categories, exp_categories) exp = np.array([np.nan, np.nan, 2.0, 2.0, 5.0]) res = cat.sort_values(ascending=True, na_position="first") tm.assert_numpy_array_equal(res.__array__(), exp) tm.assert_index_equal(res.categories, exp_categories) exp = np.array([np.nan, np.nan, 5.0, 2.0, 2.0]) res = cat.sort_values(ascending=False, na_position="first") tm.assert_numpy_array_equal(res.__array__(), exp) tm.assert_index_equal(res.categories, exp_categories) exp = np.array([2.0, 2.0, 5.0, np.nan, np.nan]) res = cat.sort_values(ascending=True, na_position="last") tm.assert_numpy_array_equal(res.__array__(), exp) tm.assert_index_equal(res.categories, exp_categories) exp = np.array([5.0, 2.0, 2.0, np.nan, np.nan]) res = cat.sort_values(ascending=False, na_position="last") tm.assert_numpy_array_equal(res.__array__(), exp) tm.assert_index_equal(res.categories, exp_categories) cat = Categorical(["a", "c", "b", "d", np.nan], ordered=True) res = cat.sort_values(ascending=False, na_position="last") exp_val = np.array(["d", "c", "b", "a", np.nan], dtype=object) exp_categories = Index(["a", "b", "c", "d"]) tm.assert_numpy_array_equal(res.__array__(), exp_val) tm.assert_index_equal(res.categories, exp_categories) cat = Categorical(["a", "c", "b", "d", np.nan], 
ordered=True) res = cat.sort_values(ascending=False, na_position="first") exp_val = np.array([np.nan, "d", "c", "b", "a"], dtype=object) exp_categories = Index(["a", "b", "c", "d"]) tm.assert_numpy_array_equal(res.__array__(), exp_val) tm.assert_index_equal(res.categories, exp_categories)
datapythonista/pandas
pandas/tests/arrays/categorical/test_sorting.py
pandas/__init__.py
import numpy as np import pandas as pd from pandas import ( Categorical, DataFrame, Index, Series, Timestamp, ) import pandas._testing as tm from pandas.core.arrays import IntervalArray class TestGetNumericData: def test_get_numeric_data_preserve_dtype(self): # get the numeric data obj = DataFrame({"A": [1, "2", 3.0]}) result = obj._get_numeric_data() expected = DataFrame(index=[0, 1, 2], dtype=object) tm.assert_frame_equal(result, expected) def test_get_numeric_data(self): datetime64name = np.dtype("M8[ns]").name objectname = np.dtype(np.object_).name df = DataFrame( {"a": 1.0, "b": 2, "c": "foo", "f": Timestamp("20010102")}, index=np.arange(10), ) result = df.dtypes expected = Series( [ np.dtype("float64"), np.dtype("int64"), np.dtype(objectname), np.dtype(datetime64name), ], index=["a", "b", "c", "f"], ) tm.assert_series_equal(result, expected) df = DataFrame( { "a": 1.0, "b": 2, "c": "foo", "d": np.array([1.0] * 10, dtype="float32"), "e": np.array([1] * 10, dtype="int32"), "f": np.array([1] * 10, dtype="int16"), "g": Timestamp("20010102"), }, index=np.arange(10), ) result = df._get_numeric_data() expected = df.loc[:, ["a", "b", "d", "e", "f"]] tm.assert_frame_equal(result, expected) only_obj = df.loc[:, ["c", "g"]] result = only_obj._get_numeric_data() expected = df.loc[:, []] tm.assert_frame_equal(result, expected) df = DataFrame.from_dict({"a": [1, 2], "b": ["foo", "bar"], "c": [np.pi, np.e]}) result = df._get_numeric_data() expected = DataFrame.from_dict({"a": [1, 2], "c": [np.pi, np.e]}) tm.assert_frame_equal(result, expected) df = result.copy() result = df._get_numeric_data() expected = df tm.assert_frame_equal(result, expected) def test_get_numeric_data_mixed_dtype(self): # numeric and object columns df = DataFrame( { "a": [1, 2, 3], "b": [True, False, True], "c": ["foo", "bar", "baz"], "d": [None, None, None], "e": [3.14, 0.577, 2.773], } ) result = df._get_numeric_data() tm.assert_index_equal(result.columns, Index(["a", "b", "e"])) def 
test_get_numeric_data_extension_dtype(self): # GH#22290 df = DataFrame( { "A": pd.array([-10, np.nan, 0, 10, 20, 30], dtype="Int64"), "B": Categorical(list("abcabc")), "C": pd.array([0, 1, 2, 3, np.nan, 5], dtype="UInt8"), "D": IntervalArray.from_breaks(range(7)), } ) result = df._get_numeric_data() expected = df.loc[:, ["A", "C"]] tm.assert_frame_equal(result, expected)
import numpy as np import pytest from pandas import ( Categorical, Index, ) import pandas._testing as tm class TestCategoricalSort: def test_argsort(self): c = Categorical([5, 3, 1, 4, 2], ordered=True) expected = np.array([2, 4, 1, 3, 0]) tm.assert_numpy_array_equal( c.argsort(ascending=True), expected, check_dtype=False ) expected = expected[::-1] tm.assert_numpy_array_equal( c.argsort(ascending=False), expected, check_dtype=False ) def test_numpy_argsort(self): c = Categorical([5, 3, 1, 4, 2], ordered=True) expected = np.array([2, 4, 1, 3, 0]) tm.assert_numpy_array_equal(np.argsort(c), expected, check_dtype=False) tm.assert_numpy_array_equal( np.argsort(c, kind="mergesort"), expected, check_dtype=False ) msg = "the 'axis' parameter is not supported" with pytest.raises(ValueError, match=msg): np.argsort(c, axis=0) msg = "the 'order' parameter is not supported" with pytest.raises(ValueError, match=msg): np.argsort(c, order="C") def test_sort_values(self): # unordered cats are sortable cat = Categorical(["a", "b", "b", "a"], ordered=False) cat.sort_values() cat = Categorical(["a", "c", "b", "d"], ordered=True) # sort_values res = cat.sort_values() exp = np.array(["a", "b", "c", "d"], dtype=object) tm.assert_numpy_array_equal(res.__array__(), exp) tm.assert_index_equal(res.categories, cat.categories) cat = Categorical( ["a", "c", "b", "d"], categories=["a", "b", "c", "d"], ordered=True ) res = cat.sort_values() exp = np.array(["a", "b", "c", "d"], dtype=object) tm.assert_numpy_array_equal(res.__array__(), exp) tm.assert_index_equal(res.categories, cat.categories) res = cat.sort_values(ascending=False) exp = np.array(["d", "c", "b", "a"], dtype=object) tm.assert_numpy_array_equal(res.__array__(), exp) tm.assert_index_equal(res.categories, cat.categories) # sort (inplace order) cat1 = cat.copy() orig_codes = cat1._codes cat1.sort_values(inplace=True) assert cat1._codes is orig_codes exp = np.array(["a", "b", "c", "d"], dtype=object) 
tm.assert_numpy_array_equal(cat1.__array__(), exp) tm.assert_index_equal(res.categories, cat.categories) # reverse cat = Categorical(["a", "c", "c", "b", "d"], ordered=True) res = cat.sort_values(ascending=False) exp_val = np.array(["d", "c", "c", "b", "a"], dtype=object) exp_categories = Index(["a", "b", "c", "d"]) tm.assert_numpy_array_equal(res.__array__(), exp_val) tm.assert_index_equal(res.categories, exp_categories) def test_sort_values_na_position(self): # see gh-12882 cat = Categorical([5, 2, np.nan, 2, np.nan], ordered=True) exp_categories = Index([2, 5]) exp = np.array([2.0, 2.0, 5.0, np.nan, np.nan]) res = cat.sort_values() # default arguments tm.assert_numpy_array_equal(res.__array__(), exp) tm.assert_index_equal(res.categories, exp_categories) exp = np.array([np.nan, np.nan, 2.0, 2.0, 5.0]) res = cat.sort_values(ascending=True, na_position="first") tm.assert_numpy_array_equal(res.__array__(), exp) tm.assert_index_equal(res.categories, exp_categories) exp = np.array([np.nan, np.nan, 5.0, 2.0, 2.0]) res = cat.sort_values(ascending=False, na_position="first") tm.assert_numpy_array_equal(res.__array__(), exp) tm.assert_index_equal(res.categories, exp_categories) exp = np.array([2.0, 2.0, 5.0, np.nan, np.nan]) res = cat.sort_values(ascending=True, na_position="last") tm.assert_numpy_array_equal(res.__array__(), exp) tm.assert_index_equal(res.categories, exp_categories) exp = np.array([5.0, 2.0, 2.0, np.nan, np.nan]) res = cat.sort_values(ascending=False, na_position="last") tm.assert_numpy_array_equal(res.__array__(), exp) tm.assert_index_equal(res.categories, exp_categories) cat = Categorical(["a", "c", "b", "d", np.nan], ordered=True) res = cat.sort_values(ascending=False, na_position="last") exp_val = np.array(["d", "c", "b", "a", np.nan], dtype=object) exp_categories = Index(["a", "b", "c", "d"]) tm.assert_numpy_array_equal(res.__array__(), exp_val) tm.assert_index_equal(res.categories, exp_categories) cat = Categorical(["a", "c", "b", "d", np.nan], 
ordered=True) res = cat.sort_values(ascending=False, na_position="first") exp_val = np.array([np.nan, "d", "c", "b", "a"], dtype=object) exp_categories = Index(["a", "b", "c", "d"]) tm.assert_numpy_array_equal(res.__array__(), exp_val) tm.assert_index_equal(res.categories, exp_categories)
datapythonista/pandas
pandas/tests/arrays/categorical/test_sorting.py
pandas/tests/frame/methods/test_get_numeric_data.py
import warnings import pytest import pandas as pd import pandas._testing as tm from pandas.tests.extension.base.base import BaseExtensionTests class BaseReduceTests(BaseExtensionTests): """ Reduction specific tests. Generally these only make sense for numeric/boolean operations. """ def check_reduce(self, s, op_name, skipna): result = getattr(s, op_name)(skipna=skipna) expected = getattr(s.astype("float64"), op_name)(skipna=skipna) tm.assert_almost_equal(result, expected) class BaseNoReduceTests(BaseReduceTests): """ we don't define any reductions """ @pytest.mark.parametrize("skipna", [True, False]) def test_reduce_series_numeric(self, data, all_numeric_reductions, skipna): op_name = all_numeric_reductions s = pd.Series(data) msg = ( "[Cc]annot perform|Categorical is not ordered for operation|" "'Categorical' does not implement reduction|" ) with pytest.raises(TypeError, match=msg): getattr(s, op_name)(skipna=skipna) @pytest.mark.parametrize("skipna", [True, False]) def test_reduce_series_boolean(self, data, all_boolean_reductions, skipna): op_name = all_boolean_reductions s = pd.Series(data) msg = ( "[Cc]annot perform|Categorical is not ordered for operation|" "'Categorical' does not implement reduction|" ) with pytest.raises(TypeError, match=msg): getattr(s, op_name)(skipna=skipna) class BaseNumericReduceTests(BaseReduceTests): @pytest.mark.parametrize("skipna", [True, False]) def test_reduce_series(self, data, all_numeric_reductions, skipna): op_name = all_numeric_reductions s = pd.Series(data) # min/max with empty produce numpy warnings with warnings.catch_warnings(): warnings.simplefilter("ignore", RuntimeWarning) self.check_reduce(s, op_name, skipna) class BaseBooleanReduceTests(BaseReduceTests): @pytest.mark.parametrize("skipna", [True, False]) def test_reduce_series(self, data, all_boolean_reductions, skipna): op_name = all_boolean_reductions s = pd.Series(data) self.check_reduce(s, op_name, skipna)
import numpy as np
import pytest

from pandas import (
    Categorical,
    Index,
)
import pandas._testing as tm


class TestCategoricalSort:
    """Tests for ordering semantics of Categorical.argsort/sort_values."""

    def test_argsort(self):
        """argsort on an ordered Categorical sorts by category order."""
        c = Categorical([5, 3, 1, 4, 2], ordered=True)

        # Positions that would arrange the values ascending.
        expected = np.array([2, 4, 1, 3, 0])
        tm.assert_numpy_array_equal(
            c.argsort(ascending=True), expected, check_dtype=False
        )

        # Descending order is simply the reverse of the ascending one.
        expected = expected[::-1]
        tm.assert_numpy_array_equal(
            c.argsort(ascending=False), expected, check_dtype=False
        )

    def test_numpy_argsort(self):
        """np.argsort dispatches to Categorical.argsort."""
        c = Categorical([5, 3, 1, 4, 2], ordered=True)
        expected = np.array([2, 4, 1, 3, 0])
        tm.assert_numpy_array_equal(np.argsort(c), expected, check_dtype=False)

        # 'kind' is accepted (numpy compat) and must not change the result.
        tm.assert_numpy_array_equal(
            np.argsort(c, kind="mergesort"), expected, check_dtype=False
        )

        # The numpy-compat keywords 'axis' and 'order' are not supported
        # by Categorical.argsort and must raise.
        msg = "the 'axis' parameter is not supported"
        with pytest.raises(ValueError, match=msg):
            np.argsort(c, axis=0)

        msg = "the 'order' parameter is not supported"
        with pytest.raises(ValueError, match=msg):
            np.argsort(c, order="C")

    def test_sort_values(self):
        """sort_values works on ordered and unordered Categoricals."""
        # unordered cats are sortable
        cat = Categorical(["a", "b", "b", "a"], ordered=False)
        cat.sort_values()

        cat = Categorical(["a", "c", "b", "d"], ordered=True)

        # sort_values
        res = cat.sort_values()
        exp = np.array(["a", "b", "c", "d"], dtype=object)
        tm.assert_numpy_array_equal(res.__array__(), exp)
        # Sorting must not alter the categories themselves.
        tm.assert_index_equal(res.categories, cat.categories)

        cat = Categorical(
            ["a", "c", "b", "d"], categories=["a", "b", "c", "d"], ordered=True
        )
        res = cat.sort_values()
        exp = np.array(["a", "b", "c", "d"], dtype=object)
        tm.assert_numpy_array_equal(res.__array__(), exp)
        tm.assert_index_equal(res.categories, cat.categories)

        res = cat.sort_values(ascending=False)
        exp = np.array(["d", "c", "b", "a"], dtype=object)
        tm.assert_numpy_array_equal(res.__array__(), exp)
        tm.assert_index_equal(res.categories, cat.categories)

        # sort (inplace order)
        cat1 = cat.copy()
        orig_codes = cat1._codes
        cat1.sort_values(inplace=True)
        # In-place sort must reuse the existing codes buffer.
        assert cat1._codes is orig_codes
        exp = np.array(["a", "b", "c", "d"], dtype=object)
        tm.assert_numpy_array_equal(cat1.__array__(), exp)
        # NOTE(review): this re-checks `res` (the descending result from
        # above) rather than `cat1`; `cat1.categories` looks like the
        # intended subject — confirm before changing.
        tm.assert_index_equal(res.categories, cat.categories)

        # reverse
        cat = Categorical(["a", "c", "c", "b", "d"], ordered=True)
        res = cat.sort_values(ascending=False)
        exp_val = np.array(["d", "c", "c", "b", "a"], dtype=object)
        exp_categories = Index(["a", "b", "c", "d"])
        tm.assert_numpy_array_equal(res.__array__(), exp_val)
        tm.assert_index_equal(res.categories, exp_categories)

    def test_sort_values_na_position(self):
        """na_position controls whether NaNs sort to the front or back."""
        # see gh-12882
        cat = Categorical([5, 2, np.nan, 2, np.nan], ordered=True)
        exp_categories = Index([2, 5])

        # Default: ascending with NaNs last.
        exp = np.array([2.0, 2.0, 5.0, np.nan, np.nan])
        res = cat.sort_values()  # default arguments
        tm.assert_numpy_array_equal(res.__array__(), exp)
        tm.assert_index_equal(res.categories, exp_categories)

        exp = np.array([np.nan, np.nan, 2.0, 2.0, 5.0])
        res = cat.sort_values(ascending=True, na_position="first")
        tm.assert_numpy_array_equal(res.__array__(), exp)
        tm.assert_index_equal(res.categories, exp_categories)

        exp = np.array([np.nan, np.nan, 5.0, 2.0, 2.0])
        res = cat.sort_values(ascending=False, na_position="first")
        tm.assert_numpy_array_equal(res.__array__(), exp)
        tm.assert_index_equal(res.categories, exp_categories)

        exp = np.array([2.0, 2.0, 5.0, np.nan, np.nan])
        res = cat.sort_values(ascending=True, na_position="last")
        tm.assert_numpy_array_equal(res.__array__(), exp)
        tm.assert_index_equal(res.categories, exp_categories)

        exp = np.array([5.0, 2.0, 2.0, np.nan, np.nan])
        res = cat.sort_values(ascending=False, na_position="last")
        tm.assert_numpy_array_equal(res.__array__(), exp)
        tm.assert_index_equal(res.categories, exp_categories)

        # String categories behave the same way.
        cat = Categorical(["a", "c", "b", "d", np.nan], ordered=True)
        res = cat.sort_values(ascending=False, na_position="last")
        exp_val = np.array(["d", "c", "b", "a", np.nan], dtype=object)
        exp_categories = Index(["a", "b", "c", "d"])
        tm.assert_numpy_array_equal(res.__array__(), exp_val)
        tm.assert_index_equal(res.categories, exp_categories)

        cat = Categorical(["a", "c", "b", "d", np.nan], ordered=True)
        res = cat.sort_values(ascending=False, na_position="first")
        exp_val = np.array([np.nan, "d", "c", "b", "a"], dtype=object)
        exp_categories = Index(["a", "b", "c", "d"])
        tm.assert_numpy_array_equal(res.__array__(), exp_val)
        tm.assert_index_equal(res.categories, exp_categories)
datapythonista/pandas
pandas/tests/arrays/categorical/test_sorting.py
pandas/tests/extension/base/reduce.py
from __future__ import annotations from contextlib import suppress from typing import ( TYPE_CHECKING, Any, Hashable, Sequence, ) import warnings import numpy as np from pandas._libs.indexing import NDFrameIndexerBase from pandas._libs.lib import item_from_zerodim from pandas.errors import ( AbstractMethodError, InvalidIndexError, ) from pandas.util._decorators import doc from pandas.core.dtypes.common import ( is_array_like, is_bool_dtype, is_hashable, is_integer, is_iterator, is_list_like, is_numeric_dtype, is_object_dtype, is_scalar, is_sequence, ) from pandas.core.dtypes.concat import concat_compat from pandas.core.dtypes.generic import ( ABCDataFrame, ABCSeries, ) from pandas.core.dtypes.missing import ( infer_fill_value, isna, ) import pandas.core.common as com from pandas.core.construction import array as pd_array from pandas.core.indexers import ( check_array_indexer, is_empty_indexer, is_exact_shape_match, is_list_like_indexer, length_of_indexer, ) from pandas.core.indexes.api import ( Index, MultiIndex, ) if TYPE_CHECKING: from pandas import ( DataFrame, Series, ) # "null slice" _NS = slice(None, None) # the public IndexSlicerMaker class _IndexSlice: """ Create an object to more easily perform multi-index slicing. See Also -------- MultiIndex.remove_unused_levels : New MultiIndex with no unused levels. Notes ----- See :ref:`Defined Levels <advanced.shown_levels>` for further info on slicing a MultiIndex. Examples -------- >>> midx = pd.MultiIndex.from_product([['A0','A1'], ['B0','B1','B2','B3']]) >>> columns = ['foo', 'bar'] >>> dfmi = pd.DataFrame(np.arange(16).reshape((len(midx), len(columns))), ... 
index=midx, columns=columns) Using the default slice command: >>> dfmi.loc[(slice(None), slice('B0', 'B1')), :] foo bar A0 B0 0 1 B1 2 3 A1 B0 8 9 B1 10 11 Using the IndexSlice class for a more intuitive command: >>> idx = pd.IndexSlice >>> dfmi.loc[idx[:, 'B0':'B1'], :] foo bar A0 B0 0 1 B1 2 3 A1 B0 8 9 B1 10 11 """ def __getitem__(self, arg): return arg IndexSlice = _IndexSlice() class IndexingError(Exception): pass class IndexingMixin: """ Mixin for adding .loc/.iloc/.at/.iat to Dataframes and Series. """ @property def iloc(self) -> _iLocIndexer: """ Purely integer-location based indexing for selection by position. ``.iloc[]`` is primarily integer position based (from ``0`` to ``length-1`` of the axis), but may also be used with a boolean array. Allowed inputs are: - An integer, e.g. ``5``. - A list or array of integers, e.g. ``[4, 3, 0]``. - A slice object with ints, e.g. ``1:7``. - A boolean array. - A ``callable`` function with one argument (the calling Series or DataFrame) and that returns valid output for indexing (one of the above). This is useful in method chains, when you don't have a reference to the calling object, but would like to base your selection on some value. ``.iloc`` will raise ``IndexError`` if a requested indexer is out-of-bounds, except *slice* indexers which allow out-of-bounds indexing (this conforms with python/numpy *slice* semantics). See more at :ref:`Selection by Position <indexing.integer>`. See Also -------- DataFrame.iat : Fast integer location scalar accessor. DataFrame.loc : Purely label-location based indexer for selection by label. Series.iloc : Purely integer-location based indexing for selection by position. Examples -------- >>> mydict = [{'a': 1, 'b': 2, 'c': 3, 'd': 4}, ... {'a': 100, 'b': 200, 'c': 300, 'd': 400}, ... {'a': 1000, 'b': 2000, 'c': 3000, 'd': 4000 }] >>> df = pd.DataFrame(mydict) >>> df a b c d 0 1 2 3 4 1 100 200 300 400 2 1000 2000 3000 4000 **Indexing just the rows** With a scalar integer. 
>>> type(df.iloc[0]) <class 'pandas.core.series.Series'> >>> df.iloc[0] a 1 b 2 c 3 d 4 Name: 0, dtype: int64 With a list of integers. >>> df.iloc[[0]] a b c d 0 1 2 3 4 >>> type(df.iloc[[0]]) <class 'pandas.core.frame.DataFrame'> >>> df.iloc[[0, 1]] a b c d 0 1 2 3 4 1 100 200 300 400 With a `slice` object. >>> df.iloc[:3] a b c d 0 1 2 3 4 1 100 200 300 400 2 1000 2000 3000 4000 With a boolean mask the same length as the index. >>> df.iloc[[True, False, True]] a b c d 0 1 2 3 4 2 1000 2000 3000 4000 With a callable, useful in method chains. The `x` passed to the ``lambda`` is the DataFrame being sliced. This selects the rows whose index label even. >>> df.iloc[lambda x: x.index % 2 == 0] a b c d 0 1 2 3 4 2 1000 2000 3000 4000 **Indexing both axes** You can mix the indexer types for the index and columns. Use ``:`` to select the entire axis. With scalar integers. >>> df.iloc[0, 1] 2 With lists of integers. >>> df.iloc[[0, 2], [1, 3]] b d 0 2 4 2 2000 4000 With `slice` objects. >>> df.iloc[1:3, 0:3] a b c 1 100 200 300 2 1000 2000 3000 With a boolean array whose length matches the columns. >>> df.iloc[:, [True, False, True, False]] a c 0 1 3 1 100 300 2 1000 3000 With a callable function that expects the Series or DataFrame. >>> df.iloc[:, lambda df: [0, 2]] a c 0 1 3 1 100 300 2 1000 3000 """ return _iLocIndexer("iloc", self) @property def loc(self) -> _LocIndexer: """ Access a group of rows and columns by label(s) or a boolean array. ``.loc[]`` is primarily label based, but may also be used with a boolean array. Allowed inputs are: - A single label, e.g. ``5`` or ``'a'``, (note that ``5`` is interpreted as a *label* of the index, and **never** as an integer position along the index). - A list or array of labels, e.g. ``['a', 'b', 'c']``. - A slice object with labels, e.g. ``'a':'f'``. .. warning:: Note that contrary to usual python slices, **both** the start and the stop are included - A boolean array of the same length as the axis being sliced, e.g. 
``[True, False, True]``. - An alignable boolean Series. The index of the key will be aligned before masking. - An alignable Index. The Index of the returned selection will be the input. - A ``callable`` function with one argument (the calling Series or DataFrame) and that returns valid output for indexing (one of the above) See more at :ref:`Selection by Label <indexing.label>`. Raises ------ KeyError If any items are not found. IndexingError If an indexed key is passed and its index is unalignable to the frame index. See Also -------- DataFrame.at : Access a single value for a row/column label pair. DataFrame.iloc : Access group of rows and columns by integer position(s). DataFrame.xs : Returns a cross-section (row(s) or column(s)) from the Series/DataFrame. Series.loc : Access group of values using labels. Examples -------- **Getting values** >>> df = pd.DataFrame([[1, 2], [4, 5], [7, 8]], ... index=['cobra', 'viper', 'sidewinder'], ... columns=['max_speed', 'shield']) >>> df max_speed shield cobra 1 2 viper 4 5 sidewinder 7 8 Single label. Note this returns the row as a Series. >>> df.loc['viper'] max_speed 4 shield 5 Name: viper, dtype: int64 List of labels. Note using ``[[]]`` returns a DataFrame. >>> df.loc[['viper', 'sidewinder']] max_speed shield viper 4 5 sidewinder 7 8 Single label for row and column >>> df.loc['cobra', 'shield'] 2 Slice with labels for row and single label for column. As mentioned above, note that both the start and stop of the slice are included. >>> df.loc['cobra':'viper', 'max_speed'] cobra 1 viper 4 Name: max_speed, dtype: int64 Boolean list with the same length as the row axis >>> df.loc[[False, False, True]] max_speed shield sidewinder 7 8 Alignable boolean Series: >>> df.loc[pd.Series([False, True, False], ... 
index=['viper', 'sidewinder', 'cobra'])] max_speed shield sidewinder 7 8 Index (same behavior as ``df.reindex``) >>> df.loc[pd.Index(["cobra", "viper"], name="foo")] max_speed shield foo cobra 1 2 viper 4 5 Conditional that returns a boolean Series >>> df.loc[df['shield'] > 6] max_speed shield sidewinder 7 8 Conditional that returns a boolean Series with column labels specified >>> df.loc[df['shield'] > 6, ['max_speed']] max_speed sidewinder 7 Callable that returns a boolean Series >>> df.loc[lambda df: df['shield'] == 8] max_speed shield sidewinder 7 8 **Setting values** Set value for all items matching the list of labels >>> df.loc[['viper', 'sidewinder'], ['shield']] = 50 >>> df max_speed shield cobra 1 2 viper 4 50 sidewinder 7 50 Set value for an entire row >>> df.loc['cobra'] = 10 >>> df max_speed shield cobra 10 10 viper 4 50 sidewinder 7 50 Set value for an entire column >>> df.loc[:, 'max_speed'] = 30 >>> df max_speed shield cobra 30 10 viper 30 50 sidewinder 30 50 Set value for rows matching callable condition >>> df.loc[df['shield'] > 35] = 0 >>> df max_speed shield cobra 30 10 viper 0 0 sidewinder 0 0 **Getting values on a DataFrame with an index that has integer labels** Another example using integers for the index >>> df = pd.DataFrame([[1, 2], [4, 5], [7, 8]], ... index=[7, 8, 9], columns=['max_speed', 'shield']) >>> df max_speed shield 7 1 2 8 4 5 9 7 8 Slice with integer labels for rows. As mentioned above, note that both the start and stop of the slice are included. >>> df.loc[7:9] max_speed shield 7 1 2 8 4 5 9 7 8 **Getting values with a MultiIndex** A number of examples using a DataFrame with a MultiIndex >>> tuples = [ ... ('cobra', 'mark i'), ('cobra', 'mark ii'), ... ('sidewinder', 'mark i'), ('sidewinder', 'mark ii'), ... ('viper', 'mark ii'), ('viper', 'mark iii') ... ] >>> index = pd.MultiIndex.from_tuples(tuples) >>> values = [[12, 2], [0, 4], [10, 20], ... 
[1, 4], [7, 1], [16, 36]] >>> df = pd.DataFrame(values, columns=['max_speed', 'shield'], index=index) >>> df max_speed shield cobra mark i 12 2 mark ii 0 4 sidewinder mark i 10 20 mark ii 1 4 viper mark ii 7 1 mark iii 16 36 Single label. Note this returns a DataFrame with a single index. >>> df.loc['cobra'] max_speed shield mark i 12 2 mark ii 0 4 Single index tuple. Note this returns a Series. >>> df.loc[('cobra', 'mark ii')] max_speed 0 shield 4 Name: (cobra, mark ii), dtype: int64 Single label for row and column. Similar to passing in a tuple, this returns a Series. >>> df.loc['cobra', 'mark i'] max_speed 12 shield 2 Name: (cobra, mark i), dtype: int64 Single tuple. Note using ``[[]]`` returns a DataFrame. >>> df.loc[[('cobra', 'mark ii')]] max_speed shield cobra mark ii 0 4 Single tuple for the index with a single label for the column >>> df.loc[('cobra', 'mark i'), 'shield'] 2 Slice from index tuple to single label >>> df.loc[('cobra', 'mark i'):'viper'] max_speed shield cobra mark i 12 2 mark ii 0 4 sidewinder mark i 10 20 mark ii 1 4 viper mark ii 7 1 mark iii 16 36 Slice from index tuple to index tuple >>> df.loc[('cobra', 'mark i'):('viper', 'mark ii')] max_speed shield cobra mark i 12 2 mark ii 0 4 sidewinder mark i 10 20 mark ii 1 4 viper mark ii 7 1 """ return _LocIndexer("loc", self) @property def at(self) -> _AtIndexer: """ Access a single value for a row/column label pair. Similar to ``loc``, in that both provide label-based lookups. Use ``at`` if you only need to get or set a single value in a DataFrame or Series. Raises ------ KeyError If 'label' does not exist in DataFrame. See Also -------- DataFrame.iat : Access a single value for a row/column pair by integer position. DataFrame.loc : Access a group of rows and columns by label(s). Series.at : Access a single value using a label. Examples -------- >>> df = pd.DataFrame([[0, 2, 3], [0, 4, 1], [10, 20, 30]], ... 
index=[4, 5, 6], columns=['A', 'B', 'C']) >>> df A B C 4 0 2 3 5 0 4 1 6 10 20 30 Get value at specified row/column pair >>> df.at[4, 'B'] 2 Set value at specified row/column pair >>> df.at[4, 'B'] = 10 >>> df.at[4, 'B'] 10 Get value within a Series >>> df.loc[5].at['B'] 4 """ return _AtIndexer("at", self) @property def iat(self) -> _iAtIndexer: """ Access a single value for a row/column pair by integer position. Similar to ``iloc``, in that both provide integer-based lookups. Use ``iat`` if you only need to get or set a single value in a DataFrame or Series. Raises ------ IndexError When integer position is out of bounds. See Also -------- DataFrame.at : Access a single value for a row/column label pair. DataFrame.loc : Access a group of rows and columns by label(s). DataFrame.iloc : Access a group of rows and columns by integer position(s). Examples -------- >>> df = pd.DataFrame([[0, 2, 3], [0, 4, 1], [10, 20, 30]], ... columns=['A', 'B', 'C']) >>> df A B C 0 0 2 3 1 0 4 1 2 10 20 30 Get value at specified row/column pair >>> df.iat[1, 2] 1 Set value at specified row/column pair >>> df.iat[1, 2] = 10 >>> df.iat[1, 2] 10 Get value within a series >>> df.loc[0].iat[1] 2 """ return _iAtIndexer("iat", self) class _LocationIndexer(NDFrameIndexerBase): _valid_types: str axis = None def __call__(self, axis=None): # we need to return a copy of ourselves new_self = type(self)(self.name, self.obj) if axis is not None: axis = self.obj._get_axis_number(axis) new_self.axis = axis return new_self def _get_setitem_indexer(self, key): """ Convert a potentially-label-based key into a positional indexer. """ if self.name == "loc": self._ensure_listlike_indexer(key) if self.axis is not None: return self._convert_tuple(key, is_setter=True) ax = self.obj._get_axis(0) if isinstance(ax, MultiIndex) and self.name != "iloc": with suppress(TypeError, KeyError, InvalidIndexError): # TypeError e.g. 
passed a bool return ax.get_loc(key) if isinstance(key, tuple): with suppress(IndexingError): return self._convert_tuple(key, is_setter=True) if isinstance(key, range): return list(key) try: return self._convert_to_indexer(key, axis=0, is_setter=True) except TypeError as e: # invalid indexer type vs 'other' indexing errors if "cannot do" in str(e): raise elif "unhashable type" in str(e): raise raise IndexingError(key) from e def _ensure_listlike_indexer(self, key, axis=None, value=None): """ Ensure that a list-like of column labels are all present by adding them if they do not already exist. Parameters ---------- key : list-like of column labels Target labels. axis : key axis if known """ column_axis = 1 # column only exists in 2-dimensional DataFrame if self.ndim != 2: return if isinstance(key, tuple) and len(key) > 1: # key may be a tuple if we are .loc # if length of key is > 1 set key to column part key = key[column_axis] axis = column_axis if ( axis == column_axis and not isinstance(self.obj.columns, MultiIndex) and is_list_like_indexer(key) and not com.is_bool_indexer(key) and all(is_hashable(k) for k in key) ): # GH#38148 keys = self.obj.columns.union(key, sort=False) self.obj._mgr = self.obj._mgr.reindex_axis( keys, axis=0, consolidate=False, only_slice=True ) def __setitem__(self, key, value): if isinstance(key, tuple): key = tuple(list(x) if is_iterator(x) else x for x in key) key = tuple(com.apply_if_callable(x, self.obj) for x in key) else: key = com.apply_if_callable(key, self.obj) indexer = self._get_setitem_indexer(key) self._has_valid_setitem_indexer(key) iloc = self if self.name == "iloc" else self.obj.iloc iloc._setitem_with_indexer(indexer, value, self.name) def _validate_key(self, key, axis: int): """ Ensure that key is valid for current indexer. Parameters ---------- key : scalar, slice or list-like Key requested. axis : int Dimension on which the indexing is being made. Raises ------ TypeError If the key (or some element of it) has wrong type. 
IndexError If the key (or some element of it) is out of bounds. KeyError If the key was not found. """ raise AbstractMethodError(self) def _has_valid_tuple(self, key: tuple): """ Check the key for valid keys across my indexer. """ self._validate_key_length(key) for i, k in enumerate(key): try: self._validate_key(k, i) except ValueError as err: raise ValueError( "Location based indexing can only have " f"[{self._valid_types}] types" ) from err def _is_nested_tuple_indexer(self, tup: tuple) -> bool: """ Returns ------- bool """ if any(isinstance(ax, MultiIndex) for ax in self.obj.axes): return any(is_nested_tuple(tup, ax) for ax in self.obj.axes) return False def _convert_tuple(self, key, is_setter: bool = False): keyidx = [] if self.axis is not None: axis = self.obj._get_axis_number(self.axis) for i in range(self.ndim): if i == axis: keyidx.append( self._convert_to_indexer(key, axis=axis, is_setter=is_setter) ) else: keyidx.append(slice(None)) else: self._validate_key_length(key) for i, k in enumerate(key): idx = self._convert_to_indexer(k, axis=i, is_setter=is_setter) keyidx.append(idx) return tuple(keyidx) def _validate_key_length(self, key: Sequence[Any]) -> None: if len(key) > self.ndim: raise IndexingError("Too many indexers") def _getitem_tuple_same_dim(self, tup: tuple): """ Index with indexers that should return an object of the same dimension as self.obj. This is only called after a failed call to _getitem_lowerdim. """ retval = self.obj for i, key in enumerate(tup): if com.is_null_slice(key): continue retval = getattr(retval, self.name)._getitem_axis(key, axis=i) # We should never have retval.ndim < self.ndim, as that should # be handled by the _getitem_lowerdim call above. 
assert retval.ndim == self.ndim return retval def _getitem_lowerdim(self, tup: tuple): # we can directly get the axis result since the axis is specified if self.axis is not None: axis = self.obj._get_axis_number(self.axis) return self._getitem_axis(tup, axis=axis) # we may have a nested tuples indexer here if self._is_nested_tuple_indexer(tup): return self._getitem_nested_tuple(tup) # we maybe be using a tuple to represent multiple dimensions here ax0 = self.obj._get_axis(0) # ...but iloc should handle the tuple as simple integer-location # instead of checking it as multiindex representation (GH 13797) if isinstance(ax0, MultiIndex) and self.name != "iloc": with suppress(IndexingError): return self._handle_lowerdim_multi_index_axis0(tup) self._validate_key_length(tup) for i, key in enumerate(tup): if is_label_like(key): # We don't need to check for tuples here because those are # caught by the _is_nested_tuple_indexer check above. section = self._getitem_axis(key, axis=i) # We should never have a scalar section here, because # _getitem_lowerdim is only called after a check for # is_scalar_access, which that would be. if section.ndim == self.ndim: # we're in the middle of slicing through a MultiIndex # revise the key wrt to `section` by inserting an _NS new_key = tup[:i] + (_NS,) + tup[i + 1 :] else: # Note: the section.ndim == self.ndim check above # rules out having DataFrame here, so we dont need to worry # about transposing. new_key = tup[:i] + tup[i + 1 :] if len(new_key) == 1: new_key = new_key[0] # Slices should return views, but calling iloc/loc with a null # slice returns a new object. 
if com.is_null_slice(new_key): return section # This is an elided recursive call to iloc/loc return getattr(section, self.name)[new_key] raise IndexingError("not applicable") def _getitem_nested_tuple(self, tup: tuple): # we have a nested tuple so have at least 1 multi-index level # we should be able to match up the dimensionality here # we have too many indexers for our dim, but have at least 1 # multi-index dimension, try to see if we have something like # a tuple passed to a series with a multi-index if len(tup) > self.ndim: if self.name != "loc": # This should never be reached, but lets be explicit about it raise ValueError("Too many indices") if isinstance(self.obj, ABCSeries) and any( isinstance(k, tuple) for k in tup ): # GH#35349 Raise if tuple in tuple for series raise ValueError("Too many indices") if self.ndim == 1 or not any(isinstance(x, slice) for x in tup): # GH#10521 Series should reduce MultiIndex dimensions instead of # DataFrame, IndexingError is not raised when slice(None,None,None) # with one row. 
                # NOTE(review): this is the tail of a method whose `def` line is
                # above this view (the nested-tuple branch of _LocationIndexer);
                # the code below is reproduced unchanged.
                with suppress(IndexingError):
                    return self._handle_lowerdim_multi_index_axis0(tup)

            # this is a series with a multi-index specified a tuple of
            # selectors
            axis = self.axis or 0
            return self._getitem_axis(tup, axis=axis)

        # handle the multi-axis by taking sections and reducing
        # this is iterative
        obj = self.obj
        # GH#41369 Loop in reverse order ensures indexing along columns before rows
        # which selects only necessary blocks which avoids dtype conversion if possible
        axis = len(tup) - 1
        for key in tup[::-1]:

            if com.is_null_slice(key):
                # null slices select everything on that axis; skip the lookup
                axis -= 1
                continue

            obj = getattr(obj, self.name)._getitem_axis(key, axis=axis)
            axis -= 1

            # if we have a scalar, we are done
            if is_scalar(obj) or not hasattr(obj, "ndim"):
                break

        return obj

    def _convert_to_indexer(self, key, axis: int, is_setter: bool = False):
        # abstract: each concrete indexer (.loc / .iloc) supplies its own
        # key-to-positional conversion
        raise AbstractMethodError(self)

    def __getitem__(self, key):
        """Entry point for ``obj.loc[key]`` / ``obj.iloc[key]`` style access."""
        if type(key) is tuple:
            # materialize iterators and resolve callables per axis
            key = tuple(list(x) if is_iterator(x) else x for x in key)
            key = tuple(com.apply_if_callable(x, self.obj) for x in key)
            if self._is_scalar_access(key):
                # fast path equivalent to .at / .iat
                with suppress(KeyError, IndexError, AttributeError):
                    # AttributeError for IntervalTree get_value
                    return self.obj._get_value(*key, takeable=self._takeable)
            return self._getitem_tuple(key)
        else:
            # we by definition only have the 0th axis
            axis = self.axis or 0

            maybe_callable = com.apply_if_callable(key, self.obj)
            return self._getitem_axis(maybe_callable, axis=axis)

    def _is_scalar_access(self, key: tuple):
        raise NotImplementedError()

    def _getitem_tuple(self, tup: tuple):
        raise AbstractMethodError(self)

    def _getitem_axis(self, key, axis: int):
        raise NotImplementedError()

    def _has_valid_setitem_indexer(self, indexer) -> bool:
        raise AbstractMethodError(self)

    def _getbool_axis(self, key, axis: int):
        # caller is responsible for ensuring non-None axis
        labels = self.obj._get_axis(axis)
        key = check_bool_indexer(labels, key)
        inds = key.nonzero()[0]
        return self.obj._take_with_is_copy(inds, axis=axis)


@doc(IndexingMixin.loc)
class _LocIndexer(_LocationIndexer):
    # label-based indexer, so the .at-style "takeable" (positional) fast
    # path is disabled
    _takeable: bool = False
    _valid_types = (
        "labels (MUST BE IN THE INDEX), slices of labels (BOTH "
        "endpoints included! Can be slices of integers if the "
        "index is integers), listlike of labels, boolean"
    )

    # -------------------------------------------------------------------
    # Key Checks

    @doc(_LocationIndexer._validate_key)
    def _validate_key(self, key, axis: int):
        # valid for a collection of labels (we check their presence later)
        # slice of labels (where start-end in labels)
        # slice of integers (only if in the labels)
        # boolean not in slice and with boolean index
        if isinstance(key, bool) and not is_bool_dtype(self.obj.index):
            raise KeyError(
                f"{key}: boolean label can not be used without a boolean index"
            )

        if isinstance(key, slice) and (
            isinstance(key.start, bool) or isinstance(key.stop, bool)
        ):
            raise TypeError(f"{key}: boolean values can not be used in a slice")

    def _has_valid_setitem_indexer(self, indexer) -> bool:
        # .loc may always enlarge its target, so any indexer is acceptable
        return True

    def _is_scalar_access(self, key: tuple) -> bool:
        """
        Whether `key` can be served by the .at-style scalar fast path.

        Returns
        -------
        bool
        """
        # this is a shortcut accessor to both .loc and .iloc
        # that provide the equivalent access of .at and .iat
        # a) avoid getting things via sections and (to minimize dtype changes)
        # b) provide a performant path
        if len(key) != self.ndim:
            return False

        for i, k in enumerate(key):
            if not is_scalar(k):
                return False

            ax = self.obj.axes[i]
            if isinstance(ax, MultiIndex):
                return False

            if isinstance(k, str) and ax._supports_partial_string_indexing:
                # partial string indexing, df.loc['2000', 'A']
                # should not be considered scalar
                return False

            if not ax.is_unique:
                return False

        return True

    # -------------------------------------------------------------------
    # MultiIndex Handling

    def _multi_take_opportunity(self, tup: tuple) -> bool:
        """
        Check whether there is the possibility to use ``_multi_take``.

        Currently the limit is that all axes being indexed, must be indexed with
        list-likes.

        Parameters
        ----------
        tup : tuple
            Tuple of indexers, one per axis.

        Returns
        -------
        bool
            Whether the current indexing, can be passed through `_multi_take`.
        """
        if not all(is_list_like_indexer(x) for x in tup):
            return False

        # just too complicated
        return not any(com.is_bool_indexer(x) for x in tup)

    def _multi_take(self, tup: tuple):
        """
        Create the indexers for the passed tuple of keys, and
        executes the take operation. This allows the take operation to be
        executed all at once, rather than once for each dimension.
        Improving efficiency.

        Parameters
        ----------
        tup : tuple
            Tuple of indexers, one per axis.

        Returns
        -------
        values: same type as the object being indexed
        """
        # GH 836
        d = {
            axis: self._get_listlike_indexer(key, axis)
            for (key, axis) in zip(tup, self.obj._AXIS_ORDERS)
        }
        return self.obj._reindex_with_indexers(d, copy=True, allow_dups=True)

    # -------------------------------------------------------------------

    def _getitem_iterable(self, key, axis: int):
        """
        Index current object with an iterable collection of keys.

        Parameters
        ----------
        key : iterable
            Targeted labels.
        axis : int
            Dimension on which the indexing is being made.

        Raises
        ------
        KeyError
            If no key was found. Will change in the future to raise if not all
            keys were found.

        Returns
        -------
        scalar, DataFrame, or Series: indexed value(s).
        """
        # we assume that not com.is_bool_indexer(key), as that is
        # handled before we get here.
        self._validate_key(key, axis)

        # A collection of keys
        keyarr, indexer = self._get_listlike_indexer(key, axis)
        return self.obj._reindex_with_indexers(
            {axis: [keyarr, indexer]}, copy=True, allow_dups=True
        )

    def _getitem_tuple(self, tup: tuple):
        # try the dimension-reducing path first; IndexingError means the
        # tuple addresses all dimensions and we fall through
        with suppress(IndexingError):
            return self._getitem_lowerdim(tup)

        # no multi-index, so validate all of the indexers
        self._has_valid_tuple(tup)

        # ugly hack for GH #836
        if self._multi_take_opportunity(tup):
            return self._multi_take(tup)

        return self._getitem_tuple_same_dim(tup)

    def _get_label(self, label, axis: int):
        # GH#5667 this will fail if the label is not present in the axis.
        return self.obj.xs(label, axis=axis)

    def _handle_lowerdim_multi_index_axis0(self, tup: tuple):
        # we have an axis0 multi-index, handle or raise
        axis = self.axis or 0
        try:
            # fast path for series or for tup devoid of slices
            return self._get_label(tup, axis=axis)
        except (TypeError, InvalidIndexError):
            # slices are unhashable
            pass
        except KeyError as ek:
            # raise KeyError if number of indexers match
            # else IndexingError will be raised
            if self.ndim < len(tup) <= self.obj.index.nlevels:
                raise ek

        raise IndexingError("No label returned")

    def _getitem_axis(self, key, axis: int):
        """Select along a single axis with a label-based key."""
        key = item_from_zerodim(key)
        if is_iterator(key):
            key = list(key)

        labels = self.obj._get_axis(axis)
        # may translate e.g. a partial datetime string into a usable key
        key = labels._get_partial_string_timestamp_match_key(key)

        if isinstance(key, slice):
            self._validate_key(key, axis)
            return self._get_slice_axis(key, axis=axis)
        elif com.is_bool_indexer(key):
            return self._getbool_axis(key, axis=axis)
        elif is_list_like_indexer(key):

            # an iterable multi-selection
            if not (isinstance(key, tuple) and isinstance(labels, MultiIndex)):

                if hasattr(key, "ndim") and key.ndim > 1:
                    raise ValueError("Cannot index with multidimensional key")

                return self._getitem_iterable(key, axis=axis)

            # nested tuple slicing
            if is_nested_tuple(key, labels):
                locs = labels.get_locs(key)
                indexer = [slice(None)] * self.ndim
                indexer[axis] = locs
                return self.obj.iloc[tuple(indexer)]

        # fall thru to straight lookup
        self._validate_key(key, axis)
        return self._get_label(key, axis=axis)

    def _get_slice_axis(self, slice_obj: slice, axis: int):
        """
        This is pretty simple as we just have to deal with labels.
        """
        # caller is responsible for ensuring non-None axis
        obj = self.obj
        if not need_slice(slice_obj):
            return obj.copy(deep=False)

        labels = obj._get_axis(axis)
        indexer = labels.slice_indexer(slice_obj.start, slice_obj.stop, slice_obj.step)

        if isinstance(indexer, slice):
            return self.obj._slice(indexer, axis=axis)
        else:
            # DatetimeIndex overrides Index.slice_indexer and may
            # return a DatetimeIndex instead of a slice object.
            return self.obj.take(indexer, axis=axis)

    def _convert_to_indexer(self, key, axis: int, is_setter: bool = False):
        """
        Convert indexing key into something we can use to do actual fancy
        indexing on a ndarray.

        Examples
        ix[:5] -> slice(0, 5)
        ix[[1,2,3]] -> [1,2,3]
        ix[['foo', 'bar', 'baz']] -> [i, j, k] (indices of foo, bar, baz)

        Going by Zen of Python?
        'In the face of ambiguity, refuse the temptation to guess.'
        raise AmbiguousIndexError with integer labels?
        - No, prefer label-based indexing
        """
        labels = self.obj._get_axis(axis)

        if isinstance(key, slice):
            return labels._convert_slice_indexer(key, kind="loc")

        # see if we are positional in nature
        is_int_index = labels.is_integer()
        is_int_positional = is_integer(key) and not is_int_index

        if is_scalar(key) or isinstance(labels, MultiIndex):
            # Otherwise get_loc will raise InvalidIndexError

            # if we are a label return me
            try:
                return labels.get_loc(key)
            except LookupError:
                if isinstance(key, tuple) and isinstance(labels, MultiIndex):
                    if len(key) == labels.nlevels:
                        # a {"key": ...} dict marks a missing key for
                        # setitem-with-expansion downstream
                        return {"key": key}
                    raise
            except InvalidIndexError:
                # GH35015, using datetime as column indices raises exception
                if not isinstance(labels, MultiIndex):
                    raise
            except TypeError:
                pass
            except ValueError:
                if not is_int_positional:
                    raise

        # a positional
        if is_int_positional:

            # if we are setting and its not a valid location
            # its an insert which fails by definition

            # always valid
            return {"key": key}

        if is_nested_tuple(key, labels):
            if isinstance(self.obj, ABCSeries) and any(
                isinstance(k, tuple) for k in key
            ):
                # GH#35349 Raise if tuple in tuple for series
                raise ValueError("Too many indices")
            return labels.get_locs(key)

        elif is_list_like_indexer(key):

            if is_iterator(key):
                key = list(key)

            if com.is_bool_indexer(key):
                key = check_bool_indexer(labels, key)
                (inds,) = key.nonzero()
                return inds
            else:
                return self._get_listlike_indexer(key, axis)[1]
        else:
            try:
                return labels.get_loc(key)
            except LookupError:
                # allow a not found key only if we are a setter
                if not is_list_like_indexer(key):
                    return {"key": key}
                raise

    def _get_listlike_indexer(self, key, axis: int):
        """
        Transform a list-like of keys into a new index and an indexer.

        Parameters
        ----------
        key : list-like
            Targeted labels.
        axis: int
            Dimension on which the indexing is being made.

        Raises
        ------
        KeyError
            If at least one key was requested but none was found.

        Returns
        -------
        keyarr: Index
            New index (coinciding with 'key' if the axis is unique).
        values : array-like
            Indexer for the return object, -1 denotes keys not found.
        """
        ax = self.obj._get_axis(axis)

        # Have the index compute an indexer or return None
        # if it cannot handle:
        indexer, keyarr = ax._convert_listlike_indexer(key)
        # We only act on all found values:
        if indexer is not None and (indexer != -1).all():
            # _validate_read_indexer is a no-op if no -1s, so skip
            return ax[indexer], indexer

        if ax._index_as_unique:
            indexer = ax.get_indexer_for(keyarr)
            keyarr = ax.reindex(keyarr)[0]
        else:
            keyarr, indexer, new_indexer = ax._reindex_non_unique(keyarr)

        self._validate_read_indexer(keyarr, indexer, axis)
        return keyarr, indexer

    def _validate_read_indexer(self, key, indexer, axis: int):
        """
        Check that indexer can be used to return a result.

        e.g. at least one element was found,
        unless the list of keys was actually empty.

        Parameters
        ----------
        key : list-like
            Targeted labels (only used to show correct error message).
        indexer: array-like of booleans
            Indices corresponding to the key,
            (with -1 indicating not found).
        axis : int
            Dimension on which the indexing is being made.

        Raises
        ------
        KeyError
            If at least one key was requested but none was found.
        """
        if len(key) == 0:
            return

        # Count missing values:
        missing_mask = indexer < 0
        missing = (missing_mask).sum()

        if missing:
            if missing == len(indexer):
                # nothing at all matched
                axis_name = self.obj._get_axis_name(axis)
                raise KeyError(f"None of [{key}] are in the [{axis_name}]")

            ax = self.obj._get_axis(axis)

            not_found = list(set(key) - set(ax))
            raise KeyError(f"{not_found} not in index")


@doc(IndexingMixin.iloc)
class _iLocIndexer(_LocationIndexer):
    _valid_types = (
        "integer, integer slice (START point is INCLUDED, END "
        "point is EXCLUDED), listlike of integers, boolean array"
    )
    # positional indexer: scalar access can use the fast "takeable" path
    _takeable = True

    # -------------------------------------------------------------------
    # Key Checks

    def _validate_key(self, key, axis: int):
        # reject keys that cannot be interpreted positionally
        if com.is_bool_indexer(key):
            if hasattr(key, "index") and isinstance(key.index, Index):
                if key.index.inferred_type == "integer":
                    raise NotImplementedError(
                        "iLocation based boolean "
                        "indexing on an integer type "
                        "is not available"
                    )
                raise ValueError(
                    "iLocation based boolean indexing cannot use "
                    "an indexable as a mask"
                )
            return

        if isinstance(key, slice):
            return
        elif is_integer(key):
            self._validate_integer(key, axis)
        elif isinstance(key, tuple):
            # a tuple should already have been caught by this point
            # so don't treat a tuple as a valid indexer
            raise IndexingError("Too many indexers")
        elif is_list_like_indexer(key):
            arr = np.array(key)
            len_axis = len(self.obj._get_axis(axis))

            # check that the key has a numeric dtype
            if not is_numeric_dtype(arr.dtype):
                raise IndexError(f".iloc requires numeric indexers, got {arr}")

            # check that the key does not exceed the maximum size of the index
            if len(arr) and (arr.max() >= len_axis or arr.min() < -len_axis):
                raise IndexError("positional indexers are out-of-bounds")
        else:
            raise ValueError(f"Can only index by location with a [{self._valid_types}]")

    def _has_valid_setitem_indexer(self, indexer) -> bool:
        """
        Validate that a positional indexer cannot enlarge its target
        will raise if needed, does not modify the indexer externally.

        Returns
        -------
        bool
        """
        if isinstance(indexer, dict):
            raise IndexError("iloc cannot enlarge its target object")

        if isinstance(indexer, ABCDataFrame):
            # NOTE(review): "in" and "a future version" lack a separating
            # space in the emitted message; left byte-identical here since
            # the message text is runtime behavior.
            warnings.warn(
                "DataFrame indexer for .iloc is deprecated and will be removed in"
                "a future version.\n"
                "consider using .loc with a DataFrame indexer for automatic alignment.",
                FutureWarning,
                stacklevel=3,
            )

        if not isinstance(indexer, tuple):
            indexer = _tuplify(self.ndim, indexer)

        for ax, i in zip(self.obj.axes, indexer):
            if isinstance(i, slice):
                # should check the stop slice?
                pass
            elif is_list_like_indexer(i):
                # should check the elements?
                pass
            elif is_integer(i):
                if i >= len(ax):
                    raise IndexError("iloc cannot enlarge its target object")
            elif isinstance(i, dict):
                raise IndexError("iloc cannot enlarge its target object")

        return True

    def _is_scalar_access(self, key: tuple) -> bool:
        """
        Whether `key` can be served by the .iat-style scalar fast path.

        Returns
        -------
        bool
        """
        # this is a shortcut accessor to both .loc and .iloc
        # that provide the equivalent access of .at and .iat
        # a) avoid getting things via sections and (to minimize dtype changes)
        # b) provide a performant path
        if len(key) != self.ndim:
            return False

        return all(is_integer(k) for k in key)

    def _validate_integer(self, key: int, axis: int) -> None:
        """
        Check that 'key' is a valid position in the desired axis.

        Parameters
        ----------
        key : int
            Requested position.
        axis : int
            Desired axis.

        Raises
        ------
        IndexError
            If 'key' is not a valid position in axis 'axis'.
        """
        len_axis = len(self.obj._get_axis(axis))
        if key >= len_axis or key < -len_axis:
            raise IndexError("single positional indexer is out-of-bounds")

    # -------------------------------------------------------------------

    def _getitem_tuple(self, tup: tuple):

        self._has_valid_tuple(tup)
        with suppress(IndexingError):
            return self._getitem_lowerdim(tup)

        return self._getitem_tuple_same_dim(tup)

    def _get_list_axis(self, key, axis: int):
        """
        Return Series values by list or array of integers.

        Parameters
        ----------
        key : list-like positional indexer
        axis : int

        Returns
        -------
        Series object

        Notes
        -----
        `axis` can only be zero.
        """
        try:
            return self.obj._take_with_is_copy(key, axis=axis)
        except IndexError as err:
            # re-raise with different error message
            raise IndexError("positional indexers are out-of-bounds") from err

    def _getitem_axis(self, key, axis: int):
        """Select along a single axis with a positional key."""
        if isinstance(key, ABCDataFrame):
            raise IndexError(
                "DataFrame indexer is not allowed for .iloc\n"
                "Consider using .loc for automatic alignment."
            )

        if isinstance(key, slice):
            return self._get_slice_axis(key, axis=axis)

        if is_iterator(key):
            key = list(key)

        if isinstance(key, list):
            key = np.asarray(key)

        if com.is_bool_indexer(key):
            self._validate_key(key, axis)
            return self._getbool_axis(key, axis=axis)

        # a list of integers
        elif is_list_like_indexer(key):
            return self._get_list_axis(key, axis=axis)

        # a single integer
        else:
            key = item_from_zerodim(key)
            if not is_integer(key):
                raise TypeError("Cannot index by location index with a non-integer key")

            # validate the location
            self._validate_integer(key, axis)

            return self.obj._ixs(key, axis=axis)

    def _get_slice_axis(self, slice_obj: slice, axis: int):
        # caller is responsible for ensuring non-None axis
        obj = self.obj

        if not need_slice(slice_obj):
            return obj.copy(deep=False)

        labels = obj._get_axis(axis)
        labels._validate_positional_slice(slice_obj)
        return self.obj._slice(slice_obj, axis=axis)

    def _convert_to_indexer(self, key, axis: int, is_setter: bool = False):
        """
        Much simpler as we only have to deal with our valid types.
        """
        return key

    def _get_setitem_indexer(self, key):
        # GH#32257 Fall through to let numpy do validation
        if is_iterator(key):
            return list(key)
        return key

    # -------------------------------------------------------------------

    def _setitem_with_indexer(self, indexer, value, name="iloc"):
        """
        _setitem_with_indexer is for setting values on a Series/DataFrame
        using positional indexers.

        If the relevant keys are not present, the Series/DataFrame may be
        expanded.

        This method is currently broken when dealing with non-unique Indexes,
        since it goes from positional indexers back to labels when calling
        BlockManager methods, see GH#12991, GH#22046, GH#15686.
        """
        info_axis = self.obj._info_axis_number

        # maybe partial set
        take_split_path = not self.obj._mgr.is_single_block

        # if there is only one block/type, still have to take split path
        # unless the block is one-dimensional or it can hold the value
        if (
            not take_split_path
            and getattr(self.obj._mgr, "blocks", False)
            and self.ndim > 1
        ):
            # in case of dict, keys are indices
            val = list(value.values()) if isinstance(value, dict) else value
            blk = self.obj._mgr.blocks[0]
            take_split_path = not blk._can_hold_element(val)

        # if we have any multi-indexes that have non-trivial slices
        # (not null slices) then we must take the split path, xref
        # GH 10360, GH 27841
        if isinstance(indexer, tuple) and len(indexer) == len(self.obj.axes):
            for i, ax in zip(indexer, self.obj.axes):
                if isinstance(ax, MultiIndex) and not (
                    is_integer(i) or com.is_null_slice(i)
                ):
                    take_split_path = True
                    break

        if isinstance(indexer, tuple):
            nindexer = []
            for i, idx in enumerate(indexer):
                if isinstance(idx, dict):
                    # a {"key": ...} dict produced by _convert_to_indexer
                    # marks a missing label -> setitem-with-expansion

                    # reindex the axis to the new value
                    # and set inplace
                    key, _ = convert_missing_indexer(idx)

                    # if this is the items axes, then take the main missing
                    # path first
                    # this correctly sets the dtype and avoids cache issues
                    # essentially this separates out the block that is needed
                    # to possibly be modified
                    if self.ndim > 1 and i == info_axis:

                        # add the new item, and set the value
                        # must have all defined axes if we have a scalar
                        # or a list-like on the non-info axes if we have a
                        # list-like
                        if not len(self.obj):
                            if not is_list_like_indexer(value):
                                raise ValueError(
                                    "cannot set a frame with no "
                                    "defined index and a scalar"
                                )
                            self.obj[key] = value
                            return

                        # add a new item with the dtype setup
                        if com.is_null_slice(indexer[0]):
                            # We are setting an entire column
                            self.obj[key] = value
                        else:
                            self.obj[key] = infer_fill_value(value)

                        new_indexer = convert_from_missing_indexer_tuple(
                            indexer, self.obj.axes
                        )
                        self._setitem_with_indexer(new_indexer, value, name)

                        return

                    # reindex the axis
                    # make sure to clear the cache because we are
                    # just replacing the block manager here
                    # so the object is the same
                    index = self.obj._get_axis(i)
                    labels = index.insert(len(index), key)

                    # We are expanding the Series/DataFrame values to match
                    #  the length of the new index `labels`.  GH#40096 ensure
                    #  this is valid even if the index has duplicates.
                    taker = np.arange(len(index) + 1, dtype=np.intp)
                    taker[-1] = -1
                    reindexers = {i: (labels, taker)}
                    new_obj = self.obj._reindex_with_indexers(
                        reindexers, allow_dups=True
                    )
                    self.obj._mgr = new_obj._mgr
                    self.obj._maybe_update_cacher(clear=True)
                    self.obj._is_copy = None

                    nindexer.append(labels.get_loc(key))

                else:
                    nindexer.append(idx)

            indexer = tuple(nindexer)
        else:

            indexer, missing = convert_missing_indexer(indexer)

            if missing:
                self._setitem_with_indexer_missing(indexer, value)
                return

        # align and set the values
        if take_split_path:
            # We have to operate column-wise
            self._setitem_with_indexer_split_path(indexer, value, name)
        else:
            self._setitem_single_block(indexer, value, name)

    def _setitem_with_indexer_split_path(self, indexer, value, name: str):
        """
        Setitem column-wise.
        """
        # Above we only set take_split_path to True for 2D cases
        assert self.ndim == 2

        if not isinstance(indexer, tuple):
            indexer = _tuplify(self.ndim, indexer)
        if len(indexer) > self.ndim:
            raise IndexError("too many indices for array")
        if isinstance(indexer[0], np.ndarray) and indexer[0].ndim > 2:
            raise ValueError(r"Cannot set values with ndim > 2")

        if (isinstance(value, ABCSeries) and name != "iloc") or isinstance(value, dict):
            from pandas import Series

            value = self._align_series(indexer, Series(value))

        # Ensure we have something we can iterate over
        info_axis = indexer[1]
        ilocs = self._ensure_iterable_column_indexer(info_axis)

        pi = indexer[0]
        lplane_indexer = length_of_indexer(pi, self.obj.index)
        # lplane_indexer gives the expected length of obj[indexer[0]]

        # we need an iterable, with a ndim of at least 1
        # eg. don't pass through np.array(0)
        if is_list_like_indexer(value) and getattr(value, "ndim", 1) > 0:

            if isinstance(value, ABCDataFrame):
                self._setitem_with_indexer_frame_value(indexer, value, name)

            elif np.ndim(value) == 2:
                self._setitem_with_indexer_2d_value(indexer, value)

            elif len(ilocs) == 1 and lplane_indexer == len(value) and not is_scalar(pi):
                # We are setting multiple rows in a single column.
                self._setitem_single_column(ilocs[0], value, pi)

            elif len(ilocs) == 1 and 0 != lplane_indexer != len(value):
                # We are trying to set N values into M entries of a single
                #  column, which is invalid for N != M
                # Exclude zero-len for e.g. boolean masking that is all-false

                if len(value) == 1 and not is_integer(info_axis):
                    # This is a case like df.iloc[:3, [1]] = [0]
                    #  where we treat as df.iloc[:3, 1] = 0
                    return self._setitem_with_indexer((pi, info_axis[0]), value[0])

                raise ValueError(
                    "Must have equal len keys and value "
                    "when setting with an iterable"
                )

            elif lplane_indexer == 0 and len(value) == len(self.obj.index):
                # We get here in one case via .loc with a all-False mask
                pass

            elif len(ilocs) == len(value):
                # We are setting multiple columns in a single row.
                for loc, v in zip(ilocs, value):
                    self._setitem_single_column(loc, v, pi)

            elif len(ilocs) == 1 and com.is_null_slice(pi) and len(self.obj) == 0:
                # This is a setitem-with-expansion, see
                #  test_loc_setitem_empty_append_expands_rows_mixed_dtype
                # e.g. df = DataFrame(columns=["x", "y"])
                #  df["x"] = df["x"].astype(np.int64)
                #  df.loc[:, "x"] = [1, 2, 3]
                self._setitem_single_column(ilocs[0], value, pi)

            else:
                raise ValueError(
                    "Must have equal len keys and value "
                    "when setting with an iterable"
                )

        else:

            # scalar value
            for loc in ilocs:
                self._setitem_single_column(loc, value, pi)

    def _setitem_with_indexer_2d_value(self, indexer, value):
        # We get here with np.ndim(value) == 2, excluding DataFrame,
        #  which goes through _setitem_with_indexer_frame_value
        pi = indexer[0]

        ilocs = self._ensure_iterable_column_indexer(indexer[1])

        # GH#7551 Note that this coerces the dtype if we are mixed
        value = np.array(value, dtype=object)
        if len(ilocs) != value.shape[1]:
            raise ValueError(
                "Must have equal len keys and value when setting with an ndarray"
            )

        for i, loc in enumerate(ilocs):
            # setting with a list, re-coerces
            self._setitem_single_column(loc, value[:, i].tolist(), pi)

    def _setitem_with_indexer_frame_value(self, indexer, value: DataFrame, name: str):
        # column-wise setitem when the value is itself a DataFrame
        ilocs = self._ensure_iterable_column_indexer(indexer[1])

        sub_indexer = list(indexer)
        pi = indexer[0]

        multiindex_indexer = isinstance(self.obj.columns, MultiIndex)

        unique_cols = value.columns.is_unique

        # We do not want to align the value in case of iloc GH#37728
        if name == "iloc":
            for i, loc in enumerate(ilocs):
                val = value.iloc[:, i]
                self._setitem_single_column(loc, val, pi)

        elif not unique_cols and value.columns.equals(self.obj.columns):
            # We assume we are already aligned, see
            # test_iloc_setitem_frame_duplicate_columns_multiple_blocks
            for loc in ilocs:
                item = self.obj.columns[loc]
                if item in value:
                    sub_indexer[1] = item
                    val = self._align_series(
                        tuple(sub_indexer),
                        value.iloc[:, loc],
                        multiindex_indexer,
                    )
                else:
                    val = np.nan

                self._setitem_single_column(loc, val, pi)

        elif not unique_cols:
            raise ValueError("Setting with non-unique columns is not allowed.")

        else:
            for loc in ilocs:
                item = self.obj.columns[loc]
                if item in value:
                    sub_indexer[1] = item
                    val = self._align_series(
                        tuple(sub_indexer), value[item], multiindex_indexer
                    )
                else:
                    val = np.nan

                self._setitem_single_column(loc, val, pi)

    def _setitem_single_column(self, loc: int, value, plane_indexer):
        """
        Set values into a single column of the frame, in place.

        Parameters
        ----------
        loc : int
            Indexer for column position
        plane_indexer : int, slice, listlike[int]
            The indexer we use for setitem along axis=0.
        """
        pi = plane_indexer

        ser = self.obj._ixs(loc, axis=1)

        # perform the equivalent of a setitem on the info axis
        # as we have a null slice or a slice with full bounds
        # which means essentially reassign to the columns of a
        # multi-dim object
        # GH#6149 (null slice), GH#10408 (full bounds)
        if com.is_null_slice(pi) or com.is_full_slice(pi, len(self.obj)):
            ser = value
        elif (
            is_array_like(value)
            and is_exact_shape_match(ser, value)
            and not is_empty_indexer(pi, value)
        ):
            if is_list_like(pi):
                ser = value[np.argsort(pi)]
            else:
                # in case of slice
                ser = value[pi]
        else:
            # set the item, possibly having a dtype change
            ser = ser.copy()
            ser._mgr = ser._mgr.setitem(indexer=(pi,), value=value)
            ser._maybe_update_cacher(clear=True)

        # reset the sliced object if unique
        self.obj._iset_item(loc, ser)

    def _setitem_single_block(self, indexer, value, name: str):
        """
        _setitem_with_indexer for the case when we have a single Block.
        """
        from pandas import Series

        info_axis = self.obj._info_axis_number
        item_labels = self.obj._get_axis(info_axis)

        if isinstance(indexer, tuple):

            # if we are setting on the info axis ONLY
            # set using those methods to avoid block-splitting
            # logic here
            if (
                len(indexer) > info_axis
                and is_integer(indexer[info_axis])
                and all(
                    com.is_null_slice(idx)
                    for i, idx in enumerate(indexer)
                    if i != info_axis
                )
            ):
                selected_item_labels = item_labels[indexer[info_axis]]
                if len(item_labels.get_indexer_for([selected_item_labels])) == 1:
                    self.obj[selected_item_labels] = value
                    return

            indexer = maybe_convert_ix(*indexer)
        if (isinstance(value, ABCSeries) and name != "iloc") or isinstance(value, dict):
            # TODO(EA): ExtensionBlock.setitem this causes issues with
            # setting for extensionarrays that store dicts. Need to decide
            # if it's worth supporting that.
            value = self._align_series(indexer, Series(value))

        elif isinstance(value, ABCDataFrame) and name != "iloc":
            value = self._align_frame(indexer, value)

        # check for chained assignment
        self.obj._check_is_chained_assignment_possible()

        # actually do the set
        self.obj._mgr = self.obj._mgr.setitem(indexer=indexer, value=value)
        self.obj._maybe_update_cacher(clear=True)

    def _setitem_with_indexer_missing(self, indexer, value):
        """
        Insert new row(s) or column(s) into the Series or DataFrame.
        """
        from pandas import Series

        # reindex the axis to the new value
        # and set inplace
        if self.ndim == 1:
            index = self.obj.index
            new_index = index.insert(len(index), indexer)

            # we have a coerced indexer, e.g. a float
            # that matches in an Int64Index, so
            # we will not create a duplicate index, rather
            # index to that element

            # e.g. 0.0 -> 0
            # GH#12246
            if index.is_unique:
                new_indexer = index.get_indexer([new_index[-1]])
                if (new_indexer != -1).any():
                    # We get only here with loc, so can hard code
                    return self._setitem_with_indexer(new_indexer, value, "loc")

            # this preserves dtype of the value
            new_values = Series([value])._values
            if len(self.obj._values):
                # GH#22717 handle casting compatibility that np.concatenate
                #  does incorrectly
                new_values = concat_compat([self.obj._values, new_values])
            self.obj._mgr = self.obj._constructor(
                new_values, index=new_index, name=self.obj.name
            )._mgr
            self.obj._maybe_update_cacher(clear=True)

        elif self.ndim == 2:

            if not len(self.obj.columns):
                # no columns and scalar
                raise ValueError("cannot set a frame with no defined columns")

            if isinstance(value, ABCSeries):
                # append a Series
                value = value.reindex(index=self.obj.columns, copy=True)
                value.name = indexer
            elif isinstance(value, dict):
                value = Series(
                    value, index=self.obj.columns, name=indexer, dtype=object
                )
            else:
                # a list-list
                if is_list_like_indexer(value):
                    # must have conforming columns
                    if len(value) != len(self.obj.columns):
                        raise ValueError("cannot set a row with mismatched columns")

                value = Series(value, index=self.obj.columns, name=indexer)

            self.obj._mgr = self.obj.append(value)._mgr
            self.obj._maybe_update_cacher(clear=True)

    def _ensure_iterable_column_indexer(self, column_indexer):
        """
        Ensure that our column indexer is something that can be iterated over.
        """
        if is_integer(column_indexer):
            ilocs = [column_indexer]
        elif isinstance(column_indexer, slice):
            ilocs = np.arange(len(self.obj.columns))[column_indexer]
        elif isinstance(column_indexer, np.ndarray) and is_bool_dtype(
            column_indexer.dtype
        ):
            ilocs = np.arange(len(column_indexer))[column_indexer]
        else:
            ilocs = column_indexer
        return ilocs

    def _align_series(self, indexer, ser: Series, multiindex_indexer: bool = False):
        """
        Broadcast/align a Series value to the shape selected by `indexer`.

        Parameters
        ----------
        indexer : tuple, slice, scalar
            Indexer used to get the locations that will be set to `ser`.
        ser : pd.Series
            Values to assign to the locations specified by `indexer`.
        multiindex_indexer : bool, optional
            Defaults to False. Should be set to True if `indexer` was from
            a `pd.MultiIndex`, to avoid unnecessary broadcasting.

        Returns
        -------
        `np.array` of `ser` broadcast to the appropriate shape for assignment
        to the locations selected by `indexer`
        """
        if isinstance(indexer, (slice, np.ndarray, list, Index)):
            indexer = (indexer,)

        if isinstance(indexer, tuple):

            # flatten np.ndarray indexers
            def ravel(i):
                return i.ravel() if isinstance(i, np.ndarray) else i

            indexer = tuple(map(ravel, indexer))

            aligners = [not com.is_null_slice(idx) for idx in indexer]
            sum_aligners = sum(aligners)
            single_aligner = sum_aligners == 1
            is_frame = self.ndim == 2
            obj = self.obj

            # are we a single alignable value on a non-primary
            # dim (e.g. panel: 1,2, or frame: 0) ?
            # hence need to align to a single axis dimension
            # rather that find all valid dims

            # frame
            if is_frame:
                single_aligner = single_aligner and aligners[0]

            # we have a frame, with multiple indexers on both axes; and a
            # series, so need to broadcast (see GH5206)
            if sum_aligners == self.ndim and all(is_sequence(_) for _ in indexer):
                ser = ser.reindex(obj.axes[0][indexer[0]], copy=True)._values

                # single indexer
                if len(indexer) > 1 and not multiindex_indexer:
                    len_indexer = len(indexer[1])
                    ser = np.tile(ser, len_indexer).reshape(len_indexer, -1).T

                return ser

            for i, idx in enumerate(indexer):
                ax = obj.axes[i]

                # multiple aligners (or null slices)
                if is_sequence(idx) or isinstance(idx, slice):
                    if single_aligner and com.is_null_slice(idx):
                        continue
                    new_ix = ax[idx]
                    if not is_list_like_indexer(new_ix):
                        new_ix = Index([new_ix])
                    else:
                        new_ix = Index(new_ix)
                    if ser.index.equals(new_ix) or not len(new_ix):
                        return ser._values.copy()

                    return ser.reindex(new_ix)._values

                # 2 dims
                elif single_aligner:

                    # reindex along index
                    ax = self.obj.axes[1]
                    if ser.index.equals(ax) or not len(ax):
                        return ser._values.copy()
                    return ser.reindex(ax)._values

        elif is_integer(indexer) and self.ndim == 1:
            if is_object_dtype(self.obj):
                return ser
            ax = self.obj._get_axis(0)

            if ser.index.equals(ax):
                return ser._values.copy()

            return ser.reindex(ax)._values[indexer]

        elif is_integer(indexer):
            ax = self.obj._get_axis(1)

            if ser.index.equals(ax):
                return ser._values.copy()

            return ser.reindex(ax)._values

        raise ValueError("Incompatible indexer with Series")

    def _align_frame(self, indexer, df: DataFrame):
        # align a DataFrame value to the locations selected by `indexer`
        is_frame = self.ndim == 2

        if isinstance(indexer, tuple):

            idx, cols = None, None
            sindexers = []
            for i, ix in enumerate(indexer):
                ax = self.obj.axes[i]
                if is_sequence(ix) or isinstance(ix, slice):
                    if isinstance(ix, np.ndarray):
                        ix = ix.ravel()
                    if idx is None:
                        idx = ax[ix]
                    elif cols is None:
                        cols = ax[ix]
                    else:
                        break
                else:
                    sindexers.append(i)

            if idx is not None and cols is not None:

                if df.index.equals(idx) and df.columns.equals(cols):
                    val = df.copy()._values
                else:
                    val = df.reindex(idx, columns=cols)._values
                return val

        elif (isinstance(indexer, slice) or is_list_like_indexer(indexer)) and is_frame:
            ax = self.obj.index[indexer]
            if df.index.equals(ax):
                val = df.copy()._values
            else:

                # we have a multi-index and are trying to align
                # with a particular, level GH3738
                if (
                    isinstance(ax, MultiIndex)
                    and isinstance(df.index, MultiIndex)
                    and ax.nlevels != df.index.nlevels
                ):
                    raise TypeError(
                        "cannot align on a multi-index with out "
                        "specifying the join levels"
                    )

                val = df.reindex(index=ax)._values
            return val

        raise ValueError("Incompatible indexer with DataFrame")


class _ScalarAccessIndexer(NDFrameIndexerBase):
    """
    Access scalars quickly.
    """

    def _convert_key(self, key, is_setter: bool = False):
        raise AbstractMethodError(self)

    def __getitem__(self, key):
        if not isinstance(key, tuple):

            # we could have a convertible item here (e.g. Timestamp)
            if not is_list_like_indexer(key):
                key = (key,)
            else:
                raise ValueError("Invalid call for scalar access (getting)!")

        key = self._convert_key(key)
        return self.obj._get_value(*key, takeable=self._takeable)

    def __setitem__(self, key, value):
        if isinstance(key, tuple):
            key = tuple(com.apply_if_callable(x, self.obj) for x in key)
        else:
            # scalar callable may return tuple
            key = com.apply_if_callable(key, self.obj)

        if not isinstance(key, tuple):
            key = _tuplify(self.ndim, key)
        key = list(self._convert_key(key, is_setter=True))
        if len(key) != self.ndim:
            raise ValueError("Not enough indexers for scalar access (setting)!")

        self.obj._set_value(*key, value=value, takeable=self._takeable)


@doc(IndexingMixin.at)
class _AtIndexer(_ScalarAccessIndexer):
    # label-based scalar access
    _takeable = False

    def _convert_key(self, key, is_setter: bool = False):
        """
        Require they keys to be the same type as the index. (so we don't
        fallback)
        """
        # GH 26989
        # For series, unpacking key needs to result in the label.
        # This is already the case for len(key) == 1; e.g. (1,)
        if self.ndim == 1 and len(key) > 1:
            key = (key,)

        # allow arbitrary setting
        if is_setter:
            return list(key)

        return key

    @property
    def _axes_are_unique(self) -> bool:
        # Only relevant for self.ndim == 2
        assert self.ndim == 2
        return self.obj.index.is_unique and self.obj.columns.is_unique

    def __getitem__(self, key):

        if self.ndim == 2 and not self._axes_are_unique:
            # GH#33041 fall back to .loc
            if not isinstance(key, tuple) or not all(is_scalar(x) for x in key):
                raise ValueError("Invalid call for scalar access (getting)!")
            return self.obj.loc[key]

        return super().__getitem__(key)

    def __setitem__(self, key, value):
        if self.ndim == 2 and not self._axes_are_unique:
            # GH#33041 fall back to .loc
            if not isinstance(key, tuple) or not all(is_scalar(x) for x in key):
                raise ValueError("Invalid call for scalar access (setting)!")
            self.obj.loc[key] = value
            return

        return super().__setitem__(key, value)


@doc(IndexingMixin.iat)
class _iAtIndexer(_ScalarAccessIndexer):
    # positional scalar access
    _takeable = True

    def _convert_key(self, key, is_setter: bool = False):
        """
        Require integer args. (and convert to label arguments)
        """
        for i in key:
            if not is_integer(i):
                raise ValueError("iAt based indexing can only have integer indexers")
        return key


def _tuplify(ndim: int, loc: Hashable) -> tuple[Hashable | slice, ...]:
    """
    Given an indexer for the first dimension, create an equivalent tuple
    for indexing over all dimensions.

    Parameters
    ----------
    ndim : int
    loc : object

    Returns
    -------
    tuple
    """
    _tup: list[Hashable | slice]
    _tup = [slice(None, None) for _ in range(ndim)]
    _tup[0] = loc
    return tuple(_tup)


def convert_to_index_sliceable(obj: DataFrame, key):
    """
    If we are index sliceable, then return my slicer, otherwise return None.
    """
    idx = obj.index
    if isinstance(key, slice):
        return idx._convert_slice_indexer(key, kind="getitem")

    elif isinstance(key, str):

        # we are an actual column
        if key in obj.columns:
            return None

        # We might have a datetimelike string that we can translate to a
        # slice here via partial string indexing
        if idx._supports_partial_string_indexing:
            try:
                res = idx._get_string_slice(str(key))
                warnings.warn(
                    "Indexing a DataFrame with a datetimelike index using a single "
                    "string to slice the rows, like `frame[string]`, is deprecated "
                    "and will be removed in a future version. Use `frame.loc[string]` "
                    "instead.",
                    FutureWarning,
                    stacklevel=3,
                )
                return res
            except (KeyError, ValueError, NotImplementedError):
                return None

    return None


def check_bool_indexer(index: Index, key) -> np.ndarray:
    """
    Check if key is a valid boolean indexer for an object with such index and
    perform reindexing or conversion if needed.

    This function assumes that is_bool_indexer(key) == True.

    Parameters
    ----------
    index : Index
        Index of the object on which the indexing is done.
    key : list-like
        Boolean indexer to check.

    Returns
    -------
    np.array
        Resulting key.

    Raises
    ------
    IndexError
        If the key does not have the same length as index.
    IndexingError
        If the index of the key is unalignable to index.
    """
    result = key
    if isinstance(key, ABCSeries) and not key.index.equals(index):
        # a boolean Series is aligned to the target index; labels it does
        # not cover become NaN, which is rejected below
        result = result.reindex(index)
        mask = isna(result._values)
        if mask.any():
            # NOTE(review): this call continues past the end of this view;
            # the remainder of the function is outside the visible source.
            raise IndexingError(
                "Unalignable boolean Series provided as "
                "indexer (index of the boolean Series and of "
                "the indexed object do not match)."
) return result.astype(bool)._values if is_object_dtype(key): # key might be object-dtype bool, check_array_indexer needs bool array result = np.asarray(result, dtype=bool) elif not is_array_like(result): # GH 33924 # key may contain nan elements, check_array_indexer needs bool array result = pd_array(result, dtype=bool) return check_array_indexer(index, result) def convert_missing_indexer(indexer): """ Reverse convert a missing indexer, which is a dict return the scalar indexer and a boolean indicating if we converted """ if isinstance(indexer, dict): # a missing key (but not a tuple indexer) indexer = indexer["key"] if isinstance(indexer, bool): raise KeyError("cannot use a single bool to index into setitem") return indexer, True return indexer, False def convert_from_missing_indexer_tuple(indexer, axes): """ Create a filtered indexer that doesn't have any missing indexers. """ def get_indexer(_i, _idx): return axes[_i].get_loc(_idx["key"]) if isinstance(_idx, dict) else _idx return tuple(get_indexer(_i, _idx) for _i, _idx in enumerate(indexer)) def maybe_convert_ix(*args): """ We likely want to take the cross-product. """ for arg in args: if not isinstance(arg, (np.ndarray, list, ABCSeries, Index)): return args return np.ix_(*args) def is_nested_tuple(tup, labels) -> bool: """ Returns ------- bool """ # check for a compatible nested tuple and multiindexes among the axes if not isinstance(tup, tuple): return False for k in tup: if is_list_like(k) or isinstance(k, slice): return isinstance(labels, MultiIndex) return False def is_label_like(key) -> bool: """ Returns ------- bool """ # select a label or row return not isinstance(key, slice) and not is_list_like_indexer(key) def need_slice(obj: slice) -> bool: """ Returns ------- bool """ return ( obj.start is not None or obj.stop is not None or (obj.step is not None and obj.step != 1) )
import numpy as np import pytest from pandas import ( Categorical, Index, ) import pandas._testing as tm class TestCategoricalSort: def test_argsort(self): c = Categorical([5, 3, 1, 4, 2], ordered=True) expected = np.array([2, 4, 1, 3, 0]) tm.assert_numpy_array_equal( c.argsort(ascending=True), expected, check_dtype=False ) expected = expected[::-1] tm.assert_numpy_array_equal( c.argsort(ascending=False), expected, check_dtype=False ) def test_numpy_argsort(self): c = Categorical([5, 3, 1, 4, 2], ordered=True) expected = np.array([2, 4, 1, 3, 0]) tm.assert_numpy_array_equal(np.argsort(c), expected, check_dtype=False) tm.assert_numpy_array_equal( np.argsort(c, kind="mergesort"), expected, check_dtype=False ) msg = "the 'axis' parameter is not supported" with pytest.raises(ValueError, match=msg): np.argsort(c, axis=0) msg = "the 'order' parameter is not supported" with pytest.raises(ValueError, match=msg): np.argsort(c, order="C") def test_sort_values(self): # unordered cats are sortable cat = Categorical(["a", "b", "b", "a"], ordered=False) cat.sort_values() cat = Categorical(["a", "c", "b", "d"], ordered=True) # sort_values res = cat.sort_values() exp = np.array(["a", "b", "c", "d"], dtype=object) tm.assert_numpy_array_equal(res.__array__(), exp) tm.assert_index_equal(res.categories, cat.categories) cat = Categorical( ["a", "c", "b", "d"], categories=["a", "b", "c", "d"], ordered=True ) res = cat.sort_values() exp = np.array(["a", "b", "c", "d"], dtype=object) tm.assert_numpy_array_equal(res.__array__(), exp) tm.assert_index_equal(res.categories, cat.categories) res = cat.sort_values(ascending=False) exp = np.array(["d", "c", "b", "a"], dtype=object) tm.assert_numpy_array_equal(res.__array__(), exp) tm.assert_index_equal(res.categories, cat.categories) # sort (inplace order) cat1 = cat.copy() orig_codes = cat1._codes cat1.sort_values(inplace=True) assert cat1._codes is orig_codes exp = np.array(["a", "b", "c", "d"], dtype=object) 
tm.assert_numpy_array_equal(cat1.__array__(), exp) tm.assert_index_equal(res.categories, cat.categories) # reverse cat = Categorical(["a", "c", "c", "b", "d"], ordered=True) res = cat.sort_values(ascending=False) exp_val = np.array(["d", "c", "c", "b", "a"], dtype=object) exp_categories = Index(["a", "b", "c", "d"]) tm.assert_numpy_array_equal(res.__array__(), exp_val) tm.assert_index_equal(res.categories, exp_categories) def test_sort_values_na_position(self): # see gh-12882 cat = Categorical([5, 2, np.nan, 2, np.nan], ordered=True) exp_categories = Index([2, 5]) exp = np.array([2.0, 2.0, 5.0, np.nan, np.nan]) res = cat.sort_values() # default arguments tm.assert_numpy_array_equal(res.__array__(), exp) tm.assert_index_equal(res.categories, exp_categories) exp = np.array([np.nan, np.nan, 2.0, 2.0, 5.0]) res = cat.sort_values(ascending=True, na_position="first") tm.assert_numpy_array_equal(res.__array__(), exp) tm.assert_index_equal(res.categories, exp_categories) exp = np.array([np.nan, np.nan, 5.0, 2.0, 2.0]) res = cat.sort_values(ascending=False, na_position="first") tm.assert_numpy_array_equal(res.__array__(), exp) tm.assert_index_equal(res.categories, exp_categories) exp = np.array([2.0, 2.0, 5.0, np.nan, np.nan]) res = cat.sort_values(ascending=True, na_position="last") tm.assert_numpy_array_equal(res.__array__(), exp) tm.assert_index_equal(res.categories, exp_categories) exp = np.array([5.0, 2.0, 2.0, np.nan, np.nan]) res = cat.sort_values(ascending=False, na_position="last") tm.assert_numpy_array_equal(res.__array__(), exp) tm.assert_index_equal(res.categories, exp_categories) cat = Categorical(["a", "c", "b", "d", np.nan], ordered=True) res = cat.sort_values(ascending=False, na_position="last") exp_val = np.array(["d", "c", "b", "a", np.nan], dtype=object) exp_categories = Index(["a", "b", "c", "d"]) tm.assert_numpy_array_equal(res.__array__(), exp_val) tm.assert_index_equal(res.categories, exp_categories) cat = Categorical(["a", "c", "b", "d", np.nan], 
ordered=True) res = cat.sort_values(ascending=False, na_position="first") exp_val = np.array([np.nan, "d", "c", "b", "a"], dtype=object) exp_categories = Index(["a", "b", "c", "d"]) tm.assert_numpy_array_equal(res.__array__(), exp_val) tm.assert_index_equal(res.categories, exp_categories)
datapythonista/pandas
pandas/tests/arrays/categorical/test_sorting.py
pandas/core/indexing.py
from pandas import ( TimedeltaIndex, timedelta_range, ) import pandas._testing as tm class TestTimedeltaIndexDelete: def test_delete(self): idx = timedelta_range(start="1 Days", periods=5, freq="D", name="idx") # preserve freq expected_0 = timedelta_range(start="2 Days", periods=4, freq="D", name="idx") expected_4 = timedelta_range(start="1 Days", periods=4, freq="D", name="idx") # reset freq to None expected_1 = TimedeltaIndex( ["1 day", "3 day", "4 day", "5 day"], freq=None, name="idx" ) cases = { 0: expected_0, -5: expected_0, -1: expected_4, 4: expected_4, 1: expected_1, } for n, expected in cases.items(): result = idx.delete(n) tm.assert_index_equal(result, expected) assert result.name == expected.name assert result.freq == expected.freq with tm.external_error_raised((IndexError, ValueError)): # either depending on numpy version idx.delete(5) def test_delete_slice(self): idx = timedelta_range(start="1 days", periods=10, freq="D", name="idx") # preserve freq expected_0_2 = timedelta_range(start="4 days", periods=7, freq="D", name="idx") expected_7_9 = timedelta_range(start="1 days", periods=7, freq="D", name="idx") # reset freq to None expected_3_5 = TimedeltaIndex( ["1 d", "2 d", "3 d", "7 d", "8 d", "9 d", "10d"], freq=None, name="idx" ) cases = { (0, 1, 2): expected_0_2, (7, 8, 9): expected_7_9, (3, 4, 5): expected_3_5, } for n, expected in cases.items(): result = idx.delete(n) tm.assert_index_equal(result, expected) assert result.name == expected.name assert result.freq == expected.freq result = idx.delete(slice(n[0], n[-1] + 1)) tm.assert_index_equal(result, expected) assert result.name == expected.name assert result.freq == expected.freq def test_delete_doesnt_infer_freq(self): # GH#30655 behavior matches DatetimeIndex tdi = TimedeltaIndex(["1 Day", "2 Days", None, "3 Days", "4 Days"]) result = tdi.delete(2) assert result.freq is None
import numpy as np import pytest from pandas import ( Categorical, Index, ) import pandas._testing as tm class TestCategoricalSort: def test_argsort(self): c = Categorical([5, 3, 1, 4, 2], ordered=True) expected = np.array([2, 4, 1, 3, 0]) tm.assert_numpy_array_equal( c.argsort(ascending=True), expected, check_dtype=False ) expected = expected[::-1] tm.assert_numpy_array_equal( c.argsort(ascending=False), expected, check_dtype=False ) def test_numpy_argsort(self): c = Categorical([5, 3, 1, 4, 2], ordered=True) expected = np.array([2, 4, 1, 3, 0]) tm.assert_numpy_array_equal(np.argsort(c), expected, check_dtype=False) tm.assert_numpy_array_equal( np.argsort(c, kind="mergesort"), expected, check_dtype=False ) msg = "the 'axis' parameter is not supported" with pytest.raises(ValueError, match=msg): np.argsort(c, axis=0) msg = "the 'order' parameter is not supported" with pytest.raises(ValueError, match=msg): np.argsort(c, order="C") def test_sort_values(self): # unordered cats are sortable cat = Categorical(["a", "b", "b", "a"], ordered=False) cat.sort_values() cat = Categorical(["a", "c", "b", "d"], ordered=True) # sort_values res = cat.sort_values() exp = np.array(["a", "b", "c", "d"], dtype=object) tm.assert_numpy_array_equal(res.__array__(), exp) tm.assert_index_equal(res.categories, cat.categories) cat = Categorical( ["a", "c", "b", "d"], categories=["a", "b", "c", "d"], ordered=True ) res = cat.sort_values() exp = np.array(["a", "b", "c", "d"], dtype=object) tm.assert_numpy_array_equal(res.__array__(), exp) tm.assert_index_equal(res.categories, cat.categories) res = cat.sort_values(ascending=False) exp = np.array(["d", "c", "b", "a"], dtype=object) tm.assert_numpy_array_equal(res.__array__(), exp) tm.assert_index_equal(res.categories, cat.categories) # sort (inplace order) cat1 = cat.copy() orig_codes = cat1._codes cat1.sort_values(inplace=True) assert cat1._codes is orig_codes exp = np.array(["a", "b", "c", "d"], dtype=object) 
tm.assert_numpy_array_equal(cat1.__array__(), exp) tm.assert_index_equal(res.categories, cat.categories) # reverse cat = Categorical(["a", "c", "c", "b", "d"], ordered=True) res = cat.sort_values(ascending=False) exp_val = np.array(["d", "c", "c", "b", "a"], dtype=object) exp_categories = Index(["a", "b", "c", "d"]) tm.assert_numpy_array_equal(res.__array__(), exp_val) tm.assert_index_equal(res.categories, exp_categories) def test_sort_values_na_position(self): # see gh-12882 cat = Categorical([5, 2, np.nan, 2, np.nan], ordered=True) exp_categories = Index([2, 5]) exp = np.array([2.0, 2.0, 5.0, np.nan, np.nan]) res = cat.sort_values() # default arguments tm.assert_numpy_array_equal(res.__array__(), exp) tm.assert_index_equal(res.categories, exp_categories) exp = np.array([np.nan, np.nan, 2.0, 2.0, 5.0]) res = cat.sort_values(ascending=True, na_position="first") tm.assert_numpy_array_equal(res.__array__(), exp) tm.assert_index_equal(res.categories, exp_categories) exp = np.array([np.nan, np.nan, 5.0, 2.0, 2.0]) res = cat.sort_values(ascending=False, na_position="first") tm.assert_numpy_array_equal(res.__array__(), exp) tm.assert_index_equal(res.categories, exp_categories) exp = np.array([2.0, 2.0, 5.0, np.nan, np.nan]) res = cat.sort_values(ascending=True, na_position="last") tm.assert_numpy_array_equal(res.__array__(), exp) tm.assert_index_equal(res.categories, exp_categories) exp = np.array([5.0, 2.0, 2.0, np.nan, np.nan]) res = cat.sort_values(ascending=False, na_position="last") tm.assert_numpy_array_equal(res.__array__(), exp) tm.assert_index_equal(res.categories, exp_categories) cat = Categorical(["a", "c", "b", "d", np.nan], ordered=True) res = cat.sort_values(ascending=False, na_position="last") exp_val = np.array(["d", "c", "b", "a", np.nan], dtype=object) exp_categories = Index(["a", "b", "c", "d"]) tm.assert_numpy_array_equal(res.__array__(), exp_val) tm.assert_index_equal(res.categories, exp_categories) cat = Categorical(["a", "c", "b", "d", np.nan], 
ordered=True) res = cat.sort_values(ascending=False, na_position="first") exp_val = np.array([np.nan, "d", "c", "b", "a"], dtype=object) exp_categories = Index(["a", "b", "c", "d"]) tm.assert_numpy_array_equal(res.__array__(), exp_val) tm.assert_index_equal(res.categories, exp_categories)
datapythonista/pandas
pandas/tests/arrays/categorical/test_sorting.py
pandas/tests/indexes/timedeltas/test_delete.py
from __future__ import annotations from contextlib import contextmanager import re from typing import ( Sequence, Type, cast, ) import warnings @contextmanager def assert_produces_warning( expected_warning: type[Warning] | bool | None = Warning, filter_level="always", check_stacklevel: bool = True, raise_on_extra_warnings: bool = True, match: str | None = None, ): """ Context manager for running code expected to either raise a specific warning, or not raise any warnings. Verifies that the code raises the expected warning, and that it does not raise any other unexpected warnings. It is basically a wrapper around ``warnings.catch_warnings``. Parameters ---------- expected_warning : {Warning, False, None}, default Warning The type of Exception raised. ``exception.Warning`` is the base class for all warnings. To check that no warning is returned, specify ``False`` or ``None``. filter_level : str or None, default "always" Specifies whether warnings are ignored, displayed, or turned into errors. Valid values are: * "error" - turns matching warnings into exceptions * "ignore" - discard the warning * "always" - always emit a warning * "default" - print the warning the first time it is generated from each location * "module" - print the warning the first time it is generated from each module * "once" - print the warning the first time it is generated check_stacklevel : bool, default True If True, displays the line that called the function containing the warning to show were the function is called. Otherwise, the line that implements the function is displayed. raise_on_extra_warnings : bool, default True Whether extra warnings not of the type `expected_warning` should cause the test to fail. match : str, optional Match warning message. Examples -------- >>> import warnings >>> with assert_produces_warning(): ... warnings.warn(UserWarning()) ... >>> with assert_produces_warning(False): ... warnings.warn(RuntimeWarning()) ... Traceback (most recent call last): ... 
AssertionError: Caused unexpected warning(s): ['RuntimeWarning']. >>> with assert_produces_warning(UserWarning): ... warnings.warn(RuntimeWarning()) Traceback (most recent call last): ... AssertionError: Did not see expected warning of class 'UserWarning'. ..warn:: This is *not* thread-safe. """ __tracebackhide__ = True with warnings.catch_warnings(record=True) as w: warnings.simplefilter(filter_level) yield w if expected_warning: expected_warning = cast(Type[Warning], expected_warning) _assert_caught_expected_warning( caught_warnings=w, expected_warning=expected_warning, match=match, check_stacklevel=check_stacklevel, ) if raise_on_extra_warnings: _assert_caught_no_extra_warnings( caught_warnings=w, expected_warning=expected_warning, ) def _assert_caught_expected_warning( *, caught_warnings: Sequence[warnings.WarningMessage], expected_warning: type[Warning], match: str | None, check_stacklevel: bool, ) -> None: """Assert that there was the expected warning among the caught warnings.""" saw_warning = False matched_message = False for actual_warning in caught_warnings: if issubclass(actual_warning.category, expected_warning): saw_warning = True if check_stacklevel and issubclass( actual_warning.category, (FutureWarning, DeprecationWarning) ): _assert_raised_with_correct_stacklevel(actual_warning) if match is not None and re.search(match, str(actual_warning.message)): matched_message = True if not saw_warning: raise AssertionError( f"Did not see expected warning of class " f"{repr(expected_warning.__name__)}" ) if match and not matched_message: raise AssertionError( f"Did not see warning {repr(expected_warning.__name__)} " f"matching {match}" ) def _assert_caught_no_extra_warnings( *, caught_warnings: Sequence[warnings.WarningMessage], expected_warning: type[Warning] | bool | None, ) -> None: """Assert that no extra warnings apart from the expected ones are caught.""" extra_warnings = [] for actual_warning in caught_warnings: if _is_unexpected_warning(actual_warning, 
expected_warning): unclosed = "unclosed transport <asyncio.sslproto._SSLProtocolTransport" if actual_warning.category == ResourceWarning and unclosed in str( actual_warning.message ): # FIXME: kludge because pytest.filterwarnings does not # suppress these, xref GH#38630 continue extra_warnings.append( ( actual_warning.category.__name__, actual_warning.message, actual_warning.filename, actual_warning.lineno, ) ) if extra_warnings: raise AssertionError(f"Caused unexpected warning(s): {repr(extra_warnings)}") def _is_unexpected_warning( actual_warning: warnings.WarningMessage, expected_warning: type[Warning] | bool | None, ) -> bool: """Check if the actual warning issued is unexpected.""" if actual_warning and not expected_warning: return True expected_warning = cast(Type[Warning], expected_warning) return bool(not issubclass(actual_warning.category, expected_warning)) def _assert_raised_with_correct_stacklevel( actual_warning: warnings.WarningMessage, ) -> None: from inspect import ( getframeinfo, stack, ) caller = getframeinfo(stack()[4][0]) msg = ( "Warning not set with correct stacklevel. " f"File where warning is raised: {actual_warning.filename} != " f"{caller.filename}. Warning message: {actual_warning.message}" ) assert actual_warning.filename == caller.filename, msg
import numpy as np import pytest from pandas import ( Categorical, Index, ) import pandas._testing as tm class TestCategoricalSort: def test_argsort(self): c = Categorical([5, 3, 1, 4, 2], ordered=True) expected = np.array([2, 4, 1, 3, 0]) tm.assert_numpy_array_equal( c.argsort(ascending=True), expected, check_dtype=False ) expected = expected[::-1] tm.assert_numpy_array_equal( c.argsort(ascending=False), expected, check_dtype=False ) def test_numpy_argsort(self): c = Categorical([5, 3, 1, 4, 2], ordered=True) expected = np.array([2, 4, 1, 3, 0]) tm.assert_numpy_array_equal(np.argsort(c), expected, check_dtype=False) tm.assert_numpy_array_equal( np.argsort(c, kind="mergesort"), expected, check_dtype=False ) msg = "the 'axis' parameter is not supported" with pytest.raises(ValueError, match=msg): np.argsort(c, axis=0) msg = "the 'order' parameter is not supported" with pytest.raises(ValueError, match=msg): np.argsort(c, order="C") def test_sort_values(self): # unordered cats are sortable cat = Categorical(["a", "b", "b", "a"], ordered=False) cat.sort_values() cat = Categorical(["a", "c", "b", "d"], ordered=True) # sort_values res = cat.sort_values() exp = np.array(["a", "b", "c", "d"], dtype=object) tm.assert_numpy_array_equal(res.__array__(), exp) tm.assert_index_equal(res.categories, cat.categories) cat = Categorical( ["a", "c", "b", "d"], categories=["a", "b", "c", "d"], ordered=True ) res = cat.sort_values() exp = np.array(["a", "b", "c", "d"], dtype=object) tm.assert_numpy_array_equal(res.__array__(), exp) tm.assert_index_equal(res.categories, cat.categories) res = cat.sort_values(ascending=False) exp = np.array(["d", "c", "b", "a"], dtype=object) tm.assert_numpy_array_equal(res.__array__(), exp) tm.assert_index_equal(res.categories, cat.categories) # sort (inplace order) cat1 = cat.copy() orig_codes = cat1._codes cat1.sort_values(inplace=True) assert cat1._codes is orig_codes exp = np.array(["a", "b", "c", "d"], dtype=object) 
tm.assert_numpy_array_equal(cat1.__array__(), exp) tm.assert_index_equal(res.categories, cat.categories) # reverse cat = Categorical(["a", "c", "c", "b", "d"], ordered=True) res = cat.sort_values(ascending=False) exp_val = np.array(["d", "c", "c", "b", "a"], dtype=object) exp_categories = Index(["a", "b", "c", "d"]) tm.assert_numpy_array_equal(res.__array__(), exp_val) tm.assert_index_equal(res.categories, exp_categories) def test_sort_values_na_position(self): # see gh-12882 cat = Categorical([5, 2, np.nan, 2, np.nan], ordered=True) exp_categories = Index([2, 5]) exp = np.array([2.0, 2.0, 5.0, np.nan, np.nan]) res = cat.sort_values() # default arguments tm.assert_numpy_array_equal(res.__array__(), exp) tm.assert_index_equal(res.categories, exp_categories) exp = np.array([np.nan, np.nan, 2.0, 2.0, 5.0]) res = cat.sort_values(ascending=True, na_position="first") tm.assert_numpy_array_equal(res.__array__(), exp) tm.assert_index_equal(res.categories, exp_categories) exp = np.array([np.nan, np.nan, 5.0, 2.0, 2.0]) res = cat.sort_values(ascending=False, na_position="first") tm.assert_numpy_array_equal(res.__array__(), exp) tm.assert_index_equal(res.categories, exp_categories) exp = np.array([2.0, 2.0, 5.0, np.nan, np.nan]) res = cat.sort_values(ascending=True, na_position="last") tm.assert_numpy_array_equal(res.__array__(), exp) tm.assert_index_equal(res.categories, exp_categories) exp = np.array([5.0, 2.0, 2.0, np.nan, np.nan]) res = cat.sort_values(ascending=False, na_position="last") tm.assert_numpy_array_equal(res.__array__(), exp) tm.assert_index_equal(res.categories, exp_categories) cat = Categorical(["a", "c", "b", "d", np.nan], ordered=True) res = cat.sort_values(ascending=False, na_position="last") exp_val = np.array(["d", "c", "b", "a", np.nan], dtype=object) exp_categories = Index(["a", "b", "c", "d"]) tm.assert_numpy_array_equal(res.__array__(), exp_val) tm.assert_index_equal(res.categories, exp_categories) cat = Categorical(["a", "c", "b", "d", np.nan], 
ordered=True) res = cat.sort_values(ascending=False, na_position="first") exp_val = np.array([np.nan, "d", "c", "b", "a"], dtype=object) exp_categories = Index(["a", "b", "c", "d"]) tm.assert_numpy_array_equal(res.__array__(), exp_val) tm.assert_index_equal(res.categories, exp_categories)
datapythonista/pandas
pandas/tests/arrays/categorical/test_sorting.py
pandas/_testing/_warnings.py
from typing import Optional import numpy as np from pandas._libs import lib from pandas.core.dtypes.cast import maybe_downcast_numeric from pandas.core.dtypes.common import ( ensure_object, is_datetime_or_timedelta_dtype, is_decimal, is_integer_dtype, is_number, is_numeric_dtype, is_scalar, needs_i8_conversion, ) from pandas.core.dtypes.generic import ( ABCIndex, ABCSeries, ) import pandas as pd from pandas.core.arrays.numeric import NumericArray def to_numeric(arg, errors="raise", downcast=None): """ Convert argument to a numeric type. The default return dtype is `float64` or `int64` depending on the data supplied. Use the `downcast` parameter to obtain other dtypes. Please note that precision loss may occur if really large numbers are passed in. Due to the internal limitations of `ndarray`, if numbers smaller than `-9223372036854775808` (np.iinfo(np.int64).min) or larger than `18446744073709551615` (np.iinfo(np.uint64).max) are passed in, it is very likely they will be converted to float so that they can stored in an `ndarray`. These warnings apply similarly to `Series` since it internally leverages `ndarray`. Parameters ---------- arg : scalar, list, tuple, 1-d array, or Series Argument to be converted. errors : {'ignore', 'raise', 'coerce'}, default 'raise' - If 'raise', then invalid parsing will raise an exception. - If 'coerce', then invalid parsing will be set as NaN. - If 'ignore', then invalid parsing will return the input. 
downcast : {'integer', 'signed', 'unsigned', 'float'}, default None If not None, and if the data has been successfully cast to a numerical dtype (or if the data was numeric to begin with), downcast that resulting data to the smallest numerical dtype possible according to the following rules: - 'integer' or 'signed': smallest signed int dtype (min.: np.int8) - 'unsigned': smallest unsigned int dtype (min.: np.uint8) - 'float': smallest float dtype (min.: np.float32) As this behaviour is separate from the core conversion to numeric values, any errors raised during the downcasting will be surfaced regardless of the value of the 'errors' input. In addition, downcasting will only occur if the size of the resulting data's dtype is strictly larger than the dtype it is to be cast to, so if none of the dtypes checked satisfy that specification, no downcasting will be performed on the data. Returns ------- ret Numeric if parsing succeeded. Return type depends on input. Series if Series, otherwise ndarray. See Also -------- DataFrame.astype : Cast argument to a specified dtype. to_datetime : Convert argument to datetime. to_timedelta : Convert argument to timedelta. numpy.ndarray.astype : Cast a numpy array to a specified type. DataFrame.convert_dtypes : Convert dtypes. 
Examples -------- Take separate series and convert to numeric, coercing when told to >>> s = pd.Series(['1.0', '2', -3]) >>> pd.to_numeric(s) 0 1.0 1 2.0 2 -3.0 dtype: float64 >>> pd.to_numeric(s, downcast='float') 0 1.0 1 2.0 2 -3.0 dtype: float32 >>> pd.to_numeric(s, downcast='signed') 0 1 1 2 2 -3 dtype: int8 >>> s = pd.Series(['apple', '1.0', '2', -3]) >>> pd.to_numeric(s, errors='ignore') 0 apple 1 1.0 2 2 3 -3 dtype: object >>> pd.to_numeric(s, errors='coerce') 0 NaN 1 1.0 2 2.0 3 -3.0 dtype: float64 Downcasting of nullable integer and floating dtypes is supported: >>> s = pd.Series([1, 2, 3], dtype="Int64") >>> pd.to_numeric(s, downcast="integer") 0 1 1 2 2 3 dtype: Int8 >>> s = pd.Series([1.0, 2.1, 3.0], dtype="Float64") >>> pd.to_numeric(s, downcast="float") 0 1.0 1 2.1 2 3.0 dtype: Float32 """ if downcast not in (None, "integer", "signed", "unsigned", "float"): raise ValueError("invalid downcasting method provided") if errors not in ("ignore", "raise", "coerce"): raise ValueError("invalid error value specified") is_series = False is_index = False is_scalars = False if isinstance(arg, ABCSeries): is_series = True values = arg.values elif isinstance(arg, ABCIndex): is_index = True if needs_i8_conversion(arg.dtype): values = arg.asi8 else: values = arg.values elif isinstance(arg, (list, tuple)): values = np.array(arg, dtype="O") elif is_scalar(arg): if is_decimal(arg): return float(arg) if is_number(arg): return arg is_scalars = True values = np.array([arg], dtype="O") elif getattr(arg, "ndim", 1) > 1: raise TypeError("arg must be a list, tuple, 1-d array, or Series") else: values = arg # GH33013: for IntegerArray & FloatingArray extract non-null values for casting # save mask to reconstruct the full array after casting mask: Optional[np.ndarray] = None if isinstance(values, NumericArray): mask = values._mask values = values._data[~mask] values_dtype = getattr(values, "dtype", None) if is_numeric_dtype(values_dtype): pass elif 
is_datetime_or_timedelta_dtype(values_dtype): values = values.view(np.int64) else: values = ensure_object(values) coerce_numeric = errors not in ("ignore", "raise") try: values, _ = lib.maybe_convert_numeric( values, set(), coerce_numeric=coerce_numeric ) except (ValueError, TypeError): if errors == "raise": raise # attempt downcast only if the data has been successfully converted # to a numerical dtype and if a downcast method has been specified if downcast is not None and is_numeric_dtype(values.dtype): typecodes = None if downcast in ("integer", "signed"): typecodes = np.typecodes["Integer"] elif downcast == "unsigned" and (not len(values) or np.min(values) >= 0): typecodes = np.typecodes["UnsignedInteger"] elif downcast == "float": typecodes = np.typecodes["Float"] # pandas support goes only to np.float32, # as float dtypes smaller than that are # extremely rare and not well supported float_32_char = np.dtype(np.float32).char float_32_ind = typecodes.index(float_32_char) typecodes = typecodes[float_32_ind:] if typecodes is not None: # from smallest to largest for dtype in typecodes: dtype = np.dtype(dtype) if dtype.itemsize <= values.dtype.itemsize: values = maybe_downcast_numeric(values, dtype) # successful conversion if values.dtype == dtype: break # GH33013: for IntegerArray & FloatingArray need to reconstruct masked array if mask is not None: data = np.zeros(mask.shape, dtype=values.dtype) data[~mask] = values from pandas.core.arrays import ( FloatingArray, IntegerArray, ) klass = IntegerArray if is_integer_dtype(data.dtype) else FloatingArray values = klass(data, mask.copy()) if is_series: return arg._constructor(values, index=arg.index, name=arg.name) elif is_index: # because we want to coerce to numeric if possible, # do not use _shallow_copy return pd.Index(values, name=arg.name) elif is_scalars: return values[0] else: return values
import numpy as np
import pytest

from pandas import (
    Categorical,
    Index,
)
import pandas._testing as tm


class TestCategoricalSort:
    """Tests for sorting behaviour of ``Categorical``.

    Covers ``argsort`` (both the method and the ``np.argsort`` protocol),
    ``sort_values`` (out-of-place and in-place) and NaN placement via
    ``na_position``.
    """

    def test_argsort(self):
        # argsort orders by category order; here categories are inferred
        # sorted, so the result sorts the underlying values.
        c = Categorical([5, 3, 1, 4, 2], ordered=True)

        expected = np.array([2, 4, 1, 3, 0])
        tm.assert_numpy_array_equal(
            c.argsort(ascending=True), expected, check_dtype=False
        )

        expected = expected[::-1]
        tm.assert_numpy_array_equal(
            c.argsort(ascending=False), expected, check_dtype=False
        )

    def test_numpy_argsort(self):
        c = Categorical([5, 3, 1, 4, 2], ordered=True)

        expected = np.array([2, 4, 1, 3, 0])
        tm.assert_numpy_array_equal(np.argsort(c), expected, check_dtype=False)

        tm.assert_numpy_array_equal(
            np.argsort(c, kind="mergesort"), expected, check_dtype=False
        )

        # np.argsort's ``axis``/``order`` kwargs are not supported for
        # Categorical and must raise.
        msg = "the 'axis' parameter is not supported"
        with pytest.raises(ValueError, match=msg):
            np.argsort(c, axis=0)

        msg = "the 'order' parameter is not supported"
        with pytest.raises(ValueError, match=msg):
            np.argsort(c, order="C")

    def test_sort_values(self):
        # unordered cats are sortable
        cat = Categorical(["a", "b", "b", "a"], ordered=False)
        cat.sort_values()

        cat = Categorical(["a", "c", "b", "d"], ordered=True)

        # sort_values returns a new, sorted Categorical; categories unchanged
        res = cat.sort_values()
        exp = np.array(["a", "b", "c", "d"], dtype=object)
        tm.assert_numpy_array_equal(res.__array__(), exp)
        tm.assert_index_equal(res.categories, cat.categories)

        cat = Categorical(
            ["a", "c", "b", "d"], categories=["a", "b", "c", "d"], ordered=True
        )
        res = cat.sort_values()
        exp = np.array(["a", "b", "c", "d"], dtype=object)
        tm.assert_numpy_array_equal(res.__array__(), exp)
        tm.assert_index_equal(res.categories, cat.categories)

        res = cat.sort_values(ascending=False)
        exp = np.array(["d", "c", "b", "a"], dtype=object)
        tm.assert_numpy_array_equal(res.__array__(), exp)
        tm.assert_index_equal(res.categories, cat.categories)

        # sort (inplace order)
        cat1 = cat.copy()
        orig_codes = cat1._codes
        cat1.sort_values(inplace=True)
        assert cat1._codes is orig_codes
        exp = np.array(["a", "b", "c", "d"], dtype=object)
        tm.assert_numpy_array_equal(cat1.__array__(), exp)
        # BUG FIX: this previously asserted on ``res.categories`` — a stale
        # variable left over from the out-of-place case above. The in-place
        # result is ``cat1``; its categories must still match the original.
        tm.assert_index_equal(cat1.categories, cat.categories)

        # reverse
        cat = Categorical(["a", "c", "c", "b", "d"], ordered=True)
        res = cat.sort_values(ascending=False)
        exp_val = np.array(["d", "c", "c", "b", "a"], dtype=object)
        exp_categories = Index(["a", "b", "c", "d"])
        tm.assert_numpy_array_equal(res.__array__(), exp_val)
        tm.assert_index_equal(res.categories, exp_categories)

    def test_sort_values_na_position(self):
        # see gh-12882
        cat = Categorical([5, 2, np.nan, 2, np.nan], ordered=True)
        exp_categories = Index([2, 5])

        exp = np.array([2.0, 2.0, 5.0, np.nan, np.nan])
        res = cat.sort_values()  # default arguments
        tm.assert_numpy_array_equal(res.__array__(), exp)
        tm.assert_index_equal(res.categories, exp_categories)

        exp = np.array([np.nan, np.nan, 2.0, 2.0, 5.0])
        res = cat.sort_values(ascending=True, na_position="first")
        tm.assert_numpy_array_equal(res.__array__(), exp)
        tm.assert_index_equal(res.categories, exp_categories)

        exp = np.array([np.nan, np.nan, 5.0, 2.0, 2.0])
        res = cat.sort_values(ascending=False, na_position="first")
        tm.assert_numpy_array_equal(res.__array__(), exp)
        tm.assert_index_equal(res.categories, exp_categories)

        exp = np.array([2.0, 2.0, 5.0, np.nan, np.nan])
        res = cat.sort_values(ascending=True, na_position="last")
        tm.assert_numpy_array_equal(res.__array__(), exp)
        tm.assert_index_equal(res.categories, exp_categories)

        exp = np.array([5.0, 2.0, 2.0, np.nan, np.nan])
        res = cat.sort_values(ascending=False, na_position="last")
        tm.assert_numpy_array_equal(res.__array__(), exp)
        tm.assert_index_equal(res.categories, exp_categories)

        cat = Categorical(["a", "c", "b", "d", np.nan], ordered=True)
        res = cat.sort_values(ascending=False, na_position="last")
        exp_val = np.array(["d", "c", "b", "a", np.nan], dtype=object)
        exp_categories = Index(["a", "b", "c", "d"])
        tm.assert_numpy_array_equal(res.__array__(), exp_val)
        tm.assert_index_equal(res.categories, exp_categories)

        cat = Categorical(["a", "c", "b", "d", np.nan], ordered=True)
        res = cat.sort_values(ascending=False, na_position="first")
        exp_val = np.array([np.nan, "d", "c", "b", "a"], dtype=object)
        exp_categories = Index(["a", "b", "c", "d"])
        tm.assert_numpy_array_equal(res.__array__(), exp_val)
        tm.assert_index_equal(res.categories, exp_categories)
datapythonista/pandas
pandas/tests/arrays/categorical/test_sorting.py
pandas/core/tools/numeric.py
""" A collection of image utilities using the Python Imaging Library (PIL). This is a local version of utility functions from scipy that are wrapping PIL functionality. These functions are deprecated in scipy 1.0.0 and will be removed in scipy 1.2.0. Therefore, the functionality used in sklearn is copied here. This file is taken from scipy/misc/pilutil.py in scipy 1.0.0. Modifications include: making this module importable if pillow is not installed, removal of DeprecationWarning, removal of functions scikit-learn does not need. Copyright (c) 2001, 2002 Enthought, Inc. All rights reserved. Copyright (c) 2003-2017 SciPy Developers. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: a. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. b. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. c. Neither the name of Enthought nor the names of the SciPy Developers may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """ from __future__ import division, print_function, absolute_import import numpy import tempfile from numpy import (amin, amax, ravel, asarray, arange, ones, newaxis, transpose, iscomplexobj, uint8, issubdtype, array) # Modification of original scipy pilutil.py to make this module importable if # pillow is not installed. If pillow is not installed, functions will raise # ImportError when called. try: try: from PIL import Image except ImportError: import Image pillow_installed = True if not hasattr(Image, 'frombytes'): Image.frombytes = Image.fromstring except ImportError: pillow_installed = False __all__ = ['bytescale', 'imread', 'imsave', 'fromimage', 'toimage', 'imresize'] def bytescale(data, cmin=None, cmax=None, high=255, low=0): """ Byte scales an array (image). Byte scaling means converting the input image to uint8 dtype and scaling the range to ``(low, high)`` (default 0-255). If the input image already has dtype uint8, no scaling is done. This function is only available if Python Imaging Library (PIL) is installed. Parameters ---------- data : ndarray PIL image data array. cmin : scalar, optional Bias scaling of small values. Default is ``data.min()``. cmax : scalar, optional Bias scaling of large values. Default is ``data.max()``. high : scalar, optional Scale max value to `high`. Default is 255. low : scalar, optional Scale min value to `low`. Default is 0. Returns ------- img_array : uint8 ndarray The byte-scaled array. 
Examples -------- >>> from scipy.misc import bytescale >>> img = np.array([[ 91.06794177, 3.39058326, 84.4221549 ], ... [ 73.88003259, 80.91433048, 4.88878881], ... [ 51.53875334, 34.45808177, 27.5873488 ]]) >>> bytescale(img) array([[255, 0, 236], [205, 225, 4], [140, 90, 70]], dtype=uint8) >>> bytescale(img, high=200, low=100) array([[200, 100, 192], [180, 188, 102], [155, 135, 128]], dtype=uint8) >>> bytescale(img, cmin=0, cmax=255) array([[91, 3, 84], [74, 81, 5], [52, 34, 28]], dtype=uint8) """ if data.dtype == uint8: return data if high > 255: raise ValueError("`high` should be less than or equal to 255.") if low < 0: raise ValueError("`low` should be greater than or equal to 0.") if high < low: raise ValueError("`high` should be greater than or equal to `low`.") if cmin is None: cmin = data.min() if cmax is None: cmax = data.max() cscale = cmax - cmin if cscale < 0: raise ValueError("`cmax` should be larger than `cmin`.") elif cscale == 0: cscale = 1 scale = float(high - low) / cscale bytedata = (data - cmin) * scale + low return (bytedata.clip(low, high) + 0.5).astype(uint8) def imread(name, flatten=False, mode=None): """ Read an image from a file as an array. This function is only available if Python Imaging Library (PIL) is installed. Parameters ---------- name : str or file object The file name or file object to be read. flatten : bool, optional If True, flattens the color layers into a single gray-scale layer. mode : str, optional Mode to convert image to, e.g. ``'RGB'``. See the Notes for more details. Returns ------- imread : ndarray The array obtained by reading the image. Notes ----- `imread` uses the Python Imaging Library (PIL) to read an image. The following notes are from the PIL documentation. 
`mode` can be one of the following strings: * 'L' (8-bit pixels, black and white) * 'P' (8-bit pixels, mapped to any other mode using a color palette) * 'RGB' (3x8-bit pixels, true color) * 'RGBA' (4x8-bit pixels, true color with transparency mask) * 'CMYK' (4x8-bit pixels, color separation) * 'YCbCr' (3x8-bit pixels, color video format) * 'I' (32-bit signed integer pixels) * 'F' (32-bit floating point pixels) PIL also provides limited support for a few special modes, including 'LA' ('L' with alpha), 'RGBX' (true color with padding) and 'RGBa' (true color with premultiplied alpha). When translating a color image to black and white (mode 'L', 'I' or 'F'), the library uses the ITU-R 601-2 luma transform:: L = R * 299/1000 + G * 587/1000 + B * 114/1000 When `flatten` is True, the image is converted using mode 'F'. When `mode` is not None and `flatten` is True, the image is first converted according to `mode`, and the result is then flattened using mode 'F'. """ if not pillow_installed: raise ImportError("The Python Imaging Library (PIL) " "is required to load data from jpeg files") im = Image.open(name) return fromimage(im, flatten=flatten, mode=mode) def imsave(name, arr, format=None): """ Save an array as an image. This function is only available if Python Imaging Library (PIL) is installed. .. warning:: This function uses `bytescale` under the hood to rescale images to use the full (0, 255) range if ``mode`` is one of ``None, 'L', 'P', 'l'``. It will also cast data for 2-D images to ``uint32`` for ``mode=None`` (which is the default). Parameters ---------- name : str or file object Output file name or file object. arr : ndarray, MxN or MxNx3 or MxNx4 Array containing image values. If the shape is ``MxN``, the array represents a grey-level image. Shape ``MxNx3`` stores the red, green and blue bands along the last dimension. An alpha layer may be included, specified as the last colour band of an ``MxNx4`` array. format : str Image format. 
If omitted, the format to use is determined from the file name extension. If a file object was used instead of a file name, this parameter should always be used. Examples -------- Construct an array of gradient intensity values and save to file: >>> from scipy.misc import imsave >>> x = np.zeros((255, 255)) >>> x = np.zeros((255, 255), dtype=np.uint8) >>> x[:] = np.arange(255) >>> imsave('gradient.png', x) Construct an array with three colour bands (R, G, B) and store to file: >>> rgb = np.zeros((255, 255, 3), dtype=np.uint8) >>> rgb[..., 0] = np.arange(255) >>> rgb[..., 1] = 55 >>> rgb[..., 2] = 1 - np.arange(255) >>> imsave('rgb_gradient.png', rgb) """ im = toimage(arr, channel_axis=2) if format is None: im.save(name) else: im.save(name, format) return def fromimage(im, flatten=False, mode=None): """ Return a copy of a PIL image as a numpy array. This function is only available if Python Imaging Library (PIL) is installed. Parameters ---------- im : PIL image Input image. flatten : bool If true, convert the output to grey-scale. mode : str, optional Mode to convert image to, e.g. ``'RGB'``. See the Notes of the `imread` docstring for more details. Returns ------- fromimage : ndarray The different colour bands/channels are stored in the third dimension, such that a grey-image is MxN, an RGB-image MxNx3 and an RGBA-image MxNx4. """ if not pillow_installed: raise ImportError("The Python Imaging Library (PIL) " "is required to load data from jpeg files") if not Image.isImageType(im): raise TypeError("Input is not a PIL image.") if mode is not None: if mode != im.mode: im = im.convert(mode) elif im.mode == 'P': # Mode 'P' means there is an indexed "palette". If we leave the mode # as 'P', then when we do `a = array(im)` below, `a` will be a 2-D # containing the indices into the palette, and not a 3-D array # containing the RGB or RGBA values. 
if 'transparency' in im.info: im = im.convert('RGBA') else: im = im.convert('RGB') if flatten: im = im.convert('F') elif im.mode == '1': # Workaround for crash in PIL. When im is 1-bit, the call array(im) # can cause a seg. fault, or generate garbage. See # https://github.com/scipy/scipy/issues/2138 and # https://github.com/python-pillow/Pillow/issues/350. # # This converts im from a 1-bit image to an 8-bit image. im = im.convert('L') a = array(im) return a _errstr = "Mode is unknown or incompatible with input array shape." def toimage(arr, high=255, low=0, cmin=None, cmax=None, pal=None, mode=None, channel_axis=None): """Takes a numpy array and returns a PIL image. This function is only available if Python Imaging Library (PIL) is installed. The mode of the PIL image depends on the array shape and the `pal` and `mode` keywords. For 2-D arrays, if `pal` is a valid (N,3) byte-array giving the RGB values (from 0 to 255) then ``mode='P'``, otherwise ``mode='L'``, unless mode is given as 'F' or 'I' in which case a float and/or integer array is made. .. warning:: This function uses `bytescale` under the hood to rescale images to use the full (0, 255) range if ``mode`` is one of ``None, 'L', 'P', 'l'``. It will also cast data for 2-D images to ``uint32`` for ``mode=None`` (which is the default). Notes ----- For 3-D arrays, the `channel_axis` argument tells which dimension of the array holds the channel data. For 3-D arrays if one of the dimensions is 3, the mode is 'RGB' by default or 'YCbCr' if selected. The numpy array must be either 2 dimensional or 3 dimensional. 
""" if not pillow_installed: raise ImportError("The Python Imaging Library (PIL) " "is required to load data from jpeg files") data = asarray(arr) if iscomplexobj(data): raise ValueError("Cannot convert a complex-valued array.") shape = list(data.shape) valid = len(shape) == 2 or ((len(shape) == 3) and ((3 in shape) or (4 in shape))) if not valid: raise ValueError("'arr' does not have a suitable array shape for " "any mode.") if len(shape) == 2: shape = (shape[1], shape[0]) # columns show up first if mode == 'F': data32 = data.astype(numpy.float32) image = Image.frombytes(mode, shape, data32.tostring()) return image if mode in [None, 'L', 'P']: bytedata = bytescale(data, high=high, low=low, cmin=cmin, cmax=cmax) image = Image.frombytes('L', shape, bytedata.tostring()) if pal is not None: image.putpalette(asarray(pal, dtype=uint8).tostring()) # Becomes a mode='P' automagically. elif mode == 'P': # default gray-scale pal = (arange(0, 256, 1, dtype=uint8)[:, newaxis] * ones((3,), dtype=uint8)[newaxis, :]) image.putpalette(asarray(pal, dtype=uint8).tostring()) return image if mode == '1': # high input gives threshold for 1 bytedata = (data > high) image = Image.frombytes('1', shape, bytedata.tostring()) return image if cmin is None: cmin = amin(ravel(data)) if cmax is None: cmax = amax(ravel(data)) data = (data*1.0 - cmin)*(high - low)/(cmax - cmin) + low if mode == 'I': data32 = data.astype(numpy.uint32) image = Image.frombytes(mode, shape, data32.tostring()) else: raise ValueError(_errstr) return image # if here then 3-d array with a 3 or a 4 in the shape length. 
# Check for 3 in datacube shape --- 'RGB' or 'YCbCr' if channel_axis is None: if (3 in shape): ca = numpy.flatnonzero(asarray(shape) == 3)[0] else: ca = numpy.flatnonzero(asarray(shape) == 4) if len(ca): ca = ca[0] else: raise ValueError("Could not find channel dimension.") else: ca = channel_axis numch = shape[ca] if numch not in [3, 4]: raise ValueError("Channel axis dimension is not valid.") bytedata = bytescale(data, high=high, low=low, cmin=cmin, cmax=cmax) if ca == 2: strdata = bytedata.tostring() shape = (shape[1], shape[0]) elif ca == 1: strdata = transpose(bytedata, (0, 2, 1)).tostring() shape = (shape[2], shape[0]) elif ca == 0: strdata = transpose(bytedata, (1, 2, 0)).tostring() shape = (shape[2], shape[1]) if mode is None: if numch == 3: mode = 'RGB' else: mode = 'RGBA' if mode not in ['RGB', 'RGBA', 'YCbCr', 'CMYK']: raise ValueError(_errstr) if mode in ['RGB', 'YCbCr']: if numch != 3: raise ValueError("Invalid array shape for mode.") if mode in ['RGBA', 'CMYK']: if numch != 4: raise ValueError("Invalid array shape for mode.") # Here we know data and mode is correct image = Image.frombytes(mode, shape, strdata) return image def imresize(arr, size, interp='bilinear', mode=None): """ Resize an image. This function is only available if Python Imaging Library (PIL) is installed. .. warning:: This function uses `bytescale` under the hood to rescale images to use the full (0, 255) range if ``mode`` is one of ``None, 'L', 'P', 'l'``. It will also cast data for 2-D images to ``uint32`` for ``mode=None`` (which is the default). Parameters ---------- arr : ndarray The array of image to be resized. size : int, float or tuple * int - Percentage of current size. * float - Fraction of current size. * tuple - Size of the output image (height, width). interp : str, optional Interpolation to use for re-sizing ('nearest', 'lanczos', 'bilinear', 'bicubic' or 'cubic'). mode : str, optional The PIL image mode ('P', 'L', etc.) to convert `arr` before resizing. 
If ``mode=None`` (the default), 2-D images will be treated like ``mode='L'``, i.e. casting to long integer. For 3-D and 4-D arrays, `mode` will be set to ``'RGB'`` and ``'RGBA'`` respectively. Returns ------- imresize : ndarray The resized array of image. See Also -------- toimage : Implicitly used to convert `arr` according to `mode`. scipy.ndimage.zoom : More generic implementation that does not use PIL. """ im = toimage(arr, mode=mode) ts = type(size) if issubdtype(ts, numpy.signedinteger): percent = size / 100.0 size = tuple((array(im.size)*percent).astype(int)) elif issubdtype(type(size), numpy.floating): size = tuple((array(im.size)*size).astype(int)) else: size = (size[1], size[0]) func = {'nearest': 0, 'lanczos': 1, 'bilinear': 2, 'bicubic': 3, 'cubic': 3} imnew = im.resize(size, resample=func[interp]) return fromimage(imnew)
""" Testing for Support Vector Machine module (sklearn.svm) TODO: remove hard coded numerical results when possible """ import numpy as np import itertools import pytest from numpy.testing import assert_array_equal, assert_array_almost_equal from numpy.testing import assert_almost_equal from numpy.testing import assert_allclose from scipy import sparse from sklearn import svm, linear_model, datasets, metrics, base from sklearn.model_selection import train_test_split from sklearn.datasets import make_classification, make_blobs from sklearn.metrics import f1_score from sklearn.metrics.pairwise import rbf_kernel from sklearn.utils import check_random_state from sklearn.utils.testing import assert_equal, assert_true, assert_false from sklearn.utils.testing import assert_greater, assert_in, assert_less from sklearn.utils.testing import assert_raises_regexp, assert_warns from sklearn.utils.testing import assert_warns_message, assert_raise_message from sklearn.utils.testing import ignore_warnings, assert_raises from sklearn.utils.testing import assert_no_warnings from sklearn.exceptions import ConvergenceWarning from sklearn.exceptions import NotFittedError, UndefinedMetricWarning from sklearn.multiclass import OneVsRestClassifier from sklearn.externals import six # toy sample X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]] Y = [1, 1, 1, 2, 2, 2] T = [[-1, -1], [2, 2], [3, 2]] true_result = [1, 2, 2] # also load the iris dataset iris = datasets.load_iris() rng = check_random_state(42) perm = rng.permutation(iris.target.size) iris.data = iris.data[perm] iris.target = iris.target[perm] def test_libsvm_parameters(): # Test parameters on classes that make use of libsvm. 
clf = svm.SVC(kernel='linear').fit(X, Y) assert_array_equal(clf.dual_coef_, [[-0.25, .25]]) assert_array_equal(clf.support_, [1, 3]) assert_array_equal(clf.support_vectors_, (X[1], X[3])) assert_array_equal(clf.intercept_, [0.]) assert_array_equal(clf.predict(X), Y) def test_libsvm_iris(): # Check consistency on dataset iris. # shuffle the dataset so that labels are not ordered for k in ('linear', 'rbf'): clf = svm.SVC(gamma='scale', kernel=k).fit(iris.data, iris.target) assert_greater(np.mean(clf.predict(iris.data) == iris.target), 0.9) assert_true(hasattr(clf, "coef_") == (k == 'linear')) assert_array_equal(clf.classes_, np.sort(clf.classes_)) # check also the low-level API model = svm.libsvm.fit(iris.data, iris.target.astype(np.float64)) pred = svm.libsvm.predict(iris.data, *model) assert_greater(np.mean(pred == iris.target), .95) model = svm.libsvm.fit(iris.data, iris.target.astype(np.float64), kernel='linear') pred = svm.libsvm.predict(iris.data, *model, kernel='linear') assert_greater(np.mean(pred == iris.target), .95) pred = svm.libsvm.cross_validation(iris.data, iris.target.astype(np.float64), 5, kernel='linear', random_seed=0) assert_greater(np.mean(pred == iris.target), .95) # If random_seed >= 0, the libsvm rng is seeded (by calling `srand`), hence # we should get deterministic results (assuming that there is no other # thread calling this wrapper calling `srand` concurrently). pred2 = svm.libsvm.cross_validation(iris.data, iris.target.astype(np.float64), 5, kernel='linear', random_seed=0) assert_array_equal(pred, pred2) def test_precomputed(): # SVC with a precomputed kernel. # We test it with a toy dataset and with iris. 
clf = svm.SVC(kernel='precomputed') # Gram matrix for train data (square matrix) # (we use just a linear kernel) K = np.dot(X, np.array(X).T) clf.fit(K, Y) # Gram matrix for test data (rectangular matrix) KT = np.dot(T, np.array(X).T) pred = clf.predict(KT) assert_raises(ValueError, clf.predict, KT.T) assert_array_equal(clf.dual_coef_, [[-0.25, .25]]) assert_array_equal(clf.support_, [1, 3]) assert_array_equal(clf.intercept_, [0]) assert_array_almost_equal(clf.support_, [1, 3]) assert_array_equal(pred, true_result) # Gram matrix for test data but compute KT[i,j] # for support vectors j only. KT = np.zeros_like(KT) for i in range(len(T)): for j in clf.support_: KT[i, j] = np.dot(T[i], X[j]) pred = clf.predict(KT) assert_array_equal(pred, true_result) # same as before, but using a callable function instead of the kernel # matrix. kernel is just a linear kernel kfunc = lambda x, y: np.dot(x, y.T) clf = svm.SVC(gamma='scale', kernel=kfunc) clf.fit(X, Y) pred = clf.predict(T) assert_array_equal(clf.dual_coef_, [[-0.25, .25]]) assert_array_equal(clf.intercept_, [0]) assert_array_almost_equal(clf.support_, [1, 3]) assert_array_equal(pred, true_result) # test a precomputed kernel with the iris dataset # and check parameters against a linear SVC clf = svm.SVC(kernel='precomputed') clf2 = svm.SVC(kernel='linear') K = np.dot(iris.data, iris.data.T) clf.fit(K, iris.target) clf2.fit(iris.data, iris.target) pred = clf.predict(K) assert_array_almost_equal(clf.support_, clf2.support_) assert_array_almost_equal(clf.dual_coef_, clf2.dual_coef_) assert_array_almost_equal(clf.intercept_, clf2.intercept_) assert_almost_equal(np.mean(pred == iris.target), .99, decimal=2) # Gram matrix for test data but compute KT[i,j] # for support vectors j only. 
K = np.zeros_like(K) for i in range(len(iris.data)): for j in clf.support_: K[i, j] = np.dot(iris.data[i], iris.data[j]) pred = clf.predict(K) assert_almost_equal(np.mean(pred == iris.target), .99, decimal=2) clf = svm.SVC(gamma='scale', kernel=kfunc) clf.fit(iris.data, iris.target) assert_almost_equal(np.mean(pred == iris.target), .99, decimal=2) def test_svr(): # Test Support Vector Regression diabetes = datasets.load_diabetes() for clf in (svm.NuSVR(kernel='linear', nu=.4, C=1.0), svm.NuSVR(kernel='linear', nu=.4, C=10.), svm.SVR(kernel='linear', C=10.), svm.LinearSVR(C=10.), svm.LinearSVR(C=10.), ): clf.fit(diabetes.data, diabetes.target) assert_greater(clf.score(diabetes.data, diabetes.target), 0.02) # non-regression test; previously, BaseLibSVM would check that # len(np.unique(y)) < 2, which must only be done for SVC svm.SVR(gamma='scale').fit(diabetes.data, np.ones(len(diabetes.data))) svm.LinearSVR().fit(diabetes.data, np.ones(len(diabetes.data))) def test_linearsvr(): # check that SVR(kernel='linear') and LinearSVC() give # comparable results diabetes = datasets.load_diabetes() lsvr = svm.LinearSVR(C=1e3).fit(diabetes.data, diabetes.target) score1 = lsvr.score(diabetes.data, diabetes.target) svr = svm.SVR(kernel='linear', C=1e3).fit(diabetes.data, diabetes.target) score2 = svr.score(diabetes.data, diabetes.target) assert_allclose(np.linalg.norm(lsvr.coef_), np.linalg.norm(svr.coef_), 1, 0.0001) assert_almost_equal(score1, score2, 2) def test_linearsvr_fit_sampleweight(): # check correct result when sample_weight is 1 # check that SVR(kernel='linear') and LinearSVC() give # comparable results diabetes = datasets.load_diabetes() n_samples = len(diabetes.target) unit_weight = np.ones(n_samples) lsvr = svm.LinearSVR(C=1e3).fit(diabetes.data, diabetes.target, sample_weight=unit_weight) score1 = lsvr.score(diabetes.data, diabetes.target) lsvr_no_weight = svm.LinearSVR(C=1e3).fit(diabetes.data, diabetes.target) score2 = lsvr_no_weight.score(diabetes.data, 
diabetes.target) assert_allclose(np.linalg.norm(lsvr.coef_), np.linalg.norm(lsvr_no_weight.coef_), 1, 0.0001) assert_almost_equal(score1, score2, 2) # check that fit(X) = fit([X1, X2, X3],sample_weight = [n1, n2, n3]) where # X = X1 repeated n1 times, X2 repeated n2 times and so forth random_state = check_random_state(0) random_weight = random_state.randint(0, 10, n_samples) lsvr_unflat = svm.LinearSVR(C=1e3).fit(diabetes.data, diabetes.target, sample_weight=random_weight) score3 = lsvr_unflat.score(diabetes.data, diabetes.target, sample_weight=random_weight) X_flat = np.repeat(diabetes.data, random_weight, axis=0) y_flat = np.repeat(diabetes.target, random_weight, axis=0) lsvr_flat = svm.LinearSVR(C=1e3).fit(X_flat, y_flat) score4 = lsvr_flat.score(X_flat, y_flat) assert_almost_equal(score3, score4, 2) def test_svr_errors(): X = [[0.0], [1.0]] y = [0.0, 0.5] # Bad kernel clf = svm.SVR(gamma='scale', kernel=lambda x, y: np.array([[1.0]])) clf.fit(X, y) assert_raises(ValueError, clf.predict, X) def test_oneclass(): # Test OneClassSVM clf = svm.OneClassSVM(gamma='scale') clf.fit(X) pred = clf.predict(T) assert_array_equal(pred, [-1, -1, -1]) assert_equal(pred.dtype, np.dtype('intp')) assert_array_almost_equal(clf.intercept_, [-1.117], decimal=3) assert_array_almost_equal(clf.dual_coef_, [[0.681, 0.139, 0.68, 0.14, 0.68, 0.68]], decimal=3) assert_raises(AttributeError, lambda: clf.coef_) def test_oneclass_decision_function(): # Test OneClassSVM decision function clf = svm.OneClassSVM() rnd = check_random_state(2) # Generate train data X = 0.3 * rnd.randn(100, 2) X_train = np.r_[X + 2, X - 2] # Generate some regular novel observations X = 0.3 * rnd.randn(20, 2) X_test = np.r_[X + 2, X - 2] # Generate some abnormal novel observations X_outliers = rnd.uniform(low=-4, high=4, size=(20, 2)) # fit the model clf = svm.OneClassSVM(nu=0.1, kernel="rbf", gamma=0.1) clf.fit(X_train) # predict things y_pred_test = clf.predict(X_test) assert_greater(np.mean(y_pred_test == 1), .9) 
y_pred_outliers = clf.predict(X_outliers) assert_greater(np.mean(y_pred_outliers == -1), .9) dec_func_test = clf.decision_function(X_test) assert_array_equal((dec_func_test > 0).ravel(), y_pred_test == 1) dec_func_outliers = clf.decision_function(X_outliers) assert_array_equal((dec_func_outliers > 0).ravel(), y_pred_outliers == 1) def test_oneclass_score_samples(): X_train = [[1, 1], [1, 2], [2, 1]] clf = svm.OneClassSVM(gamma=1).fit(X_train) assert_array_equal(clf.score_samples([[2., 2.]]), clf.decision_function([[2., 2.]]) + clf.offset_) def test_tweak_params(): # Make sure some tweaking of parameters works. # We change clf.dual_coef_ at run time and expect .predict() to change # accordingly. Notice that this is not trivial since it involves a lot # of C/Python copying in the libsvm bindings. # The success of this test ensures that the mapping between libsvm and # the python classifier is complete. clf = svm.SVC(kernel='linear', C=1.0) clf.fit(X, Y) assert_array_equal(clf.dual_coef_, [[-.25, .25]]) assert_array_equal(clf.predict([[-.1, -.1]]), [1]) clf._dual_coef_ = np.array([[.0, 1.]]) assert_array_equal(clf.predict([[-.1, -.1]]), [2]) def test_probability(): # Predict probabilities using SVC # This uses cross validation, so we use a slightly bigger testing set. 
for clf in (svm.SVC(gamma='scale', probability=True, random_state=0, C=1.0), svm.NuSVC(gamma='scale', probability=True, random_state=0)): clf.fit(iris.data, iris.target) prob_predict = clf.predict_proba(iris.data) assert_array_almost_equal( np.sum(prob_predict, 1), np.ones(iris.data.shape[0])) assert_true(np.mean(np.argmax(prob_predict, 1) == clf.predict(iris.data)) > 0.9) assert_almost_equal(clf.predict_proba(iris.data), np.exp(clf.predict_log_proba(iris.data)), 8) def test_decision_function(): # Test decision_function # Sanity check, test that decision_function implemented in python # returns the same as the one in libsvm # multi class: clf = svm.SVC(kernel='linear', C=0.1, decision_function_shape='ovo').fit(iris.data, iris.target) dec = np.dot(iris.data, clf.coef_.T) + clf.intercept_ assert_array_almost_equal(dec, clf.decision_function(iris.data)) # binary: clf.fit(X, Y) dec = np.dot(X, clf.coef_.T) + clf.intercept_ prediction = clf.predict(X) assert_array_almost_equal(dec.ravel(), clf.decision_function(X)) assert_array_almost_equal( prediction, clf.classes_[(clf.decision_function(X) > 0).astype(np.int)]) expected = np.array([-1., -0.66, -1., 0.66, 1., 1.]) assert_array_almost_equal(clf.decision_function(X), expected, 2) # kernel binary: clf = svm.SVC(kernel='rbf', gamma=1, decision_function_shape='ovo') clf.fit(X, Y) rbfs = rbf_kernel(X, clf.support_vectors_, gamma=clf.gamma) dec = np.dot(rbfs, clf.dual_coef_.T) + clf.intercept_ assert_array_almost_equal(dec.ravel(), clf.decision_function(X)) def test_decision_function_shape(): # check that decision_function_shape='ovr' gives # correct shape and is consistent with predict clf = svm.SVC(kernel='linear', C=0.1, decision_function_shape='ovr').fit(iris.data, iris.target) dec = clf.decision_function(iris.data) assert_equal(dec.shape, (len(iris.data), 3)) assert_array_equal(clf.predict(iris.data), np.argmax(dec, axis=1)) # with five classes: X, y = make_blobs(n_samples=80, centers=5, random_state=0) X_train, X_test, 
y_train, y_test = train_test_split(X, y, random_state=0) clf = svm.SVC(kernel='linear', C=0.1, decision_function_shape='ovr').fit(X_train, y_train) dec = clf.decision_function(X_test) assert_equal(dec.shape, (len(X_test), 5)) assert_array_equal(clf.predict(X_test), np.argmax(dec, axis=1)) # check shape of ovo_decition_function=True clf = svm.SVC(kernel='linear', C=0.1, decision_function_shape='ovo').fit(X_train, y_train) dec = clf.decision_function(X_train) assert_equal(dec.shape, (len(X_train), 10)) def test_svr_predict(): # Test SVR's decision_function # Sanity check, test that predict implemented in python # returns the same as the one in libsvm X = iris.data y = iris.target # linear kernel reg = svm.SVR(kernel='linear', C=0.1).fit(X, y) dec = np.dot(X, reg.coef_.T) + reg.intercept_ assert_array_almost_equal(dec.ravel(), reg.predict(X).ravel()) # rbf kernel reg = svm.SVR(kernel='rbf', gamma=1).fit(X, y) rbfs = rbf_kernel(X, reg.support_vectors_, gamma=reg.gamma) dec = np.dot(rbfs, reg.dual_coef_.T) + reg.intercept_ assert_array_almost_equal(dec.ravel(), reg.predict(X).ravel()) @pytest.mark.filterwarnings('ignore: Default solver will be changed') # 0.22 @pytest.mark.filterwarnings('ignore: Default multi_class will') # 0.22 def test_weight(): # Test class weights clf = svm.SVC(gamma='scale', class_weight={1: 0.1}) # we give a small weights to class 1 clf.fit(X, Y) # so all predicted values belong to class 2 assert_array_almost_equal(clf.predict(X), [2] * 6) X_, y_ = make_classification(n_samples=200, n_features=10, weights=[0.833, 0.167], random_state=2) for clf in (linear_model.LogisticRegression(), svm.LinearSVC(random_state=0), svm.SVC(gamma="scale")): clf.set_params(class_weight={0: .1, 1: 10}) clf.fit(X_[:100], y_[:100]) y_pred = clf.predict(X_[100:]) assert_true(f1_score(y_[100:], y_pred) > .3) def test_sample_weights(): # Test weights on individual samples # TODO: check on NuSVR, OneClass, etc. 
clf = svm.SVC(gamma="scale") clf.fit(X, Y) assert_array_equal(clf.predict([X[2]]), [1.]) sample_weight = [.1] * 3 + [10] * 3 clf.fit(X, Y, sample_weight=sample_weight) assert_array_equal(clf.predict([X[2]]), [2.]) # test that rescaling all samples is the same as changing C clf = svm.SVC(gamma="scale") clf.fit(X, Y) dual_coef_no_weight = clf.dual_coef_ clf.set_params(C=100) clf.fit(X, Y, sample_weight=np.repeat(0.01, len(X))) assert_array_almost_equal(dual_coef_no_weight, clf.dual_coef_) @pytest.mark.filterwarnings('ignore: Default solver will be changed') # 0.22 @pytest.mark.filterwarnings('ignore: Default multi_class will') # 0.22 @ignore_warnings(category=UndefinedMetricWarning) def test_auto_weight(): # Test class weights for imbalanced data from sklearn.linear_model import LogisticRegression # We take as dataset the two-dimensional projection of iris so # that it is not separable and remove half of predictors from # class 1. # We add one to the targets as a non-regression test: # class_weight="balanced" # used to work only when the labels where a range [0..K). from sklearn.utils import compute_class_weight X, y = iris.data[:, :2], iris.target + 1 unbalanced = np.delete(np.arange(y.size), np.where(y > 2)[0][::2]) classes = np.unique(y[unbalanced]) class_weights = compute_class_weight('balanced', classes, y[unbalanced]) assert_true(np.argmax(class_weights) == 2) for clf in (svm.SVC(kernel='linear'), svm.LinearSVC(random_state=0), LogisticRegression()): # check that score is better when class='balanced' is set. 
y_pred = clf.fit(X[unbalanced], y[unbalanced]).predict(X) clf.set_params(class_weight='balanced') y_pred_balanced = clf.fit(X[unbalanced], y[unbalanced],).predict(X) assert_true(metrics.f1_score(y, y_pred, average='macro') <= metrics.f1_score(y, y_pred_balanced, average='macro')) def test_bad_input(): # Test that it gives proper exception on deficient input # impossible value of C assert_raises(ValueError, svm.SVC(gamma='scale', C=-1).fit, X, Y) # impossible value of nu clf = svm.NuSVC(gamma='scale', nu=0.0) assert_raises(ValueError, clf.fit, X, Y) Y2 = Y[:-1] # wrong dimensions for labels assert_raises(ValueError, clf.fit, X, Y2) # Test with arrays that are non-contiguous. for clf in (svm.SVC(gamma="scale"), svm.LinearSVC(random_state=0)): Xf = np.asfortranarray(X) assert_false(Xf.flags['C_CONTIGUOUS']) yf = np.ascontiguousarray(np.tile(Y, (2, 1)).T) yf = yf[:, -1] assert_false(yf.flags['F_CONTIGUOUS']) assert_false(yf.flags['C_CONTIGUOUS']) clf.fit(Xf, yf) assert_array_equal(clf.predict(T), true_result) # error for precomputed kernelsx clf = svm.SVC(kernel='precomputed') assert_raises(ValueError, clf.fit, X, Y) # sample_weight bad dimensions clf = svm.SVC(gamma="scale") assert_raises(ValueError, clf.fit, X, Y, sample_weight=range(len(X) - 1)) # predict with sparse input when trained with dense clf = svm.SVC(gamma="scale").fit(X, Y) assert_raises(ValueError, clf.predict, sparse.lil_matrix(X)) Xt = np.array(X).T clf.fit(np.dot(X, Xt), Y) assert_raises(ValueError, clf.predict, X) clf = svm.SVC(gamma="scale") clf.fit(X, Y) assert_raises(ValueError, clf.predict, Xt) def test_unicode_kernel(): # Test that a unicode kernel name does not cause a TypeError if six.PY2: # Test unicode (same as str on python3) clf = svm.SVC(kernel=u'linear', probability=True) clf.fit(X, Y) clf.predict_proba(T) svm.libsvm.cross_validation(iris.data, iris.target.astype(np.float64), 5, kernel=u'linear', random_seed=0) # Test default behavior on both versions clf = svm.SVC(gamma='scale', 
kernel='linear', probability=True) clf.fit(X, Y) clf.predict_proba(T) svm.libsvm.cross_validation(iris.data, iris.target.astype(np.float64), 5, kernel='linear', random_seed=0) def test_sparse_precomputed(): clf = svm.SVC(kernel='precomputed') sparse_gram = sparse.csr_matrix([[1, 0], [0, 1]]) try: clf.fit(sparse_gram, [0, 1]) assert not "reached" except TypeError as e: assert_in("Sparse precomputed", str(e)) def test_linearsvc_parameters(): # Test possible parameter combinations in LinearSVC # Generate list of possible parameter combinations losses = ['hinge', 'squared_hinge', 'logistic_regression', 'foo'] penalties, duals = ['l1', 'l2', 'bar'], [True, False] X, y = make_classification(n_samples=5, n_features=5) for loss, penalty, dual in itertools.product(losses, penalties, duals): clf = svm.LinearSVC(penalty=penalty, loss=loss, dual=dual) if ((loss, penalty) == ('hinge', 'l1') or (loss, penalty, dual) == ('hinge', 'l2', False) or (penalty, dual) == ('l1', True) or loss == 'foo' or penalty == 'bar'): assert_raises_regexp(ValueError, "Unsupported set of arguments.*penalty='%s.*" "loss='%s.*dual=%s" % (penalty, loss, dual), clf.fit, X, y) else: clf.fit(X, y) # Incorrect loss value - test if explicit error message is raised assert_raises_regexp(ValueError, ".*loss='l3' is not supported.*", svm.LinearSVC(loss="l3").fit, X, y) # FIXME remove in 1.0 def test_linearsvx_loss_penalty_deprecations(): X, y = [[0.0], [1.0]], [0, 1] msg = ("loss='%s' has been deprecated in favor of " "loss='%s' as of 0.16. 
Backward compatibility" " for the %s will be removed in %s") # LinearSVC # loss l1 --> hinge assert_warns_message(DeprecationWarning, msg % ("l1", "hinge", "loss='l1'", "1.0"), svm.LinearSVC(loss="l1").fit, X, y) # loss l2 --> squared_hinge assert_warns_message(DeprecationWarning, msg % ("l2", "squared_hinge", "loss='l2'", "1.0"), svm.LinearSVC(loss="l2").fit, X, y) # LinearSVR # loss l1 --> epsilon_insensitive assert_warns_message(DeprecationWarning, msg % ("l1", "epsilon_insensitive", "loss='l1'", "1.0"), svm.LinearSVR(loss="l1").fit, X, y) # loss l2 --> squared_epsilon_insensitive assert_warns_message(DeprecationWarning, msg % ("l2", "squared_epsilon_insensitive", "loss='l2'", "1.0"), svm.LinearSVR(loss="l2").fit, X, y) def test_linear_svx_uppercase_loss_penality_raises_error(): # Check if Upper case notation raises error at _fit_liblinear # which is called by fit X, y = [[0.0], [1.0]], [0, 1] assert_raise_message(ValueError, "loss='SQuared_hinge' is not supported", svm.LinearSVC(loss="SQuared_hinge").fit, X, y) assert_raise_message(ValueError, ("The combination of penalty='L2'" " and loss='squared_hinge' is not supported"), svm.LinearSVC(penalty="L2").fit, X, y) def test_linearsvc(): # Test basic routines using LinearSVC clf = svm.LinearSVC(random_state=0).fit(X, Y) # by default should have intercept assert_true(clf.fit_intercept) assert_array_equal(clf.predict(T), true_result) assert_array_almost_equal(clf.intercept_, [0], decimal=3) # the same with l1 penalty clf = svm.LinearSVC(penalty='l1', loss='squared_hinge', dual=False, random_state=0).fit(X, Y) assert_array_equal(clf.predict(T), true_result) # l2 penalty with dual formulation clf = svm.LinearSVC(penalty='l2', dual=True, random_state=0).fit(X, Y) assert_array_equal(clf.predict(T), true_result) # l2 penalty, l1 loss clf = svm.LinearSVC(penalty='l2', loss='hinge', dual=True, random_state=0) clf.fit(X, Y) assert_array_equal(clf.predict(T), true_result) # test also decision function dec = 
clf.decision_function(T) res = (dec > 0).astype(np.int) + 1 assert_array_equal(res, true_result) def test_linearsvc_crammer_singer(): # Test LinearSVC with crammer_singer multi-class svm ovr_clf = svm.LinearSVC(random_state=0).fit(iris.data, iris.target) cs_clf = svm.LinearSVC(multi_class='crammer_singer', random_state=0) cs_clf.fit(iris.data, iris.target) # similar prediction for ovr and crammer-singer: assert_true((ovr_clf.predict(iris.data) == cs_clf.predict(iris.data)).mean() > .9) # classifiers shouldn't be the same assert_true((ovr_clf.coef_ != cs_clf.coef_).all()) # test decision function assert_array_equal(cs_clf.predict(iris.data), np.argmax(cs_clf.decision_function(iris.data), axis=1)) dec_func = np.dot(iris.data, cs_clf.coef_.T) + cs_clf.intercept_ assert_array_almost_equal(dec_func, cs_clf.decision_function(iris.data)) def test_linearsvc_fit_sampleweight(): # check correct result when sample_weight is 1 n_samples = len(X) unit_weight = np.ones(n_samples) clf = svm.LinearSVC(random_state=0).fit(X, Y) clf_unitweight = svm.LinearSVC(random_state=0).\ fit(X, Y, sample_weight=unit_weight) # check if same as sample_weight=None assert_array_equal(clf_unitweight.predict(T), clf.predict(T)) assert_allclose(clf.coef_, clf_unitweight.coef_, 1, 0.0001) # check that fit(X) = fit([X1, X2, X3],sample_weight = [n1, n2, n3]) where # X = X1 repeated n1 times, X2 repeated n2 times and so forth random_state = check_random_state(0) random_weight = random_state.randint(0, 10, n_samples) lsvc_unflat = svm.LinearSVC(random_state=0).\ fit(X, Y, sample_weight=random_weight) pred1 = lsvc_unflat.predict(T) X_flat = np.repeat(X, random_weight, axis=0) y_flat = np.repeat(Y, random_weight, axis=0) lsvc_flat = svm.LinearSVC(random_state=0).fit(X_flat, y_flat) pred2 = lsvc_flat.predict(T) assert_array_equal(pred1, pred2) assert_allclose(lsvc_unflat.coef_, lsvc_flat.coef_, 1, 0.0001) def test_crammer_singer_binary(): # Test Crammer-Singer formulation in the binary case X, y = 
make_classification(n_classes=2, random_state=0) for fit_intercept in (True, False): acc = svm.LinearSVC(fit_intercept=fit_intercept, multi_class="crammer_singer", random_state=0).fit(X, y).score(X, y) assert_greater(acc, 0.9) def test_linearsvc_iris(): # Test that LinearSVC gives plausible predictions on the iris dataset # Also, test symbolic class names (classes_). target = iris.target_names[iris.target] clf = svm.LinearSVC(random_state=0).fit(iris.data, target) assert_equal(set(clf.classes_), set(iris.target_names)) assert_greater(np.mean(clf.predict(iris.data) == target), 0.8) dec = clf.decision_function(iris.data) pred = iris.target_names[np.argmax(dec, 1)] assert_array_equal(pred, clf.predict(iris.data)) def test_dense_liblinear_intercept_handling(classifier=svm.LinearSVC): # Test that dense liblinear honours intercept_scaling param X = [[2, 1], [3, 1], [1, 3], [2, 3]] y = [0, 0, 1, 1] clf = classifier(fit_intercept=True, penalty='l1', loss='squared_hinge', dual=False, C=4, tol=1e-7, random_state=0) assert_true(clf.intercept_scaling == 1, clf.intercept_scaling) assert_true(clf.fit_intercept) # when intercept_scaling is low the intercept value is highly "penalized" # by regularization clf.intercept_scaling = 1 clf.fit(X, y) assert_almost_equal(clf.intercept_, 0, decimal=5) # when intercept_scaling is sufficiently high, the intercept value # is not affected by regularization clf.intercept_scaling = 100 clf.fit(X, y) intercept1 = clf.intercept_ assert_less(intercept1, -1) # when intercept_scaling is sufficiently high, the intercept value # doesn't depend on intercept_scaling value clf.intercept_scaling = 1000 clf.fit(X, y) intercept2 = clf.intercept_ assert_array_almost_equal(intercept1, intercept2, decimal=2) def test_liblinear_set_coef(): # multi-class case clf = svm.LinearSVC().fit(iris.data, iris.target) values = clf.decision_function(iris.data) clf.coef_ = clf.coef_.copy() clf.intercept_ = clf.intercept_.copy() values2 = clf.decision_function(iris.data) 
assert_array_almost_equal(values, values2) # binary-class case X = [[2, 1], [3, 1], [1, 3], [2, 3]] y = [0, 0, 1, 1] clf = svm.LinearSVC().fit(X, y) values = clf.decision_function(X) clf.coef_ = clf.coef_.copy() clf.intercept_ = clf.intercept_.copy() values2 = clf.decision_function(X) assert_array_equal(values, values2) def test_immutable_coef_property(): # Check that primal coef modification are not silently ignored svms = [ svm.SVC(kernel='linear').fit(iris.data, iris.target), svm.NuSVC(kernel='linear').fit(iris.data, iris.target), svm.SVR(kernel='linear').fit(iris.data, iris.target), svm.NuSVR(kernel='linear').fit(iris.data, iris.target), svm.OneClassSVM(kernel='linear').fit(iris.data), ] for clf in svms: assert_raises(AttributeError, clf.__setattr__, 'coef_', np.arange(3)) assert_raises((RuntimeError, ValueError), clf.coef_.__setitem__, (0, 0), 0) def test_linearsvc_verbose(): # stdout: redirect import os stdout = os.dup(1) # save original stdout os.dup2(os.pipe()[1], 1) # replace it # actual call clf = svm.LinearSVC(verbose=1) clf.fit(X, Y) # stdout: restore os.dup2(stdout, 1) # restore original stdout def test_svc_clone_with_callable_kernel(): # create SVM with callable linear kernel, check that results are the same # as with built-in linear kernel svm_callable = svm.SVC(gamma='scale', kernel=lambda x, y: np.dot(x, y.T), probability=True, random_state=0, decision_function_shape='ovr') # clone for checking clonability with lambda functions.. 
svm_cloned = base.clone(svm_callable) svm_cloned.fit(iris.data, iris.target) svm_builtin = svm.SVC(kernel='linear', probability=True, random_state=0, decision_function_shape='ovr') svm_builtin.fit(iris.data, iris.target) assert_array_almost_equal(svm_cloned.dual_coef_, svm_builtin.dual_coef_) assert_array_almost_equal(svm_cloned.intercept_, svm_builtin.intercept_) assert_array_equal(svm_cloned.predict(iris.data), svm_builtin.predict(iris.data)) assert_array_almost_equal(svm_cloned.predict_proba(iris.data), svm_builtin.predict_proba(iris.data), decimal=4) assert_array_almost_equal(svm_cloned.decision_function(iris.data), svm_builtin.decision_function(iris.data)) def test_svc_bad_kernel(): svc = svm.SVC(gamma='scale', kernel=lambda x, y: x) assert_raises(ValueError, svc.fit, X, Y) def test_timeout(): a = svm.SVC(kernel=lambda x, y: np.dot(x, y.T), probability=True, random_state=0, max_iter=1) assert_warns(ConvergenceWarning, a.fit, X, Y) def test_unfitted(): X = "foo!" # input validation not required when SVM not fitted clf = svm.SVC(gamma="scale") assert_raises_regexp(Exception, r".*\bSVC\b.*\bnot\b.*\bfitted\b", clf.predict, X) clf = svm.NuSVR(gamma='scale') assert_raises_regexp(Exception, r".*\bNuSVR\b.*\bnot\b.*\bfitted\b", clf.predict, X) # ignore convergence warnings from max_iter=1 @ignore_warnings def test_consistent_proba(): a = svm.SVC(probability=True, max_iter=1, random_state=0) proba_1 = a.fit(X, Y).predict_proba(X) a = svm.SVC(probability=True, max_iter=1, random_state=0) proba_2 = a.fit(X, Y).predict_proba(X) assert_array_almost_equal(proba_1, proba_2) def test_linear_svm_convergence_warnings(): # Test that warnings are raised if model does not converge lsvc = svm.LinearSVC(random_state=0, max_iter=2) assert_warns(ConvergenceWarning, lsvc.fit, X, Y) assert_equal(lsvc.n_iter_, 2) lsvr = svm.LinearSVR(random_state=0, max_iter=2) assert_warns(ConvergenceWarning, lsvr.fit, iris.data, iris.target) assert_equal(lsvr.n_iter_, 2) def test_svr_coef_sign(): # 
Test that SVR(kernel="linear") has coef_ with the right sign. # Non-regression test for #2933. X = np.random.RandomState(21).randn(10, 3) y = np.random.RandomState(12).randn(10) for svr in [svm.SVR(kernel='linear'), svm.NuSVR(kernel='linear'), svm.LinearSVR()]: svr.fit(X, y) assert_array_almost_equal(svr.predict(X), np.dot(X, svr.coef_.ravel()) + svr.intercept_) def test_linear_svc_intercept_scaling(): # Test that the right error message is thrown when intercept_scaling <= 0 for i in [-1, 0]: lsvc = svm.LinearSVC(intercept_scaling=i) msg = ('Intercept scaling is %r but needs to be greater than 0.' ' To disable fitting an intercept,' ' set fit_intercept=False.' % lsvc.intercept_scaling) assert_raise_message(ValueError, msg, lsvc.fit, X, Y) def test_lsvc_intercept_scaling_zero(): # Test that intercept_scaling is ignored when fit_intercept is False lsvc = svm.LinearSVC(fit_intercept=False) lsvc.fit(X, Y) assert_equal(lsvc.intercept_, 0.) def test_hasattr_predict_proba(): # Method must be (un)available before or after fit, switched by # `probability` param G = svm.SVC(gamma='scale', probability=True) assert_true(hasattr(G, 'predict_proba')) G.fit(iris.data, iris.target) assert_true(hasattr(G, 'predict_proba')) G = svm.SVC(gamma='scale', probability=False) assert_false(hasattr(G, 'predict_proba')) G.fit(iris.data, iris.target) assert_false(hasattr(G, 'predict_proba')) # Switching to `probability=True` after fitting should make # predict_proba available, but calling it must not work: G.probability = True assert_true(hasattr(G, 'predict_proba')) msg = "predict_proba is not available when fitted with probability=False" assert_raise_message(NotFittedError, msg, G.predict_proba, iris.data) def test_decision_function_shape_two_class(): for n_classes in [2, 3]: X, y = make_blobs(centers=n_classes, random_state=0) for estimator in [svm.SVC, svm.NuSVC]: clf = OneVsRestClassifier(estimator(gamma='scale', decision_function_shape="ovr")).fit(X, y) assert_equal(len(clf.predict(X)), 
len(y)) def test_ovr_decision_function(): # One point from each quadrant represents one class X_train = np.array([[1, 1], [-1, 1], [-1, -1], [1, -1]]) y_train = [0, 1, 2, 3] # First point is closer to the decision boundaries than the second point base_points = np.array([[5, 5], [10, 10]]) # For all the quadrants (classes) X_test = np.vstack(( base_points * [1, 1], # Q1 base_points * [-1, 1], # Q2 base_points * [-1, -1], # Q3 base_points * [1, -1] # Q4 )) y_test = [0] * 2 + [1] * 2 + [2] * 2 + [3] * 2 clf = svm.SVC(kernel='linear', decision_function_shape='ovr') clf.fit(X_train, y_train) y_pred = clf.predict(X_test) # Test if the prediction is the same as y assert_array_equal(y_pred, y_test) deci_val = clf.decision_function(X_test) # Assert that the predicted class has the maximum value assert_array_equal(np.argmax(deci_val, axis=1), y_pred) # Get decision value at test points for the predicted class pred_class_deci_val = deci_val[range(8), y_pred].reshape((4, 2)) # Assert pred_class_deci_val > 0 here assert_greater(np.min(pred_class_deci_val), 0.0) # Test if the first point has lower decision value on every quadrant # compared to the second point assert_true(np.all(pred_class_deci_val[:, 0] < pred_class_deci_val[:, 1])) def test_gamma_auto(): X, y = [[0.0, 1.2], [1.0, 1.3]], [0, 1] msg = ("The default value of gamma will change from 'auto' to 'scale' in " "version 0.22 to account better for unscaled features. Set gamma " "explicitly to 'auto' or 'scale' to avoid this warning.") assert_warns_message(FutureWarning, msg, svm.SVC().fit, X, y) assert_no_warnings(svm.SVC(kernel='linear').fit, X, y) assert_no_warnings(svm.SVC(kernel='precomputed').fit, X, y) def test_gamma_scale(): X, y = [[0.], [1.]], [0, 1] clf = svm.SVC(gamma='scale') assert_no_warnings(clf.fit, X, y) assert_equal(clf._gamma, 2.) # X_std ~= 1 shouldn't raise warning, for when # gamma is not explicitly set. X, y = [[1, 2], [3, 2 * np.sqrt(6) / 3 + 2]], [0, 1] assert_no_warnings(clf.fit, X, y)
vortex-ape/scikit-learn
sklearn/svm/tests/test_svm.py
sklearn/externals/_pilutil.py
"""Spectral Embedding""" # Author: Gael Varoquaux <gael.varoquaux@normalesup.org> # Wei LI <kuantkid@gmail.com> # License: BSD 3 clause from __future__ import division import warnings import numpy as np from scipy import sparse from scipy.linalg import eigh from scipy.sparse.linalg import eigsh, lobpcg from scipy.sparse.csgraph import connected_components from scipy.sparse.csgraph import laplacian as csgraph_laplacian from ..base import BaseEstimator from ..externals import six from ..utils import check_random_state, check_array, check_symmetric from ..utils.extmath import _deterministic_vector_sign_flip from ..metrics.pairwise import rbf_kernel from ..neighbors import kneighbors_graph def _graph_connected_component(graph, node_id): """Find the largest graph connected components that contains one given node Parameters ---------- graph : array-like, shape: (n_samples, n_samples) adjacency matrix of the graph, non-zero weight means an edge between the nodes node_id : int The index of the query node of the graph Returns ------- connected_components_matrix : array-like, shape: (n_samples,) An array of bool value indicating the indexes of the nodes belonging to the largest connected components of the given query node """ n_node = graph.shape[0] if sparse.issparse(graph): # speed up row-wise access to boolean connection mask graph = graph.tocsr() connected_nodes = np.zeros(n_node, dtype=np.bool) nodes_to_explore = np.zeros(n_node, dtype=np.bool) nodes_to_explore[node_id] = True for _ in range(n_node): last_num_component = connected_nodes.sum() np.logical_or(connected_nodes, nodes_to_explore, out=connected_nodes) if last_num_component >= connected_nodes.sum(): break indices = np.where(nodes_to_explore)[0] nodes_to_explore.fill(False) for i in indices: if sparse.issparse(graph): neighbors = graph[i].toarray().ravel() else: neighbors = graph[i] np.logical_or(nodes_to_explore, neighbors, out=nodes_to_explore) return connected_nodes def _graph_is_connected(graph): """ Return 
whether the graph is connected (True) or Not (False) Parameters ---------- graph : array-like or sparse matrix, shape: (n_samples, n_samples) adjacency matrix of the graph, non-zero weight means an edge between the nodes Returns ------- is_connected : bool True means the graph is fully connected and False means not """ if sparse.isspmatrix(graph): # sparse graph, find all the connected components n_connected_components, _ = connected_components(graph) return n_connected_components == 1 else: # dense graph, find all connected components start from node 0 return _graph_connected_component(graph, 0).sum() == graph.shape[0] def _set_diag(laplacian, value, norm_laplacian): """Set the diagonal of the laplacian matrix and convert it to a sparse format well suited for eigenvalue decomposition Parameters ---------- laplacian : array or sparse matrix The graph laplacian value : float The value of the diagonal norm_laplacian : bool Whether the value of the diagonal should be changed or not Returns ------- laplacian : array or sparse matrix An array of matrix in a form that is well suited to fast eigenvalue decomposition, depending on the band width of the matrix. 
""" n_nodes = laplacian.shape[0] # We need all entries in the diagonal to values if not sparse.isspmatrix(laplacian): if norm_laplacian: laplacian.flat[::n_nodes + 1] = value else: laplacian = laplacian.tocoo() if norm_laplacian: diag_idx = (laplacian.row == laplacian.col) laplacian.data[diag_idx] = value # If the matrix has a small number of diagonals (as in the # case of structured matrices coming from images), the # dia format might be best suited for matvec products: n_diags = np.unique(laplacian.row - laplacian.col).size if n_diags <= 7: # 3 or less outer diagonals on each side laplacian = laplacian.todia() else: # csr has the fastest matvec and is thus best suited to # arpack laplacian = laplacian.tocsr() return laplacian def spectral_embedding(adjacency, n_components=8, eigen_solver=None, random_state=None, eigen_tol=0.0, norm_laplacian=True, drop_first=True): """Project the sample on the first eigenvectors of the graph Laplacian. The adjacency matrix is used to compute a normalized graph Laplacian whose spectrum (especially the eigenvectors associated to the smallest eigenvalues) has an interpretation in terms of minimal number of cuts necessary to split the graph into comparably sized components. This embedding can also 'work' even if the ``adjacency`` variable is not strictly the adjacency matrix of a graph but more generally an affinity or similarity matrix between samples (for instance the heat kernel of a euclidean distance matrix or a k-NN matrix). However care must taken to always make the affinity matrix symmetric so that the eigenvector decomposition works as expected. Note : Laplacian Eigenmaps is the actual algorithm implemented here. Read more in the :ref:`User Guide <spectral_embedding>`. Parameters ---------- adjacency : array-like or sparse matrix, shape: (n_samples, n_samples) The adjacency matrix of the graph to embed. n_components : integer, optional, default 8 The dimension of the projection subspace. 
eigen_solver : {None, 'arpack', 'lobpcg', or 'amg'}, default None The eigenvalue decomposition strategy to use. AMG requires pyamg to be installed. It can be faster on very large, sparse problems, but may also lead to instabilities. random_state : int, RandomState instance or None, optional, default: None A pseudo random number generator used for the initialization of the lobpcg eigenvectors decomposition. If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`. Used when ``solver`` == 'amg'. eigen_tol : float, optional, default=0.0 Stopping criterion for eigendecomposition of the Laplacian matrix when using arpack eigen_solver. norm_laplacian : bool, optional, default=True If True, then compute normalized Laplacian. drop_first : bool, optional, default=True Whether to drop the first eigenvector. For spectral embedding, this should be True as the first eigenvector should be constant vector for connected graph, but for spectral clustering, this should be kept as False to retain the first eigenvector. Returns ------- embedding : array, shape=(n_samples, n_components) The reduced samples. Notes ----- Spectral Embedding (Laplacian Eigenmaps) is most useful when the graph has one connected component. If there graph has many components, the first few eigenvectors will simply uncover the connected components of the graph. References ---------- * https://en.wikipedia.org/wiki/LOBPCG * Toward the Optimal Preconditioned Eigensolver: Locally Optimal Block Preconditioned Conjugate Gradient Method Andrew V. 
Knyazev https://doi.org/10.1137%2FS1064827500366124 """ adjacency = check_symmetric(adjacency) try: from pyamg import smoothed_aggregation_solver except ImportError: if eigen_solver == "amg": raise ValueError("The eigen_solver was set to 'amg', but pyamg is " "not available.") if eigen_solver is None: eigen_solver = 'arpack' elif eigen_solver not in ('arpack', 'lobpcg', 'amg'): raise ValueError("Unknown value for eigen_solver: '%s'." "Should be 'amg', 'arpack', or 'lobpcg'" % eigen_solver) random_state = check_random_state(random_state) n_nodes = adjacency.shape[0] # Whether to drop the first eigenvector if drop_first: n_components = n_components + 1 if not _graph_is_connected(adjacency): warnings.warn("Graph is not fully connected, spectral embedding" " may not work as expected.") laplacian, dd = csgraph_laplacian(adjacency, normed=norm_laplacian, return_diag=True) if (eigen_solver == 'arpack' or eigen_solver != 'lobpcg' and (not sparse.isspmatrix(laplacian) or n_nodes < 5 * n_components)): # lobpcg used with eigen_solver='amg' has bugs for low number of nodes # for details see the source code in scipy: # https://github.com/scipy/scipy/blob/v0.11.0/scipy/sparse/linalg/eigen # /lobpcg/lobpcg.py#L237 # or matlab: # http://www.mathworks.com/matlabcentral/fileexchange/48-lobpcg-m laplacian = _set_diag(laplacian, 1, norm_laplacian) # Here we'll use shift-invert mode for fast eigenvalues # (see http://docs.scipy.org/doc/scipy/reference/tutorial/arpack.html # for a short explanation of what this means) # Because the normalized Laplacian has eigenvalues between 0 and 2, # I - L has eigenvalues between -1 and 1. ARPACK is most efficient # when finding eigenvalues of largest magnitude (keyword which='LM') # and when these eigenvalues are very large compared to the rest. # For very large, very sparse graphs, I - L can have many, many # eigenvalues very near 1.0. This leads to slow convergence. 
So # instead, we'll use ARPACK's shift-invert mode, asking for the # eigenvalues near 1.0. This effectively spreads-out the spectrum # near 1.0 and leads to much faster convergence: potentially an # orders-of-magnitude speedup over simply using keyword which='LA' # in standard mode. try: # We are computing the opposite of the laplacian inplace so as # to spare a memory allocation of a possibly very large array laplacian *= -1 v0 = random_state.uniform(-1, 1, laplacian.shape[0]) lambdas, diffusion_map = eigsh(laplacian, k=n_components, sigma=1.0, which='LM', tol=eigen_tol, v0=v0) embedding = diffusion_map.T[n_components::-1] if norm_laplacian: embedding = embedding / dd except RuntimeError: # When submatrices are exactly singular, an LU decomposition # in arpack fails. We fallback to lobpcg eigen_solver = "lobpcg" # Revert the laplacian to its opposite to have lobpcg work laplacian *= -1 if eigen_solver == 'amg': # Use AMG to get a preconditioner and speed up the eigenvalue # problem. if not sparse.issparse(laplacian): warnings.warn("AMG works better for sparse matrices") # lobpcg needs double precision floats laplacian = check_array(laplacian, dtype=np.float64, accept_sparse=True) laplacian = _set_diag(laplacian, 1, norm_laplacian) ml = smoothed_aggregation_solver(check_array(laplacian, 'csr')) M = ml.aspreconditioner() X = random_state.rand(laplacian.shape[0], n_components + 1) X[:, 0] = dd.ravel() lambdas, diffusion_map = lobpcg(laplacian, X, M=M, tol=1.e-12, largest=False) embedding = diffusion_map.T if norm_laplacian: embedding = embedding / dd if embedding.shape[0] == 1: raise ValueError elif eigen_solver == "lobpcg": # lobpcg needs double precision floats laplacian = check_array(laplacian, dtype=np.float64, accept_sparse=True) if n_nodes < 5 * n_components + 1: # see note above under arpack why lobpcg has problems with small # number of nodes # lobpcg will fallback to eigh, so we short circuit it if sparse.isspmatrix(laplacian): laplacian = 
laplacian.toarray() lambdas, diffusion_map = eigh(laplacian) embedding = diffusion_map.T[:n_components] if norm_laplacian: embedding = embedding / dd else: laplacian = _set_diag(laplacian, 1, norm_laplacian) # We increase the number of eigenvectors requested, as lobpcg # doesn't behave well in low dimension X = random_state.rand(laplacian.shape[0], n_components + 1) X[:, 0] = dd.ravel() lambdas, diffusion_map = lobpcg(laplacian, X, tol=1e-15, largest=False, maxiter=2000) embedding = diffusion_map.T[:n_components] if norm_laplacian: embedding = embedding / dd if embedding.shape[0] == 1: raise ValueError embedding = _deterministic_vector_sign_flip(embedding) if drop_first: return embedding[1:n_components].T else: return embedding[:n_components].T class SpectralEmbedding(BaseEstimator): """Spectral embedding for non-linear dimensionality reduction. Forms an affinity matrix given by the specified function and applies spectral decomposition to the corresponding graph laplacian. The resulting transformation is given by the value of the eigenvectors for each data point. Note : Laplacian Eigenmaps is the actual algorithm implemented here. Read more in the :ref:`User Guide <spectral_embedding>`. Parameters ----------- n_components : integer, default: 2 The dimension of the projected subspace. affinity : string or callable, default : "nearest_neighbors" How to construct the affinity matrix. - 'nearest_neighbors' : construct affinity matrix by knn graph - 'rbf' : construct affinity matrix by rbf kernel - 'precomputed' : interpret X as precomputed affinity matrix - callable : use passed in function as affinity the function takes in data matrix (n_samples, n_features) and return affinity matrix (n_samples, n_samples). gamma : float, optional, default : 1/n_features Kernel coefficient for rbf kernel. random_state : int, RandomState instance or None, optional, default: None A pseudo random number generator used for the initialization of the lobpcg eigenvectors. 
If int, random_state is the seed used by the random number
        generator; If RandomState instance, random_state is the random number
        generator; If None, the random number generator is the RandomState
        instance used by `np.random`. Used when ``solver`` == 'amg'.

    eigen_solver : {None, 'arpack', 'lobpcg', or 'amg'}
        The eigenvalue decomposition strategy to use. AMG requires pyamg
        to be installed. It can be faster on very large, sparse problems,
        but may also lead to instabilities.

    n_neighbors : int, default : max(n_samples/10 , 1)
        Number of nearest neighbors for nearest_neighbors graph building.

    n_jobs : int or None, optional (default=None)
        The number of parallel jobs to run.
        ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
        ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
        for more details.

    Attributes
    ----------
    embedding_ : array, shape = (n_samples, n_components)
        Spectral embedding of the training matrix.

    affinity_matrix_ : array, shape = (n_samples, n_samples)
        Affinity_matrix constructed from samples or precomputed.

    Examples
    --------
    >>> from sklearn.datasets import load_digits
    >>> from sklearn.manifold import SpectralEmbedding
    >>> X, _ = load_digits(return_X_y=True)
    >>> X.shape
    (1797, 64)
    >>> embedding = SpectralEmbedding(n_components=2)
    >>> X_transformed = embedding.fit_transform(X[:100])
    >>> X_transformed.shape
    (100, 2)

    References
    ----------

    - A Tutorial on Spectral Clustering, 2007
      Ulrike von Luxburg
      http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.165.9323

    - On Spectral Clustering: Analysis and an algorithm, 2001
      Andrew Y. Ng, Michael I. Jordan, Yair Weiss
      http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.19.8100

    - Normalized cuts and image segmentation, 2000
      Jianbo Shi, Jitendra Malik
      http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.160.2324
    """

    def __init__(self, n_components=2, affinity="nearest_neighbors",
                 gamma=None, random_state=None, eigen_solver=None,
                 n_neighbors=None, n_jobs=None):
        # Only store the hyper-parameters; all computation is deferred to
        # fit, per the scikit-learn estimator contract.
        self.n_components = n_components
        self.affinity = affinity
        self.gamma = gamma
        self.random_state = random_state
        self.eigen_solver = eigen_solver
        self.n_neighbors = n_neighbors
        self.n_jobs = n_jobs

    @property
    def _pairwise(self):
        # Tells the common estimator checks that X is a square matrix of
        # pairwise affinities (not a feature matrix) in precomputed mode.
        return self.affinity == "precomputed"

    def _get_affinity_matrix(self, X, Y=None):
        """Calculate the affinity matrix from data
        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training vector, where n_samples is the number of samples
            and n_features is the number of features.

            If affinity is "precomputed"
            X : array-like, shape (n_samples, n_samples),
            Interpret X as precomputed adjacency graph computed from
            samples.

        Y: Ignored

        Returns
        -------
        affinity_matrix, shape (n_samples, n_samples)
        """
        if self.affinity == 'precomputed':
            # X already is the affinity matrix; store it unchanged.
            self.affinity_matrix_ = X
            return self.affinity_matrix_
        if self.affinity == 'nearest_neighbors':
            if sparse.issparse(X):
                # NOTE: this mutates self.affinity as a fallback, so a
                # subsequent call on the same estimator will go down the
                # 'rbf' path directly.
                warnings.warn("Nearest neighbors affinity currently does "
                              "not support sparse input, falling back to "
                              "rbf affinity")
                self.affinity = "rbf"
            else:
                self.n_neighbors_ = (self.n_neighbors
                                     if self.n_neighbors is not None
                                     else max(int(X.shape[0] / 10), 1))
                self.affinity_matrix_ = kneighbors_graph(X, self.n_neighbors_,
                                                         include_self=True,
                                                         n_jobs=self.n_jobs)
                # currently only symmetric affinity_matrix supported
                self.affinity_matrix_ = 0.5 * (self.affinity_matrix_ +
                                               self.affinity_matrix_.T)
                return self.affinity_matrix_
        if self.affinity == 'rbf':
            # Default bandwidth is 1/n_features when gamma is not given.
            self.gamma_ = (self.gamma
                           if self.gamma is not None else 1.0 / X.shape[1])
            self.affinity_matrix_ = rbf_kernel(X, gamma=self.gamma_)
            return self.affinity_matrix_
        # Otherwise self.affinity is a user-supplied callable.
        self.affinity_matrix_ = self.affinity(X)
        return self.affinity_matrix_

    def fit(self, X, y=None):
        """Fit the model from data in X.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training vector, where n_samples is the number of samples
            and n_features is the number of features.

            If affinity is "precomputed"
            X : array-like, shape (n_samples, n_samples),
            Interpret X as precomputed adjacency graph computed from
            samples.

        Returns
        -------
        self : object
            Returns the instance itself.
        """

        X = check_array(X, ensure_min_samples=2, estimator=self)

        random_state = check_random_state(self.random_state)
        # Validate the affinity parameter before doing any work.
        if isinstance(self.affinity, six.string_types):
            if self.affinity not in set(("nearest_neighbors", "rbf",
                                         "precomputed")):
                raise ValueError(("%s is not a valid affinity. Expected "
                                  "'precomputed', 'rbf', 'nearest_neighbors' "
                                  "or a callable.") % self.affinity)
        elif not callable(self.affinity):
            raise ValueError(("'affinity' is expected to be an affinity "
                              "name or a callable. Got: %s") % self.affinity)

        affinity_matrix = self._get_affinity_matrix(X)
        self.embedding_ = spectral_embedding(affinity_matrix,
                                             n_components=self.n_components,
                                             eigen_solver=self.eigen_solver,
                                             random_state=random_state)
        return self

    def fit_transform(self, X, y=None):
        """Fit the model from data in X and transform X.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training vector, where n_samples is the number of samples
            and n_features is the number of features.

            If affinity is "precomputed"
            X : array-like, shape (n_samples, n_samples),
            Interpret X as precomputed adjacency graph computed from
            samples.

        Returns
        -------
        X_new : array-like, shape (n_samples, n_components)
        """
        self.fit(X)
        return self.embedding_
""" Testing for Support Vector Machine module (sklearn.svm) TODO: remove hard coded numerical results when possible """ import numpy as np import itertools import pytest from numpy.testing import assert_array_equal, assert_array_almost_equal from numpy.testing import assert_almost_equal from numpy.testing import assert_allclose from scipy import sparse from sklearn import svm, linear_model, datasets, metrics, base from sklearn.model_selection import train_test_split from sklearn.datasets import make_classification, make_blobs from sklearn.metrics import f1_score from sklearn.metrics.pairwise import rbf_kernel from sklearn.utils import check_random_state from sklearn.utils.testing import assert_equal, assert_true, assert_false from sklearn.utils.testing import assert_greater, assert_in, assert_less from sklearn.utils.testing import assert_raises_regexp, assert_warns from sklearn.utils.testing import assert_warns_message, assert_raise_message from sklearn.utils.testing import ignore_warnings, assert_raises from sklearn.utils.testing import assert_no_warnings from sklearn.exceptions import ConvergenceWarning from sklearn.exceptions import NotFittedError, UndefinedMetricWarning from sklearn.multiclass import OneVsRestClassifier from sklearn.externals import six # toy sample X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]] Y = [1, 1, 1, 2, 2, 2] T = [[-1, -1], [2, 2], [3, 2]] true_result = [1, 2, 2] # also load the iris dataset iris = datasets.load_iris() rng = check_random_state(42) perm = rng.permutation(iris.target.size) iris.data = iris.data[perm] iris.target = iris.target[perm] def test_libsvm_parameters(): # Test parameters on classes that make use of libsvm. 
clf = svm.SVC(kernel='linear').fit(X, Y) assert_array_equal(clf.dual_coef_, [[-0.25, .25]]) assert_array_equal(clf.support_, [1, 3]) assert_array_equal(clf.support_vectors_, (X[1], X[3])) assert_array_equal(clf.intercept_, [0.]) assert_array_equal(clf.predict(X), Y) def test_libsvm_iris(): # Check consistency on dataset iris. # shuffle the dataset so that labels are not ordered for k in ('linear', 'rbf'): clf = svm.SVC(gamma='scale', kernel=k).fit(iris.data, iris.target) assert_greater(np.mean(clf.predict(iris.data) == iris.target), 0.9) assert_true(hasattr(clf, "coef_") == (k == 'linear')) assert_array_equal(clf.classes_, np.sort(clf.classes_)) # check also the low-level API model = svm.libsvm.fit(iris.data, iris.target.astype(np.float64)) pred = svm.libsvm.predict(iris.data, *model) assert_greater(np.mean(pred == iris.target), .95) model = svm.libsvm.fit(iris.data, iris.target.astype(np.float64), kernel='linear') pred = svm.libsvm.predict(iris.data, *model, kernel='linear') assert_greater(np.mean(pred == iris.target), .95) pred = svm.libsvm.cross_validation(iris.data, iris.target.astype(np.float64), 5, kernel='linear', random_seed=0) assert_greater(np.mean(pred == iris.target), .95) # If random_seed >= 0, the libsvm rng is seeded (by calling `srand`), hence # we should get deterministic results (assuming that there is no other # thread calling this wrapper calling `srand` concurrently). pred2 = svm.libsvm.cross_validation(iris.data, iris.target.astype(np.float64), 5, kernel='linear', random_seed=0) assert_array_equal(pred, pred2) def test_precomputed(): # SVC with a precomputed kernel. # We test it with a toy dataset and with iris. 
clf = svm.SVC(kernel='precomputed') # Gram matrix for train data (square matrix) # (we use just a linear kernel) K = np.dot(X, np.array(X).T) clf.fit(K, Y) # Gram matrix for test data (rectangular matrix) KT = np.dot(T, np.array(X).T) pred = clf.predict(KT) assert_raises(ValueError, clf.predict, KT.T) assert_array_equal(clf.dual_coef_, [[-0.25, .25]]) assert_array_equal(clf.support_, [1, 3]) assert_array_equal(clf.intercept_, [0]) assert_array_almost_equal(clf.support_, [1, 3]) assert_array_equal(pred, true_result) # Gram matrix for test data but compute KT[i,j] # for support vectors j only. KT = np.zeros_like(KT) for i in range(len(T)): for j in clf.support_: KT[i, j] = np.dot(T[i], X[j]) pred = clf.predict(KT) assert_array_equal(pred, true_result) # same as before, but using a callable function instead of the kernel # matrix. kernel is just a linear kernel kfunc = lambda x, y: np.dot(x, y.T) clf = svm.SVC(gamma='scale', kernel=kfunc) clf.fit(X, Y) pred = clf.predict(T) assert_array_equal(clf.dual_coef_, [[-0.25, .25]]) assert_array_equal(clf.intercept_, [0]) assert_array_almost_equal(clf.support_, [1, 3]) assert_array_equal(pred, true_result) # test a precomputed kernel with the iris dataset # and check parameters against a linear SVC clf = svm.SVC(kernel='precomputed') clf2 = svm.SVC(kernel='linear') K = np.dot(iris.data, iris.data.T) clf.fit(K, iris.target) clf2.fit(iris.data, iris.target) pred = clf.predict(K) assert_array_almost_equal(clf.support_, clf2.support_) assert_array_almost_equal(clf.dual_coef_, clf2.dual_coef_) assert_array_almost_equal(clf.intercept_, clf2.intercept_) assert_almost_equal(np.mean(pred == iris.target), .99, decimal=2) # Gram matrix for test data but compute KT[i,j] # for support vectors j only. 
K = np.zeros_like(K) for i in range(len(iris.data)): for j in clf.support_: K[i, j] = np.dot(iris.data[i], iris.data[j]) pred = clf.predict(K) assert_almost_equal(np.mean(pred == iris.target), .99, decimal=2) clf = svm.SVC(gamma='scale', kernel=kfunc) clf.fit(iris.data, iris.target) assert_almost_equal(np.mean(pred == iris.target), .99, decimal=2) def test_svr(): # Test Support Vector Regression diabetes = datasets.load_diabetes() for clf in (svm.NuSVR(kernel='linear', nu=.4, C=1.0), svm.NuSVR(kernel='linear', nu=.4, C=10.), svm.SVR(kernel='linear', C=10.), svm.LinearSVR(C=10.), svm.LinearSVR(C=10.), ): clf.fit(diabetes.data, diabetes.target) assert_greater(clf.score(diabetes.data, diabetes.target), 0.02) # non-regression test; previously, BaseLibSVM would check that # len(np.unique(y)) < 2, which must only be done for SVC svm.SVR(gamma='scale').fit(diabetes.data, np.ones(len(diabetes.data))) svm.LinearSVR().fit(diabetes.data, np.ones(len(diabetes.data))) def test_linearsvr(): # check that SVR(kernel='linear') and LinearSVC() give # comparable results diabetes = datasets.load_diabetes() lsvr = svm.LinearSVR(C=1e3).fit(diabetes.data, diabetes.target) score1 = lsvr.score(diabetes.data, diabetes.target) svr = svm.SVR(kernel='linear', C=1e3).fit(diabetes.data, diabetes.target) score2 = svr.score(diabetes.data, diabetes.target) assert_allclose(np.linalg.norm(lsvr.coef_), np.linalg.norm(svr.coef_), 1, 0.0001) assert_almost_equal(score1, score2, 2) def test_linearsvr_fit_sampleweight(): # check correct result when sample_weight is 1 # check that SVR(kernel='linear') and LinearSVC() give # comparable results diabetes = datasets.load_diabetes() n_samples = len(diabetes.target) unit_weight = np.ones(n_samples) lsvr = svm.LinearSVR(C=1e3).fit(diabetes.data, diabetes.target, sample_weight=unit_weight) score1 = lsvr.score(diabetes.data, diabetes.target) lsvr_no_weight = svm.LinearSVR(C=1e3).fit(diabetes.data, diabetes.target) score2 = lsvr_no_weight.score(diabetes.data, 
diabetes.target) assert_allclose(np.linalg.norm(lsvr.coef_), np.linalg.norm(lsvr_no_weight.coef_), 1, 0.0001) assert_almost_equal(score1, score2, 2) # check that fit(X) = fit([X1, X2, X3],sample_weight = [n1, n2, n3]) where # X = X1 repeated n1 times, X2 repeated n2 times and so forth random_state = check_random_state(0) random_weight = random_state.randint(0, 10, n_samples) lsvr_unflat = svm.LinearSVR(C=1e3).fit(diabetes.data, diabetes.target, sample_weight=random_weight) score3 = lsvr_unflat.score(diabetes.data, diabetes.target, sample_weight=random_weight) X_flat = np.repeat(diabetes.data, random_weight, axis=0) y_flat = np.repeat(diabetes.target, random_weight, axis=0) lsvr_flat = svm.LinearSVR(C=1e3).fit(X_flat, y_flat) score4 = lsvr_flat.score(X_flat, y_flat) assert_almost_equal(score3, score4, 2) def test_svr_errors(): X = [[0.0], [1.0]] y = [0.0, 0.5] # Bad kernel clf = svm.SVR(gamma='scale', kernel=lambda x, y: np.array([[1.0]])) clf.fit(X, y) assert_raises(ValueError, clf.predict, X) def test_oneclass(): # Test OneClassSVM clf = svm.OneClassSVM(gamma='scale') clf.fit(X) pred = clf.predict(T) assert_array_equal(pred, [-1, -1, -1]) assert_equal(pred.dtype, np.dtype('intp')) assert_array_almost_equal(clf.intercept_, [-1.117], decimal=3) assert_array_almost_equal(clf.dual_coef_, [[0.681, 0.139, 0.68, 0.14, 0.68, 0.68]], decimal=3) assert_raises(AttributeError, lambda: clf.coef_) def test_oneclass_decision_function(): # Test OneClassSVM decision function clf = svm.OneClassSVM() rnd = check_random_state(2) # Generate train data X = 0.3 * rnd.randn(100, 2) X_train = np.r_[X + 2, X - 2] # Generate some regular novel observations X = 0.3 * rnd.randn(20, 2) X_test = np.r_[X + 2, X - 2] # Generate some abnormal novel observations X_outliers = rnd.uniform(low=-4, high=4, size=(20, 2)) # fit the model clf = svm.OneClassSVM(nu=0.1, kernel="rbf", gamma=0.1) clf.fit(X_train) # predict things y_pred_test = clf.predict(X_test) assert_greater(np.mean(y_pred_test == 1), .9) 
y_pred_outliers = clf.predict(X_outliers) assert_greater(np.mean(y_pred_outliers == -1), .9) dec_func_test = clf.decision_function(X_test) assert_array_equal((dec_func_test > 0).ravel(), y_pred_test == 1) dec_func_outliers = clf.decision_function(X_outliers) assert_array_equal((dec_func_outliers > 0).ravel(), y_pred_outliers == 1) def test_oneclass_score_samples(): X_train = [[1, 1], [1, 2], [2, 1]] clf = svm.OneClassSVM(gamma=1).fit(X_train) assert_array_equal(clf.score_samples([[2., 2.]]), clf.decision_function([[2., 2.]]) + clf.offset_) def test_tweak_params(): # Make sure some tweaking of parameters works. # We change clf.dual_coef_ at run time and expect .predict() to change # accordingly. Notice that this is not trivial since it involves a lot # of C/Python copying in the libsvm bindings. # The success of this test ensures that the mapping between libsvm and # the python classifier is complete. clf = svm.SVC(kernel='linear', C=1.0) clf.fit(X, Y) assert_array_equal(clf.dual_coef_, [[-.25, .25]]) assert_array_equal(clf.predict([[-.1, -.1]]), [1]) clf._dual_coef_ = np.array([[.0, 1.]]) assert_array_equal(clf.predict([[-.1, -.1]]), [2]) def test_probability(): # Predict probabilities using SVC # This uses cross validation, so we use a slightly bigger testing set. 
    for clf in (svm.SVC(gamma='scale', probability=True, random_state=0,
                        C=1.0),
                svm.NuSVC(gamma='scale', probability=True, random_state=0)):
        clf.fit(iris.data, iris.target)

        prob_predict = clf.predict_proba(iris.data)
        # probabilities for each sample must sum to one
        assert_array_almost_equal(
            np.sum(prob_predict, 1), np.ones(iris.data.shape[0]))
        # argmax of the probabilities should mostly agree with predict
        # (Platt scaling makes exact agreement impossible)
        assert_true(np.mean(np.argmax(prob_predict, 1)
                            == clf.predict(iris.data)) > 0.9)

        assert_almost_equal(clf.predict_proba(iris.data),
                            np.exp(clf.predict_log_proba(iris.data)), 8)


def test_decision_function():
    # Test decision_function
    # Sanity check, test that decision_function implemented in python
    # returns the same as the one in libsvm
    # multi class:
    clf = svm.SVC(kernel='linear', C=0.1,
                  decision_function_shape='ovo').fit(iris.data, iris.target)

    dec = np.dot(iris.data, clf.coef_.T) + clf.intercept_

    assert_array_almost_equal(dec, clf.decision_function(iris.data))

    # binary:
    clf.fit(X, Y)
    dec = np.dot(X, clf.coef_.T) + clf.intercept_
    prediction = clf.predict(X)
    assert_array_almost_equal(dec.ravel(), clf.decision_function(X))
    assert_array_almost_equal(
        prediction,
        clf.classes_[(clf.decision_function(X) > 0).astype(np.int)])
    expected = np.array([-1., -0.66, -1., 0.66, 1., 1.])
    assert_array_almost_equal(clf.decision_function(X), expected, 2)

    # kernel binary:
    clf = svm.SVC(kernel='rbf', gamma=1, decision_function_shape='ovo')
    clf.fit(X, Y)
    # recompute the decision values from the dual coefficients by hand
    rbfs = rbf_kernel(X, clf.support_vectors_, gamma=clf.gamma)
    dec = np.dot(rbfs, clf.dual_coef_.T) + clf.intercept_
    assert_array_almost_equal(dec.ravel(), clf.decision_function(X))


def test_decision_function_shape():
    # check that decision_function_shape='ovr' gives
    # correct shape and is consistent with predict

    clf = svm.SVC(kernel='linear', C=0.1,
                  decision_function_shape='ovr').fit(iris.data, iris.target)
    dec = clf.decision_function(iris.data)
    assert_equal(dec.shape, (len(iris.data), 3))
    assert_array_equal(clf.predict(iris.data), np.argmax(dec, axis=1))

    # with five classes:
    X, y = make_blobs(n_samples=80, centers=5, random_state=0)
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)

    clf = svm.SVC(kernel='linear', C=0.1,
                  decision_function_shape='ovr').fit(X_train, y_train)
    dec = clf.decision_function(X_test)
    assert_equal(dec.shape, (len(X_test), 5))
    assert_array_equal(clf.predict(X_test), np.argmax(dec, axis=1))

    # check shape with decision_function_shape='ovo':
    # n_classes * (n_classes - 1) / 2 = 10 pairwise columns for 5 classes
    clf = svm.SVC(kernel='linear', C=0.1,
                  decision_function_shape='ovo').fit(X_train, y_train)
    dec = clf.decision_function(X_train)
    assert_equal(dec.shape, (len(X_train), 10))


def test_svr_predict():
    # Test SVR's decision_function
    # Sanity check, test that predict implemented in python
    # returns the same as the one in libsvm

    X = iris.data
    y = iris.target

    # linear kernel
    reg = svm.SVR(kernel='linear', C=0.1).fit(X, y)

    dec = np.dot(X, reg.coef_.T) + reg.intercept_
    assert_array_almost_equal(dec.ravel(), reg.predict(X).ravel())

    # rbf kernel
    reg = svm.SVR(kernel='rbf', gamma=1).fit(X, y)

    rbfs = rbf_kernel(X, reg.support_vectors_, gamma=reg.gamma)
    dec = np.dot(rbfs, reg.dual_coef_.T) + reg.intercept_
    assert_array_almost_equal(dec.ravel(), reg.predict(X).ravel())


@pytest.mark.filterwarnings('ignore: Default solver will be changed')  # 0.22
@pytest.mark.filterwarnings('ignore: Default multi_class will')  # 0.22
def test_weight():
    # Test class weights
    clf = svm.SVC(gamma='scale', class_weight={1: 0.1})
    # we give a small weights to class 1
    clf.fit(X, Y)
    # so all predicted values belong to class 2
    assert_array_almost_equal(clf.predict(X), [2] * 6)

    X_, y_ = make_classification(n_samples=200, n_features=10,
                                 weights=[0.833, 0.167], random_state=2)

    for clf in (linear_model.LogisticRegression(),
                svm.LinearSVC(random_state=0), svm.SVC(gamma="scale")):
        clf.set_params(class_weight={0: .1, 1: 10})
        clf.fit(X_[:100], y_[:100])
        y_pred = clf.predict(X_[100:])
        assert_true(f1_score(y_[100:], y_pred) > .3)


def test_sample_weights():
    # Test weights on individual samples
    # TODO: check on NuSVR, OneClass, etc.
clf = svm.SVC(gamma="scale") clf.fit(X, Y) assert_array_equal(clf.predict([X[2]]), [1.]) sample_weight = [.1] * 3 + [10] * 3 clf.fit(X, Y, sample_weight=sample_weight) assert_array_equal(clf.predict([X[2]]), [2.]) # test that rescaling all samples is the same as changing C clf = svm.SVC(gamma="scale") clf.fit(X, Y) dual_coef_no_weight = clf.dual_coef_ clf.set_params(C=100) clf.fit(X, Y, sample_weight=np.repeat(0.01, len(X))) assert_array_almost_equal(dual_coef_no_weight, clf.dual_coef_) @pytest.mark.filterwarnings('ignore: Default solver will be changed') # 0.22 @pytest.mark.filterwarnings('ignore: Default multi_class will') # 0.22 @ignore_warnings(category=UndefinedMetricWarning) def test_auto_weight(): # Test class weights for imbalanced data from sklearn.linear_model import LogisticRegression # We take as dataset the two-dimensional projection of iris so # that it is not separable and remove half of predictors from # class 1. # We add one to the targets as a non-regression test: # class_weight="balanced" # used to work only when the labels where a range [0..K). from sklearn.utils import compute_class_weight X, y = iris.data[:, :2], iris.target + 1 unbalanced = np.delete(np.arange(y.size), np.where(y > 2)[0][::2]) classes = np.unique(y[unbalanced]) class_weights = compute_class_weight('balanced', classes, y[unbalanced]) assert_true(np.argmax(class_weights) == 2) for clf in (svm.SVC(kernel='linear'), svm.LinearSVC(random_state=0), LogisticRegression()): # check that score is better when class='balanced' is set. 
        y_pred = clf.fit(X[unbalanced], y[unbalanced]).predict(X)
        clf.set_params(class_weight='balanced')
        y_pred_balanced = clf.fit(X[unbalanced], y[unbalanced],).predict(X)
        assert_true(metrics.f1_score(y, y_pred, average='macro')
                    <= metrics.f1_score(y, y_pred_balanced,
                                        average='macro'))


def test_bad_input():
    # Test that it gives proper exception on deficient input
    # impossible value of C
    assert_raises(ValueError, svm.SVC(gamma='scale', C=-1).fit, X, Y)

    # impossible value of nu
    clf = svm.NuSVC(gamma='scale', nu=0.0)
    assert_raises(ValueError, clf.fit, X, Y)

    Y2 = Y[:-1]  # wrong dimensions for labels
    assert_raises(ValueError, clf.fit, X, Y2)

    # Test with arrays that are non-contiguous.
    for clf in (svm.SVC(gamma="scale"), svm.LinearSVC(random_state=0)):
        Xf = np.asfortranarray(X)
        assert_false(Xf.flags['C_CONTIGUOUS'])
        yf = np.ascontiguousarray(np.tile(Y, (2, 1)).T)
        yf = yf[:, -1]
        assert_false(yf.flags['F_CONTIGUOUS'])
        assert_false(yf.flags['C_CONTIGUOUS'])
        clf.fit(Xf, yf)
        assert_array_equal(clf.predict(T), true_result)

    # error for precomputed kernels
    clf = svm.SVC(kernel='precomputed')
    assert_raises(ValueError, clf.fit, X, Y)

    # sample_weight bad dimensions
    clf = svm.SVC(gamma="scale")
    assert_raises(ValueError, clf.fit, X, Y, sample_weight=range(len(X) - 1))

    # predict with sparse input when trained with dense
    clf = svm.SVC(gamma="scale").fit(X, Y)
    assert_raises(ValueError, clf.predict, sparse.lil_matrix(X))

    Xt = np.array(X).T
    clf.fit(np.dot(X, Xt), Y)
    assert_raises(ValueError, clf.predict, X)

    clf = svm.SVC(gamma="scale")
    clf.fit(X, Y)
    assert_raises(ValueError, clf.predict, Xt)


def test_unicode_kernel():
    # Test that a unicode kernel name does not cause a TypeError
    if six.PY2:
        # Test unicode (same as str on python3)
        clf = svm.SVC(kernel=u'linear', probability=True)
        clf.fit(X, Y)
        clf.predict_proba(T)
        svm.libsvm.cross_validation(iris.data,
                                    iris.target.astype(np.float64), 5,
                                    kernel=u'linear',
                                    random_seed=0)

    # Test default behavior on both versions
    clf = svm.SVC(gamma='scale', kernel='linear', probability=True)
    clf.fit(X, Y)
    clf.predict_proba(T)
    svm.libsvm.cross_validation(iris.data,
                                iris.target.astype(np.float64), 5,
                                kernel='linear', random_seed=0)


def test_sparse_precomputed():
    # a sparse precomputed Gram matrix must raise a dedicated TypeError
    clf = svm.SVC(kernel='precomputed')
    sparse_gram = sparse.csr_matrix([[1, 0], [0, 1]])
    try:
        clf.fit(sparse_gram, [0, 1])
        assert not "reached"
    except TypeError as e:
        assert_in("Sparse precomputed", str(e))


def test_linearsvc_parameters():
    # Test possible parameter combinations in LinearSVC
    # Generate list of possible parameter combinations
    losses = ['hinge', 'squared_hinge', 'logistic_regression', 'foo']
    penalties, duals = ['l1', 'l2', 'bar'], [True, False]

    X, y = make_classification(n_samples=5, n_features=5)

    for loss, penalty, dual in itertools.product(losses, penalties, duals):
        clf = svm.LinearSVC(penalty=penalty, loss=loss, dual=dual)
        # these combinations are rejected by liblinear's parameter checks
        if ((loss, penalty) == ('hinge', 'l1') or
                (loss, penalty, dual) == ('hinge', 'l2', False) or
                (penalty, dual) == ('l1', True) or
                loss == 'foo' or penalty == 'bar'):

            assert_raises_regexp(ValueError,
                                 "Unsupported set of arguments.*penalty='%s.*"
                                 "loss='%s.*dual=%s"
                                 % (penalty, loss, dual),
                                 clf.fit, X, y)
        else:
            clf.fit(X, y)

    # Incorrect loss value - test if explicit error message is raised
    assert_raises_regexp(ValueError, ".*loss='l3' is not supported.*",
                         svm.LinearSVC(loss="l3").fit, X, y)


# FIXME remove in 1.0
def test_linearsvx_loss_penalty_deprecations():
    X, y = [[0.0], [1.0]], [0, 1]

    msg = ("loss='%s' has been deprecated in favor of "
           "loss='%s' as of 0.16. Backward compatibility"
           " for the %s will be removed in %s")

    # LinearSVC
    # loss l1 --> hinge
    assert_warns_message(DeprecationWarning,
                         msg % ("l1", "hinge", "loss='l1'", "1.0"),
                         svm.LinearSVC(loss="l1").fit, X, y)

    # loss l2 --> squared_hinge
    assert_warns_message(DeprecationWarning,
                         msg % ("l2", "squared_hinge", "loss='l2'", "1.0"),
                         svm.LinearSVC(loss="l2").fit, X, y)

    # LinearSVR
    # loss l1 --> epsilon_insensitive
    assert_warns_message(DeprecationWarning,
                         msg % ("l1", "epsilon_insensitive", "loss='l1'",
                                "1.0"),
                         svm.LinearSVR(loss="l1").fit, X, y)

    # loss l2 --> squared_epsilon_insensitive
    assert_warns_message(DeprecationWarning,
                         msg % ("l2", "squared_epsilon_insensitive",
                                "loss='l2'", "1.0"),
                         svm.LinearSVR(loss="l2").fit, X, y)


def test_linear_svx_uppercase_loss_penality_raises_error():
    # Check if Upper case notation raises error at _fit_liblinear
    # which is called by fit

    X, y = [[0.0], [1.0]], [0, 1]

    assert_raise_message(ValueError, "loss='SQuared_hinge' is not supported",
                         svm.LinearSVC(loss="SQuared_hinge").fit, X, y)

    assert_raise_message(ValueError,
                         ("The combination of penalty='L2'"
                          " and loss='squared_hinge' is not supported"),
                         svm.LinearSVC(penalty="L2").fit, X, y)


def test_linearsvc():
    # Test basic routines using LinearSVC
    clf = svm.LinearSVC(random_state=0).fit(X, Y)

    # by default should have intercept
    assert_true(clf.fit_intercept)

    assert_array_equal(clf.predict(T), true_result)
    assert_array_almost_equal(clf.intercept_, [0], decimal=3)

    # the same with l1 penalty
    clf = svm.LinearSVC(penalty='l1', loss='squared_hinge', dual=False,
                        random_state=0).fit(X, Y)
    assert_array_equal(clf.predict(T), true_result)

    # l2 penalty with dual formulation
    clf = svm.LinearSVC(penalty='l2', dual=True, random_state=0).fit(X, Y)
    assert_array_equal(clf.predict(T), true_result)

    # l2 penalty, l1 loss
    clf = svm.LinearSVC(penalty='l2', loss='hinge', dual=True, random_state=0)
    clf.fit(X, Y)
    assert_array_equal(clf.predict(T), true_result)

    # test also decision function
    dec = clf.decision_function(T)
    res = (dec > 0).astype(np.int) + 1
    assert_array_equal(res, true_result)


def test_linearsvc_crammer_singer():
    # Test LinearSVC with crammer_singer multi-class svm
    ovr_clf = svm.LinearSVC(random_state=0).fit(iris.data, iris.target)
    cs_clf = svm.LinearSVC(multi_class='crammer_singer', random_state=0)
    cs_clf.fit(iris.data, iris.target)

    # similar prediction for ovr and crammer-singer:
    assert_true((ovr_clf.predict(iris.data) ==
                 cs_clf.predict(iris.data)).mean() > .9)

    # classifiers shouldn't be the same
    assert_true((ovr_clf.coef_ != cs_clf.coef_).all())

    # test decision function
    assert_array_equal(cs_clf.predict(iris.data),
                       np.argmax(cs_clf.decision_function(iris.data), axis=1))
    dec_func = np.dot(iris.data, cs_clf.coef_.T) + cs_clf.intercept_
    assert_array_almost_equal(dec_func, cs_clf.decision_function(iris.data))


def test_linearsvc_fit_sampleweight():
    # check correct result when sample_weight is 1
    n_samples = len(X)
    unit_weight = np.ones(n_samples)
    clf = svm.LinearSVC(random_state=0).fit(X, Y)
    clf_unitweight = svm.LinearSVC(random_state=0).\
        fit(X, Y, sample_weight=unit_weight)

    # check if same as sample_weight=None
    assert_array_equal(clf_unitweight.predict(T), clf.predict(T))
    assert_allclose(clf.coef_, clf_unitweight.coef_, 1, 0.0001)

    # check that fit(X) = fit([X1, X2, X3],sample_weight = [n1, n2, n3]) where
    # X = X1 repeated n1 times, X2 repeated n2 times and so forth

    random_state = check_random_state(0)
    random_weight = random_state.randint(0, 10, n_samples)
    lsvc_unflat = svm.LinearSVC(random_state=0).\
        fit(X, Y, sample_weight=random_weight)
    pred1 = lsvc_unflat.predict(T)

    X_flat = np.repeat(X, random_weight, axis=0)
    y_flat = np.repeat(Y, random_weight, axis=0)
    lsvc_flat = svm.LinearSVC(random_state=0).fit(X_flat, y_flat)
    pred2 = lsvc_flat.predict(T)

    assert_array_equal(pred1, pred2)
    assert_allclose(lsvc_unflat.coef_, lsvc_flat.coef_, 1, 0.0001)


def test_crammer_singer_binary():
    # Test Crammer-Singer formulation in the binary case
    X, y = make_classification(n_classes=2, random_state=0)

    for fit_intercept in (True, False):
        acc = svm.LinearSVC(fit_intercept=fit_intercept,
                            multi_class="crammer_singer",
                            random_state=0).fit(X, y).score(X, y)
        assert_greater(acc, 0.9)


def test_linearsvc_iris():
    # Test that LinearSVC gives plausible predictions on the iris dataset
    # Also, test symbolic class names (classes_).
    target = iris.target_names[iris.target]
    clf = svm.LinearSVC(random_state=0).fit(iris.data, target)
    assert_equal(set(clf.classes_), set(iris.target_names))
    assert_greater(np.mean(clf.predict(iris.data) == target), 0.8)

    dec = clf.decision_function(iris.data)
    pred = iris.target_names[np.argmax(dec, 1)]
    assert_array_equal(pred, clf.predict(iris.data))


def test_dense_liblinear_intercept_handling(classifier=svm.LinearSVC):
    # Test that dense liblinear honours intercept_scaling param
    X = [[2, 1], [3, 1], [1, 3], [2, 3]]
    y = [0, 0, 1, 1]
    clf = classifier(fit_intercept=True, penalty='l1', loss='squared_hinge',
                     dual=False, C=4, tol=1e-7, random_state=0)
    assert_true(clf.intercept_scaling == 1, clf.intercept_scaling)
    assert_true(clf.fit_intercept)

    # when intercept_scaling is low the intercept value is highly "penalized"
    # by regularization
    clf.intercept_scaling = 1
    clf.fit(X, y)
    assert_almost_equal(clf.intercept_, 0, decimal=5)

    # when intercept_scaling is sufficiently high, the intercept value
    # is not affected by regularization
    clf.intercept_scaling = 100
    clf.fit(X, y)
    intercept1 = clf.intercept_
    assert_less(intercept1, -1)

    # when intercept_scaling is sufficiently high, the intercept value
    # doesn't depend on intercept_scaling value
    clf.intercept_scaling = 1000
    clf.fit(X, y)
    intercept2 = clf.intercept_
    assert_array_almost_equal(intercept1, intercept2, decimal=2)


def test_liblinear_set_coef():
    # multi-class case
    clf = svm.LinearSVC().fit(iris.data, iris.target)
    values = clf.decision_function(iris.data)
    # re-assigning copies must round-trip through the liblinear bindings
    clf.coef_ = clf.coef_.copy()
    clf.intercept_ = clf.intercept_.copy()
    values2 = clf.decision_function(iris.data)
assert_array_almost_equal(values, values2) # binary-class case X = [[2, 1], [3, 1], [1, 3], [2, 3]] y = [0, 0, 1, 1] clf = svm.LinearSVC().fit(X, y) values = clf.decision_function(X) clf.coef_ = clf.coef_.copy() clf.intercept_ = clf.intercept_.copy() values2 = clf.decision_function(X) assert_array_equal(values, values2) def test_immutable_coef_property(): # Check that primal coef modification are not silently ignored svms = [ svm.SVC(kernel='linear').fit(iris.data, iris.target), svm.NuSVC(kernel='linear').fit(iris.data, iris.target), svm.SVR(kernel='linear').fit(iris.data, iris.target), svm.NuSVR(kernel='linear').fit(iris.data, iris.target), svm.OneClassSVM(kernel='linear').fit(iris.data), ] for clf in svms: assert_raises(AttributeError, clf.__setattr__, 'coef_', np.arange(3)) assert_raises((RuntimeError, ValueError), clf.coef_.__setitem__, (0, 0), 0) def test_linearsvc_verbose(): # stdout: redirect import os stdout = os.dup(1) # save original stdout os.dup2(os.pipe()[1], 1) # replace it # actual call clf = svm.LinearSVC(verbose=1) clf.fit(X, Y) # stdout: restore os.dup2(stdout, 1) # restore original stdout def test_svc_clone_with_callable_kernel(): # create SVM with callable linear kernel, check that results are the same # as with built-in linear kernel svm_callable = svm.SVC(gamma='scale', kernel=lambda x, y: np.dot(x, y.T), probability=True, random_state=0, decision_function_shape='ovr') # clone for checking clonability with lambda functions.. 
svm_cloned = base.clone(svm_callable) svm_cloned.fit(iris.data, iris.target) svm_builtin = svm.SVC(kernel='linear', probability=True, random_state=0, decision_function_shape='ovr') svm_builtin.fit(iris.data, iris.target) assert_array_almost_equal(svm_cloned.dual_coef_, svm_builtin.dual_coef_) assert_array_almost_equal(svm_cloned.intercept_, svm_builtin.intercept_) assert_array_equal(svm_cloned.predict(iris.data), svm_builtin.predict(iris.data)) assert_array_almost_equal(svm_cloned.predict_proba(iris.data), svm_builtin.predict_proba(iris.data), decimal=4) assert_array_almost_equal(svm_cloned.decision_function(iris.data), svm_builtin.decision_function(iris.data)) def test_svc_bad_kernel(): svc = svm.SVC(gamma='scale', kernel=lambda x, y: x) assert_raises(ValueError, svc.fit, X, Y) def test_timeout(): a = svm.SVC(kernel=lambda x, y: np.dot(x, y.T), probability=True, random_state=0, max_iter=1) assert_warns(ConvergenceWarning, a.fit, X, Y) def test_unfitted(): X = "foo!" # input validation not required when SVM not fitted clf = svm.SVC(gamma="scale") assert_raises_regexp(Exception, r".*\bSVC\b.*\bnot\b.*\bfitted\b", clf.predict, X) clf = svm.NuSVR(gamma='scale') assert_raises_regexp(Exception, r".*\bNuSVR\b.*\bnot\b.*\bfitted\b", clf.predict, X) # ignore convergence warnings from max_iter=1 @ignore_warnings def test_consistent_proba(): a = svm.SVC(probability=True, max_iter=1, random_state=0) proba_1 = a.fit(X, Y).predict_proba(X) a = svm.SVC(probability=True, max_iter=1, random_state=0) proba_2 = a.fit(X, Y).predict_proba(X) assert_array_almost_equal(proba_1, proba_2) def test_linear_svm_convergence_warnings(): # Test that warnings are raised if model does not converge lsvc = svm.LinearSVC(random_state=0, max_iter=2) assert_warns(ConvergenceWarning, lsvc.fit, X, Y) assert_equal(lsvc.n_iter_, 2) lsvr = svm.LinearSVR(random_state=0, max_iter=2) assert_warns(ConvergenceWarning, lsvr.fit, iris.data, iris.target) assert_equal(lsvr.n_iter_, 2) def test_svr_coef_sign(): # 
Test that SVR(kernel="linear") has coef_ with the right sign. # Non-regression test for #2933. X = np.random.RandomState(21).randn(10, 3) y = np.random.RandomState(12).randn(10) for svr in [svm.SVR(kernel='linear'), svm.NuSVR(kernel='linear'), svm.LinearSVR()]: svr.fit(X, y) assert_array_almost_equal(svr.predict(X), np.dot(X, svr.coef_.ravel()) + svr.intercept_) def test_linear_svc_intercept_scaling(): # Test that the right error message is thrown when intercept_scaling <= 0 for i in [-1, 0]: lsvc = svm.LinearSVC(intercept_scaling=i) msg = ('Intercept scaling is %r but needs to be greater than 0.' ' To disable fitting an intercept,' ' set fit_intercept=False.' % lsvc.intercept_scaling) assert_raise_message(ValueError, msg, lsvc.fit, X, Y) def test_lsvc_intercept_scaling_zero(): # Test that intercept_scaling is ignored when fit_intercept is False lsvc = svm.LinearSVC(fit_intercept=False) lsvc.fit(X, Y) assert_equal(lsvc.intercept_, 0.) def test_hasattr_predict_proba(): # Method must be (un)available before or after fit, switched by # `probability` param G = svm.SVC(gamma='scale', probability=True) assert_true(hasattr(G, 'predict_proba')) G.fit(iris.data, iris.target) assert_true(hasattr(G, 'predict_proba')) G = svm.SVC(gamma='scale', probability=False) assert_false(hasattr(G, 'predict_proba')) G.fit(iris.data, iris.target) assert_false(hasattr(G, 'predict_proba')) # Switching to `probability=True` after fitting should make # predict_proba available, but calling it must not work: G.probability = True assert_true(hasattr(G, 'predict_proba')) msg = "predict_proba is not available when fitted with probability=False" assert_raise_message(NotFittedError, msg, G.predict_proba, iris.data) def test_decision_function_shape_two_class(): for n_classes in [2, 3]: X, y = make_blobs(centers=n_classes, random_state=0) for estimator in [svm.SVC, svm.NuSVC]: clf = OneVsRestClassifier(estimator(gamma='scale', decision_function_shape="ovr")).fit(X, y) assert_equal(len(clf.predict(X)), 
len(y)) def test_ovr_decision_function(): # One point from each quadrant represents one class X_train = np.array([[1, 1], [-1, 1], [-1, -1], [1, -1]]) y_train = [0, 1, 2, 3] # First point is closer to the decision boundaries than the second point base_points = np.array([[5, 5], [10, 10]]) # For all the quadrants (classes) X_test = np.vstack(( base_points * [1, 1], # Q1 base_points * [-1, 1], # Q2 base_points * [-1, -1], # Q3 base_points * [1, -1] # Q4 )) y_test = [0] * 2 + [1] * 2 + [2] * 2 + [3] * 2 clf = svm.SVC(kernel='linear', decision_function_shape='ovr') clf.fit(X_train, y_train) y_pred = clf.predict(X_test) # Test if the prediction is the same as y assert_array_equal(y_pred, y_test) deci_val = clf.decision_function(X_test) # Assert that the predicted class has the maximum value assert_array_equal(np.argmax(deci_val, axis=1), y_pred) # Get decision value at test points for the predicted class pred_class_deci_val = deci_val[range(8), y_pred].reshape((4, 2)) # Assert pred_class_deci_val > 0 here assert_greater(np.min(pred_class_deci_val), 0.0) # Test if the first point has lower decision value on every quadrant # compared to the second point assert_true(np.all(pred_class_deci_val[:, 0] < pred_class_deci_val[:, 1])) def test_gamma_auto(): X, y = [[0.0, 1.2], [1.0, 1.3]], [0, 1] msg = ("The default value of gamma will change from 'auto' to 'scale' in " "version 0.22 to account better for unscaled features. Set gamma " "explicitly to 'auto' or 'scale' to avoid this warning.") assert_warns_message(FutureWarning, msg, svm.SVC().fit, X, y) assert_no_warnings(svm.SVC(kernel='linear').fit, X, y) assert_no_warnings(svm.SVC(kernel='precomputed').fit, X, y) def test_gamma_scale(): X, y = [[0.], [1.]], [0, 1] clf = svm.SVC(gamma='scale') assert_no_warnings(clf.fit, X, y) assert_equal(clf._gamma, 2.) # X_std ~= 1 shouldn't raise warning, for when # gamma is not explicitly set. X, y = [[1, 2], [3, 2 * np.sqrt(6) / 3 + 2]], [0, 1] assert_no_warnings(clf.fit, X, y)
vortex-ape/scikit-learn
sklearn/svm/tests/test_svm.py
sklearn/manifold/spectral_embedding_.py
import numpy as np from scipy import sparse from numpy.testing import assert_equal from numpy.testing import assert_array_almost_equal from numpy.testing import assert_array_equal from sklearn.utils import check_random_state from sklearn.utils.testing import assert_less from sklearn.utils.testing import assert_warns from sklearn.utils.testing import assert_almost_equal from sklearn.utils.testing import assert_raises_regexp from sklearn.utils.testing import assert_raises from sklearn.linear_model import LinearRegression, RANSACRegressor, Lasso from sklearn.linear_model.ransac import _dynamic_max_trials from sklearn.exceptions import ConvergenceWarning # Generate coordinates of line X = np.arange(-200, 200) y = 0.2 * X + 20 data = np.column_stack([X, y]) # Add some faulty data rng = np.random.RandomState(1000) outliers = np.unique(rng.randint(len(X), size=200)) data[outliers, :] += 50 + rng.rand(len(outliers), 2) * 10 X = data[:, 0][:, np.newaxis] y = data[:, 1] def test_ransac_inliers_outliers(): base_estimator = LinearRegression() ransac_estimator = RANSACRegressor(base_estimator, min_samples=2, residual_threshold=5, random_state=0) # Estimate parameters of corrupted data ransac_estimator.fit(X, y) # Ground truth / reference inlier mask ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_ ).astype(np.bool_) ref_inlier_mask[outliers] = False assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask) def test_ransac_is_data_valid(): def is_data_valid(X, y): assert_equal(X.shape[0], 2) assert_equal(y.shape[0], 2) return False rng = np.random.RandomState(0) X = rng.rand(10, 2) y = rng.rand(10, 1) base_estimator = LinearRegression() ransac_estimator = RANSACRegressor(base_estimator, min_samples=2, residual_threshold=5, is_data_valid=is_data_valid, random_state=0) assert_raises(ValueError, ransac_estimator.fit, X, y) def test_ransac_is_model_valid(): def is_model_valid(estimator, X, y): assert_equal(X.shape[0], 2) assert_equal(y.shape[0], 2) return False 
base_estimator = LinearRegression() ransac_estimator = RANSACRegressor(base_estimator, min_samples=2, residual_threshold=5, is_model_valid=is_model_valid, random_state=0) assert_raises(ValueError, ransac_estimator.fit, X, y) def test_ransac_max_trials(): base_estimator = LinearRegression() ransac_estimator = RANSACRegressor(base_estimator, min_samples=2, residual_threshold=5, max_trials=0, random_state=0) assert_raises(ValueError, ransac_estimator.fit, X, y) # there is a 1e-9 chance it will take these many trials. No good reason # 1e-2 isn't enough, can still happen # 2 is the what ransac defines as min_samples = X.shape[1] + 1 max_trials = _dynamic_max_trials( len(X) - len(outliers), X.shape[0], 2, 1 - 1e-9) ransac_estimator = RANSACRegressor(base_estimator, min_samples=2) for i in range(50): ransac_estimator.set_params(min_samples=2, random_state=i) ransac_estimator.fit(X, y) assert_less(ransac_estimator.n_trials_, max_trials + 1) def test_ransac_stop_n_inliers(): base_estimator = LinearRegression() ransac_estimator = RANSACRegressor(base_estimator, min_samples=2, residual_threshold=5, stop_n_inliers=2, random_state=0) ransac_estimator.fit(X, y) assert_equal(ransac_estimator.n_trials_, 1) def test_ransac_stop_score(): base_estimator = LinearRegression() ransac_estimator = RANSACRegressor(base_estimator, min_samples=2, residual_threshold=5, stop_score=0, random_state=0) ransac_estimator.fit(X, y) assert_equal(ransac_estimator.n_trials_, 1) def test_ransac_score(): X = np.arange(100)[:, None] y = np.zeros((100, )) y[0] = 1 y[1] = 100 base_estimator = LinearRegression() ransac_estimator = RANSACRegressor(base_estimator, min_samples=2, residual_threshold=0.5, random_state=0) ransac_estimator.fit(X, y) assert_equal(ransac_estimator.score(X[2:], y[2:]), 1) assert_less(ransac_estimator.score(X[:2], y[:2]), 1) def test_ransac_predict(): X = np.arange(100)[:, None] y = np.zeros((100, )) y[0] = 1 y[1] = 100 base_estimator = LinearRegression() ransac_estimator = 
RANSACRegressor(base_estimator, min_samples=2, residual_threshold=0.5, random_state=0) ransac_estimator.fit(X, y) assert_equal(ransac_estimator.predict(X), np.zeros(100)) def test_ransac_resid_thresh_no_inliers(): # When residual_threshold=0.0 there are no inliers and a # ValueError with a message should be raised base_estimator = LinearRegression() ransac_estimator = RANSACRegressor(base_estimator, min_samples=2, residual_threshold=0.0, random_state=0, max_trials=5) msg = ("RANSAC could not find a valid consensus set") assert_raises_regexp(ValueError, msg, ransac_estimator.fit, X, y) assert_equal(ransac_estimator.n_skips_no_inliers_, 5) assert_equal(ransac_estimator.n_skips_invalid_data_, 0) assert_equal(ransac_estimator.n_skips_invalid_model_, 0) def test_ransac_no_valid_data(): def is_data_valid(X, y): return False base_estimator = LinearRegression() ransac_estimator = RANSACRegressor(base_estimator, is_data_valid=is_data_valid, max_trials=5) msg = ("RANSAC could not find a valid consensus set") assert_raises_regexp(ValueError, msg, ransac_estimator.fit, X, y) assert_equal(ransac_estimator.n_skips_no_inliers_, 0) assert_equal(ransac_estimator.n_skips_invalid_data_, 5) assert_equal(ransac_estimator.n_skips_invalid_model_, 0) def test_ransac_no_valid_model(): def is_model_valid(estimator, X, y): return False base_estimator = LinearRegression() ransac_estimator = RANSACRegressor(base_estimator, is_model_valid=is_model_valid, max_trials=5) msg = ("RANSAC could not find a valid consensus set") assert_raises_regexp(ValueError, msg, ransac_estimator.fit, X, y) assert_equal(ransac_estimator.n_skips_no_inliers_, 0) assert_equal(ransac_estimator.n_skips_invalid_data_, 0) assert_equal(ransac_estimator.n_skips_invalid_model_, 5) def test_ransac_exceed_max_skips(): def is_data_valid(X, y): return False base_estimator = LinearRegression() ransac_estimator = RANSACRegressor(base_estimator, is_data_valid=is_data_valid, max_trials=5, max_skips=3) msg = ("RANSAC skipped more 
iterations than `max_skips`") assert_raises_regexp(ValueError, msg, ransac_estimator.fit, X, y) assert_equal(ransac_estimator.n_skips_no_inliers_, 0) assert_equal(ransac_estimator.n_skips_invalid_data_, 4) assert_equal(ransac_estimator.n_skips_invalid_model_, 0) def test_ransac_warn_exceed_max_skips(): global cause_skip cause_skip = False def is_data_valid(X, y): global cause_skip if not cause_skip: cause_skip = True return True else: return False base_estimator = LinearRegression() ransac_estimator = RANSACRegressor(base_estimator, is_data_valid=is_data_valid, max_skips=3, max_trials=5) assert_warns(ConvergenceWarning, ransac_estimator.fit, X, y) assert_equal(ransac_estimator.n_skips_no_inliers_, 0) assert_equal(ransac_estimator.n_skips_invalid_data_, 4) assert_equal(ransac_estimator.n_skips_invalid_model_, 0) def test_ransac_sparse_coo(): X_sparse = sparse.coo_matrix(X) base_estimator = LinearRegression() ransac_estimator = RANSACRegressor(base_estimator, min_samples=2, residual_threshold=5, random_state=0) ransac_estimator.fit(X_sparse, y) ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_ ).astype(np.bool_) ref_inlier_mask[outliers] = False assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask) def test_ransac_sparse_csr(): X_sparse = sparse.csr_matrix(X) base_estimator = LinearRegression() ransac_estimator = RANSACRegressor(base_estimator, min_samples=2, residual_threshold=5, random_state=0) ransac_estimator.fit(X_sparse, y) ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_ ).astype(np.bool_) ref_inlier_mask[outliers] = False assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask) def test_ransac_sparse_csc(): X_sparse = sparse.csc_matrix(X) base_estimator = LinearRegression() ransac_estimator = RANSACRegressor(base_estimator, min_samples=2, residual_threshold=5, random_state=0) ransac_estimator.fit(X_sparse, y) ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_ ).astype(np.bool_) ref_inlier_mask[outliers] = False 
assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask) def test_ransac_none_estimator(): base_estimator = LinearRegression() ransac_estimator = RANSACRegressor(base_estimator, min_samples=2, residual_threshold=5, random_state=0) ransac_none_estimator = RANSACRegressor(None, 2, 5, random_state=0) ransac_estimator.fit(X, y) ransac_none_estimator.fit(X, y) assert_array_almost_equal(ransac_estimator.predict(X), ransac_none_estimator.predict(X)) def test_ransac_min_n_samples(): base_estimator = LinearRegression() ransac_estimator1 = RANSACRegressor(base_estimator, min_samples=2, residual_threshold=5, random_state=0) ransac_estimator2 = RANSACRegressor(base_estimator, min_samples=2. / X.shape[0], residual_threshold=5, random_state=0) ransac_estimator3 = RANSACRegressor(base_estimator, min_samples=-1, residual_threshold=5, random_state=0) ransac_estimator4 = RANSACRegressor(base_estimator, min_samples=5.2, residual_threshold=5, random_state=0) ransac_estimator5 = RANSACRegressor(base_estimator, min_samples=2.0, residual_threshold=5, random_state=0) ransac_estimator6 = RANSACRegressor(base_estimator, residual_threshold=5, random_state=0) ransac_estimator7 = RANSACRegressor(base_estimator, min_samples=X.shape[0] + 1, residual_threshold=5, random_state=0) ransac_estimator1.fit(X, y) ransac_estimator2.fit(X, y) ransac_estimator5.fit(X, y) ransac_estimator6.fit(X, y) assert_array_almost_equal(ransac_estimator1.predict(X), ransac_estimator2.predict(X)) assert_array_almost_equal(ransac_estimator1.predict(X), ransac_estimator5.predict(X)) assert_array_almost_equal(ransac_estimator1.predict(X), ransac_estimator6.predict(X)) assert_raises(ValueError, ransac_estimator3.fit, X, y) assert_raises(ValueError, ransac_estimator4.fit, X, y) assert_raises(ValueError, ransac_estimator7.fit, X, y) def test_ransac_multi_dimensional_targets(): base_estimator = LinearRegression() ransac_estimator = RANSACRegressor(base_estimator, min_samples=2, residual_threshold=5, random_state=0) # 3-D 
target values yyy = np.column_stack([y, y, y]) # Estimate parameters of corrupted data ransac_estimator.fit(X, yyy) # Ground truth / reference inlier mask ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_ ).astype(np.bool_) ref_inlier_mask[outliers] = False assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask) def test_ransac_residual_loss(): loss_multi1 = lambda y_true, y_pred: np.sum(np.abs(y_true - y_pred), axis=1) loss_multi2 = lambda y_true, y_pred: np.sum((y_true - y_pred) ** 2, axis=1) loss_mono = lambda y_true, y_pred : np.abs(y_true - y_pred) yyy = np.column_stack([y, y, y]) base_estimator = LinearRegression() ransac_estimator0 = RANSACRegressor(base_estimator, min_samples=2, residual_threshold=5, random_state=0) ransac_estimator1 = RANSACRegressor(base_estimator, min_samples=2, residual_threshold=5, random_state=0, loss=loss_multi1) ransac_estimator2 = RANSACRegressor(base_estimator, min_samples=2, residual_threshold=5, random_state=0, loss=loss_multi2) # multi-dimensional ransac_estimator0.fit(X, yyy) ransac_estimator1.fit(X, yyy) ransac_estimator2.fit(X, yyy) assert_array_almost_equal(ransac_estimator0.predict(X), ransac_estimator1.predict(X)) assert_array_almost_equal(ransac_estimator0.predict(X), ransac_estimator2.predict(X)) # one-dimensional ransac_estimator0.fit(X, y) ransac_estimator2.loss = loss_mono ransac_estimator2.fit(X, y) assert_array_almost_equal(ransac_estimator0.predict(X), ransac_estimator2.predict(X)) ransac_estimator3 = RANSACRegressor(base_estimator, min_samples=2, residual_threshold=5, random_state=0, loss="squared_loss") ransac_estimator3.fit(X, y) assert_array_almost_equal(ransac_estimator0.predict(X), ransac_estimator2.predict(X)) def test_ransac_default_residual_threshold(): base_estimator = LinearRegression() ransac_estimator = RANSACRegressor(base_estimator, min_samples=2, random_state=0) # Estimate parameters of corrupted data ransac_estimator.fit(X, y) # Ground truth / reference inlier mask ref_inlier_mask 
= np.ones_like(ransac_estimator.inlier_mask_ ).astype(np.bool_) ref_inlier_mask[outliers] = False assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask) def test_ransac_dynamic_max_trials(): # Numbers hand-calculated and confirmed on page 119 (Table 4.3) in # Hartley, R.~I. and Zisserman, A., 2004, # Multiple View Geometry in Computer Vision, Second Edition, # Cambridge University Press, ISBN: 0521540518 # e = 0%, min_samples = X assert_equal(_dynamic_max_trials(100, 100, 2, 0.99), 1) # e = 5%, min_samples = 2 assert_equal(_dynamic_max_trials(95, 100, 2, 0.99), 2) # e = 10%, min_samples = 2 assert_equal(_dynamic_max_trials(90, 100, 2, 0.99), 3) # e = 30%, min_samples = 2 assert_equal(_dynamic_max_trials(70, 100, 2, 0.99), 7) # e = 50%, min_samples = 2 assert_equal(_dynamic_max_trials(50, 100, 2, 0.99), 17) # e = 5%, min_samples = 8 assert_equal(_dynamic_max_trials(95, 100, 8, 0.99), 5) # e = 10%, min_samples = 8 assert_equal(_dynamic_max_trials(90, 100, 8, 0.99), 9) # e = 30%, min_samples = 8 assert_equal(_dynamic_max_trials(70, 100, 8, 0.99), 78) # e = 50%, min_samples = 8 assert_equal(_dynamic_max_trials(50, 100, 8, 0.99), 1177) # e = 0%, min_samples = 10 assert_equal(_dynamic_max_trials(1, 100, 10, 0), 0) assert_equal(_dynamic_max_trials(1, 100, 10, 1), float('inf')) base_estimator = LinearRegression() ransac_estimator = RANSACRegressor(base_estimator, min_samples=2, stop_probability=-0.1) assert_raises(ValueError, ransac_estimator.fit, X, y) ransac_estimator = RANSACRegressor(base_estimator, min_samples=2, stop_probability=1.1) assert_raises(ValueError, ransac_estimator.fit, X, y) def test_ransac_fit_sample_weight(): ransac_estimator = RANSACRegressor(random_state=0) n_samples = y.shape[0] weights = np.ones(n_samples) ransac_estimator.fit(X, y, weights) # sanity check assert_equal(ransac_estimator.inlier_mask_.shape[0], n_samples) ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_ ).astype(np.bool_) ref_inlier_mask[outliers] = False # check that 
mask is correct assert_array_equal(ransac_estimator.inlier_mask_, ref_inlier_mask) # check that fit(X) = fit([X1, X2, X3],sample_weight = [n1, n2, n3]) where # X = X1 repeated n1 times, X2 repeated n2 times and so forth random_state = check_random_state(0) X_ = random_state.randint(0, 200, [10, 1]) y_ = np.ndarray.flatten(0.2 * X_ + 2) sample_weight = random_state.randint(0, 10, 10) outlier_X = random_state.randint(0, 1000, [1, 1]) outlier_weight = random_state.randint(0, 10, 1) outlier_y = random_state.randint(-1000, 0, 1) X_flat = np.append(np.repeat(X_, sample_weight, axis=0), np.repeat(outlier_X, outlier_weight, axis=0), axis=0) y_flat = np.ndarray.flatten(np.append(np.repeat(y_, sample_weight, axis=0), np.repeat(outlier_y, outlier_weight, axis=0), axis=0)) ransac_estimator.fit(X_flat, y_flat) ref_coef_ = ransac_estimator.estimator_.coef_ sample_weight = np.append(sample_weight, outlier_weight) X_ = np.append(X_, outlier_X, axis=0) y_ = np.append(y_, outlier_y) ransac_estimator.fit(X_, y_, sample_weight) assert_almost_equal(ransac_estimator.estimator_.coef_, ref_coef_) # check that if base_estimator.fit doesn't support # sample_weight, raises error base_estimator = Lasso() ransac_estimator = RANSACRegressor(base_estimator) assert_raises(ValueError, ransac_estimator.fit, X, y, weights)
""" Testing for Support Vector Machine module (sklearn.svm) TODO: remove hard coded numerical results when possible """ import numpy as np import itertools import pytest from numpy.testing import assert_array_equal, assert_array_almost_equal from numpy.testing import assert_almost_equal from numpy.testing import assert_allclose from scipy import sparse from sklearn import svm, linear_model, datasets, metrics, base from sklearn.model_selection import train_test_split from sklearn.datasets import make_classification, make_blobs from sklearn.metrics import f1_score from sklearn.metrics.pairwise import rbf_kernel from sklearn.utils import check_random_state from sklearn.utils.testing import assert_equal, assert_true, assert_false from sklearn.utils.testing import assert_greater, assert_in, assert_less from sklearn.utils.testing import assert_raises_regexp, assert_warns from sklearn.utils.testing import assert_warns_message, assert_raise_message from sklearn.utils.testing import ignore_warnings, assert_raises from sklearn.utils.testing import assert_no_warnings from sklearn.exceptions import ConvergenceWarning from sklearn.exceptions import NotFittedError, UndefinedMetricWarning from sklearn.multiclass import OneVsRestClassifier from sklearn.externals import six # toy sample X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]] Y = [1, 1, 1, 2, 2, 2] T = [[-1, -1], [2, 2], [3, 2]] true_result = [1, 2, 2] # also load the iris dataset iris = datasets.load_iris() rng = check_random_state(42) perm = rng.permutation(iris.target.size) iris.data = iris.data[perm] iris.target = iris.target[perm] def test_libsvm_parameters(): # Test parameters on classes that make use of libsvm. 
clf = svm.SVC(kernel='linear').fit(X, Y) assert_array_equal(clf.dual_coef_, [[-0.25, .25]]) assert_array_equal(clf.support_, [1, 3]) assert_array_equal(clf.support_vectors_, (X[1], X[3])) assert_array_equal(clf.intercept_, [0.]) assert_array_equal(clf.predict(X), Y) def test_libsvm_iris(): # Check consistency on dataset iris. # shuffle the dataset so that labels are not ordered for k in ('linear', 'rbf'): clf = svm.SVC(gamma='scale', kernel=k).fit(iris.data, iris.target) assert_greater(np.mean(clf.predict(iris.data) == iris.target), 0.9) assert_true(hasattr(clf, "coef_") == (k == 'linear')) assert_array_equal(clf.classes_, np.sort(clf.classes_)) # check also the low-level API model = svm.libsvm.fit(iris.data, iris.target.astype(np.float64)) pred = svm.libsvm.predict(iris.data, *model) assert_greater(np.mean(pred == iris.target), .95) model = svm.libsvm.fit(iris.data, iris.target.astype(np.float64), kernel='linear') pred = svm.libsvm.predict(iris.data, *model, kernel='linear') assert_greater(np.mean(pred == iris.target), .95) pred = svm.libsvm.cross_validation(iris.data, iris.target.astype(np.float64), 5, kernel='linear', random_seed=0) assert_greater(np.mean(pred == iris.target), .95) # If random_seed >= 0, the libsvm rng is seeded (by calling `srand`), hence # we should get deterministic results (assuming that there is no other # thread calling this wrapper calling `srand` concurrently). pred2 = svm.libsvm.cross_validation(iris.data, iris.target.astype(np.float64), 5, kernel='linear', random_seed=0) assert_array_equal(pred, pred2) def test_precomputed(): # SVC with a precomputed kernel. # We test it with a toy dataset and with iris. 
clf = svm.SVC(kernel='precomputed') # Gram matrix for train data (square matrix) # (we use just a linear kernel) K = np.dot(X, np.array(X).T) clf.fit(K, Y) # Gram matrix for test data (rectangular matrix) KT = np.dot(T, np.array(X).T) pred = clf.predict(KT) assert_raises(ValueError, clf.predict, KT.T) assert_array_equal(clf.dual_coef_, [[-0.25, .25]]) assert_array_equal(clf.support_, [1, 3]) assert_array_equal(clf.intercept_, [0]) assert_array_almost_equal(clf.support_, [1, 3]) assert_array_equal(pred, true_result) # Gram matrix for test data but compute KT[i,j] # for support vectors j only. KT = np.zeros_like(KT) for i in range(len(T)): for j in clf.support_: KT[i, j] = np.dot(T[i], X[j]) pred = clf.predict(KT) assert_array_equal(pred, true_result) # same as before, but using a callable function instead of the kernel # matrix. kernel is just a linear kernel kfunc = lambda x, y: np.dot(x, y.T) clf = svm.SVC(gamma='scale', kernel=kfunc) clf.fit(X, Y) pred = clf.predict(T) assert_array_equal(clf.dual_coef_, [[-0.25, .25]]) assert_array_equal(clf.intercept_, [0]) assert_array_almost_equal(clf.support_, [1, 3]) assert_array_equal(pred, true_result) # test a precomputed kernel with the iris dataset # and check parameters against a linear SVC clf = svm.SVC(kernel='precomputed') clf2 = svm.SVC(kernel='linear') K = np.dot(iris.data, iris.data.T) clf.fit(K, iris.target) clf2.fit(iris.data, iris.target) pred = clf.predict(K) assert_array_almost_equal(clf.support_, clf2.support_) assert_array_almost_equal(clf.dual_coef_, clf2.dual_coef_) assert_array_almost_equal(clf.intercept_, clf2.intercept_) assert_almost_equal(np.mean(pred == iris.target), .99, decimal=2) # Gram matrix for test data but compute KT[i,j] # for support vectors j only. 
K = np.zeros_like(K) for i in range(len(iris.data)): for j in clf.support_: K[i, j] = np.dot(iris.data[i], iris.data[j]) pred = clf.predict(K) assert_almost_equal(np.mean(pred == iris.target), .99, decimal=2) clf = svm.SVC(gamma='scale', kernel=kfunc) clf.fit(iris.data, iris.target) assert_almost_equal(np.mean(pred == iris.target), .99, decimal=2) def test_svr(): # Test Support Vector Regression diabetes = datasets.load_diabetes() for clf in (svm.NuSVR(kernel='linear', nu=.4, C=1.0), svm.NuSVR(kernel='linear', nu=.4, C=10.), svm.SVR(kernel='linear', C=10.), svm.LinearSVR(C=10.), svm.LinearSVR(C=10.), ): clf.fit(diabetes.data, diabetes.target) assert_greater(clf.score(diabetes.data, diabetes.target), 0.02) # non-regression test; previously, BaseLibSVM would check that # len(np.unique(y)) < 2, which must only be done for SVC svm.SVR(gamma='scale').fit(diabetes.data, np.ones(len(diabetes.data))) svm.LinearSVR().fit(diabetes.data, np.ones(len(diabetes.data))) def test_linearsvr(): # check that SVR(kernel='linear') and LinearSVC() give # comparable results diabetes = datasets.load_diabetes() lsvr = svm.LinearSVR(C=1e3).fit(diabetes.data, diabetes.target) score1 = lsvr.score(diabetes.data, diabetes.target) svr = svm.SVR(kernel='linear', C=1e3).fit(diabetes.data, diabetes.target) score2 = svr.score(diabetes.data, diabetes.target) assert_allclose(np.linalg.norm(lsvr.coef_), np.linalg.norm(svr.coef_), 1, 0.0001) assert_almost_equal(score1, score2, 2) def test_linearsvr_fit_sampleweight(): # check correct result when sample_weight is 1 # check that SVR(kernel='linear') and LinearSVC() give # comparable results diabetes = datasets.load_diabetes() n_samples = len(diabetes.target) unit_weight = np.ones(n_samples) lsvr = svm.LinearSVR(C=1e3).fit(diabetes.data, diabetes.target, sample_weight=unit_weight) score1 = lsvr.score(diabetes.data, diabetes.target) lsvr_no_weight = svm.LinearSVR(C=1e3).fit(diabetes.data, diabetes.target) score2 = lsvr_no_weight.score(diabetes.data, 
diabetes.target) assert_allclose(np.linalg.norm(lsvr.coef_), np.linalg.norm(lsvr_no_weight.coef_), 1, 0.0001) assert_almost_equal(score1, score2, 2) # check that fit(X) = fit([X1, X2, X3],sample_weight = [n1, n2, n3]) where # X = X1 repeated n1 times, X2 repeated n2 times and so forth random_state = check_random_state(0) random_weight = random_state.randint(0, 10, n_samples) lsvr_unflat = svm.LinearSVR(C=1e3).fit(diabetes.data, diabetes.target, sample_weight=random_weight) score3 = lsvr_unflat.score(diabetes.data, diabetes.target, sample_weight=random_weight) X_flat = np.repeat(diabetes.data, random_weight, axis=0) y_flat = np.repeat(diabetes.target, random_weight, axis=0) lsvr_flat = svm.LinearSVR(C=1e3).fit(X_flat, y_flat) score4 = lsvr_flat.score(X_flat, y_flat) assert_almost_equal(score3, score4, 2) def test_svr_errors(): X = [[0.0], [1.0]] y = [0.0, 0.5] # Bad kernel clf = svm.SVR(gamma='scale', kernel=lambda x, y: np.array([[1.0]])) clf.fit(X, y) assert_raises(ValueError, clf.predict, X) def test_oneclass(): # Test OneClassSVM clf = svm.OneClassSVM(gamma='scale') clf.fit(X) pred = clf.predict(T) assert_array_equal(pred, [-1, -1, -1]) assert_equal(pred.dtype, np.dtype('intp')) assert_array_almost_equal(clf.intercept_, [-1.117], decimal=3) assert_array_almost_equal(clf.dual_coef_, [[0.681, 0.139, 0.68, 0.14, 0.68, 0.68]], decimal=3) assert_raises(AttributeError, lambda: clf.coef_) def test_oneclass_decision_function(): # Test OneClassSVM decision function clf = svm.OneClassSVM() rnd = check_random_state(2) # Generate train data X = 0.3 * rnd.randn(100, 2) X_train = np.r_[X + 2, X - 2] # Generate some regular novel observations X = 0.3 * rnd.randn(20, 2) X_test = np.r_[X + 2, X - 2] # Generate some abnormal novel observations X_outliers = rnd.uniform(low=-4, high=4, size=(20, 2)) # fit the model clf = svm.OneClassSVM(nu=0.1, kernel="rbf", gamma=0.1) clf.fit(X_train) # predict things y_pred_test = clf.predict(X_test) assert_greater(np.mean(y_pred_test == 1), .9) 
y_pred_outliers = clf.predict(X_outliers) assert_greater(np.mean(y_pred_outliers == -1), .9) dec_func_test = clf.decision_function(X_test) assert_array_equal((dec_func_test > 0).ravel(), y_pred_test == 1) dec_func_outliers = clf.decision_function(X_outliers) assert_array_equal((dec_func_outliers > 0).ravel(), y_pred_outliers == 1) def test_oneclass_score_samples(): X_train = [[1, 1], [1, 2], [2, 1]] clf = svm.OneClassSVM(gamma=1).fit(X_train) assert_array_equal(clf.score_samples([[2., 2.]]), clf.decision_function([[2., 2.]]) + clf.offset_) def test_tweak_params(): # Make sure some tweaking of parameters works. # We change clf.dual_coef_ at run time and expect .predict() to change # accordingly. Notice that this is not trivial since it involves a lot # of C/Python copying in the libsvm bindings. # The success of this test ensures that the mapping between libsvm and # the python classifier is complete. clf = svm.SVC(kernel='linear', C=1.0) clf.fit(X, Y) assert_array_equal(clf.dual_coef_, [[-.25, .25]]) assert_array_equal(clf.predict([[-.1, -.1]]), [1]) clf._dual_coef_ = np.array([[.0, 1.]]) assert_array_equal(clf.predict([[-.1, -.1]]), [2]) def test_probability(): # Predict probabilities using SVC # This uses cross validation, so we use a slightly bigger testing set. 
    # NOTE(review): this loop is the tail of a probability test whose `def`
    # lies above this chunk — kept verbatim.
    for clf in (svm.SVC(gamma='scale', probability=True, random_state=0,
                        C=1.0),
                svm.NuSVC(gamma='scale', probability=True, random_state=0)):
        clf.fit(iris.data, iris.target)
        prob_predict = clf.predict_proba(iris.data)
        # class probabilities sum to one for every sample
        assert_array_almost_equal(
            np.sum(prob_predict, 1), np.ones(iris.data.shape[0]))
        # argmax of the probabilities agrees with predict most of the time
        assert_true(np.mean(np.argmax(prob_predict, 1)
                            == clf.predict(iris.data)) > 0.9)
        # log-probabilities are consistent with probabilities
        assert_almost_equal(clf.predict_proba(iris.data),
                            np.exp(clf.predict_log_proba(iris.data)), 8)


def test_decision_function():
    # Test decision_function
    # Sanity check, test that decision_function implemented in python
    # returns the same as the one in libsvm
    # multi class:
    clf = svm.SVC(kernel='linear', C=0.1,
                  decision_function_shape='ovo').fit(iris.data, iris.target)

    dec = np.dot(iris.data, clf.coef_.T) + clf.intercept_
    assert_array_almost_equal(dec, clf.decision_function(iris.data))

    # binary:
    clf.fit(X, Y)
    dec = np.dot(X, clf.coef_.T) + clf.intercept_
    prediction = clf.predict(X)
    assert_array_almost_equal(dec.ravel(), clf.decision_function(X))
    assert_array_almost_equal(
        prediction,
        clf.classes_[(clf.decision_function(X) > 0).astype(np.int)])
    expected = np.array([-1., -0.66, -1., 0.66, 1., 1.])
    assert_array_almost_equal(clf.decision_function(X), expected, 2)

    # kernel binary:
    clf = svm.SVC(kernel='rbf', gamma=1, decision_function_shape='ovo')
    clf.fit(X, Y)
    rbfs = rbf_kernel(X, clf.support_vectors_, gamma=clf.gamma)
    dec = np.dot(rbfs, clf.dual_coef_.T) + clf.intercept_
    assert_array_almost_equal(dec.ravel(), clf.decision_function(X))


def test_decision_function_shape():
    # check that decision_function_shape='ovr' gives
    # correct shape and is consistent with predict
    clf = svm.SVC(kernel='linear', C=0.1,
                  decision_function_shape='ovr').fit(iris.data, iris.target)
    dec = clf.decision_function(iris.data)
    assert_equal(dec.shape, (len(iris.data), 3))
    assert_array_equal(clf.predict(iris.data), np.argmax(dec, axis=1))

    # with five classes:
    X, y = make_blobs(n_samples=80, centers=5, random_state=0)
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)

    clf = svm.SVC(kernel='linear', C=0.1,
                  decision_function_shape='ovr').fit(X_train, y_train)
    dec = clf.decision_function(X_test)
    assert_equal(dec.shape, (len(X_test), 5))
    assert_array_equal(clf.predict(X_test), np.argmax(dec, axis=1))

    # check shape with decision_function_shape='ovo':
    # n_classes * (n_classes - 1) / 2 pairwise columns (5 classes -> 10)
    clf = svm.SVC(kernel='linear', C=0.1,
                  decision_function_shape='ovo').fit(X_train, y_train)
    dec = clf.decision_function(X_train)
    assert_equal(dec.shape, (len(X_train), 10))


def test_svr_predict():
    # Test SVR's decision_function
    # Sanity check, test that predict implemented in python
    # returns the same as the one in libsvm

    X = iris.data
    y = iris.target

    # linear kernel
    reg = svm.SVR(kernel='linear', C=0.1).fit(X, y)
    dec = np.dot(X, reg.coef_.T) + reg.intercept_
    assert_array_almost_equal(dec.ravel(), reg.predict(X).ravel())

    # rbf kernel
    reg = svm.SVR(kernel='rbf', gamma=1).fit(X, y)
    rbfs = rbf_kernel(X, reg.support_vectors_, gamma=reg.gamma)
    dec = np.dot(rbfs, reg.dual_coef_.T) + reg.intercept_
    assert_array_almost_equal(dec.ravel(), reg.predict(X).ravel())


@pytest.mark.filterwarnings('ignore: Default solver will be changed')  # 0.22
@pytest.mark.filterwarnings('ignore: Default multi_class will')  # 0.22
def test_weight():
    # Test class weights
    clf = svm.SVC(gamma='scale', class_weight={1: 0.1})
    # we give a small weights to class 1
    clf.fit(X, Y)
    # so all predicted values belong to class 2
    assert_array_almost_equal(clf.predict(X), [2] * 6)

    X_, y_ = make_classification(n_samples=200, n_features=10,
                                 weights=[0.833, 0.167], random_state=2)

    for clf in (linear_model.LogisticRegression(),
                svm.LinearSVC(random_state=0), svm.SVC(gamma="scale")):
        clf.set_params(class_weight={0: .1, 1: 10})
        clf.fit(X_[:100], y_[:100])
        y_pred = clf.predict(X_[100:])
        assert_true(f1_score(y_[100:], y_pred) > .3)


def test_sample_weights():
    # Test weights on individual samples
    # TODO: check on NuSVR, OneClass, etc.
    clf = svm.SVC(gamma="scale")
    clf.fit(X, Y)
    assert_array_equal(clf.predict([X[2]]), [1.])

    # heavy weight on the second class flips the prediction for X[2]
    sample_weight = [.1] * 3 + [10] * 3
    clf.fit(X, Y, sample_weight=sample_weight)
    assert_array_equal(clf.predict([X[2]]), [2.])

    # test that rescaling all samples is the same as changing C
    clf = svm.SVC(gamma="scale")
    clf.fit(X, Y)
    dual_coef_no_weight = clf.dual_coef_
    clf.set_params(C=100)
    clf.fit(X, Y, sample_weight=np.repeat(0.01, len(X)))
    assert_array_almost_equal(dual_coef_no_weight, clf.dual_coef_)


@pytest.mark.filterwarnings('ignore: Default solver will be changed')  # 0.22
@pytest.mark.filterwarnings('ignore: Default multi_class will')  # 0.22
@ignore_warnings(category=UndefinedMetricWarning)
def test_auto_weight():
    # Test class weights for imbalanced data
    from sklearn.linear_model import LogisticRegression
    # We take as dataset the two-dimensional projection of iris so
    # that it is not separable and remove half of predictors from
    # class 1.
    # We add one to the targets as a non-regression test:
    # class_weight="balanced"
    # used to work only when the labels were a range [0..K).
    from sklearn.utils import compute_class_weight
    X, y = iris.data[:, :2], iris.target + 1
    unbalanced = np.delete(np.arange(y.size), np.where(y > 2)[0][::2])

    classes = np.unique(y[unbalanced])
    class_weights = compute_class_weight('balanced', classes, y[unbalanced])
    assert_true(np.argmax(class_weights) == 2)

    for clf in (svm.SVC(kernel='linear'), svm.LinearSVC(random_state=0),
                LogisticRegression()):
        # check that score is better when class='balanced' is set.
        y_pred = clf.fit(X[unbalanced], y[unbalanced]).predict(X)
        clf.set_params(class_weight='balanced')
        y_pred_balanced = clf.fit(X[unbalanced], y[unbalanced],).predict(X)
        assert_true(metrics.f1_score(y, y_pred, average='macro')
                    <= metrics.f1_score(y, y_pred_balanced,
                                        average='macro'))


def test_bad_input():
    # Test that it gives proper exception on deficient input
    # impossible value of C
    assert_raises(ValueError, svm.SVC(gamma='scale', C=-1).fit, X, Y)

    # impossible value of nu
    clf = svm.NuSVC(gamma='scale', nu=0.0)
    assert_raises(ValueError, clf.fit, X, Y)

    Y2 = Y[:-1]  # wrong dimensions for labels
    assert_raises(ValueError, clf.fit, X, Y2)

    # Test with arrays that are non-contiguous.
    for clf in (svm.SVC(gamma="scale"), svm.LinearSVC(random_state=0)):
        Xf = np.asfortranarray(X)
        assert_false(Xf.flags['C_CONTIGUOUS'])
        yf = np.ascontiguousarray(np.tile(Y, (2, 1)).T)
        yf = yf[:, -1]
        assert_false(yf.flags['F_CONTIGUOUS'])
        assert_false(yf.flags['C_CONTIGUOUS'])
        clf.fit(Xf, yf)
        assert_array_equal(clf.predict(T), true_result)

    # error for precomputed kernels
    clf = svm.SVC(kernel='precomputed')
    assert_raises(ValueError, clf.fit, X, Y)

    # sample_weight bad dimensions
    clf = svm.SVC(gamma="scale")
    assert_raises(ValueError, clf.fit, X, Y, sample_weight=range(len(X) - 1))

    # predict with sparse input when trained with dense
    clf = svm.SVC(gamma="scale").fit(X, Y)
    assert_raises(ValueError, clf.predict, sparse.lil_matrix(X))

    Xt = np.array(X).T
    clf.fit(np.dot(X, Xt), Y)
    assert_raises(ValueError, clf.predict, X)

    clf = svm.SVC(gamma="scale")
    clf.fit(X, Y)
    assert_raises(ValueError, clf.predict, Xt)


def test_unicode_kernel():
    # Test that a unicode kernel name does not cause a TypeError
    if six.PY2:
        # Test unicode (same as str on python3)
        clf = svm.SVC(kernel=u'linear', probability=True)
        clf.fit(X, Y)
        clf.predict_proba(T)
        svm.libsvm.cross_validation(iris.data,
                                    iris.target.astype(np.float64), 5,
                                    kernel=u'linear',
                                    random_seed=0)

    # Test default behavior on both versions
    clf = svm.SVC(gamma='scale', kernel='linear', probability=True)
    clf.fit(X, Y)
    clf.predict_proba(T)
    svm.libsvm.cross_validation(iris.data,
                                iris.target.astype(np.float64), 5,
                                kernel='linear', random_seed=0)


def test_sparse_precomputed():
    # A sparse precomputed Gram matrix must raise a descriptive TypeError.
    clf = svm.SVC(kernel='precomputed')
    sparse_gram = sparse.csr_matrix([[1, 0], [0, 1]])
    try:
        clf.fit(sparse_gram, [0, 1])
        assert not "reached"
    except TypeError as e:
        assert_in("Sparse precomputed", str(e))


def test_linearsvc_parameters():
    # Test possible parameter combinations in LinearSVC
    # Generate list of possible parameter combinations
    losses = ['hinge', 'squared_hinge', 'logistic_regression', 'foo']
    penalties, duals = ['l1', 'l2', 'bar'], [True, False]

    X, y = make_classification(n_samples=5, n_features=5)

    for loss, penalty, dual in itertools.product(losses, penalties, duals):
        clf = svm.LinearSVC(penalty=penalty, loss=loss, dual=dual)
        if ((loss, penalty) == ('hinge', 'l1') or
                (loss, penalty, dual) == ('hinge', 'l2', False) or
                (penalty, dual) == ('l1', True) or
                loss == 'foo' or penalty == 'bar'):

            assert_raises_regexp(ValueError,
                                 "Unsupported set of arguments.*penalty='%s.*"
                                 "loss='%s.*dual=%s"
                                 % (penalty, loss, dual),
                                 clf.fit, X, y)
        else:
            clf.fit(X, y)

    # Incorrect loss value - test if explicit error message is raised
    assert_raises_regexp(ValueError, ".*loss='l3' is not supported.*",
                         svm.LinearSVC(loss="l3").fit, X, y)


# FIXME remove in 1.0
def test_linearsvx_loss_penalty_deprecations():
    X, y = [[0.0], [1.0]], [0, 1]

    msg = ("loss='%s' has been deprecated in favor of "
           "loss='%s' as of 0.16. Backward compatibility"
           " for the %s will be removed in %s")

    # LinearSVC
    # loss l1 --> hinge
    assert_warns_message(DeprecationWarning,
                         msg % ("l1", "hinge", "loss='l1'", "1.0"),
                         svm.LinearSVC(loss="l1").fit, X, y)

    # loss l2 --> squared_hinge
    assert_warns_message(DeprecationWarning,
                         msg % ("l2", "squared_hinge", "loss='l2'", "1.0"),
                         svm.LinearSVC(loss="l2").fit, X, y)

    # LinearSVR
    # loss l1 --> epsilon_insensitive
    assert_warns_message(DeprecationWarning,
                         msg % ("l1", "epsilon_insensitive", "loss='l1'",
                                "1.0"),
                         svm.LinearSVR(loss="l1").fit, X, y)

    # loss l2 --> squared_epsilon_insensitive
    assert_warns_message(DeprecationWarning,
                         msg % ("l2", "squared_epsilon_insensitive",
                                "loss='l2'", "1.0"),
                         svm.LinearSVR(loss="l2").fit, X, y)


def test_linear_svx_uppercase_loss_penality_raises_error():
    # Check if Upper case notation raises error at _fit_liblinear
    # which is called by fit
    X, y = [[0.0], [1.0]], [0, 1]

    assert_raise_message(ValueError, "loss='SQuared_hinge' is not supported",
                         svm.LinearSVC(loss="SQuared_hinge").fit, X, y)

    assert_raise_message(ValueError,
                         ("The combination of penalty='L2'"
                          " and loss='squared_hinge' is not supported"),
                         svm.LinearSVC(penalty="L2").fit, X, y)


def test_linearsvc():
    # Test basic routines using LinearSVC
    clf = svm.LinearSVC(random_state=0).fit(X, Y)

    # by default should have intercept
    assert_true(clf.fit_intercept)

    assert_array_equal(clf.predict(T), true_result)
    assert_array_almost_equal(clf.intercept_, [0], decimal=3)

    # the same with l1 penalty
    clf = svm.LinearSVC(penalty='l1', loss='squared_hinge', dual=False,
                        random_state=0).fit(X, Y)
    assert_array_equal(clf.predict(T), true_result)

    # l2 penalty with dual formulation
    clf = svm.LinearSVC(penalty='l2', dual=True, random_state=0).fit(X, Y)
    assert_array_equal(clf.predict(T), true_result)

    # l2 penalty, l1 loss
    clf = svm.LinearSVC(penalty='l2', loss='hinge', dual=True, random_state=0)
    clf.fit(X, Y)
    assert_array_equal(clf.predict(T), true_result)

    # test also decision function
    dec = clf.decision_function(T)
    res = (dec > 0).astype(np.int) + 1
    assert_array_equal(res, true_result)


def test_linearsvc_crammer_singer():
    # Test LinearSVC with crammer_singer multi-class svm
    ovr_clf = svm.LinearSVC(random_state=0).fit(iris.data, iris.target)
    cs_clf = svm.LinearSVC(multi_class='crammer_singer', random_state=0)
    cs_clf.fit(iris.data, iris.target)

    # similar prediction for ovr and crammer-singer:
    assert_true((ovr_clf.predict(iris.data) ==
                 cs_clf.predict(iris.data)).mean() > .9)

    # classifiers shouldn't be the same
    assert_true((ovr_clf.coef_ != cs_clf.coef_).all())

    # test decision function
    assert_array_equal(cs_clf.predict(iris.data),
                       np.argmax(cs_clf.decision_function(iris.data), axis=1))
    dec_func = np.dot(iris.data, cs_clf.coef_.T) + cs_clf.intercept_
    assert_array_almost_equal(dec_func, cs_clf.decision_function(iris.data))


def test_linearsvc_fit_sampleweight():
    # check correct result when sample_weight is 1
    n_samples = len(X)
    unit_weight = np.ones(n_samples)
    clf = svm.LinearSVC(random_state=0).fit(X, Y)
    clf_unitweight = svm.LinearSVC(random_state=0).\
        fit(X, Y, sample_weight=unit_weight)

    # check if same as sample_weight=None
    assert_array_equal(clf_unitweight.predict(T), clf.predict(T))
    assert_allclose(clf.coef_, clf_unitweight.coef_, 1, 0.0001)

    # check that fit(X) = fit([X1, X2, X3],sample_weight = [n1, n2, n3]) where
    # X = X1 repeated n1 times, X2 repeated n2 times and so forth
    random_state = check_random_state(0)
    random_weight = random_state.randint(0, 10, n_samples)
    lsvc_unflat = svm.LinearSVC(random_state=0).\
        fit(X, Y, sample_weight=random_weight)
    pred1 = lsvc_unflat.predict(T)

    X_flat = np.repeat(X, random_weight, axis=0)
    y_flat = np.repeat(Y, random_weight, axis=0)
    lsvc_flat = svm.LinearSVC(random_state=0).fit(X_flat, y_flat)
    pred2 = lsvc_flat.predict(T)

    assert_array_equal(pred1, pred2)
    assert_allclose(lsvc_unflat.coef_, lsvc_flat.coef_, 1, 0.0001)


def test_crammer_singer_binary():
    # Test Crammer-Singer formulation in the binary case
    X, y = make_classification(n_classes=2, random_state=0)

    for fit_intercept in (True, False):
        acc = svm.LinearSVC(fit_intercept=fit_intercept,
                            multi_class="crammer_singer",
                            random_state=0).fit(X, y).score(X, y)
        assert_greater(acc, 0.9)


def test_linearsvc_iris():
    # Test that LinearSVC gives plausible predictions on the iris dataset
    # Also, test symbolic class names (classes_).
    target = iris.target_names[iris.target]
    clf = svm.LinearSVC(random_state=0).fit(iris.data, target)
    assert_equal(set(clf.classes_), set(iris.target_names))
    assert_greater(np.mean(clf.predict(iris.data) == target), 0.8)

    dec = clf.decision_function(iris.data)
    pred = iris.target_names[np.argmax(dec, 1)]
    assert_array_equal(pred, clf.predict(iris.data))


def test_dense_liblinear_intercept_handling(classifier=svm.LinearSVC):
    # Test that dense liblinear honours intercept_scaling param
    X = [[2, 1], [3, 1], [1, 3], [2, 3]]
    y = [0, 0, 1, 1]
    clf = classifier(fit_intercept=True, penalty='l1', loss='squared_hinge',
                     dual=False, C=4, tol=1e-7, random_state=0)
    assert_true(clf.intercept_scaling == 1, clf.intercept_scaling)
    assert_true(clf.fit_intercept)

    # when intercept_scaling is low the intercept value is highly "penalized"
    # by regularization
    clf.intercept_scaling = 1
    clf.fit(X, y)
    assert_almost_equal(clf.intercept_, 0, decimal=5)

    # when intercept_scaling is sufficiently high, the intercept value
    # is not affected by regularization
    clf.intercept_scaling = 100
    clf.fit(X, y)
    intercept1 = clf.intercept_
    assert_less(intercept1, -1)

    # when intercept_scaling is sufficiently high, the intercept value
    # doesn't depend on intercept_scaling value
    clf.intercept_scaling = 1000
    clf.fit(X, y)
    intercept2 = clf.intercept_
    assert_array_almost_equal(intercept1, intercept2, decimal=2)


def test_liblinear_set_coef():
    # multi-class case
    clf = svm.LinearSVC().fit(iris.data, iris.target)
    values = clf.decision_function(iris.data)
    clf.coef_ = clf.coef_.copy()
    clf.intercept_ = clf.intercept_.copy()
    values2 = clf.decision_function(iris.data)
    assert_array_almost_equal(values, values2)

    # binary-class case
    X = [[2, 1], [3, 1], [1, 3], [2, 3]]
    y = [0, 0, 1, 1]

    clf = svm.LinearSVC().fit(X, y)
    values = clf.decision_function(X)
    clf.coef_ = clf.coef_.copy()
    clf.intercept_ = clf.intercept_.copy()
    values2 = clf.decision_function(X)
    assert_array_equal(values, values2)


def test_immutable_coef_property():
    # Check that primal coef modifications are not silently ignored
    svms = [
        svm.SVC(kernel='linear').fit(iris.data, iris.target),
        svm.NuSVC(kernel='linear').fit(iris.data, iris.target),
        svm.SVR(kernel='linear').fit(iris.data, iris.target),
        svm.NuSVR(kernel='linear').fit(iris.data, iris.target),
        svm.OneClassSVM(kernel='linear').fit(iris.data),
    ]
    for clf in svms:
        assert_raises(AttributeError, clf.__setattr__, 'coef_', np.arange(3))
        assert_raises((RuntimeError, ValueError),
                      clf.coef_.__setitem__, (0, 0), 0)


def test_linearsvc_verbose():
    # stdout: redirect
    import os
    stdout = os.dup(1)  # save original stdout
    os.dup2(os.pipe()[1], 1)  # replace it

    # actual call
    clf = svm.LinearSVC(verbose=1)
    clf.fit(X, Y)

    # stdout: restore
    os.dup2(stdout, 1)  # restore original stdout


def test_svc_clone_with_callable_kernel():
    # create SVM with callable linear kernel, check that results are the same
    # as with built-in linear kernel
    svm_callable = svm.SVC(gamma='scale', kernel=lambda x, y: np.dot(x, y.T),
                           probability=True, random_state=0,
                           decision_function_shape='ovr')
    # clone for checking clonability with lambda functions..
    svm_cloned = base.clone(svm_callable)
    svm_cloned.fit(iris.data, iris.target)

    svm_builtin = svm.SVC(kernel='linear', probability=True, random_state=0,
                          decision_function_shape='ovr')
    svm_builtin.fit(iris.data, iris.target)

    assert_array_almost_equal(svm_cloned.dual_coef_, svm_builtin.dual_coef_)
    assert_array_almost_equal(svm_cloned.intercept_, svm_builtin.intercept_)
    assert_array_equal(svm_cloned.predict(iris.data),
                       svm_builtin.predict(iris.data))

    assert_array_almost_equal(svm_cloned.predict_proba(iris.data),
                              svm_builtin.predict_proba(iris.data),
                              decimal=4)
    assert_array_almost_equal(svm_cloned.decision_function(iris.data),
                              svm_builtin.decision_function(iris.data))


def test_svc_bad_kernel():
    # A callable kernel that returns the wrong shape must raise ValueError.
    svc = svm.SVC(gamma='scale', kernel=lambda x, y: x)
    assert_raises(ValueError, svc.fit, X, Y)


def test_timeout():
    # max_iter=1 must trigger a ConvergenceWarning.
    a = svm.SVC(kernel=lambda x, y: np.dot(x, y.T), probability=True,
                random_state=0, max_iter=1)
    assert_warns(ConvergenceWarning, a.fit, X, Y)


def test_unfitted():
    X = "foo!"  # input validation not required when SVM not fitted

    clf = svm.SVC(gamma="scale")
    assert_raises_regexp(Exception, r".*\bSVC\b.*\bnot\b.*\bfitted\b",
                         clf.predict, X)

    clf = svm.NuSVR(gamma='scale')
    assert_raises_regexp(Exception, r".*\bNuSVR\b.*\bnot\b.*\bfitted\b",
                         clf.predict, X)


# ignore convergence warnings from max_iter=1
@ignore_warnings
def test_consistent_proba():
    # Platt-scaling probabilities must be reproducible for a fixed seed.
    a = svm.SVC(probability=True, max_iter=1, random_state=0)
    proba_1 = a.fit(X, Y).predict_proba(X)
    a = svm.SVC(probability=True, max_iter=1, random_state=0)
    proba_2 = a.fit(X, Y).predict_proba(X)
    assert_array_almost_equal(proba_1, proba_2)


def test_linear_svm_convergence_warnings():
    # Test that warnings are raised if model does not converge
    lsvc = svm.LinearSVC(random_state=0, max_iter=2)
    assert_warns(ConvergenceWarning, lsvc.fit, X, Y)
    assert_equal(lsvc.n_iter_, 2)

    lsvr = svm.LinearSVR(random_state=0, max_iter=2)
    assert_warns(ConvergenceWarning, lsvr.fit, iris.data, iris.target)
    assert_equal(lsvr.n_iter_, 2)


def test_svr_coef_sign():
    # Test that SVR(kernel="linear") has coef_ with the right sign.
    # Non-regression test for #2933.
    X = np.random.RandomState(21).randn(10, 3)
    y = np.random.RandomState(12).randn(10)

    for svr in [svm.SVR(kernel='linear'), svm.NuSVR(kernel='linear'),
                svm.LinearSVR()]:
        svr.fit(X, y)
        assert_array_almost_equal(
            svr.predict(X), np.dot(X, svr.coef_.ravel()) + svr.intercept_)


def test_linear_svc_intercept_scaling():
    # Test that the right error message is thrown when intercept_scaling <= 0
    for i in [-1, 0]:
        lsvc = svm.LinearSVC(intercept_scaling=i)
        msg = ('Intercept scaling is %r but needs to be greater than 0.'
               ' To disable fitting an intercept,'
               ' set fit_intercept=False.' % lsvc.intercept_scaling)
        assert_raise_message(ValueError, msg, lsvc.fit, X, Y)


def test_lsvc_intercept_scaling_zero():
    # Test that intercept_scaling is ignored when fit_intercept is False
    lsvc = svm.LinearSVC(fit_intercept=False)
    lsvc.fit(X, Y)
    assert_equal(lsvc.intercept_, 0.)


def test_hasattr_predict_proba():
    # Method must be (un)available before or after fit, switched by
    # `probability` param
    G = svm.SVC(gamma='scale', probability=True)
    assert_true(hasattr(G, 'predict_proba'))
    G.fit(iris.data, iris.target)
    assert_true(hasattr(G, 'predict_proba'))

    G = svm.SVC(gamma='scale', probability=False)
    assert_false(hasattr(G, 'predict_proba'))
    G.fit(iris.data, iris.target)
    assert_false(hasattr(G, 'predict_proba'))

    # Switching to `probability=True` after fitting should make
    # predict_proba available, but calling it must not work:
    G.probability = True
    assert_true(hasattr(G, 'predict_proba'))
    msg = "predict_proba is not available when fitted with probability=False"
    assert_raise_message(NotFittedError, msg, G.predict_proba, iris.data)


def test_decision_function_shape_two_class():
    # predict must return one label per sample under OvR wrapping.
    for n_classes in [2, 3]:
        X, y = make_blobs(centers=n_classes, random_state=0)
        for estimator in [svm.SVC, svm.NuSVC]:
            clf = OneVsRestClassifier(
                estimator(gamma='scale',
                          decision_function_shape="ovr")).fit(X, y)
            assert_equal(len(clf.predict(X)), len(y))


def test_ovr_decision_function():
    # One point from each quadrant represents one class
    X_train = np.array([[1, 1], [-1, 1], [-1, -1], [1, -1]])
    y_train = [0, 1, 2, 3]

    # First point is closer to the decision boundaries than the second point
    base_points = np.array([[5, 5], [10, 10]])

    # For all the quadrants (classes)
    X_test = np.vstack((
        base_points * [1, 1],    # Q1
        base_points * [-1, 1],   # Q2
        base_points * [-1, -1],  # Q3
        base_points * [1, -1]    # Q4
    ))
    y_test = [0] * 2 + [1] * 2 + [2] * 2 + [3] * 2

    clf = svm.SVC(kernel='linear', decision_function_shape='ovr')
    clf.fit(X_train, y_train)

    y_pred = clf.predict(X_test)

    # Test if the prediction is the same as y
    assert_array_equal(y_pred, y_test)

    deci_val = clf.decision_function(X_test)

    # Assert that the predicted class has the maximum value
    assert_array_equal(np.argmax(deci_val, axis=1), y_pred)

    # Get decision value at test points for the predicted class
    pred_class_deci_val = deci_val[range(8), y_pred].reshape((4, 2))

    # Assert pred_class_deci_val > 0 here
    assert_greater(np.min(pred_class_deci_val), 0.0)

    # Test if the first point has lower decision value on every quadrant
    # compared to the second point
    assert_true(np.all(pred_class_deci_val[:, 0] < pred_class_deci_val[:, 1]))


def test_gamma_auto():
    # Omitting gamma must warn about the 'auto' -> 'scale' default change,
    # except for kernels that do not use gamma.
    X, y = [[0.0, 1.2], [1.0, 1.3]], [0, 1]

    msg = ("The default value of gamma will change from 'auto' to 'scale' in "
           "version 0.22 to account better for unscaled features. Set gamma "
           "explicitly to 'auto' or 'scale' to avoid this warning.")

    assert_warns_message(FutureWarning, msg,
                         svm.SVC().fit, X, y)
    assert_no_warnings(svm.SVC(kernel='linear').fit, X, y)
    assert_no_warnings(svm.SVC(kernel='precomputed').fit, X, y)


def test_gamma_scale():
    # gamma='scale' must resolve to 1 / (n_features * X.std()) silently.
    X, y = [[0.], [1.]], [0, 1]

    clf = svm.SVC(gamma='scale')
    assert_no_warnings(clf.fit, X, y)
    assert_equal(clf._gamma, 2.)

    # X_std ~= 1 shouldn't raise warning, for when
    # gamma is not explicitly set.
    X, y = [[1, 2], [3, 2 * np.sqrt(6) / 3 + 2]], [0, 1]
    assert_no_warnings(clf.fit, X, y)
vortex-ape/scikit-learn
sklearn/svm/tests/test_svm.py
sklearn/linear_model/tests/test_ransac.py
# -*- coding: utf-8 -*-
"""
DBSCAN: Density-Based Spatial Clustering of Applications with Noise
"""

# Author: Robert Layton <robertlayton@gmail.com>
#         Joel Nothman <joel.nothman@gmail.com>
#         Lars Buitinck
#
# License: BSD 3 clause

import numpy as np
from scipy import sparse

from ..base import BaseEstimator, ClusterMixin
from ..utils import check_array, check_consistent_length
from ..neighbors import NearestNeighbors

from ._dbscan_inner import dbscan_inner


def dbscan(X, eps=0.5, min_samples=5, metric='minkowski', metric_params=None,
           algorithm='auto', leaf_size=30, p=2, sample_weight=None,
           n_jobs=None):
    """Perform DBSCAN clustering from vector array or distance matrix.

    Read more in the :ref:`User Guide <dbscan>`.

    Parameters
    ----------
    X : array or sparse (CSR) matrix of shape (n_samples, n_features), or \
            array of shape (n_samples, n_samples)
        A feature array, or array of distances between samples if
        ``metric='precomputed'``.

    eps : float, optional
        The maximum distance between two samples for them to be considered
        as in the same neighborhood.

    min_samples : int, optional
        The number of samples (or total weight) in a neighborhood for a point
        to be considered as a core point. This includes the point itself.

    metric : string, or callable
        The metric to use when calculating distance between instances in a
        feature array. If metric is a string or callable, it must be one of
        the options allowed by :func:`sklearn.metrics.pairwise_distances` for
        its metric parameter.
        If metric is "precomputed", X is assumed to be a distance matrix and
        must be square. X may be a sparse matrix, in which case only "nonzero"
        elements may be considered neighbors for DBSCAN.

    metric_params : dict, optional
        Additional keyword arguments for the metric function.

        .. versionadded:: 0.19

    algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
        The algorithm to be used by the NearestNeighbors module
        to compute pointwise distances and find nearest neighbors.
        See NearestNeighbors module documentation for details.

    leaf_size : int, optional (default = 30)
        Leaf size passed to BallTree or cKDTree. This can affect the speed
        of the construction and query, as well as the memory required
        to store the tree. The optimal value depends
        on the nature of the problem.

    p : float, optional
        The power of the Minkowski metric to be used to calculate distance
        between points.

    sample_weight : array, shape (n_samples,), optional
        Weight of each sample, such that a sample with a weight of at least
        ``min_samples`` is by itself a core sample; a sample with negative
        weight may inhibit its eps-neighbor from being core.
        Note that weights are absolute, and default to 1.

    n_jobs : int or None, optional (default=None)
        The number of parallel jobs to run for neighbors search.
        ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
        ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
        for more details.

    Returns
    -------
    core_samples : array [n_core_samples]
        Indices of core samples.

    labels : array [n_samples]
        Cluster labels for each point.  Noisy samples are given the label -1.

    See also
    --------
    DBSCAN
        An estimator interface for this clustering algorithm.
    optics
        A similar clustering at multiple values of eps. Our implementation
        is optimized for memory usage.

    Notes
    -----
    For an example, see :ref:`examples/cluster/plot_dbscan.py
    <sphx_glr_auto_examples_cluster_plot_dbscan.py>`.

    This implementation bulk-computes all neighborhood queries, which increases
    the memory complexity to O(n.d) where d is the average number of neighbors,
    while original DBSCAN had memory complexity O(n). It may attract a higher
    memory complexity when querying these nearest neighborhoods, depending
    on the ``algorithm``.

    One way to avoid the query complexity is to pre-compute sparse
    neighborhoods in chunks using
    :func:`NearestNeighbors.radius_neighbors_graph
    <sklearn.neighbors.NearestNeighbors.radius_neighbors_graph>` with
    ``mode='distance'``, then using ``metric='precomputed'`` here.

    Another way to reduce memory and computation time is to remove
    (near-)duplicate points and use ``sample_weight`` instead.

    :func:`cluster.optics` provides a similar clustering with lower memory
    usage.

    References
    ----------
    Ester, M., H. P. Kriegel, J. Sander, and X. Xu, "A Density-Based
    Algorithm for Discovering Clusters in Large Spatial Databases with Noise".
    In: Proceedings of the 2nd International Conference on Knowledge Discovery
    and Data Mining, Portland, OR, AAAI Press, pp. 226-231. 1996
    """
    # `not eps > 0.0` (rather than `eps <= 0.0`) also rejects NaN.
    if not eps > 0.0:
        raise ValueError("eps must be positive.")

    X = check_array(X, accept_sparse='csr')
    if sample_weight is not None:
        sample_weight = np.asarray(sample_weight)
        check_consistent_length(X, sample_weight)

    # Calculate neighborhood for all samples. This leaves the original point
    # in, which needs to be considered later (i.e. point i is in the
    # neighborhood of point i; while true, this is useless information).
    if metric == 'precomputed' and sparse.issparse(X):
        # Sparse precomputed distances: each row of X lists the stored
        # (nonzero) distances from one sample; entries <= eps are neighbors.
        neighborhoods = np.empty(X.shape[0], dtype=object)
        X.sum_duplicates()  # XXX: modifies X's internals in-place
        X_mask = X.data <= eps
        masked_indices = X.indices.astype(np.intp, copy=False)[X_mask]
        # Per-row end offsets into masked_indices (cumulative count of
        # kept entries at each row boundary).
        masked_indptr = np.concatenate(([0], np.cumsum(X_mask)))[X.indptr[1:]]

        # insert the diagonal: a point is its own neighbor, but 0 distance
        # means absence from sparse matrix data
        masked_indices = np.insert(masked_indices, masked_indptr,
                                   np.arange(X.shape[0]))
        masked_indptr = masked_indptr[:-1] + np.arange(1, X.shape[0])

        # split into rows
        neighborhoods[:] = np.split(masked_indices, masked_indptr)
    else:
        neighbors_model = NearestNeighbors(radius=eps, algorithm=algorithm,
                                           leaf_size=leaf_size,
                                           metric=metric,
                                           metric_params=metric_params, p=p,
                                           n_jobs=n_jobs)
        neighbors_model.fit(X)
        # This has worst case O(n^2) memory complexity
        neighborhoods = neighbors_model.radius_neighbors(X, eps,
                                                         return_distance=False)

    # Neighborhood "size" is a plain count, or a sum of weights when
    # sample_weight is given (so duplicates can be collapsed into weights).
    if sample_weight is None:
        n_neighbors = np.array([len(neighbors)
                                for neighbors in neighborhoods])
    else:
        n_neighbors = np.array([np.sum(sample_weight[neighbors])
                                for neighbors in neighborhoods])

    # Initially, all samples are noise.
    labels = np.full(X.shape[0], -1, dtype=np.intp)

    # A list of all core samples found.
    core_samples = np.asarray(n_neighbors >= min_samples, dtype=np.uint8)
    # Label expansion itself is done in Cython (dbscan_inner mutates
    # `labels` in place).
    dbscan_inner(core_samples, neighborhoods, labels)
    return np.where(core_samples)[0], labels


class DBSCAN(BaseEstimator, ClusterMixin):
    """Perform DBSCAN clustering from vector array or distance matrix.

    DBSCAN - Density-Based Spatial Clustering of Applications with Noise.
    Finds core samples of high density and expands clusters from them.
    Good for data which contains clusters of similar density.

    Read more in the :ref:`User Guide <dbscan>`.

    Parameters
    ----------
    eps : float, optional
        The maximum distance between two samples for them to be considered
        as in the same neighborhood.

    min_samples : int, optional
        The number of samples (or total weight) in a neighborhood for a point
        to be considered as a core point. This includes the point itself.

    metric : string, or callable
        The metric to use when calculating distance between instances in a
        feature array. If metric is a string or callable, it must be one of
        the options allowed by :func:`sklearn.metrics.pairwise_distances` for
        its metric parameter.
        If metric is "precomputed", X is assumed to be a distance matrix and
        must be square. X may be a sparse matrix, in which case only "nonzero"
        elements may be considered neighbors for DBSCAN.

        .. versionadded:: 0.17
           metric *precomputed* to accept precomputed sparse matrix.

    metric_params : dict, optional
        Additional keyword arguments for the metric function.

        .. versionadded:: 0.19

    algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
        The algorithm to be used by the NearestNeighbors module
        to compute pointwise distances and find nearest neighbors.
        See NearestNeighbors module documentation for details.

    leaf_size : int, optional (default = 30)
        Leaf size passed to BallTree or cKDTree. This can affect the speed
        of the construction and query, as well as the memory required
        to store the tree. The optimal value depends
        on the nature of the problem.

    p : float, optional
        The power of the Minkowski metric to be used to calculate distance
        between points.

    n_jobs : int or None, optional (default=None)
        The number of parallel jobs to run.
        ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
        ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
        for more details.

    Attributes
    ----------
    core_sample_indices_ : array, shape = [n_core_samples]
        Indices of core samples.

    components_ : array, shape = [n_core_samples, n_features]
        Copy of each core sample found by training.

    labels_ : array, shape = [n_samples]
        Cluster labels for each point in the dataset given to fit().
        Noisy samples are given the label -1.

    Examples
    --------
    >>> from sklearn.cluster import DBSCAN
    >>> import numpy as np
    >>> X = np.array([[1, 2], [2, 2], [2, 3],
    ...               [8, 7], [8, 8], [25, 80]])
    >>> clustering = DBSCAN(eps=3, min_samples=2).fit(X)
    >>> clustering.labels_
    array([ 0,  0,  0,  1,  1, -1])
    >>> clustering # doctest: +NORMALIZE_WHITESPACE
    DBSCAN(algorithm='auto', eps=3, leaf_size=30, metric='euclidean',
        metric_params=None, min_samples=2, n_jobs=None, p=None)

    See also
    --------
    OPTICS
        A similar clustering at multiple values of eps. Our implementation
        is optimized for memory usage.

    Notes
    -----
    For an example, see :ref:`examples/cluster/plot_dbscan.py
    <sphx_glr_auto_examples_cluster_plot_dbscan.py>`.

    This implementation bulk-computes all neighborhood queries, which increases
    the memory complexity to O(n.d) where d is the average number of neighbors,
    while original DBSCAN had memory complexity O(n). It may attract a higher
    memory complexity when querying these nearest neighborhoods, depending
    on the ``algorithm``.

    One way to avoid the query complexity is to pre-compute sparse
    neighborhoods in chunks using
    :func:`NearestNeighbors.radius_neighbors_graph
    <sklearn.neighbors.NearestNeighbors.radius_neighbors_graph>` with
    ``mode='distance'``, then using ``metric='precomputed'`` here.

    Another way to reduce memory and computation time is to remove
    (near-)duplicate points and use ``sample_weight`` instead.

    :class:`cluster.OPTICS` provides a similar clustering with lower memory
    usage.

    References
    ----------
    Ester, M., H. P. Kriegel, J. Sander, and X. Xu, "A Density-Based
    Algorithm for Discovering Clusters in Large Spatial Databases with Noise".
    In: Proceedings of the 2nd International Conference on Knowledge Discovery
    and Data Mining, Portland, OR, AAAI Press, pp. 226-231. 1996
    """

    def __init__(self, eps=0.5, min_samples=5, metric='euclidean',
                 metric_params=None, algorithm='auto', leaf_size=30, p=None,
                 n_jobs=None):
        self.eps = eps
        self.min_samples = min_samples
        self.metric = metric
        self.metric_params = metric_params
        self.algorithm = algorithm
        self.leaf_size = leaf_size
        self.p = p
        self.n_jobs = n_jobs

    def fit(self, X, y=None, sample_weight=None):
        """Perform DBSCAN clustering from features or distance matrix.

        Parameters
        ----------
        X : array or sparse (CSR) matrix of shape (n_samples, n_features), or \
                array of shape (n_samples, n_samples)
            A feature array, or array of distances between samples if
            ``metric='precomputed'``.

        sample_weight : array, shape (n_samples,), optional
            Weight of each sample, such that a sample with a weight of at
            least ``min_samples`` is by itself a core sample; a sample with
            negative weight may inhibit its eps-neighbor from being core.
            Note that weights are absolute, and default to 1.

        y : Ignored

        """
        X = check_array(X, accept_sparse='csr')
        # Delegate to the functional API; every constructor parameter is
        # forwarded via get_params().
        clust = dbscan(X, sample_weight=sample_weight,
                       **self.get_params())
        self.core_sample_indices_, self.labels_ = clust
        if len(self.core_sample_indices_):
            # fix for scipy sparse indexing issue
            self.components_ = X[self.core_sample_indices_].copy()
        else:
            # no core samples
            self.components_ = np.empty((0, X.shape[1]))
        return self

    def fit_predict(self, X, y=None, sample_weight=None):
        """Performs clustering on X and returns cluster labels.

        Parameters
        ----------
        X : array or sparse (CSR) matrix of shape (n_samples, n_features), or \
                array of shape (n_samples, n_samples)
            A feature array, or array of distances between samples if
            ``metric='precomputed'``.

        sample_weight : array, shape (n_samples,), optional
            Weight of each sample, such that a sample with a weight of at
            least ``min_samples`` is by itself a core sample; a sample with
            negative weight may inhibit its eps-neighbor from being core.
            Note that weights are absolute, and default to 1.

        y : Ignored

        Returns
        -------
        y : ndarray, shape (n_samples,)
            cluster labels
        """
        self.fit(X, sample_weight=sample_weight)
        return self.labels_
""" Testing for Support Vector Machine module (sklearn.svm) TODO: remove hard coded numerical results when possible """ import numpy as np import itertools import pytest from numpy.testing import assert_array_equal, assert_array_almost_equal from numpy.testing import assert_almost_equal from numpy.testing import assert_allclose from scipy import sparse from sklearn import svm, linear_model, datasets, metrics, base from sklearn.model_selection import train_test_split from sklearn.datasets import make_classification, make_blobs from sklearn.metrics import f1_score from sklearn.metrics.pairwise import rbf_kernel from sklearn.utils import check_random_state from sklearn.utils.testing import assert_equal, assert_true, assert_false from sklearn.utils.testing import assert_greater, assert_in, assert_less from sklearn.utils.testing import assert_raises_regexp, assert_warns from sklearn.utils.testing import assert_warns_message, assert_raise_message from sklearn.utils.testing import ignore_warnings, assert_raises from sklearn.utils.testing import assert_no_warnings from sklearn.exceptions import ConvergenceWarning from sklearn.exceptions import NotFittedError, UndefinedMetricWarning from sklearn.multiclass import OneVsRestClassifier from sklearn.externals import six # toy sample X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]] Y = [1, 1, 1, 2, 2, 2] T = [[-1, -1], [2, 2], [3, 2]] true_result = [1, 2, 2] # also load the iris dataset iris = datasets.load_iris() rng = check_random_state(42) perm = rng.permutation(iris.target.size) iris.data = iris.data[perm] iris.target = iris.target[perm] def test_libsvm_parameters(): # Test parameters on classes that make use of libsvm. 
clf = svm.SVC(kernel='linear').fit(X, Y) assert_array_equal(clf.dual_coef_, [[-0.25, .25]]) assert_array_equal(clf.support_, [1, 3]) assert_array_equal(clf.support_vectors_, (X[1], X[3])) assert_array_equal(clf.intercept_, [0.]) assert_array_equal(clf.predict(X), Y) def test_libsvm_iris(): # Check consistency on dataset iris. # shuffle the dataset so that labels are not ordered for k in ('linear', 'rbf'): clf = svm.SVC(gamma='scale', kernel=k).fit(iris.data, iris.target) assert_greater(np.mean(clf.predict(iris.data) == iris.target), 0.9) assert_true(hasattr(clf, "coef_") == (k == 'linear')) assert_array_equal(clf.classes_, np.sort(clf.classes_)) # check also the low-level API model = svm.libsvm.fit(iris.data, iris.target.astype(np.float64)) pred = svm.libsvm.predict(iris.data, *model) assert_greater(np.mean(pred == iris.target), .95) model = svm.libsvm.fit(iris.data, iris.target.astype(np.float64), kernel='linear') pred = svm.libsvm.predict(iris.data, *model, kernel='linear') assert_greater(np.mean(pred == iris.target), .95) pred = svm.libsvm.cross_validation(iris.data, iris.target.astype(np.float64), 5, kernel='linear', random_seed=0) assert_greater(np.mean(pred == iris.target), .95) # If random_seed >= 0, the libsvm rng is seeded (by calling `srand`), hence # we should get deterministic results (assuming that there is no other # thread calling this wrapper calling `srand` concurrently). pred2 = svm.libsvm.cross_validation(iris.data, iris.target.astype(np.float64), 5, kernel='linear', random_seed=0) assert_array_equal(pred, pred2) def test_precomputed(): # SVC with a precomputed kernel. # We test it with a toy dataset and with iris. 
clf = svm.SVC(kernel='precomputed') # Gram matrix for train data (square matrix) # (we use just a linear kernel) K = np.dot(X, np.array(X).T) clf.fit(K, Y) # Gram matrix for test data (rectangular matrix) KT = np.dot(T, np.array(X).T) pred = clf.predict(KT) assert_raises(ValueError, clf.predict, KT.T) assert_array_equal(clf.dual_coef_, [[-0.25, .25]]) assert_array_equal(clf.support_, [1, 3]) assert_array_equal(clf.intercept_, [0]) assert_array_almost_equal(clf.support_, [1, 3]) assert_array_equal(pred, true_result) # Gram matrix for test data but compute KT[i,j] # for support vectors j only. KT = np.zeros_like(KT) for i in range(len(T)): for j in clf.support_: KT[i, j] = np.dot(T[i], X[j]) pred = clf.predict(KT) assert_array_equal(pred, true_result) # same as before, but using a callable function instead of the kernel # matrix. kernel is just a linear kernel kfunc = lambda x, y: np.dot(x, y.T) clf = svm.SVC(gamma='scale', kernel=kfunc) clf.fit(X, Y) pred = clf.predict(T) assert_array_equal(clf.dual_coef_, [[-0.25, .25]]) assert_array_equal(clf.intercept_, [0]) assert_array_almost_equal(clf.support_, [1, 3]) assert_array_equal(pred, true_result) # test a precomputed kernel with the iris dataset # and check parameters against a linear SVC clf = svm.SVC(kernel='precomputed') clf2 = svm.SVC(kernel='linear') K = np.dot(iris.data, iris.data.T) clf.fit(K, iris.target) clf2.fit(iris.data, iris.target) pred = clf.predict(K) assert_array_almost_equal(clf.support_, clf2.support_) assert_array_almost_equal(clf.dual_coef_, clf2.dual_coef_) assert_array_almost_equal(clf.intercept_, clf2.intercept_) assert_almost_equal(np.mean(pred == iris.target), .99, decimal=2) # Gram matrix for test data but compute KT[i,j] # for support vectors j only. 
K = np.zeros_like(K) for i in range(len(iris.data)): for j in clf.support_: K[i, j] = np.dot(iris.data[i], iris.data[j]) pred = clf.predict(K) assert_almost_equal(np.mean(pred == iris.target), .99, decimal=2) clf = svm.SVC(gamma='scale', kernel=kfunc) clf.fit(iris.data, iris.target) assert_almost_equal(np.mean(pred == iris.target), .99, decimal=2) def test_svr(): # Test Support Vector Regression diabetes = datasets.load_diabetes() for clf in (svm.NuSVR(kernel='linear', nu=.4, C=1.0), svm.NuSVR(kernel='linear', nu=.4, C=10.), svm.SVR(kernel='linear', C=10.), svm.LinearSVR(C=10.), svm.LinearSVR(C=10.), ): clf.fit(diabetes.data, diabetes.target) assert_greater(clf.score(diabetes.data, diabetes.target), 0.02) # non-regression test; previously, BaseLibSVM would check that # len(np.unique(y)) < 2, which must only be done for SVC svm.SVR(gamma='scale').fit(diabetes.data, np.ones(len(diabetes.data))) svm.LinearSVR().fit(diabetes.data, np.ones(len(diabetes.data))) def test_linearsvr(): # check that SVR(kernel='linear') and LinearSVC() give # comparable results diabetes = datasets.load_diabetes() lsvr = svm.LinearSVR(C=1e3).fit(diabetes.data, diabetes.target) score1 = lsvr.score(diabetes.data, diabetes.target) svr = svm.SVR(kernel='linear', C=1e3).fit(diabetes.data, diabetes.target) score2 = svr.score(diabetes.data, diabetes.target) assert_allclose(np.linalg.norm(lsvr.coef_), np.linalg.norm(svr.coef_), 1, 0.0001) assert_almost_equal(score1, score2, 2) def test_linearsvr_fit_sampleweight(): # check correct result when sample_weight is 1 # check that SVR(kernel='linear') and LinearSVC() give # comparable results diabetes = datasets.load_diabetes() n_samples = len(diabetes.target) unit_weight = np.ones(n_samples) lsvr = svm.LinearSVR(C=1e3).fit(diabetes.data, diabetes.target, sample_weight=unit_weight) score1 = lsvr.score(diabetes.data, diabetes.target) lsvr_no_weight = svm.LinearSVR(C=1e3).fit(diabetes.data, diabetes.target) score2 = lsvr_no_weight.score(diabetes.data, 
diabetes.target) assert_allclose(np.linalg.norm(lsvr.coef_), np.linalg.norm(lsvr_no_weight.coef_), 1, 0.0001) assert_almost_equal(score1, score2, 2) # check that fit(X) = fit([X1, X2, X3],sample_weight = [n1, n2, n3]) where # X = X1 repeated n1 times, X2 repeated n2 times and so forth random_state = check_random_state(0) random_weight = random_state.randint(0, 10, n_samples) lsvr_unflat = svm.LinearSVR(C=1e3).fit(diabetes.data, diabetes.target, sample_weight=random_weight) score3 = lsvr_unflat.score(diabetes.data, diabetes.target, sample_weight=random_weight) X_flat = np.repeat(diabetes.data, random_weight, axis=0) y_flat = np.repeat(diabetes.target, random_weight, axis=0) lsvr_flat = svm.LinearSVR(C=1e3).fit(X_flat, y_flat) score4 = lsvr_flat.score(X_flat, y_flat) assert_almost_equal(score3, score4, 2) def test_svr_errors(): X = [[0.0], [1.0]] y = [0.0, 0.5] # Bad kernel clf = svm.SVR(gamma='scale', kernel=lambda x, y: np.array([[1.0]])) clf.fit(X, y) assert_raises(ValueError, clf.predict, X) def test_oneclass(): # Test OneClassSVM clf = svm.OneClassSVM(gamma='scale') clf.fit(X) pred = clf.predict(T) assert_array_equal(pred, [-1, -1, -1]) assert_equal(pred.dtype, np.dtype('intp')) assert_array_almost_equal(clf.intercept_, [-1.117], decimal=3) assert_array_almost_equal(clf.dual_coef_, [[0.681, 0.139, 0.68, 0.14, 0.68, 0.68]], decimal=3) assert_raises(AttributeError, lambda: clf.coef_) def test_oneclass_decision_function(): # Test OneClassSVM decision function clf = svm.OneClassSVM() rnd = check_random_state(2) # Generate train data X = 0.3 * rnd.randn(100, 2) X_train = np.r_[X + 2, X - 2] # Generate some regular novel observations X = 0.3 * rnd.randn(20, 2) X_test = np.r_[X + 2, X - 2] # Generate some abnormal novel observations X_outliers = rnd.uniform(low=-4, high=4, size=(20, 2)) # fit the model clf = svm.OneClassSVM(nu=0.1, kernel="rbf", gamma=0.1) clf.fit(X_train) # predict things y_pred_test = clf.predict(X_test) assert_greater(np.mean(y_pred_test == 1), .9) 
y_pred_outliers = clf.predict(X_outliers) assert_greater(np.mean(y_pred_outliers == -1), .9) dec_func_test = clf.decision_function(X_test) assert_array_equal((dec_func_test > 0).ravel(), y_pred_test == 1) dec_func_outliers = clf.decision_function(X_outliers) assert_array_equal((dec_func_outliers > 0).ravel(), y_pred_outliers == 1) def test_oneclass_score_samples(): X_train = [[1, 1], [1, 2], [2, 1]] clf = svm.OneClassSVM(gamma=1).fit(X_train) assert_array_equal(clf.score_samples([[2., 2.]]), clf.decision_function([[2., 2.]]) + clf.offset_) def test_tweak_params(): # Make sure some tweaking of parameters works. # We change clf.dual_coef_ at run time and expect .predict() to change # accordingly. Notice that this is not trivial since it involves a lot # of C/Python copying in the libsvm bindings. # The success of this test ensures that the mapping between libsvm and # the python classifier is complete. clf = svm.SVC(kernel='linear', C=1.0) clf.fit(X, Y) assert_array_equal(clf.dual_coef_, [[-.25, .25]]) assert_array_equal(clf.predict([[-.1, -.1]]), [1]) clf._dual_coef_ = np.array([[.0, 1.]]) assert_array_equal(clf.predict([[-.1, -.1]]), [2]) def test_probability(): # Predict probabilities using SVC # This uses cross validation, so we use a slightly bigger testing set. 
for clf in (svm.SVC(gamma='scale', probability=True, random_state=0, C=1.0), svm.NuSVC(gamma='scale', probability=True, random_state=0)): clf.fit(iris.data, iris.target) prob_predict = clf.predict_proba(iris.data) assert_array_almost_equal( np.sum(prob_predict, 1), np.ones(iris.data.shape[0])) assert_true(np.mean(np.argmax(prob_predict, 1) == clf.predict(iris.data)) > 0.9) assert_almost_equal(clf.predict_proba(iris.data), np.exp(clf.predict_log_proba(iris.data)), 8) def test_decision_function(): # Test decision_function # Sanity check, test that decision_function implemented in python # returns the same as the one in libsvm # multi class: clf = svm.SVC(kernel='linear', C=0.1, decision_function_shape='ovo').fit(iris.data, iris.target) dec = np.dot(iris.data, clf.coef_.T) + clf.intercept_ assert_array_almost_equal(dec, clf.decision_function(iris.data)) # binary: clf.fit(X, Y) dec = np.dot(X, clf.coef_.T) + clf.intercept_ prediction = clf.predict(X) assert_array_almost_equal(dec.ravel(), clf.decision_function(X)) assert_array_almost_equal( prediction, clf.classes_[(clf.decision_function(X) > 0).astype(np.int)]) expected = np.array([-1., -0.66, -1., 0.66, 1., 1.]) assert_array_almost_equal(clf.decision_function(X), expected, 2) # kernel binary: clf = svm.SVC(kernel='rbf', gamma=1, decision_function_shape='ovo') clf.fit(X, Y) rbfs = rbf_kernel(X, clf.support_vectors_, gamma=clf.gamma) dec = np.dot(rbfs, clf.dual_coef_.T) + clf.intercept_ assert_array_almost_equal(dec.ravel(), clf.decision_function(X)) def test_decision_function_shape(): # check that decision_function_shape='ovr' gives # correct shape and is consistent with predict clf = svm.SVC(kernel='linear', C=0.1, decision_function_shape='ovr').fit(iris.data, iris.target) dec = clf.decision_function(iris.data) assert_equal(dec.shape, (len(iris.data), 3)) assert_array_equal(clf.predict(iris.data), np.argmax(dec, axis=1)) # with five classes: X, y = make_blobs(n_samples=80, centers=5, random_state=0) X_train, X_test, 
y_train, y_test = train_test_split(X, y, random_state=0) clf = svm.SVC(kernel='linear', C=0.1, decision_function_shape='ovr').fit(X_train, y_train) dec = clf.decision_function(X_test) assert_equal(dec.shape, (len(X_test), 5)) assert_array_equal(clf.predict(X_test), np.argmax(dec, axis=1)) # check shape of ovo_decition_function=True clf = svm.SVC(kernel='linear', C=0.1, decision_function_shape='ovo').fit(X_train, y_train) dec = clf.decision_function(X_train) assert_equal(dec.shape, (len(X_train), 10)) def test_svr_predict(): # Test SVR's decision_function # Sanity check, test that predict implemented in python # returns the same as the one in libsvm X = iris.data y = iris.target # linear kernel reg = svm.SVR(kernel='linear', C=0.1).fit(X, y) dec = np.dot(X, reg.coef_.T) + reg.intercept_ assert_array_almost_equal(dec.ravel(), reg.predict(X).ravel()) # rbf kernel reg = svm.SVR(kernel='rbf', gamma=1).fit(X, y) rbfs = rbf_kernel(X, reg.support_vectors_, gamma=reg.gamma) dec = np.dot(rbfs, reg.dual_coef_.T) + reg.intercept_ assert_array_almost_equal(dec.ravel(), reg.predict(X).ravel()) @pytest.mark.filterwarnings('ignore: Default solver will be changed') # 0.22 @pytest.mark.filterwarnings('ignore: Default multi_class will') # 0.22 def test_weight(): # Test class weights clf = svm.SVC(gamma='scale', class_weight={1: 0.1}) # we give a small weights to class 1 clf.fit(X, Y) # so all predicted values belong to class 2 assert_array_almost_equal(clf.predict(X), [2] * 6) X_, y_ = make_classification(n_samples=200, n_features=10, weights=[0.833, 0.167], random_state=2) for clf in (linear_model.LogisticRegression(), svm.LinearSVC(random_state=0), svm.SVC(gamma="scale")): clf.set_params(class_weight={0: .1, 1: 10}) clf.fit(X_[:100], y_[:100]) y_pred = clf.predict(X_[100:]) assert_true(f1_score(y_[100:], y_pred) > .3) def test_sample_weights(): # Test weights on individual samples # TODO: check on NuSVR, OneClass, etc. 
clf = svm.SVC(gamma="scale") clf.fit(X, Y) assert_array_equal(clf.predict([X[2]]), [1.]) sample_weight = [.1] * 3 + [10] * 3 clf.fit(X, Y, sample_weight=sample_weight) assert_array_equal(clf.predict([X[2]]), [2.]) # test that rescaling all samples is the same as changing C clf = svm.SVC(gamma="scale") clf.fit(X, Y) dual_coef_no_weight = clf.dual_coef_ clf.set_params(C=100) clf.fit(X, Y, sample_weight=np.repeat(0.01, len(X))) assert_array_almost_equal(dual_coef_no_weight, clf.dual_coef_) @pytest.mark.filterwarnings('ignore: Default solver will be changed') # 0.22 @pytest.mark.filterwarnings('ignore: Default multi_class will') # 0.22 @ignore_warnings(category=UndefinedMetricWarning) def test_auto_weight(): # Test class weights for imbalanced data from sklearn.linear_model import LogisticRegression # We take as dataset the two-dimensional projection of iris so # that it is not separable and remove half of predictors from # class 1. # We add one to the targets as a non-regression test: # class_weight="balanced" # used to work only when the labels where a range [0..K). from sklearn.utils import compute_class_weight X, y = iris.data[:, :2], iris.target + 1 unbalanced = np.delete(np.arange(y.size), np.where(y > 2)[0][::2]) classes = np.unique(y[unbalanced]) class_weights = compute_class_weight('balanced', classes, y[unbalanced]) assert_true(np.argmax(class_weights) == 2) for clf in (svm.SVC(kernel='linear'), svm.LinearSVC(random_state=0), LogisticRegression()): # check that score is better when class='balanced' is set. 
y_pred = clf.fit(X[unbalanced], y[unbalanced]).predict(X) clf.set_params(class_weight='balanced') y_pred_balanced = clf.fit(X[unbalanced], y[unbalanced],).predict(X) assert_true(metrics.f1_score(y, y_pred, average='macro') <= metrics.f1_score(y, y_pred_balanced, average='macro')) def test_bad_input(): # Test that it gives proper exception on deficient input # impossible value of C assert_raises(ValueError, svm.SVC(gamma='scale', C=-1).fit, X, Y) # impossible value of nu clf = svm.NuSVC(gamma='scale', nu=0.0) assert_raises(ValueError, clf.fit, X, Y) Y2 = Y[:-1] # wrong dimensions for labels assert_raises(ValueError, clf.fit, X, Y2) # Test with arrays that are non-contiguous. for clf in (svm.SVC(gamma="scale"), svm.LinearSVC(random_state=0)): Xf = np.asfortranarray(X) assert_false(Xf.flags['C_CONTIGUOUS']) yf = np.ascontiguousarray(np.tile(Y, (2, 1)).T) yf = yf[:, -1] assert_false(yf.flags['F_CONTIGUOUS']) assert_false(yf.flags['C_CONTIGUOUS']) clf.fit(Xf, yf) assert_array_equal(clf.predict(T), true_result) # error for precomputed kernelsx clf = svm.SVC(kernel='precomputed') assert_raises(ValueError, clf.fit, X, Y) # sample_weight bad dimensions clf = svm.SVC(gamma="scale") assert_raises(ValueError, clf.fit, X, Y, sample_weight=range(len(X) - 1)) # predict with sparse input when trained with dense clf = svm.SVC(gamma="scale").fit(X, Y) assert_raises(ValueError, clf.predict, sparse.lil_matrix(X)) Xt = np.array(X).T clf.fit(np.dot(X, Xt), Y) assert_raises(ValueError, clf.predict, X) clf = svm.SVC(gamma="scale") clf.fit(X, Y) assert_raises(ValueError, clf.predict, Xt) def test_unicode_kernel(): # Test that a unicode kernel name does not cause a TypeError if six.PY2: # Test unicode (same as str on python3) clf = svm.SVC(kernel=u'linear', probability=True) clf.fit(X, Y) clf.predict_proba(T) svm.libsvm.cross_validation(iris.data, iris.target.astype(np.float64), 5, kernel=u'linear', random_seed=0) # Test default behavior on both versions clf = svm.SVC(gamma='scale', 
kernel='linear', probability=True) clf.fit(X, Y) clf.predict_proba(T) svm.libsvm.cross_validation(iris.data, iris.target.astype(np.float64), 5, kernel='linear', random_seed=0) def test_sparse_precomputed(): clf = svm.SVC(kernel='precomputed') sparse_gram = sparse.csr_matrix([[1, 0], [0, 1]]) try: clf.fit(sparse_gram, [0, 1]) assert not "reached" except TypeError as e: assert_in("Sparse precomputed", str(e)) def test_linearsvc_parameters(): # Test possible parameter combinations in LinearSVC # Generate list of possible parameter combinations losses = ['hinge', 'squared_hinge', 'logistic_regression', 'foo'] penalties, duals = ['l1', 'l2', 'bar'], [True, False] X, y = make_classification(n_samples=5, n_features=5) for loss, penalty, dual in itertools.product(losses, penalties, duals): clf = svm.LinearSVC(penalty=penalty, loss=loss, dual=dual) if ((loss, penalty) == ('hinge', 'l1') or (loss, penalty, dual) == ('hinge', 'l2', False) or (penalty, dual) == ('l1', True) or loss == 'foo' or penalty == 'bar'): assert_raises_regexp(ValueError, "Unsupported set of arguments.*penalty='%s.*" "loss='%s.*dual=%s" % (penalty, loss, dual), clf.fit, X, y) else: clf.fit(X, y) # Incorrect loss value - test if explicit error message is raised assert_raises_regexp(ValueError, ".*loss='l3' is not supported.*", svm.LinearSVC(loss="l3").fit, X, y) # FIXME remove in 1.0 def test_linearsvx_loss_penalty_deprecations(): X, y = [[0.0], [1.0]], [0, 1] msg = ("loss='%s' has been deprecated in favor of " "loss='%s' as of 0.16. 
Backward compatibility" " for the %s will be removed in %s") # LinearSVC # loss l1 --> hinge assert_warns_message(DeprecationWarning, msg % ("l1", "hinge", "loss='l1'", "1.0"), svm.LinearSVC(loss="l1").fit, X, y) # loss l2 --> squared_hinge assert_warns_message(DeprecationWarning, msg % ("l2", "squared_hinge", "loss='l2'", "1.0"), svm.LinearSVC(loss="l2").fit, X, y) # LinearSVR # loss l1 --> epsilon_insensitive assert_warns_message(DeprecationWarning, msg % ("l1", "epsilon_insensitive", "loss='l1'", "1.0"), svm.LinearSVR(loss="l1").fit, X, y) # loss l2 --> squared_epsilon_insensitive assert_warns_message(DeprecationWarning, msg % ("l2", "squared_epsilon_insensitive", "loss='l2'", "1.0"), svm.LinearSVR(loss="l2").fit, X, y) def test_linear_svx_uppercase_loss_penality_raises_error(): # Check if Upper case notation raises error at _fit_liblinear # which is called by fit X, y = [[0.0], [1.0]], [0, 1] assert_raise_message(ValueError, "loss='SQuared_hinge' is not supported", svm.LinearSVC(loss="SQuared_hinge").fit, X, y) assert_raise_message(ValueError, ("The combination of penalty='L2'" " and loss='squared_hinge' is not supported"), svm.LinearSVC(penalty="L2").fit, X, y) def test_linearsvc(): # Test basic routines using LinearSVC clf = svm.LinearSVC(random_state=0).fit(X, Y) # by default should have intercept assert_true(clf.fit_intercept) assert_array_equal(clf.predict(T), true_result) assert_array_almost_equal(clf.intercept_, [0], decimal=3) # the same with l1 penalty clf = svm.LinearSVC(penalty='l1', loss='squared_hinge', dual=False, random_state=0).fit(X, Y) assert_array_equal(clf.predict(T), true_result) # l2 penalty with dual formulation clf = svm.LinearSVC(penalty='l2', dual=True, random_state=0).fit(X, Y) assert_array_equal(clf.predict(T), true_result) # l2 penalty, l1 loss clf = svm.LinearSVC(penalty='l2', loss='hinge', dual=True, random_state=0) clf.fit(X, Y) assert_array_equal(clf.predict(T), true_result) # test also decision function dec = 
clf.decision_function(T) res = (dec > 0).astype(np.int) + 1 assert_array_equal(res, true_result) def test_linearsvc_crammer_singer(): # Test LinearSVC with crammer_singer multi-class svm ovr_clf = svm.LinearSVC(random_state=0).fit(iris.data, iris.target) cs_clf = svm.LinearSVC(multi_class='crammer_singer', random_state=0) cs_clf.fit(iris.data, iris.target) # similar prediction for ovr and crammer-singer: assert_true((ovr_clf.predict(iris.data) == cs_clf.predict(iris.data)).mean() > .9) # classifiers shouldn't be the same assert_true((ovr_clf.coef_ != cs_clf.coef_).all()) # test decision function assert_array_equal(cs_clf.predict(iris.data), np.argmax(cs_clf.decision_function(iris.data), axis=1)) dec_func = np.dot(iris.data, cs_clf.coef_.T) + cs_clf.intercept_ assert_array_almost_equal(dec_func, cs_clf.decision_function(iris.data)) def test_linearsvc_fit_sampleweight(): # check correct result when sample_weight is 1 n_samples = len(X) unit_weight = np.ones(n_samples) clf = svm.LinearSVC(random_state=0).fit(X, Y) clf_unitweight = svm.LinearSVC(random_state=0).\ fit(X, Y, sample_weight=unit_weight) # check if same as sample_weight=None assert_array_equal(clf_unitweight.predict(T), clf.predict(T)) assert_allclose(clf.coef_, clf_unitweight.coef_, 1, 0.0001) # check that fit(X) = fit([X1, X2, X3],sample_weight = [n1, n2, n3]) where # X = X1 repeated n1 times, X2 repeated n2 times and so forth random_state = check_random_state(0) random_weight = random_state.randint(0, 10, n_samples) lsvc_unflat = svm.LinearSVC(random_state=0).\ fit(X, Y, sample_weight=random_weight) pred1 = lsvc_unflat.predict(T) X_flat = np.repeat(X, random_weight, axis=0) y_flat = np.repeat(Y, random_weight, axis=0) lsvc_flat = svm.LinearSVC(random_state=0).fit(X_flat, y_flat) pred2 = lsvc_flat.predict(T) assert_array_equal(pred1, pred2) assert_allclose(lsvc_unflat.coef_, lsvc_flat.coef_, 1, 0.0001) def test_crammer_singer_binary(): # Test Crammer-Singer formulation in the binary case X, y = 
make_classification(n_classes=2, random_state=0) for fit_intercept in (True, False): acc = svm.LinearSVC(fit_intercept=fit_intercept, multi_class="crammer_singer", random_state=0).fit(X, y).score(X, y) assert_greater(acc, 0.9) def test_linearsvc_iris(): # Test that LinearSVC gives plausible predictions on the iris dataset # Also, test symbolic class names (classes_). target = iris.target_names[iris.target] clf = svm.LinearSVC(random_state=0).fit(iris.data, target) assert_equal(set(clf.classes_), set(iris.target_names)) assert_greater(np.mean(clf.predict(iris.data) == target), 0.8) dec = clf.decision_function(iris.data) pred = iris.target_names[np.argmax(dec, 1)] assert_array_equal(pred, clf.predict(iris.data)) def test_dense_liblinear_intercept_handling(classifier=svm.LinearSVC): # Test that dense liblinear honours intercept_scaling param X = [[2, 1], [3, 1], [1, 3], [2, 3]] y = [0, 0, 1, 1] clf = classifier(fit_intercept=True, penalty='l1', loss='squared_hinge', dual=False, C=4, tol=1e-7, random_state=0) assert_true(clf.intercept_scaling == 1, clf.intercept_scaling) assert_true(clf.fit_intercept) # when intercept_scaling is low the intercept value is highly "penalized" # by regularization clf.intercept_scaling = 1 clf.fit(X, y) assert_almost_equal(clf.intercept_, 0, decimal=5) # when intercept_scaling is sufficiently high, the intercept value # is not affected by regularization clf.intercept_scaling = 100 clf.fit(X, y) intercept1 = clf.intercept_ assert_less(intercept1, -1) # when intercept_scaling is sufficiently high, the intercept value # doesn't depend on intercept_scaling value clf.intercept_scaling = 1000 clf.fit(X, y) intercept2 = clf.intercept_ assert_array_almost_equal(intercept1, intercept2, decimal=2) def test_liblinear_set_coef(): # multi-class case clf = svm.LinearSVC().fit(iris.data, iris.target) values = clf.decision_function(iris.data) clf.coef_ = clf.coef_.copy() clf.intercept_ = clf.intercept_.copy() values2 = clf.decision_function(iris.data) 
assert_array_almost_equal(values, values2) # binary-class case X = [[2, 1], [3, 1], [1, 3], [2, 3]] y = [0, 0, 1, 1] clf = svm.LinearSVC().fit(X, y) values = clf.decision_function(X) clf.coef_ = clf.coef_.copy() clf.intercept_ = clf.intercept_.copy() values2 = clf.decision_function(X) assert_array_equal(values, values2) def test_immutable_coef_property(): # Check that primal coef modification are not silently ignored svms = [ svm.SVC(kernel='linear').fit(iris.data, iris.target), svm.NuSVC(kernel='linear').fit(iris.data, iris.target), svm.SVR(kernel='linear').fit(iris.data, iris.target), svm.NuSVR(kernel='linear').fit(iris.data, iris.target), svm.OneClassSVM(kernel='linear').fit(iris.data), ] for clf in svms: assert_raises(AttributeError, clf.__setattr__, 'coef_', np.arange(3)) assert_raises((RuntimeError, ValueError), clf.coef_.__setitem__, (0, 0), 0) def test_linearsvc_verbose(): # stdout: redirect import os stdout = os.dup(1) # save original stdout os.dup2(os.pipe()[1], 1) # replace it # actual call clf = svm.LinearSVC(verbose=1) clf.fit(X, Y) # stdout: restore os.dup2(stdout, 1) # restore original stdout def test_svc_clone_with_callable_kernel(): # create SVM with callable linear kernel, check that results are the same # as with built-in linear kernel svm_callable = svm.SVC(gamma='scale', kernel=lambda x, y: np.dot(x, y.T), probability=True, random_state=0, decision_function_shape='ovr') # clone for checking clonability with lambda functions.. 
svm_cloned = base.clone(svm_callable) svm_cloned.fit(iris.data, iris.target) svm_builtin = svm.SVC(kernel='linear', probability=True, random_state=0, decision_function_shape='ovr') svm_builtin.fit(iris.data, iris.target) assert_array_almost_equal(svm_cloned.dual_coef_, svm_builtin.dual_coef_) assert_array_almost_equal(svm_cloned.intercept_, svm_builtin.intercept_) assert_array_equal(svm_cloned.predict(iris.data), svm_builtin.predict(iris.data)) assert_array_almost_equal(svm_cloned.predict_proba(iris.data), svm_builtin.predict_proba(iris.data), decimal=4) assert_array_almost_equal(svm_cloned.decision_function(iris.data), svm_builtin.decision_function(iris.data)) def test_svc_bad_kernel(): svc = svm.SVC(gamma='scale', kernel=lambda x, y: x) assert_raises(ValueError, svc.fit, X, Y) def test_timeout(): a = svm.SVC(kernel=lambda x, y: np.dot(x, y.T), probability=True, random_state=0, max_iter=1) assert_warns(ConvergenceWarning, a.fit, X, Y) def test_unfitted(): X = "foo!" # input validation not required when SVM not fitted clf = svm.SVC(gamma="scale") assert_raises_regexp(Exception, r".*\bSVC\b.*\bnot\b.*\bfitted\b", clf.predict, X) clf = svm.NuSVR(gamma='scale') assert_raises_regexp(Exception, r".*\bNuSVR\b.*\bnot\b.*\bfitted\b", clf.predict, X) # ignore convergence warnings from max_iter=1 @ignore_warnings def test_consistent_proba(): a = svm.SVC(probability=True, max_iter=1, random_state=0) proba_1 = a.fit(X, Y).predict_proba(X) a = svm.SVC(probability=True, max_iter=1, random_state=0) proba_2 = a.fit(X, Y).predict_proba(X) assert_array_almost_equal(proba_1, proba_2) def test_linear_svm_convergence_warnings(): # Test that warnings are raised if model does not converge lsvc = svm.LinearSVC(random_state=0, max_iter=2) assert_warns(ConvergenceWarning, lsvc.fit, X, Y) assert_equal(lsvc.n_iter_, 2) lsvr = svm.LinearSVR(random_state=0, max_iter=2) assert_warns(ConvergenceWarning, lsvr.fit, iris.data, iris.target) assert_equal(lsvr.n_iter_, 2) def test_svr_coef_sign(): # 
Test that SVR(kernel="linear") has coef_ with the right sign. # Non-regression test for #2933. X = np.random.RandomState(21).randn(10, 3) y = np.random.RandomState(12).randn(10) for svr in [svm.SVR(kernel='linear'), svm.NuSVR(kernel='linear'), svm.LinearSVR()]: svr.fit(X, y) assert_array_almost_equal(svr.predict(X), np.dot(X, svr.coef_.ravel()) + svr.intercept_) def test_linear_svc_intercept_scaling(): # Test that the right error message is thrown when intercept_scaling <= 0 for i in [-1, 0]: lsvc = svm.LinearSVC(intercept_scaling=i) msg = ('Intercept scaling is %r but needs to be greater than 0.' ' To disable fitting an intercept,' ' set fit_intercept=False.' % lsvc.intercept_scaling) assert_raise_message(ValueError, msg, lsvc.fit, X, Y) def test_lsvc_intercept_scaling_zero(): # Test that intercept_scaling is ignored when fit_intercept is False lsvc = svm.LinearSVC(fit_intercept=False) lsvc.fit(X, Y) assert_equal(lsvc.intercept_, 0.) def test_hasattr_predict_proba(): # Method must be (un)available before or after fit, switched by # `probability` param G = svm.SVC(gamma='scale', probability=True) assert_true(hasattr(G, 'predict_proba')) G.fit(iris.data, iris.target) assert_true(hasattr(G, 'predict_proba')) G = svm.SVC(gamma='scale', probability=False) assert_false(hasattr(G, 'predict_proba')) G.fit(iris.data, iris.target) assert_false(hasattr(G, 'predict_proba')) # Switching to `probability=True` after fitting should make # predict_proba available, but calling it must not work: G.probability = True assert_true(hasattr(G, 'predict_proba')) msg = "predict_proba is not available when fitted with probability=False" assert_raise_message(NotFittedError, msg, G.predict_proba, iris.data) def test_decision_function_shape_two_class(): for n_classes in [2, 3]: X, y = make_blobs(centers=n_classes, random_state=0) for estimator in [svm.SVC, svm.NuSVC]: clf = OneVsRestClassifier(estimator(gamma='scale', decision_function_shape="ovr")).fit(X, y) assert_equal(len(clf.predict(X)), 
len(y)) def test_ovr_decision_function(): # One point from each quadrant represents one class X_train = np.array([[1, 1], [-1, 1], [-1, -1], [1, -1]]) y_train = [0, 1, 2, 3] # First point is closer to the decision boundaries than the second point base_points = np.array([[5, 5], [10, 10]]) # For all the quadrants (classes) X_test = np.vstack(( base_points * [1, 1], # Q1 base_points * [-1, 1], # Q2 base_points * [-1, -1], # Q3 base_points * [1, -1] # Q4 )) y_test = [0] * 2 + [1] * 2 + [2] * 2 + [3] * 2 clf = svm.SVC(kernel='linear', decision_function_shape='ovr') clf.fit(X_train, y_train) y_pred = clf.predict(X_test) # Test if the prediction is the same as y assert_array_equal(y_pred, y_test) deci_val = clf.decision_function(X_test) # Assert that the predicted class has the maximum value assert_array_equal(np.argmax(deci_val, axis=1), y_pred) # Get decision value at test points for the predicted class pred_class_deci_val = deci_val[range(8), y_pred].reshape((4, 2)) # Assert pred_class_deci_val > 0 here assert_greater(np.min(pred_class_deci_val), 0.0) # Test if the first point has lower decision value on every quadrant # compared to the second point assert_true(np.all(pred_class_deci_val[:, 0] < pred_class_deci_val[:, 1])) def test_gamma_auto(): X, y = [[0.0, 1.2], [1.0, 1.3]], [0, 1] msg = ("The default value of gamma will change from 'auto' to 'scale' in " "version 0.22 to account better for unscaled features. Set gamma " "explicitly to 'auto' or 'scale' to avoid this warning.") assert_warns_message(FutureWarning, msg, svm.SVC().fit, X, y) assert_no_warnings(svm.SVC(kernel='linear').fit, X, y) assert_no_warnings(svm.SVC(kernel='precomputed').fit, X, y) def test_gamma_scale(): X, y = [[0.], [1.]], [0, 1] clf = svm.SVC(gamma='scale') assert_no_warnings(clf.fit, X, y) assert_equal(clf._gamma, 2.) # X_std ~= 1 shouldn't raise warning, for when # gamma is not explicitly set. X, y = [[1, 2], [3, 2 * np.sqrt(6) / 3 + 2]], [0, 1] assert_no_warnings(clf.fit, X, y)
vortex-ape/scikit-learn
sklearn/svm/tests/test_svm.py
sklearn/cluster/dbscan_.py
# Authors: Nicolas Tresegnie <nicolas.tresegnie@gmail.com>
# License: BSD 3 clause
# NOTE: this is the legacy, deprecated imputation module; its replacement is
# sklearn.impute.SimpleImputer (see the @deprecated decorator below).

import warnings

import numpy as np
import numpy.ma as ma
from scipy import sparse
from scipy import stats

from ..base import BaseEstimator, TransformerMixin
from ..utils import check_array
from ..utils import deprecated
from ..utils.sparsefuncs import _get_median
from ..utils.validation import check_is_fitted
from ..utils.validation import FLOAT_DTYPES
from ..externals import six

# Python 2/3 compatibility: lazy variants of zip/map.
zip = six.moves.zip
map = six.moves.map

__all__ = [
    'Imputer',
]


def _get_mask(X, value_to_mask):
    """Compute the boolean mask X == missing_values."""
    # The string "NaN" is the sentinel for np.nan missing values; the
    # short-circuit also avoids calling np.isnan on a string.
    if value_to_mask == "NaN" or np.isnan(value_to_mask):
        return np.isnan(X)
    else:
        return X == value_to_mask


def _most_frequent(array, extra_value, n_repeat):
    """Compute the most frequent value in a 1d array extended with
    [extra_value] * n_repeat, where extra_value is assumed to be not part
    of the array."""
    # Compute the most frequent value in array only
    if array.size > 0:
        mode = stats.mode(array)
        most_frequent_value = mode[0][0]
        most_frequent_count = mode[1][0]
    else:
        most_frequent_value = 0
        most_frequent_count = 0

    # Compare to array + [extra_value] * n_repeat
    if most_frequent_count == 0 and n_repeat == 0:
        # Both the array and the extension are empty: no statistic available.
        return np.nan
    elif most_frequent_count < n_repeat:
        return extra_value
    elif most_frequent_count > n_repeat:
        return most_frequent_value
    elif most_frequent_count == n_repeat:
        # Ties the breaks. Copy the behaviour of scipy.stats.mode
        # (the smaller value wins the tie).
        if most_frequent_value < extra_value:
            return most_frequent_value
        else:
            return extra_value


@deprecated("Imputer was deprecated in version 0.20 and will be "
            "removed in 0.22. Import impute.SimpleImputer from "
            "sklearn instead.")
class Imputer(BaseEstimator, TransformerMixin):
    """Imputation transformer for completing missing values.

    Read more in the :ref:`User Guide <imputation>`.

    Parameters
    ----------
    missing_values : integer or "NaN", optional (default="NaN")
        The placeholder for the missing values. All occurrences of
        `missing_values` will be imputed. For missing values encoded as
        np.nan, use the string value "NaN".

    strategy : string, optional (default="mean")
        The imputation strategy.

        - If "mean", then replace missing values using the mean along
          the axis.
        - If "median", then replace missing values using the median along
          the axis.
        - If "most_frequent", then replace missing using the most frequent
          value along the axis.

    axis : integer, optional (default=0)
        The axis along which to impute.

        - If `axis=0`, then impute along columns.
        - If `axis=1`, then impute along rows.

    verbose : integer, optional (default=0)
        Controls the verbosity of the imputer.

    copy : boolean, optional (default=True)
        If True, a copy of X will be created. If False, imputation will
        be done in-place whenever possible. Note that, in the following cases,
        a new copy will always be made, even if `copy=False`:

        - If X is not an array of floating values;
        - If X is sparse and `missing_values=0`;
        - If `axis=0` and X is encoded as a CSR matrix;
        - If `axis=1` and X is encoded as a CSC matrix.

    Attributes
    ----------
    statistics_ : array of shape (n_features,)
        The imputation fill value for each feature if axis == 0.

    Notes
    -----
    - When ``axis=0``, columns which only contained missing values at `fit`
      are discarded upon `transform`.
    - When ``axis=1``, an exception is raised if there are rows for which it is
      not possible to fill in the missing values (e.g., because they only
      contain missing values).
    """

    def __init__(self, missing_values="NaN", strategy="mean",
                 axis=0, verbose=0, copy=True):
        # Parameters are stored unvalidated, per sklearn convention;
        # validation happens in fit().
        self.missing_values = missing_values
        self.strategy = strategy
        self.axis = axis
        self.verbose = verbose
        self.copy = copy

    def fit(self, X, y=None):
        """Fit the imputer on X.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape (n_samples, n_features)
            Input data, where ``n_samples`` is the number of samples and
            ``n_features`` is the number of features.

        Returns
        -------
        self : Imputer
        """
        # Check parameters
        allowed_strategies = ["mean", "median", "most_frequent"]
        if self.strategy not in allowed_strategies:
            raise ValueError("Can only use these strategies: {0} "
                             " got strategy={1}".format(allowed_strategies,
                                                        self.strategy))

        if self.axis not in [0, 1]:
            raise ValueError("Can only impute missing values on axis 0 and 1, "
                             " got axis={0}".format(self.axis))

        # Since two different arrays can be provided in fit(X) and
        # transform(X), the imputation data will be computed in transform()
        # when the imputation is done per sample (i.e., when axis=1).
        if self.axis == 0:
            X = check_array(X, accept_sparse='csc', dtype=np.float64,
                            force_all_finite=False)

            if sparse.issparse(X):
                self.statistics_ = self._sparse_fit(X,
                                                    self.strategy,
                                                    self.missing_values,
                                                    self.axis)
            else:
                self.statistics_ = self._dense_fit(X,
                                                   self.strategy,
                                                   self.missing_values,
                                                   self.axis)

        return self

    def _sparse_fit(self, X, strategy, missing_values, axis):
        """Fit the transformer on sparse data.

        Returns the per-column (or per-row, for axis=1) imputation
        statistics as a 1d float array; entries for columns with no
        observed values are np.nan.
        """
        # Imputation is done "by column", so if we want to do it
        # by row we only need to convert the matrix to csr format.
        if axis == 1:
            X = X.tocsr()
        else:
            X = X.tocsc()

        # Count the zeros
        if missing_values == 0:
            n_zeros_axis = np.zeros(X.shape[not axis], dtype=int)
        else:
            # Implicit (stored-as-zero) entries per column, from the
            # index-pointer differences.
            n_zeros_axis = X.shape[axis] - np.diff(X.indptr)

        # Mean
        if strategy == "mean":
            if missing_values != 0:
                n_non_missing = n_zeros_axis

                # Mask the missing elements
                mask_missing_values = _get_mask(X.data, missing_values)
                mask_valids = np.logical_not(mask_missing_values)

                # Sum only the valid elements
                new_data = X.data.copy()
                new_data[mask_missing_values] = 0
                X = sparse.csc_matrix((new_data, X.indices, X.indptr),
                                      copy=False)
                sums = X.sum(axis=0)

                # Count the elements != 0
                mask_non_zeros = sparse.csc_matrix(
                    (mask_valids.astype(np.float64),
                     X.indices,
                     X.indptr), copy=False)
                s = mask_non_zeros.sum(axis=0)
                n_non_missing = np.add(n_non_missing, s)

            else:
                sums = X.sum(axis=axis)
                n_non_missing = np.diff(X.indptr)

            # Ignore the error, columns with a np.nan statistics_
            # are not an error at this point. These columns will
            # be removed in transform
            with np.errstate(all="ignore"):
                return np.ravel(sums) / np.ravel(n_non_missing)

        # Median + Most frequent
        else:
            # Remove the missing values, for each column
            columns_all = np.hsplit(X.data, X.indptr[1:-1])
            mask_missing_values = _get_mask(X.data, missing_values)
            mask_valids = np.hsplit(np.logical_not(mask_missing_values),
                                    X.indptr[1:-1])

            # astype necessary for bug in numpy.hsplit before v1.9
            columns = [col[mask.astype(bool, copy=False)]
                       for col, mask in zip(columns_all, mask_valids)]

            # Median
            if strategy == "median":
                median = np.empty(len(columns))
                for i, column in enumerate(columns):
                    # n_zeros_axis[i] accounts for the implicit zeros that
                    # were not stored in the sparse data.
                    median[i] = _get_median(column, n_zeros_axis[i])

                return median

            # Most frequent
            elif strategy == "most_frequent":
                most_frequent = np.empty(len(columns))

                for i, column in enumerate(columns):
                    most_frequent[i] = _most_frequent(column,
                                                      0,
                                                      n_zeros_axis[i])

                return most_frequent

    def _dense_fit(self, X, strategy, missing_values, axis):
        """Fit the transformer on dense data.

        Uses numpy masked arrays so that missing entries are excluded
        from the statistics; columns/rows that are entirely missing get
        np.nan as their statistic.
        """
        X = check_array(X, force_all_finite=False)
        mask = _get_mask(X, missing_values)
        masked_X = ma.masked_array(X, mask=mask)

        # Mean
        if strategy == "mean":
            mean_masked = np.ma.mean(masked_X, axis=axis)
            # Avoid the warning "Warning: converting a masked element to nan."
            mean = np.ma.getdata(mean_masked)
            mean[np.ma.getmask(mean_masked)] = np.nan

            return mean

        # Median
        elif strategy == "median":
            median_masked = np.ma.median(masked_X, axis=axis)
            # Avoid the warning "Warning: converting a masked element to nan."
            median = np.ma.getdata(median_masked)
            median[np.ma.getmaskarray(median_masked)] = np.nan

            return median

        # Most frequent
        elif strategy == "most_frequent":
            # scipy.stats.mstats.mode cannot be used because it will no work
            # properly if the first element is masked and if its frequency
            # is equal to the frequency of the most frequent valid element
            # See https://github.com/scipy/scipy/issues/2636

            # To be able access the elements by columns
            if axis == 0:
                X = X.transpose()
                mask = mask.transpose()

            most_frequent = np.empty(X.shape[0])

            # NOTE(review): np.bool / np.int aliases used here and in
            # transform() are removed in numpy >= 1.24 — fine for the
            # numpy versions this sklearn release supports.
            for i, (row, row_mask) in enumerate(zip(X[:], mask[:])):
                row_mask = np.logical_not(row_mask).astype(np.bool)
                row = row[row_mask]
                most_frequent[i] = _most_frequent(row, np.nan, 0)

            return most_frequent

    def transform(self, X):
        """Impute all missing values in X.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape = [n_samples, n_features]
            The input data to complete.
        """
        if self.axis == 0:
            check_is_fitted(self, 'statistics_')
            X = check_array(X, accept_sparse='csc', dtype=FLOAT_DTYPES,
                            force_all_finite=False, copy=self.copy)
            statistics = self.statistics_
            if X.shape[1] != statistics.shape[0]:
                raise ValueError("X has %d features per sample, expected %d"
                                 % (X.shape[1], self.statistics_.shape[0]))

        # Since two different arrays can be provided in fit(X) and
        # transform(X), the imputation data need to be recomputed
        # when the imputation is done per sample
        else:
            X = check_array(X, accept_sparse='csr', dtype=FLOAT_DTYPES,
                            force_all_finite=False, copy=self.copy)

            if sparse.issparse(X):
                statistics = self._sparse_fit(X,
                                              self.strategy,
                                              self.missing_values,
                                              self.axis)

            else:
                statistics = self._dense_fit(X,
                                             self.strategy,
                                             self.missing_values,
                                             self.axis)

        # Delete the invalid rows/columns
        invalid_mask = np.isnan(statistics)
        valid_mask = np.logical_not(invalid_mask)
        valid_statistics = statistics[valid_mask]
        valid_statistics_indexes = np.where(valid_mask)[0]
        missing = np.arange(X.shape[not self.axis])[invalid_mask]

        if self.axis == 0 and invalid_mask.any():
            if self.verbose:
                warnings.warn("Deleting features without "
                              "observed values: %s" % missing)
            # Features with no observed values at fit time are dropped.
            X = X[:, valid_statistics_indexes]
        elif self.axis == 1 and invalid_mask.any():
            raise ValueError("Some rows only contain "
                             "missing values: %s" % missing)

        # Do actual imputation
        if sparse.issparse(X) and self.missing_values != 0:
            # Missing entries are explicitly stored; fill them in place.
            mask = _get_mask(X.data, self.missing_values)
            indexes = np.repeat(np.arange(len(X.indptr) - 1, dtype=np.int),
                                np.diff(X.indptr))[mask]

            X.data[mask] = valid_statistics[indexes].astype(X.dtype,
                                                            copy=False)
        else:
            # Dense path (sparse X with missing_values == 0 is densified,
            # since the missing entries are the implicit zeros).
            if sparse.issparse(X):
                X = X.toarray()

            mask = _get_mask(X, self.missing_values)
            n_missing = np.sum(mask, axis=self.axis)
            values = np.repeat(valid_statistics, n_missing)

            if self.axis == 0:
                # Transposed np.where yields coordinates grouped by column,
                # matching the column-major order of `values`.
                coordinates = np.where(mask.transpose())[::-1]
            else:
                coordinates = mask

            X[coordinates] = values

        return X
""" Testing for Support Vector Machine module (sklearn.svm) TODO: remove hard coded numerical results when possible """ import numpy as np import itertools import pytest from numpy.testing import assert_array_equal, assert_array_almost_equal from numpy.testing import assert_almost_equal from numpy.testing import assert_allclose from scipy import sparse from sklearn import svm, linear_model, datasets, metrics, base from sklearn.model_selection import train_test_split from sklearn.datasets import make_classification, make_blobs from sklearn.metrics import f1_score from sklearn.metrics.pairwise import rbf_kernel from sklearn.utils import check_random_state from sklearn.utils.testing import assert_equal, assert_true, assert_false from sklearn.utils.testing import assert_greater, assert_in, assert_less from sklearn.utils.testing import assert_raises_regexp, assert_warns from sklearn.utils.testing import assert_warns_message, assert_raise_message from sklearn.utils.testing import ignore_warnings, assert_raises from sklearn.utils.testing import assert_no_warnings from sklearn.exceptions import ConvergenceWarning from sklearn.exceptions import NotFittedError, UndefinedMetricWarning from sklearn.multiclass import OneVsRestClassifier from sklearn.externals import six # toy sample X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]] Y = [1, 1, 1, 2, 2, 2] T = [[-1, -1], [2, 2], [3, 2]] true_result = [1, 2, 2] # also load the iris dataset iris = datasets.load_iris() rng = check_random_state(42) perm = rng.permutation(iris.target.size) iris.data = iris.data[perm] iris.target = iris.target[perm] def test_libsvm_parameters(): # Test parameters on classes that make use of libsvm. 
clf = svm.SVC(kernel='linear').fit(X, Y) assert_array_equal(clf.dual_coef_, [[-0.25, .25]]) assert_array_equal(clf.support_, [1, 3]) assert_array_equal(clf.support_vectors_, (X[1], X[3])) assert_array_equal(clf.intercept_, [0.]) assert_array_equal(clf.predict(X), Y) def test_libsvm_iris(): # Check consistency on dataset iris. # shuffle the dataset so that labels are not ordered for k in ('linear', 'rbf'): clf = svm.SVC(gamma='scale', kernel=k).fit(iris.data, iris.target) assert_greater(np.mean(clf.predict(iris.data) == iris.target), 0.9) assert_true(hasattr(clf, "coef_") == (k == 'linear')) assert_array_equal(clf.classes_, np.sort(clf.classes_)) # check also the low-level API model = svm.libsvm.fit(iris.data, iris.target.astype(np.float64)) pred = svm.libsvm.predict(iris.data, *model) assert_greater(np.mean(pred == iris.target), .95) model = svm.libsvm.fit(iris.data, iris.target.astype(np.float64), kernel='linear') pred = svm.libsvm.predict(iris.data, *model, kernel='linear') assert_greater(np.mean(pred == iris.target), .95) pred = svm.libsvm.cross_validation(iris.data, iris.target.astype(np.float64), 5, kernel='linear', random_seed=0) assert_greater(np.mean(pred == iris.target), .95) # If random_seed >= 0, the libsvm rng is seeded (by calling `srand`), hence # we should get deterministic results (assuming that there is no other # thread calling this wrapper calling `srand` concurrently). pred2 = svm.libsvm.cross_validation(iris.data, iris.target.astype(np.float64), 5, kernel='linear', random_seed=0) assert_array_equal(pred, pred2) def test_precomputed(): # SVC with a precomputed kernel. # We test it with a toy dataset and with iris. 
clf = svm.SVC(kernel='precomputed') # Gram matrix for train data (square matrix) # (we use just a linear kernel) K = np.dot(X, np.array(X).T) clf.fit(K, Y) # Gram matrix for test data (rectangular matrix) KT = np.dot(T, np.array(X).T) pred = clf.predict(KT) assert_raises(ValueError, clf.predict, KT.T) assert_array_equal(clf.dual_coef_, [[-0.25, .25]]) assert_array_equal(clf.support_, [1, 3]) assert_array_equal(clf.intercept_, [0]) assert_array_almost_equal(clf.support_, [1, 3]) assert_array_equal(pred, true_result) # Gram matrix for test data but compute KT[i,j] # for support vectors j only. KT = np.zeros_like(KT) for i in range(len(T)): for j in clf.support_: KT[i, j] = np.dot(T[i], X[j]) pred = clf.predict(KT) assert_array_equal(pred, true_result) # same as before, but using a callable function instead of the kernel # matrix. kernel is just a linear kernel kfunc = lambda x, y: np.dot(x, y.T) clf = svm.SVC(gamma='scale', kernel=kfunc) clf.fit(X, Y) pred = clf.predict(T) assert_array_equal(clf.dual_coef_, [[-0.25, .25]]) assert_array_equal(clf.intercept_, [0]) assert_array_almost_equal(clf.support_, [1, 3]) assert_array_equal(pred, true_result) # test a precomputed kernel with the iris dataset # and check parameters against a linear SVC clf = svm.SVC(kernel='precomputed') clf2 = svm.SVC(kernel='linear') K = np.dot(iris.data, iris.data.T) clf.fit(K, iris.target) clf2.fit(iris.data, iris.target) pred = clf.predict(K) assert_array_almost_equal(clf.support_, clf2.support_) assert_array_almost_equal(clf.dual_coef_, clf2.dual_coef_) assert_array_almost_equal(clf.intercept_, clf2.intercept_) assert_almost_equal(np.mean(pred == iris.target), .99, decimal=2) # Gram matrix for test data but compute KT[i,j] # for support vectors j only. 
K = np.zeros_like(K) for i in range(len(iris.data)): for j in clf.support_: K[i, j] = np.dot(iris.data[i], iris.data[j]) pred = clf.predict(K) assert_almost_equal(np.mean(pred == iris.target), .99, decimal=2) clf = svm.SVC(gamma='scale', kernel=kfunc) clf.fit(iris.data, iris.target) assert_almost_equal(np.mean(pred == iris.target), .99, decimal=2) def test_svr(): # Test Support Vector Regression diabetes = datasets.load_diabetes() for clf in (svm.NuSVR(kernel='linear', nu=.4, C=1.0), svm.NuSVR(kernel='linear', nu=.4, C=10.), svm.SVR(kernel='linear', C=10.), svm.LinearSVR(C=10.), svm.LinearSVR(C=10.), ): clf.fit(diabetes.data, diabetes.target) assert_greater(clf.score(diabetes.data, diabetes.target), 0.02) # non-regression test; previously, BaseLibSVM would check that # len(np.unique(y)) < 2, which must only be done for SVC svm.SVR(gamma='scale').fit(diabetes.data, np.ones(len(diabetes.data))) svm.LinearSVR().fit(diabetes.data, np.ones(len(diabetes.data))) def test_linearsvr(): # check that SVR(kernel='linear') and LinearSVC() give # comparable results diabetes = datasets.load_diabetes() lsvr = svm.LinearSVR(C=1e3).fit(diabetes.data, diabetes.target) score1 = lsvr.score(diabetes.data, diabetes.target) svr = svm.SVR(kernel='linear', C=1e3).fit(diabetes.data, diabetes.target) score2 = svr.score(diabetes.data, diabetes.target) assert_allclose(np.linalg.norm(lsvr.coef_), np.linalg.norm(svr.coef_), 1, 0.0001) assert_almost_equal(score1, score2, 2) def test_linearsvr_fit_sampleweight(): # check correct result when sample_weight is 1 # check that SVR(kernel='linear') and LinearSVC() give # comparable results diabetes = datasets.load_diabetes() n_samples = len(diabetes.target) unit_weight = np.ones(n_samples) lsvr = svm.LinearSVR(C=1e3).fit(diabetes.data, diabetes.target, sample_weight=unit_weight) score1 = lsvr.score(diabetes.data, diabetes.target) lsvr_no_weight = svm.LinearSVR(C=1e3).fit(diabetes.data, diabetes.target) score2 = lsvr_no_weight.score(diabetes.data, 
diabetes.target) assert_allclose(np.linalg.norm(lsvr.coef_), np.linalg.norm(lsvr_no_weight.coef_), 1, 0.0001) assert_almost_equal(score1, score2, 2) # check that fit(X) = fit([X1, X2, X3],sample_weight = [n1, n2, n3]) where # X = X1 repeated n1 times, X2 repeated n2 times and so forth random_state = check_random_state(0) random_weight = random_state.randint(0, 10, n_samples) lsvr_unflat = svm.LinearSVR(C=1e3).fit(diabetes.data, diabetes.target, sample_weight=random_weight) score3 = lsvr_unflat.score(diabetes.data, diabetes.target, sample_weight=random_weight) X_flat = np.repeat(diabetes.data, random_weight, axis=0) y_flat = np.repeat(diabetes.target, random_weight, axis=0) lsvr_flat = svm.LinearSVR(C=1e3).fit(X_flat, y_flat) score4 = lsvr_flat.score(X_flat, y_flat) assert_almost_equal(score3, score4, 2) def test_svr_errors(): X = [[0.0], [1.0]] y = [0.0, 0.5] # Bad kernel clf = svm.SVR(gamma='scale', kernel=lambda x, y: np.array([[1.0]])) clf.fit(X, y) assert_raises(ValueError, clf.predict, X) def test_oneclass(): # Test OneClassSVM clf = svm.OneClassSVM(gamma='scale') clf.fit(X) pred = clf.predict(T) assert_array_equal(pred, [-1, -1, -1]) assert_equal(pred.dtype, np.dtype('intp')) assert_array_almost_equal(clf.intercept_, [-1.117], decimal=3) assert_array_almost_equal(clf.dual_coef_, [[0.681, 0.139, 0.68, 0.14, 0.68, 0.68]], decimal=3) assert_raises(AttributeError, lambda: clf.coef_) def test_oneclass_decision_function(): # Test OneClassSVM decision function clf = svm.OneClassSVM() rnd = check_random_state(2) # Generate train data X = 0.3 * rnd.randn(100, 2) X_train = np.r_[X + 2, X - 2] # Generate some regular novel observations X = 0.3 * rnd.randn(20, 2) X_test = np.r_[X + 2, X - 2] # Generate some abnormal novel observations X_outliers = rnd.uniform(low=-4, high=4, size=(20, 2)) # fit the model clf = svm.OneClassSVM(nu=0.1, kernel="rbf", gamma=0.1) clf.fit(X_train) # predict things y_pred_test = clf.predict(X_test) assert_greater(np.mean(y_pred_test == 1), .9) 
y_pred_outliers = clf.predict(X_outliers) assert_greater(np.mean(y_pred_outliers == -1), .9) dec_func_test = clf.decision_function(X_test) assert_array_equal((dec_func_test > 0).ravel(), y_pred_test == 1) dec_func_outliers = clf.decision_function(X_outliers) assert_array_equal((dec_func_outliers > 0).ravel(), y_pred_outliers == 1) def test_oneclass_score_samples(): X_train = [[1, 1], [1, 2], [2, 1]] clf = svm.OneClassSVM(gamma=1).fit(X_train) assert_array_equal(clf.score_samples([[2., 2.]]), clf.decision_function([[2., 2.]]) + clf.offset_) def test_tweak_params(): # Make sure some tweaking of parameters works. # We change clf.dual_coef_ at run time and expect .predict() to change # accordingly. Notice that this is not trivial since it involves a lot # of C/Python copying in the libsvm bindings. # The success of this test ensures that the mapping between libsvm and # the python classifier is complete. clf = svm.SVC(kernel='linear', C=1.0) clf.fit(X, Y) assert_array_equal(clf.dual_coef_, [[-.25, .25]]) assert_array_equal(clf.predict([[-.1, -.1]]), [1]) clf._dual_coef_ = np.array([[.0, 1.]]) assert_array_equal(clf.predict([[-.1, -.1]]), [2]) def test_probability(): # Predict probabilities using SVC # This uses cross validation, so we use a slightly bigger testing set. 
for clf in (svm.SVC(gamma='scale', probability=True, random_state=0, C=1.0), svm.NuSVC(gamma='scale', probability=True, random_state=0)): clf.fit(iris.data, iris.target) prob_predict = clf.predict_proba(iris.data) assert_array_almost_equal( np.sum(prob_predict, 1), np.ones(iris.data.shape[0])) assert_true(np.mean(np.argmax(prob_predict, 1) == clf.predict(iris.data)) > 0.9) assert_almost_equal(clf.predict_proba(iris.data), np.exp(clf.predict_log_proba(iris.data)), 8) def test_decision_function(): # Test decision_function # Sanity check, test that decision_function implemented in python # returns the same as the one in libsvm # multi class: clf = svm.SVC(kernel='linear', C=0.1, decision_function_shape='ovo').fit(iris.data, iris.target) dec = np.dot(iris.data, clf.coef_.T) + clf.intercept_ assert_array_almost_equal(dec, clf.decision_function(iris.data)) # binary: clf.fit(X, Y) dec = np.dot(X, clf.coef_.T) + clf.intercept_ prediction = clf.predict(X) assert_array_almost_equal(dec.ravel(), clf.decision_function(X)) assert_array_almost_equal( prediction, clf.classes_[(clf.decision_function(X) > 0).astype(np.int)]) expected = np.array([-1., -0.66, -1., 0.66, 1., 1.]) assert_array_almost_equal(clf.decision_function(X), expected, 2) # kernel binary: clf = svm.SVC(kernel='rbf', gamma=1, decision_function_shape='ovo') clf.fit(X, Y) rbfs = rbf_kernel(X, clf.support_vectors_, gamma=clf.gamma) dec = np.dot(rbfs, clf.dual_coef_.T) + clf.intercept_ assert_array_almost_equal(dec.ravel(), clf.decision_function(X)) def test_decision_function_shape(): # check that decision_function_shape='ovr' gives # correct shape and is consistent with predict clf = svm.SVC(kernel='linear', C=0.1, decision_function_shape='ovr').fit(iris.data, iris.target) dec = clf.decision_function(iris.data) assert_equal(dec.shape, (len(iris.data), 3)) assert_array_equal(clf.predict(iris.data), np.argmax(dec, axis=1)) # with five classes: X, y = make_blobs(n_samples=80, centers=5, random_state=0) X_train, X_test, 
y_train, y_test = train_test_split(X, y, random_state=0) clf = svm.SVC(kernel='linear', C=0.1, decision_function_shape='ovr').fit(X_train, y_train) dec = clf.decision_function(X_test) assert_equal(dec.shape, (len(X_test), 5)) assert_array_equal(clf.predict(X_test), np.argmax(dec, axis=1)) # check shape of ovo_decition_function=True clf = svm.SVC(kernel='linear', C=0.1, decision_function_shape='ovo').fit(X_train, y_train) dec = clf.decision_function(X_train) assert_equal(dec.shape, (len(X_train), 10)) def test_svr_predict(): # Test SVR's decision_function # Sanity check, test that predict implemented in python # returns the same as the one in libsvm X = iris.data y = iris.target # linear kernel reg = svm.SVR(kernel='linear', C=0.1).fit(X, y) dec = np.dot(X, reg.coef_.T) + reg.intercept_ assert_array_almost_equal(dec.ravel(), reg.predict(X).ravel()) # rbf kernel reg = svm.SVR(kernel='rbf', gamma=1).fit(X, y) rbfs = rbf_kernel(X, reg.support_vectors_, gamma=reg.gamma) dec = np.dot(rbfs, reg.dual_coef_.T) + reg.intercept_ assert_array_almost_equal(dec.ravel(), reg.predict(X).ravel()) @pytest.mark.filterwarnings('ignore: Default solver will be changed') # 0.22 @pytest.mark.filterwarnings('ignore: Default multi_class will') # 0.22 def test_weight(): # Test class weights clf = svm.SVC(gamma='scale', class_weight={1: 0.1}) # we give a small weights to class 1 clf.fit(X, Y) # so all predicted values belong to class 2 assert_array_almost_equal(clf.predict(X), [2] * 6) X_, y_ = make_classification(n_samples=200, n_features=10, weights=[0.833, 0.167], random_state=2) for clf in (linear_model.LogisticRegression(), svm.LinearSVC(random_state=0), svm.SVC(gamma="scale")): clf.set_params(class_weight={0: .1, 1: 10}) clf.fit(X_[:100], y_[:100]) y_pred = clf.predict(X_[100:]) assert_true(f1_score(y_[100:], y_pred) > .3) def test_sample_weights(): # Test weights on individual samples # TODO: check on NuSVR, OneClass, etc. 
clf = svm.SVC(gamma="scale") clf.fit(X, Y) assert_array_equal(clf.predict([X[2]]), [1.]) sample_weight = [.1] * 3 + [10] * 3 clf.fit(X, Y, sample_weight=sample_weight) assert_array_equal(clf.predict([X[2]]), [2.]) # test that rescaling all samples is the same as changing C clf = svm.SVC(gamma="scale") clf.fit(X, Y) dual_coef_no_weight = clf.dual_coef_ clf.set_params(C=100) clf.fit(X, Y, sample_weight=np.repeat(0.01, len(X))) assert_array_almost_equal(dual_coef_no_weight, clf.dual_coef_) @pytest.mark.filterwarnings('ignore: Default solver will be changed') # 0.22 @pytest.mark.filterwarnings('ignore: Default multi_class will') # 0.22 @ignore_warnings(category=UndefinedMetricWarning) def test_auto_weight(): # Test class weights for imbalanced data from sklearn.linear_model import LogisticRegression # We take as dataset the two-dimensional projection of iris so # that it is not separable and remove half of predictors from # class 1. # We add one to the targets as a non-regression test: # class_weight="balanced" # used to work only when the labels where a range [0..K). from sklearn.utils import compute_class_weight X, y = iris.data[:, :2], iris.target + 1 unbalanced = np.delete(np.arange(y.size), np.where(y > 2)[0][::2]) classes = np.unique(y[unbalanced]) class_weights = compute_class_weight('balanced', classes, y[unbalanced]) assert_true(np.argmax(class_weights) == 2) for clf in (svm.SVC(kernel='linear'), svm.LinearSVC(random_state=0), LogisticRegression()): # check that score is better when class='balanced' is set. 
y_pred = clf.fit(X[unbalanced], y[unbalanced]).predict(X) clf.set_params(class_weight='balanced') y_pred_balanced = clf.fit(X[unbalanced], y[unbalanced],).predict(X) assert_true(metrics.f1_score(y, y_pred, average='macro') <= metrics.f1_score(y, y_pred_balanced, average='macro')) def test_bad_input(): # Test that it gives proper exception on deficient input # impossible value of C assert_raises(ValueError, svm.SVC(gamma='scale', C=-1).fit, X, Y) # impossible value of nu clf = svm.NuSVC(gamma='scale', nu=0.0) assert_raises(ValueError, clf.fit, X, Y) Y2 = Y[:-1] # wrong dimensions for labels assert_raises(ValueError, clf.fit, X, Y2) # Test with arrays that are non-contiguous. for clf in (svm.SVC(gamma="scale"), svm.LinearSVC(random_state=0)): Xf = np.asfortranarray(X) assert_false(Xf.flags['C_CONTIGUOUS']) yf = np.ascontiguousarray(np.tile(Y, (2, 1)).T) yf = yf[:, -1] assert_false(yf.flags['F_CONTIGUOUS']) assert_false(yf.flags['C_CONTIGUOUS']) clf.fit(Xf, yf) assert_array_equal(clf.predict(T), true_result) # error for precomputed kernelsx clf = svm.SVC(kernel='precomputed') assert_raises(ValueError, clf.fit, X, Y) # sample_weight bad dimensions clf = svm.SVC(gamma="scale") assert_raises(ValueError, clf.fit, X, Y, sample_weight=range(len(X) - 1)) # predict with sparse input when trained with dense clf = svm.SVC(gamma="scale").fit(X, Y) assert_raises(ValueError, clf.predict, sparse.lil_matrix(X)) Xt = np.array(X).T clf.fit(np.dot(X, Xt), Y) assert_raises(ValueError, clf.predict, X) clf = svm.SVC(gamma="scale") clf.fit(X, Y) assert_raises(ValueError, clf.predict, Xt) def test_unicode_kernel(): # Test that a unicode kernel name does not cause a TypeError if six.PY2: # Test unicode (same as str on python3) clf = svm.SVC(kernel=u'linear', probability=True) clf.fit(X, Y) clf.predict_proba(T) svm.libsvm.cross_validation(iris.data, iris.target.astype(np.float64), 5, kernel=u'linear', random_seed=0) # Test default behavior on both versions clf = svm.SVC(gamma='scale', 
kernel='linear', probability=True) clf.fit(X, Y) clf.predict_proba(T) svm.libsvm.cross_validation(iris.data, iris.target.astype(np.float64), 5, kernel='linear', random_seed=0) def test_sparse_precomputed(): clf = svm.SVC(kernel='precomputed') sparse_gram = sparse.csr_matrix([[1, 0], [0, 1]]) try: clf.fit(sparse_gram, [0, 1]) assert not "reached" except TypeError as e: assert_in("Sparse precomputed", str(e)) def test_linearsvc_parameters(): # Test possible parameter combinations in LinearSVC # Generate list of possible parameter combinations losses = ['hinge', 'squared_hinge', 'logistic_regression', 'foo'] penalties, duals = ['l1', 'l2', 'bar'], [True, False] X, y = make_classification(n_samples=5, n_features=5) for loss, penalty, dual in itertools.product(losses, penalties, duals): clf = svm.LinearSVC(penalty=penalty, loss=loss, dual=dual) if ((loss, penalty) == ('hinge', 'l1') or (loss, penalty, dual) == ('hinge', 'l2', False) or (penalty, dual) == ('l1', True) or loss == 'foo' or penalty == 'bar'): assert_raises_regexp(ValueError, "Unsupported set of arguments.*penalty='%s.*" "loss='%s.*dual=%s" % (penalty, loss, dual), clf.fit, X, y) else: clf.fit(X, y) # Incorrect loss value - test if explicit error message is raised assert_raises_regexp(ValueError, ".*loss='l3' is not supported.*", svm.LinearSVC(loss="l3").fit, X, y) # FIXME remove in 1.0 def test_linearsvx_loss_penalty_deprecations(): X, y = [[0.0], [1.0]], [0, 1] msg = ("loss='%s' has been deprecated in favor of " "loss='%s' as of 0.16. 
Backward compatibility" " for the %s will be removed in %s") # LinearSVC # loss l1 --> hinge assert_warns_message(DeprecationWarning, msg % ("l1", "hinge", "loss='l1'", "1.0"), svm.LinearSVC(loss="l1").fit, X, y) # loss l2 --> squared_hinge assert_warns_message(DeprecationWarning, msg % ("l2", "squared_hinge", "loss='l2'", "1.0"), svm.LinearSVC(loss="l2").fit, X, y) # LinearSVR # loss l1 --> epsilon_insensitive assert_warns_message(DeprecationWarning, msg % ("l1", "epsilon_insensitive", "loss='l1'", "1.0"), svm.LinearSVR(loss="l1").fit, X, y) # loss l2 --> squared_epsilon_insensitive assert_warns_message(DeprecationWarning, msg % ("l2", "squared_epsilon_insensitive", "loss='l2'", "1.0"), svm.LinearSVR(loss="l2").fit, X, y) def test_linear_svx_uppercase_loss_penality_raises_error(): # Check if Upper case notation raises error at _fit_liblinear # which is called by fit X, y = [[0.0], [1.0]], [0, 1] assert_raise_message(ValueError, "loss='SQuared_hinge' is not supported", svm.LinearSVC(loss="SQuared_hinge").fit, X, y) assert_raise_message(ValueError, ("The combination of penalty='L2'" " and loss='squared_hinge' is not supported"), svm.LinearSVC(penalty="L2").fit, X, y) def test_linearsvc(): # Test basic routines using LinearSVC clf = svm.LinearSVC(random_state=0).fit(X, Y) # by default should have intercept assert_true(clf.fit_intercept) assert_array_equal(clf.predict(T), true_result) assert_array_almost_equal(clf.intercept_, [0], decimal=3) # the same with l1 penalty clf = svm.LinearSVC(penalty='l1', loss='squared_hinge', dual=False, random_state=0).fit(X, Y) assert_array_equal(clf.predict(T), true_result) # l2 penalty with dual formulation clf = svm.LinearSVC(penalty='l2', dual=True, random_state=0).fit(X, Y) assert_array_equal(clf.predict(T), true_result) # l2 penalty, l1 loss clf = svm.LinearSVC(penalty='l2', loss='hinge', dual=True, random_state=0) clf.fit(X, Y) assert_array_equal(clf.predict(T), true_result) # test also decision function dec = 
clf.decision_function(T) res = (dec > 0).astype(np.int) + 1 assert_array_equal(res, true_result) def test_linearsvc_crammer_singer(): # Test LinearSVC with crammer_singer multi-class svm ovr_clf = svm.LinearSVC(random_state=0).fit(iris.data, iris.target) cs_clf = svm.LinearSVC(multi_class='crammer_singer', random_state=0) cs_clf.fit(iris.data, iris.target) # similar prediction for ovr and crammer-singer: assert_true((ovr_clf.predict(iris.data) == cs_clf.predict(iris.data)).mean() > .9) # classifiers shouldn't be the same assert_true((ovr_clf.coef_ != cs_clf.coef_).all()) # test decision function assert_array_equal(cs_clf.predict(iris.data), np.argmax(cs_clf.decision_function(iris.data), axis=1)) dec_func = np.dot(iris.data, cs_clf.coef_.T) + cs_clf.intercept_ assert_array_almost_equal(dec_func, cs_clf.decision_function(iris.data)) def test_linearsvc_fit_sampleweight(): # check correct result when sample_weight is 1 n_samples = len(X) unit_weight = np.ones(n_samples) clf = svm.LinearSVC(random_state=0).fit(X, Y) clf_unitweight = svm.LinearSVC(random_state=0).\ fit(X, Y, sample_weight=unit_weight) # check if same as sample_weight=None assert_array_equal(clf_unitweight.predict(T), clf.predict(T)) assert_allclose(clf.coef_, clf_unitweight.coef_, 1, 0.0001) # check that fit(X) = fit([X1, X2, X3],sample_weight = [n1, n2, n3]) where # X = X1 repeated n1 times, X2 repeated n2 times and so forth random_state = check_random_state(0) random_weight = random_state.randint(0, 10, n_samples) lsvc_unflat = svm.LinearSVC(random_state=0).\ fit(X, Y, sample_weight=random_weight) pred1 = lsvc_unflat.predict(T) X_flat = np.repeat(X, random_weight, axis=0) y_flat = np.repeat(Y, random_weight, axis=0) lsvc_flat = svm.LinearSVC(random_state=0).fit(X_flat, y_flat) pred2 = lsvc_flat.predict(T) assert_array_equal(pred1, pred2) assert_allclose(lsvc_unflat.coef_, lsvc_flat.coef_, 1, 0.0001) def test_crammer_singer_binary(): # Test Crammer-Singer formulation in the binary case X, y = 
make_classification(n_classes=2, random_state=0) for fit_intercept in (True, False): acc = svm.LinearSVC(fit_intercept=fit_intercept, multi_class="crammer_singer", random_state=0).fit(X, y).score(X, y) assert_greater(acc, 0.9) def test_linearsvc_iris(): # Test that LinearSVC gives plausible predictions on the iris dataset # Also, test symbolic class names (classes_). target = iris.target_names[iris.target] clf = svm.LinearSVC(random_state=0).fit(iris.data, target) assert_equal(set(clf.classes_), set(iris.target_names)) assert_greater(np.mean(clf.predict(iris.data) == target), 0.8) dec = clf.decision_function(iris.data) pred = iris.target_names[np.argmax(dec, 1)] assert_array_equal(pred, clf.predict(iris.data)) def test_dense_liblinear_intercept_handling(classifier=svm.LinearSVC): # Test that dense liblinear honours intercept_scaling param X = [[2, 1], [3, 1], [1, 3], [2, 3]] y = [0, 0, 1, 1] clf = classifier(fit_intercept=True, penalty='l1', loss='squared_hinge', dual=False, C=4, tol=1e-7, random_state=0) assert_true(clf.intercept_scaling == 1, clf.intercept_scaling) assert_true(clf.fit_intercept) # when intercept_scaling is low the intercept value is highly "penalized" # by regularization clf.intercept_scaling = 1 clf.fit(X, y) assert_almost_equal(clf.intercept_, 0, decimal=5) # when intercept_scaling is sufficiently high, the intercept value # is not affected by regularization clf.intercept_scaling = 100 clf.fit(X, y) intercept1 = clf.intercept_ assert_less(intercept1, -1) # when intercept_scaling is sufficiently high, the intercept value # doesn't depend on intercept_scaling value clf.intercept_scaling = 1000 clf.fit(X, y) intercept2 = clf.intercept_ assert_array_almost_equal(intercept1, intercept2, decimal=2) def test_liblinear_set_coef(): # multi-class case clf = svm.LinearSVC().fit(iris.data, iris.target) values = clf.decision_function(iris.data) clf.coef_ = clf.coef_.copy() clf.intercept_ = clf.intercept_.copy() values2 = clf.decision_function(iris.data) 
assert_array_almost_equal(values, values2) # binary-class case X = [[2, 1], [3, 1], [1, 3], [2, 3]] y = [0, 0, 1, 1] clf = svm.LinearSVC().fit(X, y) values = clf.decision_function(X) clf.coef_ = clf.coef_.copy() clf.intercept_ = clf.intercept_.copy() values2 = clf.decision_function(X) assert_array_equal(values, values2) def test_immutable_coef_property(): # Check that primal coef modification are not silently ignored svms = [ svm.SVC(kernel='linear').fit(iris.data, iris.target), svm.NuSVC(kernel='linear').fit(iris.data, iris.target), svm.SVR(kernel='linear').fit(iris.data, iris.target), svm.NuSVR(kernel='linear').fit(iris.data, iris.target), svm.OneClassSVM(kernel='linear').fit(iris.data), ] for clf in svms: assert_raises(AttributeError, clf.__setattr__, 'coef_', np.arange(3)) assert_raises((RuntimeError, ValueError), clf.coef_.__setitem__, (0, 0), 0) def test_linearsvc_verbose(): # stdout: redirect import os stdout = os.dup(1) # save original stdout os.dup2(os.pipe()[1], 1) # replace it # actual call clf = svm.LinearSVC(verbose=1) clf.fit(X, Y) # stdout: restore os.dup2(stdout, 1) # restore original stdout def test_svc_clone_with_callable_kernel(): # create SVM with callable linear kernel, check that results are the same # as with built-in linear kernel svm_callable = svm.SVC(gamma='scale', kernel=lambda x, y: np.dot(x, y.T), probability=True, random_state=0, decision_function_shape='ovr') # clone for checking clonability with lambda functions.. 
svm_cloned = base.clone(svm_callable) svm_cloned.fit(iris.data, iris.target) svm_builtin = svm.SVC(kernel='linear', probability=True, random_state=0, decision_function_shape='ovr') svm_builtin.fit(iris.data, iris.target) assert_array_almost_equal(svm_cloned.dual_coef_, svm_builtin.dual_coef_) assert_array_almost_equal(svm_cloned.intercept_, svm_builtin.intercept_) assert_array_equal(svm_cloned.predict(iris.data), svm_builtin.predict(iris.data)) assert_array_almost_equal(svm_cloned.predict_proba(iris.data), svm_builtin.predict_proba(iris.data), decimal=4) assert_array_almost_equal(svm_cloned.decision_function(iris.data), svm_builtin.decision_function(iris.data)) def test_svc_bad_kernel(): svc = svm.SVC(gamma='scale', kernel=lambda x, y: x) assert_raises(ValueError, svc.fit, X, Y) def test_timeout(): a = svm.SVC(kernel=lambda x, y: np.dot(x, y.T), probability=True, random_state=0, max_iter=1) assert_warns(ConvergenceWarning, a.fit, X, Y) def test_unfitted(): X = "foo!" # input validation not required when SVM not fitted clf = svm.SVC(gamma="scale") assert_raises_regexp(Exception, r".*\bSVC\b.*\bnot\b.*\bfitted\b", clf.predict, X) clf = svm.NuSVR(gamma='scale') assert_raises_regexp(Exception, r".*\bNuSVR\b.*\bnot\b.*\bfitted\b", clf.predict, X) # ignore convergence warnings from max_iter=1 @ignore_warnings def test_consistent_proba(): a = svm.SVC(probability=True, max_iter=1, random_state=0) proba_1 = a.fit(X, Y).predict_proba(X) a = svm.SVC(probability=True, max_iter=1, random_state=0) proba_2 = a.fit(X, Y).predict_proba(X) assert_array_almost_equal(proba_1, proba_2) def test_linear_svm_convergence_warnings(): # Test that warnings are raised if model does not converge lsvc = svm.LinearSVC(random_state=0, max_iter=2) assert_warns(ConvergenceWarning, lsvc.fit, X, Y) assert_equal(lsvc.n_iter_, 2) lsvr = svm.LinearSVR(random_state=0, max_iter=2) assert_warns(ConvergenceWarning, lsvr.fit, iris.data, iris.target) assert_equal(lsvr.n_iter_, 2) def test_svr_coef_sign(): # 
Test that SVR(kernel="linear") has coef_ with the right sign. # Non-regression test for #2933. X = np.random.RandomState(21).randn(10, 3) y = np.random.RandomState(12).randn(10) for svr in [svm.SVR(kernel='linear'), svm.NuSVR(kernel='linear'), svm.LinearSVR()]: svr.fit(X, y) assert_array_almost_equal(svr.predict(X), np.dot(X, svr.coef_.ravel()) + svr.intercept_) def test_linear_svc_intercept_scaling(): # Test that the right error message is thrown when intercept_scaling <= 0 for i in [-1, 0]: lsvc = svm.LinearSVC(intercept_scaling=i) msg = ('Intercept scaling is %r but needs to be greater than 0.' ' To disable fitting an intercept,' ' set fit_intercept=False.' % lsvc.intercept_scaling) assert_raise_message(ValueError, msg, lsvc.fit, X, Y) def test_lsvc_intercept_scaling_zero(): # Test that intercept_scaling is ignored when fit_intercept is False lsvc = svm.LinearSVC(fit_intercept=False) lsvc.fit(X, Y) assert_equal(lsvc.intercept_, 0.) def test_hasattr_predict_proba(): # Method must be (un)available before or after fit, switched by # `probability` param G = svm.SVC(gamma='scale', probability=True) assert_true(hasattr(G, 'predict_proba')) G.fit(iris.data, iris.target) assert_true(hasattr(G, 'predict_proba')) G = svm.SVC(gamma='scale', probability=False) assert_false(hasattr(G, 'predict_proba')) G.fit(iris.data, iris.target) assert_false(hasattr(G, 'predict_proba')) # Switching to `probability=True` after fitting should make # predict_proba available, but calling it must not work: G.probability = True assert_true(hasattr(G, 'predict_proba')) msg = "predict_proba is not available when fitted with probability=False" assert_raise_message(NotFittedError, msg, G.predict_proba, iris.data) def test_decision_function_shape_two_class(): for n_classes in [2, 3]: X, y = make_blobs(centers=n_classes, random_state=0) for estimator in [svm.SVC, svm.NuSVC]: clf = OneVsRestClassifier(estimator(gamma='scale', decision_function_shape="ovr")).fit(X, y) assert_equal(len(clf.predict(X)), 
len(y)) def test_ovr_decision_function(): # One point from each quadrant represents one class X_train = np.array([[1, 1], [-1, 1], [-1, -1], [1, -1]]) y_train = [0, 1, 2, 3] # First point is closer to the decision boundaries than the second point base_points = np.array([[5, 5], [10, 10]]) # For all the quadrants (classes) X_test = np.vstack(( base_points * [1, 1], # Q1 base_points * [-1, 1], # Q2 base_points * [-1, -1], # Q3 base_points * [1, -1] # Q4 )) y_test = [0] * 2 + [1] * 2 + [2] * 2 + [3] * 2 clf = svm.SVC(kernel='linear', decision_function_shape='ovr') clf.fit(X_train, y_train) y_pred = clf.predict(X_test) # Test if the prediction is the same as y assert_array_equal(y_pred, y_test) deci_val = clf.decision_function(X_test) # Assert that the predicted class has the maximum value assert_array_equal(np.argmax(deci_val, axis=1), y_pred) # Get decision value at test points for the predicted class pred_class_deci_val = deci_val[range(8), y_pred].reshape((4, 2)) # Assert pred_class_deci_val > 0 here assert_greater(np.min(pred_class_deci_val), 0.0) # Test if the first point has lower decision value on every quadrant # compared to the second point assert_true(np.all(pred_class_deci_val[:, 0] < pred_class_deci_val[:, 1])) def test_gamma_auto(): X, y = [[0.0, 1.2], [1.0, 1.3]], [0, 1] msg = ("The default value of gamma will change from 'auto' to 'scale' in " "version 0.22 to account better for unscaled features. Set gamma " "explicitly to 'auto' or 'scale' to avoid this warning.") assert_warns_message(FutureWarning, msg, svm.SVC().fit, X, y) assert_no_warnings(svm.SVC(kernel='linear').fit, X, y) assert_no_warnings(svm.SVC(kernel='precomputed').fit, X, y) def test_gamma_scale(): X, y = [[0.], [1.]], [0, 1] clf = svm.SVC(gamma='scale') assert_no_warnings(clf.fit, X, y) assert_equal(clf._gamma, 2.) # X_std ~= 1 shouldn't raise warning, for when # gamma is not explicitly set. X, y = [[1, 2], [3, 2 * np.sqrt(6) / 3 + 2]], [0, 1] assert_no_warnings(clf.fit, X, y)
vortex-ape/scikit-learn
sklearn/svm/tests/test_svm.py
sklearn/preprocessing/imputation.py
"""Global configuration state and functions for management """ import os from contextlib import contextmanager as contextmanager _global_config = { 'assume_finite': bool(os.environ.get('SKLEARN_ASSUME_FINITE', False)), 'working_memory': int(os.environ.get('SKLEARN_WORKING_MEMORY', 1024)) } def get_config(): """Retrieve current values for configuration set by :func:`set_config` Returns ------- config : dict Keys are parameter names that can be passed to :func:`set_config`. """ return _global_config.copy() def set_config(assume_finite=None, working_memory=None): """Set global scikit-learn configuration Parameters ---------- assume_finite : bool, optional If True, validation for finiteness will be skipped, saving time, but leading to potential crashes. If False, validation for finiteness will be performed, avoiding error. Global default: False. working_memory : int, optional If set, scikit-learn will attempt to limit the size of temporary arrays to this number of MiB (per job when parallelised), often saving both computation time and memory on expensive operations that can be performed in chunks. Global default: 1024. """ if assume_finite is not None: _global_config['assume_finite'] = assume_finite if working_memory is not None: _global_config['working_memory'] = working_memory @contextmanager def config_context(**new_config): """Context manager for global scikit-learn configuration Parameters ---------- assume_finite : bool, optional If True, validation for finiteness will be skipped, saving time, but leading to potential crashes. If False, validation for finiteness will be performed, avoiding error. Global default: False. working_memory : int, optional If set, scikit-learn will attempt to limit the size of temporary arrays to this number of MiB (per job when parallelised), often saving both computation time and memory on expensive operations that can be performed in chunks. Global default: 1024. 
Notes ----- All settings, not just those presently modified, will be returned to their previous values when the context manager is exited. This is not thread-safe. Examples -------- >>> import sklearn >>> from sklearn.utils.validation import assert_all_finite >>> with sklearn.config_context(assume_finite=True): ... assert_all_finite([float('nan')]) >>> with sklearn.config_context(assume_finite=True): ... with sklearn.config_context(assume_finite=False): ... assert_all_finite([float('nan')]) ... # doctest: +ELLIPSIS Traceback (most recent call last): ... ValueError: Input contains NaN, ... """ old_config = get_config().copy() set_config(**new_config) try: yield finally: set_config(**old_config)
""" Testing for Support Vector Machine module (sklearn.svm) TODO: remove hard coded numerical results when possible """ import numpy as np import itertools import pytest from numpy.testing import assert_array_equal, assert_array_almost_equal from numpy.testing import assert_almost_equal from numpy.testing import assert_allclose from scipy import sparse from sklearn import svm, linear_model, datasets, metrics, base from sklearn.model_selection import train_test_split from sklearn.datasets import make_classification, make_blobs from sklearn.metrics import f1_score from sklearn.metrics.pairwise import rbf_kernel from sklearn.utils import check_random_state from sklearn.utils.testing import assert_equal, assert_true, assert_false from sklearn.utils.testing import assert_greater, assert_in, assert_less from sklearn.utils.testing import assert_raises_regexp, assert_warns from sklearn.utils.testing import assert_warns_message, assert_raise_message from sklearn.utils.testing import ignore_warnings, assert_raises from sklearn.utils.testing import assert_no_warnings from sklearn.exceptions import ConvergenceWarning from sklearn.exceptions import NotFittedError, UndefinedMetricWarning from sklearn.multiclass import OneVsRestClassifier from sklearn.externals import six # toy sample X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]] Y = [1, 1, 1, 2, 2, 2] T = [[-1, -1], [2, 2], [3, 2]] true_result = [1, 2, 2] # also load the iris dataset iris = datasets.load_iris() rng = check_random_state(42) perm = rng.permutation(iris.target.size) iris.data = iris.data[perm] iris.target = iris.target[perm] def test_libsvm_parameters(): # Test parameters on classes that make use of libsvm. 
clf = svm.SVC(kernel='linear').fit(X, Y) assert_array_equal(clf.dual_coef_, [[-0.25, .25]]) assert_array_equal(clf.support_, [1, 3]) assert_array_equal(clf.support_vectors_, (X[1], X[3])) assert_array_equal(clf.intercept_, [0.]) assert_array_equal(clf.predict(X), Y) def test_libsvm_iris(): # Check consistency on dataset iris. # shuffle the dataset so that labels are not ordered for k in ('linear', 'rbf'): clf = svm.SVC(gamma='scale', kernel=k).fit(iris.data, iris.target) assert_greater(np.mean(clf.predict(iris.data) == iris.target), 0.9) assert_true(hasattr(clf, "coef_") == (k == 'linear')) assert_array_equal(clf.classes_, np.sort(clf.classes_)) # check also the low-level API model = svm.libsvm.fit(iris.data, iris.target.astype(np.float64)) pred = svm.libsvm.predict(iris.data, *model) assert_greater(np.mean(pred == iris.target), .95) model = svm.libsvm.fit(iris.data, iris.target.astype(np.float64), kernel='linear') pred = svm.libsvm.predict(iris.data, *model, kernel='linear') assert_greater(np.mean(pred == iris.target), .95) pred = svm.libsvm.cross_validation(iris.data, iris.target.astype(np.float64), 5, kernel='linear', random_seed=0) assert_greater(np.mean(pred == iris.target), .95) # If random_seed >= 0, the libsvm rng is seeded (by calling `srand`), hence # we should get deterministic results (assuming that there is no other # thread calling this wrapper calling `srand` concurrently). pred2 = svm.libsvm.cross_validation(iris.data, iris.target.astype(np.float64), 5, kernel='linear', random_seed=0) assert_array_equal(pred, pred2) def test_precomputed(): # SVC with a precomputed kernel. # We test it with a toy dataset and with iris. 
clf = svm.SVC(kernel='precomputed') # Gram matrix for train data (square matrix) # (we use just a linear kernel) K = np.dot(X, np.array(X).T) clf.fit(K, Y) # Gram matrix for test data (rectangular matrix) KT = np.dot(T, np.array(X).T) pred = clf.predict(KT) assert_raises(ValueError, clf.predict, KT.T) assert_array_equal(clf.dual_coef_, [[-0.25, .25]]) assert_array_equal(clf.support_, [1, 3]) assert_array_equal(clf.intercept_, [0]) assert_array_almost_equal(clf.support_, [1, 3]) assert_array_equal(pred, true_result) # Gram matrix for test data but compute KT[i,j] # for support vectors j only. KT = np.zeros_like(KT) for i in range(len(T)): for j in clf.support_: KT[i, j] = np.dot(T[i], X[j]) pred = clf.predict(KT) assert_array_equal(pred, true_result) # same as before, but using a callable function instead of the kernel # matrix. kernel is just a linear kernel kfunc = lambda x, y: np.dot(x, y.T) clf = svm.SVC(gamma='scale', kernel=kfunc) clf.fit(X, Y) pred = clf.predict(T) assert_array_equal(clf.dual_coef_, [[-0.25, .25]]) assert_array_equal(clf.intercept_, [0]) assert_array_almost_equal(clf.support_, [1, 3]) assert_array_equal(pred, true_result) # test a precomputed kernel with the iris dataset # and check parameters against a linear SVC clf = svm.SVC(kernel='precomputed') clf2 = svm.SVC(kernel='linear') K = np.dot(iris.data, iris.data.T) clf.fit(K, iris.target) clf2.fit(iris.data, iris.target) pred = clf.predict(K) assert_array_almost_equal(clf.support_, clf2.support_) assert_array_almost_equal(clf.dual_coef_, clf2.dual_coef_) assert_array_almost_equal(clf.intercept_, clf2.intercept_) assert_almost_equal(np.mean(pred == iris.target), .99, decimal=2) # Gram matrix for test data but compute KT[i,j] # for support vectors j only. 
K = np.zeros_like(K) for i in range(len(iris.data)): for j in clf.support_: K[i, j] = np.dot(iris.data[i], iris.data[j]) pred = clf.predict(K) assert_almost_equal(np.mean(pred == iris.target), .99, decimal=2) clf = svm.SVC(gamma='scale', kernel=kfunc) clf.fit(iris.data, iris.target) assert_almost_equal(np.mean(pred == iris.target), .99, decimal=2) def test_svr(): # Test Support Vector Regression diabetes = datasets.load_diabetes() for clf in (svm.NuSVR(kernel='linear', nu=.4, C=1.0), svm.NuSVR(kernel='linear', nu=.4, C=10.), svm.SVR(kernel='linear', C=10.), svm.LinearSVR(C=10.), svm.LinearSVR(C=10.), ): clf.fit(diabetes.data, diabetes.target) assert_greater(clf.score(diabetes.data, diabetes.target), 0.02) # non-regression test; previously, BaseLibSVM would check that # len(np.unique(y)) < 2, which must only be done for SVC svm.SVR(gamma='scale').fit(diabetes.data, np.ones(len(diabetes.data))) svm.LinearSVR().fit(diabetes.data, np.ones(len(diabetes.data))) def test_linearsvr(): # check that SVR(kernel='linear') and LinearSVC() give # comparable results diabetes = datasets.load_diabetes() lsvr = svm.LinearSVR(C=1e3).fit(diabetes.data, diabetes.target) score1 = lsvr.score(diabetes.data, diabetes.target) svr = svm.SVR(kernel='linear', C=1e3).fit(diabetes.data, diabetes.target) score2 = svr.score(diabetes.data, diabetes.target) assert_allclose(np.linalg.norm(lsvr.coef_), np.linalg.norm(svr.coef_), 1, 0.0001) assert_almost_equal(score1, score2, 2) def test_linearsvr_fit_sampleweight(): # check correct result when sample_weight is 1 # check that SVR(kernel='linear') and LinearSVC() give # comparable results diabetes = datasets.load_diabetes() n_samples = len(diabetes.target) unit_weight = np.ones(n_samples) lsvr = svm.LinearSVR(C=1e3).fit(diabetes.data, diabetes.target, sample_weight=unit_weight) score1 = lsvr.score(diabetes.data, diabetes.target) lsvr_no_weight = svm.LinearSVR(C=1e3).fit(diabetes.data, diabetes.target) score2 = lsvr_no_weight.score(diabetes.data, 
diabetes.target) assert_allclose(np.linalg.norm(lsvr.coef_), np.linalg.norm(lsvr_no_weight.coef_), 1, 0.0001) assert_almost_equal(score1, score2, 2) # check that fit(X) = fit([X1, X2, X3],sample_weight = [n1, n2, n3]) where # X = X1 repeated n1 times, X2 repeated n2 times and so forth random_state = check_random_state(0) random_weight = random_state.randint(0, 10, n_samples) lsvr_unflat = svm.LinearSVR(C=1e3).fit(diabetes.data, diabetes.target, sample_weight=random_weight) score3 = lsvr_unflat.score(diabetes.data, diabetes.target, sample_weight=random_weight) X_flat = np.repeat(diabetes.data, random_weight, axis=0) y_flat = np.repeat(diabetes.target, random_weight, axis=0) lsvr_flat = svm.LinearSVR(C=1e3).fit(X_flat, y_flat) score4 = lsvr_flat.score(X_flat, y_flat) assert_almost_equal(score3, score4, 2) def test_svr_errors(): X = [[0.0], [1.0]] y = [0.0, 0.5] # Bad kernel clf = svm.SVR(gamma='scale', kernel=lambda x, y: np.array([[1.0]])) clf.fit(X, y) assert_raises(ValueError, clf.predict, X) def test_oneclass(): # Test OneClassSVM clf = svm.OneClassSVM(gamma='scale') clf.fit(X) pred = clf.predict(T) assert_array_equal(pred, [-1, -1, -1]) assert_equal(pred.dtype, np.dtype('intp')) assert_array_almost_equal(clf.intercept_, [-1.117], decimal=3) assert_array_almost_equal(clf.dual_coef_, [[0.681, 0.139, 0.68, 0.14, 0.68, 0.68]], decimal=3) assert_raises(AttributeError, lambda: clf.coef_) def test_oneclass_decision_function(): # Test OneClassSVM decision function clf = svm.OneClassSVM() rnd = check_random_state(2) # Generate train data X = 0.3 * rnd.randn(100, 2) X_train = np.r_[X + 2, X - 2] # Generate some regular novel observations X = 0.3 * rnd.randn(20, 2) X_test = np.r_[X + 2, X - 2] # Generate some abnormal novel observations X_outliers = rnd.uniform(low=-4, high=4, size=(20, 2)) # fit the model clf = svm.OneClassSVM(nu=0.1, kernel="rbf", gamma=0.1) clf.fit(X_train) # predict things y_pred_test = clf.predict(X_test) assert_greater(np.mean(y_pred_test == 1), .9) 
y_pred_outliers = clf.predict(X_outliers) assert_greater(np.mean(y_pred_outliers == -1), .9) dec_func_test = clf.decision_function(X_test) assert_array_equal((dec_func_test > 0).ravel(), y_pred_test == 1) dec_func_outliers = clf.decision_function(X_outliers) assert_array_equal((dec_func_outliers > 0).ravel(), y_pred_outliers == 1) def test_oneclass_score_samples(): X_train = [[1, 1], [1, 2], [2, 1]] clf = svm.OneClassSVM(gamma=1).fit(X_train) assert_array_equal(clf.score_samples([[2., 2.]]), clf.decision_function([[2., 2.]]) + clf.offset_) def test_tweak_params(): # Make sure some tweaking of parameters works. # We change clf.dual_coef_ at run time and expect .predict() to change # accordingly. Notice that this is not trivial since it involves a lot # of C/Python copying in the libsvm bindings. # The success of this test ensures that the mapping between libsvm and # the python classifier is complete. clf = svm.SVC(kernel='linear', C=1.0) clf.fit(X, Y) assert_array_equal(clf.dual_coef_, [[-.25, .25]]) assert_array_equal(clf.predict([[-.1, -.1]]), [1]) clf._dual_coef_ = np.array([[.0, 1.]]) assert_array_equal(clf.predict([[-.1, -.1]]), [2]) def test_probability(): # Predict probabilities using SVC # This uses cross validation, so we use a slightly bigger testing set. 
for clf in (svm.SVC(gamma='scale', probability=True, random_state=0, C=1.0), svm.NuSVC(gamma='scale', probability=True, random_state=0)): clf.fit(iris.data, iris.target) prob_predict = clf.predict_proba(iris.data) assert_array_almost_equal( np.sum(prob_predict, 1), np.ones(iris.data.shape[0])) assert_true(np.mean(np.argmax(prob_predict, 1) == clf.predict(iris.data)) > 0.9) assert_almost_equal(clf.predict_proba(iris.data), np.exp(clf.predict_log_proba(iris.data)), 8) def test_decision_function(): # Test decision_function # Sanity check, test that decision_function implemented in python # returns the same as the one in libsvm # multi class: clf = svm.SVC(kernel='linear', C=0.1, decision_function_shape='ovo').fit(iris.data, iris.target) dec = np.dot(iris.data, clf.coef_.T) + clf.intercept_ assert_array_almost_equal(dec, clf.decision_function(iris.data)) # binary: clf.fit(X, Y) dec = np.dot(X, clf.coef_.T) + clf.intercept_ prediction = clf.predict(X) assert_array_almost_equal(dec.ravel(), clf.decision_function(X)) assert_array_almost_equal( prediction, clf.classes_[(clf.decision_function(X) > 0).astype(np.int)]) expected = np.array([-1., -0.66, -1., 0.66, 1., 1.]) assert_array_almost_equal(clf.decision_function(X), expected, 2) # kernel binary: clf = svm.SVC(kernel='rbf', gamma=1, decision_function_shape='ovo') clf.fit(X, Y) rbfs = rbf_kernel(X, clf.support_vectors_, gamma=clf.gamma) dec = np.dot(rbfs, clf.dual_coef_.T) + clf.intercept_ assert_array_almost_equal(dec.ravel(), clf.decision_function(X)) def test_decision_function_shape(): # check that decision_function_shape='ovr' gives # correct shape and is consistent with predict clf = svm.SVC(kernel='linear', C=0.1, decision_function_shape='ovr').fit(iris.data, iris.target) dec = clf.decision_function(iris.data) assert_equal(dec.shape, (len(iris.data), 3)) assert_array_equal(clf.predict(iris.data), np.argmax(dec, axis=1)) # with five classes: X, y = make_blobs(n_samples=80, centers=5, random_state=0) X_train, X_test, 
y_train, y_test = train_test_split(X, y, random_state=0) clf = svm.SVC(kernel='linear', C=0.1, decision_function_shape='ovr').fit(X_train, y_train) dec = clf.decision_function(X_test) assert_equal(dec.shape, (len(X_test), 5)) assert_array_equal(clf.predict(X_test), np.argmax(dec, axis=1)) # check shape of ovo_decition_function=True clf = svm.SVC(kernel='linear', C=0.1, decision_function_shape='ovo').fit(X_train, y_train) dec = clf.decision_function(X_train) assert_equal(dec.shape, (len(X_train), 10)) def test_svr_predict(): # Test SVR's decision_function # Sanity check, test that predict implemented in python # returns the same as the one in libsvm X = iris.data y = iris.target # linear kernel reg = svm.SVR(kernel='linear', C=0.1).fit(X, y) dec = np.dot(X, reg.coef_.T) + reg.intercept_ assert_array_almost_equal(dec.ravel(), reg.predict(X).ravel()) # rbf kernel reg = svm.SVR(kernel='rbf', gamma=1).fit(X, y) rbfs = rbf_kernel(X, reg.support_vectors_, gamma=reg.gamma) dec = np.dot(rbfs, reg.dual_coef_.T) + reg.intercept_ assert_array_almost_equal(dec.ravel(), reg.predict(X).ravel()) @pytest.mark.filterwarnings('ignore: Default solver will be changed') # 0.22 @pytest.mark.filterwarnings('ignore: Default multi_class will') # 0.22 def test_weight(): # Test class weights clf = svm.SVC(gamma='scale', class_weight={1: 0.1}) # we give a small weights to class 1 clf.fit(X, Y) # so all predicted values belong to class 2 assert_array_almost_equal(clf.predict(X), [2] * 6) X_, y_ = make_classification(n_samples=200, n_features=10, weights=[0.833, 0.167], random_state=2) for clf in (linear_model.LogisticRegression(), svm.LinearSVC(random_state=0), svm.SVC(gamma="scale")): clf.set_params(class_weight={0: .1, 1: 10}) clf.fit(X_[:100], y_[:100]) y_pred = clf.predict(X_[100:]) assert_true(f1_score(y_[100:], y_pred) > .3) def test_sample_weights(): # Test weights on individual samples # TODO: check on NuSVR, OneClass, etc. 
clf = svm.SVC(gamma="scale") clf.fit(X, Y) assert_array_equal(clf.predict([X[2]]), [1.]) sample_weight = [.1] * 3 + [10] * 3 clf.fit(X, Y, sample_weight=sample_weight) assert_array_equal(clf.predict([X[2]]), [2.]) # test that rescaling all samples is the same as changing C clf = svm.SVC(gamma="scale") clf.fit(X, Y) dual_coef_no_weight = clf.dual_coef_ clf.set_params(C=100) clf.fit(X, Y, sample_weight=np.repeat(0.01, len(X))) assert_array_almost_equal(dual_coef_no_weight, clf.dual_coef_) @pytest.mark.filterwarnings('ignore: Default solver will be changed') # 0.22 @pytest.mark.filterwarnings('ignore: Default multi_class will') # 0.22 @ignore_warnings(category=UndefinedMetricWarning) def test_auto_weight(): # Test class weights for imbalanced data from sklearn.linear_model import LogisticRegression # We take as dataset the two-dimensional projection of iris so # that it is not separable and remove half of predictors from # class 1. # We add one to the targets as a non-regression test: # class_weight="balanced" # used to work only when the labels where a range [0..K). from sklearn.utils import compute_class_weight X, y = iris.data[:, :2], iris.target + 1 unbalanced = np.delete(np.arange(y.size), np.where(y > 2)[0][::2]) classes = np.unique(y[unbalanced]) class_weights = compute_class_weight('balanced', classes, y[unbalanced]) assert_true(np.argmax(class_weights) == 2) for clf in (svm.SVC(kernel='linear'), svm.LinearSVC(random_state=0), LogisticRegression()): # check that score is better when class='balanced' is set. 
y_pred = clf.fit(X[unbalanced], y[unbalanced]).predict(X) clf.set_params(class_weight='balanced') y_pred_balanced = clf.fit(X[unbalanced], y[unbalanced],).predict(X) assert_true(metrics.f1_score(y, y_pred, average='macro') <= metrics.f1_score(y, y_pred_balanced, average='macro')) def test_bad_input(): # Test that it gives proper exception on deficient input # impossible value of C assert_raises(ValueError, svm.SVC(gamma='scale', C=-1).fit, X, Y) # impossible value of nu clf = svm.NuSVC(gamma='scale', nu=0.0) assert_raises(ValueError, clf.fit, X, Y) Y2 = Y[:-1] # wrong dimensions for labels assert_raises(ValueError, clf.fit, X, Y2) # Test with arrays that are non-contiguous. for clf in (svm.SVC(gamma="scale"), svm.LinearSVC(random_state=0)): Xf = np.asfortranarray(X) assert_false(Xf.flags['C_CONTIGUOUS']) yf = np.ascontiguousarray(np.tile(Y, (2, 1)).T) yf = yf[:, -1] assert_false(yf.flags['F_CONTIGUOUS']) assert_false(yf.flags['C_CONTIGUOUS']) clf.fit(Xf, yf) assert_array_equal(clf.predict(T), true_result) # error for precomputed kernelsx clf = svm.SVC(kernel='precomputed') assert_raises(ValueError, clf.fit, X, Y) # sample_weight bad dimensions clf = svm.SVC(gamma="scale") assert_raises(ValueError, clf.fit, X, Y, sample_weight=range(len(X) - 1)) # predict with sparse input when trained with dense clf = svm.SVC(gamma="scale").fit(X, Y) assert_raises(ValueError, clf.predict, sparse.lil_matrix(X)) Xt = np.array(X).T clf.fit(np.dot(X, Xt), Y) assert_raises(ValueError, clf.predict, X) clf = svm.SVC(gamma="scale") clf.fit(X, Y) assert_raises(ValueError, clf.predict, Xt) def test_unicode_kernel(): # Test that a unicode kernel name does not cause a TypeError if six.PY2: # Test unicode (same as str on python3) clf = svm.SVC(kernel=u'linear', probability=True) clf.fit(X, Y) clf.predict_proba(T) svm.libsvm.cross_validation(iris.data, iris.target.astype(np.float64), 5, kernel=u'linear', random_seed=0) # Test default behavior on both versions clf = svm.SVC(gamma='scale', 
kernel='linear', probability=True) clf.fit(X, Y) clf.predict_proba(T) svm.libsvm.cross_validation(iris.data, iris.target.astype(np.float64), 5, kernel='linear', random_seed=0) def test_sparse_precomputed(): clf = svm.SVC(kernel='precomputed') sparse_gram = sparse.csr_matrix([[1, 0], [0, 1]]) try: clf.fit(sparse_gram, [0, 1]) assert not "reached" except TypeError as e: assert_in("Sparse precomputed", str(e)) def test_linearsvc_parameters(): # Test possible parameter combinations in LinearSVC # Generate list of possible parameter combinations losses = ['hinge', 'squared_hinge', 'logistic_regression', 'foo'] penalties, duals = ['l1', 'l2', 'bar'], [True, False] X, y = make_classification(n_samples=5, n_features=5) for loss, penalty, dual in itertools.product(losses, penalties, duals): clf = svm.LinearSVC(penalty=penalty, loss=loss, dual=dual) if ((loss, penalty) == ('hinge', 'l1') or (loss, penalty, dual) == ('hinge', 'l2', False) or (penalty, dual) == ('l1', True) or loss == 'foo' or penalty == 'bar'): assert_raises_regexp(ValueError, "Unsupported set of arguments.*penalty='%s.*" "loss='%s.*dual=%s" % (penalty, loss, dual), clf.fit, X, y) else: clf.fit(X, y) # Incorrect loss value - test if explicit error message is raised assert_raises_regexp(ValueError, ".*loss='l3' is not supported.*", svm.LinearSVC(loss="l3").fit, X, y) # FIXME remove in 1.0 def test_linearsvx_loss_penalty_deprecations(): X, y = [[0.0], [1.0]], [0, 1] msg = ("loss='%s' has been deprecated in favor of " "loss='%s' as of 0.16. 
Backward compatibility" " for the %s will be removed in %s") # LinearSVC # loss l1 --> hinge assert_warns_message(DeprecationWarning, msg % ("l1", "hinge", "loss='l1'", "1.0"), svm.LinearSVC(loss="l1").fit, X, y) # loss l2 --> squared_hinge assert_warns_message(DeprecationWarning, msg % ("l2", "squared_hinge", "loss='l2'", "1.0"), svm.LinearSVC(loss="l2").fit, X, y) # LinearSVR # loss l1 --> epsilon_insensitive assert_warns_message(DeprecationWarning, msg % ("l1", "epsilon_insensitive", "loss='l1'", "1.0"), svm.LinearSVR(loss="l1").fit, X, y) # loss l2 --> squared_epsilon_insensitive assert_warns_message(DeprecationWarning, msg % ("l2", "squared_epsilon_insensitive", "loss='l2'", "1.0"), svm.LinearSVR(loss="l2").fit, X, y) def test_linear_svx_uppercase_loss_penality_raises_error(): # Check if Upper case notation raises error at _fit_liblinear # which is called by fit X, y = [[0.0], [1.0]], [0, 1] assert_raise_message(ValueError, "loss='SQuared_hinge' is not supported", svm.LinearSVC(loss="SQuared_hinge").fit, X, y) assert_raise_message(ValueError, ("The combination of penalty='L2'" " and loss='squared_hinge' is not supported"), svm.LinearSVC(penalty="L2").fit, X, y) def test_linearsvc(): # Test basic routines using LinearSVC clf = svm.LinearSVC(random_state=0).fit(X, Y) # by default should have intercept assert_true(clf.fit_intercept) assert_array_equal(clf.predict(T), true_result) assert_array_almost_equal(clf.intercept_, [0], decimal=3) # the same with l1 penalty clf = svm.LinearSVC(penalty='l1', loss='squared_hinge', dual=False, random_state=0).fit(X, Y) assert_array_equal(clf.predict(T), true_result) # l2 penalty with dual formulation clf = svm.LinearSVC(penalty='l2', dual=True, random_state=0).fit(X, Y) assert_array_equal(clf.predict(T), true_result) # l2 penalty, l1 loss clf = svm.LinearSVC(penalty='l2', loss='hinge', dual=True, random_state=0) clf.fit(X, Y) assert_array_equal(clf.predict(T), true_result) # test also decision function dec = 
clf.decision_function(T) res = (dec > 0).astype(np.int) + 1 assert_array_equal(res, true_result) def test_linearsvc_crammer_singer(): # Test LinearSVC with crammer_singer multi-class svm ovr_clf = svm.LinearSVC(random_state=0).fit(iris.data, iris.target) cs_clf = svm.LinearSVC(multi_class='crammer_singer', random_state=0) cs_clf.fit(iris.data, iris.target) # similar prediction for ovr and crammer-singer: assert_true((ovr_clf.predict(iris.data) == cs_clf.predict(iris.data)).mean() > .9) # classifiers shouldn't be the same assert_true((ovr_clf.coef_ != cs_clf.coef_).all()) # test decision function assert_array_equal(cs_clf.predict(iris.data), np.argmax(cs_clf.decision_function(iris.data), axis=1)) dec_func = np.dot(iris.data, cs_clf.coef_.T) + cs_clf.intercept_ assert_array_almost_equal(dec_func, cs_clf.decision_function(iris.data)) def test_linearsvc_fit_sampleweight(): # check correct result when sample_weight is 1 n_samples = len(X) unit_weight = np.ones(n_samples) clf = svm.LinearSVC(random_state=0).fit(X, Y) clf_unitweight = svm.LinearSVC(random_state=0).\ fit(X, Y, sample_weight=unit_weight) # check if same as sample_weight=None assert_array_equal(clf_unitweight.predict(T), clf.predict(T)) assert_allclose(clf.coef_, clf_unitweight.coef_, 1, 0.0001) # check that fit(X) = fit([X1, X2, X3],sample_weight = [n1, n2, n3]) where # X = X1 repeated n1 times, X2 repeated n2 times and so forth random_state = check_random_state(0) random_weight = random_state.randint(0, 10, n_samples) lsvc_unflat = svm.LinearSVC(random_state=0).\ fit(X, Y, sample_weight=random_weight) pred1 = lsvc_unflat.predict(T) X_flat = np.repeat(X, random_weight, axis=0) y_flat = np.repeat(Y, random_weight, axis=0) lsvc_flat = svm.LinearSVC(random_state=0).fit(X_flat, y_flat) pred2 = lsvc_flat.predict(T) assert_array_equal(pred1, pred2) assert_allclose(lsvc_unflat.coef_, lsvc_flat.coef_, 1, 0.0001) def test_crammer_singer_binary(): # Test Crammer-Singer formulation in the binary case X, y = 
make_classification(n_classes=2, random_state=0) for fit_intercept in (True, False): acc = svm.LinearSVC(fit_intercept=fit_intercept, multi_class="crammer_singer", random_state=0).fit(X, y).score(X, y) assert_greater(acc, 0.9) def test_linearsvc_iris(): # Test that LinearSVC gives plausible predictions on the iris dataset # Also, test symbolic class names (classes_). target = iris.target_names[iris.target] clf = svm.LinearSVC(random_state=0).fit(iris.data, target) assert_equal(set(clf.classes_), set(iris.target_names)) assert_greater(np.mean(clf.predict(iris.data) == target), 0.8) dec = clf.decision_function(iris.data) pred = iris.target_names[np.argmax(dec, 1)] assert_array_equal(pred, clf.predict(iris.data)) def test_dense_liblinear_intercept_handling(classifier=svm.LinearSVC): # Test that dense liblinear honours intercept_scaling param X = [[2, 1], [3, 1], [1, 3], [2, 3]] y = [0, 0, 1, 1] clf = classifier(fit_intercept=True, penalty='l1', loss='squared_hinge', dual=False, C=4, tol=1e-7, random_state=0) assert_true(clf.intercept_scaling == 1, clf.intercept_scaling) assert_true(clf.fit_intercept) # when intercept_scaling is low the intercept value is highly "penalized" # by regularization clf.intercept_scaling = 1 clf.fit(X, y) assert_almost_equal(clf.intercept_, 0, decimal=5) # when intercept_scaling is sufficiently high, the intercept value # is not affected by regularization clf.intercept_scaling = 100 clf.fit(X, y) intercept1 = clf.intercept_ assert_less(intercept1, -1) # when intercept_scaling is sufficiently high, the intercept value # doesn't depend on intercept_scaling value clf.intercept_scaling = 1000 clf.fit(X, y) intercept2 = clf.intercept_ assert_array_almost_equal(intercept1, intercept2, decimal=2) def test_liblinear_set_coef(): # multi-class case clf = svm.LinearSVC().fit(iris.data, iris.target) values = clf.decision_function(iris.data) clf.coef_ = clf.coef_.copy() clf.intercept_ = clf.intercept_.copy() values2 = clf.decision_function(iris.data) 
assert_array_almost_equal(values, values2) # binary-class case X = [[2, 1], [3, 1], [1, 3], [2, 3]] y = [0, 0, 1, 1] clf = svm.LinearSVC().fit(X, y) values = clf.decision_function(X) clf.coef_ = clf.coef_.copy() clf.intercept_ = clf.intercept_.copy() values2 = clf.decision_function(X) assert_array_equal(values, values2) def test_immutable_coef_property(): # Check that primal coef modification are not silently ignored svms = [ svm.SVC(kernel='linear').fit(iris.data, iris.target), svm.NuSVC(kernel='linear').fit(iris.data, iris.target), svm.SVR(kernel='linear').fit(iris.data, iris.target), svm.NuSVR(kernel='linear').fit(iris.data, iris.target), svm.OneClassSVM(kernel='linear').fit(iris.data), ] for clf in svms: assert_raises(AttributeError, clf.__setattr__, 'coef_', np.arange(3)) assert_raises((RuntimeError, ValueError), clf.coef_.__setitem__, (0, 0), 0) def test_linearsvc_verbose(): # stdout: redirect import os stdout = os.dup(1) # save original stdout os.dup2(os.pipe()[1], 1) # replace it # actual call clf = svm.LinearSVC(verbose=1) clf.fit(X, Y) # stdout: restore os.dup2(stdout, 1) # restore original stdout def test_svc_clone_with_callable_kernel(): # create SVM with callable linear kernel, check that results are the same # as with built-in linear kernel svm_callable = svm.SVC(gamma='scale', kernel=lambda x, y: np.dot(x, y.T), probability=True, random_state=0, decision_function_shape='ovr') # clone for checking clonability with lambda functions.. 
svm_cloned = base.clone(svm_callable) svm_cloned.fit(iris.data, iris.target) svm_builtin = svm.SVC(kernel='linear', probability=True, random_state=0, decision_function_shape='ovr') svm_builtin.fit(iris.data, iris.target) assert_array_almost_equal(svm_cloned.dual_coef_, svm_builtin.dual_coef_) assert_array_almost_equal(svm_cloned.intercept_, svm_builtin.intercept_) assert_array_equal(svm_cloned.predict(iris.data), svm_builtin.predict(iris.data)) assert_array_almost_equal(svm_cloned.predict_proba(iris.data), svm_builtin.predict_proba(iris.data), decimal=4) assert_array_almost_equal(svm_cloned.decision_function(iris.data), svm_builtin.decision_function(iris.data)) def test_svc_bad_kernel(): svc = svm.SVC(gamma='scale', kernel=lambda x, y: x) assert_raises(ValueError, svc.fit, X, Y) def test_timeout(): a = svm.SVC(kernel=lambda x, y: np.dot(x, y.T), probability=True, random_state=0, max_iter=1) assert_warns(ConvergenceWarning, a.fit, X, Y) def test_unfitted(): X = "foo!" # input validation not required when SVM not fitted clf = svm.SVC(gamma="scale") assert_raises_regexp(Exception, r".*\bSVC\b.*\bnot\b.*\bfitted\b", clf.predict, X) clf = svm.NuSVR(gamma='scale') assert_raises_regexp(Exception, r".*\bNuSVR\b.*\bnot\b.*\bfitted\b", clf.predict, X) # ignore convergence warnings from max_iter=1 @ignore_warnings def test_consistent_proba(): a = svm.SVC(probability=True, max_iter=1, random_state=0) proba_1 = a.fit(X, Y).predict_proba(X) a = svm.SVC(probability=True, max_iter=1, random_state=0) proba_2 = a.fit(X, Y).predict_proba(X) assert_array_almost_equal(proba_1, proba_2) def test_linear_svm_convergence_warnings(): # Test that warnings are raised if model does not converge lsvc = svm.LinearSVC(random_state=0, max_iter=2) assert_warns(ConvergenceWarning, lsvc.fit, X, Y) assert_equal(lsvc.n_iter_, 2) lsvr = svm.LinearSVR(random_state=0, max_iter=2) assert_warns(ConvergenceWarning, lsvr.fit, iris.data, iris.target) assert_equal(lsvr.n_iter_, 2) def test_svr_coef_sign(): # 
def test_svr_coef_sign():
    # Test that SVR(kernel="linear") has coef_ with the right sign.
    # Non-regression test for #2933.
    X = np.random.RandomState(21).randn(10, 3)
    y = np.random.RandomState(12).randn(10)

    for svr in [svm.SVR(kernel='linear'), svm.NuSVR(kernel='linear'),
                svm.LinearSVR()]:
        svr.fit(X, y)
        assert_array_almost_equal(
            svr.predict(X), np.dot(X, svr.coef_.ravel()) + svr.intercept_)


def test_linear_svc_intercept_scaling():
    # Test that the right error message is thrown when intercept_scaling <= 0
    for i in [-1, 0]:
        lsvc = svm.LinearSVC(intercept_scaling=i)
        msg = ('Intercept scaling is %r but needs to be greater than 0.'
               ' To disable fitting an intercept,'
               ' set fit_intercept=False.' % lsvc.intercept_scaling)
        assert_raise_message(ValueError, msg, lsvc.fit, X, Y)


def test_lsvc_intercept_scaling_zero():
    # Test that intercept_scaling is ignored when fit_intercept is False
    lsvc = svm.LinearSVC(fit_intercept=False)
    lsvc.fit(X, Y)
    assert_equal(lsvc.intercept_, 0.)


def test_hasattr_predict_proba():
    # Method must be (un)available before or after fit, switched by
    # `probability` param
    G = svm.SVC(gamma='scale', probability=True)
    assert_true(hasattr(G, 'predict_proba'))
    G.fit(iris.data, iris.target)
    assert_true(hasattr(G, 'predict_proba'))

    G = svm.SVC(gamma='scale', probability=False)
    assert_false(hasattr(G, 'predict_proba'))
    G.fit(iris.data, iris.target)
    assert_false(hasattr(G, 'predict_proba'))

    # Switching to `probability=True` after fitting should make
    # predict_proba available, but calling it must not work:
    G.probability = True
    assert_true(hasattr(G, 'predict_proba'))
    msg = "predict_proba is not available when fitted with probability=False"
    assert_raise_message(NotFittedError, msg, G.predict_proba, iris.data)


def test_decision_function_shape_two_class():
    for n_classes in [2, 3]:
        X, y = make_blobs(centers=n_classes, random_state=0)
        for estimator in [svm.SVC, svm.NuSVC]:
            clf = OneVsRestClassifier(
                estimator(gamma='scale',
                          decision_function_shape="ovr")).fit(X, y)
            assert_equal(len(clf.predict(X)), len(y))


def test_ovr_decision_function():
    # One point from each quadrant represents one class
    X_train = np.array([[1, 1], [-1, 1], [-1, -1], [1, -1]])
    y_train = [0, 1, 2, 3]

    # First point is closer to the decision boundaries than the second point
    base_points = np.array([[5, 5], [10, 10]])

    # For all the quadrants (classes)
    X_test = np.vstack((
        base_points * [1, 1],    # Q1
        base_points * [-1, 1],   # Q2
        base_points * [-1, -1],  # Q3
        base_points * [1, -1]    # Q4
    ))
    y_test = [0] * 2 + [1] * 2 + [2] * 2 + [3] * 2

    clf = svm.SVC(kernel='linear', decision_function_shape='ovr')
    clf.fit(X_train, y_train)

    y_pred = clf.predict(X_test)

    # Test if the prediction is the same as y
    assert_array_equal(y_pred, y_test)

    deci_val = clf.decision_function(X_test)

    # Assert that the predicted class has the maximum value
    assert_array_equal(np.argmax(deci_val, axis=1), y_pred)

    # Get decision value at test points for the predicted class
    pred_class_deci_val = deci_val[range(8), y_pred].reshape((4, 2))

    # Assert pred_class_deci_val > 0 here
    assert_greater(np.min(pred_class_deci_val), 0.0)

    # Test if the first point has lower decision value on every quadrant
    # compared to the second point
    assert_true(np.all(pred_class_deci_val[:, 0] < pred_class_deci_val[:, 1]))


def test_gamma_auto():
    X, y = [[0.0, 1.2], [1.0, 1.3]], [0, 1]

    msg = ("The default value of gamma will change from 'auto' to 'scale' in "
           "version 0.22 to account better for unscaled features. Set gamma "
           "explicitly to 'auto' or 'scale' to avoid this warning.")

    assert_warns_message(FutureWarning, msg, svm.SVC().fit, X, y)
    assert_no_warnings(svm.SVC(kernel='linear').fit, X, y)
    assert_no_warnings(svm.SVC(kernel='precomputed').fit, X, y)


def test_gamma_scale():
    X, y = [[0.], [1.]], [0, 1]

    clf = svm.SVC(gamma='scale')
    assert_no_warnings(clf.fit, X, y)
    # gamma='scale' resolves to 1 / (n_features * X.std()); here X.std() is
    # 0.5 with one feature, hence the expected value of 2.
    assert_equal(clf._gamma, 2.)

    # X_std ~= 1 shouldn't raise warning, for when
    # gamma is not explicitly set.
    X, y = [[1, 2], [3, 2 * np.sqrt(6) / 3 + 2]], [0, 1]
    assert_no_warnings(clf.fit, X, y)
vortex-ape/scikit-learn
sklearn/svm/tests/test_svm.py
sklearn/_config.py
""" The :mod:`sklearn.preprocessing` module includes scaling, centering, normalization, binarization and imputation methods. """ from ._function_transformer import FunctionTransformer from .data import Binarizer from .data import KernelCenterer from .data import MinMaxScaler from .data import MaxAbsScaler from .data import Normalizer from .data import RobustScaler from .data import StandardScaler from .data import QuantileTransformer from .data import add_dummy_feature from .data import binarize from .data import normalize from .data import scale from .data import robust_scale from .data import maxabs_scale from .data import minmax_scale from .data import quantile_transform from .data import power_transform from .data import PowerTransformer from .data import PolynomialFeatures from ._encoders import OneHotEncoder from ._encoders import OrdinalEncoder from .label import label_binarize from .label import LabelBinarizer from .label import LabelEncoder from .label import MultiLabelBinarizer from ._discretization import KBinsDiscretizer from .imputation import Imputer # stub, remove in version 0.21 from .data import CategoricalEncoder # noqa __all__ = [ 'Binarizer', 'FunctionTransformer', 'Imputer', 'KBinsDiscretizer', 'KernelCenterer', 'LabelBinarizer', 'LabelEncoder', 'MultiLabelBinarizer', 'MinMaxScaler', 'MaxAbsScaler', 'QuantileTransformer', 'Normalizer', 'OneHotEncoder', 'OrdinalEncoder', 'PowerTransformer', 'RobustScaler', 'StandardScaler', 'add_dummy_feature', 'PolynomialFeatures', 'binarize', 'normalize', 'scale', 'robust_scale', 'maxabs_scale', 'minmax_scale', 'label_binarize', 'quantile_transform', 'power_transform', ]
""" Testing for Support Vector Machine module (sklearn.svm) TODO: remove hard coded numerical results when possible """ import numpy as np import itertools import pytest from numpy.testing import assert_array_equal, assert_array_almost_equal from numpy.testing import assert_almost_equal from numpy.testing import assert_allclose from scipy import sparse from sklearn import svm, linear_model, datasets, metrics, base from sklearn.model_selection import train_test_split from sklearn.datasets import make_classification, make_blobs from sklearn.metrics import f1_score from sklearn.metrics.pairwise import rbf_kernel from sklearn.utils import check_random_state from sklearn.utils.testing import assert_equal, assert_true, assert_false from sklearn.utils.testing import assert_greater, assert_in, assert_less from sklearn.utils.testing import assert_raises_regexp, assert_warns from sklearn.utils.testing import assert_warns_message, assert_raise_message from sklearn.utils.testing import ignore_warnings, assert_raises from sklearn.utils.testing import assert_no_warnings from sklearn.exceptions import ConvergenceWarning from sklearn.exceptions import NotFittedError, UndefinedMetricWarning from sklearn.multiclass import OneVsRestClassifier from sklearn.externals import six # toy sample X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]] Y = [1, 1, 1, 2, 2, 2] T = [[-1, -1], [2, 2], [3, 2]] true_result = [1, 2, 2] # also load the iris dataset iris = datasets.load_iris() rng = check_random_state(42) perm = rng.permutation(iris.target.size) iris.data = iris.data[perm] iris.target = iris.target[perm] def test_libsvm_parameters(): # Test parameters on classes that make use of libsvm. 
clf = svm.SVC(kernel='linear').fit(X, Y) assert_array_equal(clf.dual_coef_, [[-0.25, .25]]) assert_array_equal(clf.support_, [1, 3]) assert_array_equal(clf.support_vectors_, (X[1], X[3])) assert_array_equal(clf.intercept_, [0.]) assert_array_equal(clf.predict(X), Y) def test_libsvm_iris(): # Check consistency on dataset iris. # shuffle the dataset so that labels are not ordered for k in ('linear', 'rbf'): clf = svm.SVC(gamma='scale', kernel=k).fit(iris.data, iris.target) assert_greater(np.mean(clf.predict(iris.data) == iris.target), 0.9) assert_true(hasattr(clf, "coef_") == (k == 'linear')) assert_array_equal(clf.classes_, np.sort(clf.classes_)) # check also the low-level API model = svm.libsvm.fit(iris.data, iris.target.astype(np.float64)) pred = svm.libsvm.predict(iris.data, *model) assert_greater(np.mean(pred == iris.target), .95) model = svm.libsvm.fit(iris.data, iris.target.astype(np.float64), kernel='linear') pred = svm.libsvm.predict(iris.data, *model, kernel='linear') assert_greater(np.mean(pred == iris.target), .95) pred = svm.libsvm.cross_validation(iris.data, iris.target.astype(np.float64), 5, kernel='linear', random_seed=0) assert_greater(np.mean(pred == iris.target), .95) # If random_seed >= 0, the libsvm rng is seeded (by calling `srand`), hence # we should get deterministic results (assuming that there is no other # thread calling this wrapper calling `srand` concurrently). pred2 = svm.libsvm.cross_validation(iris.data, iris.target.astype(np.float64), 5, kernel='linear', random_seed=0) assert_array_equal(pred, pred2) def test_precomputed(): # SVC with a precomputed kernel. # We test it with a toy dataset and with iris. 
clf = svm.SVC(kernel='precomputed') # Gram matrix for train data (square matrix) # (we use just a linear kernel) K = np.dot(X, np.array(X).T) clf.fit(K, Y) # Gram matrix for test data (rectangular matrix) KT = np.dot(T, np.array(X).T) pred = clf.predict(KT) assert_raises(ValueError, clf.predict, KT.T) assert_array_equal(clf.dual_coef_, [[-0.25, .25]]) assert_array_equal(clf.support_, [1, 3]) assert_array_equal(clf.intercept_, [0]) assert_array_almost_equal(clf.support_, [1, 3]) assert_array_equal(pred, true_result) # Gram matrix for test data but compute KT[i,j] # for support vectors j only. KT = np.zeros_like(KT) for i in range(len(T)): for j in clf.support_: KT[i, j] = np.dot(T[i], X[j]) pred = clf.predict(KT) assert_array_equal(pred, true_result) # same as before, but using a callable function instead of the kernel # matrix. kernel is just a linear kernel kfunc = lambda x, y: np.dot(x, y.T) clf = svm.SVC(gamma='scale', kernel=kfunc) clf.fit(X, Y) pred = clf.predict(T) assert_array_equal(clf.dual_coef_, [[-0.25, .25]]) assert_array_equal(clf.intercept_, [0]) assert_array_almost_equal(clf.support_, [1, 3]) assert_array_equal(pred, true_result) # test a precomputed kernel with the iris dataset # and check parameters against a linear SVC clf = svm.SVC(kernel='precomputed') clf2 = svm.SVC(kernel='linear') K = np.dot(iris.data, iris.data.T) clf.fit(K, iris.target) clf2.fit(iris.data, iris.target) pred = clf.predict(K) assert_array_almost_equal(clf.support_, clf2.support_) assert_array_almost_equal(clf.dual_coef_, clf2.dual_coef_) assert_array_almost_equal(clf.intercept_, clf2.intercept_) assert_almost_equal(np.mean(pred == iris.target), .99, decimal=2) # Gram matrix for test data but compute KT[i,j] # for support vectors j only. 
K = np.zeros_like(K) for i in range(len(iris.data)): for j in clf.support_: K[i, j] = np.dot(iris.data[i], iris.data[j]) pred = clf.predict(K) assert_almost_equal(np.mean(pred == iris.target), .99, decimal=2) clf = svm.SVC(gamma='scale', kernel=kfunc) clf.fit(iris.data, iris.target) assert_almost_equal(np.mean(pred == iris.target), .99, decimal=2) def test_svr(): # Test Support Vector Regression diabetes = datasets.load_diabetes() for clf in (svm.NuSVR(kernel='linear', nu=.4, C=1.0), svm.NuSVR(kernel='linear', nu=.4, C=10.), svm.SVR(kernel='linear', C=10.), svm.LinearSVR(C=10.), svm.LinearSVR(C=10.), ): clf.fit(diabetes.data, diabetes.target) assert_greater(clf.score(diabetes.data, diabetes.target), 0.02) # non-regression test; previously, BaseLibSVM would check that # len(np.unique(y)) < 2, which must only be done for SVC svm.SVR(gamma='scale').fit(diabetes.data, np.ones(len(diabetes.data))) svm.LinearSVR().fit(diabetes.data, np.ones(len(diabetes.data))) def test_linearsvr(): # check that SVR(kernel='linear') and LinearSVC() give # comparable results diabetes = datasets.load_diabetes() lsvr = svm.LinearSVR(C=1e3).fit(diabetes.data, diabetes.target) score1 = lsvr.score(diabetes.data, diabetes.target) svr = svm.SVR(kernel='linear', C=1e3).fit(diabetes.data, diabetes.target) score2 = svr.score(diabetes.data, diabetes.target) assert_allclose(np.linalg.norm(lsvr.coef_), np.linalg.norm(svr.coef_), 1, 0.0001) assert_almost_equal(score1, score2, 2) def test_linearsvr_fit_sampleweight(): # check correct result when sample_weight is 1 # check that SVR(kernel='linear') and LinearSVC() give # comparable results diabetes = datasets.load_diabetes() n_samples = len(diabetes.target) unit_weight = np.ones(n_samples) lsvr = svm.LinearSVR(C=1e3).fit(diabetes.data, diabetes.target, sample_weight=unit_weight) score1 = lsvr.score(diabetes.data, diabetes.target) lsvr_no_weight = svm.LinearSVR(C=1e3).fit(diabetes.data, diabetes.target) score2 = lsvr_no_weight.score(diabetes.data, 
diabetes.target) assert_allclose(np.linalg.norm(lsvr.coef_), np.linalg.norm(lsvr_no_weight.coef_), 1, 0.0001) assert_almost_equal(score1, score2, 2) # check that fit(X) = fit([X1, X2, X3],sample_weight = [n1, n2, n3]) where # X = X1 repeated n1 times, X2 repeated n2 times and so forth random_state = check_random_state(0) random_weight = random_state.randint(0, 10, n_samples) lsvr_unflat = svm.LinearSVR(C=1e3).fit(diabetes.data, diabetes.target, sample_weight=random_weight) score3 = lsvr_unflat.score(diabetes.data, diabetes.target, sample_weight=random_weight) X_flat = np.repeat(diabetes.data, random_weight, axis=0) y_flat = np.repeat(diabetes.target, random_weight, axis=0) lsvr_flat = svm.LinearSVR(C=1e3).fit(X_flat, y_flat) score4 = lsvr_flat.score(X_flat, y_flat) assert_almost_equal(score3, score4, 2) def test_svr_errors(): X = [[0.0], [1.0]] y = [0.0, 0.5] # Bad kernel clf = svm.SVR(gamma='scale', kernel=lambda x, y: np.array([[1.0]])) clf.fit(X, y) assert_raises(ValueError, clf.predict, X) def test_oneclass(): # Test OneClassSVM clf = svm.OneClassSVM(gamma='scale') clf.fit(X) pred = clf.predict(T) assert_array_equal(pred, [-1, -1, -1]) assert_equal(pred.dtype, np.dtype('intp')) assert_array_almost_equal(clf.intercept_, [-1.117], decimal=3) assert_array_almost_equal(clf.dual_coef_, [[0.681, 0.139, 0.68, 0.14, 0.68, 0.68]], decimal=3) assert_raises(AttributeError, lambda: clf.coef_) def test_oneclass_decision_function(): # Test OneClassSVM decision function clf = svm.OneClassSVM() rnd = check_random_state(2) # Generate train data X = 0.3 * rnd.randn(100, 2) X_train = np.r_[X + 2, X - 2] # Generate some regular novel observations X = 0.3 * rnd.randn(20, 2) X_test = np.r_[X + 2, X - 2] # Generate some abnormal novel observations X_outliers = rnd.uniform(low=-4, high=4, size=(20, 2)) # fit the model clf = svm.OneClassSVM(nu=0.1, kernel="rbf", gamma=0.1) clf.fit(X_train) # predict things y_pred_test = clf.predict(X_test) assert_greater(np.mean(y_pred_test == 1), .9) 
y_pred_outliers = clf.predict(X_outliers) assert_greater(np.mean(y_pred_outliers == -1), .9) dec_func_test = clf.decision_function(X_test) assert_array_equal((dec_func_test > 0).ravel(), y_pred_test == 1) dec_func_outliers = clf.decision_function(X_outliers) assert_array_equal((dec_func_outliers > 0).ravel(), y_pred_outliers == 1) def test_oneclass_score_samples(): X_train = [[1, 1], [1, 2], [2, 1]] clf = svm.OneClassSVM(gamma=1).fit(X_train) assert_array_equal(clf.score_samples([[2., 2.]]), clf.decision_function([[2., 2.]]) + clf.offset_) def test_tweak_params(): # Make sure some tweaking of parameters works. # We change clf.dual_coef_ at run time and expect .predict() to change # accordingly. Notice that this is not trivial since it involves a lot # of C/Python copying in the libsvm bindings. # The success of this test ensures that the mapping between libsvm and # the python classifier is complete. clf = svm.SVC(kernel='linear', C=1.0) clf.fit(X, Y) assert_array_equal(clf.dual_coef_, [[-.25, .25]]) assert_array_equal(clf.predict([[-.1, -.1]]), [1]) clf._dual_coef_ = np.array([[.0, 1.]]) assert_array_equal(clf.predict([[-.1, -.1]]), [2]) def test_probability(): # Predict probabilities using SVC # This uses cross validation, so we use a slightly bigger testing set. 
for clf in (svm.SVC(gamma='scale', probability=True, random_state=0, C=1.0), svm.NuSVC(gamma='scale', probability=True, random_state=0)): clf.fit(iris.data, iris.target) prob_predict = clf.predict_proba(iris.data) assert_array_almost_equal( np.sum(prob_predict, 1), np.ones(iris.data.shape[0])) assert_true(np.mean(np.argmax(prob_predict, 1) == clf.predict(iris.data)) > 0.9) assert_almost_equal(clf.predict_proba(iris.data), np.exp(clf.predict_log_proba(iris.data)), 8) def test_decision_function(): # Test decision_function # Sanity check, test that decision_function implemented in python # returns the same as the one in libsvm # multi class: clf = svm.SVC(kernel='linear', C=0.1, decision_function_shape='ovo').fit(iris.data, iris.target) dec = np.dot(iris.data, clf.coef_.T) + clf.intercept_ assert_array_almost_equal(dec, clf.decision_function(iris.data)) # binary: clf.fit(X, Y) dec = np.dot(X, clf.coef_.T) + clf.intercept_ prediction = clf.predict(X) assert_array_almost_equal(dec.ravel(), clf.decision_function(X)) assert_array_almost_equal( prediction, clf.classes_[(clf.decision_function(X) > 0).astype(np.int)]) expected = np.array([-1., -0.66, -1., 0.66, 1., 1.]) assert_array_almost_equal(clf.decision_function(X), expected, 2) # kernel binary: clf = svm.SVC(kernel='rbf', gamma=1, decision_function_shape='ovo') clf.fit(X, Y) rbfs = rbf_kernel(X, clf.support_vectors_, gamma=clf.gamma) dec = np.dot(rbfs, clf.dual_coef_.T) + clf.intercept_ assert_array_almost_equal(dec.ravel(), clf.decision_function(X)) def test_decision_function_shape(): # check that decision_function_shape='ovr' gives # correct shape and is consistent with predict clf = svm.SVC(kernel='linear', C=0.1, decision_function_shape='ovr').fit(iris.data, iris.target) dec = clf.decision_function(iris.data) assert_equal(dec.shape, (len(iris.data), 3)) assert_array_equal(clf.predict(iris.data), np.argmax(dec, axis=1)) # with five classes: X, y = make_blobs(n_samples=80, centers=5, random_state=0) X_train, X_test, 
y_train, y_test = train_test_split(X, y, random_state=0) clf = svm.SVC(kernel='linear', C=0.1, decision_function_shape='ovr').fit(X_train, y_train) dec = clf.decision_function(X_test) assert_equal(dec.shape, (len(X_test), 5)) assert_array_equal(clf.predict(X_test), np.argmax(dec, axis=1)) # check shape of ovo_decition_function=True clf = svm.SVC(kernel='linear', C=0.1, decision_function_shape='ovo').fit(X_train, y_train) dec = clf.decision_function(X_train) assert_equal(dec.shape, (len(X_train), 10)) def test_svr_predict(): # Test SVR's decision_function # Sanity check, test that predict implemented in python # returns the same as the one in libsvm X = iris.data y = iris.target # linear kernel reg = svm.SVR(kernel='linear', C=0.1).fit(X, y) dec = np.dot(X, reg.coef_.T) + reg.intercept_ assert_array_almost_equal(dec.ravel(), reg.predict(X).ravel()) # rbf kernel reg = svm.SVR(kernel='rbf', gamma=1).fit(X, y) rbfs = rbf_kernel(X, reg.support_vectors_, gamma=reg.gamma) dec = np.dot(rbfs, reg.dual_coef_.T) + reg.intercept_ assert_array_almost_equal(dec.ravel(), reg.predict(X).ravel()) @pytest.mark.filterwarnings('ignore: Default solver will be changed') # 0.22 @pytest.mark.filterwarnings('ignore: Default multi_class will') # 0.22 def test_weight(): # Test class weights clf = svm.SVC(gamma='scale', class_weight={1: 0.1}) # we give a small weights to class 1 clf.fit(X, Y) # so all predicted values belong to class 2 assert_array_almost_equal(clf.predict(X), [2] * 6) X_, y_ = make_classification(n_samples=200, n_features=10, weights=[0.833, 0.167], random_state=2) for clf in (linear_model.LogisticRegression(), svm.LinearSVC(random_state=0), svm.SVC(gamma="scale")): clf.set_params(class_weight={0: .1, 1: 10}) clf.fit(X_[:100], y_[:100]) y_pred = clf.predict(X_[100:]) assert_true(f1_score(y_[100:], y_pred) > .3) def test_sample_weights(): # Test weights on individual samples # TODO: check on NuSVR, OneClass, etc. 
clf = svm.SVC(gamma="scale") clf.fit(X, Y) assert_array_equal(clf.predict([X[2]]), [1.]) sample_weight = [.1] * 3 + [10] * 3 clf.fit(X, Y, sample_weight=sample_weight) assert_array_equal(clf.predict([X[2]]), [2.]) # test that rescaling all samples is the same as changing C clf = svm.SVC(gamma="scale") clf.fit(X, Y) dual_coef_no_weight = clf.dual_coef_ clf.set_params(C=100) clf.fit(X, Y, sample_weight=np.repeat(0.01, len(X))) assert_array_almost_equal(dual_coef_no_weight, clf.dual_coef_) @pytest.mark.filterwarnings('ignore: Default solver will be changed') # 0.22 @pytest.mark.filterwarnings('ignore: Default multi_class will') # 0.22 @ignore_warnings(category=UndefinedMetricWarning) def test_auto_weight(): # Test class weights for imbalanced data from sklearn.linear_model import LogisticRegression # We take as dataset the two-dimensional projection of iris so # that it is not separable and remove half of predictors from # class 1. # We add one to the targets as a non-regression test: # class_weight="balanced" # used to work only when the labels where a range [0..K). from sklearn.utils import compute_class_weight X, y = iris.data[:, :2], iris.target + 1 unbalanced = np.delete(np.arange(y.size), np.where(y > 2)[0][::2]) classes = np.unique(y[unbalanced]) class_weights = compute_class_weight('balanced', classes, y[unbalanced]) assert_true(np.argmax(class_weights) == 2) for clf in (svm.SVC(kernel='linear'), svm.LinearSVC(random_state=0), LogisticRegression()): # check that score is better when class='balanced' is set. 
y_pred = clf.fit(X[unbalanced], y[unbalanced]).predict(X) clf.set_params(class_weight='balanced') y_pred_balanced = clf.fit(X[unbalanced], y[unbalanced],).predict(X) assert_true(metrics.f1_score(y, y_pred, average='macro') <= metrics.f1_score(y, y_pred_balanced, average='macro')) def test_bad_input(): # Test that it gives proper exception on deficient input # impossible value of C assert_raises(ValueError, svm.SVC(gamma='scale', C=-1).fit, X, Y) # impossible value of nu clf = svm.NuSVC(gamma='scale', nu=0.0) assert_raises(ValueError, clf.fit, X, Y) Y2 = Y[:-1] # wrong dimensions for labels assert_raises(ValueError, clf.fit, X, Y2) # Test with arrays that are non-contiguous. for clf in (svm.SVC(gamma="scale"), svm.LinearSVC(random_state=0)): Xf = np.asfortranarray(X) assert_false(Xf.flags['C_CONTIGUOUS']) yf = np.ascontiguousarray(np.tile(Y, (2, 1)).T) yf = yf[:, -1] assert_false(yf.flags['F_CONTIGUOUS']) assert_false(yf.flags['C_CONTIGUOUS']) clf.fit(Xf, yf) assert_array_equal(clf.predict(T), true_result) # error for precomputed kernelsx clf = svm.SVC(kernel='precomputed') assert_raises(ValueError, clf.fit, X, Y) # sample_weight bad dimensions clf = svm.SVC(gamma="scale") assert_raises(ValueError, clf.fit, X, Y, sample_weight=range(len(X) - 1)) # predict with sparse input when trained with dense clf = svm.SVC(gamma="scale").fit(X, Y) assert_raises(ValueError, clf.predict, sparse.lil_matrix(X)) Xt = np.array(X).T clf.fit(np.dot(X, Xt), Y) assert_raises(ValueError, clf.predict, X) clf = svm.SVC(gamma="scale") clf.fit(X, Y) assert_raises(ValueError, clf.predict, Xt) def test_unicode_kernel(): # Test that a unicode kernel name does not cause a TypeError if six.PY2: # Test unicode (same as str on python3) clf = svm.SVC(kernel=u'linear', probability=True) clf.fit(X, Y) clf.predict_proba(T) svm.libsvm.cross_validation(iris.data, iris.target.astype(np.float64), 5, kernel=u'linear', random_seed=0) # Test default behavior on both versions clf = svm.SVC(gamma='scale', 
kernel='linear', probability=True) clf.fit(X, Y) clf.predict_proba(T) svm.libsvm.cross_validation(iris.data, iris.target.astype(np.float64), 5, kernel='linear', random_seed=0) def test_sparse_precomputed(): clf = svm.SVC(kernel='precomputed') sparse_gram = sparse.csr_matrix([[1, 0], [0, 1]]) try: clf.fit(sparse_gram, [0, 1]) assert not "reached" except TypeError as e: assert_in("Sparse precomputed", str(e)) def test_linearsvc_parameters(): # Test possible parameter combinations in LinearSVC # Generate list of possible parameter combinations losses = ['hinge', 'squared_hinge', 'logistic_regression', 'foo'] penalties, duals = ['l1', 'l2', 'bar'], [True, False] X, y = make_classification(n_samples=5, n_features=5) for loss, penalty, dual in itertools.product(losses, penalties, duals): clf = svm.LinearSVC(penalty=penalty, loss=loss, dual=dual) if ((loss, penalty) == ('hinge', 'l1') or (loss, penalty, dual) == ('hinge', 'l2', False) or (penalty, dual) == ('l1', True) or loss == 'foo' or penalty == 'bar'): assert_raises_regexp(ValueError, "Unsupported set of arguments.*penalty='%s.*" "loss='%s.*dual=%s" % (penalty, loss, dual), clf.fit, X, y) else: clf.fit(X, y) # Incorrect loss value - test if explicit error message is raised assert_raises_regexp(ValueError, ".*loss='l3' is not supported.*", svm.LinearSVC(loss="l3").fit, X, y) # FIXME remove in 1.0 def test_linearsvx_loss_penalty_deprecations(): X, y = [[0.0], [1.0]], [0, 1] msg = ("loss='%s' has been deprecated in favor of " "loss='%s' as of 0.16. 
Backward compatibility" " for the %s will be removed in %s") # LinearSVC # loss l1 --> hinge assert_warns_message(DeprecationWarning, msg % ("l1", "hinge", "loss='l1'", "1.0"), svm.LinearSVC(loss="l1").fit, X, y) # loss l2 --> squared_hinge assert_warns_message(DeprecationWarning, msg % ("l2", "squared_hinge", "loss='l2'", "1.0"), svm.LinearSVC(loss="l2").fit, X, y) # LinearSVR # loss l1 --> epsilon_insensitive assert_warns_message(DeprecationWarning, msg % ("l1", "epsilon_insensitive", "loss='l1'", "1.0"), svm.LinearSVR(loss="l1").fit, X, y) # loss l2 --> squared_epsilon_insensitive assert_warns_message(DeprecationWarning, msg % ("l2", "squared_epsilon_insensitive", "loss='l2'", "1.0"), svm.LinearSVR(loss="l2").fit, X, y) def test_linear_svx_uppercase_loss_penality_raises_error(): # Check if Upper case notation raises error at _fit_liblinear # which is called by fit X, y = [[0.0], [1.0]], [0, 1] assert_raise_message(ValueError, "loss='SQuared_hinge' is not supported", svm.LinearSVC(loss="SQuared_hinge").fit, X, y) assert_raise_message(ValueError, ("The combination of penalty='L2'" " and loss='squared_hinge' is not supported"), svm.LinearSVC(penalty="L2").fit, X, y) def test_linearsvc(): # Test basic routines using LinearSVC clf = svm.LinearSVC(random_state=0).fit(X, Y) # by default should have intercept assert_true(clf.fit_intercept) assert_array_equal(clf.predict(T), true_result) assert_array_almost_equal(clf.intercept_, [0], decimal=3) # the same with l1 penalty clf = svm.LinearSVC(penalty='l1', loss='squared_hinge', dual=False, random_state=0).fit(X, Y) assert_array_equal(clf.predict(T), true_result) # l2 penalty with dual formulation clf = svm.LinearSVC(penalty='l2', dual=True, random_state=0).fit(X, Y) assert_array_equal(clf.predict(T), true_result) # l2 penalty, l1 loss clf = svm.LinearSVC(penalty='l2', loss='hinge', dual=True, random_state=0) clf.fit(X, Y) assert_array_equal(clf.predict(T), true_result) # test also decision function dec = 
clf.decision_function(T) res = (dec > 0).astype(np.int) + 1 assert_array_equal(res, true_result) def test_linearsvc_crammer_singer(): # Test LinearSVC with crammer_singer multi-class svm ovr_clf = svm.LinearSVC(random_state=0).fit(iris.data, iris.target) cs_clf = svm.LinearSVC(multi_class='crammer_singer', random_state=0) cs_clf.fit(iris.data, iris.target) # similar prediction for ovr and crammer-singer: assert_true((ovr_clf.predict(iris.data) == cs_clf.predict(iris.data)).mean() > .9) # classifiers shouldn't be the same assert_true((ovr_clf.coef_ != cs_clf.coef_).all()) # test decision function assert_array_equal(cs_clf.predict(iris.data), np.argmax(cs_clf.decision_function(iris.data), axis=1)) dec_func = np.dot(iris.data, cs_clf.coef_.T) + cs_clf.intercept_ assert_array_almost_equal(dec_func, cs_clf.decision_function(iris.data)) def test_linearsvc_fit_sampleweight(): # check correct result when sample_weight is 1 n_samples = len(X) unit_weight = np.ones(n_samples) clf = svm.LinearSVC(random_state=0).fit(X, Y) clf_unitweight = svm.LinearSVC(random_state=0).\ fit(X, Y, sample_weight=unit_weight) # check if same as sample_weight=None assert_array_equal(clf_unitweight.predict(T), clf.predict(T)) assert_allclose(clf.coef_, clf_unitweight.coef_, 1, 0.0001) # check that fit(X) = fit([X1, X2, X3],sample_weight = [n1, n2, n3]) where # X = X1 repeated n1 times, X2 repeated n2 times and so forth random_state = check_random_state(0) random_weight = random_state.randint(0, 10, n_samples) lsvc_unflat = svm.LinearSVC(random_state=0).\ fit(X, Y, sample_weight=random_weight) pred1 = lsvc_unflat.predict(T) X_flat = np.repeat(X, random_weight, axis=0) y_flat = np.repeat(Y, random_weight, axis=0) lsvc_flat = svm.LinearSVC(random_state=0).fit(X_flat, y_flat) pred2 = lsvc_flat.predict(T) assert_array_equal(pred1, pred2) assert_allclose(lsvc_unflat.coef_, lsvc_flat.coef_, 1, 0.0001) def test_crammer_singer_binary(): # Test Crammer-Singer formulation in the binary case X, y = 
make_classification(n_classes=2, random_state=0) for fit_intercept in (True, False): acc = svm.LinearSVC(fit_intercept=fit_intercept, multi_class="crammer_singer", random_state=0).fit(X, y).score(X, y) assert_greater(acc, 0.9) def test_linearsvc_iris(): # Test that LinearSVC gives plausible predictions on the iris dataset # Also, test symbolic class names (classes_). target = iris.target_names[iris.target] clf = svm.LinearSVC(random_state=0).fit(iris.data, target) assert_equal(set(clf.classes_), set(iris.target_names)) assert_greater(np.mean(clf.predict(iris.data) == target), 0.8) dec = clf.decision_function(iris.data) pred = iris.target_names[np.argmax(dec, 1)] assert_array_equal(pred, clf.predict(iris.data)) def test_dense_liblinear_intercept_handling(classifier=svm.LinearSVC): # Test that dense liblinear honours intercept_scaling param X = [[2, 1], [3, 1], [1, 3], [2, 3]] y = [0, 0, 1, 1] clf = classifier(fit_intercept=True, penalty='l1', loss='squared_hinge', dual=False, C=4, tol=1e-7, random_state=0) assert_true(clf.intercept_scaling == 1, clf.intercept_scaling) assert_true(clf.fit_intercept) # when intercept_scaling is low the intercept value is highly "penalized" # by regularization clf.intercept_scaling = 1 clf.fit(X, y) assert_almost_equal(clf.intercept_, 0, decimal=5) # when intercept_scaling is sufficiently high, the intercept value # is not affected by regularization clf.intercept_scaling = 100 clf.fit(X, y) intercept1 = clf.intercept_ assert_less(intercept1, -1) # when intercept_scaling is sufficiently high, the intercept value # doesn't depend on intercept_scaling value clf.intercept_scaling = 1000 clf.fit(X, y) intercept2 = clf.intercept_ assert_array_almost_equal(intercept1, intercept2, decimal=2) def test_liblinear_set_coef(): # multi-class case clf = svm.LinearSVC().fit(iris.data, iris.target) values = clf.decision_function(iris.data) clf.coef_ = clf.coef_.copy() clf.intercept_ = clf.intercept_.copy() values2 = clf.decision_function(iris.data) 
assert_array_almost_equal(values, values2) # binary-class case X = [[2, 1], [3, 1], [1, 3], [2, 3]] y = [0, 0, 1, 1] clf = svm.LinearSVC().fit(X, y) values = clf.decision_function(X) clf.coef_ = clf.coef_.copy() clf.intercept_ = clf.intercept_.copy() values2 = clf.decision_function(X) assert_array_equal(values, values2) def test_immutable_coef_property(): # Check that primal coef modification are not silently ignored svms = [ svm.SVC(kernel='linear').fit(iris.data, iris.target), svm.NuSVC(kernel='linear').fit(iris.data, iris.target), svm.SVR(kernel='linear').fit(iris.data, iris.target), svm.NuSVR(kernel='linear').fit(iris.data, iris.target), svm.OneClassSVM(kernel='linear').fit(iris.data), ] for clf in svms: assert_raises(AttributeError, clf.__setattr__, 'coef_', np.arange(3)) assert_raises((RuntimeError, ValueError), clf.coef_.__setitem__, (0, 0), 0) def test_linearsvc_verbose(): # stdout: redirect import os stdout = os.dup(1) # save original stdout os.dup2(os.pipe()[1], 1) # replace it # actual call clf = svm.LinearSVC(verbose=1) clf.fit(X, Y) # stdout: restore os.dup2(stdout, 1) # restore original stdout def test_svc_clone_with_callable_kernel(): # create SVM with callable linear kernel, check that results are the same # as with built-in linear kernel svm_callable = svm.SVC(gamma='scale', kernel=lambda x, y: np.dot(x, y.T), probability=True, random_state=0, decision_function_shape='ovr') # clone for checking clonability with lambda functions.. 
svm_cloned = base.clone(svm_callable) svm_cloned.fit(iris.data, iris.target) svm_builtin = svm.SVC(kernel='linear', probability=True, random_state=0, decision_function_shape='ovr') svm_builtin.fit(iris.data, iris.target) assert_array_almost_equal(svm_cloned.dual_coef_, svm_builtin.dual_coef_) assert_array_almost_equal(svm_cloned.intercept_, svm_builtin.intercept_) assert_array_equal(svm_cloned.predict(iris.data), svm_builtin.predict(iris.data)) assert_array_almost_equal(svm_cloned.predict_proba(iris.data), svm_builtin.predict_proba(iris.data), decimal=4) assert_array_almost_equal(svm_cloned.decision_function(iris.data), svm_builtin.decision_function(iris.data)) def test_svc_bad_kernel(): svc = svm.SVC(gamma='scale', kernel=lambda x, y: x) assert_raises(ValueError, svc.fit, X, Y) def test_timeout(): a = svm.SVC(kernel=lambda x, y: np.dot(x, y.T), probability=True, random_state=0, max_iter=1) assert_warns(ConvergenceWarning, a.fit, X, Y) def test_unfitted(): X = "foo!" # input validation not required when SVM not fitted clf = svm.SVC(gamma="scale") assert_raises_regexp(Exception, r".*\bSVC\b.*\bnot\b.*\bfitted\b", clf.predict, X) clf = svm.NuSVR(gamma='scale') assert_raises_regexp(Exception, r".*\bNuSVR\b.*\bnot\b.*\bfitted\b", clf.predict, X) # ignore convergence warnings from max_iter=1 @ignore_warnings def test_consistent_proba(): a = svm.SVC(probability=True, max_iter=1, random_state=0) proba_1 = a.fit(X, Y).predict_proba(X) a = svm.SVC(probability=True, max_iter=1, random_state=0) proba_2 = a.fit(X, Y).predict_proba(X) assert_array_almost_equal(proba_1, proba_2) def test_linear_svm_convergence_warnings(): # Test that warnings are raised if model does not converge lsvc = svm.LinearSVC(random_state=0, max_iter=2) assert_warns(ConvergenceWarning, lsvc.fit, X, Y) assert_equal(lsvc.n_iter_, 2) lsvr = svm.LinearSVR(random_state=0, max_iter=2) assert_warns(ConvergenceWarning, lsvr.fit, iris.data, iris.target) assert_equal(lsvr.n_iter_, 2) def test_svr_coef_sign(): # 
Test that SVR(kernel="linear") has coef_ with the right sign. # Non-regression test for #2933. X = np.random.RandomState(21).randn(10, 3) y = np.random.RandomState(12).randn(10) for svr in [svm.SVR(kernel='linear'), svm.NuSVR(kernel='linear'), svm.LinearSVR()]: svr.fit(X, y) assert_array_almost_equal(svr.predict(X), np.dot(X, svr.coef_.ravel()) + svr.intercept_) def test_linear_svc_intercept_scaling(): # Test that the right error message is thrown when intercept_scaling <= 0 for i in [-1, 0]: lsvc = svm.LinearSVC(intercept_scaling=i) msg = ('Intercept scaling is %r but needs to be greater than 0.' ' To disable fitting an intercept,' ' set fit_intercept=False.' % lsvc.intercept_scaling) assert_raise_message(ValueError, msg, lsvc.fit, X, Y) def test_lsvc_intercept_scaling_zero(): # Test that intercept_scaling is ignored when fit_intercept is False lsvc = svm.LinearSVC(fit_intercept=False) lsvc.fit(X, Y) assert_equal(lsvc.intercept_, 0.) def test_hasattr_predict_proba(): # Method must be (un)available before or after fit, switched by # `probability` param G = svm.SVC(gamma='scale', probability=True) assert_true(hasattr(G, 'predict_proba')) G.fit(iris.data, iris.target) assert_true(hasattr(G, 'predict_proba')) G = svm.SVC(gamma='scale', probability=False) assert_false(hasattr(G, 'predict_proba')) G.fit(iris.data, iris.target) assert_false(hasattr(G, 'predict_proba')) # Switching to `probability=True` after fitting should make # predict_proba available, but calling it must not work: G.probability = True assert_true(hasattr(G, 'predict_proba')) msg = "predict_proba is not available when fitted with probability=False" assert_raise_message(NotFittedError, msg, G.predict_proba, iris.data) def test_decision_function_shape_two_class(): for n_classes in [2, 3]: X, y = make_blobs(centers=n_classes, random_state=0) for estimator in [svm.SVC, svm.NuSVC]: clf = OneVsRestClassifier(estimator(gamma='scale', decision_function_shape="ovr")).fit(X, y) assert_equal(len(clf.predict(X)), 
len(y)) def test_ovr_decision_function(): # One point from each quadrant represents one class X_train = np.array([[1, 1], [-1, 1], [-1, -1], [1, -1]]) y_train = [0, 1, 2, 3] # First point is closer to the decision boundaries than the second point base_points = np.array([[5, 5], [10, 10]]) # For all the quadrants (classes) X_test = np.vstack(( base_points * [1, 1], # Q1 base_points * [-1, 1], # Q2 base_points * [-1, -1], # Q3 base_points * [1, -1] # Q4 )) y_test = [0] * 2 + [1] * 2 + [2] * 2 + [3] * 2 clf = svm.SVC(kernel='linear', decision_function_shape='ovr') clf.fit(X_train, y_train) y_pred = clf.predict(X_test) # Test if the prediction is the same as y assert_array_equal(y_pred, y_test) deci_val = clf.decision_function(X_test) # Assert that the predicted class has the maximum value assert_array_equal(np.argmax(deci_val, axis=1), y_pred) # Get decision value at test points for the predicted class pred_class_deci_val = deci_val[range(8), y_pred].reshape((4, 2)) # Assert pred_class_deci_val > 0 here assert_greater(np.min(pred_class_deci_val), 0.0) # Test if the first point has lower decision value on every quadrant # compared to the second point assert_true(np.all(pred_class_deci_val[:, 0] < pred_class_deci_val[:, 1])) def test_gamma_auto(): X, y = [[0.0, 1.2], [1.0, 1.3]], [0, 1] msg = ("The default value of gamma will change from 'auto' to 'scale' in " "version 0.22 to account better for unscaled features. Set gamma " "explicitly to 'auto' or 'scale' to avoid this warning.") assert_warns_message(FutureWarning, msg, svm.SVC().fit, X, y) assert_no_warnings(svm.SVC(kernel='linear').fit, X, y) assert_no_warnings(svm.SVC(kernel='precomputed').fit, X, y) def test_gamma_scale(): X, y = [[0.], [1.]], [0, 1] clf = svm.SVC(gamma='scale') assert_no_warnings(clf.fit, X, y) assert_equal(clf._gamma, 2.) # X_std ~= 1 shouldn't raise warning, for when # gamma is not explicitly set. X, y = [[1, 2], [3, 2 * np.sqrt(6) / 3 + 2]], [0, 1] assert_no_warnings(clf.fit, X, y)
vortex-ape/scikit-learn
sklearn/svm/tests/test_svm.py
sklearn/preprocessing/__init__.py
import arrayfire
import numpy
import afnumpy
from .. import private_utils as pu
from numpy import newaxis
import numbers
from numpy import broadcast
from ..decorators import *


def concatenate(arrays, axis=0):
    """Join a sequence of afnumpy arrays along an existing axis.

    Parameters
    ----------
    arrays : sequence of afnumpy.ndarray
        Arrays to join; all must share shape except along `axis`.
    axis : int
        Axis along which to join (only up to 4 dimensions are supported
        by the arrayfire backend).

    Raises
    ------
    ValueError
        If `arrays` is empty.
    NotImplementedError
        If `axis` exceeds the backend's 4-dimension limit.
    """
    if(len(arrays) < 1):
        raise ValueError('need at least one array to concatenate')
    if(axis > 3):
        raise NotImplementedError('only up to 4 axis as currently supported')
    arr = arrays[0].d_array.copy()
    # pu.c2f presumably maps the C-order axis index to arrayfire's
    # Fortran-order axis index -- TODO confirm against private_utils
    axis = pu.c2f(arrays[0].shape, axis)
    for a in arrays[1:]:
        arr = arrayfire.join(axis, arr, a.d_array)
    return afnumpy.ndarray(pu.af_shape(arr), dtype=arrays[0].dtype, af_array=arr)


def roll(a, shift, axis=None):
    """Roll array elements along a given axis, wrapping around the end.

    When `axis` is None the array is flattened before shifting, and the
    result is reshaped back to the original shape (matching numpy.roll).
    """
    shape = a.shape
    if(axis is None):
        axis = 0
        a = a.flatten()
    axis = pu.c2f(a.shape, axis)
    # arrayfire.shift takes one shift amount per dimension; place the
    # requested shift in the slot matching the (Fortran-order) axis.
    if axis == 0:
        s = arrayfire.shift(a.d_array, shift, 0, 0, 0)
    elif axis == 1:
        s = arrayfire.shift(a.d_array, 0, shift, 0, 0)
    elif axis == 2:
        s = arrayfire.shift(a.d_array, 0, 0, shift, 0)
    elif axis == 3:
        s = arrayfire.shift(a.d_array, 0, 0, 0, shift)
    else:
        raise NotImplementedError
    return afnumpy.ndarray(a.shape, dtype=a.dtype, af_array=s).reshape(shape)


def rollaxis(a, axis, start=0):
    """Roll the specified axis backwards until it lies at `start`.

    Mirrors numpy.rollaxis semantics, implemented via a transpose.
    """
    n = a.ndim
    if axis < 0:
        axis += n
    if start < 0:
        start += n
    msg = 'rollaxis: %s (%d) must be >=0 and < %d'
    if not (0 <= axis < n):
        raise ValueError(msg % ('axis', axis, n))
    if not (0 <= start < n+1):
        raise ValueError(msg % ('start', start, n+1))
    if (axis < start):
        # it's been removed
        start -= 1
    if axis==start:
        return a
    axes = list(range(0, n))
    axes.remove(axis)
    axes.insert(start, axis)
    return a.transpose(axes)


def ones(shape, dtype=float, order='C'):
    """Return a new afnumpy array of given shape and type, filled with ones."""
    b = numpy.ones(shape, dtype, order)
    return afnumpy.ndarray(b.shape, b.dtype, buffer=b,order=order)


def reshape(a, newshape, order='C'):
    """Give a new shape to an array without changing its data."""
    return a.reshape(newshape,order)


def asanyarray(a, dtype=None, order=None):
    """Convert the input to an afnumpy array, passing ndarray subclasses through."""
    return afnumpy.array(a, dtype, copy=False, order=order, subok=True)


def floor(x, out=None):
    """Return the floor of the input, element-wise.

    If `out` is given its contents are overwritten with the result.
    """
    s = arrayfire.floor(x.d_array)
    a = afnumpy.ndarray(x.shape, dtype=pu.typemap(s.dtype()), af_array=s)
    if out is not None:
        out[:] = a[:]
    return a


def ceil(x, out=None):
    """Return the ceiling of the input, element-wise.

    If `out` is given its contents are overwritten with the result.
    """
    s = arrayfire.ceil(x.d_array)
    a = afnumpy.ndarray(x.shape, dtype=pu.typemap(s.dtype()), af_array=s)
    if out is not None:
        out[:] = a[:]
    return a


def abs(x, out=None):
    """Element-wise absolute value; falls back to numpy for non-afnumpy input."""
    if not isinstance(x, afnumpy.ndarray):
        return numpy.abs(x, out)
    a = x.__abs__()
    if out is not None:
        out[:] = a
    return a


def asarray(a, dtype=None, order=None):
    """Convert the input to an afnumpy array (no copy when already suitable)."""
    if(isinstance(a, afnumpy.ndarray) and
       (dtype is None or dtype == a.dtype)):
        # special case for performance
        return a
    return afnumpy.array(a, dtype, copy=False, order=order)


def ascontiguousarray(a, dtype=None):
    """Return a contiguous (C order) afnumpy array with at least one dimension."""
    return afnumpy.array(a, dtype, copy=False, order='C', ndmin=1)


def cross(a, b, axisa=-1, axisb=-1, axisc=-1, axis=None):
    """Return the cross product of two (arrays of) vectors.

    Vectors are taken along the given axes and must have 2 or 3
    components; 2-component vectors are treated as having a zero third
    component.  The implementation follows numpy's reference cross().

    Raises
    ------
    ValueError
        If either input's vector axis is not of length 2 or 3.
    """
    if axis is not None:
        axisa, axisb, axisc = (axis,) * 3
    a = asarray(a)
    b = asarray(b)
    # Move working axis to the end of the shape
    a = rollaxis(a, axisa, a.ndim)
    b = rollaxis(b, axisb, b.ndim)
    msg = ("incompatible dimensions for cross product\n"
           "(dimension must be 2 or 3)")
    if a.shape[-1] not in (2, 3) or b.shape[-1] not in (2, 3):
        raise ValueError(msg)
    # Create the output array
    shape = broadcast(a[..., 0], b[..., 0]).shape
    if a.shape[-1] == 3 or b.shape[-1] == 3:
        shape += (3,)
    dtype = afnumpy.promote_types(a.dtype, b.dtype)
    cp = afnumpy.empty(shape, dtype)
    # create local aliases for readability
    a0 = a[..., 0]
    a1 = a[..., 1]
    if a.shape[-1] == 3:
        a2 = a[..., 2]
    b0 = b[..., 0]
    b1 = b[..., 1]
    if b.shape[-1] == 3:
        b2 = b[..., 2]
    if cp.ndim != 0 and cp.shape[-1] == 3:
        cp0 = cp[..., 0]
        cp1 = cp[..., 1]
        cp2 = cp[..., 2]
    if a.shape[-1] == 2:
        if b.shape[-1] == 2:
            # a0 * b1 - a1 * b0
            afnumpy.multiply(a0, b1, out=cp)
            cp -= a1 * b0
            if cp.ndim == 0:
                return cp
            else:
                # This works because we are moving the last axis
                return rollaxis(cp, -1, axisc)
        else:
            # cp0 = a1 * b2 - 0  (a2 = 0)
            # cp1 = 0 - a0 * b2  (a2 = 0)
            # cp2 = a0 * b1 - a1 * b0
            afnumpy.multiply(a1, b2, out=cp0)
            afnumpy.multiply(a0, b2, out=cp1)
            # BUGFIX: the original called ``negative(cp1, out=cp1)`` but
            # ``negative`` is never imported or defined in this module
            # (it was module-level in the numpy source this was adapted
            # from), so this branch raised NameError.  Negate in place
            # with an operation that is certainly in scope.
            cp1 *= -1
            afnumpy.multiply(a0, b1, out=cp2)
            cp2 -= a1 * b0
    elif a.shape[-1] == 3:
        if b.shape[-1] == 3:
            # cp0 = a1 * b2 - a2 * b1
            # cp1 = a2 * b0 - a0 * b2
            # cp2 = a0 * b1 - a1 * b0
            afnumpy.multiply(a1, b2, out=cp0)
            tmp = afnumpy.array(a2 * b1)
            cp0 -= tmp
            afnumpy.multiply(a2, b0, out=cp1)
            afnumpy.multiply(a0, b2, out=tmp)
            cp1 -= tmp
            afnumpy.multiply(a0, b1, out=cp2)
            afnumpy.multiply(a1, b0, out=tmp)
            cp2 -= tmp
        else:
            # cp0 = 0 - a2 * b1  (b2 = 0)
            # cp1 = a2 * b0 - 0  (b2 = 0)
            # cp2 = a0 * b1 - a1 * b0
            afnumpy.multiply(a2, b1, out=cp0)
            # BUGFIX: same undefined ``negative`` as above.
            cp0 *= -1
            afnumpy.multiply(a2, b0, out=cp1)
            afnumpy.multiply(a0, b1, out=cp2)
            cp2 -= a1 * b0
    if cp.ndim == 1:
        return cp
    else:
        # This works because we are moving the last axis
        return rollaxis(cp, -1, axisc)


@outufunc
def isnan(x):
    """Element-wise test for NaN; falls back to numpy for non-afnumpy input."""
    if not isinstance(x, afnumpy.ndarray):
        return numpy.isnan(x)
    s = arrayfire.isnan(x.d_array)
    return afnumpy.ndarray(x.shape, dtype=pu.typemap(s.dtype()), af_array=s)


@outufunc
def isinf(x):
    """Element-wise test for infinity; falls back to numpy for non-afnumpy input."""
    if not isinstance(x, afnumpy.ndarray):
        return numpy.isinf(x)
    s = arrayfire.isinf(x.d_array)
    return afnumpy.ndarray(x.shape, dtype=pu.typemap(s.dtype()), af_array=s)
# Tests for afnumpy's numpy-compatible library functions, comparing each
# afnumpy result against the reference numpy result via iassert/fassert.
import afnumpy
import numpy
from asserts import *
import sys
import pytest
xfail = pytest.mark.xfail


def test_copy():
    """copy() must detach from the source: mutating the source leaves the copy intact."""
    b = numpy.random.random((2,3))
    a = afnumpy.array(b)
    c = afnumpy.copy(a)
    d = numpy.copy(b)
    a[:] = 0
    b[:] = 0
    iassert(c,d)


def test_meshgrid():
    nx, ny = (3, 2)
    x2 = numpy.linspace(0, 1, nx)
    y2 = numpy.linspace(0, 1, ny)
    x1 = afnumpy.array(x2)
    y1 = afnumpy.array(y2)
    iassert(afnumpy.meshgrid(x1, y1), numpy.meshgrid(x2, y2))


def test_broadcast_arrays():
    # Currently arrayfire is missing support for int64
    x2 = numpy.array([[1,2,3]], dtype=numpy.float32)
    y2 = numpy.array([[1],[2],[3]], dtype=numpy.float32)
    x1 = afnumpy.array(x2)
    y1 = afnumpy.array(y2)
    iassert(afnumpy.broadcast_arrays(x1, y1), numpy.broadcast_arrays(x2, y2))
    x1 = afnumpy.array([2])
    y1 = afnumpy.array(2)
    x2 = numpy.array([2])
    y2 = numpy.array(2)
    iassert(afnumpy.broadcast_arrays(x1, y1), numpy.broadcast_arrays(x2, y2))


def test_tile():
    # Currently arrayfire is missing support for int64
    b = numpy.array([0, 1, 2], dtype=numpy.float32)
    a = afnumpy.array(b)
    iassert(afnumpy.tile(a, 2), numpy.tile(b, 2))
    iassert(afnumpy.tile(a, (2,2)), numpy.tile(b, (2,2)))
    iassert(afnumpy.tile(a, (2,1,2)), numpy.tile(b, (2,1,2)))


def test_arccos():
    a = afnumpy.random.random((2,3))
    b = numpy.array(a)
    fassert(afnumpy.arccos(a), numpy.arccos(b))
    c = afnumpy.random.random((2,3))
    d = numpy.array(a)
    fassert(afnumpy.arccos(a, out=c), numpy.arccos(b, out=d))
    fassert(c, d)


def test_arcsin():
    a = afnumpy.random.random((2,3))
    b = numpy.array(a)
    fassert(afnumpy.arcsin(a), numpy.arcsin(b))
    c = afnumpy.random.random((2,3))
    d = numpy.array(a)
    fassert(afnumpy.arcsin(a, out=c), numpy.arcsin(b, out=d))
    fassert(c, d)


def test_arctan():
    a = afnumpy.random.random((2,3))
    b = numpy.array(a)
    fassert(afnumpy.arctan(a), numpy.arctan(b))
    c = afnumpy.random.random((2,3))
    d = numpy.array(a)
    fassert(afnumpy.arctan(a, out=c), numpy.arctan(b, out=d))
    fassert(c, d)


def test_arctan2():
    a1 = afnumpy.random.random((2,3))
    b1 = numpy.array(a1)
    a2 = afnumpy.random.random((2,3))
    b2 = numpy.array(a2)
    fassert(afnumpy.arctan2(a1,a2), numpy.arctan2(b1,b2))
    c = afnumpy.random.random((2,3))
    d = numpy.array(c)
    fassert(afnumpy.arctan2(a1,a2, out=c), numpy.arctan2(b1, b2, out=d))
    fassert(c, d)


def test_arccosh():
    # Domain for arccosh starts at 1
    a = afnumpy.random.random((2,3))+1
    b = numpy.array(a)
    fassert(afnumpy.arccosh(a), numpy.arccosh(b))
    c = afnumpy.random.random((2,3))
    d = numpy.array(a)
    fassert(afnumpy.arccosh(a, out=c), numpy.arccosh(b, out=d))
    fassert(c, d)


def test_arcsinh():
    a = afnumpy.random.random((2,3))
    b = numpy.array(a)
    fassert(afnumpy.arcsinh(a), numpy.arcsinh(b))
    c = afnumpy.random.random((2,3))
    d = numpy.array(a)
    fassert(afnumpy.arcsinh(a, out=c), numpy.arcsinh(b, out=d))
    fassert(c, d)


def test_arctanh():
    a = afnumpy.random.random((2,3))
    b = numpy.array(a)
    fassert(afnumpy.arctanh(a), numpy.arctanh(b))
    c = afnumpy.random.random((2,3))
    d = numpy.array(a)
    fassert(afnumpy.arctanh(a, out=c), numpy.arctanh(b, out=d))
    fassert(c, d)


def test_cos():
    a = afnumpy.random.random((2,3))
    b = numpy.array(a)
    fassert(afnumpy.cos(a), numpy.cos(b))
    c = afnumpy.random.random((2,3))
    d = numpy.array(a)
    fassert(afnumpy.cos(a, out=c), numpy.cos(b, out=d))
    fassert(c, d)


def test_sin():
    a = afnumpy.random.random((2,3))
    b = numpy.array(a)
    fassert(afnumpy.sin(a), numpy.sin(b))
    c = afnumpy.random.random((2,3))
    d = numpy.array(a)
    fassert(afnumpy.sin(a, out=c), numpy.sin(b, out=d))
    fassert(c, d)


def test_tan():
    a = afnumpy.random.random((2,3))
    b = numpy.array(a)
    fassert(afnumpy.tan(a), numpy.tan(b))
    c = afnumpy.random.random((2,3))
    d = numpy.array(a)
    fassert(afnumpy.tan(a, out=c), numpy.tan(b, out=d))
    fassert(c, d)


def test_cosh():
    a = afnumpy.random.random((2,3))
    b = numpy.array(a)
    fassert(afnumpy.cosh(a), numpy.cosh(b))
    c = afnumpy.random.random((2,3))
    d = numpy.array(a)
    fassert(afnumpy.cosh(a, out=c), numpy.cosh(b, out=d))
    fassert(c, d)


def test_sinh():
    a = afnumpy.random.random((2,3))
    b = numpy.array(a)
    fassert(afnumpy.sinh(a), numpy.sinh(b))
    c = afnumpy.random.random((2,3))
    d = numpy.array(a)
    fassert(afnumpy.sinh(a, out=c), numpy.sinh(b, out=d))
    fassert(c, d)


def test_tanh():
    a = afnumpy.random.random((2,3))
    b = numpy.array(a)
    fassert(afnumpy.tanh(a), numpy.tanh(b))
    c = afnumpy.random.random((2,3))
    d = numpy.array(a)
    fassert(afnumpy.tanh(a, out=c), numpy.tanh(b, out=d))
    fassert(c, d)


def test_exp():
    a = afnumpy.random.random((2,3))
    b = numpy.array(a)
    fassert(afnumpy.exp(a), numpy.exp(b))


def test_log():
    a = afnumpy.random.random((2,3))
    b = numpy.array(a)
    fassert(afnumpy.log(a), numpy.log(b))


def test_log10():
    a = afnumpy.random.random((2,3))
    b = numpy.array(a)
    fassert(afnumpy.log10(a), numpy.log10(b))


def test_real():
    """real() must return a writable view: zeroing it zeroes the source's real part."""
    x = numpy.sqrt([1+0j, 0+1j])
    y = afnumpy.array(x)
    fassert(afnumpy.real(y), numpy.real(x))
    y.real[:] = 0
    x.real[:] = 0
    fassert(y, x)


def test_imag():
    x = numpy.sqrt([1+0j, 0+1j])
    y = afnumpy.array(x)
    fassert(afnumpy.imag(y), numpy.imag(x))
    y.real[:] = 0
    x.real[:] = 0
    fassert(y, x)


def test_multiply():
    a = afnumpy.random.random((2,3))
    b = numpy.array(a)
    fassert(afnumpy.multiply(a,a), numpy.multiply(b,b))
    a = afnumpy.array(2)
    ao = afnumpy.array(0)
    b = numpy.array(a)
    bo = numpy.array(0)
    fassert(afnumpy.multiply(a,a), numpy.multiply(b,b))
    fassert(afnumpy.multiply(a,a, out=ao), numpy.multiply(b,b, out = bo))
    fassert(ao, bo)


def test_subtract():
    a = afnumpy.random.random((2,3))
    b = numpy.array(a)
    fassert(afnumpy.subtract(a,a), numpy.subtract(b,b))
    a = afnumpy.array(2)
    ao = afnumpy.array(0)
    b = numpy.array(a)
    bo = numpy.array(0)
    fassert(afnumpy.subtract(a,a), numpy.subtract(b,b))
    fassert(afnumpy.subtract(a,a, out=ao), numpy.subtract(b,b, out = bo))
    fassert(ao, bo)


def test_add():
    a = afnumpy.random.random((2,3))
    b = numpy.array(a)
    fassert(afnumpy.add(a,a), numpy.add(b,b))
    a = afnumpy.array(2)
    ao = afnumpy.array(0)
    b = numpy.array(a)
    bo = numpy.array(0)
    fassert(afnumpy.add(a,a), numpy.add(b,b))
    fassert(afnumpy.add(a,a, out=ao), numpy.add(b,b, out = bo))
    fassert(ao, bo)


def test_divide():
    a = afnumpy.random.random((2,3))
    b = numpy.array(a)
    fassert(afnumpy.divide(a,a), numpy.divide(b,b))
    a = afnumpy.array(2)
    b = numpy.array(a)
    # On Python 3 true division of ints yields floats, so the out buffer
    # must be float there.
    if sys.version_info >= (3, 0):
        ao = afnumpy.array(0.)
        bo = numpy.array(0.)
    else:
        ao = afnumpy.array(0)
        bo = numpy.array(0)
    fassert(afnumpy.divide(a,a), numpy.divide(b,b))
    fassert(afnumpy.divide(a,a, out=ao), numpy.divide(b,b, out = bo))
    fassert(ao, bo)


def test_true_divide():
    a = afnumpy.random.random((2,3))
    b = numpy.array(a)
    fassert(afnumpy.true_divide(a,a), numpy.true_divide(b,b))
    a = afnumpy.array(2)
    b = numpy.array(a)
    ao = afnumpy.array(0.)
    bo = numpy.array(0.)
    fassert(afnumpy.true_divide(a,a), numpy.true_divide(b,b))
    fassert(afnumpy.true_divide(a,a, out=ao), numpy.true_divide(b,b, out = bo))
    fassert(ao, bo)


def test_floor_divide():
    a = afnumpy.random.random((2,3))
    b = numpy.array(a)
    fassert(afnumpy.floor_divide(a,a), numpy.floor_divide(b,b))
    a = afnumpy.array(2)
    b = numpy.array(a)
    ao = afnumpy.array(0)
    bo = numpy.array(0)
    fassert(afnumpy.floor_divide(a,a), numpy.floor_divide(b,b))
    fassert(afnumpy.floor_divide(a,a, out=ao), numpy.floor_divide(b,b, out = bo))
    fassert(ao, bo)


def test_angle():
    a = afnumpy.random.random((2,3))+afnumpy.random.random((2,3))*1.0j
    b = numpy.array(a)
    fassert(afnumpy.angle(a), numpy.angle(b))


def test_conjugate():
    a = afnumpy.random.random((2,3))+afnumpy.random.random((2,3))*1.0j
    b = numpy.array(a)
    fassert(afnumpy.conjugate(a), numpy.conjugate(b))


def test_conj():
    a = afnumpy.random.random((2,3))+afnumpy.random.random((2,3))*1.0j
    b = numpy.array(a)
    fassert(afnumpy.conj(a), numpy.conj(b))


def test_percentile():
    a = numpy.array([[10, 7, 4], [3, 2, 1]], dtype=numpy.float32)
    b = afnumpy.array(a)
    fassert(afnumpy.percentile(b, 50), numpy.percentile(a, 50))
    fassert(afnumpy.percentile(b, 50, axis=1), numpy.percentile(a, 50, axis=1))
    fassert(afnumpy.percentile(b, 50, axis=1, keepdims=True),
            numpy.percentile(a, 50, axis=1, keepdims=True))


# BUGFIX: this test was also named ``test_percentile``, shadowing the
# definition above so pytest never collected or ran the first one.
# Renamed so both tests are exercised.
@xfail
def test_percentile_axis0():
    a = numpy.array([[10, 7, 4], [3, 2, 1]], dtype=numpy.float32)
    b = afnumpy.array(a)
    # Again problems with sorting not being supported on the slow axis
    fassert(afnumpy.percentile(b, 50, axis=0), numpy.percentile(a, 50, axis=0))
daurer/afnumpy
tests/test_lib.py
afnumpy/core/numeric.py
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module contains simple input/output related functionality that is not
part of a larger framework or standard.
"""

import pickle

__all__ = ['fnpickle', 'fnunpickle']


def fnunpickle(fileorname, number=0):
    """ Unpickle pickled objects from a specified file and return the contents.

    Parameters
    ----------
    fileorname : str or file-like
        The file name or file from which to unpickle objects. If a file object,
        it should have been opened in binary mode.
    number : int
        If 0, a single object will be returned (the first in the file). If >0,
        this specifies the number of objects to be unpickled, and a list will
        be returned with exactly that many objects. If <0, all objects in the
        file will be unpickled and returned as a list.

    Raises
    ------
    EOFError
        If ``number`` is >0 and there are fewer than ``number`` objects in the
        pickled file.

    Returns
    -------
    contents : obj or list
        If ``number`` is 0, this is a individual object - the first one
        unpickled from the file. Otherwise, it is a list of objects unpickled
        from the file.
    """
    # Only close the stream if we opened it ourselves from a file name;
    # caller-supplied file objects remain the caller's responsibility.
    own_handle = isinstance(fileorname, str)
    f = open(fileorname, 'rb') if own_handle else fileorname

    try:
        if number == 0:
            # default: just the first object in the stream
            return pickle.load(f)
        if number > 0:
            # exactly `number` objects; pickle.load raises EOFError if
            # the stream runs out early, which we propagate.
            return [pickle.load(f) for _ in range(number)]
        # number < 0: drain the stream until EOF
        contents = []
        while True:
            try:
                contents.append(pickle.load(f))
            except EOFError:
                break
        return contents
    finally:
        if own_handle:
            f.close()


def fnpickle(object, fileorname, protocol=None, append=False):
    """Pickle an object to a specified file.

    Parameters
    ----------
    object
        The python object to pickle.
    fileorname : str or file-like
        The filename or file into which the `object` should be pickled. If a
        file object, it should have been opened in binary mode.
    protocol : int or None
        Pickle protocol to use - see the :mod:`pickle` module for details on
        these options. If None, the most recent protocol will be used.
    append : bool
        If True, the object is appended to the end of the file, otherwise the
        file will be overwritten (if a file object is given instead of a
        file name, this has no effect).
    """
    if protocol is None:
        protocol = pickle.HIGHEST_PROTOCOL

    own_handle = isinstance(fileorname, str)
    f = open(fileorname, 'ab' if append else 'wb') if own_handle else fileorname

    try:
        pickle.dump(object, f, protocol=protocol)
    finally:
        if own_handle:
            f.close()
# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Full-stack ICRS <-> AltAz transformation tests, cross-checked against ERFA."""
import warnings

import pytest
import numpy as np
from numpy import testing as npt
import erfa

from astropy import units as u
from astropy.time import Time
from astropy.coordinates.builtin_frames import ICRS, AltAz
from astropy.coordinates.builtin_frames.utils import get_jd12
from astropy.coordinates import EarthLocation
from astropy.coordinates import SkyCoord
from astropy.utils import iers
from .utils import randomly_sample_sphere


# These fixtures are used in test_iau_fullstack
@pytest.fixture(scope="function")
def fullstack_icrs():
    # 1000 points spread uniformly over the sphere
    ra, dec, _ = randomly_sample_sphere(1000)
    return ICRS(ra=ra, dec=dec)


@pytest.fixture(scope="function")
def fullstack_fiducial_altaz(fullstack_icrs):
    # Reference AltAz at a fixed (0, 0) location and J2000 epoch, used as a
    # "different from everything else" comparison point.
    altazframe = AltAz(location=EarthLocation(lat=0*u.deg, lon=0*u.deg,
                                              height=0*u.m),
                       obstime=Time('J2000'))
    with warnings.catch_warnings():  # Ignore remote_data warning
        warnings.simplefilter('ignore')
        result = fullstack_icrs.transform_to(altazframe)
    return result


@pytest.fixture(scope="function", params=['J2000.1', 'J2010'])
def fullstack_times(request):
    return Time(request.param)


@pytest.fixture(scope="function", params=[(0, 0, 0), (23, 0, 0), (-70, 0, 0),
                                          (0, 100, 0), (23, 0, 3000)])
def fullstack_locations(request):
    # BUGFIX: previously every component used request.param[0], so the
    # parametrized longitudes (100 deg) and heights (3000 m) were never
    # actually exercised.  Index the (lat, lon, height) tuple properly.
    lat, lon, height = request.param
    return EarthLocation(lat=lat*u.deg, lon=lon*u.deg,
                         height=height*u.m)


@pytest.fixture(scope="function",
                params=[(0*u.bar, 0*u.deg_C, 0, 1*u.micron),
                        (1*u.bar, 0*u.deg_C, 0*u.one, 1*u.micron),
                        (1*u.bar, 10*u.deg_C, 0, 1*u.micron),
                        (1*u.bar, 0*u.deg_C, 50*u.percent, 1*u.micron),
                        (1*u.bar, 0*u.deg_C, 0, 21*u.cm)])
def fullstack_obsconditions(request):
    # (pressure, temperature, relative_humidity, obswl)
    return request.param


def _erfa_check(ira, idec, astrom):
    """
    This function does the same thing the astropy layer is supposed to do, but
    all in erfa
    """
    cra, cdec = erfa.atciq(ira, idec, 0, 0, 0, 0, astrom)
    az, zen, ha, odec, ora = erfa.atioq(cra, cdec, astrom)
    alt = np.pi/2-zen
    cra2, cdec2 = erfa.atoiq('A', az, zen, astrom)
    ira2, idec2 = erfa.aticq(cra2, cdec2, astrom)

    dct = locals()
    del dct['astrom']
    return dct


def test_iau_fullstack(fullstack_icrs, fullstack_fiducial_altaz,
                       fullstack_times, fullstack_locations,
                       fullstack_obsconditions):
    """
    Test the full transform from ICRS <-> AltAz
    """

    # create the altaz frame
    altazframe = AltAz(obstime=fullstack_times, location=fullstack_locations,
                       pressure=fullstack_obsconditions[0],
                       temperature=fullstack_obsconditions[1],
                       relative_humidity=fullstack_obsconditions[2],
                       obswl=fullstack_obsconditions[3])

    aacoo = fullstack_icrs.transform_to(altazframe)

    # compare aacoo to the fiducial AltAz - should always be different
    assert np.all(np.abs(aacoo.alt -
                         fullstack_fiducial_altaz.alt) > 50*u.milliarcsecond)
    assert np.all(np.abs(aacoo.az -
                         fullstack_fiducial_altaz.az) > 50*u.milliarcsecond)

    # if the refraction correction is included, we *only* do the comparisons
    # where altitude >5 degrees.  The SOFA guides imply that below 5 is where
    # where accuracy gets more problematic, and testing reveals that alt<~0
    # gives garbage round-tripping, and <10 can give ~1 arcsec uncertainty
    if fullstack_obsconditions[0].value == 0:
        # but if there is no refraction correction, check everything
        msk = slice(None)
        tol = 5*u.microarcsecond
    else:
        msk = aacoo.alt > 5*u.deg
        # most of them aren't this bad, but some of those at low alt are offset
        # this much.  For alt > 10, this is always better than 100 masec
        tol = 750*u.milliarcsecond

    # now make sure the full stack round-tripping works
    icrs2 = aacoo.transform_to(ICRS())

    adras = np.abs(fullstack_icrs.ra - icrs2.ra)[msk]
    addecs = np.abs(fullstack_icrs.dec - icrs2.dec)[msk]
    assert np.all(adras < tol), f'largest RA change is {np.max(adras.arcsec * 1000)} mas, > {tol}'
    assert np.all(addecs < tol), f'largest Dec change is {np.max(addecs.arcsec * 1000)} mas, > {tol}'

    # check that we're consistent with the ERFA alt/az result
    iers_tab = iers.earth_orientation_table.get()
    xp, yp = u.Quantity(iers_tab.pm_xy(fullstack_times)).to_value(u.radian)
    lon = fullstack_locations.geodetic[0].to_value(u.radian)
    lat = fullstack_locations.geodetic[1].to_value(u.radian)
    height = fullstack_locations.geodetic[2].to_value(u.m)
    jd1, jd2 = get_jd12(fullstack_times, 'utc')
    pressure = fullstack_obsconditions[0].to_value(u.hPa)
    temperature = fullstack_obsconditions[1].to_value(u.deg_C)
    # Relative humidity can be a quantity or a number.
    relative_humidity = u.Quantity(fullstack_obsconditions[2], u.one).value
    obswl = fullstack_obsconditions[3].to_value(u.micron)
    astrom, eo = erfa.apco13(jd1, jd2, fullstack_times.delta_ut1_utc,
                             lon, lat, height,
                             xp, yp, pressure, temperature, relative_humidity,
                             obswl)
    erfadct = _erfa_check(fullstack_icrs.ra.rad, fullstack_icrs.dec.rad, astrom)
    npt.assert_allclose(erfadct['alt'], aacoo.alt.radian, atol=1e-7)
    npt.assert_allclose(erfadct['az'], aacoo.az.radian, atol=1e-7)


def test_fiducial_roudtrip(fullstack_icrs, fullstack_fiducial_altaz):
    """
    Test the full transform from ICRS <-> AltAz
    """
    aacoo = fullstack_icrs.transform_to(fullstack_fiducial_altaz)

    # make sure the round-tripping works
    icrs2 = aacoo.transform_to(ICRS())
    npt.assert_allclose(fullstack_icrs.ra.deg, icrs2.ra.deg)
    npt.assert_allclose(fullstack_icrs.dec.deg, icrs2.dec.deg)


def test_future_altaz():
    """
    While this does test the full stack, it is mostly meant to check that a
    warning is raised when attempting to get to AltAz in the future (beyond
    IERS tables)
    """
    from astropy.utils.exceptions import AstropyWarning

    # this is an ugly hack to get the warning to show up even if it has already
    # appeared
    from astropy.coordinates.builtin_frames import utils
    if hasattr(utils, '__warningregistry__'):
        utils.__warningregistry__.clear()

    location = EarthLocation(lat=0*u.deg, lon=0*u.deg)
    t = Time('J2161')

    # check that these message(s) appear among any other warnings.  If tests are run with
    # --remote-data then the IERS table will be an instance of IERS_Auto which is
    # assured of being "fresh".  In this case getting times outside the range of the
    # table does not raise an exception.  Only if using IERS_B (which happens without
    # --remote-data, i.e. for all CI testing) do we expect another warning.
    with pytest.warns(AstropyWarning, match=r"Tried to get polar motions for "
                      "times after IERS data is valid.*") as found_warnings:
        SkyCoord(1*u.deg, 2*u.deg).transform_to(AltAz(location=location,
                                                      obstime=t))

    if isinstance(iers.earth_orientation_table.get(), iers.IERS_B):
        messages_found = ["(some) times are outside of range covered by IERS "
                          "table." in str(w.message) for w in found_warnings]
        assert any(messages_found)
dhomeier/astropy
astropy/coordinates/tests/test_iau_fullstack.py
astropy/io/misc/pickle_helpers.py
# Licensed under a 3-clause BSD style license - see LICENSE.rst import io import os from os.path import join import os.path import shutil import sys from collections import defaultdict from setuptools import Extension from setuptools.dep_util import newer_group import numpy from extension_helpers import import_file, write_if_different, get_compiler, pkg_config WCSROOT = os.path.relpath(os.path.dirname(__file__)) WCSVERSION = "7.6" def b(s): return s.encode('ascii') def string_escape(s): s = s.decode('ascii').encode('ascii', 'backslashreplace') s = s.replace(b'\n', b'\\n') s = s.replace(b'\0', b'\\0') return s.decode('ascii') def determine_64_bit_int(): """ The only configuration parameter needed at compile-time is how to specify a 64-bit signed integer. Python's ctypes module can get us that information. If we can't be absolutely certain, we default to "long long int", which is correct on most platforms (x86, x86_64). If we find platforms where this heuristic doesn't work, we may need to hardcode for them. """ try: try: import ctypes except ImportError: raise ValueError() if ctypes.sizeof(ctypes.c_longlong) == 8: return "long long int" elif ctypes.sizeof(ctypes.c_long) == 8: return "long int" elif ctypes.sizeof(ctypes.c_int) == 8: return "int" else: raise ValueError() except ValueError: return "long long int" def write_wcsconfig_h(paths): """ Writes out the wcsconfig.h header with local configuration. """ h_file = io.StringIO() h_file.write(""" /* The bundled version has WCSLIB_VERSION */ #define HAVE_WCSLIB_VERSION 1 /* WCSLIB library version number. */ #define WCSLIB_VERSION {} /* 64-bit integer data type. */ #define WCSLIB_INT64 {} /* Windows needs some other defines to prevent inclusion of wcsset() which conflicts with wcslib's wcsset(). These need to be set on code that *uses* astropy.wcs, in addition to astropy.wcs itself. 
*/ #if defined(_WIN32) || defined(_MSC_VER) || defined(__MINGW32__) || defined (__MINGW64__) #ifndef YY_NO_UNISTD_H #define YY_NO_UNISTD_H #endif #ifndef _CRT_SECURE_NO_WARNINGS #define _CRT_SECURE_NO_WARNINGS #endif #ifndef _NO_OLDNAMES #define _NO_OLDNAMES #endif #ifndef NO_OLDNAMES #define NO_OLDNAMES #endif #ifndef __STDC__ #define __STDC__ 1 #endif #endif """.format(WCSVERSION, determine_64_bit_int())) content = h_file.getvalue().encode('ascii') for path in paths: write_if_different(path, content) ###################################################################### # GENERATE DOCSTRINGS IN C def generate_c_docstrings(): docstrings = import_file(os.path.join(WCSROOT, 'docstrings.py')) docstrings = docstrings.__dict__ keys = [ key for key, val in docstrings.items() if not key.startswith('__') and isinstance(val, str)] keys.sort() docs = {} for key in keys: docs[key] = docstrings[key].encode('utf8').lstrip() + b'\0' h_file = io.StringIO() h_file.write("""/* DO NOT EDIT! This file is autogenerated by astropy/wcs/setup_package.py. To edit its contents, edit astropy/wcs/docstrings.py */ #ifndef __DOCSTRINGS_H__ #define __DOCSTRINGS_H__ """) for key in keys: val = docs[key] h_file.write(f'extern char doc_{key}[{len(val)}];\n') h_file.write("\n#endif\n\n") write_if_different( join(WCSROOT, 'include', 'astropy_wcs', 'docstrings.h'), h_file.getvalue().encode('utf-8')) c_file = io.StringIO() c_file.write("""/* DO NOT EDIT! This file is autogenerated by astropy/wcs/setup_package.py. To edit its contents, edit astropy/wcs/docstrings.py The weirdness here with strncpy is because some C compilers, notably MSVC, do not support string literals greater than 256 characters. 
*/ #include <string.h> #include "astropy_wcs/docstrings.h" """) for key in keys: val = docs[key] c_file.write(f'char doc_{key}[{len(val)}] = {{\n') for i in range(0, len(val), 12): section = val[i:i+12] c_file.write(' ') c_file.write(''.join(f'0x{x:02x}, ' for x in section)) c_file.write('\n') c_file.write(" };\n\n") write_if_different( join(WCSROOT, 'src', 'docstrings.c'), c_file.getvalue().encode('utf-8')) def get_wcslib_cfg(cfg, wcslib_files, include_paths): debug = '--debug' in sys.argv cfg['include_dirs'].append(numpy.get_include()) cfg['define_macros'].extend([ ('ECHO', None), ('WCSTRIG_MACRO', None), ('ASTROPY_WCS_BUILD', None), ('_GNU_SOURCE', None)]) if ((int(os.environ.get('ASTROPY_USE_SYSTEM_WCSLIB', 0)) or int(os.environ.get('ASTROPY_USE_SYSTEM_ALL', 0))) and not sys.platform == 'win32'): wcsconfig_h_path = join(WCSROOT, 'include', 'wcsconfig.h') if os.path.exists(wcsconfig_h_path): os.unlink(wcsconfig_h_path) for k, v in pkg_config(['wcslib'], ['wcs']).items(): cfg[k].extend(v) else: write_wcsconfig_h(include_paths) wcslib_path = join("cextern", "wcslib") # Path to wcslib wcslib_cpath = join(wcslib_path, "C") # Path to wcslib source files cfg['sources'].extend(join(wcslib_cpath, x) for x in wcslib_files) cfg['include_dirs'].append(wcslib_cpath) if debug: cfg['define_macros'].append(('DEBUG', None)) cfg['undef_macros'].append('NDEBUG') if (not sys.platform.startswith('sun') and not sys.platform == 'win32'): cfg['extra_compile_args'].extend(["-fno-inline", "-O0", "-g"]) else: # Define ECHO as nothing to prevent spurious newlines from # printing within the libwcs parser cfg['define_macros'].append(('NDEBUG', None)) cfg['undef_macros'].append('DEBUG') if sys.platform == 'win32': # These are written into wcsconfig.h, but that file is not # used by all parts of wcslib. 
cfg['define_macros'].extend([ ('YY_NO_UNISTD_H', None), ('_CRT_SECURE_NO_WARNINGS', None), ('_NO_OLDNAMES', None), # for mingw32 ('NO_OLDNAMES', None), # for mingw64 ('__STDC__', None) # for MSVC ]) if sys.platform.startswith('linux'): cfg['define_macros'].append(('HAVE_SINCOS', None)) # For 4.7+ enable C99 syntax in older compilers (need 'gnu99' std for gcc) if determine_64_bit_int() != 'int' and get_compiler() == 'unix': cfg['extra_compile_args'].extend(['-std=gnu99']) else: cfg['extra_compile_args'].extend(['-std=c99']) # Squelch a few compilation warnings in WCSLIB if get_compiler() in ('unix', 'mingw32'): if not debug: cfg['extra_compile_args'].extend([ '-Wno-strict-prototypes', '-Wno-unused-function', '-Wno-unused-value', '-Wno-uninitialized']) def get_extensions(): generate_c_docstrings() ###################################################################### # DISTUTILS SETUP cfg = defaultdict(list) wcslib_files = [ # List of wcslib files to compile 'flexed/wcsbth.c', 'flexed/wcspih.c', 'flexed/wcsulex.c', 'flexed/wcsutrn.c', 'cel.c', 'dis.c', 'lin.c', 'log.c', 'prj.c', 'spc.c', 'sph.c', 'spx.c', 'tab.c', 'wcs.c', 'wcserr.c', 'wcsfix.c', 'wcshdr.c', 'wcsprintf.c', 'wcsunits.c', 'wcsutil.c' ] wcslib_config_paths = [ join(WCSROOT, 'include', 'astropy_wcs', 'wcsconfig.h'), join(WCSROOT, 'include', 'wcsconfig.h') ] get_wcslib_cfg(cfg, wcslib_files, wcslib_config_paths) cfg['include_dirs'].append(join(WCSROOT, "include")) astropy_wcs_files = [ # List of astropy.wcs files to compile 'distortion.c', 'distortion_wrap.c', 'docstrings.c', 'pipeline.c', 'pyutil.c', 'astropy_wcs.c', 'astropy_wcs_api.c', 'sip.c', 'sip_wrap.c', 'str_list_proxy.c', 'unit_list_proxy.c', 'util.c', 'wcslib_wrap.c', 'wcslib_auxprm_wrap.c', 'wcslib_tabprm_wrap.c', 'wcslib_wtbarr_wrap.c' ] cfg['sources'].extend(join(WCSROOT, 'src', x) for x in astropy_wcs_files) cfg['sources'] = [str(x) for x in cfg['sources']] cfg = dict((str(key), val) for key, val in cfg.items()) # Copy over header files from 
WCSLIB into the installed version of Astropy # so that other Python packages can write extensions that link to it. We # do the copying here then include the data in [options.package_data] in # the setup.cfg file wcslib_headers = [ 'cel.h', 'lin.h', 'prj.h', 'spc.h', 'spx.h', 'tab.h', 'wcs.h', 'wcserr.h', 'wcsmath.h', 'wcsprintf.h', ] if not (int(os.environ.get('ASTROPY_USE_SYSTEM_WCSLIB', 0)) or int(os.environ.get('ASTROPY_USE_SYSTEM_ALL', 0))): for header in wcslib_headers: source = join('cextern', 'wcslib', 'C', header) dest = join('astropy', 'wcs', 'include', 'wcslib', header) if newer_group([source], dest, 'newer'): shutil.copy(source, dest) return [Extension('astropy.wcs._wcs', **cfg)]
# -*- coding: utf-8 -*- # Licensed under a 3-clause BSD style license - see LICENSE.rst import warnings import pytest import numpy as np from numpy import testing as npt import erfa from astropy import units as u from astropy.time import Time from astropy.coordinates.builtin_frames import ICRS, AltAz from astropy.coordinates.builtin_frames.utils import get_jd12 from astropy.coordinates import EarthLocation from astropy.coordinates import SkyCoord from astropy.utils import iers from .utils import randomly_sample_sphere # These fixtures are used in test_iau_fullstack @pytest.fixture(scope="function") def fullstack_icrs(): ra, dec, _ = randomly_sample_sphere(1000) return ICRS(ra=ra, dec=dec) @pytest.fixture(scope="function") def fullstack_fiducial_altaz(fullstack_icrs): altazframe = AltAz(location=EarthLocation(lat=0*u.deg, lon=0*u.deg, height=0*u.m), obstime=Time('J2000')) with warnings.catch_warnings(): # Ignore remote_data warning warnings.simplefilter('ignore') result = fullstack_icrs.transform_to(altazframe) return result @pytest.fixture(scope="function", params=['J2000.1', 'J2010']) def fullstack_times(request): return Time(request.param) @pytest.fixture(scope="function", params=[(0, 0, 0), (23, 0, 0), (-70, 0, 0), (0, 100, 0), (23, 0, 3000)]) def fullstack_locations(request): return EarthLocation(lat=request.param[0]*u.deg, lon=request.param[0]*u.deg, height=request.param[0]*u.m) @pytest.fixture(scope="function", params=[(0*u.bar, 0*u.deg_C, 0, 1*u.micron), (1*u.bar, 0*u.deg_C, 0*u.one, 1*u.micron), (1*u.bar, 10*u.deg_C, 0, 1*u.micron), (1*u.bar, 0*u.deg_C, 50*u.percent, 1*u.micron), (1*u.bar, 0*u.deg_C, 0, 21*u.cm)]) def fullstack_obsconditions(request): return request.param def _erfa_check(ira, idec, astrom): """ This function does the same thing the astropy layer is supposed to do, but all in erfa """ cra, cdec = erfa.atciq(ira, idec, 0, 0, 0, 0, astrom) az, zen, ha, odec, ora = erfa.atioq(cra, cdec, astrom) alt = np.pi/2-zen cra2, cdec2 = erfa.atoiq('A', 
az, zen, astrom) ira2, idec2 = erfa.aticq(cra2, cdec2, astrom) dct = locals() del dct['astrom'] return dct def test_iau_fullstack(fullstack_icrs, fullstack_fiducial_altaz, fullstack_times, fullstack_locations, fullstack_obsconditions): """ Test the full transform from ICRS <-> AltAz """ # create the altaz frame altazframe = AltAz(obstime=fullstack_times, location=fullstack_locations, pressure=fullstack_obsconditions[0], temperature=fullstack_obsconditions[1], relative_humidity=fullstack_obsconditions[2], obswl=fullstack_obsconditions[3]) aacoo = fullstack_icrs.transform_to(altazframe) # compare aacoo to the fiducial AltAz - should always be different assert np.all(np.abs(aacoo.alt - fullstack_fiducial_altaz.alt) > 50*u.milliarcsecond) assert np.all(np.abs(aacoo.az - fullstack_fiducial_altaz.az) > 50*u.milliarcsecond) # if the refraction correction is included, we *only* do the comparisons # where altitude >5 degrees. The SOFA guides imply that below 5 is where # where accuracy gets more problematic, and testing reveals that alt<~0 # gives garbage round-tripping, and <10 can give ~1 arcsec uncertainty if fullstack_obsconditions[0].value == 0: # but if there is no refraction correction, check everything msk = slice(None) tol = 5*u.microarcsecond else: msk = aacoo.alt > 5*u.deg # most of them aren't this bad, but some of those at low alt are offset # this much. 
For alt > 10, this is always better than 100 masec tol = 750*u.milliarcsecond # now make sure the full stack round-tripping works icrs2 = aacoo.transform_to(ICRS()) adras = np.abs(fullstack_icrs.ra - icrs2.ra)[msk] addecs = np.abs(fullstack_icrs.dec - icrs2.dec)[msk] assert np.all(adras < tol), f'largest RA change is {np.max(adras.arcsec * 1000)} mas, > {tol}' assert np.all(addecs < tol), f'largest Dec change is {np.max(addecs.arcsec * 1000)} mas, > {tol}' # check that we're consistent with the ERFA alt/az result iers_tab = iers.earth_orientation_table.get() xp, yp = u.Quantity(iers_tab.pm_xy(fullstack_times)).to_value(u.radian) lon = fullstack_locations.geodetic[0].to_value(u.radian) lat = fullstack_locations.geodetic[1].to_value(u.radian) height = fullstack_locations.geodetic[2].to_value(u.m) jd1, jd2 = get_jd12(fullstack_times, 'utc') pressure = fullstack_obsconditions[0].to_value(u.hPa) temperature = fullstack_obsconditions[1].to_value(u.deg_C) # Relative humidity can be a quantity or a number. 
relative_humidity = u.Quantity(fullstack_obsconditions[2], u.one).value obswl = fullstack_obsconditions[3].to_value(u.micron) astrom, eo = erfa.apco13(jd1, jd2, fullstack_times.delta_ut1_utc, lon, lat, height, xp, yp, pressure, temperature, relative_humidity, obswl) erfadct = _erfa_check(fullstack_icrs.ra.rad, fullstack_icrs.dec.rad, astrom) npt.assert_allclose(erfadct['alt'], aacoo.alt.radian, atol=1e-7) npt.assert_allclose(erfadct['az'], aacoo.az.radian, atol=1e-7) def test_fiducial_roudtrip(fullstack_icrs, fullstack_fiducial_altaz): """ Test the full transform from ICRS <-> AltAz """ aacoo = fullstack_icrs.transform_to(fullstack_fiducial_altaz) # make sure the round-tripping works icrs2 = aacoo.transform_to(ICRS()) npt.assert_allclose(fullstack_icrs.ra.deg, icrs2.ra.deg) npt.assert_allclose(fullstack_icrs.dec.deg, icrs2.dec.deg) def test_future_altaz(): """ While this does test the full stack, it is mostly meant to check that a warning is raised when attempting to get to AltAz in the future (beyond IERS tables) """ from astropy.utils.exceptions import AstropyWarning # this is an ugly hack to get the warning to show up even if it has already # appeared from astropy.coordinates.builtin_frames import utils if hasattr(utils, '__warningregistry__'): utils.__warningregistry__.clear() location = EarthLocation(lat=0*u.deg, lon=0*u.deg) t = Time('J2161') # check that these message(s) appear among any other warnings. If tests are run with # --remote-data then the IERS table will be an instance of IERS_Auto which is # assured of being "fresh". In this case getting times outside the range of the # table does not raise an exception. Only if using IERS_B (which happens without # --remote-data, i.e. for all CI testing) do we expect another warning. 
with pytest.warns(AstropyWarning, match=r"Tried to get polar motions for " "times after IERS data is valid.*") as found_warnings: SkyCoord(1*u.deg, 2*u.deg).transform_to(AltAz(location=location, obstime=t)) if isinstance(iers.earth_orientation_table.get(), iers.IERS_B): messages_found = ["(some) times are outside of range covered by IERS " "table." in str(w.message) for w in found_warnings] assert any(messages_found)
dhomeier/astropy
astropy/coordinates/tests/test_iau_fullstack.py
astropy/wcs/setup_package.py
# Licensed under a 3-clause BSD style license - see LICENSE.rst """URL unescaper functions.""" # STDLIB from xml.sax import saxutils __all__ = ['unescape_all'] # This is DIY _bytes_entities = {b'&amp;': b'&', b'&lt;': b'<', b'&gt;': b'>', b'&amp;&amp;': b'&', b'&&': b'&', b'%2F': b'/'} _bytes_keys = [b'&amp;&amp;', b'&&', b'&amp;', b'&lt;', b'&gt;', b'%2F'] # This is used by saxutils _str_entities = {'&amp;&amp;': '&', '&&': '&', '%2F': '/'} _str_keys = ['&amp;&amp;', '&&', '&amp;', '&lt;', '&gt;', '%2F'] def unescape_all(url): """Recursively unescape a given URL. .. note:: '&amp;&amp;' becomes a single '&'. Parameters ---------- url : str or bytes URL to unescape. Returns ------- clean_url : str or bytes Unescaped URL. """ if isinstance(url, bytes): func2use = _unescape_bytes keys2use = _bytes_keys else: func2use = _unescape_str keys2use = _str_keys clean_url = func2use(url) not_done = [clean_url.count(key) > 0 for key in keys2use] if True in not_done: return unescape_all(clean_url) else: return clean_url def _unescape_str(url): return saxutils.unescape(url, _str_entities) def _unescape_bytes(url): clean_url = url for key in _bytes_keys: clean_url = clean_url.replace(key, _bytes_entities[key]) return clean_url
# -*- coding: utf-8 -*- # Licensed under a 3-clause BSD style license - see LICENSE.rst import warnings import pytest import numpy as np from numpy import testing as npt import erfa from astropy import units as u from astropy.time import Time from astropy.coordinates.builtin_frames import ICRS, AltAz from astropy.coordinates.builtin_frames.utils import get_jd12 from astropy.coordinates import EarthLocation from astropy.coordinates import SkyCoord from astropy.utils import iers from .utils import randomly_sample_sphere # These fixtures are used in test_iau_fullstack @pytest.fixture(scope="function") def fullstack_icrs(): ra, dec, _ = randomly_sample_sphere(1000) return ICRS(ra=ra, dec=dec) @pytest.fixture(scope="function") def fullstack_fiducial_altaz(fullstack_icrs): altazframe = AltAz(location=EarthLocation(lat=0*u.deg, lon=0*u.deg, height=0*u.m), obstime=Time('J2000')) with warnings.catch_warnings(): # Ignore remote_data warning warnings.simplefilter('ignore') result = fullstack_icrs.transform_to(altazframe) return result @pytest.fixture(scope="function", params=['J2000.1', 'J2010']) def fullstack_times(request): return Time(request.param) @pytest.fixture(scope="function", params=[(0, 0, 0), (23, 0, 0), (-70, 0, 0), (0, 100, 0), (23, 0, 3000)]) def fullstack_locations(request): return EarthLocation(lat=request.param[0]*u.deg, lon=request.param[0]*u.deg, height=request.param[0]*u.m) @pytest.fixture(scope="function", params=[(0*u.bar, 0*u.deg_C, 0, 1*u.micron), (1*u.bar, 0*u.deg_C, 0*u.one, 1*u.micron), (1*u.bar, 10*u.deg_C, 0, 1*u.micron), (1*u.bar, 0*u.deg_C, 50*u.percent, 1*u.micron), (1*u.bar, 0*u.deg_C, 0, 21*u.cm)]) def fullstack_obsconditions(request): return request.param def _erfa_check(ira, idec, astrom): """ This function does the same thing the astropy layer is supposed to do, but all in erfa """ cra, cdec = erfa.atciq(ira, idec, 0, 0, 0, 0, astrom) az, zen, ha, odec, ora = erfa.atioq(cra, cdec, astrom) alt = np.pi/2-zen cra2, cdec2 = erfa.atoiq('A', 
az, zen, astrom) ira2, idec2 = erfa.aticq(cra2, cdec2, astrom) dct = locals() del dct['astrom'] return dct def test_iau_fullstack(fullstack_icrs, fullstack_fiducial_altaz, fullstack_times, fullstack_locations, fullstack_obsconditions): """ Test the full transform from ICRS <-> AltAz """ # create the altaz frame altazframe = AltAz(obstime=fullstack_times, location=fullstack_locations, pressure=fullstack_obsconditions[0], temperature=fullstack_obsconditions[1], relative_humidity=fullstack_obsconditions[2], obswl=fullstack_obsconditions[3]) aacoo = fullstack_icrs.transform_to(altazframe) # compare aacoo to the fiducial AltAz - should always be different assert np.all(np.abs(aacoo.alt - fullstack_fiducial_altaz.alt) > 50*u.milliarcsecond) assert np.all(np.abs(aacoo.az - fullstack_fiducial_altaz.az) > 50*u.milliarcsecond) # if the refraction correction is included, we *only* do the comparisons # where altitude >5 degrees. The SOFA guides imply that below 5 is where # where accuracy gets more problematic, and testing reveals that alt<~0 # gives garbage round-tripping, and <10 can give ~1 arcsec uncertainty if fullstack_obsconditions[0].value == 0: # but if there is no refraction correction, check everything msk = slice(None) tol = 5*u.microarcsecond else: msk = aacoo.alt > 5*u.deg # most of them aren't this bad, but some of those at low alt are offset # this much. 
For alt > 10, this is always better than 100 masec tol = 750*u.milliarcsecond # now make sure the full stack round-tripping works icrs2 = aacoo.transform_to(ICRS()) adras = np.abs(fullstack_icrs.ra - icrs2.ra)[msk] addecs = np.abs(fullstack_icrs.dec - icrs2.dec)[msk] assert np.all(adras < tol), f'largest RA change is {np.max(adras.arcsec * 1000)} mas, > {tol}' assert np.all(addecs < tol), f'largest Dec change is {np.max(addecs.arcsec * 1000)} mas, > {tol}' # check that we're consistent with the ERFA alt/az result iers_tab = iers.earth_orientation_table.get() xp, yp = u.Quantity(iers_tab.pm_xy(fullstack_times)).to_value(u.radian) lon = fullstack_locations.geodetic[0].to_value(u.radian) lat = fullstack_locations.geodetic[1].to_value(u.radian) height = fullstack_locations.geodetic[2].to_value(u.m) jd1, jd2 = get_jd12(fullstack_times, 'utc') pressure = fullstack_obsconditions[0].to_value(u.hPa) temperature = fullstack_obsconditions[1].to_value(u.deg_C) # Relative humidity can be a quantity or a number. 
relative_humidity = u.Quantity(fullstack_obsconditions[2], u.one).value obswl = fullstack_obsconditions[3].to_value(u.micron) astrom, eo = erfa.apco13(jd1, jd2, fullstack_times.delta_ut1_utc, lon, lat, height, xp, yp, pressure, temperature, relative_humidity, obswl) erfadct = _erfa_check(fullstack_icrs.ra.rad, fullstack_icrs.dec.rad, astrom) npt.assert_allclose(erfadct['alt'], aacoo.alt.radian, atol=1e-7) npt.assert_allclose(erfadct['az'], aacoo.az.radian, atol=1e-7) def test_fiducial_roudtrip(fullstack_icrs, fullstack_fiducial_altaz): """ Test the full transform from ICRS <-> AltAz """ aacoo = fullstack_icrs.transform_to(fullstack_fiducial_altaz) # make sure the round-tripping works icrs2 = aacoo.transform_to(ICRS()) npt.assert_allclose(fullstack_icrs.ra.deg, icrs2.ra.deg) npt.assert_allclose(fullstack_icrs.dec.deg, icrs2.dec.deg) def test_future_altaz(): """ While this does test the full stack, it is mostly meant to check that a warning is raised when attempting to get to AltAz in the future (beyond IERS tables) """ from astropy.utils.exceptions import AstropyWarning # this is an ugly hack to get the warning to show up even if it has already # appeared from astropy.coordinates.builtin_frames import utils if hasattr(utils, '__warningregistry__'): utils.__warningregistry__.clear() location = EarthLocation(lat=0*u.deg, lon=0*u.deg) t = Time('J2161') # check that these message(s) appear among any other warnings. If tests are run with # --remote-data then the IERS table will be an instance of IERS_Auto which is # assured of being "fresh". In this case getting times outside the range of the # table does not raise an exception. Only if using IERS_B (which happens without # --remote-data, i.e. for all CI testing) do we expect another warning. 
with pytest.warns(AstropyWarning, match=r"Tried to get polar motions for " "times after IERS data is valid.*") as found_warnings: SkyCoord(1*u.deg, 2*u.deg).transform_to(AltAz(location=location, obstime=t)) if isinstance(iers.earth_orientation_table.get(), iers.IERS_B): messages_found = ["(some) times are outside of range covered by IERS " "table." in str(w.message) for w in found_warnings] assert any(messages_found)
dhomeier/astropy
astropy/coordinates/tests/test_iau_fullstack.py
astropy/utils/xml/unescaper.py
import pytest import warnings # autouse makes this an all-coordinates-tests fixture # this can be eliminated if/when warnings in pytest are all turned to errors (gh issue #7928) @pytest.fixture(autouse=True) def representation_deprecation_to_error(): warnings.filterwarnings('error', 'The `representation` keyword/property name is deprecated in favor of `representation_type`') filt = warnings.filters[0] yield try: warnings.filters.remove(filt) except ValueError: pass # already removed
# -*- coding: utf-8 -*- # Licensed under a 3-clause BSD style license - see LICENSE.rst import warnings import pytest import numpy as np from numpy import testing as npt import erfa from astropy import units as u from astropy.time import Time from astropy.coordinates.builtin_frames import ICRS, AltAz from astropy.coordinates.builtin_frames.utils import get_jd12 from astropy.coordinates import EarthLocation from astropy.coordinates import SkyCoord from astropy.utils import iers from .utils import randomly_sample_sphere # These fixtures are used in test_iau_fullstack @pytest.fixture(scope="function") def fullstack_icrs(): ra, dec, _ = randomly_sample_sphere(1000) return ICRS(ra=ra, dec=dec) @pytest.fixture(scope="function") def fullstack_fiducial_altaz(fullstack_icrs): altazframe = AltAz(location=EarthLocation(lat=0*u.deg, lon=0*u.deg, height=0*u.m), obstime=Time('J2000')) with warnings.catch_warnings(): # Ignore remote_data warning warnings.simplefilter('ignore') result = fullstack_icrs.transform_to(altazframe) return result @pytest.fixture(scope="function", params=['J2000.1', 'J2010']) def fullstack_times(request): return Time(request.param) @pytest.fixture(scope="function", params=[(0, 0, 0), (23, 0, 0), (-70, 0, 0), (0, 100, 0), (23, 0, 3000)]) def fullstack_locations(request): return EarthLocation(lat=request.param[0]*u.deg, lon=request.param[0]*u.deg, height=request.param[0]*u.m) @pytest.fixture(scope="function", params=[(0*u.bar, 0*u.deg_C, 0, 1*u.micron), (1*u.bar, 0*u.deg_C, 0*u.one, 1*u.micron), (1*u.bar, 10*u.deg_C, 0, 1*u.micron), (1*u.bar, 0*u.deg_C, 50*u.percent, 1*u.micron), (1*u.bar, 0*u.deg_C, 0, 21*u.cm)]) def fullstack_obsconditions(request): return request.param def _erfa_check(ira, idec, astrom): """ This function does the same thing the astropy layer is supposed to do, but all in erfa """ cra, cdec = erfa.atciq(ira, idec, 0, 0, 0, 0, astrom) az, zen, ha, odec, ora = erfa.atioq(cra, cdec, astrom) alt = np.pi/2-zen cra2, cdec2 = erfa.atoiq('A', 
az, zen, astrom) ira2, idec2 = erfa.aticq(cra2, cdec2, astrom) dct = locals() del dct['astrom'] return dct def test_iau_fullstack(fullstack_icrs, fullstack_fiducial_altaz, fullstack_times, fullstack_locations, fullstack_obsconditions): """ Test the full transform from ICRS <-> AltAz """ # create the altaz frame altazframe = AltAz(obstime=fullstack_times, location=fullstack_locations, pressure=fullstack_obsconditions[0], temperature=fullstack_obsconditions[1], relative_humidity=fullstack_obsconditions[2], obswl=fullstack_obsconditions[3]) aacoo = fullstack_icrs.transform_to(altazframe) # compare aacoo to the fiducial AltAz - should always be different assert np.all(np.abs(aacoo.alt - fullstack_fiducial_altaz.alt) > 50*u.milliarcsecond) assert np.all(np.abs(aacoo.az - fullstack_fiducial_altaz.az) > 50*u.milliarcsecond) # if the refraction correction is included, we *only* do the comparisons # where altitude >5 degrees. The SOFA guides imply that below 5 is where # where accuracy gets more problematic, and testing reveals that alt<~0 # gives garbage round-tripping, and <10 can give ~1 arcsec uncertainty if fullstack_obsconditions[0].value == 0: # but if there is no refraction correction, check everything msk = slice(None) tol = 5*u.microarcsecond else: msk = aacoo.alt > 5*u.deg # most of them aren't this bad, but some of those at low alt are offset # this much. 
For alt > 10, this is always better than 100 masec tol = 750*u.milliarcsecond # now make sure the full stack round-tripping works icrs2 = aacoo.transform_to(ICRS()) adras = np.abs(fullstack_icrs.ra - icrs2.ra)[msk] addecs = np.abs(fullstack_icrs.dec - icrs2.dec)[msk] assert np.all(adras < tol), f'largest RA change is {np.max(adras.arcsec * 1000)} mas, > {tol}' assert np.all(addecs < tol), f'largest Dec change is {np.max(addecs.arcsec * 1000)} mas, > {tol}' # check that we're consistent with the ERFA alt/az result iers_tab = iers.earth_orientation_table.get() xp, yp = u.Quantity(iers_tab.pm_xy(fullstack_times)).to_value(u.radian) lon = fullstack_locations.geodetic[0].to_value(u.radian) lat = fullstack_locations.geodetic[1].to_value(u.radian) height = fullstack_locations.geodetic[2].to_value(u.m) jd1, jd2 = get_jd12(fullstack_times, 'utc') pressure = fullstack_obsconditions[0].to_value(u.hPa) temperature = fullstack_obsconditions[1].to_value(u.deg_C) # Relative humidity can be a quantity or a number. 
relative_humidity = u.Quantity(fullstack_obsconditions[2], u.one).value obswl = fullstack_obsconditions[3].to_value(u.micron) astrom, eo = erfa.apco13(jd1, jd2, fullstack_times.delta_ut1_utc, lon, lat, height, xp, yp, pressure, temperature, relative_humidity, obswl) erfadct = _erfa_check(fullstack_icrs.ra.rad, fullstack_icrs.dec.rad, astrom) npt.assert_allclose(erfadct['alt'], aacoo.alt.radian, atol=1e-7) npt.assert_allclose(erfadct['az'], aacoo.az.radian, atol=1e-7) def test_fiducial_roudtrip(fullstack_icrs, fullstack_fiducial_altaz): """ Test the full transform from ICRS <-> AltAz """ aacoo = fullstack_icrs.transform_to(fullstack_fiducial_altaz) # make sure the round-tripping works icrs2 = aacoo.transform_to(ICRS()) npt.assert_allclose(fullstack_icrs.ra.deg, icrs2.ra.deg) npt.assert_allclose(fullstack_icrs.dec.deg, icrs2.dec.deg) def test_future_altaz(): """ While this does test the full stack, it is mostly meant to check that a warning is raised when attempting to get to AltAz in the future (beyond IERS tables) """ from astropy.utils.exceptions import AstropyWarning # this is an ugly hack to get the warning to show up even if it has already # appeared from astropy.coordinates.builtin_frames import utils if hasattr(utils, '__warningregistry__'): utils.__warningregistry__.clear() location = EarthLocation(lat=0*u.deg, lon=0*u.deg) t = Time('J2161') # check that these message(s) appear among any other warnings. If tests are run with # --remote-data then the IERS table will be an instance of IERS_Auto which is # assured of being "fresh". In this case getting times outside the range of the # table does not raise an exception. Only if using IERS_B (which happens without # --remote-data, i.e. for all CI testing) do we expect another warning. 
with pytest.warns(AstropyWarning, match=r"Tried to get polar motions for " "times after IERS data is valid.*") as found_warnings: SkyCoord(1*u.deg, 2*u.deg).transform_to(AltAz(location=location, obstime=t)) if isinstance(iers.earth_orientation_table.get(), iers.IERS_B): messages_found = ["(some) times are outside of range covered by IERS " "table." in str(w.message) for w in found_warnings] assert any(messages_found)
dhomeier/astropy
astropy/coordinates/tests/test_iau_fullstack.py
astropy/coordinates/tests/conftest.py
from typing import Any, Dict, Tuple

import numpy as np

from great_expectations.core.util import get_sql_dialect_floating_point_infinity_value
from great_expectations.execution_engine import (
    PandasExecutionEngine,
    SparkDFExecutionEngine,
)
from great_expectations.execution_engine.execution_engine import MetricDomainTypes
from great_expectations.execution_engine.sqlalchemy_execution_engine import (
    SqlAlchemyExecutionEngine,
)
from great_expectations.expectations.metrics.import_manager import sa
from great_expectations.expectations.metrics.metric_provider import (
    MetricProvider,
    metric_value,
)


class ColumnValuesBetweenCount(MetricProvider):
    """This metric is an aggregate helper for rare cases.

    Counts the number of values in a column that fall between ``min_value``
    and ``max_value``.  The bounds are inclusive by default; ``strict_min`` /
    ``strict_max`` switch the respective bound to a strict (exclusive)
    comparison.  At least one of ``min_value`` / ``max_value`` must be given.
    """

    metric_name = "column_values.between.count"
    value_keys = (
        "min_value",
        "max_value",
        "strict_min",
        "strict_max",
    )

    @metric_value(engine=PandasExecutionEngine)
    def _pandas(
        cls,
        execution_engine: PandasExecutionEngine,
        metric_domain_kwargs: Dict,
        metric_value_kwargs: Dict,
        metrics: Dict[Tuple, Any],
        runtime_configuration: Dict,
    ):
        """Count in-range values of the domain column of a pandas DataFrame.

        Raises:
            ValueError: if both bounds are None, or min_value > max_value.
        """
        min_value = metric_value_kwargs.get("min_value")
        max_value = metric_value_kwargs.get("max_value")
        strict_min = metric_value_kwargs.get("strict_min")
        strict_max = metric_value_kwargs.get("strict_max")
        if min_value is None and max_value is None:
            raise ValueError("min_value and max_value cannot both be None")

        if min_value is not None and max_value is not None and min_value > max_value:
            raise ValueError("min_value cannot be greater than max_value")

        (
            df,
            compute_domain_kwargs,
            accessor_domain_kwargs,
        ) = execution_engine.get_compute_domain(
            domain_kwargs=metric_domain_kwargs, domain_type=MetricDomainTypes.COLUMN
        )

        val = df[accessor_domain_kwargs["column"]]

        if min_value is not None and max_value is not None:
            # BUGFIX: these comparisons are element-wise on a pandas Series, so
            # the bitwise "&" operator must be used.  The previous code used
            # the boolean "and", which raises "The truth value of a Series is
            # ambiguous" for any column with more than one row.
            if strict_min and strict_max:
                series = (min_value < val) & (val < max_value)
            elif strict_min:
                series = (min_value < val) & (val <= max_value)
            elif strict_max:
                series = (min_value <= val) & (val < max_value)
            else:
                series = (min_value <= val) & (val <= max_value)
        elif min_value is None and max_value is not None:
            if strict_max:
                series = val < max_value
            else:
                series = val <= max_value
        elif min_value is not None and max_value is None:
            if strict_min:
                series = min_value < val
            else:
                series = min_value <= val
        else:
            raise ValueError("unable to parse domain and value kwargs")

        # np.count_nonzero treats True as 1, so this is the in-range row count.
        return np.count_nonzero(series)

    @metric_value(engine=SqlAlchemyExecutionEngine)
    def _sqlalchemy(
        cls,
        execution_engine: SqlAlchemyExecutionEngine,
        metric_domain_kwargs: Dict,
        metric_value_kwargs: Dict,
        metrics: Dict[Tuple, Any],
        runtime_configuration: Dict,
    ):
        """Count in-range values of the domain column via a SQL query.

        Sentinel +/-infinity values from the "api_np" / "api_cast" schemas are
        translated into the dialect-specific representation before being used
        in the WHERE clause.

        Raises:
            ValueError: if both bounds are None, or min_value > max_value.
        """
        min_value = metric_value_kwargs.get("min_value")
        max_value = metric_value_kwargs.get("max_value")
        strict_min = metric_value_kwargs.get("strict_min")
        strict_max = metric_value_kwargs.get("strict_max")
        if min_value is not None and max_value is not None and min_value > max_value:
            raise ValueError("min_value cannot be greater than max_value")

        if min_value is None and max_value is None:
            raise ValueError("min_value and max_value cannot both be None")

        dialect_name = execution_engine.engine.dialect.name.lower()

        # Map API-level infinity sentinels onto the value this SQL dialect
        # actually understands (e.g. some dialects need a CAST-able literal).
        if (
            min_value
            == get_sql_dialect_floating_point_infinity_value(
                schema="api_np", negative=True
            )
        ) or (
            min_value
            == get_sql_dialect_floating_point_infinity_value(
                schema="api_cast", negative=True
            )
        ):
            min_value = get_sql_dialect_floating_point_infinity_value(
                schema=dialect_name, negative=True
            )

        if (
            min_value
            == get_sql_dialect_floating_point_infinity_value(
                schema="api_np", negative=False
            )
        ) or (
            min_value
            == get_sql_dialect_floating_point_infinity_value(
                schema="api_cast", negative=False
            )
        ):
            min_value = get_sql_dialect_floating_point_infinity_value(
                schema=dialect_name, negative=False
            )

        if (
            max_value
            == get_sql_dialect_floating_point_infinity_value(
                schema="api_np", negative=True
            )
        ) or (
            max_value
            == get_sql_dialect_floating_point_infinity_value(
                schema="api_cast", negative=True
            )
        ):
            max_value = get_sql_dialect_floating_point_infinity_value(
                schema=dialect_name, negative=True
            )

        if (
            max_value
            == get_sql_dialect_floating_point_infinity_value(
                schema="api_np", negative=False
            )
        ) or (
            max_value
            == get_sql_dialect_floating_point_infinity_value(
                schema="api_cast", negative=False
            )
        ):
            max_value = get_sql_dialect_floating_point_infinity_value(
                schema=dialect_name, negative=False
            )

        (
            selectable,
            compute_domain_kwargs,
            accessor_domain_kwargs,
        ) = execution_engine.get_compute_domain(
            domain_kwargs=metric_domain_kwargs, domain_type=MetricDomainTypes.COLUMN
        )

        column = sa.column(accessor_domain_kwargs["column"])

        if min_value is None:
            if strict_max:
                condition = column < max_value
            else:
                condition = column <= max_value
        elif max_value is None:
            if strict_min:
                condition = column > min_value
            else:
                condition = column >= min_value
        else:
            if strict_min and strict_max:
                condition = sa.and_(column > min_value, column < max_value)
            elif strict_min:
                condition = sa.and_(column > min_value, column <= max_value)
            elif strict_max:
                condition = sa.and_(column >= min_value, column < max_value)
            else:
                condition = sa.and_(column >= min_value, column <= max_value)

        return execution_engine.engine.execute(
            sa.select([sa.func.count()]).select_from(selectable).where(condition)
        ).scalar()

    @metric_value(engine=SparkDFExecutionEngine)
    def _spark(
        cls,
        execution_engine: SparkDFExecutionEngine,
        metric_domain_kwargs: Dict,
        metric_value_kwargs: Dict,
        metrics: Dict[Tuple, Any],
        runtime_configuration: Dict,
    ):
        """Count in-range values of the domain column of a Spark DataFrame.

        Raises:
            ValueError: if both bounds are None, or min_value > max_value.
        """
        min_value = metric_value_kwargs.get("min_value")
        max_value = metric_value_kwargs.get("max_value")
        strict_min = metric_value_kwargs.get("strict_min")
        strict_max = metric_value_kwargs.get("strict_max")
        # Validate once up front (the original repeated these two checks
        # verbatim after get_compute_domain; the duplicate was redundant).
        if min_value is not None and max_value is not None and min_value > max_value:
            raise ValueError("min_value cannot be greater than max_value")

        if min_value is None and max_value is None:
            raise ValueError("min_value and max_value cannot both be None")

        (
            df,
            compute_domain_kwargs,
            accessor_domain_kwargs,
        ) = execution_engine.get_compute_domain(
            domain_kwargs=metric_domain_kwargs, domain_type=MetricDomainTypes.COLUMN
        )

        column = df[accessor_domain_kwargs["column"]]

        # Spark Column comparisons compose with "&" (bitwise and), matching
        # the pandas implementation above.
        if min_value is None:
            if strict_max:
                condition = column < max_value
            else:
                condition = column <= max_value
        elif max_value is None:
            if strict_min:
                condition = column > min_value
            else:
                condition = column >= min_value
        else:
            if strict_min and strict_max:
                condition = (column > min_value) & (column < max_value)
            elif strict_min:
                condition = (column > min_value) & (column <= max_value)
            elif strict_max:
                condition = (column >= min_value) & (column < max_value)
            else:
                condition = (column >= min_value) & (column <= max_value)

        return df.filter(condition).count()
import json import os from typing import Dict, List from unittest import mock import click import pytest from _pytest.capture import CaptureResult from click.testing import CliRunner, Result from great_expectations import DataContext from great_expectations.cli import cli from great_expectations.cli.suite import ( _process_suite_edit_flags_and_prompt, _process_suite_new_flags_and_prompt, ) from great_expectations.core import ExpectationConfiguration from great_expectations.core.batch import BatchRequest from great_expectations.core.expectation_suite import ExpectationSuite from great_expectations.util import lint_code from tests.cli.utils import assert_no_logging_messages_or_tracebacks from tests.render.test_util import ( find_code_in_notebook, load_notebook_from_path, run_notebook, ) def test_suite_help_output(caplog): runner: CliRunner = CliRunner(mix_stderr=False) result: Result = runner.invoke(cli, ["--v3-api", "suite"], catch_exceptions=False) assert result.exit_code == 0 stdout: str = result.stdout assert ( """ Usage: great_expectations suite [OPTIONS] COMMAND [ARGS]... Expectation Suite operations Options: --help Show this message and exit. Commands: delete Delete an Expectation Suite from the Expectation Store. demo This command is not supported in the v3 (Batch Request) API. edit Edit an existing Expectation Suite. list List existing Expectation Suites. new Create a new Expectation Suite. 
""" in stdout ) assert_no_logging_messages_or_tracebacks( my_caplog=caplog, click_result=result, ) @mock.patch( "great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit" ) def test_suite_demo_deprecation_message( mock_emit, caplog, monkeypatch, empty_data_context_stats_enabled ): context: DataContext = empty_data_context_stats_enabled monkeypatch.chdir(os.path.dirname(context.root_directory)) runner: CliRunner = CliRunner(mix_stderr=False) result: Result = runner.invoke( cli, f"--v3-api suite demo", catch_exceptions=False, ) assert result.exit_code == 0 stdout: str = result.stdout assert "This command is not supported in the v3 (Batch Request) API." in stdout expected_call_args_list = [ mock.call( {"event_payload": {}, "event": "data_context.__init__", "success": True} ), mock.call( { "event": "cli.suite.demo.begin", "event_payload": {"api_version": "v3"}, "success": True, } ), mock.call( { "event": "cli.suite.demo.end", "event_payload": {"api_version": "v3"}, "success": True, } ), ] assert mock_emit.call_args_list == expected_call_args_list assert mock_emit.call_count == len(expected_call_args_list) assert_no_logging_messages_or_tracebacks( my_caplog=caplog, click_result=result, ) @mock.patch( "great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit" ) @mock.patch("subprocess.call", return_value=True, side_effect=None) @mock.patch("webbrowser.open", return_value=True, side_effect=None) def test_suite_new_non_interactive_with_suite_name_prompted_default_runs_notebook_opens_jupyter( mock_webbroser, mock_subprocess, mock_emit, caplog, monkeypatch, titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled, ): context: DataContext = titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled monkeypatch.chdir(os.path.dirname(context.root_directory)) project_dir: str = context.root_directory uncommitted_dir: str = 
os.path.join(project_dir, "uncommitted") expectation_suite_name: str = "warning" runner: CliRunner = CliRunner(mix_stderr=False) result: Result = runner.invoke( cli, f"--v3-api suite new", input="\n", catch_exceptions=False, ) assert result.exit_code == 0 stdout: str = result.stdout assert "Select a datasource" not in stdout assert f"Name the new Expectation Suite [warning]:" in stdout assert "Opening a notebook for you now to edit your expectation suite!" in stdout assert "If you wish to avoid this you can add the `--no-jupyter` flag." in stdout expected_suite_path: str = os.path.join( project_dir, "expectations", f"{expectation_suite_name}.json" ) assert os.path.isfile(expected_suite_path) expected_notebook_path: str = os.path.join( uncommitted_dir, f"edit_{expectation_suite_name}.ipynb" ) assert os.path.isfile(expected_notebook_path) run_notebook( notebook_path=expected_notebook_path, notebook_dir=uncommitted_dir, string_to_be_replaced="context.open_data_docs(resource_identifier=suite_identifier)", replacement_string="", ) context = DataContext(context_root_dir=project_dir) assert expectation_suite_name in context.list_expectation_suite_names() suite: ExpectationSuite = context.get_expectation_suite( expectation_suite_name=expectation_suite_name ) assert suite.expectations == [] assert mock_subprocess.call_count == 1 call_args: List[str] = mock_subprocess.call_args[0][0] assert call_args[0] == "jupyter" assert call_args[1] == "notebook" assert expected_notebook_path in call_args[2] assert mock_webbroser.call_count == 0 assert mock_emit.call_count == 4 assert mock_emit.call_args_list == [ mock.call( {"event_payload": {}, "event": "data_context.__init__", "success": True} ), mock.call( { "event": "cli.suite.new.begin", "event_payload": {"api_version": "v3"}, "success": True, } ), mock.call( { "event": "cli.suite.new.end", "event_payload": {"api_version": "v3"}, "success": True, } ), mock.call( {"event_payload": {}, "event": "data_context.__init__", "success": 
True} ), ] assert_no_logging_messages_or_tracebacks( my_caplog=caplog, click_result=result, ) @mock.patch( "great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit" ) @mock.patch("subprocess.call", return_value=True, side_effect=None) @mock.patch("webbrowser.open", return_value=True, side_effect=None) def test_suite_new_non_interactive_with_suite_name_prompted_custom_runs_notebook_opens_jupyter( mock_webbroser, mock_subprocess, mock_emit, caplog, monkeypatch, titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled, ): context: DataContext = titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled monkeypatch.chdir(os.path.dirname(context.root_directory)) project_dir: str = context.root_directory uncommitted_dir: str = os.path.join(project_dir, "uncommitted") expectation_suite_name: str = "test_suite_name" runner: CliRunner = CliRunner(mix_stderr=False) result: Result = runner.invoke( cli, f"--v3-api suite new", input=f"1\n{expectation_suite_name}\n", catch_exceptions=False, ) assert result.exit_code == 0 stdout: str = result.stdout assert "Select a datasource" not in stdout assert f"Name the new Expectation Suite [warning]:" in stdout assert "Opening a notebook for you now to edit your expectation suite!" in stdout assert "If you wish to avoid this you can add the `--no-jupyter` flag." 
in stdout expected_suite_path: str = os.path.join( project_dir, "expectations", f"{expectation_suite_name}.json" ) assert os.path.isfile(expected_suite_path) expected_notebook_path: str = os.path.join( uncommitted_dir, f"edit_{expectation_suite_name}.ipynb" ) assert os.path.isfile(expected_notebook_path) run_notebook( notebook_path=expected_notebook_path, notebook_dir=uncommitted_dir, string_to_be_replaced="context.open_data_docs(resource_identifier=suite_identifier)", replacement_string="", ) context = DataContext(context_root_dir=project_dir) assert expectation_suite_name in context.list_expectation_suite_names() suite: ExpectationSuite = context.get_expectation_suite( expectation_suite_name=expectation_suite_name ) assert suite.expectations == [] assert mock_subprocess.call_count == 1 call_args: List[str] = mock_subprocess.call_args[0][0] assert call_args[0] == "jupyter" assert call_args[1] == "notebook" assert expected_notebook_path in call_args[2] assert mock_webbroser.call_count == 0 assert mock_emit.call_count == 4 assert mock_emit.call_args_list == [ mock.call( {"event_payload": {}, "event": "data_context.__init__", "success": True} ), mock.call( { "event": "cli.suite.new.begin", "event_payload": {"api_version": "v3"}, "success": True, } ), mock.call( { "event": "cli.suite.new.end", "event_payload": {"api_version": "v3"}, "success": True, } ), mock.call( {"event_payload": {}, "event": "data_context.__init__", "success": True} ), ] assert_no_logging_messages_or_tracebacks( my_caplog=caplog, click_result=result, ) @mock.patch( "great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit" ) @mock.patch("subprocess.call", return_value=True, side_effect=None) @mock.patch("webbrowser.open", return_value=True, side_effect=None) def test_suite_new_non_interactive_with_suite_name_arg_custom_runs_notebook_opens_jupyter( mock_webbroser, mock_subprocess, mock_emit, caplog, monkeypatch, 
titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled, ): context: DataContext = titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled monkeypatch.chdir(os.path.dirname(context.root_directory)) project_dir: str = context.root_directory uncommitted_dir: str = os.path.join(project_dir, "uncommitted") expectation_suite_name: str = "test_suite_name" runner: CliRunner = CliRunner(mix_stderr=False) result: Result = runner.invoke( cli, f"--v3-api suite new --expectation-suite {expectation_suite_name}", catch_exceptions=False, ) assert result.exit_code == 0 stdout: str = result.stdout assert "Select a datasource" not in stdout assert "Opening a notebook for you now to edit your expectation suite!" in stdout assert "If you wish to avoid this you can add the `--no-jupyter` flag." in stdout expected_suite_path: str = os.path.join( project_dir, "expectations", f"{expectation_suite_name}.json" ) assert os.path.isfile(expected_suite_path) expected_notebook_path: str = os.path.join( uncommitted_dir, f"edit_{expectation_suite_name}.ipynb" ) assert os.path.isfile(expected_notebook_path) run_notebook( notebook_path=expected_notebook_path, notebook_dir=uncommitted_dir, string_to_be_replaced="context.open_data_docs(resource_identifier=suite_identifier)", replacement_string="", ) context = DataContext(context_root_dir=project_dir) assert expectation_suite_name in context.list_expectation_suite_names() suite: ExpectationSuite = context.get_expectation_suite( expectation_suite_name=expectation_suite_name ) assert suite.expectations == [] assert mock_subprocess.call_count == 1 call_args: List[str] = mock_subprocess.call_args[0][0] assert call_args[0] == "jupyter" assert call_args[1] == "notebook" assert expected_notebook_path in call_args[2] assert mock_webbroser.call_count == 0 assert mock_emit.call_count == 4 assert mock_emit.call_args_list == [ mock.call( {"event_payload": {}, "event": 
"data_context.__init__", "success": True} ), mock.call( { "event": "cli.suite.new.begin", "event_payload": {"api_version": "v3"}, "success": True, } ), mock.call( { "event": "cli.suite.new.end", "event_payload": {"api_version": "v3"}, "success": True, } ), mock.call( {"event_payload": {}, "event": "data_context.__init__", "success": True} ), ] assert_no_logging_messages_or_tracebacks( my_caplog=caplog, click_result=result, ) @mock.patch( "great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit" ) @mock.patch("subprocess.call", return_value=True, side_effect=None) @mock.patch("webbrowser.open", return_value=True, side_effect=None) def test_suite_new_non_interactive_with_suite_name_arg_custom_runs_notebook_no_jupyter( mock_webbroser, mock_subprocess, mock_emit, caplog, monkeypatch, titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled, ): context: DataContext = titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled monkeypatch.chdir(os.path.dirname(context.root_directory)) project_dir: str = context.root_directory uncommitted_dir: str = os.path.join(project_dir, "uncommitted") expectation_suite_name: str = "test_suite_name" runner: CliRunner = CliRunner(mix_stderr=False) result: Result = runner.invoke( cli, f"--v3-api suite new --expectation-suite {expectation_suite_name} --no-jupyter", catch_exceptions=False, ) assert result.exit_code == 0 stdout: str = result.stdout assert "Select a datasource" not in stdout assert ( "Opening a notebook for you now to edit your expectation suite!" not in stdout ) assert ( "If you wish to avoid this you can add the `--no-jupyter` flag." 
not in stdout ) expected_suite_path: str = os.path.join( project_dir, "expectations", f"{expectation_suite_name}.json" ) assert os.path.isfile(expected_suite_path) expected_notebook_path: str = os.path.join( uncommitted_dir, f"edit_{expectation_suite_name}.ipynb" ) assert os.path.isfile(expected_notebook_path) run_notebook( notebook_path=expected_notebook_path, notebook_dir=uncommitted_dir, string_to_be_replaced="context.open_data_docs(resource_identifier=suite_identifier)", replacement_string="", ) context = DataContext(context_root_dir=project_dir) assert expectation_suite_name in context.list_expectation_suite_names() suite: ExpectationSuite = context.get_expectation_suite( expectation_suite_name=expectation_suite_name ) assert suite.expectations == [] assert mock_subprocess.call_count == 0 assert mock_webbroser.call_count == 0 assert mock_emit.call_count == 4 assert mock_emit.call_args_list == [ mock.call( {"event_payload": {}, "event": "data_context.__init__", "success": True} ), mock.call( { "event": "cli.suite.new.begin", "event_payload": {"api_version": "v3"}, "success": True, } ), mock.call( { "event": "cli.suite.new.end", "event_payload": {"api_version": "v3"}, "success": True, } ), mock.call( {"event_payload": {}, "event": "data_context.__init__", "success": True} ), ] assert_no_logging_messages_or_tracebacks( my_caplog=caplog, click_result=result, ) @mock.patch( "great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit" ) @mock.patch("subprocess.call", return_value=True, side_effect=None) @mock.patch("webbrowser.open", return_value=True, side_effect=None) def test_suite_new_interactive_nonexistent_batch_request_json_file_raises_error( mock_webbroser, mock_subprocess, mock_emit, caplog, monkeypatch, titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled, ): context: DataContext = titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled 
monkeypatch.chdir(os.path.dirname(context.root_directory)) project_dir: str = context.root_directory expectation_suite_name: str = "test_suite_name" runner: CliRunner = CliRunner(mix_stderr=False) result: Result = runner.invoke( cli, f"""--v3-api suite new --expectation-suite {expectation_suite_name} --interactive --batch-request nonexistent_file.json --no-jupyter """, catch_exceptions=False, ) assert result.exit_code == 1 stdout: str = result.stdout assert 'The JSON file with the path "nonexistent_file.json' in stdout context = DataContext(context_root_dir=project_dir) assert expectation_suite_name not in context.list_expectation_suite_names() assert mock_subprocess.call_count == 0 assert mock_webbroser.call_count == 0 assert mock_emit.call_count == 4 assert mock_emit.call_args_list == [ mock.call( {"event_payload": {}, "event": "data_context.__init__", "success": True} ), mock.call( { "event": "cli.suite.new.begin", "event_payload": {"api_version": "v3"}, "success": True, } ), mock.call( { "event": "cli.suite.new.end", "event_payload": {"api_version": "v3"}, "success": False, } ), mock.call( {"event_payload": {}, "event": "data_context.__init__", "success": True} ), ] assert_no_logging_messages_or_tracebacks( my_caplog=caplog, click_result=result, ) @mock.patch( "great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit" ) @mock.patch("subprocess.call", return_value=True, side_effect=None) @mock.patch("webbrowser.open", return_value=True, side_effect=None) def test_suite_new_interactive_malformed_batch_request_json_file_raises_error( mock_webbroser, mock_subprocess, mock_emit, caplog, monkeypatch, titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled, ): context: DataContext = titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled monkeypatch.chdir(os.path.dirname(context.root_directory)) project_dir: str = context.root_directory uncommitted_dir: 
str = os.path.join(project_dir, "uncommitted") expectation_suite_name: str = "test_suite_name" batch_request_file_path: str = os.path.join(uncommitted_dir, f"batch_request.json") with open(batch_request_file_path, "w") as json_file: json_file.write("not_proper_json") runner: CliRunner = CliRunner(mix_stderr=False) result: Result = runner.invoke( cli, f"""--v3-api suite new --expectation-suite {expectation_suite_name} --interactive --batch-request {batch_request_file_path} --no-jupyter """, catch_exceptions=False, ) assert result.exit_code == 1 stdout: str = result.stdout assert "Error" in stdout assert "occurred while attempting to load the JSON file with the path" in stdout context = DataContext(context_root_dir=project_dir) assert expectation_suite_name not in context.list_expectation_suite_names() assert mock_subprocess.call_count == 0 assert mock_webbroser.call_count == 0 assert mock_emit.call_count == 4 assert mock_emit.call_args_list == [ mock.call( {"event_payload": {}, "event": "data_context.__init__", "success": True} ), mock.call( { "event": "cli.suite.new.begin", "event_payload": {"api_version": "v3"}, "success": True, } ), mock.call( { "event": "cli.suite.new.end", "event_payload": {"api_version": "v3"}, "success": False, } ), mock.call( {"event_payload": {}, "event": "data_context.__init__", "success": True} ), ] assert_no_logging_messages_or_tracebacks( my_caplog=caplog, click_result=result, ) @mock.patch( "great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit" ) @mock.patch("subprocess.call", return_value=True, side_effect=None) @mock.patch("webbrowser.open", return_value=True, side_effect=None) def test_suite_new_interactive_valid_batch_request_from_json_file_in_notebook_runs_notebook_no_jupyter( mock_webbroser, mock_subprocess, mock_emit, caplog, monkeypatch, titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled, ): context: DataContext = 
titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled monkeypatch.chdir(os.path.dirname(context.root_directory)) project_dir: str = context.root_directory uncommitted_dir: str = os.path.join(project_dir, "uncommitted") expectation_suite_name: str = "test_suite_name" batch_request: dict = { "datasource_name": "my_datasource", "data_connector_name": "my_basic_data_connector", "data_asset_name": "Titanic_1911", } batch_request_file_path: str = os.path.join(uncommitted_dir, f"batch_request.json") with open(batch_request_file_path, "w") as json_file: json.dump(batch_request, json_file) runner: CliRunner = CliRunner(mix_stderr=False) result: Result = runner.invoke( cli, f"""--v3-api suite new --expectation-suite {expectation_suite_name} --interactive --batch-request {batch_request_file_path} --no-jupyter """, catch_exceptions=False, ) assert result.exit_code == 0 stdout: str = result.stdout assert "Error" not in stdout expected_suite_path: str = os.path.join( project_dir, "expectations", f"{expectation_suite_name}.json" ) assert os.path.isfile(expected_suite_path) expected_notebook_path: str = os.path.join( uncommitted_dir, f"edit_{expectation_suite_name}.ipynb" ) assert os.path.isfile(expected_notebook_path) batch_request_string: str = ( str(BatchRequest(**batch_request)) .replace("{\n", "{\n ") .replace(",\n", ",\n ") .replace("\n}", ",\n}") ) batch_request_string = fr"batch_request = {batch_request_string}" cells_of_interest_dict: Dict[int, dict] = find_code_in_notebook( nb=load_notebook_from_path(notebook_path=expected_notebook_path), search_string=batch_request_string, ) assert len(cells_of_interest_dict) == 1 cells_of_interest_dict: Dict[int, dict] = find_code_in_notebook( nb=load_notebook_from_path(notebook_path=expected_notebook_path), search_string="context.open_data_docs(resource_identifier=suite_identifier)", ) assert not cells_of_interest_dict cells_of_interest_dict: Dict[int, dict] = find_code_in_notebook( 
nb=load_notebook_from_path(notebook_path=expected_notebook_path), search_string="context.open_data_docs(resource_identifier=validation_result_identifier)", ) assert len(cells_of_interest_dict) == 1 run_notebook( notebook_path=expected_notebook_path, notebook_dir=uncommitted_dir, string_to_be_replaced="context.open_data_docs(resource_identifier=validation_result_identifier)", replacement_string="", ) context = DataContext(context_root_dir=project_dir) assert expectation_suite_name in context.list_expectation_suite_names() suite: ExpectationSuite = context.get_expectation_suite( expectation_suite_name=expectation_suite_name ) assert suite.expectations == [] assert mock_subprocess.call_count == 0 assert mock_webbroser.call_count == 0 assert mock_emit.call_count == 5 assert mock_emit.call_args_list == [ mock.call( {"event_payload": {}, "event": "data_context.__init__", "success": True} ), mock.call( { "event": "cli.suite.new.begin", "event_payload": {"api_version": "v3"}, "success": True, } ), mock.call( { "event_payload": { "anonymized_expectation_suite_name": "9df638a13b727807e51b13ec1839bcbe" }, "event": "data_context.save_expectation_suite", "success": True, } ), mock.call( { "event": "cli.suite.new.end", "event_payload": {"api_version": "v3"}, "success": True, } ), mock.call( {"event_payload": {}, "event": "data_context.__init__", "success": True} ), ] assert_no_logging_messages_or_tracebacks( my_caplog=caplog, click_result=result, ) @mock.patch( "great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit" ) def test_suite_edit_without_suite_name_raises_error( mock_emit, monkeypatch, empty_data_context_stats_enabled, ): """This is really only testing click missing arguments""" context: DataContext = empty_data_context_stats_enabled monkeypatch.chdir(os.path.dirname(context.root_directory)) runner: CliRunner = CliRunner(mix_stderr=False) result: Result = runner.invoke(cli, "--v3-api suite edit", catch_exceptions=False) assert 
result.exit_code == 2 assert ( 'Error: Missing argument "EXPECTATION_SUITE".' in result.stderr or "Error: Missing argument 'EXPECTATION_SUITE'." in result.stderr ) assert mock_emit.call_count == 2 assert mock_emit.call_args_list == [ mock.call( {"event_payload": {}, "event": "data_context.__init__", "success": True} ), mock.call( { "event": "cli.suite.edit.begin", "event_payload": {"api_version": "v3"}, "success": True, } ), ] @mock.patch( "great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit" ) def test_suite_edit_datasource_and_batch_request_error( mock_emit, monkeypatch, empty_data_context_stats_enabled, ): """This is really only testing click missing arguments""" context: DataContext = empty_data_context_stats_enabled monkeypatch.chdir(os.path.dirname(context.root_directory)) expectation_suite_name: str = "test_suite_name" # noinspection PyUnusedLocal suite: ExpectationSuite = context.create_expectation_suite( expectation_suite_name=expectation_suite_name ) assert ( context.list_expectation_suites()[0].expectation_suite_name == expectation_suite_name ) runner: CliRunner = CliRunner(mix_stderr=False) result: Result = runner.invoke( cli, f"--v3-api suite edit {expectation_suite_name} --datasource-name some_datasource_name --batch-request some_file.json --interactive", catch_exceptions=False, ) assert result.exit_code == 1 stdout: str = result.stdout assert ( "Only one of --datasource-name DATASOURCE_NAME and --batch-request <path to JSON file> options can be used." 
in stdout ) assert mock_emit.call_count == 3 assert mock_emit.call_args_list == [ mock.call( {"event_payload": {}, "event": "data_context.__init__", "success": True} ), mock.call( { "event": "cli.suite.edit.begin", "event_payload": {"api_version": "v3"}, "success": True, } ), mock.call( { "event": "cli.suite.edit.end", "event_payload": {"api_version": "v3"}, "success": False, } ), ] @mock.patch( "great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit" ) @mock.patch("subprocess.call", return_value=True, side_effect=None) @mock.patch("webbrowser.open", return_value=True, side_effect=None) def test_suite_edit_with_non_existent_suite_name_raises_error( mock_webbrowser, mock_subprocess, mock_emit, caplog, monkeypatch, empty_data_context_stats_enabled, ): """ The command should: - exit with a clear error message - NOT open Data Docs - NOT open jupyter """ context: DataContext = empty_data_context_stats_enabled assert not context.list_expectation_suites() monkeypatch.chdir(os.path.dirname(context.root_directory)) runner: CliRunner = CliRunner(mix_stderr=False) result: Result = runner.invoke( cli, f"--v3-api suite edit not_a_real_suite", catch_exceptions=False, ) assert result.exit_code == 1 stdout: str = result.stdout assert "Could not find a suite named `not_a_real_suite`." 
in stdout assert "by running `great_expectations suite list`" in stdout assert mock_subprocess.call_count == 0 assert mock_webbrowser.call_count == 0 assert mock_emit.call_count == 3 assert mock_emit.call_args_list == [ mock.call( {"event_payload": {}, "event": "data_context.__init__", "success": True} ), mock.call( { "event": "cli.suite.edit.begin", "event_payload": {"api_version": "v3"}, "success": True, } ), mock.call( { "event": "cli.suite.edit.end", "event_payload": {"api_version": "v3"}, "success": False, } ), ] assert_no_logging_messages_or_tracebacks( my_caplog=caplog, click_result=result, ) @mock.patch( "great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit" ) @mock.patch("subprocess.call", return_value=True, side_effect=None) @mock.patch("webbrowser.open", return_value=True, side_effect=None) def test_suite_edit_with_non_existent_datasource_shows_helpful_error_message( mock_webbrowser, mock_subprocess, mock_emit, caplog, monkeypatch, empty_data_context_stats_enabled, ): """ The command should: - exit with a clear error message - NOT open Data Docs - NOT open jupyter """ context: DataContext = empty_data_context_stats_enabled monkeypatch.chdir(os.path.dirname(context.root_directory)) expectation_suite_name: str = "test_suite_name" # noinspection PyUnusedLocal suite: ExpectationSuite = context.create_expectation_suite( expectation_suite_name=expectation_suite_name ) assert ( context.list_expectation_suites()[0].expectation_suite_name == expectation_suite_name ) runner: CliRunner = CliRunner(mix_stderr=False) result: Result = runner.invoke( cli, f"--v3-api suite edit {expectation_suite_name} --interactive --datasource-name not_real", catch_exceptions=False, ) assert result.exit_code == 1 stdout: str = result.stdout assert ( "Unable to load datasource `not_real` -- no configuration found or invalid configuration." 
in stdout ) assert mock_subprocess.call_count == 0 assert mock_webbrowser.call_count == 0 assert mock_emit.call_count == 3 assert mock_emit.call_args_list == [ mock.call( {"event_payload": {}, "event": "data_context.__init__", "success": True} ), mock.call( { "event": "cli.suite.edit.begin", "event_payload": {"api_version": "v3"}, "success": True, } ), mock.call( { "event": "cli.suite.edit.end", "event_payload": {"api_version": "v3"}, "success": False, } ), ] assert_no_logging_messages_or_tracebacks( my_caplog=caplog, click_result=result, ) @mock.patch( "great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit" ) @mock.patch("subprocess.call", return_value=True, side_effect=None) @mock.patch("webbrowser.open", return_value=True, side_effect=None) def test_suite_edit_multiple_datasources_with_no_additional_args_without_citations_runs_notebook_opens_jupyter( mock_webbrowser, mock_subprocess, mock_emit, caplog, monkeypatch, titanic_v013_multi_datasource_pandas_data_context_with_checkpoints_v1_with_empty_store_stats_enabled, ): """ Here we verify that the "suite edit" command helps the user to specify batch_request when it is called without the optional command-line arguments that specify the batch. First, we call the "suite new" command to create the expectation suite our test will edit -- this step is a just a setup. We then call the "suite edit" command without any optional command-line arguments. This means that the command will help us specify batch_request interactively. The data context has two datasources -- we choose one of them. After that, we select a data connector and, finally, select a data asset from the list. 
The command should:
    - NOT open Data Docs
    - open jupyter
    """
    context: DataContext = titanic_v013_multi_datasource_pandas_data_context_with_checkpoints_v1_with_empty_store_stats_enabled
    monkeypatch.chdir(os.path.dirname(context.root_directory))

    project_dir: str = context.root_directory
    uncommitted_dir: str = os.path.join(project_dir, "uncommitted")

    expectation_suite_name: str = "test_suite_name"

    batch_request: dict = {
        "datasource_name": "my_datasource",
        "data_connector_name": "my_basic_data_connector",
        "data_asset_name": "Titanic_1911",
        "limit": 1000,
    }

    # Render the batch request exactly as the generated notebook would print it,
    # so we can grep the notebook for this snippet below.
    batch_request_string: str = (
        str(BatchRequest(**batch_request))
        .replace("{\n", "{\n ")
        .replace(",\n", ",\n ")
        .replace("\n}", ",\n}")
    )
    batch_request_string = fr"batch_request = {batch_request_string}"

    # Setup: "suite new" creates the suite we will later edit.
    runner: CliRunner = CliRunner(mix_stderr=False)
    result: Result = runner.invoke(
        cli,
        [
            "--v3-api",
            "suite",
            "new",
            "--expectation-suite",
            f"{expectation_suite_name}",
            "--interactive",
            "--no-jupyter",
        ],
        input="2\n1\n1\n\n",
        catch_exceptions=False,
    )
    assert result.exit_code == 0

    stdout: str = result.stdout
    assert "A batch of data is required to edit the suite" in stdout
    assert "Select a datasource" in stdout

    assert mock_webbrowser.call_count == 0
    mock_webbrowser.reset_mock()
    assert mock_subprocess.call_count == 0
    mock_subprocess.reset_mock()

    # remove the citations from the suite
    context = DataContext(context_root_dir=project_dir)
    suite: ExpectationSuite = context.get_expectation_suite(
        expectation_suite_name=expectation_suite_name
    )
    assert isinstance(suite, ExpectationSuite)
    suite.meta.pop("citations", None)
    context.save_expectation_suite(expectation_suite=suite)

    # Actual testing really starts here
    runner = CliRunner(mix_stderr=False)
    monkeypatch.chdir(os.path.dirname(context.root_directory))
    # No citations remain, so "suite edit" must fall back to interactive
    # batch selection (input picks datasource 2, connector 1, asset 1).
    result = runner.invoke(
        cli,
        [
            "--v3-api",
            "suite",
            "edit",
            f"{expectation_suite_name}",
            "--interactive",
        ],
        input="2\n1\n1\n\n",
        catch_exceptions=False,
    )
    assert result.exit_code == 0

    stdout = result.stdout
    assert "A batch of data is required to edit the suite" in stdout
    assert "Select a datasource" in stdout

    expected_notebook_path: str = os.path.join(
        uncommitted_dir, f"edit_{expectation_suite_name}.ipynb"
    )
    assert os.path.isfile(expected_notebook_path)

    expected_suite_path: str = os.path.join(
        project_dir, "expectations", f"{expectation_suite_name}.json"
    )
    assert os.path.isfile(expected_suite_path)

    # The generated notebook must embed the selected batch_request ...
    cells_of_interest_dict: Dict[int, dict] = find_code_in_notebook(
        nb=load_notebook_from_path(notebook_path=expected_notebook_path),
        search_string=batch_request_string,
    )
    assert len(cells_of_interest_dict) == 1

    # ... must NOT open Data Docs for the suite ...
    cells_of_interest_dict: Dict[int, dict] = find_code_in_notebook(
        nb=load_notebook_from_path(notebook_path=expected_notebook_path),
        search_string="context.open_data_docs(resource_identifier=suite_identifier)",
    )
    assert not cells_of_interest_dict

    # ... and must open Data Docs for the validation result.
    cells_of_interest_dict: Dict[int, dict] = find_code_in_notebook(
        nb=load_notebook_from_path(notebook_path=expected_notebook_path),
        search_string="context.open_data_docs(resource_identifier=validation_result_identifier)",
    )
    assert len(cells_of_interest_dict) == 1

    run_notebook(
        notebook_path=expected_notebook_path,
        notebook_dir=uncommitted_dir,
        string_to_be_replaced="context.open_data_docs(resource_identifier=validation_result_identifier)",
        replacement_string="",
    )

    # jupyter opened exactly once; the browser (Data Docs) never.
    assert mock_subprocess.call_count == 1
    assert mock_webbrowser.call_count == 0

    assert mock_emit.call_count == 10
    assert mock_emit.call_args_list == [
        mock.call(
            {"event_payload": {}, "event": "data_context.__init__", "success": True}
        ),
        mock.call(
            {
                "event": "cli.suite.new.begin",
                "event_payload": {"api_version": "v3"},
                "success": True,
            }
        ),
        mock.call(
            {
                "event_payload": {
                    "anonymized_expectation_suite_name": "9df638a13b727807e51b13ec1839bcbe"
                },
                "event": "data_context.save_expectation_suite",
                "success": True,
            }
        ),
        mock.call(
            {
                "event": "cli.suite.new.end",
                "event_payload": {"api_version": "v3"},
                "success": True,
            }
        ),
        mock.call(
            {"event_payload": {}, "event": "data_context.__init__", "success": True}
        ),
        mock.call(
            {
                "event_payload": {
                    "anonymized_expectation_suite_name": "9df638a13b727807e51b13ec1839bcbe"
                },
                "event": "data_context.save_expectation_suite",
                "success": True,
            }
        ),
        mock.call(
            {"event_payload": {}, "event": "data_context.__init__", "success": True}
        ),
        mock.call(
            {
                "event": "cli.suite.edit.begin",
                "event_payload": {"api_version": "v3"},
                "success": True,
            }
        ),
        mock.call(
            {
                "event_payload": {
                    "anonymized_expectation_suite_name": "9df638a13b727807e51b13ec1839bcbe"
                },
                "event": "data_context.save_expectation_suite",
                "success": True,
            }
        ),
        mock.call(
            {
                "event": "cli.suite.edit.end",
                "event_payload": {
                    "anonymized_expectation_suite_name": "9df638a13b727807e51b13ec1839bcbe",
                    "api_version": "v3",
                },
                "success": True,
            }
        ),
    ]

    assert_no_logging_messages_or_tracebacks(
        my_caplog=caplog,
        click_result=result,
    )


@mock.patch(
    "great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit"
)
@mock.patch("subprocess.call", return_value=True, side_effect=None)
@mock.patch("webbrowser.open", return_value=True, side_effect=None)
def test_suite_edit_multiple_datasources_with_no_additional_args_with_citations_runs_notebook_opens_jupyter(
    mock_webbrowser,
    mock_subprocess,
    mock_emit,
    caplog,
    monkeypatch,
    titanic_v013_multi_datasource_pandas_data_context_with_checkpoints_v1_with_empty_store_stats_enabled,
):
    """
    Here we verify that the "suite edit" command uses the batch_request found in
    citations in the existing suite when it is called without the optional
    command-line arguments that specify the batch.

    First, we call the "suite new" command to create the expectation suite our test
    will edit -- this step is a just a setup.

    We then call the "suite edit" command without any optional command-line-arguments.
The command should:
    - NOT open Data Docs
    - NOT open jupyter
    """
    context: DataContext = titanic_v013_multi_datasource_pandas_data_context_with_checkpoints_v1_with_empty_store_stats_enabled
    monkeypatch.chdir(os.path.dirname(context.root_directory))

    project_dir: str = context.root_directory
    uncommitted_dir: str = os.path.join(project_dir, "uncommitted")

    expectation_suite_name: str = "test_suite_name"

    batch_request: dict = {
        "datasource_name": "my_datasource",
        "data_connector_name": "my_basic_data_connector",
        "data_asset_name": "Titanic_1911",
        "limit": 1000,
    }

    # Render the batch request exactly as the generated notebook prints it.
    batch_request_string: str = (
        str(BatchRequest(**batch_request))
        .replace("{\n", "{\n ")
        .replace(",\n", ",\n ")
        .replace("\n}", ",\n}")
    )
    batch_request_string = fr"batch_request = {batch_request_string}"

    # Setup: "suite new" creates the suite; the chosen batch is recorded
    # as a citation in the suite's meta.
    runner: CliRunner = CliRunner(mix_stderr=False)
    result: Result = runner.invoke(
        cli,
        [
            "--v3-api",
            "suite",
            "new",
            "--expectation-suite",
            f"{expectation_suite_name}",
            "--interactive",
            "--no-jupyter",
        ],
        input="2\n1\n1\n\n",
        catch_exceptions=False,
    )
    assert result.exit_code == 0

    stdout: str = result.stdout
    assert "A batch of data is required to edit the suite" in stdout
    assert "Select a datasource" in stdout

    assert mock_webbrowser.call_count == 0
    mock_webbrowser.reset_mock()
    assert mock_subprocess.call_count == 0
    mock_subprocess.reset_mock()

    # Citations are intentionally left in place this time.
    context = DataContext(context_root_dir=project_dir)
    suite: ExpectationSuite = context.get_expectation_suite(
        expectation_suite_name=expectation_suite_name
    )
    assert isinstance(suite, ExpectationSuite)

    # Actual testing really starts here
    runner = CliRunner(mix_stderr=False)
    monkeypatch.chdir(os.path.dirname(context.root_directory))
    # No input supplied: the citation's batch_request must be reused, so
    # no interactive prompting should occur.
    result = runner.invoke(
        cli,
        [
            "--v3-api",
            "suite",
            "edit",
            f"{expectation_suite_name}",
            "--interactive",
        ],
        catch_exceptions=False,
    )
    assert result.exit_code == 0

    stdout = result.stdout
    assert "A batch of data is required to edit the suite" not in stdout
    assert "Select a datasource" not in stdout

    expected_notebook_path: str = os.path.join(
        uncommitted_dir, f"edit_{expectation_suite_name}.ipynb"
    )
    assert os.path.isfile(expected_notebook_path)

    expected_suite_path: str = os.path.join(
        project_dir, "expectations", f"{expectation_suite_name}.json"
    )
    assert os.path.isfile(expected_suite_path)

    cells_of_interest_dict: Dict[int, dict] = find_code_in_notebook(
        nb=load_notebook_from_path(notebook_path=expected_notebook_path),
        search_string=batch_request_string,
    )
    assert len(cells_of_interest_dict) == 1

    cells_of_interest_dict: Dict[int, dict] = find_code_in_notebook(
        nb=load_notebook_from_path(notebook_path=expected_notebook_path),
        search_string="context.open_data_docs(resource_identifier=suite_identifier)",
    )
    assert not cells_of_interest_dict

    cells_of_interest_dict: Dict[int, dict] = find_code_in_notebook(
        nb=load_notebook_from_path(notebook_path=expected_notebook_path),
        search_string="context.open_data_docs(resource_identifier=validation_result_identifier)",
    )
    assert len(cells_of_interest_dict) == 1

    run_notebook(
        notebook_path=expected_notebook_path,
        notebook_dir=uncommitted_dir,
        string_to_be_replaced="context.open_data_docs(resource_identifier=validation_result_identifier)",
        replacement_string="",
    )

    assert mock_subprocess.call_count == 1
    assert mock_webbrowser.call_count == 0

    # Only 8 events here (vs. 10 in the no-citations variant): the edit
    # path does not re-save the suite before generating the notebook.
    assert mock_emit.call_count == 8
    assert mock_emit.call_args_list == [
        mock.call(
            {"event_payload": {}, "event": "data_context.__init__", "success": True}
        ),
        mock.call(
            {
                "event": "cli.suite.new.begin",
                "event_payload": {"api_version": "v3"},
                "success": True,
            }
        ),
        mock.call(
            {
                "event_payload": {
                    "anonymized_expectation_suite_name": "9df638a13b727807e51b13ec1839bcbe"
                },
                "event": "data_context.save_expectation_suite",
                "success": True,
            }
        ),
        mock.call(
            {
                "event": "cli.suite.new.end",
                "event_payload": {"api_version": "v3"},
                "success": True,
            }
        ),
        mock.call(
            {"event_payload": {}, "event": "data_context.__init__", "success": True}
        ),
        mock.call(
            {"event_payload": {}, "event": "data_context.__init__", "success": True}
        ),
        mock.call(
            {
                "event": "cli.suite.edit.begin",
                "event_payload": {"api_version": "v3"},
                "success": True,
            }
        ),
        mock.call(
            {
                "event": "cli.suite.edit.end",
                "event_payload": {
                    "anonymized_expectation_suite_name": "9df638a13b727807e51b13ec1839bcbe",
                    "api_version": "v3",
                },
                "success": True,
            }
        ),
    ]

    assert_no_logging_messages_or_tracebacks(
        my_caplog=caplog,
        click_result=result,
    )


@mock.patch(
    "great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit"
)
@mock.patch("subprocess.call", return_value=True, side_effect=None)
@mock.patch("webbrowser.open", return_value=True, side_effect=None)
def test_suite_edit_multiple_datasources_with_sql_with_no_additional_args_without_citations_runs_notebook_opens_jupyter(
    mock_webbrowser,
    mock_subprocess,
    mock_emit,
    caplog,
    monkeypatch,
    titanic_v013_multi_datasource_multi_execution_engine_data_context_with_checkpoints_v1_with_empty_store_stats_enabled,
):
    """
    Here we verify that the "suite edit" command helps the user to specify batch_request
    when it is called without the optional command-line arguments that specify the batch.

    First, we call the "suite new" command to create the expectation suite our test
    will edit -- this step is a just a setup (we use an SQL datasource for this test).

    We then call the "suite edit" command without any optional command-line arguments.
    This means that the command will help us specify batch_request interactively.

    The data context has two datasources -- we choose one of them.  After that, we select
    a data connector and, finally, select a data asset from the list.
The command should:
    - NOT open Data Docs
    - open jupyter
    """
    context: DataContext = titanic_v013_multi_datasource_multi_execution_engine_data_context_with_checkpoints_v1_with_empty_store_stats_enabled
    monkeypatch.chdir(os.path.dirname(context.root_directory))

    project_dir: str = context.root_directory
    uncommitted_dir: str = os.path.join(project_dir, "uncommitted")

    expectation_suite_name: str = "test_suite_name"

    batch_request: dict = {
        "datasource_name": "my_sqlite_db_datasource",
        "data_connector_name": "whole_table",
        "data_asset_name": "titanic",
        "limit": 1000,
    }

    # Render the batch request exactly as the generated notebook prints it.
    batch_request_string: str = (
        str(BatchRequest(**batch_request))
        .replace("{\n", "{\n ")
        .replace(",\n", ",\n ")
        .replace("\n}", ",\n}")
    )
    batch_request_string = fr"batch_request = {batch_request_string}"

    # Setup: "suite new" creates the suite (input selects the SQL
    # datasource, connector, and table).
    runner: CliRunner = CliRunner(mix_stderr=False)
    result: Result = runner.invoke(
        cli,
        [
            "--v3-api",
            "suite",
            "new",
            "--expectation-suite",
            f"{expectation_suite_name}",
            "--interactive",
            "--no-jupyter",
        ],
        input="3\n2\ny\n2\n",
        catch_exceptions=False,
    )
    assert result.exit_code == 0

    stdout: str = result.stdout
    assert "A batch of data is required to edit the suite" in stdout
    assert "Select a datasource" in stdout

    assert mock_webbrowser.call_count == 0
    mock_webbrowser.reset_mock()
    assert mock_subprocess.call_count == 0
    mock_subprocess.reset_mock()

    # remove the citations from the suite
    context = DataContext(context_root_dir=project_dir)
    suite: ExpectationSuite = context.get_expectation_suite(
        expectation_suite_name=expectation_suite_name
    )
    assert isinstance(suite, ExpectationSuite)
    suite.meta.pop("citations", None)
    context.save_expectation_suite(expectation_suite=suite)

    # Actual testing really starts here
    runner = CliRunner(mix_stderr=False)
    monkeypatch.chdir(os.path.dirname(context.root_directory))
    # No citations remain, so "suite edit" must prompt interactively again.
    result = runner.invoke(
        cli,
        [
            "--v3-api",
            "suite",
            "edit",
            f"{expectation_suite_name}",
            "--interactive",
        ],
        input="3\n2\ny\n2\n",
        catch_exceptions=False,
    )
    assert result.exit_code == 0

    stdout = result.stdout
    assert "A batch of data is required to edit the suite" in stdout
    assert "Select a datasource" in stdout

    expected_notebook_path: str = os.path.join(
        uncommitted_dir, f"edit_{expectation_suite_name}.ipynb"
    )
    assert os.path.isfile(expected_notebook_path)

    expected_suite_path: str = os.path.join(
        project_dir, "expectations", f"{expectation_suite_name}.json"
    )
    assert os.path.isfile(expected_suite_path)

    cells_of_interest_dict: Dict[int, dict] = find_code_in_notebook(
        nb=load_notebook_from_path(notebook_path=expected_notebook_path),
        search_string=batch_request_string,
    )
    assert len(cells_of_interest_dict) == 1

    cells_of_interest_dict: Dict[int, dict] = find_code_in_notebook(
        nb=load_notebook_from_path(notebook_path=expected_notebook_path),
        search_string="context.open_data_docs(resource_identifier=suite_identifier)",
    )
    assert not cells_of_interest_dict

    cells_of_interest_dict: Dict[int, dict] = find_code_in_notebook(
        nb=load_notebook_from_path(notebook_path=expected_notebook_path),
        search_string="context.open_data_docs(resource_identifier=validation_result_identifier)",
    )
    assert len(cells_of_interest_dict) == 1

    run_notebook(
        notebook_path=expected_notebook_path,
        notebook_dir=uncommitted_dir,
        string_to_be_replaced="context.open_data_docs(resource_identifier=validation_result_identifier)",
        replacement_string="",
    )

    # jupyter opened exactly once; Data Docs never.
    assert mock_subprocess.call_count == 1
    assert mock_webbrowser.call_count == 0

    assert mock_emit.call_count == 10
    assert mock_emit.call_args_list == [
        mock.call(
            {"event_payload": {}, "event": "data_context.__init__", "success": True}
        ),
        mock.call(
            {
                "event": "cli.suite.new.begin",
                "event_payload": {"api_version": "v3"},
                "success": True,
            }
        ),
        mock.call(
            {
                "event_payload": {
                    "anonymized_expectation_suite_name": "9df638a13b727807e51b13ec1839bcbe"
                },
                "event": "data_context.save_expectation_suite",
                "success": True,
            }
        ),
        mock.call(
            {
                "event": "cli.suite.new.end",
                "event_payload": {"api_version": "v3"},
                "success": True,
            }
        ),
        mock.call(
            {"event_payload": {}, "event": "data_context.__init__", "success": True}
        ),
        mock.call(
            {
                "event_payload": {
                    "anonymized_expectation_suite_name": "9df638a13b727807e51b13ec1839bcbe"
                },
                "event": "data_context.save_expectation_suite",
                "success": True,
            }
        ),
        mock.call(
            {"event_payload": {}, "event": "data_context.__init__", "success": True}
        ),
        mock.call(
            {
                "event": "cli.suite.edit.begin",
                "event_payload": {"api_version": "v3"},
                "success": True,
            }
        ),
        mock.call(
            {
                "event_payload": {
                    "anonymized_expectation_suite_name": "9df638a13b727807e51b13ec1839bcbe"
                },
                "event": "data_context.save_expectation_suite",
                "success": True,
            }
        ),
        mock.call(
            {
                "event": "cli.suite.edit.end",
                "event_payload": {
                    "anonymized_expectation_suite_name": "9df638a13b727807e51b13ec1839bcbe",
                    "api_version": "v3",
                },
                "success": True,
            }
        ),
    ]

    assert_no_logging_messages_or_tracebacks(
        my_caplog=caplog,
        click_result=result,
    )


@mock.patch(
    "great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit"
)
@mock.patch("subprocess.call", return_value=True, side_effect=None)
@mock.patch("webbrowser.open", return_value=True, side_effect=None)
def test_suite_edit_multiple_datasources_with_sql_with_no_additional_args_with_citations_runs_notebook_opens_jupyter(
    mock_webbrowser,
    mock_subprocess,
    mock_emit,
    caplog,
    monkeypatch,
    titanic_v013_multi_datasource_multi_execution_engine_data_context_with_checkpoints_v1_with_empty_store_stats_enabled,
):
    """
    Here we verify that the "suite edit" command uses the batch_request found in
    citations in the existing suite when it is called without the optional
    command-line arguments that specify the batch.

    First, we call the "suite new" command to create the expectation suite our test
    will edit -- this step is a just a setup (we use an SQL datasource for this test).

    We then call the "suite edit" command without any optional command-line-arguments.
The command should: - NOT open Data Docs - NOT open jupyter """ context: DataContext = titanic_v013_multi_datasource_multi_execution_engine_data_context_with_checkpoints_v1_with_empty_store_stats_enabled monkeypatch.chdir(os.path.dirname(context.root_directory)) project_dir: str = context.root_directory uncommitted_dir: str = os.path.join(project_dir, "uncommitted") expectation_suite_name: str = "test_suite_name" batch_request: dict = { "datasource_name": "my_sqlite_db_datasource", "data_connector_name": "whole_table", "data_asset_name": "titanic", "limit": 1000, } batch_request_string: str = ( str(BatchRequest(**batch_request)) .replace("{\n", "{\n ") .replace(",\n", ",\n ") .replace("\n}", ",\n}") ) batch_request_string = fr"batch_request = {batch_request_string}" runner: CliRunner = CliRunner(mix_stderr=False) result: Result = runner.invoke( cli, [ "--v3-api", "suite", "new", "--expectation-suite", f"{expectation_suite_name}", "--interactive", "--no-jupyter", ], input="3\n2\ny\n2\n", catch_exceptions=False, ) assert result.exit_code == 0 stdout: str = result.stdout assert "A batch of data is required to edit the suite" in stdout assert "Select a datasource" in stdout assert mock_webbrowser.call_count == 0 mock_webbrowser.reset_mock() assert mock_subprocess.call_count == 0 mock_subprocess.reset_mock() context = DataContext(context_root_dir=project_dir) suite: ExpectationSuite = context.get_expectation_suite( expectation_suite_name=expectation_suite_name ) assert isinstance(suite, ExpectationSuite) # Actual testing really starts here runner = CliRunner(mix_stderr=False) monkeypatch.chdir(os.path.dirname(context.root_directory)) result = runner.invoke( cli, [ "--v3-api", "suite", "edit", f"{expectation_suite_name}", "--interactive", ], catch_exceptions=False, ) assert result.exit_code == 0 stdout = result.stdout assert "A batch of data is required to edit the suite" not in stdout assert "Select a datasource" not in stdout expected_notebook_path: str = os.path.join( 
uncommitted_dir, f"edit_{expectation_suite_name}.ipynb" ) assert os.path.isfile(expected_notebook_path) expected_suite_path: str = os.path.join( project_dir, "expectations", f"{expectation_suite_name}.json" ) assert os.path.isfile(expected_suite_path) cells_of_interest_dict: Dict[int, dict] = find_code_in_notebook( nb=load_notebook_from_path(notebook_path=expected_notebook_path), search_string=batch_request_string, ) assert len(cells_of_interest_dict) == 1 cells_of_interest_dict: Dict[int, dict] = find_code_in_notebook( nb=load_notebook_from_path(notebook_path=expected_notebook_path), search_string="context.open_data_docs(resource_identifier=suite_identifier)", ) assert not cells_of_interest_dict cells_of_interest_dict: Dict[int, dict] = find_code_in_notebook( nb=load_notebook_from_path(notebook_path=expected_notebook_path), search_string="context.open_data_docs(resource_identifier=validation_result_identifier)", ) assert len(cells_of_interest_dict) == 1 run_notebook( notebook_path=expected_notebook_path, notebook_dir=uncommitted_dir, string_to_be_replaced="context.open_data_docs(resource_identifier=validation_result_identifier)", replacement_string="", ) assert mock_subprocess.call_count == 1 assert mock_webbrowser.call_count == 0 assert mock_emit.call_count == 8 assert mock_emit.call_args_list == [ mock.call( {"event_payload": {}, "event": "data_context.__init__", "success": True} ), mock.call( { "event": "cli.suite.new.begin", "event_payload": {"api_version": "v3"}, "success": True, } ), mock.call( { "event_payload": { "anonymized_expectation_suite_name": "9df638a13b727807e51b13ec1839bcbe" }, "event": "data_context.save_expectation_suite", "success": True, } ), mock.call( { "event": "cli.suite.new.end", "event_payload": {"api_version": "v3"}, "success": True, } ), mock.call( {"event_payload": {}, "event": "data_context.__init__", "success": True} ), mock.call( {"event_payload": {}, "event": "data_context.__init__", "success": True} ), mock.call( { "event": 
"cli.suite.edit.begin", "event_payload": {"api_version": "v3"}, "success": True, } ), mock.call( { "event": "cli.suite.edit.end", "event_payload": { "anonymized_expectation_suite_name": "9df638a13b727807e51b13ec1839bcbe", "api_version": "v3", }, "success": True, } ), ] assert_no_logging_messages_or_tracebacks( my_caplog=caplog, click_result=result, ) @mock.patch( "great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit" ) @mock.patch("subprocess.call", return_value=True, side_effect=None) @mock.patch("webbrowser.open", return_value=True, side_effect=None) def test_suite_edit_interactive_batch_request_without_datasource_json_file_raises_helpful_error( mock_webbroser, mock_subprocess, mock_emit, caplog, monkeypatch, empty_data_context_stats_enabled, ): context: DataContext = empty_data_context_stats_enabled monkeypatch.chdir(os.path.dirname(context.root_directory)) project_dir: str = context.root_directory uncommitted_dir: str = os.path.join(project_dir, "uncommitted") expectation_suite_name: str = "test_suite_name" # noinspection PyUnusedLocal suite: ExpectationSuite = context.create_expectation_suite( expectation_suite_name=expectation_suite_name ) assert ( context.list_expectation_suites()[0].expectation_suite_name == expectation_suite_name ) batch_request: dict = { "data_connector_name": "my_basic_data_connector", "data_asset_name": "Titanic_1911", } batch_request_file_path: str = os.path.join( uncommitted_dir, f"batch_request_missing_datasource.json" ) with open(batch_request_file_path, "w") as json_file: json.dump(batch_request, json_file) runner: CliRunner = CliRunner(mix_stderr=False) result: Result = runner.invoke( cli, f"""--v3-api suite edit {expectation_suite_name} --interactive --batch-request {batch_request_file_path} --no-jupyter """, catch_exceptions=False, ) assert result.exit_code == 1 stdout: str = result.stdout assert "A batch of data is required to edit the suite" not in stdout assert "Select a datasource" not in 
stdout assert ( "Please check that your batch_request is valid and is able to load a batch." in stdout ) assert 'The type of an datasource name must be a string (Python "str").' in stdout assert mock_subprocess.call_count == 0 assert mock_webbroser.call_count == 0 assert mock_emit.call_count == 3 assert mock_emit.call_args_list == [ mock.call( {"event_payload": {}, "event": "data_context.__init__", "success": True} ), mock.call( { "event": "cli.suite.edit.begin", "event_payload": {"api_version": "v3"}, "success": True, } ), mock.call( { "event": "cli.suite.edit.end", "event_payload": {"api_version": "v3"}, "success": False, } ), ] assert_no_logging_messages_or_tracebacks( my_caplog=caplog, click_result=result, ) @mock.patch( "great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit" ) def test_suite_list_with_zero_suites( mock_emit, caplog, monkeypatch, empty_data_context_stats_enabled ): context: DataContext = empty_data_context_stats_enabled config_file_path: str = os.path.join( context.root_directory, "great_expectations.yml" ) assert os.path.exists(config_file_path) monkeypatch.chdir(os.path.dirname(context.root_directory)) runner: CliRunner = CliRunner(mix_stderr=False) result: Result = runner.invoke( cli, f"--v3-api suite list", catch_exceptions=False, ) assert result.exit_code == 0 stdout: str = result.stdout assert "No Expectation Suites found" in stdout assert mock_emit.call_count == 3 assert mock_emit.call_args_list == [ mock.call( {"event_payload": {}, "event": "data_context.__init__", "success": True} ), mock.call( { "event": "cli.suite.list.begin", "event_payload": {"api_version": "v3"}, "success": True, } ), mock.call( { "event": "cli.suite.list.end", "event_payload": {"api_version": "v3"}, "success": True, } ), ] assert_no_logging_messages_or_tracebacks( my_caplog=caplog, click_result=result, ) @mock.patch( "great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit" ) def 
test_suite_list_with_one_suite(
    mock_emit, caplog, monkeypatch, empty_data_context_stats_enabled
):
    context: DataContext = empty_data_context_stats_enabled
    monkeypatch.chdir(os.path.dirname(context.root_directory))
    project_dir: str = context.root_directory
    config_file_path: str = os.path.join(project_dir, "great_expectations.yml")
    assert os.path.exists(config_file_path)

    expectation_suite_dir_name: str = "a_dir"
    expectation_suite_name: str = "test_suite_name"

    # noinspection PyUnusedLocal
    suite: ExpectationSuite = context.create_expectation_suite(
        expectation_suite_name=f"{expectation_suite_dir_name}.{expectation_suite_name}"
    )

    runner: CliRunner = CliRunner(mix_stderr=False)
    result: Result = runner.invoke(
        cli,
        f"--v3-api suite list",
        catch_exceptions=False,
    )
    assert result.exit_code == 0

    stdout: str = result.stdout
    assert "1 Expectation Suite found" in stdout
    assert f"{expectation_suite_dir_name}.{expectation_suite_name}" in stdout

    # Expected usage-stats sequence: context init, then list begin/end.
    assert mock_emit.call_count == 3
    assert mock_emit.call_args_list == [
        mock.call(
            {"event_payload": {}, "event": "data_context.__init__", "success": True}
        ),
        mock.call(
            {
                "event": "cli.suite.list.begin",
                "event_payload": {"api_version": "v3"},
                "success": True,
            }
        ),
        mock.call(
            {
                "event": "cli.suite.list.end",
                "event_payload": {"api_version": "v3"},
                "success": True,
            }
        ),
    ]

    assert_no_logging_messages_or_tracebacks(
        my_caplog=caplog,
        click_result=result,
    )


@mock.patch(
    "great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit"
)
def test_suite_list_with_multiple_suites(
    mock_emit, caplog, monkeypatch, empty_data_context_stats_enabled
):
    """`suite list` enumerates every suite (dotted names included) and emits
    the begin/end usage-stats events."""
    context: DataContext = empty_data_context_stats_enabled
    monkeypatch.chdir(os.path.dirname(context.root_directory))
    project_dir: str = context.root_directory

    # noinspection PyUnusedLocal
    suite_0: ExpectationSuite = context.create_expectation_suite(
        expectation_suite_name="a.warning"
    )
    # noinspection PyUnusedLocal
    suite_1: ExpectationSuite = context.create_expectation_suite(
        expectation_suite_name="b.warning"
    )
    # noinspection PyUnusedLocal
    suite_2: ExpectationSuite = context.create_expectation_suite(
        expectation_suite_name="c.warning"
    )

    config_file_path: str = os.path.join(project_dir, "great_expectations.yml")
    assert os.path.exists(config_file_path)

    runner: CliRunner = CliRunner(mix_stderr=False)
    result: Result = runner.invoke(
        cli,
        f"--v3-api suite list",
        catch_exceptions=False,
    )
    assert result.exit_code == 0

    stdout: str = result.stdout
    assert "3 Expectation Suites found:" in stdout
    assert "a.warning" in stdout
    assert "b.warning" in stdout
    assert "c.warning" in stdout

    assert mock_emit.call_count == 3
    assert mock_emit.call_args_list == [
        mock.call(
            {"event_payload": {}, "event": "data_context.__init__", "success": True}
        ),
        mock.call(
            {
                "event": "cli.suite.list.begin",
                "event_payload": {"api_version": "v3"},
                "success": True,
            }
        ),
        mock.call(
            {
                "event": "cli.suite.list.end",
                "event_payload": {"api_version": "v3"},
                "success": True,
            }
        ),
    ]

    assert_no_logging_messages_or_tracebacks(
        my_caplog=caplog,
        click_result=result,
    )


@mock.patch(
    "great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit"
)
def test_suite_delete_with_zero_suites(
    mock_emit, caplog, monkeypatch, empty_data_context_stats_enabled
):
    """`suite delete` on an empty project exits 1 and emits delete.end with
    success=False."""
    context: DataContext = empty_data_context_stats_enabled
    monkeypatch.chdir(os.path.dirname(context.root_directory))

    runner: CliRunner = CliRunner(mix_stderr=False)
    result: Result = runner.invoke(
        cli,
        f"--v3-api suite delete not_a_suite",
        catch_exceptions=False,
    )
    assert result.exit_code == 1

    stdout: str = result.stdout
    assert "No expectation suites found in the project" in stdout

    assert mock_emit.call_count == 3
    assert mock_emit.call_args_list == [
        mock.call(
            {"event_payload": {}, "event": "data_context.__init__", "success": True}
        ),
        mock.call(
            {
                "event": "cli.suite.delete.begin",
                "event_payload": {"api_version": "v3"},
                "success": True,
            }
        ),
        mock.call(
            {
                "event": "cli.suite.delete.end",
                "event_payload": {"api_version": "v3"},
                "success": False,
            }
        ),
    ]

    assert_no_logging_messages_or_tracebacks(
        my_caplog=caplog,
        click_result=result,
    )


@mock.patch(
    "great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit"
)
def test_suite_delete_with_non_existent_suite(
    mock_emit, caplog, monkeypatch, empty_data_context_stats_enabled
):
    """`suite delete` with a wrong name (suites do exist) exits 1 with a
    name-specific error and a failing delete.end event."""
    context: DataContext = empty_data_context_stats_enabled
    monkeypatch.chdir(os.path.dirname(context.root_directory))
    expectation_suite_name: str = "test_suite_name"
    suite: ExpectationSuite = context.create_expectation_suite(
        expectation_suite_name=expectation_suite_name
    )
    context.save_expectation_suite(expectation_suite=suite)
    assert (
        context.list_expectation_suites()[0].expectation_suite_name
        == expectation_suite_name
    )
    # Drop the events recorded during setup so only the CLI run is asserted on.
    mock_emit.reset_mock()

    runner: CliRunner = CliRunner(mix_stderr=False)
    result: Result = runner.invoke(
        cli,
        f"--v3-api suite delete not_a_suite",
        catch_exceptions=False,
    )
    assert result.exit_code == 1

    stdout: str = result.stdout
    assert "No expectation suite named not_a_suite found" in stdout

    assert mock_emit.call_count == 3
    assert mock_emit.call_args_list == [
        mock.call(
            {"event_payload": {}, "event": "data_context.__init__", "success": True}
        ),
        mock.call(
            {
                "event": "cli.suite.delete.begin",
                "event_payload": {"api_version": "v3"},
                "success": True,
            }
        ),
        mock.call(
            {
                "event": "cli.suite.delete.end",
                "event_payload": {"api_version": "v3"},
                "success": False,
            }
        ),
    ]

    assert_no_logging_messages_or_tracebacks(
        my_caplog=caplog,
        click_result=result,
    )


@mock.patch(
    "great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit"
)
def test_suite_delete_with_one_suite(
    mock_emit, caplog, monkeypatch, empty_data_context_stats_enabled
):
    """Confirmed `suite delete` removes the suite JSON file and emits a
    successful delete begin/end pair."""
    context: DataContext = empty_data_context_stats_enabled
    monkeypatch.chdir(os.path.dirname(context.root_directory))
    project_dir: str = empty_data_context_stats_enabled.root_directory
    expectation_suite_dir_name: str = "a_dir"
    expectation_suite_name: str = "test_suite_name"

    # noinspection PyUnusedLocal
    suite: ExpectationSuite = context.create_expectation_suite(
        expectation_suite_name=f"{expectation_suite_dir_name}.{expectation_suite_name}"
    )
    context.save_expectation_suite(expectation_suite=suite)
    assert (
        context.list_expectation_suites()[0].expectation_suite_name
        == f"{expectation_suite_dir_name}.{expectation_suite_name}"
    )
    mock_emit.reset_mock()

    # Dotted suite names map to a subdirectory under "expectations/".
    suite_dir: str = os.path.join(
        project_dir, "expectations", expectation_suite_dir_name
    )
    suite_path: str = os.path.join(suite_dir, f"{expectation_suite_name}.json")
    assert os.path.isfile(suite_path)

    runner: CliRunner = CliRunner(mix_stderr=False)
    # input="\n" accepts the confirmation prompt's default (yes).
    result: Result = runner.invoke(
        cli,
        f"--v3-api suite delete {expectation_suite_dir_name}.{expectation_suite_name}",
        input="\n",
        catch_exceptions=False,
    )
    assert result.exit_code == 0

    stdout: str = result.stdout
    assert (
        f"Deleted the expectation suite named: {expectation_suite_dir_name}.{expectation_suite_name}"
        in stdout
    )
    assert not os.path.isfile(suite_path)

    assert mock_emit.call_count == 3
    assert mock_emit.call_args_list == [
        mock.call(
            {"event_payload": {}, "event": "data_context.__init__", "success": True}
        ),
        mock.call(
            {
                "event": "cli.suite.delete.begin",
                "event_payload": {"api_version": "v3"},
                "success": True,
            }
        ),
        mock.call(
            {
                "event": "cli.suite.delete.end",
                "event_payload": {"api_version": "v3"},
                "success": True,
            }
        ),
    ]

    assert_no_logging_messages_or_tracebacks(
        my_caplog=caplog,
        click_result=result,
    )


@mock.patch(
    "great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit"
)
def test_suite_delete_canceled_with_one_suite(
    mock_emit, caplog, monkeypatch, empty_data_context_stats_enabled
):
    """Answering "n" at the delete confirmation keeps the suite file; only the
    begin event is emitted (no delete.end)."""
    context: DataContext = empty_data_context_stats_enabled
    monkeypatch.chdir(os.path.dirname(context.root_directory))
    project_dir: str = empty_data_context_stats_enabled.root_directory
    expectation_suite_dir_name: str = "a_dir"
    expectation_suite_name: str = "test_suite_name"

    # noinspection PyUnusedLocal
    suite: ExpectationSuite = context.create_expectation_suite(
        expectation_suite_name=f"{expectation_suite_dir_name}.{expectation_suite_name}"
    )
    context.save_expectation_suite(expectation_suite=suite)
    assert (
        context.list_expectation_suites()[0].expectation_suite_name
        == f"{expectation_suite_dir_name}.{expectation_suite_name}"
    )
    mock_emit.reset_mock()

    suite_dir: str = os.path.join(
        project_dir, "expectations", expectation_suite_dir_name
    )
    suite_path: str = os.path.join(suite_dir, f"{expectation_suite_name}.json")
    assert os.path.isfile(suite_path)

    runner: CliRunner = CliRunner(mix_stderr=False)
    # input="n\n" declines the confirmation prompt.
    result: Result = runner.invoke(
        cli,
        f"--v3-api suite delete {expectation_suite_dir_name}.{expectation_suite_name}",
        input="n\n",
        catch_exceptions=False,
    )
    assert result.exit_code == 0

    stdout: str = result.stdout
    assert (
        f"Suite `{expectation_suite_dir_name}.{expectation_suite_name}` was not deleted"
        in stdout
    )
    assert os.path.isfile(suite_path)

    # Canceling means no "cli.suite.delete.end" event is sent.
    assert mock_emit.call_count == 2
    assert mock_emit.call_args_list == [
        mock.call(
            {"event_payload": {}, "event": "data_context.__init__", "success": True}
        ),
        mock.call(
            {
                "event": "cli.suite.delete.begin",
                "event_payload": {"api_version": "v3"},
                "success": True,
            }
        ),
    ]

    assert_no_logging_messages_or_tracebacks(
        my_caplog=caplog,
        click_result=result,
    )


@mock.patch(
    "great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit"
)
def test_suite_delete_with_one_suite_assume_yes_flag(
    mock_emit, caplog, monkeypatch, empty_data_context_stats_enabled
):
    """--assume-yes skips the confirmation prompt entirely and deletes the
    suite; a follow-up `suite list` confirms nothing remains."""
    context: DataContext = empty_data_context_stats_enabled
    monkeypatch.chdir(os.path.dirname(context.root_directory))
    project_dir: str = context.root_directory
    expectation_suite_dir_name: str = "a_dir"
    expectation_suite_name: str = "test_suite_name"

    # noinspection PyUnusedLocal
    suite: ExpectationSuite = context.create_expectation_suite(
        expectation_suite_name=f"{expectation_suite_dir_name}.{expectation_suite_name}"
    )
    context.save_expectation_suite(expectation_suite=suite)
    assert (
        context.list_expectation_suites()[0].expectation_suite_name
        == f"{expectation_suite_dir_name}.{expectation_suite_name}"
    )
    mock_emit.reset_mock()

    suite_dir: str = os.path.join(
        project_dir, "expectations", expectation_suite_dir_name
    )
    suite_path: str = os.path.join(suite_dir, f"{expectation_suite_name}.json")
    assert os.path.isfile(suite_path)

    runner: CliRunner = CliRunner(mix_stderr=False)
    result: Result = runner.invoke(
        cli,
        f"--v3-api --assume-yes suite delete {expectation_suite_dir_name}.{expectation_suite_name}",
        catch_exceptions=False,
    )
    assert result.exit_code == 0

    stdout: str = result.stdout
    assert (
        f"Deleted the expectation suite named: {expectation_suite_dir_name}.{expectation_suite_name}"
        in stdout
    )
    assert "Would you like to proceed? [Y/n]:" not in stdout
    # This assertion is extra assurance since this test is too permissive if we change the confirmation message
    assert "[Y/n]" not in stdout

    assert not os.path.isfile(suite_path)

    assert mock_emit.call_count == 3
    assert mock_emit.call_args_list == [
        mock.call(
            {"event_payload": {}, "event": "data_context.__init__", "success": True}
        ),
        mock.call(
            {
                "event": "cli.suite.delete.begin",
                "event_payload": {"api_version": "v3"},
                "success": True,
            }
        ),
        mock.call(
            {
                "event": "cli.suite.delete.end",
                "event_payload": {"api_version": "v3"},
                "success": True,
            }
        ),
    ]

    assert_no_logging_messages_or_tracebacks(
        my_caplog=caplog,
        click_result=result,
    )

    result = runner.invoke(
        cli,
        f"--v3-api suite list",
        catch_exceptions=False,
    )
    assert result.exit_code == 0
    stdout = result.stdout
    assert result.exit_code == 0
    assert "No Expectation Suites found" in stdout


@mock.patch(
    "great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit"
)
@mock.patch("subprocess.call", return_value=True, side_effect=None)
def test_suite_new_profile_on_context_with_no_datasource_raises_error(
    mock_subprocess,
    mock_emit,
    caplog,
    monkeypatch,
    empty_data_context_stats_enabled,
):
    """
    We call the "suite new --profile" command on a context with no datasource

    The command should:
    - exit with a clear error message
    - send a DataContext init success message
    - send a new fail message
    """
    context: DataContext = empty_data_context_stats_enabled
    monkeypatch.chdir(os.path.dirname(context.root_directory))

    expectation_suite_name: str = "test_suite_name"

    runner: CliRunner = CliRunner(mix_stderr=False)
    result: Result = runner.invoke(
        cli,
        [
            "--v3-api",
            "suite",
            "new",
            "--interactive",
            "--profile",
            "--expectation-suite",
            f"{expectation_suite_name}",
        ],
        input="\n",
        catch_exceptions=False,
    )
    assert result.exit_code == 1

    stdout: str = result.output
    assert (
        "No datasources found in the context. To add a datasource, run `great_expectations datasource new`"
        in stdout
    )

    # Jupyter must never be launched on the error path.
    assert mock_subprocess.call_count == 0

    assert mock_emit.call_count == 3
    assert mock_emit.call_args_list == [
        mock.call(
            {"event_payload": {}, "event": "data_context.__init__", "success": True}
        ),
        mock.call(
            {
                "event": "cli.suite.new.begin",
                "event_payload": {"api_version": "v3"},
                "success": True,
            }
        ),
        mock.call(
            {
                "event": "cli.suite.new.end",
                "event_payload": {"api_version": "v3"},
                "success": False,
            }
        ),
    ]

    assert_no_logging_messages_or_tracebacks(
        my_caplog=caplog,
        click_result=result,
    )


@mock.patch(
    "great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit"
)
def test_suite_new_profile_on_existing_suite_raises_error(
    mock_emit, caplog, monkeypatch, empty_data_context_stats_enabled
):
    """
    We call the "suite new --profile" command with an existing suite

    The command should:
    - exit with a clear error message
    - send a DataContext init success message
    - send a new fail message
    """
    context: DataContext = empty_data_context_stats_enabled
    monkeypatch.chdir(os.path.dirname(context.root_directory))
    project_dir: str = context.root_directory
    uncommitted_dir: str = os.path.join(project_dir, "uncommitted")

    expectation_suite_name: str = "test_suite_name"
    suite: ExpectationSuite = context.create_expectation_suite(
        expectation_suite_name=expectation_suite_name
    )
    context.save_expectation_suite(expectation_suite=suite)
    assert (
        context.list_expectation_suites()[0].expectation_suite_name
        == expectation_suite_name
    )

    batch_request: dict = {
        "datasource_name": "my_datasource",
        "data_connector_name": "my_basic_data_connector",
        "data_asset_name": "Titanic_1911",
    }

    batch_request_file_path: str = os.path.join(uncommitted_dir, f"batch_request.json")
    with open(batch_request_file_path, "w") as json_file:
        json.dump(batch_request, json_file)

    mock_emit.reset_mock()

    runner: CliRunner = CliRunner(mix_stderr=False)
    result: Result = runner.invoke(
        cli,
        [
            "--v3-api",
            "suite",
            "new",
            "--expectation-suite",
            f"{expectation_suite_name}",
            "--interactive",
            "--batch-request",
            f"{batch_request_file_path}",
            "--profile",
            "--no-jupyter",
        ],
        input="\n",
        catch_exceptions=False,
    )
    assert result.exit_code == 1

    stdout: str = result.output
    assert (
        f"An expectation suite named `{expectation_suite_name}` already exists."
        in stdout
    )
    assert (
        f"If you intend to edit the suite please use `great_expectations suite edit {expectation_suite_name}`."
        in stdout
    )

    assert mock_emit.call_count == 3
    assert mock_emit.call_args_list == [
        mock.call(
            {"event_payload": {}, "event": "data_context.__init__", "success": True}
        ),
        mock.call(
            {
                "event": "cli.suite.new.begin",
                "event_payload": {"api_version": "v3"},
                "success": True,
            }
        ),
        mock.call(
            {
                "event": "cli.suite.new.end",
                "event_payload": {"api_version": "v3"},
                "success": False,
            }
        ),
    ]

    assert_no_logging_messages_or_tracebacks(
        my_caplog=caplog,
        click_result=result,
    )


@mock.patch(
    "great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit"
)
@mock.patch("subprocess.call", return_value=True, side_effect=None)
@mock.patch("webbrowser.open", return_value=True, side_effect=None)
def test_suite_new_profile_runs_notebook_no_jupyter(
    mock_webbroser,
    mock_subprocess,
    mock_emit,
    caplog,
    monkeypatch,
    titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled,
):
    """
    We call the "suite new --profile" command

    The command should:
    - create a new notebook
    - send a DataContext init success message
    - send a new success message
    """
    context: DataContext = titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled
    monkeypatch.chdir(os.path.dirname(context.root_directory))
    project_dir: str = context.root_directory
    uncommitted_dir: str = os.path.join(project_dir, "uncommitted")

    expectation_suite_name: str = "test_suite_name"

    batch_request: dict = {
        "datasource_name": "my_datasource",
        "data_connector_name": "my_basic_data_connector",
        "data_asset_name": "Titanic_1911",
    }

    batch_request_file_path: str = os.path.join(uncommitted_dir, f"batch_request.json")
    with open(batch_request_file_path, "w") as json_file:
        json.dump(batch_request, json_file)

    mock_emit.reset_mock()

    runner: CliRunner = CliRunner(mix_stderr=False)
    result: Result = runner.invoke(
        cli,
        [
            "--v3-api",
            "suite",
            "new",
            "--expectation-suite",
            f"{expectation_suite_name}",
            "--interactive",
            "--batch-request",
            f"{batch_request_file_path}",
            "--profile",
            "--no-jupyter",
        ],
        input="\n",
        catch_exceptions=False,
    )
    assert result.exit_code == 0

    stdout: str = result.stdout
    assert "Select a datasource" not in stdout
    assert (
        "Opening a notebook for you now to edit your expectation suite!" not in stdout
    )
    assert (
        "If you wish to avoid this you can add the `--no-jupyter` flag." not in stdout
    )

    expected_suite_path: str = os.path.join(
        project_dir, "expectations", f"{expectation_suite_name}.json"
    )
    assert os.path.isfile(expected_suite_path)

    expected_notebook_path: str = os.path.join(
        uncommitted_dir, f"edit_{expectation_suite_name}.ipynb"
    )
    assert os.path.isfile(expected_notebook_path)

    # Mirror the repr formatting the notebook renderer applies to the
    # batch_request dict so we can grep for the exact code cell.
    batch_request_string: str = (
        str(BatchRequest(**batch_request))
        .replace("{\n", "{\n    ")
        .replace(",\n", ",\n    ")
        .replace("\n}", ",\n}")
    )
    batch_request_string = fr"batch_request = {batch_request_string}"

    cells_of_interest_dict: Dict[int, dict] = find_code_in_notebook(
        nb=load_notebook_from_path(notebook_path=expected_notebook_path),
        search_string=batch_request_string,
    )
    assert len(cells_of_interest_dict) == 1

    cells_of_interest_dict: Dict[int, dict] = find_code_in_notebook(
        nb=load_notebook_from_path(notebook_path=expected_notebook_path),
        search_string="context.open_data_docs(resource_identifier=suite_identifier)",
    )
    assert not cells_of_interest_dict

    cells_of_interest_dict: Dict[int, dict] = find_code_in_notebook(
        nb=load_notebook_from_path(notebook_path=expected_notebook_path),
        search_string="context.open_data_docs(resource_identifier=validation_result_identifier)",
    )
    assert len(cells_of_interest_dict) == 1

    profiler_code_cell: str = f"""\
profiler = UserConfigurableProfiler(
    profile_dataset=validator,
    excluded_expectations=None,
    ignored_columns=ignored_columns,
    not_null_only=False,
    primary_or_compound_key=False,
    semantic_types_dict=None,
    table_expectations_only=False,
    value_set_threshold="MANY",
)
suite = profiler.build_suite()"""
    profiler_code_cell = lint_code(code=profiler_code_cell).rstrip("\n")

    cells_of_interest_dict: Dict[int, dict] = find_code_in_notebook(
        nb=load_notebook_from_path(notebook_path=expected_notebook_path),
        search_string=profiler_code_cell,
    )
    assert len(cells_of_interest_dict) == 1

    # Execute the generated notebook (with the data-docs call stripped so no
    # browser is opened) to make sure the profiled suite is actually saved.
    run_notebook(
        notebook_path=expected_notebook_path,
        notebook_dir=uncommitted_dir,
        string_to_be_replaced="context.open_data_docs(resource_identifier=validation_result_identifier)",
        replacement_string="",
    )

    context = DataContext(context_root_dir=project_dir)
    assert expectation_suite_name in context.list_expectation_suite_names()

    suite: ExpectationSuite = context.get_expectation_suite(
        expectation_suite_name=expectation_suite_name
    )
    assert suite.expectations == [
        ExpectationConfiguration(
            **{
                "expectation_type": "expect_table_columns_to_match_ordered_list",
                "kwargs": {
                    "column_list": [
                        "Unnamed: 0",
                        "Name",
                        "PClass",
                        "Age",
                        "Sex",
                        "Survived",
                        "SexCode",
                    ]
                },
                "meta": {},
            }
        ),
        ExpectationConfiguration(
            **{
                "expectation_type": "expect_table_row_count_to_be_between",
                "kwargs": {"max_value": 1313, "min_value": 1313},
                "meta": {},
            }
        ),
    ]

    assert mock_subprocess.call_count == 0
    assert mock_webbroser.call_count == 0

    assert mock_emit.call_count == 5
    assert mock_emit.call_args_list == [
        mock.call(
            {"event_payload": {}, "event": "data_context.__init__", "success": True}
        ),
        mock.call(
            {
                "event": "cli.suite.new.begin",
                "event_payload": {"api_version": "v3"},
                "success": True,
            }
        ),
        mock.call(
            {
                "event_payload": {
                    "anonymized_expectation_suite_name": "9df638a13b727807e51b13ec1839bcbe"
                },
                "event": "data_context.save_expectation_suite",
                "success": True,
            }
        ),
        mock.call(
            {
                "event": "cli.suite.new.end",
                "event_payload": {"api_version": "v3"},
                "success": True,
            }
        ),
        mock.call(
            {"event_payload": {}, "event": "data_context.__init__", "success": True}
        ),
    ]

    assert_no_logging_messages_or_tracebacks(
        my_caplog=caplog,
        click_result=result,
    )


@mock.patch(
    "great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit"
)
@mock.patch("subprocess.call", return_value=True, side_effect=None)
@mock.patch("webbrowser.open", return_value=True, side_effect=None)
def test_suite_new_profile_runs_notebook_opens_jupyter(
    mock_webbroser,
    mock_subprocess,
    mock_emit,
    caplog,
    monkeypatch,
    titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled,
):
    """
    We call the "suite new --profile" command

    The command should:
    - create a new notebook
    - open the notebook in jupyter
    - send a DataContext init success message
    - send a new success message
    """
    context: DataContext = titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled
    monkeypatch.chdir(os.path.dirname(context.root_directory))
    project_dir: str = context.root_directory
    uncommitted_dir: str = os.path.join(project_dir, "uncommitted")

    expectation_suite_name: str = "test_suite_name"

    batch_request: dict = {
        "datasource_name": "my_datasource",
        "data_connector_name": "my_basic_data_connector",
        "data_asset_name": "Titanic_1911",
    }

    batch_request_file_path: str = os.path.join(uncommitted_dir, f"batch_request.json")
    with open(batch_request_file_path, "w") as json_file:
        json.dump(batch_request, json_file)

    mock_emit.reset_mock()

    runner: CliRunner = CliRunner(mix_stderr=False)
    # Same invocation as the no-jupyter variant minus the "--no-jupyter" flag.
    result: Result = runner.invoke(
        cli,
        [
            "--v3-api",
            "suite",
            "new",
            "--expectation-suite",
            f"{expectation_suite_name}",
            "--interactive",
            "--batch-request",
            f"{batch_request_file_path}",
            "--profile",
        ],
        input="\n",
        catch_exceptions=False,
    )
    assert result.exit_code == 0

    stdout: str = result.stdout
    assert "Select a datasource" not in stdout
    assert "Opening a notebook for you now to edit your expectation suite!" in stdout
    assert "If you wish to avoid this you can add the `--no-jupyter` flag." in stdout

    expected_suite_path: str = os.path.join(
        project_dir, "expectations", f"{expectation_suite_name}.json"
    )
    assert os.path.isfile(expected_suite_path)

    expected_notebook_path: str = os.path.join(
        uncommitted_dir, f"edit_{expectation_suite_name}.ipynb"
    )
    assert os.path.isfile(expected_notebook_path)

    batch_request_string: str = (
        str(BatchRequest(**batch_request))
        .replace("{\n", "{\n    ")
        .replace(",\n", ",\n    ")
        .replace("\n}", ",\n}")
    )
    batch_request_string = fr"batch_request = {batch_request_string}"

    cells_of_interest_dict: Dict[int, dict] = find_code_in_notebook(
        nb=load_notebook_from_path(notebook_path=expected_notebook_path),
        search_string=batch_request_string,
    )
    assert len(cells_of_interest_dict) == 1

    cells_of_interest_dict: Dict[int, dict] = find_code_in_notebook(
        nb=load_notebook_from_path(notebook_path=expected_notebook_path),
        search_string="context.open_data_docs(resource_identifier=suite_identifier)",
    )
    assert not cells_of_interest_dict

    cells_of_interest_dict: Dict[int, dict] = find_code_in_notebook(
        nb=load_notebook_from_path(notebook_path=expected_notebook_path),
        search_string="context.open_data_docs(resource_identifier=validation_result_identifier)",
    )
    assert len(cells_of_interest_dict) == 1

    profiler_code_cell: str = f"""\
profiler = UserConfigurableProfiler(
    profile_dataset=validator,
    excluded_expectations=None,
    ignored_columns=ignored_columns,
    not_null_only=False,
    primary_or_compound_key=False,
    semantic_types_dict=None,
    table_expectations_only=False,
    value_set_threshold="MANY",
)
suite = profiler.build_suite()"""
    profiler_code_cell = lint_code(code=profiler_code_cell).rstrip("\n")

    cells_of_interest_dict: Dict[int, dict] = find_code_in_notebook(
        nb=load_notebook_from_path(notebook_path=expected_notebook_path),
        search_string=profiler_code_cell,
    )
    assert len(cells_of_interest_dict) == 1

    run_notebook(
        notebook_path=expected_notebook_path,
        notebook_dir=uncommitted_dir,
        string_to_be_replaced="context.open_data_docs(resource_identifier=validation_result_identifier)",
        replacement_string="",
    )

    context = DataContext(context_root_dir=project_dir)
    assert expectation_suite_name in context.list_expectation_suite_names()

    suite: ExpectationSuite = context.get_expectation_suite(
        expectation_suite_name=expectation_suite_name
    )
    assert suite.expectations == [
        ExpectationConfiguration(
            **{
                "expectation_type": "expect_table_columns_to_match_ordered_list",
                "kwargs": {
                    "column_list": [
                        "Unnamed: 0",
                        "Name",
                        "PClass",
                        "Age",
                        "Sex",
                        "Survived",
                        "SexCode",
                    ]
                },
                "meta": {},
            }
        ),
        ExpectationConfiguration(
            **{
                "expectation_type": "expect_table_row_count_to_be_between",
                "kwargs": {"max_value": 1313, "min_value": 1313},
                "meta": {},
            }
        ),
    ]

    # Jupyter is launched exactly once, via "jupyter notebook <path>".
    assert mock_subprocess.call_count == 1
    call_args: List[str] = mock_subprocess.call_args[0][0]
    assert call_args[0] == "jupyter"
    assert call_args[1] == "notebook"
    assert expected_notebook_path in call_args[2]

    assert mock_webbroser.call_count == 0

    assert mock_emit.call_count == 5
    assert mock_emit.call_args_list == [
        mock.call(
            {"event_payload": {}, "event": "data_context.__init__", "success": True}
        ),
        mock.call(
            {
                "event": "cli.suite.new.begin",
                "event_payload": {"api_version": "v3"},
                "success": True,
            }
        ),
        mock.call(
            {
                "event_payload": {
                    "anonymized_expectation_suite_name": "9df638a13b727807e51b13ec1839bcbe"
                },
                "event": "data_context.save_expectation_suite",
                "success": True,
            }
        ),
        mock.call(
            {
                "event": "cli.suite.new.end",
                "event_payload": {"api_version": "v3"},
                "success": True,
            }
        ),
        mock.call(
            {"event_payload": {}, "event": "data_context.__init__", "success": True}
        ),
    ]

    assert_no_logging_messages_or_tracebacks(
        my_caplog=caplog,
        click_result=result,
    )


@pytest.fixture
def suite_new_messages():
    # Canonical stdout/stderr snippets expected from "suite new" flag handling;
    # keyed by the fixture names used in the parametrized tests below.
    return {
        "no_msg": "",
        "happy_path_profile": "Entering interactive mode since you passed the --profile flag",
        "warning_profile": "Warning: Ignoring the --manual flag and entering interactive mode since you passed the --profile flag",
        "happy_path_batch_request": "Entering interactive mode since you passed the --batch-request flag",
        "warning_batch_request": "Warning: Ignoring the --manual flag and entering interactive mode since you passed the --batch-request flag",
        "happy_path_prompt_call": """\
How would you like to create your Expectation Suite?
    1. Manually, without interacting with a sample batch of data (default)
    2. Interactively, with a sample batch of data
    3. Automatically, using a profiler
""",
        "error_both_interactive_flags": "Please choose either --interactive or --manual, you may not choose both.",
    }


@pytest.mark.parametrize(
    "interactive_flag,manual_flag,profile_flag,batch_request_flag,error_expected,prompt_input,return_interactive,return_profile,stdout_fixture,stderr_fixture",
    [
        # No error expected
        # return_interactive = True, return_profile = False
        pytest.param(
            True,
            False,
            False,
            None,
            False,
            None,
            True,
            False,
            "no_msg",
            "no_msg",
            id="--interactive",
        ),
        # return_interactive = False, return_profile = False
        pytest.param(
            False,
            True,
            False,
            None,
            False,
            None,
            False,
            False,
            "no_msg",
            "no_msg",
            id="--manual",
        ),
        # return_interactive = True, return_profile = True
        pytest.param(
            False,
            False,
            True,
            None,
            False,
            None,
            True,
            True,
            "no_msg",
            "no_msg",
            id="--profile",
        ),
        pytest.param(
            True,
            False,
            True,
            None,
            False,
            None,
            True,
            True,
            "no_msg",
            "no_msg",
            id="--interactive --profile",
        ),
        # batch_request not empty
        pytest.param(
            True,
            False,
            False,
            "batch_request.json",
            False,
            None,
            True,
            False,
            "no_msg",
            "no_msg",
            id="--interactive --batch-request",
        ),
        pytest.param(
            False,
            False,
            True,
            "batch_request.json",
            False,
            None,
            True,
            True,
            "happy_path_profile",
            "no_msg",
            id="--profile --batch-request",
        ),
        pytest.param(
            True,
            False,
            True,
            "batch_request.json",
            False,
            None,
            True,
            True,
            "no_msg",
            "no_msg",
            id="--interactive --profile --batch-request",
        ),
        # Prompts
        # Just hit enter (default choice)
        pytest.param(
            False,
            False,
            False,
            None,
            False,
            "",
            False,
            False,
            "no_msg",
            "no_msg",
            id="prompt: Default Choice 1 - Manual suite creation (default)",
        ),
        # Choice 1 - Manual suite creation (default)
        pytest.param(
            False,
            False,
            False,
            None,
            False,
            "1",
            False,
            False,
            "no_msg",
            "no_msg",
            id="prompt: Choice 1 - Manual suite creation (default)",
        ),
        # Choice 2 - Interactive suite creation
        pytest.param(
            False,
            False,
            False,
            None,
            False,
            "2",
            True,
            False,
            "no_msg",
            "no_msg",
            id="prompt: Choice 2 - Interactive suite creation",
        ),
        # Choice 3 - Automatic suite creation (profiler)
        pytest.param(
            False,
            False,
            False,
            None,
            False,
            "3",
            True,
            True,
            "no_msg",
            "no_msg",
            id="prompt: Choice 3 - Automatic suite creation (profiler)",
        ),
        # No error but warning expected
        # no-interactive flag with batch_request, with/without profile flag
        pytest.param(
            False,
            True,
            False,
            "batch_request.json",
            False,
            None,
            True,
            False,
            "warning_batch_request",
            "no_msg",
            id="warning: --manual --batch-request",
        ),
        pytest.param(
            False,
            True,
            True,
            "batch_request.json",
            False,
            None,
            True,
            True,
            "warning_profile",
            "no_msg",
            id="warning: --manual --profile --batch-request",
        ),
        # no-interactive flag with profile and without batch request flag
        pytest.param(
            False,
            True,
            True,
            None,
            False,
            None,
            True,
            True,
            "warning_profile",
            "no_msg",
            id="warning: --manual --profile",
        ),
        # Yes error expected
        # both interactive flags, profile=False, with/without batch_request
        pytest.param(
            True,
            True,
            False,
            None,
            True,
            None,
            None,
            None,
            "error_both_interactive_flags",
            "no_msg",
            id="error: --interactive --manual",
        ),
        pytest.param(
            True,
            True,
            False,
            "batch_request.json",
            True,
            None,
            None,
            None,
            "error_both_interactive_flags",
            "no_msg",
            id="error: --interactive --manual --batch-request",
        ),
        # both interactive flags, profile=True, with/without batch_request
        pytest.param(
            True,
            True,
            True,
            None,
            True,
            None,
            None,
            None,
            "error_both_interactive_flags",
            "no_msg",
            id="error: --interactive --manual --profile",
        ),
        pytest.param(
            True,
            True,
            True,
            "batch_request.json",
            True,
            None,
            None,
            None,
            "error_both_interactive_flags",
            "no_msg",
            id="error: --interactive --manual --profile --batch-request",
        ),
    ],
)
@mock.patch("click.prompt")
@mock.patch(
    "great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit"
)
def test__process_suite_new_flags_and_prompt(
    mock_emit,
    mock_prompt,
    interactive_flag,
    manual_flag,
    profile_flag,
    batch_request_flag,
    error_expected,
    prompt_input,
    return_interactive,
    return_profile,
    stdout_fixture,
    stderr_fixture,
    empty_data_context_stats_enabled,
    capsys,
    suite_new_messages,
):
    """
    What does this test and why?
    _process_suite_new_flags_and_prompt should return the correct configuration or error based on input flags.
    """
    usage_event_end: str = "cli.suite.new.end"
    context: DataContext = empty_data_context_stats_enabled

    # test happy paths
    if not error_expected:
        if prompt_input is not None:
            mock_prompt.side_effect = [prompt_input]

        processed_flags = _process_suite_new_flags_and_prompt(
            context=context,
            usage_event_end=usage_event_end,
            interactive_flag=interactive_flag,
            manual_flag=manual_flag,
            profile=profile_flag,
            batch_request=batch_request_flag,
        )

        assert processed_flags == {
            "interactive": return_interactive,
            "profile": return_profile,
        }

        # Note - in this method on happy path no usage stats message is sent. Other messages are sent during the full
        # CLI suite new flow of creating a notebook etc.
        assert mock_emit.call_count == 0
        assert mock_emit.call_args_list == []

        # Check output
        captured: CaptureResult = capsys.readouterr()
        assert suite_new_messages[stdout_fixture] in captured.out
        assert suite_new_messages[stderr_fixture] in captured.err

        # Check prompt text and called only when appropriate
        if prompt_input is not None:
            assert mock_prompt.call_count == 1
            assert (
                mock_prompt.call_args_list[0][0][0]
                == suite_new_messages["happy_path_prompt_call"]
            )
        else:
            assert mock_prompt.call_count == 0

    # test error cases
    elif error_expected:
        with pytest.raises(SystemExit):
            _ = _process_suite_new_flags_and_prompt(
                context=context,
                usage_event_end=usage_event_end,
                interactive_flag=interactive_flag,
                manual_flag=manual_flag,
                profile=profile_flag,
                batch_request=batch_request_flag,
            )

        # Check output
        captured: CaptureResult = capsys.readouterr()
        assert suite_new_messages[stdout_fixture] in captured.out
        assert suite_new_messages[stderr_fixture] in captured.err

        assert mock_prompt.call_count == 0

        # Note - in this method only a single usage stats message is sent. Other messages are sent during the full
        # CLI suite new flow of creating a notebook etc.
        assert mock_emit.call_count == 1
        assert mock_emit.call_args_list == [
            mock.call(
                {
                    "event": usage_event_end,
                    "event_payload": {"api_version": "v3"},
                    "success": False,
                }
            ),
        ]


@pytest.fixture
def suite_edit_messages():
    # Canonical stdout/stderr snippets expected from "suite edit" flag handling;
    # keyed by the fixture names used in the parametrized tests below.
    return {
        "no_msg": "",
        "happy_path_datasource_name": "Entering interactive mode since you passed the --datasource-name flag",
        "warning_datasource_name": "Warning: Ignoring the --manual flag and entering interactive mode since you passed the --datasource-name flag",
        "happy_path_batch_request": "Entering interactive mode since you passed the --batch-request flag",
        "warning_batch_request": "Warning: Ignoring the --manual flag and entering interactive mode since you passed the --batch-request flag",
        "happy_path_prompt_call": """\
How would you like to edit your Expectation Suite?
    1. Manually, without interacting with a sample batch of data (default)
    2. Interactively, with a sample batch of data
""",
        "error_both_interactive_flags": "Please choose either --interactive or --manual, you may not choose both.",
        "error_both_datasource_name_and_batch_request_flags": """Only one of --datasource-name DATASOURCE_NAME and --batch-request <path to JSON file> \
options can be used.
""",
    }


@pytest.mark.parametrize(
    "interactive_flag,manual_flag,datasource_name_flag,batch_request_flag,error_expected,prompt_input,return_interactive,stdout_fixture,stderr_fixture",
    [
        # No error expected
        # return_interactive = True
        pytest.param(
            True,
            False,
            None,
            None,
            False,
            None,
            True,
            "no_msg",
            "no_msg",
            id="--interactive",
        ),
        # return_interactive = False
        pytest.param(
            False,
            True,
            None,
            None,
            False,
            None,
            False,
            "no_msg",
            "no_msg",
            id="--manual",
        ),
        # return_interactive = True, --datasource-name
        pytest.param(
            False,
            False,
            "some_datasource_name",
            None,
            False,
            None,
            True,
            "happy_path_datasource_name",
            "no_msg",
            id="--datasource-name",
        ),
        pytest.param(
            True,
            False,
            "some_datasource_name",
            None,
            False,
            None,
            True,
            "no_msg",
            "no_msg",
            id="--interactive --datasource-name",
        ),
        # batch_request not empty
        pytest.param(
            True,
            False,
            None,
            "batch_request.json",
            False,
            None,
            True,
            "no_msg",
            "no_msg",
            id="--interactive --batch-request",
        ),
        pytest.param(
            False,
            False,
            None,
            "batch_request.json",
            False,
            None,
            True,
            "happy_path_batch_request",
            "no_msg",
            id="--batch-request",
        ),
        # Prompts
        # Just hit enter (default choice)
        pytest.param(
            False,
            False,
            None,
            None,
            False,
            "",
            False,
            "no_msg",
            "no_msg",
            id="prompt: Default Choice 1 - Manual suite edit (default)",
        ),
        # Choice 1 - Manual suite edit (default)
        pytest.param(
            False,
            False,
            None,
            None,
            False,
            "1",
            False,
            "no_msg",
            "no_msg",
            id="prompt: Choice 1 - Manual suite edit (default)",
        ),
        # Choice 2 - Interactive suite edit
        pytest.param(
            False,
            False,
            None,
            None,
            False,
            "2",
            True,
            "no_msg",
            "no_msg",
            id="prompt: Choice 2 - Interactive suite edit",
        ),
        # No error but warning expected
        # no-interactive flag with batch_request
        pytest.param(
            False,
            True,
            None,
            "batch_request.json",
            False,
            None,
            True,
            "warning_batch_request",
            "no_msg",
            id="warning: --manual --batch-request",
        ),
        # no-interactive flag with datasource_name
        pytest.param(
            False,
            True,
            "some_datasource_name",
            None,
            False,
            None,
            True,
            "warning_datasource_name",
            "no_msg",
            id="warning: --manual --datasource-name",
        ),
        # Yes error expected
        # both interactive flags, datasource_name=None, with/without batch_request
        pytest.param(
            True,
            True,
            None,
            None,
            True,
            None,
            None,
            "error_both_interactive_flags",
            "no_msg",
            id="error: --interactive --manual",
        ),
        pytest.param(
            True,
            True,
            None,
            "batch_request.json",
            True,
            None,
            None,
            "error_both_interactive_flags",
            "no_msg",
            id="error: --interactive --manual --batch-request",
        ),
        # both interactive flags, datasource_name=something, with/without batch_request
        pytest.param(
            True,
            True,
            "some_datasource_name",
            None,
            True,
            None,
            None,
            "error_both_interactive_flags",
            "no_msg",
            id="error: --interactive --manual --datasource-name",
        ),
        pytest.param(
            True,
            True,
            "some_datasource_name",
            "batch_request.json",
            True,
            None,
            None,
            "error_both_datasource_name_and_batch_request_flags",
            "no_msg",
            id="error: --interactive --manual --datasource-name --batch-request",
        ),
        # both --datasource-name and --batch-request
        pytest.param(
            False,
            False,
            "some_datasource_name",
            "batch_request.json",
            True,
            None,
            True,
            "error_both_datasource_name_and_batch_request_flags",
            "no_msg",
            id="error: --datasource-name --batch-request",
        ),
        pytest.param(
            True,
            False,
            "some_datasource_name",
            "batch_request.json",
            True,
            None,
            True,
            "error_both_datasource_name_and_batch_request_flags",
            "no_msg",
            id="--interactive --datasource-name --batch-request",
        ),
        pytest.param(
            False,
            True,
            "some_datasource_name",
            "batch_request.json",
            True,
            None,
            True,
            "error_both_datasource_name_and_batch_request_flags",
            "no_msg",
            id="--manual --datasource-name --batch-request",
        ),
    ],
)
@mock.patch("click.prompt")
@mock.patch(
    "great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit"
)
def test__process_suite_edit_flags_and_prompt(
    mock_emit,
    mock_prompt,
    interactive_flag,
    manual_flag,
    datasource_name_flag,
    batch_request_flag,
    error_expected,
    prompt_input,
    return_interactive,
    stdout_fixture,
    stderr_fixture,
    empty_data_context_stats_enabled,
    capsys,
    suite_edit_messages,
):
    """
    What does this test and why?
    _process_suite_edit_flags_and_prompt should return the correct configuration or error based on input flags.
    """
    usage_event_end: str = "cli.suite.edit.end"
    context: DataContext = empty_data_context_stats_enabled

    # test happy paths
    if not error_expected:
        if prompt_input is not None:
            mock_prompt.side_effect = [prompt_input]

        interactive: bool = _process_suite_edit_flags_and_prompt(
            context=context,
            usage_event_end=usage_event_end,
            interactive_flag=interactive_flag,
            manual_flag=manual_flag,
            datasource_name=datasource_name_flag,
            batch_request=batch_request_flag,
        )

        assert interactive == return_interactive

        # Note - in this method on happy path no usage stats message is sent. Other messages are sent during the full
        # CLI suite new flow of creating a notebook etc.
        assert mock_emit.call_count == 0
        assert mock_emit.call_args_list == []

        # Check output
        captured: CaptureResult = capsys.readouterr()
        assert suite_edit_messages[stdout_fixture] in captured.out
        assert suite_edit_messages[stderr_fixture] in captured.err

        # Check prompt text and called only when appropriate
        if prompt_input is not None:
            assert mock_prompt.call_count == 1
            assert (
                mock_prompt.call_args_list[0][0][0]
                == suite_edit_messages["happy_path_prompt_call"]
            )
        else:
            assert mock_prompt.call_count == 0

    # test error cases
    elif error_expected:
        with pytest.raises(SystemExit):
            _ = _process_suite_edit_flags_and_prompt(
                context=context,
                usage_event_end=usage_event_end,
                interactive_flag=interactive_flag,
                manual_flag=manual_flag,
                datasource_name=datasource_name_flag,
                batch_request=batch_request_flag,
            )

        # Check output
        captured: CaptureResult = capsys.readouterr()
        assert suite_edit_messages[stdout_fixture] in captured.out
        assert suite_edit_messages[stderr_fixture] in captured.err

        assert mock_prompt.call_count == 0

        # Note - in this method only a single usage stats message is sent. Other messages are sent during the full
        # CLI suite new flow of creating a notebook etc.
        assert mock_emit.call_count == 1
        assert mock_emit.call_args_list == [
            mock.call(
                {
                    "event": usage_event_end,
                    "event_payload": {"api_version": "v3"},
                    "success": False,
                }
            ),
        ]
great-expectations/great_expectations
tests/cli/test_suite.py
great_expectations/expectations/metrics/column_aggregate_metrics/column_values_between_count.py
""" This is a basic generated Great Expectations script that runs a checkpoint. Internally, a Checkpoint is a list of one or more batches paired with one or more Expectation Suites and a configurable Validation Operator. Checkpoints can be run directly without this script using the `great_expectations checkpoint run` command. This script is provided for those who wish to run checkpoints via python. Data that is validated is controlled by BatchKwargs, which can be adjusted in the checkpoint file: great_expectations/checkpoints/{0}.yml. Data are validated by use of the `ActionListValidationOperator` which is configured by default. The default configuration of this Validation Operator saves validation results to your results store and then updates Data Docs. This makes viewing validation results easy for you and your team. Usage: - Run this file: `python great_expectations/uncommitted/run_{0}.py`. - This can be run manually or via a scheduler such as cron. - If your pipeline runner supports python snippets you can paste this into your pipeline. """ import sys from great_expectations import DataContext # checkpoint configuration context = DataContext("{1}") checkpoint = context.get_checkpoint("{0}") # run the Checkpoint results = checkpoint.run() # take action based on results if not results["success"]: print("Validation failed!") sys.exit(1) print("Validation succeeded!") sys.exit(0)
import json import os from typing import Dict, List from unittest import mock import click import pytest from _pytest.capture import CaptureResult from click.testing import CliRunner, Result from great_expectations import DataContext from great_expectations.cli import cli from great_expectations.cli.suite import ( _process_suite_edit_flags_and_prompt, _process_suite_new_flags_and_prompt, ) from great_expectations.core import ExpectationConfiguration from great_expectations.core.batch import BatchRequest from great_expectations.core.expectation_suite import ExpectationSuite from great_expectations.util import lint_code from tests.cli.utils import assert_no_logging_messages_or_tracebacks from tests.render.test_util import ( find_code_in_notebook, load_notebook_from_path, run_notebook, ) def test_suite_help_output(caplog): runner: CliRunner = CliRunner(mix_stderr=False) result: Result = runner.invoke(cli, ["--v3-api", "suite"], catch_exceptions=False) assert result.exit_code == 0 stdout: str = result.stdout assert ( """ Usage: great_expectations suite [OPTIONS] COMMAND [ARGS]... Expectation Suite operations Options: --help Show this message and exit. Commands: delete Delete an Expectation Suite from the Expectation Store. demo This command is not supported in the v3 (Batch Request) API. edit Edit an existing Expectation Suite. list List existing Expectation Suites. new Create a new Expectation Suite. 
""" in stdout ) assert_no_logging_messages_or_tracebacks( my_caplog=caplog, click_result=result, ) @mock.patch( "great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit" ) def test_suite_demo_deprecation_message( mock_emit, caplog, monkeypatch, empty_data_context_stats_enabled ): context: DataContext = empty_data_context_stats_enabled monkeypatch.chdir(os.path.dirname(context.root_directory)) runner: CliRunner = CliRunner(mix_stderr=False) result: Result = runner.invoke( cli, f"--v3-api suite demo", catch_exceptions=False, ) assert result.exit_code == 0 stdout: str = result.stdout assert "This command is not supported in the v3 (Batch Request) API." in stdout expected_call_args_list = [ mock.call( {"event_payload": {}, "event": "data_context.__init__", "success": True} ), mock.call( { "event": "cli.suite.demo.begin", "event_payload": {"api_version": "v3"}, "success": True, } ), mock.call( { "event": "cli.suite.demo.end", "event_payload": {"api_version": "v3"}, "success": True, } ), ] assert mock_emit.call_args_list == expected_call_args_list assert mock_emit.call_count == len(expected_call_args_list) assert_no_logging_messages_or_tracebacks( my_caplog=caplog, click_result=result, ) @mock.patch( "great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit" ) @mock.patch("subprocess.call", return_value=True, side_effect=None) @mock.patch("webbrowser.open", return_value=True, side_effect=None) def test_suite_new_non_interactive_with_suite_name_prompted_default_runs_notebook_opens_jupyter( mock_webbroser, mock_subprocess, mock_emit, caplog, monkeypatch, titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled, ): context: DataContext = titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled monkeypatch.chdir(os.path.dirname(context.root_directory)) project_dir: str = context.root_directory uncommitted_dir: str = 
os.path.join(project_dir, "uncommitted") expectation_suite_name: str = "warning" runner: CliRunner = CliRunner(mix_stderr=False) result: Result = runner.invoke( cli, f"--v3-api suite new", input="\n", catch_exceptions=False, ) assert result.exit_code == 0 stdout: str = result.stdout assert "Select a datasource" not in stdout assert f"Name the new Expectation Suite [warning]:" in stdout assert "Opening a notebook for you now to edit your expectation suite!" in stdout assert "If you wish to avoid this you can add the `--no-jupyter` flag." in stdout expected_suite_path: str = os.path.join( project_dir, "expectations", f"{expectation_suite_name}.json" ) assert os.path.isfile(expected_suite_path) expected_notebook_path: str = os.path.join( uncommitted_dir, f"edit_{expectation_suite_name}.ipynb" ) assert os.path.isfile(expected_notebook_path) run_notebook( notebook_path=expected_notebook_path, notebook_dir=uncommitted_dir, string_to_be_replaced="context.open_data_docs(resource_identifier=suite_identifier)", replacement_string="", ) context = DataContext(context_root_dir=project_dir) assert expectation_suite_name in context.list_expectation_suite_names() suite: ExpectationSuite = context.get_expectation_suite( expectation_suite_name=expectation_suite_name ) assert suite.expectations == [] assert mock_subprocess.call_count == 1 call_args: List[str] = mock_subprocess.call_args[0][0] assert call_args[0] == "jupyter" assert call_args[1] == "notebook" assert expected_notebook_path in call_args[2] assert mock_webbroser.call_count == 0 assert mock_emit.call_count == 4 assert mock_emit.call_args_list == [ mock.call( {"event_payload": {}, "event": "data_context.__init__", "success": True} ), mock.call( { "event": "cli.suite.new.begin", "event_payload": {"api_version": "v3"}, "success": True, } ), mock.call( { "event": "cli.suite.new.end", "event_payload": {"api_version": "v3"}, "success": True, } ), mock.call( {"event_payload": {}, "event": "data_context.__init__", "success": 
True} ), ] assert_no_logging_messages_or_tracebacks( my_caplog=caplog, click_result=result, ) @mock.patch( "great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit" ) @mock.patch("subprocess.call", return_value=True, side_effect=None) @mock.patch("webbrowser.open", return_value=True, side_effect=None) def test_suite_new_non_interactive_with_suite_name_prompted_custom_runs_notebook_opens_jupyter( mock_webbroser, mock_subprocess, mock_emit, caplog, monkeypatch, titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled, ): context: DataContext = titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled monkeypatch.chdir(os.path.dirname(context.root_directory)) project_dir: str = context.root_directory uncommitted_dir: str = os.path.join(project_dir, "uncommitted") expectation_suite_name: str = "test_suite_name" runner: CliRunner = CliRunner(mix_stderr=False) result: Result = runner.invoke( cli, f"--v3-api suite new", input=f"1\n{expectation_suite_name}\n", catch_exceptions=False, ) assert result.exit_code == 0 stdout: str = result.stdout assert "Select a datasource" not in stdout assert f"Name the new Expectation Suite [warning]:" in stdout assert "Opening a notebook for you now to edit your expectation suite!" in stdout assert "If you wish to avoid this you can add the `--no-jupyter` flag." 
in stdout expected_suite_path: str = os.path.join( project_dir, "expectations", f"{expectation_suite_name}.json" ) assert os.path.isfile(expected_suite_path) expected_notebook_path: str = os.path.join( uncommitted_dir, f"edit_{expectation_suite_name}.ipynb" ) assert os.path.isfile(expected_notebook_path) run_notebook( notebook_path=expected_notebook_path, notebook_dir=uncommitted_dir, string_to_be_replaced="context.open_data_docs(resource_identifier=suite_identifier)", replacement_string="", ) context = DataContext(context_root_dir=project_dir) assert expectation_suite_name in context.list_expectation_suite_names() suite: ExpectationSuite = context.get_expectation_suite( expectation_suite_name=expectation_suite_name ) assert suite.expectations == [] assert mock_subprocess.call_count == 1 call_args: List[str] = mock_subprocess.call_args[0][0] assert call_args[0] == "jupyter" assert call_args[1] == "notebook" assert expected_notebook_path in call_args[2] assert mock_webbroser.call_count == 0 assert mock_emit.call_count == 4 assert mock_emit.call_args_list == [ mock.call( {"event_payload": {}, "event": "data_context.__init__", "success": True} ), mock.call( { "event": "cli.suite.new.begin", "event_payload": {"api_version": "v3"}, "success": True, } ), mock.call( { "event": "cli.suite.new.end", "event_payload": {"api_version": "v3"}, "success": True, } ), mock.call( {"event_payload": {}, "event": "data_context.__init__", "success": True} ), ] assert_no_logging_messages_or_tracebacks( my_caplog=caplog, click_result=result, ) @mock.patch( "great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit" ) @mock.patch("subprocess.call", return_value=True, side_effect=None) @mock.patch("webbrowser.open", return_value=True, side_effect=None) def test_suite_new_non_interactive_with_suite_name_arg_custom_runs_notebook_opens_jupyter( mock_webbroser, mock_subprocess, mock_emit, caplog, monkeypatch, 
titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled, ): context: DataContext = titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled monkeypatch.chdir(os.path.dirname(context.root_directory)) project_dir: str = context.root_directory uncommitted_dir: str = os.path.join(project_dir, "uncommitted") expectation_suite_name: str = "test_suite_name" runner: CliRunner = CliRunner(mix_stderr=False) result: Result = runner.invoke( cli, f"--v3-api suite new --expectation-suite {expectation_suite_name}", catch_exceptions=False, ) assert result.exit_code == 0 stdout: str = result.stdout assert "Select a datasource" not in stdout assert "Opening a notebook for you now to edit your expectation suite!" in stdout assert "If you wish to avoid this you can add the `--no-jupyter` flag." in stdout expected_suite_path: str = os.path.join( project_dir, "expectations", f"{expectation_suite_name}.json" ) assert os.path.isfile(expected_suite_path) expected_notebook_path: str = os.path.join( uncommitted_dir, f"edit_{expectation_suite_name}.ipynb" ) assert os.path.isfile(expected_notebook_path) run_notebook( notebook_path=expected_notebook_path, notebook_dir=uncommitted_dir, string_to_be_replaced="context.open_data_docs(resource_identifier=suite_identifier)", replacement_string="", ) context = DataContext(context_root_dir=project_dir) assert expectation_suite_name in context.list_expectation_suite_names() suite: ExpectationSuite = context.get_expectation_suite( expectation_suite_name=expectation_suite_name ) assert suite.expectations == [] assert mock_subprocess.call_count == 1 call_args: List[str] = mock_subprocess.call_args[0][0] assert call_args[0] == "jupyter" assert call_args[1] == "notebook" assert expected_notebook_path in call_args[2] assert mock_webbroser.call_count == 0 assert mock_emit.call_count == 4 assert mock_emit.call_args_list == [ mock.call( {"event_payload": {}, "event": 
"data_context.__init__", "success": True} ), mock.call( { "event": "cli.suite.new.begin", "event_payload": {"api_version": "v3"}, "success": True, } ), mock.call( { "event": "cli.suite.new.end", "event_payload": {"api_version": "v3"}, "success": True, } ), mock.call( {"event_payload": {}, "event": "data_context.__init__", "success": True} ), ] assert_no_logging_messages_or_tracebacks( my_caplog=caplog, click_result=result, ) @mock.patch( "great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit" ) @mock.patch("subprocess.call", return_value=True, side_effect=None) @mock.patch("webbrowser.open", return_value=True, side_effect=None) def test_suite_new_non_interactive_with_suite_name_arg_custom_runs_notebook_no_jupyter( mock_webbroser, mock_subprocess, mock_emit, caplog, monkeypatch, titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled, ): context: DataContext = titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled monkeypatch.chdir(os.path.dirname(context.root_directory)) project_dir: str = context.root_directory uncommitted_dir: str = os.path.join(project_dir, "uncommitted") expectation_suite_name: str = "test_suite_name" runner: CliRunner = CliRunner(mix_stderr=False) result: Result = runner.invoke( cli, f"--v3-api suite new --expectation-suite {expectation_suite_name} --no-jupyter", catch_exceptions=False, ) assert result.exit_code == 0 stdout: str = result.stdout assert "Select a datasource" not in stdout assert ( "Opening a notebook for you now to edit your expectation suite!" not in stdout ) assert ( "If you wish to avoid this you can add the `--no-jupyter` flag." 
not in stdout ) expected_suite_path: str = os.path.join( project_dir, "expectations", f"{expectation_suite_name}.json" ) assert os.path.isfile(expected_suite_path) expected_notebook_path: str = os.path.join( uncommitted_dir, f"edit_{expectation_suite_name}.ipynb" ) assert os.path.isfile(expected_notebook_path) run_notebook( notebook_path=expected_notebook_path, notebook_dir=uncommitted_dir, string_to_be_replaced="context.open_data_docs(resource_identifier=suite_identifier)", replacement_string="", ) context = DataContext(context_root_dir=project_dir) assert expectation_suite_name in context.list_expectation_suite_names() suite: ExpectationSuite = context.get_expectation_suite( expectation_suite_name=expectation_suite_name ) assert suite.expectations == [] assert mock_subprocess.call_count == 0 assert mock_webbroser.call_count == 0 assert mock_emit.call_count == 4 assert mock_emit.call_args_list == [ mock.call( {"event_payload": {}, "event": "data_context.__init__", "success": True} ), mock.call( { "event": "cli.suite.new.begin", "event_payload": {"api_version": "v3"}, "success": True, } ), mock.call( { "event": "cli.suite.new.end", "event_payload": {"api_version": "v3"}, "success": True, } ), mock.call( {"event_payload": {}, "event": "data_context.__init__", "success": True} ), ] assert_no_logging_messages_or_tracebacks( my_caplog=caplog, click_result=result, ) @mock.patch( "great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit" ) @mock.patch("subprocess.call", return_value=True, side_effect=None) @mock.patch("webbrowser.open", return_value=True, side_effect=None) def test_suite_new_interactive_nonexistent_batch_request_json_file_raises_error( mock_webbroser, mock_subprocess, mock_emit, caplog, monkeypatch, titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled, ): context: DataContext = titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled 
monkeypatch.chdir(os.path.dirname(context.root_directory)) project_dir: str = context.root_directory expectation_suite_name: str = "test_suite_name" runner: CliRunner = CliRunner(mix_stderr=False) result: Result = runner.invoke( cli, f"""--v3-api suite new --expectation-suite {expectation_suite_name} --interactive --batch-request nonexistent_file.json --no-jupyter """, catch_exceptions=False, ) assert result.exit_code == 1 stdout: str = result.stdout assert 'The JSON file with the path "nonexistent_file.json' in stdout context = DataContext(context_root_dir=project_dir) assert expectation_suite_name not in context.list_expectation_suite_names() assert mock_subprocess.call_count == 0 assert mock_webbroser.call_count == 0 assert mock_emit.call_count == 4 assert mock_emit.call_args_list == [ mock.call( {"event_payload": {}, "event": "data_context.__init__", "success": True} ), mock.call( { "event": "cli.suite.new.begin", "event_payload": {"api_version": "v3"}, "success": True, } ), mock.call( { "event": "cli.suite.new.end", "event_payload": {"api_version": "v3"}, "success": False, } ), mock.call( {"event_payload": {}, "event": "data_context.__init__", "success": True} ), ] assert_no_logging_messages_or_tracebacks( my_caplog=caplog, click_result=result, ) @mock.patch( "great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit" ) @mock.patch("subprocess.call", return_value=True, side_effect=None) @mock.patch("webbrowser.open", return_value=True, side_effect=None) def test_suite_new_interactive_malformed_batch_request_json_file_raises_error( mock_webbroser, mock_subprocess, mock_emit, caplog, monkeypatch, titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled, ): context: DataContext = titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled monkeypatch.chdir(os.path.dirname(context.root_directory)) project_dir: str = context.root_directory uncommitted_dir: 
str = os.path.join(project_dir, "uncommitted") expectation_suite_name: str = "test_suite_name" batch_request_file_path: str = os.path.join(uncommitted_dir, f"batch_request.json") with open(batch_request_file_path, "w") as json_file: json_file.write("not_proper_json") runner: CliRunner = CliRunner(mix_stderr=False) result: Result = runner.invoke( cli, f"""--v3-api suite new --expectation-suite {expectation_suite_name} --interactive --batch-request {batch_request_file_path} --no-jupyter """, catch_exceptions=False, ) assert result.exit_code == 1 stdout: str = result.stdout assert "Error" in stdout assert "occurred while attempting to load the JSON file with the path" in stdout context = DataContext(context_root_dir=project_dir) assert expectation_suite_name not in context.list_expectation_suite_names() assert mock_subprocess.call_count == 0 assert mock_webbroser.call_count == 0 assert mock_emit.call_count == 4 assert mock_emit.call_args_list == [ mock.call( {"event_payload": {}, "event": "data_context.__init__", "success": True} ), mock.call( { "event": "cli.suite.new.begin", "event_payload": {"api_version": "v3"}, "success": True, } ), mock.call( { "event": "cli.suite.new.end", "event_payload": {"api_version": "v3"}, "success": False, } ), mock.call( {"event_payload": {}, "event": "data_context.__init__", "success": True} ), ] assert_no_logging_messages_or_tracebacks( my_caplog=caplog, click_result=result, ) @mock.patch( "great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit" ) @mock.patch("subprocess.call", return_value=True, side_effect=None) @mock.patch("webbrowser.open", return_value=True, side_effect=None) def test_suite_new_interactive_valid_batch_request_from_json_file_in_notebook_runs_notebook_no_jupyter( mock_webbroser, mock_subprocess, mock_emit, caplog, monkeypatch, titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled, ): context: DataContext = 
titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled monkeypatch.chdir(os.path.dirname(context.root_directory)) project_dir: str = context.root_directory uncommitted_dir: str = os.path.join(project_dir, "uncommitted") expectation_suite_name: str = "test_suite_name" batch_request: dict = { "datasource_name": "my_datasource", "data_connector_name": "my_basic_data_connector", "data_asset_name": "Titanic_1911", } batch_request_file_path: str = os.path.join(uncommitted_dir, f"batch_request.json") with open(batch_request_file_path, "w") as json_file: json.dump(batch_request, json_file) runner: CliRunner = CliRunner(mix_stderr=False) result: Result = runner.invoke( cli, f"""--v3-api suite new --expectation-suite {expectation_suite_name} --interactive --batch-request {batch_request_file_path} --no-jupyter """, catch_exceptions=False, ) assert result.exit_code == 0 stdout: str = result.stdout assert "Error" not in stdout expected_suite_path: str = os.path.join( project_dir, "expectations", f"{expectation_suite_name}.json" ) assert os.path.isfile(expected_suite_path) expected_notebook_path: str = os.path.join( uncommitted_dir, f"edit_{expectation_suite_name}.ipynb" ) assert os.path.isfile(expected_notebook_path) batch_request_string: str = ( str(BatchRequest(**batch_request)) .replace("{\n", "{\n ") .replace(",\n", ",\n ") .replace("\n}", ",\n}") ) batch_request_string = fr"batch_request = {batch_request_string}" cells_of_interest_dict: Dict[int, dict] = find_code_in_notebook( nb=load_notebook_from_path(notebook_path=expected_notebook_path), search_string=batch_request_string, ) assert len(cells_of_interest_dict) == 1 cells_of_interest_dict: Dict[int, dict] = find_code_in_notebook( nb=load_notebook_from_path(notebook_path=expected_notebook_path), search_string="context.open_data_docs(resource_identifier=suite_identifier)", ) assert not cells_of_interest_dict cells_of_interest_dict: Dict[int, dict] = find_code_in_notebook( 
nb=load_notebook_from_path(notebook_path=expected_notebook_path), search_string="context.open_data_docs(resource_identifier=validation_result_identifier)", ) assert len(cells_of_interest_dict) == 1 run_notebook( notebook_path=expected_notebook_path, notebook_dir=uncommitted_dir, string_to_be_replaced="context.open_data_docs(resource_identifier=validation_result_identifier)", replacement_string="", ) context = DataContext(context_root_dir=project_dir) assert expectation_suite_name in context.list_expectation_suite_names() suite: ExpectationSuite = context.get_expectation_suite( expectation_suite_name=expectation_suite_name ) assert suite.expectations == [] assert mock_subprocess.call_count == 0 assert mock_webbroser.call_count == 0 assert mock_emit.call_count == 5 assert mock_emit.call_args_list == [ mock.call( {"event_payload": {}, "event": "data_context.__init__", "success": True} ), mock.call( { "event": "cli.suite.new.begin", "event_payload": {"api_version": "v3"}, "success": True, } ), mock.call( { "event_payload": { "anonymized_expectation_suite_name": "9df638a13b727807e51b13ec1839bcbe" }, "event": "data_context.save_expectation_suite", "success": True, } ), mock.call( { "event": "cli.suite.new.end", "event_payload": {"api_version": "v3"}, "success": True, } ), mock.call( {"event_payload": {}, "event": "data_context.__init__", "success": True} ), ] assert_no_logging_messages_or_tracebacks( my_caplog=caplog, click_result=result, ) @mock.patch( "great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit" ) def test_suite_edit_without_suite_name_raises_error( mock_emit, monkeypatch, empty_data_context_stats_enabled, ): """This is really only testing click missing arguments""" context: DataContext = empty_data_context_stats_enabled monkeypatch.chdir(os.path.dirname(context.root_directory)) runner: CliRunner = CliRunner(mix_stderr=False) result: Result = runner.invoke(cli, "--v3-api suite edit", catch_exceptions=False) assert 
result.exit_code == 2 assert ( 'Error: Missing argument "EXPECTATION_SUITE".' in result.stderr or "Error: Missing argument 'EXPECTATION_SUITE'." in result.stderr ) assert mock_emit.call_count == 2 assert mock_emit.call_args_list == [ mock.call( {"event_payload": {}, "event": "data_context.__init__", "success": True} ), mock.call( { "event": "cli.suite.edit.begin", "event_payload": {"api_version": "v3"}, "success": True, } ), ] @mock.patch( "great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit" ) def test_suite_edit_datasource_and_batch_request_error( mock_emit, monkeypatch, empty_data_context_stats_enabled, ): """This is really only testing click missing arguments""" context: DataContext = empty_data_context_stats_enabled monkeypatch.chdir(os.path.dirname(context.root_directory)) expectation_suite_name: str = "test_suite_name" # noinspection PyUnusedLocal suite: ExpectationSuite = context.create_expectation_suite( expectation_suite_name=expectation_suite_name ) assert ( context.list_expectation_suites()[0].expectation_suite_name == expectation_suite_name ) runner: CliRunner = CliRunner(mix_stderr=False) result: Result = runner.invoke( cli, f"--v3-api suite edit {expectation_suite_name} --datasource-name some_datasource_name --batch-request some_file.json --interactive", catch_exceptions=False, ) assert result.exit_code == 1 stdout: str = result.stdout assert ( "Only one of --datasource-name DATASOURCE_NAME and --batch-request <path to JSON file> options can be used." 
in stdout ) assert mock_emit.call_count == 3 assert mock_emit.call_args_list == [ mock.call( {"event_payload": {}, "event": "data_context.__init__", "success": True} ), mock.call( { "event": "cli.suite.edit.begin", "event_payload": {"api_version": "v3"}, "success": True, } ), mock.call( { "event": "cli.suite.edit.end", "event_payload": {"api_version": "v3"}, "success": False, } ), ] @mock.patch( "great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit" ) @mock.patch("subprocess.call", return_value=True, side_effect=None) @mock.patch("webbrowser.open", return_value=True, side_effect=None) def test_suite_edit_with_non_existent_suite_name_raises_error( mock_webbrowser, mock_subprocess, mock_emit, caplog, monkeypatch, empty_data_context_stats_enabled, ): """ The command should: - exit with a clear error message - NOT open Data Docs - NOT open jupyter """ context: DataContext = empty_data_context_stats_enabled assert not context.list_expectation_suites() monkeypatch.chdir(os.path.dirname(context.root_directory)) runner: CliRunner = CliRunner(mix_stderr=False) result: Result = runner.invoke( cli, f"--v3-api suite edit not_a_real_suite", catch_exceptions=False, ) assert result.exit_code == 1 stdout: str = result.stdout assert "Could not find a suite named `not_a_real_suite`." 
in stdout assert "by running `great_expectations suite list`" in stdout assert mock_subprocess.call_count == 0 assert mock_webbrowser.call_count == 0 assert mock_emit.call_count == 3 assert mock_emit.call_args_list == [ mock.call( {"event_payload": {}, "event": "data_context.__init__", "success": True} ), mock.call( { "event": "cli.suite.edit.begin", "event_payload": {"api_version": "v3"}, "success": True, } ), mock.call( { "event": "cli.suite.edit.end", "event_payload": {"api_version": "v3"}, "success": False, } ), ] assert_no_logging_messages_or_tracebacks( my_caplog=caplog, click_result=result, ) @mock.patch( "great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit" ) @mock.patch("subprocess.call", return_value=True, side_effect=None) @mock.patch("webbrowser.open", return_value=True, side_effect=None) def test_suite_edit_with_non_existent_datasource_shows_helpful_error_message( mock_webbrowser, mock_subprocess, mock_emit, caplog, monkeypatch, empty_data_context_stats_enabled, ): """ The command should: - exit with a clear error message - NOT open Data Docs - NOT open jupyter """ context: DataContext = empty_data_context_stats_enabled monkeypatch.chdir(os.path.dirname(context.root_directory)) expectation_suite_name: str = "test_suite_name" # noinspection PyUnusedLocal suite: ExpectationSuite = context.create_expectation_suite( expectation_suite_name=expectation_suite_name ) assert ( context.list_expectation_suites()[0].expectation_suite_name == expectation_suite_name ) runner: CliRunner = CliRunner(mix_stderr=False) result: Result = runner.invoke( cli, f"--v3-api suite edit {expectation_suite_name} --interactive --datasource-name not_real", catch_exceptions=False, ) assert result.exit_code == 1 stdout: str = result.stdout assert ( "Unable to load datasource `not_real` -- no configuration found or invalid configuration." 
in stdout ) assert mock_subprocess.call_count == 0 assert mock_webbrowser.call_count == 0 assert mock_emit.call_count == 3 assert mock_emit.call_args_list == [ mock.call( {"event_payload": {}, "event": "data_context.__init__", "success": True} ), mock.call( { "event": "cli.suite.edit.begin", "event_payload": {"api_version": "v3"}, "success": True, } ), mock.call( { "event": "cli.suite.edit.end", "event_payload": {"api_version": "v3"}, "success": False, } ), ] assert_no_logging_messages_or_tracebacks( my_caplog=caplog, click_result=result, ) @mock.patch( "great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit" ) @mock.patch("subprocess.call", return_value=True, side_effect=None) @mock.patch("webbrowser.open", return_value=True, side_effect=None) def test_suite_edit_multiple_datasources_with_no_additional_args_without_citations_runs_notebook_opens_jupyter( mock_webbrowser, mock_subprocess, mock_emit, caplog, monkeypatch, titanic_v013_multi_datasource_pandas_data_context_with_checkpoints_v1_with_empty_store_stats_enabled, ): """ Here we verify that the "suite edit" command helps the user to specify batch_request when it is called without the optional command-line arguments that specify the batch. First, we call the "suite new" command to create the expectation suite our test will edit -- this step is a just a setup. We then call the "suite edit" command without any optional command-line arguments. This means that the command will help us specify batch_request interactively. The data context has two datasources -- we choose one of them. After that, we select a data connector and, finally, select a data asset from the list. 
The command should:
    - NOT open Data Docs
    - open jupyter
    """
    context: DataContext = titanic_v013_multi_datasource_pandas_data_context_with_checkpoints_v1_with_empty_store_stats_enabled
    monkeypatch.chdir(os.path.dirname(context.root_directory))
    project_dir: str = context.root_directory
    uncommitted_dir: str = os.path.join(project_dir, "uncommitted")
    expectation_suite_name: str = "test_suite_name"
    batch_request: dict = {
        "datasource_name": "my_datasource",
        "data_connector_name": "my_basic_data_connector",
        "data_asset_name": "Titanic_1911",
        "limit": 1000,
    }
    # Render the batch_request the way the generated notebook renders it, so the
    # exact code cell can be located below.
    # NOTE(review): the whitespace inside the replacement strings must match the
    # notebook renderer's output -- confirm against the renderer if this drifts.
    batch_request_string: str = (
        str(BatchRequest(**batch_request))
        .replace("{\n", "{\n    ")
        .replace(",\n", ",\n    ")
        .replace("\n}", ",\n}")
    )
    batch_request_string = fr"batch_request = {batch_request_string}"

    # Setup phase: create the suite interactively via "suite new"
    # (input picks datasource #2, connector #1, asset #1, then accepts default).
    runner: CliRunner = CliRunner(mix_stderr=False)
    result: Result = runner.invoke(
        cli,
        [
            "--v3-api",
            "suite",
            "new",
            "--expectation-suite",
            f"{expectation_suite_name}",
            "--interactive",
            "--no-jupyter",
        ],
        input="2\n1\n1\n\n",
        catch_exceptions=False,
    )
    assert result.exit_code == 0
    stdout: str = result.stdout
    assert "A batch of data is required to edit the suite" in stdout
    assert "Select a datasource" in stdout

    assert mock_webbrowser.call_count == 0
    mock_webbrowser.reset_mock()
    assert mock_subprocess.call_count == 0
    mock_subprocess.reset_mock()

    # remove the citations from the suite
    context = DataContext(context_root_dir=project_dir)
    suite: ExpectationSuite = context.get_expectation_suite(
        expectation_suite_name=expectation_suite_name
    )
    assert isinstance(suite, ExpectationSuite)
    suite.meta.pop("citations", None)
    context.save_expectation_suite(expectation_suite=suite)

    # Actual testing really starts here
    runner = CliRunner(mix_stderr=False)
    monkeypatch.chdir(os.path.dirname(context.root_directory))
    result = runner.invoke(
        cli,
        [
            "--v3-api",
            "suite",
            "edit",
            f"{expectation_suite_name}",
            "--interactive",
        ],
        input="2\n1\n1\n\n",
        catch_exceptions=False,
    )
    assert result.exit_code == 0
    stdout = result.stdout
    # Without citations the command must prompt for the batch interactively again.
    assert "A batch of data is required to edit the suite" in stdout
    assert "Select a datasource" in stdout

    expected_notebook_path: str = os.path.join(
        uncommitted_dir, f"edit_{expectation_suite_name}.ipynb"
    )
    assert os.path.isfile(expected_notebook_path)
    expected_suite_path: str = os.path.join(
        project_dir, "expectations", f"{expectation_suite_name}.json"
    )
    assert os.path.isfile(expected_suite_path)

    # The generated notebook must contain the rendered batch_request cell ...
    cells_of_interest_dict: Dict[int, dict] = find_code_in_notebook(
        nb=load_notebook_from_path(notebook_path=expected_notebook_path),
        search_string=batch_request_string,
    )
    assert len(cells_of_interest_dict) == 1

    # ... must NOT open Data Docs for the suite ...
    cells_of_interest_dict: Dict[int, dict] = find_code_in_notebook(
        nb=load_notebook_from_path(notebook_path=expected_notebook_path),
        search_string="context.open_data_docs(resource_identifier=suite_identifier)",
    )
    assert not cells_of_interest_dict

    # ... and must open Data Docs for the validation result (stripped before run).
    cells_of_interest_dict: Dict[int, dict] = find_code_in_notebook(
        nb=load_notebook_from_path(notebook_path=expected_notebook_path),
        search_string="context.open_data_docs(resource_identifier=validation_result_identifier)",
    )
    assert len(cells_of_interest_dict) == 1

    run_notebook(
        notebook_path=expected_notebook_path,
        notebook_dir=uncommitted_dir,
        string_to_be_replaced="context.open_data_docs(resource_identifier=validation_result_identifier)",
        replacement_string="",
    )

    # jupyter was launched once; the browser (Data Docs) never.
    assert mock_subprocess.call_count == 1
    assert mock_webbrowser.call_count == 0

    assert mock_emit.call_count == 10
    assert mock_emit.call_args_list == [
        mock.call(
            {"event_payload": {}, "event": "data_context.__init__", "success": True}
        ),
        mock.call(
            {
                "event": "cli.suite.new.begin",
                "event_payload": {"api_version": "v3"},
                "success": True,
            }
        ),
        mock.call(
            {
                "event_payload": {
                    "anonymized_expectation_suite_name": "9df638a13b727807e51b13ec1839bcbe"
                },
                "event": "data_context.save_expectation_suite",
                "success": True,
            }
        ),
        mock.call(
            {
                "event": "cli.suite.new.end",
                "event_payload": {"api_version": "v3"},
                "success": True,
            }
        ),
        mock.call(
            {"event_payload": {}, "event": "data_context.__init__", "success": True}
        ),
        mock.call(
            {
                "event_payload": {
                    "anonymized_expectation_suite_name": "9df638a13b727807e51b13ec1839bcbe"
                },
                "event": "data_context.save_expectation_suite",
                "success": True,
            }
        ),
        mock.call(
            {"event_payload": {}, "event": "data_context.__init__", "success": True}
        ),
        mock.call(
            {
                "event": "cli.suite.edit.begin",
                "event_payload": {"api_version": "v3"},
                "success": True,
            }
        ),
        mock.call(
            {
                "event_payload": {
                    "anonymized_expectation_suite_name": "9df638a13b727807e51b13ec1839bcbe"
                },
                "event": "data_context.save_expectation_suite",
                "success": True,
            }
        ),
        mock.call(
            {
                "event": "cli.suite.edit.end",
                "event_payload": {
                    "anonymized_expectation_suite_name": "9df638a13b727807e51b13ec1839bcbe",
                    "api_version": "v3",
                },
                "success": True,
            }
        ),
    ]

    assert_no_logging_messages_or_tracebacks(
        my_caplog=caplog,
        click_result=result,
    )


@mock.patch(
    "great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit"
)
@mock.patch("subprocess.call", return_value=True, side_effect=None)
@mock.patch("webbrowser.open", return_value=True, side_effect=None)
def test_suite_edit_multiple_datasources_with_no_additional_args_with_citations_runs_notebook_opens_jupyter(
    mock_webbrowser,
    mock_subprocess,
    mock_emit,
    caplog,
    monkeypatch,
    titanic_v013_multi_datasource_pandas_data_context_with_checkpoints_v1_with_empty_store_stats_enabled,
):
    """
    Here we verify that the "suite edit" command uses the batch_request found in
    citations in the existing suite when it is called without the optional
    command-line arguments that specify the batch.

    First, we call the "suite new" command to create the expectation suite our
    test will edit -- this step is a just a setup.

    We then call the "suite edit" command without any optional command-line-arguments.
The command should:
    - NOT open Data Docs
    - open jupyter
    """
    context: DataContext = titanic_v013_multi_datasource_pandas_data_context_with_checkpoints_v1_with_empty_store_stats_enabled
    monkeypatch.chdir(os.path.dirname(context.root_directory))
    project_dir: str = context.root_directory
    uncommitted_dir: str = os.path.join(project_dir, "uncommitted")
    expectation_suite_name: str = "test_suite_name"
    batch_request: dict = {
        "datasource_name": "my_datasource",
        "data_connector_name": "my_basic_data_connector",
        "data_asset_name": "Titanic_1911",
        "limit": 1000,
    }
    # Rendered form of the batch_request as it appears in the generated notebook.
    # NOTE(review): the whitespace inside the replacement strings must match the
    # notebook renderer's output -- confirm against the renderer if this drifts.
    batch_request_string: str = (
        str(BatchRequest(**batch_request))
        .replace("{\n", "{\n    ")
        .replace(",\n", ",\n    ")
        .replace("\n}", ",\n}")
    )
    batch_request_string = fr"batch_request = {batch_request_string}"

    # Setup phase: create the suite interactively via "suite new"; this stores a
    # citation (batch_request) in the suite's meta, which "suite edit" reuses.
    runner: CliRunner = CliRunner(mix_stderr=False)
    result: Result = runner.invoke(
        cli,
        [
            "--v3-api",
            "suite",
            "new",
            "--expectation-suite",
            f"{expectation_suite_name}",
            "--interactive",
            "--no-jupyter",
        ],
        input="2\n1\n1\n\n",
        catch_exceptions=False,
    )
    assert result.exit_code == 0
    stdout: str = result.stdout
    assert "A batch of data is required to edit the suite" in stdout
    assert "Select a datasource" in stdout

    assert mock_webbrowser.call_count == 0
    mock_webbrowser.reset_mock()
    assert mock_subprocess.call_count == 0
    mock_subprocess.reset_mock()

    context = DataContext(context_root_dir=project_dir)
    suite: ExpectationSuite = context.get_expectation_suite(
        expectation_suite_name=expectation_suite_name
    )
    assert isinstance(suite, ExpectationSuite)

    # Actual testing really starts here
    runner = CliRunner(mix_stderr=False)
    monkeypatch.chdir(os.path.dirname(context.root_directory))
    result = runner.invoke(
        cli,
        [
            "--v3-api",
            "suite",
            "edit",
            f"{expectation_suite_name}",
            "--interactive",
        ],
        catch_exceptions=False,
    )
    assert result.exit_code == 0
    stdout = result.stdout
    # Citations supply the batch_request, so no interactive prompting occurs.
    assert "A batch of data is required to edit the suite" not in stdout
    assert "Select a datasource" not in stdout

    expected_notebook_path: str = os.path.join(
        uncommitted_dir, f"edit_{expectation_suite_name}.ipynb"
    )
    assert os.path.isfile(expected_notebook_path)
    expected_suite_path: str = os.path.join(
        project_dir, "expectations", f"{expectation_suite_name}.json"
    )
    assert os.path.isfile(expected_suite_path)

    cells_of_interest_dict: Dict[int, dict] = find_code_in_notebook(
        nb=load_notebook_from_path(notebook_path=expected_notebook_path),
        search_string=batch_request_string,
    )
    assert len(cells_of_interest_dict) == 1

    cells_of_interest_dict: Dict[int, dict] = find_code_in_notebook(
        nb=load_notebook_from_path(notebook_path=expected_notebook_path),
        search_string="context.open_data_docs(resource_identifier=suite_identifier)",
    )
    assert not cells_of_interest_dict

    cells_of_interest_dict: Dict[int, dict] = find_code_in_notebook(
        nb=load_notebook_from_path(notebook_path=expected_notebook_path),
        search_string="context.open_data_docs(resource_identifier=validation_result_identifier)",
    )
    assert len(cells_of_interest_dict) == 1

    run_notebook(
        notebook_path=expected_notebook_path,
        notebook_dir=uncommitted_dir,
        string_to_be_replaced="context.open_data_docs(resource_identifier=validation_result_identifier)",
        replacement_string="",
    )

    # jupyter was launched once; the browser (Data Docs) never.
    assert mock_subprocess.call_count == 1
    assert mock_webbrowser.call_count == 0

    assert mock_emit.call_count == 8
    assert mock_emit.call_args_list == [
        mock.call(
            {"event_payload": {}, "event": "data_context.__init__", "success": True}
        ),
        mock.call(
            {
                "event": "cli.suite.new.begin",
                "event_payload": {"api_version": "v3"},
                "success": True,
            }
        ),
        mock.call(
            {
                "event_payload": {
                    "anonymized_expectation_suite_name": "9df638a13b727807e51b13ec1839bcbe"
                },
                "event": "data_context.save_expectation_suite",
                "success": True,
            }
        ),
        mock.call(
            {
                "event": "cli.suite.new.end",
                "event_payload": {"api_version": "v3"},
                "success": True,
            }
        ),
        mock.call(
            {"event_payload": {}, "event": "data_context.__init__", "success": True}
        ),
        mock.call(
            {"event_payload": {}, "event": "data_context.__init__", "success": True}
        ),
        mock.call(
            {
                "event": "cli.suite.edit.begin",
                "event_payload": {"api_version": "v3"},
                "success": True,
            }
        ),
        mock.call(
            {
                "event": "cli.suite.edit.end",
                "event_payload": {
                    "anonymized_expectation_suite_name": "9df638a13b727807e51b13ec1839bcbe",
                    "api_version": "v3",
                },
                "success": True,
            }
        ),
    ]

    assert_no_logging_messages_or_tracebacks(
        my_caplog=caplog,
        click_result=result,
    )


@mock.patch(
    "great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit"
)
@mock.patch("subprocess.call", return_value=True, side_effect=None)
@mock.patch("webbrowser.open", return_value=True, side_effect=None)
def test_suite_edit_multiple_datasources_with_sql_with_no_additional_args_without_citations_runs_notebook_opens_jupyter(
    mock_webbrowser,
    mock_subprocess,
    mock_emit,
    caplog,
    monkeypatch,
    titanic_v013_multi_datasource_multi_execution_engine_data_context_with_checkpoints_v1_with_empty_store_stats_enabled,
):
    """
    Here we verify that the "suite edit" command helps the user to specify batch_request
    when it is called without the optional command-line arguments that specify the batch.

    First, we call the "suite new" command to create the expectation suite our test
    will edit -- this step is a just a setup (we use an SQL datasource for this test).

    We then call the "suite edit" command without any optional command-line arguments.
    This means that the command will help us specify batch_request interactively.

    The data context has two datasources -- we choose one of them.
    After that, we select a data connector and, finally, select a data asset from the list.
The command should:
    - NOT open Data Docs
    - open jupyter
    """
    context: DataContext = titanic_v013_multi_datasource_multi_execution_engine_data_context_with_checkpoints_v1_with_empty_store_stats_enabled
    monkeypatch.chdir(os.path.dirname(context.root_directory))
    project_dir: str = context.root_directory
    uncommitted_dir: str = os.path.join(project_dir, "uncommitted")
    expectation_suite_name: str = "test_suite_name"
    batch_request: dict = {
        "datasource_name": "my_sqlite_db_datasource",
        "data_connector_name": "whole_table",
        "data_asset_name": "titanic",
        "limit": 1000,
    }
    # Rendered form of the batch_request as it appears in the generated notebook.
    # NOTE(review): the whitespace inside the replacement strings must match the
    # notebook renderer's output -- confirm against the renderer if this drifts.
    batch_request_string: str = (
        str(BatchRequest(**batch_request))
        .replace("{\n", "{\n    ")
        .replace(",\n", ",\n    ")
        .replace("\n}", ",\n}")
    )
    batch_request_string = fr"batch_request = {batch_request_string}"

    # Setup phase: "suite new" against the SQL datasource
    # (input selects datasource #3, connector #2, confirms, asset #2).
    runner: CliRunner = CliRunner(mix_stderr=False)
    result: Result = runner.invoke(
        cli,
        [
            "--v3-api",
            "suite",
            "new",
            "--expectation-suite",
            f"{expectation_suite_name}",
            "--interactive",
            "--no-jupyter",
        ],
        input="3\n2\ny\n2\n",
        catch_exceptions=False,
    )
    assert result.exit_code == 0
    stdout: str = result.stdout
    assert "A batch of data is required to edit the suite" in stdout
    assert "Select a datasource" in stdout

    assert mock_webbrowser.call_count == 0
    mock_webbrowser.reset_mock()
    assert mock_subprocess.call_count == 0
    mock_subprocess.reset_mock()

    # remove the citations from the suite
    context = DataContext(context_root_dir=project_dir)
    suite: ExpectationSuite = context.get_expectation_suite(
        expectation_suite_name=expectation_suite_name
    )
    assert isinstance(suite, ExpectationSuite)
    suite.meta.pop("citations", None)
    context.save_expectation_suite(expectation_suite=suite)

    # Actual testing really starts here
    runner = CliRunner(mix_stderr=False)
    monkeypatch.chdir(os.path.dirname(context.root_directory))
    result = runner.invoke(
        cli,
        [
            "--v3-api",
            "suite",
            "edit",
            f"{expectation_suite_name}",
            "--interactive",
        ],
        input="3\n2\ny\n2\n",
        catch_exceptions=False,
    )
    assert result.exit_code == 0
    stdout = result.stdout
    # Without citations the command must prompt for the batch interactively again.
    assert "A batch of data is required to edit the suite" in stdout
    assert "Select a datasource" in stdout

    expected_notebook_path: str = os.path.join(
        uncommitted_dir, f"edit_{expectation_suite_name}.ipynb"
    )
    assert os.path.isfile(expected_notebook_path)
    expected_suite_path: str = os.path.join(
        project_dir, "expectations", f"{expectation_suite_name}.json"
    )
    assert os.path.isfile(expected_suite_path)

    cells_of_interest_dict: Dict[int, dict] = find_code_in_notebook(
        nb=load_notebook_from_path(notebook_path=expected_notebook_path),
        search_string=batch_request_string,
    )
    assert len(cells_of_interest_dict) == 1

    cells_of_interest_dict: Dict[int, dict] = find_code_in_notebook(
        nb=load_notebook_from_path(notebook_path=expected_notebook_path),
        search_string="context.open_data_docs(resource_identifier=suite_identifier)",
    )
    assert not cells_of_interest_dict

    cells_of_interest_dict: Dict[int, dict] = find_code_in_notebook(
        nb=load_notebook_from_path(notebook_path=expected_notebook_path),
        search_string="context.open_data_docs(resource_identifier=validation_result_identifier)",
    )
    assert len(cells_of_interest_dict) == 1

    run_notebook(
        notebook_path=expected_notebook_path,
        notebook_dir=uncommitted_dir,
        string_to_be_replaced="context.open_data_docs(resource_identifier=validation_result_identifier)",
        replacement_string="",
    )

    # jupyter was launched once; the browser (Data Docs) never.
    assert mock_subprocess.call_count == 1
    assert mock_webbrowser.call_count == 0

    assert mock_emit.call_count == 10
    assert mock_emit.call_args_list == [
        mock.call(
            {"event_payload": {}, "event": "data_context.__init__", "success": True}
        ),
        mock.call(
            {
                "event": "cli.suite.new.begin",
                "event_payload": {"api_version": "v3"},
                "success": True,
            }
        ),
        mock.call(
            {
                "event_payload": {
                    "anonymized_expectation_suite_name": "9df638a13b727807e51b13ec1839bcbe"
                },
                "event": "data_context.save_expectation_suite",
                "success": True,
            }
        ),
        mock.call(
            {
                "event": "cli.suite.new.end",
                "event_payload": {"api_version": "v3"},
                "success": True,
            }
        ),
        mock.call(
            {"event_payload": {}, "event": "data_context.__init__", "success": True}
        ),
        mock.call(
            {
                "event_payload": {
                    "anonymized_expectation_suite_name": "9df638a13b727807e51b13ec1839bcbe"
                },
                "event": "data_context.save_expectation_suite",
                "success": True,
            }
        ),
        mock.call(
            {"event_payload": {}, "event": "data_context.__init__", "success": True}
        ),
        mock.call(
            {
                "event": "cli.suite.edit.begin",
                "event_payload": {"api_version": "v3"},
                "success": True,
            }
        ),
        mock.call(
            {
                "event_payload": {
                    "anonymized_expectation_suite_name": "9df638a13b727807e51b13ec1839bcbe"
                },
                "event": "data_context.save_expectation_suite",
                "success": True,
            }
        ),
        mock.call(
            {
                "event": "cli.suite.edit.end",
                "event_payload": {
                    "anonymized_expectation_suite_name": "9df638a13b727807e51b13ec1839bcbe",
                    "api_version": "v3",
                },
                "success": True,
            }
        ),
    ]

    assert_no_logging_messages_or_tracebacks(
        my_caplog=caplog,
        click_result=result,
    )


@mock.patch(
    "great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit"
)
@mock.patch("subprocess.call", return_value=True, side_effect=None)
@mock.patch("webbrowser.open", return_value=True, side_effect=None)
def test_suite_edit_multiple_datasources_with_sql_with_no_additional_args_with_citations_runs_notebook_opens_jupyter(
    mock_webbrowser,
    mock_subprocess,
    mock_emit,
    caplog,
    monkeypatch,
    titanic_v013_multi_datasource_multi_execution_engine_data_context_with_checkpoints_v1_with_empty_store_stats_enabled,
):
    """
    Here we verify that the "suite edit" command uses the batch_request found in
    citations in the existing suite when it is called without the optional
    command-line arguments that specify the batch.

    First, we call the "suite new" command to create the expectation suite our
    test will edit -- this step is a just a setup (we use an SQL datasource for this test).

    We then call the "suite edit" command without any optional command-line-arguments.
The command should:
    - NOT open Data Docs
    - open jupyter
    """
    context: DataContext = titanic_v013_multi_datasource_multi_execution_engine_data_context_with_checkpoints_v1_with_empty_store_stats_enabled
    monkeypatch.chdir(os.path.dirname(context.root_directory))
    project_dir: str = context.root_directory
    uncommitted_dir: str = os.path.join(project_dir, "uncommitted")
    expectation_suite_name: str = "test_suite_name"
    batch_request: dict = {
        "datasource_name": "my_sqlite_db_datasource",
        "data_connector_name": "whole_table",
        "data_asset_name": "titanic",
        "limit": 1000,
    }
    # Rendered form of the batch_request as it appears in the generated notebook.
    # NOTE(review): the whitespace inside the replacement strings must match the
    # notebook renderer's output -- confirm against the renderer if this drifts.
    batch_request_string: str = (
        str(BatchRequest(**batch_request))
        .replace("{\n", "{\n    ")
        .replace(",\n", ",\n    ")
        .replace("\n}", ",\n}")
    )
    batch_request_string = fr"batch_request = {batch_request_string}"

    # Setup phase: "suite new" against the SQL datasource stores a citation.
    runner: CliRunner = CliRunner(mix_stderr=False)
    result: Result = runner.invoke(
        cli,
        [
            "--v3-api",
            "suite",
            "new",
            "--expectation-suite",
            f"{expectation_suite_name}",
            "--interactive",
            "--no-jupyter",
        ],
        input="3\n2\ny\n2\n",
        catch_exceptions=False,
    )
    assert result.exit_code == 0
    stdout: str = result.stdout
    assert "A batch of data is required to edit the suite" in stdout
    assert "Select a datasource" in stdout

    assert mock_webbrowser.call_count == 0
    mock_webbrowser.reset_mock()
    assert mock_subprocess.call_count == 0
    mock_subprocess.reset_mock()

    context = DataContext(context_root_dir=project_dir)
    suite: ExpectationSuite = context.get_expectation_suite(
        expectation_suite_name=expectation_suite_name
    )
    assert isinstance(suite, ExpectationSuite)

    # Actual testing really starts here
    runner = CliRunner(mix_stderr=False)
    monkeypatch.chdir(os.path.dirname(context.root_directory))
    result = runner.invoke(
        cli,
        [
            "--v3-api",
            "suite",
            "edit",
            f"{expectation_suite_name}",
            "--interactive",
        ],
        catch_exceptions=False,
    )
    assert result.exit_code == 0
    stdout = result.stdout
    # Citations supply the batch_request, so no interactive prompting occurs.
    assert "A batch of data is required to edit the suite" not in stdout
    assert "Select a datasource" not in stdout

    expected_notebook_path: str = os.path.join(
        uncommitted_dir, f"edit_{expectation_suite_name}.ipynb"
    )
    assert os.path.isfile(expected_notebook_path)
    expected_suite_path: str = os.path.join(
        project_dir, "expectations", f"{expectation_suite_name}.json"
    )
    assert os.path.isfile(expected_suite_path)

    cells_of_interest_dict: Dict[int, dict] = find_code_in_notebook(
        nb=load_notebook_from_path(notebook_path=expected_notebook_path),
        search_string=batch_request_string,
    )
    assert len(cells_of_interest_dict) == 1

    cells_of_interest_dict: Dict[int, dict] = find_code_in_notebook(
        nb=load_notebook_from_path(notebook_path=expected_notebook_path),
        search_string="context.open_data_docs(resource_identifier=suite_identifier)",
    )
    assert not cells_of_interest_dict

    cells_of_interest_dict: Dict[int, dict] = find_code_in_notebook(
        nb=load_notebook_from_path(notebook_path=expected_notebook_path),
        search_string="context.open_data_docs(resource_identifier=validation_result_identifier)",
    )
    assert len(cells_of_interest_dict) == 1

    run_notebook(
        notebook_path=expected_notebook_path,
        notebook_dir=uncommitted_dir,
        string_to_be_replaced="context.open_data_docs(resource_identifier=validation_result_identifier)",
        replacement_string="",
    )

    # jupyter was launched once; the browser (Data Docs) never.
    assert mock_subprocess.call_count == 1
    assert mock_webbrowser.call_count == 0

    assert mock_emit.call_count == 8
    assert mock_emit.call_args_list == [
        mock.call(
            {"event_payload": {}, "event": "data_context.__init__", "success": True}
        ),
        mock.call(
            {
                "event": "cli.suite.new.begin",
                "event_payload": {"api_version": "v3"},
                "success": True,
            }
        ),
        mock.call(
            {
                "event_payload": {
                    "anonymized_expectation_suite_name": "9df638a13b727807e51b13ec1839bcbe"
                },
                "event": "data_context.save_expectation_suite",
                "success": True,
            }
        ),
        mock.call(
            {
                "event": "cli.suite.new.end",
                "event_payload": {"api_version": "v3"},
                "success": True,
            }
        ),
        mock.call(
            {"event_payload": {}, "event": "data_context.__init__", "success": True}
        ),
        mock.call(
            {"event_payload": {}, "event": "data_context.__init__", "success": True}
        ),
        mock.call(
            {
                "event": "cli.suite.edit.begin",
                "event_payload": {"api_version": "v3"},
                "success": True,
            }
        ),
        mock.call(
            {
                "event": "cli.suite.edit.end",
                "event_payload": {
                    "anonymized_expectation_suite_name": "9df638a13b727807e51b13ec1839bcbe",
                    "api_version": "v3",
                },
                "success": True,
            }
        ),
    ]

    assert_no_logging_messages_or_tracebacks(
        my_caplog=caplog,
        click_result=result,
    )


@mock.patch(
    "great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit"
)
@mock.patch("subprocess.call", return_value=True, side_effect=None)
@mock.patch("webbrowser.open", return_value=True, side_effect=None)
def test_suite_edit_interactive_batch_request_without_datasource_json_file_raises_helpful_error(
    mock_webbroser,
    mock_subprocess,
    mock_emit,
    caplog,
    monkeypatch,
    empty_data_context_stats_enabled,
):
    """A --batch-request JSON file lacking "datasource_name" fails with a helpful error."""
    # NOTE(review): "mock_webbroser" is a typo for "mock_webbrowser"; it is safe
    # as-is because the decorator injects the mock positionally, not by name.
    context: DataContext = empty_data_context_stats_enabled
    monkeypatch.chdir(os.path.dirname(context.root_directory))
    project_dir: str = context.root_directory
    uncommitted_dir: str = os.path.join(project_dir, "uncommitted")

    expectation_suite_name: str = "test_suite_name"

    # noinspection PyUnusedLocal
    suite: ExpectationSuite = context.create_expectation_suite(
        expectation_suite_name=expectation_suite_name
    )
    assert (
        context.list_expectation_suites()[0].expectation_suite_name
        == expectation_suite_name
    )

    # Deliberately omit "datasource_name" from the serialized batch_request.
    batch_request: dict = {
        "data_connector_name": "my_basic_data_connector",
        "data_asset_name": "Titanic_1911",
    }

    batch_request_file_path: str = os.path.join(
        uncommitted_dir, f"batch_request_missing_datasource.json"
    )
    with open(batch_request_file_path, "w") as json_file:
        json.dump(batch_request, json_file)

    runner: CliRunner = CliRunner(mix_stderr=False)
    result: Result = runner.invoke(
        cli,
        f"""--v3-api suite edit {expectation_suite_name} --interactive --batch-request {batch_request_file_path} --no-jupyter """,
        catch_exceptions=False,
    )
    assert result.exit_code == 1

    stdout: str = result.stdout
    assert "A batch of data is required to edit the suite" not in stdout
    assert "Select a datasource" not in stdout
    assert (
        "Please check that your batch_request is valid and is able to load a batch."
        in stdout
    )
    assert 'The type of an datasource name must be a string (Python "str").' in stdout

    assert mock_subprocess.call_count == 0
    assert mock_webbroser.call_count == 0

    assert mock_emit.call_count == 3
    assert mock_emit.call_args_list == [
        mock.call(
            {"event_payload": {}, "event": "data_context.__init__", "success": True}
        ),
        mock.call(
            {
                "event": "cli.suite.edit.begin",
                "event_payload": {"api_version": "v3"},
                "success": True,
            }
        ),
        mock.call(
            {
                "event": "cli.suite.edit.end",
                "event_payload": {"api_version": "v3"},
                "success": False,
            }
        ),
    ]

    assert_no_logging_messages_or_tracebacks(
        my_caplog=caplog,
        click_result=result,
    )


@mock.patch(
    "great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit"
)
def test_suite_list_with_zero_suites(
    mock_emit, caplog, monkeypatch, empty_data_context_stats_enabled
):
    """CLI `suite list` with no suites informs the user and emits begin/end events."""
    context: DataContext = empty_data_context_stats_enabled
    config_file_path: str = os.path.join(
        context.root_directory, "great_expectations.yml"
    )
    assert os.path.exists(config_file_path)
    monkeypatch.chdir(os.path.dirname(context.root_directory))

    runner: CliRunner = CliRunner(mix_stderr=False)
    result: Result = runner.invoke(
        cli,
        f"--v3-api suite list",
        catch_exceptions=False,
    )
    assert result.exit_code == 0

    stdout: str = result.stdout
    assert "No Expectation Suites found" in stdout

    assert mock_emit.call_count == 3
    assert mock_emit.call_args_list == [
        mock.call(
            {"event_payload": {}, "event": "data_context.__init__", "success": True}
        ),
        mock.call(
            {
                "event": "cli.suite.list.begin",
                "event_payload": {"api_version": "v3"},
                "success": True,
            }
        ),
        mock.call(
            {
                "event": "cli.suite.list.end",
                "event_payload": {"api_version": "v3"},
                "success": True,
            }
        ),
    ]

    assert_no_logging_messages_or_tracebacks(
        my_caplog=caplog,
        click_result=result,
    )


@mock.patch(
    "great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit"
)
def test_suite_list_with_one_suite(
    mock_emit, caplog, monkeypatch, empty_data_context_stats_enabled
):
    """CLI `suite list` with one suite prints its fully qualified name."""
    context: DataContext = empty_data_context_stats_enabled
    monkeypatch.chdir(os.path.dirname(context.root_directory))
    project_dir: str = context.root_directory
    config_file_path: str = os.path.join(project_dir, "great_expectations.yml")
    assert os.path.exists(config_file_path)

    expectation_suite_dir_name: str = "a_dir"
    expectation_suite_name: str = "test_suite_name"

    # noinspection PyUnusedLocal
    suite: ExpectationSuite = context.create_expectation_suite(
        expectation_suite_name=f"{expectation_suite_dir_name}.{expectation_suite_name}"
    )

    runner: CliRunner = CliRunner(mix_stderr=False)
    result: Result = runner.invoke(
        cli,
        f"--v3-api suite list",
        catch_exceptions=False,
    )
    assert result.exit_code == 0

    stdout: str = result.stdout
    assert "1 Expectation Suite found" in stdout
    assert f"{expectation_suite_dir_name}.{expectation_suite_name}" in stdout

    assert mock_emit.call_count == 3
    assert mock_emit.call_args_list == [
        mock.call(
            {"event_payload": {}, "event": "data_context.__init__", "success": True}
        ),
        mock.call(
            {
                "event": "cli.suite.list.begin",
                "event_payload": {"api_version": "v3"},
                "success": True,
            }
        ),
        mock.call(
            {
                "event": "cli.suite.list.end",
                "event_payload": {"api_version": "v3"},
                "success": True,
            }
        ),
    ]

    assert_no_logging_messages_or_tracebacks(
        my_caplog=caplog,
        click_result=result,
    )


@mock.patch(
    "great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit"
)
def test_suite_list_with_multiple_suites(
    mock_emit, caplog, monkeypatch, empty_data_context_stats_enabled
):
    """CLI `suite list` with several suites prints the count and each name."""
    context: DataContext = empty_data_context_stats_enabled
    monkeypatch.chdir(os.path.dirname(context.root_directory))
    project_dir: str = context.root_directory

    # noinspection PyUnusedLocal
    suite_0: ExpectationSuite = context.create_expectation_suite(
        expectation_suite_name="a.warning"
    )
    # noinspection PyUnusedLocal
    suite_1: ExpectationSuite = context.create_expectation_suite(
        expectation_suite_name="b.warning"
    )
    # noinspection PyUnusedLocal
    suite_2: ExpectationSuite = context.create_expectation_suite(
        expectation_suite_name="c.warning"
    )

    config_file_path: str = os.path.join(project_dir, "great_expectations.yml")
    assert os.path.exists(config_file_path)

    runner: CliRunner = CliRunner(mix_stderr=False)
    result: Result = runner.invoke(
        cli,
        f"--v3-api suite list",
        catch_exceptions=False,
    )
    assert result.exit_code == 0

    stdout: str = result.stdout
    assert "3 Expectation Suites found:" in stdout
    assert "a.warning" in stdout
    assert "b.warning" in stdout
    assert "c.warning" in stdout

    assert mock_emit.call_count == 3
    assert mock_emit.call_args_list == [
        mock.call(
            {"event_payload": {}, "event": "data_context.__init__", "success": True}
        ),
        mock.call(
            {
                "event": "cli.suite.list.begin",
                "event_payload": {"api_version": "v3"},
                "success": True,
            }
        ),
        mock.call(
            {
                "event": "cli.suite.list.end",
                "event_payload": {"api_version": "v3"},
                "success": True,
            }
        ),
    ]

    assert_no_logging_messages_or_tracebacks(
        my_caplog=caplog,
        click_result=result,
    )


@mock.patch(
    "great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit"
)
def test_suite_delete_with_zero_suites(
    mock_emit, caplog, monkeypatch, empty_data_context_stats_enabled
):
    """CLI `suite delete` in a project with no suites exits 1 with a clear message."""
    context: DataContext = empty_data_context_stats_enabled
    monkeypatch.chdir(os.path.dirname(context.root_directory))

    runner: CliRunner = CliRunner(mix_stderr=False)
    result: Result = runner.invoke(
        cli,
        f"--v3-api suite delete not_a_suite",
        catch_exceptions=False,
    )
    assert result.exit_code == 1

    stdout: str = result.stdout
    assert "No expectation suites found in the project" in stdout

    assert mock_emit.call_count == 3
    assert mock_emit.call_args_list == [
        mock.call(
            {"event_payload": {}, "event": "data_context.__init__", "success": True}
        ),
        mock.call(
            {
                "event": "cli.suite.delete.begin",
                "event_payload": {"api_version": "v3"},
                "success": True,
            }
        ),
        mock.call(
            {
                "event": "cli.suite.delete.end",
                "event_payload":
{"api_version": "v3"},
                "success": False,
            }
        ),
    ]

    assert_no_logging_messages_or_tracebacks(
        my_caplog=caplog,
        click_result=result,
    )


@mock.patch(
    "great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit"
)
def test_suite_delete_with_non_existent_suite(
    mock_emit, caplog, monkeypatch, empty_data_context_stats_enabled
):
    """CLI `suite delete` for a name that does not exist exits 1 with a clear message."""
    context: DataContext = empty_data_context_stats_enabled
    monkeypatch.chdir(os.path.dirname(context.root_directory))

    expectation_suite_name: str = "test_suite_name"
    suite: ExpectationSuite = context.create_expectation_suite(
        expectation_suite_name=expectation_suite_name
    )
    context.save_expectation_suite(expectation_suite=suite)
    assert (
        context.list_expectation_suites()[0].expectation_suite_name
        == expectation_suite_name
    )
    # Discard the events emitted during the setup above; only the CLI run counts.
    mock_emit.reset_mock()

    runner: CliRunner = CliRunner(mix_stderr=False)
    result: Result = runner.invoke(
        cli,
        f"--v3-api suite delete not_a_suite",
        catch_exceptions=False,
    )
    assert result.exit_code == 1

    stdout: str = result.stdout
    assert "No expectation suite named not_a_suite found" in stdout

    assert mock_emit.call_count == 3
    assert mock_emit.call_args_list == [
        mock.call(
            {"event_payload": {}, "event": "data_context.__init__", "success": True}
        ),
        mock.call(
            {
                "event": "cli.suite.delete.begin",
                "event_payload": {"api_version": "v3"},
                "success": True,
            }
        ),
        mock.call(
            {
                "event": "cli.suite.delete.end",
                "event_payload": {"api_version": "v3"},
                "success": False,
            }
        ),
    ]

    assert_no_logging_messages_or_tracebacks(
        my_caplog=caplog,
        click_result=result,
    )


@mock.patch(
    "great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit"
)
def test_suite_delete_with_one_suite(
    mock_emit, caplog, monkeypatch, empty_data_context_stats_enabled
):
    """CLI `suite delete`, confirmed at the prompt, removes the suite file on disk."""
    context: DataContext = empty_data_context_stats_enabled
    monkeypatch.chdir(os.path.dirname(context.root_directory))
    project_dir: str = empty_data_context_stats_enabled.root_directory

    expectation_suite_dir_name: str = "a_dir"
    expectation_suite_name: str = "test_suite_name"

    # noinspection PyUnusedLocal
    suite: ExpectationSuite = context.create_expectation_suite(
        expectation_suite_name=f"{expectation_suite_dir_name}.{expectation_suite_name}"
    )
    context.save_expectation_suite(expectation_suite=suite)
    assert (
        context.list_expectation_suites()[0].expectation_suite_name
        == f"{expectation_suite_dir_name}.{expectation_suite_name}"
    )
    # Discard the events emitted during the setup above; only the CLI run counts.
    mock_emit.reset_mock()

    suite_dir: str = os.path.join(
        project_dir, "expectations", expectation_suite_dir_name
    )
    suite_path: str = os.path.join(suite_dir, f"{expectation_suite_name}.json")
    assert os.path.isfile(suite_path)

    runner: CliRunner = CliRunner(mix_stderr=False)
    # input="\n" accepts the confirmation prompt's default (delete).
    result: Result = runner.invoke(
        cli,
        f"--v3-api suite delete {expectation_suite_dir_name}.{expectation_suite_name}",
        input="\n",
        catch_exceptions=False,
    )
    assert result.exit_code == 0

    stdout: str = result.stdout
    assert (
        f"Deleted the expectation suite named: {expectation_suite_dir_name}.{expectation_suite_name}"
        in stdout
    )
    assert not os.path.isfile(suite_path)

    assert mock_emit.call_count == 3
    assert mock_emit.call_args_list == [
        mock.call(
            {"event_payload": {}, "event": "data_context.__init__", "success": True}
        ),
        mock.call(
            {
                "event": "cli.suite.delete.begin",
                "event_payload": {"api_version": "v3"},
                "success": True,
            }
        ),
        mock.call(
            {
                "event": "cli.suite.delete.end",
                "event_payload": {"api_version": "v3"},
                "success": True,
            }
        ),
    ]

    assert_no_logging_messages_or_tracebacks(
        my_caplog=caplog,
        click_result=result,
    )


@mock.patch(
    "great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit"
)
def test_suite_delete_canceled_with_one_suite(
    mock_emit, caplog, monkeypatch, empty_data_context_stats_enabled
):
    """CLI `suite delete`, declined at the prompt, leaves the suite file untouched."""
    context: DataContext = empty_data_context_stats_enabled
    monkeypatch.chdir(os.path.dirname(context.root_directory))
    project_dir: str = empty_data_context_stats_enabled.root_directory

    expectation_suite_dir_name: str = "a_dir"
    expectation_suite_name: str = "test_suite_name"

    # noinspection PyUnusedLocal
    suite: ExpectationSuite = context.create_expectation_suite(
        expectation_suite_name=f"{expectation_suite_dir_name}.{expectation_suite_name}"
    )
    context.save_expectation_suite(expectation_suite=suite)
    assert (
        context.list_expectation_suites()[0].expectation_suite_name
        == f"{expectation_suite_dir_name}.{expectation_suite_name}"
    )
    # Discard the events emitted during the setup above; only the CLI run counts.
    mock_emit.reset_mock()

    suite_dir: str = os.path.join(
        project_dir, "expectations", expectation_suite_dir_name
    )
    suite_path: str = os.path.join(suite_dir, f"{expectation_suite_name}.json")
    assert os.path.isfile(suite_path)

    runner: CliRunner = CliRunner(mix_stderr=False)
    # input="n\n" declines the confirmation prompt, so nothing is deleted and
    # only begin (no end) event is emitted.
    result: Result = runner.invoke(
        cli,
        f"--v3-api suite delete {expectation_suite_dir_name}.{expectation_suite_name}",
        input="n\n",
        catch_exceptions=False,
    )
    assert result.exit_code == 0

    stdout: str = result.stdout
    assert (
        f"Suite `{expectation_suite_dir_name}.{expectation_suite_name}` was not deleted"
        in stdout
    )
    assert os.path.isfile(suite_path)

    assert mock_emit.call_count == 2
    assert mock_emit.call_args_list == [
        mock.call(
            {"event_payload": {}, "event": "data_context.__init__", "success": True}
        ),
        mock.call(
            {
                "event": "cli.suite.delete.begin",
                "event_payload": {"api_version": "v3"},
                "success": True,
            }
        ),
    ]

    assert_no_logging_messages_or_tracebacks(
        my_caplog=caplog,
        click_result=result,
    )


@mock.patch(
    "great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit"
)
def test_suite_delete_with_one_suite_assume_yes_flag(
    mock_emit, caplog, monkeypatch, empty_data_context_stats_enabled
):
    context: DataContext = empty_data_context_stats_enabled
    monkeypatch.chdir(os.path.dirname(context.root_directory))
    project_dir: str = context.root_directory

    expectation_suite_dir_name: str = "a_dir"
    expectation_suite_name: str = "test_suite_name"

    # noinspection PyUnusedLocal
    suite: ExpectationSuite = context.create_expectation_suite(
        expectation_suite_name=f"{expectation_suite_dir_name}.{expectation_suite_name}"
    )
    context.save_expectation_suite(expectation_suite=suite)
    assert
( context.list_expectation_suites()[0].expectation_suite_name == f"{expectation_suite_dir_name}.{expectation_suite_name}" ) mock_emit.reset_mock() suite_dir: str = os.path.join( project_dir, "expectations", expectation_suite_dir_name ) suite_path: str = os.path.join(suite_dir, f"{expectation_suite_name}.json") assert os.path.isfile(suite_path) runner: CliRunner = CliRunner(mix_stderr=False) result: Result = runner.invoke( cli, f"--v3-api --assume-yes suite delete {expectation_suite_dir_name}.{expectation_suite_name}", catch_exceptions=False, ) assert result.exit_code == 0 stdout: str = result.stdout assert ( f"Deleted the expectation suite named: {expectation_suite_dir_name}.{expectation_suite_name}" in stdout ) assert "Would you like to proceed? [Y/n]:" not in stdout # This assertion is extra assurance since this test is too permissive if we change the confirmation message assert "[Y/n]" not in stdout assert not os.path.isfile(suite_path) assert mock_emit.call_count == 3 assert mock_emit.call_args_list == [ mock.call( {"event_payload": {}, "event": "data_context.__init__", "success": True} ), mock.call( { "event": "cli.suite.delete.begin", "event_payload": {"api_version": "v3"}, "success": True, } ), mock.call( { "event": "cli.suite.delete.end", "event_payload": {"api_version": "v3"}, "success": True, } ), ] assert_no_logging_messages_or_tracebacks( my_caplog=caplog, click_result=result, ) result = runner.invoke( cli, f"--v3-api suite list", catch_exceptions=False, ) assert result.exit_code == 0 stdout = result.stdout assert result.exit_code == 0 assert "No Expectation Suites found" in stdout @mock.patch( "great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit" ) @mock.patch("subprocess.call", return_value=True, side_effect=None) def test_suite_new_profile_on_context_with_no_datasource_raises_error( mock_subprocess, mock_emit, caplog, monkeypatch, empty_data_context_stats_enabled, ): """ We call the "suite new --profile" command on a 
context with no datasource The command should: - exit with a clear error message - send a DataContext init success message - send a new fail message """ context: DataContext = empty_data_context_stats_enabled monkeypatch.chdir(os.path.dirname(context.root_directory)) expectation_suite_name: str = "test_suite_name" runner: CliRunner = CliRunner(mix_stderr=False) result: Result = runner.invoke( cli, [ "--v3-api", "suite", "new", "--interactive", "--profile", "--expectation-suite", f"{expectation_suite_name}", ], input="\n", catch_exceptions=False, ) assert result.exit_code == 1 stdout: str = result.output assert ( "No datasources found in the context. To add a datasource, run `great_expectations datasource new`" in stdout ) assert mock_subprocess.call_count == 0 assert mock_emit.call_count == 3 assert mock_emit.call_args_list == [ mock.call( {"event_payload": {}, "event": "data_context.__init__", "success": True} ), mock.call( { "event": "cli.suite.new.begin", "event_payload": {"api_version": "v3"}, "success": True, } ), mock.call( { "event": "cli.suite.new.end", "event_payload": {"api_version": "v3"}, "success": False, } ), ] assert_no_logging_messages_or_tracebacks( my_caplog=caplog, click_result=result, ) @mock.patch( "great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit" ) def test_suite_new_profile_on_existing_suite_raises_error( mock_emit, caplog, monkeypatch, empty_data_context_stats_enabled ): """ We call the "suite new --profile" command with an existing suite The command should: - exit with a clear error message - send a DataContext init success message - send a new fail message """ context: DataContext = empty_data_context_stats_enabled monkeypatch.chdir(os.path.dirname(context.root_directory)) project_dir: str = context.root_directory uncommitted_dir: str = os.path.join(project_dir, "uncommitted") expectation_suite_name: str = "test_suite_name" suite: ExpectationSuite = context.create_expectation_suite( 
        expectation_suite_name=expectation_suite_name
    )
    context.save_expectation_suite(expectation_suite=suite)
    assert (
        context.list_expectation_suites()[0].expectation_suite_name
        == expectation_suite_name
    )
    batch_request: dict = {
        "datasource_name": "my_datasource",
        "data_connector_name": "my_basic_data_connector",
        "data_asset_name": "Titanic_1911",
    }
    # NOTE(review): extraneous f-prefix (no placeholders) — ruff F541; kept as-is.
    batch_request_file_path: str = os.path.join(uncommitted_dir, f"batch_request.json")
    with open(batch_request_file_path, "w") as json_file:
        json.dump(batch_request, json_file)
    mock_emit.reset_mock()

    runner: CliRunner = CliRunner(mix_stderr=False)
    result: Result = runner.invoke(
        cli,
        [
            "--v3-api",
            "suite",
            "new",
            "--expectation-suite",
            f"{expectation_suite_name}",
            "--interactive",
            "--batch-request",
            f"{batch_request_file_path}",
            "--profile",
            "--no-jupyter",
        ],
        input="\n",
        catch_exceptions=False,
    )
    # The command must refuse to overwrite an existing suite.
    assert result.exit_code == 1

    stdout: str = result.output
    assert (
        f"An expectation suite named `{expectation_suite_name}` already exists."
        in stdout
    )
    assert (
        f"If you intend to edit the suite please use `great_expectations suite edit {expectation_suite_name}`."
        in stdout
    )

    assert mock_emit.call_count == 3
    assert mock_emit.call_args_list == [
        mock.call(
            {"event_payload": {}, "event": "data_context.__init__", "success": True}
        ),
        mock.call(
            {
                "event": "cli.suite.new.begin",
                "event_payload": {"api_version": "v3"},
                "success": True,
            }
        ),
        mock.call(
            {
                "event": "cli.suite.new.end",
                "event_payload": {"api_version": "v3"},
                "success": False,
            }
        ),
    ]

    assert_no_logging_messages_or_tracebacks(
        my_caplog=caplog,
        click_result=result,
    )


@mock.patch(
    "great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit"
)
@mock.patch("subprocess.call", return_value=True, side_effect=None)
@mock.patch("webbrowser.open", return_value=True, side_effect=None)
def test_suite_new_profile_runs_notebook_no_jupyter(
    mock_webbroser,
    mock_subprocess,
    mock_emit,
    caplog,
    monkeypatch,
    titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled,
):
    """
    We call the "suite new --profile" command

    The command should:
    - create a new notebook
    - send a DataContext init success message
    - send a new success message
    """
    context: DataContext = titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled
    monkeypatch.chdir(os.path.dirname(context.root_directory))
    project_dir: str = context.root_directory
    uncommitted_dir: str = os.path.join(project_dir, "uncommitted")
    expectation_suite_name: str = "test_suite_name"

    batch_request: dict = {
        "datasource_name": "my_datasource",
        "data_connector_name": "my_basic_data_connector",
        "data_asset_name": "Titanic_1911",
    }
    batch_request_file_path: str = os.path.join(uncommitted_dir, f"batch_request.json")
    with open(batch_request_file_path, "w") as json_file:
        json.dump(batch_request, json_file)
    mock_emit.reset_mock()

    runner: CliRunner = CliRunner(mix_stderr=False)
    result: Result = runner.invoke(
        cli,
        [
            "--v3-api",
            "suite",
            "new",
            "--expectation-suite",
            f"{expectation_suite_name}",
            "--interactive",
            "--batch-request",
            f"{batch_request_file_path}",
            "--profile",
            "--no-jupyter",
        ],
        input="\n",
        catch_exceptions=False,
    )
    assert result.exit_code == 0

    stdout: str = result.stdout
    # --batch-request and --no-jupyter suppress the datasource prompt and notebook launch text.
    assert "Select a datasource" not in stdout
    assert (
        "Opening a notebook for you now to edit your expectation suite!" not in stdout
    )
    assert (
        "If you wish to avoid this you can add the `--no-jupyter` flag." not in stdout
    )

    expected_suite_path: str = os.path.join(
        project_dir, "expectations", f"{expectation_suite_name}.json"
    )
    assert os.path.isfile(expected_suite_path)
    expected_notebook_path: str = os.path.join(
        uncommitted_dir, f"edit_{expectation_suite_name}.ipynb"
    )
    assert os.path.isfile(expected_notebook_path)

    # Re-shape str(BatchRequest) into the exact indented form rendered in the notebook cell.
    # (Replacement targets use 4-space indents — matches upstream; confirm if layout changes.)
    batch_request_string: str = (
        str(BatchRequest(**batch_request))
        .replace("{\n", "{\n    ")
        .replace(",\n", ",\n    ")
        .replace("\n}", ",\n}")
    )
    batch_request_string = fr"batch_request = {batch_request_string}"

    cells_of_interest_dict: Dict[int, dict] = find_code_in_notebook(
        nb=load_notebook_from_path(notebook_path=expected_notebook_path),
        search_string=batch_request_string,
    )
    assert len(cells_of_interest_dict) == 1

    # The generated notebook must not open suite-level docs...
    cells_of_interest_dict: Dict[int, dict] = find_code_in_notebook(
        nb=load_notebook_from_path(notebook_path=expected_notebook_path),
        search_string="context.open_data_docs(resource_identifier=suite_identifier)",
    )
    assert not cells_of_interest_dict

    # ...but it does open the validation-result docs exactly once.
    cells_of_interest_dict: Dict[int, dict] = find_code_in_notebook(
        nb=load_notebook_from_path(notebook_path=expected_notebook_path),
        search_string="context.open_data_docs(resource_identifier=validation_result_identifier)",
    )
    assert len(cells_of_interest_dict) == 1

    profiler_code_cell: str = f"""\
profiler = UserConfigurableProfiler(
    profile_dataset=validator,
    excluded_expectations=None,
    ignored_columns=ignored_columns,
    not_null_only=False,
    primary_or_compound_key=False,
    semantic_types_dict=None,
    table_expectations_only=False,
    value_set_threshold="MANY",
)
suite = profiler.build_suite()"""
    # Normalize through the same linter the notebook renderer uses before comparing.
    profiler_code_cell = lint_code(code=profiler_code_cell).rstrip("\n")
    cells_of_interest_dict: Dict[int, dict] = find_code_in_notebook(
        nb=load_notebook_from_path(notebook_path=expected_notebook_path),
        search_string=profiler_code_cell,
    )
    assert len(cells_of_interest_dict) == 1

    # Execute the generated notebook (data-docs launch stripped out) to materialize the suite.
    run_notebook(
        notebook_path=expected_notebook_path,
        notebook_dir=uncommitted_dir,
        string_to_be_replaced="context.open_data_docs(resource_identifier=validation_result_identifier)",
        replacement_string="",
    )
    # Reload the context from disk so we see what the notebook actually saved.
    context = DataContext(context_root_dir=project_dir)
    assert expectation_suite_name in context.list_expectation_suite_names()

    suite: ExpectationSuite = context.get_expectation_suite(
        expectation_suite_name=expectation_suite_name
    )
    # The profiler should produce exactly these two table-level expectations for Titanic_1911.
    assert suite.expectations == [
        ExpectationConfiguration(
            **{
                "expectation_type": "expect_table_columns_to_match_ordered_list",
                "kwargs": {
                    "column_list": [
                        "Unnamed: 0",
                        "Name",
                        "PClass",
                        "Age",
                        "Sex",
                        "Survived",
                        "SexCode",
                    ]
                },
                "meta": {},
            }
        ),
        ExpectationConfiguration(
            **{
                "expectation_type": "expect_table_row_count_to_be_between",
                "kwargs": {"max_value": 1313, "min_value": 1313},
                "meta": {},
            }
        ),
    ]

    # --no-jupyter: no subprocess launch, no browser.
    assert mock_subprocess.call_count == 0
    assert mock_webbroser.call_count == 0
    assert mock_emit.call_count == 5
    assert mock_emit.call_args_list == [
        mock.call(
            {"event_payload": {}, "event": "data_context.__init__", "success": True}
        ),
        mock.call(
            {
                "event": "cli.suite.new.begin",
                "event_payload": {"api_version": "v3"},
                "success": True,
            }
        ),
        mock.call(
            {
                "event_payload": {
                    "anonymized_expectation_suite_name": "9df638a13b727807e51b13ec1839bcbe"
                },
                "event": "data_context.save_expectation_suite",
                "success": True,
            }
        ),
        mock.call(
            {
                "event": "cli.suite.new.end",
                "event_payload": {"api_version": "v3"},
                "success": True,
            }
        ),
        mock.call(
            {"event_payload": {}, "event": "data_context.__init__", "success": True}
        ),
    ]

    assert_no_logging_messages_or_tracebacks(
        my_caplog=caplog,
        click_result=result,
    )


@mock.patch(
    "great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit"
)
@mock.patch("subprocess.call", return_value=True, side_effect=None)
@mock.patch("webbrowser.open", return_value=True, side_effect=None)
def test_suite_new_profile_runs_notebook_opens_jupyter(
    mock_webbroser,
    mock_subprocess,
    mock_emit,
    caplog,
    monkeypatch,
    titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled,
):
    """
    We call the "suite new --profile" command

    The command should:
    - create a new notebook
    - open the notebook in jupyter
    - send a DataContext init success message
    - send a new success message
    """
    context: DataContext = titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled
    monkeypatch.chdir(os.path.dirname(context.root_directory))
    project_dir: str = context.root_directory
    uncommitted_dir: str = os.path.join(project_dir, "uncommitted")
    expectation_suite_name: str = "test_suite_name"

    batch_request: dict = {
        "datasource_name": "my_datasource",
        "data_connector_name": "my_basic_data_connector",
        "data_asset_name": "Titanic_1911",
    }
    batch_request_file_path: str = os.path.join(uncommitted_dir, f"batch_request.json")
    with open(batch_request_file_path, "w") as json_file:
        json.dump(batch_request, json_file)
    mock_emit.reset_mock()

    runner: CliRunner = CliRunner(mix_stderr=False)
    # Same invocation as the no-jupyter variant, but WITHOUT --no-jupyter.
    result: Result = runner.invoke(
        cli,
        [
            "--v3-api",
            "suite",
            "new",
            "--expectation-suite",
            f"{expectation_suite_name}",
            "--interactive",
            "--batch-request",
            f"{batch_request_file_path}",
            "--profile",
        ],
        input="\n",
        catch_exceptions=False,
    )
    assert result.exit_code == 0

    stdout: str = result.stdout
    assert "Select a datasource" not in stdout
    assert "Opening a notebook for you now to edit your expectation suite!" in stdout
    assert "If you wish to avoid this you can add the `--no-jupyter` flag." in stdout

    expected_suite_path: str = os.path.join(
        project_dir, "expectations", f"{expectation_suite_name}.json"
    )
    assert os.path.isfile(expected_suite_path)
    expected_notebook_path: str = os.path.join(
        uncommitted_dir, f"edit_{expectation_suite_name}.ipynb"
    )
    assert os.path.isfile(expected_notebook_path)

    # Re-shape str(BatchRequest) into the indented form rendered in the notebook cell.
    batch_request_string: str = (
        str(BatchRequest(**batch_request))
        .replace("{\n", "{\n    ")
        .replace(",\n", ",\n    ")
        .replace("\n}", ",\n}")
    )
    batch_request_string = fr"batch_request = {batch_request_string}"

    cells_of_interest_dict: Dict[int, dict] = find_code_in_notebook(
        nb=load_notebook_from_path(notebook_path=expected_notebook_path),
        search_string=batch_request_string,
    )
    assert len(cells_of_interest_dict) == 1

    cells_of_interest_dict: Dict[int, dict] = find_code_in_notebook(
        nb=load_notebook_from_path(notebook_path=expected_notebook_path),
        search_string="context.open_data_docs(resource_identifier=suite_identifier)",
    )
    assert not cells_of_interest_dict

    cells_of_interest_dict: Dict[int, dict] = find_code_in_notebook(
        nb=load_notebook_from_path(notebook_path=expected_notebook_path),
        search_string="context.open_data_docs(resource_identifier=validation_result_identifier)",
    )
    assert len(cells_of_interest_dict) == 1

    profiler_code_cell: str = f"""\
profiler = UserConfigurableProfiler(
    profile_dataset=validator,
    excluded_expectations=None,
    ignored_columns=ignored_columns,
    not_null_only=False,
    primary_or_compound_key=False,
    semantic_types_dict=None,
    table_expectations_only=False,
    value_set_threshold="MANY",
)
suite = profiler.build_suite()"""
    profiler_code_cell = lint_code(code=profiler_code_cell).rstrip("\n")

    cells_of_interest_dict: Dict[int, dict] = find_code_in_notebook(
        nb=load_notebook_from_path(notebook_path=expected_notebook_path),
        search_string=profiler_code_cell,
    )
    assert len(cells_of_interest_dict) == 1

    run_notebook(
        notebook_path=expected_notebook_path,
        notebook_dir=uncommitted_dir,
        string_to_be_replaced="context.open_data_docs(resource_identifier=validation_result_identifier)",
        replacement_string="",
    )
    # Reload the context from disk so we see what the notebook actually saved.
    context = DataContext(context_root_dir=project_dir)
    assert expectation_suite_name in context.list_expectation_suite_names()

    suite: ExpectationSuite = context.get_expectation_suite(
        expectation_suite_name=expectation_suite_name
    )
    assert suite.expectations == [
        ExpectationConfiguration(
            **{
                "expectation_type": "expect_table_columns_to_match_ordered_list",
                "kwargs": {
                    "column_list": [
                        "Unnamed: 0",
                        "Name",
                        "PClass",
                        "Age",
                        "Sex",
                        "Survived",
                        "SexCode",
                    ]
                },
                "meta": {},
            }
        ),
        ExpectationConfiguration(
            **{
                "expectation_type": "expect_table_row_count_to_be_between",
                "kwargs": {"max_value": 1313, "min_value": 1313},
                "meta": {},
            }
        ),
    ]

    # Without --no-jupyter the CLI shells out exactly once: `jupyter notebook <path>`.
    assert mock_subprocess.call_count == 1
    call_args: List[str] = mock_subprocess.call_args[0][0]
    assert call_args[0] == "jupyter"
    assert call_args[1] == "notebook"
    assert expected_notebook_path in call_args[2]

    assert mock_webbroser.call_count == 0
    assert mock_emit.call_count == 5
    assert mock_emit.call_args_list == [
        mock.call(
            {"event_payload": {}, "event": "data_context.__init__", "success": True}
        ),
        mock.call(
            {
                "event": "cli.suite.new.begin",
                "event_payload": {"api_version": "v3"},
                "success": True,
            }
        ),
        mock.call(
            {
                "event_payload": {
                    "anonymized_expectation_suite_name": "9df638a13b727807e51b13ec1839bcbe"
                },
                "event": "data_context.save_expectation_suite",
                "success": True,
            }
        ),
        mock.call(
            {
                "event": "cli.suite.new.end",
                "event_payload": {"api_version": "v3"},
                "success": True,
            }
        ),
        mock.call(
            {"event_payload": {}, "event": "data_context.__init__", "success": True}
        ),
    ]

    assert_no_logging_messages_or_tracebacks(
        my_caplog=caplog,
        click_result=result,
    )


@pytest.fixture
def suite_new_messages():
    # Canonical CLI messages for `suite new`; parametrized cases reference these by key.
    return {
        "no_msg": "",
        "happy_path_profile": "Entering interactive mode since you passed the --profile flag",
        "warning_profile": "Warning: Ignoring the --manual flag and entering interactive mode since you passed the --profile flag",
        "happy_path_batch_request": "Entering interactive mode since you passed the --batch-request flag",
        "warning_batch_request": "Warning: Ignoring the --manual flag and entering interactive mode since you passed the --batch-request flag",
        "happy_path_prompt_call": """\
How would you like to create your Expectation Suite?
    1. Manually, without interacting with a sample batch of data (default)
    2. Interactively, with a sample batch of data
    3. Automatically, using a profiler
""",
        "error_both_interactive_flags": "Please choose either --interactive or --manual, you may not choose both.",
    }


@pytest.mark.parametrize(
    "interactive_flag,manual_flag,profile_flag,batch_request_flag,error_expected,prompt_input,return_interactive,return_profile,stdout_fixture,stderr_fixture",
    [
        # No error expected
        # return_interactive = True, return_profile = False
        pytest.param(
            True,
            False,
            False,
            None,
            False,
            None,
            True,
            False,
            "no_msg",
            "no_msg",
            id="--interactive",
        ),
        # return_interactive = False, return_profile = False
        pytest.param(
            False,
            True,
            False,
            None,
            False,
            None,
            False,
            False,
            "no_msg",
            "no_msg",
            id="--manual",
        ),
        # return_interactive = True, return_profile = True
        pytest.param(
            False,
            False,
            True,
            None,
            False,
            None,
            True,
            True,
            "no_msg",
            "no_msg",
            id="--profile",
        ),
        pytest.param(
            True,
            False,
            True,
            None,
            False,
            None,
            True,
            True,
            "no_msg",
            "no_msg",
            id="--interactive --profile",
        ),
        # batch_request not empty
        pytest.param(
            True,
            False,
            False,
            "batch_request.json",
            False,
            None,
            True,
            False,
            "no_msg",
            "no_msg",
            id="--interactive --batch-request",
        ),
        pytest.param(
            False,
            False,
            True,
            "batch_request.json",
            False,
            None,
            True,
            True,
            "happy_path_profile",
            "no_msg",
            id="--profile --batch-request",
        ),
        pytest.param(
            True,
            False,
            True,
            "batch_request.json",
            False,
            None,
            True,
            True,
            "no_msg",
            "no_msg",
            id="--interactive --profile --batch-request",
        ),
        # Prompts
        # Just hit enter (default choice)
        pytest.param(
            False,
            False,
            False,
            None,
            False,
            "",
            False,
            False,
            "no_msg",
            "no_msg",
            id="prompt: Default Choice 1 - Manual suite creation (default)",
        ),
        # Choice 1 - Manual suite creation (default)
        pytest.param(
            False,
            False,
            False,
            None,
            False,
            "1",
            False,
            False,
            "no_msg",
            "no_msg",
            id="prompt: Choice 1 - Manual suite creation (default)",
        ),
        # Choice 2 - Interactive suite creation
        pytest.param(
            False,
            False,
            False,
            None,
            False,
            "2",
            True,
            False,
            "no_msg",
            "no_msg",
            id="prompt: Choice 2 - Interactive suite creation",
        ),
        # Choice 3 - Automatic suite creation (profiler)
        pytest.param(
            False,
            False,
            False,
            None,
            False,
            "3",
            True,
            True,
            "no_msg",
            "no_msg",
            id="prompt: Choice 3 - Automatic suite creation (profiler)",
        ),
        # No error but warning expected
        # no-interactive flag with batch_request, with/without profile flag
        pytest.param(
            False,
            True,
            False,
            "batch_request.json",
            False,
            None,
            True,
            False,
            "warning_batch_request",
            "no_msg",
            id="warning: --manual --batch-request",
        ),
        pytest.param(
            False,
            True,
            True,
            "batch_request.json",
            False,
            None,
            True,
            True,
            "warning_profile",
            "no_msg",
            id="warning: --manual --profile --batch-request",
        ),
        # no-interactive flag with profile and without batch request flag
        pytest.param(
            False,
            True,
            True,
            None,
            False,
            None,
            True,
            True,
            "warning_profile",
            "no_msg",
            id="warning: --manual --profile",
        ),
        # Yes error expected
        # both interactive flags, profile=False, with/without batch_request
        pytest.param(
            True,
            True,
            False,
            None,
            True,
            None,
            None,
            None,
            "error_both_interactive_flags",
            "no_msg",
            id="error: --interactive --manual",
        ),
        pytest.param(
            True,
            True,
            False,
            "batch_request.json",
            True,
            None,
            None,
            None,
            "error_both_interactive_flags",
            "no_msg",
            id="error: --interactive --manual --batch-request",
        ),
        # both interactive flags, profile=True, with/without batch_request
        pytest.param(
            True,
            True,
            True,
            None,
            True,
            None,
            None,
            None,
            "error_both_interactive_flags",
            "no_msg",
            id="error: --interactive --manual --profile",
        ),
        pytest.param(
            True,
            True,
            True,
            "batch_request.json",
            True,
            None,
            None,
            None,
            "error_both_interactive_flags",
            "no_msg",
            id="error: --interactive --manual --profile --batch-request",
        ),
    ],
)
@mock.patch("click.prompt")
@mock.patch(
    "great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit"
)
def test__process_suite_new_flags_and_prompt(
    mock_emit,
    mock_prompt,
    interactive_flag,
    manual_flag,
    profile_flag,
    batch_request_flag,
    error_expected,
    prompt_input,
    return_interactive,
    return_profile,
    stdout_fixture,
    stderr_fixture,
    empty_data_context_stats_enabled,
    capsys,
    suite_new_messages,
):
    """
    What does this test and why?
    _process_suite_new_flags_and_prompt should return the correct configuration or error based on input flags.
    """
    usage_event_end: str = "cli.suite.new.end"
    context: DataContext = empty_data_context_stats_enabled

    # test happy paths
    if not error_expected:
        if prompt_input is not None:
            mock_prompt.side_effect = [prompt_input]
        processed_flags = _process_suite_new_flags_and_prompt(
            context=context,
            usage_event_end=usage_event_end,
            interactive_flag=interactive_flag,
            manual_flag=manual_flag,
            profile=profile_flag,
            batch_request=batch_request_flag,
        )
        assert processed_flags == {
            "interactive": return_interactive,
            "profile": return_profile,
        }
        # Note - in this method on happy path no usage stats message is sent. Other messages are sent during the full
        # CLI suite new flow of creating a notebook etc.
        assert mock_emit.call_count == 0
        assert mock_emit.call_args_list == []

        # Check output
        captured: CaptureResult = capsys.readouterr()
        assert suite_new_messages[stdout_fixture] in captured.out
        assert suite_new_messages[stderr_fixture] in captured.err

        # Check prompt text and called only when appropriate
        if prompt_input is not None:
            assert mock_prompt.call_count == 1
            assert (
                mock_prompt.call_args_list[0][0][0]
                == suite_new_messages["happy_path_prompt_call"]
            )
        else:
            assert mock_prompt.call_count == 0

    # test error cases
    elif error_expected:
        with pytest.raises(SystemExit):
            _ = _process_suite_new_flags_and_prompt(
                context=context,
                usage_event_end=usage_event_end,
                interactive_flag=interactive_flag,
                manual_flag=manual_flag,
                profile=profile_flag,
                batch_request=batch_request_flag,
            )

        # Check output
        captured: CaptureResult = capsys.readouterr()
        assert suite_new_messages[stdout_fixture] in captured.out
        assert suite_new_messages[stderr_fixture] in captured.err
        # Errors abort before any prompt is shown.
        assert mock_prompt.call_count == 0

        # Note - in this method only a single usage stats message is sent. Other messages are sent during the full
        # CLI suite new flow of creating a notebook etc.
        assert mock_emit.call_count == 1
        assert mock_emit.call_args_list == [
            mock.call(
                {
                    "event": usage_event_end,
                    "event_payload": {"api_version": "v3"},
                    "success": False,
                }
            ),
        ]


@pytest.fixture
def suite_edit_messages():
    # Canonical CLI messages for `suite edit`; parametrized cases reference these by key.
    return {
        "no_msg": "",
        "happy_path_datasource_name": "Entering interactive mode since you passed the --datasource-name flag",
        "warning_datasource_name": "Warning: Ignoring the --manual flag and entering interactive mode since you passed the --datasource-name flag",
        "happy_path_batch_request": "Entering interactive mode since you passed the --batch-request flag",
        "warning_batch_request": "Warning: Ignoring the --manual flag and entering interactive mode since you passed the --batch-request flag",
        "happy_path_prompt_call": """\
How would you like to edit your Expectation Suite?
    1. Manually, without interacting with a sample batch of data (default)
    2. Interactively, with a sample batch of data
""",
        "error_both_interactive_flags": "Please choose either --interactive or --manual, you may not choose both.",
        "error_both_datasource_name_and_batch_request_flags": """Only one of --datasource-name DATASOURCE_NAME and --batch-request <path to JSON file> \
options can be used.
""",
    }


@pytest.mark.parametrize(
    "interactive_flag,manual_flag,datasource_name_flag,batch_request_flag,error_expected,prompt_input,return_interactive,stdout_fixture,stderr_fixture",
    [
        # No error expected
        # return_interactive = True
        pytest.param(
            True,
            False,
            None,
            None,
            False,
            None,
            True,
            "no_msg",
            "no_msg",
            id="--interactive",
        ),
        # return_interactive = False
        pytest.param(
            False,
            True,
            None,
            None,
            False,
            None,
            False,
            "no_msg",
            "no_msg",
            id="--manual",
        ),
        # return_interactive = True, --datasource-name
        pytest.param(
            False,
            False,
            "some_datasource_name",
            None,
            False,
            None,
            True,
            "happy_path_datasource_name",
            "no_msg",
            id="--datasource-name",
        ),
        pytest.param(
            True,
            False,
            "some_datasource_name",
            None,
            False,
            None,
            True,
            "no_msg",
            "no_msg",
            id="--interactive --datasource-name",
        ),
        # batch_request not empty
        pytest.param(
            True,
            False,
            None,
            "batch_request.json",
            False,
            None,
            True,
            "no_msg",
            "no_msg",
            id="--interactive --batch-request",
        ),
        pytest.param(
            False,
            False,
            None,
            "batch_request.json",
            False,
            None,
            True,
            "happy_path_batch_request",
            "no_msg",
            id="--batch-request",
        ),
        # Prompts
        # Just hit enter (default choice)
        pytest.param(
            False,
            False,
            None,
            None,
            False,
            "",
            False,
            "no_msg",
            "no_msg",
            id="prompt: Default Choice 1 - Manual suite edit (default)",
        ),
        # Choice 1 - Manual suite edit (default)
        pytest.param(
            False,
            False,
            None,
            None,
            False,
            "1",
            False,
            "no_msg",
            "no_msg",
            id="prompt: Choice 1 - Manual suite edit (default)",
        ),
        # Choice 2 - Interactive suite edit
        pytest.param(
            False,
            False,
            None,
            None,
            False,
            "2",
            True,
            "no_msg",
            "no_msg",
            id="prompt: Choice 2 - Interactive suite edit",
        ),
        # No error but warning expected
        # no-interactive flag with batch_request
        pytest.param(
            False,
            True,
            None,
            "batch_request.json",
            False,
            None,
            True,
            "warning_batch_request",
            "no_msg",
            id="warning: --manual --batch-request",
        ),
        # no-interactive flag with datasource_name
        pytest.param(
            False,
            True,
            "some_datasource_name",
            None,
            False,
            None,
            True,
            "warning_datasource_name",
            "no_msg",
            id="warning: --manual --datasource-name",
        ),
        # Yes error expected
        # both interactive flags, datasource_name=None, with/without batch_request
        pytest.param(
            True,
            True,
            None,
            None,
            True,
            None,
            None,
            "error_both_interactive_flags",
            "no_msg",
            id="error: --interactive --manual",
        ),
        pytest.param(
            True,
            True,
            None,
            "batch_request.json",
            True,
            None,
            None,
            "error_both_interactive_flags",
            "no_msg",
            id="error: --interactive --manual --batch-request",
        ),
        # both interactive flags, datasource_name=something, with/without batch_request
        pytest.param(
            True,
            True,
            "some_datasource_name",
            None,
            True,
            None,
            None,
            "error_both_interactive_flags",
            "no_msg",
            id="error: --interactive --manual --datasource-name",
        ),
        pytest.param(
            True,
            True,
            "some_datasource_name",
            "batch_request.json",
            True,
            None,
            None,
            "error_both_datasource_name_and_batch_request_flags",
            "no_msg",
            id="error: --interactive --manual --datasource-name --batch-request",
        ),
        # both --datasource-name and --batch-request
        pytest.param(
            False,
            False,
            "some_datasource_name",
            "batch_request.json",
            True,
            None,
            True,
            "error_both_datasource_name_and_batch_request_flags",
            "no_msg",
            id="error: --datasource-name --batch-request",
        ),
        pytest.param(
            True,
            False,
            "some_datasource_name",
            "batch_request.json",
            True,
            None,
            True,
            "error_both_datasource_name_and_batch_request_flags",
            "no_msg",
            id="--interactive --datasource-name --batch-request",
        ),
        pytest.param(
            False,
            True,
            "some_datasource_name",
            "batch_request.json",
            True,
            None,
            True,
            "error_both_datasource_name_and_batch_request_flags",
            "no_msg",
            id="--manual --datasource-name --batch-request",
        ),
    ],
)
# NOTE: decorators apply bottom-up, so the closest decorator (the emit patch)
# binds to the first mock parameter (mock_emit) and the click.prompt patch to
# the second (mock_prompt). The @pytest.mark.parametrize decorator above
# supplies every remaining parameter except the fixtures.
@mock.patch("click.prompt")
@mock.patch(
    "great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit"
)
def test__process_suite_edit_flags_and_prompt(
    mock_emit,
    mock_prompt,
    interactive_flag,
    manual_flag,
    datasource_name_flag,
    batch_request_flag,
    error_expected,
    prompt_input,
    return_interactive,
    stdout_fixture,
    stderr_fixture,
    empty_data_context_stats_enabled,
    capsys,
    suite_edit_messages,
):
    """
    What does this test and why?

    _process_suite_edit_flags_and_prompt should return the correct
    interactive/manual configuration, or exit with an error, based on the
    combination of CLI flags passed in. The parametrization covers:

    - happy paths (flags alone, or a prompt answered via mock_prompt),
      asserting the returned ``interactive`` bool;
    - warning paths (conflicting but tolerated flag combinations);
    - error paths, asserting a SystemExit plus a single "success: False"
      usage-stats emit.

    ``stdout_fixture`` / ``stderr_fixture`` are keys into the
    ``suite_edit_messages`` fixture dict naming the expected output snippets.
    """
    usage_event_end: str = "cli.suite.edit.end"
    context: DataContext = empty_data_context_stats_enabled

    # test happy paths
    if not error_expected:
        # Only queue a prompt answer when this parametrization expects the
        # function to actually prompt (prompt_input is None for flag-driven cases).
        if prompt_input is not None:
            mock_prompt.side_effect = [prompt_input]

        interactive: bool = _process_suite_edit_flags_and_prompt(
            context=context,
            usage_event_end=usage_event_end,
            interactive_flag=interactive_flag,
            manual_flag=manual_flag,
            datasource_name=datasource_name_flag,
            batch_request=batch_request_flag,
        )

        assert interactive == return_interactive

        # Note - in this method on happy path no usage stats message is sent.
        # Other messages are sent during the full CLI suite new flow of
        # creating a notebook etc.
        assert mock_emit.call_count == 0
        assert mock_emit.call_args_list == []

        # Check output: the parametrized fixture keys select the expected
        # stdout/stderr snippets ("no_msg" maps to an always-present empty match).
        captured: CaptureResult = capsys.readouterr()
        assert suite_edit_messages[stdout_fixture] in captured.out
        assert suite_edit_messages[stderr_fixture] in captured.err

        # Check prompt text and called only when appropriate
        if prompt_input is not None:
            assert mock_prompt.call_count == 1
            # call_args_list[0][0][0] == first positional arg of the first call,
            # i.e. the prompt text shown to the user.
            assert (
                mock_prompt.call_args_list[0][0][0]
                == suite_edit_messages["happy_path_prompt_call"]
            )
        else:
            assert mock_prompt.call_count == 0

    # test error cases
    elif error_expected:
        # Invalid flag combinations abort the CLI flow via SystemExit.
        with pytest.raises(SystemExit):
            _ = _process_suite_edit_flags_and_prompt(
                context=context,
                usage_event_end=usage_event_end,
                interactive_flag=interactive_flag,
                manual_flag=manual_flag,
                datasource_name=datasource_name_flag,
                batch_request=batch_request_flag,
            )

        # Check output
        captured: CaptureResult = capsys.readouterr()
        assert suite_edit_messages[stdout_fixture] in captured.out
        assert suite_edit_messages[stderr_fixture] in captured.err
        # The prompt must never be reached when flag validation fails.
        assert mock_prompt.call_count == 0

        # Note - in this method only a single usage stats message is sent. Other
        # messages are sent during the full CLI suite new flow of creating a
        # notebook etc.
        assert mock_emit.call_count == 1
        assert mock_emit.call_args_list == [
            mock.call(
                {
                    "event": usage_event_end,
                    "event_payload": {"api_version": "v3"},
                    "success": False,
                }
            ),
        ]
great-expectations/great_expectations
tests/cli/test_suite.py
great_expectations/cli/v012/checkpoint_script_template.py
"""Basic dataset profilers: infer column type/cardinality and emit a starter
expectation suite describing a batch of data."""
import logging

from great_expectations.profile.base import (
    DatasetProfiler,
    ProfilerCardinality,
    ProfilerDataType,
    ProfilerTypeMapping,
)

# sqlalchemy is an optional dependency; fall back to RuntimeError so that
# `except OperationalError` clauses elsewhere remain valid without sqlalchemy.
try:
    from sqlalchemy.exc import OperationalError
except ModuleNotFoundError:
    OperationalError = RuntimeError

logger = logging.getLogger(__name__)


class BasicDatasetProfilerBase(DatasetProfiler):
    """BasicDatasetProfilerBase provides basic logic of inferring the type and the cardinality of columns
    that is used by the dataset profiler classes that extend this class.
    """

    # Deprecation Warning. If you are reading this code you are likely building
    # your own profiler. We are moving toward a profiler toolkit to simplify
    # building custom profilers. These mappings now exist in ProfilerTypeMapping
    # and will be deprecated in the future.
    INT_TYPE_NAMES = ProfilerTypeMapping.INT_TYPE_NAMES
    FLOAT_TYPE_NAMES = ProfilerTypeMapping.FLOAT_TYPE_NAMES
    STRING_TYPE_NAMES = ProfilerTypeMapping.STRING_TYPE_NAMES
    BOOLEAN_TYPE_NAMES = ProfilerTypeMapping.BOOLEAN_TYPE_NAMES
    DATETIME_TYPE_NAMES = ProfilerTypeMapping.DATETIME_TYPE_NAMES

    @classmethod
    def _get_column_type(cls, df, column):
        """Infer a ProfilerDataType for *column* by probing the dataset with
        expect_column_values_to_be_in_type_list for each type family in turn
        (INT before FLOAT before STRING before BOOLEAN before DATETIME — the
        first family that succeeds wins)."""
        # list of types is used to support pandas and sqlalchemy
        df.set_config_value("interactive_evaluation", True)
        try:
            if df.expect_column_values_to_be_in_type_list(
                column, type_list=sorted(list(ProfilerTypeMapping.INT_TYPE_NAMES))
            ).success:
                type_ = ProfilerDataType.INT

            elif df.expect_column_values_to_be_in_type_list(
                column, type_list=sorted(list(ProfilerTypeMapping.FLOAT_TYPE_NAMES))
            ).success:
                type_ = ProfilerDataType.FLOAT

            elif df.expect_column_values_to_be_in_type_list(
                column, type_list=sorted(list(ProfilerTypeMapping.STRING_TYPE_NAMES))
            ).success:
                type_ = ProfilerDataType.STRING

            elif df.expect_column_values_to_be_in_type_list(
                column, type_list=sorted(list(ProfilerTypeMapping.BOOLEAN_TYPE_NAMES))
            ).success:
                type_ = ProfilerDataType.BOOLEAN

            elif df.expect_column_values_to_be_in_type_list(
                column, type_list=sorted(list(ProfilerTypeMapping.DATETIME_TYPE_NAMES))
            ).success:
                type_ = ProfilerDataType.DATETIME

            else:
                # No family matched; still record a (vacuous) type expectation
                # so the column appears in the generated suite.
                df.expect_column_values_to_be_in_type_list(column, type_list=None)
                type_ = ProfilerDataType.UNKNOWN
        except NotImplementedError:
            # Backend does not support type-list expectations.
            type_ = ProfilerDataType.UNKNOWN

        # Restore non-interactive mode so probe expectations above are recorded,
        # not re-evaluated, during the rest of profiling.
        df.set_config_value("interactive_evaluation", False)
        return type_

    @classmethod
    def _get_column_cardinality(cls, df, column):
        """Classify *column*'s cardinality from its unique-value count and the
        proportion of unique values.

        Buckets (by pct_unique, then num_unique): NONE (no data / probe
        failed), UNIQUE (pct == 1.0), VERY_MANY (pct > 0.1), MANY (pct > 0.02),
        then by absolute count: ONE, TWO, VERY_FEW (< 60), FEW (< 1000),
        else MANY.
        """
        num_unique = None
        pct_unique = None
        df.set_config_value("interactive_evaluation", True)

        try:
            num_unique = df.expect_column_unique_value_count_to_be_between(
                column, None, None
            ).result["observed_value"]
            pct_unique = df.expect_column_proportion_of_unique_values_to_be_between(
                column, None, None
            ).result["observed_value"]
        except KeyError:  # if observed_value value is not set
            # Best effort: log and fall through to the NONE bucket below.
            logger.error(
                "Failed to get cardinality of column {:s} - continuing...".format(
                    column
                )
            )

        if num_unique is None or num_unique == 0 or pct_unique is None:
            cardinality = ProfilerCardinality.NONE
        elif pct_unique == 1.0:
            cardinality = ProfilerCardinality.UNIQUE
        elif pct_unique > 0.1:
            cardinality = ProfilerCardinality.VERY_MANY
        elif pct_unique > 0.02:
            cardinality = ProfilerCardinality.MANY
        else:
            # Low proportion of unique values: bucket by the absolute count.
            if num_unique == 1:
                cardinality = ProfilerCardinality.ONE
            elif num_unique == 2:
                cardinality = ProfilerCardinality.TWO
            elif num_unique < 60:
                cardinality = ProfilerCardinality.VERY_FEW
            elif num_unique < 1000:
                cardinality = ProfilerCardinality.FEW
            else:
                cardinality = ProfilerCardinality.MANY

        df.set_config_value("interactive_evaluation", False)

        return cardinality


class BasicDatasetProfiler(BasicDatasetProfilerBase):
    """BasicDatasetProfiler is inspired by the beloved pandas_profiling project.

    The profiler examines a batch of data and creates a report that answers the basic questions
    most data practitioners would ask about a dataset during exploratory data analysis.
    The profiler reports how unique the values in the column are, as well as the percentage of empty values in it.
    Based on the column's type it provides a description of the column by computing a number of statistics,
    such as min, max, mean and median, for numeric columns, and distribution of values, when appropriate.
    """

    @classmethod
    def _profile(cls, dataset, configuration=None):
        """Build and return an ExpectationSuite for *dataset*.

        Every expectation call below is recorded into the suite (interactive
        evaluation is switched off), so the exact sequence of calls defines
        the generated suite's contents. Which expectations a column gets
        depends on its inferred type and cardinality.
        """
        df = dataset

        df.set_default_expectation_argument("catch_exceptions", True)

        # Table-level expectations (bounds left open; the renderer fills values).
        df.expect_table_row_count_to_be_between(min_value=0, max_value=None)
        df.expect_table_columns_to_match_ordered_list(None)
        # From here on, expectation calls are recorded rather than evaluated.
        df.set_config_value("interactive_evaluation", False)

        columns = df.get_table_columns()

        meta_columns = {}
        for column in columns:
            meta_columns[column] = {"description": ""}

        number_of_columns = len(columns)
        for i, column in enumerate(columns):
            logger.info(
                "            Preparing column {} of {}: {}".format(
                    i + 1, number_of_columns, column
                )
            )

            # df.expect_column_to_exist(column)

            type_ = cls._get_column_type(df, column)
            cardinality = cls._get_column_cardinality(df, column)
            df.expect_column_values_to_not_be_null(
                column, mostly=0.5
            )  # The renderer will show a warning for columns that do not meet this expectation
            df.expect_column_values_to_be_in_set(column, [], result_format="SUMMARY")

            if type_ == ProfilerDataType.INT:
                if cardinality == ProfilerCardinality.UNIQUE:
                    df.expect_column_values_to_be_unique(column)
                elif cardinality in [
                    ProfilerCardinality.ONE,
                    ProfilerCardinality.TWO,
                    ProfilerCardinality.VERY_FEW,
                    ProfilerCardinality.FEW,
                ]:
                    df.expect_column_distinct_values_to_be_in_set(
                        column, value_set=None, result_format="SUMMARY"
                    )
                elif cardinality in [
                    ProfilerCardinality.MANY,
                    ProfilerCardinality.VERY_MANY,
                    ProfilerCardinality.UNIQUE,
                ]:
                    # High-cardinality integers: describe with summary statistics.
                    # TODO: change to class-first expectation structure?
                    df.expect_column_min_to_be_between(
                        column, min_value=None, max_value=None
                    )
                    df.expect_column_max_to_be_between(
                        column, min_value=None, max_value=None
                    )
                    df.expect_column_mean_to_be_between(
                        column, min_value=None, max_value=None
                    )
                    df.expect_column_median_to_be_between(
                        column, min_value=None, max_value=None
                    )
                    df.expect_column_stdev_to_be_between(
                        column, min_value=None, max_value=None
                    )
                    df.expect_column_quantile_values_to_be_between(
                        column,
                        quantile_ranges={
                            "quantiles": [0.05, 0.25, 0.5, 0.75, 0.95],
                            "value_ranges": [
                                [None, None],
                                [None, None],
                                [None, None],
                                [None, None],
                                [None, None],
                            ],
                        },
                    )
                    df.expect_column_kl_divergence_to_be_less_than(
                        column,
                        partition_object=None,
                        threshold=None,
                        result_format="COMPLETE",
                    )
                else:  # unknown cardinality - skip
                    pass
            elif type_ == ProfilerDataType.FLOAT:
                if cardinality == ProfilerCardinality.UNIQUE:
                    df.expect_column_values_to_be_unique(column)

                elif cardinality in [
                    ProfilerCardinality.ONE,
                    ProfilerCardinality.TWO,
                    ProfilerCardinality.VERY_FEW,
                    ProfilerCardinality.FEW,
                ]:
                    df.expect_column_distinct_values_to_be_in_set(
                        column, value_set=None, result_format="SUMMARY"
                    )

                elif cardinality in [
                    ProfilerCardinality.MANY,
                    ProfilerCardinality.VERY_MANY,
                    ProfilerCardinality.UNIQUE,
                ]:
                    # Same statistics as INT, minus stdev.
                    # TODO: migrate to class first structure
                    df.expect_column_min_to_be_between(
                        column, min_value=None, max_value=None
                    )
                    df.expect_column_max_to_be_between(
                        column, min_value=None, max_value=None
                    )
                    df.expect_column_mean_to_be_between(
                        column, min_value=None, max_value=None
                    )
                    df.expect_column_median_to_be_between(
                        column, min_value=None, max_value=None
                    )
                    df.expect_column_quantile_values_to_be_between(
                        column,
                        quantile_ranges={
                            "quantiles": [0.05, 0.25, 0.5, 0.75, 0.95],
                            "value_ranges": [
                                [None, None],
                                [None, None],
                                [None, None],
                                [None, None],
                                [None, None],
                            ],
                        },
                    )
                    df.expect_column_kl_divergence_to_be_less_than(
                        column,
                        partition_object=None,
                        threshold=None,
                        result_format="COMPLETE",
                    )
                else:  # unknown cardinality - skip
                    pass

            elif type_ == ProfilerDataType.STRING:
                # Check for leading and trailing whitespace.
                #!!! It would be nice to build additional Expectations here, but
                #!!! the default logic for remove_expectations prevents us.
                df.expect_column_values_to_not_match_regex(column, r"^\s+|\s+$")

                if cardinality == ProfilerCardinality.UNIQUE:
                    df.expect_column_values_to_be_unique(column)

                elif cardinality in [
                    ProfilerCardinality.ONE,
                    ProfilerCardinality.TWO,
                    ProfilerCardinality.VERY_FEW,
                    ProfilerCardinality.FEW,
                ]:
                    df.expect_column_distinct_values_to_be_in_set(
                        column, value_set=None, result_format="SUMMARY"
                    )
                else:
                    pass

            elif type_ == ProfilerDataType.DATETIME:
                df.expect_column_min_to_be_between(
                    column, min_value=None, max_value=None
                )

                df.expect_column_max_to_be_between(
                    column, min_value=None, max_value=None
                )

                # Re-add once kl_divergence has been modified to support datetimes
                # df.expect_column_kl_divergence_to_be_less_than(column, partition_object=None,
                #                                            threshold=None, result_format='COMPLETE')

                if cardinality in [
                    ProfilerCardinality.ONE,
                    ProfilerCardinality.TWO,
                    ProfilerCardinality.VERY_FEW,
                    ProfilerCardinality.FEW,
                ]:
                    df.expect_column_distinct_values_to_be_in_set(
                        column, value_set=None, result_format="SUMMARY"
                    )

            else:
                # BOOLEAN / UNKNOWN types: only cardinality-based expectations.
                if cardinality == ProfilerCardinality.UNIQUE:
                    df.expect_column_values_to_be_unique(column)

                elif cardinality in [
                    ProfilerCardinality.ONE,
                    ProfilerCardinality.TWO,
                    ProfilerCardinality.VERY_FEW,
                    ProfilerCardinality.FEW,
                ]:
                    df.expect_column_distinct_values_to_be_in_set(
                        column, value_set=None, result_format="SUMMARY"
                    )
                else:
                    pass

        df.set_config_value("interactive_evaluation", True)
        expectation_suite = df.get_expectation_suite(
            suppress_warnings=True, discard_failed_expectations=False
        )
        expectation_suite.meta["columns"] = meta_columns

        return expectation_suite
import json import os from typing import Dict, List from unittest import mock import click import pytest from _pytest.capture import CaptureResult from click.testing import CliRunner, Result from great_expectations import DataContext from great_expectations.cli import cli from great_expectations.cli.suite import ( _process_suite_edit_flags_and_prompt, _process_suite_new_flags_and_prompt, ) from great_expectations.core import ExpectationConfiguration from great_expectations.core.batch import BatchRequest from great_expectations.core.expectation_suite import ExpectationSuite from great_expectations.util import lint_code from tests.cli.utils import assert_no_logging_messages_or_tracebacks from tests.render.test_util import ( find_code_in_notebook, load_notebook_from_path, run_notebook, ) def test_suite_help_output(caplog): runner: CliRunner = CliRunner(mix_stderr=False) result: Result = runner.invoke(cli, ["--v3-api", "suite"], catch_exceptions=False) assert result.exit_code == 0 stdout: str = result.stdout assert ( """ Usage: great_expectations suite [OPTIONS] COMMAND [ARGS]... Expectation Suite operations Options: --help Show this message and exit. Commands: delete Delete an Expectation Suite from the Expectation Store. demo This command is not supported in the v3 (Batch Request) API. edit Edit an existing Expectation Suite. list List existing Expectation Suites. new Create a new Expectation Suite. 
""" in stdout ) assert_no_logging_messages_or_tracebacks( my_caplog=caplog, click_result=result, ) @mock.patch( "great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit" ) def test_suite_demo_deprecation_message( mock_emit, caplog, monkeypatch, empty_data_context_stats_enabled ): context: DataContext = empty_data_context_stats_enabled monkeypatch.chdir(os.path.dirname(context.root_directory)) runner: CliRunner = CliRunner(mix_stderr=False) result: Result = runner.invoke( cli, f"--v3-api suite demo", catch_exceptions=False, ) assert result.exit_code == 0 stdout: str = result.stdout assert "This command is not supported in the v3 (Batch Request) API." in stdout expected_call_args_list = [ mock.call( {"event_payload": {}, "event": "data_context.__init__", "success": True} ), mock.call( { "event": "cli.suite.demo.begin", "event_payload": {"api_version": "v3"}, "success": True, } ), mock.call( { "event": "cli.suite.demo.end", "event_payload": {"api_version": "v3"}, "success": True, } ), ] assert mock_emit.call_args_list == expected_call_args_list assert mock_emit.call_count == len(expected_call_args_list) assert_no_logging_messages_or_tracebacks( my_caplog=caplog, click_result=result, ) @mock.patch( "great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit" ) @mock.patch("subprocess.call", return_value=True, side_effect=None) @mock.patch("webbrowser.open", return_value=True, side_effect=None) def test_suite_new_non_interactive_with_suite_name_prompted_default_runs_notebook_opens_jupyter( mock_webbroser, mock_subprocess, mock_emit, caplog, monkeypatch, titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled, ): context: DataContext = titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled monkeypatch.chdir(os.path.dirname(context.root_directory)) project_dir: str = context.root_directory uncommitted_dir: str = 
os.path.join(project_dir, "uncommitted") expectation_suite_name: str = "warning" runner: CliRunner = CliRunner(mix_stderr=False) result: Result = runner.invoke( cli, f"--v3-api suite new", input="\n", catch_exceptions=False, ) assert result.exit_code == 0 stdout: str = result.stdout assert "Select a datasource" not in stdout assert f"Name the new Expectation Suite [warning]:" in stdout assert "Opening a notebook for you now to edit your expectation suite!" in stdout assert "If you wish to avoid this you can add the `--no-jupyter` flag." in stdout expected_suite_path: str = os.path.join( project_dir, "expectations", f"{expectation_suite_name}.json" ) assert os.path.isfile(expected_suite_path) expected_notebook_path: str = os.path.join( uncommitted_dir, f"edit_{expectation_suite_name}.ipynb" ) assert os.path.isfile(expected_notebook_path) run_notebook( notebook_path=expected_notebook_path, notebook_dir=uncommitted_dir, string_to_be_replaced="context.open_data_docs(resource_identifier=suite_identifier)", replacement_string="", ) context = DataContext(context_root_dir=project_dir) assert expectation_suite_name in context.list_expectation_suite_names() suite: ExpectationSuite = context.get_expectation_suite( expectation_suite_name=expectation_suite_name ) assert suite.expectations == [] assert mock_subprocess.call_count == 1 call_args: List[str] = mock_subprocess.call_args[0][0] assert call_args[0] == "jupyter" assert call_args[1] == "notebook" assert expected_notebook_path in call_args[2] assert mock_webbroser.call_count == 0 assert mock_emit.call_count == 4 assert mock_emit.call_args_list == [ mock.call( {"event_payload": {}, "event": "data_context.__init__", "success": True} ), mock.call( { "event": "cli.suite.new.begin", "event_payload": {"api_version": "v3"}, "success": True, } ), mock.call( { "event": "cli.suite.new.end", "event_payload": {"api_version": "v3"}, "success": True, } ), mock.call( {"event_payload": {}, "event": "data_context.__init__", "success": 
True} ), ] assert_no_logging_messages_or_tracebacks( my_caplog=caplog, click_result=result, ) @mock.patch( "great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit" ) @mock.patch("subprocess.call", return_value=True, side_effect=None) @mock.patch("webbrowser.open", return_value=True, side_effect=None) def test_suite_new_non_interactive_with_suite_name_prompted_custom_runs_notebook_opens_jupyter( mock_webbroser, mock_subprocess, mock_emit, caplog, monkeypatch, titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled, ): context: DataContext = titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled monkeypatch.chdir(os.path.dirname(context.root_directory)) project_dir: str = context.root_directory uncommitted_dir: str = os.path.join(project_dir, "uncommitted") expectation_suite_name: str = "test_suite_name" runner: CliRunner = CliRunner(mix_stderr=False) result: Result = runner.invoke( cli, f"--v3-api suite new", input=f"1\n{expectation_suite_name}\n", catch_exceptions=False, ) assert result.exit_code == 0 stdout: str = result.stdout assert "Select a datasource" not in stdout assert f"Name the new Expectation Suite [warning]:" in stdout assert "Opening a notebook for you now to edit your expectation suite!" in stdout assert "If you wish to avoid this you can add the `--no-jupyter` flag." 
in stdout expected_suite_path: str = os.path.join( project_dir, "expectations", f"{expectation_suite_name}.json" ) assert os.path.isfile(expected_suite_path) expected_notebook_path: str = os.path.join( uncommitted_dir, f"edit_{expectation_suite_name}.ipynb" ) assert os.path.isfile(expected_notebook_path) run_notebook( notebook_path=expected_notebook_path, notebook_dir=uncommitted_dir, string_to_be_replaced="context.open_data_docs(resource_identifier=suite_identifier)", replacement_string="", ) context = DataContext(context_root_dir=project_dir) assert expectation_suite_name in context.list_expectation_suite_names() suite: ExpectationSuite = context.get_expectation_suite( expectation_suite_name=expectation_suite_name ) assert suite.expectations == [] assert mock_subprocess.call_count == 1 call_args: List[str] = mock_subprocess.call_args[0][0] assert call_args[0] == "jupyter" assert call_args[1] == "notebook" assert expected_notebook_path in call_args[2] assert mock_webbroser.call_count == 0 assert mock_emit.call_count == 4 assert mock_emit.call_args_list == [ mock.call( {"event_payload": {}, "event": "data_context.__init__", "success": True} ), mock.call( { "event": "cli.suite.new.begin", "event_payload": {"api_version": "v3"}, "success": True, } ), mock.call( { "event": "cli.suite.new.end", "event_payload": {"api_version": "v3"}, "success": True, } ), mock.call( {"event_payload": {}, "event": "data_context.__init__", "success": True} ), ] assert_no_logging_messages_or_tracebacks( my_caplog=caplog, click_result=result, ) @mock.patch( "great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit" ) @mock.patch("subprocess.call", return_value=True, side_effect=None) @mock.patch("webbrowser.open", return_value=True, side_effect=None) def test_suite_new_non_interactive_with_suite_name_arg_custom_runs_notebook_opens_jupyter( mock_webbroser, mock_subprocess, mock_emit, caplog, monkeypatch, 
titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled, ): context: DataContext = titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled monkeypatch.chdir(os.path.dirname(context.root_directory)) project_dir: str = context.root_directory uncommitted_dir: str = os.path.join(project_dir, "uncommitted") expectation_suite_name: str = "test_suite_name" runner: CliRunner = CliRunner(mix_stderr=False) result: Result = runner.invoke( cli, f"--v3-api suite new --expectation-suite {expectation_suite_name}", catch_exceptions=False, ) assert result.exit_code == 0 stdout: str = result.stdout assert "Select a datasource" not in stdout assert "Opening a notebook for you now to edit your expectation suite!" in stdout assert "If you wish to avoid this you can add the `--no-jupyter` flag." in stdout expected_suite_path: str = os.path.join( project_dir, "expectations", f"{expectation_suite_name}.json" ) assert os.path.isfile(expected_suite_path) expected_notebook_path: str = os.path.join( uncommitted_dir, f"edit_{expectation_suite_name}.ipynb" ) assert os.path.isfile(expected_notebook_path) run_notebook( notebook_path=expected_notebook_path, notebook_dir=uncommitted_dir, string_to_be_replaced="context.open_data_docs(resource_identifier=suite_identifier)", replacement_string="", ) context = DataContext(context_root_dir=project_dir) assert expectation_suite_name in context.list_expectation_suite_names() suite: ExpectationSuite = context.get_expectation_suite( expectation_suite_name=expectation_suite_name ) assert suite.expectations == [] assert mock_subprocess.call_count == 1 call_args: List[str] = mock_subprocess.call_args[0][0] assert call_args[0] == "jupyter" assert call_args[1] == "notebook" assert expected_notebook_path in call_args[2] assert mock_webbroser.call_count == 0 assert mock_emit.call_count == 4 assert mock_emit.call_args_list == [ mock.call( {"event_payload": {}, "event": 
"data_context.__init__", "success": True} ), mock.call( { "event": "cli.suite.new.begin", "event_payload": {"api_version": "v3"}, "success": True, } ), mock.call( { "event": "cli.suite.new.end", "event_payload": {"api_version": "v3"}, "success": True, } ), mock.call( {"event_payload": {}, "event": "data_context.__init__", "success": True} ), ] assert_no_logging_messages_or_tracebacks( my_caplog=caplog, click_result=result, ) @mock.patch( "great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit" ) @mock.patch("subprocess.call", return_value=True, side_effect=None) @mock.patch("webbrowser.open", return_value=True, side_effect=None) def test_suite_new_non_interactive_with_suite_name_arg_custom_runs_notebook_no_jupyter( mock_webbroser, mock_subprocess, mock_emit, caplog, monkeypatch, titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled, ): context: DataContext = titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled monkeypatch.chdir(os.path.dirname(context.root_directory)) project_dir: str = context.root_directory uncommitted_dir: str = os.path.join(project_dir, "uncommitted") expectation_suite_name: str = "test_suite_name" runner: CliRunner = CliRunner(mix_stderr=False) result: Result = runner.invoke( cli, f"--v3-api suite new --expectation-suite {expectation_suite_name} --no-jupyter", catch_exceptions=False, ) assert result.exit_code == 0 stdout: str = result.stdout assert "Select a datasource" not in stdout assert ( "Opening a notebook for you now to edit your expectation suite!" not in stdout ) assert ( "If you wish to avoid this you can add the `--no-jupyter` flag." 
not in stdout ) expected_suite_path: str = os.path.join( project_dir, "expectations", f"{expectation_suite_name}.json" ) assert os.path.isfile(expected_suite_path) expected_notebook_path: str = os.path.join( uncommitted_dir, f"edit_{expectation_suite_name}.ipynb" ) assert os.path.isfile(expected_notebook_path) run_notebook( notebook_path=expected_notebook_path, notebook_dir=uncommitted_dir, string_to_be_replaced="context.open_data_docs(resource_identifier=suite_identifier)", replacement_string="", ) context = DataContext(context_root_dir=project_dir) assert expectation_suite_name in context.list_expectation_suite_names() suite: ExpectationSuite = context.get_expectation_suite( expectation_suite_name=expectation_suite_name ) assert suite.expectations == [] assert mock_subprocess.call_count == 0 assert mock_webbroser.call_count == 0 assert mock_emit.call_count == 4 assert mock_emit.call_args_list == [ mock.call( {"event_payload": {}, "event": "data_context.__init__", "success": True} ), mock.call( { "event": "cli.suite.new.begin", "event_payload": {"api_version": "v3"}, "success": True, } ), mock.call( { "event": "cli.suite.new.end", "event_payload": {"api_version": "v3"}, "success": True, } ), mock.call( {"event_payload": {}, "event": "data_context.__init__", "success": True} ), ] assert_no_logging_messages_or_tracebacks( my_caplog=caplog, click_result=result, ) @mock.patch( "great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit" ) @mock.patch("subprocess.call", return_value=True, side_effect=None) @mock.patch("webbrowser.open", return_value=True, side_effect=None) def test_suite_new_interactive_nonexistent_batch_request_json_file_raises_error( mock_webbroser, mock_subprocess, mock_emit, caplog, monkeypatch, titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled, ): context: DataContext = titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled 
monkeypatch.chdir(os.path.dirname(context.root_directory)) project_dir: str = context.root_directory expectation_suite_name: str = "test_suite_name" runner: CliRunner = CliRunner(mix_stderr=False) result: Result = runner.invoke( cli, f"""--v3-api suite new --expectation-suite {expectation_suite_name} --interactive --batch-request nonexistent_file.json --no-jupyter """, catch_exceptions=False, ) assert result.exit_code == 1 stdout: str = result.stdout assert 'The JSON file with the path "nonexistent_file.json' in stdout context = DataContext(context_root_dir=project_dir) assert expectation_suite_name not in context.list_expectation_suite_names() assert mock_subprocess.call_count == 0 assert mock_webbroser.call_count == 0 assert mock_emit.call_count == 4 assert mock_emit.call_args_list == [ mock.call( {"event_payload": {}, "event": "data_context.__init__", "success": True} ), mock.call( { "event": "cli.suite.new.begin", "event_payload": {"api_version": "v3"}, "success": True, } ), mock.call( { "event": "cli.suite.new.end", "event_payload": {"api_version": "v3"}, "success": False, } ), mock.call( {"event_payload": {}, "event": "data_context.__init__", "success": True} ), ] assert_no_logging_messages_or_tracebacks( my_caplog=caplog, click_result=result, ) @mock.patch( "great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit" ) @mock.patch("subprocess.call", return_value=True, side_effect=None) @mock.patch("webbrowser.open", return_value=True, side_effect=None) def test_suite_new_interactive_malformed_batch_request_json_file_raises_error( mock_webbroser, mock_subprocess, mock_emit, caplog, monkeypatch, titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled, ): context: DataContext = titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled monkeypatch.chdir(os.path.dirname(context.root_directory)) project_dir: str = context.root_directory uncommitted_dir: 
str = os.path.join(project_dir, "uncommitted") expectation_suite_name: str = "test_suite_name" batch_request_file_path: str = os.path.join(uncommitted_dir, f"batch_request.json") with open(batch_request_file_path, "w") as json_file: json_file.write("not_proper_json") runner: CliRunner = CliRunner(mix_stderr=False) result: Result = runner.invoke( cli, f"""--v3-api suite new --expectation-suite {expectation_suite_name} --interactive --batch-request {batch_request_file_path} --no-jupyter """, catch_exceptions=False, ) assert result.exit_code == 1 stdout: str = result.stdout assert "Error" in stdout assert "occurred while attempting to load the JSON file with the path" in stdout context = DataContext(context_root_dir=project_dir) assert expectation_suite_name not in context.list_expectation_suite_names() assert mock_subprocess.call_count == 0 assert mock_webbroser.call_count == 0 assert mock_emit.call_count == 4 assert mock_emit.call_args_list == [ mock.call( {"event_payload": {}, "event": "data_context.__init__", "success": True} ), mock.call( { "event": "cli.suite.new.begin", "event_payload": {"api_version": "v3"}, "success": True, } ), mock.call( { "event": "cli.suite.new.end", "event_payload": {"api_version": "v3"}, "success": False, } ), mock.call( {"event_payload": {}, "event": "data_context.__init__", "success": True} ), ] assert_no_logging_messages_or_tracebacks( my_caplog=caplog, click_result=result, ) @mock.patch( "great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit" ) @mock.patch("subprocess.call", return_value=True, side_effect=None) @mock.patch("webbrowser.open", return_value=True, side_effect=None) def test_suite_new_interactive_valid_batch_request_from_json_file_in_notebook_runs_notebook_no_jupyter( mock_webbroser, mock_subprocess, mock_emit, caplog, monkeypatch, titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled, ): context: DataContext = 
titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled monkeypatch.chdir(os.path.dirname(context.root_directory)) project_dir: str = context.root_directory uncommitted_dir: str = os.path.join(project_dir, "uncommitted") expectation_suite_name: str = "test_suite_name" batch_request: dict = { "datasource_name": "my_datasource", "data_connector_name": "my_basic_data_connector", "data_asset_name": "Titanic_1911", } batch_request_file_path: str = os.path.join(uncommitted_dir, f"batch_request.json") with open(batch_request_file_path, "w") as json_file: json.dump(batch_request, json_file) runner: CliRunner = CliRunner(mix_stderr=False) result: Result = runner.invoke( cli, f"""--v3-api suite new --expectation-suite {expectation_suite_name} --interactive --batch-request {batch_request_file_path} --no-jupyter """, catch_exceptions=False, ) assert result.exit_code == 0 stdout: str = result.stdout assert "Error" not in stdout expected_suite_path: str = os.path.join( project_dir, "expectations", f"{expectation_suite_name}.json" ) assert os.path.isfile(expected_suite_path) expected_notebook_path: str = os.path.join( uncommitted_dir, f"edit_{expectation_suite_name}.ipynb" ) assert os.path.isfile(expected_notebook_path) batch_request_string: str = ( str(BatchRequest(**batch_request)) .replace("{\n", "{\n ") .replace(",\n", ",\n ") .replace("\n}", ",\n}") ) batch_request_string = fr"batch_request = {batch_request_string}" cells_of_interest_dict: Dict[int, dict] = find_code_in_notebook( nb=load_notebook_from_path(notebook_path=expected_notebook_path), search_string=batch_request_string, ) assert len(cells_of_interest_dict) == 1 cells_of_interest_dict: Dict[int, dict] = find_code_in_notebook( nb=load_notebook_from_path(notebook_path=expected_notebook_path), search_string="context.open_data_docs(resource_identifier=suite_identifier)", ) assert not cells_of_interest_dict cells_of_interest_dict: Dict[int, dict] = find_code_in_notebook( 
@mock.patch(
    "great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit"
)
def test_suite_edit_without_suite_name_raises_error(
    mock_emit,
    monkeypatch,
    empty_data_context_stats_enabled,
):
    """This is really only testing click missing arguments"""
    context: DataContext = empty_data_context_stats_enabled
    monkeypatch.chdir(os.path.dirname(context.root_directory))

    runner: CliRunner = CliRunner(mix_stderr=False)
    result: Result = runner.invoke(cli, "--v3-api suite edit", catch_exceptions=False)

    # click reports the missing positional argument on stderr; the quoting
    # style of the argument name varies by click version, so accept either.
    assert result.exit_code == 2
    expected_error_messages = (
        'Error: Missing argument "EXPECTATION_SUITE".',
        "Error: Missing argument 'EXPECTATION_SUITE'.",
    )
    assert any(message in result.stderr for message in expected_error_messages)

    expected_emit_calls = [
        mock.call(
            {"event_payload": {}, "event": "data_context.__init__", "success": True}
        ),
        mock.call(
            {
                "event": "cli.suite.edit.begin",
                "event_payload": {"api_version": "v3"},
                "success": True,
            }
        ),
    ]
    assert mock_emit.call_count == len(expected_emit_calls)
    assert mock_emit.call_args_list == expected_emit_calls
@mock.patch(
    "great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit"
)
def test_suite_edit_datasource_and_batch_request_error(
    mock_emit,
    monkeypatch,
    empty_data_context_stats_enabled,
):
    """This is really only testing click missing arguments"""
    context: DataContext = empty_data_context_stats_enabled
    monkeypatch.chdir(os.path.dirname(context.root_directory))

    expectation_suite_name: str = "test_suite_name"

    # The suite must exist so the command fails on the option conflict,
    # not on a missing suite.
    # noinspection PyUnusedLocal
    suite: ExpectationSuite = context.create_expectation_suite(
        expectation_suite_name=expectation_suite_name
    )
    created_suite_name = context.list_expectation_suites()[0].expectation_suite_name
    assert created_suite_name == expectation_suite_name

    runner: CliRunner = CliRunner(mix_stderr=False)
    result: Result = runner.invoke(
        cli,
        f"--v3-api suite edit {expectation_suite_name} --datasource-name some_datasource_name --batch-request some_file.json --interactive",
        catch_exceptions=False,
    )
    assert result.exit_code == 1

    stdout: str = result.stdout
    expected_message = "Only one of --datasource-name DATASOURCE_NAME and --batch-request <path to JSON file> options can be used."
    assert expected_message in stdout

    expected_emit_calls = [
        mock.call(
            {"event_payload": {}, "event": "data_context.__init__", "success": True}
        ),
        mock.call(
            {
                "event": "cli.suite.edit.begin",
                "event_payload": {"api_version": "v3"},
                "success": True,
            }
        ),
        mock.call(
            {
                "event": "cli.suite.edit.end",
                "event_payload": {"api_version": "v3"},
                "success": False,
            }
        ),
    ]
    assert mock_emit.call_count == len(expected_emit_calls)
    assert mock_emit.call_args_list == expected_emit_calls
@mock.patch(
    "great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit"
)
@mock.patch("subprocess.call", return_value=True, side_effect=None)
@mock.patch("webbrowser.open", return_value=True, side_effect=None)
def test_suite_edit_with_non_existent_suite_name_raises_error(
    mock_webbrowser,
    mock_subprocess,
    mock_emit,
    caplog,
    monkeypatch,
    empty_data_context_stats_enabled,
):
    """
    The command should:
    - exit with a clear error message
    - NOT open Data Docs
    - NOT open jupyter
    """
    context: DataContext = empty_data_context_stats_enabled
    # Precondition: the project has no suites, so any name is "non-existent".
    assert not context.list_expectation_suites()

    monkeypatch.chdir(os.path.dirname(context.root_directory))
    runner: CliRunner = CliRunner(mix_stderr=False)
    result: Result = runner.invoke(
        cli,
        # Fix: no placeholders in the literal, so the f-prefix (flake8 F541) is dropped.
        "--v3-api suite edit not_a_real_suite",
        catch_exceptions=False,
    )
    assert result.exit_code == 1

    stdout: str = result.stdout
    assert "Could not find a suite named `not_a_real_suite`." in stdout
    assert "by running `great_expectations suite list`" in stdout

    assert mock_subprocess.call_count == 0
    assert mock_webbrowser.call_count == 0

    assert mock_emit.call_count == 3
    assert mock_emit.call_args_list == [
        mock.call(
            {"event_payload": {}, "event": "data_context.__init__", "success": True}
        ),
        mock.call(
            {
                "event": "cli.suite.edit.begin",
                "event_payload": {"api_version": "v3"},
                "success": True,
            }
        ),
        mock.call(
            {
                "event": "cli.suite.edit.end",
                "event_payload": {"api_version": "v3"},
                "success": False,
            }
        ),
    ]

    assert_no_logging_messages_or_tracebacks(
        my_caplog=caplog,
        click_result=result,
    )
@mock.patch(
    "great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit"
)
@mock.patch("subprocess.call", return_value=True, side_effect=None)
@mock.patch("webbrowser.open", return_value=True, side_effect=None)
def test_suite_edit_with_non_existent_datasource_shows_helpful_error_message(
    mock_webbrowser,
    mock_subprocess,
    mock_emit,
    caplog,
    monkeypatch,
    empty_data_context_stats_enabled,
):
    """
    The command should:
    - exit with a clear error message
    - NOT open Data Docs
    - NOT open jupyter
    """
    context: DataContext = empty_data_context_stats_enabled
    monkeypatch.chdir(os.path.dirname(context.root_directory))

    expectation_suite_name: str = "test_suite_name"

    # The suite must exist so the failure is attributable to the bad datasource.
    # noinspection PyUnusedLocal
    suite: ExpectationSuite = context.create_expectation_suite(
        expectation_suite_name=expectation_suite_name
    )
    created_suite_name = context.list_expectation_suites()[0].expectation_suite_name
    assert created_suite_name == expectation_suite_name

    runner: CliRunner = CliRunner(mix_stderr=False)
    result: Result = runner.invoke(
        cli,
        f"--v3-api suite edit {expectation_suite_name} --interactive --datasource-name not_real",
        catch_exceptions=False,
    )
    assert result.exit_code == 1

    stdout: str = result.stdout
    expected_message = "Unable to load datasource `not_real` -- no configuration found or invalid configuration."
    assert expected_message in stdout

    assert mock_subprocess.call_count == 0
    assert mock_webbrowser.call_count == 0

    expected_emit_calls = [
        mock.call(
            {"event_payload": {}, "event": "data_context.__init__", "success": True}
        ),
        mock.call(
            {
                "event": "cli.suite.edit.begin",
                "event_payload": {"api_version": "v3"},
                "success": True,
            }
        ),
        mock.call(
            {
                "event": "cli.suite.edit.end",
                "event_payload": {"api_version": "v3"},
                "success": False,
            }
        ),
    ]
    assert mock_emit.call_count == len(expected_emit_calls)
    assert mock_emit.call_args_list == expected_emit_calls

    assert_no_logging_messages_or_tracebacks(
        my_caplog=caplog,
        click_result=result,
    )
@mock.patch(
    "great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit"
)
@mock.patch("subprocess.call", return_value=True, side_effect=None)
@mock.patch("webbrowser.open", return_value=True, side_effect=None)
def test_suite_edit_multiple_datasources_with_no_additional_args_without_citations_runs_notebook_opens_jupyter(
    mock_webbrowser,
    mock_subprocess,
    mock_emit,
    caplog,
    monkeypatch,
    titanic_v013_multi_datasource_pandas_data_context_with_checkpoints_v1_with_empty_store_stats_enabled,
):
    """
    Here we verify that the "suite edit" command helps the user to specify batch_request
    when it is called without the optional command-line arguments that specify the batch.

    First, we call the "suite new" command to create the expectation suite our test
    will edit -- this step is a just a setup.

    We then call the "suite edit" command without any optional command-line arguments.  This means
    that the command will help us specify batch_request interactively.

    The data context has two datasources -- we choose one of them.  After that, we select a data
    connector and, finally, select a data asset from the list.

    The command should:
    - NOT open Data Docs
    - open jupyter
    """
    context: DataContext = titanic_v013_multi_datasource_pandas_data_context_with_checkpoints_v1_with_empty_store_stats_enabled
    monkeypatch.chdir(os.path.dirname(context.root_directory))

    project_dir: str = context.root_directory
    uncommitted_dir: str = os.path.join(project_dir, "uncommitted")

    expectation_suite_name: str = "test_suite_name"

    batch_request: dict = {
        "datasource_name": "my_datasource",
        "data_connector_name": "my_basic_data_connector",
        "data_asset_name": "Titanic_1911",
        "limit": 1000,
    }

    # Expected notebook cell text: BatchRequest repr re-indented the way the
    # notebook renderer formats it.
    batch_request_string: str = (
        str(BatchRequest(**batch_request))
        .replace("{\n", "{\n    ")
        .replace(",\n", ",\n    ")
        .replace("\n}", ",\n}")
    )
    batch_request_string = fr"batch_request = {batch_request_string}"

    # Setup step: create the suite interactively.  Per the docstring, the input
    # answers pick a datasource, then a data connector, then a data asset
    # ("2" / "1" / "1"), with a final Enter to accept.
    runner: CliRunner = CliRunner(mix_stderr=False)
    result: Result = runner.invoke(
        cli,
        [
            "--v3-api",
            "suite",
            "new",
            "--expectation-suite",
            f"{expectation_suite_name}",
            "--interactive",
            "--no-jupyter",
        ],
        input="2\n1\n1\n\n",
        catch_exceptions=False,
    )
    assert result.exit_code == 0

    stdout: str = result.stdout
    assert "A batch of data is required to edit the suite" in stdout
    assert "Select a datasource" in stdout

    assert mock_webbrowser.call_count == 0
    mock_webbrowser.reset_mock()
    assert mock_subprocess.call_count == 0
    mock_subprocess.reset_mock()

    # remove the citations from the suite, so "suite edit" cannot recover the
    # batch_request from them and must prompt interactively again
    context = DataContext(context_root_dir=project_dir)
    suite: ExpectationSuite = context.get_expectation_suite(
        expectation_suite_name=expectation_suite_name
    )
    assert isinstance(suite, ExpectationSuite)
    suite.meta.pop("citations", None)
    context.save_expectation_suite(expectation_suite=suite)

    # Actual testing really starts here
    runner = CliRunner(mix_stderr=False)
    monkeypatch.chdir(os.path.dirname(context.root_directory))
    result = runner.invoke(
        cli,
        [
            "--v3-api",
            "suite",
            "edit",
            f"{expectation_suite_name}",
            "--interactive",
        ],
        input="2\n1\n1\n\n",
        catch_exceptions=False,
    )
    assert result.exit_code == 0

    stdout = result.stdout
    # Without citations the command must prompt for the batch again.
    assert "A batch of data is required to edit the suite" in stdout
    assert "Select a datasource" in stdout

    expected_notebook_path: str = os.path.join(
        uncommitted_dir, f"edit_{expectation_suite_name}.ipynb"
    )
    assert os.path.isfile(expected_notebook_path)

    expected_suite_path: str = os.path.join(
        project_dir, "expectations", f"{expectation_suite_name}.json"
    )
    assert os.path.isfile(expected_suite_path)

    # Exactly one cell contains the interactively assembled batch_request.
    cells_of_interest_dict: Dict[int, dict] = find_code_in_notebook(
        nb=load_notebook_from_path(notebook_path=expected_notebook_path),
        search_string=batch_request_string,
    )
    assert len(cells_of_interest_dict) == 1

    # No Data Docs call for the suite identifier ...
    cells_of_interest_dict: Dict[int, dict] = find_code_in_notebook(
        nb=load_notebook_from_path(notebook_path=expected_notebook_path),
        search_string="context.open_data_docs(resource_identifier=suite_identifier)",
    )
    assert not cells_of_interest_dict

    # ... and exactly one for the validation result (stripped before running).
    cells_of_interest_dict: Dict[int, dict] = find_code_in_notebook(
        nb=load_notebook_from_path(notebook_path=expected_notebook_path),
        search_string="context.open_data_docs(resource_identifier=validation_result_identifier)",
    )
    assert len(cells_of_interest_dict) == 1

    run_notebook(
        notebook_path=expected_notebook_path,
        notebook_dir=uncommitted_dir,
        string_to_be_replaced="context.open_data_docs(resource_identifier=validation_result_identifier)",
        replacement_string="",
    )

    # jupyter opened once (subprocess), Data Docs never (webbrowser).
    assert mock_subprocess.call_count == 1
    assert mock_webbrowser.call_count == 0

    assert mock_emit.call_count == 10
    assert mock_emit.call_args_list == [
        mock.call(
            {"event_payload": {}, "event": "data_context.__init__", "success": True}
        ),
        mock.call(
            {
                "event": "cli.suite.new.begin",
                "event_payload": {"api_version": "v3"},
                "success": True,
            }
        ),
        mock.call(
            {
                "event_payload": {
                    "anonymized_expectation_suite_name": "9df638a13b727807e51b13ec1839bcbe"
                },
                "event": "data_context.save_expectation_suite",
                "success": True,
            }
        ),
        mock.call(
            {
                "event": "cli.suite.new.end",
                "event_payload": {"api_version": "v3"},
                "success": True,
            }
        ),
        mock.call(
            {"event_payload": {}, "event": "data_context.__init__", "success": True}
        ),
        mock.call(
            {
                "event_payload": {
                    "anonymized_expectation_suite_name": "9df638a13b727807e51b13ec1839bcbe"
                },
                "event": "data_context.save_expectation_suite",
                "success": True,
            }
        ),
        mock.call(
            {"event_payload": {}, "event": "data_context.__init__", "success": True}
        ),
        mock.call(
            {
                "event": "cli.suite.edit.begin",
                "event_payload": {"api_version": "v3"},
                "success": True,
            }
        ),
        mock.call(
            {
                "event_payload": {
                    "anonymized_expectation_suite_name": "9df638a13b727807e51b13ec1839bcbe"
                },
                "event": "data_context.save_expectation_suite",
                "success": True,
            }
        ),
        mock.call(
            {
                "event": "cli.suite.edit.end",
                "event_payload": {
                    "anonymized_expectation_suite_name": "9df638a13b727807e51b13ec1839bcbe",
                    "api_version": "v3",
                },
                "success": True,
            }
        ),
    ]

    assert_no_logging_messages_or_tracebacks(
        my_caplog=caplog,
        click_result=result,
    )
@mock.patch(
    "great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit"
)
@mock.patch("subprocess.call", return_value=True, side_effect=None)
@mock.patch("webbrowser.open", return_value=True, side_effect=None)
def test_suite_edit_multiple_datasources_with_no_additional_args_with_citations_runs_notebook_opens_jupyter(
    mock_webbrowser,
    mock_subprocess,
    mock_emit,
    caplog,
    monkeypatch,
    titanic_v013_multi_datasource_pandas_data_context_with_checkpoints_v1_with_empty_store_stats_enabled,
):
    """
    Here we verify that the "suite edit" command uses the batch_request found in
    citations in the existing suite when it is called without the optional
    command-line arguments that specify the batch.

    First, we call the "suite new" command to create the expectation suite our
    test will edit -- this step is a just a setup.

    We then call the "suite edit" command without any optional command-line-arguments.

    The command should:
    - NOT open Data Docs
    - open jupyter
    """
    # NOTE(review): the original docstring said "NOT open jupyter", which
    # contradicted both the test name and the `mock_subprocess.call_count == 1`
    # assertion below; the docstring was corrected to match the code.
    context: DataContext = titanic_v013_multi_datasource_pandas_data_context_with_checkpoints_v1_with_empty_store_stats_enabled
    monkeypatch.chdir(os.path.dirname(context.root_directory))

    project_dir: str = context.root_directory
    uncommitted_dir: str = os.path.join(project_dir, "uncommitted")

    expectation_suite_name: str = "test_suite_name"

    batch_request: dict = {
        "datasource_name": "my_datasource",
        "data_connector_name": "my_basic_data_connector",
        "data_asset_name": "Titanic_1911",
        "limit": 1000,
    }

    # Expected notebook cell text: BatchRequest repr re-indented the way the
    # notebook renderer formats it.
    batch_request_string: str = (
        str(BatchRequest(**batch_request))
        .replace("{\n", "{\n    ")
        .replace(",\n", ",\n    ")
        .replace("\n}", ",\n}")
    )
    batch_request_string = fr"batch_request = {batch_request_string}"

    # Setup step: create the suite interactively; the input answers select
    # datasource / data connector / data asset ("2" / "1" / "1"), final Enter.
    runner: CliRunner = CliRunner(mix_stderr=False)
    result: Result = runner.invoke(
        cli,
        [
            "--v3-api",
            "suite",
            "new",
            "--expectation-suite",
            f"{expectation_suite_name}",
            "--interactive",
            "--no-jupyter",
        ],
        input="2\n1\n1\n\n",
        catch_exceptions=False,
    )
    assert result.exit_code == 0

    stdout: str = result.stdout
    assert "A batch of data is required to edit the suite" in stdout
    assert "Select a datasource" in stdout

    assert mock_webbrowser.call_count == 0
    mock_webbrowser.reset_mock()
    assert mock_subprocess.call_count == 0
    mock_subprocess.reset_mock()

    # Citations are deliberately KEPT here (contrast with the
    # "..._without_citations_..." sibling test).
    context = DataContext(context_root_dir=project_dir)
    suite: ExpectationSuite = context.get_expectation_suite(
        expectation_suite_name=expectation_suite_name
    )
    assert isinstance(suite, ExpectationSuite)

    # Actual testing really starts here
    runner = CliRunner(mix_stderr=False)
    monkeypatch.chdir(os.path.dirname(context.root_directory))
    result = runner.invoke(
        cli,
        [
            "--v3-api",
            "suite",
            "edit",
            f"{expectation_suite_name}",
            "--interactive",
        ],
        catch_exceptions=False,
    )
    assert result.exit_code == 0

    stdout = result.stdout
    # With citations present there must be NO interactive batch prompting.
    assert "A batch of data is required to edit the suite" not in stdout
    assert "Select a datasource" not in stdout

    expected_notebook_path: str = os.path.join(
        uncommitted_dir, f"edit_{expectation_suite_name}.ipynb"
    )
    assert os.path.isfile(expected_notebook_path)

    expected_suite_path: str = os.path.join(
        project_dir, "expectations", f"{expectation_suite_name}.json"
    )
    assert os.path.isfile(expected_suite_path)

    # The batch_request recovered from the citation appears in exactly one cell.
    cells_of_interest_dict: Dict[int, dict] = find_code_in_notebook(
        nb=load_notebook_from_path(notebook_path=expected_notebook_path),
        search_string=batch_request_string,
    )
    assert len(cells_of_interest_dict) == 1

    cells_of_interest_dict: Dict[int, dict] = find_code_in_notebook(
        nb=load_notebook_from_path(notebook_path=expected_notebook_path),
        search_string="context.open_data_docs(resource_identifier=suite_identifier)",
    )
    assert not cells_of_interest_dict

    cells_of_interest_dict: Dict[int, dict] = find_code_in_notebook(
        nb=load_notebook_from_path(notebook_path=expected_notebook_path),
        search_string="context.open_data_docs(resource_identifier=validation_result_identifier)",
    )
    assert len(cells_of_interest_dict) == 1

    run_notebook(
        notebook_path=expected_notebook_path,
        notebook_dir=uncommitted_dir,
        string_to_be_replaced="context.open_data_docs(resource_identifier=validation_result_identifier)",
        replacement_string="",
    )

    # jupyter opened once (subprocess), Data Docs never (webbrowser).
    assert mock_subprocess.call_count == 1
    assert mock_webbrowser.call_count == 0

    assert mock_emit.call_count == 8
    assert mock_emit.call_args_list == [
        mock.call(
            {"event_payload": {}, "event": "data_context.__init__", "success": True}
        ),
        mock.call(
            {
                "event": "cli.suite.new.begin",
                "event_payload": {"api_version": "v3"},
                "success": True,
            }
        ),
        mock.call(
            {
                "event_payload": {
                    "anonymized_expectation_suite_name": "9df638a13b727807e51b13ec1839bcbe"
                },
                "event": "data_context.save_expectation_suite",
                "success": True,
            }
        ),
        mock.call(
            {
                "event": "cli.suite.new.end",
                "event_payload": {"api_version": "v3"},
                "success": True,
            }
        ),
        mock.call(
            {"event_payload": {}, "event": "data_context.__init__", "success": True}
        ),
        mock.call(
            {"event_payload": {}, "event": "data_context.__init__", "success": True}
        ),
        mock.call(
            {
                "event": "cli.suite.edit.begin",
                "event_payload": {"api_version": "v3"},
                "success": True,
            }
        ),
        mock.call(
            {
                "event": "cli.suite.edit.end",
                "event_payload": {
                    "anonymized_expectation_suite_name": "9df638a13b727807e51b13ec1839bcbe",
                    "api_version": "v3",
                },
                "success": True,
            }
        ),
    ]

    assert_no_logging_messages_or_tracebacks(
        my_caplog=caplog,
        click_result=result,
    )
@mock.patch(
    "great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit"
)
@mock.patch("subprocess.call", return_value=True, side_effect=None)
@mock.patch("webbrowser.open", return_value=True, side_effect=None)
def test_suite_edit_multiple_datasources_with_sql_with_no_additional_args_without_citations_runs_notebook_opens_jupyter(
    mock_webbrowser,
    mock_subprocess,
    mock_emit,
    caplog,
    monkeypatch,
    titanic_v013_multi_datasource_multi_execution_engine_data_context_with_checkpoints_v1_with_empty_store_stats_enabled,
):
    """
    Here we verify that the "suite edit" command helps the user to specify batch_request
    when it is called without the optional command-line arguments that specify the batch.

    First, we call the "suite new" command to create the expectation suite our test
    will edit -- this step is a just a setup (we use an SQL datasource for this test).

    We then call the "suite edit" command without any optional command-line arguments.  This means
    that the command will help us specify batch_request interactively.

    The data context has two datasources -- we choose one of them.  After that, we select a data
    connector and, finally, select a data asset from the list.

    The command should:
    - NOT open Data Docs
    - open jupyter
    """
    context: DataContext = titanic_v013_multi_datasource_multi_execution_engine_data_context_with_checkpoints_v1_with_empty_store_stats_enabled
    monkeypatch.chdir(os.path.dirname(context.root_directory))

    project_dir: str = context.root_directory
    uncommitted_dir: str = os.path.join(project_dir, "uncommitted")

    expectation_suite_name: str = "test_suite_name"

    batch_request: dict = {
        "datasource_name": "my_sqlite_db_datasource",
        "data_connector_name": "whole_table",
        "data_asset_name": "titanic",
        "limit": 1000,
    }

    # Expected notebook cell text: BatchRequest repr re-indented the way the
    # notebook renderer formats it.
    batch_request_string: str = (
        str(BatchRequest(**batch_request))
        .replace("{\n", "{\n    ")
        .replace(",\n", ",\n    ")
        .replace("\n}", ",\n}")
    )
    batch_request_string = fr"batch_request = {batch_request_string}"

    # Setup step: create the suite interactively against the SQL datasource.
    # Input answers walk the datasource/connector/asset prompts ("3" / "2" /
    # "y" / "2"); the "y" presumably confirms a listing step -- verify against
    # the CLI prompt flow if this changes.
    runner: CliRunner = CliRunner(mix_stderr=False)
    result: Result = runner.invoke(
        cli,
        [
            "--v3-api",
            "suite",
            "new",
            "--expectation-suite",
            f"{expectation_suite_name}",
            "--interactive",
            "--no-jupyter",
        ],
        input="3\n2\ny\n2\n",
        catch_exceptions=False,
    )
    assert result.exit_code == 0

    stdout: str = result.stdout
    assert "A batch of data is required to edit the suite" in stdout
    assert "Select a datasource" in stdout

    assert mock_webbrowser.call_count == 0
    mock_webbrowser.reset_mock()
    assert mock_subprocess.call_count == 0
    mock_subprocess.reset_mock()

    # remove the citations from the suite, so "suite edit" cannot recover the
    # batch_request from them and must prompt interactively again
    context = DataContext(context_root_dir=project_dir)
    suite: ExpectationSuite = context.get_expectation_suite(
        expectation_suite_name=expectation_suite_name
    )
    assert isinstance(suite, ExpectationSuite)
    suite.meta.pop("citations", None)
    context.save_expectation_suite(expectation_suite=suite)

    # Actual testing really starts here
    runner = CliRunner(mix_stderr=False)
    monkeypatch.chdir(os.path.dirname(context.root_directory))
    result = runner.invoke(
        cli,
        [
            "--v3-api",
            "suite",
            "edit",
            f"{expectation_suite_name}",
            "--interactive",
        ],
        input="3\n2\ny\n2\n",
        catch_exceptions=False,
    )
    assert result.exit_code == 0

    stdout = result.stdout
    # Without citations the command must prompt for the batch again.
    assert "A batch of data is required to edit the suite" in stdout
    assert "Select a datasource" in stdout

    expected_notebook_path: str = os.path.join(
        uncommitted_dir, f"edit_{expectation_suite_name}.ipynb"
    )
    assert os.path.isfile(expected_notebook_path)

    expected_suite_path: str = os.path.join(
        project_dir, "expectations", f"{expectation_suite_name}.json"
    )
    assert os.path.isfile(expected_suite_path)

    # Exactly one cell contains the interactively assembled batch_request.
    cells_of_interest_dict: Dict[int, dict] = find_code_in_notebook(
        nb=load_notebook_from_path(notebook_path=expected_notebook_path),
        search_string=batch_request_string,
    )
    assert len(cells_of_interest_dict) == 1

    cells_of_interest_dict: Dict[int, dict] = find_code_in_notebook(
        nb=load_notebook_from_path(notebook_path=expected_notebook_path),
        search_string="context.open_data_docs(resource_identifier=suite_identifier)",
    )
    assert not cells_of_interest_dict

    cells_of_interest_dict: Dict[int, dict] = find_code_in_notebook(
        nb=load_notebook_from_path(notebook_path=expected_notebook_path),
        search_string="context.open_data_docs(resource_identifier=validation_result_identifier)",
    )
    assert len(cells_of_interest_dict) == 1

    run_notebook(
        notebook_path=expected_notebook_path,
        notebook_dir=uncommitted_dir,
        string_to_be_replaced="context.open_data_docs(resource_identifier=validation_result_identifier)",
        replacement_string="",
    )

    # jupyter opened once (subprocess), Data Docs never (webbrowser).
    assert mock_subprocess.call_count == 1
    assert mock_webbrowser.call_count == 0

    assert mock_emit.call_count == 10
    assert mock_emit.call_args_list == [
        mock.call(
            {"event_payload": {}, "event": "data_context.__init__", "success": True}
        ),
        mock.call(
            {
                "event": "cli.suite.new.begin",
                "event_payload": {"api_version": "v3"},
                "success": True,
            }
        ),
        mock.call(
            {
                "event_payload": {
                    "anonymized_expectation_suite_name": "9df638a13b727807e51b13ec1839bcbe"
                },
                "event": "data_context.save_expectation_suite",
                "success": True,
            }
        ),
        mock.call(
            {
                "event": "cli.suite.new.end",
                "event_payload": {"api_version": "v3"},
                "success": True,
            }
        ),
        mock.call(
            {"event_payload": {}, "event": "data_context.__init__", "success": True}
        ),
        mock.call(
            {
                "event_payload": {
                    "anonymized_expectation_suite_name": "9df638a13b727807e51b13ec1839bcbe"
                },
                "event": "data_context.save_expectation_suite",
                "success": True,
            }
        ),
        mock.call(
            {"event_payload": {}, "event": "data_context.__init__", "success": True}
        ),
        mock.call(
            {
                "event": "cli.suite.edit.begin",
                "event_payload": {"api_version": "v3"},
                "success": True,
            }
        ),
        mock.call(
            {
                "event_payload": {
                    "anonymized_expectation_suite_name": "9df638a13b727807e51b13ec1839bcbe"
                },
                "event": "data_context.save_expectation_suite",
                "success": True,
            }
        ),
        mock.call(
            {
                "event": "cli.suite.edit.end",
                "event_payload": {
                    "anonymized_expectation_suite_name": "9df638a13b727807e51b13ec1839bcbe",
                    "api_version": "v3",
                },
                "success": True,
            }
        ),
    ]

    assert_no_logging_messages_or_tracebacks(
        my_caplog=caplog,
        click_result=result,
    )
@mock.patch(
    "great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit"
)
@mock.patch("subprocess.call", return_value=True, side_effect=None)
@mock.patch("webbrowser.open", return_value=True, side_effect=None)
def test_suite_edit_multiple_datasources_with_sql_with_no_additional_args_with_citations_runs_notebook_opens_jupyter(
    mock_webbrowser,
    mock_subprocess,
    mock_emit,
    caplog,
    monkeypatch,
    titanic_v013_multi_datasource_multi_execution_engine_data_context_with_checkpoints_v1_with_empty_store_stats_enabled,
):
    """
    Here we verify that the "suite edit" command uses the batch_request found in
    citations in the existing suite when it is called without the optional
    command-line arguments that specify the batch.

    First, we call the "suite new" command to create the expectation suite our
    test will edit -- this step is a just a setup (we use an SQL datasource for
    this test).

    We then call the "suite edit" command without any optional command-line-arguments.

    The command should:
    - NOT open Data Docs
    - open jupyter
    """
    # NOTE(review): the original docstring said "NOT open jupyter", which
    # contradicted both the test name and the `mock_subprocess.call_count == 1`
    # assertion below; the docstring was corrected to match the code.
    context: DataContext = titanic_v013_multi_datasource_multi_execution_engine_data_context_with_checkpoints_v1_with_empty_store_stats_enabled
    monkeypatch.chdir(os.path.dirname(context.root_directory))

    project_dir: str = context.root_directory
    uncommitted_dir: str = os.path.join(project_dir, "uncommitted")

    expectation_suite_name: str = "test_suite_name"

    batch_request: dict = {
        "datasource_name": "my_sqlite_db_datasource",
        "data_connector_name": "whole_table",
        "data_asset_name": "titanic",
        "limit": 1000,
    }

    # Expected notebook cell text: BatchRequest repr re-indented the way the
    # notebook renderer formats it.
    batch_request_string: str = (
        str(BatchRequest(**batch_request))
        .replace("{\n", "{\n    ")
        .replace(",\n", ",\n    ")
        .replace("\n}", ",\n}")
    )
    batch_request_string = fr"batch_request = {batch_request_string}"

    # Setup step: create the suite interactively against the SQL datasource
    # (prompt answers "3" / "2" / "y" / "2" select datasource, connector,
    # confirmation and asset -- verify against the CLI prompt flow if changed).
    runner: CliRunner = CliRunner(mix_stderr=False)
    result: Result = runner.invoke(
        cli,
        [
            "--v3-api",
            "suite",
            "new",
            "--expectation-suite",
            f"{expectation_suite_name}",
            "--interactive",
            "--no-jupyter",
        ],
        input="3\n2\ny\n2\n",
        catch_exceptions=False,
    )
    assert result.exit_code == 0

    stdout: str = result.stdout
    assert "A batch of data is required to edit the suite" in stdout
    assert "Select a datasource" in stdout

    assert mock_webbrowser.call_count == 0
    mock_webbrowser.reset_mock()
    assert mock_subprocess.call_count == 0
    mock_subprocess.reset_mock()

    # Citations are deliberately KEPT here (contrast with the
    # "..._without_citations_..." sibling test).
    context = DataContext(context_root_dir=project_dir)
    suite: ExpectationSuite = context.get_expectation_suite(
        expectation_suite_name=expectation_suite_name
    )
    assert isinstance(suite, ExpectationSuite)

    # Actual testing really starts here
    runner = CliRunner(mix_stderr=False)
    monkeypatch.chdir(os.path.dirname(context.root_directory))
    result = runner.invoke(
        cli,
        [
            "--v3-api",
            "suite",
            "edit",
            f"{expectation_suite_name}",
            "--interactive",
        ],
        catch_exceptions=False,
    )
    assert result.exit_code == 0

    stdout = result.stdout
    # With citations present there must be NO interactive batch prompting.
    assert "A batch of data is required to edit the suite" not in stdout
    assert "Select a datasource" not in stdout

    expected_notebook_path: str = os.path.join(
        uncommitted_dir, f"edit_{expectation_suite_name}.ipynb"
    )
    assert os.path.isfile(expected_notebook_path)

    expected_suite_path: str = os.path.join(
        project_dir, "expectations", f"{expectation_suite_name}.json"
    )
    assert os.path.isfile(expected_suite_path)

    # The batch_request recovered from the citation appears in exactly one cell.
    cells_of_interest_dict: Dict[int, dict] = find_code_in_notebook(
        nb=load_notebook_from_path(notebook_path=expected_notebook_path),
        search_string=batch_request_string,
    )
    assert len(cells_of_interest_dict) == 1

    cells_of_interest_dict: Dict[int, dict] = find_code_in_notebook(
        nb=load_notebook_from_path(notebook_path=expected_notebook_path),
        search_string="context.open_data_docs(resource_identifier=suite_identifier)",
    )
    assert not cells_of_interest_dict

    cells_of_interest_dict: Dict[int, dict] = find_code_in_notebook(
        nb=load_notebook_from_path(notebook_path=expected_notebook_path),
        search_string="context.open_data_docs(resource_identifier=validation_result_identifier)",
    )
    assert len(cells_of_interest_dict) == 1

    run_notebook(
        notebook_path=expected_notebook_path,
        notebook_dir=uncommitted_dir,
        string_to_be_replaced="context.open_data_docs(resource_identifier=validation_result_identifier)",
        replacement_string="",
    )

    # jupyter opened once (subprocess), Data Docs never (webbrowser).
    assert mock_subprocess.call_count == 1
    assert mock_webbrowser.call_count == 0

    assert mock_emit.call_count == 8
    assert mock_emit.call_args_list == [
        mock.call(
            {"event_payload": {}, "event": "data_context.__init__", "success": True}
        ),
        mock.call(
            {
                "event": "cli.suite.new.begin",
                "event_payload": {"api_version": "v3"},
                "success": True,
            }
        ),
        mock.call(
            {
                "event_payload": {
                    "anonymized_expectation_suite_name": "9df638a13b727807e51b13ec1839bcbe"
                },
                "event": "data_context.save_expectation_suite",
                "success": True,
            }
        ),
        mock.call(
            {
                "event": "cli.suite.new.end",
                "event_payload": {"api_version": "v3"},
                "success": True,
            }
        ),
        mock.call(
            {"event_payload": {}, "event": "data_context.__init__", "success": True}
        ),
        mock.call(
            {"event_payload": {}, "event": "data_context.__init__", "success": True}
        ),
        mock.call(
            {
                "event": "cli.suite.edit.begin",
                "event_payload": {"api_version": "v3"},
                "success": True,
            }
        ),
        mock.call(
            {
                "event": "cli.suite.edit.end",
                "event_payload": {
                    "anonymized_expectation_suite_name": "9df638a13b727807e51b13ec1839bcbe",
                    "api_version": "v3",
                },
                "success": True,
            }
        ),
    ]

    assert_no_logging_messages_or_tracebacks(
        my_caplog=caplog,
        click_result=result,
    )
@mock.patch(
    "great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit"
)
@mock.patch("subprocess.call", return_value=True, side_effect=None)
@mock.patch("webbrowser.open", return_value=True, side_effect=None)
def test_suite_edit_interactive_batch_request_without_datasource_json_file_raises_helpful_error(
    mock_webbroser,
    mock_subprocess,
    mock_emit,
    caplog,
    monkeypatch,
    empty_data_context_stats_enabled,
):
    """
    "suite edit --interactive --batch-request <JSON file missing datasource_name>"
    should fail with a helpful validation message, without prompting for a batch
    and without opening Data Docs or jupyter.
    """
    context: DataContext = empty_data_context_stats_enabled
    monkeypatch.chdir(os.path.dirname(context.root_directory))

    project_dir: str = context.root_directory
    uncommitted_dir: str = os.path.join(project_dir, "uncommitted")

    expectation_suite_name: str = "test_suite_name"

    # The suite must exist so the failure is attributable to the bad batch_request.
    # noinspection PyUnusedLocal
    suite: ExpectationSuite = context.create_expectation_suite(
        expectation_suite_name=expectation_suite_name
    )
    assert (
        context.list_expectation_suites()[0].expectation_suite_name
        == expectation_suite_name
    )

    # Deliberately invalid: "datasource_name" is omitted.
    batch_request: dict = {
        "data_connector_name": "my_basic_data_connector",
        "data_asset_name": "Titanic_1911",
    }

    # Fix: the literal has no placeholders, so the f-prefix (flake8 F541) is dropped.
    batch_request_file_path: str = os.path.join(
        uncommitted_dir, "batch_request_missing_datasource.json"
    )
    with open(batch_request_file_path, "w") as json_file:
        json.dump(batch_request, json_file)

    runner: CliRunner = CliRunner(mix_stderr=False)
    result: Result = runner.invoke(
        cli,
        f"""--v3-api suite edit {expectation_suite_name} --interactive --batch-request
{batch_request_file_path} --no-jupyter
""",
        catch_exceptions=False,
    )
    assert result.exit_code == 1

    stdout: str = result.stdout
    assert "A batch of data is required to edit the suite" not in stdout
    assert "Select a datasource" not in stdout
    assert (
        "Please check that your batch_request is valid and is able to load a batch."
        in stdout
    )
    assert 'The type of an datasource name must be a string (Python "str").' in stdout

    assert mock_subprocess.call_count == 0
    assert mock_webbroser.call_count == 0

    assert mock_emit.call_count == 3
    assert mock_emit.call_args_list == [
        mock.call(
            {"event_payload": {}, "event": "data_context.__init__", "success": True}
        ),
        mock.call(
            {
                "event": "cli.suite.edit.begin",
                "event_payload": {"api_version": "v3"},
                "success": True,
            }
        ),
        mock.call(
            {
                "event": "cli.suite.edit.end",
                "event_payload": {"api_version": "v3"},
                "success": False,
            }
        ),
    ]

    assert_no_logging_messages_or_tracebacks(
        my_caplog=caplog,
        click_result=result,
    )
@mock.patch(
    "great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit"
)
def test_suite_list_with_zero_suites(
    mock_emit, caplog, monkeypatch, empty_data_context_stats_enabled
):
    """"suite list" on an empty project reports that no suites exist."""
    context: DataContext = empty_data_context_stats_enabled
    config_file_path: str = os.path.join(
        context.root_directory, "great_expectations.yml"
    )
    assert os.path.exists(config_file_path)

    monkeypatch.chdir(os.path.dirname(context.root_directory))
    runner: CliRunner = CliRunner(mix_stderr=False)
    result: Result = runner.invoke(
        cli,
        # Fix: no placeholders in the literal, so the f-prefix (flake8 F541) is dropped.
        "--v3-api suite list",
        catch_exceptions=False,
    )
    assert result.exit_code == 0

    stdout: str = result.stdout
    assert "No Expectation Suites found" in stdout

    assert mock_emit.call_count == 3
    assert mock_emit.call_args_list == [
        mock.call(
            {"event_payload": {}, "event": "data_context.__init__", "success": True}
        ),
        mock.call(
            {
                "event": "cli.suite.list.begin",
                "event_payload": {"api_version": "v3"},
                "success": True,
            }
        ),
        mock.call(
            {
                "event": "cli.suite.list.end",
                "event_payload": {"api_version": "v3"},
                "success": True,
            }
        ),
    ]

    assert_no_logging_messages_or_tracebacks(
        my_caplog=caplog,
        click_result=result,
    )
test_suite_list_with_one_suite( mock_emit, caplog, monkeypatch, empty_data_context_stats_enabled ): context: DataContext = empty_data_context_stats_enabled monkeypatch.chdir(os.path.dirname(context.root_directory)) project_dir: str = context.root_directory config_file_path: str = os.path.join(project_dir, "great_expectations.yml") assert os.path.exists(config_file_path) expectation_suite_dir_name: str = "a_dir" expectation_suite_name: str = "test_suite_name" # noinspection PyUnusedLocal suite: ExpectationSuite = context.create_expectation_suite( expectation_suite_name=f"{expectation_suite_dir_name}.{expectation_suite_name}" ) runner: CliRunner = CliRunner(mix_stderr=False) result: Result = runner.invoke( cli, f"--v3-api suite list", catch_exceptions=False, ) assert result.exit_code == 0 stdout: str = result.stdout assert "1 Expectation Suite found" in stdout assert f"{expectation_suite_dir_name}.{expectation_suite_name}" in stdout assert mock_emit.call_count == 3 assert mock_emit.call_args_list == [ mock.call( {"event_payload": {}, "event": "data_context.__init__", "success": True} ), mock.call( { "event": "cli.suite.list.begin", "event_payload": {"api_version": "v3"}, "success": True, } ), mock.call( { "event": "cli.suite.list.end", "event_payload": {"api_version": "v3"}, "success": True, } ), ] assert_no_logging_messages_or_tracebacks( my_caplog=caplog, click_result=result, ) @mock.patch( "great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit" ) def test_suite_list_with_multiple_suites( mock_emit, caplog, monkeypatch, empty_data_context_stats_enabled ): context: DataContext = empty_data_context_stats_enabled monkeypatch.chdir(os.path.dirname(context.root_directory)) project_dir: str = context.root_directory # noinspection PyUnusedLocal suite_0: ExpectationSuite = context.create_expectation_suite( expectation_suite_name="a.warning" ) # noinspection PyUnusedLocal suite_1: ExpectationSuite = context.create_expectation_suite( 
expectation_suite_name="b.warning" ) # noinspection PyUnusedLocal suite_2: ExpectationSuite = context.create_expectation_suite( expectation_suite_name="c.warning" ) config_file_path: str = os.path.join(project_dir, "great_expectations.yml") assert os.path.exists(config_file_path) runner: CliRunner = CliRunner(mix_stderr=False) result: Result = runner.invoke( cli, f"--v3-api suite list", catch_exceptions=False, ) assert result.exit_code == 0 stdout: str = result.stdout assert "3 Expectation Suites found:" in stdout assert "a.warning" in stdout assert "b.warning" in stdout assert "c.warning" in stdout assert mock_emit.call_count == 3 assert mock_emit.call_args_list == [ mock.call( {"event_payload": {}, "event": "data_context.__init__", "success": True} ), mock.call( { "event": "cli.suite.list.begin", "event_payload": {"api_version": "v3"}, "success": True, } ), mock.call( { "event": "cli.suite.list.end", "event_payload": {"api_version": "v3"}, "success": True, } ), ] assert_no_logging_messages_or_tracebacks( my_caplog=caplog, click_result=result, ) @mock.patch( "great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit" ) def test_suite_delete_with_zero_suites( mock_emit, caplog, monkeypatch, empty_data_context_stats_enabled ): context: DataContext = empty_data_context_stats_enabled monkeypatch.chdir(os.path.dirname(context.root_directory)) runner: CliRunner = CliRunner(mix_stderr=False) result: Result = runner.invoke( cli, f"--v3-api suite delete not_a_suite", catch_exceptions=False, ) assert result.exit_code == 1 stdout: str = result.stdout assert "No expectation suites found in the project" in stdout assert mock_emit.call_count == 3 assert mock_emit.call_args_list == [ mock.call( {"event_payload": {}, "event": "data_context.__init__", "success": True} ), mock.call( { "event": "cli.suite.delete.begin", "event_payload": {"api_version": "v3"}, "success": True, } ), mock.call( { "event": "cli.suite.delete.end", "event_payload": 
{"api_version": "v3"}, "success": False, } ), ] assert_no_logging_messages_or_tracebacks( my_caplog=caplog, click_result=result, ) @mock.patch( "great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit" ) def test_suite_delete_with_non_existent_suite( mock_emit, caplog, monkeypatch, empty_data_context_stats_enabled ): context: DataContext = empty_data_context_stats_enabled monkeypatch.chdir(os.path.dirname(context.root_directory)) expectation_suite_name: str = "test_suite_name" suite: ExpectationSuite = context.create_expectation_suite( expectation_suite_name=expectation_suite_name ) context.save_expectation_suite(expectation_suite=suite) assert ( context.list_expectation_suites()[0].expectation_suite_name == expectation_suite_name ) mock_emit.reset_mock() runner: CliRunner = CliRunner(mix_stderr=False) result: Result = runner.invoke( cli, f"--v3-api suite delete not_a_suite", catch_exceptions=False, ) assert result.exit_code == 1 stdout: str = result.stdout assert "No expectation suite named not_a_suite found" in stdout assert mock_emit.call_count == 3 assert mock_emit.call_args_list == [ mock.call( {"event_payload": {}, "event": "data_context.__init__", "success": True} ), mock.call( { "event": "cli.suite.delete.begin", "event_payload": {"api_version": "v3"}, "success": True, } ), mock.call( { "event": "cli.suite.delete.end", "event_payload": {"api_version": "v3"}, "success": False, } ), ] assert_no_logging_messages_or_tracebacks( my_caplog=caplog, click_result=result, ) @mock.patch( "great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit" ) def test_suite_delete_with_one_suite( mock_emit, caplog, monkeypatch, empty_data_context_stats_enabled ): context: DataContext = empty_data_context_stats_enabled monkeypatch.chdir(os.path.dirname(context.root_directory)) project_dir: str = empty_data_context_stats_enabled.root_directory expectation_suite_dir_name: str = "a_dir" expectation_suite_name: str = 
"test_suite_name" # noinspection PyUnusedLocal suite: ExpectationSuite = context.create_expectation_suite( expectation_suite_name=f"{expectation_suite_dir_name}.{expectation_suite_name}" ) context.save_expectation_suite(expectation_suite=suite) assert ( context.list_expectation_suites()[0].expectation_suite_name == f"{expectation_suite_dir_name}.{expectation_suite_name}" ) mock_emit.reset_mock() suite_dir: str = os.path.join( project_dir, "expectations", expectation_suite_dir_name ) suite_path: str = os.path.join(suite_dir, f"{expectation_suite_name}.json") assert os.path.isfile(suite_path) runner: CliRunner = CliRunner(mix_stderr=False) result: Result = runner.invoke( cli, f"--v3-api suite delete {expectation_suite_dir_name}.{expectation_suite_name}", input="\n", catch_exceptions=False, ) assert result.exit_code == 0 stdout: str = result.stdout assert ( f"Deleted the expectation suite named: {expectation_suite_dir_name}.{expectation_suite_name}" in stdout ) assert not os.path.isfile(suite_path) assert mock_emit.call_count == 3 assert mock_emit.call_args_list == [ mock.call( {"event_payload": {}, "event": "data_context.__init__", "success": True} ), mock.call( { "event": "cli.suite.delete.begin", "event_payload": {"api_version": "v3"}, "success": True, } ), mock.call( { "event": "cli.suite.delete.end", "event_payload": {"api_version": "v3"}, "success": True, } ), ] assert_no_logging_messages_or_tracebacks( my_caplog=caplog, click_result=result, ) @mock.patch( "great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit" ) def test_suite_delete_canceled_with_one_suite( mock_emit, caplog, monkeypatch, empty_data_context_stats_enabled ): context: DataContext = empty_data_context_stats_enabled monkeypatch.chdir(os.path.dirname(context.root_directory)) project_dir: str = empty_data_context_stats_enabled.root_directory expectation_suite_dir_name: str = "a_dir" expectation_suite_name: str = "test_suite_name" # noinspection PyUnusedLocal suite: 
ExpectationSuite = context.create_expectation_suite( expectation_suite_name=f"{expectation_suite_dir_name}.{expectation_suite_name}" ) context.save_expectation_suite(expectation_suite=suite) assert ( context.list_expectation_suites()[0].expectation_suite_name == f"{expectation_suite_dir_name}.{expectation_suite_name}" ) mock_emit.reset_mock() suite_dir: str = os.path.join( project_dir, "expectations", expectation_suite_dir_name ) suite_path: str = os.path.join(suite_dir, f"{expectation_suite_name}.json") assert os.path.isfile(suite_path) runner: CliRunner = CliRunner(mix_stderr=False) result: Result = runner.invoke( cli, f"--v3-api suite delete {expectation_suite_dir_name}.{expectation_suite_name}", input="n\n", catch_exceptions=False, ) assert result.exit_code == 0 stdout: str = result.stdout assert ( f"Suite `{expectation_suite_dir_name}.{expectation_suite_name}` was not deleted" in stdout ) assert os.path.isfile(suite_path) assert mock_emit.call_count == 2 assert mock_emit.call_args_list == [ mock.call( {"event_payload": {}, "event": "data_context.__init__", "success": True} ), mock.call( { "event": "cli.suite.delete.begin", "event_payload": {"api_version": "v3"}, "success": True, } ), ] assert_no_logging_messages_or_tracebacks( my_caplog=caplog, click_result=result, ) @mock.patch( "great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit" ) def test_suite_delete_with_one_suite_assume_yes_flag( mock_emit, caplog, monkeypatch, empty_data_context_stats_enabled ): context: DataContext = empty_data_context_stats_enabled monkeypatch.chdir(os.path.dirname(context.root_directory)) project_dir: str = context.root_directory expectation_suite_dir_name: str = "a_dir" expectation_suite_name: str = "test_suite_name" # noinspection PyUnusedLocal suite: ExpectationSuite = context.create_expectation_suite( expectation_suite_name=f"{expectation_suite_dir_name}.{expectation_suite_name}" ) context.save_expectation_suite(expectation_suite=suite) assert 
( context.list_expectation_suites()[0].expectation_suite_name == f"{expectation_suite_dir_name}.{expectation_suite_name}" ) mock_emit.reset_mock() suite_dir: str = os.path.join( project_dir, "expectations", expectation_suite_dir_name ) suite_path: str = os.path.join(suite_dir, f"{expectation_suite_name}.json") assert os.path.isfile(suite_path) runner: CliRunner = CliRunner(mix_stderr=False) result: Result = runner.invoke( cli, f"--v3-api --assume-yes suite delete {expectation_suite_dir_name}.{expectation_suite_name}", catch_exceptions=False, ) assert result.exit_code == 0 stdout: str = result.stdout assert ( f"Deleted the expectation suite named: {expectation_suite_dir_name}.{expectation_suite_name}" in stdout ) assert "Would you like to proceed? [Y/n]:" not in stdout # This assertion is extra assurance since this test is too permissive if we change the confirmation message assert "[Y/n]" not in stdout assert not os.path.isfile(suite_path) assert mock_emit.call_count == 3 assert mock_emit.call_args_list == [ mock.call( {"event_payload": {}, "event": "data_context.__init__", "success": True} ), mock.call( { "event": "cli.suite.delete.begin", "event_payload": {"api_version": "v3"}, "success": True, } ), mock.call( { "event": "cli.suite.delete.end", "event_payload": {"api_version": "v3"}, "success": True, } ), ] assert_no_logging_messages_or_tracebacks( my_caplog=caplog, click_result=result, ) result = runner.invoke( cli, f"--v3-api suite list", catch_exceptions=False, ) assert result.exit_code == 0 stdout = result.stdout assert result.exit_code == 0 assert "No Expectation Suites found" in stdout @mock.patch( "great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit" ) @mock.patch("subprocess.call", return_value=True, side_effect=None) def test_suite_new_profile_on_context_with_no_datasource_raises_error( mock_subprocess, mock_emit, caplog, monkeypatch, empty_data_context_stats_enabled, ): """ We call the "suite new --profile" command on a 
context with no datasource The command should: - exit with a clear error message - send a DataContext init success message - send a new fail message """ context: DataContext = empty_data_context_stats_enabled monkeypatch.chdir(os.path.dirname(context.root_directory)) expectation_suite_name: str = "test_suite_name" runner: CliRunner = CliRunner(mix_stderr=False) result: Result = runner.invoke( cli, [ "--v3-api", "suite", "new", "--interactive", "--profile", "--expectation-suite", f"{expectation_suite_name}", ], input="\n", catch_exceptions=False, ) assert result.exit_code == 1 stdout: str = result.output assert ( "No datasources found in the context. To add a datasource, run `great_expectations datasource new`" in stdout ) assert mock_subprocess.call_count == 0 assert mock_emit.call_count == 3 assert mock_emit.call_args_list == [ mock.call( {"event_payload": {}, "event": "data_context.__init__", "success": True} ), mock.call( { "event": "cli.suite.new.begin", "event_payload": {"api_version": "v3"}, "success": True, } ), mock.call( { "event": "cli.suite.new.end", "event_payload": {"api_version": "v3"}, "success": False, } ), ] assert_no_logging_messages_or_tracebacks( my_caplog=caplog, click_result=result, ) @mock.patch( "great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit" ) def test_suite_new_profile_on_existing_suite_raises_error( mock_emit, caplog, monkeypatch, empty_data_context_stats_enabled ): """ We call the "suite new --profile" command with an existing suite The command should: - exit with a clear error message - send a DataContext init success message - send a new fail message """ context: DataContext = empty_data_context_stats_enabled monkeypatch.chdir(os.path.dirname(context.root_directory)) project_dir: str = context.root_directory uncommitted_dir: str = os.path.join(project_dir, "uncommitted") expectation_suite_name: str = "test_suite_name" suite: ExpectationSuite = context.create_expectation_suite( 
expectation_suite_name=expectation_suite_name ) context.save_expectation_suite(expectation_suite=suite) assert ( context.list_expectation_suites()[0].expectation_suite_name == expectation_suite_name ) batch_request: dict = { "datasource_name": "my_datasource", "data_connector_name": "my_basic_data_connector", "data_asset_name": "Titanic_1911", } batch_request_file_path: str = os.path.join(uncommitted_dir, f"batch_request.json") with open(batch_request_file_path, "w") as json_file: json.dump(batch_request, json_file) mock_emit.reset_mock() runner: CliRunner = CliRunner(mix_stderr=False) result: Result = runner.invoke( cli, [ "--v3-api", "suite", "new", "--expectation-suite", f"{expectation_suite_name}", "--interactive", "--batch-request", f"{batch_request_file_path}", "--profile", "--no-jupyter", ], input="\n", catch_exceptions=False, ) assert result.exit_code == 1 stdout: str = result.output assert ( f"An expectation suite named `{expectation_suite_name}` already exists." in stdout ) assert ( f"If you intend to edit the suite please use `great_expectations suite edit {expectation_suite_name}`." 
in stdout ) assert mock_emit.call_count == 3 assert mock_emit.call_args_list == [ mock.call( {"event_payload": {}, "event": "data_context.__init__", "success": True} ), mock.call( { "event": "cli.suite.new.begin", "event_payload": {"api_version": "v3"}, "success": True, } ), mock.call( { "event": "cli.suite.new.end", "event_payload": {"api_version": "v3"}, "success": False, } ), ] assert_no_logging_messages_or_tracebacks( my_caplog=caplog, click_result=result, ) @mock.patch( "great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit" ) @mock.patch("subprocess.call", return_value=True, side_effect=None) @mock.patch("webbrowser.open", return_value=True, side_effect=None) def test_suite_new_profile_runs_notebook_no_jupyter( mock_webbroser, mock_subprocess, mock_emit, caplog, monkeypatch, titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled, ): """ We call the "suite new --profile" command The command should: - create a new notebook - send a DataContext init success message - send a new success message """ context: DataContext = titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled monkeypatch.chdir(os.path.dirname(context.root_directory)) project_dir: str = context.root_directory uncommitted_dir: str = os.path.join(project_dir, "uncommitted") expectation_suite_name: str = "test_suite_name" batch_request: dict = { "datasource_name": "my_datasource", "data_connector_name": "my_basic_data_connector", "data_asset_name": "Titanic_1911", } batch_request_file_path: str = os.path.join(uncommitted_dir, f"batch_request.json") with open(batch_request_file_path, "w") as json_file: json.dump(batch_request, json_file) mock_emit.reset_mock() runner: CliRunner = CliRunner(mix_stderr=False) result: Result = runner.invoke( cli, [ "--v3-api", "suite", "new", "--expectation-suite", f"{expectation_suite_name}", "--interactive", "--batch-request", 
f"{batch_request_file_path}", "--profile", "--no-jupyter", ], input="\n", catch_exceptions=False, ) assert result.exit_code == 0 stdout: str = result.stdout assert "Select a datasource" not in stdout assert ( "Opening a notebook for you now to edit your expectation suite!" not in stdout ) assert ( "If you wish to avoid this you can add the `--no-jupyter` flag." not in stdout ) expected_suite_path: str = os.path.join( project_dir, "expectations", f"{expectation_suite_name}.json" ) assert os.path.isfile(expected_suite_path) expected_notebook_path: str = os.path.join( uncommitted_dir, f"edit_{expectation_suite_name}.ipynb" ) assert os.path.isfile(expected_notebook_path) batch_request_string: str = ( str(BatchRequest(**batch_request)) .replace("{\n", "{\n ") .replace(",\n", ",\n ") .replace("\n}", ",\n}") ) batch_request_string = fr"batch_request = {batch_request_string}" cells_of_interest_dict: Dict[int, dict] = find_code_in_notebook( nb=load_notebook_from_path(notebook_path=expected_notebook_path), search_string=batch_request_string, ) assert len(cells_of_interest_dict) == 1 cells_of_interest_dict: Dict[int, dict] = find_code_in_notebook( nb=load_notebook_from_path(notebook_path=expected_notebook_path), search_string="context.open_data_docs(resource_identifier=suite_identifier)", ) assert not cells_of_interest_dict cells_of_interest_dict: Dict[int, dict] = find_code_in_notebook( nb=load_notebook_from_path(notebook_path=expected_notebook_path), search_string="context.open_data_docs(resource_identifier=validation_result_identifier)", ) assert len(cells_of_interest_dict) == 1 profiler_code_cell: str = f"""\ profiler = UserConfigurableProfiler( profile_dataset=validator, excluded_expectations=None, ignored_columns=ignored_columns, not_null_only=False, primary_or_compound_key=False, semantic_types_dict=None, table_expectations_only=False, value_set_threshold="MANY", ) suite = profiler.build_suite()""" profiler_code_cell = lint_code(code=profiler_code_cell).rstrip("\n") 
cells_of_interest_dict: Dict[int, dict] = find_code_in_notebook( nb=load_notebook_from_path(notebook_path=expected_notebook_path), search_string=profiler_code_cell, ) assert len(cells_of_interest_dict) == 1 run_notebook( notebook_path=expected_notebook_path, notebook_dir=uncommitted_dir, string_to_be_replaced="context.open_data_docs(resource_identifier=validation_result_identifier)", replacement_string="", ) context = DataContext(context_root_dir=project_dir) assert expectation_suite_name in context.list_expectation_suite_names() suite: ExpectationSuite = context.get_expectation_suite( expectation_suite_name=expectation_suite_name ) assert suite.expectations == [ ExpectationConfiguration( **{ "expectation_type": "expect_table_columns_to_match_ordered_list", "kwargs": { "column_list": [ "Unnamed: 0", "Name", "PClass", "Age", "Sex", "Survived", "SexCode", ] }, "meta": {}, } ), ExpectationConfiguration( **{ "expectation_type": "expect_table_row_count_to_be_between", "kwargs": {"max_value": 1313, "min_value": 1313}, "meta": {}, } ), ] assert mock_subprocess.call_count == 0 assert mock_webbroser.call_count == 0 assert mock_emit.call_count == 5 assert mock_emit.call_args_list == [ mock.call( {"event_payload": {}, "event": "data_context.__init__", "success": True} ), mock.call( { "event": "cli.suite.new.begin", "event_payload": {"api_version": "v3"}, "success": True, } ), mock.call( { "event_payload": { "anonymized_expectation_suite_name": "9df638a13b727807e51b13ec1839bcbe" }, "event": "data_context.save_expectation_suite", "success": True, } ), mock.call( { "event": "cli.suite.new.end", "event_payload": {"api_version": "v3"}, "success": True, } ), mock.call( {"event_payload": {}, "event": "data_context.__init__", "success": True} ), ] assert_no_logging_messages_or_tracebacks( my_caplog=caplog, click_result=result, ) @mock.patch( "great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit" ) @mock.patch("subprocess.call", return_value=True, 
side_effect=None) @mock.patch("webbrowser.open", return_value=True, side_effect=None) def test_suite_new_profile_runs_notebook_opens_jupyter( mock_webbroser, mock_subprocess, mock_emit, caplog, monkeypatch, titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled, ): """ We call the "suite new --profile" command The command should: - create a new notebook - open the notebook in jupyter - send a DataContext init success message - send a new success message """ context: DataContext = titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled monkeypatch.chdir(os.path.dirname(context.root_directory)) project_dir: str = context.root_directory uncommitted_dir: str = os.path.join(project_dir, "uncommitted") expectation_suite_name: str = "test_suite_name" batch_request: dict = { "datasource_name": "my_datasource", "data_connector_name": "my_basic_data_connector", "data_asset_name": "Titanic_1911", } batch_request_file_path: str = os.path.join(uncommitted_dir, f"batch_request.json") with open(batch_request_file_path, "w") as json_file: json.dump(batch_request, json_file) mock_emit.reset_mock() runner: CliRunner = CliRunner(mix_stderr=False) result: Result = runner.invoke( cli, [ "--v3-api", "suite", "new", "--expectation-suite", f"{expectation_suite_name}", "--interactive", "--batch-request", f"{batch_request_file_path}", "--profile", ], input="\n", catch_exceptions=False, ) assert result.exit_code == 0 stdout: str = result.stdout assert "Select a datasource" not in stdout assert "Opening a notebook for you now to edit your expectation suite!" in stdout assert "If you wish to avoid this you can add the `--no-jupyter` flag." 
in stdout expected_suite_path: str = os.path.join( project_dir, "expectations", f"{expectation_suite_name}.json" ) assert os.path.isfile(expected_suite_path) expected_notebook_path: str = os.path.join( uncommitted_dir, f"edit_{expectation_suite_name}.ipynb" ) assert os.path.isfile(expected_notebook_path) batch_request_string: str = ( str(BatchRequest(**batch_request)) .replace("{\n", "{\n ") .replace(",\n", ",\n ") .replace("\n}", ",\n}") ) batch_request_string = fr"batch_request = {batch_request_string}" cells_of_interest_dict: Dict[int, dict] = find_code_in_notebook( nb=load_notebook_from_path(notebook_path=expected_notebook_path), search_string=batch_request_string, ) assert len(cells_of_interest_dict) == 1 cells_of_interest_dict: Dict[int, dict] = find_code_in_notebook( nb=load_notebook_from_path(notebook_path=expected_notebook_path), search_string="context.open_data_docs(resource_identifier=suite_identifier)", ) assert not cells_of_interest_dict cells_of_interest_dict: Dict[int, dict] = find_code_in_notebook( nb=load_notebook_from_path(notebook_path=expected_notebook_path), search_string="context.open_data_docs(resource_identifier=validation_result_identifier)", ) assert len(cells_of_interest_dict) == 1 profiler_code_cell: str = f"""\ profiler = UserConfigurableProfiler( profile_dataset=validator, excluded_expectations=None, ignored_columns=ignored_columns, not_null_only=False, primary_or_compound_key=False, semantic_types_dict=None, table_expectations_only=False, value_set_threshold="MANY", ) suite = profiler.build_suite()""" profiler_code_cell = lint_code(code=profiler_code_cell).rstrip("\n") cells_of_interest_dict: Dict[int, dict] = find_code_in_notebook( nb=load_notebook_from_path(notebook_path=expected_notebook_path), search_string=profiler_code_cell, ) assert len(cells_of_interest_dict) == 1 run_notebook( notebook_path=expected_notebook_path, notebook_dir=uncommitted_dir, 
string_to_be_replaced="context.open_data_docs(resource_identifier=validation_result_identifier)", replacement_string="", ) context = DataContext(context_root_dir=project_dir) assert expectation_suite_name in context.list_expectation_suite_names() suite: ExpectationSuite = context.get_expectation_suite( expectation_suite_name=expectation_suite_name ) assert suite.expectations == [ ExpectationConfiguration( **{ "expectation_type": "expect_table_columns_to_match_ordered_list", "kwargs": { "column_list": [ "Unnamed: 0", "Name", "PClass", "Age", "Sex", "Survived", "SexCode", ] }, "meta": {}, } ), ExpectationConfiguration( **{ "expectation_type": "expect_table_row_count_to_be_between", "kwargs": {"max_value": 1313, "min_value": 1313}, "meta": {}, } ), ] assert mock_subprocess.call_count == 1 call_args: List[str] = mock_subprocess.call_args[0][0] assert call_args[0] == "jupyter" assert call_args[1] == "notebook" assert expected_notebook_path in call_args[2] assert mock_webbroser.call_count == 0 assert mock_emit.call_count == 5 assert mock_emit.call_args_list == [ mock.call( {"event_payload": {}, "event": "data_context.__init__", "success": True} ), mock.call( { "event": "cli.suite.new.begin", "event_payload": {"api_version": "v3"}, "success": True, } ), mock.call( { "event_payload": { "anonymized_expectation_suite_name": "9df638a13b727807e51b13ec1839bcbe" }, "event": "data_context.save_expectation_suite", "success": True, } ), mock.call( { "event": "cli.suite.new.end", "event_payload": {"api_version": "v3"}, "success": True, } ), mock.call( {"event_payload": {}, "event": "data_context.__init__", "success": True} ), ] assert_no_logging_messages_or_tracebacks( my_caplog=caplog, click_result=result, ) @pytest.fixture def suite_new_messages(): return { "no_msg": "", "happy_path_profile": "Entering interactive mode since you passed the --profile flag", "warning_profile": "Warning: Ignoring the --manual flag and entering interactive mode since you passed the --profile flag", 
"happy_path_batch_request": "Entering interactive mode since you passed the --batch-request flag", "warning_batch_request": "Warning: Ignoring the --manual flag and entering interactive mode since you passed the --batch-request flag", "happy_path_prompt_call": """\ How would you like to create your Expectation Suite? 1. Manually, without interacting with a sample batch of data (default) 2. Interactively, with a sample batch of data 3. Automatically, using a profiler """, "error_both_interactive_flags": "Please choose either --interactive or --manual, you may not choose both.", } @pytest.mark.parametrize( "interactive_flag,manual_flag,profile_flag,batch_request_flag,error_expected,prompt_input,return_interactive,return_profile,stdout_fixture,stderr_fixture", [ # No error expected # return_interactive = True, return_profile = False pytest.param( True, False, False, None, False, None, True, False, "no_msg", "no_msg", id="--interactive", ), # return_interactive = False, return_profile = False pytest.param( False, True, False, None, False, None, False, False, "no_msg", "no_msg", id="--manual", ), # return_interactive = True, return_profile = True pytest.param( False, False, True, None, False, None, True, True, "no_msg", "no_msg", id="--profile", ), pytest.param( True, False, True, None, False, None, True, True, "no_msg", "no_msg", id="--interactive --profile", ), # batch_request not empty pytest.param( True, False, False, "batch_request.json", False, None, True, False, "no_msg", "no_msg", id="--interactive --batch-request", ), pytest.param( False, False, True, "batch_request.json", False, None, True, True, "happy_path_profile", "no_msg", id="--profile --batch-request", ), pytest.param( True, False, True, "batch_request.json", False, None, True, True, "no_msg", "no_msg", id="--interactive --profile --batch-request", ), # Prompts # Just hit enter (default choice) pytest.param( False, False, False, None, False, "", False, False, "no_msg", "no_msg", id="prompt: Default 
Choice 1 - Manual suite creation (default)", ), # Choice 1 - Manual suite creation (default) pytest.param( False, False, False, None, False, "1", False, False, "no_msg", "no_msg", id="prompt: Choice 1 - Manual suite creation (default)", ), # Choice 2 - Interactive suite creation pytest.param( False, False, False, None, False, "2", True, False, "no_msg", "no_msg", id="prompt: Choice 2 - Interactive suite creation", ), # Choice 3 - Automatic suite creation (profiler) pytest.param( False, False, False, None, False, "3", True, True, "no_msg", "no_msg", id="prompt: Choice 3 - Automatic suite creation (profiler)", ), # No error but warning expected # no-interactive flag with batch_request, with/without profile flag pytest.param( False, True, False, "batch_request.json", False, None, True, False, "warning_batch_request", "no_msg", id="warning: --manual --batch-request", ), pytest.param( False, True, True, "batch_request.json", False, None, True, True, "warning_profile", "no_msg", id="warning: --manual --profile --batch-request", ), # no-interactive flag with profile and without batch request flag pytest.param( False, True, True, None, False, None, True, True, "warning_profile", "no_msg", id="warning: --manual --profile", ), # Yes error expected # both interactive flags, profile=False, with/without batch_request pytest.param( True, True, False, None, True, None, None, None, "error_both_interactive_flags", "no_msg", id="error: --interactive --manual", ), pytest.param( True, True, False, "batch_request.json", True, None, None, None, "error_both_interactive_flags", "no_msg", id="error: --interactive --manual --batch-request", ), # both interactive flags, profile=True, with/without batch_request pytest.param( True, True, True, None, True, None, None, None, "error_both_interactive_flags", "no_msg", id="error: --interactive --manual --profile", ), pytest.param( True, True, True, "batch_request.json", True, None, None, None, "error_both_interactive_flags", "no_msg", id="error: 
--interactive --manual --profile --batch-request", ), ], ) @mock.patch("click.prompt") @mock.patch( "great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit" ) def test__process_suite_new_flags_and_prompt( mock_emit, mock_prompt, interactive_flag, manual_flag, profile_flag, batch_request_flag, error_expected, prompt_input, return_interactive, return_profile, stdout_fixture, stderr_fixture, empty_data_context_stats_enabled, capsys, suite_new_messages, ): """ What does this test and why? _process_suite_new_flags_and_prompt should return the correct configuration or error based on input flags. """ usage_event_end: str = "cli.suite.new.end" context: DataContext = empty_data_context_stats_enabled # test happy paths if not error_expected: if prompt_input is not None: mock_prompt.side_effect = [prompt_input] processed_flags = _process_suite_new_flags_and_prompt( context=context, usage_event_end=usage_event_end, interactive_flag=interactive_flag, manual_flag=manual_flag, profile=profile_flag, batch_request=batch_request_flag, ) assert processed_flags == { "interactive": return_interactive, "profile": return_profile, } # Note - in this method on happy path no usage stats message is sent. Other messages are sent during the full # CLI suite new flow of creating a notebook etc. 
assert mock_emit.call_count == 0 assert mock_emit.call_args_list == [] # Check output captured: CaptureResult = capsys.readouterr() assert suite_new_messages[stdout_fixture] in captured.out assert suite_new_messages[stderr_fixture] in captured.err # Check prompt text and called only when appropriate if prompt_input is not None: assert mock_prompt.call_count == 1 assert ( mock_prompt.call_args_list[0][0][0] == suite_new_messages["happy_path_prompt_call"] ) else: assert mock_prompt.call_count == 0 # test error cases elif error_expected: with pytest.raises(SystemExit): _ = _process_suite_new_flags_and_prompt( context=context, usage_event_end=usage_event_end, interactive_flag=interactive_flag, manual_flag=manual_flag, profile=profile_flag, batch_request=batch_request_flag, ) # Check output captured: CaptureResult = capsys.readouterr() assert suite_new_messages[stdout_fixture] in captured.out assert suite_new_messages[stderr_fixture] in captured.err assert mock_prompt.call_count == 0 # Note - in this method only a single usage stats message is sent. Other messages are sent during the full # CLI suite new flow of creating a notebook etc. assert mock_emit.call_count == 1 assert mock_emit.call_args_list == [ mock.call( { "event": usage_event_end, "event_payload": {"api_version": "v3"}, "success": False, } ), ] @pytest.fixture def suite_edit_messages(): return { "no_msg": "", "happy_path_datasource_name": "Entering interactive mode since you passed the --datasource-name flag", "warning_datasource_name": "Warning: Ignoring the --manual flag and entering interactive mode since you passed the --datasource-name flag", "happy_path_batch_request": "Entering interactive mode since you passed the --batch-request flag", "warning_batch_request": "Warning: Ignoring the --manual flag and entering interactive mode since you passed the --batch-request flag", "happy_path_prompt_call": """\ How would you like to edit your Expectation Suite? 1. 
Manually, without interacting with a sample batch of data (default) 2. Interactively, with a sample batch of data """, "error_both_interactive_flags": "Please choose either --interactive or --manual, you may not choose both.", "error_both_datasource_name_and_batch_request_flags": """Only one of --datasource-name DATASOURCE_NAME and --batch-request <path to JSON file> \ options can be used. """, } @pytest.mark.parametrize( "interactive_flag,manual_flag,datasource_name_flag,batch_request_flag,error_expected,prompt_input,return_interactive,stdout_fixture,stderr_fixture", [ # No error expected # return_interactive = True pytest.param( True, False, None, None, False, None, True, "no_msg", "no_msg", id="--interactive", ), # return_interactive = False pytest.param( False, True, None, None, False, None, False, "no_msg", "no_msg", id="--manual", ), # return_interactive = True, --datasource-name pytest.param( False, False, "some_datasource_name", None, False, None, True, "happy_path_datasource_name", "no_msg", id="--datasource-name", ), pytest.param( True, False, "some_datasource_name", None, False, None, True, "no_msg", "no_msg", id="--interactive --datasource-name", ), # batch_request not empty pytest.param( True, False, None, "batch_request.json", False, None, True, "no_msg", "no_msg", id="--interactive --batch-request", ), pytest.param( False, False, None, "batch_request.json", False, None, True, "happy_path_batch_request", "no_msg", id="--batch-request", ), # Prompts # Just hit enter (default choice) pytest.param( False, False, None, None, False, "", False, "no_msg", "no_msg", id="prompt: Default Choice 1 - Manual suite edit (default)", ), # # Choice 1 - Manual suite edit (default) pytest.param( False, False, None, None, False, "1", False, "no_msg", "no_msg", id="prompt: Choice 1 - Manual suite edit (default)", ), # Choice 2 - Interactive suite edit pytest.param( False, False, None, None, False, "2", True, "no_msg", "no_msg", id="prompt: Choice 2 - Interactive suite 
edit", ), # No error but warning expected # no-interactive flag with batch_request pytest.param( False, True, None, "batch_request.json", False, None, True, "warning_batch_request", "no_msg", id="warning: --manual --batch-request", ), # no-interactive flag with datasource_name pytest.param( False, True, "some_datasource_name", None, False, None, True, "warning_datasource_name", "no_msg", id="warning: --manual --datasource-name", ), # Yes error expected # both interactive flags, datasource_name=None, with/without batch_request pytest.param( True, True, None, None, True, None, None, "error_both_interactive_flags", "no_msg", id="error: --interactive --manual", ), pytest.param( True, True, None, "batch_request.json", True, None, None, "error_both_interactive_flags", "no_msg", id="error: --interactive --manual --batch-request", ), # both interactive flags, datasource_name=something, with/without batch_request pytest.param( True, True, "some_datasource_name", None, True, None, None, "error_both_interactive_flags", "no_msg", id="error: --interactive --manual --datasource-name", ), pytest.param( True, True, "some_datasource_name", "batch_request.json", True, None, None, "error_both_datasource_name_and_batch_request_flags", "no_msg", id="error: --interactive --manual --datasource-name --batch-request", ), # both --datasource-name and --batch-request pytest.param( False, False, "some_datasource_name", "batch_request.json", True, None, True, "error_both_datasource_name_and_batch_request_flags", "no_msg", id="error: --datasource-name --batch-request", ), pytest.param( True, False, "some_datasource_name", "batch_request.json", True, None, True, "error_both_datasource_name_and_batch_request_flags", "no_msg", id="--interactive --datasource-name --batch-request", ), pytest.param( False, True, "some_datasource_name", "batch_request.json", True, None, True, "error_both_datasource_name_and_batch_request_flags", "no_msg", id="--manual --datasource-name --batch-request", ), ], ) 
@mock.patch("click.prompt")
@mock.patch(
    "great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit"
)
def test__process_suite_edit_flags_and_prompt(
    mock_emit,
    mock_prompt,
    interactive_flag,
    manual_flag,
    datasource_name_flag,
    batch_request_flag,
    error_expected,
    prompt_input,
    return_interactive,
    stdout_fixture,
    stderr_fixture,
    empty_data_context_stats_enabled,
    capsys,
    suite_edit_messages,
):
    """
    What does this test and why?
    _process_suite_edit_flags_and_prompt should return the correct configuration or error based on input flags.
    """
    # The usage-stats event name the CLI helper emits on failure paths.
    usage_event_end: str = "cli.suite.edit.end"
    context: DataContext = empty_data_context_stats_enabled

    # test happy paths
    if not error_expected:
        # Queue the simulated user answer for click.prompt, but only when this
        # parametrized case actually expects a prompt to appear.
        if prompt_input is not None:
            mock_prompt.side_effect = [prompt_input]
        # Unlike the "suite new" variant, this helper returns a single bool:
        # whether the edit flow should run in interactive mode.
        interactive: bool = _process_suite_edit_flags_and_prompt(
            context=context,
            usage_event_end=usage_event_end,
            interactive_flag=interactive_flag,
            manual_flag=manual_flag,
            datasource_name=datasource_name_flag,
            batch_request=batch_request_flag,
        )

        assert interactive == return_interactive

        # Note - in this method on happy path no usage stats message is sent. Other messages are sent during the full
        # CLI suite new flow of creating a notebook etc.
        assert mock_emit.call_count == 0
        assert mock_emit.call_args_list == []

        # Check output
        # Expected stdout/stderr snippets come from the suite_edit_messages
        # fixture, keyed by the parametrized fixture names ("" matches anything).
        captured: CaptureResult = capsys.readouterr()
        assert suite_edit_messages[stdout_fixture] in captured.out
        assert suite_edit_messages[stderr_fixture] in captured.err

        # Check prompt text and called only when appropriate
        if prompt_input is not None:
            assert mock_prompt.call_count == 1
            # call_args_list[0][0][0] is the first positional argument of the
            # first call, i.e. the prompt text shown to the user.
            assert (
                mock_prompt.call_args_list[0][0][0]
                == suite_edit_messages["happy_path_prompt_call"]
            )
        else:
            assert mock_prompt.call_count == 0

    # test error cases
    elif error_expected:
        # Invalid flag combinations make the helper exit the CLI process.
        with pytest.raises(SystemExit):
            _ = _process_suite_edit_flags_and_prompt(
                context=context,
                usage_event_end=usage_event_end,
                interactive_flag=interactive_flag,
                manual_flag=manual_flag,
                datasource_name=datasource_name_flag,
                batch_request=batch_request_flag,
            )

        # Check output
        captured: CaptureResult = capsys.readouterr()
        assert suite_edit_messages[stdout_fixture] in captured.out
        assert suite_edit_messages[stderr_fixture] in captured.err
        # No prompt should be shown when the flags alone already constitute an error.
        assert mock_prompt.call_count == 0

        # Note - in this method only a single usage stats message is sent. Other messages are sent during the full
        # CLI suite new flow of creating a notebook etc.
        assert mock_emit.call_count == 1
        assert mock_emit.call_args_list == [
            mock.call(
                {
                    "event": usage_event_end,
                    "event_payload": {"api_version": "v3"},
                    "success": False,
                }
            ),
        ]
great-expectations/great_expectations
tests/cli/test_suite.py
great_expectations/profile/basic_dataset_profiler.py
from typing import Any, Dict, Optional, Tuple

import numpy as np

from great_expectations.core import ExpectationConfiguration
from great_expectations.execution_engine import (
    ExecutionEngine,
    PandasExecutionEngine,
    SparkDFExecutionEngine,
)
from great_expectations.execution_engine.execution_engine import MetricDomainTypes
from great_expectations.execution_engine.sqlalchemy_execution_engine import (
    SqlAlchemyExecutionEngine,
)
from great_expectations.expectations.metrics.column_aggregate_metric import (
    ColumnMetricProvider,
    column_aggregate_value,
)
from great_expectations.expectations.metrics.import_manager import F, sa
from great_expectations.expectations.metrics.metric_provider import (
    MetricProvider,
    metric_value,
)
from great_expectations.validator.validation_graph import MetricConfiguration


class ColumnMedian(ColumnMetricProvider):
    """MetricProvider class for the aggregate column median metric ("column.median")."""

    metric_name = "column.median"

    @column_aggregate_value(engine=PandasExecutionEngine)
    def _pandas(cls, column, **kwargs):
        """Pandas Median Implementation"""
        return column.median()

    @metric_value(engine=SqlAlchemyExecutionEngine, metric_fn_type="value")
    def _sqlalchemy(
        cls,
        execution_engine: "SqlAlchemyExecutionEngine",
        metric_domain_kwargs: Dict,
        metric_value_kwargs: Dict,
        metrics: Dict[Tuple, Any],
        runtime_configuration: Dict,
    ):
        """SqlAlchemy Median Implementation.

        Fetches the (up to) two center rows of the sorted non-null column via
        OFFSET/LIMIT and derives the median from them, so the whole column is
        never pulled into memory.

        Returns None when the column has no non-null values; raises
        NotImplementedError on AWS Athena, which lacks OFFSET support.
        """
        (
            selectable,
            compute_domain_kwargs,
            accessor_domain_kwargs,
        ) = execution_engine.get_compute_domain(
            metric_domain_kwargs, MetricDomainTypes.COLUMN
        )
        column_name = accessor_domain_kwargs["column"]
        column = sa.column(column_name)
        sqlalchemy_engine = execution_engine.engine
        dialect = sqlalchemy_engine.dialect

        if dialect.name.lower() == "awsathena":
            raise NotImplementedError("AWS Athena does not support OFFSET.")
        # Depends on the "column_values.nonnull.count" metric having been
        # resolved first (declared in _get_evaluation_dependencies).
        nonnull_count = metrics.get("column_values.nonnull.count")
        if not nonnull_count:
            return None
        # Skip to just before the center of the sorted non-null values and
        # fetch at most two rows; max(..., 0) guards nonnull_count == 1.
        element_values = sqlalchemy_engine.execute(
            sa.select([column])
            .order_by(column)
            .where(column != None)
            .offset(max(nonnull_count // 2 - 1, 0))
            .limit(2)
            .select_from(selectable)
        )

        column_values = list(element_values.fetchall())

        if len(column_values) == 0:
            column_median = None
        elif nonnull_count % 2 == 0:
            # An even number of column values: take the average of the two center values
            column_median = (
                float(
                    column_values[0][0]
                    + column_values[1][0]  # left center value  # right center value
                )
                / 2.0
            )  # Average center values
        else:
            # An odd number of column values: the true center value is the last
            # row fetched. Using index -1 (instead of a hard-coded 1) also
            # covers nonnull_count == 1, where OFFSET 0 yields a single row and
            # column_values[1] would raise an IndexError.
            column_median = column_values[-1][0]

        return column_median

    @metric_value(engine=SparkDFExecutionEngine, metric_fn_type="value")
    def _spark(
        cls,
        execution_engine: "SparkDFExecutionEngine",  # was mislabeled SqlAlchemyExecutionEngine
        metric_domain_kwargs: Dict,
        metric_value_kwargs: Dict,
        metrics: Dict[Tuple, Any],
        runtime_configuration: Dict,
    ):
        """Spark Median Implementation.

        Uses DataFrame.approxQuantile with relativeError=0 (exact computation)
        to obtain the two middle values and returns their mean.
        """
        (
            df,
            compute_domain_kwargs,
            accessor_domain_kwargs,
        ) = execution_engine.get_compute_domain(
            metric_domain_kwargs, MetricDomainTypes.COLUMN
        )
        column = accessor_domain_kwargs["column"]
        # We will get the two middle values by choosing an epsilon to add
        # to the 50th percentile such that we always get exactly the middle two values
        # (i.e. 0 < epsilon < 1 / (2 * values))

        # Note that this can be an expensive computation; we are not exposing
        # spark's ability to estimate.
        # We add two to 2 * n_values to maintain a legitimate quantile
        # in the degnerate case when n_values = 0
        table_row_count = metrics.get("table.row_count")
        result = df.approxQuantile(
            column, [0.5, 0.5 + (1 / (2 + (2 * table_row_count)))], 0
        )
        return np.mean(result)

    @classmethod
    def _get_evaluation_dependencies(
        cls,
        metric: MetricConfiguration,
        configuration: Optional[ExpectationConfiguration] = None,
        execution_engine: Optional[ExecutionEngine] = None,
        runtime_configuration: Optional[dict] = None,
    ):
        """This should return a dictionary:

        {
          "dependency_name": MetricConfiguration,
          ...
        }
        """
        dependencies: dict = super()._get_evaluation_dependencies(
            metric=metric,
            configuration=configuration,
            execution_engine=execution_engine,
            runtime_configuration=runtime_configuration,
        )

        # The SQL implementation needs the non-null count to position OFFSET.
        if isinstance(execution_engine, SqlAlchemyExecutionEngine):
            dependencies["column_values.nonnull.count"] = MetricConfiguration(
                metric_name="column_values.nonnull.count",
                metric_domain_kwargs=metric.metric_domain_kwargs,
            )

        # The Spark implementation needs the table row count for its epsilon;
        # drop the column key so the row count is computed over the whole table.
        table_domain_kwargs: dict = {
            k: v
            for k, v in metric.metric_domain_kwargs.items()
            if k != MetricDomainTypes.COLUMN.value
        }
        dependencies["table.row_count"] = MetricConfiguration(
            metric_name="table.row_count",
            metric_domain_kwargs=table_domain_kwargs,
            metric_value_kwargs=None,
            metric_dependencies=None,
        )

        return dependencies
import json import os from typing import Dict, List from unittest import mock import click import pytest from _pytest.capture import CaptureResult from click.testing import CliRunner, Result from great_expectations import DataContext from great_expectations.cli import cli from great_expectations.cli.suite import ( _process_suite_edit_flags_and_prompt, _process_suite_new_flags_and_prompt, ) from great_expectations.core import ExpectationConfiguration from great_expectations.core.batch import BatchRequest from great_expectations.core.expectation_suite import ExpectationSuite from great_expectations.util import lint_code from tests.cli.utils import assert_no_logging_messages_or_tracebacks from tests.render.test_util import ( find_code_in_notebook, load_notebook_from_path, run_notebook, ) def test_suite_help_output(caplog): runner: CliRunner = CliRunner(mix_stderr=False) result: Result = runner.invoke(cli, ["--v3-api", "suite"], catch_exceptions=False) assert result.exit_code == 0 stdout: str = result.stdout assert ( """ Usage: great_expectations suite [OPTIONS] COMMAND [ARGS]... Expectation Suite operations Options: --help Show this message and exit. Commands: delete Delete an Expectation Suite from the Expectation Store. demo This command is not supported in the v3 (Batch Request) API. edit Edit an existing Expectation Suite. list List existing Expectation Suites. new Create a new Expectation Suite. 
""" in stdout ) assert_no_logging_messages_or_tracebacks( my_caplog=caplog, click_result=result, ) @mock.patch( "great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit" ) def test_suite_demo_deprecation_message( mock_emit, caplog, monkeypatch, empty_data_context_stats_enabled ): context: DataContext = empty_data_context_stats_enabled monkeypatch.chdir(os.path.dirname(context.root_directory)) runner: CliRunner = CliRunner(mix_stderr=False) result: Result = runner.invoke( cli, f"--v3-api suite demo", catch_exceptions=False, ) assert result.exit_code == 0 stdout: str = result.stdout assert "This command is not supported in the v3 (Batch Request) API." in stdout expected_call_args_list = [ mock.call( {"event_payload": {}, "event": "data_context.__init__", "success": True} ), mock.call( { "event": "cli.suite.demo.begin", "event_payload": {"api_version": "v3"}, "success": True, } ), mock.call( { "event": "cli.suite.demo.end", "event_payload": {"api_version": "v3"}, "success": True, } ), ] assert mock_emit.call_args_list == expected_call_args_list assert mock_emit.call_count == len(expected_call_args_list) assert_no_logging_messages_or_tracebacks( my_caplog=caplog, click_result=result, ) @mock.patch( "great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit" ) @mock.patch("subprocess.call", return_value=True, side_effect=None) @mock.patch("webbrowser.open", return_value=True, side_effect=None) def test_suite_new_non_interactive_with_suite_name_prompted_default_runs_notebook_opens_jupyter( mock_webbroser, mock_subprocess, mock_emit, caplog, monkeypatch, titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled, ): context: DataContext = titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled monkeypatch.chdir(os.path.dirname(context.root_directory)) project_dir: str = context.root_directory uncommitted_dir: str = 
os.path.join(project_dir, "uncommitted") expectation_suite_name: str = "warning" runner: CliRunner = CliRunner(mix_stderr=False) result: Result = runner.invoke( cli, f"--v3-api suite new", input="\n", catch_exceptions=False, ) assert result.exit_code == 0 stdout: str = result.stdout assert "Select a datasource" not in stdout assert f"Name the new Expectation Suite [warning]:" in stdout assert "Opening a notebook for you now to edit your expectation suite!" in stdout assert "If you wish to avoid this you can add the `--no-jupyter` flag." in stdout expected_suite_path: str = os.path.join( project_dir, "expectations", f"{expectation_suite_name}.json" ) assert os.path.isfile(expected_suite_path) expected_notebook_path: str = os.path.join( uncommitted_dir, f"edit_{expectation_suite_name}.ipynb" ) assert os.path.isfile(expected_notebook_path) run_notebook( notebook_path=expected_notebook_path, notebook_dir=uncommitted_dir, string_to_be_replaced="context.open_data_docs(resource_identifier=suite_identifier)", replacement_string="", ) context = DataContext(context_root_dir=project_dir) assert expectation_suite_name in context.list_expectation_suite_names() suite: ExpectationSuite = context.get_expectation_suite( expectation_suite_name=expectation_suite_name ) assert suite.expectations == [] assert mock_subprocess.call_count == 1 call_args: List[str] = mock_subprocess.call_args[0][0] assert call_args[0] == "jupyter" assert call_args[1] == "notebook" assert expected_notebook_path in call_args[2] assert mock_webbroser.call_count == 0 assert mock_emit.call_count == 4 assert mock_emit.call_args_list == [ mock.call( {"event_payload": {}, "event": "data_context.__init__", "success": True} ), mock.call( { "event": "cli.suite.new.begin", "event_payload": {"api_version": "v3"}, "success": True, } ), mock.call( { "event": "cli.suite.new.end", "event_payload": {"api_version": "v3"}, "success": True, } ), mock.call( {"event_payload": {}, "event": "data_context.__init__", "success": 
True} ), ] assert_no_logging_messages_or_tracebacks( my_caplog=caplog, click_result=result, ) @mock.patch( "great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit" ) @mock.patch("subprocess.call", return_value=True, side_effect=None) @mock.patch("webbrowser.open", return_value=True, side_effect=None) def test_suite_new_non_interactive_with_suite_name_prompted_custom_runs_notebook_opens_jupyter( mock_webbroser, mock_subprocess, mock_emit, caplog, monkeypatch, titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled, ): context: DataContext = titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled monkeypatch.chdir(os.path.dirname(context.root_directory)) project_dir: str = context.root_directory uncommitted_dir: str = os.path.join(project_dir, "uncommitted") expectation_suite_name: str = "test_suite_name" runner: CliRunner = CliRunner(mix_stderr=False) result: Result = runner.invoke( cli, f"--v3-api suite new", input=f"1\n{expectation_suite_name}\n", catch_exceptions=False, ) assert result.exit_code == 0 stdout: str = result.stdout assert "Select a datasource" not in stdout assert f"Name the new Expectation Suite [warning]:" in stdout assert "Opening a notebook for you now to edit your expectation suite!" in stdout assert "If you wish to avoid this you can add the `--no-jupyter` flag." 
in stdout expected_suite_path: str = os.path.join( project_dir, "expectations", f"{expectation_suite_name}.json" ) assert os.path.isfile(expected_suite_path) expected_notebook_path: str = os.path.join( uncommitted_dir, f"edit_{expectation_suite_name}.ipynb" ) assert os.path.isfile(expected_notebook_path) run_notebook( notebook_path=expected_notebook_path, notebook_dir=uncommitted_dir, string_to_be_replaced="context.open_data_docs(resource_identifier=suite_identifier)", replacement_string="", ) context = DataContext(context_root_dir=project_dir) assert expectation_suite_name in context.list_expectation_suite_names() suite: ExpectationSuite = context.get_expectation_suite( expectation_suite_name=expectation_suite_name ) assert suite.expectations == [] assert mock_subprocess.call_count == 1 call_args: List[str] = mock_subprocess.call_args[0][0] assert call_args[0] == "jupyter" assert call_args[1] == "notebook" assert expected_notebook_path in call_args[2] assert mock_webbroser.call_count == 0 assert mock_emit.call_count == 4 assert mock_emit.call_args_list == [ mock.call( {"event_payload": {}, "event": "data_context.__init__", "success": True} ), mock.call( { "event": "cli.suite.new.begin", "event_payload": {"api_version": "v3"}, "success": True, } ), mock.call( { "event": "cli.suite.new.end", "event_payload": {"api_version": "v3"}, "success": True, } ), mock.call( {"event_payload": {}, "event": "data_context.__init__", "success": True} ), ] assert_no_logging_messages_or_tracebacks( my_caplog=caplog, click_result=result, ) @mock.patch( "great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit" ) @mock.patch("subprocess.call", return_value=True, side_effect=None) @mock.patch("webbrowser.open", return_value=True, side_effect=None) def test_suite_new_non_interactive_with_suite_name_arg_custom_runs_notebook_opens_jupyter( mock_webbroser, mock_subprocess, mock_emit, caplog, monkeypatch, 
titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled, ): context: DataContext = titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled monkeypatch.chdir(os.path.dirname(context.root_directory)) project_dir: str = context.root_directory uncommitted_dir: str = os.path.join(project_dir, "uncommitted") expectation_suite_name: str = "test_suite_name" runner: CliRunner = CliRunner(mix_stderr=False) result: Result = runner.invoke( cli, f"--v3-api suite new --expectation-suite {expectation_suite_name}", catch_exceptions=False, ) assert result.exit_code == 0 stdout: str = result.stdout assert "Select a datasource" not in stdout assert "Opening a notebook for you now to edit your expectation suite!" in stdout assert "If you wish to avoid this you can add the `--no-jupyter` flag." in stdout expected_suite_path: str = os.path.join( project_dir, "expectations", f"{expectation_suite_name}.json" ) assert os.path.isfile(expected_suite_path) expected_notebook_path: str = os.path.join( uncommitted_dir, f"edit_{expectation_suite_name}.ipynb" ) assert os.path.isfile(expected_notebook_path) run_notebook( notebook_path=expected_notebook_path, notebook_dir=uncommitted_dir, string_to_be_replaced="context.open_data_docs(resource_identifier=suite_identifier)", replacement_string="", ) context = DataContext(context_root_dir=project_dir) assert expectation_suite_name in context.list_expectation_suite_names() suite: ExpectationSuite = context.get_expectation_suite( expectation_suite_name=expectation_suite_name ) assert suite.expectations == [] assert mock_subprocess.call_count == 1 call_args: List[str] = mock_subprocess.call_args[0][0] assert call_args[0] == "jupyter" assert call_args[1] == "notebook" assert expected_notebook_path in call_args[2] assert mock_webbroser.call_count == 0 assert mock_emit.call_count == 4 assert mock_emit.call_args_list == [ mock.call( {"event_payload": {}, "event": 
"data_context.__init__", "success": True} ), mock.call( { "event": "cli.suite.new.begin", "event_payload": {"api_version": "v3"}, "success": True, } ), mock.call( { "event": "cli.suite.new.end", "event_payload": {"api_version": "v3"}, "success": True, } ), mock.call( {"event_payload": {}, "event": "data_context.__init__", "success": True} ), ] assert_no_logging_messages_or_tracebacks( my_caplog=caplog, click_result=result, ) @mock.patch( "great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit" ) @mock.patch("subprocess.call", return_value=True, side_effect=None) @mock.patch("webbrowser.open", return_value=True, side_effect=None) def test_suite_new_non_interactive_with_suite_name_arg_custom_runs_notebook_no_jupyter( mock_webbroser, mock_subprocess, mock_emit, caplog, monkeypatch, titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled, ): context: DataContext = titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled monkeypatch.chdir(os.path.dirname(context.root_directory)) project_dir: str = context.root_directory uncommitted_dir: str = os.path.join(project_dir, "uncommitted") expectation_suite_name: str = "test_suite_name" runner: CliRunner = CliRunner(mix_stderr=False) result: Result = runner.invoke( cli, f"--v3-api suite new --expectation-suite {expectation_suite_name} --no-jupyter", catch_exceptions=False, ) assert result.exit_code == 0 stdout: str = result.stdout assert "Select a datasource" not in stdout assert ( "Opening a notebook for you now to edit your expectation suite!" not in stdout ) assert ( "If you wish to avoid this you can add the `--no-jupyter` flag." 
not in stdout ) expected_suite_path: str = os.path.join( project_dir, "expectations", f"{expectation_suite_name}.json" ) assert os.path.isfile(expected_suite_path) expected_notebook_path: str = os.path.join( uncommitted_dir, f"edit_{expectation_suite_name}.ipynb" ) assert os.path.isfile(expected_notebook_path) run_notebook( notebook_path=expected_notebook_path, notebook_dir=uncommitted_dir, string_to_be_replaced="context.open_data_docs(resource_identifier=suite_identifier)", replacement_string="", ) context = DataContext(context_root_dir=project_dir) assert expectation_suite_name in context.list_expectation_suite_names() suite: ExpectationSuite = context.get_expectation_suite( expectation_suite_name=expectation_suite_name ) assert suite.expectations == [] assert mock_subprocess.call_count == 0 assert mock_webbroser.call_count == 0 assert mock_emit.call_count == 4 assert mock_emit.call_args_list == [ mock.call( {"event_payload": {}, "event": "data_context.__init__", "success": True} ), mock.call( { "event": "cli.suite.new.begin", "event_payload": {"api_version": "v3"}, "success": True, } ), mock.call( { "event": "cli.suite.new.end", "event_payload": {"api_version": "v3"}, "success": True, } ), mock.call( {"event_payload": {}, "event": "data_context.__init__", "success": True} ), ] assert_no_logging_messages_or_tracebacks( my_caplog=caplog, click_result=result, ) @mock.patch( "great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit" ) @mock.patch("subprocess.call", return_value=True, side_effect=None) @mock.patch("webbrowser.open", return_value=True, side_effect=None) def test_suite_new_interactive_nonexistent_batch_request_json_file_raises_error( mock_webbroser, mock_subprocess, mock_emit, caplog, monkeypatch, titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled, ): context: DataContext = titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled 
monkeypatch.chdir(os.path.dirname(context.root_directory)) project_dir: str = context.root_directory expectation_suite_name: str = "test_suite_name" runner: CliRunner = CliRunner(mix_stderr=False) result: Result = runner.invoke( cli, f"""--v3-api suite new --expectation-suite {expectation_suite_name} --interactive --batch-request nonexistent_file.json --no-jupyter """, catch_exceptions=False, ) assert result.exit_code == 1 stdout: str = result.stdout assert 'The JSON file with the path "nonexistent_file.json' in stdout context = DataContext(context_root_dir=project_dir) assert expectation_suite_name not in context.list_expectation_suite_names() assert mock_subprocess.call_count == 0 assert mock_webbroser.call_count == 0 assert mock_emit.call_count == 4 assert mock_emit.call_args_list == [ mock.call( {"event_payload": {}, "event": "data_context.__init__", "success": True} ), mock.call( { "event": "cli.suite.new.begin", "event_payload": {"api_version": "v3"}, "success": True, } ), mock.call( { "event": "cli.suite.new.end", "event_payload": {"api_version": "v3"}, "success": False, } ), mock.call( {"event_payload": {}, "event": "data_context.__init__", "success": True} ), ] assert_no_logging_messages_or_tracebacks( my_caplog=caplog, click_result=result, ) @mock.patch( "great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit" ) @mock.patch("subprocess.call", return_value=True, side_effect=None) @mock.patch("webbrowser.open", return_value=True, side_effect=None) def test_suite_new_interactive_malformed_batch_request_json_file_raises_error( mock_webbroser, mock_subprocess, mock_emit, caplog, monkeypatch, titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled, ): context: DataContext = titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled monkeypatch.chdir(os.path.dirname(context.root_directory)) project_dir: str = context.root_directory uncommitted_dir: 
str = os.path.join(project_dir, "uncommitted") expectation_suite_name: str = "test_suite_name" batch_request_file_path: str = os.path.join(uncommitted_dir, f"batch_request.json") with open(batch_request_file_path, "w") as json_file: json_file.write("not_proper_json") runner: CliRunner = CliRunner(mix_stderr=False) result: Result = runner.invoke( cli, f"""--v3-api suite new --expectation-suite {expectation_suite_name} --interactive --batch-request {batch_request_file_path} --no-jupyter """, catch_exceptions=False, ) assert result.exit_code == 1 stdout: str = result.stdout assert "Error" in stdout assert "occurred while attempting to load the JSON file with the path" in stdout context = DataContext(context_root_dir=project_dir) assert expectation_suite_name not in context.list_expectation_suite_names() assert mock_subprocess.call_count == 0 assert mock_webbroser.call_count == 0 assert mock_emit.call_count == 4 assert mock_emit.call_args_list == [ mock.call( {"event_payload": {}, "event": "data_context.__init__", "success": True} ), mock.call( { "event": "cli.suite.new.begin", "event_payload": {"api_version": "v3"}, "success": True, } ), mock.call( { "event": "cli.suite.new.end", "event_payload": {"api_version": "v3"}, "success": False, } ), mock.call( {"event_payload": {}, "event": "data_context.__init__", "success": True} ), ] assert_no_logging_messages_or_tracebacks( my_caplog=caplog, click_result=result, ) @mock.patch( "great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit" ) @mock.patch("subprocess.call", return_value=True, side_effect=None) @mock.patch("webbrowser.open", return_value=True, side_effect=None) def test_suite_new_interactive_valid_batch_request_from_json_file_in_notebook_runs_notebook_no_jupyter( mock_webbroser, mock_subprocess, mock_emit, caplog, monkeypatch, titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled, ): context: DataContext = 
titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled monkeypatch.chdir(os.path.dirname(context.root_directory)) project_dir: str = context.root_directory uncommitted_dir: str = os.path.join(project_dir, "uncommitted") expectation_suite_name: str = "test_suite_name" batch_request: dict = { "datasource_name": "my_datasource", "data_connector_name": "my_basic_data_connector", "data_asset_name": "Titanic_1911", } batch_request_file_path: str = os.path.join(uncommitted_dir, f"batch_request.json") with open(batch_request_file_path, "w") as json_file: json.dump(batch_request, json_file) runner: CliRunner = CliRunner(mix_stderr=False) result: Result = runner.invoke( cli, f"""--v3-api suite new --expectation-suite {expectation_suite_name} --interactive --batch-request {batch_request_file_path} --no-jupyter """, catch_exceptions=False, ) assert result.exit_code == 0 stdout: str = result.stdout assert "Error" not in stdout expected_suite_path: str = os.path.join( project_dir, "expectations", f"{expectation_suite_name}.json" ) assert os.path.isfile(expected_suite_path) expected_notebook_path: str = os.path.join( uncommitted_dir, f"edit_{expectation_suite_name}.ipynb" ) assert os.path.isfile(expected_notebook_path) batch_request_string: str = ( str(BatchRequest(**batch_request)) .replace("{\n", "{\n ") .replace(",\n", ",\n ") .replace("\n}", ",\n}") ) batch_request_string = fr"batch_request = {batch_request_string}" cells_of_interest_dict: Dict[int, dict] = find_code_in_notebook( nb=load_notebook_from_path(notebook_path=expected_notebook_path), search_string=batch_request_string, ) assert len(cells_of_interest_dict) == 1 cells_of_interest_dict: Dict[int, dict] = find_code_in_notebook( nb=load_notebook_from_path(notebook_path=expected_notebook_path), search_string="context.open_data_docs(resource_identifier=suite_identifier)", ) assert not cells_of_interest_dict cells_of_interest_dict: Dict[int, dict] = find_code_in_notebook( 
nb=load_notebook_from_path(notebook_path=expected_notebook_path), search_string="context.open_data_docs(resource_identifier=validation_result_identifier)", ) assert len(cells_of_interest_dict) == 1 run_notebook( notebook_path=expected_notebook_path, notebook_dir=uncommitted_dir, string_to_be_replaced="context.open_data_docs(resource_identifier=validation_result_identifier)", replacement_string="", ) context = DataContext(context_root_dir=project_dir) assert expectation_suite_name in context.list_expectation_suite_names() suite: ExpectationSuite = context.get_expectation_suite( expectation_suite_name=expectation_suite_name ) assert suite.expectations == [] assert mock_subprocess.call_count == 0 assert mock_webbroser.call_count == 0 assert mock_emit.call_count == 5 assert mock_emit.call_args_list == [ mock.call( {"event_payload": {}, "event": "data_context.__init__", "success": True} ), mock.call( { "event": "cli.suite.new.begin", "event_payload": {"api_version": "v3"}, "success": True, } ), mock.call( { "event_payload": { "anonymized_expectation_suite_name": "9df638a13b727807e51b13ec1839bcbe" }, "event": "data_context.save_expectation_suite", "success": True, } ), mock.call( { "event": "cli.suite.new.end", "event_payload": {"api_version": "v3"}, "success": True, } ), mock.call( {"event_payload": {}, "event": "data_context.__init__", "success": True} ), ] assert_no_logging_messages_or_tracebacks( my_caplog=caplog, click_result=result, ) @mock.patch( "great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit" ) def test_suite_edit_without_suite_name_raises_error( mock_emit, monkeypatch, empty_data_context_stats_enabled, ): """This is really only testing click missing arguments""" context: DataContext = empty_data_context_stats_enabled monkeypatch.chdir(os.path.dirname(context.root_directory)) runner: CliRunner = CliRunner(mix_stderr=False) result: Result = runner.invoke(cli, "--v3-api suite edit", catch_exceptions=False) assert 
result.exit_code == 2 assert ( 'Error: Missing argument "EXPECTATION_SUITE".' in result.stderr or "Error: Missing argument 'EXPECTATION_SUITE'." in result.stderr ) assert mock_emit.call_count == 2 assert mock_emit.call_args_list == [ mock.call( {"event_payload": {}, "event": "data_context.__init__", "success": True} ), mock.call( { "event": "cli.suite.edit.begin", "event_payload": {"api_version": "v3"}, "success": True, } ), ] @mock.patch( "great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit" ) def test_suite_edit_datasource_and_batch_request_error( mock_emit, monkeypatch, empty_data_context_stats_enabled, ): """This is really only testing click missing arguments""" context: DataContext = empty_data_context_stats_enabled monkeypatch.chdir(os.path.dirname(context.root_directory)) expectation_suite_name: str = "test_suite_name" # noinspection PyUnusedLocal suite: ExpectationSuite = context.create_expectation_suite( expectation_suite_name=expectation_suite_name ) assert ( context.list_expectation_suites()[0].expectation_suite_name == expectation_suite_name ) runner: CliRunner = CliRunner(mix_stderr=False) result: Result = runner.invoke( cli, f"--v3-api suite edit {expectation_suite_name} --datasource-name some_datasource_name --batch-request some_file.json --interactive", catch_exceptions=False, ) assert result.exit_code == 1 stdout: str = result.stdout assert ( "Only one of --datasource-name DATASOURCE_NAME and --batch-request <path to JSON file> options can be used." 
in stdout ) assert mock_emit.call_count == 3 assert mock_emit.call_args_list == [ mock.call( {"event_payload": {}, "event": "data_context.__init__", "success": True} ), mock.call( { "event": "cli.suite.edit.begin", "event_payload": {"api_version": "v3"}, "success": True, } ), mock.call( { "event": "cli.suite.edit.end", "event_payload": {"api_version": "v3"}, "success": False, } ), ] @mock.patch( "great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit" ) @mock.patch("subprocess.call", return_value=True, side_effect=None) @mock.patch("webbrowser.open", return_value=True, side_effect=None) def test_suite_edit_with_non_existent_suite_name_raises_error( mock_webbrowser, mock_subprocess, mock_emit, caplog, monkeypatch, empty_data_context_stats_enabled, ): """ The command should: - exit with a clear error message - NOT open Data Docs - NOT open jupyter """ context: DataContext = empty_data_context_stats_enabled assert not context.list_expectation_suites() monkeypatch.chdir(os.path.dirname(context.root_directory)) runner: CliRunner = CliRunner(mix_stderr=False) result: Result = runner.invoke( cli, f"--v3-api suite edit not_a_real_suite", catch_exceptions=False, ) assert result.exit_code == 1 stdout: str = result.stdout assert "Could not find a suite named `not_a_real_suite`." 
in stdout
    assert "by running `great_expectations suite list`" in stdout

    # Neither jupyter nor Data Docs should have been opened on failure.
    assert mock_subprocess.call_count == 0
    assert mock_webbrowser.call_count == 0

    # "edit end" carries success=False because the suite lookup failed.
    assert mock_emit.call_count == 3
    assert mock_emit.call_args_list == [
        mock.call(
            {"event_payload": {}, "event": "data_context.__init__", "success": True}
        ),
        mock.call(
            {
                "event": "cli.suite.edit.begin",
                "event_payload": {"api_version": "v3"},
                "success": True,
            }
        ),
        mock.call(
            {
                "event": "cli.suite.edit.end",
                "event_payload": {"api_version": "v3"},
                "success": False,
            }
        ),
    ]

    assert_no_logging_messages_or_tracebacks(
        my_caplog=caplog,
        click_result=result,
    )


@mock.patch(
    "great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit"
)
@mock.patch("subprocess.call", return_value=True, side_effect=None)
@mock.patch("webbrowser.open", return_value=True, side_effect=None)
def test_suite_edit_with_non_existent_datasource_shows_helpful_error_message(
    mock_webbrowser,
    mock_subprocess,
    mock_emit,
    caplog,
    monkeypatch,
    empty_data_context_stats_enabled,
):
    """
    The command should:
    - exit with a clear error message
    - NOT open Data Docs
    - NOT open jupyter
    """
    context: DataContext = empty_data_context_stats_enabled
    monkeypatch.chdir(os.path.dirname(context.root_directory))

    expectation_suite_name: str = "test_suite_name"

    # noinspection PyUnusedLocal
    suite: ExpectationSuite = context.create_expectation_suite(
        expectation_suite_name=expectation_suite_name
    )
    assert (
        context.list_expectation_suites()[0].expectation_suite_name
        == expectation_suite_name
    )

    runner: CliRunner = CliRunner(mix_stderr=False)
    # The suite exists, but the requested datasource does not -- the CLI must
    # surface a helpful message rather than a traceback.
    result: Result = runner.invoke(
        cli,
        f"--v3-api suite edit {expectation_suite_name} --interactive --datasource-name not_real",
        catch_exceptions=False,
    )
    assert result.exit_code == 1

    stdout: str = result.stdout
    assert (
        "Unable to load datasource `not_real` -- no configuration found or invalid configuration."
in stdout ) assert mock_subprocess.call_count == 0 assert mock_webbrowser.call_count == 0 assert mock_emit.call_count == 3 assert mock_emit.call_args_list == [ mock.call( {"event_payload": {}, "event": "data_context.__init__", "success": True} ), mock.call( { "event": "cli.suite.edit.begin", "event_payload": {"api_version": "v3"}, "success": True, } ), mock.call( { "event": "cli.suite.edit.end", "event_payload": {"api_version": "v3"}, "success": False, } ), ] assert_no_logging_messages_or_tracebacks( my_caplog=caplog, click_result=result, ) @mock.patch( "great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit" ) @mock.patch("subprocess.call", return_value=True, side_effect=None) @mock.patch("webbrowser.open", return_value=True, side_effect=None) def test_suite_edit_multiple_datasources_with_no_additional_args_without_citations_runs_notebook_opens_jupyter( mock_webbrowser, mock_subprocess, mock_emit, caplog, monkeypatch, titanic_v013_multi_datasource_pandas_data_context_with_checkpoints_v1_with_empty_store_stats_enabled, ): """ Here we verify that the "suite edit" command helps the user to specify batch_request when it is called without the optional command-line arguments that specify the batch. First, we call the "suite new" command to create the expectation suite our test will edit -- this step is a just a setup. We then call the "suite edit" command without any optional command-line arguments. This means that the command will help us specify batch_request interactively. The data context has two datasources -- we choose one of them. After that, we select a data connector and, finally, select a data asset from the list. 
The command should: - NOT open Data Docs - open jupyter """ context: DataContext = titanic_v013_multi_datasource_pandas_data_context_with_checkpoints_v1_with_empty_store_stats_enabled monkeypatch.chdir(os.path.dirname(context.root_directory)) project_dir: str = context.root_directory uncommitted_dir: str = os.path.join(project_dir, "uncommitted") expectation_suite_name: str = "test_suite_name" batch_request: dict = { "datasource_name": "my_datasource", "data_connector_name": "my_basic_data_connector", "data_asset_name": "Titanic_1911", "limit": 1000, } batch_request_string: str = ( str(BatchRequest(**batch_request)) .replace("{\n", "{\n ") .replace(",\n", ",\n ") .replace("\n}", ",\n}") ) batch_request_string = fr"batch_request = {batch_request_string}" runner: CliRunner = CliRunner(mix_stderr=False) result: Result = runner.invoke( cli, [ "--v3-api", "suite", "new", "--expectation-suite", f"{expectation_suite_name}", "--interactive", "--no-jupyter", ], input="2\n1\n1\n\n", catch_exceptions=False, ) assert result.exit_code == 0 stdout: str = result.stdout assert "A batch of data is required to edit the suite" in stdout assert "Select a datasource" in stdout assert mock_webbrowser.call_count == 0 mock_webbrowser.reset_mock() assert mock_subprocess.call_count == 0 mock_subprocess.reset_mock() # remove the citations from the suite context = DataContext(context_root_dir=project_dir) suite: ExpectationSuite = context.get_expectation_suite( expectation_suite_name=expectation_suite_name ) assert isinstance(suite, ExpectationSuite) suite.meta.pop("citations", None) context.save_expectation_suite(expectation_suite=suite) # Actual testing really starts here runner = CliRunner(mix_stderr=False) monkeypatch.chdir(os.path.dirname(context.root_directory)) result = runner.invoke( cli, [ "--v3-api", "suite", "edit", f"{expectation_suite_name}", "--interactive", ], input="2\n1\n1\n\n", catch_exceptions=False, ) assert result.exit_code == 0 stdout = result.stdout assert "A batch of 
data is required to edit the suite" in stdout assert "Select a datasource" in stdout expected_notebook_path: str = os.path.join( uncommitted_dir, f"edit_{expectation_suite_name}.ipynb" ) assert os.path.isfile(expected_notebook_path) expected_suite_path: str = os.path.join( project_dir, "expectations", f"{expectation_suite_name}.json" ) assert os.path.isfile(expected_suite_path) cells_of_interest_dict: Dict[int, dict] = find_code_in_notebook( nb=load_notebook_from_path(notebook_path=expected_notebook_path), search_string=batch_request_string, ) assert len(cells_of_interest_dict) == 1 cells_of_interest_dict: Dict[int, dict] = find_code_in_notebook( nb=load_notebook_from_path(notebook_path=expected_notebook_path), search_string="context.open_data_docs(resource_identifier=suite_identifier)", ) assert not cells_of_interest_dict cells_of_interest_dict: Dict[int, dict] = find_code_in_notebook( nb=load_notebook_from_path(notebook_path=expected_notebook_path), search_string="context.open_data_docs(resource_identifier=validation_result_identifier)", ) assert len(cells_of_interest_dict) == 1 run_notebook( notebook_path=expected_notebook_path, notebook_dir=uncommitted_dir, string_to_be_replaced="context.open_data_docs(resource_identifier=validation_result_identifier)", replacement_string="", ) assert mock_subprocess.call_count == 1 assert mock_webbrowser.call_count == 0 assert mock_emit.call_count == 10 assert mock_emit.call_args_list == [ mock.call( {"event_payload": {}, "event": "data_context.__init__", "success": True} ), mock.call( { "event": "cli.suite.new.begin", "event_payload": {"api_version": "v3"}, "success": True, } ), mock.call( { "event_payload": { "anonymized_expectation_suite_name": "9df638a13b727807e51b13ec1839bcbe" }, "event": "data_context.save_expectation_suite", "success": True, } ), mock.call( { "event": "cli.suite.new.end", "event_payload": {"api_version": "v3"}, "success": True, } ), mock.call( {"event_payload": {}, "event": "data_context.__init__", 
"success": True} ), mock.call( { "event_payload": { "anonymized_expectation_suite_name": "9df638a13b727807e51b13ec1839bcbe" }, "event": "data_context.save_expectation_suite", "success": True, } ), mock.call( {"event_payload": {}, "event": "data_context.__init__", "success": True} ), mock.call( { "event": "cli.suite.edit.begin", "event_payload": {"api_version": "v3"}, "success": True, } ), mock.call( { "event_payload": { "anonymized_expectation_suite_name": "9df638a13b727807e51b13ec1839bcbe" }, "event": "data_context.save_expectation_suite", "success": True, } ), mock.call( { "event": "cli.suite.edit.end", "event_payload": { "anonymized_expectation_suite_name": "9df638a13b727807e51b13ec1839bcbe", "api_version": "v3", }, "success": True, } ), ] assert_no_logging_messages_or_tracebacks( my_caplog=caplog, click_result=result, ) @mock.patch( "great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit" ) @mock.patch("subprocess.call", return_value=True, side_effect=None) @mock.patch("webbrowser.open", return_value=True, side_effect=None) def test_suite_edit_multiple_datasources_with_no_additional_args_with_citations_runs_notebook_opens_jupyter( mock_webbrowser, mock_subprocess, mock_emit, caplog, monkeypatch, titanic_v013_multi_datasource_pandas_data_context_with_checkpoints_v1_with_empty_store_stats_enabled, ): """ Here we verify that the "suite edit" command uses the batch_request found in citations in the existing suite when it is called without the optional command-line arguments that specify the batch. First, we call the "suite new" command to create the expectation suite our test will edit -- this step is a just a setup. We then call the "suite edit" command without any optional command-line-arguments. 
The command should: - NOT open Data Docs - NOT open jupyter """ context: DataContext = titanic_v013_multi_datasource_pandas_data_context_with_checkpoints_v1_with_empty_store_stats_enabled monkeypatch.chdir(os.path.dirname(context.root_directory)) project_dir: str = context.root_directory uncommitted_dir: str = os.path.join(project_dir, "uncommitted") expectation_suite_name: str = "test_suite_name" batch_request: dict = { "datasource_name": "my_datasource", "data_connector_name": "my_basic_data_connector", "data_asset_name": "Titanic_1911", "limit": 1000, } batch_request_string: str = ( str(BatchRequest(**batch_request)) .replace("{\n", "{\n ") .replace(",\n", ",\n ") .replace("\n}", ",\n}") ) batch_request_string = fr"batch_request = {batch_request_string}" runner: CliRunner = CliRunner(mix_stderr=False) result: Result = runner.invoke( cli, [ "--v3-api", "suite", "new", "--expectation-suite", f"{expectation_suite_name}", "--interactive", "--no-jupyter", ], input="2\n1\n1\n\n", catch_exceptions=False, ) assert result.exit_code == 0 stdout: str = result.stdout assert "A batch of data is required to edit the suite" in stdout assert "Select a datasource" in stdout assert mock_webbrowser.call_count == 0 mock_webbrowser.reset_mock() assert mock_subprocess.call_count == 0 mock_subprocess.reset_mock() context = DataContext(context_root_dir=project_dir) suite: ExpectationSuite = context.get_expectation_suite( expectation_suite_name=expectation_suite_name ) assert isinstance(suite, ExpectationSuite) # Actual testing really starts here runner = CliRunner(mix_stderr=False) monkeypatch.chdir(os.path.dirname(context.root_directory)) result = runner.invoke( cli, [ "--v3-api", "suite", "edit", f"{expectation_suite_name}", "--interactive", ], catch_exceptions=False, ) assert result.exit_code == 0 stdout = result.stdout assert "A batch of data is required to edit the suite" not in stdout assert "Select a datasource" not in stdout expected_notebook_path: str = os.path.join( 
uncommitted_dir, f"edit_{expectation_suite_name}.ipynb" ) assert os.path.isfile(expected_notebook_path) expected_suite_path: str = os.path.join( project_dir, "expectations", f"{expectation_suite_name}.json" ) assert os.path.isfile(expected_suite_path) cells_of_interest_dict: Dict[int, dict] = find_code_in_notebook( nb=load_notebook_from_path(notebook_path=expected_notebook_path), search_string=batch_request_string, ) assert len(cells_of_interest_dict) == 1 cells_of_interest_dict: Dict[int, dict] = find_code_in_notebook( nb=load_notebook_from_path(notebook_path=expected_notebook_path), search_string="context.open_data_docs(resource_identifier=suite_identifier)", ) assert not cells_of_interest_dict cells_of_interest_dict: Dict[int, dict] = find_code_in_notebook( nb=load_notebook_from_path(notebook_path=expected_notebook_path), search_string="context.open_data_docs(resource_identifier=validation_result_identifier)", ) assert len(cells_of_interest_dict) == 1 run_notebook( notebook_path=expected_notebook_path, notebook_dir=uncommitted_dir, string_to_be_replaced="context.open_data_docs(resource_identifier=validation_result_identifier)", replacement_string="", ) assert mock_subprocess.call_count == 1 assert mock_webbrowser.call_count == 0 assert mock_emit.call_count == 8 assert mock_emit.call_args_list == [ mock.call( {"event_payload": {}, "event": "data_context.__init__", "success": True} ), mock.call( { "event": "cli.suite.new.begin", "event_payload": {"api_version": "v3"}, "success": True, } ), mock.call( { "event_payload": { "anonymized_expectation_suite_name": "9df638a13b727807e51b13ec1839bcbe" }, "event": "data_context.save_expectation_suite", "success": True, } ), mock.call( { "event": "cli.suite.new.end", "event_payload": {"api_version": "v3"}, "success": True, } ), mock.call( {"event_payload": {}, "event": "data_context.__init__", "success": True} ), mock.call( {"event_payload": {}, "event": "data_context.__init__", "success": True} ), mock.call( { "event": 
"cli.suite.edit.begin", "event_payload": {"api_version": "v3"}, "success": True, } ), mock.call( { "event": "cli.suite.edit.end", "event_payload": { "anonymized_expectation_suite_name": "9df638a13b727807e51b13ec1839bcbe", "api_version": "v3", }, "success": True, } ), ] assert_no_logging_messages_or_tracebacks( my_caplog=caplog, click_result=result, ) @mock.patch( "great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit" ) @mock.patch("subprocess.call", return_value=True, side_effect=None) @mock.patch("webbrowser.open", return_value=True, side_effect=None) def test_suite_edit_multiple_datasources_with_sql_with_no_additional_args_without_citations_runs_notebook_opens_jupyter( mock_webbrowser, mock_subprocess, mock_emit, caplog, monkeypatch, titanic_v013_multi_datasource_multi_execution_engine_data_context_with_checkpoints_v1_with_empty_store_stats_enabled, ): """ Here we verify that the "suite edit" command helps the user to specify batch_request when it is called without the optional command-line arguments that specify the batch. First, we call the "suite new" command to create the expectation suite our test will edit -- this step is a just a setup (we use an SQL datasource for this test). We then call the "suite edit" command without any optional command-line arguments. This means that the command will help us specify batch_request interactively. The data context has two datasources -- we choose one of them. After that, we select a data connector and, finally, select a data asset from the list. 
The command should: - NOT open Data Docs - open jupyter """ context: DataContext = titanic_v013_multi_datasource_multi_execution_engine_data_context_with_checkpoints_v1_with_empty_store_stats_enabled monkeypatch.chdir(os.path.dirname(context.root_directory)) project_dir: str = context.root_directory uncommitted_dir: str = os.path.join(project_dir, "uncommitted") expectation_suite_name: str = "test_suite_name" batch_request: dict = { "datasource_name": "my_sqlite_db_datasource", "data_connector_name": "whole_table", "data_asset_name": "titanic", "limit": 1000, } batch_request_string: str = ( str(BatchRequest(**batch_request)) .replace("{\n", "{\n ") .replace(",\n", ",\n ") .replace("\n}", ",\n}") ) batch_request_string = fr"batch_request = {batch_request_string}" runner: CliRunner = CliRunner(mix_stderr=False) result: Result = runner.invoke( cli, [ "--v3-api", "suite", "new", "--expectation-suite", f"{expectation_suite_name}", "--interactive", "--no-jupyter", ], input="3\n2\ny\n2\n", catch_exceptions=False, ) assert result.exit_code == 0 stdout: str = result.stdout assert "A batch of data is required to edit the suite" in stdout assert "Select a datasource" in stdout assert mock_webbrowser.call_count == 0 mock_webbrowser.reset_mock() assert mock_subprocess.call_count == 0 mock_subprocess.reset_mock() # remove the citations from the suite context = DataContext(context_root_dir=project_dir) suite: ExpectationSuite = context.get_expectation_suite( expectation_suite_name=expectation_suite_name ) assert isinstance(suite, ExpectationSuite) suite.meta.pop("citations", None) context.save_expectation_suite(expectation_suite=suite) # Actual testing really starts here runner = CliRunner(mix_stderr=False) monkeypatch.chdir(os.path.dirname(context.root_directory)) result = runner.invoke( cli, [ "--v3-api", "suite", "edit", f"{expectation_suite_name}", "--interactive", ], input="3\n2\ny\n2\n", catch_exceptions=False, ) assert result.exit_code == 0 stdout = result.stdout assert "A 
batch of data is required to edit the suite" in stdout assert "Select a datasource" in stdout expected_notebook_path: str = os.path.join( uncommitted_dir, f"edit_{expectation_suite_name}.ipynb" ) assert os.path.isfile(expected_notebook_path) expected_suite_path: str = os.path.join( project_dir, "expectations", f"{expectation_suite_name}.json" ) assert os.path.isfile(expected_suite_path) cells_of_interest_dict: Dict[int, dict] = find_code_in_notebook( nb=load_notebook_from_path(notebook_path=expected_notebook_path), search_string=batch_request_string, ) assert len(cells_of_interest_dict) == 1 cells_of_interest_dict: Dict[int, dict] = find_code_in_notebook( nb=load_notebook_from_path(notebook_path=expected_notebook_path), search_string="context.open_data_docs(resource_identifier=suite_identifier)", ) assert not cells_of_interest_dict cells_of_interest_dict: Dict[int, dict] = find_code_in_notebook( nb=load_notebook_from_path(notebook_path=expected_notebook_path), search_string="context.open_data_docs(resource_identifier=validation_result_identifier)", ) assert len(cells_of_interest_dict) == 1 run_notebook( notebook_path=expected_notebook_path, notebook_dir=uncommitted_dir, string_to_be_replaced="context.open_data_docs(resource_identifier=validation_result_identifier)", replacement_string="", ) assert mock_subprocess.call_count == 1 assert mock_webbrowser.call_count == 0 assert mock_emit.call_count == 10 assert mock_emit.call_args_list == [ mock.call( {"event_payload": {}, "event": "data_context.__init__", "success": True} ), mock.call( { "event": "cli.suite.new.begin", "event_payload": {"api_version": "v3"}, "success": True, } ), mock.call( { "event_payload": { "anonymized_expectation_suite_name": "9df638a13b727807e51b13ec1839bcbe" }, "event": "data_context.save_expectation_suite", "success": True, } ), mock.call( { "event": "cli.suite.new.end", "event_payload": {"api_version": "v3"}, "success": True, } ), mock.call( {"event_payload": {}, "event": 
"data_context.__init__", "success": True} ), mock.call( { "event_payload": { "anonymized_expectation_suite_name": "9df638a13b727807e51b13ec1839bcbe" }, "event": "data_context.save_expectation_suite", "success": True, } ), mock.call( {"event_payload": {}, "event": "data_context.__init__", "success": True} ), mock.call( { "event": "cli.suite.edit.begin", "event_payload": {"api_version": "v3"}, "success": True, } ), mock.call( { "event_payload": { "anonymized_expectation_suite_name": "9df638a13b727807e51b13ec1839bcbe" }, "event": "data_context.save_expectation_suite", "success": True, } ), mock.call( { "event": "cli.suite.edit.end", "event_payload": { "anonymized_expectation_suite_name": "9df638a13b727807e51b13ec1839bcbe", "api_version": "v3", }, "success": True, } ), ] assert_no_logging_messages_or_tracebacks( my_caplog=caplog, click_result=result, ) @mock.patch( "great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit" ) @mock.patch("subprocess.call", return_value=True, side_effect=None) @mock.patch("webbrowser.open", return_value=True, side_effect=None) def test_suite_edit_multiple_datasources_with_sql_with_no_additional_args_with_citations_runs_notebook_opens_jupyter( mock_webbrowser, mock_subprocess, mock_emit, caplog, monkeypatch, titanic_v013_multi_datasource_multi_execution_engine_data_context_with_checkpoints_v1_with_empty_store_stats_enabled, ): """ Here we verify that the "suite edit" command uses the batch_request found in citations in the existing suite when it is called without the optional command-line arguments that specify the batch. First, we call the "suite new" command to create the expectation suite our test will edit -- this step is a just a setup (we use an SQL datasource for this test). We then call the "suite edit" command without any optional command-line-arguments. 
The command should: - NOT open Data Docs - NOT open jupyter """ context: DataContext = titanic_v013_multi_datasource_multi_execution_engine_data_context_with_checkpoints_v1_with_empty_store_stats_enabled monkeypatch.chdir(os.path.dirname(context.root_directory)) project_dir: str = context.root_directory uncommitted_dir: str = os.path.join(project_dir, "uncommitted") expectation_suite_name: str = "test_suite_name" batch_request: dict = { "datasource_name": "my_sqlite_db_datasource", "data_connector_name": "whole_table", "data_asset_name": "titanic", "limit": 1000, } batch_request_string: str = ( str(BatchRequest(**batch_request)) .replace("{\n", "{\n ") .replace(",\n", ",\n ") .replace("\n}", ",\n}") ) batch_request_string = fr"batch_request = {batch_request_string}" runner: CliRunner = CliRunner(mix_stderr=False) result: Result = runner.invoke( cli, [ "--v3-api", "suite", "new", "--expectation-suite", f"{expectation_suite_name}", "--interactive", "--no-jupyter", ], input="3\n2\ny\n2\n", catch_exceptions=False, ) assert result.exit_code == 0 stdout: str = result.stdout assert "A batch of data is required to edit the suite" in stdout assert "Select a datasource" in stdout assert mock_webbrowser.call_count == 0 mock_webbrowser.reset_mock() assert mock_subprocess.call_count == 0 mock_subprocess.reset_mock() context = DataContext(context_root_dir=project_dir) suite: ExpectationSuite = context.get_expectation_suite( expectation_suite_name=expectation_suite_name ) assert isinstance(suite, ExpectationSuite) # Actual testing really starts here runner = CliRunner(mix_stderr=False) monkeypatch.chdir(os.path.dirname(context.root_directory)) result = runner.invoke( cli, [ "--v3-api", "suite", "edit", f"{expectation_suite_name}", "--interactive", ], catch_exceptions=False, ) assert result.exit_code == 0 stdout = result.stdout assert "A batch of data is required to edit the suite" not in stdout assert "Select a datasource" not in stdout expected_notebook_path: str = os.path.join( 
uncommitted_dir, f"edit_{expectation_suite_name}.ipynb" ) assert os.path.isfile(expected_notebook_path) expected_suite_path: str = os.path.join( project_dir, "expectations", f"{expectation_suite_name}.json" ) assert os.path.isfile(expected_suite_path) cells_of_interest_dict: Dict[int, dict] = find_code_in_notebook( nb=load_notebook_from_path(notebook_path=expected_notebook_path), search_string=batch_request_string, ) assert len(cells_of_interest_dict) == 1 cells_of_interest_dict: Dict[int, dict] = find_code_in_notebook( nb=load_notebook_from_path(notebook_path=expected_notebook_path), search_string="context.open_data_docs(resource_identifier=suite_identifier)", ) assert not cells_of_interest_dict cells_of_interest_dict: Dict[int, dict] = find_code_in_notebook( nb=load_notebook_from_path(notebook_path=expected_notebook_path), search_string="context.open_data_docs(resource_identifier=validation_result_identifier)", ) assert len(cells_of_interest_dict) == 1 run_notebook( notebook_path=expected_notebook_path, notebook_dir=uncommitted_dir, string_to_be_replaced="context.open_data_docs(resource_identifier=validation_result_identifier)", replacement_string="", ) assert mock_subprocess.call_count == 1 assert mock_webbrowser.call_count == 0 assert mock_emit.call_count == 8 assert mock_emit.call_args_list == [ mock.call( {"event_payload": {}, "event": "data_context.__init__", "success": True} ), mock.call( { "event": "cli.suite.new.begin", "event_payload": {"api_version": "v3"}, "success": True, } ), mock.call( { "event_payload": { "anonymized_expectation_suite_name": "9df638a13b727807e51b13ec1839bcbe" }, "event": "data_context.save_expectation_suite", "success": True, } ), mock.call( { "event": "cli.suite.new.end", "event_payload": {"api_version": "v3"}, "success": True, } ), mock.call( {"event_payload": {}, "event": "data_context.__init__", "success": True} ), mock.call( {"event_payload": {}, "event": "data_context.__init__", "success": True} ), mock.call( { "event": 
"cli.suite.edit.begin", "event_payload": {"api_version": "v3"}, "success": True, } ), mock.call( { "event": "cli.suite.edit.end", "event_payload": { "anonymized_expectation_suite_name": "9df638a13b727807e51b13ec1839bcbe", "api_version": "v3", }, "success": True, } ), ] assert_no_logging_messages_or_tracebacks( my_caplog=caplog, click_result=result, ) @mock.patch( "great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit" ) @mock.patch("subprocess.call", return_value=True, side_effect=None) @mock.patch("webbrowser.open", return_value=True, side_effect=None) def test_suite_edit_interactive_batch_request_without_datasource_json_file_raises_helpful_error( mock_webbroser, mock_subprocess, mock_emit, caplog, monkeypatch, empty_data_context_stats_enabled, ): context: DataContext = empty_data_context_stats_enabled monkeypatch.chdir(os.path.dirname(context.root_directory)) project_dir: str = context.root_directory uncommitted_dir: str = os.path.join(project_dir, "uncommitted") expectation_suite_name: str = "test_suite_name" # noinspection PyUnusedLocal suite: ExpectationSuite = context.create_expectation_suite( expectation_suite_name=expectation_suite_name ) assert ( context.list_expectation_suites()[0].expectation_suite_name == expectation_suite_name ) batch_request: dict = { "data_connector_name": "my_basic_data_connector", "data_asset_name": "Titanic_1911", } batch_request_file_path: str = os.path.join( uncommitted_dir, f"batch_request_missing_datasource.json" ) with open(batch_request_file_path, "w") as json_file: json.dump(batch_request, json_file) runner: CliRunner = CliRunner(mix_stderr=False) result: Result = runner.invoke( cli, f"""--v3-api suite edit {expectation_suite_name} --interactive --batch-request {batch_request_file_path} --no-jupyter """, catch_exceptions=False, ) assert result.exit_code == 1 stdout: str = result.stdout assert "A batch of data is required to edit the suite" not in stdout assert "Select a datasource" not in 
stdout assert ( "Please check that your batch_request is valid and is able to load a batch." in stdout ) assert 'The type of an datasource name must be a string (Python "str").' in stdout assert mock_subprocess.call_count == 0 assert mock_webbroser.call_count == 0 assert mock_emit.call_count == 3 assert mock_emit.call_args_list == [ mock.call( {"event_payload": {}, "event": "data_context.__init__", "success": True} ), mock.call( { "event": "cli.suite.edit.begin", "event_payload": {"api_version": "v3"}, "success": True, } ), mock.call( { "event": "cli.suite.edit.end", "event_payload": {"api_version": "v3"}, "success": False, } ), ] assert_no_logging_messages_or_tracebacks( my_caplog=caplog, click_result=result, ) @mock.patch( "great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit" ) def test_suite_list_with_zero_suites( mock_emit, caplog, monkeypatch, empty_data_context_stats_enabled ): context: DataContext = empty_data_context_stats_enabled config_file_path: str = os.path.join( context.root_directory, "great_expectations.yml" ) assert os.path.exists(config_file_path) monkeypatch.chdir(os.path.dirname(context.root_directory)) runner: CliRunner = CliRunner(mix_stderr=False) result: Result = runner.invoke( cli, f"--v3-api suite list", catch_exceptions=False, ) assert result.exit_code == 0 stdout: str = result.stdout assert "No Expectation Suites found" in stdout assert mock_emit.call_count == 3 assert mock_emit.call_args_list == [ mock.call( {"event_payload": {}, "event": "data_context.__init__", "success": True} ), mock.call( { "event": "cli.suite.list.begin", "event_payload": {"api_version": "v3"}, "success": True, } ), mock.call( { "event": "cli.suite.list.end", "event_payload": {"api_version": "v3"}, "success": True, } ), ] assert_no_logging_messages_or_tracebacks( my_caplog=caplog, click_result=result, ) @mock.patch( "great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit" ) def 
test_suite_list_with_one_suite( mock_emit, caplog, monkeypatch, empty_data_context_stats_enabled ): context: DataContext = empty_data_context_stats_enabled monkeypatch.chdir(os.path.dirname(context.root_directory)) project_dir: str = context.root_directory config_file_path: str = os.path.join(project_dir, "great_expectations.yml") assert os.path.exists(config_file_path) expectation_suite_dir_name: str = "a_dir" expectation_suite_name: str = "test_suite_name" # noinspection PyUnusedLocal suite: ExpectationSuite = context.create_expectation_suite( expectation_suite_name=f"{expectation_suite_dir_name}.{expectation_suite_name}" ) runner: CliRunner = CliRunner(mix_stderr=False) result: Result = runner.invoke( cli, f"--v3-api suite list", catch_exceptions=False, ) assert result.exit_code == 0 stdout: str = result.stdout assert "1 Expectation Suite found" in stdout assert f"{expectation_suite_dir_name}.{expectation_suite_name}" in stdout assert mock_emit.call_count == 3 assert mock_emit.call_args_list == [ mock.call( {"event_payload": {}, "event": "data_context.__init__", "success": True} ), mock.call( { "event": "cli.suite.list.begin", "event_payload": {"api_version": "v3"}, "success": True, } ), mock.call( { "event": "cli.suite.list.end", "event_payload": {"api_version": "v3"}, "success": True, } ), ] assert_no_logging_messages_or_tracebacks( my_caplog=caplog, click_result=result, ) @mock.patch( "great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit" ) def test_suite_list_with_multiple_suites( mock_emit, caplog, monkeypatch, empty_data_context_stats_enabled ): context: DataContext = empty_data_context_stats_enabled monkeypatch.chdir(os.path.dirname(context.root_directory)) project_dir: str = context.root_directory # noinspection PyUnusedLocal suite_0: ExpectationSuite = context.create_expectation_suite( expectation_suite_name="a.warning" ) # noinspection PyUnusedLocal suite_1: ExpectationSuite = context.create_expectation_suite( 
expectation_suite_name="b.warning" ) # noinspection PyUnusedLocal suite_2: ExpectationSuite = context.create_expectation_suite( expectation_suite_name="c.warning" ) config_file_path: str = os.path.join(project_dir, "great_expectations.yml") assert os.path.exists(config_file_path) runner: CliRunner = CliRunner(mix_stderr=False) result: Result = runner.invoke( cli, f"--v3-api suite list", catch_exceptions=False, ) assert result.exit_code == 0 stdout: str = result.stdout assert "3 Expectation Suites found:" in stdout assert "a.warning" in stdout assert "b.warning" in stdout assert "c.warning" in stdout assert mock_emit.call_count == 3 assert mock_emit.call_args_list == [ mock.call( {"event_payload": {}, "event": "data_context.__init__", "success": True} ), mock.call( { "event": "cli.suite.list.begin", "event_payload": {"api_version": "v3"}, "success": True, } ), mock.call( { "event": "cli.suite.list.end", "event_payload": {"api_version": "v3"}, "success": True, } ), ] assert_no_logging_messages_or_tracebacks( my_caplog=caplog, click_result=result, ) @mock.patch( "great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit" ) def test_suite_delete_with_zero_suites( mock_emit, caplog, monkeypatch, empty_data_context_stats_enabled ): context: DataContext = empty_data_context_stats_enabled monkeypatch.chdir(os.path.dirname(context.root_directory)) runner: CliRunner = CliRunner(mix_stderr=False) result: Result = runner.invoke( cli, f"--v3-api suite delete not_a_suite", catch_exceptions=False, ) assert result.exit_code == 1 stdout: str = result.stdout assert "No expectation suites found in the project" in stdout assert mock_emit.call_count == 3 assert mock_emit.call_args_list == [ mock.call( {"event_payload": {}, "event": "data_context.__init__", "success": True} ), mock.call( { "event": "cli.suite.delete.begin", "event_payload": {"api_version": "v3"}, "success": True, } ), mock.call( { "event": "cli.suite.delete.end", "event_payload": 
{"api_version": "v3"}, "success": False, } ), ] assert_no_logging_messages_or_tracebacks( my_caplog=caplog, click_result=result, ) @mock.patch( "great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit" ) def test_suite_delete_with_non_existent_suite( mock_emit, caplog, monkeypatch, empty_data_context_stats_enabled ): context: DataContext = empty_data_context_stats_enabled monkeypatch.chdir(os.path.dirname(context.root_directory)) expectation_suite_name: str = "test_suite_name" suite: ExpectationSuite = context.create_expectation_suite( expectation_suite_name=expectation_suite_name ) context.save_expectation_suite(expectation_suite=suite) assert ( context.list_expectation_suites()[0].expectation_suite_name == expectation_suite_name ) mock_emit.reset_mock() runner: CliRunner = CliRunner(mix_stderr=False) result: Result = runner.invoke( cli, f"--v3-api suite delete not_a_suite", catch_exceptions=False, ) assert result.exit_code == 1 stdout: str = result.stdout assert "No expectation suite named not_a_suite found" in stdout assert mock_emit.call_count == 3 assert mock_emit.call_args_list == [ mock.call( {"event_payload": {}, "event": "data_context.__init__", "success": True} ), mock.call( { "event": "cli.suite.delete.begin", "event_payload": {"api_version": "v3"}, "success": True, } ), mock.call( { "event": "cli.suite.delete.end", "event_payload": {"api_version": "v3"}, "success": False, } ), ] assert_no_logging_messages_or_tracebacks( my_caplog=caplog, click_result=result, ) @mock.patch( "great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit" ) def test_suite_delete_with_one_suite( mock_emit, caplog, monkeypatch, empty_data_context_stats_enabled ): context: DataContext = empty_data_context_stats_enabled monkeypatch.chdir(os.path.dirname(context.root_directory)) project_dir: str = empty_data_context_stats_enabled.root_directory expectation_suite_dir_name: str = "a_dir" expectation_suite_name: str = 
"test_suite_name" # noinspection PyUnusedLocal suite: ExpectationSuite = context.create_expectation_suite( expectation_suite_name=f"{expectation_suite_dir_name}.{expectation_suite_name}" ) context.save_expectation_suite(expectation_suite=suite) assert ( context.list_expectation_suites()[0].expectation_suite_name == f"{expectation_suite_dir_name}.{expectation_suite_name}" ) mock_emit.reset_mock() suite_dir: str = os.path.join( project_dir, "expectations", expectation_suite_dir_name ) suite_path: str = os.path.join(suite_dir, f"{expectation_suite_name}.json") assert os.path.isfile(suite_path) runner: CliRunner = CliRunner(mix_stderr=False) result: Result = runner.invoke( cli, f"--v3-api suite delete {expectation_suite_dir_name}.{expectation_suite_name}", input="\n", catch_exceptions=False, ) assert result.exit_code == 0 stdout: str = result.stdout assert ( f"Deleted the expectation suite named: {expectation_suite_dir_name}.{expectation_suite_name}" in stdout ) assert not os.path.isfile(suite_path) assert mock_emit.call_count == 3 assert mock_emit.call_args_list == [ mock.call( {"event_payload": {}, "event": "data_context.__init__", "success": True} ), mock.call( { "event": "cli.suite.delete.begin", "event_payload": {"api_version": "v3"}, "success": True, } ), mock.call( { "event": "cli.suite.delete.end", "event_payload": {"api_version": "v3"}, "success": True, } ), ] assert_no_logging_messages_or_tracebacks( my_caplog=caplog, click_result=result, ) @mock.patch( "great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit" ) def test_suite_delete_canceled_with_one_suite( mock_emit, caplog, monkeypatch, empty_data_context_stats_enabled ): context: DataContext = empty_data_context_stats_enabled monkeypatch.chdir(os.path.dirname(context.root_directory)) project_dir: str = empty_data_context_stats_enabled.root_directory expectation_suite_dir_name: str = "a_dir" expectation_suite_name: str = "test_suite_name" # noinspection PyUnusedLocal suite: 
ExpectationSuite = context.create_expectation_suite( expectation_suite_name=f"{expectation_suite_dir_name}.{expectation_suite_name}" ) context.save_expectation_suite(expectation_suite=suite) assert ( context.list_expectation_suites()[0].expectation_suite_name == f"{expectation_suite_dir_name}.{expectation_suite_name}" ) mock_emit.reset_mock() suite_dir: str = os.path.join( project_dir, "expectations", expectation_suite_dir_name ) suite_path: str = os.path.join(suite_dir, f"{expectation_suite_name}.json") assert os.path.isfile(suite_path) runner: CliRunner = CliRunner(mix_stderr=False) result: Result = runner.invoke( cli, f"--v3-api suite delete {expectation_suite_dir_name}.{expectation_suite_name}", input="n\n", catch_exceptions=False, ) assert result.exit_code == 0 stdout: str = result.stdout assert ( f"Suite `{expectation_suite_dir_name}.{expectation_suite_name}` was not deleted" in stdout ) assert os.path.isfile(suite_path) assert mock_emit.call_count == 2 assert mock_emit.call_args_list == [ mock.call( {"event_payload": {}, "event": "data_context.__init__", "success": True} ), mock.call( { "event": "cli.suite.delete.begin", "event_payload": {"api_version": "v3"}, "success": True, } ), ] assert_no_logging_messages_or_tracebacks( my_caplog=caplog, click_result=result, ) @mock.patch( "great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit" ) def test_suite_delete_with_one_suite_assume_yes_flag( mock_emit, caplog, monkeypatch, empty_data_context_stats_enabled ): context: DataContext = empty_data_context_stats_enabled monkeypatch.chdir(os.path.dirname(context.root_directory)) project_dir: str = context.root_directory expectation_suite_dir_name: str = "a_dir" expectation_suite_name: str = "test_suite_name" # noinspection PyUnusedLocal suite: ExpectationSuite = context.create_expectation_suite( expectation_suite_name=f"{expectation_suite_dir_name}.{expectation_suite_name}" ) context.save_expectation_suite(expectation_suite=suite) assert 
( context.list_expectation_suites()[0].expectation_suite_name == f"{expectation_suite_dir_name}.{expectation_suite_name}" ) mock_emit.reset_mock() suite_dir: str = os.path.join( project_dir, "expectations", expectation_suite_dir_name ) suite_path: str = os.path.join(suite_dir, f"{expectation_suite_name}.json") assert os.path.isfile(suite_path) runner: CliRunner = CliRunner(mix_stderr=False) result: Result = runner.invoke( cli, f"--v3-api --assume-yes suite delete {expectation_suite_dir_name}.{expectation_suite_name}", catch_exceptions=False, ) assert result.exit_code == 0 stdout: str = result.stdout assert ( f"Deleted the expectation suite named: {expectation_suite_dir_name}.{expectation_suite_name}" in stdout ) assert "Would you like to proceed? [Y/n]:" not in stdout # This assertion is extra assurance since this test is too permissive if we change the confirmation message assert "[Y/n]" not in stdout assert not os.path.isfile(suite_path) assert mock_emit.call_count == 3 assert mock_emit.call_args_list == [ mock.call( {"event_payload": {}, "event": "data_context.__init__", "success": True} ), mock.call( { "event": "cli.suite.delete.begin", "event_payload": {"api_version": "v3"}, "success": True, } ), mock.call( { "event": "cli.suite.delete.end", "event_payload": {"api_version": "v3"}, "success": True, } ), ] assert_no_logging_messages_or_tracebacks( my_caplog=caplog, click_result=result, ) result = runner.invoke( cli, f"--v3-api suite list", catch_exceptions=False, ) assert result.exit_code == 0 stdout = result.stdout assert result.exit_code == 0 assert "No Expectation Suites found" in stdout @mock.patch( "great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit" ) @mock.patch("subprocess.call", return_value=True, side_effect=None) def test_suite_new_profile_on_context_with_no_datasource_raises_error( mock_subprocess, mock_emit, caplog, monkeypatch, empty_data_context_stats_enabled, ): """ We call the "suite new --profile" command on a 
context with no datasource The command should: - exit with a clear error message - send a DataContext init success message - send a new fail message """ context: DataContext = empty_data_context_stats_enabled monkeypatch.chdir(os.path.dirname(context.root_directory)) expectation_suite_name: str = "test_suite_name" runner: CliRunner = CliRunner(mix_stderr=False) result: Result = runner.invoke( cli, [ "--v3-api", "suite", "new", "--interactive", "--profile", "--expectation-suite", f"{expectation_suite_name}", ], input="\n", catch_exceptions=False, ) assert result.exit_code == 1 stdout: str = result.output assert ( "No datasources found in the context. To add a datasource, run `great_expectations datasource new`" in stdout ) assert mock_subprocess.call_count == 0 assert mock_emit.call_count == 3 assert mock_emit.call_args_list == [ mock.call( {"event_payload": {}, "event": "data_context.__init__", "success": True} ), mock.call( { "event": "cli.suite.new.begin", "event_payload": {"api_version": "v3"}, "success": True, } ), mock.call( { "event": "cli.suite.new.end", "event_payload": {"api_version": "v3"}, "success": False, } ), ] assert_no_logging_messages_or_tracebacks( my_caplog=caplog, click_result=result, ) @mock.patch( "great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit" ) def test_suite_new_profile_on_existing_suite_raises_error( mock_emit, caplog, monkeypatch, empty_data_context_stats_enabled ): """ We call the "suite new --profile" command with an existing suite The command should: - exit with a clear error message - send a DataContext init success message - send a new fail message """ context: DataContext = empty_data_context_stats_enabled monkeypatch.chdir(os.path.dirname(context.root_directory)) project_dir: str = context.root_directory uncommitted_dir: str = os.path.join(project_dir, "uncommitted") expectation_suite_name: str = "test_suite_name" suite: ExpectationSuite = context.create_expectation_suite( 
expectation_suite_name=expectation_suite_name ) context.save_expectation_suite(expectation_suite=suite) assert ( context.list_expectation_suites()[0].expectation_suite_name == expectation_suite_name ) batch_request: dict = { "datasource_name": "my_datasource", "data_connector_name": "my_basic_data_connector", "data_asset_name": "Titanic_1911", } batch_request_file_path: str = os.path.join(uncommitted_dir, f"batch_request.json") with open(batch_request_file_path, "w") as json_file: json.dump(batch_request, json_file) mock_emit.reset_mock() runner: CliRunner = CliRunner(mix_stderr=False) result: Result = runner.invoke( cli, [ "--v3-api", "suite", "new", "--expectation-suite", f"{expectation_suite_name}", "--interactive", "--batch-request", f"{batch_request_file_path}", "--profile", "--no-jupyter", ], input="\n", catch_exceptions=False, ) assert result.exit_code == 1 stdout: str = result.output assert ( f"An expectation suite named `{expectation_suite_name}` already exists." in stdout ) assert ( f"If you intend to edit the suite please use `great_expectations suite edit {expectation_suite_name}`." 
in stdout ) assert mock_emit.call_count == 3 assert mock_emit.call_args_list == [ mock.call( {"event_payload": {}, "event": "data_context.__init__", "success": True} ), mock.call( { "event": "cli.suite.new.begin", "event_payload": {"api_version": "v3"}, "success": True, } ), mock.call( { "event": "cli.suite.new.end", "event_payload": {"api_version": "v3"}, "success": False, } ), ] assert_no_logging_messages_or_tracebacks( my_caplog=caplog, click_result=result, ) @mock.patch( "great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit" ) @mock.patch("subprocess.call", return_value=True, side_effect=None) @mock.patch("webbrowser.open", return_value=True, side_effect=None) def test_suite_new_profile_runs_notebook_no_jupyter( mock_webbroser, mock_subprocess, mock_emit, caplog, monkeypatch, titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled, ): """ We call the "suite new --profile" command The command should: - create a new notebook - send a DataContext init success message - send a new success message """ context: DataContext = titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled monkeypatch.chdir(os.path.dirname(context.root_directory)) project_dir: str = context.root_directory uncommitted_dir: str = os.path.join(project_dir, "uncommitted") expectation_suite_name: str = "test_suite_name" batch_request: dict = { "datasource_name": "my_datasource", "data_connector_name": "my_basic_data_connector", "data_asset_name": "Titanic_1911", } batch_request_file_path: str = os.path.join(uncommitted_dir, f"batch_request.json") with open(batch_request_file_path, "w") as json_file: json.dump(batch_request, json_file) mock_emit.reset_mock() runner: CliRunner = CliRunner(mix_stderr=False) result: Result = runner.invoke( cli, [ "--v3-api", "suite", "new", "--expectation-suite", f"{expectation_suite_name}", "--interactive", "--batch-request", 
f"{batch_request_file_path}", "--profile", "--no-jupyter", ], input="\n", catch_exceptions=False, ) assert result.exit_code == 0 stdout: str = result.stdout assert "Select a datasource" not in stdout assert ( "Opening a notebook for you now to edit your expectation suite!" not in stdout ) assert ( "If you wish to avoid this you can add the `--no-jupyter` flag." not in stdout ) expected_suite_path: str = os.path.join( project_dir, "expectations", f"{expectation_suite_name}.json" ) assert os.path.isfile(expected_suite_path) expected_notebook_path: str = os.path.join( uncommitted_dir, f"edit_{expectation_suite_name}.ipynb" ) assert os.path.isfile(expected_notebook_path) batch_request_string: str = ( str(BatchRequest(**batch_request)) .replace("{\n", "{\n ") .replace(",\n", ",\n ") .replace("\n}", ",\n}") ) batch_request_string = fr"batch_request = {batch_request_string}" cells_of_interest_dict: Dict[int, dict] = find_code_in_notebook( nb=load_notebook_from_path(notebook_path=expected_notebook_path), search_string=batch_request_string, ) assert len(cells_of_interest_dict) == 1 cells_of_interest_dict: Dict[int, dict] = find_code_in_notebook( nb=load_notebook_from_path(notebook_path=expected_notebook_path), search_string="context.open_data_docs(resource_identifier=suite_identifier)", ) assert not cells_of_interest_dict cells_of_interest_dict: Dict[int, dict] = find_code_in_notebook( nb=load_notebook_from_path(notebook_path=expected_notebook_path), search_string="context.open_data_docs(resource_identifier=validation_result_identifier)", ) assert len(cells_of_interest_dict) == 1 profiler_code_cell: str = f"""\ profiler = UserConfigurableProfiler( profile_dataset=validator, excluded_expectations=None, ignored_columns=ignored_columns, not_null_only=False, primary_or_compound_key=False, semantic_types_dict=None, table_expectations_only=False, value_set_threshold="MANY", ) suite = profiler.build_suite()""" profiler_code_cell = lint_code(code=profiler_code_cell).rstrip("\n") 
cells_of_interest_dict: Dict[int, dict] = find_code_in_notebook( nb=load_notebook_from_path(notebook_path=expected_notebook_path), search_string=profiler_code_cell, ) assert len(cells_of_interest_dict) == 1 run_notebook( notebook_path=expected_notebook_path, notebook_dir=uncommitted_dir, string_to_be_replaced="context.open_data_docs(resource_identifier=validation_result_identifier)", replacement_string="", ) context = DataContext(context_root_dir=project_dir) assert expectation_suite_name in context.list_expectation_suite_names() suite: ExpectationSuite = context.get_expectation_suite( expectation_suite_name=expectation_suite_name ) assert suite.expectations == [ ExpectationConfiguration( **{ "expectation_type": "expect_table_columns_to_match_ordered_list", "kwargs": { "column_list": [ "Unnamed: 0", "Name", "PClass", "Age", "Sex", "Survived", "SexCode", ] }, "meta": {}, } ), ExpectationConfiguration( **{ "expectation_type": "expect_table_row_count_to_be_between", "kwargs": {"max_value": 1313, "min_value": 1313}, "meta": {}, } ), ] assert mock_subprocess.call_count == 0 assert mock_webbroser.call_count == 0 assert mock_emit.call_count == 5 assert mock_emit.call_args_list == [ mock.call( {"event_payload": {}, "event": "data_context.__init__", "success": True} ), mock.call( { "event": "cli.suite.new.begin", "event_payload": {"api_version": "v3"}, "success": True, } ), mock.call( { "event_payload": { "anonymized_expectation_suite_name": "9df638a13b727807e51b13ec1839bcbe" }, "event": "data_context.save_expectation_suite", "success": True, } ), mock.call( { "event": "cli.suite.new.end", "event_payload": {"api_version": "v3"}, "success": True, } ), mock.call( {"event_payload": {}, "event": "data_context.__init__", "success": True} ), ] assert_no_logging_messages_or_tracebacks( my_caplog=caplog, click_result=result, ) @mock.patch( "great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit" ) @mock.patch("subprocess.call", return_value=True, 
side_effect=None) @mock.patch("webbrowser.open", return_value=True, side_effect=None) def test_suite_new_profile_runs_notebook_opens_jupyter( mock_webbroser, mock_subprocess, mock_emit, caplog, monkeypatch, titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled, ): """ We call the "suite new --profile" command The command should: - create a new notebook - open the notebook in jupyter - send a DataContext init success message - send a new success message """ context: DataContext = titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled monkeypatch.chdir(os.path.dirname(context.root_directory)) project_dir: str = context.root_directory uncommitted_dir: str = os.path.join(project_dir, "uncommitted") expectation_suite_name: str = "test_suite_name" batch_request: dict = { "datasource_name": "my_datasource", "data_connector_name": "my_basic_data_connector", "data_asset_name": "Titanic_1911", } batch_request_file_path: str = os.path.join(uncommitted_dir, f"batch_request.json") with open(batch_request_file_path, "w") as json_file: json.dump(batch_request, json_file) mock_emit.reset_mock() runner: CliRunner = CliRunner(mix_stderr=False) result: Result = runner.invoke( cli, [ "--v3-api", "suite", "new", "--expectation-suite", f"{expectation_suite_name}", "--interactive", "--batch-request", f"{batch_request_file_path}", "--profile", ], input="\n", catch_exceptions=False, ) assert result.exit_code == 0 stdout: str = result.stdout assert "Select a datasource" not in stdout assert "Opening a notebook for you now to edit your expectation suite!" in stdout assert "If you wish to avoid this you can add the `--no-jupyter` flag." 
in stdout expected_suite_path: str = os.path.join( project_dir, "expectations", f"{expectation_suite_name}.json" ) assert os.path.isfile(expected_suite_path) expected_notebook_path: str = os.path.join( uncommitted_dir, f"edit_{expectation_suite_name}.ipynb" ) assert os.path.isfile(expected_notebook_path) batch_request_string: str = ( str(BatchRequest(**batch_request)) .replace("{\n", "{\n ") .replace(",\n", ",\n ") .replace("\n}", ",\n}") ) batch_request_string = fr"batch_request = {batch_request_string}" cells_of_interest_dict: Dict[int, dict] = find_code_in_notebook( nb=load_notebook_from_path(notebook_path=expected_notebook_path), search_string=batch_request_string, ) assert len(cells_of_interest_dict) == 1 cells_of_interest_dict: Dict[int, dict] = find_code_in_notebook( nb=load_notebook_from_path(notebook_path=expected_notebook_path), search_string="context.open_data_docs(resource_identifier=suite_identifier)", ) assert not cells_of_interest_dict cells_of_interest_dict: Dict[int, dict] = find_code_in_notebook( nb=load_notebook_from_path(notebook_path=expected_notebook_path), search_string="context.open_data_docs(resource_identifier=validation_result_identifier)", ) assert len(cells_of_interest_dict) == 1 profiler_code_cell: str = f"""\ profiler = UserConfigurableProfiler( profile_dataset=validator, excluded_expectations=None, ignored_columns=ignored_columns, not_null_only=False, primary_or_compound_key=False, semantic_types_dict=None, table_expectations_only=False, value_set_threshold="MANY", ) suite = profiler.build_suite()""" profiler_code_cell = lint_code(code=profiler_code_cell).rstrip("\n") cells_of_interest_dict: Dict[int, dict] = find_code_in_notebook( nb=load_notebook_from_path(notebook_path=expected_notebook_path), search_string=profiler_code_cell, ) assert len(cells_of_interest_dict) == 1 run_notebook( notebook_path=expected_notebook_path, notebook_dir=uncommitted_dir, 
string_to_be_replaced="context.open_data_docs(resource_identifier=validation_result_identifier)", replacement_string="", ) context = DataContext(context_root_dir=project_dir) assert expectation_suite_name in context.list_expectation_suite_names() suite: ExpectationSuite = context.get_expectation_suite( expectation_suite_name=expectation_suite_name ) assert suite.expectations == [ ExpectationConfiguration( **{ "expectation_type": "expect_table_columns_to_match_ordered_list", "kwargs": { "column_list": [ "Unnamed: 0", "Name", "PClass", "Age", "Sex", "Survived", "SexCode", ] }, "meta": {}, } ), ExpectationConfiguration( **{ "expectation_type": "expect_table_row_count_to_be_between", "kwargs": {"max_value": 1313, "min_value": 1313}, "meta": {}, } ), ] assert mock_subprocess.call_count == 1 call_args: List[str] = mock_subprocess.call_args[0][0] assert call_args[0] == "jupyter" assert call_args[1] == "notebook" assert expected_notebook_path in call_args[2] assert mock_webbroser.call_count == 0 assert mock_emit.call_count == 5 assert mock_emit.call_args_list == [ mock.call( {"event_payload": {}, "event": "data_context.__init__", "success": True} ), mock.call( { "event": "cli.suite.new.begin", "event_payload": {"api_version": "v3"}, "success": True, } ), mock.call( { "event_payload": { "anonymized_expectation_suite_name": "9df638a13b727807e51b13ec1839bcbe" }, "event": "data_context.save_expectation_suite", "success": True, } ), mock.call( { "event": "cli.suite.new.end", "event_payload": {"api_version": "v3"}, "success": True, } ), mock.call( {"event_payload": {}, "event": "data_context.__init__", "success": True} ), ] assert_no_logging_messages_or_tracebacks( my_caplog=caplog, click_result=result, ) @pytest.fixture def suite_new_messages(): return { "no_msg": "", "happy_path_profile": "Entering interactive mode since you passed the --profile flag", "warning_profile": "Warning: Ignoring the --manual flag and entering interactive mode since you passed the --profile flag", 
"happy_path_batch_request": "Entering interactive mode since you passed the --batch-request flag", "warning_batch_request": "Warning: Ignoring the --manual flag and entering interactive mode since you passed the --batch-request flag", "happy_path_prompt_call": """\ How would you like to create your Expectation Suite? 1. Manually, without interacting with a sample batch of data (default) 2. Interactively, with a sample batch of data 3. Automatically, using a profiler """, "error_both_interactive_flags": "Please choose either --interactive or --manual, you may not choose both.", } @pytest.mark.parametrize( "interactive_flag,manual_flag,profile_flag,batch_request_flag,error_expected,prompt_input,return_interactive,return_profile,stdout_fixture,stderr_fixture", [ # No error expected # return_interactive = True, return_profile = False pytest.param( True, False, False, None, False, None, True, False, "no_msg", "no_msg", id="--interactive", ), # return_interactive = False, return_profile = False pytest.param( False, True, False, None, False, None, False, False, "no_msg", "no_msg", id="--manual", ), # return_interactive = True, return_profile = True pytest.param( False, False, True, None, False, None, True, True, "no_msg", "no_msg", id="--profile", ), pytest.param( True, False, True, None, False, None, True, True, "no_msg", "no_msg", id="--interactive --profile", ), # batch_request not empty pytest.param( True, False, False, "batch_request.json", False, None, True, False, "no_msg", "no_msg", id="--interactive --batch-request", ), pytest.param( False, False, True, "batch_request.json", False, None, True, True, "happy_path_profile", "no_msg", id="--profile --batch-request", ), pytest.param( True, False, True, "batch_request.json", False, None, True, True, "no_msg", "no_msg", id="--interactive --profile --batch-request", ), # Prompts # Just hit enter (default choice) pytest.param( False, False, False, None, False, "", False, False, "no_msg", "no_msg", id="prompt: Default 
Choice 1 - Manual suite creation (default)", ), # Choice 1 - Manual suite creation (default) pytest.param( False, False, False, None, False, "1", False, False, "no_msg", "no_msg", id="prompt: Choice 1 - Manual suite creation (default)", ), # Choice 2 - Interactive suite creation pytest.param( False, False, False, None, False, "2", True, False, "no_msg", "no_msg", id="prompt: Choice 2 - Interactive suite creation", ), # Choice 3 - Automatic suite creation (profiler) pytest.param( False, False, False, None, False, "3", True, True, "no_msg", "no_msg", id="prompt: Choice 3 - Automatic suite creation (profiler)", ), # No error but warning expected # no-interactive flag with batch_request, with/without profile flag pytest.param( False, True, False, "batch_request.json", False, None, True, False, "warning_batch_request", "no_msg", id="warning: --manual --batch-request", ), pytest.param( False, True, True, "batch_request.json", False, None, True, True, "warning_profile", "no_msg", id="warning: --manual --profile --batch-request", ), # no-interactive flag with profile and without batch request flag pytest.param( False, True, True, None, False, None, True, True, "warning_profile", "no_msg", id="warning: --manual --profile", ), # Yes error expected # both interactive flags, profile=False, with/without batch_request pytest.param( True, True, False, None, True, None, None, None, "error_both_interactive_flags", "no_msg", id="error: --interactive --manual", ), pytest.param( True, True, False, "batch_request.json", True, None, None, None, "error_both_interactive_flags", "no_msg", id="error: --interactive --manual --batch-request", ), # both interactive flags, profile=True, with/without batch_request pytest.param( True, True, True, None, True, None, None, None, "error_both_interactive_flags", "no_msg", id="error: --interactive --manual --profile", ), pytest.param( True, True, True, "batch_request.json", True, None, None, None, "error_both_interactive_flags", "no_msg", id="error: 
--interactive --manual --profile --batch-request", ), ], ) @mock.patch("click.prompt") @mock.patch( "great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit" ) def test__process_suite_new_flags_and_prompt( mock_emit, mock_prompt, interactive_flag, manual_flag, profile_flag, batch_request_flag, error_expected, prompt_input, return_interactive, return_profile, stdout_fixture, stderr_fixture, empty_data_context_stats_enabled, capsys, suite_new_messages, ): """ What does this test and why? _process_suite_new_flags_and_prompt should return the correct configuration or error based on input flags. """ usage_event_end: str = "cli.suite.new.end" context: DataContext = empty_data_context_stats_enabled # test happy paths if not error_expected: if prompt_input is not None: mock_prompt.side_effect = [prompt_input] processed_flags = _process_suite_new_flags_and_prompt( context=context, usage_event_end=usage_event_end, interactive_flag=interactive_flag, manual_flag=manual_flag, profile=profile_flag, batch_request=batch_request_flag, ) assert processed_flags == { "interactive": return_interactive, "profile": return_profile, } # Note - in this method on happy path no usage stats message is sent. Other messages are sent during the full # CLI suite new flow of creating a notebook etc. 
assert mock_emit.call_count == 0 assert mock_emit.call_args_list == [] # Check output captured: CaptureResult = capsys.readouterr() assert suite_new_messages[stdout_fixture] in captured.out assert suite_new_messages[stderr_fixture] in captured.err # Check prompt text and called only when appropriate if prompt_input is not None: assert mock_prompt.call_count == 1 assert ( mock_prompt.call_args_list[0][0][0] == suite_new_messages["happy_path_prompt_call"] ) else: assert mock_prompt.call_count == 0 # test error cases elif error_expected: with pytest.raises(SystemExit): _ = _process_suite_new_flags_and_prompt( context=context, usage_event_end=usage_event_end, interactive_flag=interactive_flag, manual_flag=manual_flag, profile=profile_flag, batch_request=batch_request_flag, ) # Check output captured: CaptureResult = capsys.readouterr() assert suite_new_messages[stdout_fixture] in captured.out assert suite_new_messages[stderr_fixture] in captured.err assert mock_prompt.call_count == 0 # Note - in this method only a single usage stats message is sent. Other messages are sent during the full # CLI suite new flow of creating a notebook etc. assert mock_emit.call_count == 1 assert mock_emit.call_args_list == [ mock.call( { "event": usage_event_end, "event_payload": {"api_version": "v3"}, "success": False, } ), ] @pytest.fixture def suite_edit_messages(): return { "no_msg": "", "happy_path_datasource_name": "Entering interactive mode since you passed the --datasource-name flag", "warning_datasource_name": "Warning: Ignoring the --manual flag and entering interactive mode since you passed the --datasource-name flag", "happy_path_batch_request": "Entering interactive mode since you passed the --batch-request flag", "warning_batch_request": "Warning: Ignoring the --manual flag and entering interactive mode since you passed the --batch-request flag", "happy_path_prompt_call": """\ How would you like to edit your Expectation Suite? 1. 
Manually, without interacting with a sample batch of data (default) 2. Interactively, with a sample batch of data """, "error_both_interactive_flags": "Please choose either --interactive or --manual, you may not choose both.", "error_both_datasource_name_and_batch_request_flags": """Only one of --datasource-name DATASOURCE_NAME and --batch-request <path to JSON file> \ options can be used. """, } @pytest.mark.parametrize( "interactive_flag,manual_flag,datasource_name_flag,batch_request_flag,error_expected,prompt_input,return_interactive,stdout_fixture,stderr_fixture", [ # No error expected # return_interactive = True pytest.param( True, False, None, None, False, None, True, "no_msg", "no_msg", id="--interactive", ), # return_interactive = False pytest.param( False, True, None, None, False, None, False, "no_msg", "no_msg", id="--manual", ), # return_interactive = True, --datasource-name pytest.param( False, False, "some_datasource_name", None, False, None, True, "happy_path_datasource_name", "no_msg", id="--datasource-name", ), pytest.param( True, False, "some_datasource_name", None, False, None, True, "no_msg", "no_msg", id="--interactive --datasource-name", ), # batch_request not empty pytest.param( True, False, None, "batch_request.json", False, None, True, "no_msg", "no_msg", id="--interactive --batch-request", ), pytest.param( False, False, None, "batch_request.json", False, None, True, "happy_path_batch_request", "no_msg", id="--batch-request", ), # Prompts # Just hit enter (default choice) pytest.param( False, False, None, None, False, "", False, "no_msg", "no_msg", id="prompt: Default Choice 1 - Manual suite edit (default)", ), # # Choice 1 - Manual suite edit (default) pytest.param( False, False, None, None, False, "1", False, "no_msg", "no_msg", id="prompt: Choice 1 - Manual suite edit (default)", ), # Choice 2 - Interactive suite edit pytest.param( False, False, None, None, False, "2", True, "no_msg", "no_msg", id="prompt: Choice 2 - Interactive suite 
edit", ), # No error but warning expected # no-interactive flag with batch_request pytest.param( False, True, None, "batch_request.json", False, None, True, "warning_batch_request", "no_msg", id="warning: --manual --batch-request", ), # no-interactive flag with datasource_name pytest.param( False, True, "some_datasource_name", None, False, None, True, "warning_datasource_name", "no_msg", id="warning: --manual --datasource-name", ), # Yes error expected # both interactive flags, datasource_name=None, with/without batch_request pytest.param( True, True, None, None, True, None, None, "error_both_interactive_flags", "no_msg", id="error: --interactive --manual", ), pytest.param( True, True, None, "batch_request.json", True, None, None, "error_both_interactive_flags", "no_msg", id="error: --interactive --manual --batch-request", ), # both interactive flags, datasource_name=something, with/without batch_request pytest.param( True, True, "some_datasource_name", None, True, None, None, "error_both_interactive_flags", "no_msg", id="error: --interactive --manual --datasource-name", ), pytest.param( True, True, "some_datasource_name", "batch_request.json", True, None, None, "error_both_datasource_name_and_batch_request_flags", "no_msg", id="error: --interactive --manual --datasource-name --batch-request", ), # both --datasource-name and --batch-request pytest.param( False, False, "some_datasource_name", "batch_request.json", True, None, True, "error_both_datasource_name_and_batch_request_flags", "no_msg", id="error: --datasource-name --batch-request", ), pytest.param( True, False, "some_datasource_name", "batch_request.json", True, None, True, "error_both_datasource_name_and_batch_request_flags", "no_msg", id="--interactive --datasource-name --batch-request", ), pytest.param( False, True, "some_datasource_name", "batch_request.json", True, None, True, "error_both_datasource_name_and_batch_request_flags", "no_msg", id="--manual --datasource-name --batch-request", ), ], ) 
@mock.patch("click.prompt")
@mock.patch(
    "great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit"
)
def test__process_suite_edit_flags_and_prompt(
    mock_emit,
    mock_prompt,
    interactive_flag,
    manual_flag,
    datasource_name_flag,
    batch_request_flag,
    error_expected,
    prompt_input,
    return_interactive,
    stdout_fixture,
    stderr_fixture,
    empty_data_context_stats_enabled,
    capsys,
    suite_edit_messages,
):
    """
    What does this test and why?
    _process_suite_edit_flags_and_prompt should return the correct configuration or error based on input flags.

    Parametrized (see the pytest.param list above) over every combination of
    --interactive/--manual/--datasource-name/--batch-request plus the
    interactive prompt choices.  ``error_expected`` selects between the two
    branches below; the ``*_fixture`` keys index into the
    ``suite_edit_messages`` fixture dict to pick the expected stdout/stderr
    message ("no_msg" maps to an empty expectation).
    """
    usage_event_end: str = "cli.suite.edit.end"
    context: DataContext = empty_data_context_stats_enabled

    # test happy paths
    if not error_expected:
        # Feed the canned prompt answer only for the prompt-driven params.
        if prompt_input is not None:
            mock_prompt.side_effect = [prompt_input]
        interactive: bool = _process_suite_edit_flags_and_prompt(
            context=context,
            usage_event_end=usage_event_end,
            interactive_flag=interactive_flag,
            manual_flag=manual_flag,
            datasource_name=datasource_name_flag,
            batch_request=batch_request_flag,
        )

        assert interactive == return_interactive

        # Note - in this method on happy path no usage stats message is sent. Other messages are sent during the full
        # CLI suite new flow of creating a notebook etc.
        assert mock_emit.call_count == 0
        assert mock_emit.call_args_list == []
        # Check output
        captured: CaptureResult = capsys.readouterr()
        assert suite_edit_messages[stdout_fixture] in captured.out
        assert suite_edit_messages[stderr_fixture] in captured.err

        # Check prompt text and called only when appropriate
        if prompt_input is not None:
            assert mock_prompt.call_count == 1
            # First positional arg of the single click.prompt call is the
            # prompt text shown to the user.
            assert (
                mock_prompt.call_args_list[0][0][0]
                == suite_edit_messages["happy_path_prompt_call"]
            )
        else:
            assert mock_prompt.call_count == 0

    # test error cases
    elif error_expected:
        # Invalid flag combinations abort the CLI via sys.exit.
        with pytest.raises(SystemExit):
            _ = _process_suite_edit_flags_and_prompt(
                context=context,
                usage_event_end=usage_event_end,
                interactive_flag=interactive_flag,
                manual_flag=manual_flag,
                datasource_name=datasource_name_flag,
                batch_request=batch_request_flag,
            )

        # Check output
        captured: CaptureResult = capsys.readouterr()
        assert suite_edit_messages[stdout_fixture] in captured.out
        assert suite_edit_messages[stderr_fixture] in captured.err
        # The user is never prompted on the error path.
        assert mock_prompt.call_count == 0

        # Note - in this method only a single usage stats message is sent. Other messages are sent during the full
        # CLI suite new flow of creating a notebook etc.
        assert mock_emit.call_count == 1
        assert mock_emit.call_args_list == [
            mock.call(
                {
                    "event": usage_event_end,
                    "event_payload": {"api_version": "v3"},
                    "success": False,
                }
            ),
        ]
great-expectations/great_expectations
tests/cli/test_suite.py
great_expectations/expectations/metrics/column_aggregate_metrics/column_median.py
import datetime
import json
import os
import traceback

from dateutil.parser import parse

from great_expectations import DataContext
from great_expectations.cli.upgrade_helpers.base_upgrade_helper import BaseUpgradeHelper
from great_expectations.data_context.store import (
    DatabaseStoreBackend,
    HtmlSiteStore,
    InMemoryStoreBackend,
    MetricStore,
    TupleFilesystemStoreBackend,
    TupleGCSStoreBackend,
    TupleS3StoreBackend,
    ValidationsStore,
)
from great_expectations.data_context.types.resource_identifiers import (
    ValidationResultIdentifier,
)


class UpgradeHelperV11(BaseUpgradeHelper):
    """Upgrade a project's stored validation results for Great Expectations 0.11.x.

    For every supported Validations Store and Data Docs validations backend,
    each stored validation result key gains a run_time component (derived from
    the run_name or from the stored object's timestamp), and the validation
    JSON's ``meta.run_id`` is rewritten to the new ``{run_name, run_time}``
    shape.  Database and unrecognized backends are skipped and recorded for
    manual migration.  Every action and exception is collected in
    ``self.upgrade_log`` and persisted to a JSON log file.
    """

    def __init__(self, data_context=None, context_root_dir=None):
        # Exactly one way to locate the project is required; a passed-in
        # context wins over instantiating one from context_root_dir.
        assert (
            data_context or context_root_dir
        ), "Please provide a data_context object or a context_root_dir."

        self.data_context = data_context or DataContext(
            context_root_dir=context_root_dir
        )

        # Running record of everything done (and skipped) during the upgrade;
        # serialized to disk by _save_upgrade_log().
        self.upgrade_log = {
            "skipped_validations_stores": {
                "database_store_backends": [],
                "unsupported": [],
            },
            "skipped_docs_validations_stores": {"unsupported": []},
            "skipped_metrics_stores": {
                "database_store_backends": [],
                "unsupported": [],
            },
            "exceptions": [
                # {
                #     "validation_store_name": store_name
                #     "src": src_url,
                #     "dest": dest_url,
                #     "exception_message": exception_message,
                # },
                # {
                #     "site_name": site_name,
                #     "src": src_url,
                #     "dest": dest_url,
                #     "exception_message": exception_message,
                # }
            ],
            "upgraded_validations_stores": {
                # STORE_NAME: {
                #     "validations_updated": [{
                #         "src": src_url,
                #         "dest": dest_url
                #     }],
                #     "exceptions": BOOL
                # }
            },
            "upgraded_docs_site_validations_stores": {
                # SITE_NAME: {
                #     "validation_result_pages_updated": [{
                #         src: src_url,
                #         dest: dest_url
                #     }],
                #     "exceptions": BOOL
                # }
            },
        }

        # Backends that will actually be upgraded, keyed by store/site name.
        self.upgrade_checklist = {
            "validations_store_backends": {},
            "docs_validations_store_backends": {},
        }

        # run_name -> run_time string cache, shared across all backends.
        self.validation_run_times = {}

        # Dispatch table: backend class -> method that resolves a run_time
        # for a given source key.  Also defines the set of supported backends.
        self.run_time_setters_by_backend_type = {
            TupleFilesystemStoreBackend: self._get_tuple_filesystem_store_backend_run_time,
            TupleS3StoreBackend: self._get_tuple_s3_store_backend_run_time,
            TupleGCSStoreBackend: self._get_tuple_gcs_store_backend_run_time,
        }

        self._generate_upgrade_checklist()

    def _generate_upgrade_checklist(self):
        """Inspect all stores and docs sites, sorting each into the upgrade
        checklist or the skipped sections of the upgrade log."""
        for (store_name, store) in self.data_context.stores.items():
            if not isinstance(store, (ValidationsStore, MetricStore)):
                continue
            elif isinstance(store, ValidationsStore):
                self._process_validations_store_for_checklist(store_name, store)
            elif isinstance(store, MetricStore):
                self._process_metrics_store_for_checklist(store_name, store)

        sites = (
            self.data_context.project_config_with_variables_substituted.data_docs_sites
        )

        if sites:
            for site_name, site_config in sites.items():
                self._process_docs_site_for_checklist(site_name, site_config)

    def _process_docs_site_for_checklist(self, site_name, site_config):
        """Add a docs site's validations backend to the checklist if its type
        is supported; otherwise log it as skipped/unsupported."""
        site_html_store = HtmlSiteStore(
            store_backend=site_config.get("store_backend"),
            runtime_environment={
                "data_context": self.data_context,
                "root_directory": self.data_context.root_directory,
                "site_name": site_name,
            },
        )
        site_validations_store_backend = site_html_store.store_backends[
            ValidationResultIdentifier
        ]

        if isinstance(
            site_validations_store_backend,
            tuple(list(self.run_time_setters_by_backend_type.keys())),
        ):
            self.upgrade_checklist["docs_validations_store_backends"][
                site_name
            ] = site_validations_store_backend
        else:
            self.upgrade_log["skipped_docs_validations_stores"]["unsupported"].append(
                {
                    "site_name": site_name,
                    "validations_store_backend_class": type(
                        site_validations_store_backend
                    ).__name__,
                }
            )

    def _process_validations_store_for_checklist(self, store_name, store):
        """Classify a ValidationsStore backend: database -> skipped (manual),
        supported tuple backend -> checklist, anything else -> unsupported."""
        store_backend = store.store_backend
        if isinstance(store_backend, DatabaseStoreBackend):
            self.upgrade_log["skipped_validations_stores"][
                "database_store_backends"
            ].append(
                {
                    "store_name": store_name,
                    "store_backend_class": type(store_backend).__name__,
                }
            )
        elif isinstance(
            store_backend, tuple(list(self.run_time_setters_by_backend_type.keys()))
        ):
            self.upgrade_checklist["validations_store_backends"][
                store_name
            ] = store_backend
        else:
            self.upgrade_log["skipped_validations_stores"]["unsupported"].append(
                {
                    "store_name": store_name,
                    "store_backend_class": type(store_backend).__name__,
                }
            )

    def _process_metrics_store_for_checklist(self, store_name, store):
        """Metrics stores are never auto-upgraded: database backends are
        logged for manual handling, in-memory ones are ignored, and the rest
        are logged as unsupported."""
        store_backend = store.store_backend
        if isinstance(store_backend, DatabaseStoreBackend):
            self.upgrade_log["skipped_metrics_stores"][
                "database_store_backends"
            ].append(
                {
                    "store_name": store_name,
                    "store_backend_class": type(store_backend).__name__,
                }
            )
        elif isinstance(store_backend, InMemoryStoreBackend):
            # In-memory metrics hold no persisted state, so nothing to migrate.
            pass
        else:
            self.upgrade_log["skipped_metrics_stores"]["unsupported"].append(
                {
                    "store_name": store_name,
                    "store_backend_class": type(store_backend).__name__,
                }
            )

    def _upgrade_store_backend(self, store_backend, store_name=None, site_name=None):
        """Migrate every key in one backend to the new run_time-bearing key.

        For validation stores (store_name) the JSON payload is rewritten too;
        for docs sites (site_name) the object is simply moved.  All failures
        are recorded via _update_upgrade_log rather than raised.
        """
        assert store_name or site_name, "Must pass either store_name or site_name."
        assert not (
            store_name and site_name
        ), "Must pass either store_name or site_name, not both."

        try:
            validation_source_keys = store_backend.list_keys()
        except Exception as e:
            exception_traceback = traceback.format_exc()
            exception_message = (
                f'{type(e).__name__}: "{str(e)}".  '
                f'Traceback: "{exception_traceback}".'
            )
            self._update_upgrade_log(
                store_backend=store_backend,
                store_name=store_name,
                site_name=site_name,
                exception_message=exception_message,
            )
            # NOTE(review): there is no return/raise here, so when list_keys()
            # fails, `validation_source_keys` is unbound and the loop below
            # raises NameError — confirm whether an early return was intended.

        for source_key in validation_source_keys:
            try:
                run_name = source_key[-2]
                dest_key = None
                if run_name not in self.validation_run_times:
                    # Resolve the run_time once per run_name via the
                    # backend-specific setter (filesystem/S3/GCS).
                    self.run_time_setters_by_backend_type.get(type(store_backend))(
                        source_key, store_backend
                    )
                dest_key_list = list(source_key)
                # New key layout: run_time is inserted just before the batch
                # identifier (the last tuple element).
                dest_key_list.insert(-1, self.validation_run_times[run_name])
                dest_key = tuple(dest_key_list)
            except Exception as e:
                exception_traceback = traceback.format_exc()
                exception_message = (
                    f'{type(e).__name__}: "{str(e)}".  '
                    f'Traceback: "{exception_traceback}".'
                )
                self._update_upgrade_log(
                    store_backend=store_backend,
                    source_key=source_key,
                    dest_key=dest_key,
                    store_name=store_name,
                    site_name=site_name,
                    exception_message=exception_message,
                )

            try:
                if store_name:
                    # Validation stores need their payload's run_id updated,
                    # not just a key rename.
                    self._update_validation_result_json(
                        source_key=source_key,
                        dest_key=dest_key,
                        run_name=run_name,
                        store_backend=store_backend,
                    )
                else:
                    # Docs-site pages only move to the new key.
                    store_backend.move(source_key, dest_key)
                self._update_upgrade_log(
                    store_backend=store_backend,
                    source_key=source_key,
                    dest_key=dest_key,
                    store_name=store_name,
                    site_name=site_name,
                )
            except Exception as e:
                exception_traceback = traceback.format_exc()
                exception_message = (
                    f'{type(e).__name__}: "{str(e)}".  '
                    f'Traceback: "{exception_traceback}".'
                )
                self._update_upgrade_log(
                    store_backend=store_backend,
                    source_key=source_key,
                    dest_key=dest_key,
                    store_name=store_name,
                    site_name=site_name,
                    exception_message=exception_message,
                )

    def _update_upgrade_log(
        self,
        store_backend,
        source_key=None,
        dest_key=None,
        store_name=None,
        site_name=None,
        exception_message=None,
    ):
        """Record one migration (or failure) in self.upgrade_log.

        With exception_message set, the entry goes into "exceptions" and the
        store/site's "exceptions" flag is raised; otherwise a src/dest pair is
        appended to the store/site's updated list.
        """
        assert store_name or site_name, "Must pass either store_name or site_name."
        assert not (
            store_name and site_name
        ), "Must pass either store_name or site_name, not both."

        try:
            src_url = store_backend.get_url_for_key(source_key) if source_key else "N/A"
        except Exception:
            src_url = f"Unable to generate URL for key: {source_key}"
        try:
            dest_url = store_backend.get_url_for_key(dest_key) if dest_key else "N/A"
        except Exception:
            dest_url = f"Unable to generate URL for key: {dest_key}"

        if not exception_message:
            log_dict = {"src": src_url, "dest": dest_url}
        else:
            key_name = "validation_store_name" if store_name else "site_name"
            log_dict = {
                key_name: store_name if store_name else site_name,
                "src": src_url,
                "dest": dest_url,
                "exception_message": exception_message,
            }
            self.upgrade_log["exceptions"].append(log_dict)

        if store_name:
            if exception_message:
                self.upgrade_log["upgraded_validations_stores"][store_name][
                    "exceptions"
                ] = True
            else:
                self.upgrade_log["upgraded_validations_stores"][store_name][
                    "validations_updated"
                ].append(log_dict)
        else:
            if exception_message:
                self.upgrade_log["upgraded_docs_site_validations_stores"][site_name][
                    "exceptions"
                ] = True
            else:
                self.upgrade_log["upgraded_docs_site_validations_stores"][site_name][
                    "validation_result_pages_updated"
                ].append(log_dict)

    def _update_validation_result_json(
        self, source_key, dest_key, run_name, store_backend
    ):
        """Rewrite a stored validation result's meta.run_id to the new
        {run_name, run_time} dict, store it under dest_key, and delete the
        original object."""
        new_run_id_dict = {
            "run_name": run_name,
            "run_time": self.validation_run_times[run_name],
        }
        validation_json_dict = json.loads(store_backend.get(source_key))
        validation_json_dict["meta"]["run_id"] = new_run_id_dict
        store_backend.set(dest_key, json.dumps(validation_json_dict))
        store_backend.remove_key(source_key)

    def _get_tuple_filesystem_store_backend_run_time(self, source_key, store_backend):
        """Cache a run_time for run_name: parse it from the run_name if it is
        a datetime string, else fall back to the source file's mtime."""
        run_name = source_key[-2]
        try:
            self.validation_run_times[run_name] = parse(run_name).strftime(
                "%Y%m%dT%H%M%S.%fZ"
            )
        except (ValueError, TypeError):
            source_path = os.path.join(
                store_backend.full_base_directory,
                store_backend._convert_key_to_filepath(source_key),
            )
            path_mod_timestamp = os.path.getmtime(source_path)
            path_mod_iso_str = datetime.datetime.fromtimestamp(
                path_mod_timestamp
            ).strftime("%Y%m%dT%H%M%S.%fZ")
            self.validation_run_times[run_name] = path_mod_iso_str

    def _get_tuple_s3_store_backend_run_time(self, source_key, store_backend):
        """Same as the filesystem variant, but the fallback timestamp is the
        S3 object's last_modified time."""
        import boto3

        s3 = boto3.resource("s3")
        run_name = source_key[-2]

        try:
            self.validation_run_times[run_name] = parse(run_name).strftime(
                "%Y%m%dT%H%M%S.%fZ"
            )
        except (ValueError, TypeError):
            source_path = store_backend._convert_key_to_filepath(source_key)
            if not source_path.startswith(store_backend.prefix):
                source_path = os.path.join(store_backend.prefix, source_path)
            source_object = s3.Object(store_backend.bucket, source_path)
            source_object_last_mod = source_object.last_modified.strftime(
                "%Y%m%dT%H%M%S.%fZ"
            )

            self.validation_run_times[run_name] = source_object_last_mod

    def _get_tuple_gcs_store_backend_run_time(self, source_key, store_backend):
        """Same as the filesystem variant, but the fallback timestamp is the
        GCS blob's time_created."""
        from google.cloud import storage

        gcs = storage.Client(project=store_backend.project)
        bucket = gcs.get_bucket(store_backend.bucket)
        run_name = source_key[-2]

        try:
            self.validation_run_times[run_name] = parse(run_name).strftime(
                "%Y%m%dT%H%M%S.%fZ"
            )
        except (ValueError, TypeError):
            source_path = store_backend._convert_key_to_filepath(source_key)
            if not source_path.startswith(store_backend.prefix):
                source_path = os.path.join(store_backend.prefix, source_path)
            source_blob_created_time = bucket.get_blob(
                source_path
            ).time_created.strftime("%Y%m%dT%H%M%S.%fZ")

            self.validation_run_times[run_name] = source_blob_created_time

    def _get_skipped_store_and_site_names(self):
        """Return three name lists gathered from the skipped sections of the
        upgrade log: (database-backed stores, unsupported stores,
        unsupported docs sites)."""
        validations_stores_with_database_backends = [
            store_dict.get("store_name")
            for store_dict in self.upgrade_log["skipped_validations_stores"][
                "database_store_backends"
            ]
        ]
        metrics_stores_with_database_backends = [
            store_dict.get("store_name")
            for store_dict in self.upgrade_log["skipped_metrics_stores"][
                "database_store_backends"
            ]
        ]
        unsupported_validations_stores = [
            store_dict.get("store_name")
            for store_dict in self.upgrade_log["skipped_validations_stores"][
                "unsupported"
            ]
        ]
        unsupported_metrics_stores = [
            store_dict.get("store_name")
            for store_dict in self.upgrade_log["skipped_metrics_stores"]["unsupported"]
        ]

        stores_with_database_backends = (
            validations_stores_with_database_backends
            + metrics_stores_with_database_backends
        )
        stores_with_unsupported_backends = (
            unsupported_validations_stores + unsupported_metrics_stores
        )
        doc_sites_with_unsupported_backends = [
            doc_site_dict.get("site_name")
            for doc_site_dict in self.upgrade_log["skipped_docs_validations_stores"][
                "unsupported"
            ]
        ]
        return (
            stores_with_database_backends,
            stores_with_unsupported_backends,
            doc_sites_with_unsupported_backends,
        )

    def get_upgrade_overview(self):
        """Build the pre-upgrade confirmation text shown in the CLI.

        Returns (overview_markup, True); the markup uses <cyan>/<green>/<red>
        tags consumed by the CLI's colorizer, and the trailing bool signals
        that user confirmation is required before proceeding.
        """
        (
            skip_with_database_backends,
            skip_with_unsupported_backends,
            skip_doc_sites_with_unsupported_backends,
        ) = self._get_skipped_store_and_site_names()

        validations_store_name_checklist = [
            store_name
            for store_name in self.upgrade_checklist[
                "validations_store_backends"
            ].keys()
        ]
        site_name_checklist = [
            site_name
            for site_name in self.upgrade_checklist[
                "docs_validations_store_backends"
            ].keys()
        ]

        upgrade_overview = f"""\
<cyan>\
++====================================++
|| UpgradeHelperV11: Upgrade Overview ||
++====================================++\
</cyan>

UpgradeHelperV11 will upgrade your project to be compatible with Great Expectations 0.11.x.
"""
        if not any(
            [
                validations_store_name_checklist,
                site_name_checklist,
                skip_with_database_backends,
                skip_with_unsupported_backends,
                skip_doc_sites_with_unsupported_backends,
            ]
        ):
            upgrade_overview += """
<green>\
Good news! No special upgrade steps are required to bring your project up to date.
The Upgrade Helper will simply increment the config_version of your great_expectations.yml for you.
</green>

Would you like to proceed?
"""
        else:
            upgrade_overview += """
<red>**WARNING**: Before proceeding, please make sure you have appropriate backups of your project.</red>
"""
            if validations_store_name_checklist or site_name_checklist:
                upgrade_overview += """
<cyan>\
Automated Steps
================
</cyan>
The following Stores and/or Data Docs sites will be upgraded:

"""
                upgrade_overview += (
                    f"""\
    - Validation Stores: {", ".join(validations_store_name_checklist)}
"""
                    if validations_store_name_checklist
                    else ""
                )
                upgrade_overview += (
                    f"""\
    - Data Docs Sites: {", ".join(site_name_checklist)}
"""
                    if site_name_checklist
                    else ""
                )

            if any(
                [
                    skip_with_database_backends,
                    skip_with_unsupported_backends,
                    skip_doc_sites_with_unsupported_backends,
                ]
            ):
                upgrade_overview += """
<cyan>\
Manual Steps
=============
</cyan>
The following Stores and/or Data Docs sites must be upgraded manually, due to having a database backend, or backend
type that is unsupported or unrecognized:

"""
                upgrade_overview += (
                    f"""\
    - Stores with database backends: {", ".join(skip_with_database_backends)}
"""
                    if skip_with_database_backends
                    else ""
                )
                upgrade_overview += (
                    f"""\
    - Stores with unsupported/unrecognized backends: {", ".join(skip_with_unsupported_backends)}
"""
                    if skip_with_unsupported_backends
                    else ""
                )
                upgrade_overview += (
                    f"""\
    - Data Docs sites with unsupported/unrecognized backends: {", ".join(skip_doc_sites_with_unsupported_backends)}
"""
                    if skip_doc_sites_with_unsupported_backends
                    else ""
                )
            else:
                upgrade_overview += """
<cyan>\
Manual Steps
=============
</cyan>
No manual upgrade steps are required.
"""

        upgrade_overview += """
<cyan>\
Upgrade Confirmation
=====================
</cyan>
Please consult the 0.11.x migration guide for instructions on how to complete any required manual steps or
to learn more about the automated upgrade process:

<cyan>https://docs.greatexpectations.io/en/latest/how_to_guides/migrating_versions.html#id1</cyan>

Would you like to proceed with the project upgrade?\
"""
        return upgrade_overview, True

    def _save_upgrade_log(self):
        """Write self.upgrade_log as pretty-printed JSON under
        uncommitted/logs/project_upgrades/ and return the file path."""
        current_time = datetime.datetime.now(datetime.timezone.utc).strftime(
            "%Y%m%dT%H%M%S.%fZ"
        )
        dest_path = os.path.join(
            self.data_context._context_root_directory,
            "uncommitted",
            "logs",
            "project_upgrades",
            f"UpgradeHelperV11_{current_time}.json",
        )
        dest_dir, dest_filename = os.path.split(dest_path)
        os.makedirs(dest_dir, exist_ok=True)
        with open(dest_path, "w") as outfile:
            json.dump(self.upgrade_log, outfile, indent=2)

        return dest_path

    def _generate_upgrade_report(self):
        """Persist the upgrade log and build the post-upgrade report.

        Returns (report_markup, increment_version, exception_occurred);
        config_version is only incremented when nothing was skipped and no
        exceptions were logged.
        """
        upgrade_log_path = self._save_upgrade_log()
        skipped_stores_or_sites = any(self._get_skipped_store_and_site_names())
        exception_occurred = False
        exceptions = self.upgrade_log.get("exceptions")
        if skipped_stores_or_sites or exceptions:
            increment_version = False
        else:
            increment_version = True
        upgrade_report = f"""\
<cyan>\
++================++
|| Upgrade Report ||
++================++\
</cyan>
"""
        if increment_version:
            upgrade_report += f"""
<green>\
Your project was successfully upgraded to be compatible with Great Expectations 0.11.x.
The config_version of your great_expectations.yml has been automatically incremented to 2.0.

A log detailing the upgrade can be found here:

    - {upgrade_log_path}\
</green>\
"""
        else:
            if exceptions:
                exception_occurred = True
                upgrade_report += f"""
<red>\
The Upgrade Helper encountered some exceptions during the upgrade process.
Please review the exceptions section of the upgrade log and migrate the affected files manually,
as detailed in the 0.11.x migration guide.

The upgrade log can be found here:

    - {upgrade_log_path}\
</red>\
"""
            else:
                upgrade_report += f"""
<yellow>\
The Upgrade Helper has completed the automated upgrade steps.

A log detailing the upgrade can be found here:

    - {upgrade_log_path}\
</yellow>\
"""
        return upgrade_report, increment_version, exception_occurred

    def upgrade_project(self):
        """Run the upgrade over every checklisted backend and return the
        report tuple from _generate_upgrade_report()."""
        # NOTE(review): both loops swallow all exceptions silently; per-item
        # failures are already logged inside _upgrade_store_backend, so this
        # guard only hides failures outside that logging — confirm intent.
        try:
            for (store_name, store_backend) in self.upgrade_checklist[
                "validations_store_backends"
            ].items():
                self.upgrade_log["upgraded_validations_stores"][store_name] = {
                    "validations_updated": [],
                    "exceptions": False,
                }
                self._upgrade_store_backend(store_backend, store_name=store_name)
        except Exception:
            pass

        try:
            for (site_name, store_backend) in self.upgrade_checklist[
                "docs_validations_store_backends"
            ].items():
                self.upgrade_log["upgraded_docs_site_validations_stores"][site_name] = {
                    "validation_result_pages_updated": [],
                    "exceptions": False,
                }
                self._upgrade_store_backend(store_backend, site_name=site_name)
        except Exception:
            pass

        # return a report of what happened, boolean indicating whether version should be incremented
        # if the version should not be incremented, the report should include instructions for steps to
        # be performed manually
        (
            upgrade_report,
            increment_version,
            exception_occurred,
        ) = self._generate_upgrade_report()
        return upgrade_report, increment_version, exception_occurred
import json
import os
from typing import Dict, List
from unittest import mock

import click
import pytest
from _pytest.capture import CaptureResult
from click.testing import CliRunner, Result

from great_expectations import DataContext
from great_expectations.cli import cli
from great_expectations.cli.suite import (
    _process_suite_edit_flags_and_prompt,
    _process_suite_new_flags_and_prompt,
)
from great_expectations.core import ExpectationConfiguration
from great_expectations.core.batch import BatchRequest
from great_expectations.core.expectation_suite import ExpectationSuite
from great_expectations.util import lint_code
from tests.cli.utils import assert_no_logging_messages_or_tracebacks
from tests.render.test_util import (
    find_code_in_notebook,
    load_notebook_from_path,
    run_notebook,
)


def test_suite_help_output(caplog):
    """`great_expectations suite --help` lists the expected subcommands."""
    runner: CliRunner = CliRunner(mix_stderr=False)
    result: Result = runner.invoke(cli, ["--v3-api", "suite"], catch_exceptions=False)
    assert result.exit_code == 0
    stdout: str = result.stdout
    # Full help screen must match verbatim (subcommand list and one-liners).
    assert (
        """
Usage: great_expectations suite [OPTIONS] COMMAND [ARGS]...

  Expectation Suite operations

Options:
  --help  Show this message and exit.

Commands:
  delete  Delete an Expectation Suite from the Expectation Store.
  demo    This command is not supported in the v3 (Batch Request) API.
  edit    Edit an existing Expectation Suite.
  list    List existing Expectation Suites.
  new     Create a new Expectation Suite.
"""
        in stdout
    )
    assert_no_logging_messages_or_tracebacks(
        my_caplog=caplog,
        click_result=result,
    )


@mock.patch(
    "great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit"
)
def test_suite_demo_deprecation_message(
    mock_emit, caplog, monkeypatch, empty_data_context_stats_enabled
):
    """`suite demo` is a no-op in the v3 API and says so, while still
    emitting begin/end usage-stats events."""
    context: DataContext = empty_data_context_stats_enabled

    monkeypatch.chdir(os.path.dirname(context.root_directory))

    runner: CliRunner = CliRunner(mix_stderr=False)
    result: Result = runner.invoke(
        cli,
        f"--v3-api suite demo",
        catch_exceptions=False,
    )
    assert result.exit_code == 0

    stdout: str = result.stdout
    assert "This command is not supported in the v3 (Batch Request) API." in stdout

    expected_call_args_list = [
        mock.call(
            {"event_payload": {}, "event": "data_context.__init__", "success": True}
        ),
        mock.call(
            {
                "event": "cli.suite.demo.begin",
                "event_payload": {"api_version": "v3"},
                "success": True,
            }
        ),
        mock.call(
            {
                "event": "cli.suite.demo.end",
                "event_payload": {"api_version": "v3"},
                "success": True,
            }
        ),
    ]
    assert mock_emit.call_args_list == expected_call_args_list
    assert mock_emit.call_count == len(expected_call_args_list)

    assert_no_logging_messages_or_tracebacks(
        my_caplog=caplog,
        click_result=result,
    )


@mock.patch(
    "great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit"
)
@mock.patch("subprocess.call", return_value=True, side_effect=None)
@mock.patch("webbrowser.open", return_value=True, side_effect=None)
def test_suite_new_non_interactive_with_suite_name_prompted_default_runs_notebook_opens_jupyter(
    mock_webbroser,
    mock_subprocess,
    mock_emit,
    caplog,
    monkeypatch,
    titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled,
):
    """`suite new` with no args: accepting the default prompted name creates
    the suite and an edit notebook, then launches jupyter (subprocess is
    mocked, the notebook is run manually below)."""
    context: DataContext = titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled
    monkeypatch.chdir(os.path.dirname(context.root_directory))

    project_dir: str = context.root_directory
    uncommitted_dir: str = os.path.join(project_dir, "uncommitted")

    expectation_suite_name: str = "warning"

    runner: CliRunner = CliRunner(mix_stderr=False)
    # A single newline accepts the default suite name ("warning").
    result: Result = runner.invoke(
        cli,
        f"--v3-api suite new",
        input="\n",
        catch_exceptions=False,
    )
    assert result.exit_code == 0

    stdout: str = result.stdout
    assert "Select a datasource" not in stdout
    assert f"Name the new Expectation Suite [warning]:" in stdout
    assert "Opening a notebook for you now to edit your expectation suite!" in stdout
    assert "If you wish to avoid this you can add the `--no-jupyter` flag." in stdout

    expected_suite_path: str = os.path.join(
        project_dir, "expectations", f"{expectation_suite_name}.json"
    )
    assert os.path.isfile(expected_suite_path)

    expected_notebook_path: str = os.path.join(
        uncommitted_dir, f"edit_{expectation_suite_name}.ipynb"
    )
    assert os.path.isfile(expected_notebook_path)

    # Execute the generated notebook, stripping the data-docs call so no
    # browser/doc build happens inside the test.
    run_notebook(
        notebook_path=expected_notebook_path,
        notebook_dir=uncommitted_dir,
        string_to_be_replaced="context.open_data_docs(resource_identifier=suite_identifier)",
        replacement_string="",
    )

    context = DataContext(context_root_dir=project_dir)
    assert expectation_suite_name in context.list_expectation_suite_names()

    suite: ExpectationSuite = context.get_expectation_suite(
        expectation_suite_name=expectation_suite_name
    )
    assert suite.expectations == []

    assert mock_subprocess.call_count == 1
    call_args: List[str] = mock_subprocess.call_args[0][0]
    assert call_args[0] == "jupyter"
    assert call_args[1] == "notebook"
    assert expected_notebook_path in call_args[2]

    assert mock_webbroser.call_count == 0

    assert mock_emit.call_count == 4
    assert mock_emit.call_args_list == [
        mock.call(
            {"event_payload": {}, "event": "data_context.__init__", "success": True}
        ),
        mock.call(
            {
                "event": "cli.suite.new.begin",
                "event_payload": {"api_version": "v3"},
                "success": True,
            }
        ),
        mock.call(
            {
                "event": "cli.suite.new.end",
                "event_payload": {"api_version": "v3"},
                "success": True,
            }
        ),
        mock.call(
            {"event_payload": {}, "event": "data_context.__init__", "success": True}
        ),
    ]

    assert_no_logging_messages_or_tracebacks(
        my_caplog=caplog,
        click_result=result,
    )


@mock.patch(
    "great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit"
)
@mock.patch("subprocess.call", return_value=True, side_effect=None)
@mock.patch("webbrowser.open", return_value=True, side_effect=None)
def test_suite_new_non_interactive_with_suite_name_prompted_custom_runs_notebook_opens_jupyter(
    mock_webbroser,
    mock_subprocess,
    mock_emit,
    caplog,
    monkeypatch,
    titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled,
):
    """Same flow as the prompted-default test, but typing a custom suite
    name at the prompt."""
    context: DataContext = titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled
    monkeypatch.chdir(os.path.dirname(context.root_directory))

    project_dir: str = context.root_directory
    uncommitted_dir: str = os.path.join(project_dir, "uncommitted")

    expectation_suite_name: str = "test_suite_name"

    runner: CliRunner = CliRunner(mix_stderr=False)
    result: Result = runner.invoke(
        cli,
        f"--v3-api suite new",
        input=f"1\n{expectation_suite_name}\n",
        catch_exceptions=False,
    )
    assert result.exit_code == 0

    stdout: str = result.stdout
    assert "Select a datasource" not in stdout
    assert f"Name the new Expectation Suite [warning]:" in stdout
    assert "Opening a notebook for you now to edit your expectation suite!" in stdout
    assert "If you wish to avoid this you can add the `--no-jupyter` flag." in stdout

    expected_suite_path: str = os.path.join(
        project_dir, "expectations", f"{expectation_suite_name}.json"
    )
    assert os.path.isfile(expected_suite_path)

    expected_notebook_path: str = os.path.join(
        uncommitted_dir, f"edit_{expectation_suite_name}.ipynb"
    )
    assert os.path.isfile(expected_notebook_path)

    run_notebook(
        notebook_path=expected_notebook_path,
        notebook_dir=uncommitted_dir,
        string_to_be_replaced="context.open_data_docs(resource_identifier=suite_identifier)",
        replacement_string="",
    )

    context = DataContext(context_root_dir=project_dir)
    assert expectation_suite_name in context.list_expectation_suite_names()

    suite: ExpectationSuite = context.get_expectation_suite(
        expectation_suite_name=expectation_suite_name
    )
    assert suite.expectations == []

    assert mock_subprocess.call_count == 1
    call_args: List[str] = mock_subprocess.call_args[0][0]
    assert call_args[0] == "jupyter"
    assert call_args[1] == "notebook"
    assert expected_notebook_path in call_args[2]

    assert mock_webbroser.call_count == 0

    assert mock_emit.call_count == 4
    assert mock_emit.call_args_list == [
        mock.call(
            {"event_payload": {}, "event": "data_context.__init__", "success": True}
        ),
        mock.call(
            {
                "event": "cli.suite.new.begin",
                "event_payload": {"api_version": "v3"},
                "success": True,
            }
        ),
        mock.call(
            {
                "event": "cli.suite.new.end",
                "event_payload": {"api_version": "v3"},
                "success": True,
            }
        ),
        mock.call(
            {"event_payload": {}, "event": "data_context.__init__", "success": True}
        ),
    ]

    assert_no_logging_messages_or_tracebacks(
        my_caplog=caplog,
        click_result=result,
    )


@mock.patch(
    "great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit"
)
@mock.patch("subprocess.call", return_value=True, side_effect=None)
@mock.patch("webbrowser.open", return_value=True, side_effect=None)
def test_suite_new_non_interactive_with_suite_name_arg_custom_runs_notebook_opens_jupyter(
    mock_webbroser,
    mock_subprocess,
    mock_emit,
    caplog,
    monkeypatch,
    titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled,
):
    """Passing --expectation-suite skips the name prompt entirely; the rest
    of the create-notebook-open-jupyter flow is unchanged."""
    context: DataContext = titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled
    monkeypatch.chdir(os.path.dirname(context.root_directory))

    project_dir: str = context.root_directory
    uncommitted_dir: str = os.path.join(project_dir, "uncommitted")

    expectation_suite_name: str = "test_suite_name"

    runner: CliRunner = CliRunner(mix_stderr=False)
    result: Result = runner.invoke(
        cli,
        f"--v3-api suite new --expectation-suite {expectation_suite_name}",
        catch_exceptions=False,
    )
    assert result.exit_code == 0

    stdout: str = result.stdout
    assert "Select a datasource" not in stdout
    assert "Opening a notebook for you now to edit your expectation suite!" in stdout
    assert "If you wish to avoid this you can add the `--no-jupyter` flag." in stdout

    expected_suite_path: str = os.path.join(
        project_dir, "expectations", f"{expectation_suite_name}.json"
    )
    assert os.path.isfile(expected_suite_path)

    expected_notebook_path: str = os.path.join(
        uncommitted_dir, f"edit_{expectation_suite_name}.ipynb"
    )
    assert os.path.isfile(expected_notebook_path)

    run_notebook(
        notebook_path=expected_notebook_path,
        notebook_dir=uncommitted_dir,
        string_to_be_replaced="context.open_data_docs(resource_identifier=suite_identifier)",
        replacement_string="",
    )

    context = DataContext(context_root_dir=project_dir)
    assert expectation_suite_name in context.list_expectation_suite_names()

    suite: ExpectationSuite = context.get_expectation_suite(
        expectation_suite_name=expectation_suite_name
    )
    assert suite.expectations == []

    assert mock_subprocess.call_count == 1
    call_args: List[str] = mock_subprocess.call_args[0][0]
    assert call_args[0] == "jupyter"
    assert call_args[1] == "notebook"
    assert expected_notebook_path in call_args[2]

    assert mock_webbroser.call_count == 0

    assert mock_emit.call_count == 4
    assert mock_emit.call_args_list == [
        mock.call(
            {"event_payload": {}, "event": "data_context.__init__", "success": True}
        ),
        mock.call(
            {
                "event": "cli.suite.new.begin",
                "event_payload": {"api_version": "v3"},
                "success": True,
            }
        ),
        mock.call(
            {
                "event": "cli.suite.new.end",
                "event_payload": {"api_version": "v3"},
                "success": True,
            }
        ),
        mock.call(
            {"event_payload": {}, "event": "data_context.__init__", "success": True}
        ),
    ]

    assert_no_logging_messages_or_tracebacks(
        my_caplog=caplog,
        click_result=result,
    )


@mock.patch(
    "great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit"
)
@mock.patch("subprocess.call", return_value=True, side_effect=None)
@mock.patch("webbrowser.open", return_value=True, side_effect=None)
def test_suite_new_non_interactive_with_suite_name_arg_custom_runs_notebook_no_jupyter(
    mock_webbroser,
    mock_subprocess,
    mock_emit,
    caplog,
    monkeypatch,
    titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled,
):
    """With --no-jupyter the notebook is still written but jupyter is never
    launched (subprocess mock stays uncalled)."""
    context: DataContext = titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled
    monkeypatch.chdir(os.path.dirname(context.root_directory))

    project_dir: str = context.root_directory
    uncommitted_dir: str = os.path.join(project_dir, "uncommitted")

    expectation_suite_name: str = "test_suite_name"

    runner: CliRunner = CliRunner(mix_stderr=False)
    result: Result = runner.invoke(
        cli,
        f"--v3-api suite new --expectation-suite {expectation_suite_name} --no-jupyter",
        catch_exceptions=False,
    )
    assert result.exit_code == 0

    stdout: str = result.stdout
    assert "Select a datasource" not in stdout
    assert (
        "Opening a notebook for you now to edit your expectation suite!" not in stdout
    )
    assert (
        "If you wish to avoid this you can add the `--no-jupyter` flag." not in stdout
    )

    expected_suite_path: str = os.path.join(
        project_dir, "expectations", f"{expectation_suite_name}.json"
    )
    assert os.path.isfile(expected_suite_path)

    expected_notebook_path: str = os.path.join(
        uncommitted_dir, f"edit_{expectation_suite_name}.ipynb"
    )
    assert os.path.isfile(expected_notebook_path)

    run_notebook(
        notebook_path=expected_notebook_path,
        notebook_dir=uncommitted_dir,
        string_to_be_replaced="context.open_data_docs(resource_identifier=suite_identifier)",
        replacement_string="",
    )

    context = DataContext(context_root_dir=project_dir)
    assert expectation_suite_name in context.list_expectation_suite_names()

    suite: ExpectationSuite = context.get_expectation_suite(
        expectation_suite_name=expectation_suite_name
    )
    assert suite.expectations == []

    assert mock_subprocess.call_count == 0

    assert mock_webbroser.call_count == 0

    assert mock_emit.call_count == 4
    assert mock_emit.call_args_list == [
        mock.call(
            {"event_payload": {}, "event": "data_context.__init__", "success": True}
        ),
        mock.call(
            {
                "event": "cli.suite.new.begin",
                "event_payload": {"api_version": "v3"},
                "success": True,
            }
        ),
        mock.call(
            {
                "event": "cli.suite.new.end",
                "event_payload": {"api_version": "v3"},
                "success": True,
            }
        ),
        mock.call(
            {"event_payload": {}, "event": "data_context.__init__", "success": True}
        ),
    ]

    assert_no_logging_messages_or_tracebacks(
        my_caplog=caplog,
        click_result=result,
    )


@mock.patch(
    "great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit"
)
@mock.patch("subprocess.call", return_value=True, side_effect=None)
@mock.patch("webbrowser.open", return_value=True, side_effect=None)
def test_suite_new_interactive_nonexistent_batch_request_json_file_raises_error(
    mock_webbroser,
    mock_subprocess,
    mock_emit,
    caplog,
    monkeypatch,
    titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled,
):
    """A --batch-request path that does not exist must abort `suite new`
    with exit code 1 and create no suite."""
    context: DataContext = titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled
    monkeypatch.chdir(os.path.dirname(context.root_directory))
    project_dir: str = context.root_directory
    expectation_suite_name: str = "test_suite_name"
    runner: CliRunner = CliRunner(mix_stderr=False)
    result: Result = runner.invoke(
        cli,
        f"""--v3-api suite new --expectation-suite {expectation_suite_name} --interactive --batch-request nonexistent_file.json --no-jupyter """,
        catch_exceptions=False,
    )
    assert result.exit_code == 1
    stdout: str = result.stdout
    assert 'The JSON file with the path "nonexistent_file.json' in stdout
    # Reload the context from disk to confirm nothing was persisted.
    context = DataContext(context_root_dir=project_dir)
    assert expectation_suite_name not in context.list_expectation_suite_names()
    assert mock_subprocess.call_count == 0
    assert mock_webbroser.call_count == 0
    assert mock_emit.call_count == 4
    assert mock_emit.call_args_list == [
        mock.call(
            {"event_payload": {}, "event": "data_context.__init__", "success": True}
        ),
        mock.call(
            {
                "event": "cli.suite.new.begin",
                "event_payload": {"api_version": "v3"},
                "success": True,
            }
        ),
        mock.call(
            {
                "event": "cli.suite.new.end",
                "event_payload": {"api_version": "v3"},
                "success": False,
            }
        ),
        mock.call(
            {"event_payload": {}, "event": "data_context.__init__", "success": True}
        ),
    ]
    assert_no_logging_messages_or_tracebacks(
        my_caplog=caplog,
        click_result=result,
    )


@mock.patch(
    "great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit"
)
@mock.patch("subprocess.call", return_value=True, side_effect=None)
@mock.patch("webbrowser.open", return_value=True, side_effect=None)
def test_suite_new_interactive_malformed_batch_request_json_file_raises_error(
    mock_webbroser,
    mock_subprocess,
    mock_emit,
    caplog,
    monkeypatch,
    titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled,
):
    """A --batch-request file containing invalid JSON must exit with code 1,
    create no suite, and emit a failed 'cli.suite.new.end' usage-statistics event.
    """
    context: DataContext = titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled
    monkeypatch.chdir(os.path.dirname(context.root_directory))
    project_dir: str = context.root_directory
    uncommitted_dir: str = os.path.join(project_dir, "uncommitted")
    expectation_suite_name: str = "test_suite_name"
    # NOTE(review): the f-prefix on this literal is superfluous (no placeholders).
    batch_request_file_path: str = os.path.join(uncommitted_dir, f"batch_request.json")
    with open(batch_request_file_path, "w") as json_file:
        json_file.write("not_proper_json")
    runner: CliRunner = CliRunner(mix_stderr=False)
    result: Result = runner.invoke(
        cli,
        f"""--v3-api suite new --expectation-suite {expectation_suite_name} --interactive --batch-request {batch_request_file_path} --no-jupyter """,
        catch_exceptions=False,
    )
    assert result.exit_code == 1
    stdout: str = result.stdout
    assert "Error" in stdout
    assert "occurred while attempting to load the JSON file with the path" in stdout
    context = DataContext(context_root_dir=project_dir)
    assert expectation_suite_name not in context.list_expectation_suite_names()
    assert mock_subprocess.call_count == 0
    assert mock_webbroser.call_count == 0
    assert mock_emit.call_count == 4
    assert mock_emit.call_args_list == [
        mock.call(
            {"event_payload": {}, "event": "data_context.__init__", "success": True}
        ),
        mock.call(
            {
                "event": "cli.suite.new.begin",
                "event_payload": {"api_version": "v3"},
                "success": True,
            }
        ),
        mock.call(
            {
                "event": "cli.suite.new.end",
                "event_payload": {"api_version": "v3"},
                "success": False,
            }
        ),
        mock.call(
            {"event_payload": {}, "event": "data_context.__init__", "success": True}
        ),
    ]
    assert_no_logging_messages_or_tracebacks(
        my_caplog=caplog,
        click_result=result,
    )


@mock.patch(
    "great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit"
)
@mock.patch("subprocess.call", return_value=True, side_effect=None)
@mock.patch("webbrowser.open", return_value=True, side_effect=None)
def test_suite_new_interactive_valid_batch_request_from_json_file_in_notebook_runs_notebook_no_jupyter(
    mock_webbroser,
    mock_subprocess,
    mock_emit,
    caplog,
    monkeypatch,
    titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled,
):
    """A valid --batch-request JSON file must be rendered into the generated edit
    notebook verbatim; with --no-jupyter no subprocess/browser is launched.
    """
    context: DataContext = titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled
    monkeypatch.chdir(os.path.dirname(context.root_directory))
    project_dir: str = context.root_directory
    uncommitted_dir: str = os.path.join(project_dir, "uncommitted")
    expectation_suite_name: str = "test_suite_name"
    batch_request: dict = {
        "datasource_name": "my_datasource",
        "data_connector_name": "my_basic_data_connector",
        "data_asset_name": "Titanic_1911",
    }
    batch_request_file_path: str = os.path.join(uncommitted_dir, f"batch_request.json")
    with open(batch_request_file_path, "w") as json_file:
        json.dump(batch_request, json_file)
    runner: CliRunner = CliRunner(mix_stderr=False)
    result: Result = runner.invoke(
        cli,
        f"""--v3-api suite new --expectation-suite {expectation_suite_name} --interactive --batch-request {batch_request_file_path} --no-jupyter """,
        catch_exceptions=False,
    )
    assert result.exit_code == 0
    stdout: str = result.stdout
    assert "Error" not in stdout
    expected_suite_path: str = os.path.join(
        project_dir, "expectations", f"{expectation_suite_name}.json"
    )
    assert os.path.isfile(expected_suite_path)
    expected_notebook_path: str = os.path.join(
        uncommitted_dir, f"edit_{expectation_suite_name}.ipynb"
    )
    assert os.path.isfile(expected_notebook_path)
    # Re-create the exact "batch_request = {...}" source text the notebook
    # generator emits, so it can be located in a notebook cell below.
    batch_request_string: str = (
        str(BatchRequest(**batch_request))
        .replace("{\n", "{\n ")
        .replace(",\n", ",\n ")
        .replace("\n}", ",\n}")
    )
    batch_request_string = fr"batch_request = {batch_request_string}"
    cells_of_interest_dict: Dict[int, dict] = find_code_in_notebook(
        nb=load_notebook_from_path(notebook_path=expected_notebook_path),
        search_string=batch_request_string,
    )
    assert len(cells_of_interest_dict) == 1
    cells_of_interest_dict: Dict[int, dict] = find_code_in_notebook(
        nb=load_notebook_from_path(notebook_path=expected_notebook_path),
        search_string="context.open_data_docs(resource_identifier=suite_identifier)",
    )
    assert not cells_of_interest_dict
    cells_of_interest_dict: Dict[int, dict] = find_code_in_notebook(
        nb=load_notebook_from_path(notebook_path=expected_notebook_path),
        search_string="context.open_data_docs(resource_identifier=validation_result_identifier)",
    )
    assert len(cells_of_interest_dict) == 1
    run_notebook(
        notebook_path=expected_notebook_path,
        notebook_dir=uncommitted_dir,
        string_to_be_replaced="context.open_data_docs(resource_identifier=validation_result_identifier)",
        replacement_string="",
    )
    context = DataContext(context_root_dir=project_dir)
    assert expectation_suite_name in context.list_expectation_suite_names()
    suite: ExpectationSuite = context.get_expectation_suite(
        expectation_suite_name=expectation_suite_name
    )
    assert suite.expectations == []
    assert mock_subprocess.call_count == 0
    assert mock_webbroser.call_count == 0
    assert mock_emit.call_count == 5
    assert mock_emit.call_args_list == [
        mock.call(
            {"event_payload": {}, "event": "data_context.__init__", "success": True}
        ),
        mock.call(
            {
                "event": "cli.suite.new.begin",
                "event_payload": {"api_version": "v3"},
                "success": True,
            }
        ),
        mock.call(
            {
                "event_payload": {
                    "anonymized_expectation_suite_name": "9df638a13b727807e51b13ec1839bcbe"
                },
                "event": "data_context.save_expectation_suite",
                "success": True,
            }
        ),
        mock.call(
            {
                "event": "cli.suite.new.end",
                "event_payload": {"api_version": "v3"},
                "success": True,
            }
        ),
        mock.call(
            {"event_payload": {}, "event": "data_context.__init__", "success": True}
        ),
    ]
    assert_no_logging_messages_or_tracebacks(
        my_caplog=caplog,
        click_result=result,
    )


@mock.patch(
    "great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit"
)
def test_suite_edit_without_suite_name_raises_error(
    mock_emit,
    monkeypatch,
    empty_data_context_stats_enabled,
):
    """This is really only testing click missing arguments"""
    context: DataContext = empty_data_context_stats_enabled
    monkeypatch.chdir(os.path.dirname(context.root_directory))
    runner: CliRunner = CliRunner(mix_stderr=False)
    result: Result = runner.invoke(cli, "--v3-api suite edit", catch_exceptions=False)
    assert result.exit_code == 2
    # Click changed the quoting style of this message between versions;
    # accept either form.
    assert (
        'Error: Missing argument "EXPECTATION_SUITE".' in result.stderr
        or "Error: Missing argument 'EXPECTATION_SUITE'." in result.stderr
    )
    assert mock_emit.call_count == 2
    assert mock_emit.call_args_list == [
        mock.call(
            {"event_payload": {}, "event": "data_context.__init__", "success": True}
        ),
        mock.call(
            {
                "event": "cli.suite.edit.begin",
                "event_payload": {"api_version": "v3"},
                "success": True,
            }
        ),
    ]


@mock.patch(
    "great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit"
)
def test_suite_edit_datasource_and_batch_request_error(
    mock_emit,
    monkeypatch,
    empty_data_context_stats_enabled,
):
    """This is really only testing click missing arguments"""
    context: DataContext = empty_data_context_stats_enabled
    monkeypatch.chdir(os.path.dirname(context.root_directory))
    expectation_suite_name: str = "test_suite_name"
    # noinspection PyUnusedLocal
    suite: ExpectationSuite = context.create_expectation_suite(
        expectation_suite_name=expectation_suite_name
    )
    assert (
        context.list_expectation_suites()[0].expectation_suite_name
        == expectation_suite_name
    )
    runner: CliRunner = CliRunner(mix_stderr=False)
    result: Result = runner.invoke(
        cli,
        f"--v3-api suite edit {expectation_suite_name} --datasource-name some_datasource_name --batch-request some_file.json --interactive",
        catch_exceptions=False,
    )
    assert result.exit_code == 1
    stdout: str = result.stdout
    assert (
        "Only one of --datasource-name DATASOURCE_NAME and --batch-request <path to JSON file> options can be used."
        in stdout
    )
    assert mock_emit.call_count == 3
    assert mock_emit.call_args_list == [
        mock.call(
            {"event_payload": {}, "event": "data_context.__init__", "success": True}
        ),
        mock.call(
            {
                "event": "cli.suite.edit.begin",
                "event_payload": {"api_version": "v3"},
                "success": True,
            }
        ),
        mock.call(
            {
                "event": "cli.suite.edit.end",
                "event_payload": {"api_version": "v3"},
                "success": False,
            }
        ),
    ]


@mock.patch(
    "great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit"
)
@mock.patch("subprocess.call", return_value=True, side_effect=None)
@mock.patch("webbrowser.open", return_value=True, side_effect=None)
def test_suite_edit_with_non_existent_suite_name_raises_error(
    mock_webbrowser,
    mock_subprocess,
    mock_emit,
    caplog,
    monkeypatch,
    empty_data_context_stats_enabled,
):
    """
    The command should:
    - exit with a clear error message
    - NOT open Data Docs
    - NOT open jupyter
    """
    context: DataContext = empty_data_context_stats_enabled
    assert not context.list_expectation_suites()
    monkeypatch.chdir(os.path.dirname(context.root_directory))
    runner: CliRunner = CliRunner(mix_stderr=False)
    result: Result = runner.invoke(
        cli,
        f"--v3-api suite edit not_a_real_suite",
        catch_exceptions=False,
    )
    assert result.exit_code == 1
    stdout: str = result.stdout
    assert "Could not find a suite named `not_a_real_suite`." in stdout
    assert "by running `great_expectations suite list`" in stdout
    assert mock_subprocess.call_count == 0
    assert mock_webbrowser.call_count == 0
    assert mock_emit.call_count == 3
    assert mock_emit.call_args_list == [
        mock.call(
            {"event_payload": {}, "event": "data_context.__init__", "success": True}
        ),
        mock.call(
            {
                "event": "cli.suite.edit.begin",
                "event_payload": {"api_version": "v3"},
                "success": True,
            }
        ),
        mock.call(
            {
                "event": "cli.suite.edit.end",
                "event_payload": {"api_version": "v3"},
                "success": False,
            }
        ),
    ]
    assert_no_logging_messages_or_tracebacks(
        my_caplog=caplog,
        click_result=result,
    )


@mock.patch(
    "great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit"
)
@mock.patch("subprocess.call", return_value=True, side_effect=None)
@mock.patch("webbrowser.open", return_value=True, side_effect=None)
def test_suite_edit_with_non_existent_datasource_shows_helpful_error_message(
    mock_webbrowser,
    mock_subprocess,
    mock_emit,
    caplog,
    monkeypatch,
    empty_data_context_stats_enabled,
):
    """
    The command should:
    - exit with a clear error message
    - NOT open Data Docs
    - NOT open jupyter
    """
    context: DataContext = empty_data_context_stats_enabled
    monkeypatch.chdir(os.path.dirname(context.root_directory))
    expectation_suite_name: str = "test_suite_name"
    # noinspection PyUnusedLocal
    suite: ExpectationSuite = context.create_expectation_suite(
        expectation_suite_name=expectation_suite_name
    )
    assert (
        context.list_expectation_suites()[0].expectation_suite_name
        == expectation_suite_name
    )
    runner: CliRunner = CliRunner(mix_stderr=False)
    result: Result = runner.invoke(
        cli,
        f"--v3-api suite edit {expectation_suite_name} --interactive --datasource-name not_real",
        catch_exceptions=False,
    )
    assert result.exit_code == 1
    stdout: str = result.stdout
    assert (
        "Unable to load datasource `not_real` -- no configuration found or invalid configuration."
        in stdout
    )
    assert mock_subprocess.call_count == 0
    assert mock_webbrowser.call_count == 0
    assert mock_emit.call_count == 3
    assert mock_emit.call_args_list == [
        mock.call(
            {"event_payload": {}, "event": "data_context.__init__", "success": True}
        ),
        mock.call(
            {
                "event": "cli.suite.edit.begin",
                "event_payload": {"api_version": "v3"},
                "success": True,
            }
        ),
        mock.call(
            {
                "event": "cli.suite.edit.end",
                "event_payload": {"api_version": "v3"},
                "success": False,
            }
        ),
    ]
    assert_no_logging_messages_or_tracebacks(
        my_caplog=caplog,
        click_result=result,
    )


@mock.patch(
    "great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit"
)
@mock.patch("subprocess.call", return_value=True, side_effect=None)
@mock.patch("webbrowser.open", return_value=True, side_effect=None)
def test_suite_edit_multiple_datasources_with_no_additional_args_without_citations_runs_notebook_opens_jupyter(
    mock_webbrowser,
    mock_subprocess,
    mock_emit,
    caplog,
    monkeypatch,
    titanic_v013_multi_datasource_pandas_data_context_with_checkpoints_v1_with_empty_store_stats_enabled,
):
    """
    Here we verify that the "suite edit" command helps the user to specify batch_request
    when it is called without the optional command-line arguments that specify the batch.

    First, we call the "suite new" command to create the expectation suite our test
    will edit -- this step is a just a setup.

    We then call the "suite edit" command without any optional command-line arguments.
    This means that the command will help us specify batch_request interactively.

    The data context has two datasources -- we choose one of them.  After that, we select
    a data connector and, finally, select a data asset from the list.

    The command should:
    - NOT open Data Docs
    - open jupyter
    """
    context: DataContext = titanic_v013_multi_datasource_pandas_data_context_with_checkpoints_v1_with_empty_store_stats_enabled
    monkeypatch.chdir(os.path.dirname(context.root_directory))
    project_dir: str = context.root_directory
    uncommitted_dir: str = os.path.join(project_dir, "uncommitted")
    expectation_suite_name: str = "test_suite_name"
    batch_request: dict = {
        "datasource_name": "my_datasource",
        "data_connector_name": "my_basic_data_connector",
        "data_asset_name": "Titanic_1911",
        "limit": 1000,
    }
    # Re-create the exact "batch_request = {...}" source text the notebook
    # generator emits, so it can be located in a notebook cell below.
    batch_request_string: str = (
        str(BatchRequest(**batch_request))
        .replace("{\n", "{\n ")
        .replace(",\n", ",\n ")
        .replace("\n}", ",\n}")
    )
    batch_request_string = fr"batch_request = {batch_request_string}"
    runner: CliRunner = CliRunner(mix_stderr=False)
    result: Result = runner.invoke(
        cli,
        [
            "--v3-api",
            "suite",
            "new",
            "--expectation-suite",
            f"{expectation_suite_name}",
            "--interactive",
            "--no-jupyter",
        ],
        input="2\n1\n1\n\n",
        catch_exceptions=False,
    )
    assert result.exit_code == 0
    stdout: str = result.stdout
    assert "A batch of data is required to edit the suite" in stdout
    assert "Select a datasource" in stdout
    assert mock_webbrowser.call_count == 0
    mock_webbrowser.reset_mock()
    assert mock_subprocess.call_count == 0
    mock_subprocess.reset_mock()

    # remove the citations from the suite
    context = DataContext(context_root_dir=project_dir)
    suite: ExpectationSuite = context.get_expectation_suite(
        expectation_suite_name=expectation_suite_name
    )
    assert isinstance(suite, ExpectationSuite)
    suite.meta.pop("citations", None)
    context.save_expectation_suite(expectation_suite=suite)

    # Actual testing really starts here
    runner = CliRunner(mix_stderr=False)
    monkeypatch.chdir(os.path.dirname(context.root_directory))
    result = runner.invoke(
        cli,
        [
            "--v3-api",
            "suite",
            "edit",
            f"{expectation_suite_name}",
            "--interactive",
        ],
        input="2\n1\n1\n\n",
        catch_exceptions=False,
    )
    assert result.exit_code == 0
    stdout = result.stdout
    assert "A batch of data is required to edit the suite" in stdout
    assert "Select a datasource" in stdout
    expected_notebook_path: str = os.path.join(
        uncommitted_dir, f"edit_{expectation_suite_name}.ipynb"
    )
    assert os.path.isfile(expected_notebook_path)
    expected_suite_path: str = os.path.join(
        project_dir, "expectations", f"{expectation_suite_name}.json"
    )
    assert os.path.isfile(expected_suite_path)
    cells_of_interest_dict: Dict[int, dict] = find_code_in_notebook(
        nb=load_notebook_from_path(notebook_path=expected_notebook_path),
        search_string=batch_request_string,
    )
    assert len(cells_of_interest_dict) == 1
    cells_of_interest_dict: Dict[int, dict] = find_code_in_notebook(
        nb=load_notebook_from_path(notebook_path=expected_notebook_path),
        search_string="context.open_data_docs(resource_identifier=suite_identifier)",
    )
    assert not cells_of_interest_dict
    cells_of_interest_dict: Dict[int, dict] = find_code_in_notebook(
        nb=load_notebook_from_path(notebook_path=expected_notebook_path),
        search_string="context.open_data_docs(resource_identifier=validation_result_identifier)",
    )
    assert len(cells_of_interest_dict) == 1
    run_notebook(
        notebook_path=expected_notebook_path,
        notebook_dir=uncommitted_dir,
        string_to_be_replaced="context.open_data_docs(resource_identifier=validation_result_identifier)",
        replacement_string="",
    )
    assert mock_subprocess.call_count == 1
    assert mock_webbrowser.call_count == 0
    assert mock_emit.call_count == 10
    assert mock_emit.call_args_list == [
        mock.call(
            {"event_payload": {}, "event": "data_context.__init__", "success": True}
        ),
        mock.call(
            {
                "event": "cli.suite.new.begin",
                "event_payload": {"api_version": "v3"},
                "success": True,
            }
        ),
        mock.call(
            {
                "event_payload": {
                    "anonymized_expectation_suite_name": "9df638a13b727807e51b13ec1839bcbe"
                },
                "event": "data_context.save_expectation_suite",
                "success": True,
            }
        ),
        mock.call(
            {
                "event": "cli.suite.new.end",
                "event_payload": {"api_version": "v3"},
                "success": True,
            }
        ),
        mock.call(
            {"event_payload": {}, "event": "data_context.__init__",
             "success": True}
        ),
        mock.call(
            {
                "event_payload": {
                    "anonymized_expectation_suite_name": "9df638a13b727807e51b13ec1839bcbe"
                },
                "event": "data_context.save_expectation_suite",
                "success": True,
            }
        ),
        mock.call(
            {"event_payload": {}, "event": "data_context.__init__", "success": True}
        ),
        mock.call(
            {
                "event": "cli.suite.edit.begin",
                "event_payload": {"api_version": "v3"},
                "success": True,
            }
        ),
        mock.call(
            {
                "event_payload": {
                    "anonymized_expectation_suite_name": "9df638a13b727807e51b13ec1839bcbe"
                },
                "event": "data_context.save_expectation_suite",
                "success": True,
            }
        ),
        mock.call(
            {
                "event": "cli.suite.edit.end",
                "event_payload": {
                    "anonymized_expectation_suite_name": "9df638a13b727807e51b13ec1839bcbe",
                    "api_version": "v3",
                },
                "success": True,
            }
        ),
    ]
    assert_no_logging_messages_or_tracebacks(
        my_caplog=caplog,
        click_result=result,
    )


@mock.patch(
    "great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit"
)
@mock.patch("subprocess.call", return_value=True, side_effect=None)
@mock.patch("webbrowser.open", return_value=True, side_effect=None)
def test_suite_edit_multiple_datasources_with_no_additional_args_with_citations_runs_notebook_opens_jupyter(
    mock_webbrowser,
    mock_subprocess,
    mock_emit,
    caplog,
    monkeypatch,
    titanic_v013_multi_datasource_pandas_data_context_with_checkpoints_v1_with_empty_store_stats_enabled,
):
    """
    Here we verify that the "suite edit" command uses the batch_request found in
    citations in the existing suite when it is called without the optional
    command-line arguments that specify the batch.

    First, we call the "suite new" command to create the expectation suite our test
    will edit -- this step is a just a setup.

    We then call the "suite edit" command without any optional command-line-arguments.

    The command should:
    - NOT open Data Docs
    - open jupyter
      (NOTE(review): an earlier version of this docstring said "NOT open jupyter",
      but the test name and the "mock_subprocess.call_count == 1" assertion below
      show that jupyter IS launched)
    """
    context: DataContext = titanic_v013_multi_datasource_pandas_data_context_with_checkpoints_v1_with_empty_store_stats_enabled
    monkeypatch.chdir(os.path.dirname(context.root_directory))
    project_dir: str = context.root_directory
    uncommitted_dir: str = os.path.join(project_dir, "uncommitted")
    expectation_suite_name: str = "test_suite_name"
    batch_request: dict = {
        "datasource_name": "my_datasource",
        "data_connector_name": "my_basic_data_connector",
        "data_asset_name": "Titanic_1911",
        "limit": 1000,
    }
    # Re-create the exact "batch_request = {...}" source text the notebook
    # generator emits, so it can be located in a notebook cell below.
    batch_request_string: str = (
        str(BatchRequest(**batch_request))
        .replace("{\n", "{\n ")
        .replace(",\n", ",\n ")
        .replace("\n}", ",\n}")
    )
    batch_request_string = fr"batch_request = {batch_request_string}"
    runner: CliRunner = CliRunner(mix_stderr=False)
    result: Result = runner.invoke(
        cli,
        [
            "--v3-api",
            "suite",
            "new",
            "--expectation-suite",
            f"{expectation_suite_name}",
            "--interactive",
            "--no-jupyter",
        ],
        input="2\n1\n1\n\n",
        catch_exceptions=False,
    )
    assert result.exit_code == 0
    stdout: str = result.stdout
    assert "A batch of data is required to edit the suite" in stdout
    assert "Select a datasource" in stdout
    assert mock_webbrowser.call_count == 0
    mock_webbrowser.reset_mock()
    assert mock_subprocess.call_count == 0
    mock_subprocess.reset_mock()
    # Citations written by "suite new" are deliberately kept in the suite here,
    # so "suite edit" can reuse their batch_request without prompting.
    context = DataContext(context_root_dir=project_dir)
    suite: ExpectationSuite = context.get_expectation_suite(
        expectation_suite_name=expectation_suite_name
    )
    assert isinstance(suite, ExpectationSuite)

    # Actual testing really starts here
    runner = CliRunner(mix_stderr=False)
    monkeypatch.chdir(os.path.dirname(context.root_directory))
    result = runner.invoke(
        cli,
        [
            "--v3-api",
            "suite",
            "edit",
            f"{expectation_suite_name}",
            "--interactive",
        ],
        catch_exceptions=False,
    )
    assert result.exit_code == 0
    stdout = result.stdout
    assert "A batch of data is required to edit the suite" not in stdout
    assert "Select a datasource" not in stdout
    expected_notebook_path: str = os.path.join(
        uncommitted_dir, f"edit_{expectation_suite_name}.ipynb"
    )
    assert os.path.isfile(expected_notebook_path)
    expected_suite_path: str = os.path.join(
        project_dir, "expectations", f"{expectation_suite_name}.json"
    )
    assert os.path.isfile(expected_suite_path)
    cells_of_interest_dict: Dict[int, dict] = find_code_in_notebook(
        nb=load_notebook_from_path(notebook_path=expected_notebook_path),
        search_string=batch_request_string,
    )
    assert len(cells_of_interest_dict) == 1
    cells_of_interest_dict: Dict[int, dict] = find_code_in_notebook(
        nb=load_notebook_from_path(notebook_path=expected_notebook_path),
        search_string="context.open_data_docs(resource_identifier=suite_identifier)",
    )
    assert not cells_of_interest_dict
    cells_of_interest_dict: Dict[int, dict] = find_code_in_notebook(
        nb=load_notebook_from_path(notebook_path=expected_notebook_path),
        search_string="context.open_data_docs(resource_identifier=validation_result_identifier)",
    )
    assert len(cells_of_interest_dict) == 1
    run_notebook(
        notebook_path=expected_notebook_path,
        notebook_dir=uncommitted_dir,
        string_to_be_replaced="context.open_data_docs(resource_identifier=validation_result_identifier)",
        replacement_string="",
    )
    assert mock_subprocess.call_count == 1
    assert mock_webbrowser.call_count == 0
    assert mock_emit.call_count == 8
    assert mock_emit.call_args_list == [
        mock.call(
            {"event_payload": {}, "event": "data_context.__init__", "success": True}
        ),
        mock.call(
            {
                "event": "cli.suite.new.begin",
                "event_payload": {"api_version": "v3"},
                "success": True,
            }
        ),
        mock.call(
            {
                "event_payload": {
                    "anonymized_expectation_suite_name": "9df638a13b727807e51b13ec1839bcbe"
                },
                "event": "data_context.save_expectation_suite",
                "success": True,
            }
        ),
        mock.call(
            {
                "event": "cli.suite.new.end",
                "event_payload": {"api_version": "v3"},
                "success": True,
            }
        ),
        mock.call(
            {"event_payload": {}, "event": "data_context.__init__", "success": True}
        ),
        mock.call(
            {"event_payload": {}, "event": "data_context.__init__", "success": True}
        ),
        mock.call(
            {
                "event":
                 "cli.suite.edit.begin",
                "event_payload": {"api_version": "v3"},
                "success": True,
            }
        ),
        mock.call(
            {
                "event": "cli.suite.edit.end",
                "event_payload": {
                    "anonymized_expectation_suite_name": "9df638a13b727807e51b13ec1839bcbe",
                    "api_version": "v3",
                },
                "success": True,
            }
        ),
    ]
    assert_no_logging_messages_or_tracebacks(
        my_caplog=caplog,
        click_result=result,
    )


@mock.patch(
    "great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit"
)
@mock.patch("subprocess.call", return_value=True, side_effect=None)
@mock.patch("webbrowser.open", return_value=True, side_effect=None)
def test_suite_edit_multiple_datasources_with_sql_with_no_additional_args_without_citations_runs_notebook_opens_jupyter(
    mock_webbrowser,
    mock_subprocess,
    mock_emit,
    caplog,
    monkeypatch,
    titanic_v013_multi_datasource_multi_execution_engine_data_context_with_checkpoints_v1_with_empty_store_stats_enabled,
):
    """
    Here we verify that the "suite edit" command helps the user to specify batch_request
    when it is called without the optional command-line arguments that specify the batch.

    First, we call the "suite new" command to create the expectation suite our test
    will edit -- this step is a just a setup (we use an SQL datasource for this test).

    We then call the "suite edit" command without any optional command-line arguments.
    This means that the command will help us specify batch_request interactively.

    The data context has two datasources -- we choose one of them.  After that, we select
    a data connector and, finally, select a data asset from the list.

    The command should:
    - NOT open Data Docs
    - open jupyter
    """
    context: DataContext = titanic_v013_multi_datasource_multi_execution_engine_data_context_with_checkpoints_v1_with_empty_store_stats_enabled
    monkeypatch.chdir(os.path.dirname(context.root_directory))
    project_dir: str = context.root_directory
    uncommitted_dir: str = os.path.join(project_dir, "uncommitted")
    expectation_suite_name: str = "test_suite_name"
    batch_request: dict = {
        "datasource_name": "my_sqlite_db_datasource",
        "data_connector_name": "whole_table",
        "data_asset_name": "titanic",
        "limit": 1000,
    }
    # Re-create the exact "batch_request = {...}" source text the notebook
    # generator emits, so it can be located in a notebook cell below.
    batch_request_string: str = (
        str(BatchRequest(**batch_request))
        .replace("{\n", "{\n ")
        .replace(",\n", ",\n ")
        .replace("\n}", ",\n}")
    )
    batch_request_string = fr"batch_request = {batch_request_string}"
    runner: CliRunner = CliRunner(mix_stderr=False)
    result: Result = runner.invoke(
        cli,
        [
            "--v3-api",
            "suite",
            "new",
            "--expectation-suite",
            f"{expectation_suite_name}",
            "--interactive",
            "--no-jupyter",
        ],
        input="3\n2\ny\n2\n",
        catch_exceptions=False,
    )
    assert result.exit_code == 0
    stdout: str = result.stdout
    assert "A batch of data is required to edit the suite" in stdout
    assert "Select a datasource" in stdout
    assert mock_webbrowser.call_count == 0
    mock_webbrowser.reset_mock()
    assert mock_subprocess.call_count == 0
    mock_subprocess.reset_mock()

    # remove the citations from the suite
    context = DataContext(context_root_dir=project_dir)
    suite: ExpectationSuite = context.get_expectation_suite(
        expectation_suite_name=expectation_suite_name
    )
    assert isinstance(suite, ExpectationSuite)
    suite.meta.pop("citations", None)
    context.save_expectation_suite(expectation_suite=suite)

    # Actual testing really starts here
    runner = CliRunner(mix_stderr=False)
    monkeypatch.chdir(os.path.dirname(context.root_directory))
    result = runner.invoke(
        cli,
        [
            "--v3-api",
            "suite",
            "edit",
            f"{expectation_suite_name}",
            "--interactive",
        ],
        input="3\n2\ny\n2\n",
        catch_exceptions=False,
    )
    assert result.exit_code == 0
    stdout = result.stdout
    assert "A batch of data is required to edit the suite" in stdout
    assert "Select a datasource" in stdout
    expected_notebook_path: str = os.path.join(
        uncommitted_dir, f"edit_{expectation_suite_name}.ipynb"
    )
    assert os.path.isfile(expected_notebook_path)
    expected_suite_path: str = os.path.join(
        project_dir, "expectations", f"{expectation_suite_name}.json"
    )
    assert os.path.isfile(expected_suite_path)
    cells_of_interest_dict: Dict[int, dict] = find_code_in_notebook(
        nb=load_notebook_from_path(notebook_path=expected_notebook_path),
        search_string=batch_request_string,
    )
    assert len(cells_of_interest_dict) == 1
    cells_of_interest_dict: Dict[int, dict] = find_code_in_notebook(
        nb=load_notebook_from_path(notebook_path=expected_notebook_path),
        search_string="context.open_data_docs(resource_identifier=suite_identifier)",
    )
    assert not cells_of_interest_dict
    cells_of_interest_dict: Dict[int, dict] = find_code_in_notebook(
        nb=load_notebook_from_path(notebook_path=expected_notebook_path),
        search_string="context.open_data_docs(resource_identifier=validation_result_identifier)",
    )
    assert len(cells_of_interest_dict) == 1
    run_notebook(
        notebook_path=expected_notebook_path,
        notebook_dir=uncommitted_dir,
        string_to_be_replaced="context.open_data_docs(resource_identifier=validation_result_identifier)",
        replacement_string="",
    )
    assert mock_subprocess.call_count == 1
    assert mock_webbrowser.call_count == 0
    assert mock_emit.call_count == 10
    assert mock_emit.call_args_list == [
        mock.call(
            {"event_payload": {}, "event": "data_context.__init__", "success": True}
        ),
        mock.call(
            {
                "event": "cli.suite.new.begin",
                "event_payload": {"api_version": "v3"},
                "success": True,
            }
        ),
        mock.call(
            {
                "event_payload": {
                    "anonymized_expectation_suite_name": "9df638a13b727807e51b13ec1839bcbe"
                },
                "event": "data_context.save_expectation_suite",
                "success": True,
            }
        ),
        mock.call(
            {
                "event": "cli.suite.new.end",
                "event_payload": {"api_version": "v3"},
                "success": True,
            }
        ),
        mock.call(
            {"event_payload": {}, "event":
              "data_context.__init__", "success": True}
        ),
        mock.call(
            {
                "event_payload": {
                    "anonymized_expectation_suite_name": "9df638a13b727807e51b13ec1839bcbe"
                },
                "event": "data_context.save_expectation_suite",
                "success": True,
            }
        ),
        mock.call(
            {"event_payload": {}, "event": "data_context.__init__", "success": True}
        ),
        mock.call(
            {
                "event": "cli.suite.edit.begin",
                "event_payload": {"api_version": "v3"},
                "success": True,
            }
        ),
        mock.call(
            {
                "event_payload": {
                    "anonymized_expectation_suite_name": "9df638a13b727807e51b13ec1839bcbe"
                },
                "event": "data_context.save_expectation_suite",
                "success": True,
            }
        ),
        mock.call(
            {
                "event": "cli.suite.edit.end",
                "event_payload": {
                    "anonymized_expectation_suite_name": "9df638a13b727807e51b13ec1839bcbe",
                    "api_version": "v3",
                },
                "success": True,
            }
        ),
    ]
    assert_no_logging_messages_or_tracebacks(
        my_caplog=caplog,
        click_result=result,
    )


@mock.patch(
    "great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit"
)
@mock.patch("subprocess.call", return_value=True, side_effect=None)
@mock.patch("webbrowser.open", return_value=True, side_effect=None)
def test_suite_edit_multiple_datasources_with_sql_with_no_additional_args_with_citations_runs_notebook_opens_jupyter(
    mock_webbrowser,
    mock_subprocess,
    mock_emit,
    caplog,
    monkeypatch,
    titanic_v013_multi_datasource_multi_execution_engine_data_context_with_checkpoints_v1_with_empty_store_stats_enabled,
):
    """
    Here we verify that the "suite edit" command uses the batch_request found in
    citations in the existing suite when it is called without the optional
    command-line arguments that specify the batch.

    First, we call the "suite new" command to create the expectation suite our test
    will edit -- this step is a just a setup (we use an SQL datasource for this test).

    We then call the "suite edit" command without any optional command-line-arguments.
The command should: - NOT open Data Docs - NOT open jupyter """ context: DataContext = titanic_v013_multi_datasource_multi_execution_engine_data_context_with_checkpoints_v1_with_empty_store_stats_enabled monkeypatch.chdir(os.path.dirname(context.root_directory)) project_dir: str = context.root_directory uncommitted_dir: str = os.path.join(project_dir, "uncommitted") expectation_suite_name: str = "test_suite_name" batch_request: dict = { "datasource_name": "my_sqlite_db_datasource", "data_connector_name": "whole_table", "data_asset_name": "titanic", "limit": 1000, } batch_request_string: str = ( str(BatchRequest(**batch_request)) .replace("{\n", "{\n ") .replace(",\n", ",\n ") .replace("\n}", ",\n}") ) batch_request_string = fr"batch_request = {batch_request_string}" runner: CliRunner = CliRunner(mix_stderr=False) result: Result = runner.invoke( cli, [ "--v3-api", "suite", "new", "--expectation-suite", f"{expectation_suite_name}", "--interactive", "--no-jupyter", ], input="3\n2\ny\n2\n", catch_exceptions=False, ) assert result.exit_code == 0 stdout: str = result.stdout assert "A batch of data is required to edit the suite" in stdout assert "Select a datasource" in stdout assert mock_webbrowser.call_count == 0 mock_webbrowser.reset_mock() assert mock_subprocess.call_count == 0 mock_subprocess.reset_mock() context = DataContext(context_root_dir=project_dir) suite: ExpectationSuite = context.get_expectation_suite( expectation_suite_name=expectation_suite_name ) assert isinstance(suite, ExpectationSuite) # Actual testing really starts here runner = CliRunner(mix_stderr=False) monkeypatch.chdir(os.path.dirname(context.root_directory)) result = runner.invoke( cli, [ "--v3-api", "suite", "edit", f"{expectation_suite_name}", "--interactive", ], catch_exceptions=False, ) assert result.exit_code == 0 stdout = result.stdout assert "A batch of data is required to edit the suite" not in stdout assert "Select a datasource" not in stdout expected_notebook_path: str = os.path.join( 
uncommitted_dir, f"edit_{expectation_suite_name}.ipynb" ) assert os.path.isfile(expected_notebook_path) expected_suite_path: str = os.path.join( project_dir, "expectations", f"{expectation_suite_name}.json" ) assert os.path.isfile(expected_suite_path) cells_of_interest_dict: Dict[int, dict] = find_code_in_notebook( nb=load_notebook_from_path(notebook_path=expected_notebook_path), search_string=batch_request_string, ) assert len(cells_of_interest_dict) == 1 cells_of_interest_dict: Dict[int, dict] = find_code_in_notebook( nb=load_notebook_from_path(notebook_path=expected_notebook_path), search_string="context.open_data_docs(resource_identifier=suite_identifier)", ) assert not cells_of_interest_dict cells_of_interest_dict: Dict[int, dict] = find_code_in_notebook( nb=load_notebook_from_path(notebook_path=expected_notebook_path), search_string="context.open_data_docs(resource_identifier=validation_result_identifier)", ) assert len(cells_of_interest_dict) == 1 run_notebook( notebook_path=expected_notebook_path, notebook_dir=uncommitted_dir, string_to_be_replaced="context.open_data_docs(resource_identifier=validation_result_identifier)", replacement_string="", ) assert mock_subprocess.call_count == 1 assert mock_webbrowser.call_count == 0 assert mock_emit.call_count == 8 assert mock_emit.call_args_list == [ mock.call( {"event_payload": {}, "event": "data_context.__init__", "success": True} ), mock.call( { "event": "cli.suite.new.begin", "event_payload": {"api_version": "v3"}, "success": True, } ), mock.call( { "event_payload": { "anonymized_expectation_suite_name": "9df638a13b727807e51b13ec1839bcbe" }, "event": "data_context.save_expectation_suite", "success": True, } ), mock.call( { "event": "cli.suite.new.end", "event_payload": {"api_version": "v3"}, "success": True, } ), mock.call( {"event_payload": {}, "event": "data_context.__init__", "success": True} ), mock.call( {"event_payload": {}, "event": "data_context.__init__", "success": True} ), mock.call( { "event": 
"cli.suite.edit.begin", "event_payload": {"api_version": "v3"}, "success": True, } ), mock.call( { "event": "cli.suite.edit.end", "event_payload": { "anonymized_expectation_suite_name": "9df638a13b727807e51b13ec1839bcbe", "api_version": "v3", }, "success": True, } ), ] assert_no_logging_messages_or_tracebacks( my_caplog=caplog, click_result=result, ) @mock.patch( "great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit" ) @mock.patch("subprocess.call", return_value=True, side_effect=None) @mock.patch("webbrowser.open", return_value=True, side_effect=None) def test_suite_edit_interactive_batch_request_without_datasource_json_file_raises_helpful_error( mock_webbroser, mock_subprocess, mock_emit, caplog, monkeypatch, empty_data_context_stats_enabled, ): context: DataContext = empty_data_context_stats_enabled monkeypatch.chdir(os.path.dirname(context.root_directory)) project_dir: str = context.root_directory uncommitted_dir: str = os.path.join(project_dir, "uncommitted") expectation_suite_name: str = "test_suite_name" # noinspection PyUnusedLocal suite: ExpectationSuite = context.create_expectation_suite( expectation_suite_name=expectation_suite_name ) assert ( context.list_expectation_suites()[0].expectation_suite_name == expectation_suite_name ) batch_request: dict = { "data_connector_name": "my_basic_data_connector", "data_asset_name": "Titanic_1911", } batch_request_file_path: str = os.path.join( uncommitted_dir, f"batch_request_missing_datasource.json" ) with open(batch_request_file_path, "w") as json_file: json.dump(batch_request, json_file) runner: CliRunner = CliRunner(mix_stderr=False) result: Result = runner.invoke( cli, f"""--v3-api suite edit {expectation_suite_name} --interactive --batch-request {batch_request_file_path} --no-jupyter """, catch_exceptions=False, ) assert result.exit_code == 1 stdout: str = result.stdout assert "A batch of data is required to edit the suite" not in stdout assert "Select a datasource" not in 
stdout assert ( "Please check that your batch_request is valid and is able to load a batch." in stdout ) assert 'The type of an datasource name must be a string (Python "str").' in stdout assert mock_subprocess.call_count == 0 assert mock_webbroser.call_count == 0 assert mock_emit.call_count == 3 assert mock_emit.call_args_list == [ mock.call( {"event_payload": {}, "event": "data_context.__init__", "success": True} ), mock.call( { "event": "cli.suite.edit.begin", "event_payload": {"api_version": "v3"}, "success": True, } ), mock.call( { "event": "cli.suite.edit.end", "event_payload": {"api_version": "v3"}, "success": False, } ), ] assert_no_logging_messages_or_tracebacks( my_caplog=caplog, click_result=result, ) @mock.patch( "great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit" ) def test_suite_list_with_zero_suites( mock_emit, caplog, monkeypatch, empty_data_context_stats_enabled ): context: DataContext = empty_data_context_stats_enabled config_file_path: str = os.path.join( context.root_directory, "great_expectations.yml" ) assert os.path.exists(config_file_path) monkeypatch.chdir(os.path.dirname(context.root_directory)) runner: CliRunner = CliRunner(mix_stderr=False) result: Result = runner.invoke( cli, f"--v3-api suite list", catch_exceptions=False, ) assert result.exit_code == 0 stdout: str = result.stdout assert "No Expectation Suites found" in stdout assert mock_emit.call_count == 3 assert mock_emit.call_args_list == [ mock.call( {"event_payload": {}, "event": "data_context.__init__", "success": True} ), mock.call( { "event": "cli.suite.list.begin", "event_payload": {"api_version": "v3"}, "success": True, } ), mock.call( { "event": "cli.suite.list.end", "event_payload": {"api_version": "v3"}, "success": True, } ), ] assert_no_logging_messages_or_tracebacks( my_caplog=caplog, click_result=result, ) @mock.patch( "great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit" ) def 
test_suite_list_with_one_suite( mock_emit, caplog, monkeypatch, empty_data_context_stats_enabled ): context: DataContext = empty_data_context_stats_enabled monkeypatch.chdir(os.path.dirname(context.root_directory)) project_dir: str = context.root_directory config_file_path: str = os.path.join(project_dir, "great_expectations.yml") assert os.path.exists(config_file_path) expectation_suite_dir_name: str = "a_dir" expectation_suite_name: str = "test_suite_name" # noinspection PyUnusedLocal suite: ExpectationSuite = context.create_expectation_suite( expectation_suite_name=f"{expectation_suite_dir_name}.{expectation_suite_name}" ) runner: CliRunner = CliRunner(mix_stderr=False) result: Result = runner.invoke( cli, f"--v3-api suite list", catch_exceptions=False, ) assert result.exit_code == 0 stdout: str = result.stdout assert "1 Expectation Suite found" in stdout assert f"{expectation_suite_dir_name}.{expectation_suite_name}" in stdout assert mock_emit.call_count == 3 assert mock_emit.call_args_list == [ mock.call( {"event_payload": {}, "event": "data_context.__init__", "success": True} ), mock.call( { "event": "cli.suite.list.begin", "event_payload": {"api_version": "v3"}, "success": True, } ), mock.call( { "event": "cli.suite.list.end", "event_payload": {"api_version": "v3"}, "success": True, } ), ] assert_no_logging_messages_or_tracebacks( my_caplog=caplog, click_result=result, ) @mock.patch( "great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit" ) def test_suite_list_with_multiple_suites( mock_emit, caplog, monkeypatch, empty_data_context_stats_enabled ): context: DataContext = empty_data_context_stats_enabled monkeypatch.chdir(os.path.dirname(context.root_directory)) project_dir: str = context.root_directory # noinspection PyUnusedLocal suite_0: ExpectationSuite = context.create_expectation_suite( expectation_suite_name="a.warning" ) # noinspection PyUnusedLocal suite_1: ExpectationSuite = context.create_expectation_suite( 
expectation_suite_name="b.warning" ) # noinspection PyUnusedLocal suite_2: ExpectationSuite = context.create_expectation_suite( expectation_suite_name="c.warning" ) config_file_path: str = os.path.join(project_dir, "great_expectations.yml") assert os.path.exists(config_file_path) runner: CliRunner = CliRunner(mix_stderr=False) result: Result = runner.invoke( cli, f"--v3-api suite list", catch_exceptions=False, ) assert result.exit_code == 0 stdout: str = result.stdout assert "3 Expectation Suites found:" in stdout assert "a.warning" in stdout assert "b.warning" in stdout assert "c.warning" in stdout assert mock_emit.call_count == 3 assert mock_emit.call_args_list == [ mock.call( {"event_payload": {}, "event": "data_context.__init__", "success": True} ), mock.call( { "event": "cli.suite.list.begin", "event_payload": {"api_version": "v3"}, "success": True, } ), mock.call( { "event": "cli.suite.list.end", "event_payload": {"api_version": "v3"}, "success": True, } ), ] assert_no_logging_messages_or_tracebacks( my_caplog=caplog, click_result=result, ) @mock.patch( "great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit" ) def test_suite_delete_with_zero_suites( mock_emit, caplog, monkeypatch, empty_data_context_stats_enabled ): context: DataContext = empty_data_context_stats_enabled monkeypatch.chdir(os.path.dirname(context.root_directory)) runner: CliRunner = CliRunner(mix_stderr=False) result: Result = runner.invoke( cli, f"--v3-api suite delete not_a_suite", catch_exceptions=False, ) assert result.exit_code == 1 stdout: str = result.stdout assert "No expectation suites found in the project" in stdout assert mock_emit.call_count == 3 assert mock_emit.call_args_list == [ mock.call( {"event_payload": {}, "event": "data_context.__init__", "success": True} ), mock.call( { "event": "cli.suite.delete.begin", "event_payload": {"api_version": "v3"}, "success": True, } ), mock.call( { "event": "cli.suite.delete.end", "event_payload": 
{"api_version": "v3"}, "success": False, } ), ] assert_no_logging_messages_or_tracebacks( my_caplog=caplog, click_result=result, ) @mock.patch( "great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit" ) def test_suite_delete_with_non_existent_suite( mock_emit, caplog, monkeypatch, empty_data_context_stats_enabled ): context: DataContext = empty_data_context_stats_enabled monkeypatch.chdir(os.path.dirname(context.root_directory)) expectation_suite_name: str = "test_suite_name" suite: ExpectationSuite = context.create_expectation_suite( expectation_suite_name=expectation_suite_name ) context.save_expectation_suite(expectation_suite=suite) assert ( context.list_expectation_suites()[0].expectation_suite_name == expectation_suite_name ) mock_emit.reset_mock() runner: CliRunner = CliRunner(mix_stderr=False) result: Result = runner.invoke( cli, f"--v3-api suite delete not_a_suite", catch_exceptions=False, ) assert result.exit_code == 1 stdout: str = result.stdout assert "No expectation suite named not_a_suite found" in stdout assert mock_emit.call_count == 3 assert mock_emit.call_args_list == [ mock.call( {"event_payload": {}, "event": "data_context.__init__", "success": True} ), mock.call( { "event": "cli.suite.delete.begin", "event_payload": {"api_version": "v3"}, "success": True, } ), mock.call( { "event": "cli.suite.delete.end", "event_payload": {"api_version": "v3"}, "success": False, } ), ] assert_no_logging_messages_or_tracebacks( my_caplog=caplog, click_result=result, ) @mock.patch( "great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit" ) def test_suite_delete_with_one_suite( mock_emit, caplog, monkeypatch, empty_data_context_stats_enabled ): context: DataContext = empty_data_context_stats_enabled monkeypatch.chdir(os.path.dirname(context.root_directory)) project_dir: str = empty_data_context_stats_enabled.root_directory expectation_suite_dir_name: str = "a_dir" expectation_suite_name: str = 
"test_suite_name" # noinspection PyUnusedLocal suite: ExpectationSuite = context.create_expectation_suite( expectation_suite_name=f"{expectation_suite_dir_name}.{expectation_suite_name}" ) context.save_expectation_suite(expectation_suite=suite) assert ( context.list_expectation_suites()[0].expectation_suite_name == f"{expectation_suite_dir_name}.{expectation_suite_name}" ) mock_emit.reset_mock() suite_dir: str = os.path.join( project_dir, "expectations", expectation_suite_dir_name ) suite_path: str = os.path.join(suite_dir, f"{expectation_suite_name}.json") assert os.path.isfile(suite_path) runner: CliRunner = CliRunner(mix_stderr=False) result: Result = runner.invoke( cli, f"--v3-api suite delete {expectation_suite_dir_name}.{expectation_suite_name}", input="\n", catch_exceptions=False, ) assert result.exit_code == 0 stdout: str = result.stdout assert ( f"Deleted the expectation suite named: {expectation_suite_dir_name}.{expectation_suite_name}" in stdout ) assert not os.path.isfile(suite_path) assert mock_emit.call_count == 3 assert mock_emit.call_args_list == [ mock.call( {"event_payload": {}, "event": "data_context.__init__", "success": True} ), mock.call( { "event": "cli.suite.delete.begin", "event_payload": {"api_version": "v3"}, "success": True, } ), mock.call( { "event": "cli.suite.delete.end", "event_payload": {"api_version": "v3"}, "success": True, } ), ] assert_no_logging_messages_or_tracebacks( my_caplog=caplog, click_result=result, ) @mock.patch( "great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit" ) def test_suite_delete_canceled_with_one_suite( mock_emit, caplog, monkeypatch, empty_data_context_stats_enabled ): context: DataContext = empty_data_context_stats_enabled monkeypatch.chdir(os.path.dirname(context.root_directory)) project_dir: str = empty_data_context_stats_enabled.root_directory expectation_suite_dir_name: str = "a_dir" expectation_suite_name: str = "test_suite_name" # noinspection PyUnusedLocal suite: 
ExpectationSuite = context.create_expectation_suite(
        expectation_suite_name=f"{expectation_suite_dir_name}.{expectation_suite_name}"
    )
    context.save_expectation_suite(expectation_suite=suite)
    assert (
        context.list_expectation_suites()[0].expectation_suite_name
        == f"{expectation_suite_dir_name}.{expectation_suite_name}"
    )
    # Discard the events recorded during setup so only the command's events remain.
    mock_emit.reset_mock()

    suite_dir: str = os.path.join(
        project_dir, "expectations", expectation_suite_dir_name
    )
    suite_path: str = os.path.join(suite_dir, f"{expectation_suite_name}.json")
    assert os.path.isfile(suite_path)

    runner: CliRunner = CliRunner(mix_stderr=False)
    # input="n\n" declines the confirmation prompt.
    result: Result = runner.invoke(
        cli,
        f"--v3-api suite delete {expectation_suite_dir_name}.{expectation_suite_name}",
        input="n\n",
        catch_exceptions=False,
    )
    assert result.exit_code == 0
    stdout: str = result.stdout
    assert (
        f"Suite `{expectation_suite_dir_name}.{expectation_suite_name}` was not deleted"
        in stdout
    )
    # The suite file must survive the canceled delete.
    assert os.path.isfile(suite_path)

    # Only two events: context init and delete.begin — no delete.end on cancel.
    assert mock_emit.call_count == 2
    assert mock_emit.call_args_list == [
        mock.call(
            {"event_payload": {}, "event": "data_context.__init__", "success": True}
        ),
        mock.call(
            {
                "event": "cli.suite.delete.begin",
                "event_payload": {"api_version": "v3"},
                "success": True,
            }
        ),
    ]
    assert_no_logging_messages_or_tracebacks(
        my_caplog=caplog,
        click_result=result,
    )


@mock.patch(
    "great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit"
)
def test_suite_delete_with_one_suite_assume_yes_flag(
    mock_emit, caplog, monkeypatch, empty_data_context_stats_enabled
):
    """With the global --assume-yes flag, `suite delete` removes the suite
    without ever showing a confirmation prompt."""
    context: DataContext = empty_data_context_stats_enabled
    monkeypatch.chdir(os.path.dirname(context.root_directory))
    project_dir: str = context.root_directory
    expectation_suite_dir_name: str = "a_dir"
    expectation_suite_name: str = "test_suite_name"
    # noinspection PyUnusedLocal
    suite: ExpectationSuite = context.create_expectation_suite(
        expectation_suite_name=f"{expectation_suite_dir_name}.{expectation_suite_name}"
    )
    context.save_expectation_suite(expectation_suite=suite)
    assert
( context.list_expectation_suites()[0].expectation_suite_name == f"{expectation_suite_dir_name}.{expectation_suite_name}" ) mock_emit.reset_mock() suite_dir: str = os.path.join( project_dir, "expectations", expectation_suite_dir_name ) suite_path: str = os.path.join(suite_dir, f"{expectation_suite_name}.json") assert os.path.isfile(suite_path) runner: CliRunner = CliRunner(mix_stderr=False) result: Result = runner.invoke( cli, f"--v3-api --assume-yes suite delete {expectation_suite_dir_name}.{expectation_suite_name}", catch_exceptions=False, ) assert result.exit_code == 0 stdout: str = result.stdout assert ( f"Deleted the expectation suite named: {expectation_suite_dir_name}.{expectation_suite_name}" in stdout ) assert "Would you like to proceed? [Y/n]:" not in stdout # This assertion is extra assurance since this test is too permissive if we change the confirmation message assert "[Y/n]" not in stdout assert not os.path.isfile(suite_path) assert mock_emit.call_count == 3 assert mock_emit.call_args_list == [ mock.call( {"event_payload": {}, "event": "data_context.__init__", "success": True} ), mock.call( { "event": "cli.suite.delete.begin", "event_payload": {"api_version": "v3"}, "success": True, } ), mock.call( { "event": "cli.suite.delete.end", "event_payload": {"api_version": "v3"}, "success": True, } ), ] assert_no_logging_messages_or_tracebacks( my_caplog=caplog, click_result=result, ) result = runner.invoke( cli, f"--v3-api suite list", catch_exceptions=False, ) assert result.exit_code == 0 stdout = result.stdout assert result.exit_code == 0 assert "No Expectation Suites found" in stdout @mock.patch( "great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit" ) @mock.patch("subprocess.call", return_value=True, side_effect=None) def test_suite_new_profile_on_context_with_no_datasource_raises_error( mock_subprocess, mock_emit, caplog, monkeypatch, empty_data_context_stats_enabled, ): """ We call the "suite new --profile" command on a 
context with no datasource The command should: - exit with a clear error message - send a DataContext init success message - send a new fail message """ context: DataContext = empty_data_context_stats_enabled monkeypatch.chdir(os.path.dirname(context.root_directory)) expectation_suite_name: str = "test_suite_name" runner: CliRunner = CliRunner(mix_stderr=False) result: Result = runner.invoke( cli, [ "--v3-api", "suite", "new", "--interactive", "--profile", "--expectation-suite", f"{expectation_suite_name}", ], input="\n", catch_exceptions=False, ) assert result.exit_code == 1 stdout: str = result.output assert ( "No datasources found in the context. To add a datasource, run `great_expectations datasource new`" in stdout ) assert mock_subprocess.call_count == 0 assert mock_emit.call_count == 3 assert mock_emit.call_args_list == [ mock.call( {"event_payload": {}, "event": "data_context.__init__", "success": True} ), mock.call( { "event": "cli.suite.new.begin", "event_payload": {"api_version": "v3"}, "success": True, } ), mock.call( { "event": "cli.suite.new.end", "event_payload": {"api_version": "v3"}, "success": False, } ), ] assert_no_logging_messages_or_tracebacks( my_caplog=caplog, click_result=result, ) @mock.patch( "great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit" ) def test_suite_new_profile_on_existing_suite_raises_error( mock_emit, caplog, monkeypatch, empty_data_context_stats_enabled ): """ We call the "suite new --profile" command with an existing suite The command should: - exit with a clear error message - send a DataContext init success message - send a new fail message """ context: DataContext = empty_data_context_stats_enabled monkeypatch.chdir(os.path.dirname(context.root_directory)) project_dir: str = context.root_directory uncommitted_dir: str = os.path.join(project_dir, "uncommitted") expectation_suite_name: str = "test_suite_name" suite: ExpectationSuite = context.create_expectation_suite( 
expectation_suite_name=expectation_suite_name ) context.save_expectation_suite(expectation_suite=suite) assert ( context.list_expectation_suites()[0].expectation_suite_name == expectation_suite_name ) batch_request: dict = { "datasource_name": "my_datasource", "data_connector_name": "my_basic_data_connector", "data_asset_name": "Titanic_1911", } batch_request_file_path: str = os.path.join(uncommitted_dir, f"batch_request.json") with open(batch_request_file_path, "w") as json_file: json.dump(batch_request, json_file) mock_emit.reset_mock() runner: CliRunner = CliRunner(mix_stderr=False) result: Result = runner.invoke( cli, [ "--v3-api", "suite", "new", "--expectation-suite", f"{expectation_suite_name}", "--interactive", "--batch-request", f"{batch_request_file_path}", "--profile", "--no-jupyter", ], input="\n", catch_exceptions=False, ) assert result.exit_code == 1 stdout: str = result.output assert ( f"An expectation suite named `{expectation_suite_name}` already exists." in stdout ) assert ( f"If you intend to edit the suite please use `great_expectations suite edit {expectation_suite_name}`." 
in stdout ) assert mock_emit.call_count == 3 assert mock_emit.call_args_list == [ mock.call( {"event_payload": {}, "event": "data_context.__init__", "success": True} ), mock.call( { "event": "cli.suite.new.begin", "event_payload": {"api_version": "v3"}, "success": True, } ), mock.call( { "event": "cli.suite.new.end", "event_payload": {"api_version": "v3"}, "success": False, } ), ] assert_no_logging_messages_or_tracebacks( my_caplog=caplog, click_result=result, ) @mock.patch( "great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit" ) @mock.patch("subprocess.call", return_value=True, side_effect=None) @mock.patch("webbrowser.open", return_value=True, side_effect=None) def test_suite_new_profile_runs_notebook_no_jupyter( mock_webbroser, mock_subprocess, mock_emit, caplog, monkeypatch, titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled, ): """ We call the "suite new --profile" command The command should: - create a new notebook - send a DataContext init success message - send a new success message """ context: DataContext = titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled monkeypatch.chdir(os.path.dirname(context.root_directory)) project_dir: str = context.root_directory uncommitted_dir: str = os.path.join(project_dir, "uncommitted") expectation_suite_name: str = "test_suite_name" batch_request: dict = { "datasource_name": "my_datasource", "data_connector_name": "my_basic_data_connector", "data_asset_name": "Titanic_1911", } batch_request_file_path: str = os.path.join(uncommitted_dir, f"batch_request.json") with open(batch_request_file_path, "w") as json_file: json.dump(batch_request, json_file) mock_emit.reset_mock() runner: CliRunner = CliRunner(mix_stderr=False) result: Result = runner.invoke( cli, [ "--v3-api", "suite", "new", "--expectation-suite", f"{expectation_suite_name}", "--interactive", "--batch-request", 
f"{batch_request_file_path}", "--profile", "--no-jupyter", ], input="\n", catch_exceptions=False, ) assert result.exit_code == 0 stdout: str = result.stdout assert "Select a datasource" not in stdout assert ( "Opening a notebook for you now to edit your expectation suite!" not in stdout ) assert ( "If you wish to avoid this you can add the `--no-jupyter` flag." not in stdout ) expected_suite_path: str = os.path.join( project_dir, "expectations", f"{expectation_suite_name}.json" ) assert os.path.isfile(expected_suite_path) expected_notebook_path: str = os.path.join( uncommitted_dir, f"edit_{expectation_suite_name}.ipynb" ) assert os.path.isfile(expected_notebook_path) batch_request_string: str = ( str(BatchRequest(**batch_request)) .replace("{\n", "{\n ") .replace(",\n", ",\n ") .replace("\n}", ",\n}") ) batch_request_string = fr"batch_request = {batch_request_string}" cells_of_interest_dict: Dict[int, dict] = find_code_in_notebook( nb=load_notebook_from_path(notebook_path=expected_notebook_path), search_string=batch_request_string, ) assert len(cells_of_interest_dict) == 1 cells_of_interest_dict: Dict[int, dict] = find_code_in_notebook( nb=load_notebook_from_path(notebook_path=expected_notebook_path), search_string="context.open_data_docs(resource_identifier=suite_identifier)", ) assert not cells_of_interest_dict cells_of_interest_dict: Dict[int, dict] = find_code_in_notebook( nb=load_notebook_from_path(notebook_path=expected_notebook_path), search_string="context.open_data_docs(resource_identifier=validation_result_identifier)", ) assert len(cells_of_interest_dict) == 1 profiler_code_cell: str = f"""\ profiler = UserConfigurableProfiler( profile_dataset=validator, excluded_expectations=None, ignored_columns=ignored_columns, not_null_only=False, primary_or_compound_key=False, semantic_types_dict=None, table_expectations_only=False, value_set_threshold="MANY", ) suite = profiler.build_suite()""" profiler_code_cell = lint_code(code=profiler_code_cell).rstrip("\n") 
cells_of_interest_dict: Dict[int, dict] = find_code_in_notebook( nb=load_notebook_from_path(notebook_path=expected_notebook_path), search_string=profiler_code_cell, ) assert len(cells_of_interest_dict) == 1 run_notebook( notebook_path=expected_notebook_path, notebook_dir=uncommitted_dir, string_to_be_replaced="context.open_data_docs(resource_identifier=validation_result_identifier)", replacement_string="", ) context = DataContext(context_root_dir=project_dir) assert expectation_suite_name in context.list_expectation_suite_names() suite: ExpectationSuite = context.get_expectation_suite( expectation_suite_name=expectation_suite_name ) assert suite.expectations == [ ExpectationConfiguration( **{ "expectation_type": "expect_table_columns_to_match_ordered_list", "kwargs": { "column_list": [ "Unnamed: 0", "Name", "PClass", "Age", "Sex", "Survived", "SexCode", ] }, "meta": {}, } ), ExpectationConfiguration( **{ "expectation_type": "expect_table_row_count_to_be_between", "kwargs": {"max_value": 1313, "min_value": 1313}, "meta": {}, } ), ] assert mock_subprocess.call_count == 0 assert mock_webbroser.call_count == 0 assert mock_emit.call_count == 5 assert mock_emit.call_args_list == [ mock.call( {"event_payload": {}, "event": "data_context.__init__", "success": True} ), mock.call( { "event": "cli.suite.new.begin", "event_payload": {"api_version": "v3"}, "success": True, } ), mock.call( { "event_payload": { "anonymized_expectation_suite_name": "9df638a13b727807e51b13ec1839bcbe" }, "event": "data_context.save_expectation_suite", "success": True, } ), mock.call( { "event": "cli.suite.new.end", "event_payload": {"api_version": "v3"}, "success": True, } ), mock.call( {"event_payload": {}, "event": "data_context.__init__", "success": True} ), ] assert_no_logging_messages_or_tracebacks( my_caplog=caplog, click_result=result, ) @mock.patch( "great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit" ) @mock.patch("subprocess.call", return_value=True, 
side_effect=None) @mock.patch("webbrowser.open", return_value=True, side_effect=None) def test_suite_new_profile_runs_notebook_opens_jupyter( mock_webbroser, mock_subprocess, mock_emit, caplog, monkeypatch, titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled, ): """ We call the "suite new --profile" command The command should: - create a new notebook - open the notebook in jupyter - send a DataContext init success message - send a new success message """ context: DataContext = titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled monkeypatch.chdir(os.path.dirname(context.root_directory)) project_dir: str = context.root_directory uncommitted_dir: str = os.path.join(project_dir, "uncommitted") expectation_suite_name: str = "test_suite_name" batch_request: dict = { "datasource_name": "my_datasource", "data_connector_name": "my_basic_data_connector", "data_asset_name": "Titanic_1911", } batch_request_file_path: str = os.path.join(uncommitted_dir, f"batch_request.json") with open(batch_request_file_path, "w") as json_file: json.dump(batch_request, json_file) mock_emit.reset_mock() runner: CliRunner = CliRunner(mix_stderr=False) result: Result = runner.invoke( cli, [ "--v3-api", "suite", "new", "--expectation-suite", f"{expectation_suite_name}", "--interactive", "--batch-request", f"{batch_request_file_path}", "--profile", ], input="\n", catch_exceptions=False, ) assert result.exit_code == 0 stdout: str = result.stdout assert "Select a datasource" not in stdout assert "Opening a notebook for you now to edit your expectation suite!" in stdout assert "If you wish to avoid this you can add the `--no-jupyter` flag." 
in stdout expected_suite_path: str = os.path.join( project_dir, "expectations", f"{expectation_suite_name}.json" ) assert os.path.isfile(expected_suite_path) expected_notebook_path: str = os.path.join( uncommitted_dir, f"edit_{expectation_suite_name}.ipynb" ) assert os.path.isfile(expected_notebook_path) batch_request_string: str = ( str(BatchRequest(**batch_request)) .replace("{\n", "{\n ") .replace(",\n", ",\n ") .replace("\n}", ",\n}") ) batch_request_string = fr"batch_request = {batch_request_string}" cells_of_interest_dict: Dict[int, dict] = find_code_in_notebook( nb=load_notebook_from_path(notebook_path=expected_notebook_path), search_string=batch_request_string, ) assert len(cells_of_interest_dict) == 1 cells_of_interest_dict: Dict[int, dict] = find_code_in_notebook( nb=load_notebook_from_path(notebook_path=expected_notebook_path), search_string="context.open_data_docs(resource_identifier=suite_identifier)", ) assert not cells_of_interest_dict cells_of_interest_dict: Dict[int, dict] = find_code_in_notebook( nb=load_notebook_from_path(notebook_path=expected_notebook_path), search_string="context.open_data_docs(resource_identifier=validation_result_identifier)", ) assert len(cells_of_interest_dict) == 1 profiler_code_cell: str = f"""\ profiler = UserConfigurableProfiler( profile_dataset=validator, excluded_expectations=None, ignored_columns=ignored_columns, not_null_only=False, primary_or_compound_key=False, semantic_types_dict=None, table_expectations_only=False, value_set_threshold="MANY", ) suite = profiler.build_suite()""" profiler_code_cell = lint_code(code=profiler_code_cell).rstrip("\n") cells_of_interest_dict: Dict[int, dict] = find_code_in_notebook( nb=load_notebook_from_path(notebook_path=expected_notebook_path), search_string=profiler_code_cell, ) assert len(cells_of_interest_dict) == 1 run_notebook( notebook_path=expected_notebook_path, notebook_dir=uncommitted_dir, 
string_to_be_replaced="context.open_data_docs(resource_identifier=validation_result_identifier)", replacement_string="", ) context = DataContext(context_root_dir=project_dir) assert expectation_suite_name in context.list_expectation_suite_names() suite: ExpectationSuite = context.get_expectation_suite( expectation_suite_name=expectation_suite_name ) assert suite.expectations == [ ExpectationConfiguration( **{ "expectation_type": "expect_table_columns_to_match_ordered_list", "kwargs": { "column_list": [ "Unnamed: 0", "Name", "PClass", "Age", "Sex", "Survived", "SexCode", ] }, "meta": {}, } ), ExpectationConfiguration( **{ "expectation_type": "expect_table_row_count_to_be_between", "kwargs": {"max_value": 1313, "min_value": 1313}, "meta": {}, } ), ] assert mock_subprocess.call_count == 1 call_args: List[str] = mock_subprocess.call_args[0][0] assert call_args[0] == "jupyter" assert call_args[1] == "notebook" assert expected_notebook_path in call_args[2] assert mock_webbroser.call_count == 0 assert mock_emit.call_count == 5 assert mock_emit.call_args_list == [ mock.call( {"event_payload": {}, "event": "data_context.__init__", "success": True} ), mock.call( { "event": "cli.suite.new.begin", "event_payload": {"api_version": "v3"}, "success": True, } ), mock.call( { "event_payload": { "anonymized_expectation_suite_name": "9df638a13b727807e51b13ec1839bcbe" }, "event": "data_context.save_expectation_suite", "success": True, } ), mock.call( { "event": "cli.suite.new.end", "event_payload": {"api_version": "v3"}, "success": True, } ), mock.call( {"event_payload": {}, "event": "data_context.__init__", "success": True} ), ] assert_no_logging_messages_or_tracebacks( my_caplog=caplog, click_result=result, ) @pytest.fixture def suite_new_messages(): return { "no_msg": "", "happy_path_profile": "Entering interactive mode since you passed the --profile flag", "warning_profile": "Warning: Ignoring the --manual flag and entering interactive mode since you passed the --profile flag", 
"happy_path_batch_request": "Entering interactive mode since you passed the --batch-request flag", "warning_batch_request": "Warning: Ignoring the --manual flag and entering interactive mode since you passed the --batch-request flag", "happy_path_prompt_call": """\ How would you like to create your Expectation Suite? 1. Manually, without interacting with a sample batch of data (default) 2. Interactively, with a sample batch of data 3. Automatically, using a profiler """, "error_both_interactive_flags": "Please choose either --interactive or --manual, you may not choose both.", } @pytest.mark.parametrize( "interactive_flag,manual_flag,profile_flag,batch_request_flag,error_expected,prompt_input,return_interactive,return_profile,stdout_fixture,stderr_fixture", [ # No error expected # return_interactive = True, return_profile = False pytest.param( True, False, False, None, False, None, True, False, "no_msg", "no_msg", id="--interactive", ), # return_interactive = False, return_profile = False pytest.param( False, True, False, None, False, None, False, False, "no_msg", "no_msg", id="--manual", ), # return_interactive = True, return_profile = True pytest.param( False, False, True, None, False, None, True, True, "no_msg", "no_msg", id="--profile", ), pytest.param( True, False, True, None, False, None, True, True, "no_msg", "no_msg", id="--interactive --profile", ), # batch_request not empty pytest.param( True, False, False, "batch_request.json", False, None, True, False, "no_msg", "no_msg", id="--interactive --batch-request", ), pytest.param( False, False, True, "batch_request.json", False, None, True, True, "happy_path_profile", "no_msg", id="--profile --batch-request", ), pytest.param( True, False, True, "batch_request.json", False, None, True, True, "no_msg", "no_msg", id="--interactive --profile --batch-request", ), # Prompts # Just hit enter (default choice) pytest.param( False, False, False, None, False, "", False, False, "no_msg", "no_msg", id="prompt: Default 
Choice 1 - Manual suite creation (default)", ), # Choice 1 - Manual suite creation (default) pytest.param( False, False, False, None, False, "1", False, False, "no_msg", "no_msg", id="prompt: Choice 1 - Manual suite creation (default)", ), # Choice 2 - Interactive suite creation pytest.param( False, False, False, None, False, "2", True, False, "no_msg", "no_msg", id="prompt: Choice 2 - Interactive suite creation", ), # Choice 3 - Automatic suite creation (profiler) pytest.param( False, False, False, None, False, "3", True, True, "no_msg", "no_msg", id="prompt: Choice 3 - Automatic suite creation (profiler)", ), # No error but warning expected # no-interactive flag with batch_request, with/without profile flag pytest.param( False, True, False, "batch_request.json", False, None, True, False, "warning_batch_request", "no_msg", id="warning: --manual --batch-request", ), pytest.param( False, True, True, "batch_request.json", False, None, True, True, "warning_profile", "no_msg", id="warning: --manual --profile --batch-request", ), # no-interactive flag with profile and without batch request flag pytest.param( False, True, True, None, False, None, True, True, "warning_profile", "no_msg", id="warning: --manual --profile", ), # Yes error expected # both interactive flags, profile=False, with/without batch_request pytest.param( True, True, False, None, True, None, None, None, "error_both_interactive_flags", "no_msg", id="error: --interactive --manual", ), pytest.param( True, True, False, "batch_request.json", True, None, None, None, "error_both_interactive_flags", "no_msg", id="error: --interactive --manual --batch-request", ), # both interactive flags, profile=True, with/without batch_request pytest.param( True, True, True, None, True, None, None, None, "error_both_interactive_flags", "no_msg", id="error: --interactive --manual --profile", ), pytest.param( True, True, True, "batch_request.json", True, None, None, None, "error_both_interactive_flags", "no_msg", id="error: 
--interactive --manual --profile --batch-request", ), ], ) @mock.patch("click.prompt") @mock.patch( "great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit" ) def test__process_suite_new_flags_and_prompt( mock_emit, mock_prompt, interactive_flag, manual_flag, profile_flag, batch_request_flag, error_expected, prompt_input, return_interactive, return_profile, stdout_fixture, stderr_fixture, empty_data_context_stats_enabled, capsys, suite_new_messages, ): """ What does this test and why? _process_suite_new_flags_and_prompt should return the correct configuration or error based on input flags. """ usage_event_end: str = "cli.suite.new.end" context: DataContext = empty_data_context_stats_enabled # test happy paths if not error_expected: if prompt_input is not None: mock_prompt.side_effect = [prompt_input] processed_flags = _process_suite_new_flags_and_prompt( context=context, usage_event_end=usage_event_end, interactive_flag=interactive_flag, manual_flag=manual_flag, profile=profile_flag, batch_request=batch_request_flag, ) assert processed_flags == { "interactive": return_interactive, "profile": return_profile, } # Note - in this method on happy path no usage stats message is sent. Other messages are sent during the full # CLI suite new flow of creating a notebook etc. 
assert mock_emit.call_count == 0 assert mock_emit.call_args_list == [] # Check output captured: CaptureResult = capsys.readouterr() assert suite_new_messages[stdout_fixture] in captured.out assert suite_new_messages[stderr_fixture] in captured.err # Check prompt text and called only when appropriate if prompt_input is not None: assert mock_prompt.call_count == 1 assert ( mock_prompt.call_args_list[0][0][0] == suite_new_messages["happy_path_prompt_call"] ) else: assert mock_prompt.call_count == 0 # test error cases elif error_expected: with pytest.raises(SystemExit): _ = _process_suite_new_flags_and_prompt( context=context, usage_event_end=usage_event_end, interactive_flag=interactive_flag, manual_flag=manual_flag, profile=profile_flag, batch_request=batch_request_flag, ) # Check output captured: CaptureResult = capsys.readouterr() assert suite_new_messages[stdout_fixture] in captured.out assert suite_new_messages[stderr_fixture] in captured.err assert mock_prompt.call_count == 0 # Note - in this method only a single usage stats message is sent. Other messages are sent during the full # CLI suite new flow of creating a notebook etc. assert mock_emit.call_count == 1 assert mock_emit.call_args_list == [ mock.call( { "event": usage_event_end, "event_payload": {"api_version": "v3"}, "success": False, } ), ] @pytest.fixture def suite_edit_messages(): return { "no_msg": "", "happy_path_datasource_name": "Entering interactive mode since you passed the --datasource-name flag", "warning_datasource_name": "Warning: Ignoring the --manual flag and entering interactive mode since you passed the --datasource-name flag", "happy_path_batch_request": "Entering interactive mode since you passed the --batch-request flag", "warning_batch_request": "Warning: Ignoring the --manual flag and entering interactive mode since you passed the --batch-request flag", "happy_path_prompt_call": """\ How would you like to edit your Expectation Suite? 1. 
Manually, without interacting with a sample batch of data (default) 2. Interactively, with a sample batch of data """, "error_both_interactive_flags": "Please choose either --interactive or --manual, you may not choose both.", "error_both_datasource_name_and_batch_request_flags": """Only one of --datasource-name DATASOURCE_NAME and --batch-request <path to JSON file> \ options can be used. """, } @pytest.mark.parametrize( "interactive_flag,manual_flag,datasource_name_flag,batch_request_flag,error_expected,prompt_input,return_interactive,stdout_fixture,stderr_fixture", [ # No error expected # return_interactive = True pytest.param( True, False, None, None, False, None, True, "no_msg", "no_msg", id="--interactive", ), # return_interactive = False pytest.param( False, True, None, None, False, None, False, "no_msg", "no_msg", id="--manual", ), # return_interactive = True, --datasource-name pytest.param( False, False, "some_datasource_name", None, False, None, True, "happy_path_datasource_name", "no_msg", id="--datasource-name", ), pytest.param( True, False, "some_datasource_name", None, False, None, True, "no_msg", "no_msg", id="--interactive --datasource-name", ), # batch_request not empty pytest.param( True, False, None, "batch_request.json", False, None, True, "no_msg", "no_msg", id="--interactive --batch-request", ), pytest.param( False, False, None, "batch_request.json", False, None, True, "happy_path_batch_request", "no_msg", id="--batch-request", ), # Prompts # Just hit enter (default choice) pytest.param( False, False, None, None, False, "", False, "no_msg", "no_msg", id="prompt: Default Choice 1 - Manual suite edit (default)", ), # # Choice 1 - Manual suite edit (default) pytest.param( False, False, None, None, False, "1", False, "no_msg", "no_msg", id="prompt: Choice 1 - Manual suite edit (default)", ), # Choice 2 - Interactive suite edit pytest.param( False, False, None, None, False, "2", True, "no_msg", "no_msg", id="prompt: Choice 2 - Interactive suite 
edit", ), # No error but warning expected # no-interactive flag with batch_request pytest.param( False, True, None, "batch_request.json", False, None, True, "warning_batch_request", "no_msg", id="warning: --manual --batch-request", ), # no-interactive flag with datasource_name pytest.param( False, True, "some_datasource_name", None, False, None, True, "warning_datasource_name", "no_msg", id="warning: --manual --datasource-name", ), # Yes error expected # both interactive flags, datasource_name=None, with/without batch_request pytest.param( True, True, None, None, True, None, None, "error_both_interactive_flags", "no_msg", id="error: --interactive --manual", ), pytest.param( True, True, None, "batch_request.json", True, None, None, "error_both_interactive_flags", "no_msg", id="error: --interactive --manual --batch-request", ), # both interactive flags, datasource_name=something, with/without batch_request pytest.param( True, True, "some_datasource_name", None, True, None, None, "error_both_interactive_flags", "no_msg", id="error: --interactive --manual --datasource-name", ), pytest.param( True, True, "some_datasource_name", "batch_request.json", True, None, None, "error_both_datasource_name_and_batch_request_flags", "no_msg", id="error: --interactive --manual --datasource-name --batch-request", ), # both --datasource-name and --batch-request pytest.param( False, False, "some_datasource_name", "batch_request.json", True, None, True, "error_both_datasource_name_and_batch_request_flags", "no_msg", id="error: --datasource-name --batch-request", ), pytest.param( True, False, "some_datasource_name", "batch_request.json", True, None, True, "error_both_datasource_name_and_batch_request_flags", "no_msg", id="--interactive --datasource-name --batch-request", ), pytest.param( False, True, "some_datasource_name", "batch_request.json", True, None, True, "error_both_datasource_name_and_batch_request_flags", "no_msg", id="--manual --datasource-name --batch-request", ), ], ) 
@mock.patch("click.prompt")
@mock.patch(
    "great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit"
)
def test__process_suite_edit_flags_and_prompt(
    mock_emit,
    mock_prompt,
    interactive_flag,
    manual_flag,
    datasource_name_flag,
    batch_request_flag,
    error_expected,
    prompt_input,
    return_interactive,
    stdout_fixture,
    stderr_fixture,
    empty_data_context_stats_enabled,
    capsys,
    suite_edit_messages,
):
    """
    What does this test and why?

    _process_suite_edit_flags_and_prompt should return the correct
    interactive/manual configuration, or exit with an error, based on the
    combination of CLI flags supplied.

    Parametrized (decorator defined above this function) over every legal and
    illegal combination of --interactive / --manual / --datasource-name /
    --batch-request, plus the prompt choices used when no flag forces a mode.
    ``stdout_fixture`` / ``stderr_fixture`` are keys into the
    ``suite_edit_messages`` fixture dict and select the message expected on
    each stream ("no_msg" means nothing is asserted beyond the empty string).
    """
    usage_event_end: str = "cli.suite.edit.end"
    context: DataContext = empty_data_context_stats_enabled

    # test happy paths
    if not error_expected:
        # Simulate the user's single answer to the "How would you like to
        # edit..." prompt (only consumed when no flag pre-selects a mode).
        if prompt_input is not None:
            mock_prompt.side_effect = [prompt_input]
        interactive: bool = _process_suite_edit_flags_and_prompt(
            context=context,
            usage_event_end=usage_event_end,
            interactive_flag=interactive_flag,
            manual_flag=manual_flag,
            datasource_name=datasource_name_flag,
            batch_request=batch_request_flag,
        )
        assert interactive == return_interactive
        # Note - in this method on happy path no usage stats message is sent. Other messages are sent during the full
        # CLI suite new flow of creating a notebook etc.
        assert mock_emit.call_count == 0
        assert mock_emit.call_args_list == []

        # Check output (capsys captures anything echoed to stdout/stderr)
        captured: CaptureResult = capsys.readouterr()
        assert suite_edit_messages[stdout_fixture] in captured.out
        assert suite_edit_messages[stderr_fixture] in captured.err

        # Check prompt text and called only when appropriate: the prompt must
        # appear exactly once when we simulated input, and never otherwise.
        if prompt_input is not None:
            assert mock_prompt.call_count == 1
            assert (
                mock_prompt.call_args_list[0][0][0]
                == suite_edit_messages["happy_path_prompt_call"]
            )
        else:
            assert mock_prompt.call_count == 0

    # test error cases: conflicting flags must terminate the CLI via sys.exit
    elif error_expected:
        with pytest.raises(SystemExit):
            _ = _process_suite_edit_flags_and_prompt(
                context=context,
                usage_event_end=usage_event_end,
                interactive_flag=interactive_flag,
                manual_flag=manual_flag,
                datasource_name=datasource_name_flag,
                batch_request=batch_request_flag,
            )
        # Check output: the specific error message must have been echoed
        captured: CaptureResult = capsys.readouterr()
        assert suite_edit_messages[stdout_fixture] in captured.out
        assert suite_edit_messages[stderr_fixture] in captured.err
        # Errors are raised before any prompting can occur
        assert mock_prompt.call_count == 0

        # Note - in this method only a single usage stats message is sent. Other messages are sent during the full
        # CLI suite new flow of creating a notebook etc.
        assert mock_emit.call_count == 1
        assert mock_emit.call_args_list == [
            mock.call(
                {
                    "event": usage_event_end,
                    "event_payload": {"api_version": "v3"},
                    "success": False,
                }
            ),
        ]
great-expectations/great_expectations
tests/cli/test_suite.py
great_expectations/cli/upgrade_helpers/upgrade_helper_v11.py
import warnings

from great_expectations.render.renderer import ValidationResultsPageRenderer
from great_expectations.render.view import DefaultMarkdownPageView
from great_expectations.validation_operators.types.validation_operator_result import (
    ValidationOperatorResult,
)


def render_multiple_validation_result_pages_markdown(
    validation_operator_result: ValidationOperatorResult,
    run_info_at_end: bool = True,
) -> str:
    """
    Loop through and render multiple validation results to markdown.

    Deprecated: delegates to
    ValidationResultsPageRenderer.render_validation_operator_result() and
    emits a DeprecationWarning pointing callers at the replacement.

    Args:
        validation_operator_result: (ValidationOperatorResult) Result of validation operator run
        run_info_at_end: move run info below expectation results

    Returns:
        string containing formatted markdown validation results
    """
    # The fragments below were previously concatenated with no separators,
    # producing an unreadable run-on warning ("...instead.E.g. to replicate
    # ...markdown:validation_results_page_renderer = ..."); newlines restore
    # the intended multi-line replacement snippet. stacklevel=2 attributes
    # the warning to the caller rather than to this helper.
    warnings.warn(
        "This 'render_multiple_validation_result_pages_markdown' function will be deprecated. "
        "Please use ValidationResultsPageRenderer.render_validation_operator_result() instead.\n"
        "E.g. to replicate the functionality of rendering a ValidationOperatorResult to markdown:\n"
        "validation_results_page_renderer = ValidationResultsPageRenderer(\n"
        "    run_info_at_end=run_info_at_end\n"
        ")\n"
        "rendered_document_content_list = validation_results_page_renderer.render_validation_operator_result(\n"
        "    validation_operator_result=validation_operator_result\n"
        ")\n"
        'return " ".join(DefaultMarkdownPageView().render(rendered_document_content_list))\n'
        "Please update code accordingly.",
        DeprecationWarning,
        stacklevel=2,
    )
    validation_results_page_renderer = ValidationResultsPageRenderer(
        run_info_at_end=run_info_at_end
    )
    rendered_document_content_list = (
        validation_results_page_renderer.render_validation_operator_result(
            validation_operator_result=validation_operator_result
        )
    )
    # One markdown document per validation result, joined into a single string.
    return " ".join(DefaultMarkdownPageView().render(rendered_document_content_list))
import json import os from typing import Dict, List from unittest import mock import click import pytest from _pytest.capture import CaptureResult from click.testing import CliRunner, Result from great_expectations import DataContext from great_expectations.cli import cli from great_expectations.cli.suite import ( _process_suite_edit_flags_and_prompt, _process_suite_new_flags_and_prompt, ) from great_expectations.core import ExpectationConfiguration from great_expectations.core.batch import BatchRequest from great_expectations.core.expectation_suite import ExpectationSuite from great_expectations.util import lint_code from tests.cli.utils import assert_no_logging_messages_or_tracebacks from tests.render.test_util import ( find_code_in_notebook, load_notebook_from_path, run_notebook, ) def test_suite_help_output(caplog): runner: CliRunner = CliRunner(mix_stderr=False) result: Result = runner.invoke(cli, ["--v3-api", "suite"], catch_exceptions=False) assert result.exit_code == 0 stdout: str = result.stdout assert ( """ Usage: great_expectations suite [OPTIONS] COMMAND [ARGS]... Expectation Suite operations Options: --help Show this message and exit. Commands: delete Delete an Expectation Suite from the Expectation Store. demo This command is not supported in the v3 (Batch Request) API. edit Edit an existing Expectation Suite. list List existing Expectation Suites. new Create a new Expectation Suite. 
""" in stdout ) assert_no_logging_messages_or_tracebacks( my_caplog=caplog, click_result=result, ) @mock.patch( "great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit" ) def test_suite_demo_deprecation_message( mock_emit, caplog, monkeypatch, empty_data_context_stats_enabled ): context: DataContext = empty_data_context_stats_enabled monkeypatch.chdir(os.path.dirname(context.root_directory)) runner: CliRunner = CliRunner(mix_stderr=False) result: Result = runner.invoke( cli, f"--v3-api suite demo", catch_exceptions=False, ) assert result.exit_code == 0 stdout: str = result.stdout assert "This command is not supported in the v3 (Batch Request) API." in stdout expected_call_args_list = [ mock.call( {"event_payload": {}, "event": "data_context.__init__", "success": True} ), mock.call( { "event": "cli.suite.demo.begin", "event_payload": {"api_version": "v3"}, "success": True, } ), mock.call( { "event": "cli.suite.demo.end", "event_payload": {"api_version": "v3"}, "success": True, } ), ] assert mock_emit.call_args_list == expected_call_args_list assert mock_emit.call_count == len(expected_call_args_list) assert_no_logging_messages_or_tracebacks( my_caplog=caplog, click_result=result, ) @mock.patch( "great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit" ) @mock.patch("subprocess.call", return_value=True, side_effect=None) @mock.patch("webbrowser.open", return_value=True, side_effect=None) def test_suite_new_non_interactive_with_suite_name_prompted_default_runs_notebook_opens_jupyter( mock_webbroser, mock_subprocess, mock_emit, caplog, monkeypatch, titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled, ): context: DataContext = titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled monkeypatch.chdir(os.path.dirname(context.root_directory)) project_dir: str = context.root_directory uncommitted_dir: str = 
os.path.join(project_dir, "uncommitted") expectation_suite_name: str = "warning" runner: CliRunner = CliRunner(mix_stderr=False) result: Result = runner.invoke( cli, f"--v3-api suite new", input="\n", catch_exceptions=False, ) assert result.exit_code == 0 stdout: str = result.stdout assert "Select a datasource" not in stdout assert f"Name the new Expectation Suite [warning]:" in stdout assert "Opening a notebook for you now to edit your expectation suite!" in stdout assert "If you wish to avoid this you can add the `--no-jupyter` flag." in stdout expected_suite_path: str = os.path.join( project_dir, "expectations", f"{expectation_suite_name}.json" ) assert os.path.isfile(expected_suite_path) expected_notebook_path: str = os.path.join( uncommitted_dir, f"edit_{expectation_suite_name}.ipynb" ) assert os.path.isfile(expected_notebook_path) run_notebook( notebook_path=expected_notebook_path, notebook_dir=uncommitted_dir, string_to_be_replaced="context.open_data_docs(resource_identifier=suite_identifier)", replacement_string="", ) context = DataContext(context_root_dir=project_dir) assert expectation_suite_name in context.list_expectation_suite_names() suite: ExpectationSuite = context.get_expectation_suite( expectation_suite_name=expectation_suite_name ) assert suite.expectations == [] assert mock_subprocess.call_count == 1 call_args: List[str] = mock_subprocess.call_args[0][0] assert call_args[0] == "jupyter" assert call_args[1] == "notebook" assert expected_notebook_path in call_args[2] assert mock_webbroser.call_count == 0 assert mock_emit.call_count == 4 assert mock_emit.call_args_list == [ mock.call( {"event_payload": {}, "event": "data_context.__init__", "success": True} ), mock.call( { "event": "cli.suite.new.begin", "event_payload": {"api_version": "v3"}, "success": True, } ), mock.call( { "event": "cli.suite.new.end", "event_payload": {"api_version": "v3"}, "success": True, } ), mock.call( {"event_payload": {}, "event": "data_context.__init__", "success": 
True} ), ] assert_no_logging_messages_or_tracebacks( my_caplog=caplog, click_result=result, ) @mock.patch( "great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit" ) @mock.patch("subprocess.call", return_value=True, side_effect=None) @mock.patch("webbrowser.open", return_value=True, side_effect=None) def test_suite_new_non_interactive_with_suite_name_prompted_custom_runs_notebook_opens_jupyter( mock_webbroser, mock_subprocess, mock_emit, caplog, monkeypatch, titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled, ): context: DataContext = titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled monkeypatch.chdir(os.path.dirname(context.root_directory)) project_dir: str = context.root_directory uncommitted_dir: str = os.path.join(project_dir, "uncommitted") expectation_suite_name: str = "test_suite_name" runner: CliRunner = CliRunner(mix_stderr=False) result: Result = runner.invoke( cli, f"--v3-api suite new", input=f"1\n{expectation_suite_name}\n", catch_exceptions=False, ) assert result.exit_code == 0 stdout: str = result.stdout assert "Select a datasource" not in stdout assert f"Name the new Expectation Suite [warning]:" in stdout assert "Opening a notebook for you now to edit your expectation suite!" in stdout assert "If you wish to avoid this you can add the `--no-jupyter` flag." 
in stdout expected_suite_path: str = os.path.join( project_dir, "expectations", f"{expectation_suite_name}.json" ) assert os.path.isfile(expected_suite_path) expected_notebook_path: str = os.path.join( uncommitted_dir, f"edit_{expectation_suite_name}.ipynb" ) assert os.path.isfile(expected_notebook_path) run_notebook( notebook_path=expected_notebook_path, notebook_dir=uncommitted_dir, string_to_be_replaced="context.open_data_docs(resource_identifier=suite_identifier)", replacement_string="", ) context = DataContext(context_root_dir=project_dir) assert expectation_suite_name in context.list_expectation_suite_names() suite: ExpectationSuite = context.get_expectation_suite( expectation_suite_name=expectation_suite_name ) assert suite.expectations == [] assert mock_subprocess.call_count == 1 call_args: List[str] = mock_subprocess.call_args[0][0] assert call_args[0] == "jupyter" assert call_args[1] == "notebook" assert expected_notebook_path in call_args[2] assert mock_webbroser.call_count == 0 assert mock_emit.call_count == 4 assert mock_emit.call_args_list == [ mock.call( {"event_payload": {}, "event": "data_context.__init__", "success": True} ), mock.call( { "event": "cli.suite.new.begin", "event_payload": {"api_version": "v3"}, "success": True, } ), mock.call( { "event": "cli.suite.new.end", "event_payload": {"api_version": "v3"}, "success": True, } ), mock.call( {"event_payload": {}, "event": "data_context.__init__", "success": True} ), ] assert_no_logging_messages_or_tracebacks( my_caplog=caplog, click_result=result, ) @mock.patch( "great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit" ) @mock.patch("subprocess.call", return_value=True, side_effect=None) @mock.patch("webbrowser.open", return_value=True, side_effect=None) def test_suite_new_non_interactive_with_suite_name_arg_custom_runs_notebook_opens_jupyter( mock_webbroser, mock_subprocess, mock_emit, caplog, monkeypatch, 
titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled, ): context: DataContext = titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled monkeypatch.chdir(os.path.dirname(context.root_directory)) project_dir: str = context.root_directory uncommitted_dir: str = os.path.join(project_dir, "uncommitted") expectation_suite_name: str = "test_suite_name" runner: CliRunner = CliRunner(mix_stderr=False) result: Result = runner.invoke( cli, f"--v3-api suite new --expectation-suite {expectation_suite_name}", catch_exceptions=False, ) assert result.exit_code == 0 stdout: str = result.stdout assert "Select a datasource" not in stdout assert "Opening a notebook for you now to edit your expectation suite!" in stdout assert "If you wish to avoid this you can add the `--no-jupyter` flag." in stdout expected_suite_path: str = os.path.join( project_dir, "expectations", f"{expectation_suite_name}.json" ) assert os.path.isfile(expected_suite_path) expected_notebook_path: str = os.path.join( uncommitted_dir, f"edit_{expectation_suite_name}.ipynb" ) assert os.path.isfile(expected_notebook_path) run_notebook( notebook_path=expected_notebook_path, notebook_dir=uncommitted_dir, string_to_be_replaced="context.open_data_docs(resource_identifier=suite_identifier)", replacement_string="", ) context = DataContext(context_root_dir=project_dir) assert expectation_suite_name in context.list_expectation_suite_names() suite: ExpectationSuite = context.get_expectation_suite( expectation_suite_name=expectation_suite_name ) assert suite.expectations == [] assert mock_subprocess.call_count == 1 call_args: List[str] = mock_subprocess.call_args[0][0] assert call_args[0] == "jupyter" assert call_args[1] == "notebook" assert expected_notebook_path in call_args[2] assert mock_webbroser.call_count == 0 assert mock_emit.call_count == 4 assert mock_emit.call_args_list == [ mock.call( {"event_payload": {}, "event": 
"data_context.__init__", "success": True} ), mock.call( { "event": "cli.suite.new.begin", "event_payload": {"api_version": "v3"}, "success": True, } ), mock.call( { "event": "cli.suite.new.end", "event_payload": {"api_version": "v3"}, "success": True, } ), mock.call( {"event_payload": {}, "event": "data_context.__init__", "success": True} ), ] assert_no_logging_messages_or_tracebacks( my_caplog=caplog, click_result=result, ) @mock.patch( "great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit" ) @mock.patch("subprocess.call", return_value=True, side_effect=None) @mock.patch("webbrowser.open", return_value=True, side_effect=None) def test_suite_new_non_interactive_with_suite_name_arg_custom_runs_notebook_no_jupyter( mock_webbroser, mock_subprocess, mock_emit, caplog, monkeypatch, titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled, ): context: DataContext = titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled monkeypatch.chdir(os.path.dirname(context.root_directory)) project_dir: str = context.root_directory uncommitted_dir: str = os.path.join(project_dir, "uncommitted") expectation_suite_name: str = "test_suite_name" runner: CliRunner = CliRunner(mix_stderr=False) result: Result = runner.invoke( cli, f"--v3-api suite new --expectation-suite {expectation_suite_name} --no-jupyter", catch_exceptions=False, ) assert result.exit_code == 0 stdout: str = result.stdout assert "Select a datasource" not in stdout assert ( "Opening a notebook for you now to edit your expectation suite!" not in stdout ) assert ( "If you wish to avoid this you can add the `--no-jupyter` flag." 
not in stdout ) expected_suite_path: str = os.path.join( project_dir, "expectations", f"{expectation_suite_name}.json" ) assert os.path.isfile(expected_suite_path) expected_notebook_path: str = os.path.join( uncommitted_dir, f"edit_{expectation_suite_name}.ipynb" ) assert os.path.isfile(expected_notebook_path) run_notebook( notebook_path=expected_notebook_path, notebook_dir=uncommitted_dir, string_to_be_replaced="context.open_data_docs(resource_identifier=suite_identifier)", replacement_string="", ) context = DataContext(context_root_dir=project_dir) assert expectation_suite_name in context.list_expectation_suite_names() suite: ExpectationSuite = context.get_expectation_suite( expectation_suite_name=expectation_suite_name ) assert suite.expectations == [] assert mock_subprocess.call_count == 0 assert mock_webbroser.call_count == 0 assert mock_emit.call_count == 4 assert mock_emit.call_args_list == [ mock.call( {"event_payload": {}, "event": "data_context.__init__", "success": True} ), mock.call( { "event": "cli.suite.new.begin", "event_payload": {"api_version": "v3"}, "success": True, } ), mock.call( { "event": "cli.suite.new.end", "event_payload": {"api_version": "v3"}, "success": True, } ), mock.call( {"event_payload": {}, "event": "data_context.__init__", "success": True} ), ] assert_no_logging_messages_or_tracebacks( my_caplog=caplog, click_result=result, ) @mock.patch( "great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit" ) @mock.patch("subprocess.call", return_value=True, side_effect=None) @mock.patch("webbrowser.open", return_value=True, side_effect=None) def test_suite_new_interactive_nonexistent_batch_request_json_file_raises_error( mock_webbroser, mock_subprocess, mock_emit, caplog, monkeypatch, titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled, ): context: DataContext = titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled 
monkeypatch.chdir(os.path.dirname(context.root_directory)) project_dir: str = context.root_directory expectation_suite_name: str = "test_suite_name" runner: CliRunner = CliRunner(mix_stderr=False) result: Result = runner.invoke( cli, f"""--v3-api suite new --expectation-suite {expectation_suite_name} --interactive --batch-request nonexistent_file.json --no-jupyter """, catch_exceptions=False, ) assert result.exit_code == 1 stdout: str = result.stdout assert 'The JSON file with the path "nonexistent_file.json' in stdout context = DataContext(context_root_dir=project_dir) assert expectation_suite_name not in context.list_expectation_suite_names() assert mock_subprocess.call_count == 0 assert mock_webbroser.call_count == 0 assert mock_emit.call_count == 4 assert mock_emit.call_args_list == [ mock.call( {"event_payload": {}, "event": "data_context.__init__", "success": True} ), mock.call( { "event": "cli.suite.new.begin", "event_payload": {"api_version": "v3"}, "success": True, } ), mock.call( { "event": "cli.suite.new.end", "event_payload": {"api_version": "v3"}, "success": False, } ), mock.call( {"event_payload": {}, "event": "data_context.__init__", "success": True} ), ] assert_no_logging_messages_or_tracebacks( my_caplog=caplog, click_result=result, ) @mock.patch( "great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit" ) @mock.patch("subprocess.call", return_value=True, side_effect=None) @mock.patch("webbrowser.open", return_value=True, side_effect=None) def test_suite_new_interactive_malformed_batch_request_json_file_raises_error( mock_webbroser, mock_subprocess, mock_emit, caplog, monkeypatch, titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled, ): context: DataContext = titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled monkeypatch.chdir(os.path.dirname(context.root_directory)) project_dir: str = context.root_directory uncommitted_dir: 
@mock.patch(
    "great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit"
)
@mock.patch("subprocess.call", return_value=True, side_effect=None)
@mock.patch("webbrowser.open", return_value=True, side_effect=None)
def test_suite_new_interactive_valid_batch_request_from_json_file_in_notebook_runs_notebook_no_jupyter(
    mock_webbroser,
    mock_subprocess,
    mock_emit,
    caplog,
    monkeypatch,
    titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled,
):
    """``suite new --interactive --batch-request <valid JSON file> --no-jupyter`` must
    create the suite, embed the given batch_request in the edit notebook, and not
    open jupyter or Data Docs.
    """
    context: DataContext = titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled
    monkeypatch.chdir(os.path.dirname(context.root_directory))

    project_dir: str = context.root_directory
    uncommitted_dir: str = os.path.join(project_dir, "uncommitted")

    expectation_suite_name: str = "test_suite_name"

    batch_request: dict = {
        "datasource_name": "my_datasource",
        "data_connector_name": "my_basic_data_connector",
        "data_asset_name": "Titanic_1911",
    }

    # (fixed: dropped useless f-string prefix on a literal with no placeholders)
    batch_request_file_path: str = os.path.join(uncommitted_dir, "batch_request.json")
    with open(batch_request_file_path, "w") as json_file:
        json.dump(batch_request, json_file)

    runner: CliRunner = CliRunner(mix_stderr=False)
    result: Result = runner.invoke(
        cli,
        f"""--v3-api suite new --expectation-suite {expectation_suite_name} --interactive --batch-request
{batch_request_file_path} --no-jupyter
""",
        catch_exceptions=False,
    )
    assert result.exit_code == 0

    stdout: str = result.stdout
    assert "Error" not in stdout

    expected_suite_path: str = os.path.join(
        project_dir, "expectations", f"{expectation_suite_name}.json"
    )
    assert os.path.isfile(expected_suite_path)

    expected_notebook_path: str = os.path.join(
        uncommitted_dir, f"edit_{expectation_suite_name}.ipynb"
    )
    assert os.path.isfile(expected_notebook_path)

    # Reformat str(BatchRequest(...)) to match the indentation style the notebook
    # renderer uses, so we can search for the exact cell text.
    batch_request_string: str = (
        str(BatchRequest(**batch_request))
        .replace("{\n", "{\n ")
        .replace(",\n", ",\n ")
        .replace("\n}", ",\n}")
    )
    batch_request_string = fr"batch_request = {batch_request_string}"

    # The batch_request we supplied must appear in exactly one notebook cell.
    cells_of_interest_dict: Dict[int, dict] = find_code_in_notebook(
        nb=load_notebook_from_path(notebook_path=expected_notebook_path),
        search_string=batch_request_string,
    )
    assert len(cells_of_interest_dict) == 1

    # No cell should open Data Docs for the suite itself ...
    cells_of_interest_dict: Dict[int, dict] = find_code_in_notebook(
        nb=load_notebook_from_path(notebook_path=expected_notebook_path),
        search_string="context.open_data_docs(resource_identifier=suite_identifier)",
    )
    assert not cells_of_interest_dict

    # ... but exactly one cell opens Data Docs for the validation result.
    cells_of_interest_dict: Dict[int, dict] = find_code_in_notebook(
        nb=load_notebook_from_path(notebook_path=expected_notebook_path),
        search_string="context.open_data_docs(resource_identifier=validation_result_identifier)",
    )
    assert len(cells_of_interest_dict) == 1

    # Execute the notebook headlessly with the Data Docs call stripped out.
    run_notebook(
        notebook_path=expected_notebook_path,
        notebook_dir=uncommitted_dir,
        string_to_be_replaced="context.open_data_docs(resource_identifier=validation_result_identifier)",
        replacement_string="",
    )

    # Reload the context from disk to verify the persisted (still empty) suite.
    context = DataContext(context_root_dir=project_dir)
    assert expectation_suite_name in context.list_expectation_suite_names()
    suite: ExpectationSuite = context.get_expectation_suite(
        expectation_suite_name=expectation_suite_name
    )
    assert suite.expectations == []

    assert mock_subprocess.call_count == 0
    assert mock_webbroser.call_count == 0

    assert mock_emit.call_count == 5
    assert mock_emit.call_args_list == [
        mock.call(
            {"event_payload": {}, "event": "data_context.__init__", "success": True}
        ),
        mock.call(
            {
                "event": "cli.suite.new.begin",
                "event_payload": {"api_version": "v3"},
                "success": True,
            }
        ),
        mock.call(
            {
                "event_payload": {
                    "anonymized_expectation_suite_name": "9df638a13b727807e51b13ec1839bcbe"
                },
                "event": "data_context.save_expectation_suite",
                "success": True,
            }
        ),
        mock.call(
            {
                "event": "cli.suite.new.end",
                "event_payload": {"api_version": "v3"},
                "success": True,
            }
        ),
        mock.call(
            {"event_payload": {}, "event": "data_context.__init__", "success": True}
        ),
    ]

    assert_no_logging_messages_or_tracebacks(
        my_caplog=caplog,
        click_result=result,
    )
nb=load_notebook_from_path(notebook_path=expected_notebook_path), search_string="context.open_data_docs(resource_identifier=validation_result_identifier)", ) assert len(cells_of_interest_dict) == 1 run_notebook( notebook_path=expected_notebook_path, notebook_dir=uncommitted_dir, string_to_be_replaced="context.open_data_docs(resource_identifier=validation_result_identifier)", replacement_string="", ) context = DataContext(context_root_dir=project_dir) assert expectation_suite_name in context.list_expectation_suite_names() suite: ExpectationSuite = context.get_expectation_suite( expectation_suite_name=expectation_suite_name ) assert suite.expectations == [] assert mock_subprocess.call_count == 0 assert mock_webbroser.call_count == 0 assert mock_emit.call_count == 5 assert mock_emit.call_args_list == [ mock.call( {"event_payload": {}, "event": "data_context.__init__", "success": True} ), mock.call( { "event": "cli.suite.new.begin", "event_payload": {"api_version": "v3"}, "success": True, } ), mock.call( { "event_payload": { "anonymized_expectation_suite_name": "9df638a13b727807e51b13ec1839bcbe" }, "event": "data_context.save_expectation_suite", "success": True, } ), mock.call( { "event": "cli.suite.new.end", "event_payload": {"api_version": "v3"}, "success": True, } ), mock.call( {"event_payload": {}, "event": "data_context.__init__", "success": True} ), ] assert_no_logging_messages_or_tracebacks( my_caplog=caplog, click_result=result, ) @mock.patch( "great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit" ) def test_suite_edit_without_suite_name_raises_error( mock_emit, monkeypatch, empty_data_context_stats_enabled, ): """This is really only testing click missing arguments""" context: DataContext = empty_data_context_stats_enabled monkeypatch.chdir(os.path.dirname(context.root_directory)) runner: CliRunner = CliRunner(mix_stderr=False) result: Result = runner.invoke(cli, "--v3-api suite edit", catch_exceptions=False) assert 
result.exit_code == 2 assert ( 'Error: Missing argument "EXPECTATION_SUITE".' in result.stderr or "Error: Missing argument 'EXPECTATION_SUITE'." in result.stderr ) assert mock_emit.call_count == 2 assert mock_emit.call_args_list == [ mock.call( {"event_payload": {}, "event": "data_context.__init__", "success": True} ), mock.call( { "event": "cli.suite.edit.begin", "event_payload": {"api_version": "v3"}, "success": True, } ), ] @mock.patch( "great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit" ) def test_suite_edit_datasource_and_batch_request_error( mock_emit, monkeypatch, empty_data_context_stats_enabled, ): """This is really only testing click missing arguments""" context: DataContext = empty_data_context_stats_enabled monkeypatch.chdir(os.path.dirname(context.root_directory)) expectation_suite_name: str = "test_suite_name" # noinspection PyUnusedLocal suite: ExpectationSuite = context.create_expectation_suite( expectation_suite_name=expectation_suite_name ) assert ( context.list_expectation_suites()[0].expectation_suite_name == expectation_suite_name ) runner: CliRunner = CliRunner(mix_stderr=False) result: Result = runner.invoke( cli, f"--v3-api suite edit {expectation_suite_name} --datasource-name some_datasource_name --batch-request some_file.json --interactive", catch_exceptions=False, ) assert result.exit_code == 1 stdout: str = result.stdout assert ( "Only one of --datasource-name DATASOURCE_NAME and --batch-request <path to JSON file> options can be used." 
in stdout ) assert mock_emit.call_count == 3 assert mock_emit.call_args_list == [ mock.call( {"event_payload": {}, "event": "data_context.__init__", "success": True} ), mock.call( { "event": "cli.suite.edit.begin", "event_payload": {"api_version": "v3"}, "success": True, } ), mock.call( { "event": "cli.suite.edit.end", "event_payload": {"api_version": "v3"}, "success": False, } ), ] @mock.patch( "great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit" ) @mock.patch("subprocess.call", return_value=True, side_effect=None) @mock.patch("webbrowser.open", return_value=True, side_effect=None) def test_suite_edit_with_non_existent_suite_name_raises_error( mock_webbrowser, mock_subprocess, mock_emit, caplog, monkeypatch, empty_data_context_stats_enabled, ): """ The command should: - exit with a clear error message - NOT open Data Docs - NOT open jupyter """ context: DataContext = empty_data_context_stats_enabled assert not context.list_expectation_suites() monkeypatch.chdir(os.path.dirname(context.root_directory)) runner: CliRunner = CliRunner(mix_stderr=False) result: Result = runner.invoke( cli, f"--v3-api suite edit not_a_real_suite", catch_exceptions=False, ) assert result.exit_code == 1 stdout: str = result.stdout assert "Could not find a suite named `not_a_real_suite`." 
in stdout assert "by running `great_expectations suite list`" in stdout assert mock_subprocess.call_count == 0 assert mock_webbrowser.call_count == 0 assert mock_emit.call_count == 3 assert mock_emit.call_args_list == [ mock.call( {"event_payload": {}, "event": "data_context.__init__", "success": True} ), mock.call( { "event": "cli.suite.edit.begin", "event_payload": {"api_version": "v3"}, "success": True, } ), mock.call( { "event": "cli.suite.edit.end", "event_payload": {"api_version": "v3"}, "success": False, } ), ] assert_no_logging_messages_or_tracebacks( my_caplog=caplog, click_result=result, ) @mock.patch( "great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit" ) @mock.patch("subprocess.call", return_value=True, side_effect=None) @mock.patch("webbrowser.open", return_value=True, side_effect=None) def test_suite_edit_with_non_existent_datasource_shows_helpful_error_message( mock_webbrowser, mock_subprocess, mock_emit, caplog, monkeypatch, empty_data_context_stats_enabled, ): """ The command should: - exit with a clear error message - NOT open Data Docs - NOT open jupyter """ context: DataContext = empty_data_context_stats_enabled monkeypatch.chdir(os.path.dirname(context.root_directory)) expectation_suite_name: str = "test_suite_name" # noinspection PyUnusedLocal suite: ExpectationSuite = context.create_expectation_suite( expectation_suite_name=expectation_suite_name ) assert ( context.list_expectation_suites()[0].expectation_suite_name == expectation_suite_name ) runner: CliRunner = CliRunner(mix_stderr=False) result: Result = runner.invoke( cli, f"--v3-api suite edit {expectation_suite_name} --interactive --datasource-name not_real", catch_exceptions=False, ) assert result.exit_code == 1 stdout: str = result.stdout assert ( "Unable to load datasource `not_real` -- no configuration found or invalid configuration." 
@mock.patch(
    "great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit"
)
@mock.patch("subprocess.call", return_value=True, side_effect=None)
@mock.patch("webbrowser.open", return_value=True, side_effect=None)
def test_suite_edit_multiple_datasources_with_no_additional_args_without_citations_runs_notebook_opens_jupyter(
    mock_webbrowser,
    mock_subprocess,
    mock_emit,
    caplog,
    monkeypatch,
    titanic_v013_multi_datasource_pandas_data_context_with_checkpoints_v1_with_empty_store_stats_enabled,
):
    """
    Here we verify that the "suite edit" command helps the user to specify batch_request
    when it is called without the optional command-line arguments that specify the batch.

    First, we call the "suite new" command to create the expectation suite our test
    will edit -- this step is a just a setup.

    We then call the "suite edit" command without any optional command-line arguments.  This means
    that the command will help us specify batch_request interactively.

    The data context has two datasources -- we choose one of them.  After that, we select a data
    connector and, finally, select a data asset from the list.

    The command should:
    - NOT open Data Docs
    - open jupyter
    """
    context: DataContext = titanic_v013_multi_datasource_pandas_data_context_with_checkpoints_v1_with_empty_store_stats_enabled
    monkeypatch.chdir(os.path.dirname(context.root_directory))

    project_dir: str = context.root_directory
    uncommitted_dir: str = os.path.join(project_dir, "uncommitted")

    expectation_suite_name: str = "test_suite_name"

    # The batch the interactive prompt answers below ("2\n1\n1\n\n") will select.
    batch_request: dict = {
        "datasource_name": "my_datasource",
        "data_connector_name": "my_basic_data_connector",
        "data_asset_name": "Titanic_1911",
        "limit": 1000,
    }

    # Reformat str(BatchRequest(...)) to match the notebook cell's indentation so
    # we can search for the exact cell text later.
    batch_request_string: str = (
        str(BatchRequest(**batch_request))
        .replace("{\n", "{\n ")
        .replace(",\n", ",\n ")
        .replace("\n}", ",\n}")
    )
    batch_request_string = fr"batch_request = {batch_request_string}"

    # Setup: create the suite interactively (datasource 2, connector 1, asset 1).
    runner: CliRunner = CliRunner(mix_stderr=False)
    result: Result = runner.invoke(
        cli,
        [
            "--v3-api",
            "suite",
            "new",
            "--expectation-suite",
            f"{expectation_suite_name}",
            "--interactive",
            "--no-jupyter",
        ],
        input="2\n1\n1\n\n",
        catch_exceptions=False,
    )
    assert result.exit_code == 0

    stdout: str = result.stdout
    assert "A batch of data is required to edit the suite" in stdout
    assert "Select a datasource" in stdout

    assert mock_webbrowser.call_count == 0
    mock_webbrowser.reset_mock()
    assert mock_subprocess.call_count == 0
    mock_subprocess.reset_mock()

    # remove the citations from the suite
    context = DataContext(context_root_dir=project_dir)
    suite: ExpectationSuite = context.get_expectation_suite(
        expectation_suite_name=expectation_suite_name
    )
    assert isinstance(suite, ExpectationSuite)
    suite.meta.pop("citations", None)
    context.save_expectation_suite(expectation_suite=suite)

    # Actual testing really starts here
    runner = CliRunner(mix_stderr=False)
    monkeypatch.chdir(os.path.dirname(context.root_directory))
    result = runner.invoke(
        cli,
        [
            "--v3-api",
            "suite",
            "edit",
            f"{expectation_suite_name}",
            "--interactive",
        ],
        input="2\n1\n1\n\n",
        catch_exceptions=False,
    )

    assert result.exit_code == 0
    stdout = result.stdout
    # Without citations, "suite edit" must prompt for the batch again.
    assert "A batch of data is required to edit the suite" in stdout
    assert "Select a datasource" in stdout

    expected_notebook_path: str = os.path.join(
        uncommitted_dir, f"edit_{expectation_suite_name}.ipynb"
    )
    assert os.path.isfile(expected_notebook_path)

    expected_suite_path: str = os.path.join(
        project_dir, "expectations", f"{expectation_suite_name}.json"
    )
    assert os.path.isfile(expected_suite_path)

    # The interactively-chosen batch_request must appear in exactly one cell.
    cells_of_interest_dict: Dict[int, dict] = find_code_in_notebook(
        nb=load_notebook_from_path(notebook_path=expected_notebook_path),
        search_string=batch_request_string,
    )
    assert len(cells_of_interest_dict) == 1

    # No cell opens Data Docs for the suite itself ...
    cells_of_interest_dict: Dict[int, dict] = find_code_in_notebook(
        nb=load_notebook_from_path(notebook_path=expected_notebook_path),
        search_string="context.open_data_docs(resource_identifier=suite_identifier)",
    )
    assert not cells_of_interest_dict

    # ... but exactly one cell opens Data Docs for the validation result.
    cells_of_interest_dict: Dict[int, dict] = find_code_in_notebook(
        nb=load_notebook_from_path(notebook_path=expected_notebook_path),
        search_string="context.open_data_docs(resource_identifier=validation_result_identifier)",
    )
    assert len(cells_of_interest_dict) == 1

    # Execute the notebook headlessly with the Data Docs call stripped out.
    run_notebook(
        notebook_path=expected_notebook_path,
        notebook_dir=uncommitted_dir,
        string_to_be_replaced="context.open_data_docs(resource_identifier=validation_result_identifier)",
        replacement_string="",
    )

    # jupyter was launched exactly once (subprocess); Data Docs never opened.
    assert mock_subprocess.call_count == 1
    assert mock_webbrowser.call_count == 0

    assert mock_emit.call_count == 10
    assert mock_emit.call_args_list == [
        mock.call(
            {"event_payload": {}, "event": "data_context.__init__", "success": True}
        ),
        mock.call(
            {
                "event": "cli.suite.new.begin",
                "event_payload": {"api_version": "v3"},
                "success": True,
            }
        ),
        mock.call(
            {
                "event_payload": {
                    "anonymized_expectation_suite_name": "9df638a13b727807e51b13ec1839bcbe"
                },
                "event": "data_context.save_expectation_suite",
                "success": True,
            }
        ),
        mock.call(
            {
                "event": "cli.suite.new.end",
                "event_payload": {"api_version": "v3"},
                "success": True,
            }
        ),
        mock.call(
            {"event_payload": {}, "event": "data_context.__init__", "success": True}
        ),
        mock.call(
            {
                "event_payload": {
                    "anonymized_expectation_suite_name": "9df638a13b727807e51b13ec1839bcbe"
                },
                "event": "data_context.save_expectation_suite",
                "success": True,
            }
        ),
        mock.call(
            {"event_payload": {}, "event": "data_context.__init__", "success": True}
        ),
        mock.call(
            {
                "event": "cli.suite.edit.begin",
                "event_payload": {"api_version": "v3"},
                "success": True,
            }
        ),
        mock.call(
            {
                "event_payload": {
                    "anonymized_expectation_suite_name": "9df638a13b727807e51b13ec1839bcbe"
                },
                "event": "data_context.save_expectation_suite",
                "success": True,
            }
        ),
        mock.call(
            {
                "event": "cli.suite.edit.end",
                "event_payload": {
                    "anonymized_expectation_suite_name": "9df638a13b727807e51b13ec1839bcbe",
                    "api_version": "v3",
                },
                "success": True,
            }
        ),
    ]

    assert_no_logging_messages_or_tracebacks(
        my_caplog=caplog,
        click_result=result,
    )
"success": True} ), mock.call( { "event_payload": { "anonymized_expectation_suite_name": "9df638a13b727807e51b13ec1839bcbe" }, "event": "data_context.save_expectation_suite", "success": True, } ), mock.call( {"event_payload": {}, "event": "data_context.__init__", "success": True} ), mock.call( { "event": "cli.suite.edit.begin", "event_payload": {"api_version": "v3"}, "success": True, } ), mock.call( { "event_payload": { "anonymized_expectation_suite_name": "9df638a13b727807e51b13ec1839bcbe" }, "event": "data_context.save_expectation_suite", "success": True, } ), mock.call( { "event": "cli.suite.edit.end", "event_payload": { "anonymized_expectation_suite_name": "9df638a13b727807e51b13ec1839bcbe", "api_version": "v3", }, "success": True, } ), ] assert_no_logging_messages_or_tracebacks( my_caplog=caplog, click_result=result, ) @mock.patch( "great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit" ) @mock.patch("subprocess.call", return_value=True, side_effect=None) @mock.patch("webbrowser.open", return_value=True, side_effect=None) def test_suite_edit_multiple_datasources_with_no_additional_args_with_citations_runs_notebook_opens_jupyter( mock_webbrowser, mock_subprocess, mock_emit, caplog, monkeypatch, titanic_v013_multi_datasource_pandas_data_context_with_checkpoints_v1_with_empty_store_stats_enabled, ): """ Here we verify that the "suite edit" command uses the batch_request found in citations in the existing suite when it is called without the optional command-line arguments that specify the batch. First, we call the "suite new" command to create the expectation suite our test will edit -- this step is a just a setup. We then call the "suite edit" command without any optional command-line-arguments. 
The command should: - NOT open Data Docs - NOT open jupyter """ context: DataContext = titanic_v013_multi_datasource_pandas_data_context_with_checkpoints_v1_with_empty_store_stats_enabled monkeypatch.chdir(os.path.dirname(context.root_directory)) project_dir: str = context.root_directory uncommitted_dir: str = os.path.join(project_dir, "uncommitted") expectation_suite_name: str = "test_suite_name" batch_request: dict = { "datasource_name": "my_datasource", "data_connector_name": "my_basic_data_connector", "data_asset_name": "Titanic_1911", "limit": 1000, } batch_request_string: str = ( str(BatchRequest(**batch_request)) .replace("{\n", "{\n ") .replace(",\n", ",\n ") .replace("\n}", ",\n}") ) batch_request_string = fr"batch_request = {batch_request_string}" runner: CliRunner = CliRunner(mix_stderr=False) result: Result = runner.invoke( cli, [ "--v3-api", "suite", "new", "--expectation-suite", f"{expectation_suite_name}", "--interactive", "--no-jupyter", ], input="2\n1\n1\n\n", catch_exceptions=False, ) assert result.exit_code == 0 stdout: str = result.stdout assert "A batch of data is required to edit the suite" in stdout assert "Select a datasource" in stdout assert mock_webbrowser.call_count == 0 mock_webbrowser.reset_mock() assert mock_subprocess.call_count == 0 mock_subprocess.reset_mock() context = DataContext(context_root_dir=project_dir) suite: ExpectationSuite = context.get_expectation_suite( expectation_suite_name=expectation_suite_name ) assert isinstance(suite, ExpectationSuite) # Actual testing really starts here runner = CliRunner(mix_stderr=False) monkeypatch.chdir(os.path.dirname(context.root_directory)) result = runner.invoke( cli, [ "--v3-api", "suite", "edit", f"{expectation_suite_name}", "--interactive", ], catch_exceptions=False, ) assert result.exit_code == 0 stdout = result.stdout assert "A batch of data is required to edit the suite" not in stdout assert "Select a datasource" not in stdout expected_notebook_path: str = os.path.join( 
uncommitted_dir, f"edit_{expectation_suite_name}.ipynb" ) assert os.path.isfile(expected_notebook_path) expected_suite_path: str = os.path.join( project_dir, "expectations", f"{expectation_suite_name}.json" ) assert os.path.isfile(expected_suite_path) cells_of_interest_dict: Dict[int, dict] = find_code_in_notebook( nb=load_notebook_from_path(notebook_path=expected_notebook_path), search_string=batch_request_string, ) assert len(cells_of_interest_dict) == 1 cells_of_interest_dict: Dict[int, dict] = find_code_in_notebook( nb=load_notebook_from_path(notebook_path=expected_notebook_path), search_string="context.open_data_docs(resource_identifier=suite_identifier)", ) assert not cells_of_interest_dict cells_of_interest_dict: Dict[int, dict] = find_code_in_notebook( nb=load_notebook_from_path(notebook_path=expected_notebook_path), search_string="context.open_data_docs(resource_identifier=validation_result_identifier)", ) assert len(cells_of_interest_dict) == 1 run_notebook( notebook_path=expected_notebook_path, notebook_dir=uncommitted_dir, string_to_be_replaced="context.open_data_docs(resource_identifier=validation_result_identifier)", replacement_string="", ) assert mock_subprocess.call_count == 1 assert mock_webbrowser.call_count == 0 assert mock_emit.call_count == 8 assert mock_emit.call_args_list == [ mock.call( {"event_payload": {}, "event": "data_context.__init__", "success": True} ), mock.call( { "event": "cli.suite.new.begin", "event_payload": {"api_version": "v3"}, "success": True, } ), mock.call( { "event_payload": { "anonymized_expectation_suite_name": "9df638a13b727807e51b13ec1839bcbe" }, "event": "data_context.save_expectation_suite", "success": True, } ), mock.call( { "event": "cli.suite.new.end", "event_payload": {"api_version": "v3"}, "success": True, } ), mock.call( {"event_payload": {}, "event": "data_context.__init__", "success": True} ), mock.call( {"event_payload": {}, "event": "data_context.__init__", "success": True} ), mock.call( { "event": 
"cli.suite.edit.begin", "event_payload": {"api_version": "v3"}, "success": True, } ), mock.call( { "event": "cli.suite.edit.end", "event_payload": { "anonymized_expectation_suite_name": "9df638a13b727807e51b13ec1839bcbe", "api_version": "v3", }, "success": True, } ), ] assert_no_logging_messages_or_tracebacks( my_caplog=caplog, click_result=result, ) @mock.patch( "great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit" ) @mock.patch("subprocess.call", return_value=True, side_effect=None) @mock.patch("webbrowser.open", return_value=True, side_effect=None) def test_suite_edit_multiple_datasources_with_sql_with_no_additional_args_without_citations_runs_notebook_opens_jupyter( mock_webbrowser, mock_subprocess, mock_emit, caplog, monkeypatch, titanic_v013_multi_datasource_multi_execution_engine_data_context_with_checkpoints_v1_with_empty_store_stats_enabled, ): """ Here we verify that the "suite edit" command helps the user to specify batch_request when it is called without the optional command-line arguments that specify the batch. First, we call the "suite new" command to create the expectation suite our test will edit -- this step is a just a setup (we use an SQL datasource for this test). We then call the "suite edit" command without any optional command-line arguments. This means that the command will help us specify batch_request interactively. The data context has two datasources -- we choose one of them. After that, we select a data connector and, finally, select a data asset from the list. 
The command should: - NOT open Data Docs - open jupyter """ context: DataContext = titanic_v013_multi_datasource_multi_execution_engine_data_context_with_checkpoints_v1_with_empty_store_stats_enabled monkeypatch.chdir(os.path.dirname(context.root_directory)) project_dir: str = context.root_directory uncommitted_dir: str = os.path.join(project_dir, "uncommitted") expectation_suite_name: str = "test_suite_name" batch_request: dict = { "datasource_name": "my_sqlite_db_datasource", "data_connector_name": "whole_table", "data_asset_name": "titanic", "limit": 1000, } batch_request_string: str = ( str(BatchRequest(**batch_request)) .replace("{\n", "{\n ") .replace(",\n", ",\n ") .replace("\n}", ",\n}") ) batch_request_string = fr"batch_request = {batch_request_string}" runner: CliRunner = CliRunner(mix_stderr=False) result: Result = runner.invoke( cli, [ "--v3-api", "suite", "new", "--expectation-suite", f"{expectation_suite_name}", "--interactive", "--no-jupyter", ], input="3\n2\ny\n2\n", catch_exceptions=False, ) assert result.exit_code == 0 stdout: str = result.stdout assert "A batch of data is required to edit the suite" in stdout assert "Select a datasource" in stdout assert mock_webbrowser.call_count == 0 mock_webbrowser.reset_mock() assert mock_subprocess.call_count == 0 mock_subprocess.reset_mock() # remove the citations from the suite context = DataContext(context_root_dir=project_dir) suite: ExpectationSuite = context.get_expectation_suite( expectation_suite_name=expectation_suite_name ) assert isinstance(suite, ExpectationSuite) suite.meta.pop("citations", None) context.save_expectation_suite(expectation_suite=suite) # Actual testing really starts here runner = CliRunner(mix_stderr=False) monkeypatch.chdir(os.path.dirname(context.root_directory)) result = runner.invoke( cli, [ "--v3-api", "suite", "edit", f"{expectation_suite_name}", "--interactive", ], input="3\n2\ny\n2\n", catch_exceptions=False, ) assert result.exit_code == 0 stdout = result.stdout assert "A 
batch of data is required to edit the suite" in stdout assert "Select a datasource" in stdout expected_notebook_path: str = os.path.join( uncommitted_dir, f"edit_{expectation_suite_name}.ipynb" ) assert os.path.isfile(expected_notebook_path) expected_suite_path: str = os.path.join( project_dir, "expectations", f"{expectation_suite_name}.json" ) assert os.path.isfile(expected_suite_path) cells_of_interest_dict: Dict[int, dict] = find_code_in_notebook( nb=load_notebook_from_path(notebook_path=expected_notebook_path), search_string=batch_request_string, ) assert len(cells_of_interest_dict) == 1 cells_of_interest_dict: Dict[int, dict] = find_code_in_notebook( nb=load_notebook_from_path(notebook_path=expected_notebook_path), search_string="context.open_data_docs(resource_identifier=suite_identifier)", ) assert not cells_of_interest_dict cells_of_interest_dict: Dict[int, dict] = find_code_in_notebook( nb=load_notebook_from_path(notebook_path=expected_notebook_path), search_string="context.open_data_docs(resource_identifier=validation_result_identifier)", ) assert len(cells_of_interest_dict) == 1 run_notebook( notebook_path=expected_notebook_path, notebook_dir=uncommitted_dir, string_to_be_replaced="context.open_data_docs(resource_identifier=validation_result_identifier)", replacement_string="", ) assert mock_subprocess.call_count == 1 assert mock_webbrowser.call_count == 0 assert mock_emit.call_count == 10 assert mock_emit.call_args_list == [ mock.call( {"event_payload": {}, "event": "data_context.__init__", "success": True} ), mock.call( { "event": "cli.suite.new.begin", "event_payload": {"api_version": "v3"}, "success": True, } ), mock.call( { "event_payload": { "anonymized_expectation_suite_name": "9df638a13b727807e51b13ec1839bcbe" }, "event": "data_context.save_expectation_suite", "success": True, } ), mock.call( { "event": "cli.suite.new.end", "event_payload": {"api_version": "v3"}, "success": True, } ), mock.call( {"event_payload": {}, "event": 
"data_context.__init__", "success": True} ), mock.call( { "event_payload": { "anonymized_expectation_suite_name": "9df638a13b727807e51b13ec1839bcbe" }, "event": "data_context.save_expectation_suite", "success": True, } ), mock.call( {"event_payload": {}, "event": "data_context.__init__", "success": True} ), mock.call( { "event": "cli.suite.edit.begin", "event_payload": {"api_version": "v3"}, "success": True, } ), mock.call( { "event_payload": { "anonymized_expectation_suite_name": "9df638a13b727807e51b13ec1839bcbe" }, "event": "data_context.save_expectation_suite", "success": True, } ), mock.call( { "event": "cli.suite.edit.end", "event_payload": { "anonymized_expectation_suite_name": "9df638a13b727807e51b13ec1839bcbe", "api_version": "v3", }, "success": True, } ), ] assert_no_logging_messages_or_tracebacks( my_caplog=caplog, click_result=result, ) @mock.patch( "great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit" ) @mock.patch("subprocess.call", return_value=True, side_effect=None) @mock.patch("webbrowser.open", return_value=True, side_effect=None) def test_suite_edit_multiple_datasources_with_sql_with_no_additional_args_with_citations_runs_notebook_opens_jupyter( mock_webbrowser, mock_subprocess, mock_emit, caplog, monkeypatch, titanic_v013_multi_datasource_multi_execution_engine_data_context_with_checkpoints_v1_with_empty_store_stats_enabled, ): """ Here we verify that the "suite edit" command uses the batch_request found in citations in the existing suite when it is called without the optional command-line arguments that specify the batch. First, we call the "suite new" command to create the expectation suite our test will edit -- this step is a just a setup (we use an SQL datasource for this test). We then call the "suite edit" command without any optional command-line-arguments. 
    The command should:
    - NOT open Data Docs
    - NOT open jupyter
    """
    context: DataContext = titanic_v013_multi_datasource_multi_execution_engine_data_context_with_checkpoints_v1_with_empty_store_stats_enabled
    monkeypatch.chdir(os.path.dirname(context.root_directory))

    project_dir: str = context.root_directory
    uncommitted_dir: str = os.path.join(project_dir, "uncommitted")

    expectation_suite_name: str = "test_suite_name"

    # Batch request against the SQLite datasource; "limit" keeps the batch small.
    batch_request: dict = {
        "datasource_name": "my_sqlite_db_datasource",
        "data_connector_name": "whole_table",
        "data_asset_name": "titanic",
        "limit": 1000,
    }

    # Render the batch request exactly the way the generated notebook will
    # contain it, so we can search for that code cell verbatim below.
    batch_request_string: str = (
        str(BatchRequest(**batch_request))
        .replace("{\n", "{\n  ")
        .replace(",\n", ",\n  ")
        .replace("\n}", ",\n}")
    )
    batch_request_string = fr"batch_request = {batch_request_string}"

    runner: CliRunner = CliRunner(mix_stderr=False)
    result: Result = runner.invoke(
        cli,
        [
            "--v3-api",
            "suite",
            "new",
            "--expectation-suite",
            f"{expectation_suite_name}",
            "--interactive",
            "--no-jupyter",
        ],
        # Simulated prompt answers: datasource choice, data asset, confirm, etc.
        input="3\n2\ny\n2\n",
        catch_exceptions=False,
    )
    assert result.exit_code == 0

    stdout: str = result.stdout
    assert "A batch of data is required to edit the suite" in stdout
    assert "Select a datasource" in stdout

    assert mock_webbrowser.call_count == 0
    mock_webbrowser.reset_mock()
    assert mock_subprocess.call_count == 0
    mock_subprocess.reset_mock()

    # Re-load the context from disk to verify the suite was actually persisted.
    context = DataContext(context_root_dir=project_dir)
    suite: ExpectationSuite = context.get_expectation_suite(
        expectation_suite_name=expectation_suite_name
    )
    assert isinstance(suite, ExpectationSuite)

    # Actual testing really starts here
    runner = CliRunner(mix_stderr=False)
    monkeypatch.chdir(os.path.dirname(context.root_directory))
    result = runner.invoke(
        cli,
        [
            "--v3-api",
            "suite",
            "edit",
            f"{expectation_suite_name}",
            "--interactive",
        ],
        catch_exceptions=False,
    )
    assert result.exit_code == 0

    stdout = result.stdout
    # Editing an existing suite created with a batch request must not re-prompt.
    assert "A batch of data is required to edit the suite" not in stdout
    assert "Select a datasource" not in stdout

    expected_notebook_path: str = os.path.join(
        uncommitted_dir, f"edit_{expectation_suite_name}.ipynb"
    )
    assert os.path.isfile(expected_notebook_path)

    expected_suite_path: str = os.path.join(
        project_dir, "expectations", f"{expectation_suite_name}.json"
    )
    assert os.path.isfile(expected_suite_path)

    # The generated notebook must embed the exact batch_request code cell.
    cells_of_interest_dict: Dict[int, dict] = find_code_in_notebook(
        nb=load_notebook_from_path(notebook_path=expected_notebook_path),
        search_string=batch_request_string,
    )
    assert len(cells_of_interest_dict) == 1

    # The notebook must NOT open Data Docs for the suite identifier ...
    cells_of_interest_dict: Dict[int, dict] = find_code_in_notebook(
        nb=load_notebook_from_path(notebook_path=expected_notebook_path),
        search_string="context.open_data_docs(resource_identifier=suite_identifier)",
    )
    assert not cells_of_interest_dict

    # ... but it does contain the validation-result call (stripped out below
    # before execution so nothing is opened during the test run).
    cells_of_interest_dict: Dict[int, dict] = find_code_in_notebook(
        nb=load_notebook_from_path(notebook_path=expected_notebook_path),
        search_string="context.open_data_docs(resource_identifier=validation_result_identifier)",
    )
    assert len(cells_of_interest_dict) == 1

    run_notebook(
        notebook_path=expected_notebook_path,
        notebook_dir=uncommitted_dir,
        string_to_be_replaced="context.open_data_docs(resource_identifier=validation_result_identifier)",
        replacement_string="",
    )

    assert mock_subprocess.call_count == 1
    assert mock_webbrowser.call_count == 0

    # Exact usage-statistics event sequence for "suite new" + "suite edit".
    assert mock_emit.call_count == 8
    assert mock_emit.call_args_list == [
        mock.call(
            {"event_payload": {}, "event": "data_context.__init__", "success": True}
        ),
        mock.call(
            {
                "event": "cli.suite.new.begin",
                "event_payload": {"api_version": "v3"},
                "success": True,
            }
        ),
        mock.call(
            {
                "event_payload": {
                    "anonymized_expectation_suite_name": "9df638a13b727807e51b13ec1839bcbe"
                },
                "event": "data_context.save_expectation_suite",
                "success": True,
            }
        ),
        mock.call(
            {
                "event": "cli.suite.new.end",
                "event_payload": {"api_version": "v3"},
                "success": True,
            }
        ),
        mock.call(
            {"event_payload": {}, "event": "data_context.__init__", "success": True}
        ),
        mock.call(
            {"event_payload": {}, "event": "data_context.__init__", "success": True}
        ),
        mock.call(
            {
                "event": "cli.suite.edit.begin",
                "event_payload": {"api_version": "v3"},
                "success": True,
            }
        ),
        mock.call(
            {
                "event": "cli.suite.edit.end",
                "event_payload": {
                    "anonymized_expectation_suite_name": "9df638a13b727807e51b13ec1839bcbe",
                    "api_version": "v3",
                },
                "success": True,
            }
        ),
    ]

    assert_no_logging_messages_or_tracebacks(
        my_caplog=caplog,
        click_result=result,
    )


@mock.patch(
    "great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit"
)
@mock.patch("subprocess.call", return_value=True, side_effect=None)
@mock.patch("webbrowser.open", return_value=True, side_effect=None)
def test_suite_edit_interactive_batch_request_without_datasource_json_file_raises_helpful_error(
    # NOTE(review): "mock_webbroser" is a typo for "mock_webbrowser"; binding is
    # positional (bottom-most patch first), so behavior is unaffected.
    mock_webbroser,
    mock_subprocess,
    mock_emit,
    caplog,
    monkeypatch,
    empty_data_context_stats_enabled,
):
    """`suite edit --interactive --batch-request <file>` where the JSON file is
    missing "datasource_name" must exit 1 with a helpful validation message."""
    context: DataContext = empty_data_context_stats_enabled
    monkeypatch.chdir(os.path.dirname(context.root_directory))

    project_dir: str = context.root_directory
    uncommitted_dir: str = os.path.join(project_dir, "uncommitted")

    expectation_suite_name: str = "test_suite_name"

    # noinspection PyUnusedLocal
    suite: ExpectationSuite = context.create_expectation_suite(
        expectation_suite_name=expectation_suite_name
    )
    assert (
        context.list_expectation_suites()[0].expectation_suite_name
        == expectation_suite_name
    )

    # Deliberately omits the required "datasource_name" key.
    batch_request: dict = {
        "data_connector_name": "my_basic_data_connector",
        "data_asset_name": "Titanic_1911",
    }

    # NOTE(review): f-string has no placeholders (lint F541); plain string would do.
    batch_request_file_path: str = os.path.join(
        uncommitted_dir, f"batch_request_missing_datasource.json"
    )
    with open(batch_request_file_path, "w") as json_file:
        json.dump(batch_request, json_file)

    runner: CliRunner = CliRunner(mix_stderr=False)
    result: Result = runner.invoke(
        cli,
        f"""--v3-api suite edit {expectation_suite_name} --interactive --batch-request
{batch_request_file_path} --no-jupyter
""",
        catch_exceptions=False,
    )
    assert result.exit_code == 1

    stdout: str = result.stdout
    assert "A batch of data is required to edit the suite" not in stdout
    assert "Select a datasource" not in stdout
    assert (
        "Please check that your batch_request is valid and is able to load a batch."
        in stdout
    )
    # NOTE(review): grammar in this message ("an datasource") is asserted as-is
    # because it must match the actual CLI output.
    assert 'The type of an datasource name must be a string (Python "str").' in stdout

    assert mock_subprocess.call_count == 0
    assert mock_webbroser.call_count == 0

    assert mock_emit.call_count == 3
    assert mock_emit.call_args_list == [
        mock.call(
            {"event_payload": {}, "event": "data_context.__init__", "success": True}
        ),
        mock.call(
            {
                "event": "cli.suite.edit.begin",
                "event_payload": {"api_version": "v3"},
                "success": True,
            }
        ),
        mock.call(
            {
                "event": "cli.suite.edit.end",
                "event_payload": {"api_version": "v3"},
                "success": False,
            }
        ),
    ]

    assert_no_logging_messages_or_tracebacks(
        my_caplog=caplog,
        click_result=result,
    )


@mock.patch(
    "great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit"
)
def test_suite_list_with_zero_suites(
    mock_emit, caplog, monkeypatch, empty_data_context_stats_enabled
):
    """`suite list` in a project with no suites prints "No Expectation Suites found"."""
    context: DataContext = empty_data_context_stats_enabled
    config_file_path: str = os.path.join(
        context.root_directory, "great_expectations.yml"
    )
    assert os.path.exists(config_file_path)
    monkeypatch.chdir(os.path.dirname(context.root_directory))

    runner: CliRunner = CliRunner(mix_stderr=False)
    result: Result = runner.invoke(
        cli,
        # NOTE(review): f-string has no placeholders (lint F541).
        f"--v3-api suite list",
        catch_exceptions=False,
    )
    assert result.exit_code == 0

    stdout: str = result.stdout
    assert "No Expectation Suites found" in stdout

    assert mock_emit.call_count == 3
    assert mock_emit.call_args_list == [
        mock.call(
            {"event_payload": {}, "event": "data_context.__init__", "success": True}
        ),
        mock.call(
            {
                "event": "cli.suite.list.begin",
                "event_payload": {"api_version": "v3"},
                "success": True,
            }
        ),
        mock.call(
            {
                "event": "cli.suite.list.end",
                "event_payload": {"api_version": "v3"},
                "success": True,
            }
        ),
    ]

    assert_no_logging_messages_or_tracebacks(
        my_caplog=caplog,
        click_result=result,
    )


@mock.patch(
    "great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit"
)
def test_suite_list_with_one_suite(
    mock_emit, caplog, monkeypatch, empty_data_context_stats_enabled
):
    """`suite list` with exactly one suite prints the count and its dotted name."""
    context: DataContext = empty_data_context_stats_enabled
    monkeypatch.chdir(os.path.dirname(context.root_directory))
    project_dir: str = context.root_directory

    config_file_path: str = os.path.join(project_dir, "great_expectations.yml")
    assert os.path.exists(config_file_path)

    expectation_suite_dir_name: str = "a_dir"
    expectation_suite_name: str = "test_suite_name"

    # noinspection PyUnusedLocal
    suite: ExpectationSuite = context.create_expectation_suite(
        expectation_suite_name=f"{expectation_suite_dir_name}.{expectation_suite_name}"
    )

    runner: CliRunner = CliRunner(mix_stderr=False)
    result: Result = runner.invoke(
        cli,
        f"--v3-api suite list",
        catch_exceptions=False,
    )
    assert result.exit_code == 0

    stdout: str = result.stdout
    assert "1 Expectation Suite found" in stdout
    assert f"{expectation_suite_dir_name}.{expectation_suite_name}" in stdout

    assert mock_emit.call_count == 3
    assert mock_emit.call_args_list == [
        mock.call(
            {"event_payload": {}, "event": "data_context.__init__", "success": True}
        ),
        mock.call(
            {
                "event": "cli.suite.list.begin",
                "event_payload": {"api_version": "v3"},
                "success": True,
            }
        ),
        mock.call(
            {
                "event": "cli.suite.list.end",
                "event_payload": {"api_version": "v3"},
                "success": True,
            }
        ),
    ]

    assert_no_logging_messages_or_tracebacks(
        my_caplog=caplog,
        click_result=result,
    )


@mock.patch(
    "great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit"
)
def test_suite_list_with_multiple_suites(
    mock_emit, caplog, monkeypatch, empty_data_context_stats_enabled
):
    """`suite list` with three suites prints the total count and every name."""
    context: DataContext = empty_data_context_stats_enabled
    monkeypatch.chdir(os.path.dirname(context.root_directory))
    project_dir: str = context.root_directory

    # noinspection PyUnusedLocal
    suite_0: ExpectationSuite = context.create_expectation_suite(
        expectation_suite_name="a.warning"
    )
    # noinspection PyUnusedLocal
    suite_1: ExpectationSuite = context.create_expectation_suite(
        expectation_suite_name="b.warning"
    )
    # noinspection PyUnusedLocal
    suite_2: ExpectationSuite = context.create_expectation_suite(
        expectation_suite_name="c.warning"
    )

    config_file_path: str = os.path.join(project_dir, "great_expectations.yml")
    assert os.path.exists(config_file_path)

    runner: CliRunner = CliRunner(mix_stderr=False)
    result: Result = runner.invoke(
        cli,
        # NOTE(review): f-string has no placeholders (lint F541).
        f"--v3-api suite list",
        catch_exceptions=False,
    )
    assert result.exit_code == 0

    stdout: str = result.stdout
    assert "3 Expectation Suites found:" in stdout
    assert "a.warning" in stdout
    assert "b.warning" in stdout
    assert "c.warning" in stdout

    assert mock_emit.call_count == 3
    assert mock_emit.call_args_list == [
        mock.call(
            {"event_payload": {}, "event": "data_context.__init__", "success": True}
        ),
        mock.call(
            {
                "event": "cli.suite.list.begin",
                "event_payload": {"api_version": "v3"},
                "success": True,
            }
        ),
        mock.call(
            {
                "event": "cli.suite.list.end",
                "event_payload": {"api_version": "v3"},
                "success": True,
            }
        ),
    ]

    assert_no_logging_messages_or_tracebacks(
        my_caplog=caplog,
        click_result=result,
    )


@mock.patch(
    "great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit"
)
def test_suite_delete_with_zero_suites(
    mock_emit, caplog, monkeypatch, empty_data_context_stats_enabled
):
    """`suite delete` in a project with no suites exits 1 with a clear message."""
    context: DataContext = empty_data_context_stats_enabled
    monkeypatch.chdir(os.path.dirname(context.root_directory))
    runner: CliRunner = CliRunner(mix_stderr=False)
    result: Result = runner.invoke(
        cli,
        f"--v3-api suite delete not_a_suite",
        catch_exceptions=False,
    )
    assert result.exit_code == 1

    stdout: str = result.stdout
    assert "No expectation suites found in the project" in stdout

    assert mock_emit.call_count == 3
    assert mock_emit.call_args_list == [
        mock.call(
            {"event_payload": {}, "event": "data_context.__init__", "success": True}
        ),
        mock.call(
            {
                "event": "cli.suite.delete.begin",
                "event_payload": {"api_version": "v3"},
                "success": True,
            }
        ),
        mock.call(
            {
                "event": "cli.suite.delete.end",
                "event_payload": {"api_version": "v3"},
                "success": False,
            }
        ),
    ]

    assert_no_logging_messages_or_tracebacks(
        my_caplog=caplog,
        click_result=result,
    )


@mock.patch(
    "great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit"
)
def test_suite_delete_with_non_existent_suite(
    mock_emit, caplog, monkeypatch, empty_data_context_stats_enabled
):
    """Deleting a name that matches no existing suite exits 1 with a clear message."""
    context: DataContext = empty_data_context_stats_enabled
    monkeypatch.chdir(os.path.dirname(context.root_directory))
    expectation_suite_name: str = "test_suite_name"
    suite: ExpectationSuite = context.create_expectation_suite(
        expectation_suite_name=expectation_suite_name
    )
    context.save_expectation_suite(expectation_suite=suite)
    assert (
        context.list_expectation_suites()[0].expectation_suite_name
        == expectation_suite_name
    )

    # Discard the emit calls made during the setup above.
    mock_emit.reset_mock()

    runner: CliRunner = CliRunner(mix_stderr=False)
    result: Result = runner.invoke(
        cli,
        f"--v3-api suite delete not_a_suite",
        catch_exceptions=False,
    )
    assert result.exit_code == 1

    stdout: str = result.stdout
    assert "No expectation suite named not_a_suite found" in stdout

    assert mock_emit.call_count == 3
    assert mock_emit.call_args_list == [
        mock.call(
            {"event_payload": {}, "event": "data_context.__init__", "success": True}
        ),
        mock.call(
            {
                "event": "cli.suite.delete.begin",
                "event_payload": {"api_version": "v3"},
                "success": True,
            }
        ),
        mock.call(
            {
                "event": "cli.suite.delete.end",
                "event_payload": {"api_version": "v3"},
                "success": False,
            }
        ),
    ]

    assert_no_logging_messages_or_tracebacks(
        my_caplog=caplog,
        click_result=result,
    )


@mock.patch(
    "great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit"
)
def test_suite_delete_with_one_suite(
    mock_emit, caplog, monkeypatch, empty_data_context_stats_enabled
):
    """Confirmed `suite delete` removes the suite's JSON file and reports success."""
    context: DataContext = empty_data_context_stats_enabled
    monkeypatch.chdir(os.path.dirname(context.root_directory))
    project_dir: str = empty_data_context_stats_enabled.root_directory

    expectation_suite_dir_name: str = "a_dir"
    expectation_suite_name: str = "test_suite_name"

    # noinspection PyUnusedLocal
    suite: ExpectationSuite = context.create_expectation_suite(
        expectation_suite_name=f"{expectation_suite_dir_name}.{expectation_suite_name}"
    )
    context.save_expectation_suite(expectation_suite=suite)
    assert (
        context.list_expectation_suites()[0].expectation_suite_name
        == f"{expectation_suite_dir_name}.{expectation_suite_name}"
    )

    # Discard the emit calls made during the setup above.
    mock_emit.reset_mock()

    suite_dir: str = os.path.join(
        project_dir, "expectations", expectation_suite_dir_name
    )
    suite_path: str = os.path.join(suite_dir, f"{expectation_suite_name}.json")
    assert os.path.isfile(suite_path)

    runner: CliRunner = CliRunner(mix_stderr=False)
    result: Result = runner.invoke(
        cli,
        f"--v3-api suite delete {expectation_suite_dir_name}.{expectation_suite_name}",
        # Bare newline accepts the default ("Y") at the confirmation prompt.
        input="\n",
        catch_exceptions=False,
    )
    assert result.exit_code == 0

    stdout: str = result.stdout
    assert (
        f"Deleted the expectation suite named: {expectation_suite_dir_name}.{expectation_suite_name}"
        in stdout
    )
    assert not os.path.isfile(suite_path)

    assert mock_emit.call_count == 3
    assert mock_emit.call_args_list == [
        mock.call(
            {"event_payload": {}, "event": "data_context.__init__", "success": True}
        ),
        mock.call(
            {
                "event": "cli.suite.delete.begin",
                "event_payload": {"api_version": "v3"},
                "success": True,
            }
        ),
        mock.call(
            {
                "event": "cli.suite.delete.end",
                "event_payload": {"api_version": "v3"},
                "success": True,
            }
        ),
    ]

    assert_no_logging_messages_or_tracebacks(
        my_caplog=caplog,
        click_result=result,
    )


@mock.patch(
    "great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit"
)
def test_suite_delete_canceled_with_one_suite(
    mock_emit, caplog, monkeypatch, empty_data_context_stats_enabled
):
    """Answering "n" at the delete confirmation prompt leaves the suite file intact."""
    context: DataContext = empty_data_context_stats_enabled
    monkeypatch.chdir(os.path.dirname(context.root_directory))
    project_dir: str = empty_data_context_stats_enabled.root_directory

    expectation_suite_dir_name: str = "a_dir"
    expectation_suite_name: str = "test_suite_name"

    # noinspection PyUnusedLocal
    suite: ExpectationSuite = context.create_expectation_suite(
        expectation_suite_name=f"{expectation_suite_dir_name}.{expectation_suite_name}"
    )
    context.save_expectation_suite(expectation_suite=suite)
    assert (
        context.list_expectation_suites()[0].expectation_suite_name
        == f"{expectation_suite_dir_name}.{expectation_suite_name}"
    )

    # Discard the emit calls made during the setup above.
    mock_emit.reset_mock()

    suite_dir: str = os.path.join(
        project_dir, "expectations", expectation_suite_dir_name
    )
    suite_path: str = os.path.join(suite_dir, f"{expectation_suite_name}.json")
    assert os.path.isfile(suite_path)

    runner: CliRunner = CliRunner(mix_stderr=False)
    result: Result = runner.invoke(
        cli,
        f"--v3-api suite delete {expectation_suite_dir_name}.{expectation_suite_name}",
        # "n" declines the confirmation prompt.
        input="n\n",
        catch_exceptions=False,
    )
    assert result.exit_code == 0

    stdout: str = result.stdout
    assert (
        f"Suite `{expectation_suite_dir_name}.{expectation_suite_name}` was not deleted"
        in stdout
    )
    assert os.path.isfile(suite_path)

    # Only init + delete.begin are emitted; no end event after user cancellation.
    assert mock_emit.call_count == 2
    assert mock_emit.call_args_list == [
        mock.call(
            {"event_payload": {}, "event": "data_context.__init__", "success": True}
        ),
        mock.call(
            {
                "event": "cli.suite.delete.begin",
                "event_payload": {"api_version": "v3"},
                "success": True,
            }
        ),
    ]

    assert_no_logging_messages_or_tracebacks(
        my_caplog=caplog,
        click_result=result,
    )


@mock.patch(
    "great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit"
)
def test_suite_delete_with_one_suite_assume_yes_flag(
    mock_emit, caplog, monkeypatch, empty_data_context_stats_enabled
):
    """`--assume-yes` deletes the suite without showing any confirmation prompt."""
    context: DataContext = empty_data_context_stats_enabled
    monkeypatch.chdir(os.path.dirname(context.root_directory))
    project_dir: str = context.root_directory

    expectation_suite_dir_name: str = "a_dir"
    expectation_suite_name: str = "test_suite_name"

    # noinspection PyUnusedLocal
    suite: ExpectationSuite = context.create_expectation_suite(
        expectation_suite_name=f"{expectation_suite_dir_name}.{expectation_suite_name}"
    )
    context.save_expectation_suite(expectation_suite=suite)
    assert (
        context.list_expectation_suites()[0].expectation_suite_name
        == f"{expectation_suite_dir_name}.{expectation_suite_name}"
    )

    # Discard the emit calls made during the setup above.
    mock_emit.reset_mock()

    suite_dir: str = os.path.join(
        project_dir, "expectations", expectation_suite_dir_name
    )
    suite_path: str = os.path.join(suite_dir, f"{expectation_suite_name}.json")
    assert os.path.isfile(suite_path)

    runner: CliRunner = CliRunner(mix_stderr=False)
    result: Result = runner.invoke(
        cli,
        f"--v3-api --assume-yes suite delete {expectation_suite_dir_name}.{expectation_suite_name}",
        catch_exceptions=False,
    )
    assert result.exit_code == 0

    stdout: str = result.stdout
    assert (
        f"Deleted the expectation suite named: {expectation_suite_dir_name}.{expectation_suite_name}"
        in stdout
    )
    assert "Would you like to proceed? [Y/n]:" not in stdout
    # This assertion is extra assurance since this test is too permissive if we change the confirmation message
    assert "[Y/n]" not in stdout

    assert not os.path.isfile(suite_path)

    assert mock_emit.call_count == 3
    assert mock_emit.call_args_list == [
        mock.call(
            {"event_payload": {}, "event": "data_context.__init__", "success": True}
        ),
        mock.call(
            {
                "event": "cli.suite.delete.begin",
                "event_payload": {"api_version": "v3"},
                "success": True,
            }
        ),
        mock.call(
            {
                "event": "cli.suite.delete.end",
                "event_payload": {"api_version": "v3"},
                "success": True,
            }
        ),
    ]

    assert_no_logging_messages_or_tracebacks(
        my_caplog=caplog,
        click_result=result,
    )

    # Follow-up: the project must now list no suites at all.
    result = runner.invoke(
        cli,
        f"--v3-api suite list",
        catch_exceptions=False,
    )
    assert result.exit_code == 0
    stdout = result.stdout
    assert result.exit_code == 0
    assert "No Expectation Suites found" in stdout


@mock.patch(
    "great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit"
)
@mock.patch("subprocess.call", return_value=True, side_effect=None)
def test_suite_new_profile_on_context_with_no_datasource_raises_error(
    mock_subprocess,
    mock_emit,
    caplog,
    monkeypatch,
    empty_data_context_stats_enabled,
):
    """
    We call the "suite new --profile" command on a
    context with no datasource

    The command should:
    - exit with a clear error message
    - send a DataContext init success message
    - send a new fail message
    """
    context: DataContext = empty_data_context_stats_enabled
    monkeypatch.chdir(os.path.dirname(context.root_directory))

    expectation_suite_name: str = "test_suite_name"

    runner: CliRunner = CliRunner(mix_stderr=False)
    result: Result = runner.invoke(
        cli,
        [
            "--v3-api",
            "suite",
            "new",
            "--interactive",
            "--profile",
            "--expectation-suite",
            f"{expectation_suite_name}",
        ],
        input="\n",
        catch_exceptions=False,
    )
    assert result.exit_code == 1

    stdout: str = result.output
    assert (
        "No datasources found in the context. To add a datasource, run `great_expectations datasource new`"
        in stdout
    )

    assert mock_subprocess.call_count == 0

    assert mock_emit.call_count == 3
    assert mock_emit.call_args_list == [
        mock.call(
            {"event_payload": {}, "event": "data_context.__init__", "success": True}
        ),
        mock.call(
            {
                "event": "cli.suite.new.begin",
                "event_payload": {"api_version": "v3"},
                "success": True,
            }
        ),
        mock.call(
            {
                "event": "cli.suite.new.end",
                "event_payload": {"api_version": "v3"},
                "success": False,
            }
        ),
    ]

    assert_no_logging_messages_or_tracebacks(
        my_caplog=caplog,
        click_result=result,
    )


@mock.patch(
    "great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit"
)
def test_suite_new_profile_on_existing_suite_raises_error(
    mock_emit, caplog, monkeypatch, empty_data_context_stats_enabled
):
    """
    We call the "suite new --profile" command with an existing suite

    The command should:
    - exit with a clear error message
    - send a DataContext init success message
    - send a new fail message
    """
    context: DataContext = empty_data_context_stats_enabled
    monkeypatch.chdir(os.path.dirname(context.root_directory))

    project_dir: str = context.root_directory
    uncommitted_dir: str = os.path.join(project_dir, "uncommitted")

    expectation_suite_name: str = "test_suite_name"

    suite: ExpectationSuite = context.create_expectation_suite(
        expectation_suite_name=expectation_suite_name
    )
    context.save_expectation_suite(expectation_suite=suite)
    assert (
        context.list_expectation_suites()[0].expectation_suite_name
        == expectation_suite_name
    )

    batch_request: dict = {
        "datasource_name": "my_datasource",
        "data_connector_name": "my_basic_data_connector",
        "data_asset_name": "Titanic_1911",
    }

    # NOTE(review): f-string has no placeholders (lint F541).
    batch_request_file_path: str = os.path.join(uncommitted_dir, f"batch_request.json")
    with open(batch_request_file_path, "w") as json_file:
        json.dump(batch_request, json_file)

    # Discard the emit calls made during the setup above.
    mock_emit.reset_mock()

    runner: CliRunner = CliRunner(mix_stderr=False)
    result: Result = runner.invoke(
        cli,
        [
            "--v3-api",
            "suite",
            "new",
            "--expectation-suite",
            f"{expectation_suite_name}",
            "--interactive",
            "--batch-request",
            f"{batch_request_file_path}",
            "--profile",
            "--no-jupyter",
        ],
        input="\n",
        catch_exceptions=False,
    )
    assert result.exit_code == 1

    stdout: str = result.output
    assert (
        f"An expectation suite named `{expectation_suite_name}` already exists."
        in stdout
    )
    assert (
        f"If you intend to edit the suite please use `great_expectations suite edit {expectation_suite_name}`."
        in stdout
    )

    assert mock_emit.call_count == 3
    assert mock_emit.call_args_list == [
        mock.call(
            {"event_payload": {}, "event": "data_context.__init__", "success": True}
        ),
        mock.call(
            {
                "event": "cli.suite.new.begin",
                "event_payload": {"api_version": "v3"},
                "success": True,
            }
        ),
        mock.call(
            {
                "event": "cli.suite.new.end",
                "event_payload": {"api_version": "v3"},
                "success": False,
            }
        ),
    ]

    assert_no_logging_messages_or_tracebacks(
        my_caplog=caplog,
        click_result=result,
    )


@mock.patch(
    "great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit"
)
@mock.patch("subprocess.call", return_value=True, side_effect=None)
@mock.patch("webbrowser.open", return_value=True, side_effect=None)
def test_suite_new_profile_runs_notebook_no_jupyter(
    # NOTE(review): "mock_webbroser" is a typo for "mock_webbrowser"; binding is
    # positional, so behavior is unaffected.
    mock_webbroser,
    mock_subprocess,
    mock_emit,
    caplog,
    monkeypatch,
    titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled,
):
    """
    We call the "suite new --profile" command

    The command should:
    - create a new notebook
    - send a DataContext init success message
    - send a new success message
    """
    context: DataContext = titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled
    monkeypatch.chdir(os.path.dirname(context.root_directory))

    project_dir: str = context.root_directory
    uncommitted_dir: str = os.path.join(project_dir, "uncommitted")

    expectation_suite_name: str = "test_suite_name"

    batch_request: dict = {
        "datasource_name": "my_datasource",
        "data_connector_name": "my_basic_data_connector",
        "data_asset_name": "Titanic_1911",
    }

    batch_request_file_path: str = os.path.join(uncommitted_dir, f"batch_request.json")
    with open(batch_request_file_path, "w") as json_file:
        json.dump(batch_request, json_file)

    # Discard the emit calls made during the setup above.
    mock_emit.reset_mock()

    runner: CliRunner = CliRunner(mix_stderr=False)
    result: Result = runner.invoke(
        cli,
        [
            "--v3-api",
            "suite",
            "new",
            "--expectation-suite",
            f"{expectation_suite_name}",
            "--interactive",
            "--batch-request",
            f"{batch_request_file_path}",
            "--profile",
            "--no-jupyter",
        ],
        input="\n",
        catch_exceptions=False,
    )
    assert result.exit_code == 0

    stdout: str = result.stdout
    assert "Select a datasource" not in stdout
    assert (
        "Opening a notebook for you now to edit your expectation suite!" not in stdout
    )
    assert (
        "If you wish to avoid this you can add the `--no-jupyter` flag." not in stdout
    )

    expected_suite_path: str = os.path.join(
        project_dir, "expectations", f"{expectation_suite_name}.json"
    )
    assert os.path.isfile(expected_suite_path)

    expected_notebook_path: str = os.path.join(
        uncommitted_dir, f"edit_{expectation_suite_name}.ipynb"
    )
    assert os.path.isfile(expected_notebook_path)

    # Render the batch request exactly as the generated notebook embeds it.
    batch_request_string: str = (
        str(BatchRequest(**batch_request))
        .replace("{\n", "{\n  ")
        .replace(",\n", ",\n  ")
        .replace("\n}", ",\n}")
    )
    batch_request_string = fr"batch_request = {batch_request_string}"

    cells_of_interest_dict: Dict[int, dict] = find_code_in_notebook(
        nb=load_notebook_from_path(notebook_path=expected_notebook_path),
        search_string=batch_request_string,
    )
    assert len(cells_of_interest_dict) == 1

    cells_of_interest_dict: Dict[int, dict] = find_code_in_notebook(
        nb=load_notebook_from_path(notebook_path=expected_notebook_path),
        search_string="context.open_data_docs(resource_identifier=suite_identifier)",
    )
    assert not cells_of_interest_dict

    cells_of_interest_dict: Dict[int, dict] = find_code_in_notebook(
        nb=load_notebook_from_path(notebook_path=expected_notebook_path),
        search_string="context.open_data_docs(resource_identifier=validation_result_identifier)",
    )
    assert len(cells_of_interest_dict) == 1

    # Expected profiler cell; normalized through lint_code because the notebook
    # renderer emits black-formatted source.
    profiler_code_cell: str = f"""\
profiler = UserConfigurableProfiler(
    profile_dataset=validator,
    excluded_expectations=None,
    ignored_columns=ignored_columns,
    not_null_only=False,
    primary_or_compound_key=False,
    semantic_types_dict=None,
    table_expectations_only=False,
    value_set_threshold="MANY",
)
suite = profiler.build_suite()"""
    profiler_code_cell = lint_code(code=profiler_code_cell).rstrip("\n")
    cells_of_interest_dict: Dict[int, dict] = find_code_in_notebook(
        nb=load_notebook_from_path(notebook_path=expected_notebook_path),
        search_string=profiler_code_cell,
    )
    assert len(cells_of_interest_dict) == 1

    # Execute the notebook with the Data Docs call stripped so nothing opens.
    run_notebook(
        notebook_path=expected_notebook_path,
        notebook_dir=uncommitted_dir,
        string_to_be_replaced="context.open_data_docs(resource_identifier=validation_result_identifier)",
        replacement_string="",
    )

    # Re-load the context from disk and verify the profiled suite was saved.
    context = DataContext(context_root_dir=project_dir)
    assert expectation_suite_name in context.list_expectation_suite_names()

    suite: ExpectationSuite = context.get_expectation_suite(
        expectation_suite_name=expectation_suite_name
    )
    assert suite.expectations == [
        ExpectationConfiguration(
            **{
                "expectation_type": "expect_table_columns_to_match_ordered_list",
                "kwargs": {
                    "column_list": [
                        "Unnamed: 0",
                        "Name",
                        "PClass",
                        "Age",
                        "Sex",
                        "Survived",
                        "SexCode",
                    ]
                },
                "meta": {},
            }
        ),
        ExpectationConfiguration(
            **{
                "expectation_type": "expect_table_row_count_to_be_between",
                "kwargs": {"max_value": 1313, "min_value": 1313},
                "meta": {},
            }
        ),
    ]

    assert mock_subprocess.call_count == 0
    assert mock_webbroser.call_count == 0

    assert mock_emit.call_count == 5
    assert mock_emit.call_args_list == [
        mock.call(
            {"event_payload": {}, "event": "data_context.__init__", "success": True}
        ),
        mock.call(
            {
                "event": "cli.suite.new.begin",
                "event_payload": {"api_version": "v3"},
                "success": True,
            }
        ),
        mock.call(
            {
                "event_payload": {
                    "anonymized_expectation_suite_name": "9df638a13b727807e51b13ec1839bcbe"
                },
                "event": "data_context.save_expectation_suite",
                "success": True,
            }
        ),
        mock.call(
            {
                "event": "cli.suite.new.end",
                "event_payload": {"api_version": "v3"},
                "success": True,
            }
        ),
        mock.call(
            {"event_payload": {}, "event": "data_context.__init__", "success": True}
        ),
    ]

    assert_no_logging_messages_or_tracebacks(
        my_caplog=caplog,
        click_result=result,
    )


@mock.patch(
    "great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit"
)
@mock.patch("subprocess.call", return_value=True, side_effect=None)
@mock.patch("webbrowser.open", return_value=True, side_effect=None)
def test_suite_new_profile_runs_notebook_opens_jupyter(
    # NOTE(review): "mock_webbroser" is a typo for "mock_webbrowser"; binding is
    # positional, so behavior is unaffected.
    mock_webbroser,
    mock_subprocess,
    mock_emit,
    caplog,
    monkeypatch,
    titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled,
):
    """
    We call the "suite new --profile" command

    The command should:
    - create a new notebook
    - open the notebook in jupyter
    - send a DataContext init success message
    - send a new success message
    """
    context: DataContext = titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled
    monkeypatch.chdir(os.path.dirname(context.root_directory))

    project_dir: str = context.root_directory
    uncommitted_dir: str = os.path.join(project_dir, "uncommitted")

    expectation_suite_name: str = "test_suite_name"

    batch_request: dict = {
        "datasource_name": "my_datasource",
        "data_connector_name": "my_basic_data_connector",
        "data_asset_name": "Titanic_1911",
    }

    # NOTE(review): f-string has no placeholders (lint F541).
    batch_request_file_path: str = os.path.join(uncommitted_dir, f"batch_request.json")
    with open(batch_request_file_path, "w") as json_file:
        json.dump(batch_request, json_file)

    # Discard the emit calls made during the setup above.
    mock_emit.reset_mock()

    runner: CliRunner = CliRunner(mix_stderr=False)
    result: Result = runner.invoke(
        cli,
        [
            "--v3-api",
            "suite",
            "new",
            "--expectation-suite",
            f"{expectation_suite_name}",
            "--interactive",
            "--batch-request",
            f"{batch_request_file_path}",
            "--profile",
        ],
        input="\n",
        catch_exceptions=False,
    )
    assert result.exit_code == 0

    stdout: str = result.stdout
    assert "Select a datasource" not in stdout
    assert "Opening a notebook for you now to edit your expectation suite!" in stdout
    assert "If you wish to avoid this you can add the `--no-jupyter` flag." in stdout

    expected_suite_path: str = os.path.join(
        project_dir, "expectations", f"{expectation_suite_name}.json"
    )
    assert os.path.isfile(expected_suite_path)

    expected_notebook_path: str = os.path.join(
        uncommitted_dir, f"edit_{expectation_suite_name}.ipynb"
    )
    assert os.path.isfile(expected_notebook_path)

    # Render the batch request exactly as the generated notebook embeds it.
    batch_request_string: str = (
        str(BatchRequest(**batch_request))
        .replace("{\n", "{\n  ")
        .replace(",\n", ",\n  ")
        .replace("\n}", ",\n}")
    )
    batch_request_string = fr"batch_request = {batch_request_string}"

    cells_of_interest_dict: Dict[int, dict] = find_code_in_notebook(
        nb=load_notebook_from_path(notebook_path=expected_notebook_path),
        search_string=batch_request_string,
    )
    assert len(cells_of_interest_dict) == 1

    cells_of_interest_dict: Dict[int, dict] = find_code_in_notebook(
        nb=load_notebook_from_path(notebook_path=expected_notebook_path),
        search_string="context.open_data_docs(resource_identifier=suite_identifier)",
    )
    assert not cells_of_interest_dict

    cells_of_interest_dict: Dict[int, dict] = find_code_in_notebook(
        nb=load_notebook_from_path(notebook_path=expected_notebook_path),
        search_string="context.open_data_docs(resource_identifier=validation_result_identifier)",
    )
    assert len(cells_of_interest_dict) == 1

    # Expected profiler cell; normalized through lint_code because the notebook
    # renderer emits black-formatted source.
    profiler_code_cell: str = f"""\
profiler = UserConfigurableProfiler(
    profile_dataset=validator,
    excluded_expectations=None,
    ignored_columns=ignored_columns,
    not_null_only=False,
    primary_or_compound_key=False,
    semantic_types_dict=None,
    table_expectations_only=False,
    value_set_threshold="MANY",
)
suite = profiler.build_suite()"""
    profiler_code_cell = lint_code(code=profiler_code_cell).rstrip("\n")

    cells_of_interest_dict: Dict[int, dict] = find_code_in_notebook(
        nb=load_notebook_from_path(notebook_path=expected_notebook_path),
        search_string=profiler_code_cell,
    )
    assert len(cells_of_interest_dict) == 1

    # Execute the notebook with the Data Docs call stripped so nothing opens.
    run_notebook(
        notebook_path=expected_notebook_path,
        notebook_dir=uncommitted_dir,
        string_to_be_replaced="context.open_data_docs(resource_identifier=validation_result_identifier)",
        replacement_string="",
    )

    # Re-load the context from disk and verify the profiled suite was saved.
    context = DataContext(context_root_dir=project_dir)
    assert expectation_suite_name in context.list_expectation_suite_names()

    suite: ExpectationSuite = context.get_expectation_suite(
        expectation_suite_name=expectation_suite_name
    )
    assert suite.expectations == [
        ExpectationConfiguration(
            **{
                "expectation_type": "expect_table_columns_to_match_ordered_list",
                "kwargs": {
                    "column_list": [
                        "Unnamed: 0",
                        "Name",
                        "PClass",
                        "Age",
                        "Sex",
                        "Survived",
                        "SexCode",
                    ]
                },
                "meta": {},
            }
        ),
        ExpectationConfiguration(
            **{
                "expectation_type": "expect_table_row_count_to_be_between",
                "kwargs": {"max_value": 1313, "min_value": 1313},
                "meta": {},
            }
        ),
    ]

    # Jupyter must have been launched exactly once with the generated notebook.
    assert mock_subprocess.call_count == 1
    call_args: List[str] = mock_subprocess.call_args[0][0]
    assert call_args[0] == "jupyter"
    assert call_args[1] == "notebook"
    assert expected_notebook_path in call_args[2]

    assert mock_webbroser.call_count == 0

    assert mock_emit.call_count == 5
    assert mock_emit.call_args_list == [
        mock.call(
            {"event_payload": {}, "event": "data_context.__init__", "success": True}
        ),
        mock.call(
            {
                "event": "cli.suite.new.begin",
                "event_payload": {"api_version": "v3"},
                "success": True,
            }
        ),
        mock.call(
            {
                "event_payload": {
                    "anonymized_expectation_suite_name": "9df638a13b727807e51b13ec1839bcbe"
                },
                "event": "data_context.save_expectation_suite",
                "success": True,
            }
        ),
        mock.call(
            {
                "event": "cli.suite.new.end",
                "event_payload": {"api_version": "v3"},
                "success": True,
            }
        ),
        mock.call(
            {"event_payload": {}, "event": "data_context.__init__", "success": True}
        ),
    ]

    assert_no_logging_messages_or_tracebacks(
        my_caplog=caplog,
        click_result=result,
    )


@pytest.fixture
def suite_new_messages():
    """Canonical CLI messages matched by the parametrized `suite new` tests below."""
    return {
        "no_msg": "",
        "happy_path_profile": "Entering interactive mode since you passed the --profile flag",
        "warning_profile": "Warning: Ignoring the --manual flag and entering interactive mode since you passed the --profile flag",
"happy_path_batch_request": "Entering interactive mode since you passed the --batch-request flag", "warning_batch_request": "Warning: Ignoring the --manual flag and entering interactive mode since you passed the --batch-request flag", "happy_path_prompt_call": """\ How would you like to create your Expectation Suite? 1. Manually, without interacting with a sample batch of data (default) 2. Interactively, with a sample batch of data 3. Automatically, using a profiler """, "error_both_interactive_flags": "Please choose either --interactive or --manual, you may not choose both.", } @pytest.mark.parametrize( "interactive_flag,manual_flag,profile_flag,batch_request_flag,error_expected,prompt_input,return_interactive,return_profile,stdout_fixture,stderr_fixture", [ # No error expected # return_interactive = True, return_profile = False pytest.param( True, False, False, None, False, None, True, False, "no_msg", "no_msg", id="--interactive", ), # return_interactive = False, return_profile = False pytest.param( False, True, False, None, False, None, False, False, "no_msg", "no_msg", id="--manual", ), # return_interactive = True, return_profile = True pytest.param( False, False, True, None, False, None, True, True, "no_msg", "no_msg", id="--profile", ), pytest.param( True, False, True, None, False, None, True, True, "no_msg", "no_msg", id="--interactive --profile", ), # batch_request not empty pytest.param( True, False, False, "batch_request.json", False, None, True, False, "no_msg", "no_msg", id="--interactive --batch-request", ), pytest.param( False, False, True, "batch_request.json", False, None, True, True, "happy_path_profile", "no_msg", id="--profile --batch-request", ), pytest.param( True, False, True, "batch_request.json", False, None, True, True, "no_msg", "no_msg", id="--interactive --profile --batch-request", ), # Prompts # Just hit enter (default choice) pytest.param( False, False, False, None, False, "", False, False, "no_msg", "no_msg", id="prompt: Default 
Choice 1 - Manual suite creation (default)", ), # Choice 1 - Manual suite creation (default) pytest.param( False, False, False, None, False, "1", False, False, "no_msg", "no_msg", id="prompt: Choice 1 - Manual suite creation (default)", ), # Choice 2 - Interactive suite creation pytest.param( False, False, False, None, False, "2", True, False, "no_msg", "no_msg", id="prompt: Choice 2 - Interactive suite creation", ), # Choice 3 - Automatic suite creation (profiler) pytest.param( False, False, False, None, False, "3", True, True, "no_msg", "no_msg", id="prompt: Choice 3 - Automatic suite creation (profiler)", ), # No error but warning expected # no-interactive flag with batch_request, with/without profile flag pytest.param( False, True, False, "batch_request.json", False, None, True, False, "warning_batch_request", "no_msg", id="warning: --manual --batch-request", ), pytest.param( False, True, True, "batch_request.json", False, None, True, True, "warning_profile", "no_msg", id="warning: --manual --profile --batch-request", ), # no-interactive flag with profile and without batch request flag pytest.param( False, True, True, None, False, None, True, True, "warning_profile", "no_msg", id="warning: --manual --profile", ), # Yes error expected # both interactive flags, profile=False, with/without batch_request pytest.param( True, True, False, None, True, None, None, None, "error_both_interactive_flags", "no_msg", id="error: --interactive --manual", ), pytest.param( True, True, False, "batch_request.json", True, None, None, None, "error_both_interactive_flags", "no_msg", id="error: --interactive --manual --batch-request", ), # both interactive flags, profile=True, with/without batch_request pytest.param( True, True, True, None, True, None, None, None, "error_both_interactive_flags", "no_msg", id="error: --interactive --manual --profile", ), pytest.param( True, True, True, "batch_request.json", True, None, None, None, "error_both_interactive_flags", "no_msg", id="error: 
--interactive --manual --profile --batch-request", ), ], ) @mock.patch("click.prompt") @mock.patch( "great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit" ) def test__process_suite_new_flags_and_prompt( mock_emit, mock_prompt, interactive_flag, manual_flag, profile_flag, batch_request_flag, error_expected, prompt_input, return_interactive, return_profile, stdout_fixture, stderr_fixture, empty_data_context_stats_enabled, capsys, suite_new_messages, ): """ What does this test and why? _process_suite_new_flags_and_prompt should return the correct configuration or error based on input flags. """ usage_event_end: str = "cli.suite.new.end" context: DataContext = empty_data_context_stats_enabled # test happy paths if not error_expected: if prompt_input is not None: mock_prompt.side_effect = [prompt_input] processed_flags = _process_suite_new_flags_and_prompt( context=context, usage_event_end=usage_event_end, interactive_flag=interactive_flag, manual_flag=manual_flag, profile=profile_flag, batch_request=batch_request_flag, ) assert processed_flags == { "interactive": return_interactive, "profile": return_profile, } # Note - in this method on happy path no usage stats message is sent. Other messages are sent during the full # CLI suite new flow of creating a notebook etc. 
assert mock_emit.call_count == 0 assert mock_emit.call_args_list == [] # Check output captured: CaptureResult = capsys.readouterr() assert suite_new_messages[stdout_fixture] in captured.out assert suite_new_messages[stderr_fixture] in captured.err # Check prompt text and called only when appropriate if prompt_input is not None: assert mock_prompt.call_count == 1 assert ( mock_prompt.call_args_list[0][0][0] == suite_new_messages["happy_path_prompt_call"] ) else: assert mock_prompt.call_count == 0 # test error cases elif error_expected: with pytest.raises(SystemExit): _ = _process_suite_new_flags_and_prompt( context=context, usage_event_end=usage_event_end, interactive_flag=interactive_flag, manual_flag=manual_flag, profile=profile_flag, batch_request=batch_request_flag, ) # Check output captured: CaptureResult = capsys.readouterr() assert suite_new_messages[stdout_fixture] in captured.out assert suite_new_messages[stderr_fixture] in captured.err assert mock_prompt.call_count == 0 # Note - in this method only a single usage stats message is sent. Other messages are sent during the full # CLI suite new flow of creating a notebook etc. assert mock_emit.call_count == 1 assert mock_emit.call_args_list == [ mock.call( { "event": usage_event_end, "event_payload": {"api_version": "v3"}, "success": False, } ), ] @pytest.fixture def suite_edit_messages(): return { "no_msg": "", "happy_path_datasource_name": "Entering interactive mode since you passed the --datasource-name flag", "warning_datasource_name": "Warning: Ignoring the --manual flag and entering interactive mode since you passed the --datasource-name flag", "happy_path_batch_request": "Entering interactive mode since you passed the --batch-request flag", "warning_batch_request": "Warning: Ignoring the --manual flag and entering interactive mode since you passed the --batch-request flag", "happy_path_prompt_call": """\ How would you like to edit your Expectation Suite? 1. 
Manually, without interacting with a sample batch of data (default) 2. Interactively, with a sample batch of data """, "error_both_interactive_flags": "Please choose either --interactive or --manual, you may not choose both.", "error_both_datasource_name_and_batch_request_flags": """Only one of --datasource-name DATASOURCE_NAME and --batch-request <path to JSON file> \ options can be used. """, } @pytest.mark.parametrize( "interactive_flag,manual_flag,datasource_name_flag,batch_request_flag,error_expected,prompt_input,return_interactive,stdout_fixture,stderr_fixture", [ # No error expected # return_interactive = True pytest.param( True, False, None, None, False, None, True, "no_msg", "no_msg", id="--interactive", ), # return_interactive = False pytest.param( False, True, None, None, False, None, False, "no_msg", "no_msg", id="--manual", ), # return_interactive = True, --datasource-name pytest.param( False, False, "some_datasource_name", None, False, None, True, "happy_path_datasource_name", "no_msg", id="--datasource-name", ), pytest.param( True, False, "some_datasource_name", None, False, None, True, "no_msg", "no_msg", id="--interactive --datasource-name", ), # batch_request not empty pytest.param( True, False, None, "batch_request.json", False, None, True, "no_msg", "no_msg", id="--interactive --batch-request", ), pytest.param( False, False, None, "batch_request.json", False, None, True, "happy_path_batch_request", "no_msg", id="--batch-request", ), # Prompts # Just hit enter (default choice) pytest.param( False, False, None, None, False, "", False, "no_msg", "no_msg", id="prompt: Default Choice 1 - Manual suite edit (default)", ), # # Choice 1 - Manual suite edit (default) pytest.param( False, False, None, None, False, "1", False, "no_msg", "no_msg", id="prompt: Choice 1 - Manual suite edit (default)", ), # Choice 2 - Interactive suite edit pytest.param( False, False, None, None, False, "2", True, "no_msg", "no_msg", id="prompt: Choice 2 - Interactive suite 
edit", ), # No error but warning expected # no-interactive flag with batch_request pytest.param( False, True, None, "batch_request.json", False, None, True, "warning_batch_request", "no_msg", id="warning: --manual --batch-request", ), # no-interactive flag with datasource_name pytest.param( False, True, "some_datasource_name", None, False, None, True, "warning_datasource_name", "no_msg", id="warning: --manual --datasource-name", ), # Yes error expected # both interactive flags, datasource_name=None, with/without batch_request pytest.param( True, True, None, None, True, None, None, "error_both_interactive_flags", "no_msg", id="error: --interactive --manual", ), pytest.param( True, True, None, "batch_request.json", True, None, None, "error_both_interactive_flags", "no_msg", id="error: --interactive --manual --batch-request", ), # both interactive flags, datasource_name=something, with/without batch_request pytest.param( True, True, "some_datasource_name", None, True, None, None, "error_both_interactive_flags", "no_msg", id="error: --interactive --manual --datasource-name", ), pytest.param( True, True, "some_datasource_name", "batch_request.json", True, None, None, "error_both_datasource_name_and_batch_request_flags", "no_msg", id="error: --interactive --manual --datasource-name --batch-request", ), # both --datasource-name and --batch-request pytest.param( False, False, "some_datasource_name", "batch_request.json", True, None, True, "error_both_datasource_name_and_batch_request_flags", "no_msg", id="error: --datasource-name --batch-request", ), pytest.param( True, False, "some_datasource_name", "batch_request.json", True, None, True, "error_both_datasource_name_and_batch_request_flags", "no_msg", id="--interactive --datasource-name --batch-request", ), pytest.param( False, True, "some_datasource_name", "batch_request.json", True, None, True, "error_both_datasource_name_and_batch_request_flags", "no_msg", id="--manual --datasource-name --batch-request", ), ], ) 
@mock.patch("click.prompt") @mock.patch( "great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit" ) def test__process_suite_edit_flags_and_prompt( mock_emit, mock_prompt, interactive_flag, manual_flag, datasource_name_flag, batch_request_flag, error_expected, prompt_input, return_interactive, stdout_fixture, stderr_fixture, empty_data_context_stats_enabled, capsys, suite_edit_messages, ): """ What does this test and why? _process_suite_edit_flags_and_prompt should return the correct configuration or error based on input flags. """ usage_event_end: str = "cli.suite.edit.end" context: DataContext = empty_data_context_stats_enabled # test happy paths if not error_expected: if prompt_input is not None: mock_prompt.side_effect = [prompt_input] interactive: bool = _process_suite_edit_flags_and_prompt( context=context, usage_event_end=usage_event_end, interactive_flag=interactive_flag, manual_flag=manual_flag, datasource_name=datasource_name_flag, batch_request=batch_request_flag, ) assert interactive == return_interactive # Note - in this method on happy path no usage stats message is sent. Other messages are sent during the full # CLI suite new flow of creating a notebook etc. 
assert mock_emit.call_count == 0 assert mock_emit.call_args_list == [] # Check output captured: CaptureResult = capsys.readouterr() assert suite_edit_messages[stdout_fixture] in captured.out assert suite_edit_messages[stderr_fixture] in captured.err # Check prompt text and called only when appropriate if prompt_input is not None: assert mock_prompt.call_count == 1 assert ( mock_prompt.call_args_list[0][0][0] == suite_edit_messages["happy_path_prompt_call"] ) else: assert mock_prompt.call_count == 0 # test error cases elif error_expected: with pytest.raises(SystemExit): _ = _process_suite_edit_flags_and_prompt( context=context, usage_event_end=usage_event_end, interactive_flag=interactive_flag, manual_flag=manual_flag, datasource_name=datasource_name_flag, batch_request=batch_request_flag, ) # Check output captured: CaptureResult = capsys.readouterr() assert suite_edit_messages[stdout_fixture] in captured.out assert suite_edit_messages[stderr_fixture] in captured.err assert mock_prompt.call_count == 0 # Note - in this method only a single usage stats message is sent. Other messages are sent during the full # CLI suite new flow of creating a notebook etc. assert mock_emit.call_count == 1 assert mock_emit.call_args_list == [ mock.call( { "event": usage_event_end, "event_payload": {"api_version": "v3"}, "success": False, } ), ]
great-expectations/great_expectations
tests/cli/test_suite.py
great_expectations/render/page_renderer_util.py
import argparse from mitmproxy import options from mitmproxy.tools import cmdline, web, dump, console from mitmproxy.tools import main def test_common(): parser = argparse.ArgumentParser() opts = options.Options() cmdline.common_options(parser, opts) args = parser.parse_args(args=[]) assert main.process_options(parser, opts, args) def test_mitmproxy(): opts = options.Options() console.master.ConsoleMaster(opts) ap = cmdline.mitmproxy(opts) assert ap def test_mitmdump(): opts = options.Options() dump.DumpMaster(opts) ap = cmdline.mitmdump(opts) assert ap def test_mitmweb(): opts = options.Options() web.master.WebMaster(opts) ap = cmdline.mitmweb(opts) assert ap
from unittest import mock import pytest from mitmproxy.test import tflow from mitmproxy.net.http import http1 from mitmproxy.net.tcp import TCPClient from mitmproxy.test.tutils import treq from ... import tservers class TestHTTPFlow: def test_repr(self): f = tflow.tflow(resp=True, err=True) assert repr(f) class TestInvalidRequests(tservers.HTTPProxyTest): ssl = True def test_double_connect(self): p = self.pathoc() with p.connect(): r = p.request("connect:'%s:%s'" % ("127.0.0.1", self.server2.port)) assert r.status_code == 400 assert b"Unexpected CONNECT" in r.content def test_relative_request(self): p = self.pathoc_raw() with p.connect(): r = p.request("get:/p/200") assert r.status_code == 400 assert b"Invalid HTTP request form" in r.content class TestProxyMisconfiguration(tservers.TransparentProxyTest): def test_absolute_request(self): p = self.pathoc() with p.connect(): r = p.request("get:'http://localhost:%d/p/200'" % self.server.port) assert r.status_code == 400 assert b"misconfiguration" in r.content class TestExpectHeader(tservers.HTTPProxyTest): def test_simple(self): client = TCPClient(("127.0.0.1", self.proxy.port)) client.connect() # call pathod server, wait a second to complete the request client.wfile.write( b"POST http://localhost:%d/p/200 HTTP/1.1\r\n" b"Expect: 100-continue\r\n" b"Content-Length: 16\r\n" b"\r\n" % self.server.port ) client.wfile.flush() assert client.rfile.readline() == b"HTTP/1.1 100 Continue\r\n" assert client.rfile.readline() == b"content-length: 0\r\n" assert client.rfile.readline() == b"\r\n" client.wfile.write(b"0123456789abcdef\r\n") client.wfile.flush() resp = http1.read_response(client.rfile, treq()) assert resp.status_code == 200 client.finish() client.close() class TestHeadContentLength(tservers.HTTPProxyTest): def test_head_content_length(self): p = self.pathoc() with p.connect(): resp = p.request( """head:'%s/p/200:h"Content-Length"="42"'""" % self.server.urlbase ) assert resp.headers["Content-Length"] == "42" class 
TestStreaming(tservers.HTTPProxyTest): @pytest.mark.parametrize('streaming', [True, False]) def test_streaming(self, streaming): class Stream: def requestheaders(self, f): f.request.stream = streaming def responseheaders(self, f): f.response.stream = streaming def assert_write(self, v): if streaming: assert len(v) <= 4096 return self.o.write(v) self.master.addons.add(Stream()) p = self.pathoc() with p.connect(): with mock.patch("mitmproxy.net.tcp.Writer.write", side_effect=assert_write, autospec=True): # response with 10000 bytes r = p.request("post:'%s/p/200:b@10000'" % self.server.urlbase) assert len(r.content) == 10000 # request with 10000 bytes assert p.request("post:'%s/p/200':b@10000" % self.server.urlbase)
vhaupert/mitmproxy
test/mitmproxy/proxy/protocol/test_http1.py
test/mitmproxy/tools/test_cmdline.py
from io import BytesIO import sys from mitmproxy.net import wsgi from mitmproxy.net.http import Headers def tflow(): headers = Headers(test=b"value") req = wsgi.Request("http", "GET", "/", "HTTP/1.1", headers, "") return wsgi.Flow(("127.0.0.1", 8888), req) class ExampleApp: def __init__(self): self.called = False def __call__(self, environ, start_response): self.called = True status = '200 OK' response_headers = [('Content-type', 'text/plain')] start_response(status, response_headers) return [b'Hello', b' world!\n'] class TestWSGI: def test_make_environ(self): w = wsgi.WSGIAdaptor(None, "foo", 80, "version") tf = tflow() assert w.make_environ(tf, None) tf.request.path = "/foo?bar=voing" r = w.make_environ(tf, None) assert r["QUERY_STRING"] == "bar=voing" def test_serve(self): ta = ExampleApp() w = wsgi.WSGIAdaptor(ta, "foo", 80, "version") f = tflow() f.request.host = "foo" f.request.port = 80 wfile = BytesIO() err = w.serve(f, wfile) assert ta.called assert not err val = wfile.getvalue() assert b"Hello world" in val assert b"Server:" in val def _serve(self, app): w = wsgi.WSGIAdaptor(app, "foo", 80, "version") f = tflow() f.request.host = "foo" f.request.port = 80 wfile = BytesIO() w.serve(f, wfile) return wfile.getvalue() def test_serve_empty_body(self): def app(environ, start_response): status = '200 OK' response_headers = [('Foo', 'bar')] start_response(status, response_headers) return [] assert self._serve(app) def test_serve_double_start(self): def app(environ, start_response): try: raise ValueError("foo") except: sys.exc_info() status = '200 OK' response_headers = [('Content-type', 'text/plain')] start_response(status, response_headers) start_response(status, response_headers) assert b"Internal Server Error" in self._serve(app) def test_serve_single_err(self): def app(environ, start_response): try: raise ValueError("foo") except: ei = sys.exc_info() status = '200 OK' response_headers = [('Content-type', 'text/plain')] start_response(status, response_headers, 
ei) yield b"" assert b"Internal Server Error" in self._serve(app) def test_serve_double_err(self): def app(environ, start_response): try: raise ValueError("foo") except: ei = sys.exc_info() status = '200 OK' response_headers = [('Content-type', 'text/plain')] start_response(status, response_headers) yield b"aaa" start_response(status, response_headers, ei) yield b"bbb" assert b"Internal Server Error" in self._serve(app)
from unittest import mock import pytest from mitmproxy.test import tflow from mitmproxy.net.http import http1 from mitmproxy.net.tcp import TCPClient from mitmproxy.test.tutils import treq from ... import tservers class TestHTTPFlow: def test_repr(self): f = tflow.tflow(resp=True, err=True) assert repr(f) class TestInvalidRequests(tservers.HTTPProxyTest): ssl = True def test_double_connect(self): p = self.pathoc() with p.connect(): r = p.request("connect:'%s:%s'" % ("127.0.0.1", self.server2.port)) assert r.status_code == 400 assert b"Unexpected CONNECT" in r.content def test_relative_request(self): p = self.pathoc_raw() with p.connect(): r = p.request("get:/p/200") assert r.status_code == 400 assert b"Invalid HTTP request form" in r.content class TestProxyMisconfiguration(tservers.TransparentProxyTest): def test_absolute_request(self): p = self.pathoc() with p.connect(): r = p.request("get:'http://localhost:%d/p/200'" % self.server.port) assert r.status_code == 400 assert b"misconfiguration" in r.content class TestExpectHeader(tservers.HTTPProxyTest): def test_simple(self): client = TCPClient(("127.0.0.1", self.proxy.port)) client.connect() # call pathod server, wait a second to complete the request client.wfile.write( b"POST http://localhost:%d/p/200 HTTP/1.1\r\n" b"Expect: 100-continue\r\n" b"Content-Length: 16\r\n" b"\r\n" % self.server.port ) client.wfile.flush() assert client.rfile.readline() == b"HTTP/1.1 100 Continue\r\n" assert client.rfile.readline() == b"content-length: 0\r\n" assert client.rfile.readline() == b"\r\n" client.wfile.write(b"0123456789abcdef\r\n") client.wfile.flush() resp = http1.read_response(client.rfile, treq()) assert resp.status_code == 200 client.finish() client.close() class TestHeadContentLength(tservers.HTTPProxyTest): def test_head_content_length(self): p = self.pathoc() with p.connect(): resp = p.request( """head:'%s/p/200:h"Content-Length"="42"'""" % self.server.urlbase ) assert resp.headers["Content-Length"] == "42" class 
TestStreaming(tservers.HTTPProxyTest): @pytest.mark.parametrize('streaming', [True, False]) def test_streaming(self, streaming): class Stream: def requestheaders(self, f): f.request.stream = streaming def responseheaders(self, f): f.response.stream = streaming def assert_write(self, v): if streaming: assert len(v) <= 4096 return self.o.write(v) self.master.addons.add(Stream()) p = self.pathoc() with p.connect(): with mock.patch("mitmproxy.net.tcp.Writer.write", side_effect=assert_write, autospec=True): # response with 10000 bytes r = p.request("post:'%s/p/200:b@10000'" % self.server.urlbase) assert len(r.content) == 10000 # request with 10000 bytes assert p.request("post:'%s/p/200':b@10000" % self.server.urlbase)
vhaupert/mitmproxy
test/mitmproxy/proxy/protocol/test_http1.py
test/mitmproxy/net/test_wsgi.py
# flake8: noqa __docformat__ = "restructuredtext" # Let users know if they're missing any of our hard dependencies hard_dependencies = ("numpy", "pytz", "dateutil") missing_dependencies = [] for dependency in hard_dependencies: try: __import__(dependency) except ImportError as e: missing_dependencies.append(f"{dependency}: {e}") if missing_dependencies: raise ImportError( "Unable to import required dependencies:\n" + "\n".join(missing_dependencies) ) del hard_dependencies, dependency, missing_dependencies # numpy compat from pandas.compat import ( np_version_under1p18 as _np_version_under1p18, is_numpy_dev as _is_numpy_dev, ) try: from pandas._libs import hashtable as _hashtable, lib as _lib, tslib as _tslib except ImportError as e: # pragma: no cover # hack but overkill to use re module = str(e).replace("cannot import name ", "") raise ImportError( f"C extension: {module} not built. If you want to import " "pandas from the source directory, you may need to run " "'python setup.py build_ext --force' to build the C extensions first." 
) from e from pandas._config import ( get_option, set_option, reset_option, describe_option, option_context, options, ) # let init-time option registration happen import pandas.core.config_init from pandas.core.api import ( # dtype Int8Dtype, Int16Dtype, Int32Dtype, Int64Dtype, UInt8Dtype, UInt16Dtype, UInt32Dtype, UInt64Dtype, Float32Dtype, Float64Dtype, CategoricalDtype, PeriodDtype, IntervalDtype, DatetimeTZDtype, StringDtype, BooleanDtype, # missing NA, isna, isnull, notna, notnull, # indexes Index, CategoricalIndex, Int64Index, UInt64Index, RangeIndex, Float64Index, MultiIndex, IntervalIndex, TimedeltaIndex, DatetimeIndex, PeriodIndex, IndexSlice, # tseries NaT, Period, period_range, Timedelta, timedelta_range, Timestamp, date_range, bdate_range, Interval, interval_range, DateOffset, # conversion to_numeric, to_datetime, to_timedelta, # misc Flags, Grouper, factorize, unique, value_counts, NamedAgg, array, Categorical, set_eng_float_format, Series, DataFrame, ) from pandas.core.arrays.sparse import SparseDtype from pandas.tseries.api import infer_freq from pandas.tseries import offsets from pandas.core.computation.api import eval from pandas.core.reshape.api import ( concat, lreshape, melt, wide_to_long, merge, merge_asof, merge_ordered, crosstab, pivot, pivot_table, get_dummies, cut, qcut, ) import pandas.api from pandas.util._print_versions import show_versions from pandas.io.api import ( # excel ExcelFile, ExcelWriter, read_excel, # parsers read_csv, read_fwf, read_table, # pickle read_pickle, to_pickle, # pytables HDFStore, read_hdf, # sql read_sql, read_sql_query, read_sql_table, # misc read_clipboard, read_parquet, read_orc, read_feather, read_gbq, read_html, read_xml, read_json, read_stata, read_sas, read_spss, ) from pandas.io.json import _json_normalize as json_normalize from pandas.util._tester import test import pandas.testing import pandas.arrays # use the closest tagged version if possible from pandas._version import get_versions v = 
get_versions() __version__ = v.get("closest-tag", v["version"]) __git_version__ = v.get("full-revisionid") del get_versions, v # GH 27101 def __getattr__(name): import warnings if name == "datetime": warnings.warn( "The pandas.datetime class is deprecated " "and will be removed from pandas in a future version. " "Import from datetime module instead.", FutureWarning, stacklevel=2, ) from datetime import datetime as dt return dt elif name == "np": warnings.warn( "The pandas.np module is deprecated " "and will be removed from pandas in a future version. " "Import numpy directly instead", FutureWarning, stacklevel=2, ) import numpy as np return np elif name in {"SparseSeries", "SparseDataFrame"}: warnings.warn( f"The {name} class is removed from pandas. Accessing it from " "the top-level namespace will also be removed in the next version", FutureWarning, stacklevel=2, ) return type(name, (), {}) elif name == "SparseArray": warnings.warn( "The pandas.SparseArray class is deprecated " "and will be removed from pandas in a future version. " "Use pandas.arrays.SparseArray instead.", FutureWarning, stacklevel=2, ) from pandas.core.arrays.sparse import SparseArray as _SparseArray return _SparseArray raise AttributeError(f"module 'pandas' has no attribute '{name}'") # module level doc-string __doc__ = """ pandas - a powerful data analysis and manipulation library for Python ===================================================================== **pandas** is a Python package providing fast, flexible, and expressive data structures designed to make working with "relational" or "labeled" data both easy and intuitive. It aims to be the fundamental high-level building block for doing practical, **real world** data analysis in Python. Additionally, it has the broader goal of becoming **the most powerful and flexible open source data analysis / manipulation tool available in any language**. It is already well on its way toward this goal. 
Main Features ------------- Here are just a few of the things that pandas does well: - Easy handling of missing data in floating point as well as non-floating point data. - Size mutability: columns can be inserted and deleted from DataFrame and higher dimensional objects - Automatic and explicit data alignment: objects can be explicitly aligned to a set of labels, or the user can simply ignore the labels and let `Series`, `DataFrame`, etc. automatically align the data for you in computations. - Powerful, flexible group by functionality to perform split-apply-combine operations on data sets, for both aggregating and transforming data. - Make it easy to convert ragged, differently-indexed data in other Python and NumPy data structures into DataFrame objects. - Intelligent label-based slicing, fancy indexing, and subsetting of large data sets. - Intuitive merging and joining data sets. - Flexible reshaping and pivoting of data sets. - Hierarchical labeling of axes (possible to have multiple labels per tick). - Robust IO tools for loading data from flat files (CSV and delimited), Excel files, databases, and saving/loading data from the ultrafast HDF5 format. - Time series-specific functionality: date range generation and frequency conversion, moving window statistics, date shifting and lagging. """
import pytest from pandas.compat._optional import VERSIONS import pandas as pd from pandas.core.computation.engines import ENGINES import pandas.core.computation.expr as expr from pandas.util.version import Version def test_compat(): # test we have compat with our version of nu from pandas.core.computation.check import NUMEXPR_INSTALLED try: import numexpr as ne ver = ne.__version__ if Version(ver) < Version(VERSIONS["numexpr"]): assert not NUMEXPR_INSTALLED else: assert NUMEXPR_INSTALLED except ImportError: pytest.skip("not testing numexpr version compat") @pytest.mark.parametrize("engine", ENGINES) @pytest.mark.parametrize("parser", expr.PARSERS) def test_invalid_numexpr_version(engine, parser): def testit(): a, b = 1, 2 # noqa res = pd.eval("a + b", engine=engine, parser=parser) assert res == 3 if engine == "numexpr": try: import numexpr as ne # noqa F401 except ImportError: pytest.skip("no numexpr") else: testit() else: testit()
datapythonista/pandas
pandas/tests/computation/test_compat.py
pandas/__init__.py
import numpy as np import pandas as pd from pandas import ( Categorical, DataFrame, Index, Series, Timestamp, ) import pandas._testing as tm from pandas.core.arrays import IntervalArray class TestGetNumericData: def test_get_numeric_data_preserve_dtype(self): # get the numeric data obj = DataFrame({"A": [1, "2", 3.0]}) result = obj._get_numeric_data() expected = DataFrame(index=[0, 1, 2], dtype=object) tm.assert_frame_equal(result, expected) def test_get_numeric_data(self): datetime64name = np.dtype("M8[ns]").name objectname = np.dtype(np.object_).name df = DataFrame( {"a": 1.0, "b": 2, "c": "foo", "f": Timestamp("20010102")}, index=np.arange(10), ) result = df.dtypes expected = Series( [ np.dtype("float64"), np.dtype("int64"), np.dtype(objectname), np.dtype(datetime64name), ], index=["a", "b", "c", "f"], ) tm.assert_series_equal(result, expected) df = DataFrame( { "a": 1.0, "b": 2, "c": "foo", "d": np.array([1.0] * 10, dtype="float32"), "e": np.array([1] * 10, dtype="int32"), "f": np.array([1] * 10, dtype="int16"), "g": Timestamp("20010102"), }, index=np.arange(10), ) result = df._get_numeric_data() expected = df.loc[:, ["a", "b", "d", "e", "f"]] tm.assert_frame_equal(result, expected) only_obj = df.loc[:, ["c", "g"]] result = only_obj._get_numeric_data() expected = df.loc[:, []] tm.assert_frame_equal(result, expected) df = DataFrame.from_dict({"a": [1, 2], "b": ["foo", "bar"], "c": [np.pi, np.e]}) result = df._get_numeric_data() expected = DataFrame.from_dict({"a": [1, 2], "c": [np.pi, np.e]}) tm.assert_frame_equal(result, expected) df = result.copy() result = df._get_numeric_data() expected = df tm.assert_frame_equal(result, expected) def test_get_numeric_data_mixed_dtype(self): # numeric and object columns df = DataFrame( { "a": [1, 2, 3], "b": [True, False, True], "c": ["foo", "bar", "baz"], "d": [None, None, None], "e": [3.14, 0.577, 2.773], } ) result = df._get_numeric_data() tm.assert_index_equal(result.columns, Index(["a", "b", "e"])) def 
test_get_numeric_data_extension_dtype(self): # GH#22290 df = DataFrame( { "A": pd.array([-10, np.nan, 0, 10, 20, 30], dtype="Int64"), "B": Categorical(list("abcabc")), "C": pd.array([0, 1, 2, 3, np.nan, 5], dtype="UInt8"), "D": IntervalArray.from_breaks(range(7)), } ) result = df._get_numeric_data() expected = df.loc[:, ["A", "C"]] tm.assert_frame_equal(result, expected)
import pytest from pandas.compat._optional import VERSIONS import pandas as pd from pandas.core.computation.engines import ENGINES import pandas.core.computation.expr as expr from pandas.util.version import Version def test_compat(): # test we have compat with our version of nu from pandas.core.computation.check import NUMEXPR_INSTALLED try: import numexpr as ne ver = ne.__version__ if Version(ver) < Version(VERSIONS["numexpr"]): assert not NUMEXPR_INSTALLED else: assert NUMEXPR_INSTALLED except ImportError: pytest.skip("not testing numexpr version compat") @pytest.mark.parametrize("engine", ENGINES) @pytest.mark.parametrize("parser", expr.PARSERS) def test_invalid_numexpr_version(engine, parser): def testit(): a, b = 1, 2 # noqa res = pd.eval("a + b", engine=engine, parser=parser) assert res == 3 if engine == "numexpr": try: import numexpr as ne # noqa F401 except ImportError: pytest.skip("no numexpr") else: testit() else: testit()
datapythonista/pandas
pandas/tests/computation/test_compat.py
pandas/tests/frame/methods/test_get_numeric_data.py
import warnings import pytest import pandas as pd import pandas._testing as tm from pandas.tests.extension.base.base import BaseExtensionTests class BaseReduceTests(BaseExtensionTests): """ Reduction specific tests. Generally these only make sense for numeric/boolean operations. """ def check_reduce(self, s, op_name, skipna): result = getattr(s, op_name)(skipna=skipna) expected = getattr(s.astype("float64"), op_name)(skipna=skipna) tm.assert_almost_equal(result, expected) class BaseNoReduceTests(BaseReduceTests): """ we don't define any reductions """ @pytest.mark.parametrize("skipna", [True, False]) def test_reduce_series_numeric(self, data, all_numeric_reductions, skipna): op_name = all_numeric_reductions s = pd.Series(data) msg = ( "[Cc]annot perform|Categorical is not ordered for operation|" "'Categorical' does not implement reduction|" ) with pytest.raises(TypeError, match=msg): getattr(s, op_name)(skipna=skipna) @pytest.mark.parametrize("skipna", [True, False]) def test_reduce_series_boolean(self, data, all_boolean_reductions, skipna): op_name = all_boolean_reductions s = pd.Series(data) msg = ( "[Cc]annot perform|Categorical is not ordered for operation|" "'Categorical' does not implement reduction|" ) with pytest.raises(TypeError, match=msg): getattr(s, op_name)(skipna=skipna) class BaseNumericReduceTests(BaseReduceTests): @pytest.mark.parametrize("skipna", [True, False]) def test_reduce_series(self, data, all_numeric_reductions, skipna): op_name = all_numeric_reductions s = pd.Series(data) # min/max with empty produce numpy warnings with warnings.catch_warnings(): warnings.simplefilter("ignore", RuntimeWarning) self.check_reduce(s, op_name, skipna) class BaseBooleanReduceTests(BaseReduceTests): @pytest.mark.parametrize("skipna", [True, False]) def test_reduce_series(self, data, all_boolean_reductions, skipna): op_name = all_boolean_reductions s = pd.Series(data) self.check_reduce(s, op_name, skipna)
import pytest from pandas.compat._optional import VERSIONS import pandas as pd from pandas.core.computation.engines import ENGINES import pandas.core.computation.expr as expr from pandas.util.version import Version def test_compat(): # test we have compat with our version of nu from pandas.core.computation.check import NUMEXPR_INSTALLED try: import numexpr as ne ver = ne.__version__ if Version(ver) < Version(VERSIONS["numexpr"]): assert not NUMEXPR_INSTALLED else: assert NUMEXPR_INSTALLED except ImportError: pytest.skip("not testing numexpr version compat") @pytest.mark.parametrize("engine", ENGINES) @pytest.mark.parametrize("parser", expr.PARSERS) def test_invalid_numexpr_version(engine, parser): def testit(): a, b = 1, 2 # noqa res = pd.eval("a + b", engine=engine, parser=parser) assert res == 3 if engine == "numexpr": try: import numexpr as ne # noqa F401 except ImportError: pytest.skip("no numexpr") else: testit() else: testit()
datapythonista/pandas
pandas/tests/computation/test_compat.py
pandas/tests/extension/base/reduce.py
from __future__ import annotations from contextlib import suppress from typing import ( TYPE_CHECKING, Any, Hashable, Sequence, ) import warnings import numpy as np from pandas._libs.indexing import NDFrameIndexerBase from pandas._libs.lib import item_from_zerodim from pandas.errors import ( AbstractMethodError, InvalidIndexError, ) from pandas.util._decorators import doc from pandas.core.dtypes.common import ( is_array_like, is_bool_dtype, is_hashable, is_integer, is_iterator, is_list_like, is_numeric_dtype, is_object_dtype, is_scalar, is_sequence, ) from pandas.core.dtypes.concat import concat_compat from pandas.core.dtypes.generic import ( ABCDataFrame, ABCSeries, ) from pandas.core.dtypes.missing import ( infer_fill_value, isna, ) import pandas.core.common as com from pandas.core.construction import array as pd_array from pandas.core.indexers import ( check_array_indexer, is_empty_indexer, is_exact_shape_match, is_list_like_indexer, length_of_indexer, ) from pandas.core.indexes.api import ( Index, MultiIndex, ) if TYPE_CHECKING: from pandas import ( DataFrame, Series, ) # "null slice" _NS = slice(None, None) # the public IndexSlicerMaker class _IndexSlice: """ Create an object to more easily perform multi-index slicing. See Also -------- MultiIndex.remove_unused_levels : New MultiIndex with no unused levels. Notes ----- See :ref:`Defined Levels <advanced.shown_levels>` for further info on slicing a MultiIndex. Examples -------- >>> midx = pd.MultiIndex.from_product([['A0','A1'], ['B0','B1','B2','B3']]) >>> columns = ['foo', 'bar'] >>> dfmi = pd.DataFrame(np.arange(16).reshape((len(midx), len(columns))), ... 
index=midx, columns=columns) Using the default slice command: >>> dfmi.loc[(slice(None), slice('B0', 'B1')), :] foo bar A0 B0 0 1 B1 2 3 A1 B0 8 9 B1 10 11 Using the IndexSlice class for a more intuitive command: >>> idx = pd.IndexSlice >>> dfmi.loc[idx[:, 'B0':'B1'], :] foo bar A0 B0 0 1 B1 2 3 A1 B0 8 9 B1 10 11 """ def __getitem__(self, arg): return arg IndexSlice = _IndexSlice() class IndexingError(Exception): pass class IndexingMixin: """ Mixin for adding .loc/.iloc/.at/.iat to Dataframes and Series. """ @property def iloc(self) -> _iLocIndexer: """ Purely integer-location based indexing for selection by position. ``.iloc[]`` is primarily integer position based (from ``0`` to ``length-1`` of the axis), but may also be used with a boolean array. Allowed inputs are: - An integer, e.g. ``5``. - A list or array of integers, e.g. ``[4, 3, 0]``. - A slice object with ints, e.g. ``1:7``. - A boolean array. - A ``callable`` function with one argument (the calling Series or DataFrame) and that returns valid output for indexing (one of the above). This is useful in method chains, when you don't have a reference to the calling object, but would like to base your selection on some value. ``.iloc`` will raise ``IndexError`` if a requested indexer is out-of-bounds, except *slice* indexers which allow out-of-bounds indexing (this conforms with python/numpy *slice* semantics). See more at :ref:`Selection by Position <indexing.integer>`. See Also -------- DataFrame.iat : Fast integer location scalar accessor. DataFrame.loc : Purely label-location based indexer for selection by label. Series.iloc : Purely integer-location based indexing for selection by position. Examples -------- >>> mydict = [{'a': 1, 'b': 2, 'c': 3, 'd': 4}, ... {'a': 100, 'b': 200, 'c': 300, 'd': 400}, ... {'a': 1000, 'b': 2000, 'c': 3000, 'd': 4000 }] >>> df = pd.DataFrame(mydict) >>> df a b c d 0 1 2 3 4 1 100 200 300 400 2 1000 2000 3000 4000 **Indexing just the rows** With a scalar integer. 
>>> type(df.iloc[0]) <class 'pandas.core.series.Series'> >>> df.iloc[0] a 1 b 2 c 3 d 4 Name: 0, dtype: int64 With a list of integers. >>> df.iloc[[0]] a b c d 0 1 2 3 4 >>> type(df.iloc[[0]]) <class 'pandas.core.frame.DataFrame'> >>> df.iloc[[0, 1]] a b c d 0 1 2 3 4 1 100 200 300 400 With a `slice` object. >>> df.iloc[:3] a b c d 0 1 2 3 4 1 100 200 300 400 2 1000 2000 3000 4000 With a boolean mask the same length as the index. >>> df.iloc[[True, False, True]] a b c d 0 1 2 3 4 2 1000 2000 3000 4000 With a callable, useful in method chains. The `x` passed to the ``lambda`` is the DataFrame being sliced. This selects the rows whose index label even. >>> df.iloc[lambda x: x.index % 2 == 0] a b c d 0 1 2 3 4 2 1000 2000 3000 4000 **Indexing both axes** You can mix the indexer types for the index and columns. Use ``:`` to select the entire axis. With scalar integers. >>> df.iloc[0, 1] 2 With lists of integers. >>> df.iloc[[0, 2], [1, 3]] b d 0 2 4 2 2000 4000 With `slice` objects. >>> df.iloc[1:3, 0:3] a b c 1 100 200 300 2 1000 2000 3000 With a boolean array whose length matches the columns. >>> df.iloc[:, [True, False, True, False]] a c 0 1 3 1 100 300 2 1000 3000 With a callable function that expects the Series or DataFrame. >>> df.iloc[:, lambda df: [0, 2]] a c 0 1 3 1 100 300 2 1000 3000 """ return _iLocIndexer("iloc", self) @property def loc(self) -> _LocIndexer: """ Access a group of rows and columns by label(s) or a boolean array. ``.loc[]`` is primarily label based, but may also be used with a boolean array. Allowed inputs are: - A single label, e.g. ``5`` or ``'a'``, (note that ``5`` is interpreted as a *label* of the index, and **never** as an integer position along the index). - A list or array of labels, e.g. ``['a', 'b', 'c']``. - A slice object with labels, e.g. ``'a':'f'``. .. warning:: Note that contrary to usual python slices, **both** the start and the stop are included - A boolean array of the same length as the axis being sliced, e.g. 
``[True, False, True]``. - An alignable boolean Series. The index of the key will be aligned before masking. - An alignable Index. The Index of the returned selection will be the input. - A ``callable`` function with one argument (the calling Series or DataFrame) and that returns valid output for indexing (one of the above) See more at :ref:`Selection by Label <indexing.label>`. Raises ------ KeyError If any items are not found. IndexingError If an indexed key is passed and its index is unalignable to the frame index. See Also -------- DataFrame.at : Access a single value for a row/column label pair. DataFrame.iloc : Access group of rows and columns by integer position(s). DataFrame.xs : Returns a cross-section (row(s) or column(s)) from the Series/DataFrame. Series.loc : Access group of values using labels. Examples -------- **Getting values** >>> df = pd.DataFrame([[1, 2], [4, 5], [7, 8]], ... index=['cobra', 'viper', 'sidewinder'], ... columns=['max_speed', 'shield']) >>> df max_speed shield cobra 1 2 viper 4 5 sidewinder 7 8 Single label. Note this returns the row as a Series. >>> df.loc['viper'] max_speed 4 shield 5 Name: viper, dtype: int64 List of labels. Note using ``[[]]`` returns a DataFrame. >>> df.loc[['viper', 'sidewinder']] max_speed shield viper 4 5 sidewinder 7 8 Single label for row and column >>> df.loc['cobra', 'shield'] 2 Slice with labels for row and single label for column. As mentioned above, note that both the start and stop of the slice are included. >>> df.loc['cobra':'viper', 'max_speed'] cobra 1 viper 4 Name: max_speed, dtype: int64 Boolean list with the same length as the row axis >>> df.loc[[False, False, True]] max_speed shield sidewinder 7 8 Alignable boolean Series: >>> df.loc[pd.Series([False, True, False], ... 
index=['viper', 'sidewinder', 'cobra'])] max_speed shield sidewinder 7 8 Index (same behavior as ``df.reindex``) >>> df.loc[pd.Index(["cobra", "viper"], name="foo")] max_speed shield foo cobra 1 2 viper 4 5 Conditional that returns a boolean Series >>> df.loc[df['shield'] > 6] max_speed shield sidewinder 7 8 Conditional that returns a boolean Series with column labels specified >>> df.loc[df['shield'] > 6, ['max_speed']] max_speed sidewinder 7 Callable that returns a boolean Series >>> df.loc[lambda df: df['shield'] == 8] max_speed shield sidewinder 7 8 **Setting values** Set value for all items matching the list of labels >>> df.loc[['viper', 'sidewinder'], ['shield']] = 50 >>> df max_speed shield cobra 1 2 viper 4 50 sidewinder 7 50 Set value for an entire row >>> df.loc['cobra'] = 10 >>> df max_speed shield cobra 10 10 viper 4 50 sidewinder 7 50 Set value for an entire column >>> df.loc[:, 'max_speed'] = 30 >>> df max_speed shield cobra 30 10 viper 30 50 sidewinder 30 50 Set value for rows matching callable condition >>> df.loc[df['shield'] > 35] = 0 >>> df max_speed shield cobra 30 10 viper 0 0 sidewinder 0 0 **Getting values on a DataFrame with an index that has integer labels** Another example using integers for the index >>> df = pd.DataFrame([[1, 2], [4, 5], [7, 8]], ... index=[7, 8, 9], columns=['max_speed', 'shield']) >>> df max_speed shield 7 1 2 8 4 5 9 7 8 Slice with integer labels for rows. As mentioned above, note that both the start and stop of the slice are included. >>> df.loc[7:9] max_speed shield 7 1 2 8 4 5 9 7 8 **Getting values with a MultiIndex** A number of examples using a DataFrame with a MultiIndex >>> tuples = [ ... ('cobra', 'mark i'), ('cobra', 'mark ii'), ... ('sidewinder', 'mark i'), ('sidewinder', 'mark ii'), ... ('viper', 'mark ii'), ('viper', 'mark iii') ... ] >>> index = pd.MultiIndex.from_tuples(tuples) >>> values = [[12, 2], [0, 4], [10, 20], ... 
[1, 4], [7, 1], [16, 36]] >>> df = pd.DataFrame(values, columns=['max_speed', 'shield'], index=index) >>> df max_speed shield cobra mark i 12 2 mark ii 0 4 sidewinder mark i 10 20 mark ii 1 4 viper mark ii 7 1 mark iii 16 36 Single label. Note this returns a DataFrame with a single index. >>> df.loc['cobra'] max_speed shield mark i 12 2 mark ii 0 4 Single index tuple. Note this returns a Series. >>> df.loc[('cobra', 'mark ii')] max_speed 0 shield 4 Name: (cobra, mark ii), dtype: int64 Single label for row and column. Similar to passing in a tuple, this returns a Series. >>> df.loc['cobra', 'mark i'] max_speed 12 shield 2 Name: (cobra, mark i), dtype: int64 Single tuple. Note using ``[[]]`` returns a DataFrame. >>> df.loc[[('cobra', 'mark ii')]] max_speed shield cobra mark ii 0 4 Single tuple for the index with a single label for the column >>> df.loc[('cobra', 'mark i'), 'shield'] 2 Slice from index tuple to single label >>> df.loc[('cobra', 'mark i'):'viper'] max_speed shield cobra mark i 12 2 mark ii 0 4 sidewinder mark i 10 20 mark ii 1 4 viper mark ii 7 1 mark iii 16 36 Slice from index tuple to index tuple >>> df.loc[('cobra', 'mark i'):('viper', 'mark ii')] max_speed shield cobra mark i 12 2 mark ii 0 4 sidewinder mark i 10 20 mark ii 1 4 viper mark ii 7 1 """ return _LocIndexer("loc", self) @property def at(self) -> _AtIndexer: """ Access a single value for a row/column label pair. Similar to ``loc``, in that both provide label-based lookups. Use ``at`` if you only need to get or set a single value in a DataFrame or Series. Raises ------ KeyError If 'label' does not exist in DataFrame. See Also -------- DataFrame.iat : Access a single value for a row/column pair by integer position. DataFrame.loc : Access a group of rows and columns by label(s). Series.at : Access a single value using a label. Examples -------- >>> df = pd.DataFrame([[0, 2, 3], [0, 4, 1], [10, 20, 30]], ... 
index=[4, 5, 6], columns=['A', 'B', 'C']) >>> df A B C 4 0 2 3 5 0 4 1 6 10 20 30 Get value at specified row/column pair >>> df.at[4, 'B'] 2 Set value at specified row/column pair >>> df.at[4, 'B'] = 10 >>> df.at[4, 'B'] 10 Get value within a Series >>> df.loc[5].at['B'] 4 """ return _AtIndexer("at", self) @property def iat(self) -> _iAtIndexer: """ Access a single value for a row/column pair by integer position. Similar to ``iloc``, in that both provide integer-based lookups. Use ``iat`` if you only need to get or set a single value in a DataFrame or Series. Raises ------ IndexError When integer position is out of bounds. See Also -------- DataFrame.at : Access a single value for a row/column label pair. DataFrame.loc : Access a group of rows and columns by label(s). DataFrame.iloc : Access a group of rows and columns by integer position(s). Examples -------- >>> df = pd.DataFrame([[0, 2, 3], [0, 4, 1], [10, 20, 30]], ... columns=['A', 'B', 'C']) >>> df A B C 0 0 2 3 1 0 4 1 2 10 20 30 Get value at specified row/column pair >>> df.iat[1, 2] 1 Set value at specified row/column pair >>> df.iat[1, 2] = 10 >>> df.iat[1, 2] 10 Get value within a series >>> df.loc[0].iat[1] 2 """ return _iAtIndexer("iat", self) class _LocationIndexer(NDFrameIndexerBase): _valid_types: str axis = None def __call__(self, axis=None): # we need to return a copy of ourselves new_self = type(self)(self.name, self.obj) if axis is not None: axis = self.obj._get_axis_number(axis) new_self.axis = axis return new_self def _get_setitem_indexer(self, key): """ Convert a potentially-label-based key into a positional indexer. """ if self.name == "loc": self._ensure_listlike_indexer(key) if self.axis is not None: return self._convert_tuple(key, is_setter=True) ax = self.obj._get_axis(0) if isinstance(ax, MultiIndex) and self.name != "iloc": with suppress(TypeError, KeyError, InvalidIndexError): # TypeError e.g. 
passed a bool return ax.get_loc(key) if isinstance(key, tuple): with suppress(IndexingError): return self._convert_tuple(key, is_setter=True) if isinstance(key, range): return list(key) try: return self._convert_to_indexer(key, axis=0, is_setter=True) except TypeError as e: # invalid indexer type vs 'other' indexing errors if "cannot do" in str(e): raise elif "unhashable type" in str(e): raise raise IndexingError(key) from e def _ensure_listlike_indexer(self, key, axis=None, value=None): """ Ensure that a list-like of column labels are all present by adding them if they do not already exist. Parameters ---------- key : list-like of column labels Target labels. axis : key axis if known """ column_axis = 1 # column only exists in 2-dimensional DataFrame if self.ndim != 2: return if isinstance(key, tuple) and len(key) > 1: # key may be a tuple if we are .loc # if length of key is > 1 set key to column part key = key[column_axis] axis = column_axis if ( axis == column_axis and not isinstance(self.obj.columns, MultiIndex) and is_list_like_indexer(key) and not com.is_bool_indexer(key) and all(is_hashable(k) for k in key) ): # GH#38148 keys = self.obj.columns.union(key, sort=False) self.obj._mgr = self.obj._mgr.reindex_axis( keys, axis=0, consolidate=False, only_slice=True ) def __setitem__(self, key, value): if isinstance(key, tuple): key = tuple(list(x) if is_iterator(x) else x for x in key) key = tuple(com.apply_if_callable(x, self.obj) for x in key) else: key = com.apply_if_callable(key, self.obj) indexer = self._get_setitem_indexer(key) self._has_valid_setitem_indexer(key) iloc = self if self.name == "iloc" else self.obj.iloc iloc._setitem_with_indexer(indexer, value, self.name) def _validate_key(self, key, axis: int): """ Ensure that key is valid for current indexer. Parameters ---------- key : scalar, slice or list-like Key requested. axis : int Dimension on which the indexing is being made. Raises ------ TypeError If the key (or some element of it) has wrong type. 
IndexError If the key (or some element of it) is out of bounds. KeyError If the key was not found. """ raise AbstractMethodError(self) def _has_valid_tuple(self, key: tuple): """ Check the key for valid keys across my indexer. """ self._validate_key_length(key) for i, k in enumerate(key): try: self._validate_key(k, i) except ValueError as err: raise ValueError( "Location based indexing can only have " f"[{self._valid_types}] types" ) from err def _is_nested_tuple_indexer(self, tup: tuple) -> bool: """ Returns ------- bool """ if any(isinstance(ax, MultiIndex) for ax in self.obj.axes): return any(is_nested_tuple(tup, ax) for ax in self.obj.axes) return False def _convert_tuple(self, key, is_setter: bool = False): keyidx = [] if self.axis is not None: axis = self.obj._get_axis_number(self.axis) for i in range(self.ndim): if i == axis: keyidx.append( self._convert_to_indexer(key, axis=axis, is_setter=is_setter) ) else: keyidx.append(slice(None)) else: self._validate_key_length(key) for i, k in enumerate(key): idx = self._convert_to_indexer(k, axis=i, is_setter=is_setter) keyidx.append(idx) return tuple(keyidx) def _validate_key_length(self, key: Sequence[Any]) -> None: if len(key) > self.ndim: raise IndexingError("Too many indexers") def _getitem_tuple_same_dim(self, tup: tuple): """ Index with indexers that should return an object of the same dimension as self.obj. This is only called after a failed call to _getitem_lowerdim. """ retval = self.obj for i, key in enumerate(tup): if com.is_null_slice(key): continue retval = getattr(retval, self.name)._getitem_axis(key, axis=i) # We should never have retval.ndim < self.ndim, as that should # be handled by the _getitem_lowerdim call above. 
assert retval.ndim == self.ndim return retval def _getitem_lowerdim(self, tup: tuple): # we can directly get the axis result since the axis is specified if self.axis is not None: axis = self.obj._get_axis_number(self.axis) return self._getitem_axis(tup, axis=axis) # we may have a nested tuples indexer here if self._is_nested_tuple_indexer(tup): return self._getitem_nested_tuple(tup) # we maybe be using a tuple to represent multiple dimensions here ax0 = self.obj._get_axis(0) # ...but iloc should handle the tuple as simple integer-location # instead of checking it as multiindex representation (GH 13797) if isinstance(ax0, MultiIndex) and self.name != "iloc": with suppress(IndexingError): return self._handle_lowerdim_multi_index_axis0(tup) self._validate_key_length(tup) for i, key in enumerate(tup): if is_label_like(key): # We don't need to check for tuples here because those are # caught by the _is_nested_tuple_indexer check above. section = self._getitem_axis(key, axis=i) # We should never have a scalar section here, because # _getitem_lowerdim is only called after a check for # is_scalar_access, which that would be. if section.ndim == self.ndim: # we're in the middle of slicing through a MultiIndex # revise the key wrt to `section` by inserting an _NS new_key = tup[:i] + (_NS,) + tup[i + 1 :] else: # Note: the section.ndim == self.ndim check above # rules out having DataFrame here, so we dont need to worry # about transposing. new_key = tup[:i] + tup[i + 1 :] if len(new_key) == 1: new_key = new_key[0] # Slices should return views, but calling iloc/loc with a null # slice returns a new object. 
if com.is_null_slice(new_key): return section # This is an elided recursive call to iloc/loc return getattr(section, self.name)[new_key] raise IndexingError("not applicable") def _getitem_nested_tuple(self, tup: tuple): # we have a nested tuple so have at least 1 multi-index level # we should be able to match up the dimensionality here # we have too many indexers for our dim, but have at least 1 # multi-index dimension, try to see if we have something like # a tuple passed to a series with a multi-index if len(tup) > self.ndim: if self.name != "loc": # This should never be reached, but lets be explicit about it raise ValueError("Too many indices") if isinstance(self.obj, ABCSeries) and any( isinstance(k, tuple) for k in tup ): # GH#35349 Raise if tuple in tuple for series raise ValueError("Too many indices") if self.ndim == 1 or not any(isinstance(x, slice) for x in tup): # GH#10521 Series should reduce MultiIndex dimensions instead of # DataFrame, IndexingError is not raised when slice(None,None,None) # with one row. 
with suppress(IndexingError): return self._handle_lowerdim_multi_index_axis0(tup) # this is a series with a multi-index specified a tuple of # selectors axis = self.axis or 0 return self._getitem_axis(tup, axis=axis) # handle the multi-axis by taking sections and reducing # this is iterative obj = self.obj # GH#41369 Loop in reverse order ensures indexing along columns before rows # which selects only necessary blocks which avoids dtype conversion if possible axis = len(tup) - 1 for key in tup[::-1]: if com.is_null_slice(key): axis -= 1 continue obj = getattr(obj, self.name)._getitem_axis(key, axis=axis) axis -= 1 # if we have a scalar, we are done if is_scalar(obj) or not hasattr(obj, "ndim"): break return obj def _convert_to_indexer(self, key, axis: int, is_setter: bool = False): raise AbstractMethodError(self) def __getitem__(self, key): if type(key) is tuple: key = tuple(list(x) if is_iterator(x) else x for x in key) key = tuple(com.apply_if_callable(x, self.obj) for x in key) if self._is_scalar_access(key): with suppress(KeyError, IndexError, AttributeError): # AttributeError for IntervalTree get_value return self.obj._get_value(*key, takeable=self._takeable) return self._getitem_tuple(key) else: # we by definition only have the 0th axis axis = self.axis or 0 maybe_callable = com.apply_if_callable(key, self.obj) return self._getitem_axis(maybe_callable, axis=axis) def _is_scalar_access(self, key: tuple): raise NotImplementedError() def _getitem_tuple(self, tup: tuple): raise AbstractMethodError(self) def _getitem_axis(self, key, axis: int): raise NotImplementedError() def _has_valid_setitem_indexer(self, indexer) -> bool: raise AbstractMethodError(self) def _getbool_axis(self, key, axis: int): # caller is responsible for ensuring non-None axis labels = self.obj._get_axis(axis) key = check_bool_indexer(labels, key) inds = key.nonzero()[0] return self.obj._take_with_is_copy(inds, axis=axis) @doc(IndexingMixin.loc) class _LocIndexer(_LocationIndexer): _takeable: 
bool = False _valid_types = ( "labels (MUST BE IN THE INDEX), slices of labels (BOTH " "endpoints included! Can be slices of integers if the " "index is integers), listlike of labels, boolean" ) # ------------------------------------------------------------------- # Key Checks @doc(_LocationIndexer._validate_key) def _validate_key(self, key, axis: int): # valid for a collection of labels (we check their presence later) # slice of labels (where start-end in labels) # slice of integers (only if in the labels) # boolean not in slice and with boolean index if isinstance(key, bool) and not is_bool_dtype(self.obj.index): raise KeyError( f"{key}: boolean label can not be used without a boolean index" ) if isinstance(key, slice) and ( isinstance(key.start, bool) or isinstance(key.stop, bool) ): raise TypeError(f"{key}: boolean values can not be used in a slice") def _has_valid_setitem_indexer(self, indexer) -> bool: return True def _is_scalar_access(self, key: tuple) -> bool: """ Returns ------- bool """ # this is a shortcut accessor to both .loc and .iloc # that provide the equivalent access of .at and .iat # a) avoid getting things via sections and (to minimize dtype changes) # b) provide a performant path if len(key) != self.ndim: return False for i, k in enumerate(key): if not is_scalar(k): return False ax = self.obj.axes[i] if isinstance(ax, MultiIndex): return False if isinstance(k, str) and ax._supports_partial_string_indexing: # partial string indexing, df.loc['2000', 'A'] # should not be considered scalar return False if not ax.is_unique: return False return True # ------------------------------------------------------------------- # MultiIndex Handling def _multi_take_opportunity(self, tup: tuple) -> bool: """ Check whether there is the possibility to use ``_multi_take``. Currently the limit is that all axes being indexed, must be indexed with list-likes. Parameters ---------- tup : tuple Tuple of indexers, one per axis. 
    def _multi_take(self, tup: tuple):
        """
        Create the indexers for the passed tuple of keys, and
        executes the take operation. This allows the take operation to be
        executed all at once, rather than once for each dimension.
        Improving efficiency.

        Parameters
        ----------
        tup : tuple
            Tuple of indexers, one per axis.

        Returns
        -------
        values: same type as the object being indexed
        """
        # GH 836
        d = {
            axis: self._get_listlike_indexer(key, axis)
            for (key, axis) in zip(tup, self.obj._AXIS_ORDERS)
        }
        return self.obj._reindex_with_indexers(d, copy=True, allow_dups=True)

    # -------------------------------------------------------------------

    def _getitem_iterable(self, key, axis: int):
        """
        Index current object with an iterable collection of keys.

        Parameters
        ----------
        key : iterable
            Targeted labels.
        axis : int
            Dimension on which the indexing is being made.

        Raises
        ------
        KeyError
            If no key was found. Will change in the future to raise if not all
            keys were found.

        Returns
        -------
        scalar, DataFrame, or Series: indexed value(s).
        """
        # we assume that not com.is_bool_indexer(key), as that is
        # handled before we get here.
        self._validate_key(key, axis)

        # A collection of keys
        keyarr, indexer = self._get_listlike_indexer(key, axis)
        return self.obj._reindex_with_indexers(
            {axis: [keyarr, indexer]}, copy=True, allow_dups=True
        )

    def _getitem_tuple(self, tup: tuple):
        # Dimension-reducing lookups first; else validate and take per axis.
        with suppress(IndexingError):
            return self._getitem_lowerdim(tup)

        # no multi-index, so validate all of the indexers
        self._has_valid_tuple(tup)

        # ugly hack for GH #836
        if self._multi_take_opportunity(tup):
            return self._multi_take(tup)

        return self._getitem_tuple_same_dim(tup)

    def _get_label(self, label, axis: int):
        # GH#5667 this will fail if the label is not present in the axis.
        return self.obj.xs(label, axis=axis)

    def _handle_lowerdim_multi_index_axis0(self, tup: tuple):
        # we have an axis0 multi-index, handle or raise
        axis = self.axis or 0
        try:
            # fast path for series or for tup devoid of slices
            return self._get_label(tup, axis=axis)
        except (TypeError, InvalidIndexError):
            # slices are unhashable
            pass
        except KeyError as ek:
            # raise KeyError if number of indexers match
            # else IndexingError will be raised
            if self.ndim < len(tup) <= self.obj.index.nlevels:
                raise ek

        raise IndexingError("No label returned")

    def _getitem_axis(self, key, axis: int):
        """Label-index along a single axis: slice, mask, list-like or scalar."""
        key = item_from_zerodim(key)
        if is_iterator(key):
            key = list(key)

        labels = self.obj._get_axis(axis)
        # allow e.g. df.loc["2000"] on a DatetimeIndex
        key = labels._get_partial_string_timestamp_match_key(key)

        if isinstance(key, slice):
            self._validate_key(key, axis)
            return self._get_slice_axis(key, axis=axis)
        elif com.is_bool_indexer(key):
            return self._getbool_axis(key, axis=axis)
        elif is_list_like_indexer(key):

            # an iterable multi-selection
            if not (isinstance(key, tuple) and isinstance(labels, MultiIndex)):

                if hasattr(key, "ndim") and key.ndim > 1:
                    raise ValueError("Cannot index with multidimensional key")

                return self._getitem_iterable(key, axis=axis)

            # nested tuple slicing
            if is_nested_tuple(key, labels):
                locs = labels.get_locs(key)
                indexer = [slice(None)] * self.ndim
                indexer[axis] = locs
                return self.obj.iloc[tuple(indexer)]

        # fall thru to straight lookup
        self._validate_key(key, axis)
        return self._get_label(key, axis=axis)
""" # caller is responsible for ensuring non-None axis obj = self.obj if not need_slice(slice_obj): return obj.copy(deep=False) labels = obj._get_axis(axis) indexer = labels.slice_indexer(slice_obj.start, slice_obj.stop, slice_obj.step) if isinstance(indexer, slice): return self.obj._slice(indexer, axis=axis) else: # DatetimeIndex overrides Index.slice_indexer and may # return a DatetimeIndex instead of a slice object. return self.obj.take(indexer, axis=axis) def _convert_to_indexer(self, key, axis: int, is_setter: bool = False): """ Convert indexing key into something we can use to do actual fancy indexing on a ndarray. Examples ix[:5] -> slice(0, 5) ix[[1,2,3]] -> [1,2,3] ix[['foo', 'bar', 'baz']] -> [i, j, k] (indices of foo, bar, baz) Going by Zen of Python? 'In the face of ambiguity, refuse the temptation to guess.' raise AmbiguousIndexError with integer labels? - No, prefer label-based indexing """ labels = self.obj._get_axis(axis) if isinstance(key, slice): return labels._convert_slice_indexer(key, kind="loc") # see if we are positional in nature is_int_index = labels.is_integer() is_int_positional = is_integer(key) and not is_int_index if is_scalar(key) or isinstance(labels, MultiIndex): # Otherwise get_loc will raise InvalidIndexError # if we are a label return me try: return labels.get_loc(key) except LookupError: if isinstance(key, tuple) and isinstance(labels, MultiIndex): if len(key) == labels.nlevels: return {"key": key} raise except InvalidIndexError: # GH35015, using datetime as column indices raises exception if not isinstance(labels, MultiIndex): raise except TypeError: pass except ValueError: if not is_int_positional: raise # a positional if is_int_positional: # if we are setting and its not a valid location # its an insert which fails by definition # always valid return {"key": key} if is_nested_tuple(key, labels): if isinstance(self.obj, ABCSeries) and any( isinstance(k, tuple) for k in key ): # GH#35349 Raise if tuple in tuple for series raise 
ValueError("Too many indices") return labels.get_locs(key) elif is_list_like_indexer(key): if is_iterator(key): key = list(key) if com.is_bool_indexer(key): key = check_bool_indexer(labels, key) (inds,) = key.nonzero() return inds else: return self._get_listlike_indexer(key, axis)[1] else: try: return labels.get_loc(key) except LookupError: # allow a not found key only if we are a setter if not is_list_like_indexer(key): return {"key": key} raise def _get_listlike_indexer(self, key, axis: int): """ Transform a list-like of keys into a new index and an indexer. Parameters ---------- key : list-like Targeted labels. axis: int Dimension on which the indexing is being made. Raises ------ KeyError If at least one key was requested but none was found. Returns ------- keyarr: Index New index (coinciding with 'key' if the axis is unique). values : array-like Indexer for the return object, -1 denotes keys not found. """ ax = self.obj._get_axis(axis) # Have the index compute an indexer or return None # if it cannot handle: indexer, keyarr = ax._convert_listlike_indexer(key) # We only act on all found values: if indexer is not None and (indexer != -1).all(): # _validate_read_indexer is a no-op if no -1s, so skip return ax[indexer], indexer if ax._index_as_unique: indexer = ax.get_indexer_for(keyarr) keyarr = ax.reindex(keyarr)[0] else: keyarr, indexer, new_indexer = ax._reindex_non_unique(keyarr) self._validate_read_indexer(keyarr, indexer, axis) return keyarr, indexer def _validate_read_indexer(self, key, indexer, axis: int): """ Check that indexer can be used to return a result. e.g. at least one element was found, unless the list of keys was actually empty. Parameters ---------- key : list-like Targeted labels (only used to show correct error message). indexer: array-like of booleans Indices corresponding to the key, (with -1 indicating not found). axis : int Dimension on which the indexing is being made. 
Raises ------ KeyError If at least one key was requested but none was found. """ if len(key) == 0: return # Count missing values: missing_mask = indexer < 0 missing = (missing_mask).sum() if missing: if missing == len(indexer): axis_name = self.obj._get_axis_name(axis) raise KeyError(f"None of [{key}] are in the [{axis_name}]") ax = self.obj._get_axis(axis) not_found = list(set(key) - set(ax)) raise KeyError(f"{not_found} not in index") @doc(IndexingMixin.iloc) class _iLocIndexer(_LocationIndexer): _valid_types = ( "integer, integer slice (START point is INCLUDED, END " "point is EXCLUDED), listlike of integers, boolean array" ) _takeable = True # ------------------------------------------------------------------- # Key Checks def _validate_key(self, key, axis: int): if com.is_bool_indexer(key): if hasattr(key, "index") and isinstance(key.index, Index): if key.index.inferred_type == "integer": raise NotImplementedError( "iLocation based boolean " "indexing on an integer type " "is not available" ) raise ValueError( "iLocation based boolean indexing cannot use " "an indexable as a mask" ) return if isinstance(key, slice): return elif is_integer(key): self._validate_integer(key, axis) elif isinstance(key, tuple): # a tuple should already have been caught by this point # so don't treat a tuple as a valid indexer raise IndexingError("Too many indexers") elif is_list_like_indexer(key): arr = np.array(key) len_axis = len(self.obj._get_axis(axis)) # check that the key has a numeric dtype if not is_numeric_dtype(arr.dtype): raise IndexError(f".iloc requires numeric indexers, got {arr}") # check that the key does not exceed the maximum size of the index if len(arr) and (arr.max() >= len_axis or arr.min() < -len_axis): raise IndexError("positional indexers are out-of-bounds") else: raise ValueError(f"Can only index by location with a [{self._valid_types}]") def _has_valid_setitem_indexer(self, indexer) -> bool: """ Validate that a positional indexer cannot enlarge its target 
will raise if needed, does not modify the indexer externally. Returns ------- bool """ if isinstance(indexer, dict): raise IndexError("iloc cannot enlarge its target object") if isinstance(indexer, ABCDataFrame): warnings.warn( "DataFrame indexer for .iloc is deprecated and will be removed in" "a future version.\n" "consider using .loc with a DataFrame indexer for automatic alignment.", FutureWarning, stacklevel=3, ) if not isinstance(indexer, tuple): indexer = _tuplify(self.ndim, indexer) for ax, i in zip(self.obj.axes, indexer): if isinstance(i, slice): # should check the stop slice? pass elif is_list_like_indexer(i): # should check the elements? pass elif is_integer(i): if i >= len(ax): raise IndexError("iloc cannot enlarge its target object") elif isinstance(i, dict): raise IndexError("iloc cannot enlarge its target object") return True def _is_scalar_access(self, key: tuple) -> bool: """ Returns ------- bool """ # this is a shortcut accessor to both .loc and .iloc # that provide the equivalent access of .at and .iat # a) avoid getting things via sections and (to minimize dtype changes) # b) provide a performant path if len(key) != self.ndim: return False return all(is_integer(k) for k in key) def _validate_integer(self, key: int, axis: int) -> None: """ Check that 'key' is a valid position in the desired axis. Parameters ---------- key : int Requested position. axis : int Desired axis. Raises ------ IndexError If 'key' is not a valid position in axis 'axis'. """ len_axis = len(self.obj._get_axis(axis)) if key >= len_axis or key < -len_axis: raise IndexError("single positional indexer is out-of-bounds") # ------------------------------------------------------------------- def _getitem_tuple(self, tup: tuple): self._has_valid_tuple(tup) with suppress(IndexingError): return self._getitem_lowerdim(tup) return self._getitem_tuple_same_dim(tup) def _get_list_axis(self, key, axis: int): """ Return Series values by list or array of integers. 
Parameters ---------- key : list-like positional indexer axis : int Returns ------- Series object Notes ----- `axis` can only be zero. """ try: return self.obj._take_with_is_copy(key, axis=axis) except IndexError as err: # re-raise with different error message raise IndexError("positional indexers are out-of-bounds") from err def _getitem_axis(self, key, axis: int): if isinstance(key, ABCDataFrame): raise IndexError( "DataFrame indexer is not allowed for .iloc\n" "Consider using .loc for automatic alignment." ) if isinstance(key, slice): return self._get_slice_axis(key, axis=axis) if is_iterator(key): key = list(key) if isinstance(key, list): key = np.asarray(key) if com.is_bool_indexer(key): self._validate_key(key, axis) return self._getbool_axis(key, axis=axis) # a list of integers elif is_list_like_indexer(key): return self._get_list_axis(key, axis=axis) # a single integer else: key = item_from_zerodim(key) if not is_integer(key): raise TypeError("Cannot index by location index with a non-integer key") # validate the location self._validate_integer(key, axis) return self.obj._ixs(key, axis=axis) def _get_slice_axis(self, slice_obj: slice, axis: int): # caller is responsible for ensuring non-None axis obj = self.obj if not need_slice(slice_obj): return obj.copy(deep=False) labels = obj._get_axis(axis) labels._validate_positional_slice(slice_obj) return self.obj._slice(slice_obj, axis=axis) def _convert_to_indexer(self, key, axis: int, is_setter: bool = False): """ Much simpler as we only have to deal with our valid types. """ return key def _get_setitem_indexer(self, key): # GH#32257 Fall through to let numpy do validation if is_iterator(key): return list(key) return key # ------------------------------------------------------------------- def _setitem_with_indexer(self, indexer, value, name="iloc"): """ _setitem_with_indexer is for setting values on a Series/DataFrame using positional indexers. 
    def _setitem_with_indexer(self, indexer, value, name="iloc"):
        """
        _setitem_with_indexer is for setting values on a Series/DataFrame
        using positional indexers.

        If the relevant keys are not present, the Series/DataFrame may be
        expanded.

        This method is currently broken when dealing with non-unique Indexes,
        since it goes from positional indexers back to labels when calling
        BlockManager methods, see GH#12991, GH#22046, GH#15686.

        Parameters
        ----------
        indexer : positional indexer (possibly a tuple, one entry per axis);
            dict entries mark missing keys to be inserted.
        value : the value(s) to assign.
        name : str, "iloc" or "loc" — which accessor semantics to use.
        """
        info_axis = self.obj._info_axis_number

        # maybe partial set
        take_split_path = not self.obj._mgr.is_single_block

        # if there is only one block/type, still have to take split path
        # unless the block is one-dimensional or it can hold the value
        if (
            not take_split_path
            and getattr(self.obj._mgr, "blocks", False)
            and self.ndim > 1
        ):
            # in case of dict, keys are indices
            val = list(value.values()) if isinstance(value, dict) else value
            blk = self.obj._mgr.blocks[0]
            take_split_path = not blk._can_hold_element(val)

        # if we have any multi-indexes that have non-trivial slices
        # (not null slices) then we must take the split path, xref
        # GH 10360, GH 27841
        if isinstance(indexer, tuple) and len(indexer) == len(self.obj.axes):
            for i, ax in zip(indexer, self.obj.axes):
                if isinstance(ax, MultiIndex) and not (
                    is_integer(i) or com.is_null_slice(i)
                ):
                    take_split_path = True
                    break

        if isinstance(indexer, tuple):
            nindexer = []
            for i, idx in enumerate(indexer):
                if isinstance(idx, dict):

                    # reindex the axis to the new value
                    # and set inplace
                    key, _ = convert_missing_indexer(idx)

                    # if this is the items axes, then take the main missing
                    # path first
                    # this correctly sets the dtype and avoids cache issues
                    # essentially this separates out the block that is needed
                    # to possibly be modified
                    if self.ndim > 1 and i == info_axis:

                        # add the new item, and set the value
                        # must have all defined axes if we have a scalar
                        # or a list-like on the non-info axes if we have a
                        # list-like
                        if not len(self.obj):
                            if not is_list_like_indexer(value):
                                raise ValueError(
                                    "cannot set a frame with no "
                                    "defined index and a scalar"
                                )
                            self.obj[key] = value
                            return

                        # add a new item with the dtype setup
                        if com.is_null_slice(indexer[0]):
                            # We are setting an entire column
                            self.obj[key] = value
                        else:
                            self.obj[key] = infer_fill_value(value)

                        new_indexer = convert_from_missing_indexer_tuple(
                            indexer, self.obj.axes
                        )
                        self._setitem_with_indexer(new_indexer, value, name)

                        return

                    # reindex the axis
                    # make sure to clear the cache because we are
                    # just replacing the block manager here
                    # so the object is the same
                    index = self.obj._get_axis(i)
                    labels = index.insert(len(index), key)

                    # We are expanding the Series/DataFrame values to match
                    # the length of the new index `labels`. GH#40096 ensure
                    # this is valid even if the index has duplicates.
                    taker = np.arange(len(index) + 1, dtype=np.intp)
                    taker[-1] = -1
                    reindexers = {i: (labels, taker)}
                    new_obj = self.obj._reindex_with_indexers(
                        reindexers, allow_dups=True
                    )
                    self.obj._mgr = new_obj._mgr
                    self.obj._maybe_update_cacher(clear=True)
                    self.obj._is_copy = None

                    nindexer.append(labels.get_loc(key))

                else:
                    nindexer.append(idx)

            indexer = tuple(nindexer)
        else:

            indexer, missing = convert_missing_indexer(indexer)

            if missing:
                self._setitem_with_indexer_missing(indexer, value)
                return

        # align and set the values
        if take_split_path:
            # We have to operate column-wise
            self._setitem_with_indexer_split_path(indexer, value, name)
        else:
            self._setitem_single_block(indexer, value, name)
""" # Above we only set take_split_path to True for 2D cases assert self.ndim == 2 if not isinstance(indexer, tuple): indexer = _tuplify(self.ndim, indexer) if len(indexer) > self.ndim: raise IndexError("too many indices for array") if isinstance(indexer[0], np.ndarray) and indexer[0].ndim > 2: raise ValueError(r"Cannot set values with ndim > 2") if (isinstance(value, ABCSeries) and name != "iloc") or isinstance(value, dict): from pandas import Series value = self._align_series(indexer, Series(value)) # Ensure we have something we can iterate over info_axis = indexer[1] ilocs = self._ensure_iterable_column_indexer(info_axis) pi = indexer[0] lplane_indexer = length_of_indexer(pi, self.obj.index) # lplane_indexer gives the expected length of obj[indexer[0]] # we need an iterable, with a ndim of at least 1 # eg. don't pass through np.array(0) if is_list_like_indexer(value) and getattr(value, "ndim", 1) > 0: if isinstance(value, ABCDataFrame): self._setitem_with_indexer_frame_value(indexer, value, name) elif np.ndim(value) == 2: self._setitem_with_indexer_2d_value(indexer, value) elif len(ilocs) == 1 and lplane_indexer == len(value) and not is_scalar(pi): # We are setting multiple rows in a single column. self._setitem_single_column(ilocs[0], value, pi) elif len(ilocs) == 1 and 0 != lplane_indexer != len(value): # We are trying to set N values into M entries of a single # column, which is invalid for N != M # Exclude zero-len for e.g. boolean masking that is all-false if len(value) == 1 and not is_integer(info_axis): # This is a case like df.iloc[:3, [1]] = [0] # where we treat as df.iloc[:3, 1] = 0 return self._setitem_with_indexer((pi, info_axis[0]), value[0]) raise ValueError( "Must have equal len keys and value " "when setting with an iterable" ) elif lplane_indexer == 0 and len(value) == len(self.obj.index): # We get here in one case via .loc with a all-False mask pass elif len(ilocs) == len(value): # We are setting multiple columns in a single row. 
for loc, v in zip(ilocs, value): self._setitem_single_column(loc, v, pi) elif len(ilocs) == 1 and com.is_null_slice(pi) and len(self.obj) == 0: # This is a setitem-with-expansion, see # test_loc_setitem_empty_append_expands_rows_mixed_dtype # e.g. df = DataFrame(columns=["x", "y"]) # df["x"] = df["x"].astype(np.int64) # df.loc[:, "x"] = [1, 2, 3] self._setitem_single_column(ilocs[0], value, pi) else: raise ValueError( "Must have equal len keys and value " "when setting with an iterable" ) else: # scalar value for loc in ilocs: self._setitem_single_column(loc, value, pi) def _setitem_with_indexer_2d_value(self, indexer, value): # We get here with np.ndim(value) == 2, excluding DataFrame, # which goes through _setitem_with_indexer_frame_value pi = indexer[0] ilocs = self._ensure_iterable_column_indexer(indexer[1]) # GH#7551 Note that this coerces the dtype if we are mixed value = np.array(value, dtype=object) if len(ilocs) != value.shape[1]: raise ValueError( "Must have equal len keys and value when setting with an ndarray" ) for i, loc in enumerate(ilocs): # setting with a list, re-coerces self._setitem_single_column(loc, value[:, i].tolist(), pi) def _setitem_with_indexer_frame_value(self, indexer, value: DataFrame, name: str): ilocs = self._ensure_iterable_column_indexer(indexer[1]) sub_indexer = list(indexer) pi = indexer[0] multiindex_indexer = isinstance(self.obj.columns, MultiIndex) unique_cols = value.columns.is_unique # We do not want to align the value in case of iloc GH#37728 if name == "iloc": for i, loc in enumerate(ilocs): val = value.iloc[:, i] self._setitem_single_column(loc, val, pi) elif not unique_cols and value.columns.equals(self.obj.columns): # We assume we are already aligned, see # test_iloc_setitem_frame_duplicate_columns_multiple_blocks for loc in ilocs: item = self.obj.columns[loc] if item in value: sub_indexer[1] = item val = self._align_series( tuple(sub_indexer), value.iloc[:, loc], multiindex_indexer, ) else: val = np.nan 
self._setitem_single_column(loc, val, pi) elif not unique_cols: raise ValueError("Setting with non-unique columns is not allowed.") else: for loc in ilocs: item = self.obj.columns[loc] if item in value: sub_indexer[1] = item val = self._align_series( tuple(sub_indexer), value[item], multiindex_indexer ) else: val = np.nan self._setitem_single_column(loc, val, pi) def _setitem_single_column(self, loc: int, value, plane_indexer): """ Parameters ---------- loc : int Indexer for column position plane_indexer : int, slice, listlike[int] The indexer we use for setitem along axis=0. """ pi = plane_indexer ser = self.obj._ixs(loc, axis=1) # perform the equivalent of a setitem on the info axis # as we have a null slice or a slice with full bounds # which means essentially reassign to the columns of a # multi-dim object # GH#6149 (null slice), GH#10408 (full bounds) if com.is_null_slice(pi) or com.is_full_slice(pi, len(self.obj)): ser = value elif ( is_array_like(value) and is_exact_shape_match(ser, value) and not is_empty_indexer(pi, value) ): if is_list_like(pi): ser = value[np.argsort(pi)] else: # in case of slice ser = value[pi] else: # set the item, possibly having a dtype change ser = ser.copy() ser._mgr = ser._mgr.setitem(indexer=(pi,), value=value) ser._maybe_update_cacher(clear=True) # reset the sliced object if unique self.obj._iset_item(loc, ser) def _setitem_single_block(self, indexer, value, name: str): """ _setitem_with_indexer for the case when we have a single Block. 
""" from pandas import Series info_axis = self.obj._info_axis_number item_labels = self.obj._get_axis(info_axis) if isinstance(indexer, tuple): # if we are setting on the info axis ONLY # set using those methods to avoid block-splitting # logic here if ( len(indexer) > info_axis and is_integer(indexer[info_axis]) and all( com.is_null_slice(idx) for i, idx in enumerate(indexer) if i != info_axis ) ): selected_item_labels = item_labels[indexer[info_axis]] if len(item_labels.get_indexer_for([selected_item_labels])) == 1: self.obj[selected_item_labels] = value return indexer = maybe_convert_ix(*indexer) if (isinstance(value, ABCSeries) and name != "iloc") or isinstance(value, dict): # TODO(EA): ExtensionBlock.setitem this causes issues with # setting for extensionarrays that store dicts. Need to decide # if it's worth supporting that. value = self._align_series(indexer, Series(value)) elif isinstance(value, ABCDataFrame) and name != "iloc": value = self._align_frame(indexer, value) # check for chained assignment self.obj._check_is_chained_assignment_possible() # actually do the set self.obj._mgr = self.obj._mgr.setitem(indexer=indexer, value=value) self.obj._maybe_update_cacher(clear=True) def _setitem_with_indexer_missing(self, indexer, value): """ Insert new row(s) or column(s) into the Series or DataFrame. """ from pandas import Series # reindex the axis to the new value # and set inplace if self.ndim == 1: index = self.obj.index new_index = index.insert(len(index), indexer) # we have a coerced indexer, e.g. a float # that matches in an Int64Index, so # we will not create a duplicate index, rather # index to that element # e.g. 
0.0 -> 0 # GH#12246 if index.is_unique: new_indexer = index.get_indexer([new_index[-1]]) if (new_indexer != -1).any(): # We get only here with loc, so can hard code return self._setitem_with_indexer(new_indexer, value, "loc") # this preserves dtype of the value new_values = Series([value])._values if len(self.obj._values): # GH#22717 handle casting compatibility that np.concatenate # does incorrectly new_values = concat_compat([self.obj._values, new_values]) self.obj._mgr = self.obj._constructor( new_values, index=new_index, name=self.obj.name )._mgr self.obj._maybe_update_cacher(clear=True) elif self.ndim == 2: if not len(self.obj.columns): # no columns and scalar raise ValueError("cannot set a frame with no defined columns") if isinstance(value, ABCSeries): # append a Series value = value.reindex(index=self.obj.columns, copy=True) value.name = indexer elif isinstance(value, dict): value = Series( value, index=self.obj.columns, name=indexer, dtype=object ) else: # a list-list if is_list_like_indexer(value): # must have conforming columns if len(value) != len(self.obj.columns): raise ValueError("cannot set a row with mismatched columns") value = Series(value, index=self.obj.columns, name=indexer) self.obj._mgr = self.obj.append(value)._mgr self.obj._maybe_update_cacher(clear=True) def _ensure_iterable_column_indexer(self, column_indexer): """ Ensure that our column indexer is something that can be iterated over. """ if is_integer(column_indexer): ilocs = [column_indexer] elif isinstance(column_indexer, slice): ilocs = np.arange(len(self.obj.columns))[column_indexer] elif isinstance(column_indexer, np.ndarray) and is_bool_dtype( column_indexer.dtype ): ilocs = np.arange(len(column_indexer))[column_indexer] else: ilocs = column_indexer return ilocs def _align_series(self, indexer, ser: Series, multiindex_indexer: bool = False): """ Parameters ---------- indexer : tuple, slice, scalar Indexer used to get the locations that will be set to `ser`. 
    def _align_series(self, indexer, ser: Series, multiindex_indexer: bool = False):
        """
        Parameters
        ----------
        indexer : tuple, slice, scalar
            Indexer used to get the locations that will be set to `ser`.
        ser : pd.Series
            Values to assign to the locations specified by `indexer`.
        multiindex_indexer : bool, optional
            Defaults to False. Should be set to True if `indexer` was from
            a `pd.MultiIndex`, to avoid unnecessary broadcasting.

        Returns
        -------
        `np.array` of `ser` broadcast to the appropriate shape for assignment
        to the locations selected by `indexer`
        """
        if isinstance(indexer, (slice, np.ndarray, list, Index)):
            indexer = (indexer,)

        if isinstance(indexer, tuple):

            # flatten np.ndarray indexers
            def ravel(i):
                return i.ravel() if isinstance(i, np.ndarray) else i

            indexer = tuple(map(ravel, indexer))

            aligners = [not com.is_null_slice(idx) for idx in indexer]
            sum_aligners = sum(aligners)
            single_aligner = sum_aligners == 1
            is_frame = self.ndim == 2
            obj = self.obj

            # are we a single alignable value on a non-primary
            # dim (e.g. panel: 1,2, or frame: 0) ?
            # hence need to align to a single axis dimension
            # rather that find all valid dims

            # frame
            if is_frame:
                single_aligner = single_aligner and aligners[0]

            # we have a frame, with multiple indexers on both axes; and a
            # series, so need to broadcast (see GH5206)
            if sum_aligners == self.ndim and all(is_sequence(_) for _ in indexer):
                ser = ser.reindex(obj.axes[0][indexer[0]], copy=True)._values

                # single indexer
                if len(indexer) > 1 and not multiindex_indexer:
                    len_indexer = len(indexer[1])
                    # tile the row values across the selected columns
                    ser = np.tile(ser, len_indexer).reshape(len_indexer, -1).T

                return ser

            for i, idx in enumerate(indexer):
                ax = obj.axes[i]

                # multiple aligners (or null slices)
                if is_sequence(idx) or isinstance(idx, slice):
                    if single_aligner and com.is_null_slice(idx):
                        continue
                    new_ix = ax[idx]
                    if not is_list_like_indexer(new_ix):
                        new_ix = Index([new_ix])
                    else:
                        new_ix = Index(new_ix)
                    if ser.index.equals(new_ix) or not len(new_ix):
                        return ser._values.copy()

                    return ser.reindex(new_ix)._values

                # 2 dims
                elif single_aligner:

                    # reindex along index
                    ax = self.obj.axes[1]
                    if ser.index.equals(ax) or not len(ax):
                        return ser._values.copy()
                    return ser.reindex(ax)._values

        elif is_integer(indexer) and self.ndim == 1:
            if is_object_dtype(self.obj):
                # object dtype can hold the Series as-is
                return ser
            ax = self.obj._get_axis(0)

            if ser.index.equals(ax):
                return ser._values.copy()

            return ser.reindex(ax)._values[indexer]

        elif is_integer(indexer):
            ax = self.obj._get_axis(1)

            if ser.index.equals(ax):
                return ser._values.copy()

            return ser.reindex(ax)._values

        raise ValueError("Incompatible indexer with Series")

    def _align_frame(self, indexer, df: DataFrame):
        """Broadcast/align a DataFrame `df` to the locations in `indexer`."""
        is_frame = self.ndim == 2

        if isinstance(indexer, tuple):

            idx, cols = None, None
            sindexers = []
            for i, ix in enumerate(indexer):
                ax = self.obj.axes[i]
                if is_sequence(ix) or isinstance(ix, slice):
                    if isinstance(ix, np.ndarray):
                        ix = ix.ravel()
                    if idx is None:
                        idx = ax[ix]
                    elif cols is None:
                        cols = ax[ix]
                    else:
                        break
                else:
                    sindexers.append(i)

            if idx is not None and cols is not None:

                if df.index.equals(idx) and df.columns.equals(cols):
                    val = df.copy()._values
                else:
                    val = df.reindex(idx, columns=cols)._values
                return val

        elif (isinstance(indexer, slice) or is_list_like_indexer(indexer)) and is_frame:
            ax = self.obj.index[indexer]
            if df.index.equals(ax):
                val = df.copy()._values
            else:

                # we have a multi-index and are trying to align
                # with a particular, level GH3738
                if (
                    isinstance(ax, MultiIndex)
                    and isinstance(df.index, MultiIndex)
                    and ax.nlevels != df.index.nlevels
                ):
                    raise TypeError(
                        "cannot align on a multi-index with out "
                        "specifying the join levels"
                    )

                val = df.reindex(index=ax)._values
            return val

        raise ValueError("Incompatible indexer with DataFrame")
Timestamp) if not is_list_like_indexer(key): key = (key,) else: raise ValueError("Invalid call for scalar access (getting)!") key = self._convert_key(key) return self.obj._get_value(*key, takeable=self._takeable) def __setitem__(self, key, value): if isinstance(key, tuple): key = tuple(com.apply_if_callable(x, self.obj) for x in key) else: # scalar callable may return tuple key = com.apply_if_callable(key, self.obj) if not isinstance(key, tuple): key = _tuplify(self.ndim, key) key = list(self._convert_key(key, is_setter=True)) if len(key) != self.ndim: raise ValueError("Not enough indexers for scalar access (setting)!") self.obj._set_value(*key, value=value, takeable=self._takeable) @doc(IndexingMixin.at) class _AtIndexer(_ScalarAccessIndexer): _takeable = False def _convert_key(self, key, is_setter: bool = False): """ Require they keys to be the same type as the index. (so we don't fallback) """ # GH 26989 # For series, unpacking key needs to result in the label. # This is already the case for len(key) == 1; e.g. 
(1,) if self.ndim == 1 and len(key) > 1: key = (key,) # allow arbitrary setting if is_setter: return list(key) return key @property def _axes_are_unique(self) -> bool: # Only relevant for self.ndim == 2 assert self.ndim == 2 return self.obj.index.is_unique and self.obj.columns.is_unique def __getitem__(self, key): if self.ndim == 2 and not self._axes_are_unique: # GH#33041 fall back to .loc if not isinstance(key, tuple) or not all(is_scalar(x) for x in key): raise ValueError("Invalid call for scalar access (getting)!") return self.obj.loc[key] return super().__getitem__(key) def __setitem__(self, key, value): if self.ndim == 2 and not self._axes_are_unique: # GH#33041 fall back to .loc if not isinstance(key, tuple) or not all(is_scalar(x) for x in key): raise ValueError("Invalid call for scalar access (setting)!") self.obj.loc[key] = value return return super().__setitem__(key, value) @doc(IndexingMixin.iat) class _iAtIndexer(_ScalarAccessIndexer): _takeable = True def _convert_key(self, key, is_setter: bool = False): """ Require integer args. (and convert to label arguments) """ for i in key: if not is_integer(i): raise ValueError("iAt based indexing can only have integer indexers") return key def _tuplify(ndim: int, loc: Hashable) -> tuple[Hashable | slice, ...]: """ Given an indexer for the first dimension, create an equivalent tuple for indexing over all dimensions. Parameters ---------- ndim : int loc : object Returns ------- tuple """ _tup: list[Hashable | slice] _tup = [slice(None, None) for _ in range(ndim)] _tup[0] = loc return tuple(_tup) def convert_to_index_sliceable(obj: DataFrame, key): """ If we are index sliceable, then return my slicer, otherwise return None. 
""" idx = obj.index if isinstance(key, slice): return idx._convert_slice_indexer(key, kind="getitem") elif isinstance(key, str): # we are an actual column if key in obj.columns: return None # We might have a datetimelike string that we can translate to a # slice here via partial string indexing if idx._supports_partial_string_indexing: try: res = idx._get_string_slice(str(key)) warnings.warn( "Indexing a DataFrame with a datetimelike index using a single " "string to slice the rows, like `frame[string]`, is deprecated " "and will be removed in a future version. Use `frame.loc[string]` " "instead.", FutureWarning, stacklevel=3, ) return res except (KeyError, ValueError, NotImplementedError): return None return None def check_bool_indexer(index: Index, key) -> np.ndarray: """ Check if key is a valid boolean indexer for an object with such index and perform reindexing or conversion if needed. This function assumes that is_bool_indexer(key) == True. Parameters ---------- index : Index Index of the object on which the indexing is done. key : list-like Boolean indexer to check. Returns ------- np.array Resulting key. Raises ------ IndexError If the key does not have the same length as index. IndexingError If the index of the key is unalignable to index. """ result = key if isinstance(key, ABCSeries) and not key.index.equals(index): result = result.reindex(index) mask = isna(result._values) if mask.any(): raise IndexingError( "Unalignable boolean Series provided as " "indexer (index of the boolean Series and of " "the indexed object do not match)." 
) return result.astype(bool)._values if is_object_dtype(key): # key might be object-dtype bool, check_array_indexer needs bool array result = np.asarray(result, dtype=bool) elif not is_array_like(result): # GH 33924 # key may contain nan elements, check_array_indexer needs bool array result = pd_array(result, dtype=bool) return check_array_indexer(index, result) def convert_missing_indexer(indexer): """ Reverse convert a missing indexer, which is a dict return the scalar indexer and a boolean indicating if we converted """ if isinstance(indexer, dict): # a missing key (but not a tuple indexer) indexer = indexer["key"] if isinstance(indexer, bool): raise KeyError("cannot use a single bool to index into setitem") return indexer, True return indexer, False def convert_from_missing_indexer_tuple(indexer, axes): """ Create a filtered indexer that doesn't have any missing indexers. """ def get_indexer(_i, _idx): return axes[_i].get_loc(_idx["key"]) if isinstance(_idx, dict) else _idx return tuple(get_indexer(_i, _idx) for _i, _idx in enumerate(indexer)) def maybe_convert_ix(*args): """ We likely want to take the cross-product. """ for arg in args: if not isinstance(arg, (np.ndarray, list, ABCSeries, Index)): return args return np.ix_(*args) def is_nested_tuple(tup, labels) -> bool: """ Returns ------- bool """ # check for a compatible nested tuple and multiindexes among the axes if not isinstance(tup, tuple): return False for k in tup: if is_list_like(k) or isinstance(k, slice): return isinstance(labels, MultiIndex) return False def is_label_like(key) -> bool: """ Returns ------- bool """ # select a label or row return not isinstance(key, slice) and not is_list_like_indexer(key) def need_slice(obj: slice) -> bool: """ Returns ------- bool """ return ( obj.start is not None or obj.stop is not None or (obj.step is not None and obj.step != 1) )
import pytest from pandas.compat._optional import VERSIONS import pandas as pd from pandas.core.computation.engines import ENGINES import pandas.core.computation.expr as expr from pandas.util.version import Version def test_compat(): # test we have compat with our version of nu from pandas.core.computation.check import NUMEXPR_INSTALLED try: import numexpr as ne ver = ne.__version__ if Version(ver) < Version(VERSIONS["numexpr"]): assert not NUMEXPR_INSTALLED else: assert NUMEXPR_INSTALLED except ImportError: pytest.skip("not testing numexpr version compat") @pytest.mark.parametrize("engine", ENGINES) @pytest.mark.parametrize("parser", expr.PARSERS) def test_invalid_numexpr_version(engine, parser): def testit(): a, b = 1, 2 # noqa res = pd.eval("a + b", engine=engine, parser=parser) assert res == 3 if engine == "numexpr": try: import numexpr as ne # noqa F401 except ImportError: pytest.skip("no numexpr") else: testit() else: testit()
datapythonista/pandas
pandas/tests/computation/test_compat.py
pandas/core/indexing.py
from pandas import ( TimedeltaIndex, timedelta_range, ) import pandas._testing as tm class TestTimedeltaIndexDelete: def test_delete(self): idx = timedelta_range(start="1 Days", periods=5, freq="D", name="idx") # preserve freq expected_0 = timedelta_range(start="2 Days", periods=4, freq="D", name="idx") expected_4 = timedelta_range(start="1 Days", periods=4, freq="D", name="idx") # reset freq to None expected_1 = TimedeltaIndex( ["1 day", "3 day", "4 day", "5 day"], freq=None, name="idx" ) cases = { 0: expected_0, -5: expected_0, -1: expected_4, 4: expected_4, 1: expected_1, } for n, expected in cases.items(): result = idx.delete(n) tm.assert_index_equal(result, expected) assert result.name == expected.name assert result.freq == expected.freq with tm.external_error_raised((IndexError, ValueError)): # either depending on numpy version idx.delete(5) def test_delete_slice(self): idx = timedelta_range(start="1 days", periods=10, freq="D", name="idx") # preserve freq expected_0_2 = timedelta_range(start="4 days", periods=7, freq="D", name="idx") expected_7_9 = timedelta_range(start="1 days", periods=7, freq="D", name="idx") # reset freq to None expected_3_5 = TimedeltaIndex( ["1 d", "2 d", "3 d", "7 d", "8 d", "9 d", "10d"], freq=None, name="idx" ) cases = { (0, 1, 2): expected_0_2, (7, 8, 9): expected_7_9, (3, 4, 5): expected_3_5, } for n, expected in cases.items(): result = idx.delete(n) tm.assert_index_equal(result, expected) assert result.name == expected.name assert result.freq == expected.freq result = idx.delete(slice(n[0], n[-1] + 1)) tm.assert_index_equal(result, expected) assert result.name == expected.name assert result.freq == expected.freq def test_delete_doesnt_infer_freq(self): # GH#30655 behavior matches DatetimeIndex tdi = TimedeltaIndex(["1 Day", "2 Days", None, "3 Days", "4 Days"]) result = tdi.delete(2) assert result.freq is None
import pytest from pandas.compat._optional import VERSIONS import pandas as pd from pandas.core.computation.engines import ENGINES import pandas.core.computation.expr as expr from pandas.util.version import Version def test_compat(): # test we have compat with our version of nu from pandas.core.computation.check import NUMEXPR_INSTALLED try: import numexpr as ne ver = ne.__version__ if Version(ver) < Version(VERSIONS["numexpr"]): assert not NUMEXPR_INSTALLED else: assert NUMEXPR_INSTALLED except ImportError: pytest.skip("not testing numexpr version compat") @pytest.mark.parametrize("engine", ENGINES) @pytest.mark.parametrize("parser", expr.PARSERS) def test_invalid_numexpr_version(engine, parser): def testit(): a, b = 1, 2 # noqa res = pd.eval("a + b", engine=engine, parser=parser) assert res == 3 if engine == "numexpr": try: import numexpr as ne # noqa F401 except ImportError: pytest.skip("no numexpr") else: testit() else: testit()
datapythonista/pandas
pandas/tests/computation/test_compat.py
pandas/tests/indexes/timedeltas/test_delete.py
from __future__ import annotations from contextlib import contextmanager import re from typing import ( Sequence, Type, cast, ) import warnings @contextmanager def assert_produces_warning( expected_warning: type[Warning] | bool | None = Warning, filter_level="always", check_stacklevel: bool = True, raise_on_extra_warnings: bool = True, match: str | None = None, ): """ Context manager for running code expected to either raise a specific warning, or not raise any warnings. Verifies that the code raises the expected warning, and that it does not raise any other unexpected warnings. It is basically a wrapper around ``warnings.catch_warnings``. Parameters ---------- expected_warning : {Warning, False, None}, default Warning The type of Exception raised. ``exception.Warning`` is the base class for all warnings. To check that no warning is returned, specify ``False`` or ``None``. filter_level : str or None, default "always" Specifies whether warnings are ignored, displayed, or turned into errors. Valid values are: * "error" - turns matching warnings into exceptions * "ignore" - discard the warning * "always" - always emit a warning * "default" - print the warning the first time it is generated from each location * "module" - print the warning the first time it is generated from each module * "once" - print the warning the first time it is generated check_stacklevel : bool, default True If True, displays the line that called the function containing the warning to show were the function is called. Otherwise, the line that implements the function is displayed. raise_on_extra_warnings : bool, default True Whether extra warnings not of the type `expected_warning` should cause the test to fail. match : str, optional Match warning message. Examples -------- >>> import warnings >>> with assert_produces_warning(): ... warnings.warn(UserWarning()) ... >>> with assert_produces_warning(False): ... warnings.warn(RuntimeWarning()) ... Traceback (most recent call last): ... 
AssertionError: Caused unexpected warning(s): ['RuntimeWarning']. >>> with assert_produces_warning(UserWarning): ... warnings.warn(RuntimeWarning()) Traceback (most recent call last): ... AssertionError: Did not see expected warning of class 'UserWarning'. ..warn:: This is *not* thread-safe. """ __tracebackhide__ = True with warnings.catch_warnings(record=True) as w: warnings.simplefilter(filter_level) yield w if expected_warning: expected_warning = cast(Type[Warning], expected_warning) _assert_caught_expected_warning( caught_warnings=w, expected_warning=expected_warning, match=match, check_stacklevel=check_stacklevel, ) if raise_on_extra_warnings: _assert_caught_no_extra_warnings( caught_warnings=w, expected_warning=expected_warning, ) def _assert_caught_expected_warning( *, caught_warnings: Sequence[warnings.WarningMessage], expected_warning: type[Warning], match: str | None, check_stacklevel: bool, ) -> None: """Assert that there was the expected warning among the caught warnings.""" saw_warning = False matched_message = False for actual_warning in caught_warnings: if issubclass(actual_warning.category, expected_warning): saw_warning = True if check_stacklevel and issubclass( actual_warning.category, (FutureWarning, DeprecationWarning) ): _assert_raised_with_correct_stacklevel(actual_warning) if match is not None and re.search(match, str(actual_warning.message)): matched_message = True if not saw_warning: raise AssertionError( f"Did not see expected warning of class " f"{repr(expected_warning.__name__)}" ) if match and not matched_message: raise AssertionError( f"Did not see warning {repr(expected_warning.__name__)} " f"matching {match}" ) def _assert_caught_no_extra_warnings( *, caught_warnings: Sequence[warnings.WarningMessage], expected_warning: type[Warning] | bool | None, ) -> None: """Assert that no extra warnings apart from the expected ones are caught.""" extra_warnings = [] for actual_warning in caught_warnings: if _is_unexpected_warning(actual_warning, 
expected_warning): unclosed = "unclosed transport <asyncio.sslproto._SSLProtocolTransport" if actual_warning.category == ResourceWarning and unclosed in str( actual_warning.message ): # FIXME: kludge because pytest.filterwarnings does not # suppress these, xref GH#38630 continue extra_warnings.append( ( actual_warning.category.__name__, actual_warning.message, actual_warning.filename, actual_warning.lineno, ) ) if extra_warnings: raise AssertionError(f"Caused unexpected warning(s): {repr(extra_warnings)}") def _is_unexpected_warning( actual_warning: warnings.WarningMessage, expected_warning: type[Warning] | bool | None, ) -> bool: """Check if the actual warning issued is unexpected.""" if actual_warning and not expected_warning: return True expected_warning = cast(Type[Warning], expected_warning) return bool(not issubclass(actual_warning.category, expected_warning)) def _assert_raised_with_correct_stacklevel( actual_warning: warnings.WarningMessage, ) -> None: from inspect import ( getframeinfo, stack, ) caller = getframeinfo(stack()[4][0]) msg = ( "Warning not set with correct stacklevel. " f"File where warning is raised: {actual_warning.filename} != " f"{caller.filename}. Warning message: {actual_warning.message}" ) assert actual_warning.filename == caller.filename, msg
import pytest from pandas.compat._optional import VERSIONS import pandas as pd from pandas.core.computation.engines import ENGINES import pandas.core.computation.expr as expr from pandas.util.version import Version def test_compat(): # test we have compat with our version of nu from pandas.core.computation.check import NUMEXPR_INSTALLED try: import numexpr as ne ver = ne.__version__ if Version(ver) < Version(VERSIONS["numexpr"]): assert not NUMEXPR_INSTALLED else: assert NUMEXPR_INSTALLED except ImportError: pytest.skip("not testing numexpr version compat") @pytest.mark.parametrize("engine", ENGINES) @pytest.mark.parametrize("parser", expr.PARSERS) def test_invalid_numexpr_version(engine, parser): def testit(): a, b = 1, 2 # noqa res = pd.eval("a + b", engine=engine, parser=parser) assert res == 3 if engine == "numexpr": try: import numexpr as ne # noqa F401 except ImportError: pytest.skip("no numexpr") else: testit() else: testit()
datapythonista/pandas
pandas/tests/computation/test_compat.py
pandas/_testing/_warnings.py
from typing import Optional import numpy as np from pandas._libs import lib from pandas.core.dtypes.cast import maybe_downcast_numeric from pandas.core.dtypes.common import ( ensure_object, is_datetime_or_timedelta_dtype, is_decimal, is_integer_dtype, is_number, is_numeric_dtype, is_scalar, needs_i8_conversion, ) from pandas.core.dtypes.generic import ( ABCIndex, ABCSeries, ) import pandas as pd from pandas.core.arrays.numeric import NumericArray def to_numeric(arg, errors="raise", downcast=None): """ Convert argument to a numeric type. The default return dtype is `float64` or `int64` depending on the data supplied. Use the `downcast` parameter to obtain other dtypes. Please note that precision loss may occur if really large numbers are passed in. Due to the internal limitations of `ndarray`, if numbers smaller than `-9223372036854775808` (np.iinfo(np.int64).min) or larger than `18446744073709551615` (np.iinfo(np.uint64).max) are passed in, it is very likely they will be converted to float so that they can stored in an `ndarray`. These warnings apply similarly to `Series` since it internally leverages `ndarray`. Parameters ---------- arg : scalar, list, tuple, 1-d array, or Series Argument to be converted. errors : {'ignore', 'raise', 'coerce'}, default 'raise' - If 'raise', then invalid parsing will raise an exception. - If 'coerce', then invalid parsing will be set as NaN. - If 'ignore', then invalid parsing will return the input. 
downcast : {'integer', 'signed', 'unsigned', 'float'}, default None If not None, and if the data has been successfully cast to a numerical dtype (or if the data was numeric to begin with), downcast that resulting data to the smallest numerical dtype possible according to the following rules: - 'integer' or 'signed': smallest signed int dtype (min.: np.int8) - 'unsigned': smallest unsigned int dtype (min.: np.uint8) - 'float': smallest float dtype (min.: np.float32) As this behaviour is separate from the core conversion to numeric values, any errors raised during the downcasting will be surfaced regardless of the value of the 'errors' input. In addition, downcasting will only occur if the size of the resulting data's dtype is strictly larger than the dtype it is to be cast to, so if none of the dtypes checked satisfy that specification, no downcasting will be performed on the data. Returns ------- ret Numeric if parsing succeeded. Return type depends on input. Series if Series, otherwise ndarray. See Also -------- DataFrame.astype : Cast argument to a specified dtype. to_datetime : Convert argument to datetime. to_timedelta : Convert argument to timedelta. numpy.ndarray.astype : Cast a numpy array to a specified type. DataFrame.convert_dtypes : Convert dtypes. 
Examples -------- Take separate series and convert to numeric, coercing when told to >>> s = pd.Series(['1.0', '2', -3]) >>> pd.to_numeric(s) 0 1.0 1 2.0 2 -3.0 dtype: float64 >>> pd.to_numeric(s, downcast='float') 0 1.0 1 2.0 2 -3.0 dtype: float32 >>> pd.to_numeric(s, downcast='signed') 0 1 1 2 2 -3 dtype: int8 >>> s = pd.Series(['apple', '1.0', '2', -3]) >>> pd.to_numeric(s, errors='ignore') 0 apple 1 1.0 2 2 3 -3 dtype: object >>> pd.to_numeric(s, errors='coerce') 0 NaN 1 1.0 2 2.0 3 -3.0 dtype: float64 Downcasting of nullable integer and floating dtypes is supported: >>> s = pd.Series([1, 2, 3], dtype="Int64") >>> pd.to_numeric(s, downcast="integer") 0 1 1 2 2 3 dtype: Int8 >>> s = pd.Series([1.0, 2.1, 3.0], dtype="Float64") >>> pd.to_numeric(s, downcast="float") 0 1.0 1 2.1 2 3.0 dtype: Float32 """ if downcast not in (None, "integer", "signed", "unsigned", "float"): raise ValueError("invalid downcasting method provided") if errors not in ("ignore", "raise", "coerce"): raise ValueError("invalid error value specified") is_series = False is_index = False is_scalars = False if isinstance(arg, ABCSeries): is_series = True values = arg.values elif isinstance(arg, ABCIndex): is_index = True if needs_i8_conversion(arg.dtype): values = arg.asi8 else: values = arg.values elif isinstance(arg, (list, tuple)): values = np.array(arg, dtype="O") elif is_scalar(arg): if is_decimal(arg): return float(arg) if is_number(arg): return arg is_scalars = True values = np.array([arg], dtype="O") elif getattr(arg, "ndim", 1) > 1: raise TypeError("arg must be a list, tuple, 1-d array, or Series") else: values = arg # GH33013: for IntegerArray & FloatingArray extract non-null values for casting # save mask to reconstruct the full array after casting mask: Optional[np.ndarray] = None if isinstance(values, NumericArray): mask = values._mask values = values._data[~mask] values_dtype = getattr(values, "dtype", None) if is_numeric_dtype(values_dtype): pass elif 
is_datetime_or_timedelta_dtype(values_dtype): values = values.view(np.int64) else: values = ensure_object(values) coerce_numeric = errors not in ("ignore", "raise") try: values, _ = lib.maybe_convert_numeric( values, set(), coerce_numeric=coerce_numeric ) except (ValueError, TypeError): if errors == "raise": raise # attempt downcast only if the data has been successfully converted # to a numerical dtype and if a downcast method has been specified if downcast is not None and is_numeric_dtype(values.dtype): typecodes = None if downcast in ("integer", "signed"): typecodes = np.typecodes["Integer"] elif downcast == "unsigned" and (not len(values) or np.min(values) >= 0): typecodes = np.typecodes["UnsignedInteger"] elif downcast == "float": typecodes = np.typecodes["Float"] # pandas support goes only to np.float32, # as float dtypes smaller than that are # extremely rare and not well supported float_32_char = np.dtype(np.float32).char float_32_ind = typecodes.index(float_32_char) typecodes = typecodes[float_32_ind:] if typecodes is not None: # from smallest to largest for dtype in typecodes: dtype = np.dtype(dtype) if dtype.itemsize <= values.dtype.itemsize: values = maybe_downcast_numeric(values, dtype) # successful conversion if values.dtype == dtype: break # GH33013: for IntegerArray & FloatingArray need to reconstruct masked array if mask is not None: data = np.zeros(mask.shape, dtype=values.dtype) data[~mask] = values from pandas.core.arrays import ( FloatingArray, IntegerArray, ) klass = IntegerArray if is_integer_dtype(data.dtype) else FloatingArray values = klass(data, mask.copy()) if is_series: return arg._constructor(values, index=arg.index, name=arg.name) elif is_index: # because we want to coerce to numeric if possible, # do not use _shallow_copy return pd.Index(values, name=arg.name) elif is_scalars: return values[0] else: return values
import pytest from pandas.compat._optional import VERSIONS import pandas as pd from pandas.core.computation.engines import ENGINES import pandas.core.computation.expr as expr from pandas.util.version import Version def test_compat(): # test we have compat with our version of nu from pandas.core.computation.check import NUMEXPR_INSTALLED try: import numexpr as ne ver = ne.__version__ if Version(ver) < Version(VERSIONS["numexpr"]): assert not NUMEXPR_INSTALLED else: assert NUMEXPR_INSTALLED except ImportError: pytest.skip("not testing numexpr version compat") @pytest.mark.parametrize("engine", ENGINES) @pytest.mark.parametrize("parser", expr.PARSERS) def test_invalid_numexpr_version(engine, parser): def testit(): a, b = 1, 2 # noqa res = pd.eval("a + b", engine=engine, parser=parser) assert res == 3 if engine == "numexpr": try: import numexpr as ne # noqa F401 except ImportError: pytest.skip("no numexpr") else: testit() else: testit()
datapythonista/pandas
pandas/tests/computation/test_compat.py
pandas/core/tools/numeric.py
# -*- coding: utf-8 -*- # This file is part of Shuup. # # Copyright (c) 2012-2016, Shoop Ltd. All rights reserved. # # This source code is licensed under the AGPLv3 license found in the # LICENSE file in the root directory of this source tree. from __future__ import with_statement from decimal import Decimal from django.db import models from django.utils.encoding import python_2_unicode_compatible from django.utils.functional import cached_property from django.utils.translation import ugettext_lazy as _ from parler.models import TranslatableModel, TranslatedFields from shuup.core.fields import InternalIdentifierField from shuup.utils.numbers import bankers_round, parse_decimal_string __all__ = ("SalesUnit",) @python_2_unicode_compatible class SalesUnit(TranslatableModel): identifier = InternalIdentifierField(unique=True) decimals = models.PositiveSmallIntegerField(default=0, verbose_name=_(u"allowed decimals")) translations = TranslatedFields( name=models.CharField(max_length=128, verbose_name=_('name')), short_name=models.CharField(max_length=128, verbose_name=_('short name')), ) class Meta: verbose_name = _('sales unit') verbose_name_plural = _('sales units') def __str__(self): return self.safe_translation_getter("name", default=None) @property def allow_fractions(self): return self.decimals > 0 @cached_property def quantity_step(self): """ Get the quantity increment for the amount of decimals this unit allows. For 0 decimals, this will be 1; for 1 decimal, 0.1; etc. :return: Decimal in (0..1] :rtype: Decimal """ # This particular syntax (`10 ^ -n`) is the same that `bankers_round` uses # to figure out the quantizer. return Decimal(10) ** (-int(self.decimals)) def round(self, value): return bankers_round(parse_decimal_string(value), self.decimals)
# This file is part of Shuup. # # Copyright (c) 2012-2016, Shoop Ltd. All rights reserved. # # This source code is licensed under the AGPLv3 license found in the # LICENSE file in the root directory of this source tree. import datetime import decimal import json from decimal import Decimal from django.utils.timezone import now from django.utils.translation import activate import pytest from shuup.admin.modules.orders.views.edit import OrderEditView from shuup.campaigns.models.campaigns import CatalogCampaign from shuup.campaigns.models.catalog_filters import ( CategoryFilter, ProductFilter, ProductTypeFilter ) from shuup.campaigns.models.context_conditions import ContactGroupCondition from shuup.campaigns.models.product_effects import ( ProductDiscountAmount, ProductDiscountPercentage ) from shuup.core.models import ( Category, ProductType, Shop, ShopProduct, ShopStatus ) from shuup.testing.factories import ( create_product, get_default_customer_group, get_default_shop ) from shuup.testing.utils import apply_request_middleware from shuup_tests.campaigns import initialize_test @pytest.mark.django_db def test_campaign_creation(rf): activate("en") request, shop, group = initialize_test(rf, False) cat = Category.objects.create(name="test") condition = ContactGroupCondition.objects.create() condition.contact_groups = request.customer.groups.all() condition.save() assert condition.values.first() == request.customer.groups.first() condition.values = request.customer.groups.all() condition.save() assert condition.values.first() == request.customer.groups.first() category_filter = CategoryFilter.objects.create() category_filter.categories.add(cat) category_filter.save() campaign = CatalogCampaign.objects.create(shop=shop, name="test", active=True) campaign.conditions.add(condition) campaign.filters.add(category_filter) campaign.save() ProductDiscountAmount.objects.create(campaign=campaign, discount_amount=20) @pytest.mark.django_db def test_condition_doesnt_match(rf): 
activate("en") request, shop, group = initialize_test(rf, False) condition = ContactGroupCondition.objects.create() condition.contact_groups = [get_default_customer_group()] condition.save() request.customer = None assert not condition.matches(request) @pytest.mark.django_db def test_condition_affects_price(rf): activate("en") request, shop, group = initialize_test(rf, False) cat = Category.objects.create(name="test") contact_condition = ContactGroupCondition.objects.create() contact_condition.contact_groups = request.customer.groups.all() contact_condition.save() campaign = CatalogCampaign.objects.create(shop=shop, name="test", active=True) campaign.conditions.add(contact_condition) campaign.save() ProductDiscountAmount.objects.create(campaign=campaign, discount_amount=20) price = shop.create_price product = create_product("Just-A-Product-Too", shop, default_price=199) assert product.get_price_info(request, quantity=2).price == price(179) * 2 @pytest.mark.django_db def test_filter_affects_price(rf): activate("en") request, shop, group = initialize_test(rf, False) cat = Category.objects.create(name="test") category_filter = CategoryFilter.objects.create() category_filter.categories.add(cat) category_filter.save() campaign = CatalogCampaign.objects.create(shop=shop, name="test", active=True) campaign.filters.add(category_filter) campaign.save() ProductDiscountAmount.objects.create(campaign=campaign, discount_amount=20) price = shop.create_price product = create_product("Just-A-Product-Too", shop, default_price=199) shop_product = product.get_shop_instance(shop) shop_product.categories.add(cat) shop_product.save() assert product.get_price_info(request, quantity=1).price == price(179) @pytest.mark.django_db def test_campaign_all_rules_must_match1(rf): activate("en") discount_amount = "20.53" original_price = "199.20" request, shop, group = initialize_test(rf, False) cat = Category.objects.create(name="test") rule1 = ContactGroupCondition.objects.create() 
rule1.contact_groups = request.customer.groups.all() rule1.save() rule2 = CategoryFilter.objects.create() rule2.categories.add(cat) rule2.save() campaign = CatalogCampaign.objects.create(shop=shop, name="test", active=True) campaign.conditions.add(rule1) campaign.filters.add(rule2) campaign.save() ProductDiscountAmount.objects.create(campaign=campaign, discount_amount=discount_amount) product = create_product("Just-A-Product-Too", shop, default_price=original_price) price = shop.create_price # price should not be discounted because the request.category is faulty assert product.get_price_info(request, quantity=1).price == price(original_price) shop_product = product.get_shop_instance(shop) shop_product.categories.add(cat) shop_product.save() # now the category is set, so both rules match, disconut should be given assert product.get_price_info(request, quantity=1).price == (price(original_price) - price(discount_amount)) @pytest.mark.django_db def test_percentage_campaigns(rf): activate("en") discount_percentage = "0.14" original_price = "123.47" request, shop, group = initialize_test(rf, False) cat = Category.objects.create(name="test") rule1 = ContactGroupCondition.objects.create() rule1.contact_groups = request.customer.groups.all() rule1.save() rule2 = CategoryFilter.objects.create() rule2.categories.add(cat) rule2.save() campaign = CatalogCampaign.objects.create(shop=shop, name="test", active=True) campaign.conditions.add(rule1) campaign.filters.add(rule2) campaign.save() cdp = ProductDiscountPercentage.objects.create(campaign=campaign, discount_percentage=discount_percentage) product = create_product("Just-A-Product-Too", shop, default_price=original_price) price = shop.create_price # price should not be discounted because the request.category is faulty assert product.get_price_info(request, quantity=1).price == price(original_price) shop_product = product.get_shop_instance(shop) shop_product.categories.add(cat) shop_product.save() # now the category is set, so 
both rules match, discount should be given discounted_price = price(original_price) - (price(original_price) * Decimal(cdp.value)) assert product.get_price_info(request, quantity=1).price == discounted_price @pytest.mark.django_db def test_only_best_price_affects(rf): activate("en") discount_amount = "20.53" original_price = "199.20" best_discount_amount = "40.00" request, shop, group = initialize_test(rf, False) cat = Category.objects.create(name="test") rule1, rule2 = create_condition_and_filter(cat, request) campaign = CatalogCampaign.objects.create(shop=shop, name="test", active=True) campaign.conditions.add(rule1) campaign.filters.add(rule2) campaign.save() ProductDiscountAmount.objects.create(campaign=campaign, discount_amount=discount_amount) rule3, rule4 = create_condition_and_filter(cat, request) campaign = CatalogCampaign.objects.create(shop=shop, name="test", active=True) campaign.conditions.add(rule3) campaign.filters.add(rule4) campaign.save() ProductDiscountAmount.objects.create(campaign=campaign, discount_amount=best_discount_amount) product = create_product("Just-A-Product-Too", shop, default_price=original_price) price = shop.create_price # price should not be discounted because the request.category is faulty assert product.get_price_info(request, quantity=1).price == price(original_price) shop_product = product.get_shop_instance(shop) shop_product.categories.add(cat) shop_product.save() # now the category is set, so both rules match, discount should be given assert product.get_price_info(request, quantity=1).price == (price(original_price) - price(best_discount_amount)) @pytest.mark.django_db def test_minimum_price_is_forced(rf): activate("en") discount_amount = "20.53" original_price = "199.20" allowed_minimum_price = "190.20" request, shop, group = initialize_test(rf, False) cat = Category.objects.create(name="test") rule1, rule2 = create_condition_and_filter(cat, request) campaign = CatalogCampaign.objects.create(shop=shop, name="test", 
active=True) campaign.conditions.add(rule1) campaign.filters.add(rule2) campaign.save() ProductDiscountAmount.objects.create(campaign=campaign, discount_amount=discount_amount) price = shop.create_price product = create_product("Just-A-Product-Too", shop, default_price=original_price) shop_product = product.get_shop_instance(shop) shop_product.minimum_price = price(allowed_minimum_price) shop_product.save() # price should not be discounted because the request.category is faulty assert product.get_price_info(request, quantity=1).price == price(original_price) shop_product.categories.add(cat) shop_product.save() # now the category is set, so both rules match, discount should be given assert product.get_price_info(request, quantity=1).price == shop_product.minimum_price @pytest.mark.django_db def test_price_cannot_be_under_zero(rf): activate("en") discount_amount = "200" original_price = "199.20" request, shop, group = initialize_test(rf, False) cat = Category.objects.create(name="test") rule1, rule2 = create_condition_and_filter(cat, request) campaign = CatalogCampaign.objects.create(shop=shop, name="test", active=True) campaign.conditions.add(rule1) campaign.filters.add(rule2) campaign.save() ProductDiscountAmount.objects.create(campaign=campaign, discount_amount=discount_amount) price = shop.create_price product = create_product("Just-A-Product-Too", shop, default_price=original_price) shop_product = product.get_shop_instance(shop) shop_product.categories.add(cat) shop_product.save() assert product.get_price_info(request, quantity=1).price == price("0") def create_condition_and_filter(cat, request): rule1 = ContactGroupCondition.objects.create() rule1.contact_groups = request.customer.groups.all() rule1.save() rule2 = CategoryFilter.objects.create() rule2.categories.add(cat) rule2.save() return rule1, rule2 @pytest.mark.django_db def test_start_end_dates(rf): activate("en") original_price = "180" discounted_price = "160" request, shop, group = initialize_test(rf, 
False) cat = Category.objects.create(name="test") rule1, rule2 = create_condition_and_filter(cat, request) discount_amount = 20 campaign = CatalogCampaign.objects.create(shop=shop, name="test", active=True) campaign.conditions.add(rule1) campaign.save() ProductDiscountAmount.objects.create(discount_amount=discount_amount, campaign=campaign) price = shop.create_price product = create_product("Just-A-Product-Too", shop, default_price=original_price) today = now() # starts in future campaign.start_datetime = (today + datetime.timedelta(days=2)) campaign.save() assert not campaign.is_available() assert product.get_price_info(request, quantity=1).price == price(original_price) # has already started campaign.start_datetime = (today - datetime.timedelta(days=2)) campaign.save() assert product.get_price_info(request, quantity=1).price == price(discounted_price) # already ended campaign.end_datetime = (today - datetime.timedelta(days=1)) campaign.save() assert not campaign.is_available() assert product.get_price_info(request, quantity=1).price == price(original_price) # not ended yet campaign.end_datetime = (today + datetime.timedelta(days=1)) campaign.save() assert product.get_price_info(request, quantity=1).price == price(discounted_price) # no start datetime campaign.start_datetime = None campaign.save() assert product.get_price_info(request, quantity=1).price == price(discounted_price) # no start datetime but ended campaign.end_datetime = (today - datetime.timedelta(days=1)) campaign.save() assert not campaign.is_available() assert product.get_price_info(request, quantity=1).price == price(original_price) @pytest.mark.django_db def test_availability(rf): activate("en") request, shop, group = initialize_test(rf, False) cat = Category.objects.create(name="test") rule1, rule2 = create_condition_and_filter(cat, request) discount_amount = "20" campaign = CatalogCampaign.objects.create(shop=shop, name="test", active=False) campaign.conditions.add(rule1) campaign.save() 
ProductDiscountAmount.objects.create(discount_amount=discount_amount, campaign=campaign) assert not campaign.is_available() @pytest.mark.django_db def test_admin_order_with_campaign(rf, admin_user): request, shop, group = initialize_test(rf, False) customer = request.customer cat = Category.objects.create(name="test") rule1, rule2 = create_condition_and_filter(cat, request) campaign = CatalogCampaign.objects.create(shop=shop, name="test", active=True) campaign.conditions.add(rule1) ProductDiscountAmount.objects.create(campaign=campaign, discount_amount="10") product = create_product("Just-A-Product-Too", shop, default_price=20) shop_product = product.get_shop_instance(shop) shop_product.categories.add(cat) request = apply_request_middleware(rf.get("/", { "command": "product_data", "shop_id": shop.id, "customer_id": customer.id, "id": product.id, "quantity": 1 }), user=admin_user) response = OrderEditView.as_view()(request) data = json.loads(response.content.decode("utf8")) assert decimal.Decimal(data['unitPrice']['value']) == shop.create_price(10).value @pytest.mark.django_db def test_product_catalog_campaigns(): shop = get_default_shop() product = create_product("test", shop, default_price=20) shop_product = product.get_shop_instance(shop) cat = Category.objects.create(name="test") campaign = CatalogCampaign.objects.create(shop=shop, name="test", active=True) # no rules assert CatalogCampaign.get_for_product(shop_product).count() == 0 # category filter that doesn't match cat_filter = CategoryFilter.objects.create() cat_filter.categories.add(cat) campaign.filters.add(cat_filter) assert CatalogCampaign.get_for_product(shop_product).count() == 0 shop_product.primary_category = cat shop_product.save() assert CatalogCampaign.get_for_product(shop_product).count() == 1 shop_product.primary_category = None shop_product.save() assert CatalogCampaign.get_for_product(shop_product).count() == 0 # category filter that matches shop_product.categories.add(cat) assert 
CatalogCampaign.get_for_product(shop_product).count() == 1 # create other shop shop1 = Shop.objects.create(name="testshop", identifier="testshop", status=ShopStatus.ENABLED, public_name="testshop") sp = ShopProduct.objects.create(product=product, shop=shop1, default_price=shop1.create_price(200)) assert product.get_shop_instance(shop1) == sp campaign2 = CatalogCampaign.objects.create(shop=shop1, name="test1", active=True) cat_filter2 = CategoryFilter.objects.create() cat_filter2.categories.add(cat) campaign2.filters.add(cat_filter2) assert CatalogCampaign.get_for_product(sp).count() == 0 # add product to this category sp.primary_category = cat sp.save() assert CatalogCampaign.get_for_product(sp).count() == 1 # matches now sp.primary_category = None sp.save() assert CatalogCampaign.get_for_product(sp).count() == 0 # no match sp.categories.add(cat) assert CatalogCampaign.get_for_product(sp).count() == 1 # matches now campaign3 = CatalogCampaign.objects.create(shop=shop1, name="test1", active=True) cat_filter3 = CategoryFilter.objects.create() cat_filter3.categories.add(cat) campaign3.filters.add(cat_filter3) assert CatalogCampaign.get_for_product(sp).count() == 2 # there are now two matching campaigns in same shop assert CatalogCampaign.get_for_product(shop_product).count() == 1 # another campaign matches only once @pytest.mark.django_db def test_product_catalog_campaigns2(): shop = get_default_shop() product = create_product("test", shop, default_price=20) product_type = ProductType.objects.create(name="asdf") shop_product = product.get_shop_instance(shop) campaign = CatalogCampaign.objects.create(shop=shop, name="test", active=True) assert CatalogCampaign.get_for_product(shop_product).count() == 0 type_filter = ProductTypeFilter.objects.create() type_filter.product_types.add(product_type) campaign.filters.add(type_filter) assert CatalogCampaign.get_for_product(shop_product).count() == 0 type_filter.product_types.add(product.type) assert 
CatalogCampaign.get_for_product(shop_product).count() == 1 product.type = product_type product.save() assert CatalogCampaign.get_for_product(shop_product).count() == 1 type_filter.product_types.remove(product_type) assert CatalogCampaign.get_for_product(shop_product).count() == 0 @pytest.mark.django_db def test_product_catalog_campaigns3(): shop = get_default_shop() product = create_product("test", shop, default_price=20) shop_product = product.get_shop_instance(shop) campaign = CatalogCampaign.objects.create(shop=shop, name="test", active=True) assert CatalogCampaign.get_for_product(shop_product).count() == 0 type_filter = ProductFilter.objects.create() type_filter.products.add(product) campaign.filters.add(type_filter) assert CatalogCampaign.get_for_product(shop_product).count() == 1
hrayr-artunyan/shuup
shuup_tests/campaigns/test_catalog_campaigns.py
shuup/core/models/_units.py
# This file is part of Shuup. # # Copyright (c) 2012-2016, Shoop Ltd. All rights reserved. # # This source code is licensed under the AGPLv3 license found in the # LICENSE file in the root directory of this source tree. from __future__ import unicode_literals import decimal from django.core.exceptions import ValidationError from django.db import models from django.utils.translation import ugettext_lazy as _ from enumfields import Enum from parler.models import TranslatableModel, TranslatedField, TranslatedFields from shuup.core.fields import MeasurementField, MoneyValueField from ._service_base import ( ServiceBehaviorComponent, ServiceCost, TranslatableServiceBehaviorComponent ) class FixedCostBehaviorComponent(TranslatableServiceBehaviorComponent): name = _("Fixed cost") help_text = _("Add fixed cost to price of the service.") price_value = MoneyValueField() description = TranslatedField(any_language=True) translations = TranslatedFields( description=models.CharField(max_length=100, blank=True, verbose_name=_("description")), ) def get_costs(self, service, source): price = source.create_price(self.price_value) description = self.safe_translation_getter('description') yield ServiceCost(price, description) class WaivingCostBehaviorComponent(TranslatableServiceBehaviorComponent): name = _("Waiving cost") help_text = _( "Add cost to price of the service if total price " "of products is less than a waive limit.") price_value = MoneyValueField() waive_limit_value = MoneyValueField() description = TranslatedField(any_language=True) translations = TranslatedFields( description=models.CharField(max_length=100, blank=True, verbose_name=_("description")), ) def get_costs(self, service, source): waive_limit = source.create_price(self.waive_limit_value) product_total = source.total_price_of_products price = source.create_price(self.price_value) description = self.safe_translation_getter('description') zero_price = source.create_price(0) if product_total and product_total >= 
waive_limit: yield ServiceCost(zero_price, description, base_price=price) else: yield ServiceCost(price, description) class WeightLimitsBehaviorComponent(ServiceBehaviorComponent): name = _("Weight limits") help_text = _( "Limit availability of the service based on " "total weight of products.") min_weight = models.DecimalField( max_digits=36, decimal_places=6, blank=True, null=True, verbose_name=_("minimum weight")) max_weight = models.DecimalField( max_digits=36, decimal_places=6, blank=True, null=True, verbose_name=_("maximum weight")) def get_unavailability_reasons(self, service, source): weight = sum(((x.get("weight") or 0) for x in source.get_lines()), 0) if self.min_weight: if weight < self.min_weight: yield ValidationError(_("Minimum weight not met."), code="min_weight") if self.max_weight: if weight > self.max_weight: yield ValidationError(_("Maximum weight exceeded."), code="max_weight") class WeightBasedPriceRange(TranslatableModel): component = models.ForeignKey( "WeightBasedPricingBehaviorComponent", related_name="ranges", on_delete=models.CASCADE ) min_value = MeasurementField(unit="g", verbose_name=_("min weight"), blank=True, null=True) max_value = MeasurementField(unit="g", verbose_name=_("max weight"), blank=True, null=True) price_value = MoneyValueField() description = TranslatedField(any_language=True) translations = TranslatedFields( description=models.CharField(max_length=100, blank=True, verbose_name=_("description")), ) def matches_to_value(self, value): return _is_in_range(value, self.min_value, self.max_value) def _is_in_range(value, min_value, max_value): """ Help function to check if the ``WeightBasedPriceRange`` matches If min_value is None the max_value determines if the range matches. None as a max_value represents infinity. Min value is counted in range only when it's zero. Max value is always part of the range. 
:type value: decimal.Decimal :type min_value: MeasurementField :type max_value: MeasurementField :rtype: bool """ if value is None: return False if (not (min_value or max_value)) or (min_value == max_value == value): return True if (not min_value or value > min_value) and (max_value is None or value <= max_value): return True return False class WeightBasedPricingBehaviorComponent(ServiceBehaviorComponent): name = _("Weight-based pricing") help_text = _( "Define price based on basket weight. " "Range minimums is counted in range only as zero.") def _get_matching_range_with_lowest_price(self, source): total_gross_weight = source.total_gross_weight matching_ranges = [range for range in self.ranges.all() if range.matches_to_value(total_gross_weight)] if not matching_ranges: return return min(matching_ranges, key=lambda x: x.price_value) def get_costs(self, service, source): range = self._get_matching_range_with_lowest_price(source) if range: price = source.create_price(range.price_value) description = range.safe_translation_getter('description') yield ServiceCost(price, description) def get_unavailability_reasons(self, service, source): range = self._get_matching_range_with_lowest_price(source) if not range: yield ValidationError(_("Weight does not match with any range."), code="out_of_range") class GroupAvailabilityBehaviorComponent(ServiceBehaviorComponent): name = _("Contact group availability") help_text = _("Limit service availability for specific contact groups.") groups = models.ManyToManyField("ContactGroup", verbose_name=_("groups")) def get_unavailability_reasons(self, service, source): if source.customer and not source.customer.pk: yield ValidationError(_("Customer does not belong to any group.")) return customer_groups = set(source.customer.groups.all().values_list("pk", flat=True)) groups_to_match = set(self.groups.all().values_list("pk", flat=True)) if not bool(customer_groups & groups_to_match): yield ValidationError(_("Service is not available for any 
of the customers groups.")) class StaffOnlyBehaviorComponent(ServiceBehaviorComponent): name = _("Staff only availability") help_text = _("Limit service availability to staff only") def get_unavailability_reasons(self, service, source): if not source.creator or not source.creator.is_staff: yield ValidationError(_("Service is only available for staff")) class RoundingMode(Enum): ROUND_HALF_UP = decimal.ROUND_HALF_UP ROUND_HALF_DOWN = decimal.ROUND_HALF_DOWN ROUND_UP = decimal.ROUND_UP ROUND_DOWN = decimal.ROUND_DOWN class Labels: ROUND_HALF_UP = _("round to nearest with ties going away from zero") ROUND_HALF_DOWN = _("round to nearest with ties going towards zero") ROUND_UP = _("round away from zero") ROUND_DOWN = _("round towards zero")
# This file is part of Shuup. # # Copyright (c) 2012-2016, Shoop Ltd. All rights reserved. # # This source code is licensed under the AGPLv3 license found in the # LICENSE file in the root directory of this source tree. import datetime import decimal import json from decimal import Decimal from django.utils.timezone import now from django.utils.translation import activate import pytest from shuup.admin.modules.orders.views.edit import OrderEditView from shuup.campaigns.models.campaigns import CatalogCampaign from shuup.campaigns.models.catalog_filters import ( CategoryFilter, ProductFilter, ProductTypeFilter ) from shuup.campaigns.models.context_conditions import ContactGroupCondition from shuup.campaigns.models.product_effects import ( ProductDiscountAmount, ProductDiscountPercentage ) from shuup.core.models import ( Category, ProductType, Shop, ShopProduct, ShopStatus ) from shuup.testing.factories import ( create_product, get_default_customer_group, get_default_shop ) from shuup.testing.utils import apply_request_middleware from shuup_tests.campaigns import initialize_test @pytest.mark.django_db def test_campaign_creation(rf): activate("en") request, shop, group = initialize_test(rf, False) cat = Category.objects.create(name="test") condition = ContactGroupCondition.objects.create() condition.contact_groups = request.customer.groups.all() condition.save() assert condition.values.first() == request.customer.groups.first() condition.values = request.customer.groups.all() condition.save() assert condition.values.first() == request.customer.groups.first() category_filter = CategoryFilter.objects.create() category_filter.categories.add(cat) category_filter.save() campaign = CatalogCampaign.objects.create(shop=shop, name="test", active=True) campaign.conditions.add(condition) campaign.filters.add(category_filter) campaign.save() ProductDiscountAmount.objects.create(campaign=campaign, discount_amount=20) @pytest.mark.django_db def test_condition_doesnt_match(rf): 
activate("en") request, shop, group = initialize_test(rf, False) condition = ContactGroupCondition.objects.create() condition.contact_groups = [get_default_customer_group()] condition.save() request.customer = None assert not condition.matches(request) @pytest.mark.django_db def test_condition_affects_price(rf): activate("en") request, shop, group = initialize_test(rf, False) cat = Category.objects.create(name="test") contact_condition = ContactGroupCondition.objects.create() contact_condition.contact_groups = request.customer.groups.all() contact_condition.save() campaign = CatalogCampaign.objects.create(shop=shop, name="test", active=True) campaign.conditions.add(contact_condition) campaign.save() ProductDiscountAmount.objects.create(campaign=campaign, discount_amount=20) price = shop.create_price product = create_product("Just-A-Product-Too", shop, default_price=199) assert product.get_price_info(request, quantity=2).price == price(179) * 2 @pytest.mark.django_db def test_filter_affects_price(rf): activate("en") request, shop, group = initialize_test(rf, False) cat = Category.objects.create(name="test") category_filter = CategoryFilter.objects.create() category_filter.categories.add(cat) category_filter.save() campaign = CatalogCampaign.objects.create(shop=shop, name="test", active=True) campaign.filters.add(category_filter) campaign.save() ProductDiscountAmount.objects.create(campaign=campaign, discount_amount=20) price = shop.create_price product = create_product("Just-A-Product-Too", shop, default_price=199) shop_product = product.get_shop_instance(shop) shop_product.categories.add(cat) shop_product.save() assert product.get_price_info(request, quantity=1).price == price(179) @pytest.mark.django_db def test_campaign_all_rules_must_match1(rf): activate("en") discount_amount = "20.53" original_price = "199.20" request, shop, group = initialize_test(rf, False) cat = Category.objects.create(name="test") rule1 = ContactGroupCondition.objects.create() 
rule1.contact_groups = request.customer.groups.all() rule1.save() rule2 = CategoryFilter.objects.create() rule2.categories.add(cat) rule2.save() campaign = CatalogCampaign.objects.create(shop=shop, name="test", active=True) campaign.conditions.add(rule1) campaign.filters.add(rule2) campaign.save() ProductDiscountAmount.objects.create(campaign=campaign, discount_amount=discount_amount) product = create_product("Just-A-Product-Too", shop, default_price=original_price) price = shop.create_price # price should not be discounted because the request.category is faulty assert product.get_price_info(request, quantity=1).price == price(original_price) shop_product = product.get_shop_instance(shop) shop_product.categories.add(cat) shop_product.save() # now the category is set, so both rules match, disconut should be given assert product.get_price_info(request, quantity=1).price == (price(original_price) - price(discount_amount)) @pytest.mark.django_db def test_percentage_campaigns(rf): activate("en") discount_percentage = "0.14" original_price = "123.47" request, shop, group = initialize_test(rf, False) cat = Category.objects.create(name="test") rule1 = ContactGroupCondition.objects.create() rule1.contact_groups = request.customer.groups.all() rule1.save() rule2 = CategoryFilter.objects.create() rule2.categories.add(cat) rule2.save() campaign = CatalogCampaign.objects.create(shop=shop, name="test", active=True) campaign.conditions.add(rule1) campaign.filters.add(rule2) campaign.save() cdp = ProductDiscountPercentage.objects.create(campaign=campaign, discount_percentage=discount_percentage) product = create_product("Just-A-Product-Too", shop, default_price=original_price) price = shop.create_price # price should not be discounted because the request.category is faulty assert product.get_price_info(request, quantity=1).price == price(original_price) shop_product = product.get_shop_instance(shop) shop_product.categories.add(cat) shop_product.save() # now the category is set, so 
both rules match, discount should be given discounted_price = price(original_price) - (price(original_price) * Decimal(cdp.value)) assert product.get_price_info(request, quantity=1).price == discounted_price @pytest.mark.django_db def test_only_best_price_affects(rf): activate("en") discount_amount = "20.53" original_price = "199.20" best_discount_amount = "40.00" request, shop, group = initialize_test(rf, False) cat = Category.objects.create(name="test") rule1, rule2 = create_condition_and_filter(cat, request) campaign = CatalogCampaign.objects.create(shop=shop, name="test", active=True) campaign.conditions.add(rule1) campaign.filters.add(rule2) campaign.save() ProductDiscountAmount.objects.create(campaign=campaign, discount_amount=discount_amount) rule3, rule4 = create_condition_and_filter(cat, request) campaign = CatalogCampaign.objects.create(shop=shop, name="test", active=True) campaign.conditions.add(rule3) campaign.filters.add(rule4) campaign.save() ProductDiscountAmount.objects.create(campaign=campaign, discount_amount=best_discount_amount) product = create_product("Just-A-Product-Too", shop, default_price=original_price) price = shop.create_price # price should not be discounted because the request.category is faulty assert product.get_price_info(request, quantity=1).price == price(original_price) shop_product = product.get_shop_instance(shop) shop_product.categories.add(cat) shop_product.save() # now the category is set, so both rules match, discount should be given assert product.get_price_info(request, quantity=1).price == (price(original_price) - price(best_discount_amount)) @pytest.mark.django_db def test_minimum_price_is_forced(rf): activate("en") discount_amount = "20.53" original_price = "199.20" allowed_minimum_price = "190.20" request, shop, group = initialize_test(rf, False) cat = Category.objects.create(name="test") rule1, rule2 = create_condition_and_filter(cat, request) campaign = CatalogCampaign.objects.create(shop=shop, name="test", 
active=True) campaign.conditions.add(rule1) campaign.filters.add(rule2) campaign.save() ProductDiscountAmount.objects.create(campaign=campaign, discount_amount=discount_amount) price = shop.create_price product = create_product("Just-A-Product-Too", shop, default_price=original_price) shop_product = product.get_shop_instance(shop) shop_product.minimum_price = price(allowed_minimum_price) shop_product.save() # price should not be discounted because the request.category is faulty assert product.get_price_info(request, quantity=1).price == price(original_price) shop_product.categories.add(cat) shop_product.save() # now the category is set, so both rules match, discount should be given assert product.get_price_info(request, quantity=1).price == shop_product.minimum_price @pytest.mark.django_db def test_price_cannot_be_under_zero(rf): activate("en") discount_amount = "200" original_price = "199.20" request, shop, group = initialize_test(rf, False) cat = Category.objects.create(name="test") rule1, rule2 = create_condition_and_filter(cat, request) campaign = CatalogCampaign.objects.create(shop=shop, name="test", active=True) campaign.conditions.add(rule1) campaign.filters.add(rule2) campaign.save() ProductDiscountAmount.objects.create(campaign=campaign, discount_amount=discount_amount) price = shop.create_price product = create_product("Just-A-Product-Too", shop, default_price=original_price) shop_product = product.get_shop_instance(shop) shop_product.categories.add(cat) shop_product.save() assert product.get_price_info(request, quantity=1).price == price("0") def create_condition_and_filter(cat, request): rule1 = ContactGroupCondition.objects.create() rule1.contact_groups = request.customer.groups.all() rule1.save() rule2 = CategoryFilter.objects.create() rule2.categories.add(cat) rule2.save() return rule1, rule2 @pytest.mark.django_db def test_start_end_dates(rf): activate("en") original_price = "180" discounted_price = "160" request, shop, group = initialize_test(rf, 
False) cat = Category.objects.create(name="test") rule1, rule2 = create_condition_and_filter(cat, request) discount_amount = 20 campaign = CatalogCampaign.objects.create(shop=shop, name="test", active=True) campaign.conditions.add(rule1) campaign.save() ProductDiscountAmount.objects.create(discount_amount=discount_amount, campaign=campaign) price = shop.create_price product = create_product("Just-A-Product-Too", shop, default_price=original_price) today = now() # starts in future campaign.start_datetime = (today + datetime.timedelta(days=2)) campaign.save() assert not campaign.is_available() assert product.get_price_info(request, quantity=1).price == price(original_price) # has already started campaign.start_datetime = (today - datetime.timedelta(days=2)) campaign.save() assert product.get_price_info(request, quantity=1).price == price(discounted_price) # already ended campaign.end_datetime = (today - datetime.timedelta(days=1)) campaign.save() assert not campaign.is_available() assert product.get_price_info(request, quantity=1).price == price(original_price) # not ended yet campaign.end_datetime = (today + datetime.timedelta(days=1)) campaign.save() assert product.get_price_info(request, quantity=1).price == price(discounted_price) # no start datetime campaign.start_datetime = None campaign.save() assert product.get_price_info(request, quantity=1).price == price(discounted_price) # no start datetime but ended campaign.end_datetime = (today - datetime.timedelta(days=1)) campaign.save() assert not campaign.is_available() assert product.get_price_info(request, quantity=1).price == price(original_price) @pytest.mark.django_db def test_availability(rf): activate("en") request, shop, group = initialize_test(rf, False) cat = Category.objects.create(name="test") rule1, rule2 = create_condition_and_filter(cat, request) discount_amount = "20" campaign = CatalogCampaign.objects.create(shop=shop, name="test", active=False) campaign.conditions.add(rule1) campaign.save() 
ProductDiscountAmount.objects.create(discount_amount=discount_amount, campaign=campaign) assert not campaign.is_available() @pytest.mark.django_db def test_admin_order_with_campaign(rf, admin_user): request, shop, group = initialize_test(rf, False) customer = request.customer cat = Category.objects.create(name="test") rule1, rule2 = create_condition_and_filter(cat, request) campaign = CatalogCampaign.objects.create(shop=shop, name="test", active=True) campaign.conditions.add(rule1) ProductDiscountAmount.objects.create(campaign=campaign, discount_amount="10") product = create_product("Just-A-Product-Too", shop, default_price=20) shop_product = product.get_shop_instance(shop) shop_product.categories.add(cat) request = apply_request_middleware(rf.get("/", { "command": "product_data", "shop_id": shop.id, "customer_id": customer.id, "id": product.id, "quantity": 1 }), user=admin_user) response = OrderEditView.as_view()(request) data = json.loads(response.content.decode("utf8")) assert decimal.Decimal(data['unitPrice']['value']) == shop.create_price(10).value @pytest.mark.django_db def test_product_catalog_campaigns(): shop = get_default_shop() product = create_product("test", shop, default_price=20) shop_product = product.get_shop_instance(shop) cat = Category.objects.create(name="test") campaign = CatalogCampaign.objects.create(shop=shop, name="test", active=True) # no rules assert CatalogCampaign.get_for_product(shop_product).count() == 0 # category filter that doesn't match cat_filter = CategoryFilter.objects.create() cat_filter.categories.add(cat) campaign.filters.add(cat_filter) assert CatalogCampaign.get_for_product(shop_product).count() == 0 shop_product.primary_category = cat shop_product.save() assert CatalogCampaign.get_for_product(shop_product).count() == 1 shop_product.primary_category = None shop_product.save() assert CatalogCampaign.get_for_product(shop_product).count() == 0 # category filter that matches shop_product.categories.add(cat) assert 
CatalogCampaign.get_for_product(shop_product).count() == 1 # create other shop shop1 = Shop.objects.create(name="testshop", identifier="testshop", status=ShopStatus.ENABLED, public_name="testshop") sp = ShopProduct.objects.create(product=product, shop=shop1, default_price=shop1.create_price(200)) assert product.get_shop_instance(shop1) == sp campaign2 = CatalogCampaign.objects.create(shop=shop1, name="test1", active=True) cat_filter2 = CategoryFilter.objects.create() cat_filter2.categories.add(cat) campaign2.filters.add(cat_filter2) assert CatalogCampaign.get_for_product(sp).count() == 0 # add product to this category sp.primary_category = cat sp.save() assert CatalogCampaign.get_for_product(sp).count() == 1 # matches now sp.primary_category = None sp.save() assert CatalogCampaign.get_for_product(sp).count() == 0 # no match sp.categories.add(cat) assert CatalogCampaign.get_for_product(sp).count() == 1 # matches now campaign3 = CatalogCampaign.objects.create(shop=shop1, name="test1", active=True) cat_filter3 = CategoryFilter.objects.create() cat_filter3.categories.add(cat) campaign3.filters.add(cat_filter3) assert CatalogCampaign.get_for_product(sp).count() == 2 # there are now two matching campaigns in same shop assert CatalogCampaign.get_for_product(shop_product).count() == 1 # another campaign matches only once @pytest.mark.django_db def test_product_catalog_campaigns2(): shop = get_default_shop() product = create_product("test", shop, default_price=20) product_type = ProductType.objects.create(name="asdf") shop_product = product.get_shop_instance(shop) campaign = CatalogCampaign.objects.create(shop=shop, name="test", active=True) assert CatalogCampaign.get_for_product(shop_product).count() == 0 type_filter = ProductTypeFilter.objects.create() type_filter.product_types.add(product_type) campaign.filters.add(type_filter) assert CatalogCampaign.get_for_product(shop_product).count() == 0 type_filter.product_types.add(product.type) assert 
CatalogCampaign.get_for_product(shop_product).count() == 1 product.type = product_type product.save() assert CatalogCampaign.get_for_product(shop_product).count() == 1 type_filter.product_types.remove(product_type) assert CatalogCampaign.get_for_product(shop_product).count() == 0 @pytest.mark.django_db def test_product_catalog_campaigns3(): shop = get_default_shop() product = create_product("test", shop, default_price=20) shop_product = product.get_shop_instance(shop) campaign = CatalogCampaign.objects.create(shop=shop, name="test", active=True) assert CatalogCampaign.get_for_product(shop_product).count() == 0 type_filter = ProductFilter.objects.create() type_filter.products.add(product) campaign.filters.add(type_filter) assert CatalogCampaign.get_for_product(shop_product).count() == 1
hrayr-artunyan/shuup
shuup_tests/campaigns/test_catalog_campaigns.py
shuup/core/models/_service_behavior.py
# coding: utf-8 """ EnvironmentsApi.py Copyright 2015 SmartBear Software Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ from __future__ import absolute_import import sys import os # python 2 and python 3 compatibility library from six import iteritems from ..configuration import Configuration from ..api_client import ApiClient class EnvironmentsApi(object): """ NOTE: This class is auto generated by the swagger code generator program. Do not edit the class manually. Ref: https://github.com/swagger-api/swagger-codegen """ def __init__(self, api_client=None): config = Configuration() if api_client: self.api_client = api_client else: if not config.api_client: config.api_client = ApiClient() self.api_client = config.api_client def get_all(self, **kwargs): """ Gets all Environments This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.get_all(callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param int page_index: Page Index :param int page_size: Pagination size :param str sort: Sorting RSQL :param str q: RSQL Query :return: BuildEnvironmentPage If the method is called asynchronously, returns the request thread. 
""" all_params = ['page_index', 'page_size', 'sort', 'q'] all_params.append('callback') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method get_all" % key ) params[key] = val del params['kwargs'] resource_path = '/environments'.replace('{format}', 'json') method = 'GET' path_params = {} query_params = {} if 'page_index' in params: query_params['pageIndex'] = params['page_index'] if 'page_size' in params: query_params['pageSize'] = params['page_size'] if 'sort' in params: query_params['sort'] = params['sort'] if 'q' in params: query_params['q'] = params['q'] header_params = {} form_params = {} files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json']) if not header_params['Accept']: del header_params['Accept'] # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['application/json']) # Authentication setting auth_settings = [] response = self.api_client.call_api(resource_path, method, path_params, query_params, header_params, body=body_params, post_params=form_params, files=files, response_type='BuildEnvironmentPage', auth_settings=auth_settings, callback=params.get('callback')) return response def get_specific(self, id, **kwargs): """ Get specific Environment This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.get_specific(id, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param int id: Environment id (required) :return: BuildEnvironmentSingleton If the method is called asynchronously, returns the request thread. 
""" # verify the required parameter 'id' is set if id is None: raise ValueError("Missing the required parameter `id` when calling `get_specific`") all_params = ['id'] all_params.append('callback') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method get_specific" % key ) params[key] = val del params['kwargs'] resource_path = '/environments/{id}'.replace('{format}', 'json') method = 'GET' path_params = {} if 'id' in params: path_params['id'] = params['id'] query_params = {} header_params = {} form_params = {} files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json']) if not header_params['Accept']: del header_params['Accept'] # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['application/json']) # Authentication setting auth_settings = [] response = self.api_client.call_api(resource_path, method, path_params, query_params, header_params, body=body_params, post_params=form_params, files=files, response_type='BuildEnvironmentSingleton', auth_settings=auth_settings, callback=params.get('callback')) return response
import pytest from pnc_cli.swagger_client import ArtifactRest __author__ = 'thauser' from pnc_cli.swagger_client.apis import BuildrecordsApi from pnc_cli.swagger_client.apis import ProductmilestonesApi from test import testutils import pnc_cli.user_config as uc @pytest.fixture(scope='function', autouse=True) def get_milestone_api(): global milestone_api milestone_api = ProductmilestonesApi(uc.user.get_api_client()) @pytest.fixture(scope='function', autouse=True) def get_records_api(): global records_api records_api = BuildrecordsApi(uc.user.get_api_client()) def test_get_all_invalid_param(): testutils.assert_raises_typeerror(milestone_api, 'get_all') def test_get_all(new_milestone): assert milestone_api.get_all(page_index=0, page_size=1000000, sort='', q='').content is not None def test_create_new_invalid_param(): testutils.assert_raises_typeerror(milestone_api, 'create_new') def test_create_new(new_milestone): milestones = [m.id for m in milestone_api.get_all(page_size=1000000).content] assert new_milestone.id in milestones def test_get_all_by_product_version_id_no_version_id(): testutils.assert_raises_valueerror(milestone_api, 'get_all_by_product_version_id', version_id=None) def test_get_all_by_product_version_id_invalid_param(): testutils.assert_raises_typeerror(milestone_api, 'get_all_by_product_version_id', version_id=1) def test_get_all_by_product_version_id(): milestones = milestone_api.get_all_by_product_version_id(version_id=1, page_index=0, page_size=1000000, sort='', q='').content assert milestones is not None def test_get_specific_no_id(): testutils.assert_raises_valueerror(milestone_api, 'get_specific', id=None) def test_get_specific_invalid_param(): testutils.assert_raises_typeerror(milestone_api, 'get_specific', id=1) def test_get_specific(new_milestone): retrieved = milestone_api.get_specific(new_milestone.id).content assert new_milestone.to_dict() == retrieved.to_dict() def test_update_no_id(): testutils.assert_raises_valueerror(milestone_api, 
'update', id=None) def test_update_invalid_param(): testutils.assert_raises_typeerror(milestone_api, 'update', id=1) def test_update(new_milestone): new_milestone.download_url = "updatedUrlHere" milestone_api.update(id=new_milestone.id, body=new_milestone) updated = milestone_api.get_specific(new_milestone.id).content assert updated.to_dict() == new_milestone.to_dict() def test_add_distributed_artifact_no_id(): testutils.assert_raises_valueerror(milestone_api, 'add_distributed_artifact', id=None) def test_add_distributed_artifact_invalid_param(): testutils.assert_raises_typeerror(milestone_api, 'add_distributed_artifact', id=1) def test_add_distributed_artifact(new_milestone): test_builds = records_api.get_all(q='(buildConfigurationAudited.name=like=%cli-test%)').content record = test_builds[len(test_builds)-1] # latest test build artifact = records_api.get_built_artifacts(record.id).content[1] # first artifact milestone_api.remove_distributed_artifact(id=new_milestone.id, artifact_id=artifact.id) milestone_api.add_distributed_artifact(id=new_milestone.id, body=artifact) artifacts = milestone_api.get_distributed_artifacts(id=new_milestone.id).content assert artifact.id in [x.id for x in artifacts] def test_get_distributed_artifacts_no_id(): testutils.assert_raises_valueerror(milestone_api, 'get_distributed_artifacts', id=None) def test_get_distributed_artifacts_invalid_param(): testutils.assert_raises_typeerror(milestone_api, 'get_distributed_artifacts', id=1) def test_get_distributed_artifacts(new_milestone): result = milestone_api.get_distributed_artifacts(id=new_milestone.id).content assert result is not None def test_get_distributed_builds_no_id(): testutils.assert_raises_valueerror(milestone_api, 'get_distributed_builds', id=None) def test_get_distributed_builds_invalid_param(): testutils.assert_raises_typeerror(milestone_api, 'get_distributed_builds', id=1) def test_get_distributed_builds(new_milestone): result = 
milestone_api.get_distributed_builds(id=new_milestone.id).content assert result is not None def test_remove_distributed_artifact_no_milestone_id(): testutils.assert_raises_valueerror(milestone_api, 'remove_distributed_artifact', id=None, artifact_id=1) def test_remove_distributed_artifact_no_artifact_id(): testutils.assert_raises_valueerror(milestone_api, 'remove_distributed_artifact', id=1, artifact_id=None) def test_remove_distributed_artifact_invalid_param(): testutils.assert_raises_typeerror(milestone_api, 'remove_distributed_artifact', id=1, artifact_id=1) def test_remove_distributed_artifact(new_milestone): test_builds = records_api.get_all(q='(buildConfigurationAudited.name=like=%cli-test%)').content record = test_builds[len(test_builds)-1] # latest test build artifact = records_api.get_built_artifacts(record.id).content[1] # first artifact milestone_api.remove_distributed_artifact(id=new_milestone.id, artifact_id=artifact.id) artifacts = milestone_api.get_distributed_artifacts(new_milestone.id).content assert artifacts is None or artifact.id not in [x.id for x in artifacts] # assert that removing the artifact either means there are no artifacts, or at least the removed artifact is not present
thauser/pnc-cli
test/integration/test_productmilestones_api.py
pnc_cli/swagger_client/apis/environments_api.py
# coding: utf-8 """ ProductsApi.py Copyright 2015 SmartBear Software Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ from __future__ import absolute_import import sys import os # python 2 and python 3 compatibility library from six import iteritems from ..configuration import Configuration from ..api_client import ApiClient class ProductsApi(object): """ NOTE: This class is auto generated by the swagger code generator program. Do not edit the class manually. Ref: https://github.com/swagger-api/swagger-codegen """ def __init__(self, api_client=None): config = Configuration() if api_client: self.api_client = api_client else: if not config.api_client: config.api_client = ApiClient() self.api_client = config.api_client def create_new(self, **kwargs): """ Creates a new Product This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.create_new(callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param ProductRest body: :return: ProductSingleton If the method is called asynchronously, returns the request thread. 
""" all_params = ['body'] all_params.append('callback') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method create_new" % key ) params[key] = val del params['kwargs'] resource_path = '/products'.replace('{format}', 'json') method = 'POST' path_params = {} query_params = {} header_params = {} form_params = {} files = {} body_params = None if 'body' in params: body_params = params['body'] # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json']) if not header_params['Accept']: del header_params['Accept'] # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['application/json']) # Authentication setting auth_settings = [] response = self.api_client.call_api(resource_path, method, path_params, query_params, header_params, body=body_params, post_params=form_params, files=files, response_type='ProductSingleton', auth_settings=auth_settings, callback=params.get('callback')) return response def get_all(self, **kwargs): """ Gets all Products This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.get_all(callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param int page_index: Page Index :param int page_size: Pagination size :param str sort: Sorting RSQL :param str q: RSQL Query :return: ProductPage If the method is called asynchronously, returns the request thread. 
""" all_params = ['page_index', 'page_size', 'sort', 'q'] all_params.append('callback') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method get_all" % key ) params[key] = val del params['kwargs'] resource_path = '/products'.replace('{format}', 'json') method = 'GET' path_params = {} query_params = {} if 'page_index' in params: query_params['pageIndex'] = params['page_index'] if 'page_size' in params: query_params['pageSize'] = params['page_size'] if 'sort' in params: query_params['sort'] = params['sort'] if 'q' in params: query_params['q'] = params['q'] header_params = {} form_params = {} files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json']) if not header_params['Accept']: del header_params['Accept'] # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['application/json']) # Authentication setting auth_settings = [] response = self.api_client.call_api(resource_path, method, path_params, query_params, header_params, body=body_params, post_params=form_params, files=files, response_type='ProductPage', auth_settings=auth_settings, callback=params.get('callback')) return response def get_product_versions(self, id, **kwargs): """ Get all versions for a Product This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.get_product_versions(id, callback=callback_function) :param callback function: The callback function for asynchronous request. 
(optional) :param int id: Product id (required) :param int page_index: Page Index :param int page_size: Pagination size :param str sort: Sorting RSQL :param str q: RSQL Query :return: ProductVersionPage If the method is called asynchronously, returns the request thread. """ # verify the required parameter 'id' is set if id is None: raise ValueError("Missing the required parameter `id` when calling `get_product_versions`") all_params = ['id', 'page_index', 'page_size', 'sort', 'q'] all_params.append('callback') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method get_product_versions" % key ) params[key] = val del params['kwargs'] resource_path = '/products/{id}/product-versions'.replace('{format}', 'json') method = 'GET' path_params = {} if 'id' in params: path_params['id'] = params['id'] query_params = {} if 'page_index' in params: query_params['pageIndex'] = params['page_index'] if 'page_size' in params: query_params['pageSize'] = params['page_size'] if 'sort' in params: query_params['sort'] = params['sort'] if 'q' in params: query_params['q'] = params['q'] header_params = {} form_params = {} files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json']) if not header_params['Accept']: del header_params['Accept'] # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['application/json']) # Authentication setting auth_settings = [] response = self.api_client.call_api(resource_path, method, path_params, query_params, header_params, body=body_params, post_params=form_params, files=files, response_type='ProductVersionPage', auth_settings=auth_settings, callback=params.get('callback')) return response def get_specific(self, id, **kwargs): """ Get specific Product This method makes a synchronous HTTP request by default. 
To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.get_specific(id, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param int id: Product id (required) :return: ProductSingleton If the method is called asynchronously, returns the request thread. """ # verify the required parameter 'id' is set if id is None: raise ValueError("Missing the required parameter `id` when calling `get_specific`") all_params = ['id'] all_params.append('callback') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method get_specific" % key ) params[key] = val del params['kwargs'] resource_path = '/products/{id}'.replace('{format}', 'json') method = 'GET' path_params = {} if 'id' in params: path_params['id'] = params['id'] query_params = {} header_params = {} form_params = {} files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json']) if not header_params['Accept']: del header_params['Accept'] # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['application/json']) # Authentication setting auth_settings = [] response = self.api_client.call_api(resource_path, method, path_params, query_params, header_params, body=body_params, post_params=form_params, files=files, response_type='ProductSingleton', auth_settings=auth_settings, callback=params.get('callback')) return response def update(self, id, **kwargs): """ Updates an existing Product This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. 
>>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.update(id, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param int id: Product id (required) :param ProductRest body: :return: None If the method is called asynchronously, returns the request thread. """ # verify the required parameter 'id' is set if id is None: raise ValueError("Missing the required parameter `id` when calling `update`") all_params = ['id', 'body'] all_params.append('callback') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method update" % key ) params[key] = val del params['kwargs'] resource_path = '/products/{id}'.replace('{format}', 'json') method = 'PUT' path_params = {} if 'id' in params: path_params['id'] = params['id'] query_params = {} header_params = {} form_params = {} files = {} body_params = None if 'body' in params: body_params = params['body'] # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json']) if not header_params['Accept']: del header_params['Accept'] # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['application/json']) # Authentication setting auth_settings = [] response = self.api_client.call_api(resource_path, method, path_params, query_params, header_params, body=body_params, post_params=form_params, files=files, response_type=None, auth_settings=auth_settings, callback=params.get('callback')) return response
import pytest from pnc_cli.swagger_client import ArtifactRest __author__ = 'thauser' from pnc_cli.swagger_client.apis import BuildrecordsApi from pnc_cli.swagger_client.apis import ProductmilestonesApi from test import testutils import pnc_cli.user_config as uc @pytest.fixture(scope='function', autouse=True) def get_milestone_api(): global milestone_api milestone_api = ProductmilestonesApi(uc.user.get_api_client()) @pytest.fixture(scope='function', autouse=True) def get_records_api(): global records_api records_api = BuildrecordsApi(uc.user.get_api_client()) def test_get_all_invalid_param(): testutils.assert_raises_typeerror(milestone_api, 'get_all') def test_get_all(new_milestone): assert milestone_api.get_all(page_index=0, page_size=1000000, sort='', q='').content is not None def test_create_new_invalid_param(): testutils.assert_raises_typeerror(milestone_api, 'create_new') def test_create_new(new_milestone): milestones = [m.id for m in milestone_api.get_all(page_size=1000000).content] assert new_milestone.id in milestones def test_get_all_by_product_version_id_no_version_id(): testutils.assert_raises_valueerror(milestone_api, 'get_all_by_product_version_id', version_id=None) def test_get_all_by_product_version_id_invalid_param(): testutils.assert_raises_typeerror(milestone_api, 'get_all_by_product_version_id', version_id=1) def test_get_all_by_product_version_id(): milestones = milestone_api.get_all_by_product_version_id(version_id=1, page_index=0, page_size=1000000, sort='', q='').content assert milestones is not None def test_get_specific_no_id(): testutils.assert_raises_valueerror(milestone_api, 'get_specific', id=None) def test_get_specific_invalid_param(): testutils.assert_raises_typeerror(milestone_api, 'get_specific', id=1) def test_get_specific(new_milestone): retrieved = milestone_api.get_specific(new_milestone.id).content assert new_milestone.to_dict() == retrieved.to_dict() def test_update_no_id(): testutils.assert_raises_valueerror(milestone_api, 
'update', id=None) def test_update_invalid_param(): testutils.assert_raises_typeerror(milestone_api, 'update', id=1) def test_update(new_milestone): new_milestone.download_url = "updatedUrlHere" milestone_api.update(id=new_milestone.id, body=new_milestone) updated = milestone_api.get_specific(new_milestone.id).content assert updated.to_dict() == new_milestone.to_dict() def test_add_distributed_artifact_no_id(): testutils.assert_raises_valueerror(milestone_api, 'add_distributed_artifact', id=None) def test_add_distributed_artifact_invalid_param(): testutils.assert_raises_typeerror(milestone_api, 'add_distributed_artifact', id=1) def test_add_distributed_artifact(new_milestone): test_builds = records_api.get_all(q='(buildConfigurationAudited.name=like=%cli-test%)').content record = test_builds[len(test_builds)-1] # latest test build artifact = records_api.get_built_artifacts(record.id).content[1] # first artifact milestone_api.remove_distributed_artifact(id=new_milestone.id, artifact_id=artifact.id) milestone_api.add_distributed_artifact(id=new_milestone.id, body=artifact) artifacts = milestone_api.get_distributed_artifacts(id=new_milestone.id).content assert artifact.id in [x.id for x in artifacts] def test_get_distributed_artifacts_no_id(): testutils.assert_raises_valueerror(milestone_api, 'get_distributed_artifacts', id=None) def test_get_distributed_artifacts_invalid_param(): testutils.assert_raises_typeerror(milestone_api, 'get_distributed_artifacts', id=1) def test_get_distributed_artifacts(new_milestone): result = milestone_api.get_distributed_artifacts(id=new_milestone.id).content assert result is not None def test_get_distributed_builds_no_id(): testutils.assert_raises_valueerror(milestone_api, 'get_distributed_builds', id=None) def test_get_distributed_builds_invalid_param(): testutils.assert_raises_typeerror(milestone_api, 'get_distributed_builds', id=1) def test_get_distributed_builds(new_milestone): result = 
milestone_api.get_distributed_builds(id=new_milestone.id).content assert result is not None def test_remove_distributed_artifact_no_milestone_id(): testutils.assert_raises_valueerror(milestone_api, 'remove_distributed_artifact', id=None, artifact_id=1) def test_remove_distributed_artifact_no_artifact_id(): testutils.assert_raises_valueerror(milestone_api, 'remove_distributed_artifact', id=1, artifact_id=None) def test_remove_distributed_artifact_invalid_param(): testutils.assert_raises_typeerror(milestone_api, 'remove_distributed_artifact', id=1, artifact_id=1) def test_remove_distributed_artifact(new_milestone): test_builds = records_api.get_all(q='(buildConfigurationAudited.name=like=%cli-test%)').content record = test_builds[len(test_builds)-1] # latest test build artifact = records_api.get_built_artifacts(record.id).content[1] # first artifact milestone_api.remove_distributed_artifact(id=new_milestone.id, artifact_id=artifact.id) artifacts = milestone_api.get_distributed_artifacts(new_milestone.id).content assert artifacts is None or artifact.id not in [x.id for x in artifacts] # assert that removing the artifact either means there are no artifacts, or at least the removed artifact is not present
thauser/pnc-cli
test/integration/test_productmilestones_api.py
pnc_cli/swagger_client/apis/products_api.py
#!/usr/bin/env python # # A library that provides a Python interface to the Telegram Bot API # Copyright (C) 2015-2018 # Leandro Toledo de Souza <devs@python-telegram-bot.org> # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser Public License for more details. # # You should have received a copy of the GNU Lesser Public License # along with this program. If not, see [http://www.gnu.org/licenses/]. """This module contains the classes that represent Telegram InlineQueryResultMpeg4Gif.""" from telegram import InlineQueryResult class InlineQueryResultCachedMpeg4Gif(InlineQueryResult): """ Represents a link to a video animation (H.264/MPEG-4 AVC video without sound) stored on the Telegram servers. By default, this animated MPEG-4 file will be sent by the user with an optional caption. Alternatively, you can use :attr:`input_message_content` to send a message with the specified content instead of the animation. Attributes: type (:obj:`str`): 'mpeg4_gif'. id (:obj:`str`): Unique identifier for this result, 1-64 bytes. mpeg4_file_id (:obj:`str`): A valid file identifier for the MP4 file. title (:obj:`str`): Optional. Title for the result. caption (:obj:`str`): Optional. Caption, 0-1024 characters parse_mode (:obj:`str`): Send Markdown or HTML, if you want Telegram apps to show bold, italic, fixed-width text or inline URLs in the media caption. See the constants in :class:`telegram.ParseMode` for the available modes. reply_markup (:class:`telegram.InlineKeyboardMarkup`): Optional. Inline keyboard attached to the message. 
input_message_content (:class:`telegram.InputMessageContent`): Optional. Content of the message to be sent instead of the MPEG-4 file. Args: id (:obj:`str`): Unique identifier for this result, 1-64 bytes. mpeg4_file_id (:obj:`str`): A valid file identifier for the MP4 file. title (:obj:`str`, optional): Title for the result. caption (:obj:`str`, optional): Caption, 0-1024 characters parse_mode (:obj:`str`, optional): Send Markdown or HTML, if you want Telegram apps to show bold, italic, fixed-width text or inline URLs in the media caption. See the constants in :class:`telegram.ParseMode` for the available modes. reply_markup (:class:`telegram.InlineKeyboardMarkup`, optional): Inline keyboard attached to the message. input_message_content (:class:`telegram.InputMessageContent`, optional): Content of the message to be sent instead of the MPEG-4 file. **kwargs (:obj:`dict`): Arbitrary keyword arguments. """ def __init__(self, id, mpeg4_file_id, title=None, caption=None, reply_markup=None, input_message_content=None, parse_mode=None, **kwargs): # Required super(InlineQueryResultCachedMpeg4Gif, self).__init__('mpeg4_gif', id) self.mpeg4_file_id = mpeg4_file_id # Optionals if title: self.title = title if caption: self.caption = caption if parse_mode: self.parse_mode = parse_mode if reply_markup: self.reply_markup = reply_markup if input_message_content: self.input_message_content = input_message_content
#!/usr/bin/env python # # A library that provides a Python interface to the Telegram Bot API # Copyright (C) 2015-2018 # Leandro Toledo de Souza <devs@python-telegram-bot.org> # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser Public License for more details. # # You should have received a copy of the GNU Lesser Public License # along with this program. If not, see [http://www.gnu.org/licenses/]. import pytest from telegram import PassportElementErrorReverseSide, PassportElementErrorSelfie @pytest.fixture(scope='class') def passport_element_error_reverse_side(): return PassportElementErrorReverseSide(TestPassportElementErrorReverseSide.type, TestPassportElementErrorReverseSide.file_hash, TestPassportElementErrorReverseSide.message) class TestPassportElementErrorReverseSide(object): source = 'reverse_side' type = 'test_type' file_hash = 'file_hash' message = 'Error message' def test_expected_values(self, passport_element_error_reverse_side): assert passport_element_error_reverse_side.source == self.source assert passport_element_error_reverse_side.type == self.type assert passport_element_error_reverse_side.file_hash == self.file_hash assert passport_element_error_reverse_side.message == self.message def test_to_dict(self, passport_element_error_reverse_side): passport_element_error_reverse_side_dict = passport_element_error_reverse_side.to_dict() assert isinstance(passport_element_error_reverse_side_dict, dict) assert (passport_element_error_reverse_side_dict['source'] == passport_element_error_reverse_side.source) assert 
(passport_element_error_reverse_side_dict['type'] == passport_element_error_reverse_side.type) assert (passport_element_error_reverse_side_dict['file_hash'] == passport_element_error_reverse_side.file_hash) assert (passport_element_error_reverse_side_dict['message'] == passport_element_error_reverse_side.message) def test_equality(self): a = PassportElementErrorReverseSide(self.type, self.file_hash, self.message) b = PassportElementErrorReverseSide(self.type, self.file_hash, self.message) c = PassportElementErrorReverseSide(self.type, '', '') d = PassportElementErrorReverseSide('', self.file_hash, '') e = PassportElementErrorReverseSide('', '', self.message) f = PassportElementErrorSelfie(self.type, self.file_hash, self.message) assert a == b assert hash(a) == hash(b) assert a is not b assert a != c assert hash(a) != hash(c) assert a != d assert hash(a) != hash(d) assert a != e assert hash(a) != hash(e) assert a != f assert hash(a) != hash(f)
leandrotoledo/python-telegram-bot
tests/test_passportelementerrorreverseside.py
telegram/inline/inlinequeryresultcachedmpeg4gif.py
# -*- coding: utf-8 -*- import datetime import functools import operator import re import pytz from dateutil import parser from django.utils import timezone from nose.tools import * # flake8: noqa from rest_framework import serializers as ser from unittest import TestCase from tests.base import ApiTestCase from api.base.filters import ListFilterMixin import api.base.filters as filters from api.base.exceptions import ( InvalidFilterError, InvalidFilterOperator, InvalidFilterComparisonType, InvalidFilterMatchType, ) from api.base.serializers import RelationshipField class FakeSerializer(ser.Serializer): filterable_fields = ('id', 'string_field', 'second_string_field','list_field', 'date_field', 'int_field', 'bool_field', 'relationship_field') id = ser.CharField() string_field = ser.CharField() second_string_field = ser.CharField() list_field = ser.ListField() date_field = ser.DateField() datetime_field = ser.DateTimeField() int_field = ser.IntegerField() float_field = ser.FloatField() bool_field = ser.BooleanField(source='foobar') relationship_field = RelationshipField(related_view='fake', related_view_kwargs={}) class FakeRecord(object): def __init__( self, _id=None, string_field='foo', second_string_field='bar', list_field=None, date_field=timezone.now(), datetime_field=timezone.now(), int_field=42, float_field=41.99999, foobar=True ): self._id = _id self.string_field = string_field self.second_string_field = second_string_field self.list_field = list_field or [1, 2, 3] self.date_field = date_field self.datetime_field = datetime_field self.int_field = int_field self.float_field = float_field # bool_field in serializer corresponds to foobar in model self.foobar = foobar class FakeListView(ListFilterMixin): serializer_class = FakeSerializer class TestFilterMixin(ApiTestCase): def setUp(self): super(TestFilterMixin, self).setUp() self.view = FakeListView() def test_parse_query_params_default_operators(self): query_params = { 'filter[string_field]': 'foo', 
'filter[list_field]': 'bar', 'filter[int_field]': '42', 'filter[bool_field]': 'false', } fields = self.view.parse_query_params(query_params) assert_in('string_field', fields['filter[string_field]']) assert_equal(fields['filter[string_field]']['string_field']['op'], 'icontains') assert_in('list_field', fields['filter[list_field]']) assert_equal(fields['filter[list_field]']['list_field']['op'], 'contains') assert_in('int_field', fields['filter[int_field]']) assert_equal(fields['filter[int_field]']['int_field']['op'], 'eq') assert_in('bool_field', fields['filter[bool_field]']) assert_equal(fields['filter[bool_field]']['bool_field']['op'], 'eq') def test_parse_query_params_casts_values(self): query_params = { 'filter[string_field]': 'foo', 'filter[list_field]': 'bar', 'filter[int_field]': '42', 'filter[bool_field]': 'false', } fields = self.view.parse_query_params(query_params) assert_in('string_field', fields['filter[string_field]']) assert_equal(fields['filter[string_field]']['string_field']['value'], 'foo') assert_in('list_field', fields['filter[list_field]']) assert_equal(fields['filter[list_field]']['list_field']['value'], 'bar') assert_in('int_field', fields['filter[int_field]']) assert_equal(fields['filter[int_field]']['int_field']['value'], 42) assert_in('bool_field', fields.get('filter[bool_field]')) assert_equal(fields['filter[bool_field]']['bool_field']['value'], False) def test_parse_query_params_uses_field_source_attribute(self): query_params = { 'filter[bool_field]': 'false', } fields = self.view.parse_query_params(query_params) parsed_field = fields['filter[bool_field]']['bool_field'] assert_equal(parsed_field['source_field_name'], 'foobar') assert_equal(parsed_field ['value'], False) assert_equal(parsed_field ['op'], 'eq') def test_parse_query_params_generalizes_dates(self): query_params = { 'filter[date_field]': '2014-12-12' } fields = self.view.parse_query_params(query_params) start = parser.parse('2014-12-12').replace(tzinfo=pytz.utc) stop = start + 
datetime.timedelta(days=1) for key, field_name in fields.iteritems(): for match in field_name['date_field']: if match['op'] == 'gte': assert_equal(match['value'], start) elif match['op'] == 'lt': assert_equal(match['value'], stop) else: self.fail() def test_parse_query_params_comparable_field(self): query_params = { 'filter[int_field][gt]': 42, 'filter[int_field][lte]': 9000 } fields = self.view.parse_query_params(query_params) for key, field_name in fields.iteritems(): if field_name['int_field']['op'] == 'gt': assert_equal(field_name['int_field']['value'], 42) elif field_name['int_field']['op'] == 'lte': assert_equal(field_name['int_field']['value'], 9000) else: self.fail() def test_parse_query_params_matchable_field(self): query_params = { 'filter[string_field][contains]': 'foo', 'filter[string_field][icontains]': 'bar' } fields = self.view.parse_query_params(query_params) for key, field_name in fields.iteritems(): if field_name['string_field']['op'] == 'contains': assert_equal(field_name['string_field']['value'], 'foo') elif field_name['string_field']['op'] == 'icontains': assert_equal(field_name['string_field']['value'], 'bar') else: self.fail() def test_parse_query_params_raises_InvalidFilterError_bad_field(self): query_params = { 'filter[fake]': 'foo' } with assert_raises(InvalidFilterError): self.view.parse_query_params(query_params) def test_parse_query_params_raises_InvalidFilterComparisonType(self): query_params = { 'filter[string_field][gt]': 'foo' } with assert_raises(InvalidFilterComparisonType): self.view.parse_query_params(query_params) def test_parse_query_params_raises_InvalidFilterMatchType(self): query_params = { 'filter[date_field][icontains]': '2015' } with assert_raises(InvalidFilterMatchType): self.view.parse_query_params(query_params) def test_parse_query_params_raises_InvalidFilterOperator(self): query_params = { 'filter[int_field][bar]': 42 } with assert_raises(InvalidFilterOperator): self.view.parse_query_params(query_params) def 
test_InvalidFilterOperator_parameterizes_valid_operators(self): query_params = { 'filter[int_field][bar]': 42 } try: self.view.parse_query_params(query_params) except InvalidFilterOperator as err: ops = re.search(r'one of (?P<ops>.+)\.$', err.detail).groupdict()['ops'] assert_equal(ops, "gt, gte, lt, lte, eq, ne") query_params = { 'filter[string_field][bar]': 'foo' } try: self.view.parse_query_params(query_params) except InvalidFilterOperator as err: ops = re.search(r'one of (?P<ops>.+)\.$', err.detail).groupdict()['ops'] assert_equal(ops, "contains, icontains, eq, ne") def test_parse_query_params_supports_multiple_filters(self): query_params = { 'filter[string_field]': 'foo', 'filter[string_field]': 'bar', } # FIXME: This test may only be checking one field fields = self.view.parse_query_params(query_params) assert_in('string_field', fields.get('filter[string_field]')) for key, field_name in fields.iteritems(): assert_in(field_name['string_field']['value'], ('foo', 'bar')) def test_convert_value_bool(self): value = 'true' field = FakeSerializer._declared_fields['bool_field'] value = self.view.convert_value(value, field) assert_true(isinstance(value, bool)) assert_true(value) def test_convert_value_date(self): value = '2014-12-12' field = FakeSerializer._declared_fields['date_field'] value = self.view.convert_value(value, field) assert_true(isinstance(value, datetime.datetime)) assert_equal(value, parser.parse('2014-12-12').replace(tzinfo=pytz.utc)) def test_convert_value_int(self): value = '9000' field = FakeSerializer._declared_fields['int_field'] value = self.view.convert_value(value, field) assert_equal(value, 9000) def test_convert_value_float(self): value = '42' orig_type = type(value) field = FakeSerializer._declared_fields['float_field'] value = self.view.convert_value(value, field) assert_equal(value, 42.0) def test_convert_value_null_for_list(self): value = 'null' field = FakeSerializer._declared_fields['list_field'] value = self.view.convert_value(value, 
field) assert_equal(value, []) def test_multiple_filter_params_bad_filter(self): query_params = { 'filter[string_field, not_a_field]': 'test' } with assert_raises(InvalidFilterError): self.view.parse_query_params(query_params) def test_bad_filter_operator(self): query_params = { 'filter[relationship_field][invalid]': 'false', } with assert_raises(InvalidFilterOperator): self.view.parse_query_params(query_params) class TestListFilterMixin(ApiTestCase): def setUp(self): super(TestListFilterMixin, self).setUp() self.view = FakeListView() def test_get_filtered_queryset_for_list_field_converts_to_lowercase(self): field_name = 'list_field' params = { 'value': 'FOO', 'source_field_name': field_name } default_queryset = [ FakeRecord(_id=1, list_field=['fOO', 'Foo', 'Bar', 'baR']), FakeRecord(_id=2, list_field=['Foo', 'Bax']), FakeRecord(_id=3, list_field=['Bar', 'baR', 'bat']) ] filtered = self.view.get_filtered_queryset(field_name, params, default_queryset) for record in filtered: assert_not_equal(record._id, 3) for id in (1, 2): assert_in(id, [f._id for f in filtered]) def test_get_filtered_queryset_for_list_respects_special_case_of_ids_being_list(self): field_name = 'bool_field' params = { 'value': True, 'op': 'eq', 'source_field_name': 'foobar' } default_queryset = [ FakeRecord(_id=1, foobar=True), FakeRecord(_id=2, foobar=True), FakeRecord(_id=3, foobar=False) ] filtered = self.view.get_filtered_queryset(field_name, params, default_queryset) for record in filtered: assert_not_equal(record._id, 3) for id in (1, 2): assert_in(id, [f._id for f in filtered]) def test_get_filtered_queryset_for_list_respects_id_always_being_list(self): field_name = 'id' params = { 'value': '2', 'op': 'in', 'source_field_name': '_id' } default_queryset = [ FakeRecord(_id='1', foobar=True), FakeRecord(_id='2', foobar=True), FakeRecord(_id='3', foobar=False) ] filtered = self.view.get_filtered_queryset(field_name, params, default_queryset) for record in filtered: assert_equal(record._id, '2') 
for id in ('1', '3'): assert_not_in(id, [f._id for f in filtered]) def test_parse_query_params_uses_field_source_attribute(self): query_params = { 'filter[bool_field]': 'false', } fields = self.view.parse_query_params(query_params) parsed_field = fields['filter[bool_field]']['bool_field'] assert_equal(parsed_field['source_field_name'], 'foobar') assert_equal(parsed_field ['value'], False) assert_equal(parsed_field ['op'], 'eq') class TestOSFOrderingFilter(ApiTestCase): class query: title = ' ' def __init__(self, title): self.title = title def __str__(self): return self.title class query_with_num: title = ' ' number = 0 def __init__(self, title, number): self.title = title self.number = number def __str__(self): return self.title def test_filter_queryset_forward(self): query_to_be_sorted = [self.query(x) for x in 'NewProj Zip Proj Activity'.split()] sorted_query = sorted(query_to_be_sorted, cmp=filters.sort_multiple(['title'])) sorted_output = [str(i) for i in sorted_query] assert_equal(sorted_output, ['Activity', 'NewProj', 'Proj', 'Zip']) def test_filter_queryset_forward_duplicate(self): query_to_be_sorted = [self.query(x) for x in 'NewProj Activity Zip Activity'.split()] sorted_query = sorted(query_to_be_sorted, cmp=filters.sort_multiple(['title'])) sorted_output = [str(i) for i in sorted_query] assert_equal(sorted_output, ['Activity', 'Activity', 'NewProj', 'Zip']) def test_filter_queryset_reverse(self): query_to_be_sorted = [self.query(x) for x in 'NewProj Zip Proj Activity'.split()] sorted_query = sorted(query_to_be_sorted, cmp=filters.sort_multiple(['-title'])) sorted_output = [str(i) for i in sorted_query] assert_equal(sorted_output, ['Zip', 'Proj', 'NewProj', 'Activity']) def test_filter_queryset_reverse_duplicate(self): query_to_be_sorted = [self.query(x) for x in 'NewProj Activity Zip Activity'.split()] sorted_query = sorted(query_to_be_sorted, cmp=filters.sort_multiple(['-title'])) sorted_output = [str(i) for i in sorted_query] 
assert_equal(sorted_output, ['Zip', 'NewProj', 'Activity', 'Activity']) def test_filter_queryset_handles_multiple_fields(self): objs = [self.query_with_num(title='NewProj', number=10), self.query_with_num(title='Zip', number=20), self.query_with_num(title='Activity', number=30), self.query_with_num(title='Activity', number=40)] actual = [x.number for x in sorted(objs, cmp=filters.sort_multiple(['title', '-number']))] assert_equal(actual, [40, 30, 10, 20]) class TestQueryPatternRegex(TestCase): def setUp(self): super(TestQueryPatternRegex, self).setUp() self.filter_regex = FakeListView.QUERY_PATTERN self.filter_fields = FakeListView.FILTER_FIELDS def test_single_field_filter(self): filter_str = 'filter[name]' match = self.filter_regex.match(filter_str) fields = match.groupdict()['fields'] field_names = re.findall(self.filter_fields, fields) assert_equal(fields, 'name') assert_equal(field_names[0], 'name') def test_double_field_filter(self): filter_str = 'filter[name,id]' match = self.filter_regex.match(filter_str) fields = match.groupdict()['fields'] field_names = re.findall(self.filter_fields, fields) assert_equal(fields, 'name,id') assert_equal(field_names[0], 'name') assert_equal(field_names[1], 'id') def test_multiple_field_filter(self): filter_str = 'filter[name,id,another,field,here]' match = self.filter_regex.match(filter_str) fields = match.groupdict()['fields'] field_names = re.findall(self.filter_fields, fields) assert_equal(fields, 'name,id,another,field,here') assert_equals(len(field_names), 5) def test_single_field_filter_end_comma(self): filter_str = 'filter[name,]' match = self.filter_regex.match(filter_str) assert_false(match) def test_multiple_field_filter_end_comma(self): filter_str = 'filter[name,id,]' match = self.filter_regex.match(filter_str) assert_false(match) def test_multiple_field_filter_with_spaces(self): filter_str = 'filter[name, id]' match = self.filter_regex.match(filter_str) fields = match.groupdict()['fields'] field_names = 
re.findall(self.filter_fields, fields) assert_equal(fields, 'name, id') assert_equal(field_names[0], 'name') assert_equal(field_names[1], 'id') def test_multiple_field_filter_with_blank_field(self): filter_str = 'filter[name, , id]' match = self.filter_regex.match(filter_str) assert_false(match) def test_multiple_field_filter_non_match(self): filter_str = 'filter[name; id]' match = self.filter_regex.match(filter_str) assert_false(match) def test_single_field_filter_non_match(self): filter_str = 'fitler[name]' match = self.filter_regex.match(filter_str) assert_false(match) def test_single_field_non_alphanumeric_character(self): filter_str = 'fitler[<name>]' match = self.filter_regex.match(filter_str) assert_false(match)
import mock from nose.tools import * # flake8: noqa import pytest from addons.github.models import GithubFile from api.base.settings.defaults import API_BASE from api_tests import utils as test_utils from api_tests.preprints.filters.test_filters import PreprintsListFilteringMixin from api_tests.preprints.views.test_preprint_list_mixin import ( PreprintIsPublishedListMixin, PreprintListMatchesPreprintDetailMixin, PreprintIsValidListMixin, ) from api_tests.reviews.mixins.filter_mixins import ReviewableFilterMixin from framework.auth.core import Auth from osf.models import PreprintService, Node from osf_tests.factories import ( ProjectFactory, PreprintFactory, AuthUserFactory, SubjectFactory, PreprintProviderFactory, ) from tests.base import ApiTestCase, capture_signals from website.project import signals as project_signals from website.util import permissions from reviews.workflow import States def build_preprint_create_payload(node_id=None, provider_id=None, file_id=None, attrs=None): if not attrs: attrs = {} payload = { "data": { "attributes": attrs, "relationships": {}, "type": "preprints" } } if node_id: payload['data']['relationships']["node"] = { "data": { "type": "node", "id": node_id } } if provider_id: payload['data']['relationships']["provider"] = { "data": { "type": "provider", "id": provider_id } } if file_id: payload['data']['relationships']["primary_file"] = { "data": { "type": "primary_file", "id": file_id } } return payload def build_preprint_create_payload_without_node(provider_id=None, file_id=None, attrs=None): attrs = attrs or {} return build_preprint_create_payload(node_id=None, provider_id=provider_id, file_id=file_id, attrs=attrs) @pytest.mark.django_db class TestPreprintCreateWithoutNode: @pytest.fixture() def user_one(self): return AuthUserFactory() @pytest.fixture() def subject(self): return SubjectFactory() @pytest.fixture() def provider(self): return PreprintProviderFactory() @pytest.fixture() def url(self): return 
'/{}preprints/'.format(API_BASE) @pytest.fixture() def preprint_payload(self, provider): return { 'data': { 'type': 'preprints', 'attributes': { 'title': 'Greatest Wrestlemania Moment Vol IX', 'description': 'Crush VS Doink the Clown in an epic battle during WrestleMania IX', 'category': 'data', 'public': False, }, "relationships": { "provider": { "data": { "id": provider._id, "type": "providers" } } } } } def test_create_preprint_logged_in(self, app, user_one, url, preprint_payload): res = app.post_json_api(url, preprint_payload, auth=user_one.auth, expect_errors=True) assert res.status_code == 201 assert res.json['data']['attributes']['title'] == preprint_payload['data']['attributes']['title'] assert res.json['data']['attributes']['description'] == preprint_payload['data']['attributes']['description'] assert res.content_type == 'application/vnd.api+json' def test_create_preprint_creates_a_node(self, app, user_one, provider, url, preprint_payload): res = app.post_json_api(url, preprint_payload, auth=user_one.auth, expect_errors=True) assert res.status_code == 201 assert Node.objects.filter(preprints__guids___id=res.json['data']['id']).exists() class TestPreprintList(ApiTestCase): def setUp(self): super(TestPreprintList, self).setUp() self.user = AuthUserFactory() self.preprint = PreprintFactory(creator=self.user) self.url = '/{}preprints/'.format(API_BASE) self.project = ProjectFactory(creator=self.user) def test_return_preprints_logged_out(self): res = self.app.get(self.url) assert_equal(len(res.json['data']), 1) assert_equal(res.status_code, 200) assert_equal(res.status_code, 200) assert_equal(res.content_type, 'application/vnd.api+json') def test_exclude_nodes_from_preprints_endpoint(self): res = self.app.get(self.url, auth=self.user.auth) ids = [each['id'] for each in res.json['data']] assert_in(self.preprint._id, ids) assert_not_in(self.project._id, ids) class TestPreprintsListFiltering(PreprintsListFilteringMixin): @pytest.fixture() def user(self): return 
AuthUserFactory() @pytest.fixture() def provider_one(self): return PreprintProviderFactory(name='Sockarxiv') @pytest.fixture() def provider_two(self): return PreprintProviderFactory(name='Piratearxiv') @pytest.fixture() def provider_three(self, provider_one): return provider_one @pytest.fixture() def project_one(self, user): return ProjectFactory(creator=user) @pytest.fixture() def project_two(self, user): return ProjectFactory(creator=user) @pytest.fixture() def project_three(self, user): return ProjectFactory(creator=user) @pytest.fixture() def url(self): return '/{}preprints/?version=2.2&'.format(API_BASE) @mock.patch('website.identifiers.client.EzidClient.change_status_identifier') def test_provider_filter_equals_returns_one(self, mock_change_identifier, app, user, provider_two, preprint_two, provider_url): expected = [preprint_two._id] res = app.get('{}{}'.format(provider_url, provider_two._id), auth=user.auth) actual = [preprint['id'] for preprint in res.json['data']] assert expected == actual class TestPreprintListFilteringByReviewableFields(ReviewableFilterMixin): @pytest.fixture() def url(self): return '/{}preprints/'.format(API_BASE) @pytest.fixture() def expected_reviewables(self, user): preprints = [ PreprintFactory(is_published=False, project=ProjectFactory(is_public=True)), PreprintFactory(is_published=False, project=ProjectFactory(is_public=True)), PreprintFactory(is_published=False, project=ProjectFactory(is_public=True)), ] preprints[0].reviews_submit(user) preprints[0].reviews_accept(user, 'comment') preprints[1].reviews_submit(user) preprints[1].reviews_reject(user, 'comment') preprints[2].reviews_submit(user) return preprints @pytest.fixture def user(self): return AuthUserFactory() class TestPreprintCreate(ApiTestCase): def setUp(self): super(TestPreprintCreate, self).setUp() self.user = AuthUserFactory() self.other_user = AuthUserFactory() self.private_project = ProjectFactory(creator=self.user) self.public_project = 
ProjectFactory(creator=self.user, is_public=True) self.public_project.add_contributor(self.other_user, permissions=permissions.DEFAULT_CONTRIBUTOR_PERMISSIONS, save=True) self.subject = SubjectFactory() self.provider = PreprintProviderFactory() self.user_two = AuthUserFactory() self.file_one_public_project = test_utils.create_test_file(self.public_project, self.user, 'millionsofdollars.pdf') self.file_one_private_project = test_utils.create_test_file(self.private_project, self.user, 'woowoowoo.pdf') self.url = '/{}preprints/'.format(API_BASE) def test_create_preprint_from_public_project(self): public_project_payload = build_preprint_create_payload(self.public_project._id, self.provider._id, self.file_one_public_project._id) res = self.app.post_json_api(self.url, public_project_payload, auth=self.user.auth) assert_equal(res.status_code, 201) @mock.patch('website.preprints.tasks.get_and_set_preprint_identifiers.si') def test_create_preprint_from_private_project(self, mock_create_identifiers): private_project_payload = build_preprint_create_payload(self.private_project._id, self.provider._id, self.file_one_private_project._id, attrs={ 'subjects': [[SubjectFactory()._id]], 'is_published': True }) res = self.app.post_json_api(self.url, private_project_payload, auth=self.user.auth) self.private_project.reload() assert_equal(res.status_code, 201) assert_true(self.private_project.is_public) def test_non_authorized_user(self): public_project_payload = build_preprint_create_payload(self.public_project._id, self.provider._id, self.file_one_public_project._id) res = self.app.post_json_api(self.url, public_project_payload, auth=self.user_two.auth, expect_errors=True) assert_equal(res.status_code, 403) def test_read_write_user_not_admin(self): assert_in(self.other_user, self.public_project.contributors) public_project_payload = build_preprint_create_payload(self.public_project._id, self.provider._id, self.file_one_public_project._id) res = self.app.post_json_api(self.url, 
public_project_payload, auth=self.other_user.auth, expect_errors=True) assert_equal(res.status_code, 403) def test_file_is_not_in_node(self): wrong_project_payload = build_preprint_create_payload(self.public_project._id, self.provider._id, self.file_one_private_project._id) res = self.app.post_json_api(self.url, wrong_project_payload, auth=self.user.auth, expect_errors=True) assert_equal(res.status_code, 400) assert_equal(res.json['errors'][0]['detail'], 'This file is not a valid primary file for this preprint.') def test_already_a_preprint_with_conflicting_provider(self): preprint = PreprintFactory(creator=self.user) file_one_preprint = test_utils.create_test_file(preprint.node, self.user, 'openupthatwindow.pdf') already_preprint_payload = build_preprint_create_payload(preprint.node._id, preprint.provider._id, file_one_preprint._id) res = self.app.post_json_api(self.url, already_preprint_payload, auth=self.user.auth, expect_errors=True) assert_equal(res.status_code, 409) assert_in('Only one preprint per provider can be submitted for a node.', res.json['errors'][0]['detail']) def test_read_write_user_already_a_preprint_with_conflicting_provider(self): assert_in(self.other_user, self.public_project.contributors) preprint = PreprintFactory(creator=self.user) preprint.node.add_contributor(self.other_user, permissions=permissions.DEFAULT_CONTRIBUTOR_PERMISSIONS, save=True) file_one_preprint = test_utils.create_test_file(preprint.node, self.user, 'openupthatwindow.pdf') already_preprint_payload = build_preprint_create_payload(preprint.node._id, self.provider._id, file_one_preprint._id) res = self.app.post_json_api(self.url, already_preprint_payload, auth=self.other_user.auth, expect_errors=True) assert_equal(res.status_code, 403) @mock.patch('website.preprints.tasks.get_and_set_preprint_identifiers.si') def test_publish_preprint_fails_with_no_primary_file(self, mock_get_identifiers): no_file_payload = build_preprint_create_payload( node_id=self.public_project._id, 
provider_id=self.provider._id, file_id=None, attrs= { 'is_published': True, 'subjects': [[SubjectFactory()._id]], } ) res = self.app.post_json_api(self.url, no_file_payload, auth=self.user.auth, expect_errors=True) assert_equal(res.status_code, 400) assert_equal(res.json['errors'][0]['detail'], 'A valid primary_file must be set before publishing a preprint.') @mock.patch('website.preprints.tasks.get_and_set_preprint_identifiers.si') def test_publish_preprint_fails_with_invalid_primary_file(self, mock_get_identifiers): no_file_payload = build_preprint_create_payload( node_id=self.public_project._id, provider_id=self.provider._id, file_id='totallynotanid', attrs= { 'is_published': True, 'subjects': [[SubjectFactory()._id]], } ) res = self.app.post_json_api(self.url, no_file_payload, auth=self.user.auth, expect_errors=True) assert_equal(res.status_code, 400) assert_equal(res.json['errors'][0]['detail'], 'A valid primary_file must be set before publishing a preprint.') def test_no_provider_given(self): no_providers_payload = build_preprint_create_payload(self.public_project._id, self.provider._id, self.file_one_public_project._id) del no_providers_payload['data']['relationships']['provider'] res = self.app.post_json_api(self.url, no_providers_payload, auth=self.user.auth, expect_errors=True) assert_equal(res.status_code, 400) assert_equal(res.json['errors'][0]['detail'], 'You must specify a valid provider to create a preprint.') def test_invalid_provider_given(self): wrong_provider_payload = build_preprint_create_payload(self.public_project._id, 'jobbers', self.file_one_public_project._id) res = self.app.post_json_api(self.url, wrong_provider_payload, auth=self.user.auth, expect_errors=True) assert_equal(res.status_code, 400) assert_equal(res.json['errors'][0]['detail'], 'You must specify a valid provider to create a preprint.') def test_request_id_does_not_match_request_url_id(self): public_project_payload = build_preprint_create_payload(self.private_project._id, 
self.provider._id, self.file_one_public_project._id) res = self.app.post_json_api(self.url, public_project_payload, auth=self.user.auth, expect_errors=True) assert_equal(res.status_code, 400) assert_equal(res.json['errors'][0]['detail'], 'This file is not a valid primary file for this preprint.') def test_file_not_osfstorage(self): github_file = self.file_one_public_project github_file.recast(GithubFile._typedmodels_type) github_file.save() public_project_payload = build_preprint_create_payload(self.public_project._id, self.provider._id, github_file._id) res = self.app.post_json_api(self.url, public_project_payload, auth=self.user.auth, expect_errors=True) assert_equal(res.status_code, 400) assert_equal(res.json['errors'][0]['detail'], 'This file is not a valid primary file for this preprint.') def test_preprint_contributor_signal_not_sent_on_creation(self): with capture_signals() as mock_signals: public_project_payload = build_preprint_create_payload(self.public_project._id, self.provider._id, self.file_one_public_project._id) res = self.app.post_json_api(self.url, public_project_payload, auth=self.user.auth) assert_equal(res.status_code, 201) assert_not_in(project_signals.contributor_added, mock_signals.signals_sent()) def test_create_preprint_with_deleted_node_should_fail(self): self.public_project.is_deleted = True self.public_project.save() public_project_payload = build_preprint_create_payload(self.public_project._id, self.provider._id, self.file_one_public_project._id) res = self.app.post_json_api(self.url, public_project_payload, auth=self.user.auth, expect_errors=True) assert_equal(res.status_code, 400) assert_equal(res.json['errors'][0]['detail'], 'Cannot create a preprint from a deleted node.') @mock.patch('website.preprints.tasks.get_and_set_preprint_identifiers.si') def test_create_preprint_adds_log_if_published(self, mock_get_identifiers): public_project_payload = build_preprint_create_payload( self.public_project._id, self.provider._id, 
self.file_one_public_project._id, { 'is_published': True, 'subjects': [[SubjectFactory()._id]], } ) res = self.app.post_json_api(self.url, public_project_payload, auth=self.user.auth) assert_equal(res.status_code, 201) preprint_id = res.json['data']['id'] preprint = PreprintService.load(preprint_id) log = preprint.node.logs.latest() assert_equal(log.action, 'preprint_initiated') assert_equal(log.params.get('preprint'), preprint_id) @mock.patch('website.preprints.tasks.get_and_set_preprint_identifiers.si') @mock.patch('website.preprints.tasks.on_preprint_updated.si') def test_create_preprint_from_project_published_hits_update(self, mock_on_preprint_updated, mock_get_identifiers): private_project_payload = build_preprint_create_payload(self.private_project._id, self.provider._id, self.file_one_private_project._id, attrs={ 'subjects': [[SubjectFactory()._id]], 'is_published': True }) res = self.app.post_json_api(self.url, private_project_payload, auth=self.user.auth) assert mock_on_preprint_updated.called @mock.patch('website.preprints.tasks.on_preprint_updated.si') def test_create_preprint_from_project_unpublished_does_not_hit_update(self, mock_on_preprint_updated): private_project_payload = build_preprint_create_payload(self.private_project._id, self.provider._id, self.file_one_private_project._id, attrs={ 'subjects': [[SubjectFactory()._id]], 'is_published': False }) res = self.app.post_json_api(self.url, private_project_payload, auth=self.user.auth) assert not mock_on_preprint_updated.called @mock.patch('website.preprints.tasks.get_and_set_preprint_identifiers.si') @mock.patch('website.preprints.tasks.on_preprint_updated.si') def test_setting_is_published_with_moderated_provider_fails(self, mock_get_identifiers, mock_on_preprint_updated): self.provider.reviews_workflow = 'pre-moderation' self.provider.save() public_project_payload = build_preprint_create_payload( self.public_project._id, self.provider._id, self.file_one_public_project._id, { 'is_published': True, 
'subjects': [[SubjectFactory()._id]], } ) res = self.app.post_json_api(self.url, public_project_payload, auth=self.user.auth, expect_errors=True) assert res.status_code == 409 assert not mock_get_identifiers.called assert not mock_on_preprint_updated.called class TestPreprintIsPublishedList(PreprintIsPublishedListMixin): @pytest.fixture() def user_admin_contrib(self): return AuthUserFactory() @pytest.fixture() def provider_one(self): return PreprintProviderFactory() @pytest.fixture() def provider_two(self, provider_one): return provider_one @pytest.fixture() def project_published(self, user_admin_contrib): return ProjectFactory(creator=user_admin_contrib, is_public=True) @pytest.fixture() def project_public(self, user_admin_contrib, user_write_contrib): project_public = ProjectFactory(creator=user_admin_contrib, is_public=True) project_public.add_contributor(user_write_contrib, permissions=permissions.DEFAULT_CONTRIBUTOR_PERMISSIONS, save=True) return project_public @pytest.fixture() def url(self): return '/{}preprints/?version=2.2&'.format(API_BASE) @pytest.fixture() def preprint_unpublished(self, user_admin_contrib, provider_one, project_public, subject): return PreprintFactory(creator=user_admin_contrib, filename='mgla.pdf', provider=provider_one, subjects=[[subject._id]], project=project_public, is_published=False) def test_unpublished_visible_to_admins(self, app, user_admin_contrib, preprint_unpublished, preprint_published, url): res = app.get(url, auth=user_admin_contrib.auth) assert len(res.json['data']) == 2 assert preprint_unpublished._id in [d['id'] for d in res.json['data']] def test_unpublished_invisible_to_write_contribs(self, app, user_write_contrib, preprint_unpublished, preprint_published, url): res = app.get(url, auth=user_write_contrib.auth) assert len(res.json['data']) == 1 assert preprint_unpublished._id not in [d['id'] for d in res.json['data']] def test_filter_published_false_write_contrib(self, app, user_write_contrib, preprint_unpublished, 
url): res = app.get('{}filter[is_published]=false'.format(url), auth=user_write_contrib.auth) assert len(res.json['data']) == 0 class TestReviewsPendingPreprintIsPublishedList(PreprintIsPublishedListMixin): @pytest.fixture() def user_admin_contrib(self): return AuthUserFactory() @pytest.fixture() def provider_one(self): return PreprintProviderFactory(reviews_workflow='pre-moderation') @pytest.fixture() def provider_two(self, provider_one): return provider_one @pytest.fixture() def project_public(self, user_admin_contrib, user_write_contrib): project_public = ProjectFactory(creator=user_admin_contrib, is_public=True) project_public.add_contributor(user_write_contrib, permissions=permissions.DEFAULT_CONTRIBUTOR_PERMISSIONS, save=True) return project_public @pytest.fixture() def project_published(self, user_admin_contrib): return ProjectFactory(creator=user_admin_contrib, is_public=True) @pytest.fixture() def url(self): return '/{}preprints/?version=2.2&'.format(API_BASE) @pytest.fixture() def preprint_unpublished(self, user_admin_contrib, provider_one, project_public, subject): return PreprintFactory(creator=user_admin_contrib, filename='mgla.pdf', provider=provider_one, subjects=[[subject._id]], project=project_public, is_published=False, reviews_state=States.PENDING.value) def test_unpublished_visible_to_admins(self, app, user_admin_contrib, preprint_unpublished, preprint_published, url): res = app.get(url, auth=user_admin_contrib.auth) assert len(res.json['data']) == 2 assert preprint_unpublished._id in [d['id'] for d in res.json['data']] def test_unpublished_visible_to_write_contribs(self, app, user_write_contrib, preprint_unpublished, preprint_published, url): res = app.get(url, auth=user_write_contrib.auth) assert len(res.json['data']) == 2 assert preprint_unpublished._id in [d['id'] for d in res.json['data']] def test_filter_published_false_write_contrib(self, app, user_write_contrib, preprint_unpublished, url): res = 
app.get('{}filter[is_published]=false'.format(url), auth=user_write_contrib.auth) assert len(res.json['data']) == 1 class TestReviewsInitialPreprintIsPublishedList(PreprintIsPublishedListMixin): @pytest.fixture() def user_admin_contrib(self): return AuthUserFactory() @pytest.fixture() def provider_one(self): return PreprintProviderFactory(reviews_workflow='pre-moderation') @pytest.fixture() def provider_two(self, provider_one): return provider_one @pytest.fixture() def project_public(self, user_admin_contrib, user_write_contrib): project_public = ProjectFactory(creator=user_admin_contrib, is_public=True) project_public.add_contributor(user_write_contrib, permissions=permissions.DEFAULT_CONTRIBUTOR_PERMISSIONS, save=True) return project_public @pytest.fixture() def project_published(self, user_admin_contrib): return ProjectFactory(creator=user_admin_contrib, is_public=True) @pytest.fixture() def url(self): return '/{}preprints/?version=2.2&'.format(API_BASE) @pytest.fixture() def preprint_unpublished(self, user_admin_contrib, provider_one, project_public, subject): return PreprintFactory(creator=user_admin_contrib, filename='mgla.pdf', provider=provider_one, subjects=[[subject._id]], project=project_public, is_published=False, reviews_state=States.INITIAL.value) def test_unpublished_visible_to_admins(self, app, user_admin_contrib, preprint_unpublished, preprint_published, url): res = app.get(url, auth=user_admin_contrib.auth) assert len(res.json['data']) == 2 assert preprint_unpublished._id in [d['id'] for d in res.json['data']] def test_unpublished_invisible_to_write_contribs(self, app, user_write_contrib, preprint_unpublished, preprint_published, url): res = app.get(url, auth=user_write_contrib.auth) assert len(res.json['data']) == 1 assert preprint_unpublished._id not in [d['id'] for d in res.json['data']] def test_filter_published_false_write_contrib(self, app, user_write_contrib, preprint_unpublished, url): res = 
app.get('{}filter[is_published]=false'.format(url), auth=user_write_contrib.auth) assert len(res.json['data']) == 0 class TestPreprintIsPublishedListMatchesDetail(PreprintListMatchesPreprintDetailMixin): @pytest.fixture() def user_admin_contrib(self): return AuthUserFactory() @pytest.fixture() def provider_one(self): return PreprintProviderFactory() @pytest.fixture() def provider_two(self, provider_one): return provider_one @pytest.fixture() def project_published(self, user_admin_contrib): return ProjectFactory(creator=user_admin_contrib, is_public=True) @pytest.fixture() def project_public(self, user_admin_contrib, user_write_contrib): project_public = ProjectFactory(creator=user_admin_contrib, is_public=True) project_public.add_contributor(user_write_contrib, permissions=permissions.DEFAULT_CONTRIBUTOR_PERMISSIONS, save=True) return project_public @pytest.fixture() def preprint_unpublished(self, user_admin_contrib, provider_one, project_public, subject): return PreprintFactory(creator=user_admin_contrib, filename='mgla.pdf', provider=provider_one, subjects=[[subject._id]], project=project_public, is_published=False) @pytest.fixture() def list_url(self): return '/{}preprints/?version=2.2&'.format(API_BASE) @pytest.fixture() def detail_url(self, preprint_unpublished): return '/{}preprints/{}/'.format(API_BASE, preprint_unpublished._id) def test_unpublished_visible_to_admins(self, app, user_admin_contrib, preprint_unpublished, preprint_published, list_url, detail_url): res = app.get(list_url, auth=user_admin_contrib.auth) assert len(res.json['data']) == 2 assert preprint_unpublished._id in [d['id'] for d in res.json['data']] res = app.get(detail_url, auth=user_admin_contrib.auth) assert res.json['data']['id'] == preprint_unpublished._id def test_unpublished_invisible_to_write_contribs(self, app, user_write_contrib, preprint_unpublished, preprint_published, list_url, detail_url): res = app.get(list_url, auth=user_write_contrib.auth) assert len(res.json['data']) == 1 
assert preprint_unpublished._id not in [d['id'] for d in res.json['data']] res = app.get(detail_url, auth=user_write_contrib.auth, expect_errors=True) assert res.status_code == 403 class TestReviewsInitialPreprintIsPublishedListMatchesDetail(PreprintListMatchesPreprintDetailMixin): @pytest.fixture() def user_admin_contrib(self): return AuthUserFactory() @pytest.fixture() def provider_one(self): return PreprintProviderFactory(reviews_workflow='pre-moderation') @pytest.fixture() def provider_two(self, provider_one): return provider_one @pytest.fixture() def project_published(self, user_admin_contrib): return ProjectFactory(creator=user_admin_contrib, is_public=True) @pytest.fixture() def project_public(self, user_admin_contrib, user_write_contrib): project_public = ProjectFactory(creator=user_admin_contrib, is_public=True) project_public.add_contributor(user_write_contrib, permissions=permissions.DEFAULT_CONTRIBUTOR_PERMISSIONS, save=True) return project_public @pytest.fixture() def preprint_unpublished(self, user_admin_contrib, provider_one, project_public, subject): return PreprintFactory(creator=user_admin_contrib, filename='mgla.pdf', provider=provider_one, subjects=[[subject._id]], project=project_public, is_published=False, reviews_state=States.INITIAL.value) @pytest.fixture() def list_url(self): return '/{}preprints/?version=2.2&'.format(API_BASE) @pytest.fixture() def detail_url(self, preprint_unpublished): return '/{}preprints/{}/'.format(API_BASE, preprint_unpublished._id) def test_unpublished_visible_to_admins(self, app, user_admin_contrib, preprint_unpublished, preprint_published, list_url, detail_url): res = app.get(list_url, auth=user_admin_contrib.auth) assert len(res.json['data']) == 2 assert preprint_unpublished._id in [d['id'] for d in res.json['data']] res = app.get(detail_url, auth=user_admin_contrib.auth) assert res.json['data']['id'] == preprint_unpublished._id def test_unpublished_invisible_to_write_contribs(self, app, user_write_contrib, 
preprint_unpublished, preprint_published, list_url, detail_url): res = app.get(list_url, auth=user_write_contrib.auth) assert len(res.json['data']) == 1 assert preprint_unpublished._id not in [d['id'] for d in res.json['data']] res = app.get(detail_url, auth=user_write_contrib.auth, expect_errors=True) assert res.status_code == 403 class TestReviewsPendingPreprintIsPublishedListMatchesDetail(PreprintListMatchesPreprintDetailMixin): @pytest.fixture() def user_admin_contrib(self): return AuthUserFactory() @pytest.fixture() def provider_one(self): return PreprintProviderFactory(reviews_workflow='pre-moderation') @pytest.fixture() def provider_two(self, provider_one): return provider_one @pytest.fixture() def project_published(self, user_admin_contrib): return ProjectFactory(creator=user_admin_contrib, is_public=True) @pytest.fixture() def project_public(self, user_admin_contrib, user_write_contrib): project_public = ProjectFactory(creator=user_admin_contrib, is_public=True) project_public.add_contributor(user_write_contrib, permissions=permissions.DEFAULT_CONTRIBUTOR_PERMISSIONS, save=True) return project_public @pytest.fixture() def preprint_unpublished(self, user_admin_contrib, provider_one, project_public, subject): return PreprintFactory(creator=user_admin_contrib, filename='mgla.pdf', provider=provider_one, subjects=[[subject._id]], project=project_public, is_published=False, reviews_state=States.PENDING.value) @pytest.fixture() def list_url(self): return '/{}preprints/?version=2.2&'.format(API_BASE) @pytest.fixture() def detail_url(self, preprint_unpublished): return '/{}preprints/{}/'.format(API_BASE, preprint_unpublished._id) def test_unpublished_visible_to_admins(self, app, user_admin_contrib, preprint_unpublished, preprint_published, list_url, detail_url): res = app.get(list_url, auth=user_admin_contrib.auth) assert len(res.json['data']) == 2 assert preprint_unpublished._id in [d['id'] for d in res.json['data']] res = app.get(detail_url, 
auth=user_admin_contrib.auth) assert res.json['data']['id'] == preprint_unpublished._id def test_unpublished_visible_to_write_contribs(self, app, user_write_contrib, preprint_unpublished, preprint_published, list_url, detail_url): res = app.get(list_url, auth=user_write_contrib.auth) assert len(res.json['data']) == 2 assert preprint_unpublished._id in [d['id'] for d in res.json['data']] res = app.get(detail_url, auth=user_write_contrib.auth, expect_errors=True) assert res.json['data']['id'] == preprint_unpublished._id class TestPreprintIsValidList(PreprintIsValidListMixin): @pytest.fixture() def user_admin_contrib(self): return AuthUserFactory() @pytest.fixture() def project(self, user_admin_contrib, user_write_contrib): project = ProjectFactory(creator=user_admin_contrib, is_public=True) project.add_contributor(user_write_contrib, permissions=permissions.DEFAULT_CONTRIBUTOR_PERMISSIONS, save=True) return project @pytest.fixture() def provider(self): return PreprintProviderFactory() @pytest.fixture() def url(self, project): return '/{}preprints/?version=2.2&'.format(API_BASE)
crcresearch/osf.io
api_tests/preprints/views/test_preprint_list.py
api_tests/base/test_filters.py
"""Support for restoring entity states on startup.""" import asyncio import logging from datetime import timedelta, datetime from typing import Any, Dict, List, Set, Optional # noqa pylint_disable=unused-import from homeassistant.core import ( HomeAssistant, callback, State, CoreState, valid_entity_id, ) from homeassistant.const import EVENT_HOMEASSISTANT_START, EVENT_HOMEASSISTANT_STOP import homeassistant.util.dt as dt_util from homeassistant.helpers.entity import Entity from homeassistant.helpers.event import async_track_time_interval from homeassistant.exceptions import HomeAssistantError from homeassistant.helpers.json import JSONEncoder from homeassistant.helpers.storage import Store # noqa pylint_disable=unused-import # mypy: allow-untyped-calls, allow-untyped-defs, no-check-untyped-defs # mypy: no-warn-return-any DATA_RESTORE_STATE_TASK = "restore_state_task" _LOGGER = logging.getLogger(__name__) STORAGE_KEY = "core.restore_state" STORAGE_VERSION = 1 # How long between periodically saving the current states to disk STATE_DUMP_INTERVAL = timedelta(minutes=15) # How long should a saved state be preserved if the entity no longer exists STATE_EXPIRATION = timedelta(days=7) class StoredState: """Object to represent a stored state.""" def __init__(self, state: State, last_seen: datetime) -> None: """Initialize a new stored state.""" self.state = state self.last_seen = last_seen def as_dict(self) -> Dict: """Return a dict representation of the stored state.""" return {"state": self.state.as_dict(), "last_seen": self.last_seen} @classmethod def from_dict(cls, json_dict: Dict) -> "StoredState": """Initialize a stored state from a dict.""" last_seen = json_dict["last_seen"] if isinstance(last_seen, str): last_seen = dt_util.parse_datetime(last_seen) return cls(State.from_dict(json_dict["state"]), last_seen) class RestoreStateData: """Helper class for managing the helper saved data.""" @classmethod async def async_get_instance(cls, hass: HomeAssistant) -> 
"RestoreStateData": """Get the singleton instance of this data helper.""" task = hass.data.get(DATA_RESTORE_STATE_TASK) if task is None: async def load_instance(hass: HomeAssistant) -> "RestoreStateData": """Set up the restore state helper.""" data = cls(hass) try: stored_states = await data.store.async_load() except HomeAssistantError as exc: _LOGGER.error("Error loading last states", exc_info=exc) stored_states = None if stored_states is None: _LOGGER.debug("Not creating cache - no saved states found") data.last_states = {} else: data.last_states = { item["state"]["entity_id"]: StoredState.from_dict(item) for item in stored_states if valid_entity_id(item["state"]["entity_id"]) } _LOGGER.debug("Created cache with %s", list(data.last_states)) if hass.state == CoreState.running: data.async_setup_dump() else: hass.bus.async_listen_once( EVENT_HOMEASSISTANT_START, data.async_setup_dump ) return data task = hass.data[DATA_RESTORE_STATE_TASK] = hass.async_create_task( load_instance(hass) ) return await task def __init__(self, hass: HomeAssistant) -> None: """Initialize the restore state data class.""" self.hass = hass # type: HomeAssistant self.store = Store( hass, STORAGE_VERSION, STORAGE_KEY, encoder=JSONEncoder ) # type: Store self.last_states = {} # type: Dict[str, StoredState] self.entity_ids = set() # type: Set[str] def async_get_stored_states(self) -> List[StoredState]: """Get the set of states which should be stored. This includes the states of all registered entities, as well as the stored states from the previous run, which have not been created as entities on this run, and have not expired. 
""" now = dt_util.utcnow() all_states = self.hass.states.async_all() current_entity_ids = set(state.entity_id for state in all_states) # Start with the currently registered states stored_states = [ StoredState(state, now) for state in all_states if state.entity_id in self.entity_ids ] expiration_time = now - STATE_EXPIRATION for entity_id, stored_state in self.last_states.items(): # Don't save old states that have entities in the current run if entity_id in current_entity_ids: continue # Don't save old states that have expired if stored_state.last_seen < expiration_time: continue stored_states.append(stored_state) return stored_states async def async_dump_states(self) -> None: """Save the current state machine to storage.""" _LOGGER.debug("Dumping states") try: await self.store.async_save( [ stored_state.as_dict() for stored_state in self.async_get_stored_states() ] ) except HomeAssistantError as exc: _LOGGER.error("Error saving current states", exc_info=exc) @callback def async_setup_dump(self, *args: Any) -> None: """Set up the restore state listeners.""" # Dump the initial states now. This helps minimize the risk of having # old states loaded by overwritting the last states once home assistant # has started and the old states have been read. self.hass.async_create_task(self.async_dump_states()) # Dump states periodically async_track_time_interval( self.hass, lambda *_: self.hass.async_create_task(self.async_dump_states()), STATE_DUMP_INTERVAL, ) # Dump states when stopping hass self.hass.bus.async_listen_once( EVENT_HOMEASSISTANT_STOP, lambda *_: self.hass.async_create_task(self.async_dump_states()), ) @callback def async_restore_entity_added(self, entity_id: str) -> None: """Store this entity's state when hass is shutdown.""" self.entity_ids.add(entity_id) @callback def async_restore_entity_removed(self, entity_id: str) -> None: """Unregister this entity from saving state.""" # When an entity is being removed from hass, store its last state. 
This # allows us to support state restoration if the entity is removed, then # re-added while hass is still running. state = self.hass.states.get(entity_id) # To fully mimic all the attribute data types when loaded from storage, # we're going to serialize it to JSON and then re-load it. if state is not None: state = State.from_dict(_encode_complex(state.as_dict())) if state is not None: self.last_states[entity_id] = StoredState(state, dt_util.utcnow()) self.entity_ids.remove(entity_id) def _encode(value): """Little helper to JSON encode a value.""" try: return JSONEncoder.default(None, value) except TypeError: return value def _encode_complex(value): """Recursively encode all values with the JSONEncoder.""" if isinstance(value, dict): return {_encode(key): _encode_complex(value) for key, value in value.items()} if isinstance(value, list): return [_encode_complex(val) for val in value] new_value = _encode(value) if isinstance(new_value, type(value)): return new_value return _encode_complex(new_value) class RestoreEntity(Entity): """Mixin class for restoring previous entity state.""" async def async_internal_added_to_hass(self) -> None: """Register this entity as a restorable entity.""" assert self.hass is not None _, data = await asyncio.gather( super().async_internal_added_to_hass(), RestoreStateData.async_get_instance(self.hass), ) data.async_restore_entity_added(self.entity_id) async def async_internal_will_remove_from_hass(self) -> None: """Run when entity will be removed from hass.""" assert self.hass is not None _, data = await asyncio.gather( super().async_internal_will_remove_from_hass(), RestoreStateData.async_get_instance(self.hass), ) data.async_restore_entity_removed(self.entity_id) async def async_get_last_state(self) -> Optional[State]: """Get the entity state from the previous run.""" if self.hass is None or self.entity_id is None: # Return None if this entity isn't added to hass yet _LOGGER.warning("Cannot get last state. 
Entity not added to hass") return None data = await RestoreStateData.async_get_instance(self.hass) if self.entity_id not in data.last_states: return None return data.last_states[self.entity_id].state
"""Tests for the iOS init file.""" from unittest.mock import patch import pytest from homeassistant import config_entries, data_entry_flow from homeassistant.setup import async_setup_component from homeassistant.components import ios from tests.common import mock_component, mock_coro @pytest.fixture(autouse=True) def mock_load_json(): """Mock load_json.""" with patch("homeassistant.components.ios.load_json", return_value={}): yield @pytest.fixture(autouse=True) def mock_dependencies(hass): """Mock dependencies loaded.""" mock_component(hass, "zeroconf") mock_component(hass, "device_tracker") async def test_creating_entry_sets_up_sensor(hass): """Test setting up iOS loads the sensor component.""" with patch( "homeassistant.components.ios.sensor.async_setup_entry", return_value=mock_coro(True), ) as mock_setup: result = await hass.config_entries.flow.async_init( ios.DOMAIN, context={"source": config_entries.SOURCE_USER} ) # Confirmation form assert result["type"] == data_entry_flow.RESULT_TYPE_FORM result = await hass.config_entries.flow.async_configure(result["flow_id"], {}) assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY await hass.async_block_till_done() assert len(mock_setup.mock_calls) == 1 async def test_configuring_ios_creates_entry(hass): """Test that specifying config will create an entry.""" with patch( "homeassistant.components.ios.async_setup_entry", return_value=mock_coro(True) ) as mock_setup: await async_setup_component(hass, ios.DOMAIN, {"ios": {"push": {}}}) await hass.async_block_till_done() assert len(mock_setup.mock_calls) == 1 async def test_not_configuring_ios_not_creates_entry(hass): """Test that no config will not create an entry.""" with patch( "homeassistant.components.ios.async_setup_entry", return_value=mock_coro(True) ) as mock_setup: await async_setup_component(hass, ios.DOMAIN, {}) await hass.async_block_till_done() assert len(mock_setup.mock_calls) == 0
fbradyirl/home-assistant
tests/components/ios/test_init.py
homeassistant/helpers/restore_state.py
"""Support turning on/off motion detection on Hikvision cameras.""" import logging import voluptuous as vol from homeassistant.components.sensor import PLATFORM_SCHEMA from homeassistant.const import ( CONF_NAME, CONF_HOST, CONF_PASSWORD, CONF_USERNAME, CONF_PORT, STATE_OFF, STATE_ON, ) from homeassistant.helpers.entity import ToggleEntity import homeassistant.helpers.config_validation as cv # This is the last working version, please test before updating _LOGGING = logging.getLogger(__name__) DEFAULT_NAME = "Hikvision Camera Motion Detection" DEFAULT_PASSWORD = "12345" DEFAULT_PORT = 80 DEFAULT_USERNAME = "admin" PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend( { vol.Required(CONF_HOST): cv.string, vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string, vol.Optional(CONF_PASSWORD, default=DEFAULT_PASSWORD): cv.string, vol.Optional(CONF_PORT): cv.port, vol.Optional(CONF_USERNAME, default=DEFAULT_USERNAME): cv.string, } ) def setup_platform(hass, config, add_entities, discovery_info=None): """Set up Hikvision camera.""" import hikvision.api from hikvision.error import HikvisionError, MissingParamError host = config.get(CONF_HOST) port = config.get(CONF_PORT) name = config.get(CONF_NAME) username = config.get(CONF_USERNAME) password = config.get(CONF_PASSWORD) try: hikvision_cam = hikvision.api.CreateDevice( host, port=port, username=username, password=password, is_https=False ) except MissingParamError as param_err: _LOGGING.error("Missing required param: %s", param_err) return False except HikvisionError as conn_err: _LOGGING.error("Unable to connect: %s", conn_err) return False add_entities([HikvisionMotionSwitch(name, hikvision_cam)]) class HikvisionMotionSwitch(ToggleEntity): """Representation of a switch to toggle on/off motion detection.""" def __init__(self, name, hikvision_cam): """Initialize the switch.""" self._name = name self._hikvision_cam = hikvision_cam self._state = STATE_OFF @property def should_poll(self): """Poll for status regularly.""" return True 
@property def name(self): """Return the name of the device if any.""" return self._name @property def state(self): """Return the state of the device if any.""" return self._state @property def is_on(self): """Return true if device is on.""" return self._state == STATE_ON def turn_on(self, **kwargs): """Turn the device on.""" _LOGGING.info("Turning on Motion Detection ") self._hikvision_cam.enable_motion_detection() def turn_off(self, **kwargs): """Turn the device off.""" _LOGGING.info("Turning off Motion Detection ") self._hikvision_cam.disable_motion_detection() def update(self): """Update Motion Detection state.""" enabled = self._hikvision_cam.is_motion_detection_enabled() _LOGGING.info("enabled: %s", enabled) self._state = STATE_ON if enabled else STATE_OFF
"""Tests for the iOS init file.""" from unittest.mock import patch import pytest from homeassistant import config_entries, data_entry_flow from homeassistant.setup import async_setup_component from homeassistant.components import ios from tests.common import mock_component, mock_coro @pytest.fixture(autouse=True) def mock_load_json(): """Mock load_json.""" with patch("homeassistant.components.ios.load_json", return_value={}): yield @pytest.fixture(autouse=True) def mock_dependencies(hass): """Mock dependencies loaded.""" mock_component(hass, "zeroconf") mock_component(hass, "device_tracker") async def test_creating_entry_sets_up_sensor(hass): """Test setting up iOS loads the sensor component.""" with patch( "homeassistant.components.ios.sensor.async_setup_entry", return_value=mock_coro(True), ) as mock_setup: result = await hass.config_entries.flow.async_init( ios.DOMAIN, context={"source": config_entries.SOURCE_USER} ) # Confirmation form assert result["type"] == data_entry_flow.RESULT_TYPE_FORM result = await hass.config_entries.flow.async_configure(result["flow_id"], {}) assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY await hass.async_block_till_done() assert len(mock_setup.mock_calls) == 1 async def test_configuring_ios_creates_entry(hass): """Test that specifying config will create an entry.""" with patch( "homeassistant.components.ios.async_setup_entry", return_value=mock_coro(True) ) as mock_setup: await async_setup_component(hass, ios.DOMAIN, {"ios": {"push": {}}}) await hass.async_block_till_done() assert len(mock_setup.mock_calls) == 1 async def test_not_configuring_ios_not_creates_entry(hass): """Test that no config will not create an entry.""" with patch( "homeassistant.components.ios.async_setup_entry", return_value=mock_coro(True) ) as mock_setup: await async_setup_component(hass, ios.DOMAIN, {}) await hass.async_block_till_done() assert len(mock_setup.mock_calls) == 0
fbradyirl/home-assistant
tests/components/ios/test_init.py
homeassistant/components/hikvisioncam/switch.py
"""Support for Neato Connected Vacuums.""" from datetime import timedelta import logging import requests import voluptuous as vol from homeassistant.components.vacuum import ( ATTR_BATTERY_ICON, ATTR_BATTERY_LEVEL, ATTR_STATUS, DOMAIN, STATE_CLEANING, STATE_DOCKED, STATE_ERROR, STATE_IDLE, STATE_PAUSED, STATE_RETURNING, SUPPORT_BATTERY, SUPPORT_CLEAN_SPOT, SUPPORT_LOCATE, SUPPORT_MAP, SUPPORT_PAUSE, SUPPORT_RETURN_HOME, SUPPORT_START, SUPPORT_STATE, SUPPORT_STOP, StateVacuumDevice, ) from homeassistant.const import ATTR_ENTITY_ID import homeassistant.helpers.config_validation as cv from homeassistant.helpers.service import extract_entity_ids from . import ( ACTION, ALERTS, ERRORS, MODE, NEATO_LOGIN, NEATO_MAP_DATA, NEATO_PERSISTENT_MAPS, NEATO_ROBOTS, ) _LOGGER = logging.getLogger(__name__) SCAN_INTERVAL = timedelta(minutes=5) SUPPORT_NEATO = ( SUPPORT_BATTERY | SUPPORT_PAUSE | SUPPORT_RETURN_HOME | SUPPORT_STOP | SUPPORT_START | SUPPORT_CLEAN_SPOT | SUPPORT_STATE | SUPPORT_MAP | SUPPORT_LOCATE ) ATTR_CLEAN_START = "clean_start" ATTR_CLEAN_STOP = "clean_stop" ATTR_CLEAN_AREA = "clean_area" ATTR_CLEAN_BATTERY_START = "battery_level_at_clean_start" ATTR_CLEAN_BATTERY_END = "battery_level_at_clean_end" ATTR_CLEAN_SUSP_COUNT = "clean_suspension_count" ATTR_CLEAN_SUSP_TIME = "clean_suspension_time" ATTR_MODE = "mode" ATTR_NAVIGATION = "navigation" ATTR_CATEGORY = "category" ATTR_ZONE = "zone" SERVICE_NEATO_CUSTOM_CLEANING = "neato_custom_cleaning" SERVICE_NEATO_CUSTOM_CLEANING_SCHEMA = vol.Schema( { vol.Required(ATTR_ENTITY_ID): cv.entity_ids, vol.Optional(ATTR_MODE, default=2): cv.positive_int, vol.Optional(ATTR_NAVIGATION, default=1): cv.positive_int, vol.Optional(ATTR_CATEGORY, default=4): cv.positive_int, vol.Optional(ATTR_ZONE): cv.string, } ) def setup_platform(hass, config, add_entities, discovery_info=None): """Set up the Neato vacuum.""" dev = [] for robot in hass.data[NEATO_ROBOTS]: dev.append(NeatoConnectedVacuum(hass, robot)) if not dev: return 
_LOGGER.debug("Adding vacuums %s", dev) add_entities(dev, True) def neato_custom_cleaning_service(call): """Zone cleaning service that allows user to change options.""" for robot in service_to_entities(call): if call.service == SERVICE_NEATO_CUSTOM_CLEANING: mode = call.data.get(ATTR_MODE) navigation = call.data.get(ATTR_NAVIGATION) category = call.data.get(ATTR_CATEGORY) zone = call.data.get(ATTR_ZONE) robot.neato_custom_cleaning(mode, navigation, category, zone) def service_to_entities(call): """Return the known devices that a service call mentions.""" entity_ids = extract_entity_ids(hass, call) entities = [entity for entity in dev if entity.entity_id in entity_ids] return entities hass.services.register( DOMAIN, SERVICE_NEATO_CUSTOM_CLEANING, neato_custom_cleaning_service, schema=SERVICE_NEATO_CUSTOM_CLEANING_SCHEMA, ) class NeatoConnectedVacuum(StateVacuumDevice): """Representation of a Neato Connected Vacuum.""" def __init__(self, hass, robot): """Initialize the Neato Connected Vacuum.""" self.robot = robot self.neato = hass.data[NEATO_LOGIN] self._name = "{}".format(self.robot.name) self._status_state = None self._clean_state = None self._state = None self._mapdata = hass.data[NEATO_MAP_DATA] self.clean_time_start = None self.clean_time_stop = None self.clean_area = None self.clean_battery_start = None self.clean_battery_end = None self.clean_suspension_charge_count = None self.clean_suspension_time = None self._available = False self._battery_level = None self._robot_serial = self.robot.serial self._robot_maps = hass.data[NEATO_PERSISTENT_MAPS] self._robot_boundaries = {} self._robot_has_map = self.robot.has_persistent_maps def update(self): """Update the states of Neato Vacuums.""" _LOGGER.debug("Running Neato Vacuums update") self.neato.update_robots() try: self._state = self.robot.state self._available = True except ( requests.exceptions.ConnectionError, requests.exceptions.HTTPError, ) as ex: _LOGGER.warning("Neato connection error: %s", ex) self._state 
= None self._available = False return _LOGGER.debug("self._state=%s", self._state) if "alert" in self._state: robot_alert = ALERTS.get(self._state["alert"]) else: robot_alert = None if self._state["state"] == 1: if self._state["details"]["isCharging"]: self._clean_state = STATE_DOCKED self._status_state = "Charging" elif ( self._state["details"]["isDocked"] and not self._state["details"]["isCharging"] ): self._clean_state = STATE_DOCKED self._status_state = "Docked" else: self._clean_state = STATE_IDLE self._status_state = "Stopped" if robot_alert is not None: self._status_state = robot_alert elif self._state["state"] == 2: if robot_alert is None: self._clean_state = STATE_CLEANING self._status_state = ( MODE.get(self._state["cleaning"]["mode"]) + " " + ACTION.get(self._state["action"]) ) else: self._status_state = robot_alert elif self._state["state"] == 3: self._clean_state = STATE_PAUSED self._status_state = "Paused" elif self._state["state"] == 4: self._clean_state = STATE_ERROR self._status_state = ERRORS.get(self._state["error"]) self._battery_level = self._state["details"]["charge"] if not self._mapdata.get(self._robot_serial, {}).get("maps", []): return self.clean_time_start = ( self._mapdata[self._robot_serial]["maps"][0]["start_at"].strip("Z") ).replace("T", " ") self.clean_time_stop = ( self._mapdata[self._robot_serial]["maps"][0]["end_at"].strip("Z") ).replace("T", " ") self.clean_area = self._mapdata[self._robot_serial]["maps"][0]["cleaned_area"] self.clean_suspension_charge_count = self._mapdata[self._robot_serial]["maps"][ 0 ]["suspended_cleaning_charging_count"] self.clean_suspension_time = self._mapdata[self._robot_serial]["maps"][0][ "time_in_suspended_cleaning" ] self.clean_battery_start = self._mapdata[self._robot_serial]["maps"][0][ "run_charge_at_start" ] self.clean_battery_end = self._mapdata[self._robot_serial]["maps"][0][ "run_charge_at_end" ] if self._robot_has_map: if self._state["availableServices"]["maps"] != "basic-1": if 
self._robot_maps[self._robot_serial]: allmaps = self._robot_maps[self._robot_serial] for maps in allmaps: self._robot_boundaries = self.robot.get_map_boundaries( maps["id"] ).json() @property def name(self): """Return the name of the device.""" return self._name @property def supported_features(self): """Flag vacuum cleaner robot features that are supported.""" return SUPPORT_NEATO @property def battery_level(self): """Return the battery level of the vacuum cleaner.""" return self._battery_level @property def available(self): """Return if the robot is available.""" return self._available @property def state(self): """Return the status of the vacuum cleaner.""" return self._clean_state @property def unique_id(self): """Return a unique ID.""" return self._robot_serial @property def device_state_attributes(self): """Return the state attributes of the vacuum cleaner.""" data = {} if self._status_state is not None: data[ATTR_STATUS] = self._status_state if self.battery_level is not None: data[ATTR_BATTERY_LEVEL] = self.battery_level data[ATTR_BATTERY_ICON] = self.battery_icon if self.clean_time_start is not None: data[ATTR_CLEAN_START] = self.clean_time_start if self.clean_time_stop is not None: data[ATTR_CLEAN_STOP] = self.clean_time_stop if self.clean_area is not None: data[ATTR_CLEAN_AREA] = self.clean_area if self.clean_suspension_charge_count is not None: data[ATTR_CLEAN_SUSP_COUNT] = self.clean_suspension_charge_count if self.clean_suspension_time is not None: data[ATTR_CLEAN_SUSP_TIME] = self.clean_suspension_time if self.clean_battery_start is not None: data[ATTR_CLEAN_BATTERY_START] = self.clean_battery_start if self.clean_battery_end is not None: data[ATTR_CLEAN_BATTERY_END] = self.clean_battery_end return data def start(self): """Start cleaning or resume cleaning.""" if self._state["state"] == 1: self.robot.start_cleaning() elif self._state["state"] == 3: self.robot.resume_cleaning() def pause(self): """Pause the vacuum.""" self.robot.pause_cleaning() def 
return_to_base(self, **kwargs): """Set the vacuum cleaner to return to the dock.""" if self._clean_state == STATE_CLEANING: self.robot.pause_cleaning() self._clean_state = STATE_RETURNING self.robot.send_to_base() def stop(self, **kwargs): """Stop the vacuum cleaner.""" self.robot.stop_cleaning() def locate(self, **kwargs): """Locate the robot by making it emit a sound.""" self.robot.locate() def clean_spot(self, **kwargs): """Run a spot cleaning starting from the base.""" self.robot.start_spot_cleaning() def neato_custom_cleaning(self, mode, navigation, category, zone=None, **kwargs): """Zone cleaning service call.""" boundary_id = None if zone is not None: for boundary in self._robot_boundaries["data"]["boundaries"]: if zone in boundary["name"]: boundary_id = boundary["id"] if boundary_id is None: _LOGGER.error( "Zone '%s' was not found for the robot '%s'", zone, self._name ) return self._clean_state = STATE_CLEANING self.robot.start_cleaning(mode, navigation, category, boundary_id)
"""Tests for the iOS init file.""" from unittest.mock import patch import pytest from homeassistant import config_entries, data_entry_flow from homeassistant.setup import async_setup_component from homeassistant.components import ios from tests.common import mock_component, mock_coro @pytest.fixture(autouse=True) def mock_load_json(): """Mock load_json.""" with patch("homeassistant.components.ios.load_json", return_value={}): yield @pytest.fixture(autouse=True) def mock_dependencies(hass): """Mock dependencies loaded.""" mock_component(hass, "zeroconf") mock_component(hass, "device_tracker") async def test_creating_entry_sets_up_sensor(hass): """Test setting up iOS loads the sensor component.""" with patch( "homeassistant.components.ios.sensor.async_setup_entry", return_value=mock_coro(True), ) as mock_setup: result = await hass.config_entries.flow.async_init( ios.DOMAIN, context={"source": config_entries.SOURCE_USER} ) # Confirmation form assert result["type"] == data_entry_flow.RESULT_TYPE_FORM result = await hass.config_entries.flow.async_configure(result["flow_id"], {}) assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY await hass.async_block_till_done() assert len(mock_setup.mock_calls) == 1 async def test_configuring_ios_creates_entry(hass): """Test that specifying config will create an entry.""" with patch( "homeassistant.components.ios.async_setup_entry", return_value=mock_coro(True) ) as mock_setup: await async_setup_component(hass, ios.DOMAIN, {"ios": {"push": {}}}) await hass.async_block_till_done() assert len(mock_setup.mock_calls) == 1 async def test_not_configuring_ios_not_creates_entry(hass): """Test that no config will not create an entry.""" with patch( "homeassistant.components.ios.async_setup_entry", return_value=mock_coro(True) ) as mock_setup: await async_setup_component(hass, ios.DOMAIN, {}) await hass.async_block_till_done() assert len(mock_setup.mock_calls) == 0
fbradyirl/home-assistant
tests/components/ios/test_init.py
homeassistant/components/neato/vacuum.py
"""Support for NuHeat thermostats.""" from datetime import timedelta import logging import voluptuous as vol from homeassistant.components.climate import ClimateDevice from homeassistant.components.climate.const import ( HVAC_MODE_AUTO, HVAC_MODE_HEAT, HVAC_MODE_OFF, SUPPORT_PRESET_MODE, SUPPORT_TARGET_TEMPERATURE, PRESET_NONE, ) from homeassistant.const import ( ATTR_ENTITY_ID, ATTR_TEMPERATURE, TEMP_CELSIUS, TEMP_FAHRENHEIT, ) import homeassistant.helpers.config_validation as cv from homeassistant.util import Throttle from . import DOMAIN as NUHEAT_DOMAIN _LOGGER = logging.getLogger(__name__) MIN_TIME_BETWEEN_UPDATES = timedelta(minutes=5) # Hold modes MODE_AUTO = HVAC_MODE_AUTO # Run device schedule MODE_HOLD_TEMPERATURE = "temperature" MODE_TEMPORARY_HOLD = "temporary_temperature" OPERATION_LIST = [HVAC_MODE_HEAT, HVAC_MODE_OFF] SCHEDULE_HOLD = 3 SCHEDULE_RUN = 1 SCHEDULE_TEMPORARY_HOLD = 2 SERVICE_RESUME_PROGRAM = "resume_program" RESUME_PROGRAM_SCHEMA = vol.Schema({vol.Optional(ATTR_ENTITY_ID): cv.entity_ids}) SUPPORT_FLAGS = SUPPORT_TARGET_TEMPERATURE | SUPPORT_PRESET_MODE def setup_platform(hass, config, add_entities, discovery_info=None): """Set up the NuHeat thermostat(s).""" if discovery_info is None: return temperature_unit = hass.config.units.temperature_unit api, serial_numbers = hass.data[NUHEAT_DOMAIN] thermostats = [ NuHeatThermostat(api, serial_number, temperature_unit) for serial_number in serial_numbers ] add_entities(thermostats, True) def resume_program_set_service(service): """Resume the program on the target thermostats.""" entity_id = service.data.get(ATTR_ENTITY_ID) if entity_id: target_thermostats = [ device for device in thermostats if device.entity_id in entity_id ] else: target_thermostats = thermostats for thermostat in target_thermostats: thermostat.resume_program() thermostat.schedule_update_ha_state(True) hass.services.register( NUHEAT_DOMAIN, SERVICE_RESUME_PROGRAM, resume_program_set_service, schema=RESUME_PROGRAM_SCHEMA, ) class 
NuHeatThermostat(ClimateDevice): """Representation of a NuHeat Thermostat.""" def __init__(self, api, serial_number, temperature_unit): """Initialize the thermostat.""" self._thermostat = api.get_thermostat(serial_number) self._temperature_unit = temperature_unit self._force_update = False @property def name(self): """Return the name of the thermostat.""" return self._thermostat.room @property def supported_features(self): """Return the list of supported features.""" return SUPPORT_FLAGS @property def temperature_unit(self): """Return the unit of measurement.""" if self._temperature_unit == "C": return TEMP_CELSIUS return TEMP_FAHRENHEIT @property def current_temperature(self): """Return the current temperature.""" if self._temperature_unit == "C": return self._thermostat.celsius return self._thermostat.fahrenheit @property def hvac_mode(self): """Return current operation. ie. heat, idle.""" if self._thermostat.heating: return HVAC_MODE_HEAT return HVAC_MODE_OFF @property def min_temp(self): """Return the minimum supported temperature for the thermostat.""" if self._temperature_unit == "C": return self._thermostat.min_celsius return self._thermostat.min_fahrenheit @property def max_temp(self): """Return the maximum supported temperature for the thermostat.""" if self._temperature_unit == "C": return self._thermostat.max_celsius return self._thermostat.max_fahrenheit @property def target_temperature(self): """Return the currently programmed temperature.""" if self._temperature_unit == "C": return self._thermostat.target_celsius return self._thermostat.target_fahrenheit @property def preset_mode(self): """Return current preset mode.""" schedule_mode = self._thermostat.schedule_mode if schedule_mode == SCHEDULE_RUN: return MODE_AUTO if schedule_mode == SCHEDULE_HOLD: return MODE_HOLD_TEMPERATURE if schedule_mode == SCHEDULE_TEMPORARY_HOLD: return MODE_TEMPORARY_HOLD return MODE_AUTO @property def preset_modes(self): """Return available preset modes.""" return 
[PRESET_NONE, MODE_HOLD_TEMPERATURE, MODE_TEMPORARY_HOLD] @property def hvac_modes(self): """Return list of possible operation modes.""" return OPERATION_LIST def resume_program(self): """Resume the thermostat's programmed schedule.""" self._thermostat.resume_schedule() self._force_update = True def set_preset_mode(self, preset_mode): """Update the hold mode of the thermostat.""" if preset_mode == PRESET_NONE: schedule_mode = SCHEDULE_RUN elif preset_mode == MODE_HOLD_TEMPERATURE: schedule_mode = SCHEDULE_HOLD elif preset_mode == MODE_TEMPORARY_HOLD: schedule_mode = SCHEDULE_TEMPORARY_HOLD self._thermostat.schedule_mode = schedule_mode self._force_update = True def set_temperature(self, **kwargs): """Set a new target temperature.""" temperature = kwargs.get(ATTR_TEMPERATURE) if self._temperature_unit == "C": self._thermostat.target_celsius = temperature else: self._thermostat.target_fahrenheit = temperature _LOGGER.debug( "Setting NuHeat thermostat temperature to %s %s", temperature, self.temperature_unit, ) self._force_update = True def update(self): """Get the latest state from the thermostat.""" if self._force_update: self._throttled_update(no_throttle=True) self._force_update = False else: self._throttled_update() @Throttle(MIN_TIME_BETWEEN_UPDATES) def _throttled_update(self, **kwargs): """Get the latest state from the thermostat with a throttle.""" self._thermostat.get_data()
"""Tests for the iOS init file.""" from unittest.mock import patch import pytest from homeassistant import config_entries, data_entry_flow from homeassistant.setup import async_setup_component from homeassistant.components import ios from tests.common import mock_component, mock_coro @pytest.fixture(autouse=True) def mock_load_json(): """Mock load_json.""" with patch("homeassistant.components.ios.load_json", return_value={}): yield @pytest.fixture(autouse=True) def mock_dependencies(hass): """Mock dependencies loaded.""" mock_component(hass, "zeroconf") mock_component(hass, "device_tracker") async def test_creating_entry_sets_up_sensor(hass): """Test setting up iOS loads the sensor component.""" with patch( "homeassistant.components.ios.sensor.async_setup_entry", return_value=mock_coro(True), ) as mock_setup: result = await hass.config_entries.flow.async_init( ios.DOMAIN, context={"source": config_entries.SOURCE_USER} ) # Confirmation form assert result["type"] == data_entry_flow.RESULT_TYPE_FORM result = await hass.config_entries.flow.async_configure(result["flow_id"], {}) assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY await hass.async_block_till_done() assert len(mock_setup.mock_calls) == 1 async def test_configuring_ios_creates_entry(hass): """Test that specifying config will create an entry.""" with patch( "homeassistant.components.ios.async_setup_entry", return_value=mock_coro(True) ) as mock_setup: await async_setup_component(hass, ios.DOMAIN, {"ios": {"push": {}}}) await hass.async_block_till_done() assert len(mock_setup.mock_calls) == 1 async def test_not_configuring_ios_not_creates_entry(hass): """Test that no config will not create an entry.""" with patch( "homeassistant.components.ios.async_setup_entry", return_value=mock_coro(True) ) as mock_setup: await async_setup_component(hass, ios.DOMAIN, {}) await hass.async_block_till_done() assert len(mock_setup.mock_calls) == 0
fbradyirl/home-assistant
tests/components/ios/test_init.py
homeassistant/components/nuheat/climate.py
"""Support for Homekit covers.""" import logging from homeassistant.components.cover import ( ATTR_POSITION, ATTR_TILT_POSITION, SUPPORT_CLOSE, SUPPORT_CLOSE_TILT, SUPPORT_OPEN, SUPPORT_OPEN_TILT, SUPPORT_SET_POSITION, SUPPORT_STOP, SUPPORT_SET_TILT_POSITION, CoverDevice, ) from homeassistant.const import STATE_CLOSED, STATE_CLOSING, STATE_OPEN, STATE_OPENING from . import KNOWN_DEVICES, HomeKitEntity STATE_STOPPED = "stopped" _LOGGER = logging.getLogger(__name__) CURRENT_GARAGE_STATE_MAP = { 0: STATE_OPEN, 1: STATE_CLOSED, 2: STATE_OPENING, 3: STATE_CLOSING, 4: STATE_STOPPED, } TARGET_GARAGE_STATE_MAP = {STATE_OPEN: 0, STATE_CLOSED: 1, STATE_STOPPED: 2} CURRENT_WINDOW_STATE_MAP = {0: STATE_OPENING, 1: STATE_CLOSING, 2: STATE_STOPPED} async def async_setup_platform(hass, config, async_add_entities, discovery_info=None): """Legacy set up platform.""" pass async def async_setup_entry(hass, config_entry, async_add_entities): """Set up Homekit covers.""" hkid = config_entry.data["AccessoryPairingID"] conn = hass.data[KNOWN_DEVICES][hkid] def async_add_service(aid, service): info = {"aid": aid, "iid": service["iid"]} if service["stype"] == "garage-door-opener": async_add_entities([HomeKitGarageDoorCover(conn, info)], True) return True if service["stype"] in ("window-covering", "window"): async_add_entities([HomeKitWindowCover(conn, info)], True) return True return False conn.add_listener(async_add_service) class HomeKitGarageDoorCover(HomeKitEntity, CoverDevice): """Representation of a HomeKit Garage Door.""" def __init__(self, accessory, discovery_info): """Initialise the Cover.""" super().__init__(accessory, discovery_info) self._state = None self._obstruction_detected = None self.lock_state = None @property def device_class(self): """Define this cover as a garage door.""" return "garage" def get_characteristic_types(self): """Define the homekit characteristics the entity cares about.""" # pylint: disable=import-error from homekit.model.characteristics import 
CharacteristicsTypes return [ CharacteristicsTypes.DOOR_STATE_CURRENT, CharacteristicsTypes.DOOR_STATE_TARGET, CharacteristicsTypes.OBSTRUCTION_DETECTED, ] def _update_door_state_current(self, value): self._state = CURRENT_GARAGE_STATE_MAP[value] def _update_obstruction_detected(self, value): self._obstruction_detected = value @property def supported_features(self): """Flag supported features.""" return SUPPORT_OPEN | SUPPORT_CLOSE @property def is_closed(self): """Return true if cover is closed, else False.""" return self._state == STATE_CLOSED @property def is_closing(self): """Return if the cover is closing or not.""" return self._state == STATE_CLOSING @property def is_opening(self): """Return if the cover is opening or not.""" return self._state == STATE_OPENING async def async_open_cover(self, **kwargs): """Send open command.""" await self.set_door_state(STATE_OPEN) async def async_close_cover(self, **kwargs): """Send close command.""" await self.set_door_state(STATE_CLOSED) async def set_door_state(self, state): """Send state command.""" characteristics = [ { "aid": self._aid, "iid": self._chars["door-state.target"], "value": TARGET_GARAGE_STATE_MAP[state], } ] await self._accessory.put_characteristics(characteristics) @property def device_state_attributes(self): """Return the optional state attributes.""" if self._obstruction_detected is None: return None return {"obstruction-detected": self._obstruction_detected} class HomeKitWindowCover(HomeKitEntity, CoverDevice): """Representation of a HomeKit Window or Window Covering.""" def __init__(self, accessory, discovery_info): """Initialise the Cover.""" super().__init__(accessory, discovery_info) self._state = None self._position = None self._tilt_position = None self._obstruction_detected = None self.lock_state = None self._features = SUPPORT_OPEN | SUPPORT_CLOSE | SUPPORT_SET_POSITION def get_characteristic_types(self): """Define the homekit characteristics the entity cares about.""" # pylint: 
disable=import-error from homekit.model.characteristics import CharacteristicsTypes return [ CharacteristicsTypes.POSITION_STATE, CharacteristicsTypes.POSITION_CURRENT, CharacteristicsTypes.POSITION_TARGET, CharacteristicsTypes.POSITION_HOLD, CharacteristicsTypes.VERTICAL_TILT_CURRENT, CharacteristicsTypes.VERTICAL_TILT_TARGET, CharacteristicsTypes.HORIZONTAL_TILT_CURRENT, CharacteristicsTypes.HORIZONTAL_TILT_TARGET, CharacteristicsTypes.OBSTRUCTION_DETECTED, ] def _setup_position_hold(self, char): self._features |= SUPPORT_STOP def _setup_vertical_tilt_current(self, char): self._features |= ( SUPPORT_OPEN_TILT | SUPPORT_CLOSE_TILT | SUPPORT_SET_TILT_POSITION ) def _setup_horizontal_tilt_current(self, char): self._features |= ( SUPPORT_OPEN_TILT | SUPPORT_CLOSE_TILT | SUPPORT_SET_TILT_POSITION ) def _update_position_state(self, value): self._state = CURRENT_WINDOW_STATE_MAP[value] def _update_position_current(self, value): self._position = value def _update_vertical_tilt_current(self, value): self._tilt_position = value def _update_horizontal_tilt_current(self, value): self._tilt_position = value def _update_obstruction_detected(self, value): self._obstruction_detected = value @property def supported_features(self): """Flag supported features.""" return self._features @property def current_cover_position(self): """Return the current position of cover.""" return self._position @property def is_closed(self): """Return true if cover is closed, else False.""" return self._position == 0 @property def is_closing(self): """Return if the cover is closing or not.""" return self._state == STATE_CLOSING @property def is_opening(self): """Return if the cover is opening or not.""" return self._state == STATE_OPENING async def async_stop_cover(self, **kwargs): """Send hold command.""" characteristics = [ {"aid": self._aid, "iid": self._chars["position.hold"], "value": 1} ] await self._accessory.put_characteristics(characteristics) async def async_open_cover(self, **kwargs): 
"""Send open command.""" await self.async_set_cover_position(position=100) async def async_close_cover(self, **kwargs): """Send close command.""" await self.async_set_cover_position(position=0) async def async_set_cover_position(self, **kwargs): """Send position command.""" position = kwargs[ATTR_POSITION] characteristics = [ {"aid": self._aid, "iid": self._chars["position.target"], "value": position} ] await self._accessory.put_characteristics(characteristics) @property def current_cover_tilt_position(self): """Return current position of cover tilt.""" return self._tilt_position async def async_set_cover_tilt_position(self, **kwargs): """Move the cover tilt to a specific position.""" tilt_position = kwargs[ATTR_TILT_POSITION] if "vertical-tilt.target" in self._chars: characteristics = [ { "aid": self._aid, "iid": self._chars["vertical-tilt.target"], "value": tilt_position, } ] await self._accessory.put_characteristics(characteristics) elif "horizontal-tilt.target" in self._chars: characteristics = [ { "aid": self._aid, "iid": self._chars["horizontal-tilt.target"], "value": tilt_position, } ] await self._accessory.put_characteristics(characteristics) @property def device_state_attributes(self): """Return the optional state attributes.""" state_attributes = {} if self._obstruction_detected is not None: state_attributes["obstruction-detected"] = self._obstruction_detected return state_attributes
"""Tests for the iOS init file.""" from unittest.mock import patch import pytest from homeassistant import config_entries, data_entry_flow from homeassistant.setup import async_setup_component from homeassistant.components import ios from tests.common import mock_component, mock_coro @pytest.fixture(autouse=True) def mock_load_json(): """Mock load_json.""" with patch("homeassistant.components.ios.load_json", return_value={}): yield @pytest.fixture(autouse=True) def mock_dependencies(hass): """Mock dependencies loaded.""" mock_component(hass, "zeroconf") mock_component(hass, "device_tracker") async def test_creating_entry_sets_up_sensor(hass): """Test setting up iOS loads the sensor component.""" with patch( "homeassistant.components.ios.sensor.async_setup_entry", return_value=mock_coro(True), ) as mock_setup: result = await hass.config_entries.flow.async_init( ios.DOMAIN, context={"source": config_entries.SOURCE_USER} ) # Confirmation form assert result["type"] == data_entry_flow.RESULT_TYPE_FORM result = await hass.config_entries.flow.async_configure(result["flow_id"], {}) assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY await hass.async_block_till_done() assert len(mock_setup.mock_calls) == 1 async def test_configuring_ios_creates_entry(hass): """Test that specifying config will create an entry.""" with patch( "homeassistant.components.ios.async_setup_entry", return_value=mock_coro(True) ) as mock_setup: await async_setup_component(hass, ios.DOMAIN, {"ios": {"push": {}}}) await hass.async_block_till_done() assert len(mock_setup.mock_calls) == 1 async def test_not_configuring_ios_not_creates_entry(hass): """Test that no config will not create an entry.""" with patch( "homeassistant.components.ios.async_setup_entry", return_value=mock_coro(True) ) as mock_setup: await async_setup_component(hass, ios.DOMAIN, {}) await hass.async_block_till_done() assert len(mock_setup.mock_calls) == 0
fbradyirl/home-assistant
tests/components/ios/test_init.py
homeassistant/components/homekit_controller/cover.py
"""Support for Telegram bot using polling.""" import logging from homeassistant.const import EVENT_HOMEASSISTANT_START, EVENT_HOMEASSISTANT_STOP from homeassistant.core import callback from . import CONF_ALLOWED_CHAT_IDS, BaseTelegramBotEntity, initialize_bot _LOGGER = logging.getLogger(__name__) async def async_setup_platform(hass, config): """Set up the Telegram polling platform.""" bot = initialize_bot(config) pol = TelegramPoll(bot, hass, config[CONF_ALLOWED_CHAT_IDS]) @callback def _start_bot(_event): """Start the bot.""" pol.start_polling() @callback def _stop_bot(_event): """Stop the bot.""" pol.stop_polling() hass.bus.async_listen_once(EVENT_HOMEASSISTANT_START, _start_bot) hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, _stop_bot) return True def process_error(bot, update, error): """Telegram bot error handler.""" from telegram.error import TelegramError, TimedOut, NetworkError, RetryAfter try: raise error except (TimedOut, NetworkError, RetryAfter): # Long polling timeout or connection problem. Nothing serious. 
pass except TelegramError: _LOGGER.error('Update "%s" caused error "%s"', update, error) def message_handler(handler): """Create messages handler.""" from telegram import Update from telegram.ext import Handler class MessageHandler(Handler): """Telegram bot message handler.""" def __init__(self): """Initialize the messages handler instance.""" super().__init__(handler) def check_update(self, update): # pylint: disable=no-self-use """Check is update valid.""" return isinstance(update, Update) def handle_update(self, update, dispatcher): """Handle update.""" optional_args = self.collect_optional_args(dispatcher, update) return self.callback(dispatcher.bot, update, **optional_args) return MessageHandler() class TelegramPoll(BaseTelegramBotEntity): """Asyncio telegram incoming message handler.""" def __init__(self, bot, hass, allowed_chat_ids): """Initialize the polling instance.""" from telegram.ext import Updater BaseTelegramBotEntity.__init__(self, hass, allowed_chat_ids) self.updater = Updater(bot=bot, workers=4) self.dispatcher = self.updater.dispatcher self.dispatcher.add_handler(message_handler(self.process_update)) self.dispatcher.add_error_handler(process_error) def start_polling(self): """Start the polling task.""" self.updater.start_polling() def stop_polling(self): """Stop the polling task.""" self.updater.stop() def process_update(self, bot, update): """Process incoming message.""" self.process_message(update.to_dict())
"""Tests for the iOS init file.""" from unittest.mock import patch import pytest from homeassistant import config_entries, data_entry_flow from homeassistant.setup import async_setup_component from homeassistant.components import ios from tests.common import mock_component, mock_coro @pytest.fixture(autouse=True) def mock_load_json(): """Mock load_json.""" with patch("homeassistant.components.ios.load_json", return_value={}): yield @pytest.fixture(autouse=True) def mock_dependencies(hass): """Mock dependencies loaded.""" mock_component(hass, "zeroconf") mock_component(hass, "device_tracker") async def test_creating_entry_sets_up_sensor(hass): """Test setting up iOS loads the sensor component.""" with patch( "homeassistant.components.ios.sensor.async_setup_entry", return_value=mock_coro(True), ) as mock_setup: result = await hass.config_entries.flow.async_init( ios.DOMAIN, context={"source": config_entries.SOURCE_USER} ) # Confirmation form assert result["type"] == data_entry_flow.RESULT_TYPE_FORM result = await hass.config_entries.flow.async_configure(result["flow_id"], {}) assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY await hass.async_block_till_done() assert len(mock_setup.mock_calls) == 1 async def test_configuring_ios_creates_entry(hass): """Test that specifying config will create an entry.""" with patch( "homeassistant.components.ios.async_setup_entry", return_value=mock_coro(True) ) as mock_setup: await async_setup_component(hass, ios.DOMAIN, {"ios": {"push": {}}}) await hass.async_block_till_done() assert len(mock_setup.mock_calls) == 1 async def test_not_configuring_ios_not_creates_entry(hass): """Test that no config will not create an entry.""" with patch( "homeassistant.components.ios.async_setup_entry", return_value=mock_coro(True) ) as mock_setup: await async_setup_component(hass, ios.DOMAIN, {}) await hass.async_block_till_done() assert len(mock_setup.mock_calls) == 0
fbradyirl/home-assistant
tests/components/ios/test_init.py
homeassistant/components/telegram_bot/polling.py
"""Constants for Google Hangouts Component.""" import logging import voluptuous as vol from homeassistant.components.notify import ATTR_DATA, ATTR_MESSAGE, ATTR_TARGET import homeassistant.helpers.config_validation as cv _LOGGER = logging.getLogger(".") DOMAIN = "hangouts" CONF_2FA = "2fa" CONF_AUTH_CODE = "authorization_code" CONF_REFRESH_TOKEN = "refresh_token" CONF_BOT = "bot" CONF_CONVERSATIONS = "conversations" CONF_DEFAULT_CONVERSATIONS = "default_conversations" CONF_ERROR_SUPPRESSED_CONVERSATIONS = "error_suppressed_conversations" CONF_INTENTS = "intents" CONF_INTENT_TYPE = "intent_type" CONF_SENTENCES = "sentences" CONF_MATCHERS = "matchers" INTENT_HELP = "HangoutsHelp" EVENT_HANGOUTS_CONNECTED = "hangouts_connected" EVENT_HANGOUTS_DISCONNECTED = "hangouts_disconnected" EVENT_HANGOUTS_USERS_CHANGED = "hangouts_users_changed" EVENT_HANGOUTS_CONVERSATIONS_CHANGED = "hangouts_conversations_changed" EVENT_HANGOUTS_CONVERSATIONS_RESOLVED = "hangouts_conversations_resolved" EVENT_HANGOUTS_MESSAGE_RECEIVED = "hangouts_message_received" CONF_CONVERSATION_ID = "id" CONF_CONVERSATION_NAME = "name" SERVICE_SEND_MESSAGE = "send_message" SERVICE_UPDATE = "update" SERVICE_RECONNECT = "reconnect" TARGETS_SCHEMA = vol.All( vol.Schema( { vol.Exclusive(CONF_CONVERSATION_ID, "id or name"): cv.string, vol.Exclusive(CONF_CONVERSATION_NAME, "id or name"): cv.string, } ), cv.has_at_least_one_key(CONF_CONVERSATION_ID, CONF_CONVERSATION_NAME), ) MESSAGE_SEGMENT_SCHEMA = vol.Schema( { vol.Required("text"): cv.string, vol.Optional("is_bold"): cv.boolean, vol.Optional("is_italic"): cv.boolean, vol.Optional("is_strikethrough"): cv.boolean, vol.Optional("is_underline"): cv.boolean, vol.Optional("parse_str"): cv.boolean, vol.Optional("link_target"): cv.string, } ) MESSAGE_DATA_SCHEMA = vol.Schema( {vol.Optional("image_file"): cv.string, vol.Optional("image_url"): cv.string} ) MESSAGE_SCHEMA = vol.Schema( { vol.Required(ATTR_TARGET): [TARGETS_SCHEMA], vol.Required(ATTR_MESSAGE): 
[MESSAGE_SEGMENT_SCHEMA], vol.Optional(ATTR_DATA): MESSAGE_DATA_SCHEMA, } ) INTENT_SCHEMA = vol.All( # Basic Schema vol.Schema( { vol.Required(CONF_SENTENCES): vol.All(cv.ensure_list, [cv.string]), vol.Optional(CONF_CONVERSATIONS): [TARGETS_SCHEMA], } ) )
"""Tests for the iOS init file.""" from unittest.mock import patch import pytest from homeassistant import config_entries, data_entry_flow from homeassistant.setup import async_setup_component from homeassistant.components import ios from tests.common import mock_component, mock_coro @pytest.fixture(autouse=True) def mock_load_json(): """Mock load_json.""" with patch("homeassistant.components.ios.load_json", return_value={}): yield @pytest.fixture(autouse=True) def mock_dependencies(hass): """Mock dependencies loaded.""" mock_component(hass, "zeroconf") mock_component(hass, "device_tracker") async def test_creating_entry_sets_up_sensor(hass): """Test setting up iOS loads the sensor component.""" with patch( "homeassistant.components.ios.sensor.async_setup_entry", return_value=mock_coro(True), ) as mock_setup: result = await hass.config_entries.flow.async_init( ios.DOMAIN, context={"source": config_entries.SOURCE_USER} ) # Confirmation form assert result["type"] == data_entry_flow.RESULT_TYPE_FORM result = await hass.config_entries.flow.async_configure(result["flow_id"], {}) assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY await hass.async_block_till_done() assert len(mock_setup.mock_calls) == 1 async def test_configuring_ios_creates_entry(hass): """Test that specifying config will create an entry.""" with patch( "homeassistant.components.ios.async_setup_entry", return_value=mock_coro(True) ) as mock_setup: await async_setup_component(hass, ios.DOMAIN, {"ios": {"push": {}}}) await hass.async_block_till_done() assert len(mock_setup.mock_calls) == 1 async def test_not_configuring_ios_not_creates_entry(hass): """Test that no config will not create an entry.""" with patch( "homeassistant.components.ios.async_setup_entry", return_value=mock_coro(True) ) as mock_setup: await async_setup_component(hass, ios.DOMAIN, {}) await hass.async_block_till_done() assert len(mock_setup.mock_calls) == 0
fbradyirl/home-assistant
tests/components/ios/test_init.py
homeassistant/components/hangouts/const.py
"""SMA Solar Webconnect interface.""" import asyncio from datetime import timedelta import logging import voluptuous as vol from homeassistant.components.sensor import PLATFORM_SCHEMA from homeassistant.const import ( CONF_HOST, CONF_PASSWORD, CONF_SCAN_INTERVAL, CONF_SSL, CONF_VERIFY_SSL, EVENT_HOMEASSISTANT_STOP, CONF_PATH, ) from homeassistant.helpers.aiohttp_client import async_get_clientsession import homeassistant.helpers.config_validation as cv from homeassistant.helpers.entity import Entity from homeassistant.helpers.event import async_track_time_interval _LOGGER = logging.getLogger(__name__) CONF_CUSTOM = "custom" CONF_FACTOR = "factor" CONF_GROUP = "group" CONF_KEY = "key" CONF_SENSORS = "sensors" CONF_UNIT = "unit" GROUPS = ["user", "installer"] def _check_sensor_schema(conf): """Check sensors and attributes are valid.""" try: import pysma valid = [s.name for s in pysma.Sensors()] except (ImportError, AttributeError): return conf for name in conf[CONF_CUSTOM]: valid.append(name) for sname, attrs in conf[CONF_SENSORS].items(): if sname not in valid: raise vol.Invalid("{} does not exist".format(sname)) for attr in attrs: if attr in valid: continue raise vol.Invalid("{} does not exist [{}]".format(attr, sname)) return conf CUSTOM_SCHEMA = vol.Any( { vol.Required(CONF_KEY): vol.All(cv.string, vol.Length(min=13, max=15)), vol.Required(CONF_UNIT): cv.string, vol.Optional(CONF_FACTOR, default=1): vol.Coerce(float), vol.Optional(CONF_PATH): vol.All(cv.ensure_list, [str]), } ) PLATFORM_SCHEMA = vol.All( PLATFORM_SCHEMA.extend( { vol.Required(CONF_HOST): cv.string, vol.Optional(CONF_SSL, default=False): cv.boolean, vol.Optional(CONF_VERIFY_SSL, default=True): cv.boolean, vol.Required(CONF_PASSWORD): cv.string, vol.Optional(CONF_GROUP, default=GROUPS[0]): vol.In(GROUPS), vol.Optional(CONF_SENSORS, default={}): cv.schema_with_slug_keys( cv.ensure_list ), vol.Optional(CONF_CUSTOM, default={}): cv.schema_with_slug_keys( CUSTOM_SCHEMA ), }, extra=vol.PREVENT_EXTRA, ), 
_check_sensor_schema, ) async def async_setup_platform(hass, config, async_add_entities, discovery_info=None): """Set up SMA WebConnect sensor.""" import pysma # Check config again during load - dependency available config = _check_sensor_schema(config) # Init all default sensors sensor_def = pysma.Sensors() # Sensor from the custom config sensor_def.add( [ pysma.Sensor(o[CONF_KEY], n, o[CONF_UNIT], o[CONF_FACTOR], o.get(CONF_PATH)) for n, o in config[CONF_CUSTOM].items() ] ) # Use all sensors by default config_sensors = config[CONF_SENSORS] if not config_sensors: config_sensors = {s.name: [] for s in sensor_def} # Prepare all HASS sensor entities hass_sensors = [] used_sensors = [] for name, attr in config_sensors.items(): sub_sensors = [sensor_def[s] for s in attr] hass_sensors.append(SMAsensor(sensor_def[name], sub_sensors)) used_sensors.append(name) used_sensors.extend(attr) async_add_entities(hass_sensors) used_sensors = [sensor_def[s] for s in set(used_sensors)] # Init the SMA interface session = async_get_clientsession(hass, verify_ssl=config[CONF_VERIFY_SSL]) grp = config[CONF_GROUP] url = "http{}://{}".format("s" if config[CONF_SSL] else "", config[CONF_HOST]) sma = pysma.SMA(session, url, config[CONF_PASSWORD], group=grp) # Ensure we logout on shutdown async def async_close_session(event): """Close the session.""" await sma.close_session() hass.bus.async_listen(EVENT_HOMEASSISTANT_STOP, async_close_session) backoff = 0 backoff_step = 0 async def async_sma(event): """Update all the SMA sensors.""" nonlocal backoff, backoff_step if backoff > 1: backoff -= 1 return values = await sma.read(used_sensors) if not values: try: backoff = [1, 1, 1, 6, 30][backoff_step] backoff_step += 1 except IndexError: backoff = 60 return backoff_step = 0 tasks = [] for sensor in hass_sensors: task = sensor.async_update_values() if task: tasks.append(task) if tasks: await asyncio.wait(tasks) interval = config.get(CONF_SCAN_INTERVAL) or timedelta(seconds=5) 
async_track_time_interval(hass, async_sma, interval) class SMAsensor(Entity): """Representation of a SMA sensor.""" def __init__(self, pysma_sensor, sub_sensors): """Initialize the sensor.""" self._sensor = pysma_sensor self._sub_sensors = sub_sensors self._attr = {s.name: "" for s in sub_sensors} self._state = self._sensor.value @property def name(self): """Return the name of the sensor.""" return self._sensor.name @property def state(self): """Return the state of the sensor.""" return self._state @property def unit_of_measurement(self): """Return the unit the value is expressed in.""" return self._sensor.unit @property def device_state_attributes(self): """Return the state attributes of the sensor.""" return self._attr @property def poll(self): """SMA sensors are updated & don't poll.""" return False def async_update_values(self): """Update this sensor.""" update = False for sens in self._sub_sensors: newval = "{} {}".format(sens.value, sens.unit) if self._attr[sens.name] != newval: update = True self._attr[sens.name] = newval if self._sensor.value != self._state: update = True self._state = self._sensor.value return self.async_update_ha_state() if update else None @property def unique_id(self): """Return a unique identifier for this sensor.""" return "sma-{}-{}".format(self._sensor.key, self._sensor.name)
"""Tests for the iOS init file.""" from unittest.mock import patch import pytest from homeassistant import config_entries, data_entry_flow from homeassistant.setup import async_setup_component from homeassistant.components import ios from tests.common import mock_component, mock_coro @pytest.fixture(autouse=True) def mock_load_json(): """Mock load_json.""" with patch("homeassistant.components.ios.load_json", return_value={}): yield @pytest.fixture(autouse=True) def mock_dependencies(hass): """Mock dependencies loaded.""" mock_component(hass, "zeroconf") mock_component(hass, "device_tracker") async def test_creating_entry_sets_up_sensor(hass): """Test setting up iOS loads the sensor component.""" with patch( "homeassistant.components.ios.sensor.async_setup_entry", return_value=mock_coro(True), ) as mock_setup: result = await hass.config_entries.flow.async_init( ios.DOMAIN, context={"source": config_entries.SOURCE_USER} ) # Confirmation form assert result["type"] == data_entry_flow.RESULT_TYPE_FORM result = await hass.config_entries.flow.async_configure(result["flow_id"], {}) assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY await hass.async_block_till_done() assert len(mock_setup.mock_calls) == 1 async def test_configuring_ios_creates_entry(hass): """Test that specifying config will create an entry.""" with patch( "homeassistant.components.ios.async_setup_entry", return_value=mock_coro(True) ) as mock_setup: await async_setup_component(hass, ios.DOMAIN, {"ios": {"push": {}}}) await hass.async_block_till_done() assert len(mock_setup.mock_calls) == 1 async def test_not_configuring_ios_not_creates_entry(hass): """Test that no config will not create an entry.""" with patch( "homeassistant.components.ios.async_setup_entry", return_value=mock_coro(True) ) as mock_setup: await async_setup_component(hass, ios.DOMAIN, {}) await hass.async_block_till_done() assert len(mock_setup.mock_calls) == 0
fbradyirl/home-assistant
tests/components/ios/test_init.py
homeassistant/components/sma/sensor.py
"""Common utilities for VeSync Component.""" import logging from homeassistant.helpers.entity import ToggleEntity from .const import VS_SWITCHES _LOGGER = logging.getLogger(__name__) async def async_process_devices(hass, manager): """Assign devices to proper component.""" devices = {} devices[VS_SWITCHES] = [] await hass.async_add_executor_job(manager.update) if manager.outlets: devices[VS_SWITCHES].extend(manager.outlets) _LOGGER.info("%d VeSync outlets found", len(manager.outlets)) if manager.switches: for switch in manager.switches: if not switch.is_dimmable(): devices[VS_SWITCHES].append(switch) _LOGGER.info("%d VeSync standard switches found", len(manager.switches)) return devices class VeSyncDevice(ToggleEntity): """Base class for VeSync Device Representations.""" def __init__(self, device): """Initialize the VeSync device.""" self.device = device @property def unique_id(self): """Return the ID of this device.""" if isinstance(self.device.sub_device_no, int): return "{}{}".format(self.device.cid, str(self.device.sub_device_no)) return self.device.cid @property def name(self): """Return the name of the device.""" return self.device.device_name @property def is_on(self): """Return True if switch is on.""" return self.device.device_status == "on" @property def available(self) -> bool: """Return True if device is available.""" return self.device.connection_status == "online" def turn_on(self, **kwargs): """Turn the device on.""" self.device.turn_on() def turn_off(self, **kwargs): """Turn the device off.""" self.device.turn_off() def update(self): """Update vesync device.""" self.device.update()
"""Tests for the iOS init file.""" from unittest.mock import patch import pytest from homeassistant import config_entries, data_entry_flow from homeassistant.setup import async_setup_component from homeassistant.components import ios from tests.common import mock_component, mock_coro @pytest.fixture(autouse=True) def mock_load_json(): """Mock load_json.""" with patch("homeassistant.components.ios.load_json", return_value={}): yield @pytest.fixture(autouse=True) def mock_dependencies(hass): """Mock dependencies loaded.""" mock_component(hass, "zeroconf") mock_component(hass, "device_tracker") async def test_creating_entry_sets_up_sensor(hass): """Test setting up iOS loads the sensor component.""" with patch( "homeassistant.components.ios.sensor.async_setup_entry", return_value=mock_coro(True), ) as mock_setup: result = await hass.config_entries.flow.async_init( ios.DOMAIN, context={"source": config_entries.SOURCE_USER} ) # Confirmation form assert result["type"] == data_entry_flow.RESULT_TYPE_FORM result = await hass.config_entries.flow.async_configure(result["flow_id"], {}) assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY await hass.async_block_till_done() assert len(mock_setup.mock_calls) == 1 async def test_configuring_ios_creates_entry(hass): """Test that specifying config will create an entry.""" with patch( "homeassistant.components.ios.async_setup_entry", return_value=mock_coro(True) ) as mock_setup: await async_setup_component(hass, ios.DOMAIN, {"ios": {"push": {}}}) await hass.async_block_till_done() assert len(mock_setup.mock_calls) == 1 async def test_not_configuring_ios_not_creates_entry(hass): """Test that no config will not create an entry.""" with patch( "homeassistant.components.ios.async_setup_entry", return_value=mock_coro(True) ) as mock_setup: await async_setup_component(hass, ios.DOMAIN, {}) await hass.async_block_till_done() assert len(mock_setup.mock_calls) == 0
fbradyirl/home-assistant
tests/components/ios/test_init.py
homeassistant/components/vesync/common.py
"""Support for RESTful API sensors.""" import logging import json import voluptuous as vol import requests from requests.auth import HTTPBasicAuth, HTTPDigestAuth from homeassistant.components.sensor import PLATFORM_SCHEMA, DEVICE_CLASSES_SCHEMA from homeassistant.const import ( CONF_AUTHENTICATION, CONF_FORCE_UPDATE, CONF_HEADERS, CONF_NAME, CONF_METHOD, CONF_PASSWORD, CONF_PAYLOAD, CONF_RESOURCE, CONF_UNIT_OF_MEASUREMENT, CONF_USERNAME, CONF_TIMEOUT, CONF_VALUE_TEMPLATE, CONF_VERIFY_SSL, CONF_DEVICE_CLASS, HTTP_BASIC_AUTHENTICATION, HTTP_DIGEST_AUTHENTICATION, ) from homeassistant.exceptions import PlatformNotReady from homeassistant.helpers.entity import Entity import homeassistant.helpers.config_validation as cv _LOGGER = logging.getLogger(__name__) DEFAULT_METHOD = "GET" DEFAULT_NAME = "REST Sensor" DEFAULT_VERIFY_SSL = True DEFAULT_FORCE_UPDATE = False DEFAULT_TIMEOUT = 10 CONF_JSON_ATTRS = "json_attributes" METHODS = ["POST", "GET"] PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend( { vol.Required(CONF_RESOURCE): cv.url, vol.Optional(CONF_AUTHENTICATION): vol.In( [HTTP_BASIC_AUTHENTICATION, HTTP_DIGEST_AUTHENTICATION] ), vol.Optional(CONF_HEADERS): vol.Schema({cv.string: cv.string}), vol.Optional(CONF_JSON_ATTRS, default=[]): cv.ensure_list_csv, vol.Optional(CONF_METHOD, default=DEFAULT_METHOD): vol.In(METHODS), vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string, vol.Optional(CONF_PASSWORD): cv.string, vol.Optional(CONF_PAYLOAD): cv.string, vol.Optional(CONF_UNIT_OF_MEASUREMENT): cv.string, vol.Optional(CONF_DEVICE_CLASS): DEVICE_CLASSES_SCHEMA, vol.Optional(CONF_USERNAME): cv.string, vol.Optional(CONF_VALUE_TEMPLATE): cv.template, vol.Optional(CONF_VERIFY_SSL, default=DEFAULT_VERIFY_SSL): cv.boolean, vol.Optional(CONF_FORCE_UPDATE, default=DEFAULT_FORCE_UPDATE): cv.boolean, vol.Optional(CONF_TIMEOUT, default=DEFAULT_TIMEOUT): cv.positive_int, } ) def setup_platform(hass, config, add_entities, discovery_info=None): """Set up the RESTful sensor.""" name = 
config.get(CONF_NAME) resource = config.get(CONF_RESOURCE) method = config.get(CONF_METHOD) payload = config.get(CONF_PAYLOAD) verify_ssl = config.get(CONF_VERIFY_SSL) username = config.get(CONF_USERNAME) password = config.get(CONF_PASSWORD) headers = config.get(CONF_HEADERS) unit = config.get(CONF_UNIT_OF_MEASUREMENT) device_class = config.get(CONF_DEVICE_CLASS) value_template = config.get(CONF_VALUE_TEMPLATE) json_attrs = config.get(CONF_JSON_ATTRS) force_update = config.get(CONF_FORCE_UPDATE) timeout = config.get(CONF_TIMEOUT) if value_template is not None: value_template.hass = hass if username and password: if config.get(CONF_AUTHENTICATION) == HTTP_DIGEST_AUTHENTICATION: auth = HTTPDigestAuth(username, password) else: auth = HTTPBasicAuth(username, password) else: auth = None rest = RestData(method, resource, auth, headers, payload, verify_ssl, timeout) rest.update() if rest.data is None: raise PlatformNotReady # Must update the sensor now (including fetching the rest resource) to # ensure it's updating its state. 
add_entities( [ RestSensor( hass, rest, name, unit, device_class, value_template, json_attrs, force_update, ) ], True, ) class RestSensor(Entity): """Implementation of a REST sensor.""" def __init__( self, hass, rest, name, unit_of_measurement, device_class, value_template, json_attrs, force_update, ): """Initialize the REST sensor.""" self._hass = hass self.rest = rest self._name = name self._state = None self._unit_of_measurement = unit_of_measurement self._device_class = device_class self._value_template = value_template self._json_attrs = json_attrs self._attributes = None self._force_update = force_update @property def name(self): """Return the name of the sensor.""" return self._name @property def unit_of_measurement(self): """Return the unit the value is expressed in.""" return self._unit_of_measurement @property def device_class(self): """Return the class of this sensor.""" return self._device_class @property def available(self): """Return if the sensor data are available.""" return self.rest.data is not None @property def state(self): """Return the state of the device.""" return self._state @property def force_update(self): """Force update.""" return self._force_update def update(self): """Get the latest data from REST API and update the state.""" self.rest.update() value = self.rest.data if self._json_attrs: self._attributes = {} if value: try: json_dict = json.loads(value) if isinstance(json_dict, dict): attrs = { k: json_dict[k] for k in self._json_attrs if k in json_dict } self._attributes = attrs else: _LOGGER.warning("JSON result was not a dictionary") except ValueError: _LOGGER.warning("REST result could not be parsed as JSON") _LOGGER.debug("Erroneous JSON: %s", value) else: _LOGGER.warning("Empty reply found when expecting JSON data") if value is not None and self._value_template is not None: value = self._value_template.render_with_possible_json_value(value, None) self._state = value @property def device_state_attributes(self): """Return the 
state attributes.""" return self._attributes class RestData: """Class for handling the data retrieval.""" def __init__( self, method, resource, auth, headers, data, verify_ssl, timeout=DEFAULT_TIMEOUT ): """Initialize the data object.""" self._request = requests.Request( method, resource, headers=headers, auth=auth, data=data ).prepare() self._verify_ssl = verify_ssl self._timeout = timeout self.data = None def update(self): """Get the latest data from REST service with provided method.""" _LOGGER.debug("Updating from %s", self._request.url) try: with requests.Session() as sess: response = sess.send( self._request, timeout=self._timeout, verify=self._verify_ssl ) self.data = response.text except requests.exceptions.RequestException as ex: _LOGGER.error( "Error fetching data: %s from %s failed with %s", self._request, self._request.url, ex, ) self.data = None
"""Tests for the iOS init file.""" from unittest.mock import patch import pytest from homeassistant import config_entries, data_entry_flow from homeassistant.setup import async_setup_component from homeassistant.components import ios from tests.common import mock_component, mock_coro @pytest.fixture(autouse=True) def mock_load_json(): """Mock load_json.""" with patch("homeassistant.components.ios.load_json", return_value={}): yield @pytest.fixture(autouse=True) def mock_dependencies(hass): """Mock dependencies loaded.""" mock_component(hass, "zeroconf") mock_component(hass, "device_tracker") async def test_creating_entry_sets_up_sensor(hass): """Test setting up iOS loads the sensor component.""" with patch( "homeassistant.components.ios.sensor.async_setup_entry", return_value=mock_coro(True), ) as mock_setup: result = await hass.config_entries.flow.async_init( ios.DOMAIN, context={"source": config_entries.SOURCE_USER} ) # Confirmation form assert result["type"] == data_entry_flow.RESULT_TYPE_FORM result = await hass.config_entries.flow.async_configure(result["flow_id"], {}) assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY await hass.async_block_till_done() assert len(mock_setup.mock_calls) == 1 async def test_configuring_ios_creates_entry(hass): """Test that specifying config will create an entry.""" with patch( "homeassistant.components.ios.async_setup_entry", return_value=mock_coro(True) ) as mock_setup: await async_setup_component(hass, ios.DOMAIN, {"ios": {"push": {}}}) await hass.async_block_till_done() assert len(mock_setup.mock_calls) == 1 async def test_not_configuring_ios_not_creates_entry(hass): """Test that no config will not create an entry.""" with patch( "homeassistant.components.ios.async_setup_entry", return_value=mock_coro(True) ) as mock_setup: await async_setup_component(hass, ios.DOMAIN, {}) await hass.async_block_till_done() assert len(mock_setup.mock_calls) == 0
fbradyirl/home-assistant
tests/components/ios/test_init.py
homeassistant/components/rest/sensor.py
"""Http views to control the config manager.""" from homeassistant import config_entries, data_entry_flow from homeassistant.auth.permissions.const import CAT_CONFIG_ENTRIES from homeassistant.components.http import HomeAssistantView from homeassistant.exceptions import Unauthorized from homeassistant.helpers.data_entry_flow import ( FlowManagerIndexView, FlowManagerResourceView, ) from homeassistant.loader import async_get_config_flows async def async_setup(hass): """Enable the Home Assistant views.""" hass.http.register_view(ConfigManagerEntryIndexView) hass.http.register_view(ConfigManagerEntryResourceView) hass.http.register_view(ConfigManagerFlowIndexView(hass.config_entries.flow)) hass.http.register_view(ConfigManagerFlowResourceView(hass.config_entries.flow)) hass.http.register_view(ConfigManagerAvailableFlowView) hass.http.register_view( OptionManagerFlowIndexView(hass.config_entries.options.flow) ) hass.http.register_view( OptionManagerFlowResourceView(hass.config_entries.options.flow) ) return True def _prepare_json(result): """Convert result for JSON.""" if result["type"] != data_entry_flow.RESULT_TYPE_FORM: return result import voluptuous_serialize data = result.copy() schema = data["data_schema"] if schema is None: data["data_schema"] = [] else: data["data_schema"] = voluptuous_serialize.convert(schema) return data class ConfigManagerEntryIndexView(HomeAssistantView): """View to get available config entries.""" url = "/api/config/config_entries/entry" name = "api:config:config_entries:entry" async def get(self, request): """List available config entries.""" hass = request.app["hass"] return self.json( [ { "entry_id": entry.entry_id, "domain": entry.domain, "title": entry.title, "source": entry.source, "state": entry.state, "connection_class": entry.connection_class, "supports_options": hasattr( config_entries.HANDLERS.get(entry.domain), "async_get_options_flow", ), } for entry in hass.config_entries.async_entries() ] ) class 
ConfigManagerEntryResourceView(HomeAssistantView): """View to interact with a config entry.""" url = "/api/config/config_entries/entry/{entry_id}" name = "api:config:config_entries:entry:resource" async def delete(self, request, entry_id): """Delete a config entry.""" if not request["hass_user"].is_admin: raise Unauthorized(config_entry_id=entry_id, permission="remove") hass = request.app["hass"] try: result = await hass.config_entries.async_remove(entry_id) except config_entries.UnknownEntry: return self.json_message("Invalid entry specified", 404) return self.json(result) class ConfigManagerFlowIndexView(FlowManagerIndexView): """View to create config flows.""" url = "/api/config/config_entries/flow" name = "api:config:config_entries:flow" async def get(self, request): """List flows that are in progress but not started by a user. Example of a non-user initiated flow is a discovered Hue hub that requires user interaction to finish setup. """ if not request["hass_user"].is_admin: raise Unauthorized(perm_category=CAT_CONFIG_ENTRIES, permission="add") hass = request.app["hass"] return self.json( [ flw for flw in hass.config_entries.flow.async_progress() if flw["context"]["source"] != config_entries.SOURCE_USER ] ) # pylint: disable=arguments-differ async def post(self, request): """Handle a POST request.""" if not request["hass_user"].is_admin: raise Unauthorized(perm_category=CAT_CONFIG_ENTRIES, permission="add") # pylint: disable=no-value-for-parameter return await super().post(request) def _prepare_result_json(self, result): """Convert result to JSON.""" if result["type"] != data_entry_flow.RESULT_TYPE_CREATE_ENTRY: return super()._prepare_result_json(result) data = result.copy() data["result"] = data["result"].entry_id data.pop("data") return data class ConfigManagerFlowResourceView(FlowManagerResourceView): """View to interact with the flow manager.""" url = "/api/config/config_entries/flow/{flow_id}" name = "api:config:config_entries:flow:resource" async def 
get(self, request, flow_id): """Get the current state of a data_entry_flow.""" if not request["hass_user"].is_admin: raise Unauthorized(perm_category=CAT_CONFIG_ENTRIES, permission="add") return await super().get(request, flow_id) # pylint: disable=arguments-differ async def post(self, request, flow_id): """Handle a POST request.""" if not request["hass_user"].is_admin: raise Unauthorized(perm_category=CAT_CONFIG_ENTRIES, permission="add") # pylint: disable=no-value-for-parameter return await super().post(request, flow_id) def _prepare_result_json(self, result): """Convert result to JSON.""" if result["type"] != data_entry_flow.RESULT_TYPE_CREATE_ENTRY: return super()._prepare_result_json(result) data = result.copy() data["result"] = data["result"].entry_id data.pop("data") return data class ConfigManagerAvailableFlowView(HomeAssistantView): """View to query available flows.""" url = "/api/config/config_entries/flow_handlers" name = "api:config:config_entries:flow_handlers" async def get(self, request): """List available flow handlers.""" hass = request.app["hass"] return self.json(await async_get_config_flows(hass)) class OptionManagerFlowIndexView(FlowManagerIndexView): """View to create option flows.""" url = "/api/config/config_entries/entry/option/flow" name = "api:config:config_entries:entry:resource:option:flow" # pylint: disable=arguments-differ async def post(self, request): """Handle a POST request. handler in request is entry_id. 
""" if not request["hass_user"].is_admin: raise Unauthorized(perm_category=CAT_CONFIG_ENTRIES, permission="edit") # pylint: disable=no-value-for-parameter return await super().post(request) class OptionManagerFlowResourceView(FlowManagerResourceView): """View to interact with the option flow manager.""" url = "/api/config/config_entries/options/flow/{flow_id}" name = "api:config:config_entries:options:flow:resource" async def get(self, request, flow_id): """Get the current state of a data_entry_flow.""" if not request["hass_user"].is_admin: raise Unauthorized(perm_category=CAT_CONFIG_ENTRIES, permission="edit") return await super().get(request, flow_id) # pylint: disable=arguments-differ async def post(self, request, flow_id): """Handle a POST request.""" if not request["hass_user"].is_admin: raise Unauthorized(perm_category=CAT_CONFIG_ENTRIES, permission="edit") # pylint: disable=no-value-for-parameter return await super().post(request, flow_id)
"""Tests for the iOS init file.""" from unittest.mock import patch import pytest from homeassistant import config_entries, data_entry_flow from homeassistant.setup import async_setup_component from homeassistant.components import ios from tests.common import mock_component, mock_coro @pytest.fixture(autouse=True) def mock_load_json(): """Mock load_json.""" with patch("homeassistant.components.ios.load_json", return_value={}): yield @pytest.fixture(autouse=True) def mock_dependencies(hass): """Mock dependencies loaded.""" mock_component(hass, "zeroconf") mock_component(hass, "device_tracker") async def test_creating_entry_sets_up_sensor(hass): """Test setting up iOS loads the sensor component.""" with patch( "homeassistant.components.ios.sensor.async_setup_entry", return_value=mock_coro(True), ) as mock_setup: result = await hass.config_entries.flow.async_init( ios.DOMAIN, context={"source": config_entries.SOURCE_USER} ) # Confirmation form assert result["type"] == data_entry_flow.RESULT_TYPE_FORM result = await hass.config_entries.flow.async_configure(result["flow_id"], {}) assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY await hass.async_block_till_done() assert len(mock_setup.mock_calls) == 1 async def test_configuring_ios_creates_entry(hass): """Test that specifying config will create an entry.""" with patch( "homeassistant.components.ios.async_setup_entry", return_value=mock_coro(True) ) as mock_setup: await async_setup_component(hass, ios.DOMAIN, {"ios": {"push": {}}}) await hass.async_block_till_done() assert len(mock_setup.mock_calls) == 1 async def test_not_configuring_ios_not_creates_entry(hass): """Test that no config will not create an entry.""" with patch( "homeassistant.components.ios.async_setup_entry", return_value=mock_coro(True) ) as mock_setup: await async_setup_component(hass, ios.DOMAIN, {}) await hass.async_block_till_done() assert len(mock_setup.mock_calls) == 0
fbradyirl/home-assistant
tests/components/ios/test_init.py
homeassistant/components/config/config_entries.py
"""Support for interacting with Smappee Comport Plugs.""" import logging from homeassistant.components.switch import SwitchDevice from . import DATA_SMAPPEE _LOGGER = logging.getLogger(__name__) ICON = "mdi:power-plug" def setup_platform(hass, config, add_entities, discovery_info=None): """Set up the Smappee Comfort Plugs.""" smappee = hass.data[DATA_SMAPPEE] dev = [] if smappee.is_remote_active: for location_id in smappee.locations.keys(): for items in smappee.info[location_id].get("actuators"): if items.get("name") != "": _LOGGER.debug("Remote actuator %s", items) dev.append( SmappeeSwitch( smappee, items.get("name"), location_id, items.get("id") ) ) elif smappee.is_local_active: for items in smappee.local_devices: _LOGGER.debug("Local actuator %s", items) dev.append( SmappeeSwitch(smappee, items.get("value"), None, items.get("key")) ) add_entities(dev) class SmappeeSwitch(SwitchDevice): """Representation of a Smappee Comport Plug.""" def __init__(self, smappee, name, location_id, switch_id): """Initialize a new Smappee Comfort Plug.""" self._name = name self._state = False self._smappee = smappee self._location_id = location_id self._switch_id = switch_id self._remoteswitch = True if location_id is None: self._remoteswitch = False @property def name(self): """Return the name of the switch.""" return self._name @property def is_on(self): """Return true if switch is on.""" return self._state @property def icon(self): """Icon to use in the frontend.""" return ICON def turn_on(self, **kwargs): """Turn on Comport Plug.""" if self._smappee.actuator_on( self._location_id, self._switch_id, self._remoteswitch ): self._state = True def turn_off(self, **kwargs): """Turn off Comport Plug.""" if self._smappee.actuator_off( self._location_id, self._switch_id, self._remoteswitch ): self._state = False @property def device_state_attributes(self): """Return the state attributes of the device.""" attr = {} if self._remoteswitch: attr["Location Id"] = self._location_id 
attr["Location Name"] = self._smappee.locations[self._location_id] attr["Switch Id"] = self._switch_id return attr
"""Tests for the iOS init file.""" from unittest.mock import patch import pytest from homeassistant import config_entries, data_entry_flow from homeassistant.setup import async_setup_component from homeassistant.components import ios from tests.common import mock_component, mock_coro @pytest.fixture(autouse=True) def mock_load_json(): """Mock load_json.""" with patch("homeassistant.components.ios.load_json", return_value={}): yield @pytest.fixture(autouse=True) def mock_dependencies(hass): """Mock dependencies loaded.""" mock_component(hass, "zeroconf") mock_component(hass, "device_tracker") async def test_creating_entry_sets_up_sensor(hass): """Test setting up iOS loads the sensor component.""" with patch( "homeassistant.components.ios.sensor.async_setup_entry", return_value=mock_coro(True), ) as mock_setup: result = await hass.config_entries.flow.async_init( ios.DOMAIN, context={"source": config_entries.SOURCE_USER} ) # Confirmation form assert result["type"] == data_entry_flow.RESULT_TYPE_FORM result = await hass.config_entries.flow.async_configure(result["flow_id"], {}) assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY await hass.async_block_till_done() assert len(mock_setup.mock_calls) == 1 async def test_configuring_ios_creates_entry(hass): """Test that specifying config will create an entry.""" with patch( "homeassistant.components.ios.async_setup_entry", return_value=mock_coro(True) ) as mock_setup: await async_setup_component(hass, ios.DOMAIN, {"ios": {"push": {}}}) await hass.async_block_till_done() assert len(mock_setup.mock_calls) == 1 async def test_not_configuring_ios_not_creates_entry(hass): """Test that no config will not create an entry.""" with patch( "homeassistant.components.ios.async_setup_entry", return_value=mock_coro(True) ) as mock_setup: await async_setup_component(hass, ios.DOMAIN, {}) await hass.async_block_till_done() assert len(mock_setup.mock_calls) == 0
fbradyirl/home-assistant
tests/components/ios/test_init.py
homeassistant/components/smappee/switch.py
"""Offer time listening automation rules.""" import logging import voluptuous as vol from homeassistant.core import callback from homeassistant.const import CONF_AT, CONF_PLATFORM from homeassistant.helpers import config_validation as cv from homeassistant.helpers.event import async_track_time_change _LOGGER = logging.getLogger(__name__) TRIGGER_SCHEMA = vol.Schema( {vol.Required(CONF_PLATFORM): "time", vol.Required(CONF_AT): cv.time} ) async def async_trigger(hass, config, action, automation_info): """Listen for state changes based on configuration.""" at_time = config.get(CONF_AT) hours, minutes, seconds = at_time.hour, at_time.minute, at_time.second @callback def time_automation_listener(now): """Listen for time changes and calls action.""" hass.async_run_job(action, {"trigger": {"platform": "time", "now": now}}) return async_track_time_change( hass, time_automation_listener, hour=hours, minute=minutes, second=seconds )
"""Tests for the iOS init file.""" from unittest.mock import patch import pytest from homeassistant import config_entries, data_entry_flow from homeassistant.setup import async_setup_component from homeassistant.components import ios from tests.common import mock_component, mock_coro @pytest.fixture(autouse=True) def mock_load_json(): """Mock load_json.""" with patch("homeassistant.components.ios.load_json", return_value={}): yield @pytest.fixture(autouse=True) def mock_dependencies(hass): """Mock dependencies loaded.""" mock_component(hass, "zeroconf") mock_component(hass, "device_tracker") async def test_creating_entry_sets_up_sensor(hass): """Test setting up iOS loads the sensor component.""" with patch( "homeassistant.components.ios.sensor.async_setup_entry", return_value=mock_coro(True), ) as mock_setup: result = await hass.config_entries.flow.async_init( ios.DOMAIN, context={"source": config_entries.SOURCE_USER} ) # Confirmation form assert result["type"] == data_entry_flow.RESULT_TYPE_FORM result = await hass.config_entries.flow.async_configure(result["flow_id"], {}) assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY await hass.async_block_till_done() assert len(mock_setup.mock_calls) == 1 async def test_configuring_ios_creates_entry(hass): """Test that specifying config will create an entry.""" with patch( "homeassistant.components.ios.async_setup_entry", return_value=mock_coro(True) ) as mock_setup: await async_setup_component(hass, ios.DOMAIN, {"ios": {"push": {}}}) await hass.async_block_till_done() assert len(mock_setup.mock_calls) == 1 async def test_not_configuring_ios_not_creates_entry(hass): """Test that no config will not create an entry.""" with patch( "homeassistant.components.ios.async_setup_entry", return_value=mock_coro(True) ) as mock_setup: await async_setup_component(hass, ios.DOMAIN, {}) await hass.async_block_till_done() assert len(mock_setup.mock_calls) == 0
fbradyirl/home-assistant
tests/components/ios/test_init.py
homeassistant/components/automation/time.py
"""Support for Keene Electronics IR-IP devices.""" import functools as ft import logging from homeassistant.components import remote from homeassistant.const import CONF_DEVICE, CONF_NAME from homeassistant.helpers.entity import Entity DOMAIN = "kira" _LOGGER = logging.getLogger(__name__) CONF_REMOTE = "remote" def setup_platform(hass, config, add_entities, discovery_info=None): """Set up the Kira platform.""" if discovery_info: name = discovery_info.get(CONF_NAME) device = discovery_info.get(CONF_DEVICE) kira = hass.data[DOMAIN][CONF_REMOTE][name] add_entities([KiraRemote(device, kira)]) return True class KiraRemote(Entity): """Remote representation used to send commands to a Kira device.""" def __init__(self, name, kira): """Initialize KiraRemote class.""" _LOGGER.debug("KiraRemote device init started for: %s", name) self._name = name self._kira = kira @property def name(self): """Return the Kira device's name.""" return self._name def update(self): """No-op.""" def send_command(self, command, **kwargs): """Send a command to one device.""" for single_command in command: code_tuple = (single_command, kwargs.get(remote.ATTR_DEVICE)) _LOGGER.info("Sending Command: %s to %s", *code_tuple) self._kira.sendCode(code_tuple) def async_send_command(self, command, **kwargs): """Send a command to a device. This method must be run in the event loop and returns a coroutine. """ return self.hass.async_add_job(ft.partial(self.send_command, command, **kwargs))
"""Tests for the iOS init file.""" from unittest.mock import patch import pytest from homeassistant import config_entries, data_entry_flow from homeassistant.setup import async_setup_component from homeassistant.components import ios from tests.common import mock_component, mock_coro @pytest.fixture(autouse=True) def mock_load_json(): """Mock load_json.""" with patch("homeassistant.components.ios.load_json", return_value={}): yield @pytest.fixture(autouse=True) def mock_dependencies(hass): """Mock dependencies loaded.""" mock_component(hass, "zeroconf") mock_component(hass, "device_tracker") async def test_creating_entry_sets_up_sensor(hass): """Test setting up iOS loads the sensor component.""" with patch( "homeassistant.components.ios.sensor.async_setup_entry", return_value=mock_coro(True), ) as mock_setup: result = await hass.config_entries.flow.async_init( ios.DOMAIN, context={"source": config_entries.SOURCE_USER} ) # Confirmation form assert result["type"] == data_entry_flow.RESULT_TYPE_FORM result = await hass.config_entries.flow.async_configure(result["flow_id"], {}) assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY await hass.async_block_till_done() assert len(mock_setup.mock_calls) == 1 async def test_configuring_ios_creates_entry(hass): """Test that specifying config will create an entry.""" with patch( "homeassistant.components.ios.async_setup_entry", return_value=mock_coro(True) ) as mock_setup: await async_setup_component(hass, ios.DOMAIN, {"ios": {"push": {}}}) await hass.async_block_till_done() assert len(mock_setup.mock_calls) == 1 async def test_not_configuring_ios_not_creates_entry(hass): """Test that no config will not create an entry.""" with patch( "homeassistant.components.ios.async_setup_entry", return_value=mock_coro(True) ) as mock_setup: await async_setup_component(hass, ios.DOMAIN, {}) await hass.async_block_till_done() assert len(mock_setup.mock_calls) == 0
fbradyirl/home-assistant
tests/components/ios/test_init.py
homeassistant/components/kira/remote.py
"""Support for Tikteck lights.""" import logging import voluptuous as vol from homeassistant.const import CONF_DEVICES, CONF_NAME, CONF_PASSWORD from homeassistant.components.light import ( ATTR_BRIGHTNESS, ATTR_HS_COLOR, SUPPORT_BRIGHTNESS, SUPPORT_COLOR, Light, PLATFORM_SCHEMA, ) import homeassistant.helpers.config_validation as cv import homeassistant.util.color as color_util _LOGGER = logging.getLogger(__name__) SUPPORT_TIKTECK_LED = SUPPORT_BRIGHTNESS | SUPPORT_COLOR DEVICE_SCHEMA = vol.Schema( {vol.Optional(CONF_NAME): cv.string, vol.Required(CONF_PASSWORD): cv.string} ) PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend( {vol.Optional(CONF_DEVICES, default={}): {cv.string: DEVICE_SCHEMA}} ) def setup_platform(hass, config, add_entities, discovery_info=None): """Set up the Tikteck platform.""" lights = [] for address, device_config in config[CONF_DEVICES].items(): device = {} device["name"] = device_config[CONF_NAME] device["password"] = device_config[CONF_PASSWORD] device["address"] = address light = TikteckLight(device) if light.is_valid: lights.append(light) add_entities(lights) class TikteckLight(Light): """Representation of a Tikteck light.""" def __init__(self, device): """Initialize the light.""" import tikteck self._name = device["name"] self._address = device["address"] self._password = device["password"] self._brightness = 255 self._hs = [0, 0] self._state = False self.is_valid = True self._bulb = tikteck.tikteck(self._address, "Smart Light", self._password) if self._bulb.connect() is False: self.is_valid = False _LOGGER.error("Failed to connect to bulb %s, %s", self._address, self._name) @property def unique_id(self): """Return the ID of this light.""" return self._address @property def name(self): """Return the name of the device if any.""" return self._name @property def is_on(self): """Return true if device is on.""" return self._state @property def brightness(self): """Return the brightness of this light between 0..255.""" return self._brightness 
@property def hs_color(self): """Return the color property.""" return self._hs @property def supported_features(self): """Flag supported features.""" return SUPPORT_TIKTECK_LED @property def should_poll(self): """Return the polling state.""" return False @property def assumed_state(self): """Return the assumed state.""" return True def set_state(self, red, green, blue, brightness): """Set the bulb state.""" return self._bulb.set_state(red, green, blue, brightness) def turn_on(self, **kwargs): """Turn the specified light on.""" self._state = True hs_color = kwargs.get(ATTR_HS_COLOR) brightness = kwargs.get(ATTR_BRIGHTNESS) if hs_color is not None: self._hs = hs_color if brightness is not None: self._brightness = brightness rgb = color_util.color_hs_to_RGB(*self._hs) self.set_state(rgb[0], rgb[1], rgb[2], self.brightness) self.schedule_update_ha_state() def turn_off(self, **kwargs): """Turn the specified light off.""" self._state = False self.set_state(0, 0, 0, 0) self.schedule_update_ha_state()
"""Tests for the iOS init file.""" from unittest.mock import patch import pytest from homeassistant import config_entries, data_entry_flow from homeassistant.setup import async_setup_component from homeassistant.components import ios from tests.common import mock_component, mock_coro @pytest.fixture(autouse=True) def mock_load_json(): """Mock load_json.""" with patch("homeassistant.components.ios.load_json", return_value={}): yield @pytest.fixture(autouse=True) def mock_dependencies(hass): """Mock dependencies loaded.""" mock_component(hass, "zeroconf") mock_component(hass, "device_tracker") async def test_creating_entry_sets_up_sensor(hass): """Test setting up iOS loads the sensor component.""" with patch( "homeassistant.components.ios.sensor.async_setup_entry", return_value=mock_coro(True), ) as mock_setup: result = await hass.config_entries.flow.async_init( ios.DOMAIN, context={"source": config_entries.SOURCE_USER} ) # Confirmation form assert result["type"] == data_entry_flow.RESULT_TYPE_FORM result = await hass.config_entries.flow.async_configure(result["flow_id"], {}) assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY await hass.async_block_till_done() assert len(mock_setup.mock_calls) == 1 async def test_configuring_ios_creates_entry(hass): """Test that specifying config will create an entry.""" with patch( "homeassistant.components.ios.async_setup_entry", return_value=mock_coro(True) ) as mock_setup: await async_setup_component(hass, ios.DOMAIN, {"ios": {"push": {}}}) await hass.async_block_till_done() assert len(mock_setup.mock_calls) == 1 async def test_not_configuring_ios_not_creates_entry(hass): """Test that no config will not create an entry.""" with patch( "homeassistant.components.ios.async_setup_entry", return_value=mock_coro(True) ) as mock_setup: await async_setup_component(hass, ios.DOMAIN, {}) await hass.async_block_till_done() assert len(mock_setup.mock_calls) == 0
fbradyirl/home-assistant
tests/components/ios/test_init.py
homeassistant/components/tikteck/light.py
"""Support for HomematicIP Cloud alarm control panel.""" import logging from homematicip.aio.group import AsyncSecurityZoneGroup from homematicip.aio.home import AsyncHome from homematicip.base.enums import WindowState from homeassistant.components.alarm_control_panel import AlarmControlPanel from homeassistant.config_entries import ConfigEntry from homeassistant.const import ( STATE_ALARM_ARMED_AWAY, STATE_ALARM_ARMED_HOME, STATE_ALARM_DISARMED, STATE_ALARM_TRIGGERED, ) from homeassistant.core import HomeAssistant from . import DOMAIN as HMIPC_DOMAIN, HMIPC_HAPID _LOGGER = logging.getLogger(__name__) CONST_ALARM_CONTROL_PANEL_NAME = "HmIP Alarm Control Panel" async def async_setup_platform(hass, config, async_add_entities, discovery_info=None): """Set up the HomematicIP Cloud alarm control devices.""" pass async def async_setup_entry( hass: HomeAssistant, config_entry: ConfigEntry, async_add_entities ) -> None: """Set up the HomematicIP alrm control panel from a config entry.""" home = hass.data[HMIPC_DOMAIN][config_entry.data[HMIPC_HAPID]].home devices = [] security_zones = [] for group in home.groups: if isinstance(group, AsyncSecurityZoneGroup): security_zones.append(group) if security_zones: devices.append(HomematicipAlarmControlPanel(home, security_zones)) if devices: async_add_entities(devices) class HomematicipAlarmControlPanel(AlarmControlPanel): """Representation of an alarm control panel.""" def __init__(self, home: AsyncHome, security_zones) -> None: """Initialize the alarm control panel.""" self._home = home self.alarm_state = STATE_ALARM_DISARMED for security_zone in security_zones: if security_zone.label == "INTERNAL": self._internal_alarm_zone = security_zone else: self._external_alarm_zone = security_zone @property def state(self) -> str: """Return the state of the device.""" activation_state = self._home.get_security_zones_activation() # check arm_away if activation_state == (True, True): if self._internal_alarm_zone_state or 
self._external_alarm_zone_state: return STATE_ALARM_TRIGGERED return STATE_ALARM_ARMED_AWAY # check arm_home if activation_state == (False, True): if self._external_alarm_zone_state: return STATE_ALARM_TRIGGERED return STATE_ALARM_ARMED_HOME return STATE_ALARM_DISARMED @property def _internal_alarm_zone_state(self) -> bool: return _get_zone_alarm_state(self._internal_alarm_zone) @property def _external_alarm_zone_state(self) -> bool: """Return the state of the device.""" return _get_zone_alarm_state(self._external_alarm_zone) async def async_alarm_disarm(self, code=None): """Send disarm command.""" await self._home.set_security_zones_activation(False, False) async def async_alarm_arm_home(self, code=None): """Send arm home command.""" await self._home.set_security_zones_activation(False, True) async def async_alarm_arm_away(self, code=None): """Send arm away command.""" await self._home.set_security_zones_activation(True, True) async def async_added_to_hass(self): """Register callbacks.""" self._internal_alarm_zone.on_update(self._async_device_changed) self._external_alarm_zone.on_update(self._async_device_changed) def _async_device_changed(self, *args, **kwargs): """Handle device state changes.""" _LOGGER.debug("Event %s (%s)", self.name, CONST_ALARM_CONTROL_PANEL_NAME) self.async_schedule_update_ha_state() @property def name(self) -> str: """Return the name of the generic device.""" name = CONST_ALARM_CONTROL_PANEL_NAME if self._home.name: name = "{} {}".format(self._home.name, name) return name @property def should_poll(self) -> bool: """No polling needed.""" return False @property def available(self) -> bool: """Device available.""" return ( not self._internal_alarm_zone.unreach or not self._external_alarm_zone.unreach ) @property def unique_id(self) -> str: """Return a unique ID.""" return "{}_{}".format(self.__class__.__name__, self._home.id) def _get_zone_alarm_state(security_zone) -> bool: if security_zone.active: if ( security_zone.sabotage or 
security_zone.motionDetected or security_zone.presenceDetected or security_zone.windowState == WindowState.OPEN or security_zone.windowState == WindowState.TILTED ): return True return False
"""Tests for the iOS init file.""" from unittest.mock import patch import pytest from homeassistant import config_entries, data_entry_flow from homeassistant.setup import async_setup_component from homeassistant.components import ios from tests.common import mock_component, mock_coro @pytest.fixture(autouse=True) def mock_load_json(): """Mock load_json.""" with patch("homeassistant.components.ios.load_json", return_value={}): yield @pytest.fixture(autouse=True) def mock_dependencies(hass): """Mock dependencies loaded.""" mock_component(hass, "zeroconf") mock_component(hass, "device_tracker") async def test_creating_entry_sets_up_sensor(hass): """Test setting up iOS loads the sensor component.""" with patch( "homeassistant.components.ios.sensor.async_setup_entry", return_value=mock_coro(True), ) as mock_setup: result = await hass.config_entries.flow.async_init( ios.DOMAIN, context={"source": config_entries.SOURCE_USER} ) # Confirmation form assert result["type"] == data_entry_flow.RESULT_TYPE_FORM result = await hass.config_entries.flow.async_configure(result["flow_id"], {}) assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY await hass.async_block_till_done() assert len(mock_setup.mock_calls) == 1 async def test_configuring_ios_creates_entry(hass): """Test that specifying config will create an entry.""" with patch( "homeassistant.components.ios.async_setup_entry", return_value=mock_coro(True) ) as mock_setup: await async_setup_component(hass, ios.DOMAIN, {"ios": {"push": {}}}) await hass.async_block_till_done() assert len(mock_setup.mock_calls) == 1 async def test_not_configuring_ios_not_creates_entry(hass): """Test that no config will not create an entry.""" with patch( "homeassistant.components.ios.async_setup_entry", return_value=mock_coro(True) ) as mock_setup: await async_setup_component(hass, ios.DOMAIN, {}) await hass.async_block_till_done() assert len(mock_setup.mock_calls) == 0
fbradyirl/home-assistant
tests/components/ios/test_init.py
homeassistant/components/homematicip_cloud/alarm_control_panel.py
"""The totalconnect component.""" import logging import voluptuous as vol from total_connect_client import TotalConnectClient import homeassistant.helpers.config_validation as cv from homeassistant.helpers import discovery from homeassistant.const import CONF_PASSWORD, CONF_USERNAME _LOGGER = logging.getLogger(__name__) DOMAIN = "totalconnect" CONFIG_SCHEMA = vol.Schema( { DOMAIN: vol.Schema( { vol.Required(CONF_USERNAME): cv.string, vol.Required(CONF_PASSWORD): cv.string, } ) }, extra=vol.ALLOW_EXTRA, ) TOTALCONNECT_PLATFORMS = ["alarm_control_panel"] def setup(hass, config): """Set up TotalConnect component.""" conf = config[DOMAIN] username = conf[CONF_USERNAME] password = conf[CONF_PASSWORD] client = TotalConnectClient.TotalConnectClient(username, password) if client.token is False: _LOGGER.error("TotalConnect authentication failed") return False hass.data[DOMAIN] = TotalConnectSystem(username, password, client) for platform in TOTALCONNECT_PLATFORMS: discovery.load_platform(hass, platform, DOMAIN, {}, config) return True class TotalConnectSystem: """TotalConnect System class.""" def __init__(self, username, password, client): """Initialize the TotalConnect system.""" self._username = username self._password = password self.client = client
"""Tests for the iOS init file.""" from unittest.mock import patch import pytest from homeassistant import config_entries, data_entry_flow from homeassistant.setup import async_setup_component from homeassistant.components import ios from tests.common import mock_component, mock_coro @pytest.fixture(autouse=True) def mock_load_json(): """Mock load_json.""" with patch("homeassistant.components.ios.load_json", return_value={}): yield @pytest.fixture(autouse=True) def mock_dependencies(hass): """Mock dependencies loaded.""" mock_component(hass, "zeroconf") mock_component(hass, "device_tracker") async def test_creating_entry_sets_up_sensor(hass): """Test setting up iOS loads the sensor component.""" with patch( "homeassistant.components.ios.sensor.async_setup_entry", return_value=mock_coro(True), ) as mock_setup: result = await hass.config_entries.flow.async_init( ios.DOMAIN, context={"source": config_entries.SOURCE_USER} ) # Confirmation form assert result["type"] == data_entry_flow.RESULT_TYPE_FORM result = await hass.config_entries.flow.async_configure(result["flow_id"], {}) assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY await hass.async_block_till_done() assert len(mock_setup.mock_calls) == 1 async def test_configuring_ios_creates_entry(hass): """Test that specifying config will create an entry.""" with patch( "homeassistant.components.ios.async_setup_entry", return_value=mock_coro(True) ) as mock_setup: await async_setup_component(hass, ios.DOMAIN, {"ios": {"push": {}}}) await hass.async_block_till_done() assert len(mock_setup.mock_calls) == 1 async def test_not_configuring_ios_not_creates_entry(hass): """Test that no config will not create an entry.""" with patch( "homeassistant.components.ios.async_setup_entry", return_value=mock_coro(True) ) as mock_setup: await async_setup_component(hass, ios.DOMAIN, {}) await hass.async_block_till_done() assert len(mock_setup.mock_calls) == 0
fbradyirl/home-assistant
tests/components/ios/test_init.py
homeassistant/components/totalconnect/__init__.py
"""Twilio SMS platform for notify component.""" import logging import voluptuous as vol from homeassistant.components.twilio import DATA_TWILIO import homeassistant.helpers.config_validation as cv from homeassistant.components.notify import ( ATTR_TARGET, PLATFORM_SCHEMA, BaseNotificationService, ATTR_DATA, ) _LOGGER = logging.getLogger(__name__) CONF_FROM_NUMBER = "from_number" ATTR_MEDIAURL = "media_url" PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend( { vol.Required(CONF_FROM_NUMBER): vol.All( cv.string, vol.Match( r"^\+?[1-9]\d{1,14}$|" r"^(?=.{1,11}$)[a-zA-Z0-9\s]*" r"[a-zA-Z][a-zA-Z0-9\s]*$" ), ) } ) def get_service(hass, config, discovery_info=None): """Get the Twilio SMS notification service.""" return TwilioSMSNotificationService( hass.data[DATA_TWILIO], config[CONF_FROM_NUMBER] ) class TwilioSMSNotificationService(BaseNotificationService): """Implement the notification service for the Twilio SMS service.""" def __init__(self, twilio_client, from_number): """Initialize the service.""" self.client = twilio_client self.from_number = from_number def send_message(self, message="", **kwargs): """Send SMS to specified target user cell.""" targets = kwargs.get(ATTR_TARGET) data = kwargs.get(ATTR_DATA) or {} twilio_args = {"body": message, "from_": self.from_number} if ATTR_MEDIAURL in data: twilio_args[ATTR_MEDIAURL] = data[ATTR_MEDIAURL] if not targets: _LOGGER.info("At least 1 target is required") return for target in targets: self.client.messages.create(to=target, **twilio_args)
"""Tests for the iOS init file.""" from unittest.mock import patch import pytest from homeassistant import config_entries, data_entry_flow from homeassistant.setup import async_setup_component from homeassistant.components import ios from tests.common import mock_component, mock_coro @pytest.fixture(autouse=True) def mock_load_json(): """Mock load_json.""" with patch("homeassistant.components.ios.load_json", return_value={}): yield @pytest.fixture(autouse=True) def mock_dependencies(hass): """Mock dependencies loaded.""" mock_component(hass, "zeroconf") mock_component(hass, "device_tracker") async def test_creating_entry_sets_up_sensor(hass): """Test setting up iOS loads the sensor component.""" with patch( "homeassistant.components.ios.sensor.async_setup_entry", return_value=mock_coro(True), ) as mock_setup: result = await hass.config_entries.flow.async_init( ios.DOMAIN, context={"source": config_entries.SOURCE_USER} ) # Confirmation form assert result["type"] == data_entry_flow.RESULT_TYPE_FORM result = await hass.config_entries.flow.async_configure(result["flow_id"], {}) assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY await hass.async_block_till_done() assert len(mock_setup.mock_calls) == 1 async def test_configuring_ios_creates_entry(hass): """Test that specifying config will create an entry.""" with patch( "homeassistant.components.ios.async_setup_entry", return_value=mock_coro(True) ) as mock_setup: await async_setup_component(hass, ios.DOMAIN, {"ios": {"push": {}}}) await hass.async_block_till_done() assert len(mock_setup.mock_calls) == 1 async def test_not_configuring_ios_not_creates_entry(hass): """Test that no config will not create an entry.""" with patch( "homeassistant.components.ios.async_setup_entry", return_value=mock_coro(True) ) as mock_setup: await async_setup_component(hass, ios.DOMAIN, {}) await hass.async_block_till_done() assert len(mock_setup.mock_calls) == 0
fbradyirl/home-assistant
tests/components/ios/test_init.py
homeassistant/components/twilio_sms/notify.py
"""Support for the IBM Watson IoT Platform.""" import logging import queue import threading import time import voluptuous as vol from homeassistant.const import ( CONF_DOMAINS, CONF_ENTITIES, CONF_EXCLUDE, CONF_ID, CONF_INCLUDE, CONF_TOKEN, CONF_TYPE, EVENT_HOMEASSISTANT_STOP, EVENT_STATE_CHANGED, STATE_UNAVAILABLE, STATE_UNKNOWN, ) from homeassistant.helpers import state as state_helper import homeassistant.helpers.config_validation as cv _LOGGER = logging.getLogger(__name__) CONF_ORG = "organization" DOMAIN = "watson_iot" MAX_TRIES = 3 RETRY_DELAY = 20 CONFIG_SCHEMA = vol.Schema( { DOMAIN: vol.All( vol.Schema( { vol.Required(CONF_ORG): cv.string, vol.Required(CONF_TYPE): cv.string, vol.Required(CONF_ID): cv.string, vol.Required(CONF_TOKEN): cv.string, vol.Optional(CONF_EXCLUDE, default={}): vol.Schema( { vol.Optional(CONF_ENTITIES, default=[]): cv.entity_ids, vol.Optional(CONF_DOMAINS, default=[]): vol.All( cv.ensure_list, [cv.string] ), } ), vol.Optional(CONF_INCLUDE, default={}): vol.Schema( { vol.Optional(CONF_ENTITIES, default=[]): cv.entity_ids, vol.Optional(CONF_DOMAINS, default=[]): vol.All( cv.ensure_list, [cv.string] ), } ), } ) ) }, extra=vol.ALLOW_EXTRA, ) def setup(hass, config): """Set up the Watson IoT Platform component.""" from ibmiotf import gateway conf = config[DOMAIN] include = conf[CONF_INCLUDE] exclude = conf[CONF_EXCLUDE] whitelist_e = set(include[CONF_ENTITIES]) whitelist_d = set(include[CONF_DOMAINS]) blacklist_e = set(exclude[CONF_ENTITIES]) blacklist_d = set(exclude[CONF_DOMAINS]) client_args = { "org": conf[CONF_ORG], "type": conf[CONF_TYPE], "id": conf[CONF_ID], "auth-method": "token", "auth-token": conf[CONF_TOKEN], } watson_gateway = gateway.Client(client_args) def event_to_json(event): """Add an event to the outgoing list.""" state = event.data.get("new_state") if ( state is None or state.state in (STATE_UNKNOWN, "", STATE_UNAVAILABLE) or state.entity_id in blacklist_e or state.domain in blacklist_d ): return if (whitelist_e and 
state.entity_id not in whitelist_e) or ( whitelist_d and state.domain not in whitelist_d ): return try: _state_as_value = float(state.state) except ValueError: _state_as_value = None if _state_as_value is None: try: _state_as_value = float(state_helper.state_as_number(state)) except ValueError: _state_as_value = None out_event = { "tags": {"domain": state.domain, "entity_id": state.object_id}, "time": event.time_fired.isoformat(), "fields": {"state": state.state}, } if _state_as_value is not None: out_event["fields"]["state_value"] = _state_as_value for key, value in state.attributes.items(): if key != "unit_of_measurement": # If the key is already in fields if key in out_event["fields"]: key = "{}_".format(key) # For each value we try to cast it as float # But if we can not do it we store the value # as string try: out_event["fields"][key] = float(value) except (ValueError, TypeError): out_event["fields"][key] = str(value) return out_event instance = hass.data[DOMAIN] = WatsonIOTThread(hass, watson_gateway, event_to_json) instance.start() def shutdown(event): """Shut down the thread.""" instance.queue.put(None) instance.join() hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, shutdown) return True class WatsonIOTThread(threading.Thread): """A threaded event handler class.""" def __init__(self, hass, gateway, event_to_json): """Initialize the listener.""" threading.Thread.__init__(self, name="WatsonIOT") self.queue = queue.Queue() self.gateway = gateway self.gateway.connect() self.event_to_json = event_to_json self.write_errors = 0 self.shutdown = False hass.bus.listen(EVENT_STATE_CHANGED, self._event_listener) def _event_listener(self, event): """Listen for new messages on the bus and queue them for Watson IoT.""" item = (time.monotonic(), event) self.queue.put(item) def get_events_json(self): """Return an event formatted for writing.""" events = [] try: item = self.queue.get() if item is None: self.shutdown = True else: event_json = self.event_to_json(item[1]) if 
event_json: events.append(event_json) except queue.Empty: pass return events def write_to_watson(self, events): """Write preprocessed events to watson.""" import ibmiotf for event in events: for retry in range(MAX_TRIES + 1): try: for field in event["fields"]: value = event["fields"][field] device_success = self.gateway.publishDeviceEvent( event["tags"]["domain"], event["tags"]["entity_id"], field, "json", value, ) if not device_success: _LOGGER.error("Failed to publish message to Watson IoT") continue break except (ibmiotf.MissingMessageEncoderException, IOError): if retry < MAX_TRIES: time.sleep(RETRY_DELAY) else: _LOGGER.exception("Failed to publish message to Watson IoT") def run(self): """Process incoming events.""" while not self.shutdown: event = self.get_events_json() if event: self.write_to_watson(event) self.queue.task_done() def block_till_done(self): """Block till all events processed.""" self.queue.join()
"""Tests for the iOS init file.""" from unittest.mock import patch import pytest from homeassistant import config_entries, data_entry_flow from homeassistant.setup import async_setup_component from homeassistant.components import ios from tests.common import mock_component, mock_coro @pytest.fixture(autouse=True) def mock_load_json(): """Mock load_json.""" with patch("homeassistant.components.ios.load_json", return_value={}): yield @pytest.fixture(autouse=True) def mock_dependencies(hass): """Mock dependencies loaded.""" mock_component(hass, "zeroconf") mock_component(hass, "device_tracker") async def test_creating_entry_sets_up_sensor(hass): """Test setting up iOS loads the sensor component.""" with patch( "homeassistant.components.ios.sensor.async_setup_entry", return_value=mock_coro(True), ) as mock_setup: result = await hass.config_entries.flow.async_init( ios.DOMAIN, context={"source": config_entries.SOURCE_USER} ) # Confirmation form assert result["type"] == data_entry_flow.RESULT_TYPE_FORM result = await hass.config_entries.flow.async_configure(result["flow_id"], {}) assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY await hass.async_block_till_done() assert len(mock_setup.mock_calls) == 1 async def test_configuring_ios_creates_entry(hass): """Test that specifying config will create an entry.""" with patch( "homeassistant.components.ios.async_setup_entry", return_value=mock_coro(True) ) as mock_setup: await async_setup_component(hass, ios.DOMAIN, {"ios": {"push": {}}}) await hass.async_block_till_done() assert len(mock_setup.mock_calls) == 1 async def test_not_configuring_ios_not_creates_entry(hass): """Test that no config will not create an entry.""" with patch( "homeassistant.components.ios.async_setup_entry", return_value=mock_coro(True) ) as mock_setup: await async_setup_component(hass, ios.DOMAIN, {}) await hass.async_block_till_done() assert len(mock_setup.mock_calls) == 0
fbradyirl/home-assistant
tests/components/ios/test_init.py
homeassistant/components/watson_iot/__init__.py
"""Allow users to set and activate scenes.""" from collections import namedtuple import logging import voluptuous as vol from homeassistant.const import ( ATTR_ENTITY_ID, ATTR_STATE, CONF_ENTITIES, CONF_NAME, CONF_PLATFORM, STATE_OFF, STATE_ON, SERVICE_RELOAD, ) from homeassistant.core import State, DOMAIN from homeassistant import config as conf_util from homeassistant.exceptions import HomeAssistantError from homeassistant.loader import async_get_integration from homeassistant.helpers import ( config_per_platform, config_validation as cv, entity_platform, ) from homeassistant.helpers.state import HASS_DOMAIN, async_reproduce_state from homeassistant.components.scene import DOMAIN as SCENE_DOMAIN, STATES, Scene PLATFORM_SCHEMA = vol.Schema( { vol.Required(CONF_PLATFORM): HASS_DOMAIN, vol.Required(STATES): vol.All( cv.ensure_list, [ { vol.Required(CONF_NAME): cv.string, vol.Required(CONF_ENTITIES): { cv.entity_id: vol.Any(str, bool, dict) }, } ], ), }, extra=vol.ALLOW_EXTRA, ) SCENECONFIG = namedtuple("SceneConfig", [CONF_NAME, STATES]) _LOGGER = logging.getLogger(__name__) async def async_setup_platform(hass, config, async_add_entities, discovery_info=None): """Set up home assistant scene entries.""" _process_scenes_config(hass, async_add_entities, config) # This platform can be loaded multiple times. Only first time register the service. if hass.services.has_service(SCENE_DOMAIN, SERVICE_RELOAD): return # Store platform for later. platform = entity_platform.current_platform.get() async def reload_config(call): """Reload the scene config.""" try: conf = await conf_util.async_hass_config_yaml(hass) except HomeAssistantError as err: _LOGGER.error(err) return integration = await async_get_integration(hass, SCENE_DOMAIN) conf = await conf_util.async_process_component_config(hass, conf, integration) if not conf or not platform: return await platform.async_reset() # Extract only the config for the Home Assistant platform, ignore the rest. 
for p_type, p_config in config_per_platform(conf, SCENE_DOMAIN): if p_type != DOMAIN: continue _process_scenes_config(hass, async_add_entities, p_config) hass.helpers.service.async_register_admin_service( SCENE_DOMAIN, SERVICE_RELOAD, reload_config ) def _process_scenes_config(hass, async_add_entities, config): """Process multiple scenes and add them.""" scene_config = config[STATES] # Check empty list if not scene_config: return async_add_entities( HomeAssistantScene(hass, _process_scene_config(scene)) for scene in scene_config ) def _process_scene_config(scene_config): """Process passed in config into a format to work with. Async friendly. """ name = scene_config.get(CONF_NAME) states = {} c_entities = dict(scene_config.get(CONF_ENTITIES, {})) for entity_id in c_entities: if isinstance(c_entities[entity_id], dict): entity_attrs = c_entities[entity_id].copy() state = entity_attrs.pop(ATTR_STATE, None) attributes = entity_attrs else: state = c_entities[entity_id] attributes = {} # YAML translates 'on' to a boolean # http://yaml.org/type/bool.html if isinstance(state, bool): state = STATE_ON if state else STATE_OFF else: state = str(state) states[entity_id.lower()] = State(entity_id, state, attributes) return SCENECONFIG(name, states) class HomeAssistantScene(Scene): """A scene is a group of entities and the states we want them to be.""" def __init__(self, hass, scene_config): """Initialize the scene.""" self.hass = hass self.scene_config = scene_config @property def name(self): """Return the name of the scene.""" return self.scene_config.name @property def device_state_attributes(self): """Return the scene state attributes.""" return {ATTR_ENTITY_ID: list(self.scene_config.states.keys())} async def async_activate(self): """Activate scene. Try to get entities into requested state.""" await async_reproduce_state(self.hass, self.scene_config.states.values(), True)
"""Tests for the iOS init file.""" from unittest.mock import patch import pytest from homeassistant import config_entries, data_entry_flow from homeassistant.setup import async_setup_component from homeassistant.components import ios from tests.common import mock_component, mock_coro @pytest.fixture(autouse=True) def mock_load_json(): """Mock load_json.""" with patch("homeassistant.components.ios.load_json", return_value={}): yield @pytest.fixture(autouse=True) def mock_dependencies(hass): """Mock dependencies loaded.""" mock_component(hass, "zeroconf") mock_component(hass, "device_tracker") async def test_creating_entry_sets_up_sensor(hass): """Test setting up iOS loads the sensor component.""" with patch( "homeassistant.components.ios.sensor.async_setup_entry", return_value=mock_coro(True), ) as mock_setup: result = await hass.config_entries.flow.async_init( ios.DOMAIN, context={"source": config_entries.SOURCE_USER} ) # Confirmation form assert result["type"] == data_entry_flow.RESULT_TYPE_FORM result = await hass.config_entries.flow.async_configure(result["flow_id"], {}) assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY await hass.async_block_till_done() assert len(mock_setup.mock_calls) == 1 async def test_configuring_ios_creates_entry(hass): """Test that specifying config will create an entry.""" with patch( "homeassistant.components.ios.async_setup_entry", return_value=mock_coro(True) ) as mock_setup: await async_setup_component(hass, ios.DOMAIN, {"ios": {"push": {}}}) await hass.async_block_till_done() assert len(mock_setup.mock_calls) == 1 async def test_not_configuring_ios_not_creates_entry(hass): """Test that no config will not create an entry.""" with patch( "homeassistant.components.ios.async_setup_entry", return_value=mock_coro(True) ) as mock_setup: await async_setup_component(hass, ios.DOMAIN, {}) await hass.async_block_till_done() assert len(mock_setup.mock_calls) == 0
fbradyirl/home-assistant
tests/components/ios/test_init.py
homeassistant/components/homeassistant/scene.py