Dataset columns:
  input       string (lengths 53 to 297k)
  output      string (604 distinct values)
  repo_name   string (376 distinct values)
  test_path   string (583 distinct values)
  code_path   string (lengths 7 to 116)
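Each record pairs a Home Assistant source module (input) with a test file (output), together with the repository name (repo_name) and the paths of the two files (code_path, test_path). The sketch below shows one way to load and inspect a record with the Hugging Face datasets library; it is illustrative only, and the local parquet path "data/train.parquet" is a placeholder assumption, not part of this dataset's published layout.

# Minimal sketch: load the records from a local parquet export and inspect one.
# Assumption: the data has been saved locally as "data/train.parquet" (placeholder path).
from datasets import load_dataset

ds = load_dataset("parquet", data_files="data/train.parquet", split="train")
row = ds[0]

print(row["repo_name"])      # e.g. "leppa/home-assistant"
print(row["code_path"])      # path of the source module
print(row["test_path"])      # path of the matching test file
print(row["input"][:200])    # start of the source module text
print(row["output"][:200])   # start of the test file text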
"""Support for TP-Link LTE modems.""" import asyncio import logging import aiohttp import attr import tp_connected import voluptuous as vol from homeassistant.const import ( CONF_HOST, CONF_NAME, CONF_PASSWORD, CONF_RECIPIENT, EVENT_HOMEASSISTANT_STOP, ) from homeassistant.core import callback from homeassistant.helpers import config_validation as cv, discovery from homeassistant.helpers.aiohttp_client import async_create_clientsession _LOGGER = logging.getLogger(__name__) DOMAIN = "tplink_lte" DATA_KEY = "tplink_lte" CONF_NOTIFY = "notify" _NOTIFY_SCHEMA = vol.Schema( { vol.Optional(CONF_NAME): cv.string, vol.Optional(CONF_RECIPIENT): vol.All(cv.ensure_list, [cv.string]), } ) CONFIG_SCHEMA = vol.Schema( { DOMAIN: vol.All( cv.ensure_list, [ vol.Schema( { vol.Required(CONF_HOST): cv.string, vol.Required(CONF_PASSWORD): cv.string, vol.Optional(CONF_NOTIFY): vol.All( cv.ensure_list, [_NOTIFY_SCHEMA] ), } ) ], ) }, extra=vol.ALLOW_EXTRA, ) @attr.s class ModemData: """Class for modem state.""" host = attr.ib() modem = attr.ib() connected = attr.ib(init=False, default=True) @attr.s class LTEData: """Shared state.""" websession = attr.ib() modem_data = attr.ib(init=False, factory=dict) def get_modem_data(self, config): """Get the requested or the only modem_data value.""" if CONF_HOST in config: return self.modem_data.get(config[CONF_HOST]) if len(self.modem_data) == 1: return next(iter(self.modem_data.values())) return None async def async_setup(hass, config): """Set up TP-Link LTE component.""" if DATA_KEY not in hass.data: websession = async_create_clientsession( hass, cookie_jar=aiohttp.CookieJar(unsafe=True) ) hass.data[DATA_KEY] = LTEData(websession) domain_config = config.get(DOMAIN, []) tasks = [_setup_lte(hass, conf) for conf in domain_config] if tasks: await asyncio.wait(tasks) for conf in domain_config: for notify_conf in conf.get(CONF_NOTIFY, []): hass.async_create_task( discovery.async_load_platform( hass, "notify", DOMAIN, notify_conf, config ) ) return True async def _setup_lte(hass, lte_config, delay=0): """Set up a TP-Link LTE modem.""" host = lte_config[CONF_HOST] password = lte_config[CONF_PASSWORD] websession = hass.data[DATA_KEY].websession modem = tp_connected.Modem(hostname=host, websession=websession) modem_data = ModemData(host, modem) try: await _login(hass, modem_data, password) except tp_connected.Error: retry_task = hass.loop.create_task(_retry_login(hass, modem_data, password)) @callback def cleanup_retry(event): """Clean up retry task resources.""" if not retry_task.done(): retry_task.cancel() hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, cleanup_retry) async def _login(hass, modem_data, password): """Log in and complete setup.""" await modem_data.modem.login(password=password) modem_data.connected = True hass.data[DATA_KEY].modem_data[modem_data.host] = modem_data async def cleanup(event): """Clean up resources.""" await modem_data.modem.logout() hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, cleanup) async def _retry_login(hass, modem_data, password): """Sleep and retry setup.""" _LOGGER.warning("Could not connect to %s. Will keep trying.", modem_data.host) modem_data.connected = False delay = 15 while not modem_data.connected: await asyncio.sleep(delay) try: await _login(hass, modem_data, password) _LOGGER.warning("Connected to %s", modem_data.host) except tp_connected.Error: delay = min(2 * delay, 300)
"""The tests for the demo water_heater component.""" import unittest import pytest import voluptuous as vol from homeassistant.components import water_heater from homeassistant.setup import setup_component from homeassistant.util.unit_system import IMPERIAL_SYSTEM from tests.common import get_test_home_assistant from tests.components.water_heater import common ENTITY_WATER_HEATER = "water_heater.demo_water_heater" ENTITY_WATER_HEATER_CELSIUS = "water_heater.demo_water_heater_celsius" class TestDemowater_heater(unittest.TestCase): """Test the demo water_heater.""" def setUp(self): # pylint: disable=invalid-name """Set up things to be run when tests are started.""" self.hass = get_test_home_assistant() self.hass.config.units = IMPERIAL_SYSTEM assert setup_component( self.hass, water_heater.DOMAIN, {"water_heater": {"platform": "demo"}} ) def tearDown(self): # pylint: disable=invalid-name """Stop down everything that was started.""" self.hass.stop() def test_setup_params(self): """Test the initial parameters.""" state = self.hass.states.get(ENTITY_WATER_HEATER) assert 119 == state.attributes.get("temperature") assert "off" == state.attributes.get("away_mode") assert "eco" == state.attributes.get("operation_mode") def test_default_setup_params(self): """Test the setup with default parameters.""" state = self.hass.states.get(ENTITY_WATER_HEATER) assert 110 == state.attributes.get("min_temp") assert 140 == state.attributes.get("max_temp") def test_set_only_target_temp_bad_attr(self): """Test setting the target temperature without required attribute.""" state = self.hass.states.get(ENTITY_WATER_HEATER) assert 119 == state.attributes.get("temperature") with pytest.raises(vol.Invalid): common.set_temperature(self.hass, None, ENTITY_WATER_HEATER) self.hass.block_till_done() assert 119 == state.attributes.get("temperature") def test_set_only_target_temp(self): """Test the setting of the target temperature.""" state = self.hass.states.get(ENTITY_WATER_HEATER) assert 119 == state.attributes.get("temperature") common.set_temperature(self.hass, 110, ENTITY_WATER_HEATER) self.hass.block_till_done() state = self.hass.states.get(ENTITY_WATER_HEATER) assert 110 == state.attributes.get("temperature") def test_set_operation_bad_attr_and_state(self): """Test setting operation mode without required attribute. Also check the state. 
""" state = self.hass.states.get(ENTITY_WATER_HEATER) assert "eco" == state.attributes.get("operation_mode") assert "eco" == state.state with pytest.raises(vol.Invalid): common.set_operation_mode(self.hass, None, ENTITY_WATER_HEATER) self.hass.block_till_done() state = self.hass.states.get(ENTITY_WATER_HEATER) assert "eco" == state.attributes.get("operation_mode") assert "eco" == state.state def test_set_operation(self): """Test setting of new operation mode.""" state = self.hass.states.get(ENTITY_WATER_HEATER) assert "eco" == state.attributes.get("operation_mode") assert "eco" == state.state common.set_operation_mode(self.hass, "electric", ENTITY_WATER_HEATER) self.hass.block_till_done() state = self.hass.states.get(ENTITY_WATER_HEATER) assert "electric" == state.attributes.get("operation_mode") assert "electric" == state.state def test_set_away_mode_bad_attr(self): """Test setting the away mode without required attribute.""" state = self.hass.states.get(ENTITY_WATER_HEATER) assert "off" == state.attributes.get("away_mode") with pytest.raises(vol.Invalid): common.set_away_mode(self.hass, None, ENTITY_WATER_HEATER) self.hass.block_till_done() assert "off" == state.attributes.get("away_mode") def test_set_away_mode_on(self): """Test setting the away mode on/true.""" common.set_away_mode(self.hass, True, ENTITY_WATER_HEATER) self.hass.block_till_done() state = self.hass.states.get(ENTITY_WATER_HEATER) assert "on" == state.attributes.get("away_mode") def test_set_away_mode_off(self): """Test setting the away mode off/false.""" common.set_away_mode(self.hass, False, ENTITY_WATER_HEATER_CELSIUS) self.hass.block_till_done() state = self.hass.states.get(ENTITY_WATER_HEATER_CELSIUS) assert "off" == state.attributes.get("away_mode") def test_set_only_target_temp_with_convert(self): """Test the setting of the target temperature.""" state = self.hass.states.get(ENTITY_WATER_HEATER_CELSIUS) assert 113 == state.attributes.get("temperature") common.set_temperature(self.hass, 114, ENTITY_WATER_HEATER_CELSIUS) self.hass.block_till_done() state = self.hass.states.get(ENTITY_WATER_HEATER_CELSIUS) assert 114 == state.attributes.get("temperature")
leppa/home-assistant
tests/components/demo/test_water_heater.py
homeassistant/components/tplink_lte/__init__.py
"""Support for sending data to an Influx database.""" import logging import math import queue import re import threading import time from influxdb import InfluxDBClient, exceptions import requests.exceptions import voluptuous as vol from homeassistant.const import ( CONF_DOMAINS, CONF_ENTITIES, CONF_EXCLUDE, CONF_HOST, CONF_INCLUDE, CONF_PASSWORD, CONF_PORT, CONF_SSL, CONF_USERNAME, CONF_VERIFY_SSL, EVENT_HOMEASSISTANT_STOP, EVENT_STATE_CHANGED, STATE_UNAVAILABLE, STATE_UNKNOWN, ) from homeassistant.helpers import event as event_helper, state as state_helper import homeassistant.helpers.config_validation as cv from homeassistant.helpers.entity_values import EntityValues _LOGGER = logging.getLogger(__name__) CONF_DB_NAME = "database" CONF_TAGS = "tags" CONF_DEFAULT_MEASUREMENT = "default_measurement" CONF_OVERRIDE_MEASUREMENT = "override_measurement" CONF_TAGS_ATTRIBUTES = "tags_attributes" CONF_COMPONENT_CONFIG = "component_config" CONF_COMPONENT_CONFIG_GLOB = "component_config_glob" CONF_COMPONENT_CONFIG_DOMAIN = "component_config_domain" CONF_RETRY_COUNT = "max_retries" DEFAULT_DATABASE = "home_assistant" DEFAULT_VERIFY_SSL = True DOMAIN = "influxdb" TIMEOUT = 5 RETRY_DELAY = 20 QUEUE_BACKLOG_SECONDS = 30 RETRY_INTERVAL = 60 # seconds BATCH_TIMEOUT = 1 BATCH_BUFFER_SIZE = 100 COMPONENT_CONFIG_SCHEMA_ENTRY = vol.Schema( {vol.Optional(CONF_OVERRIDE_MEASUREMENT): cv.string} ) CONFIG_SCHEMA = vol.Schema( { DOMAIN: vol.All( vol.Schema( { vol.Optional(CONF_HOST): cv.string, vol.Inclusive(CONF_USERNAME, "authentication"): cv.string, vol.Inclusive(CONF_PASSWORD, "authentication"): cv.string, vol.Optional(CONF_EXCLUDE, default={}): vol.Schema( { vol.Optional(CONF_ENTITIES, default=[]): cv.entity_ids, vol.Optional(CONF_DOMAINS, default=[]): vol.All( cv.ensure_list, [cv.string] ), } ), vol.Optional(CONF_INCLUDE, default={}): vol.Schema( { vol.Optional(CONF_ENTITIES, default=[]): cv.entity_ids, vol.Optional(CONF_DOMAINS, default=[]): vol.All( cv.ensure_list, [cv.string] ), } ), vol.Optional(CONF_DB_NAME, default=DEFAULT_DATABASE): cv.string, vol.Optional(CONF_PORT): cv.port, vol.Optional(CONF_SSL): cv.boolean, vol.Optional(CONF_RETRY_COUNT, default=0): cv.positive_int, vol.Optional(CONF_DEFAULT_MEASUREMENT): cv.string, vol.Optional(CONF_OVERRIDE_MEASUREMENT): cv.string, vol.Optional(CONF_TAGS, default={}): vol.Schema( {cv.string: cv.string} ), vol.Optional(CONF_TAGS_ATTRIBUTES, default=[]): vol.All( cv.ensure_list, [cv.string] ), vol.Optional( CONF_VERIFY_SSL, default=DEFAULT_VERIFY_SSL ): cv.boolean, vol.Optional(CONF_COMPONENT_CONFIG, default={}): vol.Schema( {cv.entity_id: COMPONENT_CONFIG_SCHEMA_ENTRY} ), vol.Optional(CONF_COMPONENT_CONFIG_GLOB, default={}): vol.Schema( {cv.string: COMPONENT_CONFIG_SCHEMA_ENTRY} ), vol.Optional(CONF_COMPONENT_CONFIG_DOMAIN, default={}): vol.Schema( {cv.string: COMPONENT_CONFIG_SCHEMA_ENTRY} ), } ) ) }, extra=vol.ALLOW_EXTRA, ) RE_DIGIT_TAIL = re.compile(r"^[^\.]*\d+\.?\d+[^\.]*$") RE_DECIMAL = re.compile(r"[^\d.]+") def setup(hass, config): """Set up the InfluxDB component.""" conf = config[DOMAIN] kwargs = { "database": conf[CONF_DB_NAME], "verify_ssl": conf[CONF_VERIFY_SSL], "timeout": TIMEOUT, } if CONF_HOST in conf: kwargs["host"] = conf[CONF_HOST] if CONF_PORT in conf: kwargs["port"] = conf[CONF_PORT] if CONF_USERNAME in conf: kwargs["username"] = conf[CONF_USERNAME] if CONF_PASSWORD in conf: kwargs["password"] = conf[CONF_PASSWORD] if CONF_SSL in conf: kwargs["ssl"] = conf[CONF_SSL] include = conf.get(CONF_INCLUDE, {}) exclude = conf.get(CONF_EXCLUDE, {}) 
whitelist_e = set(include.get(CONF_ENTITIES, [])) whitelist_d = set(include.get(CONF_DOMAINS, [])) blacklist_e = set(exclude.get(CONF_ENTITIES, [])) blacklist_d = set(exclude.get(CONF_DOMAINS, [])) tags = conf.get(CONF_TAGS) tags_attributes = conf.get(CONF_TAGS_ATTRIBUTES) default_measurement = conf.get(CONF_DEFAULT_MEASUREMENT) override_measurement = conf.get(CONF_OVERRIDE_MEASUREMENT) component_config = EntityValues( conf[CONF_COMPONENT_CONFIG], conf[CONF_COMPONENT_CONFIG_DOMAIN], conf[CONF_COMPONENT_CONFIG_GLOB], ) max_tries = conf.get(CONF_RETRY_COUNT) try: influx = InfluxDBClient(**kwargs) influx.write_points([]) except (exceptions.InfluxDBClientError, requests.exceptions.ConnectionError) as exc: _LOGGER.warning( "Database host is not accessible due to '%s', please " "check your entries in the configuration file (host, " "port, etc.) and verify that the database exists and is " "READ/WRITE. Retrying again in %s seconds.", exc, RETRY_INTERVAL, ) event_helper.call_later(hass, RETRY_INTERVAL, lambda _: setup(hass, config)) return True def event_to_json(event): """Add an event to the outgoing Influx list.""" state = event.data.get("new_state") if ( state is None or state.state in (STATE_UNKNOWN, "", STATE_UNAVAILABLE) or state.entity_id in blacklist_e or state.domain in blacklist_d ): return try: if ( (whitelist_e or whitelist_d) and state.entity_id not in whitelist_e and state.domain not in whitelist_d ): return _include_state = _include_value = False _state_as_value = float(state.state) _include_value = True except ValueError: try: _state_as_value = float(state_helper.state_as_number(state)) _include_state = _include_value = True except ValueError: _include_state = True include_uom = True measurement = component_config.get(state.entity_id).get( CONF_OVERRIDE_MEASUREMENT ) if measurement in (None, ""): if override_measurement: measurement = override_measurement else: measurement = state.attributes.get("unit_of_measurement") if measurement in (None, ""): if default_measurement: measurement = default_measurement else: measurement = state.entity_id else: include_uom = False json = { "measurement": measurement, "tags": {"domain": state.domain, "entity_id": state.object_id}, "time": event.time_fired, "fields": {}, } if _include_state: json["fields"]["state"] = state.state if _include_value: json["fields"]["value"] = _state_as_value for key, value in state.attributes.items(): if key in tags_attributes: json["tags"][key] = value elif key != "unit_of_measurement" or include_uom: # If the key is already in fields if key in json["fields"]: key = key + "_" # Prevent column data errors in influxDB. 
# For each value we try to cast it as float # But if we can not do it we store the value # as string add "_str" postfix to the field key try: json["fields"][key] = float(value) except (ValueError, TypeError): new_key = f"{key}_str" new_value = str(value) json["fields"][new_key] = new_value if RE_DIGIT_TAIL.match(new_value): json["fields"][key] = float(RE_DECIMAL.sub("", new_value)) # Infinity and NaN are not valid floats in InfluxDB try: if not math.isfinite(json["fields"][key]): del json["fields"][key] except (KeyError, TypeError): pass json["tags"].update(tags) return json instance = hass.data[DOMAIN] = InfluxThread(hass, influx, event_to_json, max_tries) instance.start() def shutdown(event): """Shut down the thread.""" instance.queue.put(None) instance.join() influx.close() hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, shutdown) return True class InfluxThread(threading.Thread): """A threaded event handler class.""" def __init__(self, hass, influx, event_to_json, max_tries): """Initialize the listener.""" threading.Thread.__init__(self, name="InfluxDB") self.queue = queue.Queue() self.influx = influx self.event_to_json = event_to_json self.max_tries = max_tries self.write_errors = 0 self.shutdown = False hass.bus.listen(EVENT_STATE_CHANGED, self._event_listener) def _event_listener(self, event): """Listen for new messages on the bus and queue them for Influx.""" item = (time.monotonic(), event) self.queue.put(item) @staticmethod def batch_timeout(): """Return number of seconds to wait for more events.""" return BATCH_TIMEOUT def get_events_json(self): """Return a batch of events formatted for writing.""" queue_seconds = QUEUE_BACKLOG_SECONDS + self.max_tries * RETRY_DELAY count = 0 json = [] dropped = 0 try: while len(json) < BATCH_BUFFER_SIZE and not self.shutdown: timeout = None if count == 0 else self.batch_timeout() item = self.queue.get(timeout=timeout) count += 1 if item is None: self.shutdown = True else: timestamp, event = item age = time.monotonic() - timestamp if age < queue_seconds: event_json = self.event_to_json(event) if event_json: json.append(event_json) else: dropped += 1 except queue.Empty: pass if dropped: _LOGGER.warning("Catching up, dropped %d old events", dropped) return count, json def write_to_influxdb(self, json): """Write preprocessed events to influxdb, with retry.""" for retry in range(self.max_tries + 1): try: self.influx.write_points(json) if self.write_errors: _LOGGER.error("Resumed, lost %d events", self.write_errors) self.write_errors = 0 _LOGGER.debug("Wrote %d events", len(json)) break except ( exceptions.InfluxDBClientError, exceptions.InfluxDBServerError, IOError, ) as err: if retry < self.max_tries: time.sleep(RETRY_DELAY) else: if not self.write_errors: _LOGGER.error("Write error: %s", err) self.write_errors += len(json) def run(self): """Process incoming events.""" while not self.shutdown: count, json = self.get_events_json() if json: self.write_to_influxdb(json) for _ in range(count): self.queue.task_done() def block_till_done(self): """Block till all events processed.""" self.queue.join()
"""The tests for the demo water_heater component.""" import unittest import pytest import voluptuous as vol from homeassistant.components import water_heater from homeassistant.setup import setup_component from homeassistant.util.unit_system import IMPERIAL_SYSTEM from tests.common import get_test_home_assistant from tests.components.water_heater import common ENTITY_WATER_HEATER = "water_heater.demo_water_heater" ENTITY_WATER_HEATER_CELSIUS = "water_heater.demo_water_heater_celsius" class TestDemowater_heater(unittest.TestCase): """Test the demo water_heater.""" def setUp(self): # pylint: disable=invalid-name """Set up things to be run when tests are started.""" self.hass = get_test_home_assistant() self.hass.config.units = IMPERIAL_SYSTEM assert setup_component( self.hass, water_heater.DOMAIN, {"water_heater": {"platform": "demo"}} ) def tearDown(self): # pylint: disable=invalid-name """Stop down everything that was started.""" self.hass.stop() def test_setup_params(self): """Test the initial parameters.""" state = self.hass.states.get(ENTITY_WATER_HEATER) assert 119 == state.attributes.get("temperature") assert "off" == state.attributes.get("away_mode") assert "eco" == state.attributes.get("operation_mode") def test_default_setup_params(self): """Test the setup with default parameters.""" state = self.hass.states.get(ENTITY_WATER_HEATER) assert 110 == state.attributes.get("min_temp") assert 140 == state.attributes.get("max_temp") def test_set_only_target_temp_bad_attr(self): """Test setting the target temperature without required attribute.""" state = self.hass.states.get(ENTITY_WATER_HEATER) assert 119 == state.attributes.get("temperature") with pytest.raises(vol.Invalid): common.set_temperature(self.hass, None, ENTITY_WATER_HEATER) self.hass.block_till_done() assert 119 == state.attributes.get("temperature") def test_set_only_target_temp(self): """Test the setting of the target temperature.""" state = self.hass.states.get(ENTITY_WATER_HEATER) assert 119 == state.attributes.get("temperature") common.set_temperature(self.hass, 110, ENTITY_WATER_HEATER) self.hass.block_till_done() state = self.hass.states.get(ENTITY_WATER_HEATER) assert 110 == state.attributes.get("temperature") def test_set_operation_bad_attr_and_state(self): """Test setting operation mode without required attribute. Also check the state. 
""" state = self.hass.states.get(ENTITY_WATER_HEATER) assert "eco" == state.attributes.get("operation_mode") assert "eco" == state.state with pytest.raises(vol.Invalid): common.set_operation_mode(self.hass, None, ENTITY_WATER_HEATER) self.hass.block_till_done() state = self.hass.states.get(ENTITY_WATER_HEATER) assert "eco" == state.attributes.get("operation_mode") assert "eco" == state.state def test_set_operation(self): """Test setting of new operation mode.""" state = self.hass.states.get(ENTITY_WATER_HEATER) assert "eco" == state.attributes.get("operation_mode") assert "eco" == state.state common.set_operation_mode(self.hass, "electric", ENTITY_WATER_HEATER) self.hass.block_till_done() state = self.hass.states.get(ENTITY_WATER_HEATER) assert "electric" == state.attributes.get("operation_mode") assert "electric" == state.state def test_set_away_mode_bad_attr(self): """Test setting the away mode without required attribute.""" state = self.hass.states.get(ENTITY_WATER_HEATER) assert "off" == state.attributes.get("away_mode") with pytest.raises(vol.Invalid): common.set_away_mode(self.hass, None, ENTITY_WATER_HEATER) self.hass.block_till_done() assert "off" == state.attributes.get("away_mode") def test_set_away_mode_on(self): """Test setting the away mode on/true.""" common.set_away_mode(self.hass, True, ENTITY_WATER_HEATER) self.hass.block_till_done() state = self.hass.states.get(ENTITY_WATER_HEATER) assert "on" == state.attributes.get("away_mode") def test_set_away_mode_off(self): """Test setting the away mode off/false.""" common.set_away_mode(self.hass, False, ENTITY_WATER_HEATER_CELSIUS) self.hass.block_till_done() state = self.hass.states.get(ENTITY_WATER_HEATER_CELSIUS) assert "off" == state.attributes.get("away_mode") def test_set_only_target_temp_with_convert(self): """Test the setting of the target temperature.""" state = self.hass.states.get(ENTITY_WATER_HEATER_CELSIUS) assert 113 == state.attributes.get("temperature") common.set_temperature(self.hass, 114, ENTITY_WATER_HEATER_CELSIUS) self.hass.block_till_done() state = self.hass.states.get(ENTITY_WATER_HEATER_CELSIUS) assert 114 == state.attributes.get("temperature")
leppa/home-assistant
tests/components/demo/test_water_heater.py
homeassistant/components/influxdb/__init__.py
"""Integrates Native Apps to Home Assistant.""" from homeassistant.components.webhook import async_register as webhook_register from homeassistant.const import CONF_WEBHOOK_ID from homeassistant.helpers import device_registry as dr, discovery from homeassistant.helpers.typing import ConfigType, HomeAssistantType from .const import ( ATTR_DEVICE_ID, ATTR_DEVICE_NAME, ATTR_MANUFACTURER, ATTR_MODEL, ATTR_OS_VERSION, DATA_BINARY_SENSOR, DATA_CONFIG_ENTRIES, DATA_DELETED_IDS, DATA_DEVICES, DATA_SENSOR, DATA_STORE, DOMAIN, STORAGE_KEY, STORAGE_VERSION, ) from .http_api import RegistrationsView from .webhook import handle_webhook from .websocket_api import register_websocket_handlers PLATFORMS = "sensor", "binary_sensor", "device_tracker" async def async_setup(hass: HomeAssistantType, config: ConfigType): """Set up the mobile app component.""" store = hass.helpers.storage.Store(STORAGE_VERSION, STORAGE_KEY) app_config = await store.async_load() if app_config is None: app_config = { DATA_BINARY_SENSOR: {}, DATA_CONFIG_ENTRIES: {}, DATA_DELETED_IDS: [], DATA_SENSOR: {}, } hass.data[DOMAIN] = { DATA_BINARY_SENSOR: app_config.get(DATA_BINARY_SENSOR, {}), DATA_CONFIG_ENTRIES: {}, DATA_DELETED_IDS: app_config.get(DATA_DELETED_IDS, []), DATA_DEVICES: {}, DATA_SENSOR: app_config.get(DATA_SENSOR, {}), DATA_STORE: store, } hass.http.register_view(RegistrationsView()) register_websocket_handlers(hass) for deleted_id in hass.data[DOMAIN][DATA_DELETED_IDS]: try: webhook_register( hass, DOMAIN, "Deleted Webhook", deleted_id, handle_webhook ) except ValueError: pass hass.async_create_task( discovery.async_load_platform(hass, "notify", DOMAIN, {}, config) ) return True async def async_setup_entry(hass, entry): """Set up a mobile_app entry.""" registration = entry.data webhook_id = registration[CONF_WEBHOOK_ID] hass.data[DOMAIN][DATA_CONFIG_ENTRIES][webhook_id] = entry device_registry = await dr.async_get_registry(hass) device = device_registry.async_get_or_create( config_entry_id=entry.entry_id, identifiers={(DOMAIN, registration[ATTR_DEVICE_ID])}, manufacturer=registration[ATTR_MANUFACTURER], model=registration[ATTR_MODEL], name=registration[ATTR_DEVICE_NAME], sw_version=registration[ATTR_OS_VERSION], ) hass.data[DOMAIN][DATA_DEVICES][webhook_id] = device registration_name = "Mobile App: {}".format(registration[ATTR_DEVICE_NAME]) webhook_register(hass, DOMAIN, registration_name, webhook_id, handle_webhook) for domain in PLATFORMS: hass.async_create_task( hass.config_entries.async_forward_entry_setup(entry, domain) ) return True
"""The tests for the demo water_heater component.""" import unittest import pytest import voluptuous as vol from homeassistant.components import water_heater from homeassistant.setup import setup_component from homeassistant.util.unit_system import IMPERIAL_SYSTEM from tests.common import get_test_home_assistant from tests.components.water_heater import common ENTITY_WATER_HEATER = "water_heater.demo_water_heater" ENTITY_WATER_HEATER_CELSIUS = "water_heater.demo_water_heater_celsius" class TestDemowater_heater(unittest.TestCase): """Test the demo water_heater.""" def setUp(self): # pylint: disable=invalid-name """Set up things to be run when tests are started.""" self.hass = get_test_home_assistant() self.hass.config.units = IMPERIAL_SYSTEM assert setup_component( self.hass, water_heater.DOMAIN, {"water_heater": {"platform": "demo"}} ) def tearDown(self): # pylint: disable=invalid-name """Stop down everything that was started.""" self.hass.stop() def test_setup_params(self): """Test the initial parameters.""" state = self.hass.states.get(ENTITY_WATER_HEATER) assert 119 == state.attributes.get("temperature") assert "off" == state.attributes.get("away_mode") assert "eco" == state.attributes.get("operation_mode") def test_default_setup_params(self): """Test the setup with default parameters.""" state = self.hass.states.get(ENTITY_WATER_HEATER) assert 110 == state.attributes.get("min_temp") assert 140 == state.attributes.get("max_temp") def test_set_only_target_temp_bad_attr(self): """Test setting the target temperature without required attribute.""" state = self.hass.states.get(ENTITY_WATER_HEATER) assert 119 == state.attributes.get("temperature") with pytest.raises(vol.Invalid): common.set_temperature(self.hass, None, ENTITY_WATER_HEATER) self.hass.block_till_done() assert 119 == state.attributes.get("temperature") def test_set_only_target_temp(self): """Test the setting of the target temperature.""" state = self.hass.states.get(ENTITY_WATER_HEATER) assert 119 == state.attributes.get("temperature") common.set_temperature(self.hass, 110, ENTITY_WATER_HEATER) self.hass.block_till_done() state = self.hass.states.get(ENTITY_WATER_HEATER) assert 110 == state.attributes.get("temperature") def test_set_operation_bad_attr_and_state(self): """Test setting operation mode without required attribute. Also check the state. 
""" state = self.hass.states.get(ENTITY_WATER_HEATER) assert "eco" == state.attributes.get("operation_mode") assert "eco" == state.state with pytest.raises(vol.Invalid): common.set_operation_mode(self.hass, None, ENTITY_WATER_HEATER) self.hass.block_till_done() state = self.hass.states.get(ENTITY_WATER_HEATER) assert "eco" == state.attributes.get("operation_mode") assert "eco" == state.state def test_set_operation(self): """Test setting of new operation mode.""" state = self.hass.states.get(ENTITY_WATER_HEATER) assert "eco" == state.attributes.get("operation_mode") assert "eco" == state.state common.set_operation_mode(self.hass, "electric", ENTITY_WATER_HEATER) self.hass.block_till_done() state = self.hass.states.get(ENTITY_WATER_HEATER) assert "electric" == state.attributes.get("operation_mode") assert "electric" == state.state def test_set_away_mode_bad_attr(self): """Test setting the away mode without required attribute.""" state = self.hass.states.get(ENTITY_WATER_HEATER) assert "off" == state.attributes.get("away_mode") with pytest.raises(vol.Invalid): common.set_away_mode(self.hass, None, ENTITY_WATER_HEATER) self.hass.block_till_done() assert "off" == state.attributes.get("away_mode") def test_set_away_mode_on(self): """Test setting the away mode on/true.""" common.set_away_mode(self.hass, True, ENTITY_WATER_HEATER) self.hass.block_till_done() state = self.hass.states.get(ENTITY_WATER_HEATER) assert "on" == state.attributes.get("away_mode") def test_set_away_mode_off(self): """Test setting the away mode off/false.""" common.set_away_mode(self.hass, False, ENTITY_WATER_HEATER_CELSIUS) self.hass.block_till_done() state = self.hass.states.get(ENTITY_WATER_HEATER_CELSIUS) assert "off" == state.attributes.get("away_mode") def test_set_only_target_temp_with_convert(self): """Test the setting of the target temperature.""" state = self.hass.states.get(ENTITY_WATER_HEATER_CELSIUS) assert 113 == state.attributes.get("temperature") common.set_temperature(self.hass, 114, ENTITY_WATER_HEATER_CELSIUS) self.hass.block_till_done() state = self.hass.states.get(ENTITY_WATER_HEATER_CELSIUS) assert 114 == state.attributes.get("temperature")
leppa/home-assistant
tests/components/demo/test_water_heater.py
homeassistant/components/mobile_app/__init__.py
"""Axis network device abstraction.""" import asyncio import async_timeout import axis from axis.streammanager import SIGNAL_PLAYING from homeassistant.const import ( CONF_DEVICE, CONF_HOST, CONF_MAC, CONF_NAME, CONF_PASSWORD, CONF_PORT, CONF_USERNAME, ) from homeassistant.core import callback from homeassistant.exceptions import ConfigEntryNotReady from homeassistant.helpers.device_registry import CONNECTION_NETWORK_MAC from homeassistant.helpers.dispatcher import async_dispatcher_send from .const import CONF_CAMERA, CONF_EVENTS, CONF_MODEL, DOMAIN, LOGGER from .errors import AuthenticationRequired, CannotConnect class AxisNetworkDevice: """Manages a Axis device.""" def __init__(self, hass, config_entry): """Initialize the device.""" self.hass = hass self.config_entry = config_entry self.available = True self.api = None self.fw_version = None self.product_type = None self.listeners = [] @property def host(self): """Return the host of this device.""" return self.config_entry.data[CONF_DEVICE][CONF_HOST] @property def model(self): """Return the model of this device.""" return self.config_entry.data[CONF_MODEL] @property def name(self): """Return the name of this device.""" return self.config_entry.data[CONF_NAME] @property def serial(self): """Return the mac of this device.""" return self.config_entry.data[CONF_MAC] async def async_update_device_registry(self): """Update device registry.""" device_registry = await self.hass.helpers.device_registry.async_get_registry() device_registry.async_get_or_create( config_entry_id=self.config_entry.entry_id, connections={(CONNECTION_NETWORK_MAC, self.serial)}, identifiers={(DOMAIN, self.serial)}, manufacturer="Axis Communications AB", model=f"{self.model} {self.product_type}", name=self.name, sw_version=self.fw_version, ) async def async_setup(self): """Set up the device.""" try: self.api = await get_device(self.hass, self.config_entry.data[CONF_DEVICE]) except CannotConnect: raise ConfigEntryNotReady except Exception: # pylint: disable=broad-except LOGGER.error("Unknown error connecting with Axis device on %s", self.host) return False self.fw_version = self.api.vapix.params.firmware_version self.product_type = self.api.vapix.params.prodtype if self.config_entry.options[CONF_CAMERA]: self.hass.async_create_task( self.hass.config_entries.async_forward_entry_setup( self.config_entry, "camera" ) ) if self.config_entry.options[CONF_EVENTS]: self.api.stream.connection_status_callback = ( self.async_connection_status_callback ) self.api.enable_events(event_callback=self.async_event_callback) platform_tasks = [ self.hass.config_entries.async_forward_entry_setup( self.config_entry, platform ) for platform in ["binary_sensor", "switch"] ] self.hass.async_create_task(self.start(platform_tasks)) self.config_entry.add_update_listener(self.async_new_address_callback) return True @property def event_new_address(self): """Device specific event to signal new device address.""" return f"axis_new_address_{self.serial}" @staticmethod async def async_new_address_callback(hass, entry): """Handle signals of device getting new address. This is a static method because a class method (bound method), can not be used with weak references. 
""" device = hass.data[DOMAIN][entry.data[CONF_MAC]] device.api.config.host = device.host async_dispatcher_send(hass, device.event_new_address) @property def event_reachable(self): """Device specific event to signal a change in connection status.""" return f"axis_reachable_{self.serial}" @callback def async_connection_status_callback(self, status): """Handle signals of device connection status. This is called on every RTSP keep-alive message. Only signal state change if state change is true. """ if self.available != (status == SIGNAL_PLAYING): self.available = not self.available async_dispatcher_send(self.hass, self.event_reachable, True) @property def event_new_sensor(self): """Device specific event to signal new sensor available.""" return f"axis_add_sensor_{self.serial}" @callback def async_event_callback(self, action, event_id): """Call to configure events when initialized on event stream.""" if action == "add": async_dispatcher_send(self.hass, self.event_new_sensor, event_id) async def start(self, platform_tasks): """Start the event stream when all platforms are loaded.""" await asyncio.gather(*platform_tasks) self.api.start() @callback def shutdown(self, event): """Stop the event stream.""" self.api.stop() async def async_reset(self): """Reset this device to default state.""" platform_tasks = [] if self.config_entry.options[CONF_CAMERA]: platform_tasks.append( self.hass.config_entries.async_forward_entry_unload( self.config_entry, "camera" ) ) if self.config_entry.options[CONF_EVENTS]: self.api.stop() platform_tasks += [ self.hass.config_entries.async_forward_entry_unload( self.config_entry, platform ) for platform in ["binary_sensor", "switch"] ] await asyncio.gather(*platform_tasks) for unsub_dispatcher in self.listeners: unsub_dispatcher() self.listeners = [] return True async def get_device(hass, config): """Create a Axis device.""" device = axis.AxisDevice( loop=hass.loop, host=config[CONF_HOST], username=config[CONF_USERNAME], password=config[CONF_PASSWORD], port=config[CONF_PORT], web_proto="http", ) device.vapix.initialize_params(preload_data=False) device.vapix.initialize_ports() try: with async_timeout.timeout(15): await asyncio.gather( hass.async_add_executor_job(device.vapix.params.update_brand), hass.async_add_executor_job(device.vapix.params.update_properties), hass.async_add_executor_job(device.vapix.ports.update), ) return device except axis.Unauthorized: LOGGER.warning( "Connected to device at %s but not registered.", config[CONF_HOST] ) raise AuthenticationRequired except (asyncio.TimeoutError, axis.RequestError): LOGGER.error("Error connecting to the Axis device at %s", config[CONF_HOST]) raise CannotConnect except axis.AxisException: LOGGER.exception("Unknown Axis communication error occurred") raise AuthenticationRequired
"""The tests for the demo water_heater component.""" import unittest import pytest import voluptuous as vol from homeassistant.components import water_heater from homeassistant.setup import setup_component from homeassistant.util.unit_system import IMPERIAL_SYSTEM from tests.common import get_test_home_assistant from tests.components.water_heater import common ENTITY_WATER_HEATER = "water_heater.demo_water_heater" ENTITY_WATER_HEATER_CELSIUS = "water_heater.demo_water_heater_celsius" class TestDemowater_heater(unittest.TestCase): """Test the demo water_heater.""" def setUp(self): # pylint: disable=invalid-name """Set up things to be run when tests are started.""" self.hass = get_test_home_assistant() self.hass.config.units = IMPERIAL_SYSTEM assert setup_component( self.hass, water_heater.DOMAIN, {"water_heater": {"platform": "demo"}} ) def tearDown(self): # pylint: disable=invalid-name """Stop down everything that was started.""" self.hass.stop() def test_setup_params(self): """Test the initial parameters.""" state = self.hass.states.get(ENTITY_WATER_HEATER) assert 119 == state.attributes.get("temperature") assert "off" == state.attributes.get("away_mode") assert "eco" == state.attributes.get("operation_mode") def test_default_setup_params(self): """Test the setup with default parameters.""" state = self.hass.states.get(ENTITY_WATER_HEATER) assert 110 == state.attributes.get("min_temp") assert 140 == state.attributes.get("max_temp") def test_set_only_target_temp_bad_attr(self): """Test setting the target temperature without required attribute.""" state = self.hass.states.get(ENTITY_WATER_HEATER) assert 119 == state.attributes.get("temperature") with pytest.raises(vol.Invalid): common.set_temperature(self.hass, None, ENTITY_WATER_HEATER) self.hass.block_till_done() assert 119 == state.attributes.get("temperature") def test_set_only_target_temp(self): """Test the setting of the target temperature.""" state = self.hass.states.get(ENTITY_WATER_HEATER) assert 119 == state.attributes.get("temperature") common.set_temperature(self.hass, 110, ENTITY_WATER_HEATER) self.hass.block_till_done() state = self.hass.states.get(ENTITY_WATER_HEATER) assert 110 == state.attributes.get("temperature") def test_set_operation_bad_attr_and_state(self): """Test setting operation mode without required attribute. Also check the state. 
""" state = self.hass.states.get(ENTITY_WATER_HEATER) assert "eco" == state.attributes.get("operation_mode") assert "eco" == state.state with pytest.raises(vol.Invalid): common.set_operation_mode(self.hass, None, ENTITY_WATER_HEATER) self.hass.block_till_done() state = self.hass.states.get(ENTITY_WATER_HEATER) assert "eco" == state.attributes.get("operation_mode") assert "eco" == state.state def test_set_operation(self): """Test setting of new operation mode.""" state = self.hass.states.get(ENTITY_WATER_HEATER) assert "eco" == state.attributes.get("operation_mode") assert "eco" == state.state common.set_operation_mode(self.hass, "electric", ENTITY_WATER_HEATER) self.hass.block_till_done() state = self.hass.states.get(ENTITY_WATER_HEATER) assert "electric" == state.attributes.get("operation_mode") assert "electric" == state.state def test_set_away_mode_bad_attr(self): """Test setting the away mode without required attribute.""" state = self.hass.states.get(ENTITY_WATER_HEATER) assert "off" == state.attributes.get("away_mode") with pytest.raises(vol.Invalid): common.set_away_mode(self.hass, None, ENTITY_WATER_HEATER) self.hass.block_till_done() assert "off" == state.attributes.get("away_mode") def test_set_away_mode_on(self): """Test setting the away mode on/true.""" common.set_away_mode(self.hass, True, ENTITY_WATER_HEATER) self.hass.block_till_done() state = self.hass.states.get(ENTITY_WATER_HEATER) assert "on" == state.attributes.get("away_mode") def test_set_away_mode_off(self): """Test setting the away mode off/false.""" common.set_away_mode(self.hass, False, ENTITY_WATER_HEATER_CELSIUS) self.hass.block_till_done() state = self.hass.states.get(ENTITY_WATER_HEATER_CELSIUS) assert "off" == state.attributes.get("away_mode") def test_set_only_target_temp_with_convert(self): """Test the setting of the target temperature.""" state = self.hass.states.get(ENTITY_WATER_HEATER_CELSIUS) assert 113 == state.attributes.get("temperature") common.set_temperature(self.hass, 114, ENTITY_WATER_HEATER_CELSIUS) self.hass.block_till_done() state = self.hass.states.get(ENTITY_WATER_HEATER_CELSIUS) assert 114 == state.attributes.get("temperature")
leppa/home-assistant
tests/components/demo/test_water_heater.py
homeassistant/components/axis/device.py
"""Config flow to configure homekit_controller.""" import json import logging import os import homekit from homekit.controller.ip_implementation import IpPairing import voluptuous as vol from homeassistant import config_entries from homeassistant.core import callback from .connection import get_accessory_name, get_bridge_information from .const import DOMAIN, KNOWN_DEVICES HOMEKIT_IGNORE = ["Home Assistant Bridge"] HOMEKIT_DIR = ".homekit" PAIRING_FILE = "pairing.json" _LOGGER = logging.getLogger(__name__) def load_old_pairings(hass): """Load any old pairings from on-disk json fragments.""" old_pairings = {} data_dir = os.path.join(hass.config.path(), HOMEKIT_DIR) pairing_file = os.path.join(data_dir, PAIRING_FILE) # Find any pairings created with in HA 0.85 / 0.86 if os.path.exists(pairing_file): with open(pairing_file) as pairing_file: old_pairings.update(json.load(pairing_file)) # Find any pairings created in HA <= 0.84 if os.path.exists(data_dir): for device in os.listdir(data_dir): if not device.startswith("hk-"): continue alias = device[3:] if alias in old_pairings: continue with open(os.path.join(data_dir, device)) as pairing_data_fp: old_pairings[alias] = json.load(pairing_data_fp) return old_pairings def normalize_hkid(hkid): """Normalize a hkid so that it is safe to compare with other normalized hkids.""" return hkid.lower() @callback def find_existing_host(hass, serial): """Return a set of the configured hosts.""" for entry in hass.config_entries.async_entries(DOMAIN): if entry.data["AccessoryPairingID"] == serial: return entry @config_entries.HANDLERS.register(DOMAIN) class HomekitControllerFlowHandler(config_entries.ConfigFlow): """Handle a HomeKit config flow.""" VERSION = 1 CONNECTION_CLASS = config_entries.CONN_CLASS_LOCAL_POLL def __init__(self): """Initialize the homekit_controller flow.""" self.model = None self.hkid = None self.devices = {} self.controller = homekit.Controller() self.finish_pairing = None async def async_step_user(self, user_input=None): """Handle a flow start.""" errors = {} if user_input is not None: key = user_input["device"] self.hkid = self.devices[key]["id"] self.model = self.devices[key]["md"] await self.async_set_unique_id( normalize_hkid(self.hkid), raise_on_progress=False ) return await self.async_step_pair() all_hosts = await self.hass.async_add_executor_job(self.controller.discover, 5) self.devices = {} for host in all_hosts: status_flags = int(host["sf"]) paired = not status_flags & 0x01 if paired: continue self.devices[host["name"]] = host if not self.devices: return self.async_abort(reason="no_devices") return self.async_show_form( step_id="user", errors=errors, data_schema=vol.Schema( {vol.Required("device"): vol.In(self.devices.keys())} ), ) async def async_step_zeroconf(self, discovery_info): """Handle a discovered HomeKit accessory. This flow is triggered by the discovery component. """ # Normalize properties from discovery # homekit_python has code to do this, but not in a form we can # easily use, so do the bare minimum ourselves here instead. properties = { key.lower(): value for (key, value) in discovery_info["properties"].items() } # The hkid is a unique random number that looks like a pairing code. # It changes if a device is factory reset. hkid = properties["id"] model = properties["md"] name = discovery_info["name"].replace("._hap._tcp.local.", "") status_flags = int(properties["sf"]) paired = not status_flags & 0x01 # The configuration number increases every time the characteristic map # needs updating. 
Some devices use a slightly off-spec name so handle # both cases. try: config_num = int(properties["c#"]) except KeyError: _LOGGER.warning( "HomeKit device %s: c# not exposed, in violation of spec", hkid ) config_num = None # If the device is already paired and known to us we should monitor c# # (config_num) for changes. If it changes, we check for new entities if paired and hkid in self.hass.data.get(KNOWN_DEVICES, {}): conn = self.hass.data[KNOWN_DEVICES][hkid] if conn.config_num != config_num: _LOGGER.debug( "HomeKit info %s: c# incremented, refreshing entities", hkid ) self.hass.async_create_task(conn.async_refresh_entity_map(config_num)) return self.async_abort(reason="already_configured") _LOGGER.debug("Discovered device %s (%s - %s)", name, model, hkid) await self.async_set_unique_id(normalize_hkid(hkid)) self._abort_if_unique_id_configured() # pylint: disable=no-member # https://github.com/PyCQA/pylint/issues/3167 self.context["hkid"] = hkid self.context["title_placeholders"] = {"name": name} if paired: old_pairings = await self.hass.async_add_executor_job( load_old_pairings, self.hass ) if hkid in old_pairings: return await self.async_import_legacy_pairing( properties, old_pairings[hkid] ) # Device is paired but not to us - ignore it _LOGGER.debug("HomeKit device %s ignored as already paired", hkid) return self.async_abort(reason="already_paired") # Devices in HOMEKIT_IGNORE have native local integrations - users # should be encouraged to use native integration and not confused # by alternative HK API. if model in HOMEKIT_IGNORE: return self.async_abort(reason="ignored_model") # Device isn't paired with us or anyone else. # But we have a 'complete' config entry for it - that is probably # invalid. Remove it automatically. existing = find_existing_host(self.hass, hkid) if existing: await self.hass.config_entries.async_remove(existing.entry_id) self.model = model self.hkid = hkid # We want to show the pairing form - but don't call async_step_pair # directly as it has side effects (will ask the device to show a # pairing code) return self._async_step_pair_show_form() async def async_import_legacy_pairing(self, discovery_props, pairing_data): """Migrate a legacy pairing to config entries.""" hkid = discovery_props["id"] existing = find_existing_host(self.hass, hkid) if existing: _LOGGER.info( ( "Legacy configuration for homekit accessory %s" "not loaded as already migrated" ), hkid, ) return self.async_abort(reason="already_configured") _LOGGER.info( ( "Legacy configuration %s for homekit" "accessory migrated to config entries" ), hkid, ) pairing = IpPairing(pairing_data) return await self._entry_from_accessory(pairing) async def async_step_pair(self, pair_info=None): """Pair with a new HomeKit accessory.""" # If async_step_pair is called with no pairing code then we do the M1 # phase of pairing. If this is successful the device enters pairing # mode. # If it doesn't have a screen then the pin is static. # If it has a display it will display a pin on that display. In # this case the code is random. So we have to call the start_pairing # API before the user can enter a pin. But equally we don't want to # call start_pairing when the device is discovered, only when they # click on 'Configure' in the UI. # start_pairing will make the device show its pin and return a # callable. We call the callable with the pin that the user has typed # in. 
errors = {} if pair_info: code = pair_info["pairing_code"] try: await self.hass.async_add_executor_job(self.finish_pairing, code) pairing = self.controller.pairings.get(self.hkid) if pairing: return await self._entry_from_accessory(pairing) errors["pairing_code"] = "unable_to_pair" except homekit.AuthenticationError: # PairSetup M4 - SRP proof failed # PairSetup M6 - Ed25519 signature verification failed # PairVerify M4 - Decryption failed # PairVerify M4 - Device not recognised # PairVerify M4 - Ed25519 signature verification failed errors["pairing_code"] = "authentication_error" except homekit.UnknownError: # An error occurred on the device whilst performing this # operation. errors["pairing_code"] = "unknown_error" except homekit.MaxPeersError: # The device can't pair with any more accessories. errors["pairing_code"] = "max_peers_error" except homekit.AccessoryNotFoundError: # Can no longer find the device on the network return self.async_abort(reason="accessory_not_found_error") except Exception: # pylint: disable=broad-except _LOGGER.exception("Pairing attempt failed with an unhandled exception") errors["pairing_code"] = "pairing_failed" start_pairing = self.controller.start_pairing try: self.finish_pairing = await self.hass.async_add_executor_job( start_pairing, self.hkid, self.hkid ) except homekit.BusyError: # Already performing a pair setup operation with a different # controller errors["pairing_code"] = "busy_error" except homekit.MaxTriesError: # The accessory has received more than 100 unsuccessful auth # attempts. errors["pairing_code"] = "max_tries_error" except homekit.UnavailableError: # The accessory is already paired - cannot try to pair again. return self.async_abort(reason="already_paired") except homekit.AccessoryNotFoundError: # Can no longer find the device on the network return self.async_abort(reason="accessory_not_found_error") except Exception: # pylint: disable=broad-except _LOGGER.exception("Pairing attempt failed with an unhandled exception") errors["pairing_code"] = "pairing_failed" return self._async_step_pair_show_form(errors) def _async_step_pair_show_form(self, errors=None): return self.async_show_form( step_id="pair", errors=errors or {}, data_schema=vol.Schema( {vol.Required("pairing_code"): vol.All(str, vol.Strip)} ), ) async def _entry_from_accessory(self, pairing): """Return a config entry from an initialized bridge.""" # The bulk of the pairing record is stored on the config entry. # A specific exception is the 'accessories' key. This is more # volatile. We do cache it, but not against the config entry. # So copy the pairing data and mutate the copy. pairing_data = pairing.pairing_data.copy() # Use the accessories data from the pairing operation if it is # available. Otherwise request a fresh copy from the API. # This removes the 'accessories' key from pairing_data at # the same time. accessories = pairing_data.pop("accessories", None) if not accessories: accessories = await self.hass.async_add_executor_job( pairing.list_accessories_and_characteristics ) bridge_info = get_bridge_information(accessories) name = get_accessory_name(bridge_info) return self.async_create_entry(title=name, data=pairing_data)
"""The tests for the demo water_heater component.""" import unittest import pytest import voluptuous as vol from homeassistant.components import water_heater from homeassistant.setup import setup_component from homeassistant.util.unit_system import IMPERIAL_SYSTEM from tests.common import get_test_home_assistant from tests.components.water_heater import common ENTITY_WATER_HEATER = "water_heater.demo_water_heater" ENTITY_WATER_HEATER_CELSIUS = "water_heater.demo_water_heater_celsius" class TestDemowater_heater(unittest.TestCase): """Test the demo water_heater.""" def setUp(self): # pylint: disable=invalid-name """Set up things to be run when tests are started.""" self.hass = get_test_home_assistant() self.hass.config.units = IMPERIAL_SYSTEM assert setup_component( self.hass, water_heater.DOMAIN, {"water_heater": {"platform": "demo"}} ) def tearDown(self): # pylint: disable=invalid-name """Stop down everything that was started.""" self.hass.stop() def test_setup_params(self): """Test the initial parameters.""" state = self.hass.states.get(ENTITY_WATER_HEATER) assert 119 == state.attributes.get("temperature") assert "off" == state.attributes.get("away_mode") assert "eco" == state.attributes.get("operation_mode") def test_default_setup_params(self): """Test the setup with default parameters.""" state = self.hass.states.get(ENTITY_WATER_HEATER) assert 110 == state.attributes.get("min_temp") assert 140 == state.attributes.get("max_temp") def test_set_only_target_temp_bad_attr(self): """Test setting the target temperature without required attribute.""" state = self.hass.states.get(ENTITY_WATER_HEATER) assert 119 == state.attributes.get("temperature") with pytest.raises(vol.Invalid): common.set_temperature(self.hass, None, ENTITY_WATER_HEATER) self.hass.block_till_done() assert 119 == state.attributes.get("temperature") def test_set_only_target_temp(self): """Test the setting of the target temperature.""" state = self.hass.states.get(ENTITY_WATER_HEATER) assert 119 == state.attributes.get("temperature") common.set_temperature(self.hass, 110, ENTITY_WATER_HEATER) self.hass.block_till_done() state = self.hass.states.get(ENTITY_WATER_HEATER) assert 110 == state.attributes.get("temperature") def test_set_operation_bad_attr_and_state(self): """Test setting operation mode without required attribute. Also check the state. 
""" state = self.hass.states.get(ENTITY_WATER_HEATER) assert "eco" == state.attributes.get("operation_mode") assert "eco" == state.state with pytest.raises(vol.Invalid): common.set_operation_mode(self.hass, None, ENTITY_WATER_HEATER) self.hass.block_till_done() state = self.hass.states.get(ENTITY_WATER_HEATER) assert "eco" == state.attributes.get("operation_mode") assert "eco" == state.state def test_set_operation(self): """Test setting of new operation mode.""" state = self.hass.states.get(ENTITY_WATER_HEATER) assert "eco" == state.attributes.get("operation_mode") assert "eco" == state.state common.set_operation_mode(self.hass, "electric", ENTITY_WATER_HEATER) self.hass.block_till_done() state = self.hass.states.get(ENTITY_WATER_HEATER) assert "electric" == state.attributes.get("operation_mode") assert "electric" == state.state def test_set_away_mode_bad_attr(self): """Test setting the away mode without required attribute.""" state = self.hass.states.get(ENTITY_WATER_HEATER) assert "off" == state.attributes.get("away_mode") with pytest.raises(vol.Invalid): common.set_away_mode(self.hass, None, ENTITY_WATER_HEATER) self.hass.block_till_done() assert "off" == state.attributes.get("away_mode") def test_set_away_mode_on(self): """Test setting the away mode on/true.""" common.set_away_mode(self.hass, True, ENTITY_WATER_HEATER) self.hass.block_till_done() state = self.hass.states.get(ENTITY_WATER_HEATER) assert "on" == state.attributes.get("away_mode") def test_set_away_mode_off(self): """Test setting the away mode off/false.""" common.set_away_mode(self.hass, False, ENTITY_WATER_HEATER_CELSIUS) self.hass.block_till_done() state = self.hass.states.get(ENTITY_WATER_HEATER_CELSIUS) assert "off" == state.attributes.get("away_mode") def test_set_only_target_temp_with_convert(self): """Test the setting of the target temperature.""" state = self.hass.states.get(ENTITY_WATER_HEATER_CELSIUS) assert 113 == state.attributes.get("temperature") common.set_temperature(self.hass, 114, ENTITY_WATER_HEATER_CELSIUS) self.hass.block_till_done() state = self.hass.states.get(ENTITY_WATER_HEATER_CELSIUS) assert 114 == state.attributes.get("temperature")
leppa/home-assistant
tests/components/demo/test_water_heater.py
homeassistant/components/homekit_controller/config_flow.py
"""Support for Ness D8X/D16X devices.""" from collections import namedtuple import datetime import logging from nessclient import ArmingState, Client import voluptuous as vol from homeassistant.components.binary_sensor import DEVICE_CLASSES from homeassistant.const import ( ATTR_CODE, ATTR_STATE, CONF_HOST, CONF_SCAN_INTERVAL, EVENT_HOMEASSISTANT_STOP, ) from homeassistant.helpers import config_validation as cv from homeassistant.helpers.discovery import async_load_platform from homeassistant.helpers.dispatcher import async_dispatcher_send _LOGGER = logging.getLogger(__name__) DOMAIN = "ness_alarm" DATA_NESS = "ness_alarm" CONF_DEVICE_PORT = "port" CONF_INFER_ARMING_STATE = "infer_arming_state" CONF_ZONES = "zones" CONF_ZONE_NAME = "name" CONF_ZONE_TYPE = "type" CONF_ZONE_ID = "id" ATTR_OUTPUT_ID = "output_id" DEFAULT_ZONES = [] DEFAULT_SCAN_INTERVAL = datetime.timedelta(minutes=1) DEFAULT_INFER_ARMING_STATE = False SIGNAL_ZONE_CHANGED = "ness_alarm.zone_changed" SIGNAL_ARMING_STATE_CHANGED = "ness_alarm.arming_state_changed" ZoneChangedData = namedtuple("ZoneChangedData", ["zone_id", "state"]) DEFAULT_ZONE_TYPE = "motion" ZONE_SCHEMA = vol.Schema( { vol.Required(CONF_ZONE_NAME): cv.string, vol.Required(CONF_ZONE_ID): cv.positive_int, vol.Optional(CONF_ZONE_TYPE, default=DEFAULT_ZONE_TYPE): vol.In(DEVICE_CLASSES), } ) CONFIG_SCHEMA = vol.Schema( { DOMAIN: vol.Schema( { vol.Required(CONF_HOST): cv.string, vol.Required(CONF_DEVICE_PORT): cv.port, vol.Optional( CONF_SCAN_INTERVAL, default=DEFAULT_SCAN_INTERVAL ): vol.All(cv.time_period, cv.positive_timedelta), vol.Optional(CONF_ZONES, default=DEFAULT_ZONES): vol.All( cv.ensure_list, [ZONE_SCHEMA] ), vol.Optional( CONF_INFER_ARMING_STATE, default=DEFAULT_INFER_ARMING_STATE ): cv.boolean, } ) }, extra=vol.ALLOW_EXTRA, ) SERVICE_PANIC = "panic" SERVICE_AUX = "aux" SERVICE_SCHEMA_PANIC = vol.Schema({vol.Required(ATTR_CODE): cv.string}) SERVICE_SCHEMA_AUX = vol.Schema( { vol.Required(ATTR_OUTPUT_ID): cv.positive_int, vol.Optional(ATTR_STATE, default=True): cv.boolean, } ) async def async_setup(hass, config): """Set up the Ness Alarm platform.""" conf = config[DOMAIN] zones = conf[CONF_ZONES] host = conf[CONF_HOST] port = conf[CONF_DEVICE_PORT] scan_interval = conf[CONF_SCAN_INTERVAL] infer_arming_state = conf[CONF_INFER_ARMING_STATE] client = Client( host=host, port=port, loop=hass.loop, update_interval=scan_interval.total_seconds(), infer_arming_state=infer_arming_state, ) hass.data[DATA_NESS] = client async def _close(event): await client.close() hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, _close) hass.async_create_task( async_load_platform(hass, "binary_sensor", DOMAIN, {CONF_ZONES: zones}, config) ) hass.async_create_task( async_load_platform(hass, "alarm_control_panel", DOMAIN, {}, config) ) def on_zone_change(zone_id: int, state: bool): """Receives and propagates zone state updates.""" async_dispatcher_send( hass, SIGNAL_ZONE_CHANGED, ZoneChangedData(zone_id=zone_id, state=state) ) def on_state_change(arming_state: ArmingState): """Receives and propagates arming state updates.""" async_dispatcher_send(hass, SIGNAL_ARMING_STATE_CHANGED, arming_state) client.on_zone_change(on_zone_change) client.on_state_change(on_state_change) # Force update for current arming status and current zone states hass.loop.create_task(client.keepalive()) hass.loop.create_task(client.update()) async def handle_panic(call): await client.panic(call.data[ATTR_CODE]) async def handle_aux(call): await client.aux(call.data[ATTR_OUTPUT_ID], call.data[ATTR_STATE]) 
    hass.services.async_register(
        DOMAIN, SERVICE_PANIC, handle_panic, schema=SERVICE_SCHEMA_PANIC
    )
    hass.services.async_register(
        DOMAIN, SERVICE_AUX, handle_aux, schema=SERVICE_SCHEMA_AUX
    )

    return True
"""The tests for the demo water_heater component.""" import unittest import pytest import voluptuous as vol from homeassistant.components import water_heater from homeassistant.setup import setup_component from homeassistant.util.unit_system import IMPERIAL_SYSTEM from tests.common import get_test_home_assistant from tests.components.water_heater import common ENTITY_WATER_HEATER = "water_heater.demo_water_heater" ENTITY_WATER_HEATER_CELSIUS = "water_heater.demo_water_heater_celsius" class TestDemowater_heater(unittest.TestCase): """Test the demo water_heater.""" def setUp(self): # pylint: disable=invalid-name """Set up things to be run when tests are started.""" self.hass = get_test_home_assistant() self.hass.config.units = IMPERIAL_SYSTEM assert setup_component( self.hass, water_heater.DOMAIN, {"water_heater": {"platform": "demo"}} ) def tearDown(self): # pylint: disable=invalid-name """Stop down everything that was started.""" self.hass.stop() def test_setup_params(self): """Test the initial parameters.""" state = self.hass.states.get(ENTITY_WATER_HEATER) assert 119 == state.attributes.get("temperature") assert "off" == state.attributes.get("away_mode") assert "eco" == state.attributes.get("operation_mode") def test_default_setup_params(self): """Test the setup with default parameters.""" state = self.hass.states.get(ENTITY_WATER_HEATER) assert 110 == state.attributes.get("min_temp") assert 140 == state.attributes.get("max_temp") def test_set_only_target_temp_bad_attr(self): """Test setting the target temperature without required attribute.""" state = self.hass.states.get(ENTITY_WATER_HEATER) assert 119 == state.attributes.get("temperature") with pytest.raises(vol.Invalid): common.set_temperature(self.hass, None, ENTITY_WATER_HEATER) self.hass.block_till_done() assert 119 == state.attributes.get("temperature") def test_set_only_target_temp(self): """Test the setting of the target temperature.""" state = self.hass.states.get(ENTITY_WATER_HEATER) assert 119 == state.attributes.get("temperature") common.set_temperature(self.hass, 110, ENTITY_WATER_HEATER) self.hass.block_till_done() state = self.hass.states.get(ENTITY_WATER_HEATER) assert 110 == state.attributes.get("temperature") def test_set_operation_bad_attr_and_state(self): """Test setting operation mode without required attribute. Also check the state. 
""" state = self.hass.states.get(ENTITY_WATER_HEATER) assert "eco" == state.attributes.get("operation_mode") assert "eco" == state.state with pytest.raises(vol.Invalid): common.set_operation_mode(self.hass, None, ENTITY_WATER_HEATER) self.hass.block_till_done() state = self.hass.states.get(ENTITY_WATER_HEATER) assert "eco" == state.attributes.get("operation_mode") assert "eco" == state.state def test_set_operation(self): """Test setting of new operation mode.""" state = self.hass.states.get(ENTITY_WATER_HEATER) assert "eco" == state.attributes.get("operation_mode") assert "eco" == state.state common.set_operation_mode(self.hass, "electric", ENTITY_WATER_HEATER) self.hass.block_till_done() state = self.hass.states.get(ENTITY_WATER_HEATER) assert "electric" == state.attributes.get("operation_mode") assert "electric" == state.state def test_set_away_mode_bad_attr(self): """Test setting the away mode without required attribute.""" state = self.hass.states.get(ENTITY_WATER_HEATER) assert "off" == state.attributes.get("away_mode") with pytest.raises(vol.Invalid): common.set_away_mode(self.hass, None, ENTITY_WATER_HEATER) self.hass.block_till_done() assert "off" == state.attributes.get("away_mode") def test_set_away_mode_on(self): """Test setting the away mode on/true.""" common.set_away_mode(self.hass, True, ENTITY_WATER_HEATER) self.hass.block_till_done() state = self.hass.states.get(ENTITY_WATER_HEATER) assert "on" == state.attributes.get("away_mode") def test_set_away_mode_off(self): """Test setting the away mode off/false.""" common.set_away_mode(self.hass, False, ENTITY_WATER_HEATER_CELSIUS) self.hass.block_till_done() state = self.hass.states.get(ENTITY_WATER_HEATER_CELSIUS) assert "off" == state.attributes.get("away_mode") def test_set_only_target_temp_with_convert(self): """Test the setting of the target temperature.""" state = self.hass.states.get(ENTITY_WATER_HEATER_CELSIUS) assert 113 == state.attributes.get("temperature") common.set_temperature(self.hass, 114, ENTITY_WATER_HEATER_CELSIUS) self.hass.block_till_done() state = self.hass.states.get(ENTITY_WATER_HEATER_CELSIUS) assert 114 == state.attributes.get("temperature")
leppa/home-assistant
tests/components/demo/test_water_heater.py
homeassistant/components/ness_alarm/__init__.py
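A standalone sketch of the zone-schema validation pattern used in ness_alarm above, assuming only the voluptuous package; it mirrors the keys of ZONE_SCHEMA but uses plain validators instead of the homeassistant config_validation helpers, and the example values are illustrative.

import voluptuous as vol

# Mirrors the keys of ZONE_SCHEMA above; validators are simplified stand-ins.
ZONE_SCHEMA = vol.Schema(
    {
        vol.Required("name"): str,
        vol.Required("id"): vol.All(int, vol.Range(min=0)),
        vol.Optional("type", default="motion"): str,
    }
)

# Validation fills in defaults; invalid input raises vol.Invalid.
print(ZONE_SCHEMA({"name": "Front Door", "id": 1}))
# -> {'name': 'Front Door', 'id': 1, 'type': 'motion'}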
"""Support for sensors through the SmartThings cloud API.""" from collections import namedtuple from typing import Optional, Sequence from pysmartthings import Attribute, Capability from homeassistant.const import ( DEVICE_CLASS_BATTERY, DEVICE_CLASS_HUMIDITY, DEVICE_CLASS_ILLUMINANCE, DEVICE_CLASS_TEMPERATURE, DEVICE_CLASS_TIMESTAMP, ENERGY_KILO_WATT_HOUR, MASS_KILOGRAMS, POWER_WATT, TEMP_CELSIUS, TEMP_FAHRENHEIT, ) from . import SmartThingsEntity from .const import DATA_BROKERS, DOMAIN Map = namedtuple("map", "attribute name default_unit device_class") CAPABILITY_TO_SENSORS = { Capability.activity_lighting_mode: [ Map(Attribute.lighting_mode, "Activity Lighting Mode", None, None) ], Capability.air_conditioner_mode: [ Map(Attribute.air_conditioner_mode, "Air Conditioner Mode", None, None) ], Capability.air_quality_sensor: [ Map(Attribute.air_quality, "Air Quality", "CAQI", None) ], Capability.alarm: [Map(Attribute.alarm, "Alarm", None, None)], Capability.audio_volume: [Map(Attribute.volume, "Volume", "%", None)], Capability.battery: [Map(Attribute.battery, "Battery", "%", DEVICE_CLASS_BATTERY)], Capability.body_mass_index_measurement: [ Map(Attribute.bmi_measurement, "Body Mass Index", "kg/m^2", None) ], Capability.body_weight_measurement: [ Map(Attribute.body_weight_measurement, "Body Weight", MASS_KILOGRAMS, None) ], Capability.carbon_dioxide_measurement: [ Map(Attribute.carbon_dioxide, "Carbon Dioxide Measurement", "ppm", None) ], Capability.carbon_monoxide_detector: [ Map(Attribute.carbon_monoxide, "Carbon Monoxide Detector", None, None) ], Capability.carbon_monoxide_measurement: [ Map(Attribute.carbon_monoxide_level, "Carbon Monoxide Measurement", "ppm", None) ], Capability.dishwasher_operating_state: [ Map(Attribute.machine_state, "Dishwasher Machine State", None, None), Map(Attribute.dishwasher_job_state, "Dishwasher Job State", None, None), Map( Attribute.completion_time, "Dishwasher Completion Time", None, DEVICE_CLASS_TIMESTAMP, ), ], Capability.dryer_mode: [Map(Attribute.dryer_mode, "Dryer Mode", None, None)], Capability.dryer_operating_state: [ Map(Attribute.machine_state, "Dryer Machine State", None, None), Map(Attribute.dryer_job_state, "Dryer Job State", None, None), Map( Attribute.completion_time, "Dryer Completion Time", None, DEVICE_CLASS_TIMESTAMP, ), ], Capability.dust_sensor: [ Map(Attribute.fine_dust_level, "Fine Dust Level", None, None), Map(Attribute.dust_level, "Dust Level", None, None), ], Capability.energy_meter: [ Map(Attribute.energy, "Energy Meter", ENERGY_KILO_WATT_HOUR, None) ], Capability.equivalent_carbon_dioxide_measurement: [ Map( Attribute.equivalent_carbon_dioxide_measurement, "Equivalent Carbon Dioxide Measurement", "ppm", None, ) ], Capability.formaldehyde_measurement: [ Map(Attribute.formaldehyde_level, "Formaldehyde Measurement", "ppm", None) ], Capability.illuminance_measurement: [ Map(Attribute.illuminance, "Illuminance", "lux", DEVICE_CLASS_ILLUMINANCE) ], Capability.infrared_level: [ Map(Attribute.infrared_level, "Infrared Level", "%", None) ], Capability.media_input_source: [ Map(Attribute.input_source, "Media Input Source", None, None) ], Capability.media_playback_repeat: [ Map(Attribute.playback_repeat_mode, "Media Playback Repeat", None, None) ], Capability.media_playback_shuffle: [ Map(Attribute.playback_shuffle, "Media Playback Shuffle", None, None) ], Capability.media_playback: [ Map(Attribute.playback_status, "Media Playback Status", None, None) ], Capability.odor_sensor: [Map(Attribute.odor_level, "Odor Sensor", None, None)], 
Capability.oven_mode: [Map(Attribute.oven_mode, "Oven Mode", None, None)], Capability.oven_operating_state: [ Map(Attribute.machine_state, "Oven Machine State", None, None), Map(Attribute.oven_job_state, "Oven Job State", None, None), Map(Attribute.completion_time, "Oven Completion Time", None, None), ], Capability.oven_setpoint: [ Map(Attribute.oven_setpoint, "Oven Set Point", None, None) ], Capability.power_meter: [Map(Attribute.power, "Power Meter", POWER_WATT, None)], Capability.power_source: [Map(Attribute.power_source, "Power Source", None, None)], Capability.refrigeration_setpoint: [ Map( Attribute.refrigeration_setpoint, "Refrigeration Setpoint", None, DEVICE_CLASS_TEMPERATURE, ) ], Capability.relative_humidity_measurement: [ Map( Attribute.humidity, "Relative Humidity Measurement", "%", DEVICE_CLASS_HUMIDITY, ) ], Capability.robot_cleaner_cleaning_mode: [ Map( Attribute.robot_cleaner_cleaning_mode, "Robot Cleaner Cleaning Mode", None, None, ) ], Capability.robot_cleaner_movement: [ Map(Attribute.robot_cleaner_movement, "Robot Cleaner Movement", None, None) ], Capability.robot_cleaner_turbo_mode: [ Map(Attribute.robot_cleaner_turbo_mode, "Robot Cleaner Turbo Mode", None, None) ], Capability.signal_strength: [ Map(Attribute.lqi, "LQI Signal Strength", None, None), Map(Attribute.rssi, "RSSI Signal Strength", None, None), ], Capability.smoke_detector: [Map(Attribute.smoke, "Smoke Detector", None, None)], Capability.temperature_measurement: [ Map( Attribute.temperature, "Temperature Measurement", None, DEVICE_CLASS_TEMPERATURE, ) ], Capability.thermostat_cooling_setpoint: [ Map( Attribute.cooling_setpoint, "Thermostat Cooling Setpoint", None, DEVICE_CLASS_TEMPERATURE, ) ], Capability.thermostat_fan_mode: [ Map(Attribute.thermostat_fan_mode, "Thermostat Fan Mode", None, None) ], Capability.thermostat_heating_setpoint: [ Map( Attribute.heating_setpoint, "Thermostat Heating Setpoint", None, DEVICE_CLASS_TEMPERATURE, ) ], Capability.thermostat_mode: [ Map(Attribute.thermostat_mode, "Thermostat Mode", None, None) ], Capability.thermostat_operating_state: [ Map( Attribute.thermostat_operating_state, "Thermostat Operating State", None, None, ) ], Capability.thermostat_setpoint: [ Map( Attribute.thermostat_setpoint, "Thermostat Setpoint", None, DEVICE_CLASS_TEMPERATURE, ) ], Capability.three_axis: [], Capability.tv_channel: [Map(Attribute.tv_channel, "Tv Channel", None, None)], Capability.tvoc_measurement: [ Map(Attribute.tvoc_level, "Tvoc Measurement", "ppm", None) ], Capability.ultraviolet_index: [ Map(Attribute.ultraviolet_index, "Ultraviolet Index", None, None) ], Capability.voltage_measurement: [ Map(Attribute.voltage, "Voltage Measurement", "V", None) ], Capability.washer_mode: [Map(Attribute.washer_mode, "Washer Mode", None, None)], Capability.washer_operating_state: [ Map(Attribute.machine_state, "Washer Machine State", None, None), Map(Attribute.washer_job_state, "Washer Job State", None, None), Map( Attribute.completion_time, "Washer Completion Time", None, DEVICE_CLASS_TIMESTAMP, ), ], } UNITS = {"C": TEMP_CELSIUS, "F": TEMP_FAHRENHEIT} THREE_AXIS_NAMES = ["X Coordinate", "Y Coordinate", "Z Coordinate"] async def async_setup_platform(hass, config, async_add_entities, discovery_info=None): """Platform uses config entry setup.""" pass async def async_setup_entry(hass, config_entry, async_add_entities): """Add binary sensors for a config entry.""" broker = hass.data[DOMAIN][DATA_BROKERS][config_entry.entry_id] sensors = [] for device in broker.devices.values(): for capability in 
broker.get_assigned(device.device_id, "sensor"): if capability == Capability.three_axis: sensors.extend( [ SmartThingsThreeAxisSensor(device, index) for index in range(len(THREE_AXIS_NAMES)) ] ) else: maps = CAPABILITY_TO_SENSORS[capability] sensors.extend( [ SmartThingsSensor( device, m.attribute, m.name, m.default_unit, m.device_class ) for m in maps ] ) async_add_entities(sensors) def get_capabilities(capabilities: Sequence[str]) -> Optional[Sequence[str]]: """Return all capabilities supported if minimum required are present.""" return [ capability for capability in CAPABILITY_TO_SENSORS if capability in capabilities ] class SmartThingsSensor(SmartThingsEntity): """Define a SmartThings Sensor.""" def __init__( self, device, attribute: str, name: str, default_unit: str, device_class: str ): """Init the class.""" super().__init__(device) self._attribute = attribute self._name = name self._device_class = device_class self._default_unit = default_unit @property def name(self) -> str: """Return the name of the binary sensor.""" return f"{self._device.label} {self._name}" @property def unique_id(self) -> str: """Return a unique ID.""" return f"{self._device.device_id}.{self._attribute}" @property def state(self): """Return the state of the sensor.""" return self._device.status.attributes[self._attribute].value @property def device_class(self): """Return the device class of the sensor.""" return self._device_class @property def unit_of_measurement(self): """Return the unit this state is expressed in.""" unit = self._device.status.attributes[self._attribute].unit return UNITS.get(unit, unit) if unit else self._default_unit class SmartThingsThreeAxisSensor(SmartThingsEntity): """Define a SmartThings Three Axis Sensor.""" def __init__(self, device, index): """Init the class.""" super().__init__(device) self._index = index @property def name(self) -> str: """Return the name of the binary sensor.""" return "{} {}".format(self._device.label, THREE_AXIS_NAMES[self._index]) @property def unique_id(self) -> str: """Return a unique ID.""" return "{}.{}".format(self._device.device_id, THREE_AXIS_NAMES[self._index]) @property def state(self): """Return the state of the sensor.""" three_axis = self._device.status.attributes[Attribute.three_axis].value try: return three_axis[self._index] except (TypeError, IndexError): return None
"""The tests for the demo water_heater component.""" import unittest import pytest import voluptuous as vol from homeassistant.components import water_heater from homeassistant.setup import setup_component from homeassistant.util.unit_system import IMPERIAL_SYSTEM from tests.common import get_test_home_assistant from tests.components.water_heater import common ENTITY_WATER_HEATER = "water_heater.demo_water_heater" ENTITY_WATER_HEATER_CELSIUS = "water_heater.demo_water_heater_celsius" class TestDemowater_heater(unittest.TestCase): """Test the demo water_heater.""" def setUp(self): # pylint: disable=invalid-name """Set up things to be run when tests are started.""" self.hass = get_test_home_assistant() self.hass.config.units = IMPERIAL_SYSTEM assert setup_component( self.hass, water_heater.DOMAIN, {"water_heater": {"platform": "demo"}} ) def tearDown(self): # pylint: disable=invalid-name """Stop down everything that was started.""" self.hass.stop() def test_setup_params(self): """Test the initial parameters.""" state = self.hass.states.get(ENTITY_WATER_HEATER) assert 119 == state.attributes.get("temperature") assert "off" == state.attributes.get("away_mode") assert "eco" == state.attributes.get("operation_mode") def test_default_setup_params(self): """Test the setup with default parameters.""" state = self.hass.states.get(ENTITY_WATER_HEATER) assert 110 == state.attributes.get("min_temp") assert 140 == state.attributes.get("max_temp") def test_set_only_target_temp_bad_attr(self): """Test setting the target temperature without required attribute.""" state = self.hass.states.get(ENTITY_WATER_HEATER) assert 119 == state.attributes.get("temperature") with pytest.raises(vol.Invalid): common.set_temperature(self.hass, None, ENTITY_WATER_HEATER) self.hass.block_till_done() assert 119 == state.attributes.get("temperature") def test_set_only_target_temp(self): """Test the setting of the target temperature.""" state = self.hass.states.get(ENTITY_WATER_HEATER) assert 119 == state.attributes.get("temperature") common.set_temperature(self.hass, 110, ENTITY_WATER_HEATER) self.hass.block_till_done() state = self.hass.states.get(ENTITY_WATER_HEATER) assert 110 == state.attributes.get("temperature") def test_set_operation_bad_attr_and_state(self): """Test setting operation mode without required attribute. Also check the state. 
""" state = self.hass.states.get(ENTITY_WATER_HEATER) assert "eco" == state.attributes.get("operation_mode") assert "eco" == state.state with pytest.raises(vol.Invalid): common.set_operation_mode(self.hass, None, ENTITY_WATER_HEATER) self.hass.block_till_done() state = self.hass.states.get(ENTITY_WATER_HEATER) assert "eco" == state.attributes.get("operation_mode") assert "eco" == state.state def test_set_operation(self): """Test setting of new operation mode.""" state = self.hass.states.get(ENTITY_WATER_HEATER) assert "eco" == state.attributes.get("operation_mode") assert "eco" == state.state common.set_operation_mode(self.hass, "electric", ENTITY_WATER_HEATER) self.hass.block_till_done() state = self.hass.states.get(ENTITY_WATER_HEATER) assert "electric" == state.attributes.get("operation_mode") assert "electric" == state.state def test_set_away_mode_bad_attr(self): """Test setting the away mode without required attribute.""" state = self.hass.states.get(ENTITY_WATER_HEATER) assert "off" == state.attributes.get("away_mode") with pytest.raises(vol.Invalid): common.set_away_mode(self.hass, None, ENTITY_WATER_HEATER) self.hass.block_till_done() assert "off" == state.attributes.get("away_mode") def test_set_away_mode_on(self): """Test setting the away mode on/true.""" common.set_away_mode(self.hass, True, ENTITY_WATER_HEATER) self.hass.block_till_done() state = self.hass.states.get(ENTITY_WATER_HEATER) assert "on" == state.attributes.get("away_mode") def test_set_away_mode_off(self): """Test setting the away mode off/false.""" common.set_away_mode(self.hass, False, ENTITY_WATER_HEATER_CELSIUS) self.hass.block_till_done() state = self.hass.states.get(ENTITY_WATER_HEATER_CELSIUS) assert "off" == state.attributes.get("away_mode") def test_set_only_target_temp_with_convert(self): """Test the setting of the target temperature.""" state = self.hass.states.get(ENTITY_WATER_HEATER_CELSIUS) assert 113 == state.attributes.get("temperature") common.set_temperature(self.hass, 114, ENTITY_WATER_HEATER_CELSIUS) self.hass.block_till_done() state = self.hass.states.get(ENTITY_WATER_HEATER_CELSIUS) assert 114 == state.attributes.get("temperature")
leppa/home-assistant
tests/components/demo/test_water_heater.py
homeassistant/components/smartthings/sensor.py
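A standalone sketch of the capability-to-sensor mapping used in the SmartThings sensor platform above; the capability and attribute names here are plain illustrative strings rather than pysmartthings constants.

from collections import namedtuple

Map = namedtuple("map", "attribute name default_unit device_class")

# Illustrative subset: each capability maps to one or more sensor descriptions.
CAPABILITY_TO_SENSORS = {
    "battery": [Map("battery", "Battery", "%", "battery")],
    "powerMeter": [Map("power", "Power Meter", "W", None)],
}

device_capabilities = ["battery", "powerMeter", "switch"]
for capability in device_capabilities:
    for m in CAPABILITY_TO_SENSORS.get(capability, []):
        print(f"{m.name}: unit={m.default_unit}, device_class={m.device_class}")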
"""Define constants for the Luftdaten component.""" from datetime import timedelta ATTR_SENSOR_ID = "sensor_id" CONF_SENSOR_ID = "sensor_id" DEFAULT_SCAN_INTERVAL = timedelta(minutes=10) DOMAIN = "luftdaten"
"""The tests for the demo water_heater component.""" import unittest import pytest import voluptuous as vol from homeassistant.components import water_heater from homeassistant.setup import setup_component from homeassistant.util.unit_system import IMPERIAL_SYSTEM from tests.common import get_test_home_assistant from tests.components.water_heater import common ENTITY_WATER_HEATER = "water_heater.demo_water_heater" ENTITY_WATER_HEATER_CELSIUS = "water_heater.demo_water_heater_celsius" class TestDemowater_heater(unittest.TestCase): """Test the demo water_heater.""" def setUp(self): # pylint: disable=invalid-name """Set up things to be run when tests are started.""" self.hass = get_test_home_assistant() self.hass.config.units = IMPERIAL_SYSTEM assert setup_component( self.hass, water_heater.DOMAIN, {"water_heater": {"platform": "demo"}} ) def tearDown(self): # pylint: disable=invalid-name """Stop down everything that was started.""" self.hass.stop() def test_setup_params(self): """Test the initial parameters.""" state = self.hass.states.get(ENTITY_WATER_HEATER) assert 119 == state.attributes.get("temperature") assert "off" == state.attributes.get("away_mode") assert "eco" == state.attributes.get("operation_mode") def test_default_setup_params(self): """Test the setup with default parameters.""" state = self.hass.states.get(ENTITY_WATER_HEATER) assert 110 == state.attributes.get("min_temp") assert 140 == state.attributes.get("max_temp") def test_set_only_target_temp_bad_attr(self): """Test setting the target temperature without required attribute.""" state = self.hass.states.get(ENTITY_WATER_HEATER) assert 119 == state.attributes.get("temperature") with pytest.raises(vol.Invalid): common.set_temperature(self.hass, None, ENTITY_WATER_HEATER) self.hass.block_till_done() assert 119 == state.attributes.get("temperature") def test_set_only_target_temp(self): """Test the setting of the target temperature.""" state = self.hass.states.get(ENTITY_WATER_HEATER) assert 119 == state.attributes.get("temperature") common.set_temperature(self.hass, 110, ENTITY_WATER_HEATER) self.hass.block_till_done() state = self.hass.states.get(ENTITY_WATER_HEATER) assert 110 == state.attributes.get("temperature") def test_set_operation_bad_attr_and_state(self): """Test setting operation mode without required attribute. Also check the state. 
""" state = self.hass.states.get(ENTITY_WATER_HEATER) assert "eco" == state.attributes.get("operation_mode") assert "eco" == state.state with pytest.raises(vol.Invalid): common.set_operation_mode(self.hass, None, ENTITY_WATER_HEATER) self.hass.block_till_done() state = self.hass.states.get(ENTITY_WATER_HEATER) assert "eco" == state.attributes.get("operation_mode") assert "eco" == state.state def test_set_operation(self): """Test setting of new operation mode.""" state = self.hass.states.get(ENTITY_WATER_HEATER) assert "eco" == state.attributes.get("operation_mode") assert "eco" == state.state common.set_operation_mode(self.hass, "electric", ENTITY_WATER_HEATER) self.hass.block_till_done() state = self.hass.states.get(ENTITY_WATER_HEATER) assert "electric" == state.attributes.get("operation_mode") assert "electric" == state.state def test_set_away_mode_bad_attr(self): """Test setting the away mode without required attribute.""" state = self.hass.states.get(ENTITY_WATER_HEATER) assert "off" == state.attributes.get("away_mode") with pytest.raises(vol.Invalid): common.set_away_mode(self.hass, None, ENTITY_WATER_HEATER) self.hass.block_till_done() assert "off" == state.attributes.get("away_mode") def test_set_away_mode_on(self): """Test setting the away mode on/true.""" common.set_away_mode(self.hass, True, ENTITY_WATER_HEATER) self.hass.block_till_done() state = self.hass.states.get(ENTITY_WATER_HEATER) assert "on" == state.attributes.get("away_mode") def test_set_away_mode_off(self): """Test setting the away mode off/false.""" common.set_away_mode(self.hass, False, ENTITY_WATER_HEATER_CELSIUS) self.hass.block_till_done() state = self.hass.states.get(ENTITY_WATER_HEATER_CELSIUS) assert "off" == state.attributes.get("away_mode") def test_set_only_target_temp_with_convert(self): """Test the setting of the target temperature.""" state = self.hass.states.get(ENTITY_WATER_HEATER_CELSIUS) assert 113 == state.attributes.get("temperature") common.set_temperature(self.hass, 114, ENTITY_WATER_HEATER_CELSIUS) self.hass.block_till_done() state = self.hass.states.get(ENTITY_WATER_HEATER_CELSIUS) assert 114 == state.attributes.get("temperature")
leppa/home-assistant
tests/components/demo/test_water_heater.py
homeassistant/components/luftdaten/const.py
"""Common code for Withings.""" from asyncio import run_coroutine_threadsafe import datetime from functools import partial import logging import re import time from typing import Any, Dict import requests from withings_api import ( AbstractWithingsApi, MeasureGetMeasResponse, SleepGetResponse, SleepGetSummaryResponse, ) from withings_api.common import AuthFailedException, UnauthorizedException from homeassistant.config_entries import ConfigEntry from homeassistant.core import HomeAssistant from homeassistant.exceptions import HomeAssistantError, PlatformNotReady from homeassistant.helpers.config_entry_oauth2_flow import ( AbstractOAuth2Implementation, OAuth2Session, ) from homeassistant.util import dt, slugify from . import const _LOGGER = logging.getLogger(const.LOG_NAMESPACE) NOT_AUTHENTICATED_ERROR = re.compile( # ".*(Error Code (100|101|102|200|401)|Missing access token parameter).*", "^401,.*", re.IGNORECASE, ) class NotAuthenticatedError(HomeAssistantError): """Raise when not authenticated with the service.""" pass class ServiceError(HomeAssistantError): """Raise when the service has an error.""" pass class ThrottleData: """Throttle data.""" def __init__(self, interval: int, data: Any): """Initialize throttle data.""" self._time = int(time.time()) self._interval = interval self._data = data @property def time(self) -> int: """Get time created.""" return self._time @property def interval(self) -> int: """Get interval.""" return self._interval @property def data(self) -> Any: """Get data.""" return self._data def is_expired(self) -> bool: """Is this data expired.""" return int(time.time()) - self.time > self.interval class ConfigEntryWithingsApi(AbstractWithingsApi): """Withing API that uses HA resources.""" def __init__( self, hass: HomeAssistant, config_entry: ConfigEntry, implementation: AbstractOAuth2Implementation, ): """Initialize object.""" self._hass = hass self._config_entry = config_entry self._implementation = implementation self.session = OAuth2Session(hass, config_entry, implementation) def _request( self, path: str, params: Dict[str, Any], method: str = "GET" ) -> Dict[str, Any]: return run_coroutine_threadsafe( self.async_do_request(path, params, method), self._hass.loop ).result() async def async_do_request( self, path: str, params: Dict[str, Any], method: str = "GET" ) -> Dict[str, Any]: """Perform an async request.""" await self.session.async_ensure_token_valid() response = await self._hass.async_add_executor_job( partial( requests.request, method, "%s/%s" % (self.URL, path), params=params, headers={ "Authorization": "Bearer %s" % self._config_entry.data["token"]["access_token"] }, ) ) return response.json() class WithingsDataManager: """A class representing an Withings cloud service connection.""" service_available = None def __init__(self, hass: HomeAssistant, profile: str, api: ConfigEntryWithingsApi): """Initialize data manager.""" self._hass = hass self._api = api self._profile = profile self._slug = slugify(profile) self._measures = None self._sleep = None self._sleep_summary = None self.sleep_summary_last_update_parameter = None self.throttle_data = {} @property def profile(self) -> str: """Get the profile.""" return self._profile @property def slug(self) -> str: """Get the slugified profile the data is for.""" return self._slug @property def api(self) -> ConfigEntryWithingsApi: """Get the api object.""" return self._api @property def measures(self) -> MeasureGetMeasResponse: """Get the current measures data.""" return self._measures @property def sleep(self) 
-> SleepGetResponse: """Get the current sleep data.""" return self._sleep @property def sleep_summary(self) -> SleepGetSummaryResponse: """Get the current sleep summary data.""" return self._sleep_summary @staticmethod def get_throttle_interval() -> int: """Get the throttle interval.""" return const.THROTTLE_INTERVAL def get_throttle_data(self, domain: str) -> ThrottleData: """Get throttlel data.""" return self.throttle_data.get(domain) def set_throttle_data(self, domain: str, throttle_data: ThrottleData): """Set throttle data.""" self.throttle_data[domain] = throttle_data @staticmethod def print_service_unavailable() -> bool: """Print the service is unavailable (once) to the log.""" if WithingsDataManager.service_available is not False: _LOGGER.error("Looks like the service is not available at the moment") WithingsDataManager.service_available = False return True return False @staticmethod def print_service_available() -> bool: """Print the service is available (once) to to the log.""" if WithingsDataManager.service_available is not True: _LOGGER.info("Looks like the service is available again") WithingsDataManager.service_available = True return True return False async def call(self, function, throttle_domain=None) -> Any: """Call an api method and handle the result.""" throttle_data = self.get_throttle_data(throttle_domain) should_throttle = ( throttle_domain and throttle_data and not throttle_data.is_expired() ) try: if should_throttle: _LOGGER.debug("Throttling call for domain: %s", throttle_domain) result = throttle_data.data else: _LOGGER.debug("Running call.") result = await self._hass.async_add_executor_job(function) # Update throttle data. self.set_throttle_data( throttle_domain, ThrottleData(self.get_throttle_interval(), result) ) WithingsDataManager.print_service_available() return result except Exception as ex: # Withings api encountered error. if isinstance(ex, (UnauthorizedException, AuthFailedException)): raise NotAuthenticatedError(ex) # Oauth2 config flow failed to authenticate. if NOT_AUTHENTICATED_ERROR.match(str(ex)): raise NotAuthenticatedError(ex) # Probably a network error. 
WithingsDataManager.print_service_unavailable() raise PlatformNotReady(ex) async def check_authenticated(self) -> bool: """Check if the user is authenticated.""" def function(): return bool(self._api.user_get_device()) return await self.call(function) async def update_measures(self) -> MeasureGetMeasResponse: """Update the measures data.""" def function(): return self._api.measure_get_meas() self._measures = await self.call(function, throttle_domain="update_measures") return self._measures async def update_sleep(self) -> SleepGetResponse: """Update the sleep data.""" end_date = int(time.time()) start_date = end_date - (6 * 60 * 60) def function(): return self._api.sleep_get(startdate=start_date, enddate=end_date) self._sleep = await self.call(function, throttle_domain="update_sleep") return self._sleep async def update_sleep_summary(self) -> SleepGetSummaryResponse: """Update the sleep summary data.""" now = dt.utcnow() yesterday = now - datetime.timedelta(days=1) yesterday_noon = datetime.datetime( yesterday.year, yesterday.month, yesterday.day, 12, 0, 0, 0, datetime.timezone.utc, ) _LOGGER.debug( "Getting sleep summary data since: %s", yesterday.strftime("%Y-%m-%d %H:%M:%S UTC"), ) def function(): return self._api.sleep_get_summary(lastupdate=yesterday_noon) self._sleep_summary = await self.call( function, throttle_domain="update_sleep_summary" ) return self._sleep_summary def create_withings_data_manager( hass: HomeAssistant, config_entry: ConfigEntry, implementation: AbstractOAuth2Implementation, ) -> WithingsDataManager: """Set up the sensor config entry.""" profile = config_entry.data.get(const.PROFILE) _LOGGER.debug("Creating withings api instance") api = ConfigEntryWithingsApi( hass=hass, config_entry=config_entry, implementation=implementation ) _LOGGER.debug("Creating withings data manager for profile: %s", profile) return WithingsDataManager(hass, profile, api) def get_data_manager( hass: HomeAssistant, entry: ConfigEntry, implementation: AbstractOAuth2Implementation, ) -> WithingsDataManager: """Get a data manager for a config entry. If the data manager doesn't exist yet, it will be created and cached for later use. """ entry_id = entry.entry_id hass.data[const.DOMAIN] = hass.data.get(const.DOMAIN, {}) domain_dict = hass.data[const.DOMAIN] domain_dict[const.DATA_MANAGER] = domain_dict.get(const.DATA_MANAGER, {}) dm_dict = domain_dict[const.DATA_MANAGER] dm_dict[entry_id] = dm_dict.get(entry_id) or create_withings_data_manager( hass, entry, implementation ) return dm_dict[entry_id]
"""The tests for the demo water_heater component.""" import unittest import pytest import voluptuous as vol from homeassistant.components import water_heater from homeassistant.setup import setup_component from homeassistant.util.unit_system import IMPERIAL_SYSTEM from tests.common import get_test_home_assistant from tests.components.water_heater import common ENTITY_WATER_HEATER = "water_heater.demo_water_heater" ENTITY_WATER_HEATER_CELSIUS = "water_heater.demo_water_heater_celsius" class TestDemowater_heater(unittest.TestCase): """Test the demo water_heater.""" def setUp(self): # pylint: disable=invalid-name """Set up things to be run when tests are started.""" self.hass = get_test_home_assistant() self.hass.config.units = IMPERIAL_SYSTEM assert setup_component( self.hass, water_heater.DOMAIN, {"water_heater": {"platform": "demo"}} ) def tearDown(self): # pylint: disable=invalid-name """Stop down everything that was started.""" self.hass.stop() def test_setup_params(self): """Test the initial parameters.""" state = self.hass.states.get(ENTITY_WATER_HEATER) assert 119 == state.attributes.get("temperature") assert "off" == state.attributes.get("away_mode") assert "eco" == state.attributes.get("operation_mode") def test_default_setup_params(self): """Test the setup with default parameters.""" state = self.hass.states.get(ENTITY_WATER_HEATER) assert 110 == state.attributes.get("min_temp") assert 140 == state.attributes.get("max_temp") def test_set_only_target_temp_bad_attr(self): """Test setting the target temperature without required attribute.""" state = self.hass.states.get(ENTITY_WATER_HEATER) assert 119 == state.attributes.get("temperature") with pytest.raises(vol.Invalid): common.set_temperature(self.hass, None, ENTITY_WATER_HEATER) self.hass.block_till_done() assert 119 == state.attributes.get("temperature") def test_set_only_target_temp(self): """Test the setting of the target temperature.""" state = self.hass.states.get(ENTITY_WATER_HEATER) assert 119 == state.attributes.get("temperature") common.set_temperature(self.hass, 110, ENTITY_WATER_HEATER) self.hass.block_till_done() state = self.hass.states.get(ENTITY_WATER_HEATER) assert 110 == state.attributes.get("temperature") def test_set_operation_bad_attr_and_state(self): """Test setting operation mode without required attribute. Also check the state. 
""" state = self.hass.states.get(ENTITY_WATER_HEATER) assert "eco" == state.attributes.get("operation_mode") assert "eco" == state.state with pytest.raises(vol.Invalid): common.set_operation_mode(self.hass, None, ENTITY_WATER_HEATER) self.hass.block_till_done() state = self.hass.states.get(ENTITY_WATER_HEATER) assert "eco" == state.attributes.get("operation_mode") assert "eco" == state.state def test_set_operation(self): """Test setting of new operation mode.""" state = self.hass.states.get(ENTITY_WATER_HEATER) assert "eco" == state.attributes.get("operation_mode") assert "eco" == state.state common.set_operation_mode(self.hass, "electric", ENTITY_WATER_HEATER) self.hass.block_till_done() state = self.hass.states.get(ENTITY_WATER_HEATER) assert "electric" == state.attributes.get("operation_mode") assert "electric" == state.state def test_set_away_mode_bad_attr(self): """Test setting the away mode without required attribute.""" state = self.hass.states.get(ENTITY_WATER_HEATER) assert "off" == state.attributes.get("away_mode") with pytest.raises(vol.Invalid): common.set_away_mode(self.hass, None, ENTITY_WATER_HEATER) self.hass.block_till_done() assert "off" == state.attributes.get("away_mode") def test_set_away_mode_on(self): """Test setting the away mode on/true.""" common.set_away_mode(self.hass, True, ENTITY_WATER_HEATER) self.hass.block_till_done() state = self.hass.states.get(ENTITY_WATER_HEATER) assert "on" == state.attributes.get("away_mode") def test_set_away_mode_off(self): """Test setting the away mode off/false.""" common.set_away_mode(self.hass, False, ENTITY_WATER_HEATER_CELSIUS) self.hass.block_till_done() state = self.hass.states.get(ENTITY_WATER_HEATER_CELSIUS) assert "off" == state.attributes.get("away_mode") def test_set_only_target_temp_with_convert(self): """Test the setting of the target temperature.""" state = self.hass.states.get(ENTITY_WATER_HEATER_CELSIUS) assert 113 == state.attributes.get("temperature") common.set_temperature(self.hass, 114, ENTITY_WATER_HEATER_CELSIUS) self.hass.block_till_done() state = self.hass.states.get(ENTITY_WATER_HEATER_CELSIUS) assert 114 == state.attributes.get("temperature")
leppa/home-assistant
tests/components/demo/test_water_heater.py
homeassistant/components/withings/common.py
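A minimal, standalone sketch of the throttling pattern that ThrottleData and WithingsDataManager.call implement above: cache a result per domain and reuse it until the interval elapses. The 60-second interval and the payloads are illustrative.

import time


class ThrottleData:
    """Cache a result together with the time it was fetched."""

    def __init__(self, interval: int, data):
        self._time = int(time.time())
        self._interval = interval
        self.data = data

    def is_expired(self) -> bool:
        return int(time.time()) - self._time > self._interval


_throttle_cache = {}


def call(domain, function, interval=60):
    """Run function, or return the cached result while it is still fresh."""
    cached = _throttle_cache.get(domain)
    if cached and not cached.is_expired():
        return cached.data
    result = function()
    _throttle_cache[domain] = ThrottleData(interval, result)
    return result


print(call("update_measures", lambda: {"weight_kg": 80}))
print(call("update_measures", lambda: {"weight_kg": 81}))  # still returns the cached 80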
"""Constants for the the iaqualink component.""" from datetime import timedelta from homeassistant.components.climate.const import HVAC_MODE_HEAT, HVAC_MODE_OFF DOMAIN = "iaqualink" CLIMATE_SUPPORTED_MODES = [HVAC_MODE_HEAT, HVAC_MODE_OFF] UPDATE_INTERVAL = timedelta(seconds=30)
"""The tests for the demo water_heater component.""" import unittest import pytest import voluptuous as vol from homeassistant.components import water_heater from homeassistant.setup import setup_component from homeassistant.util.unit_system import IMPERIAL_SYSTEM from tests.common import get_test_home_assistant from tests.components.water_heater import common ENTITY_WATER_HEATER = "water_heater.demo_water_heater" ENTITY_WATER_HEATER_CELSIUS = "water_heater.demo_water_heater_celsius" class TestDemowater_heater(unittest.TestCase): """Test the demo water_heater.""" def setUp(self): # pylint: disable=invalid-name """Set up things to be run when tests are started.""" self.hass = get_test_home_assistant() self.hass.config.units = IMPERIAL_SYSTEM assert setup_component( self.hass, water_heater.DOMAIN, {"water_heater": {"platform": "demo"}} ) def tearDown(self): # pylint: disable=invalid-name """Stop down everything that was started.""" self.hass.stop() def test_setup_params(self): """Test the initial parameters.""" state = self.hass.states.get(ENTITY_WATER_HEATER) assert 119 == state.attributes.get("temperature") assert "off" == state.attributes.get("away_mode") assert "eco" == state.attributes.get("operation_mode") def test_default_setup_params(self): """Test the setup with default parameters.""" state = self.hass.states.get(ENTITY_WATER_HEATER) assert 110 == state.attributes.get("min_temp") assert 140 == state.attributes.get("max_temp") def test_set_only_target_temp_bad_attr(self): """Test setting the target temperature without required attribute.""" state = self.hass.states.get(ENTITY_WATER_HEATER) assert 119 == state.attributes.get("temperature") with pytest.raises(vol.Invalid): common.set_temperature(self.hass, None, ENTITY_WATER_HEATER) self.hass.block_till_done() assert 119 == state.attributes.get("temperature") def test_set_only_target_temp(self): """Test the setting of the target temperature.""" state = self.hass.states.get(ENTITY_WATER_HEATER) assert 119 == state.attributes.get("temperature") common.set_temperature(self.hass, 110, ENTITY_WATER_HEATER) self.hass.block_till_done() state = self.hass.states.get(ENTITY_WATER_HEATER) assert 110 == state.attributes.get("temperature") def test_set_operation_bad_attr_and_state(self): """Test setting operation mode without required attribute. Also check the state. 
""" state = self.hass.states.get(ENTITY_WATER_HEATER) assert "eco" == state.attributes.get("operation_mode") assert "eco" == state.state with pytest.raises(vol.Invalid): common.set_operation_mode(self.hass, None, ENTITY_WATER_HEATER) self.hass.block_till_done() state = self.hass.states.get(ENTITY_WATER_HEATER) assert "eco" == state.attributes.get("operation_mode") assert "eco" == state.state def test_set_operation(self): """Test setting of new operation mode.""" state = self.hass.states.get(ENTITY_WATER_HEATER) assert "eco" == state.attributes.get("operation_mode") assert "eco" == state.state common.set_operation_mode(self.hass, "electric", ENTITY_WATER_HEATER) self.hass.block_till_done() state = self.hass.states.get(ENTITY_WATER_HEATER) assert "electric" == state.attributes.get("operation_mode") assert "electric" == state.state def test_set_away_mode_bad_attr(self): """Test setting the away mode without required attribute.""" state = self.hass.states.get(ENTITY_WATER_HEATER) assert "off" == state.attributes.get("away_mode") with pytest.raises(vol.Invalid): common.set_away_mode(self.hass, None, ENTITY_WATER_HEATER) self.hass.block_till_done() assert "off" == state.attributes.get("away_mode") def test_set_away_mode_on(self): """Test setting the away mode on/true.""" common.set_away_mode(self.hass, True, ENTITY_WATER_HEATER) self.hass.block_till_done() state = self.hass.states.get(ENTITY_WATER_HEATER) assert "on" == state.attributes.get("away_mode") def test_set_away_mode_off(self): """Test setting the away mode off/false.""" common.set_away_mode(self.hass, False, ENTITY_WATER_HEATER_CELSIUS) self.hass.block_till_done() state = self.hass.states.get(ENTITY_WATER_HEATER_CELSIUS) assert "off" == state.attributes.get("away_mode") def test_set_only_target_temp_with_convert(self): """Test the setting of the target temperature.""" state = self.hass.states.get(ENTITY_WATER_HEATER_CELSIUS) assert 113 == state.attributes.get("temperature") common.set_temperature(self.hass, 114, ENTITY_WATER_HEATER_CELSIUS) self.hass.block_till_done() state = self.hass.states.get(ENTITY_WATER_HEATER_CELSIUS) assert 114 == state.attributes.get("temperature")
leppa/home-assistant
tests/components/demo/test_water_heater.py
homeassistant/components/iaqualink/const.py
"""Support for tracking the proximity of a device.""" import logging import voluptuous as vol from homeassistant.const import CONF_DEVICES, CONF_UNIT_OF_MEASUREMENT, CONF_ZONE import homeassistant.helpers.config_validation as cv from homeassistant.helpers.entity import Entity from homeassistant.helpers.event import track_state_change from homeassistant.util.distance import convert from homeassistant.util.location import distance # mypy: allow-untyped-defs, no-check-untyped-defs _LOGGER = logging.getLogger(__name__) ATTR_DIR_OF_TRAVEL = "dir_of_travel" ATTR_DIST_FROM = "dist_to_zone" ATTR_NEAREST = "nearest" CONF_IGNORED_ZONES = "ignored_zones" CONF_TOLERANCE = "tolerance" DEFAULT_DIR_OF_TRAVEL = "not set" DEFAULT_DIST_TO_ZONE = "not set" DEFAULT_NEAREST = "not set" DEFAULT_PROXIMITY_ZONE = "home" DEFAULT_TOLERANCE = 1 DOMAIN = "proximity" UNITS = ["km", "m", "mi", "ft"] ZONE_SCHEMA = vol.Schema( { vol.Optional(CONF_ZONE, default=DEFAULT_PROXIMITY_ZONE): cv.string, vol.Optional(CONF_DEVICES, default=[]): vol.All(cv.ensure_list, [cv.entity_id]), vol.Optional(CONF_IGNORED_ZONES, default=[]): vol.All( cv.ensure_list, [cv.string] ), vol.Optional(CONF_TOLERANCE, default=DEFAULT_TOLERANCE): cv.positive_int, vol.Optional(CONF_UNIT_OF_MEASUREMENT): vol.All(cv.string, vol.In(UNITS)), } ) CONFIG_SCHEMA = vol.Schema( {DOMAIN: cv.schema_with_slug_keys(ZONE_SCHEMA)}, extra=vol.ALLOW_EXTRA ) def setup_proximity_component(hass, name, config): """Set up the individual proximity component.""" ignored_zones = config.get(CONF_IGNORED_ZONES) proximity_devices = config.get(CONF_DEVICES) tolerance = config.get(CONF_TOLERANCE) proximity_zone = name unit_of_measurement = config.get( CONF_UNIT_OF_MEASUREMENT, hass.config.units.length_unit ) zone_id = "zone.{}".format(config.get(CONF_ZONE)) proximity = Proximity( hass, proximity_zone, DEFAULT_DIST_TO_ZONE, DEFAULT_DIR_OF_TRAVEL, DEFAULT_NEAREST, ignored_zones, proximity_devices, tolerance, zone_id, unit_of_measurement, ) proximity.entity_id = f"{DOMAIN}.{proximity_zone}" proximity.schedule_update_ha_state() track_state_change(hass, proximity_devices, proximity.check_proximity_state_change) return True def setup(hass, config): """Get the zones and offsets from configuration.yaml.""" for zone, proximity_config in config[DOMAIN].items(): setup_proximity_component(hass, zone, proximity_config) return True class Proximity(Entity): """Representation of a Proximity.""" def __init__( self, hass, zone_friendly_name, dist_to, dir_of_travel, nearest, ignored_zones, proximity_devices, tolerance, proximity_zone, unit_of_measurement, ): """Initialize the proximity.""" self.hass = hass self.friendly_name = zone_friendly_name self.dist_to = dist_to self.dir_of_travel = dir_of_travel self.nearest = nearest self.ignored_zones = ignored_zones self.proximity_devices = proximity_devices self.tolerance = tolerance self.proximity_zone = proximity_zone self._unit_of_measurement = unit_of_measurement @property def name(self): """Return the name of the entity.""" return self.friendly_name @property def state(self): """Return the state.""" return self.dist_to @property def unit_of_measurement(self): """Return the unit of measurement of this entity.""" return self._unit_of_measurement @property def state_attributes(self): """Return the state attributes.""" return {ATTR_DIR_OF_TRAVEL: self.dir_of_travel, ATTR_NEAREST: self.nearest} def check_proximity_state_change(self, entity, old_state, new_state): """Perform the proximity checking.""" entity_name = new_state.name devices_to_calculate = False 
devices_in_zone = "" zone_state = self.hass.states.get(self.proximity_zone) proximity_latitude = zone_state.attributes.get("latitude") proximity_longitude = zone_state.attributes.get("longitude") # Check for devices in the monitored zone. for device in self.proximity_devices: device_state = self.hass.states.get(device) if device_state is None: devices_to_calculate = True continue if device_state.state not in self.ignored_zones: devices_to_calculate = True # Check the location of all devices. if (device_state.state).lower() == (self.friendly_name).lower(): device_friendly = device_state.name if devices_in_zone != "": devices_in_zone = devices_in_zone + ", " devices_in_zone = devices_in_zone + device_friendly # No-one to track so reset the entity. if not devices_to_calculate: self.dist_to = "not set" self.dir_of_travel = "not set" self.nearest = "not set" self.schedule_update_ha_state() return # At least one device is in the monitored zone so update the entity. if devices_in_zone != "": self.dist_to = 0 self.dir_of_travel = "arrived" self.nearest = devices_in_zone self.schedule_update_ha_state() return # We can't check proximity because latitude and longitude don't exist. if "latitude" not in new_state.attributes: return # Collect distances to the zone for all devices. distances_to_zone = {} for device in self.proximity_devices: # Ignore devices in an ignored zone. device_state = self.hass.states.get(device) if device_state.state in self.ignored_zones: continue # Ignore devices if proximity cannot be calculated. if "latitude" not in device_state.attributes: continue # Calculate the distance to the proximity zone. dist_to_zone = distance( proximity_latitude, proximity_longitude, device_state.attributes["latitude"], device_state.attributes["longitude"], ) # Add the device and distance to a dictionary. distances_to_zone[device] = round( convert(dist_to_zone, "m", self.unit_of_measurement), 1 ) # Loop through each of the distances collected and work out the # closest. closest_device: str = None dist_to_zone: float = None for device in distances_to_zone: if not dist_to_zone or distances_to_zone[device] < dist_to_zone: closest_device = device dist_to_zone = distances_to_zone[device] # If the closest device is one of the other devices. if closest_device != entity: self.dist_to = round(distances_to_zone[closest_device]) self.dir_of_travel = "unknown" device_state = self.hass.states.get(closest_device) self.nearest = device_state.name self.schedule_update_ha_state() return # Stop if we cannot calculate the direction of travel (i.e. we don't # have a previous state and a current LAT and LONG). if old_state is None or "latitude" not in old_state.attributes: self.dist_to = round(distances_to_zone[entity]) self.dir_of_travel = "unknown" self.nearest = entity_name self.schedule_update_ha_state() return # Reset the variables distance_travelled = 0 # Calculate the distance travelled. 
old_distance = distance( proximity_latitude, proximity_longitude, old_state.attributes["latitude"], old_state.attributes["longitude"], ) new_distance = distance( proximity_latitude, proximity_longitude, new_state.attributes["latitude"], new_state.attributes["longitude"], ) distance_travelled = round(new_distance - old_distance, 1) # Check for tolerance if distance_travelled < self.tolerance * -1: direction_of_travel = "towards" elif distance_travelled > self.tolerance: direction_of_travel = "away_from" else: direction_of_travel = "stationary" # Update the proximity entity self.dist_to = round(dist_to_zone) self.dir_of_travel = direction_of_travel self.nearest = entity_name self.schedule_update_ha_state() _LOGGER.debug( "proximity.%s update entity: distance=%s: direction=%s: " "device=%s", self.friendly_name, round(dist_to_zone), direction_of_travel, entity_name, ) _LOGGER.info("%s: proximity calculation complete", entity_name)
"""The tests for the demo water_heater component.""" import unittest import pytest import voluptuous as vol from homeassistant.components import water_heater from homeassistant.setup import setup_component from homeassistant.util.unit_system import IMPERIAL_SYSTEM from tests.common import get_test_home_assistant from tests.components.water_heater import common ENTITY_WATER_HEATER = "water_heater.demo_water_heater" ENTITY_WATER_HEATER_CELSIUS = "water_heater.demo_water_heater_celsius" class TestDemowater_heater(unittest.TestCase): """Test the demo water_heater.""" def setUp(self): # pylint: disable=invalid-name """Set up things to be run when tests are started.""" self.hass = get_test_home_assistant() self.hass.config.units = IMPERIAL_SYSTEM assert setup_component( self.hass, water_heater.DOMAIN, {"water_heater": {"platform": "demo"}} ) def tearDown(self): # pylint: disable=invalid-name """Stop down everything that was started.""" self.hass.stop() def test_setup_params(self): """Test the initial parameters.""" state = self.hass.states.get(ENTITY_WATER_HEATER) assert 119 == state.attributes.get("temperature") assert "off" == state.attributes.get("away_mode") assert "eco" == state.attributes.get("operation_mode") def test_default_setup_params(self): """Test the setup with default parameters.""" state = self.hass.states.get(ENTITY_WATER_HEATER) assert 110 == state.attributes.get("min_temp") assert 140 == state.attributes.get("max_temp") def test_set_only_target_temp_bad_attr(self): """Test setting the target temperature without required attribute.""" state = self.hass.states.get(ENTITY_WATER_HEATER) assert 119 == state.attributes.get("temperature") with pytest.raises(vol.Invalid): common.set_temperature(self.hass, None, ENTITY_WATER_HEATER) self.hass.block_till_done() assert 119 == state.attributes.get("temperature") def test_set_only_target_temp(self): """Test the setting of the target temperature.""" state = self.hass.states.get(ENTITY_WATER_HEATER) assert 119 == state.attributes.get("temperature") common.set_temperature(self.hass, 110, ENTITY_WATER_HEATER) self.hass.block_till_done() state = self.hass.states.get(ENTITY_WATER_HEATER) assert 110 == state.attributes.get("temperature") def test_set_operation_bad_attr_and_state(self): """Test setting operation mode without required attribute. Also check the state. 
""" state = self.hass.states.get(ENTITY_WATER_HEATER) assert "eco" == state.attributes.get("operation_mode") assert "eco" == state.state with pytest.raises(vol.Invalid): common.set_operation_mode(self.hass, None, ENTITY_WATER_HEATER) self.hass.block_till_done() state = self.hass.states.get(ENTITY_WATER_HEATER) assert "eco" == state.attributes.get("operation_mode") assert "eco" == state.state def test_set_operation(self): """Test setting of new operation mode.""" state = self.hass.states.get(ENTITY_WATER_HEATER) assert "eco" == state.attributes.get("operation_mode") assert "eco" == state.state common.set_operation_mode(self.hass, "electric", ENTITY_WATER_HEATER) self.hass.block_till_done() state = self.hass.states.get(ENTITY_WATER_HEATER) assert "electric" == state.attributes.get("operation_mode") assert "electric" == state.state def test_set_away_mode_bad_attr(self): """Test setting the away mode without required attribute.""" state = self.hass.states.get(ENTITY_WATER_HEATER) assert "off" == state.attributes.get("away_mode") with pytest.raises(vol.Invalid): common.set_away_mode(self.hass, None, ENTITY_WATER_HEATER) self.hass.block_till_done() assert "off" == state.attributes.get("away_mode") def test_set_away_mode_on(self): """Test setting the away mode on/true.""" common.set_away_mode(self.hass, True, ENTITY_WATER_HEATER) self.hass.block_till_done() state = self.hass.states.get(ENTITY_WATER_HEATER) assert "on" == state.attributes.get("away_mode") def test_set_away_mode_off(self): """Test setting the away mode off/false.""" common.set_away_mode(self.hass, False, ENTITY_WATER_HEATER_CELSIUS) self.hass.block_till_done() state = self.hass.states.get(ENTITY_WATER_HEATER_CELSIUS) assert "off" == state.attributes.get("away_mode") def test_set_only_target_temp_with_convert(self): """Test the setting of the target temperature.""" state = self.hass.states.get(ENTITY_WATER_HEATER_CELSIUS) assert 113 == state.attributes.get("temperature") common.set_temperature(self.hass, 114, ENTITY_WATER_HEATER_CELSIUS) self.hass.block_till_done() state = self.hass.states.get(ENTITY_WATER_HEATER_CELSIUS) assert 114 == state.attributes.get("temperature")
leppa/home-assistant
tests/components/demo/test_water_heater.py
homeassistant/components/proximity/__init__.py
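A standalone sketch of the tolerance check the proximity component above uses to classify the direction of travel from two successive distances to the monitored zone; the distances and the 1-metre tolerance are illustrative.

def direction_of_travel(old_distance, new_distance, tolerance=1):
    """Classify movement relative to the zone, ignoring changes within the tolerance."""
    distance_travelled = round(new_distance - old_distance, 1)
    if distance_travelled < tolerance * -1:
        return "towards"
    if distance_travelled > tolerance:
        return "away_from"
    return "stationary"


print(direction_of_travel(120.0, 80.0))   # towards
print(direction_of_travel(80.0, 120.0))   # away_from
print(direction_of_travel(100.0, 100.5))  # stationary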
"""Support for deCONZ lights.""" from homeassistant.components.light import ( ATTR_BRIGHTNESS, ATTR_COLOR_TEMP, ATTR_EFFECT, ATTR_FLASH, ATTR_HS_COLOR, ATTR_TRANSITION, EFFECT_COLORLOOP, FLASH_LONG, FLASH_SHORT, SUPPORT_BRIGHTNESS, SUPPORT_COLOR, SUPPORT_COLOR_TEMP, SUPPORT_EFFECT, SUPPORT_FLASH, SUPPORT_TRANSITION, Light, ) from homeassistant.core import callback from homeassistant.helpers.dispatcher import async_dispatcher_connect import homeassistant.util.color as color_util from .const import ( COVER_TYPES, DOMAIN as DECONZ_DOMAIN, NEW_GROUP, NEW_LIGHT, SWITCH_TYPES, ) from .deconz_device import DeconzDevice from .gateway import DeconzEntityHandler, get_gateway_from_config_entry async def async_setup_platform(hass, config, async_add_entities, discovery_info=None): """Old way of setting up deCONZ platforms.""" async def async_setup_entry(hass, config_entry, async_add_entities): """Set up the deCONZ lights and groups from a config entry.""" gateway = get_gateway_from_config_entry(hass, config_entry) entity_handler = DeconzEntityHandler(gateway) @callback def async_add_light(lights): """Add light from deCONZ.""" entities = [] for light in lights: if light.type not in COVER_TYPES + SWITCH_TYPES: entities.append(DeconzLight(light, gateway)) async_add_entities(entities, True) gateway.listeners.append( async_dispatcher_connect( hass, gateway.async_signal_new_device(NEW_LIGHT), async_add_light ) ) @callback def async_add_group(groups): """Add group from deCONZ.""" entities = [] for group in groups: if group.lights: new_group = DeconzGroup(group, gateway) entity_handler.add_entity(new_group) entities.append(new_group) async_add_entities(entities, True) gateway.listeners.append( async_dispatcher_connect( hass, gateway.async_signal_new_device(NEW_GROUP), async_add_group ) ) async_add_light(gateway.api.lights.values()) async_add_group(gateway.api.groups.values()) class DeconzLight(DeconzDevice, Light): """Representation of a deCONZ light.""" def __init__(self, device, gateway): """Set up light.""" super().__init__(device, gateway) self._features = SUPPORT_BRIGHTNESS self._features |= SUPPORT_FLASH self._features |= SUPPORT_TRANSITION if self._device.ct is not None: self._features |= SUPPORT_COLOR_TEMP if self._device.xy is not None: self._features |= SUPPORT_COLOR if self._device.effect is not None: self._features |= SUPPORT_EFFECT @property def brightness(self): """Return the brightness of this light between 0..255.""" return self._device.brightness @property def effect_list(self): """Return the list of supported effects.""" return [EFFECT_COLORLOOP] @property def color_temp(self): """Return the CT color value.""" if self._device.colormode != "ct": return None return self._device.ct @property def hs_color(self): """Return the hs color value.""" if self._device.colormode in ("xy", "hs") and self._device.xy: return color_util.color_xy_to_hs(*self._device.xy) return None @property def is_on(self): """Return true if light is on.""" return self._device.state @property def supported_features(self): """Flag supported features.""" return self._features async def async_turn_on(self, **kwargs): """Turn on light.""" data = {"on": True} if ATTR_COLOR_TEMP in kwargs: data["ct"] = kwargs[ATTR_COLOR_TEMP] if ATTR_HS_COLOR in kwargs: data["xy"] = color_util.color_hs_to_xy(*kwargs[ATTR_HS_COLOR]) if ATTR_BRIGHTNESS in kwargs: data["bri"] = kwargs[ATTR_BRIGHTNESS] if ATTR_TRANSITION in kwargs: data["transitiontime"] = int(kwargs[ATTR_TRANSITION] * 10) elif "IKEA" in (self._device.manufacturer or ""): 
data["transitiontime"] = 0 if ATTR_FLASH in kwargs: if kwargs[ATTR_FLASH] == FLASH_SHORT: data["alert"] = "select" del data["on"] elif kwargs[ATTR_FLASH] == FLASH_LONG: data["alert"] = "lselect" del data["on"] if ATTR_EFFECT in kwargs: if kwargs[ATTR_EFFECT] == EFFECT_COLORLOOP: data["effect"] = "colorloop" else: data["effect"] = "none" await self._device.async_set_state(data) async def async_turn_off(self, **kwargs): """Turn off light.""" data = {"on": False} if ATTR_TRANSITION in kwargs: data["bri"] = 0 data["transitiontime"] = int(kwargs[ATTR_TRANSITION] * 10) if ATTR_FLASH in kwargs: if kwargs[ATTR_FLASH] == FLASH_SHORT: data["alert"] = "select" del data["on"] elif kwargs[ATTR_FLASH] == FLASH_LONG: data["alert"] = "lselect" del data["on"] await self._device.async_set_state(data) @property def device_state_attributes(self): """Return the device state attributes.""" attributes = {} attributes["is_deconz_group"] = self._device.type == "LightGroup" return attributes class DeconzGroup(DeconzLight): """Representation of a deCONZ group.""" def __init__(self, device, gateway): """Set up group and create an unique id.""" super().__init__(device, gateway) self._unique_id = f"{self.gateway.api.config.bridgeid}-{self._device.deconz_id}" @property def unique_id(self): """Return a unique identifier for this device.""" return self._unique_id @property def device_info(self): """Return a device description for device registry.""" bridgeid = self.gateway.api.config.bridgeid return { "identifiers": {(DECONZ_DOMAIN, self.unique_id)}, "manufacturer": "Dresden Elektronik", "model": "deCONZ group", "name": self._device.name, "via_device": (DECONZ_DOMAIN, bridgeid), } @property def device_state_attributes(self): """Return the device state attributes.""" attributes = dict(super().device_state_attributes) attributes["all_on"] = self._device.all_on return attributes
"""The tests for the demo water_heater component.""" import unittest import pytest import voluptuous as vol from homeassistant.components import water_heater from homeassistant.setup import setup_component from homeassistant.util.unit_system import IMPERIAL_SYSTEM from tests.common import get_test_home_assistant from tests.components.water_heater import common ENTITY_WATER_HEATER = "water_heater.demo_water_heater" ENTITY_WATER_HEATER_CELSIUS = "water_heater.demo_water_heater_celsius" class TestDemowater_heater(unittest.TestCase): """Test the demo water_heater.""" def setUp(self): # pylint: disable=invalid-name """Set up things to be run when tests are started.""" self.hass = get_test_home_assistant() self.hass.config.units = IMPERIAL_SYSTEM assert setup_component( self.hass, water_heater.DOMAIN, {"water_heater": {"platform": "demo"}} ) def tearDown(self): # pylint: disable=invalid-name """Stop down everything that was started.""" self.hass.stop() def test_setup_params(self): """Test the initial parameters.""" state = self.hass.states.get(ENTITY_WATER_HEATER) assert 119 == state.attributes.get("temperature") assert "off" == state.attributes.get("away_mode") assert "eco" == state.attributes.get("operation_mode") def test_default_setup_params(self): """Test the setup with default parameters.""" state = self.hass.states.get(ENTITY_WATER_HEATER) assert 110 == state.attributes.get("min_temp") assert 140 == state.attributes.get("max_temp") def test_set_only_target_temp_bad_attr(self): """Test setting the target temperature without required attribute.""" state = self.hass.states.get(ENTITY_WATER_HEATER) assert 119 == state.attributes.get("temperature") with pytest.raises(vol.Invalid): common.set_temperature(self.hass, None, ENTITY_WATER_HEATER) self.hass.block_till_done() assert 119 == state.attributes.get("temperature") def test_set_only_target_temp(self): """Test the setting of the target temperature.""" state = self.hass.states.get(ENTITY_WATER_HEATER) assert 119 == state.attributes.get("temperature") common.set_temperature(self.hass, 110, ENTITY_WATER_HEATER) self.hass.block_till_done() state = self.hass.states.get(ENTITY_WATER_HEATER) assert 110 == state.attributes.get("temperature") def test_set_operation_bad_attr_and_state(self): """Test setting operation mode without required attribute. Also check the state. 
""" state = self.hass.states.get(ENTITY_WATER_HEATER) assert "eco" == state.attributes.get("operation_mode") assert "eco" == state.state with pytest.raises(vol.Invalid): common.set_operation_mode(self.hass, None, ENTITY_WATER_HEATER) self.hass.block_till_done() state = self.hass.states.get(ENTITY_WATER_HEATER) assert "eco" == state.attributes.get("operation_mode") assert "eco" == state.state def test_set_operation(self): """Test setting of new operation mode.""" state = self.hass.states.get(ENTITY_WATER_HEATER) assert "eco" == state.attributes.get("operation_mode") assert "eco" == state.state common.set_operation_mode(self.hass, "electric", ENTITY_WATER_HEATER) self.hass.block_till_done() state = self.hass.states.get(ENTITY_WATER_HEATER) assert "electric" == state.attributes.get("operation_mode") assert "electric" == state.state def test_set_away_mode_bad_attr(self): """Test setting the away mode without required attribute.""" state = self.hass.states.get(ENTITY_WATER_HEATER) assert "off" == state.attributes.get("away_mode") with pytest.raises(vol.Invalid): common.set_away_mode(self.hass, None, ENTITY_WATER_HEATER) self.hass.block_till_done() assert "off" == state.attributes.get("away_mode") def test_set_away_mode_on(self): """Test setting the away mode on/true.""" common.set_away_mode(self.hass, True, ENTITY_WATER_HEATER) self.hass.block_till_done() state = self.hass.states.get(ENTITY_WATER_HEATER) assert "on" == state.attributes.get("away_mode") def test_set_away_mode_off(self): """Test setting the away mode off/false.""" common.set_away_mode(self.hass, False, ENTITY_WATER_HEATER_CELSIUS) self.hass.block_till_done() state = self.hass.states.get(ENTITY_WATER_HEATER_CELSIUS) assert "off" == state.attributes.get("away_mode") def test_set_only_target_temp_with_convert(self): """Test the setting of the target temperature.""" state = self.hass.states.get(ENTITY_WATER_HEATER_CELSIUS) assert 113 == state.attributes.get("temperature") common.set_temperature(self.hass, 114, ENTITY_WATER_HEATER_CELSIUS) self.hass.block_till_done() state = self.hass.states.get(ENTITY_WATER_HEATER_CELSIUS) assert 114 == state.attributes.get("temperature")
leppa/home-assistant
tests/components/demo/test_water_heater.py
homeassistant/components/deconz/light.py
"""Support for KEBA charging station sensors.""" import logging from homeassistant.const import DEVICE_CLASS_POWER, ENERGY_KILO_WATT_HOUR from homeassistant.helpers.entity import Entity from . import DOMAIN _LOGGER = logging.getLogger(__name__) async def async_setup_platform(hass, config, async_add_entities, discovery_info=None): """Set up the KEBA charging station platform.""" if discovery_info is None: return keba = hass.data[DOMAIN] sensors = [ KebaSensor(keba, "Curr user", "Max current", "mdi:flash", "A"), KebaSensor( keba, "Setenergy", "Energy target", "mdi:gauge", ENERGY_KILO_WATT_HOUR ), KebaSensor(keba, "P", "Charging power", "mdi:flash", "kW", DEVICE_CLASS_POWER), KebaSensor( keba, "E pres", "Session energy", "mdi:gauge", ENERGY_KILO_WATT_HOUR ), KebaSensor(keba, "E total", "Total Energy", "mdi:gauge", ENERGY_KILO_WATT_HOUR), ] async_add_entities(sensors) class KebaSensor(Entity): """The entity class for KEBA charging stations sensors.""" def __init__(self, keba, key, name, icon, unit, device_class=None): """Initialize the KEBA Sensor.""" self._key = key self._keba = keba self._name = name self._device_class = device_class self._icon = icon self._unit = unit self._state = None self._attributes = {} @property def should_poll(self): """Deactivate polling. Data updated by KebaHandler.""" return False @property def unique_id(self): """Return the unique ID of the binary sensor.""" return f"{self._keba.device_name}_{self._name}" @property def name(self): """Return the name of the device.""" return self._name @property def device_class(self): """Return the class of this sensor.""" return self._device_class @property def icon(self): """Icon to use in the frontend, if any.""" return self._icon @property def state(self): """Return the state of the sensor.""" return self._state @property def unit_of_measurement(self): """Get the unit of measurement.""" return self._unit @property def device_state_attributes(self): """Return the state attributes of the binary sensor.""" return self._attributes async def async_update(self): """Get latest cached states from the device.""" self._state = self._keba.get_value(self._key) if self._key == "P": self._attributes["power_factor"] = self._keba.get_value("PF") self._attributes["voltage_u1"] = str(self._keba.get_value("U1")) self._attributes["voltage_u2"] = str(self._keba.get_value("U2")) self._attributes["voltage_u3"] = str(self._keba.get_value("U3")) self._attributes["current_i1"] = str(self._keba.get_value("I1")) self._attributes["current_i2"] = str(self._keba.get_value("I2")) self._attributes["current_i3"] = str(self._keba.get_value("I3")) elif self._key == "Curr user": self._attributes["max_current_hardware"] = self._keba.get_value("Curr HW") def update_callback(self): """Schedule a state update.""" self.async_schedule_update_ha_state(True) async def async_added_to_hass(self): """Add update callback after being added to hass.""" self._keba.add_update_listener(self.update_callback)
"""The tests for the demo water_heater component.""" import unittest import pytest import voluptuous as vol from homeassistant.components import water_heater from homeassistant.setup import setup_component from homeassistant.util.unit_system import IMPERIAL_SYSTEM from tests.common import get_test_home_assistant from tests.components.water_heater import common ENTITY_WATER_HEATER = "water_heater.demo_water_heater" ENTITY_WATER_HEATER_CELSIUS = "water_heater.demo_water_heater_celsius" class TestDemowater_heater(unittest.TestCase): """Test the demo water_heater.""" def setUp(self): # pylint: disable=invalid-name """Set up things to be run when tests are started.""" self.hass = get_test_home_assistant() self.hass.config.units = IMPERIAL_SYSTEM assert setup_component( self.hass, water_heater.DOMAIN, {"water_heater": {"platform": "demo"}} ) def tearDown(self): # pylint: disable=invalid-name """Stop down everything that was started.""" self.hass.stop() def test_setup_params(self): """Test the initial parameters.""" state = self.hass.states.get(ENTITY_WATER_HEATER) assert 119 == state.attributes.get("temperature") assert "off" == state.attributes.get("away_mode") assert "eco" == state.attributes.get("operation_mode") def test_default_setup_params(self): """Test the setup with default parameters.""" state = self.hass.states.get(ENTITY_WATER_HEATER) assert 110 == state.attributes.get("min_temp") assert 140 == state.attributes.get("max_temp") def test_set_only_target_temp_bad_attr(self): """Test setting the target temperature without required attribute.""" state = self.hass.states.get(ENTITY_WATER_HEATER) assert 119 == state.attributes.get("temperature") with pytest.raises(vol.Invalid): common.set_temperature(self.hass, None, ENTITY_WATER_HEATER) self.hass.block_till_done() assert 119 == state.attributes.get("temperature") def test_set_only_target_temp(self): """Test the setting of the target temperature.""" state = self.hass.states.get(ENTITY_WATER_HEATER) assert 119 == state.attributes.get("temperature") common.set_temperature(self.hass, 110, ENTITY_WATER_HEATER) self.hass.block_till_done() state = self.hass.states.get(ENTITY_WATER_HEATER) assert 110 == state.attributes.get("temperature") def test_set_operation_bad_attr_and_state(self): """Test setting operation mode without required attribute. Also check the state. 
""" state = self.hass.states.get(ENTITY_WATER_HEATER) assert "eco" == state.attributes.get("operation_mode") assert "eco" == state.state with pytest.raises(vol.Invalid): common.set_operation_mode(self.hass, None, ENTITY_WATER_HEATER) self.hass.block_till_done() state = self.hass.states.get(ENTITY_WATER_HEATER) assert "eco" == state.attributes.get("operation_mode") assert "eco" == state.state def test_set_operation(self): """Test setting of new operation mode.""" state = self.hass.states.get(ENTITY_WATER_HEATER) assert "eco" == state.attributes.get("operation_mode") assert "eco" == state.state common.set_operation_mode(self.hass, "electric", ENTITY_WATER_HEATER) self.hass.block_till_done() state = self.hass.states.get(ENTITY_WATER_HEATER) assert "electric" == state.attributes.get("operation_mode") assert "electric" == state.state def test_set_away_mode_bad_attr(self): """Test setting the away mode without required attribute.""" state = self.hass.states.get(ENTITY_WATER_HEATER) assert "off" == state.attributes.get("away_mode") with pytest.raises(vol.Invalid): common.set_away_mode(self.hass, None, ENTITY_WATER_HEATER) self.hass.block_till_done() assert "off" == state.attributes.get("away_mode") def test_set_away_mode_on(self): """Test setting the away mode on/true.""" common.set_away_mode(self.hass, True, ENTITY_WATER_HEATER) self.hass.block_till_done() state = self.hass.states.get(ENTITY_WATER_HEATER) assert "on" == state.attributes.get("away_mode") def test_set_away_mode_off(self): """Test setting the away mode off/false.""" common.set_away_mode(self.hass, False, ENTITY_WATER_HEATER_CELSIUS) self.hass.block_till_done() state = self.hass.states.get(ENTITY_WATER_HEATER_CELSIUS) assert "off" == state.attributes.get("away_mode") def test_set_only_target_temp_with_convert(self): """Test the setting of the target temperature.""" state = self.hass.states.get(ENTITY_WATER_HEATER_CELSIUS) assert 113 == state.attributes.get("temperature") common.set_temperature(self.hass, 114, ENTITY_WATER_HEATER_CELSIUS) self.hass.block_till_done() state = self.hass.states.get(ENTITY_WATER_HEATER_CELSIUS) assert 114 == state.attributes.get("temperature")
leppa/home-assistant
tests/components/demo/test_water_heater.py
homeassistant/components/keba/sensor.py
"""Provides device automations for Fan.""" from typing import List import voluptuous as vol from homeassistant.components.automation import AutomationActionType, state from homeassistant.components.device_automation import TRIGGER_BASE_SCHEMA from homeassistant.const import ( CONF_DEVICE_ID, CONF_DOMAIN, CONF_ENTITY_ID, CONF_PLATFORM, CONF_TYPE, STATE_OFF, STATE_ON, ) from homeassistant.core import CALLBACK_TYPE, HomeAssistant from homeassistant.helpers import config_validation as cv, entity_registry from homeassistant.helpers.typing import ConfigType from . import DOMAIN TRIGGER_TYPES = {"turned_on", "turned_off"} TRIGGER_SCHEMA = TRIGGER_BASE_SCHEMA.extend( { vol.Required(CONF_ENTITY_ID): cv.entity_id, vol.Required(CONF_TYPE): vol.In(TRIGGER_TYPES), } ) async def async_get_triggers(hass: HomeAssistant, device_id: str) -> List[dict]: """List device triggers for Fan devices.""" registry = await entity_registry.async_get_registry(hass) triggers = [] # Get all the integrations entities for this device for entry in entity_registry.async_entries_for_device(registry, device_id): if entry.domain != DOMAIN: continue # Add triggers for each entity that belongs to this integration triggers.append( { CONF_PLATFORM: "device", CONF_DEVICE_ID: device_id, CONF_DOMAIN: DOMAIN, CONF_ENTITY_ID: entry.entity_id, CONF_TYPE: "turned_on", } ) triggers.append( { CONF_PLATFORM: "device", CONF_DEVICE_ID: device_id, CONF_DOMAIN: DOMAIN, CONF_ENTITY_ID: entry.entity_id, CONF_TYPE: "turned_off", } ) return triggers async def async_attach_trigger( hass: HomeAssistant, config: ConfigType, action: AutomationActionType, automation_info: dict, ) -> CALLBACK_TYPE: """Attach a trigger.""" config = TRIGGER_SCHEMA(config) if config[CONF_TYPE] == "turned_on": from_state = STATE_OFF to_state = STATE_ON else: from_state = STATE_ON to_state = STATE_OFF state_config = { state.CONF_PLATFORM: "state", CONF_ENTITY_ID: config[CONF_ENTITY_ID], state.CONF_FROM: from_state, state.CONF_TO: to_state, } state_config = state.TRIGGER_SCHEMA(state_config) return await state.async_attach_trigger( hass, state_config, action, automation_info, platform_type="device" )
"""The tests for the demo water_heater component.""" import unittest import pytest import voluptuous as vol from homeassistant.components import water_heater from homeassistant.setup import setup_component from homeassistant.util.unit_system import IMPERIAL_SYSTEM from tests.common import get_test_home_assistant from tests.components.water_heater import common ENTITY_WATER_HEATER = "water_heater.demo_water_heater" ENTITY_WATER_HEATER_CELSIUS = "water_heater.demo_water_heater_celsius" class TestDemowater_heater(unittest.TestCase): """Test the demo water_heater.""" def setUp(self): # pylint: disable=invalid-name """Set up things to be run when tests are started.""" self.hass = get_test_home_assistant() self.hass.config.units = IMPERIAL_SYSTEM assert setup_component( self.hass, water_heater.DOMAIN, {"water_heater": {"platform": "demo"}} ) def tearDown(self): # pylint: disable=invalid-name """Stop down everything that was started.""" self.hass.stop() def test_setup_params(self): """Test the initial parameters.""" state = self.hass.states.get(ENTITY_WATER_HEATER) assert 119 == state.attributes.get("temperature") assert "off" == state.attributes.get("away_mode") assert "eco" == state.attributes.get("operation_mode") def test_default_setup_params(self): """Test the setup with default parameters.""" state = self.hass.states.get(ENTITY_WATER_HEATER) assert 110 == state.attributes.get("min_temp") assert 140 == state.attributes.get("max_temp") def test_set_only_target_temp_bad_attr(self): """Test setting the target temperature without required attribute.""" state = self.hass.states.get(ENTITY_WATER_HEATER) assert 119 == state.attributes.get("temperature") with pytest.raises(vol.Invalid): common.set_temperature(self.hass, None, ENTITY_WATER_HEATER) self.hass.block_till_done() assert 119 == state.attributes.get("temperature") def test_set_only_target_temp(self): """Test the setting of the target temperature.""" state = self.hass.states.get(ENTITY_WATER_HEATER) assert 119 == state.attributes.get("temperature") common.set_temperature(self.hass, 110, ENTITY_WATER_HEATER) self.hass.block_till_done() state = self.hass.states.get(ENTITY_WATER_HEATER) assert 110 == state.attributes.get("temperature") def test_set_operation_bad_attr_and_state(self): """Test setting operation mode without required attribute. Also check the state. 
""" state = self.hass.states.get(ENTITY_WATER_HEATER) assert "eco" == state.attributes.get("operation_mode") assert "eco" == state.state with pytest.raises(vol.Invalid): common.set_operation_mode(self.hass, None, ENTITY_WATER_HEATER) self.hass.block_till_done() state = self.hass.states.get(ENTITY_WATER_HEATER) assert "eco" == state.attributes.get("operation_mode") assert "eco" == state.state def test_set_operation(self): """Test setting of new operation mode.""" state = self.hass.states.get(ENTITY_WATER_HEATER) assert "eco" == state.attributes.get("operation_mode") assert "eco" == state.state common.set_operation_mode(self.hass, "electric", ENTITY_WATER_HEATER) self.hass.block_till_done() state = self.hass.states.get(ENTITY_WATER_HEATER) assert "electric" == state.attributes.get("operation_mode") assert "electric" == state.state def test_set_away_mode_bad_attr(self): """Test setting the away mode without required attribute.""" state = self.hass.states.get(ENTITY_WATER_HEATER) assert "off" == state.attributes.get("away_mode") with pytest.raises(vol.Invalid): common.set_away_mode(self.hass, None, ENTITY_WATER_HEATER) self.hass.block_till_done() assert "off" == state.attributes.get("away_mode") def test_set_away_mode_on(self): """Test setting the away mode on/true.""" common.set_away_mode(self.hass, True, ENTITY_WATER_HEATER) self.hass.block_till_done() state = self.hass.states.get(ENTITY_WATER_HEATER) assert "on" == state.attributes.get("away_mode") def test_set_away_mode_off(self): """Test setting the away mode off/false.""" common.set_away_mode(self.hass, False, ENTITY_WATER_HEATER_CELSIUS) self.hass.block_till_done() state = self.hass.states.get(ENTITY_WATER_HEATER_CELSIUS) assert "off" == state.attributes.get("away_mode") def test_set_only_target_temp_with_convert(self): """Test the setting of the target temperature.""" state = self.hass.states.get(ENTITY_WATER_HEATER_CELSIUS) assert 113 == state.attributes.get("temperature") common.set_temperature(self.hass, 114, ENTITY_WATER_HEATER_CELSIUS) self.hass.block_till_done() state = self.hass.states.get(ENTITY_WATER_HEATER_CELSIUS) assert 114 == state.attributes.get("temperature")
leppa/home-assistant
tests/components/demo/test_water_heater.py
homeassistant/components/fan/device_trigger.py
"""Switch implementation for Wireless Sensor Tags (wirelesstag.net).""" import logging import voluptuous as vol from homeassistant.components.switch import PLATFORM_SCHEMA, SwitchDevice from homeassistant.const import CONF_MONITORED_CONDITIONS import homeassistant.helpers.config_validation as cv from . import DOMAIN as WIRELESSTAG_DOMAIN, WirelessTagBaseSensor _LOGGER = logging.getLogger(__name__) ARM_TEMPERATURE = "temperature" ARM_HUMIDITY = "humidity" ARM_MOTION = "motion" ARM_LIGHT = "light" ARM_MOISTURE = "moisture" # Switch types: Name, tag sensor type SWITCH_TYPES = { ARM_TEMPERATURE: ["Arm Temperature", "temperature"], ARM_HUMIDITY: ["Arm Humidity", "humidity"], ARM_MOTION: ["Arm Motion", "motion"], ARM_LIGHT: ["Arm Light", "light"], ARM_MOISTURE: ["Arm Moisture", "moisture"], } PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend( { vol.Required(CONF_MONITORED_CONDITIONS, default=[]): vol.All( cv.ensure_list, [vol.In(SWITCH_TYPES)] ) } ) def setup_platform(hass, config, add_entities, discovery_info=None): """Set up switches for a Wireless Sensor Tags.""" platform = hass.data.get(WIRELESSTAG_DOMAIN) switches = [] tags = platform.load_tags() for switch_type in config.get(CONF_MONITORED_CONDITIONS): for _, tag in tags.items(): if switch_type in tag.allowed_monitoring_types: switches.append(WirelessTagSwitch(platform, tag, switch_type)) add_entities(switches, True) class WirelessTagSwitch(WirelessTagBaseSensor, SwitchDevice): """A switch implementation for Wireless Sensor Tags.""" def __init__(self, api, tag, switch_type): """Initialize a switch for Wireless Sensor Tag.""" super().__init__(api, tag) self._switch_type = switch_type self.sensor_type = SWITCH_TYPES[self._switch_type][1] self._name = "{} {}".format(self._tag.name, SWITCH_TYPES[self._switch_type][0]) def turn_on(self, **kwargs): """Turn on the switch.""" self._api.arm(self) def turn_off(self, **kwargs): """Turn on the switch.""" self._api.disarm(self) @property def is_on(self) -> bool: """Return True if entity is on.""" return self._state def updated_state_value(self): """Provide formatted value.""" return self.principal_value @property def principal_value(self): """Provide actual value of switch.""" attr_name = f"is_{self.sensor_type}_sensor_armed" return getattr(self._tag, attr_name, False)
"""The tests for the demo water_heater component.""" import unittest import pytest import voluptuous as vol from homeassistant.components import water_heater from homeassistant.setup import setup_component from homeassistant.util.unit_system import IMPERIAL_SYSTEM from tests.common import get_test_home_assistant from tests.components.water_heater import common ENTITY_WATER_HEATER = "water_heater.demo_water_heater" ENTITY_WATER_HEATER_CELSIUS = "water_heater.demo_water_heater_celsius" class TestDemowater_heater(unittest.TestCase): """Test the demo water_heater.""" def setUp(self): # pylint: disable=invalid-name """Set up things to be run when tests are started.""" self.hass = get_test_home_assistant() self.hass.config.units = IMPERIAL_SYSTEM assert setup_component( self.hass, water_heater.DOMAIN, {"water_heater": {"platform": "demo"}} ) def tearDown(self): # pylint: disable=invalid-name """Stop down everything that was started.""" self.hass.stop() def test_setup_params(self): """Test the initial parameters.""" state = self.hass.states.get(ENTITY_WATER_HEATER) assert 119 == state.attributes.get("temperature") assert "off" == state.attributes.get("away_mode") assert "eco" == state.attributes.get("operation_mode") def test_default_setup_params(self): """Test the setup with default parameters.""" state = self.hass.states.get(ENTITY_WATER_HEATER) assert 110 == state.attributes.get("min_temp") assert 140 == state.attributes.get("max_temp") def test_set_only_target_temp_bad_attr(self): """Test setting the target temperature without required attribute.""" state = self.hass.states.get(ENTITY_WATER_HEATER) assert 119 == state.attributes.get("temperature") with pytest.raises(vol.Invalid): common.set_temperature(self.hass, None, ENTITY_WATER_HEATER) self.hass.block_till_done() assert 119 == state.attributes.get("temperature") def test_set_only_target_temp(self): """Test the setting of the target temperature.""" state = self.hass.states.get(ENTITY_WATER_HEATER) assert 119 == state.attributes.get("temperature") common.set_temperature(self.hass, 110, ENTITY_WATER_HEATER) self.hass.block_till_done() state = self.hass.states.get(ENTITY_WATER_HEATER) assert 110 == state.attributes.get("temperature") def test_set_operation_bad_attr_and_state(self): """Test setting operation mode without required attribute. Also check the state. 
""" state = self.hass.states.get(ENTITY_WATER_HEATER) assert "eco" == state.attributes.get("operation_mode") assert "eco" == state.state with pytest.raises(vol.Invalid): common.set_operation_mode(self.hass, None, ENTITY_WATER_HEATER) self.hass.block_till_done() state = self.hass.states.get(ENTITY_WATER_HEATER) assert "eco" == state.attributes.get("operation_mode") assert "eco" == state.state def test_set_operation(self): """Test setting of new operation mode.""" state = self.hass.states.get(ENTITY_WATER_HEATER) assert "eco" == state.attributes.get("operation_mode") assert "eco" == state.state common.set_operation_mode(self.hass, "electric", ENTITY_WATER_HEATER) self.hass.block_till_done() state = self.hass.states.get(ENTITY_WATER_HEATER) assert "electric" == state.attributes.get("operation_mode") assert "electric" == state.state def test_set_away_mode_bad_attr(self): """Test setting the away mode without required attribute.""" state = self.hass.states.get(ENTITY_WATER_HEATER) assert "off" == state.attributes.get("away_mode") with pytest.raises(vol.Invalid): common.set_away_mode(self.hass, None, ENTITY_WATER_HEATER) self.hass.block_till_done() assert "off" == state.attributes.get("away_mode") def test_set_away_mode_on(self): """Test setting the away mode on/true.""" common.set_away_mode(self.hass, True, ENTITY_WATER_HEATER) self.hass.block_till_done() state = self.hass.states.get(ENTITY_WATER_HEATER) assert "on" == state.attributes.get("away_mode") def test_set_away_mode_off(self): """Test setting the away mode off/false.""" common.set_away_mode(self.hass, False, ENTITY_WATER_HEATER_CELSIUS) self.hass.block_till_done() state = self.hass.states.get(ENTITY_WATER_HEATER_CELSIUS) assert "off" == state.attributes.get("away_mode") def test_set_only_target_temp_with_convert(self): """Test the setting of the target temperature.""" state = self.hass.states.get(ENTITY_WATER_HEATER_CELSIUS) assert 113 == state.attributes.get("temperature") common.set_temperature(self.hass, 114, ENTITY_WATER_HEATER_CELSIUS) self.hass.block_till_done() state = self.hass.states.get(ENTITY_WATER_HEATER_CELSIUS) assert 114 == state.attributes.get("temperature")
leppa/home-assistant
tests/components/demo/test_water_heater.py
homeassistant/components/wirelesstag/switch.py
"""Test reproduce state for Water heater.""" from homeassistant.components.water_heater import ( ATTR_AWAY_MODE, ATTR_OPERATION_MODE, ATTR_TEMPERATURE, SERVICE_SET_AWAY_MODE, SERVICE_SET_OPERATION_MODE, SERVICE_SET_TEMPERATURE, STATE_ECO, STATE_GAS, ) from homeassistant.const import SERVICE_TURN_OFF, SERVICE_TURN_ON, STATE_OFF, STATE_ON from homeassistant.core import State from tests.common import async_mock_service async def test_reproducing_states(hass, caplog): """Test reproducing Water heater states.""" hass.states.async_set("water_heater.entity_off", STATE_OFF, {}) hass.states.async_set("water_heater.entity_on", STATE_ON, {ATTR_TEMPERATURE: 45}) hass.states.async_set("water_heater.entity_away", STATE_ON, {ATTR_AWAY_MODE: True}) hass.states.async_set("water_heater.entity_gas", STATE_GAS, {}) hass.states.async_set( "water_heater.entity_all", STATE_ECO, {ATTR_AWAY_MODE: True, ATTR_TEMPERATURE: 45}, ) turn_on_calls = async_mock_service(hass, "water_heater", SERVICE_TURN_ON) turn_off_calls = async_mock_service(hass, "water_heater", SERVICE_TURN_OFF) set_op_calls = async_mock_service(hass, "water_heater", SERVICE_SET_OPERATION_MODE) set_temp_calls = async_mock_service(hass, "water_heater", SERVICE_SET_TEMPERATURE) set_away_calls = async_mock_service(hass, "water_heater", SERVICE_SET_AWAY_MODE) # These calls should do nothing as entities already in desired state await hass.helpers.state.async_reproduce_state( [ State("water_heater.entity_off", STATE_OFF), State("water_heater.entity_on", STATE_ON, {ATTR_TEMPERATURE: 45}), State("water_heater.entity_away", STATE_ON, {ATTR_AWAY_MODE: True}), State("water_heater.entity_gas", STATE_GAS, {}), State( "water_heater.entity_all", STATE_ECO, {ATTR_AWAY_MODE: True, ATTR_TEMPERATURE: 45}, ), ], blocking=True, ) assert len(turn_on_calls) == 0 assert len(turn_off_calls) == 0 assert len(set_op_calls) == 0 assert len(set_temp_calls) == 0 assert len(set_away_calls) == 0 # Test invalid state is handled await hass.helpers.state.async_reproduce_state( [State("water_heater.entity_off", "not_supported")], blocking=True ) assert "not_supported" in caplog.text assert len(turn_on_calls) == 0 assert len(turn_off_calls) == 0 assert len(set_op_calls) == 0 assert len(set_temp_calls) == 0 assert len(set_away_calls) == 0 # Make sure correct services are called await hass.helpers.state.async_reproduce_state( [ State("water_heater.entity_on", STATE_OFF), State("water_heater.entity_off", STATE_ON, {ATTR_TEMPERATURE: 45}), State("water_heater.entity_all", STATE_ECO, {ATTR_AWAY_MODE: False}), State("water_heater.entity_away", STATE_GAS, {}), State( "water_heater.entity_gas", STATE_ECO, {ATTR_AWAY_MODE: True, ATTR_TEMPERATURE: 45}, ), # Should not raise State("water_heater.non_existing", "on"), ], blocking=True, ) assert len(turn_on_calls) == 1 assert turn_on_calls[0].domain == "water_heater" assert turn_on_calls[0].data == {"entity_id": "water_heater.entity_off"} assert len(turn_off_calls) == 1 assert turn_off_calls[0].domain == "water_heater" assert turn_off_calls[0].data == {"entity_id": "water_heater.entity_on"} VALID_OP_CALLS = [ {"entity_id": "water_heater.entity_away", ATTR_OPERATION_MODE: STATE_GAS}, {"entity_id": "water_heater.entity_gas", ATTR_OPERATION_MODE: STATE_ECO}, ] assert len(set_op_calls) == 2 for call in set_op_calls: assert call.domain == "water_heater" assert call.data in VALID_OP_CALLS VALID_OP_CALLS.remove(call.data) VALID_TEMP_CALLS = [ {"entity_id": "water_heater.entity_off", ATTR_TEMPERATURE: 45}, {"entity_id": "water_heater.entity_gas", 
ATTR_TEMPERATURE: 45}, ] assert len(set_temp_calls) == 2 for call in set_temp_calls: assert call.domain == "water_heater" assert call.data in VALID_TEMP_CALLS VALID_TEMP_CALLS.remove(call.data) VALID_AWAY_CALLS = [ {"entity_id": "water_heater.entity_all", ATTR_AWAY_MODE: False}, {"entity_id": "water_heater.entity_gas", ATTR_AWAY_MODE: True}, ] assert len(set_away_calls) == 2 for call in set_away_calls: assert call.domain == "water_heater" assert call.data in VALID_AWAY_CALLS VALID_AWAY_CALLS.remove(call.data)
"""The tests for the demo water_heater component.""" import unittest import pytest import voluptuous as vol from homeassistant.components import water_heater from homeassistant.setup import setup_component from homeassistant.util.unit_system import IMPERIAL_SYSTEM from tests.common import get_test_home_assistant from tests.components.water_heater import common ENTITY_WATER_HEATER = "water_heater.demo_water_heater" ENTITY_WATER_HEATER_CELSIUS = "water_heater.demo_water_heater_celsius" class TestDemowater_heater(unittest.TestCase): """Test the demo water_heater.""" def setUp(self): # pylint: disable=invalid-name """Set up things to be run when tests are started.""" self.hass = get_test_home_assistant() self.hass.config.units = IMPERIAL_SYSTEM assert setup_component( self.hass, water_heater.DOMAIN, {"water_heater": {"platform": "demo"}} ) def tearDown(self): # pylint: disable=invalid-name """Stop down everything that was started.""" self.hass.stop() def test_setup_params(self): """Test the initial parameters.""" state = self.hass.states.get(ENTITY_WATER_HEATER) assert 119 == state.attributes.get("temperature") assert "off" == state.attributes.get("away_mode") assert "eco" == state.attributes.get("operation_mode") def test_default_setup_params(self): """Test the setup with default parameters.""" state = self.hass.states.get(ENTITY_WATER_HEATER) assert 110 == state.attributes.get("min_temp") assert 140 == state.attributes.get("max_temp") def test_set_only_target_temp_bad_attr(self): """Test setting the target temperature without required attribute.""" state = self.hass.states.get(ENTITY_WATER_HEATER) assert 119 == state.attributes.get("temperature") with pytest.raises(vol.Invalid): common.set_temperature(self.hass, None, ENTITY_WATER_HEATER) self.hass.block_till_done() assert 119 == state.attributes.get("temperature") def test_set_only_target_temp(self): """Test the setting of the target temperature.""" state = self.hass.states.get(ENTITY_WATER_HEATER) assert 119 == state.attributes.get("temperature") common.set_temperature(self.hass, 110, ENTITY_WATER_HEATER) self.hass.block_till_done() state = self.hass.states.get(ENTITY_WATER_HEATER) assert 110 == state.attributes.get("temperature") def test_set_operation_bad_attr_and_state(self): """Test setting operation mode without required attribute. Also check the state. 
""" state = self.hass.states.get(ENTITY_WATER_HEATER) assert "eco" == state.attributes.get("operation_mode") assert "eco" == state.state with pytest.raises(vol.Invalid): common.set_operation_mode(self.hass, None, ENTITY_WATER_HEATER) self.hass.block_till_done() state = self.hass.states.get(ENTITY_WATER_HEATER) assert "eco" == state.attributes.get("operation_mode") assert "eco" == state.state def test_set_operation(self): """Test setting of new operation mode.""" state = self.hass.states.get(ENTITY_WATER_HEATER) assert "eco" == state.attributes.get("operation_mode") assert "eco" == state.state common.set_operation_mode(self.hass, "electric", ENTITY_WATER_HEATER) self.hass.block_till_done() state = self.hass.states.get(ENTITY_WATER_HEATER) assert "electric" == state.attributes.get("operation_mode") assert "electric" == state.state def test_set_away_mode_bad_attr(self): """Test setting the away mode without required attribute.""" state = self.hass.states.get(ENTITY_WATER_HEATER) assert "off" == state.attributes.get("away_mode") with pytest.raises(vol.Invalid): common.set_away_mode(self.hass, None, ENTITY_WATER_HEATER) self.hass.block_till_done() assert "off" == state.attributes.get("away_mode") def test_set_away_mode_on(self): """Test setting the away mode on/true.""" common.set_away_mode(self.hass, True, ENTITY_WATER_HEATER) self.hass.block_till_done() state = self.hass.states.get(ENTITY_WATER_HEATER) assert "on" == state.attributes.get("away_mode") def test_set_away_mode_off(self): """Test setting the away mode off/false.""" common.set_away_mode(self.hass, False, ENTITY_WATER_HEATER_CELSIUS) self.hass.block_till_done() state = self.hass.states.get(ENTITY_WATER_HEATER_CELSIUS) assert "off" == state.attributes.get("away_mode") def test_set_only_target_temp_with_convert(self): """Test the setting of the target temperature.""" state = self.hass.states.get(ENTITY_WATER_HEATER_CELSIUS) assert 113 == state.attributes.get("temperature") common.set_temperature(self.hass, 114, ENTITY_WATER_HEATER_CELSIUS) self.hass.block_till_done() state = self.hass.states.get(ENTITY_WATER_HEATER_CELSIUS) assert 114 == state.attributes.get("temperature")
leppa/home-assistant
tests/components/demo/test_water_heater.py
tests/components/water_heater/test_reproduce_state.py
import itertools import numpy as np import pytest import pandas as pd from pandas.core.internals import ExtensionBlock from .base import BaseExtensionTests class BaseReshapingTests(BaseExtensionTests): """Tests for reshaping and concatenation.""" @pytest.mark.parametrize('in_frame', [True, False]) def test_concat(self, data, in_frame): wrapped = pd.Series(data) if in_frame: wrapped = pd.DataFrame(wrapped) result = pd.concat([wrapped, wrapped], ignore_index=True) assert len(result) == len(data) * 2 if in_frame: dtype = result.dtypes[0] else: dtype = result.dtype assert dtype == data.dtype assert isinstance(result._data.blocks[0], ExtensionBlock) @pytest.mark.parametrize('in_frame', [True, False]) def test_concat_all_na_block(self, data_missing, in_frame): valid_block = pd.Series(data_missing.take([1, 1]), index=[0, 1]) na_block = pd.Series(data_missing.take([0, 0]), index=[2, 3]) if in_frame: valid_block = pd.DataFrame({"a": valid_block}) na_block = pd.DataFrame({"a": na_block}) result = pd.concat([valid_block, na_block]) if in_frame: expected = pd.DataFrame({"a": data_missing.take([1, 1, 0, 0])}) self.assert_frame_equal(result, expected) else: expected = pd.Series(data_missing.take([1, 1, 0, 0])) self.assert_series_equal(result, expected) def test_concat_mixed_dtypes(self, data): # https://github.com/pandas-dev/pandas/issues/20762 df1 = pd.DataFrame({'A': data[:3]}) df2 = pd.DataFrame({"A": [1, 2, 3]}) df3 = pd.DataFrame({"A": ['a', 'b', 'c']}).astype('category') dfs = [df1, df2, df3] # dataframes result = pd.concat(dfs) expected = pd.concat([x.astype(object) for x in dfs]) self.assert_frame_equal(result, expected) # series result = pd.concat([x['A'] for x in dfs]) expected = pd.concat([x['A'].astype(object) for x in dfs]) self.assert_series_equal(result, expected) # simple test for just EA and one other result = pd.concat([df1, df2]) expected = pd.concat([df1.astype('object'), df2.astype('object')]) self.assert_frame_equal(result, expected) result = pd.concat([df1['A'], df2['A']]) expected = pd.concat([df1['A'].astype('object'), df2['A'].astype('object')]) self.assert_series_equal(result, expected) def test_concat_columns(self, data, na_value): df1 = pd.DataFrame({'A': data[:3]}) df2 = pd.DataFrame({'B': [1, 2, 3]}) expected = pd.DataFrame({'A': data[:3], 'B': [1, 2, 3]}) result = pd.concat([df1, df2], axis=1) self.assert_frame_equal(result, expected) result = pd.concat([df1['A'], df2['B']], axis=1) self.assert_frame_equal(result, expected) # non-aligned df2 = pd.DataFrame({'B': [1, 2, 3]}, index=[1, 2, 3]) expected = pd.DataFrame({ 'A': data._from_sequence(list(data[:3]) + [na_value], dtype=data.dtype), 'B': [np.nan, 1, 2, 3]}) result = pd.concat([df1, df2], axis=1) self.assert_frame_equal(result, expected) result = pd.concat([df1['A'], df2['B']], axis=1) self.assert_frame_equal(result, expected) def test_align(self, data, na_value): a = data[:3] b = data[2:5] r1, r2 = pd.Series(a).align(pd.Series(b, index=[1, 2, 3])) # Assumes that the ctor can take a list of scalars of the type e1 = pd.Series(data._from_sequence(list(a) + [na_value], dtype=data.dtype)) e2 = pd.Series(data._from_sequence([na_value] + list(b), dtype=data.dtype)) self.assert_series_equal(r1, e1) self.assert_series_equal(r2, e2) def test_align_frame(self, data, na_value): a = data[:3] b = data[2:5] r1, r2 = pd.DataFrame({'A': a}).align( pd.DataFrame({'A': b}, index=[1, 2, 3]) ) # Assumes that the ctor can take a list of scalars of the type e1 = pd.DataFrame({'A': data._from_sequence(list(a) + [na_value], 
dtype=data.dtype)}) e2 = pd.DataFrame({'A': data._from_sequence([na_value] + list(b), dtype=data.dtype)}) self.assert_frame_equal(r1, e1) self.assert_frame_equal(r2, e2) def test_align_series_frame(self, data, na_value): # https://github.com/pandas-dev/pandas/issues/20576 ser = pd.Series(data, name='a') df = pd.DataFrame({"col": np.arange(len(ser) + 1)}) r1, r2 = ser.align(df) e1 = pd.Series(data._from_sequence(list(data) + [na_value], dtype=data.dtype), name=ser.name) self.assert_series_equal(r1, e1) self.assert_frame_equal(r2, df) def test_set_frame_expand_regular_with_extension(self, data): df = pd.DataFrame({"A": [1] * len(data)}) df['B'] = data expected = pd.DataFrame({"A": [1] * len(data), "B": data}) self.assert_frame_equal(df, expected) def test_set_frame_expand_extension_with_regular(self, data): df = pd.DataFrame({'A': data}) df['B'] = [1] * len(data) expected = pd.DataFrame({"A": data, "B": [1] * len(data)}) self.assert_frame_equal(df, expected) def test_set_frame_overwrite_object(self, data): # https://github.com/pandas-dev/pandas/issues/20555 df = pd.DataFrame({"A": [1] * len(data)}, dtype=object) df['A'] = data assert df.dtypes['A'] == data.dtype def test_merge(self, data, na_value): # GH-20743 df1 = pd.DataFrame({'ext': data[:3], 'int1': [1, 2, 3], 'key': [0, 1, 2]}) df2 = pd.DataFrame({'int2': [1, 2, 3, 4], 'key': [0, 0, 1, 3]}) res = pd.merge(df1, df2) exp = pd.DataFrame( {'int1': [1, 1, 2], 'int2': [1, 2, 3], 'key': [0, 0, 1], 'ext': data._from_sequence([data[0], data[0], data[1]], dtype=data.dtype)}) self.assert_frame_equal(res, exp[['ext', 'int1', 'key', 'int2']]) res = pd.merge(df1, df2, how='outer') exp = pd.DataFrame( {'int1': [1, 1, 2, 3, np.nan], 'int2': [1, 2, 3, np.nan, 4], 'key': [0, 0, 1, 2, 3], 'ext': data._from_sequence( [data[0], data[0], data[1], data[2], na_value], dtype=data.dtype)}) self.assert_frame_equal(res, exp[['ext', 'int1', 'key', 'int2']]) def test_merge_on_extension_array(self, data): # GH 23020 a, b = data[:2] key = type(data)._from_sequence([a, b], dtype=data.dtype) df = pd.DataFrame({"key": key, "val": [1, 2]}) result = pd.merge(df, df, on='key') expected = pd.DataFrame({"key": key, "val_x": [1, 2], "val_y": [1, 2]}) self.assert_frame_equal(result, expected) # order result = pd.merge(df.iloc[[1, 0]], df, on='key') expected = expected.iloc[[1, 0]].reset_index(drop=True) self.assert_frame_equal(result, expected) def test_merge_on_extension_array_duplicates(self, data): # GH 23020 a, b = data[:2] key = type(data)._from_sequence([a, b, a], dtype=data.dtype) df1 = pd.DataFrame({"key": key, "val": [1, 2, 3]}) df2 = pd.DataFrame({"key": key, "val": [1, 2, 3]}) result = pd.merge(df1, df2, on='key') expected = pd.DataFrame({ "key": key.take([0, 0, 0, 0, 1]), "val_x": [1, 1, 3, 3, 2], "val_y": [1, 3, 1, 3, 2], }) self.assert_frame_equal(result, expected) @pytest.mark.parametrize("columns", [ ["A", "B"], pd.MultiIndex.from_tuples([('A', 'a'), ('A', 'b')], names=['outer', 'inner']), ]) def test_stack(self, data, columns): df = pd.DataFrame({"A": data[:5], "B": data[:5]}) df.columns = columns result = df.stack() expected = df.astype(object).stack() # we need a second astype(object), in case the constructor inferred # object -> specialized, as is done for period. expected = expected.astype(object) if isinstance(expected, pd.Series): assert result.dtype == df.iloc[:, 0].dtype else: assert all(result.dtypes == df.iloc[:, 0].dtype) result = result.astype(object) self.assert_equal(result, expected) @pytest.mark.parametrize("index", [ # Two levels, uniform. 
pd.MultiIndex.from_product(([['A', 'B'], ['a', 'b']]), names=['a', 'b']), # non-uniform pd.MultiIndex.from_tuples([('A', 'a'), ('A', 'b'), ('B', 'b')]), # three levels, non-uniform pd.MultiIndex.from_product([('A', 'B'), ('a', 'b', 'c'), (0, 1, 2)]), pd.MultiIndex.from_tuples([ ('A', 'a', 1), ('A', 'b', 0), ('A', 'a', 0), ('B', 'a', 0), ('B', 'c', 1), ]), ]) @pytest.mark.parametrize("obj", ["series", "frame"]) def test_unstack(self, data, index, obj): data = data[:len(index)] if obj == "series": ser = pd.Series(data, index=index) else: ser = pd.DataFrame({"A": data, "B": data}, index=index) n = index.nlevels levels = list(range(n)) # [0, 1, 2] # [(0,), (1,), (2,), (0, 1), (0, 2), (1, 0), (1, 2), (2, 0), (2, 1)] combinations = itertools.chain.from_iterable( itertools.permutations(levels, i) for i in range(1, n) ) for level in combinations: result = ser.unstack(level=level) assert all(isinstance(result[col].array, type(data)) for col in result.columns) expected = ser.astype(object).unstack(level=level) result = result.astype(object) self.assert_frame_equal(result, expected)
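# BaseReshapingTests above is meant to be mixed into a concrete extension-array
# test class that supplies the data, data_missing and na_value fixtures. Below
# is a minimal sketch of such wiring, using pandas' nullable Int64 array purely
# for illustration; the import path and the choice of np.nan as the missing
# scalar are assumptions about the surrounding test suite.
import numpy as np
import pandas as pd
import pytest

from pandas.tests.extension import base  # import path assumed


@pytest.fixture
def data():
    """100 integers backed by pandas' nullable Int64 extension array."""
    return pd.array(np.arange(100), dtype='Int64')


@pytest.fixture
def data_missing():
    """Length-2 array: [missing, valid], as the base tests expect."""
    return pd.array([np.nan, 1], dtype='Int64')


@pytest.fixture
def na_value():
    """The scalar this array uses for missing values (assumed np.nan here)."""
    return np.nan


class TestIntegerReshaping(base.BaseReshapingTests):
    """Run the shared reshaping tests against the Int64 array."""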
# -*- coding: utf-8 -*- from datetime import datetime import re import numpy as np import pytest from pandas._libs.tslibs import Timestamp from pandas.compat import PY2, range import pandas as pd from pandas import Float64Index, Index, Int64Index, Series, UInt64Index from pandas.api.types import pandas_dtype from pandas.tests.indexes.common import Base import pandas.util.testing as tm class Numeric(Base): def test_can_hold_identifiers(self): idx = self.create_index() key = idx[0] assert idx._can_hold_identifiers_and_holds_name(key) is False def test_numeric_compat(self): pass # override Base method def test_explicit_conversions(self): # GH 8608 # add/sub are overridden explicitly for Float/Int Index idx = self._holder(np.arange(5, dtype='int64')) # float conversions arr = np.arange(5, dtype='int64') * 3.2 expected = Float64Index(arr) fidx = idx * 3.2 tm.assert_index_equal(fidx, expected) fidx = 3.2 * idx tm.assert_index_equal(fidx, expected) # interops with numpy arrays expected = Float64Index(arr) a = np.zeros(5, dtype='float64') result = fidx - a tm.assert_index_equal(result, expected) expected = Float64Index(-arr) a = np.zeros(5, dtype='float64') result = a - fidx tm.assert_index_equal(result, expected) def test_index_groupby(self): int_idx = Index(range(6)) float_idx = Index(np.arange(0, 0.6, 0.1)) obj_idx = Index('A B C D E F'.split()) dt_idx = pd.date_range('2013-01-01', freq='M', periods=6) for idx in [int_idx, float_idx, obj_idx, dt_idx]: to_groupby = np.array([1, 2, np.nan, np.nan, 2, 1]) tm.assert_dict_equal(idx.groupby(to_groupby), {1.0: idx[[0, 5]], 2.0: idx[[1, 4]]}) to_groupby = Index([datetime(2011, 11, 1), datetime(2011, 12, 1), pd.NaT, pd.NaT, datetime(2011, 12, 1), datetime(2011, 11, 1)], tz='UTC').values ex_keys = [Timestamp('2011-11-01'), Timestamp('2011-12-01')] expected = {ex_keys[0]: idx[[0, 5]], ex_keys[1]: idx[[1, 4]]} tm.assert_dict_equal(idx.groupby(to_groupby), expected) @pytest.mark.parametrize('klass', [list, tuple, np.array, Series]) def test_where(self, klass): i = self.create_index() cond = [True] * len(i) expected = i result = i.where(klass(cond)) cond = [False] + [True] * (len(i) - 1) expected = Float64Index([i._na_value] + i[1:].tolist()) result = i.where(klass(cond)) tm.assert_index_equal(result, expected) def test_insert(self): # GH 18295 (test missing) expected = Float64Index([0, np.nan, 1, 2, 3, 4]) for na in (np.nan, pd.NaT, None): result = self.create_index().insert(1, na) tm.assert_index_equal(result, expected) class TestFloat64Index(Numeric): _holder = Float64Index def setup_method(self, method): self.indices = dict(mixed=Float64Index([1.5, 2, 3, 4, 5]), float=Float64Index(np.arange(5) * 2.5), mixed_dec=Float64Index([5, 4, 3, 2, 1.5]), float_dec=Float64Index(np.arange(4, -1, -1) * 2.5)) self.setup_indices() def create_index(self): return Float64Index(np.arange(5, dtype='float64')) def test_repr_roundtrip(self): for ind in (self.mixed, self.float): tm.assert_index_equal(eval(repr(ind)), ind) def check_is_index(self, i): assert isinstance(i, Index) assert not isinstance(i, Float64Index) def check_coerce(self, a, b, is_float_index=True): assert a.equals(b) tm.assert_index_equal(a, b, exact=False) if is_float_index: assert isinstance(b, Float64Index) else: self.check_is_index(b) def test_constructor(self): # explicit construction index = Float64Index([1, 2, 3, 4, 5]) assert isinstance(index, Float64Index) expected = np.array([1, 2, 3, 4, 5], dtype='float64') tm.assert_numpy_array_equal(index.values, expected) index = Float64Index(np.array([1, 2, 3, 
4, 5])) assert isinstance(index, Float64Index) index = Float64Index([1., 2, 3, 4, 5]) assert isinstance(index, Float64Index) index = Float64Index(np.array([1., 2, 3, 4, 5])) assert isinstance(index, Float64Index) assert index.dtype == float index = Float64Index(np.array([1., 2, 3, 4, 5]), dtype=np.float32) assert isinstance(index, Float64Index) assert index.dtype == np.float64 index = Float64Index(np.array([1, 2, 3, 4, 5]), dtype=np.float32) assert isinstance(index, Float64Index) assert index.dtype == np.float64 # nan handling result = Float64Index([np.nan, np.nan]) assert pd.isna(result.values).all() result = Float64Index(np.array([np.nan])) assert pd.isna(result.values).all() result = Index(np.array([np.nan])) assert pd.isna(result.values).all() @pytest.mark.skipif(PY2, reason="pytest.raises match regex fails") def test_constructor_invalid(self): # invalid msg = (r"Float64Index\(\.\.\.\) must be called with a collection of" r" some kind, 0\.0 was passed") with pytest.raises(TypeError, match=msg): Float64Index(0.) msg = ("String dtype not supported, you may need to explicitly cast to" " a numeric type") with pytest.raises(TypeError, match=msg): Float64Index(['a', 'b', 0.]) msg = (r"float\(\) argument must be a string or a number, not" " 'Timestamp'") with pytest.raises(TypeError, match=msg): Float64Index([Timestamp('20130101')]) def test_constructor_coerce(self): self.check_coerce(self.mixed, Index([1.5, 2, 3, 4, 5])) self.check_coerce(self.float, Index(np.arange(5) * 2.5)) self.check_coerce(self.float, Index(np.array( np.arange(5) * 2.5, dtype=object))) def test_constructor_explicit(self): # these don't auto convert self.check_coerce(self.float, Index((np.arange(5) * 2.5), dtype=object), is_float_index=False) self.check_coerce(self.mixed, Index( [1.5, 2, 3, 4, 5], dtype=object), is_float_index=False) def test_astype(self): result = self.float.astype(object) assert result.equals(self.float) assert self.float.equals(result) self.check_is_index(result) i = self.mixed.copy() i.name = 'foo' result = i.astype(object) assert result.equals(i) assert i.equals(result) self.check_is_index(result) # GH 12881 # a float astype int for dtype in ['int16', 'int32', 'int64']: i = Float64Index([0, 1, 2]) result = i.astype(dtype) expected = Int64Index([0, 1, 2]) tm.assert_index_equal(result, expected) i = Float64Index([0, 1.1, 2]) result = i.astype(dtype) expected = Int64Index([0, 1, 2]) tm.assert_index_equal(result, expected) for dtype in ['float32', 'float64']: i = Float64Index([0, 1, 2]) result = i.astype(dtype) expected = i tm.assert_index_equal(result, expected) i = Float64Index([0, 1.1, 2]) result = i.astype(dtype) expected = Index(i.values.astype(dtype)) tm.assert_index_equal(result, expected) # invalid for dtype in ['M8[ns]', 'm8[ns]']: msg = ("Cannot convert Float64Index to dtype {}; integer values" " are required for conversion").format(pandas_dtype(dtype)) with pytest.raises(TypeError, match=re.escape(msg)): i.astype(dtype) # GH 13149 for dtype in ['int16', 'int32', 'int64']: i = Float64Index([0, 1.1, np.NAN]) msg = "Cannot convert NA to integer" with pytest.raises(ValueError, match=msg): i.astype(dtype) def test_type_coercion_fail(self, any_int_dtype): # see gh-15832 msg = "Trying to coerce float values to integers" with pytest.raises(ValueError, match=msg): Index([1, 2, 3.5], dtype=any_int_dtype) def test_type_coercion_valid(self, float_dtype): # There is no Float32Index, so we always # generate Float64Index. 
i = Index([1, 2, 3.5], dtype=float_dtype) tm.assert_index_equal(i, Index([1, 2, 3.5])) def test_equals_numeric(self): i = Float64Index([1.0, 2.0]) assert i.equals(i) assert i.identical(i) i2 = Float64Index([1.0, 2.0]) assert i.equals(i2) i = Float64Index([1.0, np.nan]) assert i.equals(i) assert i.identical(i) i2 = Float64Index([1.0, np.nan]) assert i.equals(i2) def test_get_indexer(self): idx = Float64Index([0.0, 1.0, 2.0]) tm.assert_numpy_array_equal(idx.get_indexer(idx), np.array([0, 1, 2], dtype=np.intp)) target = [-0.1, 0.5, 1.1] tm.assert_numpy_array_equal(idx.get_indexer(target, 'pad'), np.array([-1, 0, 1], dtype=np.intp)) tm.assert_numpy_array_equal(idx.get_indexer(target, 'backfill'), np.array([0, 1, 2], dtype=np.intp)) tm.assert_numpy_array_equal(idx.get_indexer(target, 'nearest'), np.array([0, 1, 1], dtype=np.intp)) def test_get_loc(self): idx = Float64Index([0.0, 1.0, 2.0]) for method in [None, 'pad', 'backfill', 'nearest']: assert idx.get_loc(1, method) == 1 if method is not None: assert idx.get_loc(1, method, tolerance=0) == 1 for method, loc in [('pad', 1), ('backfill', 2), ('nearest', 1)]: assert idx.get_loc(1.1, method) == loc assert idx.get_loc(1.1, method, tolerance=0.9) == loc with pytest.raises(KeyError, match="^'foo'$"): idx.get_loc('foo') with pytest.raises(KeyError, match=r"^1\.5$"): idx.get_loc(1.5) with pytest.raises(KeyError, match=r"^1\.5$"): idx.get_loc(1.5, method='pad', tolerance=0.1) with pytest.raises(KeyError, match="^True$"): idx.get_loc(True) with pytest.raises(KeyError, match="^False$"): idx.get_loc(False) with pytest.raises(ValueError, match='must be numeric'): idx.get_loc(1.4, method='nearest', tolerance='foo') with pytest.raises(ValueError, match='must contain numeric elements'): idx.get_loc(1.4, method='nearest', tolerance=np.array(['foo'])) with pytest.raises( ValueError, match='tolerance size must match target index size'): idx.get_loc(1.4, method='nearest', tolerance=np.array([1, 2])) def test_get_loc_na(self): idx = Float64Index([np.nan, 1, 2]) assert idx.get_loc(1) == 1 assert idx.get_loc(np.nan) == 0 idx = Float64Index([np.nan, 1, np.nan]) assert idx.get_loc(1) == 1 # representable by slice [0:2:2] # pytest.raises(KeyError, idx.slice_locs, np.nan) sliced = idx.slice_locs(np.nan) assert isinstance(sliced, tuple) assert sliced == (0, 3) # not representable by slice idx = Float64Index([np.nan, 1, np.nan, np.nan]) assert idx.get_loc(1) == 1 msg = "'Cannot get left slice bound for non-unique label: nan" with pytest.raises(KeyError, match=msg): idx.slice_locs(np.nan) def test_get_loc_missing_nan(self): # GH 8569 idx = Float64Index([1, 2]) assert idx.get_loc(1) == 0 with pytest.raises(KeyError, match=r"^3\.0$"): idx.get_loc(3) with pytest.raises(KeyError, match="^nan$"): idx.get_loc(np.nan) with pytest.raises(KeyError, match=r"^\[nan\]$"): idx.get_loc([np.nan]) def test_contains_nans(self): i = Float64Index([1.0, 2.0, np.nan]) assert np.nan in i def test_contains_not_nans(self): i = Float64Index([1.0, 2.0, np.nan]) assert 1.0 in i def test_doesnt_contain_all_the_things(self): i = Float64Index([np.nan]) assert not i.isin([0]).item() assert not i.isin([1]).item() assert i.isin([np.nan]).item() def test_nan_multiple_containment(self): i = Float64Index([1.0, np.nan]) tm.assert_numpy_array_equal(i.isin([1.0]), np.array([True, False])) tm.assert_numpy_array_equal(i.isin([2.0, np.pi]), np.array([False, False])) tm.assert_numpy_array_equal(i.isin([np.nan]), np.array([False, True])) tm.assert_numpy_array_equal(i.isin([1.0, np.nan]), np.array([True, True])) i = 
Float64Index([1.0, 2.0]) tm.assert_numpy_array_equal(i.isin([np.nan]), np.array([False, False])) def test_astype_from_object(self): index = Index([1.0, np.nan, 0.2], dtype='object') result = index.astype(float) expected = Float64Index([1.0, np.nan, 0.2]) assert result.dtype == expected.dtype tm.assert_index_equal(result, expected) def test_fillna_float64(self): # GH 11343 idx = Index([1.0, np.nan, 3.0], dtype=float, name='x') # can't downcast exp = Index([1.0, 0.1, 3.0], name='x') tm.assert_index_equal(idx.fillna(0.1), exp) # downcast exp = Float64Index([1.0, 2.0, 3.0], name='x') tm.assert_index_equal(idx.fillna(2), exp) # object exp = Index([1.0, 'obj', 3.0], name='x') tm.assert_index_equal(idx.fillna('obj'), exp) def test_take_fill_value(self): # GH 12631 idx = pd.Float64Index([1., 2., 3.], name='xxx') result = idx.take(np.array([1, 0, -1])) expected = pd.Float64Index([2., 1., 3.], name='xxx') tm.assert_index_equal(result, expected) # fill_value result = idx.take(np.array([1, 0, -1]), fill_value=True) expected = pd.Float64Index([2., 1., np.nan], name='xxx') tm.assert_index_equal(result, expected) # allow_fill=False result = idx.take(np.array([1, 0, -1]), allow_fill=False, fill_value=True) expected = pd.Float64Index([2., 1., 3.], name='xxx') tm.assert_index_equal(result, expected) msg = ('When allow_fill=True and fill_value is not None, ' 'all indices must be >= -1') with pytest.raises(ValueError, match=msg): idx.take(np.array([1, 0, -2]), fill_value=True) with pytest.raises(ValueError, match=msg): idx.take(np.array([1, 0, -5]), fill_value=True) with pytest.raises(IndexError): idx.take(np.array([1, -5])) class NumericInt(Numeric): def test_view(self): i = self._holder([], name='Foo') i_view = i.view() assert i_view.name == 'Foo' i_view = i.view(self._dtype) tm.assert_index_equal(i, self._holder(i_view, name='Foo')) i_view = i.view(self._holder) tm.assert_index_equal(i, self._holder(i_view, name='Foo')) def test_is_monotonic(self): assert self.index.is_monotonic is True assert self.index.is_monotonic_increasing is True assert self.index._is_strictly_monotonic_increasing is True assert self.index.is_monotonic_decreasing is False assert self.index._is_strictly_monotonic_decreasing is False index = self._holder([4, 3, 2, 1]) assert index.is_monotonic is False assert index._is_strictly_monotonic_increasing is False assert index._is_strictly_monotonic_decreasing is True index = self._holder([1]) assert index.is_monotonic is True assert index.is_monotonic_increasing is True assert index.is_monotonic_decreasing is True assert index._is_strictly_monotonic_increasing is True assert index._is_strictly_monotonic_decreasing is True def test_is_strictly_monotonic(self): index = self._holder([1, 1, 2, 3]) assert index.is_monotonic_increasing is True assert index._is_strictly_monotonic_increasing is False index = self._holder([3, 2, 1, 1]) assert index.is_monotonic_decreasing is True assert index._is_strictly_monotonic_decreasing is False index = self._holder([1, 1]) assert index.is_monotonic_increasing assert index.is_monotonic_decreasing assert not index._is_strictly_monotonic_increasing assert not index._is_strictly_monotonic_decreasing def test_logical_compat(self): idx = self.create_index() assert idx.all() == idx.values.all() assert idx.any() == idx.values.any() def test_identical(self): i = Index(self.index.copy()) assert i.identical(self.index) same_values_different_type = Index(i, dtype=object) assert not i.identical(same_values_different_type) i = self.index.copy(dtype=object) i = 
i.rename('foo') same_values = Index(i, dtype=object) assert same_values.identical(i) assert not i.identical(self.index) assert Index(same_values, name='foo', dtype=object).identical(i) assert not self.index.copy(dtype=object).identical( self.index.copy(dtype=self._dtype)) def test_join_non_unique(self): left = Index([4, 4, 3, 3]) joined, lidx, ridx = left.join(left, return_indexers=True) exp_joined = Index([3, 3, 3, 3, 4, 4, 4, 4]) tm.assert_index_equal(joined, exp_joined) exp_lidx = np.array([2, 2, 3, 3, 0, 0, 1, 1], dtype=np.intp) tm.assert_numpy_array_equal(lidx, exp_lidx) exp_ridx = np.array([2, 3, 2, 3, 0, 1, 0, 1], dtype=np.intp) tm.assert_numpy_array_equal(ridx, exp_ridx) @pytest.mark.parametrize('kind', ['outer', 'inner', 'left', 'right']) def test_join_self(self, kind): joined = self.index.join(self.index, how=kind) assert self.index is joined def test_union_noncomparable(self): from datetime import datetime, timedelta # corner case, non-Int64Index now = datetime.now() other = Index([now + timedelta(i) for i in range(4)], dtype=object) result = self.index.union(other) expected = Index(np.concatenate((self.index, other))) tm.assert_index_equal(result, expected) result = other.union(self.index) expected = Index(np.concatenate((other, self.index))) tm.assert_index_equal(result, expected) def test_cant_or_shouldnt_cast(self): msg = ("String dtype not supported, you may need to explicitly cast to" " a numeric type") # can't data = ['foo', 'bar', 'baz'] with pytest.raises(TypeError, match=msg): self._holder(data) # shouldn't data = ['0', '1', '2'] with pytest.raises(TypeError, match=msg): self._holder(data) def test_view_index(self): self.index.view(Index) def test_prevent_casting(self): result = self.index.astype('O') assert result.dtype == np.object_ def test_take_preserve_name(self): index = self._holder([1, 2, 3, 4], name='foo') taken = index.take([3, 0, 1]) assert index.name == taken.name def test_take_fill_value(self): # see gh-12631 idx = self._holder([1, 2, 3], name='xxx') result = idx.take(np.array([1, 0, -1])) expected = self._holder([2, 1, 3], name='xxx') tm.assert_index_equal(result, expected) name = self._holder.__name__ msg = ("Unable to fill values because " "{name} cannot contain NA").format(name=name) # fill_value=True with pytest.raises(ValueError, match=msg): idx.take(np.array([1, 0, -1]), fill_value=True) # allow_fill=False result = idx.take(np.array([1, 0, -1]), allow_fill=False, fill_value=True) expected = self._holder([2, 1, 3], name='xxx') tm.assert_index_equal(result, expected) with pytest.raises(ValueError, match=msg): idx.take(np.array([1, 0, -2]), fill_value=True) with pytest.raises(ValueError, match=msg): idx.take(np.array([1, 0, -5]), fill_value=True) with pytest.raises(IndexError): idx.take(np.array([1, -5])) def test_slice_keep_name(self): idx = self._holder([1, 2], name='asdf') assert idx.name == idx[1:].name class TestInt64Index(NumericInt): _dtype = 'int64' _holder = Int64Index def setup_method(self, method): self.indices = dict(index=Int64Index(np.arange(0, 20, 2)), index_dec=Int64Index(np.arange(19, -1, -1))) self.setup_indices() def create_index(self): return Int64Index(np.arange(5, dtype='int64')) def test_constructor(self): # pass list, coerce fine index = Int64Index([-5, 0, 1, 2]) expected = Index([-5, 0, 1, 2], dtype=np.int64) tm.assert_index_equal(index, expected) # from iterable index = Int64Index(iter([-5, 0, 1, 2])) tm.assert_index_equal(index, expected) # scalar raise Exception msg = (r"Int64Index\(\.\.\.\) must be called with a collection 
of some" " kind, 5 was passed") with pytest.raises(TypeError, match=msg): Int64Index(5) # copy arr = self.index.values new_index = Int64Index(arr, copy=True) tm.assert_index_equal(new_index, self.index) val = arr[0] + 3000 # this should not change index arr[0] = val assert new_index[0] != val # interpret list-like expected = Int64Index([5, 0]) for cls in [Index, Int64Index]: for idx in [cls([5, 0], dtype='int64'), cls(np.array([5, 0]), dtype='int64'), cls(Series([5, 0]), dtype='int64')]: tm.assert_index_equal(idx, expected) def test_constructor_corner(self): arr = np.array([1, 2, 3, 4], dtype=object) index = Int64Index(arr) assert index.values.dtype == np.int64 tm.assert_index_equal(index, Index(arr)) # preventing casting arr = np.array([1, '2', 3, '4'], dtype=object) with pytest.raises(TypeError, match='casting'): Int64Index(arr) arr_with_floats = [0, 2, 3, 4, 5, 1.25, 3, -1] with pytest.raises(TypeError, match='casting'): Int64Index(arr_with_floats) def test_constructor_coercion_signed_to_unsigned(self, uint_dtype): # see gh-15832 msg = "Trying to coerce negative values to unsigned integers" with pytest.raises(OverflowError, match=msg): Index([-1], dtype=uint_dtype) def test_constructor_unwraps_index(self): idx = pd.Index([1, 2]) result = pd.Int64Index(idx) expected = np.array([1, 2], dtype='int64') tm.assert_numpy_array_equal(result._data, expected) def test_coerce_list(self): # coerce things arr = Index([1, 2, 3, 4]) assert isinstance(arr, Int64Index) # but not if explicit dtype passed arr = Index([1, 2, 3, 4], dtype=object) assert isinstance(arr, Index) def test_get_indexer(self): target = Int64Index(np.arange(10)) indexer = self.index.get_indexer(target) expected = np.array([0, -1, 1, -1, 2, -1, 3, -1, 4, -1], dtype=np.intp) tm.assert_numpy_array_equal(indexer, expected) target = Int64Index(np.arange(10)) indexer = self.index.get_indexer(target, method='pad') expected = np.array([0, 0, 1, 1, 2, 2, 3, 3, 4, 4], dtype=np.intp) tm.assert_numpy_array_equal(indexer, expected) target = Int64Index(np.arange(10)) indexer = self.index.get_indexer(target, method='backfill') expected = np.array([0, 1, 1, 2, 2, 3, 3, 4, 4, 5], dtype=np.intp) tm.assert_numpy_array_equal(indexer, expected) def test_intersection(self): other = Index([1, 2, 3, 4, 5]) result = self.index.intersection(other) expected = Index(np.sort(np.intersect1d(self.index.values, other.values))) tm.assert_index_equal(result, expected) result = other.intersection(self.index) expected = Index(np.sort(np.asarray(np.intersect1d(self.index.values, other.values)))) tm.assert_index_equal(result, expected) def test_join_inner(self): other = Int64Index([7, 12, 25, 1, 2, 5]) other_mono = Int64Index([1, 2, 5, 7, 12, 25]) # not monotonic res, lidx, ridx = self.index.join(other, how='inner', return_indexers=True) # no guarantee of sortedness, so sort for comparison purposes ind = res.argsort() res = res.take(ind) lidx = lidx.take(ind) ridx = ridx.take(ind) eres = Int64Index([2, 12]) elidx = np.array([1, 6], dtype=np.intp) eridx = np.array([4, 1], dtype=np.intp) assert isinstance(res, Int64Index) tm.assert_index_equal(res, eres) tm.assert_numpy_array_equal(lidx, elidx) tm.assert_numpy_array_equal(ridx, eridx) # monotonic res, lidx, ridx = self.index.join(other_mono, how='inner', return_indexers=True) res2 = self.index.intersection(other_mono) tm.assert_index_equal(res, res2) elidx = np.array([1, 6], dtype=np.intp) eridx = np.array([1, 4], dtype=np.intp) assert isinstance(res, Int64Index) tm.assert_index_equal(res, eres) 
tm.assert_numpy_array_equal(lidx, elidx) tm.assert_numpy_array_equal(ridx, eridx) def test_join_left(self): other = Int64Index([7, 12, 25, 1, 2, 5]) other_mono = Int64Index([1, 2, 5, 7, 12, 25]) # not monotonic res, lidx, ridx = self.index.join(other, how='left', return_indexers=True) eres = self.index eridx = np.array([-1, 4, -1, -1, -1, -1, 1, -1, -1, -1], dtype=np.intp) assert isinstance(res, Int64Index) tm.assert_index_equal(res, eres) assert lidx is None tm.assert_numpy_array_equal(ridx, eridx) # monotonic res, lidx, ridx = self.index.join(other_mono, how='left', return_indexers=True) eridx = np.array([-1, 1, -1, -1, -1, -1, 4, -1, -1, -1], dtype=np.intp) assert isinstance(res, Int64Index) tm.assert_index_equal(res, eres) assert lidx is None tm.assert_numpy_array_equal(ridx, eridx) # non-unique idx = Index([1, 1, 2, 5]) idx2 = Index([1, 2, 5, 7, 9]) res, lidx, ridx = idx2.join(idx, how='left', return_indexers=True) eres = Index([1, 1, 2, 5, 7, 9]) # 1 is in idx2, so it should be x2 eridx = np.array([0, 1, 2, 3, -1, -1], dtype=np.intp) elidx = np.array([0, 0, 1, 2, 3, 4], dtype=np.intp) tm.assert_index_equal(res, eres) tm.assert_numpy_array_equal(lidx, elidx) tm.assert_numpy_array_equal(ridx, eridx) def test_join_right(self): other = Int64Index([7, 12, 25, 1, 2, 5]) other_mono = Int64Index([1, 2, 5, 7, 12, 25]) # not monotonic res, lidx, ridx = self.index.join(other, how='right', return_indexers=True) eres = other elidx = np.array([-1, 6, -1, -1, 1, -1], dtype=np.intp) assert isinstance(other, Int64Index) tm.assert_index_equal(res, eres) tm.assert_numpy_array_equal(lidx, elidx) assert ridx is None # monotonic res, lidx, ridx = self.index.join(other_mono, how='right', return_indexers=True) eres = other_mono elidx = np.array([-1, 1, -1, -1, 6, -1], dtype=np.intp) assert isinstance(other, Int64Index) tm.assert_index_equal(res, eres) tm.assert_numpy_array_equal(lidx, elidx) assert ridx is None # non-unique idx = Index([1, 1, 2, 5]) idx2 = Index([1, 2, 5, 7, 9]) res, lidx, ridx = idx.join(idx2, how='right', return_indexers=True) eres = Index([1, 1, 2, 5, 7, 9]) # 1 is in idx2, so it should be x2 elidx = np.array([0, 1, 2, 3, -1, -1], dtype=np.intp) eridx = np.array([0, 0, 1, 2, 3, 4], dtype=np.intp) tm.assert_index_equal(res, eres) tm.assert_numpy_array_equal(lidx, elidx) tm.assert_numpy_array_equal(ridx, eridx) def test_join_non_int_index(self): other = Index([3, 6, 7, 8, 10], dtype=object) outer = self.index.join(other, how='outer') outer2 = other.join(self.index, how='outer') expected = Index([0, 2, 3, 4, 6, 7, 8, 10, 12, 14, 16, 18]) tm.assert_index_equal(outer, outer2) tm.assert_index_equal(outer, expected) inner = self.index.join(other, how='inner') inner2 = other.join(self.index, how='inner') expected = Index([6, 8, 10]) tm.assert_index_equal(inner, inner2) tm.assert_index_equal(inner, expected) left = self.index.join(other, how='left') tm.assert_index_equal(left, self.index.astype(object)) left2 = other.join(self.index, how='left') tm.assert_index_equal(left2, other) right = self.index.join(other, how='right') tm.assert_index_equal(right, other) right2 = other.join(self.index, how='right') tm.assert_index_equal(right2, self.index.astype(object)) def test_join_outer(self): other = Int64Index([7, 12, 25, 1, 2, 5]) other_mono = Int64Index([1, 2, 5, 7, 12, 25]) # not monotonic # guarantee of sortedness res, lidx, ridx = self.index.join(other, how='outer', return_indexers=True) noidx_res = self.index.join(other, how='outer') tm.assert_index_equal(res, noidx_res) eres = Int64Index([0, 1, 
2, 4, 5, 6, 7, 8, 10, 12, 14, 16, 18, 25]) elidx = np.array([0, -1, 1, 2, -1, 3, -1, 4, 5, 6, 7, 8, 9, -1], dtype=np.intp) eridx = np.array([-1, 3, 4, -1, 5, -1, 0, -1, -1, 1, -1, -1, -1, 2], dtype=np.intp) assert isinstance(res, Int64Index) tm.assert_index_equal(res, eres) tm.assert_numpy_array_equal(lidx, elidx) tm.assert_numpy_array_equal(ridx, eridx) # monotonic res, lidx, ridx = self.index.join(other_mono, how='outer', return_indexers=True) noidx_res = self.index.join(other_mono, how='outer') tm.assert_index_equal(res, noidx_res) elidx = np.array([0, -1, 1, 2, -1, 3, -1, 4, 5, 6, 7, 8, 9, -1], dtype=np.intp) eridx = np.array([-1, 0, 1, -1, 2, -1, 3, -1, -1, 4, -1, -1, -1, 5], dtype=np.intp) assert isinstance(res, Int64Index) tm.assert_index_equal(res, eres) tm.assert_numpy_array_equal(lidx, elidx) tm.assert_numpy_array_equal(ridx, eridx) class TestUInt64Index(NumericInt): _dtype = 'uint64' _holder = UInt64Index def setup_method(self, method): vals = [2**63, 2**63 + 10, 2**63 + 15, 2**63 + 20, 2**63 + 25] self.indices = dict(index=UInt64Index(vals), index_dec=UInt64Index(reversed(vals))) self.setup_indices() def create_index(self): return UInt64Index(np.arange(5, dtype='uint64')) def test_constructor(self): idx = UInt64Index([1, 2, 3]) res = Index([1, 2, 3], dtype=np.uint64) tm.assert_index_equal(res, idx) idx = UInt64Index([1, 2**63]) res = Index([1, 2**63], dtype=np.uint64) tm.assert_index_equal(res, idx) idx = UInt64Index([1, 2**63]) res = Index([1, 2**63]) tm.assert_index_equal(res, idx) idx = Index([-1, 2**63], dtype=object) res = Index(np.array([-1, 2**63], dtype=object)) tm.assert_index_equal(res, idx) def test_get_indexer(self): target = UInt64Index(np.arange(10).astype('uint64') * 5 + 2**63) indexer = self.index.get_indexer(target) expected = np.array([0, -1, 1, 2, 3, 4, -1, -1, -1, -1], dtype=np.intp) tm.assert_numpy_array_equal(indexer, expected) target = UInt64Index(np.arange(10).astype('uint64') * 5 + 2**63) indexer = self.index.get_indexer(target, method='pad') expected = np.array([0, 0, 1, 2, 3, 4, 4, 4, 4, 4], dtype=np.intp) tm.assert_numpy_array_equal(indexer, expected) target = UInt64Index(np.arange(10).astype('uint64') * 5 + 2**63) indexer = self.index.get_indexer(target, method='backfill') expected = np.array([0, 1, 1, 2, 3, 4, -1, -1, -1, -1], dtype=np.intp) tm.assert_numpy_array_equal(indexer, expected) def test_intersection(self): other = Index([2**63, 2**63 + 5, 2**63 + 10, 2**63 + 15, 2**63 + 20]) result = self.index.intersection(other) expected = Index(np.sort(np.intersect1d(self.index.values, other.values))) tm.assert_index_equal(result, expected) result = other.intersection(self.index) expected = Index(np.sort(np.asarray(np.intersect1d(self.index.values, other.values)))) tm.assert_index_equal(result, expected) def test_join_inner(self): other = UInt64Index(2**63 + np.array( [7, 12, 25, 1, 2, 10], dtype='uint64')) other_mono = UInt64Index(2**63 + np.array( [1, 2, 7, 10, 12, 25], dtype='uint64')) # not monotonic res, lidx, ridx = self.index.join(other, how='inner', return_indexers=True) # no guarantee of sortedness, so sort for comparison purposes ind = res.argsort() res = res.take(ind) lidx = lidx.take(ind) ridx = ridx.take(ind) eres = UInt64Index(2**63 + np.array([10, 25], dtype='uint64')) elidx = np.array([1, 4], dtype=np.intp) eridx = np.array([5, 2], dtype=np.intp) assert isinstance(res, UInt64Index) tm.assert_index_equal(res, eres) tm.assert_numpy_array_equal(lidx, elidx) tm.assert_numpy_array_equal(ridx, eridx) # monotonic res, lidx, ridx = 
self.index.join(other_mono, how='inner', return_indexers=True) res2 = self.index.intersection(other_mono) tm.assert_index_equal(res, res2) elidx = np.array([1, 4], dtype=np.intp) eridx = np.array([3, 5], dtype=np.intp) assert isinstance(res, UInt64Index) tm.assert_index_equal(res, eres) tm.assert_numpy_array_equal(lidx, elidx) tm.assert_numpy_array_equal(ridx, eridx) def test_join_left(self): other = UInt64Index(2**63 + np.array( [7, 12, 25, 1, 2, 10], dtype='uint64')) other_mono = UInt64Index(2**63 + np.array( [1, 2, 7, 10, 12, 25], dtype='uint64')) # not monotonic res, lidx, ridx = self.index.join(other, how='left', return_indexers=True) eres = self.index eridx = np.array([-1, 5, -1, -1, 2], dtype=np.intp) assert isinstance(res, UInt64Index) tm.assert_index_equal(res, eres) assert lidx is None tm.assert_numpy_array_equal(ridx, eridx) # monotonic res, lidx, ridx = self.index.join(other_mono, how='left', return_indexers=True) eridx = np.array([-1, 3, -1, -1, 5], dtype=np.intp) assert isinstance(res, UInt64Index) tm.assert_index_equal(res, eres) assert lidx is None tm.assert_numpy_array_equal(ridx, eridx) # non-unique idx = UInt64Index(2**63 + np.array([1, 1, 2, 5], dtype='uint64')) idx2 = UInt64Index(2**63 + np.array([1, 2, 5, 7, 9], dtype='uint64')) res, lidx, ridx = idx2.join(idx, how='left', return_indexers=True) # 1 is in idx2, so it should be x2 eres = UInt64Index(2**63 + np.array( [1, 1, 2, 5, 7, 9], dtype='uint64')) eridx = np.array([0, 1, 2, 3, -1, -1], dtype=np.intp) elidx = np.array([0, 0, 1, 2, 3, 4], dtype=np.intp) tm.assert_index_equal(res, eres) tm.assert_numpy_array_equal(lidx, elidx) tm.assert_numpy_array_equal(ridx, eridx) def test_join_right(self): other = UInt64Index(2**63 + np.array( [7, 12, 25, 1, 2, 10], dtype='uint64')) other_mono = UInt64Index(2**63 + np.array( [1, 2, 7, 10, 12, 25], dtype='uint64')) # not monotonic res, lidx, ridx = self.index.join(other, how='right', return_indexers=True) eres = other elidx = np.array([-1, -1, 4, -1, -1, 1], dtype=np.intp) tm.assert_numpy_array_equal(lidx, elidx) assert isinstance(other, UInt64Index) tm.assert_index_equal(res, eres) assert ridx is None # monotonic res, lidx, ridx = self.index.join(other_mono, how='right', return_indexers=True) eres = other_mono elidx = np.array([-1, -1, -1, 1, -1, 4], dtype=np.intp) assert isinstance(other, UInt64Index) tm.assert_numpy_array_equal(lidx, elidx) tm.assert_index_equal(res, eres) assert ridx is None # non-unique idx = UInt64Index(2**63 + np.array([1, 1, 2, 5], dtype='uint64')) idx2 = UInt64Index(2**63 + np.array([1, 2, 5, 7, 9], dtype='uint64')) res, lidx, ridx = idx.join(idx2, how='right', return_indexers=True) # 1 is in idx2, so it should be x2 eres = UInt64Index(2**63 + np.array( [1, 1, 2, 5, 7, 9], dtype='uint64')) elidx = np.array([0, 1, 2, 3, -1, -1], dtype=np.intp) eridx = np.array([0, 0, 1, 2, 3, 4], dtype=np.intp) tm.assert_index_equal(res, eres) tm.assert_numpy_array_equal(lidx, elidx) tm.assert_numpy_array_equal(ridx, eridx) def test_join_non_int_index(self): other = Index(2**63 + np.array( [1, 5, 7, 10, 20], dtype='uint64'), dtype=object) outer = self.index.join(other, how='outer') outer2 = other.join(self.index, how='outer') expected = Index(2**63 + np.array( [0, 1, 5, 7, 10, 15, 20, 25], dtype='uint64')) tm.assert_index_equal(outer, outer2) tm.assert_index_equal(outer, expected) inner = self.index.join(other, how='inner') inner2 = other.join(self.index, how='inner') expected = Index(2**63 + np.array([10, 20], dtype='uint64')) tm.assert_index_equal(inner, inner2) 
tm.assert_index_equal(inner, expected) left = self.index.join(other, how='left') tm.assert_index_equal(left, self.index.astype(object)) left2 = other.join(self.index, how='left') tm.assert_index_equal(left2, other) right = self.index.join(other, how='right') tm.assert_index_equal(right, other) right2 = other.join(self.index, how='right') tm.assert_index_equal(right2, self.index.astype(object)) def test_join_outer(self): other = UInt64Index(2**63 + np.array( [7, 12, 25, 1, 2, 10], dtype='uint64')) other_mono = UInt64Index(2**63 + np.array( [1, 2, 7, 10, 12, 25], dtype='uint64')) # not monotonic # guarantee of sortedness res, lidx, ridx = self.index.join(other, how='outer', return_indexers=True) noidx_res = self.index.join(other, how='outer') tm.assert_index_equal(res, noidx_res) eres = UInt64Index(2**63 + np.array( [0, 1, 2, 7, 10, 12, 15, 20, 25], dtype='uint64')) elidx = np.array([0, -1, -1, -1, 1, -1, 2, 3, 4], dtype=np.intp) eridx = np.array([-1, 3, 4, 0, 5, 1, -1, -1, 2], dtype=np.intp) assert isinstance(res, UInt64Index) tm.assert_index_equal(res, eres) tm.assert_numpy_array_equal(lidx, elidx) tm.assert_numpy_array_equal(ridx, eridx) # monotonic res, lidx, ridx = self.index.join(other_mono, how='outer', return_indexers=True) noidx_res = self.index.join(other_mono, how='outer') tm.assert_index_equal(res, noidx_res) elidx = np.array([0, -1, -1, -1, 1, -1, 2, 3, 4], dtype=np.intp) eridx = np.array([-1, 0, 1, 2, 3, 4, -1, -1, 5], dtype=np.intp) assert isinstance(res, UInt64Index) tm.assert_index_equal(res, eres) tm.assert_numpy_array_equal(lidx, elidx) tm.assert_numpy_array_equal(ridx, eridx)
MJuddBooth/pandas
pandas/tests/indexes/test_numeric.py
pandas/tests/extension/base/reshaping.py
# flake8: noqa

from .common import (
    is_array_like, is_bool, is_bool_dtype, is_categorical,
    is_categorical_dtype, is_complex, is_complex_dtype,
    is_datetime64_any_dtype, is_datetime64_dtype, is_datetime64_ns_dtype,
    is_datetime64tz_dtype, is_datetimetz, is_dict_like, is_dtype_equal,
    is_extension_array_dtype, is_extension_type, is_file_like, is_float,
    is_float_dtype, is_hashable, is_int64_dtype, is_integer,
    is_integer_dtype, is_interval, is_interval_dtype, is_iterator,
    is_list_like, is_named_tuple, is_number, is_numeric_dtype,
    is_object_dtype, is_period, is_period_dtype, is_re, is_re_compilable,
    is_scalar, is_signed_integer_dtype, is_sparse, is_string_dtype,
    is_timedelta64_dtype, is_timedelta64_ns_dtype,
    is_unsigned_integer_dtype, pandas_dtype)
MJuddBooth/pandas
pandas/tests/indexes/test_numeric.py
pandas/core/dtypes/api.py
# -*- coding: utf-8 -*- from collections import defaultdict from functools import partial import itertools import operator import re import numpy as np from pandas._libs import internals as libinternals, lib from pandas.compat import map, range, zip from pandas.util._validators import validate_bool_kwarg from pandas.core.dtypes.cast import ( find_common_type, infer_dtype_from_scalar, maybe_convert_objects, maybe_promote) from pandas.core.dtypes.common import ( _NS_DTYPE, is_datetimelike_v_numeric, is_extension_array_dtype, is_extension_type, is_list_like, is_numeric_v_string_like, is_scalar) import pandas.core.dtypes.concat as _concat from pandas.core.dtypes.generic import ABCExtensionArray, ABCSeries from pandas.core.dtypes.missing import isna import pandas.core.algorithms as algos from pandas.core.arrays.sparse import _maybe_to_sparse from pandas.core.base import PandasObject from pandas.core.index import Index, MultiIndex, ensure_index from pandas.core.indexing import maybe_convert_indices from pandas.io.formats.printing import pprint_thing from .blocks import ( Block, CategoricalBlock, DatetimeTZBlock, ExtensionBlock, ObjectValuesExtensionBlock, _extend_blocks, _merge_blocks, _safe_reshape, get_block_type, make_block) from .concat import ( # all for concatenate_block_managers combine_concat_plans, concatenate_join_units, get_mgr_concatenation_plan, is_uniform_join_units) # TODO: flexible with index=None and/or items=None class BlockManager(PandasObject): """ Core internal data structure to implement DataFrame, Series, Panel, etc. Manage a bunch of labeled 2D mixed-type ndarrays. Essentially it's a lightweight blocked set of labeled data to be manipulated by the DataFrame public API class Attributes ---------- shape ndim axes values items Methods ------- set_axis(axis, new_labels) copy(deep=True) get_dtype_counts get_ftype_counts get_dtypes get_ftypes apply(func, axes, block_filter_fn) get_bool_data get_numeric_data get_slice(slice_like, axis) get(label) iget(loc) take(indexer, axis) reindex_axis(new_labels, axis) reindex_indexer(new_labels, indexer, axis) delete(label) insert(loc, label, value) set(label, value) Parameters ---------- Notes ----- This is *not* a public API class """ __slots__ = ['axes', 'blocks', '_ndim', '_shape', '_known_consolidated', '_is_consolidated', '_blknos', '_blklocs'] def __init__(self, blocks, axes, do_integrity_check=True): self.axes = [ensure_index(ax) for ax in axes] self.blocks = tuple(blocks) for block in blocks: if block.is_sparse: if len(block.mgr_locs) != 1: raise AssertionError("Sparse block refers to multiple " "items") else: if self.ndim != block.ndim: raise AssertionError( 'Number of Block dimensions ({block}) must equal ' 'number of axes ({self})'.format(block=block.ndim, self=self.ndim)) if do_integrity_check: self._verify_integrity() self._consolidate_check() self._rebuild_blknos_and_blklocs() def make_empty(self, axes=None): """ return an empty BlockManager with the items axis of len 0 """ if axes is None: axes = [ensure_index([])] + [ensure_index(a) for a in self.axes[1:]] # preserve dtype if possible if self.ndim == 1: blocks = np.array([], dtype=self.array_dtype) else: blocks = [] return self.__class__(blocks, axes) def __nonzero__(self): return True # Python3 compat __bool__ = __nonzero__ @property def shape(self): return tuple(len(ax) for ax in self.axes) @property def ndim(self): return len(self.axes) def set_axis(self, axis, new_labels): new_labels = ensure_index(new_labels) old_len = len(self.axes[axis]) new_len = len(new_labels) if 
new_len != old_len: raise ValueError( 'Length mismatch: Expected axis has {old} elements, new ' 'values have {new} elements'.format(old=old_len, new=new_len)) self.axes[axis] = new_labels def rename_axis(self, mapper, axis, copy=True, level=None): """ Rename one of axes. Parameters ---------- mapper : unary callable axis : int copy : boolean, default True level : int, default None """ obj = self.copy(deep=copy) obj.set_axis(axis, _transform_index(self.axes[axis], mapper, level)) return obj @property def _is_single_block(self): if self.ndim == 1: return True if len(self.blocks) != 1: return False blk = self.blocks[0] return (blk.mgr_locs.is_slice_like and blk.mgr_locs.as_slice == slice(0, len(self), 1)) def _rebuild_blknos_and_blklocs(self): """ Update mgr._blknos / mgr._blklocs. """ new_blknos = np.empty(self.shape[0], dtype=np.int64) new_blklocs = np.empty(self.shape[0], dtype=np.int64) new_blknos.fill(-1) new_blklocs.fill(-1) for blkno, blk in enumerate(self.blocks): rl = blk.mgr_locs new_blknos[rl.indexer] = blkno new_blklocs[rl.indexer] = np.arange(len(rl)) if (new_blknos == -1).any(): raise AssertionError("Gaps in blk ref_locs") self._blknos = new_blknos self._blklocs = new_blklocs @property def items(self): return self.axes[0] def _get_counts(self, f): """ return a dict of the counts of the function in BlockManager """ self._consolidate_inplace() counts = dict() for b in self.blocks: v = f(b) counts[v] = counts.get(v, 0) + b.shape[0] return counts def get_dtype_counts(self): return self._get_counts(lambda b: b.dtype.name) def get_ftype_counts(self): return self._get_counts(lambda b: b.ftype) def get_dtypes(self): dtypes = np.array([blk.dtype for blk in self.blocks]) return algos.take_1d(dtypes, self._blknos, allow_fill=False) def get_ftypes(self): ftypes = np.array([blk.ftype for blk in self.blocks]) return algos.take_1d(ftypes, self._blknos, allow_fill=False) def __getstate__(self): block_values = [b.values for b in self.blocks] block_items = [self.items[b.mgr_locs.indexer] for b in self.blocks] axes_array = [ax for ax in self.axes] extra_state = { '0.14.1': { 'axes': axes_array, 'blocks': [dict(values=b.values, mgr_locs=b.mgr_locs.indexer) for b in self.blocks] } } # First three elements of the state are to maintain forward # compatibility with 0.13.1. return axes_array, block_values, block_items, extra_state def __setstate__(self, state): def unpickle_block(values, mgr_locs): return make_block(values, placement=mgr_locs) if (isinstance(state, tuple) and len(state) >= 4 and '0.14.1' in state[3]): state = state[3]['0.14.1'] self.axes = [ensure_index(ax) for ax in state['axes']] self.blocks = tuple(unpickle_block(b['values'], b['mgr_locs']) for b in state['blocks']) else: # discard anything after 3rd, support beta pickling format for a # little while longer ax_arrays, bvalues, bitems = state[:3] self.axes = [ensure_index(ax) for ax in ax_arrays] if len(bitems) == 1 and self.axes[0].equals(bitems[0]): # This is a workaround for pre-0.14.1 pickles that didn't # support unpickling multi-block frames/panels with non-unique # columns/items, because given a manager with items ["a", "b", # "a"] there's no way of knowing which block's "a" is where. # # Single-block case can be supported under the assumption that # block items corresponded to manager items 1-to-1. 
all_mgr_locs = [slice(0, len(bitems[0]))] else: all_mgr_locs = [self.axes[0].get_indexer(blk_items) for blk_items in bitems] self.blocks = tuple( unpickle_block(values, mgr_locs) for values, mgr_locs in zip(bvalues, all_mgr_locs)) self._post_setstate() def _post_setstate(self): self._is_consolidated = False self._known_consolidated = False self._rebuild_blknos_and_blklocs() def __len__(self): return len(self.items) def __unicode__(self): output = pprint_thing(self.__class__.__name__) for i, ax in enumerate(self.axes): if i == 0: output += u'\nItems: {ax}'.format(ax=ax) else: output += u'\nAxis {i}: {ax}'.format(i=i, ax=ax) for block in self.blocks: output += u'\n{block}'.format(block=pprint_thing(block)) return output def _verify_integrity(self): mgr_shape = self.shape tot_items = sum(len(x.mgr_locs) for x in self.blocks) for block in self.blocks: if block._verify_integrity and block.shape[1:] != mgr_shape[1:]: construction_error(tot_items, block.shape[1:], self.axes) if len(self.items) != tot_items: raise AssertionError('Number of manager items must equal union of ' 'block items\n# manager items: {0}, # ' 'tot_items: {1}'.format( len(self.items), tot_items)) def apply(self, f, axes=None, filter=None, do_integrity_check=False, consolidate=True, **kwargs): """ iterate over the blocks, collect and create a new block manager Parameters ---------- f : the callable or function name to operate on at the block level axes : optional (if not supplied, use self.axes) filter : list, if supplied, only call the block if the filter is in the block do_integrity_check : boolean, default False. Do the block manager integrity check consolidate: boolean, default True. Join together blocks having same dtype Returns ------- Block Manager (new object) """ result_blocks = [] # filter kwarg is used in replace-* family of methods if filter is not None: filter_locs = set(self.items.get_indexer_for(filter)) if len(filter_locs) == len(self.items): # All items are included, as if there were no filtering filter = None else: kwargs['filter'] = filter_locs if consolidate: self._consolidate_inplace() if f == 'where': align_copy = True if kwargs.get('align', True): align_keys = ['other', 'cond'] else: align_keys = ['cond'] elif f == 'putmask': align_copy = False if kwargs.get('align', True): align_keys = ['new', 'mask'] else: align_keys = ['mask'] elif f == 'fillna': # fillna internally does putmask, maybe it's better to do this # at mgr, not block level? align_copy = False align_keys = ['value'] else: align_keys = [] # TODO(EA): may interfere with ExtensionBlock.setitem for blocks # with a .values attribute. aligned_args = {k: kwargs[k] for k in align_keys if hasattr(kwargs[k], 'values') and not isinstance(kwargs[k], ABCExtensionArray)} for b in self.blocks: if filter is not None: if not b.mgr_locs.isin(filter_locs).any(): result_blocks.append(b) continue if aligned_args: b_items = self.items[b.mgr_locs.indexer] for k, obj in aligned_args.items(): axis = getattr(obj, '_info_axis_number', 0) kwargs[k] = obj.reindex(b_items, axis=axis, copy=align_copy) applied = getattr(b, f)(**kwargs) result_blocks = _extend_blocks(applied, result_blocks) if len(result_blocks) == 0: return self.make_empty(axes or self.axes) bm = self.__class__(result_blocks, axes or self.axes, do_integrity_check=do_integrity_check) bm._consolidate_inplace() return bm def quantile(self, axis=0, consolidate=True, transposed=False, interpolation='linear', qs=None, numeric_only=None): """ Iterate over blocks applying quantile reduction. 
This routine is intended for reduction type operations and will do inference on the generated blocks. Parameters ---------- axis: reduction axis, default 0 consolidate: boolean, default True. Join together blocks having same dtype transposed: boolean, default False we are holding transposed data interpolation : type of interpolation, default 'linear' qs : a scalar or list of the quantiles to be computed numeric_only : ignored Returns ------- Block Manager (new object) """ # Series dispatches to DataFrame for quantile, which allows us to # simplify some of the code here and in the blocks assert self.ndim >= 2 if consolidate: self._consolidate_inplace() def get_axe(block, qs, axes): from pandas import Float64Index if is_list_like(qs): ax = Float64Index(qs) elif block.ndim == 1: ax = Float64Index([qs]) else: ax = axes[0] return ax axes, blocks = [], [] for b in self.blocks: block = b.quantile(axis=axis, qs=qs, interpolation=interpolation) axe = get_axe(b, qs, axes=self.axes) axes.append(axe) blocks.append(block) # note that some DatetimeTZ, Categorical are always ndim==1 ndim = {b.ndim for b in blocks} assert 0 not in ndim, ndim if 2 in ndim: new_axes = list(self.axes) # multiple blocks that are reduced if len(blocks) > 1: new_axes[1] = axes[0] # reset the placement to the original for b, sb in zip(blocks, self.blocks): b.mgr_locs = sb.mgr_locs else: new_axes[axis] = Index(np.concatenate( [ax.values for ax in axes])) if transposed: new_axes = new_axes[::-1] blocks = [b.make_block(b.values.T, placement=np.arange(b.shape[1]) ) for b in blocks] return self.__class__(blocks, new_axes) # single block, i.e. ndim == {1} values = _concat._concat_compat([b.values for b in blocks]) # compute the orderings of our original data if len(self.blocks) > 1: indexer = np.empty(len(self.axes[0]), dtype=np.intp) i = 0 for b in self.blocks: for j in b.mgr_locs: indexer[j] = i i = i + 1 values = values.take(indexer) return SingleBlockManager( [make_block(values, ndim=1, placement=np.arange(len(values)))], axes[0]) def isna(self, func, **kwargs): return self.apply('apply', func=func, **kwargs) def where(self, **kwargs): return self.apply('where', **kwargs) def setitem(self, **kwargs): return self.apply('setitem', **kwargs) def putmask(self, **kwargs): return self.apply('putmask', **kwargs) def diff(self, **kwargs): return self.apply('diff', **kwargs) def interpolate(self, **kwargs): return self.apply('interpolate', **kwargs) def shift(self, **kwargs): return self.apply('shift', **kwargs) def fillna(self, **kwargs): return self.apply('fillna', **kwargs) def downcast(self, **kwargs): return self.apply('downcast', **kwargs) def astype(self, dtype, **kwargs): return self.apply('astype', dtype=dtype, **kwargs) def convert(self, **kwargs): return self.apply('convert', **kwargs) def replace(self, **kwargs): return self.apply('replace', **kwargs) def replace_list(self, src_list, dest_list, inplace=False, regex=False): """ do a list replace """ inplace = validate_bool_kwarg(inplace, 'inplace') # figure out our mask a-priori to avoid repeated replacements values = self.as_array() def comp(s, regex=False): """ Generate a bool array by perform an equality check, or perform an element-wise regular expression matching """ if isna(s): return isna(values) if hasattr(s, 'asm8'): return _compare_or_regex_search(maybe_convert_objects(values), getattr(s, 'asm8'), regex) return _compare_or_regex_search(values, s, regex) masks = [comp(s, regex) for i, s in enumerate(src_list)] result_blocks = [] src_len = len(src_list) - 1 for blk in 
self.blocks: # its possible to get multiple result blocks here # replace ALWAYS will return a list rb = [blk if inplace else blk.copy()] for i, (s, d) in enumerate(zip(src_list, dest_list)): new_rb = [] for b in rb: m = masks[i][b.mgr_locs.indexer] convert = i == src_len result = b._replace_coerce(mask=m, to_replace=s, value=d, inplace=inplace, convert=convert, regex=regex) if m.any(): new_rb = _extend_blocks(result, new_rb) else: new_rb.append(b) rb = new_rb result_blocks.extend(rb) bm = self.__class__(result_blocks, self.axes) bm._consolidate_inplace() return bm def is_consolidated(self): """ Return True if more than one block with the same dtype """ if not self._known_consolidated: self._consolidate_check() return self._is_consolidated def _consolidate_check(self): ftypes = [blk.ftype for blk in self.blocks] self._is_consolidated = len(ftypes) == len(set(ftypes)) self._known_consolidated = True @property def is_mixed_type(self): # Warning, consolidation needs to get checked upstairs self._consolidate_inplace() return len(self.blocks) > 1 @property def is_numeric_mixed_type(self): # Warning, consolidation needs to get checked upstairs self._consolidate_inplace() return all(block.is_numeric for block in self.blocks) @property def is_datelike_mixed_type(self): # Warning, consolidation needs to get checked upstairs self._consolidate_inplace() return any(block.is_datelike for block in self.blocks) @property def any_extension_types(self): """Whether any of the blocks in this manager are extension blocks""" return any(block.is_extension for block in self.blocks) @property def is_view(self): """ return a boolean if we are a single block and are a view """ if len(self.blocks) == 1: return self.blocks[0].is_view # It is technically possible to figure out which blocks are views # e.g. [ b.values.base is not None for b in self.blocks ] # but then we have the case of possibly some blocks being a view # and some blocks not. setting in theory is possible on the non-view # blocks w/o causing a SettingWithCopy raise/warn. 
But this is a bit # complicated return False def get_bool_data(self, copy=False): """ Parameters ---------- copy : boolean, default False Whether to copy the blocks """ self._consolidate_inplace() return self.combine([b for b in self.blocks if b.is_bool], copy) def get_numeric_data(self, copy=False): """ Parameters ---------- copy : boolean, default False Whether to copy the blocks """ self._consolidate_inplace() return self.combine([b for b in self.blocks if b.is_numeric], copy) def combine(self, blocks, copy=True): """ return a new manager with the blocks """ if len(blocks) == 0: return self.make_empty() # FIXME: optimization potential indexer = np.sort(np.concatenate([b.mgr_locs.as_array for b in blocks])) inv_indexer = lib.get_reverse_indexer(indexer, self.shape[0]) new_blocks = [] for b in blocks: b = b.copy(deep=copy) b.mgr_locs = algos.take_1d(inv_indexer, b.mgr_locs.as_array, axis=0, allow_fill=False) new_blocks.append(b) axes = list(self.axes) axes[0] = self.items.take(indexer) return self.__class__(new_blocks, axes, do_integrity_check=False) def get_slice(self, slobj, axis=0): if axis >= self.ndim: raise IndexError("Requested axis not found in manager") if axis == 0: new_blocks = self._slice_take_blocks_ax0(slobj) else: slicer = [slice(None)] * (axis + 1) slicer[axis] = slobj slicer = tuple(slicer) new_blocks = [blk.getitem_block(slicer) for blk in self.blocks] new_axes = list(self.axes) new_axes[axis] = new_axes[axis][slobj] bm = self.__class__(new_blocks, new_axes, do_integrity_check=False) bm._consolidate_inplace() return bm def __contains__(self, item): return item in self.items @property def nblocks(self): return len(self.blocks) def copy(self, deep=True): """ Make deep or shallow copy of BlockManager Parameters ---------- deep : boolean o rstring, default True If False, return shallow copy (do not copy data) If 'all', copy data and a deep copy of the index Returns ------- copy : BlockManager """ # this preserves the notion of view copying of axes if deep: if deep == 'all': copy = lambda ax: ax.copy(deep=True) else: copy = lambda ax: ax.view() new_axes = [copy(ax) for ax in self.axes] else: new_axes = list(self.axes) return self.apply('copy', axes=new_axes, deep=deep, do_integrity_check=False) def as_array(self, transpose=False, items=None): """Convert the blockmanager data into an numpy array. Parameters ---------- transpose : boolean, default False If True, transpose the return array items : list of strings or None Names of block items that will be included in the returned array. ``None`` means that all block items will be used Returns ------- arr : ndarray """ if len(self.blocks) == 0: arr = np.empty(self.shape, dtype=float) return arr.transpose() if transpose else arr if items is not None: mgr = self.reindex_axis(items, axis=0) else: mgr = self if self._is_single_block and mgr.blocks[0].is_datetimetz: # TODO(Block.get_values): Make DatetimeTZBlock.get_values # always be object dtype. Some callers seem to want the # DatetimeArray (previously DTI) arr = mgr.blocks[0].get_values(dtype=object) elif self._is_single_block or not self.is_mixed_type: arr = np.asarray(mgr.blocks[0].get_values()) else: arr = mgr._interleave() return arr.transpose() if transpose else arr def _interleave(self): """ Return ndarray from blocks with specified item order Items must be contained in the blocks """ from pandas.core.dtypes.common import is_sparse dtype = _interleaved_dtype(self.blocks) # TODO: https://github.com/pandas-dev/pandas/issues/22791 # Give EAs some input on what happens here. 
Sparse needs this. if is_sparse(dtype): dtype = dtype.subtype elif is_extension_array_dtype(dtype): dtype = 'object' result = np.empty(self.shape, dtype=dtype) itemmask = np.zeros(self.shape[0]) for blk in self.blocks: rl = blk.mgr_locs result[rl.indexer] = blk.get_values(dtype) itemmask[rl.indexer] = 1 if not itemmask.all(): raise AssertionError('Some items were not contained in blocks') return result def to_dict(self, copy=True): """ Return a dict of str(dtype) -> BlockManager Parameters ---------- copy : boolean, default True Returns ------- values : a dict of dtype -> BlockManager Notes ----- This consolidates based on str(dtype) """ self._consolidate_inplace() bd = {} for b in self.blocks: bd.setdefault(str(b.dtype), []).append(b) return {dtype: self.combine(blocks, copy=copy) for dtype, blocks in bd.items()} def xs(self, key, axis=1, copy=True, takeable=False): if axis < 1: raise AssertionError( 'Can only take xs across axis >= 1, got {ax}'.format(ax=axis)) # take by position if takeable: loc = key else: loc = self.axes[axis].get_loc(key) slicer = [slice(None, None) for _ in range(self.ndim)] slicer[axis] = loc slicer = tuple(slicer) new_axes = list(self.axes) # could be an array indexer! if isinstance(loc, (slice, np.ndarray)): new_axes[axis] = new_axes[axis][loc] else: new_axes.pop(axis) new_blocks = [] if len(self.blocks) > 1: # we must copy here as we are mixed type for blk in self.blocks: newb = make_block(values=blk.values[slicer], klass=blk.__class__, placement=blk.mgr_locs) new_blocks.append(newb) elif len(self.blocks) == 1: block = self.blocks[0] vals = block.values[slicer] if copy: vals = vals.copy() new_blocks = [make_block(values=vals, placement=block.mgr_locs, klass=block.__class__)] return self.__class__(new_blocks, new_axes) def fast_xs(self, loc): """ get a cross sectional for a given location in the items ; handle dups return the result, is *could* be a view in the case of a single block """ if len(self.blocks) == 1: return self.blocks[0].iget((slice(None), loc)) items = self.items # non-unique (GH4726) if not items.is_unique: result = self._interleave() if self.ndim == 2: result = result.T return result[loc] # unique dtype = _interleaved_dtype(self.blocks) n = len(items) if is_extension_array_dtype(dtype): # we'll eventually construct an ExtensionArray. result = np.empty(n, dtype=object) else: result = np.empty(n, dtype=dtype) for blk in self.blocks: # Such assignment may incorrectly coerce NaT to None # result[blk.mgr_locs] = blk._slice((slice(None), loc)) for i, rl in enumerate(blk.mgr_locs): result[rl] = blk._try_coerce_result(blk.iget((i, loc))) if is_extension_array_dtype(dtype): result = dtype.construct_array_type()._from_sequence( result, dtype=dtype ) return result def consolidate(self): """ Join together blocks having same dtype Returns ------- y : BlockManager """ if self.is_consolidated(): return self bm = self.__class__(self.blocks, self.axes) bm._is_consolidated = False bm._consolidate_inplace() return bm def _consolidate_inplace(self): if not self.is_consolidated(): self.blocks = tuple(_consolidate(self.blocks)) self._is_consolidated = True self._known_consolidated = True self._rebuild_blknos_and_blklocs() def get(self, item, fastpath=True): """ Return values for selected item (ndarray or BlockManager). 
""" if self.items.is_unique: if not isna(item): loc = self.items.get_loc(item) else: indexer = np.arange(len(self.items))[isna(self.items)] # allow a single nan location indexer if not is_scalar(indexer): if len(indexer) == 1: loc = indexer.item() else: raise ValueError("cannot label index with a null key") return self.iget(loc, fastpath=fastpath) else: if isna(item): raise TypeError("cannot label index with a null key") indexer = self.items.get_indexer_for([item]) return self.reindex_indexer(new_axis=self.items[indexer], indexer=indexer, axis=0, allow_dups=True) def iget(self, i, fastpath=True): """ Return the data as a SingleBlockManager if fastpath=True and possible Otherwise return as a ndarray """ block = self.blocks[self._blknos[i]] values = block.iget(self._blklocs[i]) if not fastpath or not block._box_to_block_values or values.ndim != 1: return values # fastpath shortcut for select a single-dim from a 2-dim BM return SingleBlockManager( [block.make_block_same_class(values, placement=slice(0, len(values)), ndim=1)], self.axes[1]) def delete(self, item): """ Delete selected item (items if non-unique) in-place. """ indexer = self.items.get_loc(item) is_deleted = np.zeros(self.shape[0], dtype=np.bool_) is_deleted[indexer] = True ref_loc_offset = -is_deleted.cumsum() is_blk_deleted = [False] * len(self.blocks) if isinstance(indexer, int): affected_start = indexer else: affected_start = is_deleted.nonzero()[0][0] for blkno, _ in _fast_count_smallints(self._blknos[affected_start:]): blk = self.blocks[blkno] bml = blk.mgr_locs blk_del = is_deleted[bml.indexer].nonzero()[0] if len(blk_del) == len(bml): is_blk_deleted[blkno] = True continue elif len(blk_del) != 0: blk.delete(blk_del) bml = blk.mgr_locs blk.mgr_locs = bml.add(ref_loc_offset[bml.indexer]) # FIXME: use Index.delete as soon as it uses fastpath=True self.axes[0] = self.items[~is_deleted] self.blocks = tuple(b for blkno, b in enumerate(self.blocks) if not is_blk_deleted[blkno]) self._shape = None self._rebuild_blknos_and_blklocs() def set(self, item, value): """ Set new item in-place. Does not consolidate. 
Adds new Block if not contained in the current set of items """ # FIXME: refactor, clearly separate broadcasting & zip-like assignment # can prob also fix the various if tests for sparse/categorical # TODO(EA): Remove an is_extension_ when all extension types satisfy # the interface value_is_extension_type = (is_extension_type(value) or is_extension_array_dtype(value)) # categorical/spares/datetimetz if value_is_extension_type: def value_getitem(placement): return value else: if value.ndim == self.ndim - 1: value = _safe_reshape(value, (1,) + value.shape) def value_getitem(placement): return value else: def value_getitem(placement): return value[placement.indexer] if value.shape[1:] != self.shape[1:]: raise AssertionError('Shape of new values must be compatible ' 'with manager shape') try: loc = self.items.get_loc(item) except KeyError: # This item wasn't present, just insert at end self.insert(len(self.items), item, value) return if isinstance(loc, int): loc = [loc] blknos = self._blknos[loc] blklocs = self._blklocs[loc].copy() unfit_mgr_locs = [] unfit_val_locs = [] removed_blknos = [] for blkno, val_locs in libinternals.get_blkno_placements(blknos, self.nblocks, group=True): blk = self.blocks[blkno] blk_locs = blklocs[val_locs.indexer] if blk.should_store(value): blk.set(blk_locs, value_getitem(val_locs)) else: unfit_mgr_locs.append(blk.mgr_locs.as_array[blk_locs]) unfit_val_locs.append(val_locs) # If all block items are unfit, schedule the block for removal. if len(val_locs) == len(blk.mgr_locs): removed_blknos.append(blkno) else: self._blklocs[blk.mgr_locs.indexer] = -1 blk.delete(blk_locs) self._blklocs[blk.mgr_locs.indexer] = np.arange(len(blk)) if len(removed_blknos): # Remove blocks & update blknos accordingly is_deleted = np.zeros(self.nblocks, dtype=np.bool_) is_deleted[removed_blknos] = True new_blknos = np.empty(self.nblocks, dtype=np.int64) new_blknos.fill(-1) new_blknos[~is_deleted] = np.arange(self.nblocks - len(removed_blknos)) self._blknos = algos.take_1d(new_blknos, self._blknos, axis=0, allow_fill=False) self.blocks = tuple(blk for i, blk in enumerate(self.blocks) if i not in set(removed_blknos)) if unfit_val_locs: unfit_mgr_locs = np.concatenate(unfit_mgr_locs) unfit_count = len(unfit_mgr_locs) new_blocks = [] if value_is_extension_type: # This code (ab-)uses the fact that sparse blocks contain only # one item. new_blocks.extend( make_block(values=value.copy(), ndim=self.ndim, placement=slice(mgr_loc, mgr_loc + 1)) for mgr_loc in unfit_mgr_locs) self._blknos[unfit_mgr_locs] = (np.arange(unfit_count) + len(self.blocks)) self._blklocs[unfit_mgr_locs] = 0 else: # unfit_val_locs contains BlockPlacement objects unfit_val_items = unfit_val_locs[0].append(unfit_val_locs[1:]) new_blocks.append( make_block(values=value_getitem(unfit_val_items), ndim=self.ndim, placement=unfit_mgr_locs)) self._blknos[unfit_mgr_locs] = len(self.blocks) self._blklocs[unfit_mgr_locs] = np.arange(unfit_count) self.blocks += tuple(new_blocks) # Newly created block's dtype may already be present. self._known_consolidated = False def insert(self, loc, item, value, allow_duplicates=False): """ Insert item at selected position. Parameters ---------- loc : int item : hashable value : array_like allow_duplicates: bool If False, trying to insert non-unique item will raise """ if not allow_duplicates and item in self.items: # Should this be a different kind of error?? 
raise ValueError('cannot insert {}, already exists'.format(item)) if not isinstance(loc, int): raise TypeError("loc must be int") # insert to the axis; this could possibly raise a TypeError new_axis = self.items.insert(loc, item) block = make_block(values=value, ndim=self.ndim, placement=slice(loc, loc + 1)) for blkno, count in _fast_count_smallints(self._blknos[loc:]): blk = self.blocks[blkno] if count == len(blk.mgr_locs): blk.mgr_locs = blk.mgr_locs.add(1) else: new_mgr_locs = blk.mgr_locs.as_array.copy() new_mgr_locs[new_mgr_locs >= loc] += 1 blk.mgr_locs = new_mgr_locs if loc == self._blklocs.shape[0]: # np.append is a lot faster, let's use it if we can. self._blklocs = np.append(self._blklocs, 0) self._blknos = np.append(self._blknos, len(self.blocks)) else: self._blklocs = np.insert(self._blklocs, loc, 0) self._blknos = np.insert(self._blknos, loc, len(self.blocks)) self.axes[0] = new_axis self.blocks += (block,) self._shape = None self._known_consolidated = False if len(self.blocks) > 100: self._consolidate_inplace() def reindex_axis(self, new_index, axis, method=None, limit=None, fill_value=None, copy=True): """ Conform block manager to new index. """ new_index = ensure_index(new_index) new_index, indexer = self.axes[axis].reindex(new_index, method=method, limit=limit) return self.reindex_indexer(new_index, indexer, axis=axis, fill_value=fill_value, copy=copy) def reindex_indexer(self, new_axis, indexer, axis, fill_value=None, allow_dups=False, copy=True): """ Parameters ---------- new_axis : Index indexer : ndarray of int64 or None axis : int fill_value : object allow_dups : bool pandas-indexer with -1's only. """ if indexer is None: if new_axis is self.axes[axis] and not copy: return self result = self.copy(deep=copy) result.axes = list(self.axes) result.axes[axis] = new_axis return result self._consolidate_inplace() # some axes don't allow reindexing with dups if not allow_dups: self.axes[axis]._can_reindex(indexer) if axis >= self.ndim: raise IndexError("Requested axis not found in manager") if axis == 0: new_blocks = self._slice_take_blocks_ax0(indexer, fill_tuple=(fill_value,)) else: new_blocks = [blk.take_nd(indexer, axis=axis, fill_tuple=( fill_value if fill_value is not None else blk.fill_value,)) for blk in self.blocks] new_axes = list(self.axes) new_axes[axis] = new_axis return self.__class__(new_blocks, new_axes) def _slice_take_blocks_ax0(self, slice_or_indexer, fill_tuple=None): """ Slice/take blocks along axis=0. Overloaded for SingleBlock Returns ------- new_blocks : list of Block """ allow_fill = fill_tuple is not None sl_type, slobj, sllen = _preprocess_slice_or_indexer( slice_or_indexer, self.shape[0], allow_fill=allow_fill) if self._is_single_block: blk = self.blocks[0] if sl_type in ('slice', 'mask'): return [blk.getitem_block(slobj, new_mgr_locs=slice(0, sllen))] elif not allow_fill or self.ndim == 1: if allow_fill and fill_tuple[0] is None: _, fill_value = maybe_promote(blk.dtype) fill_tuple = (fill_value, ) return [blk.take_nd(slobj, axis=0, new_mgr_locs=slice(0, sllen), fill_tuple=fill_tuple)] if sl_type in ('slice', 'mask'): blknos = self._blknos[slobj] blklocs = self._blklocs[slobj] else: blknos = algos.take_1d(self._blknos, slobj, fill_value=-1, allow_fill=allow_fill) blklocs = algos.take_1d(self._blklocs, slobj, fill_value=-1, allow_fill=allow_fill) # When filling blknos, make sure blknos is updated before appending to # blocks list, that way new blkno is exactly len(blocks). 
# # FIXME: mgr_groupby_blknos must return mgr_locs in ascending order, # pytables serialization will break otherwise. blocks = [] for blkno, mgr_locs in libinternals.get_blkno_placements(blknos, self.nblocks, group=True): if blkno == -1: # If we've got here, fill_tuple was not None. fill_value = fill_tuple[0] blocks.append(self._make_na_block(placement=mgr_locs, fill_value=fill_value)) else: blk = self.blocks[blkno] # Otherwise, slicing along items axis is necessary. if not blk._can_consolidate: # A non-consolidatable block, it's easy, because there's # only one item and each mgr loc is a copy of that single # item. for mgr_loc in mgr_locs: newblk = blk.copy(deep=True) newblk.mgr_locs = slice(mgr_loc, mgr_loc + 1) blocks.append(newblk) else: blocks.append(blk.take_nd(blklocs[mgr_locs.indexer], axis=0, new_mgr_locs=mgr_locs, fill_tuple=None)) return blocks def _make_na_block(self, placement, fill_value=None): # TODO: infer dtypes other than float64 from fill_value if fill_value is None: fill_value = np.nan block_shape = list(self.shape) block_shape[0] = len(placement) dtype, fill_value = infer_dtype_from_scalar(fill_value) block_values = np.empty(block_shape, dtype=dtype) block_values.fill(fill_value) return make_block(block_values, placement=placement) def take(self, indexer, axis=1, verify=True, convert=True): """ Take items along any axis. """ self._consolidate_inplace() indexer = (np.arange(indexer.start, indexer.stop, indexer.step, dtype='int64') if isinstance(indexer, slice) else np.asanyarray(indexer, dtype='int64')) n = self.shape[axis] if convert: indexer = maybe_convert_indices(indexer, n) if verify: if ((indexer == -1) | (indexer >= n)).any(): raise Exception('Indices must be nonzero and less than ' 'the axis length') new_labels = self.axes[axis].take(indexer) return self.reindex_indexer(new_axis=new_labels, indexer=indexer, axis=axis, allow_dups=True) def merge(self, other, lsuffix='', rsuffix=''): # We assume at this point that the axes of self and other match. # This is only called from Panel.join, which reindexes prior # to calling to ensure this assumption holds. l, r = items_overlap_with_suffix(left=self.items, lsuffix=lsuffix, right=other.items, rsuffix=rsuffix) new_items = _concat_indexes([l, r]) new_blocks = [blk.copy(deep=False) for blk in self.blocks] offset = self.shape[0] for blk in other.blocks: blk = blk.copy(deep=False) blk.mgr_locs = blk.mgr_locs.add(offset) new_blocks.append(blk) new_axes = list(self.axes) new_axes[0] = new_items return self.__class__(_consolidate(new_blocks), new_axes) def equals(self, other): self_axes, other_axes = self.axes, other.axes if len(self_axes) != len(other_axes): return False if not all(ax1.equals(ax2) for ax1, ax2 in zip(self_axes, other_axes)): return False self._consolidate_inplace() other._consolidate_inplace() if len(self.blocks) != len(other.blocks): return False # canonicalize block order, using a tuple combining the type # name and then mgr_locs because there might be unconsolidated # blocks (say, Categorical) which can only be distinguished by # the iteration order def canonicalize(block): return (block.dtype.name, block.mgr_locs.as_array.tolist()) self_blocks = sorted(self.blocks, key=canonicalize) other_blocks = sorted(other.blocks, key=canonicalize) return all(block.equals(oblock) for block, oblock in zip(self_blocks, other_blocks)) def unstack(self, unstacker_func, fill_value): """Return a blockmanager with all blocks unstacked. 
Parameters ---------- unstacker_func : callable A (partially-applied) ``pd.core.reshape._Unstacker`` class. fill_value : Any fill_value for newly introduced missing values. Returns ------- unstacked : BlockManager """ n_rows = self.shape[-1] dummy = unstacker_func(np.empty((0, 0)), value_columns=self.items) new_columns = dummy.get_new_columns() new_index = dummy.get_new_index() new_blocks = [] columns_mask = [] for blk in self.blocks: blocks, mask = blk._unstack( partial(unstacker_func, value_columns=self.items[blk.mgr_locs.indexer]), new_columns, n_rows, fill_value ) new_blocks.extend(blocks) columns_mask.extend(mask) new_columns = new_columns[columns_mask] bm = BlockManager(new_blocks, [new_columns, new_index]) return bm class SingleBlockManager(BlockManager): """ manage a single block with """ ndim = 1 _is_consolidated = True _known_consolidated = True __slots__ = () def __init__(self, block, axis, do_integrity_check=False, fastpath=False): if isinstance(axis, list): if len(axis) != 1: raise ValueError("cannot create SingleBlockManager with more " "than 1 axis") axis = axis[0] # passed from constructor, single block, single axis if fastpath: self.axes = [axis] if isinstance(block, list): # empty block if len(block) == 0: block = [np.array([])] elif len(block) != 1: raise ValueError('Cannot create SingleBlockManager with ' 'more than 1 block') block = block[0] else: self.axes = [ensure_index(axis)] # create the block here if isinstance(block, list): # provide consolidation to the interleaved_dtype if len(block) > 1: dtype = _interleaved_dtype(block) block = [b.astype(dtype) for b in block] block = _consolidate(block) if len(block) != 1: raise ValueError('Cannot create SingleBlockManager with ' 'more than 1 block') block = block[0] if not isinstance(block, Block): block = make_block(block, placement=slice(0, len(axis)), ndim=1) self.blocks = [block] def _post_setstate(self): pass @property def _block(self): return self.blocks[0] @property def _values(self): return self._block.values @property def _blknos(self): """ compat with BlockManager """ return None @property def _blklocs(self): """ compat with BlockManager """ return None def get_slice(self, slobj, axis=0): if axis >= self.ndim: raise IndexError("Requested axis not found in manager") return self.__class__(self._block._slice(slobj), self.index[slobj], fastpath=True) @property def index(self): return self.axes[0] def convert(self, **kwargs): """ convert the whole block as one """ kwargs['by_item'] = False return self.apply('convert', **kwargs) @property def dtype(self): return self._block.dtype @property def array_dtype(self): return self._block.array_dtype @property def ftype(self): return self._block.ftype def get_dtype_counts(self): return {self.dtype.name: 1} def get_ftype_counts(self): return {self.ftype: 1} def get_dtypes(self): return np.array([self._block.dtype]) def get_ftypes(self): return np.array([self._block.ftype]) def external_values(self): return self._block.external_values() def internal_values(self): return self._block.internal_values() def formatting_values(self): """Return the internal values used by the DataFrame/SeriesFormatter""" return self._block.formatting_values() def get_values(self): """ return a dense type view """ return np.array(self._block.to_dense(), copy=False) @property def asobject(self): """ return a object dtype array. datetime/timedelta like values are boxed to Timestamp/Timedelta instances. 
""" return self._block.get_values(dtype=object) @property def _can_hold_na(self): return self._block._can_hold_na def is_consolidated(self): return True def _consolidate_check(self): pass def _consolidate_inplace(self): pass def delete(self, item): """ Delete single item from SingleBlockManager. Ensures that self.blocks doesn't become empty. """ loc = self.items.get_loc(item) self._block.delete(loc) self.axes[0] = self.axes[0].delete(loc) def fast_xs(self, loc): """ fast path for getting a cross-section return a view of the data """ return self._block.values[loc] def concat(self, to_concat, new_axis): """ Concatenate a list of SingleBlockManagers into a single SingleBlockManager. Used for pd.concat of Series objects with axis=0. Parameters ---------- to_concat : list of SingleBlockManagers new_axis : Index of the result Returns ------- SingleBlockManager """ non_empties = [x for x in to_concat if len(x) > 0] # check if all series are of the same block type: if len(non_empties) > 0: blocks = [obj.blocks[0] for obj in non_empties] if len({b.dtype for b in blocks}) == 1: new_block = blocks[0].concat_same_type(blocks) else: values = [x.values for x in blocks] values = _concat._concat_compat(values) new_block = make_block( values, placement=slice(0, len(values), 1)) else: values = [x._block.values for x in to_concat] values = _concat._concat_compat(values) new_block = make_block( values, placement=slice(0, len(values), 1)) mgr = SingleBlockManager(new_block, new_axis) return mgr # -------------------------------------------------------------------- # Constructor Helpers def create_block_manager_from_blocks(blocks, axes): try: if len(blocks) == 1 and not isinstance(blocks[0], Block): # if blocks[0] is of length 0, return empty blocks if not len(blocks[0]): blocks = [] else: # It's OK if a single block is passed as values, its placement # is basically "all items", but if there're many, don't bother # converting, it's an error anyway. blocks = [make_block(values=blocks[0], placement=slice(0, len(axes[0])))] mgr = BlockManager(blocks, axes) mgr._consolidate_inplace() return mgr except (ValueError) as e: blocks = [getattr(b, 'values', b) for b in blocks] tot_items = sum(b.shape[0] for b in blocks) construction_error(tot_items, blocks[0].shape[1:], axes, e) def create_block_manager_from_arrays(arrays, names, axes): try: blocks = form_blocks(arrays, names, axes) mgr = BlockManager(blocks, axes) mgr._consolidate_inplace() return mgr except ValueError as e: construction_error(len(arrays), arrays[0].shape, axes, e) def construction_error(tot_items, block_shape, axes, e=None): """ raise a helpful message about our construction """ passed = tuple(map(int, [tot_items] + list(block_shape))) # Correcting the user facing error message during dataframe construction if len(passed) <= 2: passed = passed[::-1] implied = tuple(len(ax) for ax in axes) # Correcting the user facing error message during dataframe construction if len(implied) <= 2: implied = implied[::-1] if passed == implied and e is not None: raise e if block_shape[0] == 0: raise ValueError("Empty data passed with indices specified.") raise ValueError("Shape of passed values is {0}, indices imply {1}".format( passed, implied)) # ----------------------------------------------------------------------- def form_blocks(arrays, names, axes): # put "leftover" items in float bucket, where else? # generalize? 
items_dict = defaultdict(list) extra_locs = [] names_idx = ensure_index(names) if names_idx.equals(axes[0]): names_indexer = np.arange(len(names_idx)) else: assert names_idx.intersection(axes[0]).is_unique names_indexer = names_idx.get_indexer_for(axes[0]) for i, name_idx in enumerate(names_indexer): if name_idx == -1: extra_locs.append(i) continue k = names[name_idx] v = arrays[name_idx] block_type = get_block_type(v) items_dict[block_type.__name__].append((i, k, v)) blocks = [] if len(items_dict['FloatBlock']): float_blocks = _multi_blockify(items_dict['FloatBlock']) blocks.extend(float_blocks) if len(items_dict['ComplexBlock']): complex_blocks = _multi_blockify(items_dict['ComplexBlock']) blocks.extend(complex_blocks) if len(items_dict['TimeDeltaBlock']): timedelta_blocks = _multi_blockify(items_dict['TimeDeltaBlock']) blocks.extend(timedelta_blocks) if len(items_dict['IntBlock']): int_blocks = _multi_blockify(items_dict['IntBlock']) blocks.extend(int_blocks) if len(items_dict['DatetimeBlock']): datetime_blocks = _simple_blockify(items_dict['DatetimeBlock'], _NS_DTYPE) blocks.extend(datetime_blocks) if len(items_dict['DatetimeTZBlock']): dttz_blocks = [make_block(array, klass=DatetimeTZBlock, placement=[i]) for i, _, array in items_dict['DatetimeTZBlock']] blocks.extend(dttz_blocks) if len(items_dict['BoolBlock']): bool_blocks = _simple_blockify(items_dict['BoolBlock'], np.bool_) blocks.extend(bool_blocks) if len(items_dict['ObjectBlock']) > 0: object_blocks = _simple_blockify(items_dict['ObjectBlock'], np.object_) blocks.extend(object_blocks) if len(items_dict['SparseBlock']) > 0: sparse_blocks = _sparse_blockify(items_dict['SparseBlock']) blocks.extend(sparse_blocks) if len(items_dict['CategoricalBlock']) > 0: cat_blocks = [make_block(array, klass=CategoricalBlock, placement=[i]) for i, _, array in items_dict['CategoricalBlock']] blocks.extend(cat_blocks) if len(items_dict['ExtensionBlock']): external_blocks = [ make_block(array, klass=ExtensionBlock, placement=[i]) for i, _, array in items_dict['ExtensionBlock'] ] blocks.extend(external_blocks) if len(items_dict['ObjectValuesExtensionBlock']): external_blocks = [ make_block(array, klass=ObjectValuesExtensionBlock, placement=[i]) for i, _, array in items_dict['ObjectValuesExtensionBlock'] ] blocks.extend(external_blocks) if len(extra_locs): shape = (len(extra_locs),) + tuple(len(x) for x in axes[1:]) # empty items -> dtype object block_values = np.empty(shape, dtype=object) block_values.fill(np.nan) na_block = make_block(block_values, placement=extra_locs) blocks.append(na_block) return blocks def _simple_blockify(tuples, dtype): """ return a single array of a block that has a single dtype; if dtype is not None, coerce to this dtype """ values, placement = _stack_arrays(tuples, dtype) # CHECK DTYPE? 
if dtype is not None and values.dtype != dtype: # pragma: no cover values = values.astype(dtype) block = make_block(values, placement=placement) return [block] def _multi_blockify(tuples, dtype=None): """ return an array of blocks that potentially have different dtypes """ # group by dtype grouper = itertools.groupby(tuples, lambda x: x[2].dtype) new_blocks = [] for dtype, tup_block in grouper: values, placement = _stack_arrays(list(tup_block), dtype) block = make_block(values, placement=placement) new_blocks.append(block) return new_blocks def _sparse_blockify(tuples, dtype=None): """ return an array of blocks that potentially have different dtypes (and are sparse) """ new_blocks = [] for i, names, array in tuples: array = _maybe_to_sparse(array) block = make_block(array, placement=[i]) new_blocks.append(block) return new_blocks def _stack_arrays(tuples, dtype): # fml def _asarray_compat(x): if isinstance(x, ABCSeries): return x._values else: return np.asarray(x) def _shape_compat(x): if isinstance(x, ABCSeries): return len(x), else: return x.shape placement, names, arrays = zip(*tuples) first = arrays[0] shape = (len(arrays),) + _shape_compat(first) stacked = np.empty(shape, dtype=dtype) for i, arr in enumerate(arrays): stacked[i] = _asarray_compat(arr) return stacked, placement def _interleaved_dtype(blocks): # type: (List[Block]) -> Optional[Union[np.dtype, ExtensionDtype]] """Find the common dtype for `blocks`. Parameters ---------- blocks : List[Block] Returns ------- dtype : Optional[Union[np.dtype, ExtensionDtype]] None is returned when `blocks` is empty. """ if not len(blocks): return None return find_common_type([b.dtype for b in blocks]) def _consolidate(blocks): """ Merge blocks having same dtype, exclude non-consolidating blocks """ # sort by _can_consolidate, dtype gkey = lambda x: x._consolidate_key grouper = itertools.groupby(sorted(blocks, key=gkey), gkey) new_blocks = [] for (_can_consolidate, dtype), group_blocks in grouper: merged_blocks = _merge_blocks(list(group_blocks), dtype=dtype, _can_consolidate=_can_consolidate) new_blocks = _extend_blocks(merged_blocks, new_blocks) return new_blocks def _compare_or_regex_search(a, b, regex=False): """ Compare two array_like inputs of the same shape or two scalar values Calls operator.eq or re.search, depending on regex argument. If regex is True, perform an element-wise regex matching. 
Parameters ---------- a : array_like or scalar b : array_like or scalar regex : bool, default False Returns ------- mask : array_like of bool """ if not regex: op = lambda x: operator.eq(x, b) else: op = np.vectorize(lambda x: bool(re.search(b, x)) if isinstance(x, str) else False) is_a_array = isinstance(a, np.ndarray) is_b_array = isinstance(b, np.ndarray) # numpy deprecation warning to have i8 vs integer comparisons if is_datetimelike_v_numeric(a, b): result = False # numpy deprecation warning if comparing numeric vs string-like elif is_numeric_v_string_like(a, b): result = False else: result = op(a) if is_scalar(result) and (is_a_array or is_b_array): type_names = [type(a).__name__, type(b).__name__] if is_a_array: type_names[0] = 'ndarray(dtype={dtype})'.format(dtype=a.dtype) if is_b_array: type_names[1] = 'ndarray(dtype={dtype})'.format(dtype=b.dtype) raise TypeError( "Cannot compare types {a!r} and {b!r}".format(a=type_names[0], b=type_names[1])) return result def _concat_indexes(indexes): return indexes[0].append(indexes[1:]) def items_overlap_with_suffix(left, lsuffix, right, rsuffix): """ If two indices overlap, add suffixes to overlapping entries. If corresponding suffix is empty, the entry is simply converted to string. """ to_rename = left.intersection(right) if len(to_rename) == 0: return left, right else: if not lsuffix and not rsuffix: raise ValueError('columns overlap but no suffix specified: ' '{rename}'.format(rename=to_rename)) def renamer(x, suffix): """Rename the left and right indices. If there is overlap, and suffix is not None, add suffix, otherwise, leave it as-is. Parameters ---------- x : original column name suffix : str or None Returns ------- x : renamed column name """ if x in to_rename and suffix is not None: return '{x}{suffix}'.format(x=x, suffix=suffix) return x lrenamer = partial(renamer, suffix=lsuffix) rrenamer = partial(renamer, suffix=rsuffix) return (_transform_index(left, lrenamer), _transform_index(right, rrenamer)) def _transform_index(index, func, level=None): """ Apply function to all values found in index. This includes transforming multiindex entries separately. Only apply function to one level of the MultiIndex if level is specified. """ if isinstance(index, MultiIndex): if level is not None: items = [tuple(func(y) if i == level else y for i, y in enumerate(x)) for x in index] else: items = [tuple(func(y) for y in x) for x in index] return MultiIndex.from_tuples(items, names=index.names) else: items = [func(x) for x in index] return Index(items, name=index.name, tupleize_cols=False) def _fast_count_smallints(arr): """Faster version of set(arr) for sequences of small numbers.""" counts = np.bincount(arr.astype(np.int_)) nz = counts.nonzero()[0] return np.c_[nz, counts[nz]] def _preprocess_slice_or_indexer(slice_or_indexer, length, allow_fill): if isinstance(slice_or_indexer, slice): return ('slice', slice_or_indexer, libinternals.slice_len(slice_or_indexer, length)) elif (isinstance(slice_or_indexer, np.ndarray) and slice_or_indexer.dtype == np.bool_): return 'mask', slice_or_indexer, slice_or_indexer.sum() else: indexer = np.asanyarray(slice_or_indexer, dtype=np.int64) if not allow_fill: indexer = maybe_convert_indices(indexer, length) return 'fancy', indexer, len(indexer) def concatenate_block_managers(mgrs_indexers, axes, concat_axis, copy): """ Concatenate block managers into one. 
Parameters ---------- mgrs_indexers : list of (BlockManager, {axis: indexer,...}) tuples axes : list of Index concat_axis : int copy : bool """ concat_plans = [get_mgr_concatenation_plan(mgr, indexers) for mgr, indexers in mgrs_indexers] concat_plan = combine_concat_plans(concat_plans, concat_axis) blocks = [] for placement, join_units in concat_plan: if len(join_units) == 1 and not join_units[0].indexers: b = join_units[0].block values = b.values if copy: values = values.copy() elif not copy: values = values.view() b = b.make_block_same_class(values, placement=placement) elif is_uniform_join_units(join_units): b = join_units[0].block.concat_same_type( [ju.block for ju in join_units], placement=placement) else: b = make_block( concatenate_join_units(join_units, concat_axis, copy=copy), placement=placement) blocks.append(b) return BlockManager(blocks, axes)
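A minimal sketch of how the BlockManager machinery above surfaces through the public API, assuming a pandas of roughly the 0.24/0.25 era in which the manager is reachable through the private DataFrame._data attribute; the attribute name and the exact block counts in the comments are assumptions drawn from the code above, not a documented contract.

import numpy as np
import pandas as pd

# Three columns with three distinct dtypes: form_blocks() should build one
# consolidated block per dtype family (int64, float64, object).
df = pd.DataFrame({'a': [1, 2, 3],
                   'b': [1.5, 2.5, 3.5],
                   'c': ['x', 'y', 'z']})

mgr = df._data                     # the BlockManager (private, era-specific)
print(mgr.nblocks)                 # expected: 3, one block per dtype
print(mgr.get_dtype_counts())      # column counts keyed by dtype name
print(mgr.is_consolidated())       # expected: True

# Adding a column goes through BlockManager.insert(); the new int64 block is
# appended as-is, so two int64 blocks can coexist until consolidation merges
# blocks of the same dtype again (see _consolidate / _merge_blocks above).
df['d'] = np.arange(3)
print(df._data.nblocks)                 # typically 4 right after the insert
print(df._data.consolidate().nblocks)   # back to 3 after consolidation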
# -*- coding: utf-8 -*- from datetime import datetime import re import numpy as np import pytest from pandas._libs.tslibs import Timestamp from pandas.compat import PY2, range import pandas as pd from pandas import Float64Index, Index, Int64Index, Series, UInt64Index from pandas.api.types import pandas_dtype from pandas.tests.indexes.common import Base import pandas.util.testing as tm class Numeric(Base): def test_can_hold_identifiers(self): idx = self.create_index() key = idx[0] assert idx._can_hold_identifiers_and_holds_name(key) is False def test_numeric_compat(self): pass # override Base method def test_explicit_conversions(self): # GH 8608 # add/sub are overridden explicitly for Float/Int Index idx = self._holder(np.arange(5, dtype='int64')) # float conversions arr = np.arange(5, dtype='int64') * 3.2 expected = Float64Index(arr) fidx = idx * 3.2 tm.assert_index_equal(fidx, expected) fidx = 3.2 * idx tm.assert_index_equal(fidx, expected) # interops with numpy arrays expected = Float64Index(arr) a = np.zeros(5, dtype='float64') result = fidx - a tm.assert_index_equal(result, expected) expected = Float64Index(-arr) a = np.zeros(5, dtype='float64') result = a - fidx tm.assert_index_equal(result, expected) def test_index_groupby(self): int_idx = Index(range(6)) float_idx = Index(np.arange(0, 0.6, 0.1)) obj_idx = Index('A B C D E F'.split()) dt_idx = pd.date_range('2013-01-01', freq='M', periods=6) for idx in [int_idx, float_idx, obj_idx, dt_idx]: to_groupby = np.array([1, 2, np.nan, np.nan, 2, 1]) tm.assert_dict_equal(idx.groupby(to_groupby), {1.0: idx[[0, 5]], 2.0: idx[[1, 4]]}) to_groupby = Index([datetime(2011, 11, 1), datetime(2011, 12, 1), pd.NaT, pd.NaT, datetime(2011, 12, 1), datetime(2011, 11, 1)], tz='UTC').values ex_keys = [Timestamp('2011-11-01'), Timestamp('2011-12-01')] expected = {ex_keys[0]: idx[[0, 5]], ex_keys[1]: idx[[1, 4]]} tm.assert_dict_equal(idx.groupby(to_groupby), expected) @pytest.mark.parametrize('klass', [list, tuple, np.array, Series]) def test_where(self, klass): i = self.create_index() cond = [True] * len(i) expected = i result = i.where(klass(cond)) cond = [False] + [True] * (len(i) - 1) expected = Float64Index([i._na_value] + i[1:].tolist()) result = i.where(klass(cond)) tm.assert_index_equal(result, expected) def test_insert(self): # GH 18295 (test missing) expected = Float64Index([0, np.nan, 1, 2, 3, 4]) for na in (np.nan, pd.NaT, None): result = self.create_index().insert(1, na) tm.assert_index_equal(result, expected) class TestFloat64Index(Numeric): _holder = Float64Index def setup_method(self, method): self.indices = dict(mixed=Float64Index([1.5, 2, 3, 4, 5]), float=Float64Index(np.arange(5) * 2.5), mixed_dec=Float64Index([5, 4, 3, 2, 1.5]), float_dec=Float64Index(np.arange(4, -1, -1) * 2.5)) self.setup_indices() def create_index(self): return Float64Index(np.arange(5, dtype='float64')) def test_repr_roundtrip(self): for ind in (self.mixed, self.float): tm.assert_index_equal(eval(repr(ind)), ind) def check_is_index(self, i): assert isinstance(i, Index) assert not isinstance(i, Float64Index) def check_coerce(self, a, b, is_float_index=True): assert a.equals(b) tm.assert_index_equal(a, b, exact=False) if is_float_index: assert isinstance(b, Float64Index) else: self.check_is_index(b) def test_constructor(self): # explicit construction index = Float64Index([1, 2, 3, 4, 5]) assert isinstance(index, Float64Index) expected = np.array([1, 2, 3, 4, 5], dtype='float64') tm.assert_numpy_array_equal(index.values, expected) index = Float64Index(np.array([1, 2, 3, 
4, 5])) assert isinstance(index, Float64Index) index = Float64Index([1., 2, 3, 4, 5]) assert isinstance(index, Float64Index) index = Float64Index(np.array([1., 2, 3, 4, 5])) assert isinstance(index, Float64Index) assert index.dtype == float index = Float64Index(np.array([1., 2, 3, 4, 5]), dtype=np.float32) assert isinstance(index, Float64Index) assert index.dtype == np.float64 index = Float64Index(np.array([1, 2, 3, 4, 5]), dtype=np.float32) assert isinstance(index, Float64Index) assert index.dtype == np.float64 # nan handling result = Float64Index([np.nan, np.nan]) assert pd.isna(result.values).all() result = Float64Index(np.array([np.nan])) assert pd.isna(result.values).all() result = Index(np.array([np.nan])) assert pd.isna(result.values).all() @pytest.mark.skipif(PY2, reason="pytest.raises match regex fails") def test_constructor_invalid(self): # invalid msg = (r"Float64Index\(\.\.\.\) must be called with a collection of" r" some kind, 0\.0 was passed") with pytest.raises(TypeError, match=msg): Float64Index(0.) msg = ("String dtype not supported, you may need to explicitly cast to" " a numeric type") with pytest.raises(TypeError, match=msg): Float64Index(['a', 'b', 0.]) msg = (r"float\(\) argument must be a string or a number, not" " 'Timestamp'") with pytest.raises(TypeError, match=msg): Float64Index([Timestamp('20130101')]) def test_constructor_coerce(self): self.check_coerce(self.mixed, Index([1.5, 2, 3, 4, 5])) self.check_coerce(self.float, Index(np.arange(5) * 2.5)) self.check_coerce(self.float, Index(np.array( np.arange(5) * 2.5, dtype=object))) def test_constructor_explicit(self): # these don't auto convert self.check_coerce(self.float, Index((np.arange(5) * 2.5), dtype=object), is_float_index=False) self.check_coerce(self.mixed, Index( [1.5, 2, 3, 4, 5], dtype=object), is_float_index=False) def test_astype(self): result = self.float.astype(object) assert result.equals(self.float) assert self.float.equals(result) self.check_is_index(result) i = self.mixed.copy() i.name = 'foo' result = i.astype(object) assert result.equals(i) assert i.equals(result) self.check_is_index(result) # GH 12881 # a float astype int for dtype in ['int16', 'int32', 'int64']: i = Float64Index([0, 1, 2]) result = i.astype(dtype) expected = Int64Index([0, 1, 2]) tm.assert_index_equal(result, expected) i = Float64Index([0, 1.1, 2]) result = i.astype(dtype) expected = Int64Index([0, 1, 2]) tm.assert_index_equal(result, expected) for dtype in ['float32', 'float64']: i = Float64Index([0, 1, 2]) result = i.astype(dtype) expected = i tm.assert_index_equal(result, expected) i = Float64Index([0, 1.1, 2]) result = i.astype(dtype) expected = Index(i.values.astype(dtype)) tm.assert_index_equal(result, expected) # invalid for dtype in ['M8[ns]', 'm8[ns]']: msg = ("Cannot convert Float64Index to dtype {}; integer values" " are required for conversion").format(pandas_dtype(dtype)) with pytest.raises(TypeError, match=re.escape(msg)): i.astype(dtype) # GH 13149 for dtype in ['int16', 'int32', 'int64']: i = Float64Index([0, 1.1, np.NAN]) msg = "Cannot convert NA to integer" with pytest.raises(ValueError, match=msg): i.astype(dtype) def test_type_coercion_fail(self, any_int_dtype): # see gh-15832 msg = "Trying to coerce float values to integers" with pytest.raises(ValueError, match=msg): Index([1, 2, 3.5], dtype=any_int_dtype) def test_type_coercion_valid(self, float_dtype): # There is no Float32Index, so we always # generate Float64Index. 
i = Index([1, 2, 3.5], dtype=float_dtype) tm.assert_index_equal(i, Index([1, 2, 3.5])) def test_equals_numeric(self): i = Float64Index([1.0, 2.0]) assert i.equals(i) assert i.identical(i) i2 = Float64Index([1.0, 2.0]) assert i.equals(i2) i = Float64Index([1.0, np.nan]) assert i.equals(i) assert i.identical(i) i2 = Float64Index([1.0, np.nan]) assert i.equals(i2) def test_get_indexer(self): idx = Float64Index([0.0, 1.0, 2.0]) tm.assert_numpy_array_equal(idx.get_indexer(idx), np.array([0, 1, 2], dtype=np.intp)) target = [-0.1, 0.5, 1.1] tm.assert_numpy_array_equal(idx.get_indexer(target, 'pad'), np.array([-1, 0, 1], dtype=np.intp)) tm.assert_numpy_array_equal(idx.get_indexer(target, 'backfill'), np.array([0, 1, 2], dtype=np.intp)) tm.assert_numpy_array_equal(idx.get_indexer(target, 'nearest'), np.array([0, 1, 1], dtype=np.intp)) def test_get_loc(self): idx = Float64Index([0.0, 1.0, 2.0]) for method in [None, 'pad', 'backfill', 'nearest']: assert idx.get_loc(1, method) == 1 if method is not None: assert idx.get_loc(1, method, tolerance=0) == 1 for method, loc in [('pad', 1), ('backfill', 2), ('nearest', 1)]: assert idx.get_loc(1.1, method) == loc assert idx.get_loc(1.1, method, tolerance=0.9) == loc with pytest.raises(KeyError, match="^'foo'$"): idx.get_loc('foo') with pytest.raises(KeyError, match=r"^1\.5$"): idx.get_loc(1.5) with pytest.raises(KeyError, match=r"^1\.5$"): idx.get_loc(1.5, method='pad', tolerance=0.1) with pytest.raises(KeyError, match="^True$"): idx.get_loc(True) with pytest.raises(KeyError, match="^False$"): idx.get_loc(False) with pytest.raises(ValueError, match='must be numeric'): idx.get_loc(1.4, method='nearest', tolerance='foo') with pytest.raises(ValueError, match='must contain numeric elements'): idx.get_loc(1.4, method='nearest', tolerance=np.array(['foo'])) with pytest.raises( ValueError, match='tolerance size must match target index size'): idx.get_loc(1.4, method='nearest', tolerance=np.array([1, 2])) def test_get_loc_na(self): idx = Float64Index([np.nan, 1, 2]) assert idx.get_loc(1) == 1 assert idx.get_loc(np.nan) == 0 idx = Float64Index([np.nan, 1, np.nan]) assert idx.get_loc(1) == 1 # representable by slice [0:2:2] # pytest.raises(KeyError, idx.slice_locs, np.nan) sliced = idx.slice_locs(np.nan) assert isinstance(sliced, tuple) assert sliced == (0, 3) # not representable by slice idx = Float64Index([np.nan, 1, np.nan, np.nan]) assert idx.get_loc(1) == 1 msg = "'Cannot get left slice bound for non-unique label: nan" with pytest.raises(KeyError, match=msg): idx.slice_locs(np.nan) def test_get_loc_missing_nan(self): # GH 8569 idx = Float64Index([1, 2]) assert idx.get_loc(1) == 0 with pytest.raises(KeyError, match=r"^3\.0$"): idx.get_loc(3) with pytest.raises(KeyError, match="^nan$"): idx.get_loc(np.nan) with pytest.raises(KeyError, match=r"^\[nan\]$"): idx.get_loc([np.nan]) def test_contains_nans(self): i = Float64Index([1.0, 2.0, np.nan]) assert np.nan in i def test_contains_not_nans(self): i = Float64Index([1.0, 2.0, np.nan]) assert 1.0 in i def test_doesnt_contain_all_the_things(self): i = Float64Index([np.nan]) assert not i.isin([0]).item() assert not i.isin([1]).item() assert i.isin([np.nan]).item() def test_nan_multiple_containment(self): i = Float64Index([1.0, np.nan]) tm.assert_numpy_array_equal(i.isin([1.0]), np.array([True, False])) tm.assert_numpy_array_equal(i.isin([2.0, np.pi]), np.array([False, False])) tm.assert_numpy_array_equal(i.isin([np.nan]), np.array([False, True])) tm.assert_numpy_array_equal(i.isin([1.0, np.nan]), np.array([True, True])) i = 
Float64Index([1.0, 2.0]) tm.assert_numpy_array_equal(i.isin([np.nan]), np.array([False, False])) def test_astype_from_object(self): index = Index([1.0, np.nan, 0.2], dtype='object') result = index.astype(float) expected = Float64Index([1.0, np.nan, 0.2]) assert result.dtype == expected.dtype tm.assert_index_equal(result, expected) def test_fillna_float64(self): # GH 11343 idx = Index([1.0, np.nan, 3.0], dtype=float, name='x') # can't downcast exp = Index([1.0, 0.1, 3.0], name='x') tm.assert_index_equal(idx.fillna(0.1), exp) # downcast exp = Float64Index([1.0, 2.0, 3.0], name='x') tm.assert_index_equal(idx.fillna(2), exp) # object exp = Index([1.0, 'obj', 3.0], name='x') tm.assert_index_equal(idx.fillna('obj'), exp) def test_take_fill_value(self): # GH 12631 idx = pd.Float64Index([1., 2., 3.], name='xxx') result = idx.take(np.array([1, 0, -1])) expected = pd.Float64Index([2., 1., 3.], name='xxx') tm.assert_index_equal(result, expected) # fill_value result = idx.take(np.array([1, 0, -1]), fill_value=True) expected = pd.Float64Index([2., 1., np.nan], name='xxx') tm.assert_index_equal(result, expected) # allow_fill=False result = idx.take(np.array([1, 0, -1]), allow_fill=False, fill_value=True) expected = pd.Float64Index([2., 1., 3.], name='xxx') tm.assert_index_equal(result, expected) msg = ('When allow_fill=True and fill_value is not None, ' 'all indices must be >= -1') with pytest.raises(ValueError, match=msg): idx.take(np.array([1, 0, -2]), fill_value=True) with pytest.raises(ValueError, match=msg): idx.take(np.array([1, 0, -5]), fill_value=True) with pytest.raises(IndexError): idx.take(np.array([1, -5])) class NumericInt(Numeric): def test_view(self): i = self._holder([], name='Foo') i_view = i.view() assert i_view.name == 'Foo' i_view = i.view(self._dtype) tm.assert_index_equal(i, self._holder(i_view, name='Foo')) i_view = i.view(self._holder) tm.assert_index_equal(i, self._holder(i_view, name='Foo')) def test_is_monotonic(self): assert self.index.is_monotonic is True assert self.index.is_monotonic_increasing is True assert self.index._is_strictly_monotonic_increasing is True assert self.index.is_monotonic_decreasing is False assert self.index._is_strictly_monotonic_decreasing is False index = self._holder([4, 3, 2, 1]) assert index.is_monotonic is False assert index._is_strictly_monotonic_increasing is False assert index._is_strictly_monotonic_decreasing is True index = self._holder([1]) assert index.is_monotonic is True assert index.is_monotonic_increasing is True assert index.is_monotonic_decreasing is True assert index._is_strictly_monotonic_increasing is True assert index._is_strictly_monotonic_decreasing is True def test_is_strictly_monotonic(self): index = self._holder([1, 1, 2, 3]) assert index.is_monotonic_increasing is True assert index._is_strictly_monotonic_increasing is False index = self._holder([3, 2, 1, 1]) assert index.is_monotonic_decreasing is True assert index._is_strictly_monotonic_decreasing is False index = self._holder([1, 1]) assert index.is_monotonic_increasing assert index.is_monotonic_decreasing assert not index._is_strictly_monotonic_increasing assert not index._is_strictly_monotonic_decreasing def test_logical_compat(self): idx = self.create_index() assert idx.all() == idx.values.all() assert idx.any() == idx.values.any() def test_identical(self): i = Index(self.index.copy()) assert i.identical(self.index) same_values_different_type = Index(i, dtype=object) assert not i.identical(same_values_different_type) i = self.index.copy(dtype=object) i = 
i.rename('foo') same_values = Index(i, dtype=object) assert same_values.identical(i) assert not i.identical(self.index) assert Index(same_values, name='foo', dtype=object).identical(i) assert not self.index.copy(dtype=object).identical( self.index.copy(dtype=self._dtype)) def test_join_non_unique(self): left = Index([4, 4, 3, 3]) joined, lidx, ridx = left.join(left, return_indexers=True) exp_joined = Index([3, 3, 3, 3, 4, 4, 4, 4]) tm.assert_index_equal(joined, exp_joined) exp_lidx = np.array([2, 2, 3, 3, 0, 0, 1, 1], dtype=np.intp) tm.assert_numpy_array_equal(lidx, exp_lidx) exp_ridx = np.array([2, 3, 2, 3, 0, 1, 0, 1], dtype=np.intp) tm.assert_numpy_array_equal(ridx, exp_ridx) @pytest.mark.parametrize('kind', ['outer', 'inner', 'left', 'right']) def test_join_self(self, kind): joined = self.index.join(self.index, how=kind) assert self.index is joined def test_union_noncomparable(self): from datetime import datetime, timedelta # corner case, non-Int64Index now = datetime.now() other = Index([now + timedelta(i) for i in range(4)], dtype=object) result = self.index.union(other) expected = Index(np.concatenate((self.index, other))) tm.assert_index_equal(result, expected) result = other.union(self.index) expected = Index(np.concatenate((other, self.index))) tm.assert_index_equal(result, expected) def test_cant_or_shouldnt_cast(self): msg = ("String dtype not supported, you may need to explicitly cast to" " a numeric type") # can't data = ['foo', 'bar', 'baz'] with pytest.raises(TypeError, match=msg): self._holder(data) # shouldn't data = ['0', '1', '2'] with pytest.raises(TypeError, match=msg): self._holder(data) def test_view_index(self): self.index.view(Index) def test_prevent_casting(self): result = self.index.astype('O') assert result.dtype == np.object_ def test_take_preserve_name(self): index = self._holder([1, 2, 3, 4], name='foo') taken = index.take([3, 0, 1]) assert index.name == taken.name def test_take_fill_value(self): # see gh-12631 idx = self._holder([1, 2, 3], name='xxx') result = idx.take(np.array([1, 0, -1])) expected = self._holder([2, 1, 3], name='xxx') tm.assert_index_equal(result, expected) name = self._holder.__name__ msg = ("Unable to fill values because " "{name} cannot contain NA").format(name=name) # fill_value=True with pytest.raises(ValueError, match=msg): idx.take(np.array([1, 0, -1]), fill_value=True) # allow_fill=False result = idx.take(np.array([1, 0, -1]), allow_fill=False, fill_value=True) expected = self._holder([2, 1, 3], name='xxx') tm.assert_index_equal(result, expected) with pytest.raises(ValueError, match=msg): idx.take(np.array([1, 0, -2]), fill_value=True) with pytest.raises(ValueError, match=msg): idx.take(np.array([1, 0, -5]), fill_value=True) with pytest.raises(IndexError): idx.take(np.array([1, -5])) def test_slice_keep_name(self): idx = self._holder([1, 2], name='asdf') assert idx.name == idx[1:].name class TestInt64Index(NumericInt): _dtype = 'int64' _holder = Int64Index def setup_method(self, method): self.indices = dict(index=Int64Index(np.arange(0, 20, 2)), index_dec=Int64Index(np.arange(19, -1, -1))) self.setup_indices() def create_index(self): return Int64Index(np.arange(5, dtype='int64')) def test_constructor(self): # pass list, coerce fine index = Int64Index([-5, 0, 1, 2]) expected = Index([-5, 0, 1, 2], dtype=np.int64) tm.assert_index_equal(index, expected) # from iterable index = Int64Index(iter([-5, 0, 1, 2])) tm.assert_index_equal(index, expected) # scalar raise Exception msg = (r"Int64Index\(\.\.\.\) must be called with a collection 
of some" " kind, 5 was passed") with pytest.raises(TypeError, match=msg): Int64Index(5) # copy arr = self.index.values new_index = Int64Index(arr, copy=True) tm.assert_index_equal(new_index, self.index) val = arr[0] + 3000 # this should not change index arr[0] = val assert new_index[0] != val # interpret list-like expected = Int64Index([5, 0]) for cls in [Index, Int64Index]: for idx in [cls([5, 0], dtype='int64'), cls(np.array([5, 0]), dtype='int64'), cls(Series([5, 0]), dtype='int64')]: tm.assert_index_equal(idx, expected) def test_constructor_corner(self): arr = np.array([1, 2, 3, 4], dtype=object) index = Int64Index(arr) assert index.values.dtype == np.int64 tm.assert_index_equal(index, Index(arr)) # preventing casting arr = np.array([1, '2', 3, '4'], dtype=object) with pytest.raises(TypeError, match='casting'): Int64Index(arr) arr_with_floats = [0, 2, 3, 4, 5, 1.25, 3, -1] with pytest.raises(TypeError, match='casting'): Int64Index(arr_with_floats) def test_constructor_coercion_signed_to_unsigned(self, uint_dtype): # see gh-15832 msg = "Trying to coerce negative values to unsigned integers" with pytest.raises(OverflowError, match=msg): Index([-1], dtype=uint_dtype) def test_constructor_unwraps_index(self): idx = pd.Index([1, 2]) result = pd.Int64Index(idx) expected = np.array([1, 2], dtype='int64') tm.assert_numpy_array_equal(result._data, expected) def test_coerce_list(self): # coerce things arr = Index([1, 2, 3, 4]) assert isinstance(arr, Int64Index) # but not if explicit dtype passed arr = Index([1, 2, 3, 4], dtype=object) assert isinstance(arr, Index) def test_get_indexer(self): target = Int64Index(np.arange(10)) indexer = self.index.get_indexer(target) expected = np.array([0, -1, 1, -1, 2, -1, 3, -1, 4, -1], dtype=np.intp) tm.assert_numpy_array_equal(indexer, expected) target = Int64Index(np.arange(10)) indexer = self.index.get_indexer(target, method='pad') expected = np.array([0, 0, 1, 1, 2, 2, 3, 3, 4, 4], dtype=np.intp) tm.assert_numpy_array_equal(indexer, expected) target = Int64Index(np.arange(10)) indexer = self.index.get_indexer(target, method='backfill') expected = np.array([0, 1, 1, 2, 2, 3, 3, 4, 4, 5], dtype=np.intp) tm.assert_numpy_array_equal(indexer, expected) def test_intersection(self): other = Index([1, 2, 3, 4, 5]) result = self.index.intersection(other) expected = Index(np.sort(np.intersect1d(self.index.values, other.values))) tm.assert_index_equal(result, expected) result = other.intersection(self.index) expected = Index(np.sort(np.asarray(np.intersect1d(self.index.values, other.values)))) tm.assert_index_equal(result, expected) def test_join_inner(self): other = Int64Index([7, 12, 25, 1, 2, 5]) other_mono = Int64Index([1, 2, 5, 7, 12, 25]) # not monotonic res, lidx, ridx = self.index.join(other, how='inner', return_indexers=True) # no guarantee of sortedness, so sort for comparison purposes ind = res.argsort() res = res.take(ind) lidx = lidx.take(ind) ridx = ridx.take(ind) eres = Int64Index([2, 12]) elidx = np.array([1, 6], dtype=np.intp) eridx = np.array([4, 1], dtype=np.intp) assert isinstance(res, Int64Index) tm.assert_index_equal(res, eres) tm.assert_numpy_array_equal(lidx, elidx) tm.assert_numpy_array_equal(ridx, eridx) # monotonic res, lidx, ridx = self.index.join(other_mono, how='inner', return_indexers=True) res2 = self.index.intersection(other_mono) tm.assert_index_equal(res, res2) elidx = np.array([1, 6], dtype=np.intp) eridx = np.array([1, 4], dtype=np.intp) assert isinstance(res, Int64Index) tm.assert_index_equal(res, eres) 
tm.assert_numpy_array_equal(lidx, elidx) tm.assert_numpy_array_equal(ridx, eridx) def test_join_left(self): other = Int64Index([7, 12, 25, 1, 2, 5]) other_mono = Int64Index([1, 2, 5, 7, 12, 25]) # not monotonic res, lidx, ridx = self.index.join(other, how='left', return_indexers=True) eres = self.index eridx = np.array([-1, 4, -1, -1, -1, -1, 1, -1, -1, -1], dtype=np.intp) assert isinstance(res, Int64Index) tm.assert_index_equal(res, eres) assert lidx is None tm.assert_numpy_array_equal(ridx, eridx) # monotonic res, lidx, ridx = self.index.join(other_mono, how='left', return_indexers=True) eridx = np.array([-1, 1, -1, -1, -1, -1, 4, -1, -1, -1], dtype=np.intp) assert isinstance(res, Int64Index) tm.assert_index_equal(res, eres) assert lidx is None tm.assert_numpy_array_equal(ridx, eridx) # non-unique idx = Index([1, 1, 2, 5]) idx2 = Index([1, 2, 5, 7, 9]) res, lidx, ridx = idx2.join(idx, how='left', return_indexers=True) eres = Index([1, 1, 2, 5, 7, 9]) # 1 is in idx2, so it should be x2 eridx = np.array([0, 1, 2, 3, -1, -1], dtype=np.intp) elidx = np.array([0, 0, 1, 2, 3, 4], dtype=np.intp) tm.assert_index_equal(res, eres) tm.assert_numpy_array_equal(lidx, elidx) tm.assert_numpy_array_equal(ridx, eridx) def test_join_right(self): other = Int64Index([7, 12, 25, 1, 2, 5]) other_mono = Int64Index([1, 2, 5, 7, 12, 25]) # not monotonic res, lidx, ridx = self.index.join(other, how='right', return_indexers=True) eres = other elidx = np.array([-1, 6, -1, -1, 1, -1], dtype=np.intp) assert isinstance(other, Int64Index) tm.assert_index_equal(res, eres) tm.assert_numpy_array_equal(lidx, elidx) assert ridx is None # monotonic res, lidx, ridx = self.index.join(other_mono, how='right', return_indexers=True) eres = other_mono elidx = np.array([-1, 1, -1, -1, 6, -1], dtype=np.intp) assert isinstance(other, Int64Index) tm.assert_index_equal(res, eres) tm.assert_numpy_array_equal(lidx, elidx) assert ridx is None # non-unique idx = Index([1, 1, 2, 5]) idx2 = Index([1, 2, 5, 7, 9]) res, lidx, ridx = idx.join(idx2, how='right', return_indexers=True) eres = Index([1, 1, 2, 5, 7, 9]) # 1 is in idx2, so it should be x2 elidx = np.array([0, 1, 2, 3, -1, -1], dtype=np.intp) eridx = np.array([0, 0, 1, 2, 3, 4], dtype=np.intp) tm.assert_index_equal(res, eres) tm.assert_numpy_array_equal(lidx, elidx) tm.assert_numpy_array_equal(ridx, eridx) def test_join_non_int_index(self): other = Index([3, 6, 7, 8, 10], dtype=object) outer = self.index.join(other, how='outer') outer2 = other.join(self.index, how='outer') expected = Index([0, 2, 3, 4, 6, 7, 8, 10, 12, 14, 16, 18]) tm.assert_index_equal(outer, outer2) tm.assert_index_equal(outer, expected) inner = self.index.join(other, how='inner') inner2 = other.join(self.index, how='inner') expected = Index([6, 8, 10]) tm.assert_index_equal(inner, inner2) tm.assert_index_equal(inner, expected) left = self.index.join(other, how='left') tm.assert_index_equal(left, self.index.astype(object)) left2 = other.join(self.index, how='left') tm.assert_index_equal(left2, other) right = self.index.join(other, how='right') tm.assert_index_equal(right, other) right2 = other.join(self.index, how='right') tm.assert_index_equal(right2, self.index.astype(object)) def test_join_outer(self): other = Int64Index([7, 12, 25, 1, 2, 5]) other_mono = Int64Index([1, 2, 5, 7, 12, 25]) # not monotonic # guarantee of sortedness res, lidx, ridx = self.index.join(other, how='outer', return_indexers=True) noidx_res = self.index.join(other, how='outer') tm.assert_index_equal(res, noidx_res) eres = Int64Index([0, 1, 
2, 4, 5, 6, 7, 8, 10, 12, 14, 16, 18, 25]) elidx = np.array([0, -1, 1, 2, -1, 3, -1, 4, 5, 6, 7, 8, 9, -1], dtype=np.intp) eridx = np.array([-1, 3, 4, -1, 5, -1, 0, -1, -1, 1, -1, -1, -1, 2], dtype=np.intp) assert isinstance(res, Int64Index) tm.assert_index_equal(res, eres) tm.assert_numpy_array_equal(lidx, elidx) tm.assert_numpy_array_equal(ridx, eridx) # monotonic res, lidx, ridx = self.index.join(other_mono, how='outer', return_indexers=True) noidx_res = self.index.join(other_mono, how='outer') tm.assert_index_equal(res, noidx_res) elidx = np.array([0, -1, 1, 2, -1, 3, -1, 4, 5, 6, 7, 8, 9, -1], dtype=np.intp) eridx = np.array([-1, 0, 1, -1, 2, -1, 3, -1, -1, 4, -1, -1, -1, 5], dtype=np.intp) assert isinstance(res, Int64Index) tm.assert_index_equal(res, eres) tm.assert_numpy_array_equal(lidx, elidx) tm.assert_numpy_array_equal(ridx, eridx) class TestUInt64Index(NumericInt): _dtype = 'uint64' _holder = UInt64Index def setup_method(self, method): vals = [2**63, 2**63 + 10, 2**63 + 15, 2**63 + 20, 2**63 + 25] self.indices = dict(index=UInt64Index(vals), index_dec=UInt64Index(reversed(vals))) self.setup_indices() def create_index(self): return UInt64Index(np.arange(5, dtype='uint64')) def test_constructor(self): idx = UInt64Index([1, 2, 3]) res = Index([1, 2, 3], dtype=np.uint64) tm.assert_index_equal(res, idx) idx = UInt64Index([1, 2**63]) res = Index([1, 2**63], dtype=np.uint64) tm.assert_index_equal(res, idx) idx = UInt64Index([1, 2**63]) res = Index([1, 2**63]) tm.assert_index_equal(res, idx) idx = Index([-1, 2**63], dtype=object) res = Index(np.array([-1, 2**63], dtype=object)) tm.assert_index_equal(res, idx) def test_get_indexer(self): target = UInt64Index(np.arange(10).astype('uint64') * 5 + 2**63) indexer = self.index.get_indexer(target) expected = np.array([0, -1, 1, 2, 3, 4, -1, -1, -1, -1], dtype=np.intp) tm.assert_numpy_array_equal(indexer, expected) target = UInt64Index(np.arange(10).astype('uint64') * 5 + 2**63) indexer = self.index.get_indexer(target, method='pad') expected = np.array([0, 0, 1, 2, 3, 4, 4, 4, 4, 4], dtype=np.intp) tm.assert_numpy_array_equal(indexer, expected) target = UInt64Index(np.arange(10).astype('uint64') * 5 + 2**63) indexer = self.index.get_indexer(target, method='backfill') expected = np.array([0, 1, 1, 2, 3, 4, -1, -1, -1, -1], dtype=np.intp) tm.assert_numpy_array_equal(indexer, expected) def test_intersection(self): other = Index([2**63, 2**63 + 5, 2**63 + 10, 2**63 + 15, 2**63 + 20]) result = self.index.intersection(other) expected = Index(np.sort(np.intersect1d(self.index.values, other.values))) tm.assert_index_equal(result, expected) result = other.intersection(self.index) expected = Index(np.sort(np.asarray(np.intersect1d(self.index.values, other.values)))) tm.assert_index_equal(result, expected) def test_join_inner(self): other = UInt64Index(2**63 + np.array( [7, 12, 25, 1, 2, 10], dtype='uint64')) other_mono = UInt64Index(2**63 + np.array( [1, 2, 7, 10, 12, 25], dtype='uint64')) # not monotonic res, lidx, ridx = self.index.join(other, how='inner', return_indexers=True) # no guarantee of sortedness, so sort for comparison purposes ind = res.argsort() res = res.take(ind) lidx = lidx.take(ind) ridx = ridx.take(ind) eres = UInt64Index(2**63 + np.array([10, 25], dtype='uint64')) elidx = np.array([1, 4], dtype=np.intp) eridx = np.array([5, 2], dtype=np.intp) assert isinstance(res, UInt64Index) tm.assert_index_equal(res, eres) tm.assert_numpy_array_equal(lidx, elidx) tm.assert_numpy_array_equal(ridx, eridx) # monotonic res, lidx, ridx = 
self.index.join(other_mono, how='inner', return_indexers=True) res2 = self.index.intersection(other_mono) tm.assert_index_equal(res, res2) elidx = np.array([1, 4], dtype=np.intp) eridx = np.array([3, 5], dtype=np.intp) assert isinstance(res, UInt64Index) tm.assert_index_equal(res, eres) tm.assert_numpy_array_equal(lidx, elidx) tm.assert_numpy_array_equal(ridx, eridx) def test_join_left(self): other = UInt64Index(2**63 + np.array( [7, 12, 25, 1, 2, 10], dtype='uint64')) other_mono = UInt64Index(2**63 + np.array( [1, 2, 7, 10, 12, 25], dtype='uint64')) # not monotonic res, lidx, ridx = self.index.join(other, how='left', return_indexers=True) eres = self.index eridx = np.array([-1, 5, -1, -1, 2], dtype=np.intp) assert isinstance(res, UInt64Index) tm.assert_index_equal(res, eres) assert lidx is None tm.assert_numpy_array_equal(ridx, eridx) # monotonic res, lidx, ridx = self.index.join(other_mono, how='left', return_indexers=True) eridx = np.array([-1, 3, -1, -1, 5], dtype=np.intp) assert isinstance(res, UInt64Index) tm.assert_index_equal(res, eres) assert lidx is None tm.assert_numpy_array_equal(ridx, eridx) # non-unique idx = UInt64Index(2**63 + np.array([1, 1, 2, 5], dtype='uint64')) idx2 = UInt64Index(2**63 + np.array([1, 2, 5, 7, 9], dtype='uint64')) res, lidx, ridx = idx2.join(idx, how='left', return_indexers=True) # 1 is in idx2, so it should be x2 eres = UInt64Index(2**63 + np.array( [1, 1, 2, 5, 7, 9], dtype='uint64')) eridx = np.array([0, 1, 2, 3, -1, -1], dtype=np.intp) elidx = np.array([0, 0, 1, 2, 3, 4], dtype=np.intp) tm.assert_index_equal(res, eres) tm.assert_numpy_array_equal(lidx, elidx) tm.assert_numpy_array_equal(ridx, eridx) def test_join_right(self): other = UInt64Index(2**63 + np.array( [7, 12, 25, 1, 2, 10], dtype='uint64')) other_mono = UInt64Index(2**63 + np.array( [1, 2, 7, 10, 12, 25], dtype='uint64')) # not monotonic res, lidx, ridx = self.index.join(other, how='right', return_indexers=True) eres = other elidx = np.array([-1, -1, 4, -1, -1, 1], dtype=np.intp) tm.assert_numpy_array_equal(lidx, elidx) assert isinstance(other, UInt64Index) tm.assert_index_equal(res, eres) assert ridx is None # monotonic res, lidx, ridx = self.index.join(other_mono, how='right', return_indexers=True) eres = other_mono elidx = np.array([-1, -1, -1, 1, -1, 4], dtype=np.intp) assert isinstance(other, UInt64Index) tm.assert_numpy_array_equal(lidx, elidx) tm.assert_index_equal(res, eres) assert ridx is None # non-unique idx = UInt64Index(2**63 + np.array([1, 1, 2, 5], dtype='uint64')) idx2 = UInt64Index(2**63 + np.array([1, 2, 5, 7, 9], dtype='uint64')) res, lidx, ridx = idx.join(idx2, how='right', return_indexers=True) # 1 is in idx2, so it should be x2 eres = UInt64Index(2**63 + np.array( [1, 1, 2, 5, 7, 9], dtype='uint64')) elidx = np.array([0, 1, 2, 3, -1, -1], dtype=np.intp) eridx = np.array([0, 0, 1, 2, 3, 4], dtype=np.intp) tm.assert_index_equal(res, eres) tm.assert_numpy_array_equal(lidx, elidx) tm.assert_numpy_array_equal(ridx, eridx) def test_join_non_int_index(self): other = Index(2**63 + np.array( [1, 5, 7, 10, 20], dtype='uint64'), dtype=object) outer = self.index.join(other, how='outer') outer2 = other.join(self.index, how='outer') expected = Index(2**63 + np.array( [0, 1, 5, 7, 10, 15, 20, 25], dtype='uint64')) tm.assert_index_equal(outer, outer2) tm.assert_index_equal(outer, expected) inner = self.index.join(other, how='inner') inner2 = other.join(self.index, how='inner') expected = Index(2**63 + np.array([10, 20], dtype='uint64')) tm.assert_index_equal(inner, inner2) 
tm.assert_index_equal(inner, expected) left = self.index.join(other, how='left') tm.assert_index_equal(left, self.index.astype(object)) left2 = other.join(self.index, how='left') tm.assert_index_equal(left2, other) right = self.index.join(other, how='right') tm.assert_index_equal(right, other) right2 = other.join(self.index, how='right') tm.assert_index_equal(right2, self.index.astype(object)) def test_join_outer(self): other = UInt64Index(2**63 + np.array( [7, 12, 25, 1, 2, 10], dtype='uint64')) other_mono = UInt64Index(2**63 + np.array( [1, 2, 7, 10, 12, 25], dtype='uint64')) # not monotonic # guarantee of sortedness res, lidx, ridx = self.index.join(other, how='outer', return_indexers=True) noidx_res = self.index.join(other, how='outer') tm.assert_index_equal(res, noidx_res) eres = UInt64Index(2**63 + np.array( [0, 1, 2, 7, 10, 12, 15, 20, 25], dtype='uint64')) elidx = np.array([0, -1, -1, -1, 1, -1, 2, 3, 4], dtype=np.intp) eridx = np.array([-1, 3, 4, 0, 5, 1, -1, -1, 2], dtype=np.intp) assert isinstance(res, UInt64Index) tm.assert_index_equal(res, eres) tm.assert_numpy_array_equal(lidx, elidx) tm.assert_numpy_array_equal(ridx, eridx) # monotonic res, lidx, ridx = self.index.join(other_mono, how='outer', return_indexers=True) noidx_res = self.index.join(other_mono, how='outer') tm.assert_index_equal(res, noidx_res) elidx = np.array([0, -1, -1, -1, 1, -1, 2, 3, 4], dtype=np.intp) eridx = np.array([-1, 0, 1, 2, 3, 4, -1, -1, 5], dtype=np.intp) assert isinstance(res, UInt64Index) tm.assert_index_equal(res, eres) tm.assert_numpy_array_equal(lidx, elidx) tm.assert_numpy_array_equal(ridx, eridx)
MJuddBooth/pandas
pandas/tests/indexes/test_numeric.py
pandas/core/internals/managers.py
""" masked_reductions.py is for reduction algorithms using a mask-based approach for missing values. """ from typing import Callable import numpy as np from pandas._libs import missing as libmissing from pandas.compat.numpy import np_version_under1p17 from pandas.core.nanops import check_below_min_count def _sumprod( func: Callable, values: np.ndarray, mask: np.ndarray, skipna: bool = True, min_count: int = 0, ): """ Sum or product for 1D masked array. Parameters ---------- func : np.sum or np.prod values : np.ndarray Numpy array with the values (can be of any dtype that support the operation). mask : np.ndarray Boolean numpy array (True values indicate missing values). skipna : bool, default True Whether to skip NA. min_count : int, default 0 The required number of valid values to perform the operation. If fewer than ``min_count`` non-NA values are present the result will be NA. """ if not skipna: if mask.any() or check_below_min_count(values.shape, None, min_count): return libmissing.NA else: return func(values) else: if check_below_min_count(values.shape, mask, min_count): return libmissing.NA if np_version_under1p17: return func(values[~mask]) else: return func(values, where=~mask) def sum(values: np.ndarray, mask: np.ndarray, skipna: bool = True, min_count: int = 0): return _sumprod( np.sum, values=values, mask=mask, skipna=skipna, min_count=min_count ) def prod(values: np.ndarray, mask: np.ndarray, skipna: bool = True, min_count: int = 0): return _sumprod( np.prod, values=values, mask=mask, skipna=skipna, min_count=min_count ) def _minmax(func: Callable, values: np.ndarray, mask: np.ndarray, skipna: bool = True): """ Reduction for 1D masked array. Parameters ---------- func : np.min or np.max values : np.ndarray Numpy array with the values (can be of any dtype that support the operation). mask : np.ndarray Boolean numpy array (True values indicate missing values). skipna : bool, default True Whether to skip NA. """ if not skipna: if mask.any() or not values.size: # min/max with empty array raise in numpy, pandas returns NA return libmissing.NA else: return func(values) else: subset = values[~mask] if subset.size: return func(subset) else: # min/max with empty array raise in numpy, pandas returns NA return libmissing.NA def min(values: np.ndarray, mask: np.ndarray, skipna: bool = True): return _minmax(np.min, values=values, mask=mask, skipna=skipna) def max(values: np.ndarray, mask: np.ndarray, skipna: bool = True): return _minmax(np.max, values=values, mask=mask, skipna=skipna)
from datetime import datetime, timedelta import numpy as np import pytest from pandas._libs.tslibs import Timestamp import pandas as pd from pandas import Float64Index, Index, Int64Index, Series, UInt64Index import pandas._testing as tm from pandas.tests.indexes.common import Base class Numeric(Base): def test_where(self): # Tested in numeric.test_indexing pass def test_can_hold_identifiers(self): idx = self.create_index() key = idx[0] assert idx._can_hold_identifiers_and_holds_name(key) is False def test_format(self): # GH35439 idx = self.create_index() max_width = max(len(str(x)) for x in idx) expected = [str(x).ljust(max_width) for x in idx] assert idx.format() == expected def test_numeric_compat(self): pass # override Base method def test_explicit_conversions(self): # GH 8608 # add/sub are overridden explicitly for Float/Int Index idx = self._holder(np.arange(5, dtype="int64")) # float conversions arr = np.arange(5, dtype="int64") * 3.2 expected = Float64Index(arr) fidx = idx * 3.2 tm.assert_index_equal(fidx, expected) fidx = 3.2 * idx tm.assert_index_equal(fidx, expected) # interops with numpy arrays expected = Float64Index(arr) a = np.zeros(5, dtype="float64") result = fidx - a tm.assert_index_equal(result, expected) expected = Float64Index(-arr) a = np.zeros(5, dtype="float64") result = a - fidx tm.assert_index_equal(result, expected) def test_index_groupby(self): int_idx = Index(range(6)) float_idx = Index(np.arange(0, 0.6, 0.1)) obj_idx = Index("A B C D E F".split()) dt_idx = pd.date_range("2013-01-01", freq="M", periods=6) for idx in [int_idx, float_idx, obj_idx, dt_idx]: to_groupby = np.array([1, 2, np.nan, np.nan, 2, 1]) tm.assert_dict_equal( idx.groupby(to_groupby), {1.0: idx[[0, 5]], 2.0: idx[[1, 4]]} ) to_groupby = Index( [ datetime(2011, 11, 1), datetime(2011, 12, 1), pd.NaT, pd.NaT, datetime(2011, 12, 1), datetime(2011, 11, 1), ], tz="UTC", ).values ex_keys = [Timestamp("2011-11-01"), Timestamp("2011-12-01")] expected = {ex_keys[0]: idx[[0, 5]], ex_keys[1]: idx[[1, 4]]} tm.assert_dict_equal(idx.groupby(to_groupby), expected) def test_insert_na(self, nulls_fixture): # GH 18295 (test missing) index = self.create_index() if nulls_fixture is pd.NaT: expected = Index([index[0], pd.NaT] + list(index[1:]), dtype=object) else: expected = Float64Index([index[0], np.nan] + list(index[1:])) result = index.insert(1, nulls_fixture) tm.assert_index_equal(result, expected) class TestFloat64Index(Numeric): _holder = Float64Index @pytest.fixture( params=[ [1.5, 2, 3, 4, 5], [0.0, 2.5, 5.0, 7.5, 10.0], [5, 4, 3, 2, 1.5], [10.0, 7.5, 5.0, 2.5, 0.0], ], ids=["mixed", "float", "mixed_dec", "float_dec"], ) def index(self, request): return Float64Index(request.param) @pytest.fixture def mixed_index(self): return Float64Index([1.5, 2, 3, 4, 5]) @pytest.fixture def float_index(self): return Float64Index([0.0, 2.5, 5.0, 7.5, 10.0]) def create_index(self) -> Float64Index: return Float64Index(np.arange(5, dtype="float64")) def test_repr_roundtrip(self, index): tm.assert_index_equal(eval(repr(index)), index) def check_is_index(self, i): assert isinstance(i, Index) assert not isinstance(i, Float64Index) def check_coerce(self, a, b, is_float_index=True): assert a.equals(b) tm.assert_index_equal(a, b, exact=False) if is_float_index: assert isinstance(b, Float64Index) else: self.check_is_index(b) def test_constructor(self): # explicit construction index = Float64Index([1, 2, 3, 4, 5]) assert isinstance(index, Float64Index) expected = np.array([1, 2, 3, 4, 5], dtype="float64") 
tm.assert_numpy_array_equal(index.values, expected) index = Float64Index(np.array([1, 2, 3, 4, 5])) assert isinstance(index, Float64Index) index = Float64Index([1.0, 2, 3, 4, 5]) assert isinstance(index, Float64Index) index = Float64Index(np.array([1.0, 2, 3, 4, 5])) assert isinstance(index, Float64Index) assert index.dtype == float index = Float64Index(np.array([1.0, 2, 3, 4, 5]), dtype=np.float32) assert isinstance(index, Float64Index) assert index.dtype == np.float64 index = Float64Index(np.array([1, 2, 3, 4, 5]), dtype=np.float32) assert isinstance(index, Float64Index) assert index.dtype == np.float64 # nan handling result = Float64Index([np.nan, np.nan]) assert pd.isna(result.values).all() result = Float64Index(np.array([np.nan])) assert pd.isna(result.values).all() result = Index(np.array([np.nan])) assert pd.isna(result.values).all() @pytest.mark.parametrize( "index, dtype", [ (pd.Int64Index, "float64"), (pd.UInt64Index, "categorical"), (pd.Float64Index, "datetime64"), (pd.RangeIndex, "float64"), ], ) def test_invalid_dtype(self, index, dtype): # GH 29539 with pytest.raises( ValueError, match=rf"Incorrect `dtype` passed: expected \w+(?: \w+)?, received {dtype}", ): index([1, 2, 3], dtype=dtype) def test_constructor_invalid(self): # invalid msg = ( r"Float64Index\(\.\.\.\) must be called with a collection of " r"some kind, 0\.0 was passed" ) with pytest.raises(TypeError, match=msg): Float64Index(0.0) msg = ( "String dtype not supported, " "you may need to explicitly cast to a numeric type" ) with pytest.raises(TypeError, match=msg): Float64Index(["a", "b", 0.0]) msg = r"float\(\) argument must be a string or a number, not 'Timestamp'" with pytest.raises(TypeError, match=msg): Float64Index([Timestamp("20130101")]) def test_constructor_coerce(self, mixed_index, float_index): self.check_coerce(mixed_index, Index([1.5, 2, 3, 4, 5])) self.check_coerce(float_index, Index(np.arange(5) * 2.5)) self.check_coerce( float_index, Index(np.array(np.arange(5) * 2.5, dtype=object)) ) def test_constructor_explicit(self, mixed_index, float_index): # these don't auto convert self.check_coerce( float_index, Index((np.arange(5) * 2.5), dtype=object), is_float_index=False ) self.check_coerce( mixed_index, Index([1.5, 2, 3, 4, 5], dtype=object), is_float_index=False ) def test_type_coercion_fail(self, any_int_dtype): # see gh-15832 msg = "Trying to coerce float values to integers" with pytest.raises(ValueError, match=msg): Index([1, 2, 3.5], dtype=any_int_dtype) def test_type_coercion_valid(self, float_dtype): # There is no Float32Index, so we always # generate Float64Index. 
i = Index([1, 2, 3.5], dtype=float_dtype) tm.assert_index_equal(i, Index([1, 2, 3.5])) def test_equals_numeric(self): i = Float64Index([1.0, 2.0]) assert i.equals(i) assert i.identical(i) i2 = Float64Index([1.0, 2.0]) assert i.equals(i2) i = Float64Index([1.0, np.nan]) assert i.equals(i) assert i.identical(i) i2 = Float64Index([1.0, np.nan]) assert i.equals(i2) @pytest.mark.parametrize( "other", ( Int64Index([1, 2]), Index([1.0, 2.0], dtype=object), Index([1, 2], dtype=object), ), ) def test_equals_numeric_other_index_type(self, other): i = Float64Index([1.0, 2.0]) assert i.equals(other) assert other.equals(i) @pytest.mark.parametrize( "vals", [ pd.date_range("2016-01-01", periods=3), pd.timedelta_range("1 Day", periods=3), ], ) def test_lookups_datetimelike_values(self, vals): # If we have datetime64 or timedelta64 values, make sure they are # wrappped correctly GH#31163 ser = pd.Series(vals, index=range(3, 6)) ser.index = ser.index.astype("float64") expected = vals[1] with tm.assert_produces_warning(FutureWarning): result = ser.index.get_value(ser, 4.0) assert isinstance(result, type(expected)) and result == expected with tm.assert_produces_warning(FutureWarning): result = ser.index.get_value(ser, 4) assert isinstance(result, type(expected)) and result == expected result = ser[4.0] assert isinstance(result, type(expected)) and result == expected result = ser[4] assert isinstance(result, type(expected)) and result == expected result = ser.loc[4.0] assert isinstance(result, type(expected)) and result == expected result = ser.loc[4] assert isinstance(result, type(expected)) and result == expected result = ser.at[4.0] assert isinstance(result, type(expected)) and result == expected # GH#31329 .at[4] should cast to 4.0, matching .loc behavior result = ser.at[4] assert isinstance(result, type(expected)) and result == expected result = ser.iloc[1] assert isinstance(result, type(expected)) and result == expected result = ser.iat[1] assert isinstance(result, type(expected)) and result == expected def test_doesnt_contain_all_the_things(self): i = Float64Index([np.nan]) assert not i.isin([0]).item() assert not i.isin([1]).item() assert i.isin([np.nan]).item() def test_nan_multiple_containment(self): i = Float64Index([1.0, np.nan]) tm.assert_numpy_array_equal(i.isin([1.0]), np.array([True, False])) tm.assert_numpy_array_equal(i.isin([2.0, np.pi]), np.array([False, False])) tm.assert_numpy_array_equal(i.isin([np.nan]), np.array([False, True])) tm.assert_numpy_array_equal(i.isin([1.0, np.nan]), np.array([True, True])) i = Float64Index([1.0, 2.0]) tm.assert_numpy_array_equal(i.isin([np.nan]), np.array([False, False])) def test_fillna_float64(self): # GH 11343 idx = Index([1.0, np.nan, 3.0], dtype=float, name="x") # can't downcast exp = Index([1.0, 0.1, 3.0], name="x") tm.assert_index_equal(idx.fillna(0.1), exp) # downcast exp = Float64Index([1.0, 2.0, 3.0], name="x") tm.assert_index_equal(idx.fillna(2), exp) # object exp = Index([1.0, "obj", 3.0], name="x") tm.assert_index_equal(idx.fillna("obj"), exp) class NumericInt(Numeric): def test_view(self): i = self._holder([], name="Foo") i_view = i.view() assert i_view.name == "Foo" i_view = i.view(self._dtype) tm.assert_index_equal(i, self._holder(i_view, name="Foo")) i_view = i.view(self._holder) tm.assert_index_equal(i, self._holder(i_view, name="Foo")) def test_is_monotonic(self): index = self._holder([1, 2, 3, 4]) assert index.is_monotonic is True assert index.is_monotonic_increasing is True assert index._is_strictly_monotonic_increasing is True assert 
index.is_monotonic_decreasing is False assert index._is_strictly_monotonic_decreasing is False index = self._holder([4, 3, 2, 1]) assert index.is_monotonic is False assert index._is_strictly_monotonic_increasing is False assert index._is_strictly_monotonic_decreasing is True index = self._holder([1]) assert index.is_monotonic is True assert index.is_monotonic_increasing is True assert index.is_monotonic_decreasing is True assert index._is_strictly_monotonic_increasing is True assert index._is_strictly_monotonic_decreasing is True def test_is_strictly_monotonic(self): index = self._holder([1, 1, 2, 3]) assert index.is_monotonic_increasing is True assert index._is_strictly_monotonic_increasing is False index = self._holder([3, 2, 1, 1]) assert index.is_monotonic_decreasing is True assert index._is_strictly_monotonic_decreasing is False index = self._holder([1, 1]) assert index.is_monotonic_increasing assert index.is_monotonic_decreasing assert not index._is_strictly_monotonic_increasing assert not index._is_strictly_monotonic_decreasing def test_logical_compat(self): idx = self.create_index() assert idx.all() == idx.values.all() assert idx.any() == idx.values.any() def test_identical(self): index = self.create_index() i = Index(index.copy()) assert i.identical(index) same_values_different_type = Index(i, dtype=object) assert not i.identical(same_values_different_type) i = index.astype(dtype=object) i = i.rename("foo") same_values = Index(i, dtype=object) assert same_values.identical(i) assert not i.identical(index) assert Index(same_values, name="foo", dtype=object).identical(i) assert not index.astype(dtype=object).identical(index.astype(dtype=self._dtype)) def test_union_noncomparable(self): # corner case, non-Int64Index index = self.create_index() other = Index([datetime.now() + timedelta(i) for i in range(4)], dtype=object) result = index.union(other) expected = Index(np.concatenate((index, other))) tm.assert_index_equal(result, expected) result = other.union(index) expected = Index(np.concatenate((other, index))) tm.assert_index_equal(result, expected) def test_cant_or_shouldnt_cast(self): msg = ( "String dtype not supported, " "you may need to explicitly cast to a numeric type" ) # can't data = ["foo", "bar", "baz"] with pytest.raises(TypeError, match=msg): self._holder(data) # shouldn't data = ["0", "1", "2"] with pytest.raises(TypeError, match=msg): self._holder(data) def test_view_index(self): index = self.create_index() index.view(Index) def test_prevent_casting(self): index = self.create_index() result = index.astype("O") assert result.dtype == np.object_ class TestInt64Index(NumericInt): _dtype = "int64" _holder = Int64Index @pytest.fixture( params=[range(0, 20, 2), range(19, -1, -1)], ids=["index_inc", "index_dec"] ) def index(self, request): return Int64Index(request.param) def create_index(self) -> Int64Index: # return Int64Index(np.arange(5, dtype="int64")) return Int64Index(range(0, 20, 2)) def test_constructor(self): # pass list, coerce fine index = Int64Index([-5, 0, 1, 2]) expected = Index([-5, 0, 1, 2], dtype=np.int64) tm.assert_index_equal(index, expected) # from iterable index = Int64Index(iter([-5, 0, 1, 2])) tm.assert_index_equal(index, expected) # scalar raise Exception msg = ( r"Int64Index\(\.\.\.\) must be called with a collection of some " "kind, 5 was passed" ) with pytest.raises(TypeError, match=msg): Int64Index(5) # copy arr = index.values new_index = Int64Index(arr, copy=True) tm.assert_index_equal(new_index, index) val = arr[0] + 3000 # this should not 
change index arr[0] = val assert new_index[0] != val # interpret list-like expected = Int64Index([5, 0]) for cls in [Index, Int64Index]: for idx in [ cls([5, 0], dtype="int64"), cls(np.array([5, 0]), dtype="int64"), cls(Series([5, 0]), dtype="int64"), ]: tm.assert_index_equal(idx, expected) def test_constructor_corner(self): arr = np.array([1, 2, 3, 4], dtype=object) index = Int64Index(arr) assert index.values.dtype == np.int64 tm.assert_index_equal(index, Index(arr)) # preventing casting arr = np.array([1, "2", 3, "4"], dtype=object) with pytest.raises(TypeError, match="casting"): Int64Index(arr) arr_with_floats = [0, 2, 3, 4, 5, 1.25, 3, -1] with pytest.raises(TypeError, match="casting"): Int64Index(arr_with_floats) def test_constructor_coercion_signed_to_unsigned(self, uint_dtype): # see gh-15832 msg = "Trying to coerce negative values to unsigned integers" with pytest.raises(OverflowError, match=msg): Index([-1], dtype=uint_dtype) def test_constructor_unwraps_index(self): idx = pd.Index([1, 2]) result = pd.Int64Index(idx) expected = np.array([1, 2], dtype="int64") tm.assert_numpy_array_equal(result._data, expected) def test_coerce_list(self): # coerce things arr = Index([1, 2, 3, 4]) assert isinstance(arr, Int64Index) # but not if explicit dtype passed arr = Index([1, 2, 3, 4], dtype=object) assert isinstance(arr, Index) def test_intersection(self): index = self.create_index() other = Index([1, 2, 3, 4, 5]) result = index.intersection(other) expected = Index(np.sort(np.intersect1d(index.values, other.values))) tm.assert_index_equal(result, expected) result = other.intersection(index) expected = Index( np.sort(np.asarray(np.intersect1d(index.values, other.values))) ) tm.assert_index_equal(result, expected) class TestUInt64Index(NumericInt): _dtype = "uint64" _holder = UInt64Index @pytest.fixture( params=[ [2 ** 63, 2 ** 63 + 10, 2 ** 63 + 15, 2 ** 63 + 20, 2 ** 63 + 25], [2 ** 63 + 25, 2 ** 63 + 20, 2 ** 63 + 15, 2 ** 63 + 10, 2 ** 63], ], ids=["index_inc", "index_dec"], ) def index(self, request): return UInt64Index(request.param) @pytest.fixture def index_large(self): # large values used in TestUInt64Index where no compat needed with Int64/Float64 large = [2 ** 63, 2 ** 63 + 10, 2 ** 63 + 15, 2 ** 63 + 20, 2 ** 63 + 25] return UInt64Index(large) def create_index(self) -> UInt64Index: # compat with shared Int64/Float64 tests; use index_large for UInt64 only tests return UInt64Index(np.arange(5, dtype="uint64")) def test_constructor(self): idx = UInt64Index([1, 2, 3]) res = Index([1, 2, 3], dtype=np.uint64) tm.assert_index_equal(res, idx) idx = UInt64Index([1, 2 ** 63]) res = Index([1, 2 ** 63], dtype=np.uint64) tm.assert_index_equal(res, idx) idx = UInt64Index([1, 2 ** 63]) res = Index([1, 2 ** 63]) tm.assert_index_equal(res, idx) idx = Index([-1, 2 ** 63], dtype=object) res = Index(np.array([-1, 2 ** 63], dtype=object)) tm.assert_index_equal(res, idx) # https://github.com/pandas-dev/pandas/issues/29526 idx = UInt64Index([1, 2 ** 63 + 1], dtype=np.uint64) res = Index([1, 2 ** 63 + 1], dtype=np.uint64) tm.assert_index_equal(res, idx) def test_intersection(self, index_large): other = Index([2 ** 63, 2 ** 63 + 5, 2 ** 63 + 10, 2 ** 63 + 15, 2 ** 63 + 20]) result = index_large.intersection(other) expected = Index(np.sort(np.intersect1d(index_large.values, other.values))) tm.assert_index_equal(result, expected) result = other.intersection(index_large) expected = Index( np.sort(np.asarray(np.intersect1d(index_large.values, other.values))) ) tm.assert_index_equal(result, expected) 
@pytest.mark.parametrize("dtype", ["int64", "uint64"]) def test_int_float_union_dtype(dtype): # https://github.com/pandas-dev/pandas/issues/26778 # [u]int | float -> float index = pd.Index([0, 2, 3], dtype=dtype) other = pd.Float64Index([0.5, 1.5]) expected = pd.Float64Index([0.0, 0.5, 1.5, 2.0, 3.0]) result = index.union(other) tm.assert_index_equal(result, expected) result = other.union(index) tm.assert_index_equal(result, expected) def test_range_float_union_dtype(): # https://github.com/pandas-dev/pandas/issues/26778 index = pd.RangeIndex(start=0, stop=3) other = pd.Float64Index([0.5, 1.5]) result = index.union(other) expected = pd.Float64Index([0.0, 0.5, 1, 1.5, 2.0]) tm.assert_index_equal(result, expected) result = other.union(index) tm.assert_index_equal(result, expected) @pytest.mark.parametrize( "box", [list, lambda x: np.array(x, dtype=object), lambda x: pd.Index(x, dtype=object)], ) def test_uint_index_does_not_convert_to_float64(box): # https://github.com/pandas-dev/pandas/issues/28279 # https://github.com/pandas-dev/pandas/issues/28023 series = pd.Series( [0, 1, 2, 3, 4, 5], index=[ 7606741985629028552, 17876870360202815256, 17876870360202815256, 13106359306506049338, 8991270399732411471, 8991270399732411472, ], ) result = series.loc[box([7606741985629028552, 17876870360202815256])] expected = UInt64Index( [7606741985629028552, 17876870360202815256, 17876870360202815256], dtype="uint64", ) tm.assert_index_equal(result.index, expected) tm.assert_equal(result, series[:3]) def test_float64_index_equals(): # https://github.com/pandas-dev/pandas/issues/35217 float_index = pd.Index([1.0, 2, 3]) string_index = pd.Index(["1", "2", "3"]) result = float_index.equals(string_index) assert result is False result = string_index.equals(float_index) assert result is False def test_float64_index_difference(): # https://github.com/pandas-dev/pandas/issues/35217 float_index = pd.Index([1.0, 2, 3]) string_index = pd.Index(["1", "2", "3"]) result = float_index.difference(string_index) tm.assert_index_equal(result, float_index) result = string_index.difference(float_index) tm.assert_index_equal(result, string_index) class TestGetSliceBounds: @pytest.mark.parametrize("kind", ["getitem", "loc", None]) @pytest.mark.parametrize("side, expected", [("left", 4), ("right", 5)]) def test_get_slice_bounds_within(self, kind, side, expected): index = Index(range(6)) result = index.get_slice_bound(4, kind=kind, side=side) assert result == expected @pytest.mark.parametrize("kind", ["getitem", "loc", None]) @pytest.mark.parametrize("side", ["left", "right"]) @pytest.mark.parametrize("bound, expected", [(-1, 0), (10, 6)]) def test_get_slice_bounds_outside(self, kind, side, expected, bound): index = Index(range(6)) result = index.get_slice_bound(bound, kind=kind, side=side) assert result == expected
rs2/pandas
pandas/tests/indexes/test_numeric.py
pandas/core/array_algos/masked_reductions.py
import numpy as np
import pytest

import pandas as pd
from pandas.core.internals import ObjectBlock

from .base import BaseExtensionTests


class BaseCastingTests(BaseExtensionTests):
    """Casting to and from ExtensionDtypes"""

    def test_astype_object_series(self, all_data):
        ser = pd.Series(all_data, name="A")
        result = ser.astype(object)
        assert isinstance(result._mgr.blocks[0], ObjectBlock)

    def test_astype_object_frame(self, all_data):
        df = pd.DataFrame({"A": all_data})
        result = df.astype(object)
        blk = result._data.blocks[0]
        assert isinstance(blk, ObjectBlock), type(blk)

        # FIXME: these currently fail; dont leave commented-out
        # check that we can compare the dtypes
        # cmp = result.dtypes.equals(df.dtypes)
        # assert not cmp.any()

    def test_tolist(self, data):
        result = pd.Series(data).tolist()
        expected = list(data)
        assert result == expected

    def test_astype_str(self, data):
        result = pd.Series(data[:5]).astype(str)
        expected = pd.Series([str(x) for x in data[:5]], dtype=str)
        self.assert_series_equal(result, expected)

    def test_astype_string(self, data):
        # GH-33465
        result = pd.Series(data[:5]).astype("string")
        expected = pd.Series([str(x) for x in data[:5]], dtype="string")
        self.assert_series_equal(result, expected)

    def test_to_numpy(self, data):
        expected = np.asarray(data)

        result = data.to_numpy()
        self.assert_equal(result, expected)

        result = pd.Series(data).to_numpy()
        self.assert_equal(result, expected)

    def test_astype_empty_dataframe(self, dtype):
        # https://github.com/pandas-dev/pandas/issues/33113
        df = pd.DataFrame()
        result = df.astype(dtype)
        self.assert_frame_equal(result, df)

    @pytest.mark.parametrize("copy", [True, False])
    def test_astype_own_type(self, data, copy):
        # ensure that astype returns the original object for equal dtype and copy=False
        # https://github.com/pandas-dev/pandas/issues/28488
        result = data.astype(data.dtype, copy=copy)
        assert (result is data) is (not copy)
        self.assert_extension_array_equal(result, data)
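As a rough illustration of how this base class is consumed (an assumption, not part of the dump): a concrete ExtensionArray test module typically subclasses BaseCastingTests and supplies the data / all_data / dtype fixtures its tests request. The DecimalArray helpers from pandas' own extension test suite are used here only as example fixtures.

import pytest

from pandas.tests.extension import base
from pandas.tests.extension.decimal.array import DecimalArray, DecimalDtype, make_data


@pytest.fixture
def dtype():
    return DecimalDtype()


@pytest.fixture
def data():
    # random decimal values, as produced by pandas' decimal test helper
    return DecimalArray(make_data())


@pytest.fixture
def all_data(data):
    return data


class TestCasting(base.BaseCastingTests):
    # inherits test_tolist, test_astype_str, test_to_numpy, etc. unchanged
    pass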
rs2/pandas
pandas/tests/indexes/test_numeric.py
pandas/tests/extension/base/casting.py
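A minimal, hedged sketch of two behaviours the test file above exercises, written as a standalone snippet rather than a test: numeric index equality is value-based across Int64Index/Float64Index while identical() also compares type, and fillna on a float index keeps float64 whether or not the fill value is an integer. It assumes a pandas version in which Int64Index and Float64Index are still importable, exactly as the tests themselves do.

import numpy as np
import pandas as pd
from pandas import Float64Index, Int64Index

float_idx = Float64Index([1.0, 2.0])
int_idx = Int64Index([1, 2])

assert float_idx.equals(int_idx)         # same values -> equals() is True
assert not float_idx.identical(int_idx)  # different index type -> identical() is False

# fillna keeps the float64 dtype whether the fill value is a float or an int
idx = pd.Index([1.0, np.nan, 3.0], dtype=float, name="x")
assert idx.fillna(0.1).dtype == np.float64
assert idx.fillna(2).dtype == np.float64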
""" Pyperclip A cross-platform clipboard module for Python, with copy & paste functions for plain text. By Al Sweigart al@inventwithpython.com BSD License Usage: import pyperclip pyperclip.copy('The text to be copied to the clipboard.') spam = pyperclip.paste() if not pyperclip.is_available(): print("Copy functionality unavailable!") On Windows, no additional modules are needed. On Mac, the pyobjc module is used, falling back to the pbcopy and pbpaste cli commands. (These commands should come with OS X.). On Linux, install xclip or xsel via package manager. For example, in Debian: sudo apt-get install xclip sudo apt-get install xsel Otherwise on Linux, you will need the PyQt5 modules installed. This module does not work with PyGObject yet. Cygwin is currently not supported. Security Note: This module runs programs with these names: - which - where - pbcopy - pbpaste - xclip - xsel - klipper - qdbus A malicious user could rename or add programs with these names, tricking Pyperclip into running them with whatever permissions the Python process has. """ __version__ = "1.7.0" import contextlib import ctypes from ctypes import c_size_t, c_wchar, c_wchar_p, get_errno, sizeof import os import platform import subprocess import time import warnings # `import PyQt4` sys.exit()s if DISPLAY is not in the environment. # Thus, we need to detect the presence of $DISPLAY manually # and not load PyQt4 if it is absent. HAS_DISPLAY = os.getenv("DISPLAY", False) EXCEPT_MSG = """ Pyperclip could not find a copy/paste mechanism for your system. For more information, please visit https://pyperclip.readthedocs.io/en/latest/introduction.html#not-implemented-error """ ENCODING = "utf-8" # The "which" unix command finds where a command is. if platform.system() == "Windows": WHICH_CMD = "where" else: WHICH_CMD = "which" def _executable_exists(name): return ( subprocess.call( [WHICH_CMD, name], stdout=subprocess.PIPE, stderr=subprocess.PIPE ) == 0 ) # Exceptions class PyperclipException(RuntimeError): pass class PyperclipWindowsException(PyperclipException): def __init__(self, message): message += f" ({ctypes.WinError()})" super().__init__(message) def _stringifyText(text) -> str: acceptedTypes = (str, int, float, bool) if not isinstance(text, acceptedTypes): raise PyperclipException( f"only str, int, float, and bool values " f"can be copied to the clipboard, not {type(text).__name__}" ) return str(text) def init_osx_pbcopy_clipboard(): def copy_osx_pbcopy(text): text = _stringifyText(text) # Converts non-str values to str. p = subprocess.Popen(["pbcopy", "w"], stdin=subprocess.PIPE, close_fds=True) p.communicate(input=text.encode(ENCODING)) def paste_osx_pbcopy(): p = subprocess.Popen(["pbpaste", "r"], stdout=subprocess.PIPE, close_fds=True) stdout, stderr = p.communicate() return stdout.decode(ENCODING) return copy_osx_pbcopy, paste_osx_pbcopy def init_osx_pyobjc_clipboard(): def copy_osx_pyobjc(text): """Copy string argument to clipboard""" text = _stringifyText(text) # Converts non-str values to str. 
newStr = Foundation.NSString.stringWithString_(text).nsstring() newData = newStr.dataUsingEncoding_(Foundation.NSUTF8StringEncoding) board = AppKit.NSPasteboard.generalPasteboard() board.declareTypes_owner_([AppKit.NSStringPboardType], None) board.setData_forType_(newData, AppKit.NSStringPboardType) def paste_osx_pyobjc(): """Returns contents of clipboard""" board = AppKit.NSPasteboard.generalPasteboard() content = board.stringForType_(AppKit.NSStringPboardType) return content return copy_osx_pyobjc, paste_osx_pyobjc def init_qt_clipboard(): global QApplication # $DISPLAY should exist # Try to import from qtpy, but if that fails try PyQt5 then PyQt4 try: from qtpy.QtWidgets import QApplication except ImportError: try: from PyQt5.QtWidgets import QApplication except ImportError: from PyQt4.QtGui import QApplication app = QApplication.instance() if app is None: app = QApplication([]) def copy_qt(text): text = _stringifyText(text) # Converts non-str values to str. cb = app.clipboard() cb.setText(text) def paste_qt() -> str: cb = app.clipboard() return str(cb.text()) return copy_qt, paste_qt def init_xclip_clipboard(): DEFAULT_SELECTION = "c" PRIMARY_SELECTION = "p" def copy_xclip(text, primary=False): text = _stringifyText(text) # Converts non-str values to str. selection = DEFAULT_SELECTION if primary: selection = PRIMARY_SELECTION p = subprocess.Popen( ["xclip", "-selection", selection], stdin=subprocess.PIPE, close_fds=True ) p.communicate(input=text.encode(ENCODING)) def paste_xclip(primary=False): selection = DEFAULT_SELECTION if primary: selection = PRIMARY_SELECTION p = subprocess.Popen( ["xclip", "-selection", selection, "-o"], stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True, ) stdout, stderr = p.communicate() # Intentionally ignore extraneous output on stderr when clipboard is empty return stdout.decode(ENCODING) return copy_xclip, paste_xclip def init_xsel_clipboard(): DEFAULT_SELECTION = "-b" PRIMARY_SELECTION = "-p" def copy_xsel(text, primary=False): text = _stringifyText(text) # Converts non-str values to str. selection_flag = DEFAULT_SELECTION if primary: selection_flag = PRIMARY_SELECTION p = subprocess.Popen( ["xsel", selection_flag, "-i"], stdin=subprocess.PIPE, close_fds=True ) p.communicate(input=text.encode(ENCODING)) def paste_xsel(primary=False): selection_flag = DEFAULT_SELECTION if primary: selection_flag = PRIMARY_SELECTION p = subprocess.Popen( ["xsel", selection_flag, "-o"], stdout=subprocess.PIPE, close_fds=True ) stdout, stderr = p.communicate() return stdout.decode(ENCODING) return copy_xsel, paste_xsel def init_klipper_clipboard(): def copy_klipper(text): text = _stringifyText(text) # Converts non-str values to str. 
p = subprocess.Popen( [ "qdbus", "org.kde.klipper", "/klipper", "setClipboardContents", text.encode(ENCODING), ], stdin=subprocess.PIPE, close_fds=True, ) p.communicate(input=None) def paste_klipper(): p = subprocess.Popen( ["qdbus", "org.kde.klipper", "/klipper", "getClipboardContents"], stdout=subprocess.PIPE, close_fds=True, ) stdout, stderr = p.communicate() # Workaround for https://bugs.kde.org/show_bug.cgi?id=342874 # TODO: https://github.com/asweigart/pyperclip/issues/43 clipboardContents = stdout.decode(ENCODING) # even if blank, Klipper will append a newline at the end assert len(clipboardContents) > 0 # make sure that newline is there assert clipboardContents.endswith("\n") if clipboardContents.endswith("\n"): clipboardContents = clipboardContents[:-1] return clipboardContents return copy_klipper, paste_klipper def init_dev_clipboard_clipboard(): def copy_dev_clipboard(text): text = _stringifyText(text) # Converts non-str values to str. if text == "": warnings.warn( "Pyperclip cannot copy a blank string to the clipboard on Cygwin." "This is effectively a no-op." ) if "\r" in text: warnings.warn("Pyperclip cannot handle \\r characters on Cygwin.") with open("/dev/clipboard", "wt") as fo: fo.write(text) def paste_dev_clipboard() -> str: with open("/dev/clipboard") as fo: content = fo.read() return content return copy_dev_clipboard, paste_dev_clipboard def init_no_clipboard(): class ClipboardUnavailable: def __call__(self, *args, **kwargs): raise PyperclipException(EXCEPT_MSG) def __bool__(self) -> bool: return False return ClipboardUnavailable(), ClipboardUnavailable() # Windows-related clipboard functions: class CheckedCall: def __init__(self, f): super().__setattr__("f", f) def __call__(self, *args): ret = self.f(*args) if not ret and get_errno(): raise PyperclipWindowsException("Error calling " + self.f.__name__) return ret def __setattr__(self, key, value): setattr(self.f, key, value) def init_windows_clipboard(): global HGLOBAL, LPVOID, DWORD, LPCSTR, INT global HWND, HINSTANCE, HMENU, BOOL, UINT, HANDLE from ctypes.wintypes import ( BOOL, DWORD, HANDLE, HGLOBAL, HINSTANCE, HMENU, HWND, INT, LPCSTR, LPVOID, UINT, ) windll = ctypes.windll msvcrt = ctypes.CDLL("msvcrt") safeCreateWindowExA = CheckedCall(windll.user32.CreateWindowExA) safeCreateWindowExA.argtypes = [ DWORD, LPCSTR, LPCSTR, DWORD, INT, INT, INT, INT, HWND, HMENU, HINSTANCE, LPVOID, ] safeCreateWindowExA.restype = HWND safeDestroyWindow = CheckedCall(windll.user32.DestroyWindow) safeDestroyWindow.argtypes = [HWND] safeDestroyWindow.restype = BOOL OpenClipboard = windll.user32.OpenClipboard OpenClipboard.argtypes = [HWND] OpenClipboard.restype = BOOL safeCloseClipboard = CheckedCall(windll.user32.CloseClipboard) safeCloseClipboard.argtypes = [] safeCloseClipboard.restype = BOOL safeEmptyClipboard = CheckedCall(windll.user32.EmptyClipboard) safeEmptyClipboard.argtypes = [] safeEmptyClipboard.restype = BOOL safeGetClipboardData = CheckedCall(windll.user32.GetClipboardData) safeGetClipboardData.argtypes = [UINT] safeGetClipboardData.restype = HANDLE safeSetClipboardData = CheckedCall(windll.user32.SetClipboardData) safeSetClipboardData.argtypes = [UINT, HANDLE] safeSetClipboardData.restype = HANDLE safeGlobalAlloc = CheckedCall(windll.kernel32.GlobalAlloc) safeGlobalAlloc.argtypes = [UINT, c_size_t] safeGlobalAlloc.restype = HGLOBAL safeGlobalLock = CheckedCall(windll.kernel32.GlobalLock) safeGlobalLock.argtypes = [HGLOBAL] safeGlobalLock.restype = LPVOID safeGlobalUnlock = CheckedCall(windll.kernel32.GlobalUnlock) 
safeGlobalUnlock.argtypes = [HGLOBAL] safeGlobalUnlock.restype = BOOL wcslen = CheckedCall(msvcrt.wcslen) wcslen.argtypes = [c_wchar_p] wcslen.restype = UINT GMEM_MOVEABLE = 0x0002 CF_UNICODETEXT = 13 @contextlib.contextmanager def window(): """ Context that provides a valid Windows hwnd. """ # we really just need the hwnd, so setting "STATIC" # as predefined lpClass is just fine. hwnd = safeCreateWindowExA( 0, b"STATIC", None, 0, 0, 0, 0, 0, None, None, None, None ) try: yield hwnd finally: safeDestroyWindow(hwnd) @contextlib.contextmanager def clipboard(hwnd): """ Context manager that opens the clipboard and prevents other applications from modifying the clipboard content. """ # We may not get the clipboard handle immediately because # some other application is accessing it (?) # We try for at least 500ms to get the clipboard. t = time.time() + 0.5 success = False while time.time() < t: success = OpenClipboard(hwnd) if success: break time.sleep(0.01) if not success: raise PyperclipWindowsException("Error calling OpenClipboard") try: yield finally: safeCloseClipboard() def copy_windows(text): # This function is heavily based on # http://msdn.com/ms649016#_win32_Copying_Information_to_the_Clipboard text = _stringifyText(text) # Converts non-str values to str. with window() as hwnd: # http://msdn.com/ms649048 # If an application calls OpenClipboard with hwnd set to NULL, # EmptyClipboard sets the clipboard owner to NULL; # this causes SetClipboardData to fail. # => We need a valid hwnd to copy something. with clipboard(hwnd): safeEmptyClipboard() if text: # http://msdn.com/ms649051 # If the hMem parameter identifies a memory object, # the object must have been allocated using the # function with the GMEM_MOVEABLE flag. count = wcslen(text) + 1 handle = safeGlobalAlloc(GMEM_MOVEABLE, count * sizeof(c_wchar)) locked_handle = safeGlobalLock(handle) ctypes.memmove( c_wchar_p(locked_handle), c_wchar_p(text), count * sizeof(c_wchar), ) safeGlobalUnlock(handle) safeSetClipboardData(CF_UNICODETEXT, handle) def paste_windows(): with clipboard(None): handle = safeGetClipboardData(CF_UNICODETEXT) if not handle: # GetClipboardData may return NULL with errno == NO_ERROR # if the clipboard is empty. # (Also, it may return a handle to an empty buffer, # but technically that's not empty) return "" return c_wchar_p(handle).value return copy_windows, paste_windows def init_wsl_clipboard(): def copy_wsl(text): text = _stringifyText(text) # Converts non-str values to str. p = subprocess.Popen(["clip.exe"], stdin=subprocess.PIPE, close_fds=True) p.communicate(input=text.encode(ENCODING)) def paste_wsl(): p = subprocess.Popen( ["powershell.exe", "-command", "Get-Clipboard"], stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True, ) stdout, stderr = p.communicate() # WSL appends "\r\n" to the contents. return stdout[:-2].decode(ENCODING) return copy_wsl, paste_wsl # Automatic detection of clipboard mechanisms # and importing is done in determine_clipboard(): def determine_clipboard(): """ Determine the OS/platform and set the copy() and paste() functions accordingly. 
""" global Foundation, AppKit, qtpy, PyQt4, PyQt5 # Setup for the CYGWIN platform: if ( "cygwin" in platform.system().lower() ): # Cygwin has a variety of values returned by platform.system(), # such as 'CYGWIN_NT-6.1' # FIXME: pyperclip currently does not support Cygwin, # see https://github.com/asweigart/pyperclip/issues/55 if os.path.exists("/dev/clipboard"): warnings.warn( "Pyperclip's support for Cygwin is not perfect," "see https://github.com/asweigart/pyperclip/issues/55" ) return init_dev_clipboard_clipboard() # Setup for the WINDOWS platform: elif os.name == "nt" or platform.system() == "Windows": return init_windows_clipboard() if platform.system() == "Linux": with open("/proc/version") as f: if "Microsoft" in f.read(): return init_wsl_clipboard() # Setup for the MAC OS X platform: if os.name == "mac" or platform.system() == "Darwin": try: import AppKit import Foundation # check if pyobjc is installed except ImportError: return init_osx_pbcopy_clipboard() else: return init_osx_pyobjc_clipboard() # Setup for the LINUX platform: if HAS_DISPLAY: if _executable_exists("xsel"): return init_xsel_clipboard() if _executable_exists("xclip"): return init_xclip_clipboard() if _executable_exists("klipper") and _executable_exists("qdbus"): return init_klipper_clipboard() try: # qtpy is a small abstraction layer that lets you write applications # using a single api call to either PyQt or PySide. # https://pypi.python.org/project/QtPy import qtpy # check if qtpy is installed except ImportError: # If qtpy isn't installed, fall back on importing PyQt4. try: import PyQt5 # check if PyQt5 is installed except ImportError: try: import PyQt4 # check if PyQt4 is installed except ImportError: pass # We want to fail fast for all non-ImportError exceptions. else: return init_qt_clipboard() else: return init_qt_clipboard() else: return init_qt_clipboard() return init_no_clipboard() def set_clipboard(clipboard): """ Explicitly sets the clipboard mechanism. The "clipboard mechanism" is how the copy() and paste() functions interact with the operating system to implement the copy/paste feature. The clipboard parameter must be one of: - pbcopy - pbobjc (default on Mac OS X) - qt - xclip - xsel - klipper - windows (default on Windows) - no (this is what is set when no clipboard mechanism can be found) """ global copy, paste clipboard_types = { "pbcopy": init_osx_pbcopy_clipboard, "pyobjc": init_osx_pyobjc_clipboard, "qt": init_qt_clipboard, # TODO - split this into 'qtpy', 'pyqt4', and 'pyqt5' "xclip": init_xclip_clipboard, "xsel": init_xsel_clipboard, "klipper": init_klipper_clipboard, "windows": init_windows_clipboard, "no": init_no_clipboard, } if clipboard not in clipboard_types: allowed_clipboard_types = [repr(_) for _ in clipboard_types.keys()] raise ValueError( f"Argument must be one of {', '.join(allowed_clipboard_types)}" ) # Sets pyperclip's copy() and paste() functions: copy, paste = clipboard_types[clipboard]() def lazy_load_stub_copy(text): """ A stub function for copy(), which will load the real copy() function when called so that the real copy() function is used for later calls. This allows users to import pyperclip without having determine_clipboard() automatically run, which will automatically select a clipboard mechanism. This could be a problem if it selects, say, the memory-heavy PyQt4 module but the user was just going to immediately call set_clipboard() to use a different clipboard mechanism. 
The lazy loading this stub function implements gives the user a chance to call set_clipboard() to pick another clipboard mechanism. Or, if the user simply calls copy() or paste() without calling set_clipboard() first, will fall back on whatever clipboard mechanism that determine_clipboard() automatically chooses. """ global copy, paste copy, paste = determine_clipboard() return copy(text) def lazy_load_stub_paste(): """ A stub function for paste(), which will load the real paste() function when called so that the real paste() function is used for later calls. This allows users to import pyperclip without having determine_clipboard() automatically run, which will automatically select a clipboard mechanism. This could be a problem if it selects, say, the memory-heavy PyQt4 module but the user was just going to immediately call set_clipboard() to use a different clipboard mechanism. The lazy loading this stub function implements gives the user a chance to call set_clipboard() to pick another clipboard mechanism. Or, if the user simply calls copy() or paste() without calling set_clipboard() first, will fall back on whatever clipboard mechanism that determine_clipboard() automatically chooses. """ global copy, paste copy, paste = determine_clipboard() return paste() def is_available() -> bool: return copy != lazy_load_stub_copy and paste != lazy_load_stub_paste # Initially, copy() and paste() are set to lazy loading wrappers which will # set `copy` and `paste` to real functions the first time they're used, unless # set_clipboard() or determine_clipboard() is called first. copy, paste = lazy_load_stub_copy, lazy_load_stub_paste __all__ = ["copy", "paste", "set_clipboard", "determine_clipboard"] # pandas aliases clipboard_get = paste clipboard_set = copy
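A small usage sketch for the vendored clipboard module above, using only names the module itself defines (copy, paste, clipboard_get, clipboard_set, PyperclipException). Whether the calls succeed depends on a platform mechanism (pbcopy/pbpaste, xclip, xsel, klipper+qdbus, Qt, or the Windows API) being present at runtime, so the failure path is caught rather than assumed away.

import pandas.io.clipboard as clip

try:
    clip.copy("The text to be copied to the clipboard.")  # first call lazily picks a backend
    assert clip.paste() == "The text to be copied to the clipboard."
    clip.clipboard_set("42")    # the "pandas aliases" defined at the end of the module
    assert clip.clipboard_get() == "42"
except clip.PyperclipException:
    print("no copy/paste mechanism could be found on this system")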
rs2/pandas
pandas/tests/indexes/test_numeric.py
pandas/io/clipboard/__init__.py
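A second hedged sketch, this time for the explicit backend-selection path in the same clipboard module: set_clipboard() rebinds copy/paste to one named backend, the special "no" backend installs falsy ClipboardUnavailable objects that raise PyperclipException when called, and unknown backend names are rejected with a ValueError. The backend names are exactly the keys of clipboard_types inside set_clipboard(); nothing here is platform specific.

import pandas.io.clipboard as clip

clip.set_clipboard("no")        # deliberately install the "no clipboard" backend
assert not bool(clip.copy)      # ClipboardUnavailable.__bool__ returns False

try:
    clip.copy("anything")
except clip.PyperclipException as err:
    print(err)                  # the EXCEPT_MSG pointing at the pyperclip documentation

try:
    clip.set_clipboard("not-a-backend")
except ValueError as err:
    print(err)                  # lists the allowed backend names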
""" miscellaneous sorting / groupby utilities """ from collections import defaultdict from typing import ( TYPE_CHECKING, Callable, DefaultDict, Iterable, List, Optional, Tuple, Union, ) import numpy as np from pandas._libs import algos, hashtable, lib from pandas._libs.hashtable import unique_label_indices from pandas._typing import IndexKeyFunc from pandas.core.dtypes.common import ( ensure_int64, ensure_platform_int, is_extension_array_dtype, ) from pandas.core.dtypes.generic import ABCMultiIndex from pandas.core.dtypes.missing import isna import pandas.core.algorithms as algorithms from pandas.core.construction import extract_array if TYPE_CHECKING: from pandas.core.indexes.base import Index _INT64_MAX = np.iinfo(np.int64).max def get_indexer_indexer( target: "Index", level: Union[str, int, List[str], List[int]], ascending: bool, kind: str, na_position: str, sort_remaining: bool, key: IndexKeyFunc, ) -> Optional[np.array]: """ Helper method that return the indexer according to input parameters for the sort_index method of DataFrame and Series. Parameters ---------- target : Index level : int or level name or list of ints or list of level names ascending : bool or list of bools, default True kind : {'quicksort', 'mergesort', 'heapsort'}, default 'quicksort' na_position : {'first', 'last'}, default 'last' sort_remaining : bool, default True key : callable, optional Returns ------- Optional[ndarray] The indexer for the new index. """ target = ensure_key_mapped(target, key, levels=level) target = target._sort_levels_monotonic() if level is not None: _, indexer = target.sortlevel( level, ascending=ascending, sort_remaining=sort_remaining ) elif isinstance(target, ABCMultiIndex): indexer = lexsort_indexer( target._get_codes_for_sorting(), orders=ascending, na_position=na_position ) else: # Check monotonic-ness before sort an index (GH 11080) if (ascending and target.is_monotonic_increasing) or ( not ascending and target.is_monotonic_decreasing ): return None indexer = nargsort( target, kind=kind, ascending=ascending, na_position=na_position ) return indexer def get_group_index(labels, shape, sort: bool, xnull: bool): """ For the particular label_list, gets the offsets into the hypothetical list representing the totally ordered cartesian product of all possible label combinations, *as long as* this space fits within int64 bounds; otherwise, though group indices identify unique combinations of labels, they cannot be deconstructed. - If `sort`, rank of returned ids preserve lexical ranks of labels. i.e. returned id's can be used to do lexical sort on labels; - If `xnull` nulls (-1 labels) are passed through. Parameters ---------- labels : sequence of arrays Integers identifying levels at each location shape : sequence of ints Number of unique levels at each location sort : bool If the ranks of returned ids should match lexical ranks of labels xnull : bool If true nulls are excluded. i.e. -1 values in the labels are passed through. Returns ------- An array of type int64 where two elements are equal if their corresponding labels are equal at all location. Notes ----- The length of `labels` and `shape` must be identical. 
""" def _int64_cut_off(shape) -> int: acc = 1 for i, mul in enumerate(shape): acc *= int(mul) if not acc < _INT64_MAX: return i return len(shape) def maybe_lift(lab, size): # promote nan values (assigned -1 label in lab array) # so that all output values are non-negative return (lab + 1, size + 1) if (lab == -1).any() else (lab, size) labels = map(ensure_int64, labels) if not xnull: labels, shape = map(list, zip(*map(maybe_lift, labels, shape))) labels = list(labels) shape = list(shape) # Iteratively process all the labels in chunks sized so less # than _INT64_MAX unique int ids will be required for each chunk while True: # how many levels can be done without overflow: nlev = _int64_cut_off(shape) # compute flat ids for the first `nlev` levels stride = np.prod(shape[1:nlev], dtype="i8") out = stride * labels[0].astype("i8", subok=False, copy=False) for i in range(1, nlev): if shape[i] == 0: stride = 0 else: stride //= shape[i] out += labels[i] * stride if xnull: # exclude nulls mask = labels[0] == -1 for lab in labels[1:nlev]: mask |= lab == -1 out[mask] = -1 if nlev == len(shape): # all levels done! break # compress what has been done so far in order to avoid overflow # to retain lexical ranks, obs_ids should be sorted comp_ids, obs_ids = compress_group_index(out, sort=sort) labels = [comp_ids] + labels[nlev:] shape = [len(obs_ids)] + shape[nlev:] return out def get_compressed_ids(labels, sizes): """ Group_index is offsets into cartesian product of all possible labels. This space can be huge, so this function compresses it, by computing offsets (comp_ids) into the list of unique labels (obs_group_ids). Parameters ---------- labels : list of label arrays sizes : list of size of the levels Returns ------- tuple of (comp_ids, obs_group_ids) """ ids = get_group_index(labels, sizes, sort=True, xnull=False) return compress_group_index(ids, sort=True) def is_int64_overflow_possible(shape) -> bool: the_prod = 1 for x in shape: the_prod *= int(x) return the_prod >= _INT64_MAX def decons_group_index(comp_labels, shape): # reconstruct labels if is_int64_overflow_possible(shape): # at some point group indices are factorized, # and may not be deconstructed here! wrong path! raise ValueError("cannot deconstruct factorized group indices!") label_list = [] factor = 1 y = 0 x = comp_labels for i in reversed(range(len(shape))): labels = (x - y) % (factor * shape[i]) // factor np.putmask(labels, comp_labels < 0, -1) label_list.append(labels) y = labels * factor factor *= shape[i] return label_list[::-1] def decons_obs_group_ids(comp_ids, obs_ids, shape, labels, xnull: bool): """ Reconstruct labels from observed group ids. Parameters ---------- xnull : bool If nulls are excluded; i.e. -1 labels are passed through. """ if not xnull: lift = np.fromiter(((a == -1).any() for a in labels), dtype="i8") shape = np.asarray(shape, dtype="i8") + lift if not is_int64_overflow_possible(shape): # obs ids are deconstructable! take the fast route! 
out = decons_group_index(obs_ids, shape) return out if xnull or not lift.any() else [x - y for x, y in zip(out, lift)] i = unique_label_indices(comp_ids) i8copy = lambda a: a.astype("i8", subok=False, copy=True) return [i8copy(lab[i]) for lab in labels] def indexer_from_factorized(labels, shape, compress: bool = True): ids = get_group_index(labels, shape, sort=True, xnull=False) if not compress: ngroups = (ids.size and ids.max()) + 1 else: ids, obs = compress_group_index(ids, sort=True) ngroups = len(obs) return get_group_index_sorter(ids, ngroups) def lexsort_indexer( keys, orders=None, na_position: str = "last", key: Optional[Callable] = None ): """ Performs lexical sorting on a set of keys Parameters ---------- keys : sequence of arrays Sequence of ndarrays to be sorted by the indexer orders : boolean or list of booleans, optional Determines the sorting order for each element in keys. If a list, it must be the same length as keys. This determines whether the corresponding element in keys should be sorted in ascending (True) or descending (False) order. if bool, applied to all elements as above. if None, defaults to True. na_position : {'first', 'last'}, default 'last' Determines placement of NA elements in the sorted list ("last" or "first") key : Callable, optional Callable key function applied to every element in keys before sorting .. versionadded:: 1.0.0 """ from pandas.core.arrays import Categorical labels = [] shape = [] if isinstance(orders, bool): orders = [orders] * len(keys) elif orders is None: orders = [True] * len(keys) keys = [ensure_key_mapped(k, key) for k in keys] for k, order in zip(keys, orders): cat = Categorical(k, ordered=True) if na_position not in ["last", "first"]: raise ValueError(f"invalid na_position: {na_position}") n = len(cat.categories) codes = cat.codes.copy() mask = cat.codes == -1 if order: # ascending if na_position == "last": codes = np.where(mask, n, codes) elif na_position == "first": codes += 1 else: # not order means descending if na_position == "last": codes = np.where(mask, n, n - codes - 1) elif na_position == "first": codes = np.where(mask, 0, n - codes) if mask.any(): n += 1 shape.append(n) labels.append(codes) return indexer_from_factorized(labels, shape) def nargsort( items, kind: str = "quicksort", ascending: bool = True, na_position: str = "last", key: Optional[Callable] = None, ): """ Intended to be a drop-in replacement for np.argsort which handles NaNs. Adds ascending, na_position, and key parameters. 
(GH #6399, #5231, #27237) Parameters ---------- kind : str, default 'quicksort' ascending : bool, default True na_position : {'first', 'last'}, default 'last' key : Optional[Callable], default None """ if key is not None: items = ensure_key_mapped(items, key) return nargsort( items, kind=kind, ascending=ascending, na_position=na_position, key=None ) items = extract_array(items) mask = np.asarray(isna(items)) if is_extension_array_dtype(items): items = items._values_for_argsort() else: items = np.asanyarray(items) idx = np.arange(len(items)) non_nans = items[~mask] non_nan_idx = idx[~mask] nan_idx = np.nonzero(mask)[0] if not ascending: non_nans = non_nans[::-1] non_nan_idx = non_nan_idx[::-1] indexer = non_nan_idx[non_nans.argsort(kind=kind)] if not ascending: indexer = indexer[::-1] # Finally, place the NaNs at the end or the beginning according to # na_position if na_position == "last": indexer = np.concatenate([indexer, nan_idx]) elif na_position == "first": indexer = np.concatenate([nan_idx, indexer]) else: raise ValueError(f"invalid na_position: {na_position}") return indexer def nargminmax(values, method: str): """ Implementation of np.argmin/argmax but for ExtensionArray and which handles missing values. Parameters ---------- values : ExtensionArray method : {"argmax", "argmin"} Returns ------- int """ assert method in {"argmax", "argmin"} func = np.argmax if method == "argmax" else np.argmin mask = np.asarray(isna(values)) values = values._values_for_argsort() idx = np.arange(len(values)) non_nans = values[~mask] non_nan_idx = idx[~mask] return non_nan_idx[func(non_nans)] def ensure_key_mapped_multiindex(index, key: Callable, level=None): """ Returns a new MultiIndex in which key has been applied to all levels specified in level (or all levels if level is None). Used for key sorting for MultiIndex. Parameters ---------- index : MultiIndex Index to which to apply the key function on the specified levels. key : Callable Function that takes an Index and returns an Index of the same shape. This key is applied to each level separately. The name of the level can be used to distinguish different levels for application. level : list-like, int or str, default None Level or list of levels to apply the key function to. If None, key function is applied to all levels. Other levels are left unchanged. Returns ------- labels : MultiIndex Resulting MultiIndex with modified levels. """ from pandas.core.indexes.api import MultiIndex if level is not None: if isinstance(level, (str, int)): sort_levels = [level] else: sort_levels = level sort_levels = [index._get_level_number(lev) for lev in sort_levels] else: sort_levels = list(range(index.nlevels)) # satisfies mypy mapped = [ ensure_key_mapped(index._get_level_values(level), key) if level in sort_levels else index._get_level_values(level) for level in range(index.nlevels) ] labels = MultiIndex.from_arrays(mapped) return labels def ensure_key_mapped(values, key: Optional[Callable], levels=None): """ Applies a callable key function to the values function and checks that the resulting value has the same shape. Can be called on Index subclasses, Series, DataFrames, or ndarrays. Parameters ---------- values : Series, DataFrame, Index subclass, or ndarray key : Optional[Callable], key to be called on the values array levels : Optional[List], if values is a MultiIndex, list of levels to apply the key to. 
""" from pandas.core.indexes.api import Index # noqa:F811 if not key: return values if isinstance(values, ABCMultiIndex): return ensure_key_mapped_multiindex(values, key, level=levels) result = key(values.copy()) if len(result) != len(values): raise ValueError( "User-provided `key` function must not change the shape of the array." ) try: if isinstance( values, Index ): # convert to a new Index subclass, not necessarily the same result = Index(result) else: type_of_values = type(values) result = type_of_values(result) # try to revert to original type otherwise except TypeError: raise TypeError( f"User-provided `key` function returned an invalid type {type(result)} \ which could not be converted to {type(values)}." ) return result def get_flattened_list( comp_ids: np.ndarray, ngroups: int, levels: Iterable["Index"], labels: Iterable[np.ndarray], ) -> List[Tuple]: """Map compressed group id -> key tuple.""" comp_ids = comp_ids.astype(np.int64, copy=False) arrays: DefaultDict[int, List[int]] = defaultdict(list) for labs, level in zip(labels, levels): table = hashtable.Int64HashTable(ngroups) table.map(comp_ids, labs.astype(np.int64, copy=False)) for i in range(ngroups): arrays[i].append(level[table.get_item(i)]) return [tuple(array) for array in arrays.values()] def get_indexer_dict(label_list, keys): """ Returns ------- dict Labels mapped to indexers. """ shape = [len(x) for x in keys] group_index = get_group_index(label_list, shape, sort=True, xnull=True) ngroups = ( ((group_index.size and group_index.max()) + 1) if is_int64_overflow_possible(shape) else np.prod(shape, dtype="i8") ) sorter = get_group_index_sorter(group_index, ngroups) sorted_labels = [lab.take(sorter) for lab in label_list] group_index = group_index.take(sorter) return lib.indices_fast(sorter, group_index, keys, sorted_labels) # ---------------------------------------------------------------------- # sorting levels...cleverly? def get_group_index_sorter(group_index, ngroups: int): """ algos.groupsort_indexer implements `counting sort` and it is at least O(ngroups), where ngroups = prod(shape) shape = map(len, keys) that is, linear in the number of combinations (cartesian product) of unique values of groupby keys. This can be huge when doing multi-key groupby. np.argsort(kind='mergesort') is O(count x log(count)) where count is the length of the data-frame; Both algorithms are `stable` sort and that is necessary for correctness of groupby operations. e.g. consider: df.groupby(key)[col].transform('first') """ count = len(group_index) alpha = 0.0 # taking complexities literally; there may be beta = 1.0 # some room for fine-tuning these parameters do_groupsort = count > 0 and ((alpha + beta * ngroups) < (count * np.log(count))) if do_groupsort: sorter, _ = algos.groupsort_indexer(ensure_int64(group_index), ngroups) return ensure_platform_int(sorter) else: return group_index.argsort(kind="mergesort") def compress_group_index(group_index, sort: bool = True): """ Group_index is offsets into cartesian product of all possible labels. This space can be huge, so this function compresses it, by computing offsets (comp_ids) into the list of unique labels (obs_group_ids). 
""" size_hint = min(len(group_index), hashtable.SIZE_HINT_LIMIT) table = hashtable.Int64HashTable(size_hint) group_index = ensure_int64(group_index) # note, group labels come out ascending (ie, 1,2,3 etc) comp_ids, obs_group_ids = table.get_labels_groupby(group_index) if sort and len(obs_group_ids) > 0: obs_group_ids, comp_ids = _reorder_by_uniques(obs_group_ids, comp_ids) return comp_ids, obs_group_ids def _reorder_by_uniques(uniques, labels): # sorter is index where elements ought to go sorter = uniques.argsort() # reverse_indexer is where elements came from reverse_indexer = np.empty(len(sorter), dtype=np.int64) reverse_indexer.put(sorter, np.arange(len(sorter))) mask = labels < 0 # move labels to right locations (ie, unsort ascending labels) labels = algorithms.take_nd(reverse_indexer, labels, allow_fill=False) np.putmask(labels, mask, -1) # sort observed ids uniques = algorithms.take_nd(uniques, sorter, allow_fill=False) return uniques, labels
from datetime import datetime, timedelta import numpy as np import pytest from pandas._libs.tslibs import Timestamp import pandas as pd from pandas import Float64Index, Index, Int64Index, Series, UInt64Index import pandas._testing as tm from pandas.tests.indexes.common import Base class Numeric(Base): def test_where(self): # Tested in numeric.test_indexing pass def test_can_hold_identifiers(self): idx = self.create_index() key = idx[0] assert idx._can_hold_identifiers_and_holds_name(key) is False def test_format(self): # GH35439 idx = self.create_index() max_width = max(len(str(x)) for x in idx) expected = [str(x).ljust(max_width) for x in idx] assert idx.format() == expected def test_numeric_compat(self): pass # override Base method def test_explicit_conversions(self): # GH 8608 # add/sub are overridden explicitly for Float/Int Index idx = self._holder(np.arange(5, dtype="int64")) # float conversions arr = np.arange(5, dtype="int64") * 3.2 expected = Float64Index(arr) fidx = idx * 3.2 tm.assert_index_equal(fidx, expected) fidx = 3.2 * idx tm.assert_index_equal(fidx, expected) # interops with numpy arrays expected = Float64Index(arr) a = np.zeros(5, dtype="float64") result = fidx - a tm.assert_index_equal(result, expected) expected = Float64Index(-arr) a = np.zeros(5, dtype="float64") result = a - fidx tm.assert_index_equal(result, expected) def test_index_groupby(self): int_idx = Index(range(6)) float_idx = Index(np.arange(0, 0.6, 0.1)) obj_idx = Index("A B C D E F".split()) dt_idx = pd.date_range("2013-01-01", freq="M", periods=6) for idx in [int_idx, float_idx, obj_idx, dt_idx]: to_groupby = np.array([1, 2, np.nan, np.nan, 2, 1]) tm.assert_dict_equal( idx.groupby(to_groupby), {1.0: idx[[0, 5]], 2.0: idx[[1, 4]]} ) to_groupby = Index( [ datetime(2011, 11, 1), datetime(2011, 12, 1), pd.NaT, pd.NaT, datetime(2011, 12, 1), datetime(2011, 11, 1), ], tz="UTC", ).values ex_keys = [Timestamp("2011-11-01"), Timestamp("2011-12-01")] expected = {ex_keys[0]: idx[[0, 5]], ex_keys[1]: idx[[1, 4]]} tm.assert_dict_equal(idx.groupby(to_groupby), expected) def test_insert_na(self, nulls_fixture): # GH 18295 (test missing) index = self.create_index() if nulls_fixture is pd.NaT: expected = Index([index[0], pd.NaT] + list(index[1:]), dtype=object) else: expected = Float64Index([index[0], np.nan] + list(index[1:])) result = index.insert(1, nulls_fixture) tm.assert_index_equal(result, expected) class TestFloat64Index(Numeric): _holder = Float64Index @pytest.fixture( params=[ [1.5, 2, 3, 4, 5], [0.0, 2.5, 5.0, 7.5, 10.0], [5, 4, 3, 2, 1.5], [10.0, 7.5, 5.0, 2.5, 0.0], ], ids=["mixed", "float", "mixed_dec", "float_dec"], ) def index(self, request): return Float64Index(request.param) @pytest.fixture def mixed_index(self): return Float64Index([1.5, 2, 3, 4, 5]) @pytest.fixture def float_index(self): return Float64Index([0.0, 2.5, 5.0, 7.5, 10.0]) def create_index(self) -> Float64Index: return Float64Index(np.arange(5, dtype="float64")) def test_repr_roundtrip(self, index): tm.assert_index_equal(eval(repr(index)), index) def check_is_index(self, i): assert isinstance(i, Index) assert not isinstance(i, Float64Index) def check_coerce(self, a, b, is_float_index=True): assert a.equals(b) tm.assert_index_equal(a, b, exact=False) if is_float_index: assert isinstance(b, Float64Index) else: self.check_is_index(b) def test_constructor(self): # explicit construction index = Float64Index([1, 2, 3, 4, 5]) assert isinstance(index, Float64Index) expected = np.array([1, 2, 3, 4, 5], dtype="float64") 
tm.assert_numpy_array_equal(index.values, expected) index = Float64Index(np.array([1, 2, 3, 4, 5])) assert isinstance(index, Float64Index) index = Float64Index([1.0, 2, 3, 4, 5]) assert isinstance(index, Float64Index) index = Float64Index(np.array([1.0, 2, 3, 4, 5])) assert isinstance(index, Float64Index) assert index.dtype == float index = Float64Index(np.array([1.0, 2, 3, 4, 5]), dtype=np.float32) assert isinstance(index, Float64Index) assert index.dtype == np.float64 index = Float64Index(np.array([1, 2, 3, 4, 5]), dtype=np.float32) assert isinstance(index, Float64Index) assert index.dtype == np.float64 # nan handling result = Float64Index([np.nan, np.nan]) assert pd.isna(result.values).all() result = Float64Index(np.array([np.nan])) assert pd.isna(result.values).all() result = Index(np.array([np.nan])) assert pd.isna(result.values).all() @pytest.mark.parametrize( "index, dtype", [ (pd.Int64Index, "float64"), (pd.UInt64Index, "categorical"), (pd.Float64Index, "datetime64"), (pd.RangeIndex, "float64"), ], ) def test_invalid_dtype(self, index, dtype): # GH 29539 with pytest.raises( ValueError, match=rf"Incorrect `dtype` passed: expected \w+(?: \w+)?, received {dtype}", ): index([1, 2, 3], dtype=dtype) def test_constructor_invalid(self): # invalid msg = ( r"Float64Index\(\.\.\.\) must be called with a collection of " r"some kind, 0\.0 was passed" ) with pytest.raises(TypeError, match=msg): Float64Index(0.0) msg = ( "String dtype not supported, " "you may need to explicitly cast to a numeric type" ) with pytest.raises(TypeError, match=msg): Float64Index(["a", "b", 0.0]) msg = r"float\(\) argument must be a string or a number, not 'Timestamp'" with pytest.raises(TypeError, match=msg): Float64Index([Timestamp("20130101")]) def test_constructor_coerce(self, mixed_index, float_index): self.check_coerce(mixed_index, Index([1.5, 2, 3, 4, 5])) self.check_coerce(float_index, Index(np.arange(5) * 2.5)) self.check_coerce( float_index, Index(np.array(np.arange(5) * 2.5, dtype=object)) ) def test_constructor_explicit(self, mixed_index, float_index): # these don't auto convert self.check_coerce( float_index, Index((np.arange(5) * 2.5), dtype=object), is_float_index=False ) self.check_coerce( mixed_index, Index([1.5, 2, 3, 4, 5], dtype=object), is_float_index=False ) def test_type_coercion_fail(self, any_int_dtype): # see gh-15832 msg = "Trying to coerce float values to integers" with pytest.raises(ValueError, match=msg): Index([1, 2, 3.5], dtype=any_int_dtype) def test_type_coercion_valid(self, float_dtype): # There is no Float32Index, so we always # generate Float64Index. 
i = Index([1, 2, 3.5], dtype=float_dtype) tm.assert_index_equal(i, Index([1, 2, 3.5])) def test_equals_numeric(self): i = Float64Index([1.0, 2.0]) assert i.equals(i) assert i.identical(i) i2 = Float64Index([1.0, 2.0]) assert i.equals(i2) i = Float64Index([1.0, np.nan]) assert i.equals(i) assert i.identical(i) i2 = Float64Index([1.0, np.nan]) assert i.equals(i2) @pytest.mark.parametrize( "other", ( Int64Index([1, 2]), Index([1.0, 2.0], dtype=object), Index([1, 2], dtype=object), ), ) def test_equals_numeric_other_index_type(self, other): i = Float64Index([1.0, 2.0]) assert i.equals(other) assert other.equals(i) @pytest.mark.parametrize( "vals", [ pd.date_range("2016-01-01", periods=3), pd.timedelta_range("1 Day", periods=3), ], ) def test_lookups_datetimelike_values(self, vals): # If we have datetime64 or timedelta64 values, make sure they are # wrappped correctly GH#31163 ser = pd.Series(vals, index=range(3, 6)) ser.index = ser.index.astype("float64") expected = vals[1] with tm.assert_produces_warning(FutureWarning): result = ser.index.get_value(ser, 4.0) assert isinstance(result, type(expected)) and result == expected with tm.assert_produces_warning(FutureWarning): result = ser.index.get_value(ser, 4) assert isinstance(result, type(expected)) and result == expected result = ser[4.0] assert isinstance(result, type(expected)) and result == expected result = ser[4] assert isinstance(result, type(expected)) and result == expected result = ser.loc[4.0] assert isinstance(result, type(expected)) and result == expected result = ser.loc[4] assert isinstance(result, type(expected)) and result == expected result = ser.at[4.0] assert isinstance(result, type(expected)) and result == expected # GH#31329 .at[4] should cast to 4.0, matching .loc behavior result = ser.at[4] assert isinstance(result, type(expected)) and result == expected result = ser.iloc[1] assert isinstance(result, type(expected)) and result == expected result = ser.iat[1] assert isinstance(result, type(expected)) and result == expected def test_doesnt_contain_all_the_things(self): i = Float64Index([np.nan]) assert not i.isin([0]).item() assert not i.isin([1]).item() assert i.isin([np.nan]).item() def test_nan_multiple_containment(self): i = Float64Index([1.0, np.nan]) tm.assert_numpy_array_equal(i.isin([1.0]), np.array([True, False])) tm.assert_numpy_array_equal(i.isin([2.0, np.pi]), np.array([False, False])) tm.assert_numpy_array_equal(i.isin([np.nan]), np.array([False, True])) tm.assert_numpy_array_equal(i.isin([1.0, np.nan]), np.array([True, True])) i = Float64Index([1.0, 2.0]) tm.assert_numpy_array_equal(i.isin([np.nan]), np.array([False, False])) def test_fillna_float64(self): # GH 11343 idx = Index([1.0, np.nan, 3.0], dtype=float, name="x") # can't downcast exp = Index([1.0, 0.1, 3.0], name="x") tm.assert_index_equal(idx.fillna(0.1), exp) # downcast exp = Float64Index([1.0, 2.0, 3.0], name="x") tm.assert_index_equal(idx.fillna(2), exp) # object exp = Index([1.0, "obj", 3.0], name="x") tm.assert_index_equal(idx.fillna("obj"), exp) class NumericInt(Numeric): def test_view(self): i = self._holder([], name="Foo") i_view = i.view() assert i_view.name == "Foo" i_view = i.view(self._dtype) tm.assert_index_equal(i, self._holder(i_view, name="Foo")) i_view = i.view(self._holder) tm.assert_index_equal(i, self._holder(i_view, name="Foo")) def test_is_monotonic(self): index = self._holder([1, 2, 3, 4]) assert index.is_monotonic is True assert index.is_monotonic_increasing is True assert index._is_strictly_monotonic_increasing is True assert 
index.is_monotonic_decreasing is False assert index._is_strictly_monotonic_decreasing is False index = self._holder([4, 3, 2, 1]) assert index.is_monotonic is False assert index._is_strictly_monotonic_increasing is False assert index._is_strictly_monotonic_decreasing is True index = self._holder([1]) assert index.is_monotonic is True assert index.is_monotonic_increasing is True assert index.is_monotonic_decreasing is True assert index._is_strictly_monotonic_increasing is True assert index._is_strictly_monotonic_decreasing is True def test_is_strictly_monotonic(self): index = self._holder([1, 1, 2, 3]) assert index.is_monotonic_increasing is True assert index._is_strictly_monotonic_increasing is False index = self._holder([3, 2, 1, 1]) assert index.is_monotonic_decreasing is True assert index._is_strictly_monotonic_decreasing is False index = self._holder([1, 1]) assert index.is_monotonic_increasing assert index.is_monotonic_decreasing assert not index._is_strictly_monotonic_increasing assert not index._is_strictly_monotonic_decreasing def test_logical_compat(self): idx = self.create_index() assert idx.all() == idx.values.all() assert idx.any() == idx.values.any() def test_identical(self): index = self.create_index() i = Index(index.copy()) assert i.identical(index) same_values_different_type = Index(i, dtype=object) assert not i.identical(same_values_different_type) i = index.astype(dtype=object) i = i.rename("foo") same_values = Index(i, dtype=object) assert same_values.identical(i) assert not i.identical(index) assert Index(same_values, name="foo", dtype=object).identical(i) assert not index.astype(dtype=object).identical(index.astype(dtype=self._dtype)) def test_union_noncomparable(self): # corner case, non-Int64Index index = self.create_index() other = Index([datetime.now() + timedelta(i) for i in range(4)], dtype=object) result = index.union(other) expected = Index(np.concatenate((index, other))) tm.assert_index_equal(result, expected) result = other.union(index) expected = Index(np.concatenate((other, index))) tm.assert_index_equal(result, expected) def test_cant_or_shouldnt_cast(self): msg = ( "String dtype not supported, " "you may need to explicitly cast to a numeric type" ) # can't data = ["foo", "bar", "baz"] with pytest.raises(TypeError, match=msg): self._holder(data) # shouldn't data = ["0", "1", "2"] with pytest.raises(TypeError, match=msg): self._holder(data) def test_view_index(self): index = self.create_index() index.view(Index) def test_prevent_casting(self): index = self.create_index() result = index.astype("O") assert result.dtype == np.object_ class TestInt64Index(NumericInt): _dtype = "int64" _holder = Int64Index @pytest.fixture( params=[range(0, 20, 2), range(19, -1, -1)], ids=["index_inc", "index_dec"] ) def index(self, request): return Int64Index(request.param) def create_index(self) -> Int64Index: # return Int64Index(np.arange(5, dtype="int64")) return Int64Index(range(0, 20, 2)) def test_constructor(self): # pass list, coerce fine index = Int64Index([-5, 0, 1, 2]) expected = Index([-5, 0, 1, 2], dtype=np.int64) tm.assert_index_equal(index, expected) # from iterable index = Int64Index(iter([-5, 0, 1, 2])) tm.assert_index_equal(index, expected) # scalar raise Exception msg = ( r"Int64Index\(\.\.\.\) must be called with a collection of some " "kind, 5 was passed" ) with pytest.raises(TypeError, match=msg): Int64Index(5) # copy arr = index.values new_index = Int64Index(arr, copy=True) tm.assert_index_equal(new_index, index) val = arr[0] + 3000 # this should not 
change index arr[0] = val assert new_index[0] != val # interpret list-like expected = Int64Index([5, 0]) for cls in [Index, Int64Index]: for idx in [ cls([5, 0], dtype="int64"), cls(np.array([5, 0]), dtype="int64"), cls(Series([5, 0]), dtype="int64"), ]: tm.assert_index_equal(idx, expected) def test_constructor_corner(self): arr = np.array([1, 2, 3, 4], dtype=object) index = Int64Index(arr) assert index.values.dtype == np.int64 tm.assert_index_equal(index, Index(arr)) # preventing casting arr = np.array([1, "2", 3, "4"], dtype=object) with pytest.raises(TypeError, match="casting"): Int64Index(arr) arr_with_floats = [0, 2, 3, 4, 5, 1.25, 3, -1] with pytest.raises(TypeError, match="casting"): Int64Index(arr_with_floats) def test_constructor_coercion_signed_to_unsigned(self, uint_dtype): # see gh-15832 msg = "Trying to coerce negative values to unsigned integers" with pytest.raises(OverflowError, match=msg): Index([-1], dtype=uint_dtype) def test_constructor_unwraps_index(self): idx = pd.Index([1, 2]) result = pd.Int64Index(idx) expected = np.array([1, 2], dtype="int64") tm.assert_numpy_array_equal(result._data, expected) def test_coerce_list(self): # coerce things arr = Index([1, 2, 3, 4]) assert isinstance(arr, Int64Index) # but not if explicit dtype passed arr = Index([1, 2, 3, 4], dtype=object) assert isinstance(arr, Index) def test_intersection(self): index = self.create_index() other = Index([1, 2, 3, 4, 5]) result = index.intersection(other) expected = Index(np.sort(np.intersect1d(index.values, other.values))) tm.assert_index_equal(result, expected) result = other.intersection(index) expected = Index( np.sort(np.asarray(np.intersect1d(index.values, other.values))) ) tm.assert_index_equal(result, expected) class TestUInt64Index(NumericInt): _dtype = "uint64" _holder = UInt64Index @pytest.fixture( params=[ [2 ** 63, 2 ** 63 + 10, 2 ** 63 + 15, 2 ** 63 + 20, 2 ** 63 + 25], [2 ** 63 + 25, 2 ** 63 + 20, 2 ** 63 + 15, 2 ** 63 + 10, 2 ** 63], ], ids=["index_inc", "index_dec"], ) def index(self, request): return UInt64Index(request.param) @pytest.fixture def index_large(self): # large values used in TestUInt64Index where no compat needed with Int64/Float64 large = [2 ** 63, 2 ** 63 + 10, 2 ** 63 + 15, 2 ** 63 + 20, 2 ** 63 + 25] return UInt64Index(large) def create_index(self) -> UInt64Index: # compat with shared Int64/Float64 tests; use index_large for UInt64 only tests return UInt64Index(np.arange(5, dtype="uint64")) def test_constructor(self): idx = UInt64Index([1, 2, 3]) res = Index([1, 2, 3], dtype=np.uint64) tm.assert_index_equal(res, idx) idx = UInt64Index([1, 2 ** 63]) res = Index([1, 2 ** 63], dtype=np.uint64) tm.assert_index_equal(res, idx) idx = UInt64Index([1, 2 ** 63]) res = Index([1, 2 ** 63]) tm.assert_index_equal(res, idx) idx = Index([-1, 2 ** 63], dtype=object) res = Index(np.array([-1, 2 ** 63], dtype=object)) tm.assert_index_equal(res, idx) # https://github.com/pandas-dev/pandas/issues/29526 idx = UInt64Index([1, 2 ** 63 + 1], dtype=np.uint64) res = Index([1, 2 ** 63 + 1], dtype=np.uint64) tm.assert_index_equal(res, idx) def test_intersection(self, index_large): other = Index([2 ** 63, 2 ** 63 + 5, 2 ** 63 + 10, 2 ** 63 + 15, 2 ** 63 + 20]) result = index_large.intersection(other) expected = Index(np.sort(np.intersect1d(index_large.values, other.values))) tm.assert_index_equal(result, expected) result = other.intersection(index_large) expected = Index( np.sort(np.asarray(np.intersect1d(index_large.values, other.values))) ) tm.assert_index_equal(result, expected) 
@pytest.mark.parametrize("dtype", ["int64", "uint64"]) def test_int_float_union_dtype(dtype): # https://github.com/pandas-dev/pandas/issues/26778 # [u]int | float -> float index = pd.Index([0, 2, 3], dtype=dtype) other = pd.Float64Index([0.5, 1.5]) expected = pd.Float64Index([0.0, 0.5, 1.5, 2.0, 3.0]) result = index.union(other) tm.assert_index_equal(result, expected) result = other.union(index) tm.assert_index_equal(result, expected) def test_range_float_union_dtype(): # https://github.com/pandas-dev/pandas/issues/26778 index = pd.RangeIndex(start=0, stop=3) other = pd.Float64Index([0.5, 1.5]) result = index.union(other) expected = pd.Float64Index([0.0, 0.5, 1, 1.5, 2.0]) tm.assert_index_equal(result, expected) result = other.union(index) tm.assert_index_equal(result, expected) @pytest.mark.parametrize( "box", [list, lambda x: np.array(x, dtype=object), lambda x: pd.Index(x, dtype=object)], ) def test_uint_index_does_not_convert_to_float64(box): # https://github.com/pandas-dev/pandas/issues/28279 # https://github.com/pandas-dev/pandas/issues/28023 series = pd.Series( [0, 1, 2, 3, 4, 5], index=[ 7606741985629028552, 17876870360202815256, 17876870360202815256, 13106359306506049338, 8991270399732411471, 8991270399732411472, ], ) result = series.loc[box([7606741985629028552, 17876870360202815256])] expected = UInt64Index( [7606741985629028552, 17876870360202815256, 17876870360202815256], dtype="uint64", ) tm.assert_index_equal(result.index, expected) tm.assert_equal(result, series[:3]) def test_float64_index_equals(): # https://github.com/pandas-dev/pandas/issues/35217 float_index = pd.Index([1.0, 2, 3]) string_index = pd.Index(["1", "2", "3"]) result = float_index.equals(string_index) assert result is False result = string_index.equals(float_index) assert result is False def test_float64_index_difference(): # https://github.com/pandas-dev/pandas/issues/35217 float_index = pd.Index([1.0, 2, 3]) string_index = pd.Index(["1", "2", "3"]) result = float_index.difference(string_index) tm.assert_index_equal(result, float_index) result = string_index.difference(float_index) tm.assert_index_equal(result, string_index) class TestGetSliceBounds: @pytest.mark.parametrize("kind", ["getitem", "loc", None]) @pytest.mark.parametrize("side, expected", [("left", 4), ("right", 5)]) def test_get_slice_bounds_within(self, kind, side, expected): index = Index(range(6)) result = index.get_slice_bound(4, kind=kind, side=side) assert result == expected @pytest.mark.parametrize("kind", ["getitem", "loc", None]) @pytest.mark.parametrize("side", ["left", "right"]) @pytest.mark.parametrize("bound, expected", [(-1, 0), (10, 6)]) def test_get_slice_bounds_outside(self, kind, side, expected, bound): index = Index(range(6)) result = index.get_slice_bound(bound, kind=kind, side=side) assert result == expected
rs2/pandas
pandas/tests/indexes/test_numeric.py
pandas/core/sorting.py
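The Float64Index/Int64Index/UInt64Index constructor tests above encode a few dtype-coercion rules; the lines below illustrate the same rules at the public pd.Index level. This reflects the pandas versions these tests target (the specialised numeric Index classes are deprecated in later releases).

import pandas as pd

print(pd.Index([1, 2, 3]).dtype)                 # int64
print(pd.Index([1, 2, 3.5]).dtype)               # float64: mixed ints and floats coerce to float
print(pd.Index([1, 2 ** 63]).dtype)              # uint64: values beyond int64 stay unsigned
print(pd.Index([1, 2, 3], dtype=object).dtype)   # object: an explicit dtype is respected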
from .path_histogram import PathHistogram, PathDensityHistogram from .channel_analysis import ChannelAnalysis from .replica_network import ReplicaNetwork, ReplicaNetworkGraph from .shooting_point_analysis import ShootingPointAnalysis from . import tis from . import tools
""" @author David W.H. Swenson @author Jan-Hendrik Prinz """ from __future__ import absolute_import from builtins import zip from builtins import range from builtins import object import os import pytest from nose.tools import (assert_equal) import openpathsampling as paths import openpathsampling.engines.openmm as peng import openpathsampling.engines.toy as toys from openpathsampling.netcdfplus import ObjectJSON from openpathsampling.storage import Storage from .test_helpers import (data_filename, md, compare_snapshot) import numpy as np from nose.plugins.skip import SkipTest class TestStorage(object): def setup(self): if not md: raise SkipTest("mdtraj not installed") self.mdtraj = md.load(data_filename("ala_small_traj.pdb")) _ = pytest.importorskip('simtk.unit') self.traj = peng.trajectory_from_mdtraj( self.mdtraj, simple_topology=True) self.filename = data_filename("storage_test.nc") self.filename_clone = data_filename("storage_test_clone.nc") self.simplifier = ObjectJSON() self.template_snapshot = self.traj[0] self.solute_indices = list(range(22)) self.toy_topology = toys.Topology( n_spatial=2, masses=[1.0, 1.0], pes=None ) self.engine = toys.Engine({}, self.toy_topology) self.toy_template = toys.Snapshot( coordinates=np.array([[-0.5, -0.5]]), velocities=np.array([[0.0,0.0]]), engine=self.engine ) def teardown(self): if os.path.isfile(self.filename): os.remove(self.filename) if os.path.isfile(self.filename_clone): os.remove(self.filename_clone) def test_create_storage(self): store = Storage(filename=self.filename, mode='w') assert(os.path.isfile(data_filename("storage_test.nc"))) store.close() def test_stored_topology(self): raise SkipTest store = Storage( filename=self.filename, mode='w') assert(os.path.isfile(self.filename)) store.close() store = Storage(filename=self.filename, mode='a') loaded_topology = store.template.topology # check if path topologies have the same JSON string # this also tests the simplifier for topologies assert_equal( self.simplifier.to_json(self.template_snapshot.topology), self.simplifier.to_json(loaded_topology) ) store.close() def test_safemode(self): fname = data_filename("cv_storage_safemode_test.nc") if os.path.isfile(fname): os.remove(fname) cv = paths.CoordinateFunctionCV('cv', lambda x: x) traj = paths.Trajectory(list(self.traj)) template = traj[0] storage_w = paths.Storage(fname, "w") storage_w.snapshots.save(template) storage_w.cvs.save(cv) storage_w.close() storage_r = paths.Storage(fname, 'r') # default safemode = False assert(storage_r.simplifier.safemode is False) cv_r = storage_r.cvs[0] assert(cv_r == cv) assert(cv.cv_callable is not None) storage_r.close() storage_r = paths.Storage(fname, 'r') storage_r.simplifier.safemode = True cv_r = storage_r.cvs[0] assert(cv_r == cv) assert(cv_r.cv_callable is None) storage_r.close() def test_store_snapshots(self): fname = data_filename("cv_storage_test.nc") if os.path.isfile(fname): os.remove(fname) traj = paths.Trajectory(list(self.traj)) template = traj[0] for use_cache in (False, True): # print '==========================================================' # print 'UUID', use_uuid, 'CACHE', use_cache # print '==========================================================' storage_w = paths.Storage(fname, "w") storage_w.snapshots.save(template) # let's mess up the order in which we save and include # reversed ones as well assert(len(storage_w.snapshots) == 2) assert(len(storage_w.trajectories) == 0) assert(len(storage_w.stores['snapshot0']) == 2) storage_w.snapshots.save(traj[8].reversed) 
assert(len(storage_w.snapshots) == 4) assert(len(storage_w.trajectories) == 0) assert(len(storage_w.stores['snapshot0']) == 4) # this will store traj[6:] under pos IDX #0 storage_w.trajectories.save(traj[6:]) assert(len(storage_w.snapshots) == 10) assert(len(storage_w.trajectories) == 1) assert(len(storage_w.stores['snapshot0']) == 10) traj_rev = traj.reversed # this will store traj_rev under pos IDX #1 storage_w.trajectories.mention(traj_rev) assert(len(storage_w.snapshots) == 20) assert(len(storage_w.trajectories) == 2) assert(len(storage_w.stores['snapshot0']) == 10) # this will not do anything since traj is already saved storage_w.trajectories.save(traj_rev) assert(len(storage_w.snapshots) == 20) assert(len(storage_w.trajectories) == 2) assert(len(storage_w.stores['snapshot0']) == 10) # this will store traj under pos IDX #2 storage_w.trajectories.save(traj) assert(len(storage_w.snapshots) == 20) assert(len(storage_w.trajectories) == 3) assert(len(storage_w.stores['snapshot0']) == 20) # this will not store since traj is already stored storage_w.trajectories.save(traj) assert(len(storage_w.snapshots) == 20) assert(len(storage_w.trajectories) == 3) assert(len(storage_w.stores['snapshot0']) == 20) # we saved in this order [0f, 8r, 6f, 7f, 9f, 5r, 4r, 3r, 2r, 1r ] # these are indices [ 0, 17, 12, 14, 18, 3, 5, 7, 9, 11 ] storage_w.close() if use_cache: storage_r = paths.AnalysisStorage(fname) else: storage_r = paths.Storage(fname, 'r') storage_r.snapshots.set_caching(False) storage_r.stores['snapshot0'].set_caching(False) # check if the loaded trajectory is reproduced for s1, s2 in zip(traj, storage_r.trajectories[2]): compare_snapshot(s1, s2, True) # this is the expected order in which it is saved eff_traj = [ traj[0], traj[8].reversed, traj[6], traj[7], traj[9], traj[5].reversed, traj[4].reversed, traj[3].reversed, traj[2].reversed, traj[1].reversed, ] # load from hidden and see, if the hidden store looks as expected # we open every second snapshot from the hidden store because the # ones in between correspond to the reversed ones hidden_snapshots = storage_r.stores['snapshot0'][:] for idx in range(10): s1 = eff_traj[idx] s1r = s1.reversed s2 = hidden_snapshots[2 * idx] s2r = hidden_snapshots[2 * idx + 1] compare_snapshot(s1, s2, True) compare_snapshot(s1r, s2r, True) storage_r.close() def test_load_save(self): for use_uuid in [True]: store = Storage(filename=self.filename, mode='w') assert(os.path.isfile(self.filename)) store.save(self.template_snapshot) store.close() store = Storage(filename=self.filename, mode='a') loaded_template = store.snapshots[0] loaded_r = store.snapshots[1] compare_snapshot(loaded_template, self.template_snapshot, True) compare_snapshot( loaded_template.reversed, self.template_snapshot.reversed, True) compare_snapshot(loaded_r, self.template_snapshot.reversed) store.close() def test_proxy(self): for use_uuid in [True]: store = Storage(filename=self.filename, mode='w') assert(os.path.isfile(self.filename)) tm = self.template_snapshot store.save(tm) px = store.snapshots.proxy(store.snapshots.index.list[0]) # make sure that the proxy and assert(hash(px) == hash(tm)) assert(px == tm) store.snapshots.cache.clear() s0 = store.snapshots[0] assert(hash(px) == hash(s0)) assert(px == s0) compare_snapshot(px, tm) compare_snapshot(s0, tm) px = store.snapshots.proxy(store.snapshots.index.list[0]) # make sure that after reloading it still works assert(hash(px) == hash(tm)) assert(px == tm) store.close() store = Storage(filename=self.filename, mode='a') s1 = store.snapshots[0] 
store.close() # when loading only for uuid based storages you get the same id assert((hash(px) == hash(s1)) is use_uuid) assert((px == s1) is use_uuid) def test_mention_only(self): storage_w = paths.Storage(self.filename, "w") template = self.template_snapshot storage_w.snapshots.add_type(template) test_snap = self.traj[2] # only touch a new snapshot storage_w.snapshots.only_mention = True storage_w.snapshots.save(test_snap) # check that the snapshot is there assert(len(storage_w.snapshots) == 4) # in the memory uuid index assert(test_snap.__uuid__ in storage_w.snapshots.index) # and stored assert(test_snap.__uuid__ == storage_w.snapshots.vars['uuid'][1]) # but no real snapshot has been stored # print len(storage_w.objects['snapshot0']) assert(len(storage_w.objects['snapshot0']) == 2) # switch on normal saving storage_w.snapshots.only_mention = False test_snap = self.traj[4] storage_w.snapshots.mention(test_snap) # check that the snapshot is there assert(len(storage_w.snapshots) == 6) # in the memory uuid index assert(test_snap.__uuid__ in storage_w.snapshots.index) # and stored assert(test_snap.__uuid__ == storage_w.snapshots.vars['uuid'][2]) # but no real snapshot has been stored assert(len(storage_w.objects['snapshot0']) == 2) # try to now add it storage_w.snapshots.save(test_snap) # check that the snapshot is not stored again (only 3 snapshots) assert(len(storage_w.snapshots) == 6) assert(len(storage_w.objects['snapshot0']) == 4) # print storage_w.objects['snapshot0'][1].coordinates # print template.coordinates # print storage_w.objects['snapshot0'][0].coordinates # print test_snap.coordinates # print storage_w.objects['snapshot0'].vars['statics'][0].coordinates # print storage_w.objects['snapshot0'].vars['statics'][1].coordinates # print storage_w.objects['snapshot0'].index compare_snapshot(storage_w.objects['snapshot0'][4], test_snap) storage_w.close() def test_load_save_uuid(self): store = Storage(filename=self.filename, mode='w') assert(os.path.isfile(self.filename)) store.save(self.template_snapshot) store.close() store = Storage(filename=self.filename, mode='a') loaded_template = store.snapshots[self.template_snapshot.__uuid__] loaded_r = store.snapshots[self.template_snapshot.reversed.__uuid__] compare_snapshot(loaded_template, self.template_snapshot, True) compare_snapshot( loaded_template.reversed, self.template_snapshot.reversed, True) compare_snapshot(loaded_r, self.template_snapshot.reversed) store.close() def test_load_save_toy(self): store = Storage(filename=self.filename, mode='w') assert(os.path.isfile(self.filename)) store.save(self.toy_template) store.close() store = Storage(filename=self.filename, mode='a') loaded_template = store.snapshots[0] loaded_r = store.snapshots[1] compare_snapshot(loaded_template, self.toy_template, True) compare_snapshot( loaded_template.reversed, self.toy_template.reversed, True) compare_snapshot(loaded_r, self.toy_template.reversed) store.close() def test_reverse_bug(self): store = Storage(filename=self.filename, mode='w') assert(os.path.isfile(self.filename)) store.snapshots.save(self.template_snapshot) rev = self.template_snapshot.reversed # save the reversed one store.snapshots.save(rev) # check that the reversed one has index 1 and not 3! 
assert(store.idx(rev) == 1) # and we have exactly one snapshot assert(len(store.snapshots) == 2) assert(len(store.dimensions['snapshots']) == 1) store.close() def test_version(self): store = Storage( filename=self.filename, mode='w') assert(os.path.isfile(self.filename)) assert(store.storage_version == paths.version.version)
openpathsampling/openpathsampling
openpathsampling/tests/test_storage.py
openpathsampling/analysis/__init__.py
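The storage tests above repeatedly assert that len(storage.snapshots) grows in steps of two, because saving a snapshot also records its reversed partner. A minimal sketch of that behaviour, using only calls that appear in the tests; the function name and the file name example_storage.nc are made up for illustration.

import os
import openpathsampling as paths

def snapshots_after_single_save(snapshot, fname="example_storage.nc"):
    # One save is expected to yield two entries: the snapshot and its reverse.
    storage = paths.Storage(fname, "w")
    storage.snapshots.save(snapshot)
    count = len(storage.snapshots)
    storage.close()
    os.remove(fname)
    return count  # expected: 2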
import math import numpy as np from openpathsampling.netcdfplus import StorableNamedObject class ToyIntegrator(StorableNamedObject): """ Abstract base class for toy engine integrators. """ def __init__(self): super(StorableNamedObject, self).__init__() class LeapfrogVerletIntegrator(ToyIntegrator): """Leapfrog Verlet integrator Not for actual use, but the momentum and position update functions are used in other integrators, so we inherit from this. Parameters ---------- dt : float time step """ dd = None def __init__(self, dt): super(LeapfrogVerletIntegrator, self).__init__() self.dt = dt def _momentum_update(self, sys, mydt): sys.velocities -= sys.pes.dVdx(sys)*sys._minv*mydt def _position_update(self, sys, mydt): sys.positions += sys.velocities * mydt def step(self, sys): """ Take an MD step. Update in-place. Parameters ---------- sys : :class:`.ToyEngine` engine contains its state, including velocities and masses """ self._position_update(sys, 0.5*self.dt) self._momentum_update(sys, self.dt) self._position_update(sys, 0.5*self.dt) class LangevinBAOABIntegrator(LeapfrogVerletIntegrator): """Langevin integrator for simple toy models Implementation of the BAOAB integrator of Leimkuhler and Matthews [1]_. In particular, see the appendix on p.54 of that reference, which is where we take our notation from. Parameters ---------- dt : float time step temperature : float gamma : float friction constant for the Langevin equation Attributes ---------- beta : float inverse temperature c1 : float c1 parameter from Leimkuhler and Matthews c2 : float c2 parameter from Leimkuhler and Matthews c3 : float c3 parameter from Leimkuhler and Matthews References ---------- .. [1] B. Leimkuhler and C. Matthews. "Rational Construction of Stochastic Numerical Methods for Molecular Sampling." Appl. Math. Res. Express, 2013, 34-56 (2013). doi:10.1093/amrx/abs010 """ def __init__(self, dt, temperature, gamma): super(LangevinBAOABIntegrator, self).__init__(dt) self._beta = 1.0 / temperature self._c1 = math.exp(-gamma*dt) self._c2 = (1.0-self._c1)/gamma self._c3 = math.sqrt((1.0 - self._c1*self._c1) / self._beta) self.temperature = temperature self.gamma = gamma @property def beta(self): return self._beta @property def c1(self): return self._c1 @property def c2(self): return self._c2 @property def c3(self): return self._c3 def _OU_update(self, sys, mydt): R = np.random.normal(size=len(sys.velocities)) sys.velocities = (self._c1 * sys.velocities + self._c3 * np.sqrt(sys._minv) * R) def step(self, sys): """ Take an MD step. Update in-place. Parameters ---------- sys : :class:`.ToyEngine` engine contains its state, including velocities and masses """ self._momentum_update(sys, 0.5*self.dt) self._position_update(sys, 0.5*self.dt) self._OU_update(sys, self.dt) self._position_update(sys, 0.5*self.dt) self._momentum_update(sys, 0.5*self.dt)
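A small self-contained check of the BAOAB constants defined in LangevinBAOABIntegrator above: as gamma*dt approaches zero, c1 should tend to 1 and c3 to 0, recovering frictionless dynamics. The helper function and the numeric values are illustrative only.

import math

def baoab_constants(dt, temperature, gamma):
    # Same formulas as LangevinBAOABIntegrator.__init__ above.
    beta = 1.0 / temperature
    c1 = math.exp(-gamma * dt)
    c2 = (1.0 - c1) / gamma
    c3 = math.sqrt((1.0 - c1 * c1) / beta)
    return c1, c2, c3

print(baoab_constants(dt=0.02, temperature=0.1, gamma=2.5))   # plausible toy-model values
print(baoab_constants(dt=0.02, temperature=0.1, gamma=1e-8))  # c1 ~ 1, c3 ~ 0: friction switched off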
""" @author David W.H. Swenson @author Jan-Hendrik Prinz """ from __future__ import absolute_import from builtins import zip from builtins import range from builtins import object import os import pytest from nose.tools import (assert_equal) import openpathsampling as paths import openpathsampling.engines.openmm as peng import openpathsampling.engines.toy as toys from openpathsampling.netcdfplus import ObjectJSON from openpathsampling.storage import Storage from .test_helpers import (data_filename, md, compare_snapshot) import numpy as np from nose.plugins.skip import SkipTest class TestStorage(object): def setup(self): if not md: raise SkipTest("mdtraj not installed") self.mdtraj = md.load(data_filename("ala_small_traj.pdb")) _ = pytest.importorskip('simtk.unit') self.traj = peng.trajectory_from_mdtraj( self.mdtraj, simple_topology=True) self.filename = data_filename("storage_test.nc") self.filename_clone = data_filename("storage_test_clone.nc") self.simplifier = ObjectJSON() self.template_snapshot = self.traj[0] self.solute_indices = list(range(22)) self.toy_topology = toys.Topology( n_spatial=2, masses=[1.0, 1.0], pes=None ) self.engine = toys.Engine({}, self.toy_topology) self.toy_template = toys.Snapshot( coordinates=np.array([[-0.5, -0.5]]), velocities=np.array([[0.0,0.0]]), engine=self.engine ) def teardown(self): if os.path.isfile(self.filename): os.remove(self.filename) if os.path.isfile(self.filename_clone): os.remove(self.filename_clone) def test_create_storage(self): store = Storage(filename=self.filename, mode='w') assert(os.path.isfile(data_filename("storage_test.nc"))) store.close() def test_stored_topology(self): raise SkipTest store = Storage( filename=self.filename, mode='w') assert(os.path.isfile(self.filename)) store.close() store = Storage(filename=self.filename, mode='a') loaded_topology = store.template.topology # check if path topologies have the same JSON string # this also tests the simplifier for topologies assert_equal( self.simplifier.to_json(self.template_snapshot.topology), self.simplifier.to_json(loaded_topology) ) store.close() def test_safemode(self): fname = data_filename("cv_storage_safemode_test.nc") if os.path.isfile(fname): os.remove(fname) cv = paths.CoordinateFunctionCV('cv', lambda x: x) traj = paths.Trajectory(list(self.traj)) template = traj[0] storage_w = paths.Storage(fname, "w") storage_w.snapshots.save(template) storage_w.cvs.save(cv) storage_w.close() storage_r = paths.Storage(fname, 'r') # default safemode = False assert(storage_r.simplifier.safemode is False) cv_r = storage_r.cvs[0] assert(cv_r == cv) assert(cv.cv_callable is not None) storage_r.close() storage_r = paths.Storage(fname, 'r') storage_r.simplifier.safemode = True cv_r = storage_r.cvs[0] assert(cv_r == cv) assert(cv_r.cv_callable is None) storage_r.close() def test_store_snapshots(self): fname = data_filename("cv_storage_test.nc") if os.path.isfile(fname): os.remove(fname) traj = paths.Trajectory(list(self.traj)) template = traj[0] for use_cache in (False, True): # print '==========================================================' # print 'UUID', use_uuid, 'CACHE', use_cache # print '==========================================================' storage_w = paths.Storage(fname, "w") storage_w.snapshots.save(template) # let's mess up the order in which we save and include # reversed ones as well assert(len(storage_w.snapshots) == 2) assert(len(storage_w.trajectories) == 0) assert(len(storage_w.stores['snapshot0']) == 2) storage_w.snapshots.save(traj[8].reversed) 
assert(len(storage_w.snapshots) == 4) assert(len(storage_w.trajectories) == 0) assert(len(storage_w.stores['snapshot0']) == 4) # this will store traj[6:] under pos IDX #0 storage_w.trajectories.save(traj[6:]) assert(len(storage_w.snapshots) == 10) assert(len(storage_w.trajectories) == 1) assert(len(storage_w.stores['snapshot0']) == 10) traj_rev = traj.reversed # this will store traj_rev under pos IDX #1 storage_w.trajectories.mention(traj_rev) assert(len(storage_w.snapshots) == 20) assert(len(storage_w.trajectories) == 2) assert(len(storage_w.stores['snapshot0']) == 10) # this will not do anything since traj is already saved storage_w.trajectories.save(traj_rev) assert(len(storage_w.snapshots) == 20) assert(len(storage_w.trajectories) == 2) assert(len(storage_w.stores['snapshot0']) == 10) # this will store traj under pos IDX #2 storage_w.trajectories.save(traj) assert(len(storage_w.snapshots) == 20) assert(len(storage_w.trajectories) == 3) assert(len(storage_w.stores['snapshot0']) == 20) # this will not store since traj is already stored storage_w.trajectories.save(traj) assert(len(storage_w.snapshots) == 20) assert(len(storage_w.trajectories) == 3) assert(len(storage_w.stores['snapshot0']) == 20) # we saved in this order [0f, 8r, 6f, 7f, 9f, 5r, 4r, 3r, 2r, 1r ] # these are indices [ 0, 17, 12, 14, 18, 3, 5, 7, 9, 11 ] storage_w.close() if use_cache: storage_r = paths.AnalysisStorage(fname) else: storage_r = paths.Storage(fname, 'r') storage_r.snapshots.set_caching(False) storage_r.stores['snapshot0'].set_caching(False) # check if the loaded trajectory is reproduced for s1, s2 in zip(traj, storage_r.trajectories[2]): compare_snapshot(s1, s2, True) # this is the expected order in which it is saved eff_traj = [ traj[0], traj[8].reversed, traj[6], traj[7], traj[9], traj[5].reversed, traj[4].reversed, traj[3].reversed, traj[2].reversed, traj[1].reversed, ] # load from hidden and see, if the hidden store looks as expected # we open every second snapshot from the hidden store because the # ones in between correspond to the reversed ones hidden_snapshots = storage_r.stores['snapshot0'][:] for idx in range(10): s1 = eff_traj[idx] s1r = s1.reversed s2 = hidden_snapshots[2 * idx] s2r = hidden_snapshots[2 * idx + 1] compare_snapshot(s1, s2, True) compare_snapshot(s1r, s2r, True) storage_r.close() def test_load_save(self): for use_uuid in [True]: store = Storage(filename=self.filename, mode='w') assert(os.path.isfile(self.filename)) store.save(self.template_snapshot) store.close() store = Storage(filename=self.filename, mode='a') loaded_template = store.snapshots[0] loaded_r = store.snapshots[1] compare_snapshot(loaded_template, self.template_snapshot, True) compare_snapshot( loaded_template.reversed, self.template_snapshot.reversed, True) compare_snapshot(loaded_r, self.template_snapshot.reversed) store.close() def test_proxy(self): for use_uuid in [True]: store = Storage(filename=self.filename, mode='w') assert(os.path.isfile(self.filename)) tm = self.template_snapshot store.save(tm) px = store.snapshots.proxy(store.snapshots.index.list[0]) # make sure that the proxy and assert(hash(px) == hash(tm)) assert(px == tm) store.snapshots.cache.clear() s0 = store.snapshots[0] assert(hash(px) == hash(s0)) assert(px == s0) compare_snapshot(px, tm) compare_snapshot(s0, tm) px = store.snapshots.proxy(store.snapshots.index.list[0]) # make sure that after reloading it still works assert(hash(px) == hash(tm)) assert(px == tm) store.close() store = Storage(filename=self.filename, mode='a') s1 = store.snapshots[0] 
store.close() # when loading only for uuid based storages you get the same id assert((hash(px) == hash(s1)) is use_uuid) assert((px == s1) is use_uuid) def test_mention_only(self): storage_w = paths.Storage(self.filename, "w") template = self.template_snapshot storage_w.snapshots.add_type(template) test_snap = self.traj[2] # only touch a new snapshot storage_w.snapshots.only_mention = True storage_w.snapshots.save(test_snap) # check that the snapshot is there assert(len(storage_w.snapshots) == 4) # in the memory uuid index assert(test_snap.__uuid__ in storage_w.snapshots.index) # and stored assert(test_snap.__uuid__ == storage_w.snapshots.vars['uuid'][1]) # but no real snapshot has been stored # print len(storage_w.objects['snapshot0']) assert(len(storage_w.objects['snapshot0']) == 2) # switch on normal saving storage_w.snapshots.only_mention = False test_snap = self.traj[4] storage_w.snapshots.mention(test_snap) # check that the snapshot is there assert(len(storage_w.snapshots) == 6) # in the memory uuid index assert(test_snap.__uuid__ in storage_w.snapshots.index) # and stored assert(test_snap.__uuid__ == storage_w.snapshots.vars['uuid'][2]) # but no real snapshot has been stored assert(len(storage_w.objects['snapshot0']) == 2) # try to now add it storage_w.snapshots.save(test_snap) # check that the snapshot is not stored again (only 3 snapshots) assert(len(storage_w.snapshots) == 6) assert(len(storage_w.objects['snapshot0']) == 4) # print storage_w.objects['snapshot0'][1].coordinates # print template.coordinates # print storage_w.objects['snapshot0'][0].coordinates # print test_snap.coordinates # print storage_w.objects['snapshot0'].vars['statics'][0].coordinates # print storage_w.objects['snapshot0'].vars['statics'][1].coordinates # print storage_w.objects['snapshot0'].index compare_snapshot(storage_w.objects['snapshot0'][4], test_snap) storage_w.close() def test_load_save_uuid(self): store = Storage(filename=self.filename, mode='w') assert(os.path.isfile(self.filename)) store.save(self.template_snapshot) store.close() store = Storage(filename=self.filename, mode='a') loaded_template = store.snapshots[self.template_snapshot.__uuid__] loaded_r = store.snapshots[self.template_snapshot.reversed.__uuid__] compare_snapshot(loaded_template, self.template_snapshot, True) compare_snapshot( loaded_template.reversed, self.template_snapshot.reversed, True) compare_snapshot(loaded_r, self.template_snapshot.reversed) store.close() def test_load_save_toy(self): store = Storage(filename=self.filename, mode='w') assert(os.path.isfile(self.filename)) store.save(self.toy_template) store.close() store = Storage(filename=self.filename, mode='a') loaded_template = store.snapshots[0] loaded_r = store.snapshots[1] compare_snapshot(loaded_template, self.toy_template, True) compare_snapshot( loaded_template.reversed, self.toy_template.reversed, True) compare_snapshot(loaded_r, self.toy_template.reversed) store.close() def test_reverse_bug(self): store = Storage(filename=self.filename, mode='w') assert(os.path.isfile(self.filename)) store.snapshots.save(self.template_snapshot) rev = self.template_snapshot.reversed # save the reversed one store.snapshots.save(rev) # check that the reversed one has index 1 and not 3! 
assert(store.idx(rev) == 1) # and we have exactly one snapshot assert(len(store.snapshots) == 2) assert(len(store.dimensions['snapshots']) == 1) store.close() def test_version(self): store = Storage( filename=self.filename, mode='w') assert(os.path.isfile(self.filename)) assert(store.storage_version == paths.version.version)
openpathsampling/openpathsampling
openpathsampling/tests/test_storage.py
openpathsampling/engines/toy/integrators.py
from __future__ import absolute_import import os from openpathsampling.storage import Storage import openpathsampling.engines.openmm as peng try: import mdtraj as md except ImportError: md = None from .test_helpers import data_filename def setup_package(): if not md: return # this should generate the ala_small_traj.nc file which we'll use for # everything else # NOTE: tests using this must be skipped if there's no mdtraj! mdtrajectory = md.load(data_filename("ala_small_traj.pdb")) snapshot = peng.snapshot_from_pdb(data_filename("ala_small_traj.pdb")) storage = Storage( filename=data_filename("ala_small_traj.nc"), template=snapshot, mode='w' ) mytraj = peng.trajectory_from_mdtraj(mdtrajectory, simple_topology=True) storage.trajectories.save(mytraj) storage.close() def teardown_package(): if os.path.isfile(data_filename("ala_small_traj.nc")): os.remove(data_filename("ala_small_traj.nc"))
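A hedged sketch of the same file generation written as a pytest session fixture, reusing only the names already imported in this module (md, peng, Storage, data_filename); it is an alternative to the nose-style setup_package/teardown_package hooks, not part of the package.

import os
import pytest

@pytest.fixture(scope="session")
def ala_small_traj_nc():
    # Build the same ala_small_traj.nc file once per test session, then remove it.
    if not md:
        pytest.skip("mdtraj not installed")
    path = data_filename("ala_small_traj.nc")
    snapshot = peng.snapshot_from_pdb(data_filename("ala_small_traj.pdb"))
    storage = Storage(filename=path, template=snapshot, mode='w')
    mytraj = peng.trajectory_from_mdtraj(
        md.load(data_filename("ala_small_traj.pdb")), simple_topology=True)
    storage.trajectories.save(mytraj)
    storage.close()
    yield path
    if os.path.isfile(path):
        os.remove(path)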
""" @author David W.H. Swenson @author Jan-Hendrik Prinz """ from __future__ import absolute_import from builtins import zip from builtins import range from builtins import object import os import pytest from nose.tools import (assert_equal) import openpathsampling as paths import openpathsampling.engines.openmm as peng import openpathsampling.engines.toy as toys from openpathsampling.netcdfplus import ObjectJSON from openpathsampling.storage import Storage from .test_helpers import (data_filename, md, compare_snapshot) import numpy as np from nose.plugins.skip import SkipTest class TestStorage(object): def setup(self): if not md: raise SkipTest("mdtraj not installed") self.mdtraj = md.load(data_filename("ala_small_traj.pdb")) _ = pytest.importorskip('simtk.unit') self.traj = peng.trajectory_from_mdtraj( self.mdtraj, simple_topology=True) self.filename = data_filename("storage_test.nc") self.filename_clone = data_filename("storage_test_clone.nc") self.simplifier = ObjectJSON() self.template_snapshot = self.traj[0] self.solute_indices = list(range(22)) self.toy_topology = toys.Topology( n_spatial=2, masses=[1.0, 1.0], pes=None ) self.engine = toys.Engine({}, self.toy_topology) self.toy_template = toys.Snapshot( coordinates=np.array([[-0.5, -0.5]]), velocities=np.array([[0.0,0.0]]), engine=self.engine ) def teardown(self): if os.path.isfile(self.filename): os.remove(self.filename) if os.path.isfile(self.filename_clone): os.remove(self.filename_clone) def test_create_storage(self): store = Storage(filename=self.filename, mode='w') assert(os.path.isfile(data_filename("storage_test.nc"))) store.close() def test_stored_topology(self): raise SkipTest store = Storage( filename=self.filename, mode='w') assert(os.path.isfile(self.filename)) store.close() store = Storage(filename=self.filename, mode='a') loaded_topology = store.template.topology # check if path topologies have the same JSON string # this also tests the simplifier for topologies assert_equal( self.simplifier.to_json(self.template_snapshot.topology), self.simplifier.to_json(loaded_topology) ) store.close() def test_safemode(self): fname = data_filename("cv_storage_safemode_test.nc") if os.path.isfile(fname): os.remove(fname) cv = paths.CoordinateFunctionCV('cv', lambda x: x) traj = paths.Trajectory(list(self.traj)) template = traj[0] storage_w = paths.Storage(fname, "w") storage_w.snapshots.save(template) storage_w.cvs.save(cv) storage_w.close() storage_r = paths.Storage(fname, 'r') # default safemode = False assert(storage_r.simplifier.safemode is False) cv_r = storage_r.cvs[0] assert(cv_r == cv) assert(cv.cv_callable is not None) storage_r.close() storage_r = paths.Storage(fname, 'r') storage_r.simplifier.safemode = True cv_r = storage_r.cvs[0] assert(cv_r == cv) assert(cv_r.cv_callable is None) storage_r.close() def test_store_snapshots(self): fname = data_filename("cv_storage_test.nc") if os.path.isfile(fname): os.remove(fname) traj = paths.Trajectory(list(self.traj)) template = traj[0] for use_cache in (False, True): # print '==========================================================' # print 'UUID', use_uuid, 'CACHE', use_cache # print '==========================================================' storage_w = paths.Storage(fname, "w") storage_w.snapshots.save(template) # let's mess up the order in which we save and include # reversed ones as well assert(len(storage_w.snapshots) == 2) assert(len(storage_w.trajectories) == 0) assert(len(storage_w.stores['snapshot0']) == 2) storage_w.snapshots.save(traj[8].reversed) 
assert(len(storage_w.snapshots) == 4) assert(len(storage_w.trajectories) == 0) assert(len(storage_w.stores['snapshot0']) == 4) # this will store traj[6:] under pos IDX #0 storage_w.trajectories.save(traj[6:]) assert(len(storage_w.snapshots) == 10) assert(len(storage_w.trajectories) == 1) assert(len(storage_w.stores['snapshot0']) == 10) traj_rev = traj.reversed # this will store traj_rev under pos IDX #1 storage_w.trajectories.mention(traj_rev) assert(len(storage_w.snapshots) == 20) assert(len(storage_w.trajectories) == 2) assert(len(storage_w.stores['snapshot0']) == 10) # this will not do anything since traj is already saved storage_w.trajectories.save(traj_rev) assert(len(storage_w.snapshots) == 20) assert(len(storage_w.trajectories) == 2) assert(len(storage_w.stores['snapshot0']) == 10) # this will store traj under pos IDX #2 storage_w.trajectories.save(traj) assert(len(storage_w.snapshots) == 20) assert(len(storage_w.trajectories) == 3) assert(len(storage_w.stores['snapshot0']) == 20) # this will not store since traj is already stored storage_w.trajectories.save(traj) assert(len(storage_w.snapshots) == 20) assert(len(storage_w.trajectories) == 3) assert(len(storage_w.stores['snapshot0']) == 20) # we saved in this order [0f, 8r, 6f, 7f, 9f, 5r, 4r, 3r, 2r, 1r ] # these are indices [ 0, 17, 12, 14, 18, 3, 5, 7, 9, 11 ] storage_w.close() if use_cache: storage_r = paths.AnalysisStorage(fname) else: storage_r = paths.Storage(fname, 'r') storage_r.snapshots.set_caching(False) storage_r.stores['snapshot0'].set_caching(False) # check if the loaded trajectory is reproduced for s1, s2 in zip(traj, storage_r.trajectories[2]): compare_snapshot(s1, s2, True) # this is the expected order in which it is saved eff_traj = [ traj[0], traj[8].reversed, traj[6], traj[7], traj[9], traj[5].reversed, traj[4].reversed, traj[3].reversed, traj[2].reversed, traj[1].reversed, ] # load from hidden and see, if the hidden store looks as expected # we open every second snapshot from the hidden store because the # ones in between correspond to the reversed ones hidden_snapshots = storage_r.stores['snapshot0'][:] for idx in range(10): s1 = eff_traj[idx] s1r = s1.reversed s2 = hidden_snapshots[2 * idx] s2r = hidden_snapshots[2 * idx + 1] compare_snapshot(s1, s2, True) compare_snapshot(s1r, s2r, True) storage_r.close() def test_load_save(self): for use_uuid in [True]: store = Storage(filename=self.filename, mode='w') assert(os.path.isfile(self.filename)) store.save(self.template_snapshot) store.close() store = Storage(filename=self.filename, mode='a') loaded_template = store.snapshots[0] loaded_r = store.snapshots[1] compare_snapshot(loaded_template, self.template_snapshot, True) compare_snapshot( loaded_template.reversed, self.template_snapshot.reversed, True) compare_snapshot(loaded_r, self.template_snapshot.reversed) store.close() def test_proxy(self): for use_uuid in [True]: store = Storage(filename=self.filename, mode='w') assert(os.path.isfile(self.filename)) tm = self.template_snapshot store.save(tm) px = store.snapshots.proxy(store.snapshots.index.list[0]) # make sure that the proxy and assert(hash(px) == hash(tm)) assert(px == tm) store.snapshots.cache.clear() s0 = store.snapshots[0] assert(hash(px) == hash(s0)) assert(px == s0) compare_snapshot(px, tm) compare_snapshot(s0, tm) px = store.snapshots.proxy(store.snapshots.index.list[0]) # make sure that after reloading it still works assert(hash(px) == hash(tm)) assert(px == tm) store.close() store = Storage(filename=self.filename, mode='a') s1 = store.snapshots[0] 
store.close() # when loading only for uuid based storages you get the same id assert((hash(px) == hash(s1)) is use_uuid) assert((px == s1) is use_uuid) def test_mention_only(self): storage_w = paths.Storage(self.filename, "w") template = self.template_snapshot storage_w.snapshots.add_type(template) test_snap = self.traj[2] # only touch a new snapshot storage_w.snapshots.only_mention = True storage_w.snapshots.save(test_snap) # check that the snapshot is there assert(len(storage_w.snapshots) == 4) # in the memory uuid index assert(test_snap.__uuid__ in storage_w.snapshots.index) # and stored assert(test_snap.__uuid__ == storage_w.snapshots.vars['uuid'][1]) # but no real snapshot has been stored # print len(storage_w.objects['snapshot0']) assert(len(storage_w.objects['snapshot0']) == 2) # switch on normal saving storage_w.snapshots.only_mention = False test_snap = self.traj[4] storage_w.snapshots.mention(test_snap) # check that the snapshot is there assert(len(storage_w.snapshots) == 6) # in the memory uuid index assert(test_snap.__uuid__ in storage_w.snapshots.index) # and stored assert(test_snap.__uuid__ == storage_w.snapshots.vars['uuid'][2]) # but no real snapshot has been stored assert(len(storage_w.objects['snapshot0']) == 2) # try to now add it storage_w.snapshots.save(test_snap) # check that the snapshot is not stored again (only 3 snapshots) assert(len(storage_w.snapshots) == 6) assert(len(storage_w.objects['snapshot0']) == 4) # print storage_w.objects['snapshot0'][1].coordinates # print template.coordinates # print storage_w.objects['snapshot0'][0].coordinates # print test_snap.coordinates # print storage_w.objects['snapshot0'].vars['statics'][0].coordinates # print storage_w.objects['snapshot0'].vars['statics'][1].coordinates # print storage_w.objects['snapshot0'].index compare_snapshot(storage_w.objects['snapshot0'][4], test_snap) storage_w.close() def test_load_save_uuid(self): store = Storage(filename=self.filename, mode='w') assert(os.path.isfile(self.filename)) store.save(self.template_snapshot) store.close() store = Storage(filename=self.filename, mode='a') loaded_template = store.snapshots[self.template_snapshot.__uuid__] loaded_r = store.snapshots[self.template_snapshot.reversed.__uuid__] compare_snapshot(loaded_template, self.template_snapshot, True) compare_snapshot( loaded_template.reversed, self.template_snapshot.reversed, True) compare_snapshot(loaded_r, self.template_snapshot.reversed) store.close() def test_load_save_toy(self): store = Storage(filename=self.filename, mode='w') assert(os.path.isfile(self.filename)) store.save(self.toy_template) store.close() store = Storage(filename=self.filename, mode='a') loaded_template = store.snapshots[0] loaded_r = store.snapshots[1] compare_snapshot(loaded_template, self.toy_template, True) compare_snapshot( loaded_template.reversed, self.toy_template.reversed, True) compare_snapshot(loaded_r, self.toy_template.reversed) store.close() def test_reverse_bug(self): store = Storage(filename=self.filename, mode='w') assert(os.path.isfile(self.filename)) store.snapshots.save(self.template_snapshot) rev = self.template_snapshot.reversed # save the reversed one store.snapshots.save(rev) # check that the reversed one has index 1 and not 3! 
assert(store.idx(rev) == 1) # and we have exactly one snapshot assert(len(store.snapshots) == 2) assert(len(store.dimensions['snapshots']) == 1) store.close() def test_version(self): store = Storage( filename=self.filename, mode='w') assert(os.path.isfile(self.filename)) assert(store.storage_version == paths.version.version)
openpathsampling/openpathsampling
openpathsampling/tests/test_storage.py
openpathsampling/tests/__init__.py
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# This file connects the readers/writers to the astropy.table.Table class

from __future__ import absolute_import, division, print_function

import re
import functools

from .. import registry as io_registry
from ...table import Table
from ...extern.six.moves import zip

__all__ = []


# Generic
# =======

def read_asciitable(filename, **kwargs):
    from .ui import read
    return read(filename, **kwargs)

io_registry.register_reader('ascii', Table, read_asciitable)


def write_asciitable(table, filename, **kwargs):
    from .ui import write
    return write(table, filename, **kwargs)

io_registry.register_writer('ascii', Table, write_asciitable)


def io_read(format, filename, **kwargs):
    from .ui import read
    format = re.sub(r'^ascii\.', '', format)
    return read(filename, format=format, **kwargs)


def io_write(format, table, filename, **kwargs):
    from .ui import write
    format = re.sub(r'^ascii\.', '', format)
    return write(table, filename, format=format, **kwargs)


def io_identify(suffix, origin, filepath, fileobj, *args, **kwargs):
    return filepath is not None and filepath.endswith(suffix)


def _get_connectors_table():
    from .core import FORMAT_CLASSES

    rows = []
    rows.append(('ascii', '', 'Yes',
                 'ASCII table in any supported format (uses guessing)'))
    for format in sorted(FORMAT_CLASSES):
        cls = FORMAT_CLASSES[format]

        io_format = 'ascii.' + cls._format_name
        description = getattr(cls, '_description', '')
        class_link = ':class:`~{0}.{1}`'.format(cls.__module__, cls.__name__)
        suffix = getattr(cls, '_io_registry_suffix', '')
        can_write = 'Yes' if getattr(cls, '_io_registry_can_write', True) else ''

        rows.append((io_format, suffix, can_write,
                     '{0}: {1}'.format(class_link, description)))

    out = Table(list(zip(*rows)),
                names=('Format', 'Suffix', 'Write', 'Description'))
    for colname in ('Format', 'Description'):
        width = max(len(x) for x in out[colname])
        out[colname].format = '%-{0}s'.format(width)

    return out


# Specific
# ========

def read_csv(filename, **kwargs):
    from .ui import read
    kwargs['format'] = 'csv'
    return read(filename, **kwargs)


def write_csv(table, filename, **kwargs):
    from .ui import write
    kwargs['format'] = 'csv'
    return write(table, filename, **kwargs)


csv_identify = functools.partial(io_identify, '.csv')

io_registry.register_reader('csv', Table, read_csv)
io_registry.register_writer('csv', Table, write_csv)
io_registry.register_identifier('csv', Table, csv_identify)
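# Editor's note: a short, hedged usage sketch of the connectors registered
# above. 'example.csv' and 'copy.txt' are hypothetical file names; the calls
# themselves are the public astropy.table API that io_registry dispatches to
# the read_csv/write_asciitable helpers defined in this module.
#
#     from astropy.table import Table
#
#     t = Table.read('example.csv', format='csv')   # handled by read_csv()
#     t.write('copy.txt', format='ascii')           # handled by write_asciitable()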
# Licensed under a 3-clause BSD style license - see PYFITS.rst from __future__ import division, with_statement import math import os import time import warnings import numpy as np from ....io import fits from ....utils.exceptions import (AstropyDeprecationWarning, AstropyPendingDeprecationWarning) from ....tests.helper import pytest, raises, catch_warnings from ..hdu.compressed import SUBTRACTIVE_DITHER_1, DITHER_SEED_CHECKSUM from .test_table import comparerecords from . import FitsTestCase from .util import ignore_warnings class TestImageFunctions(FitsTestCase): def test_constructor_name_arg(self): """Like the test of the same name in test_table.py""" hdu = fits.ImageHDU() assert hdu.name == '' assert 'EXTNAME' not in hdu.header hdu.name = 'FOO' assert hdu.name == 'FOO' assert hdu.header['EXTNAME'] == 'FOO' # Passing name to constructor hdu = fits.ImageHDU(name='FOO') assert hdu.name == 'FOO' assert hdu.header['EXTNAME'] == 'FOO' # And overriding a header with a different extname hdr = fits.Header() hdr['EXTNAME'] = 'EVENTS' hdu = fits.ImageHDU(header=hdr, name='FOO') assert hdu.name == 'FOO' assert hdu.header['EXTNAME'] == 'FOO' def test_constructor_copies_header(self): """ Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/153 Ensure that a header from one HDU is copied when used to initialize new HDU. """ ifd = fits.HDUList(fits.PrimaryHDU()) phdr = ifd[0].header phdr['FILENAME'] = 'labq01i3q_rawtag.fits' primary_hdu = fits.PrimaryHDU(header=phdr) ofd = fits.HDUList(primary_hdu) ofd[0].header['FILENAME'] = 'labq01i3q_flt.fits' # Original header should be unchanged assert phdr['FILENAME'] == 'labq01i3q_rawtag.fits' @raises(ValueError) def test_open(self): # The function "open" reads a FITS file into an HDUList object. There # are three modes to open: "readonly" (the default), "append", and # "update". # Open a file read-only (the default mode), the content of the FITS # file are read into memory. r = fits.open(self.data('test0.fits')) # readonly # data parts are latent instantiation, so if we close the HDUList # without touching data, data can not be accessed. r.close() r[1].data[:2, :2] def test_open_2(self): r = fits.open(self.data('test0.fits')) info = ([(0, 'PRIMARY', 'PrimaryHDU', 138, (), '', '')] + [(x, 'SCI', 'ImageHDU', 61, (40, 40), 'int16', '') for x in range(1, 5)]) try: assert r.info(output=False) == info finally: r.close() def test_primary_with_extname(self): """Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/151 Tests that the EXTNAME keyword works with Primary HDUs as well, and interacts properly with the .name attribute. For convenience hdulist['PRIMARY'] will still refer to the first HDU even if it has an EXTNAME not equal to 'PRIMARY'. """ prihdr = fits.Header([('EXTNAME', 'XPRIMARY'), ('EXTVER', 1)]) hdul = fits.HDUList([fits.PrimaryHDU(header=prihdr)]) assert 'EXTNAME' in hdul[0].header assert hdul[0].name == 'XPRIMARY' assert hdul[0].name == hdul[0].header['EXTNAME'] info = [(0, 'XPRIMARY', 'PrimaryHDU', 5, (), '', '')] assert hdul.info(output=False) == info assert hdul['PRIMARY'] is hdul['XPRIMARY'] assert hdul['PRIMARY'] is hdul[('XPRIMARY', 1)] hdul[0].name = 'XPRIMARY2' assert hdul[0].header['EXTNAME'] == 'XPRIMARY2' hdul.writeto(self.temp('test.fits')) with fits.open(self.temp('test.fits')) as hdul: assert hdul[0].name == 'XPRIMARY2' @ignore_warnings(AstropyDeprecationWarning) def test_io_manipulation(self): # This legacy test also tests numerous deprecated interfaces for # backwards compatibility # Get a keyword value. 
An extension can be referred by name or by # number. Both extension and keyword names are case insensitive. with fits.open(self.data('test0.fits')) as r: assert r['primary'].header['naxis'] == 0 assert r[0].header['naxis'] == 0 # If there are more than one extension with the same EXTNAME value, # the EXTVER can be used (as the second argument) to distinguish # the extension. assert r['sci', 1].header['detector'] == 1 # append (using "update()") a new card r[0].header['xxx'] = 1.234e56 assert (str(r[0].header.ascard[-3:]) == "EXPFLAG = 'NORMAL ' / Exposure interruption indicator \n" "FILENAME= 'vtest3.fits' / File name \n" "XXX = 1.234E+56 ") # rename a keyword r[0].header.rename_key('filename', 'fname') pytest.raises(ValueError, r[0].header.rename_key, 'fname', 'history') pytest.raises(ValueError, r[0].header.rename_key, 'fname', 'simple') r[0].header.rename_key('fname', 'filename') # get a subsection of data assert np.array_equal(r[2].data[:3, :3], np.array([[349, 349, 348], [349, 349, 347], [347, 350, 349]], dtype=np.int16)) # We can create a new FITS file by opening a new file with "append" # mode. with fits.open(self.temp('test_new.fits'), mode='append') as n: # Append the primary header and the 2nd extension to the new # file. n.append(r[0]) n.append(r[2]) # The flush method will write the current HDUList object back # to the newly created file on disk. The HDUList is still open # and can be further operated. n.flush() assert n[1].data[1, 1] == 349 # modify a data point n[1].data[1, 1] = 99 # When the file is closed, the most recent additions of # extension(s) since last flush() will be appended, but any HDU # already existed at the last flush will not be modified del n # If an existing file is opened with "append" mode, like the # readonly mode, the HDU's will be read into the HDUList which can # be modified in memory but can not be written back to the original # file. A file opened with append mode can only add new HDU's. os.rename(self.temp('test_new.fits'), self.temp('test_append.fits')) with fits.open(self.temp('test_append.fits'), mode='append') as a: # The above change did not take effect since this was made # after the flush(). assert a[1].data[1, 1] == 349 a.append(r[1]) del a # When changes are made to an HDUList which was opened with # "update" mode, they will be written back to the original file # when a flush/close is called. os.rename(self.temp('test_append.fits'), self.temp('test_update.fits')) with fits.open(self.temp('test_update.fits'), mode='update') as u: # When the changes do not alter the size structures of the # original (or since last flush) HDUList, the changes are # written back "in place". assert u[0].header['rootname'] == 'U2EQ0201T' u[0].header['rootname'] = 'abc' assert u[1].data[1, 1] == 349 u[1].data[1, 1] = 99 u.flush() # If the changes affect the size structure, e.g. adding or # deleting HDU(s), header was expanded or reduced beyond # existing number of blocks (2880 bytes in each block), or # change the data size, the HDUList is written to a temporary # file, the original file is deleted, and the temporary file is # renamed to the original file name and reopened in the update # mode. To a user, these two kinds of updating writeback seem # to be the same, unless the optional argument in flush or # close is set to 1. del u[2] u.flush() # the write method in HDUList class writes the current HDUList, # with all changes made up to now, to a new file. This method # works the same disregard the mode the HDUList was opened # with. 
u.append(r[3]) u.writeto(self.temp('test_new.fits')) del u # Another useful new HDUList method is readall. It will "touch" the # data parts in all HDUs, so even if the HDUList is closed, we can # still operate on the data. with fits.open(self.data('test0.fits')) as r: r.readall() assert r[1].data[1, 1] == 315 # create an HDU with data only data = np.ones((3, 5), dtype=np.float32) hdu = fits.ImageHDU(data=data, name='SCI') assert np.array_equal(hdu.data, np.array([[1., 1., 1., 1., 1.], [1., 1., 1., 1., 1.], [1., 1., 1., 1., 1.]], dtype=np.float32)) # create an HDU with header and data # notice that the header has the right NAXIS's since it is constructed # with ImageHDU hdu2 = fits.ImageHDU(header=r[1].header, data=np.array([1, 2], dtype='int32')) assert (str(hdu2.header.ascard[1:5]) == "BITPIX = 32 / array data type \n" "NAXIS = 1 / number of array dimensions \n" "NAXIS1 = 2 \n" "PCOUNT = 0 / number of parameters ") def test_memory_mapping(self): # memory mapping f1 = fits.open(self.data('test0.fits'), memmap=1) f1.close() def test_verification_on_output(self): # verification on output # make a defect HDUList first x = fits.ImageHDU() hdu = fits.HDUList(x) # HDUList can take a list or one single HDU with catch_warnings() as w: hdu.verify() text = "HDUList's 0th element is not a primary HDU." assert len(w) == 3 assert text in str(w[1].message) with catch_warnings() as w: hdu.writeto(self.temp('test_new2.fits'), 'fix') text = ("HDUList's 0th element is not a primary HDU. " "Fixed by inserting one as 0th HDU.") assert len(w) == 3 assert text in str(w[1].message) def test_section(self): # section testing fs = fits.open(self.data('arange.fits')) assert np.array_equal(fs[0].section[3, 2, 5], 357) assert np.array_equal( fs[0].section[3, 2, :], np.array([352, 353, 354, 355, 356, 357, 358, 359, 360, 361, 362])) assert np.array_equal(fs[0].section[3, 2, 4:], np.array([356, 357, 358, 359, 360, 361, 362])) assert np.array_equal(fs[0].section[3, 2, :8], np.array([352, 353, 354, 355, 356, 357, 358, 359])) assert np.array_equal(fs[0].section[3, 2, -8:8], np.array([355, 356, 357, 358, 359])) assert np.array_equal( fs[0].section[3, 2:5, :], np.array([[352, 353, 354, 355, 356, 357, 358, 359, 360, 361, 362], [363, 364, 365, 366, 367, 368, 369, 370, 371, 372, 373], [374, 375, 376, 377, 378, 379, 380, 381, 382, 383, 384]])) assert np.array_equal(fs[0].section[3, :, :][:3, :3], np.array([[330, 331, 332], [341, 342, 343], [352, 353, 354]])) dat = fs[0].data assert np.array_equal(fs[0].section[3, 2:5, :8], dat[3, 2:5, :8]) assert np.array_equal(fs[0].section[3, 2:5, 3], dat[3, 2:5, 3]) assert np.array_equal(fs[0].section[3:6, :, :][:3, :3, :3], np.array([[[330, 331, 332], [341, 342, 343], [352, 353, 354]], [[440, 441, 442], [451, 452, 453], [462, 463, 464]], [[550, 551, 552], [561, 562, 563], [572, 573, 574]]])) assert np.array_equal(fs[0].section[:, :, :][:3, :2, :2], np.array([[[0, 1], [11, 12]], [[110, 111], [121, 122]], [[220, 221], [231, 232]]])) assert np.array_equal(fs[0].section[:, 2, :], dat[:, 2, :]) assert np.array_equal(fs[0].section[:, 2:5, :], dat[:, 2:5, :]) assert np.array_equal(fs[0].section[3:6, 3, :], dat[3:6, 3, :]) assert np.array_equal(fs[0].section[3:6, 3:7, :], dat[3:6, 3:7, :]) assert np.array_equal(fs[0].section[:, ::2], dat[:, ::2]) assert np.array_equal(fs[0].section[:, [1, 2, 4], 3], dat[:, [1, 2, 4], 3]) assert np.array_equal( fs[0].section[:, np.array([True, False, True]), :], dat[:, np.array([True, False, True]), :]) assert np.array_equal( fs[0].section[3:6, 3, :, ...], dat[3:6, 3, 
:, ...]) assert np.array_equal(fs[0].section[..., ::2], dat[..., ::2]) assert np.array_equal(fs[0].section[..., [1, 2, 4], 3], dat[..., [1, 2, 4], 3]) def test_section_data_single(self): a = np.array([1]) hdu = fits.PrimaryHDU(a) hdu.writeto(self.temp('test_new.fits')) hdul = fits.open(self.temp('test_new.fits')) sec = hdul[0].section dat = hdul[0].data assert np.array_equal(sec[0], dat[0]) assert np.array_equal(sec[...], dat[...]) assert np.array_equal(sec[..., 0], dat[..., 0]) assert np.array_equal(sec[0, ...], dat[0, ...]) def test_section_data_square(self): a = np.arange(4).reshape((2, 2)) hdu = fits.PrimaryHDU(a) hdu.writeto(self.temp('test_new.fits')) hdul = fits.open(self.temp('test_new.fits')) d = hdul[0] dat = hdul[0].data assert (d.section[:, :] == dat[:, :]).all() assert (d.section[0, :] == dat[0, :]).all() assert (d.section[1, :] == dat[1, :]).all() assert (d.section[:, 0] == dat[:, 0]).all() assert (d.section[:, 1] == dat[:, 1]).all() assert (d.section[0, 0] == dat[0, 0]).all() assert (d.section[0, 1] == dat[0, 1]).all() assert (d.section[1, 0] == dat[1, 0]).all() assert (d.section[1, 1] == dat[1, 1]).all() assert (d.section[0:1, 0:1] == dat[0:1, 0:1]).all() assert (d.section[0:2, 0:1] == dat[0:2, 0:1]).all() assert (d.section[0:1, 0:2] == dat[0:1, 0:2]).all() assert (d.section[0:2, 0:2] == dat[0:2, 0:2]).all() def test_section_data_cube(self): a = np.arange(18).reshape((2, 3, 3)) hdu = fits.PrimaryHDU(a) hdu.writeto(self.temp('test_new.fits')) hdul = fits.open(self.temp('test_new.fits')) d = hdul[0] dat = hdul[0].data # TODO: Generate these perumtions instead of having them all written # out, yeesh! assert (d.section[:, :, :] == dat[:, :, :]).all() assert (d.section[:, :] == dat[:, :]).all() assert (d.section[:] == dat[:]).all() assert (d.section[0, :, :] == dat[0, :, :]).all() assert (d.section[1, :, :] == dat[1, :, :]).all() assert (d.section[0, 0, :] == dat[0, 0, :]).all() assert (d.section[0, 1, :] == dat[0, 1, :]).all() assert (d.section[0, 2, :] == dat[0, 2, :]).all() assert (d.section[1, 0, :] == dat[1, 0, :]).all() assert (d.section[1, 1, :] == dat[1, 1, :]).all() assert (d.section[1, 2, :] == dat[1, 2, :]).all() assert (d.section[0, 0, 0] == dat[0, 0, 0]).all() assert (d.section[0, 0, 1] == dat[0, 0, 1]).all() assert (d.section[0, 0, 2] == dat[0, 0, 2]).all() assert (d.section[0, 1, 0] == dat[0, 1, 0]).all() assert (d.section[0, 1, 1] == dat[0, 1, 1]).all() assert (d.section[0, 1, 2] == dat[0, 1, 2]).all() assert (d.section[0, 2, 0] == dat[0, 2, 0]).all() assert (d.section[0, 2, 1] == dat[0, 2, 1]).all() assert (d.section[0, 2, 2] == dat[0, 2, 2]).all() assert (d.section[1, 0, 0] == dat[1, 0, 0]).all() assert (d.section[1, 0, 1] == dat[1, 0, 1]).all() assert (d.section[1, 0, 2] == dat[1, 0, 2]).all() assert (d.section[1, 1, 0] == dat[1, 1, 0]).all() assert (d.section[1, 1, 1] == dat[1, 1, 1]).all() assert (d.section[1, 1, 2] == dat[1, 1, 2]).all() assert (d.section[1, 2, 0] == dat[1, 2, 0]).all() assert (d.section[1, 2, 1] == dat[1, 2, 1]).all() assert (d.section[1, 2, 2] == dat[1, 2, 2]).all() assert (d.section[:, 0, 0] == dat[:, 0, 0]).all() assert (d.section[:, 0, 1] == dat[:, 0, 1]).all() assert (d.section[:, 0, 2] == dat[:, 0, 2]).all() assert (d.section[:, 1, 0] == dat[:, 1, 0]).all() assert (d.section[:, 1, 1] == dat[:, 1, 1]).all() assert (d.section[:, 1, 2] == dat[:, 1, 2]).all() assert (d.section[:, 2, 0] == dat[:, 2, 0]).all() assert (d.section[:, 2, 1] == dat[:, 2, 1]).all() assert (d.section[:, 2, 2] == dat[:, 2, 2]).all() assert (d.section[0, :, 0] == 
dat[0, :, 0]).all() assert (d.section[0, :, 1] == dat[0, :, 1]).all() assert (d.section[0, :, 2] == dat[0, :, 2]).all() assert (d.section[1, :, 0] == dat[1, :, 0]).all() assert (d.section[1, :, 1] == dat[1, :, 1]).all() assert (d.section[1, :, 2] == dat[1, :, 2]).all() assert (d.section[:, :, 0] == dat[:, :, 0]).all() assert (d.section[:, :, 1] == dat[:, :, 1]).all() assert (d.section[:, :, 2] == dat[:, :, 2]).all() assert (d.section[:, 0, :] == dat[:, 0, :]).all() assert (d.section[:, 1, :] == dat[:, 1, :]).all() assert (d.section[:, 2, :] == dat[:, 2, :]).all() assert (d.section[:, :, 0:1] == dat[:, :, 0:1]).all() assert (d.section[:, :, 0:2] == dat[:, :, 0:2]).all() assert (d.section[:, :, 0:3] == dat[:, :, 0:3]).all() assert (d.section[:, :, 1:2] == dat[:, :, 1:2]).all() assert (d.section[:, :, 1:3] == dat[:, :, 1:3]).all() assert (d.section[:, :, 2:3] == dat[:, :, 2:3]).all() assert (d.section[0:1, 0:1, 0:1] == dat[0:1, 0:1, 0:1]).all() assert (d.section[0:1, 0:1, 0:2] == dat[0:1, 0:1, 0:2]).all() assert (d.section[0:1, 0:1, 0:3] == dat[0:1, 0:1, 0:3]).all() assert (d.section[0:1, 0:1, 1:2] == dat[0:1, 0:1, 1:2]).all() assert (d.section[0:1, 0:1, 1:3] == dat[0:1, 0:1, 1:3]).all() assert (d.section[0:1, 0:1, 2:3] == dat[0:1, 0:1, 2:3]).all() assert (d.section[0:1, 0:2, 0:1] == dat[0:1, 0:2, 0:1]).all() assert (d.section[0:1, 0:2, 0:2] == dat[0:1, 0:2, 0:2]).all() assert (d.section[0:1, 0:2, 0:3] == dat[0:1, 0:2, 0:3]).all() assert (d.section[0:1, 0:2, 1:2] == dat[0:1, 0:2, 1:2]).all() assert (d.section[0:1, 0:2, 1:3] == dat[0:1, 0:2, 1:3]).all() assert (d.section[0:1, 0:2, 2:3] == dat[0:1, 0:2, 2:3]).all() assert (d.section[0:1, 0:3, 0:1] == dat[0:1, 0:3, 0:1]).all() assert (d.section[0:1, 0:3, 0:2] == dat[0:1, 0:3, 0:2]).all() assert (d.section[0:1, 0:3, 0:3] == dat[0:1, 0:3, 0:3]).all() assert (d.section[0:1, 0:3, 1:2] == dat[0:1, 0:3, 1:2]).all() assert (d.section[0:1, 0:3, 1:3] == dat[0:1, 0:3, 1:3]).all() assert (d.section[0:1, 0:3, 2:3] == dat[0:1, 0:3, 2:3]).all() assert (d.section[0:1, 1:2, 0:1] == dat[0:1, 1:2, 0:1]).all() assert (d.section[0:1, 1:2, 0:2] == dat[0:1, 1:2, 0:2]).all() assert (d.section[0:1, 1:2, 0:3] == dat[0:1, 1:2, 0:3]).all() assert (d.section[0:1, 1:2, 1:2] == dat[0:1, 1:2, 1:2]).all() assert (d.section[0:1, 1:2, 1:3] == dat[0:1, 1:2, 1:3]).all() assert (d.section[0:1, 1:2, 2:3] == dat[0:1, 1:2, 2:3]).all() assert (d.section[0:1, 1:3, 0:1] == dat[0:1, 1:3, 0:1]).all() assert (d.section[0:1, 1:3, 0:2] == dat[0:1, 1:3, 0:2]).all() assert (d.section[0:1, 1:3, 0:3] == dat[0:1, 1:3, 0:3]).all() assert (d.section[0:1, 1:3, 1:2] == dat[0:1, 1:3, 1:2]).all() assert (d.section[0:1, 1:3, 1:3] == dat[0:1, 1:3, 1:3]).all() assert (d.section[0:1, 1:3, 2:3] == dat[0:1, 1:3, 2:3]).all() assert (d.section[1:2, 0:1, 0:1] == dat[1:2, 0:1, 0:1]).all() assert (d.section[1:2, 0:1, 0:2] == dat[1:2, 0:1, 0:2]).all() assert (d.section[1:2, 0:1, 0:3] == dat[1:2, 0:1, 0:3]).all() assert (d.section[1:2, 0:1, 1:2] == dat[1:2, 0:1, 1:2]).all() assert (d.section[1:2, 0:1, 1:3] == dat[1:2, 0:1, 1:3]).all() assert (d.section[1:2, 0:1, 2:3] == dat[1:2, 0:1, 2:3]).all() assert (d.section[1:2, 0:2, 0:1] == dat[1:2, 0:2, 0:1]).all() assert (d.section[1:2, 0:2, 0:2] == dat[1:2, 0:2, 0:2]).all() assert (d.section[1:2, 0:2, 0:3] == dat[1:2, 0:2, 0:3]).all() assert (d.section[1:2, 0:2, 1:2] == dat[1:2, 0:2, 1:2]).all() assert (d.section[1:2, 0:2, 1:3] == dat[1:2, 0:2, 1:3]).all() assert (d.section[1:2, 0:2, 2:3] == dat[1:2, 0:2, 2:3]).all() assert (d.section[1:2, 0:3, 0:1] == dat[1:2, 0:3, 
0:1]).all() assert (d.section[1:2, 0:3, 0:2] == dat[1:2, 0:3, 0:2]).all() assert (d.section[1:2, 0:3, 0:3] == dat[1:2, 0:3, 0:3]).all() assert (d.section[1:2, 0:3, 1:2] == dat[1:2, 0:3, 1:2]).all() assert (d.section[1:2, 0:3, 1:3] == dat[1:2, 0:3, 1:3]).all() assert (d.section[1:2, 0:3, 2:3] == dat[1:2, 0:3, 2:3]).all() assert (d.section[1:2, 1:2, 0:1] == dat[1:2, 1:2, 0:1]).all() assert (d.section[1:2, 1:2, 0:2] == dat[1:2, 1:2, 0:2]).all() assert (d.section[1:2, 1:2, 0:3] == dat[1:2, 1:2, 0:3]).all() assert (d.section[1:2, 1:2, 1:2] == dat[1:2, 1:2, 1:2]).all() assert (d.section[1:2, 1:2, 1:3] == dat[1:2, 1:2, 1:3]).all() assert (d.section[1:2, 1:2, 2:3] == dat[1:2, 1:2, 2:3]).all() assert (d.section[1:2, 1:3, 0:1] == dat[1:2, 1:3, 0:1]).all() assert (d.section[1:2, 1:3, 0:2] == dat[1:2, 1:3, 0:2]).all() assert (d.section[1:2, 1:3, 0:3] == dat[1:2, 1:3, 0:3]).all() assert (d.section[1:2, 1:3, 1:2] == dat[1:2, 1:3, 1:2]).all() assert (d.section[1:2, 1:3, 1:3] == dat[1:2, 1:3, 1:3]).all() assert (d.section[1:2, 1:3, 2:3] == dat[1:2, 1:3, 2:3]).all() def test_section_data_four(self): a = np.arange(256).reshape((4, 4, 4, 4)) hdu = fits.PrimaryHDU(a) hdu.writeto(self.temp('test_new.fits')) hdul = fits.open(self.temp('test_new.fits')) d = hdul[0] dat = hdul[0].data assert (d.section[:, :, :, :] == dat[:, :, :, :]).all() assert (d.section[:, :, :] == dat[:, :, :]).all() assert (d.section[:, :] == dat[:, :]).all() assert (d.section[:] == dat[:]).all() assert (d.section[0, :, :, :] == dat[0, :, :, :]).all() assert (d.section[0, :, 0, :] == dat[0, :, 0, :]).all() assert (d.section[:, :, 0, :] == dat[:, :, 0, :]).all() assert (d.section[:, 1, 0, :] == dat[:, 1, 0, :]).all() assert (d.section[:, :, :, 1] == dat[:, :, :, 1]).all() def test_section_data_scaled(self): """ Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/143 This is like test_section_data_square but uses a file containing scaled image data, to test that sections can work correctly with scaled data. 
""" hdul = fits.open(self.data('scale.fits')) d = hdul[0] dat = hdul[0].data assert (d.section[:, :] == dat[:, :]).all() assert (d.section[0, :] == dat[0, :]).all() assert (d.section[1, :] == dat[1, :]).all() assert (d.section[:, 0] == dat[:, 0]).all() assert (d.section[:, 1] == dat[:, 1]).all() assert (d.section[0, 0] == dat[0, 0]).all() assert (d.section[0, 1] == dat[0, 1]).all() assert (d.section[1, 0] == dat[1, 0]).all() assert (d.section[1, 1] == dat[1, 1]).all() assert (d.section[0:1, 0:1] == dat[0:1, 0:1]).all() assert (d.section[0:2, 0:1] == dat[0:2, 0:1]).all() assert (d.section[0:1, 0:2] == dat[0:1, 0:2]).all() assert (d.section[0:2, 0:2] == dat[0:2, 0:2]).all() # Test without having accessed the full data first hdul = fits.open(self.data('scale.fits')) d = hdul[0] assert (d.section[:, :] == dat[:, :]).all() assert (d.section[0, :] == dat[0, :]).all() assert (d.section[1, :] == dat[1, :]).all() assert (d.section[:, 0] == dat[:, 0]).all() assert (d.section[:, 1] == dat[:, 1]).all() assert (d.section[0, 0] == dat[0, 0]).all() assert (d.section[0, 1] == dat[0, 1]).all() assert (d.section[1, 0] == dat[1, 0]).all() assert (d.section[1, 1] == dat[1, 1]).all() assert (d.section[0:1, 0:1] == dat[0:1, 0:1]).all() assert (d.section[0:2, 0:1] == dat[0:2, 0:1]).all() assert (d.section[0:1, 0:2] == dat[0:1, 0:2]).all() assert (d.section[0:2, 0:2] == dat[0:2, 0:2]).all() assert not d._data_loaded def test_do_not_scale_image_data(self): hdul = fits.open(self.data('scale.fits'), do_not_scale_image_data=True) assert hdul[0].data.dtype == np.dtype('>i2') hdul = fits.open(self.data('scale.fits')) assert hdul[0].data.dtype == np.dtype('float32') def test_append_uint_data(self): """Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/56 (BZERO and BSCALE added in the wrong location when appending scaled data) """ fits.writeto(self.temp('test_new.fits'), data=np.array([], dtype='uint8')) d = np.zeros([100, 100]).astype('uint16') fits.append(self.temp('test_new.fits'), data=d) f = fits.open(self.temp('test_new.fits'), uint=True) assert f[1].data.dtype == 'uint16' def test_uint_header_consistency(self): """ Regression test for https://github.com/astropy/astropy/issues/2305 This ensures that an HDU containing unsigned integer data always has the apppriate BZERO value in its header. """ for int_size in (16, 32, 64): # Just make an array of some unsigned ints that wouldn't fit in a # signed int array of the same bit width max_uint = (2 ** int_size) - 1 if int_size == 64: # Otherwise may get an overflow error, at least on Python 2 max_uint = np.uint64(int_size) dtype = 'uint%d' % int_size arr = np.empty(100, dtype=dtype) arr.fill(max_uint) arr -= np.arange(100, dtype=dtype) uint_hdu = fits.PrimaryHDU(data=arr) assert np.all(uint_hdu.data == arr) assert uint_hdu.data.dtype.name == 'uint%d' % int_size assert 'BZERO' in uint_hdu.header assert uint_hdu.header['BZERO'] == (2 ** (int_size - 1)) filename = 'uint%d.fits' % int_size uint_hdu.writeto(self.temp(filename)) with fits.open(self.temp(filename), uint=True) as hdul: new_uint_hdu = hdul[0] assert np.all(new_uint_hdu.data == arr) assert new_uint_hdu.data.dtype.name == 'uint%d' % int_size assert 'BZERO' in new_uint_hdu.header assert new_uint_hdu.header['BZERO'] == (2 ** (int_size - 1)) def test_blanks(self): """Test image data with blank spots in it (which should show up as NaNs in the data array. 
""" arr = np.zeros((10, 10), dtype=np.int32) # One row will be blanks arr[1] = 999 hdu = fits.ImageHDU(data=arr) hdu.header['BLANK'] = 999 hdu.writeto(self.temp('test_new.fits')) hdul = fits.open(self.temp('test_new.fits')) assert np.isnan(hdul[1].data[1]).all() def test_invalid_blanks(self): """ Test that invalid use of the BLANK keyword leads to an appropriate warning, and that the BLANK keyword is ignored when returning the HDU data. Regression test for https://github.com/astropy/astropy/issues/3865 """ arr = np.arange(5, dtype=np.float64) hdu = fits.PrimaryHDU(data=arr) hdu.header['BLANK'] = 2 with catch_warnings() as w: hdu.writeto(self.temp('test_new.fits')) # Allow the HDU to be written, but there should be a warning # when writing a header with BLANK when then data is not # int assert len(w) == 1 assert "Invalid 'BLANK' keyword in header" in str(w[0].message) # Should also get a warning when opening the file, and the BLANK # value should not be applied with catch_warnings() as w: with fits.open(self.temp('test_new.fits')) as h: assert len(w) == 1 assert "Invalid 'BLANK' keyword in header" in str(w[0].message) assert np.all(arr == h[0].data) def test_scale_back_with_blanks(self): """ Test that when auto-rescaling integer data with "blank" values (where the blanks are replaced by NaN in the float data), that the "BLANK" keyword is removed from the header. Further, test that when using the ``scale_back=True`` option the blank values are restored properly. Regression test for https://github.com/astropy/astropy/issues/3865 """ # Make the sample file arr = np.arange(5, dtype=np.int32) hdu = fits.PrimaryHDU(data=arr) hdu.scale('int16', bscale=1.23) # Creating data that uses BLANK is currently kludgy--a separate issue # TODO: Rewrite this test when scaling with blank support is better # supported # Let's just add a value to the data that should be converted to NaN # when it is read back in: hdu.data[0] = 9999 hdu.header['BLANK'] = 9999 hdu.writeto(self.temp('test.fits')) with fits.open(self.temp('test.fits')) as hdul: data = hdul[0].data assert np.isnan(data[0]) hdul.writeto(self.temp('test2.fits')) # Now reopen the newly written file. It should not have a 'BLANK' # keyword with catch_warnings() as w: with fits.open(self.temp('test2.fits')) as hdul2: assert len(w) == 0 assert 'BLANK' not in hdul2[0].header data = hdul2[0].data assert np.isnan(data[0]) # Finally, test that scale_back keeps the BLANKs correctly with fits.open(self.temp('test.fits'), scale_back=True, mode='update') as hdul3: data = hdul3[0].data assert np.isnan(data[0]) with fits.open(self.temp('test.fits'), do_not_scale_image_data=True) as hdul4: assert hdul4[0].header['BLANK'] == 9999 assert hdul4[0].header['BSCALE'] == 1.23 assert hdul4[0].data[0] == 9999 def test_bzero_with_floats(self): """Test use of the BZERO keyword in an image HDU containing float data. 
""" arr = np.zeros((10, 10)) - 1 hdu = fits.ImageHDU(data=arr) hdu.header['BZERO'] = 1.0 hdu.writeto(self.temp('test_new.fits')) hdul = fits.open(self.temp('test_new.fits')) arr += 1 assert (hdul[1].data == arr).all() def test_rewriting_large_scaled_image(self): """Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/84 and https://aeon.stsci.edu/ssb/trac/pyfits/ticket/101 """ hdul = fits.open(self.data('fixed-1890.fits')) orig_data = hdul[0].data with ignore_warnings(): hdul.writeto(self.temp('test_new.fits'), clobber=True) hdul.close() hdul = fits.open(self.temp('test_new.fits')) assert (hdul[0].data == orig_data).all() hdul.close() # Just as before, but this time don't touch hdul[0].data before writing # back out--this is the case that failed in # https://aeon.stsci.edu/ssb/trac/pyfits/ticket/84 hdul = fits.open(self.data('fixed-1890.fits')) with ignore_warnings(): hdul.writeto(self.temp('test_new.fits'), clobber=True) hdul.close() hdul = fits.open(self.temp('test_new.fits')) assert (hdul[0].data == orig_data).all() hdul.close() # Test opening/closing/reopening a scaled file in update mode hdul = fits.open(self.data('fixed-1890.fits'), do_not_scale_image_data=True) hdul.writeto(self.temp('test_new.fits'), clobber=True, output_verify='silentfix') hdul.close() hdul = fits.open(self.temp('test_new.fits')) orig_data = hdul[0].data hdul.close() hdul = fits.open(self.temp('test_new.fits'), mode='update') hdul.close() hdul = fits.open(self.temp('test_new.fits')) assert (hdul[0].data == orig_data).all() hdul = fits.open(self.temp('test_new.fits')) hdul.close() def test_image_update_header(self): """ Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/105 Replacing the original header to an image HDU and saving should update the NAXISn keywords appropriately and save the image data correctly. """ # Copy the original file before saving to it self.copy_file('test0.fits') with fits.open(self.temp('test0.fits'), mode='update') as hdul: orig_data = hdul[1].data.copy() hdr_copy = hdul[1].header.copy() del hdr_copy['NAXIS*'] hdul[1].header = hdr_copy with fits.open(self.temp('test0.fits')) as hdul: assert (orig_data == hdul[1].data).all() def test_open_scaled_in_update_mode(self): """ Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/119 (Don't update scaled image data if the data is not read) This ensures that merely opening and closing a file containing scaled image data does not cause any change to the data (or the header). Changes should only occur if the data is accessed. """ # Copy the original file before making any possible changes to it self.copy_file('scale.fits') mtime = os.stat(self.temp('scale.fits')).st_mtime time.sleep(1) fits.open(self.temp('scale.fits'), mode='update').close() # Ensure that no changes were made to the file merely by immediately # opening and closing it. 
assert mtime == os.stat(self.temp('scale.fits')).st_mtime # Insert a slight delay to ensure the mtime does change when the file # is changed time.sleep(1) hdul = fits.open(self.temp('scale.fits'), 'update') orig_data = hdul[0].data hdul.close() # Now the file should be updated with the rescaled data assert mtime != os.stat(self.temp('scale.fits')).st_mtime hdul = fits.open(self.temp('scale.fits'), mode='update') assert hdul[0].data.dtype == np.dtype('>f4') assert hdul[0].header['BITPIX'] == -32 assert 'BZERO' not in hdul[0].header assert 'BSCALE' not in hdul[0].header assert (orig_data == hdul[0].data).all() # Try reshaping the data, then closing and reopening the file; let's # see if all the changes are preseved properly hdul[0].data.shape = (42, 10) hdul.close() hdul = fits.open(self.temp('scale.fits')) assert hdul[0].shape == (42, 10) assert hdul[0].data.dtype == np.dtype('>f4') assert hdul[0].header['BITPIX'] == -32 assert 'BZERO' not in hdul[0].header assert 'BSCALE' not in hdul[0].header def test_scale_back(self): """A simple test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/120 The scale_back feature for image HDUs. """ self.copy_file('scale.fits') with fits.open(self.temp('scale.fits'), mode='update', scale_back=True) as hdul: orig_bitpix = hdul[0].header['BITPIX'] orig_bzero = hdul[0].header['BZERO'] orig_bscale = hdul[0].header['BSCALE'] orig_data = hdul[0].data.copy() hdul[0].data[0] = 0 with fits.open(self.temp('scale.fits'), do_not_scale_image_data=True) as hdul: assert hdul[0].header['BITPIX'] == orig_bitpix assert hdul[0].header['BZERO'] == orig_bzero assert hdul[0].header['BSCALE'] == orig_bscale zero_point = int(math.floor(-orig_bzero / orig_bscale)) assert (hdul[0].data[0] == zero_point).all() with fits.open(self.temp('scale.fits')) as hdul: assert (hdul[0].data[1:] == orig_data[1:]).all() def test_image_none(self): """ Regression test for https://github.com/spacetelescope/PyFITS/issues/27 """ with fits.open(self.data('test0.fits')) as h: h[1].data h[1].data = None h[1].writeto(self.temp('test.fits')) with fits.open(self.temp('test.fits')) as h: assert h[1].data is None assert h[1].header['NAXIS'] == 0 assert 'NAXIS1' not in h[1].header assert 'NAXIS2' not in h[1].header def test_invalid_blank(self): """ Regression test for https://github.com/astropy/astropy/issues/2711 If the BLANK keyword contains an invalid value it should be ignored for any calculations (though a warning should be issued). 
""" data = np.arange(100, dtype=np.float64) hdu = fits.PrimaryHDU(data) hdu.header['BLANK'] = 'nan' hdu.writeto(self.temp('test.fits')) with catch_warnings() as w: with fits.open(self.temp('test.fits')) as hdul: assert np.all(hdul[0].data == data) assert len(w) == 2 msg = "Invalid value for 'BLANK' keyword in header" assert msg in str(w[0].message) msg = "Invalid 'BLANK' keyword" assert msg in str(w[1].message) def test_scaled_image_fromfile(self): """ Regression test for https://github.com/astropy/astropy/issues/2710 """ # Make some sample data a = np.arange(100, dtype=np.float32) hdu = fits.PrimaryHDU(data=a.copy()) hdu.scale(bscale=1.1) hdu.writeto(self.temp('test.fits')) with open(self.temp('test.fits'), 'rb') as f: file_data = f.read() hdul = fits.HDUList.fromstring(file_data) assert np.allclose(hdul[0].data, a) class TestCompressedImage(FitsTestCase): def test_empty(self): """ Regression test for https://github.com/astropy/astropy/issues/2595 """ hdu = fits.CompImageHDU() assert hdu.data is None hdu.writeto(self.temp('test.fits')) with fits.open(self.temp('test.fits'), mode='update') as hdul: assert len(hdul) == 2 assert isinstance(hdul[1], fits.CompImageHDU) assert hdul[1].data is None # Now test replacing the empty data with an array and see what # happens hdul[1].data = np.arange(100, dtype=np.int32) with fits.open(self.temp('test.fits')) as hdul: assert len(hdul) == 2 assert isinstance(hdul[1], fits.CompImageHDU) assert np.all(hdul[1].data == np.arange(100, dtype=np.int32)) @pytest.mark.parametrize( ('data', 'compression_type', 'quantize_level', 'byte_order'), sum([[(np.zeros((2, 10, 10), dtype=np.float32), 'RICE_1', 16, bo), (np.zeros((2, 10, 10), dtype=np.float32), 'GZIP_1', -0.01, bo), (np.zeros((100, 100)) + 1, 'HCOMPRESS_1', 16, bo)] for bo in ('<', '>')], [])) def test_comp_image(self, data, compression_type, quantize_level, byte_order): data = data.newbyteorder(byte_order) primary_hdu = fits.PrimaryHDU() ofd = fits.HDUList(primary_hdu) chdu = fits.CompImageHDU(data, name='SCI', compressionType=compression_type, quantizeLevel=quantize_level) ofd.append(chdu) ofd.writeto(self.temp('test_new.fits'), clobber=True) ofd.close() with fits.open(self.temp('test_new.fits')) as fd: assert (fd[1].data == data).all() assert fd[1].header['NAXIS'] == chdu.header['NAXIS'] assert fd[1].header['NAXIS1'] == chdu.header['NAXIS1'] assert fd[1].header['NAXIS2'] == chdu.header['NAXIS2'] assert fd[1].header['BITPIX'] == chdu.header['BITPIX'] @ignore_warnings(AstropyPendingDeprecationWarning) def test_comp_image_hcompression_1_invalid_data(self): """ Tests compression with the HCOMPRESS_1 algorithm with data that is not 2D and has a non-2D tile size. """ pytest.raises(ValueError, fits.CompImageHDU, np.zeros((2, 10, 10), dtype=np.float32), name='SCI', compressionType='HCOMPRESS_1', quantizeLevel=16, tileSize=[2, 10, 10]) @ignore_warnings(AstropyPendingDeprecationWarning) def test_comp_image_hcompress_image_stack(self): """ Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/171 Tests that data containing more than two dimensions can be compressed with HCOMPRESS_1 so long as the user-supplied tile size can be flattened to two dimensions. 
""" cube = np.arange(300, dtype=np.float32).reshape((3, 10, 10)) hdu = fits.CompImageHDU(data=cube, name='SCI', compressionType='HCOMPRESS_1', quantizeLevel=16, tileSize=[5, 5, 1]) hdu.writeto(self.temp('test.fits')) with fits.open(self.temp('test.fits')) as hdul: assert (hdul['SCI'].data == cube).all() def test_subtractive_dither_seed(self): """ Regression test for https://github.com/spacetelescope/PyFITS/issues/32 Ensure that when floating point data is compressed with the SUBTRACTIVE_DITHER_1 quantization method that the correct ZDITHER0 seed is added to the header, and that the data can be correctly decompressed. """ array = np.arange(100.0).reshape(10, 10) csum = (array[0].view('uint8').sum() % 10000) + 1 hdu = fits.CompImageHDU(data=array, quantize_method=SUBTRACTIVE_DITHER_1, dither_seed=DITHER_SEED_CHECKSUM) hdu.writeto(self.temp('test.fits')) with fits.open(self.temp('test.fits')) as hdul: assert isinstance(hdul[1], fits.CompImageHDU) assert 'ZQUANTIZ' in hdul[1]._header assert hdul[1]._header['ZQUANTIZ'] == 'SUBTRACTIVE_DITHER_1' assert 'ZDITHER0' in hdul[1]._header assert hdul[1]._header['ZDITHER0'] == csum assert np.all(hdul[1].data == array) def test_disable_image_compression(self): with catch_warnings(): # No warnings should be displayed in this case warnings.simplefilter('error') with fits.open(self.data('comp.fits'), disable_image_compression=True) as hdul: # The compressed image HDU should show up as a BinTableHDU, but # *not* a CompImageHDU assert isinstance(hdul[1], fits.BinTableHDU) assert not isinstance(hdul[1], fits.CompImageHDU) with fits.open(self.data('comp.fits')) as hdul: assert isinstance(hdul[1], fits.CompImageHDU) def test_open_comp_image_in_update_mode(self): """ Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/167 Similar to test_open_scaled_in_update_mode(), but specifically for compressed images. """ # Copy the original file before making any possible changes to it self.copy_file('comp.fits') mtime = os.stat(self.temp('comp.fits')).st_mtime time.sleep(1) fits.open(self.temp('comp.fits'), mode='update').close() # Ensure that no changes were made to the file merely by immediately # opening and closing it. assert mtime == os.stat(self.temp('comp.fits')).st_mtime def test_open_scaled_in_update_mode_compressed(self): """ Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/88 2 Identical to test_open_scaled_in_update_mode() but with a compressed version of the scaled image. """ # Copy+compress the original file before making any possible changes to # it with fits.open(self.data('scale.fits'), do_not_scale_image_data=True) as hdul: chdu = fits.CompImageHDU(data=hdul[0].data, header=hdul[0].header) chdu.writeto(self.temp('scale.fits')) mtime = os.stat(self.temp('scale.fits')).st_mtime time.sleep(1) fits.open(self.temp('scale.fits'), mode='update').close() # Ensure that no changes were made to the file merely by immediately # opening and closing it. 
assert mtime == os.stat(self.temp('scale.fits')).st_mtime # Insert a slight delay to ensure the mtime does change when the file # is changed time.sleep(1) hdul = fits.open(self.temp('scale.fits'), 'update') hdul[1].data hdul.close() # Now the file should be updated with the rescaled data assert mtime != os.stat(self.temp('scale.fits')).st_mtime hdul = fits.open(self.temp('scale.fits'), mode='update') assert hdul[1].data.dtype == np.dtype('float32') assert hdul[1].header['BITPIX'] == -32 assert 'BZERO' not in hdul[1].header assert 'BSCALE' not in hdul[1].header # Try reshaping the data, then closing and reopening the file; let's # see if all the changes are preseved properly hdul[1].data.shape = (42, 10) hdul.close() hdul = fits.open(self.temp('scale.fits')) assert hdul[1].shape == (42, 10) assert hdul[1].data.dtype == np.dtype('float32') assert hdul[1].header['BITPIX'] == -32 assert 'BZERO' not in hdul[1].header assert 'BSCALE' not in hdul[1].header def test_write_comp_hdu_direct_from_existing(self): with fits.open(self.data('comp.fits')) as hdul: hdul[1].writeto(self.temp('test.fits')) with fits.open(self.data('comp.fits')) as hdul1: with fits.open(self.temp('test.fits')) as hdul2: assert np.all(hdul1[1].data == hdul2[1].data) assert comparerecords(hdul1[1].compressed_data, hdul2[1].compressed_data) def test_rewriting_large_scaled_image_compressed(self): """ Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/88 1 Identical to test_rewriting_large_scaled_image() but with a compressed image. """ with fits.open(self.data('fixed-1890.fits'), do_not_scale_image_data=True) as hdul: chdu = fits.CompImageHDU(data=hdul[0].data, header=hdul[0].header) chdu.writeto(self.temp('fixed-1890-z.fits')) hdul = fits.open(self.temp('fixed-1890-z.fits')) orig_data = hdul[1].data with ignore_warnings(): hdul.writeto(self.temp('test_new.fits'), clobber=True) hdul.close() hdul = fits.open(self.temp('test_new.fits')) assert (hdul[1].data == orig_data).all() hdul.close() # Just as before, but this time don't touch hdul[0].data before writing # back out--this is the case that failed in # https://aeon.stsci.edu/ssb/trac/pyfits/ticket/84 hdul = fits.open(self.temp('fixed-1890-z.fits')) with ignore_warnings(): hdul.writeto(self.temp('test_new.fits'), clobber=True) hdul.close() hdul = fits.open(self.temp('test_new.fits')) assert (hdul[1].data == orig_data).all() hdul.close() # Test opening/closing/reopening a scaled file in update mode hdul = fits.open(self.temp('fixed-1890-z.fits'), do_not_scale_image_data=True) hdul.writeto(self.temp('test_new.fits'), clobber=True, output_verify='silentfix') hdul.close() hdul = fits.open(self.temp('test_new.fits')) orig_data = hdul[1].data hdul.close() hdul = fits.open(self.temp('test_new.fits'), mode='update') hdul.close() hdul = fits.open(self.temp('test_new.fits')) assert (hdul[1].data == orig_data).all() hdul = fits.open(self.temp('test_new.fits')) hdul.close() def test_scale_back_compressed(self): """ Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/88 3 Identical to test_scale_back() but uses a compressed image. 
""" # Create a compressed version of the scaled image with fits.open(self.data('scale.fits'), do_not_scale_image_data=True) as hdul: chdu = fits.CompImageHDU(data=hdul[0].data, header=hdul[0].header) chdu.writeto(self.temp('scale.fits')) with fits.open(self.temp('scale.fits'), mode='update', scale_back=True) as hdul: orig_bitpix = hdul[1].header['BITPIX'] orig_bzero = hdul[1].header['BZERO'] orig_bscale = hdul[1].header['BSCALE'] orig_data = hdul[1].data.copy() hdul[1].data[0] = 0 with fits.open(self.temp('scale.fits'), do_not_scale_image_data=True) as hdul: assert hdul[1].header['BITPIX'] == orig_bitpix assert hdul[1].header['BZERO'] == orig_bzero assert hdul[1].header['BSCALE'] == orig_bscale zero_point = int(math.floor(-orig_bzero / orig_bscale)) assert (hdul[1].data[0] == zero_point).all() with fits.open(self.temp('scale.fits')) as hdul: assert (hdul[1].data[1:] == orig_data[1:]).all() # Extra test to ensure that after everything the data is still the # same as in the original uncompressed version of the image with fits.open(self.data('scale.fits')) as hdul2: # Recall we made the same modification to the data in hdul # above hdul2[0].data[0] = 0 assert (hdul[1].data == hdul2[0].data).all() def test_lossless_gzip_compression(self): """Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/198""" noise = np.random.normal(size=(1000, 1000)) chdu1 = fits.CompImageHDU(data=noise, compressionType='GZIP_1') # First make a test image with lossy compression and make sure it # wasn't compressed perfectly. This shouldn't happen ever, but just to # make sure the test non-trivial. chdu1.writeto(self.temp('test.fits')) with fits.open(self.temp('test.fits')) as h: assert np.abs(noise - h[1].data).max() > 0.0 del h chdu2 = fits.CompImageHDU(data=noise, compressionType='GZIP_1', quantizeLevel=0.0) # No quantization with ignore_warnings(): chdu2.writeto(self.temp('test.fits'), clobber=True) with fits.open(self.temp('test.fits')) as h: assert (noise == h[1].data).all() def test_compression_column_tforms(self): """Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/199""" # Some interestingly tiled data so that some of it is quantized and # some of it ends up just getting gzip-compressed data2 = ((np.arange(1, 8, dtype=np.float32) * 10)[:, np.newaxis] + np.arange(1, 7)) np.random.seed(1337) data1 = np.random.uniform(size=(6 * 4, 7 * 4)) data1[:data2.shape[0], :data2.shape[1]] = data2 chdu = fits.CompImageHDU(data1, compressionType='RICE_1', tileSize=(6, 7)) chdu.writeto(self.temp('test.fits')) with fits.open(self.temp('test.fits'), disable_image_compression=True) as h: assert h[1].header['TFORM1'] == '1PB(30)' assert h[1].header['TFORM2'] == '1PB(359)' def test_compression_update_header(self): """Regression test for https://github.com/spacetelescope/PyFITS/issues/23 """ self.copy_file('comp.fits') with fits.open(self.temp('comp.fits'), mode='update') as hdul: assert isinstance(hdul[1], fits.CompImageHDU) hdul[1].header['test1'] = 'test' hdul[1]._header['test2'] = 'test2' with fits.open(self.temp('comp.fits')) as hdul: assert 'test1' in hdul[1].header assert hdul[1].header['test1'] == 'test' assert 'test2' in hdul[1].header assert hdul[1].header['test2'] == 'test2' # Test update via index now: with fits.open(self.temp('comp.fits'), mode='update') as hdul: hdr = hdul[1].header hdr[hdr.index('TEST1')] = 'foo' with fits.open(self.temp('comp.fits')) as hdul: assert hdul[1].header['TEST1'] == 'foo' # Test slice updates with fits.open(self.temp('comp.fits'), mode='update') as hdul: 
hdul[1].header['TEST*'] = 'qux' with fits.open(self.temp('comp.fits')) as hdul: assert list(hdul[1].header['TEST*'].values()) == ['qux', 'qux'] with fits.open(self.temp('comp.fits'), mode='update') as hdul: hdr = hdul[1].header idx = hdr.index('TEST1') hdr[idx:idx + 2] = 'bar' with fits.open(self.temp('comp.fits')) as hdul: assert list(hdul[1].header['TEST*'].values()) == ['bar', 'bar'] # Test updating a specific COMMENT card duplicate with fits.open(self.temp('comp.fits'), mode='update') as hdul: hdul[1].header[('COMMENT', 1)] = 'I am fire. I am death!' with fits.open(self.temp('comp.fits')) as hdul: assert hdul[1].header['COMMENT'][1] == 'I am fire. I am death!' assert hdul[1]._header['COMMENT'][1] == 'I am fire. I am death!' # Test deleting by keyword and by slice with fits.open(self.temp('comp.fits'), mode='update') as hdul: hdr = hdul[1].header del hdr['COMMENT'] idx = hdr.index('TEST1') del hdr[idx:idx + 2] with fits.open(self.temp('comp.fits')) as hdul: assert 'COMMENT' not in hdul[1].header assert 'COMMENT' not in hdul[1]._header assert 'TEST1' not in hdul[1].header assert 'TEST1' not in hdul[1]._header assert 'TEST2' not in hdul[1].header assert 'TEST2' not in hdul[1]._header def test_compression_update_header_with_reserved(self): """ Ensure that setting reserved keywords related to the table data structure on CompImageHDU image headers fails. """ def test_set_keyword(hdr, keyword, value): with catch_warnings() as w: hdr[keyword] = value assert len(w) == 1 assert str(w[0].message).startswith( "Keyword %r is reserved" % keyword) assert keyword not in hdr with fits.open(self.data('comp.fits')) as hdul: hdr = hdul[1].header test_set_keyword(hdr, 'TFIELDS', 8) test_set_keyword(hdr, 'TTYPE1', 'Foo') test_set_keyword(hdr, 'ZCMPTYPE', 'ASDF') test_set_keyword(hdr, 'ZVAL1', 'Foo') def test_compression_header_append(self): with fits.open(self.data('comp.fits')) as hdul: imghdr = hdul[1].header tblhdr = hdul[1]._header with catch_warnings() as w: imghdr.append('TFIELDS') assert len(w) == 1 assert 'TFIELDS' not in imghdr imghdr.append(('FOO', 'bar', 'qux'), end=True) assert 'FOO' in imghdr assert imghdr[-1] == 'bar' assert 'FOO' in tblhdr assert tblhdr[-1] == 'bar' imghdr.append(('CHECKSUM', 'abcd1234')) assert 'CHECKSUM' in imghdr assert imghdr['CHECKSUM'] == 'abcd1234' assert 'CHECKSUM' not in tblhdr assert 'ZHECKSUM' in tblhdr assert tblhdr['ZHECKSUM'] == 'abcd1234' def test_compression_header_insert(self): with fits.open(self.data('comp.fits')) as hdul: imghdr = hdul[1].header tblhdr = hdul[1]._header # First try inserting a restricted keyword with catch_warnings() as w: imghdr.insert(1000, 'TFIELDS') assert len(w) == 1 assert 'TFIELDS' not in imghdr assert tblhdr.count('TFIELDS') == 1 # First try keyword-relative insert imghdr.insert('TELESCOP', ('OBSERVER', 'Phil Plait')) assert 'OBSERVER' in imghdr assert imghdr.index('OBSERVER') == imghdr.index('TELESCOP') - 1 assert 'OBSERVER' in tblhdr assert tblhdr.index('OBSERVER') == tblhdr.index('TELESCOP') - 1 # Next let's see if an index-relative insert winds up being # sensible idx = imghdr.index('OBSERVER') imghdr.insert('OBSERVER', ('FOO',)) assert 'FOO' in imghdr assert imghdr.index('FOO') == idx assert 'FOO' in tblhdr assert tblhdr.index('FOO') == tblhdr.index('OBSERVER') - 1 def test_compression_header_set_before_after(self): with fits.open(self.data('comp.fits')) as hdul: imghdr = hdul[1].header tblhdr = hdul[1]._header with catch_warnings() as w: imghdr.set('ZBITPIX', 77, 'asdf', after='XTENSION') assert len(w) == 1 assert 'ZBITPIX' 
not in imghdr assert tblhdr.count('ZBITPIX') == 1 assert tblhdr['ZBITPIX'] != 77 # Move GCOUNT before PCOUNT (not that there's any reason you'd # *want* to do that, but it's just a test...) imghdr.set('GCOUNT', 99, before='PCOUNT') assert imghdr.index('GCOUNT') == imghdr.index('PCOUNT') - 1 assert imghdr['GCOUNT'] == 99 assert tblhdr.index('ZGCOUNT') == tblhdr.index('ZPCOUNT') - 1 assert tblhdr['ZGCOUNT'] == 99 assert tblhdr.index('PCOUNT') == 5 assert tblhdr.index('GCOUNT') == 6 assert tblhdr['GCOUNT'] == 1 imghdr.set('GCOUNT', 2, after='PCOUNT') assert imghdr.index('GCOUNT') == imghdr.index('PCOUNT') + 1 assert imghdr['GCOUNT'] == 2 assert tblhdr.index('ZGCOUNT') == tblhdr.index('ZPCOUNT') + 1 assert tblhdr['ZGCOUNT'] == 2 assert tblhdr.index('PCOUNT') == 5 assert tblhdr.index('GCOUNT') == 6 assert tblhdr['GCOUNT'] == 1 def test_compression_header_append_commentary(self): """ Regression test for https://github.com/astropy/astropy/issues/2363 """ hdu = fits.CompImageHDU(np.array([0], dtype=np.int32)) hdu.header['COMMENT'] = 'hello world' assert hdu.header['COMMENT'] == ['hello world'] hdu.writeto(self.temp('test.fits')) with fits.open(self.temp('test.fits')) as hdul: assert hdul[1].header['COMMENT'] == ['hello world'] def test_compression_with_gzip_column(self): """ Regression test for https://github.com/spacetelescope/PyFITS/issues/71 """ arr = np.zeros((2, 7000), dtype='float32') # The first row (which will be the first compressed tile) has a very # wide range of values that will be difficult to quantize, and should # result in use of a GZIP_COMPRESSED_DATA column arr[0] = np.linspace(0, 1, 7000) arr[1] = np.random.normal(size=7000) hdu = fits.CompImageHDU(data=arr) hdu.writeto(self.temp('test.fits')) with fits.open(self.temp('test.fits')) as hdul: comp_hdu = hdul[1] # GZIP-compressed tile should compare exactly assert np.all(comp_hdu.data[0] == arr[0]) # The second tile uses lossy compression and may be somewhat off, # so we don't bother comparing it exactly def test_duplicate_compression_header_keywords(self): """ Regression test for https://github.com/astropy/astropy/issues/2750 Tests that the fake header (for the compressed image) can still be read even if the real header contained a duplicate ZTENSION keyword (the issue applies to any keyword specific to the compression convention, however). """ arr = np.arange(100, dtype=np.int32) hdu = fits.CompImageHDU(data=arr) header = hdu._header # append the duplicate keyword hdu._header.append(('ZTENSION', 'IMAGE')) hdu.writeto(self.temp('test.fits')) with fits.open(self.temp('test.fits')) as hdul: assert header == hdul[1]._header # There's no good reason to have a duplicate keyword, but # technically it isn't invalid either :/ assert hdul[1]._header.count('ZTENSION') == 2
piotroxp/scibibscan
scib/lib/python3.5/site-packages/astropy/io/fits/tests/test_image.py
scib/lib/python3.5/site-packages/astropy/io/ascii/connect.py
"""Support for fans through the SmartThings cloud API.""" from typing import Optional, Sequence from pysmartthings import Capability from homeassistant.components.fan import ( SPEED_HIGH, SPEED_LOW, SPEED_MEDIUM, SPEED_OFF, SUPPORT_SET_SPEED, FanEntity, ) from . import SmartThingsEntity from .const import DATA_BROKERS, DOMAIN VALUE_TO_SPEED = {0: SPEED_OFF, 1: SPEED_LOW, 2: SPEED_MEDIUM, 3: SPEED_HIGH} SPEED_TO_VALUE = {v: k for k, v in VALUE_TO_SPEED.items()} async def async_setup_entry(hass, config_entry, async_add_entities): """Add fans for a config entry.""" broker = hass.data[DOMAIN][DATA_BROKERS][config_entry.entry_id] async_add_entities( [ SmartThingsFan(device) for device in broker.devices.values() if broker.any_assigned(device.device_id, "fan") ] ) def get_capabilities(capabilities: Sequence[str]) -> Optional[Sequence[str]]: """Return all capabilities supported if minimum required are present.""" supported = [Capability.switch, Capability.fan_speed] # Must have switch and fan_speed if all(capability in capabilities for capability in supported): return supported class SmartThingsFan(SmartThingsEntity, FanEntity): """Define a SmartThings Fan.""" async def async_set_speed(self, speed: str): """Set the speed of the fan.""" value = SPEED_TO_VALUE[speed] await self._device.set_fan_speed(value, set_status=True) # State is set optimistically in the command above, therefore update # the entity state ahead of receiving the confirming push updates self.async_write_ha_state() async def async_turn_on(self, speed: str = None, **kwargs) -> None: """Turn the fan on.""" if speed is not None: value = SPEED_TO_VALUE[speed] await self._device.set_fan_speed(value, set_status=True) else: await self._device.switch_on(set_status=True) # State is set optimistically in the commands above, therefore update # the entity state ahead of receiving the confirming push updates self.async_write_ha_state() async def async_turn_off(self, **kwargs) -> None: """Turn the fan off.""" await self._device.switch_off(set_status=True) # State is set optimistically in the command above, therefore update # the entity state ahead of receiving the confirming push updates self.async_write_ha_state() @property def is_on(self) -> bool: """Return true if fan is on.""" return self._device.status.switch @property def speed(self) -> str: """Return the current speed.""" return VALUE_TO_SPEED[self._device.status.fan_speed] @property def speed_list(self) -> list: """Get the list of available speeds.""" return [SPEED_OFF, SPEED_LOW, SPEED_MEDIUM, SPEED_HIGH] @property def supported_features(self) -> int: """Flag supported features.""" return SUPPORT_SET_SPEED
"""The tests for the Switch component.""" import pytest from homeassistant import core from homeassistant.components import switch from homeassistant.const import CONF_PLATFORM from homeassistant.setup import async_setup_component from tests.components.switch import common @pytest.fixture(autouse=True) def entities(hass): """Initialize the test switch.""" platform = getattr(hass.components, "test.switch") platform.init() yield platform.ENTITIES async def test_methods(hass, entities): """Test is_on, turn_on, turn_off methods.""" switch_1, switch_2, switch_3 = entities assert await async_setup_component( hass, switch.DOMAIN, {switch.DOMAIN: {CONF_PLATFORM: "test"}} ) await hass.async_block_till_done() assert switch.is_on(hass, switch_1.entity_id) assert not switch.is_on(hass, switch_2.entity_id) assert not switch.is_on(hass, switch_3.entity_id) await common.async_turn_off(hass, switch_1.entity_id) await common.async_turn_on(hass, switch_2.entity_id) assert not switch.is_on(hass, switch_1.entity_id) assert switch.is_on(hass, switch_2.entity_id) # Turn all off await common.async_turn_off(hass) assert not switch.is_on(hass, switch_1.entity_id) assert not switch.is_on(hass, switch_2.entity_id) assert not switch.is_on(hass, switch_3.entity_id) # Turn all on await common.async_turn_on(hass) assert switch.is_on(hass, switch_1.entity_id) assert switch.is_on(hass, switch_2.entity_id) assert switch.is_on(hass, switch_3.entity_id) async def test_switch_context(hass, entities, hass_admin_user): """Test that switch context works.""" assert await async_setup_component(hass, "switch", {"switch": {"platform": "test"}}) await hass.async_block_till_done() state = hass.states.get("switch.ac") assert state is not None await hass.services.async_call( "switch", "toggle", {"entity_id": state.entity_id}, True, core.Context(user_id=hass_admin_user.id), ) state2 = hass.states.get("switch.ac") assert state2 is not None assert state.state != state2.state assert state2.context.user_id == hass_admin_user.id def test_deprecated_base_class(caplog): """Test deprecated base class.""" class CustomSwitch(switch.SwitchDevice): pass CustomSwitch() assert "SwitchDevice is deprecated, modify CustomSwitch" in caplog.text
GenericStudent/home-assistant
tests/components/switch/test_init.py
homeassistant/components/smartthings/fan.py
"""Sorting helpers for ISY994 device classifications.""" from typing import Any, List, Optional, Union from pyisy.constants import ( ISY_VALUE_UNKNOWN, PROTO_GROUP, PROTO_INSTEON, PROTO_PROGRAM, PROTO_ZWAVE, TAG_FOLDER, ) from pyisy.nodes import Group, Node, Nodes from pyisy.programs import Programs from pyisy.variables import Variables from homeassistant.components.binary_sensor import DOMAIN as BINARY_SENSOR from homeassistant.components.climate.const import DOMAIN as CLIMATE from homeassistant.components.fan import DOMAIN as FAN from homeassistant.components.light import DOMAIN as LIGHT from homeassistant.components.sensor import DOMAIN as SENSOR from homeassistant.components.switch import DOMAIN as SWITCH from homeassistant.helpers.entity_registry import async_get_registry from homeassistant.helpers.typing import HomeAssistantType from .const import ( _LOGGER, DEFAULT_PROGRAM_STRING, DOMAIN, FILTER_INSTEON_TYPE, FILTER_NODE_DEF_ID, FILTER_STATES, FILTER_UOM, FILTER_ZWAVE_CAT, ISY994_NODES, ISY994_PROGRAMS, ISY994_VARIABLES, ISY_GROUP_PLATFORM, KEY_ACTIONS, KEY_STATUS, NODE_FILTERS, SUBNODE_CLIMATE_COOL, SUBNODE_CLIMATE_HEAT, SUBNODE_EZIO2X4_SENSORS, SUBNODE_FANLINC_LIGHT, SUBNODE_IOLINC_RELAY, SUPPORTED_PLATFORMS, SUPPORTED_PROGRAM_PLATFORMS, TYPE_CATEGORY_SENSOR_ACTUATORS, TYPE_EZIO2X4, UOM_DOUBLE_TEMP, UOM_ISYV4_DEGREES, ) BINARY_SENSOR_UOMS = ["2", "78"] BINARY_SENSOR_ISY_STATES = ["on", "off"] def _check_for_node_def( hass_isy_data: dict, node: Union[Group, Node], single_platform: str = None ) -> bool: """Check if the node matches the node_def_id for any platforms. This is only present on the 5.0 ISY firmware, and is the most reliable way to determine a device's type. """ if not hasattr(node, "node_def_id") or node.node_def_id is None: # Node doesn't have a node_def (pre 5.0 firmware most likely) return False node_def_id = node.node_def_id platforms = SUPPORTED_PLATFORMS if not single_platform else [single_platform] for platform in platforms: if node_def_id in NODE_FILTERS[platform][FILTER_NODE_DEF_ID]: hass_isy_data[ISY994_NODES][platform].append(node) return True return False def _check_for_insteon_type( hass_isy_data: dict, node: Union[Group, Node], single_platform: str = None ) -> bool: """Check if the node matches the Insteon type for any platforms. This is for (presumably) every version of the ISY firmware, but only works for Insteon device. "Node Server" (v5+) and Z-Wave and others will not have a type. """ if not hasattr(node, "protocol") or node.protocol != PROTO_INSTEON: return False if not hasattr(node, "type") or node.type is None: # Node doesn't have a type (non-Insteon device most likely) return False device_type = node.type platforms = SUPPORTED_PLATFORMS if not single_platform else [single_platform] for platform in platforms: if any( [ device_type.startswith(t) for t in set(NODE_FILTERS[platform][FILTER_INSTEON_TYPE]) ] ): # Hacky special-cases for certain devices with different platforms # included as subnodes. Note that special-cases are not necessary # on ISY 5.x firmware as it uses the superior NodeDefs method subnode_id = int(node.address.split(" ")[-1], 16) # FanLinc, which has a light module as one of its nodes. 
if platform == FAN and subnode_id == SUBNODE_FANLINC_LIGHT: hass_isy_data[ISY994_NODES][LIGHT].append(node) return True # Thermostats, which has a "Heat" and "Cool" sub-node on address 2 and 3 if platform == CLIMATE and subnode_id in [ SUBNODE_CLIMATE_COOL, SUBNODE_CLIMATE_HEAT, ]: hass_isy_data[ISY994_NODES][BINARY_SENSOR].append(node) return True # IOLincs which have a sensor and relay on 2 different nodes if ( platform == BINARY_SENSOR and device_type.startswith(TYPE_CATEGORY_SENSOR_ACTUATORS) and subnode_id == SUBNODE_IOLINC_RELAY ): hass_isy_data[ISY994_NODES][SWITCH].append(node) return True # Smartenit EZIO2X4 if ( platform == SWITCH and device_type.startswith(TYPE_EZIO2X4) and subnode_id in SUBNODE_EZIO2X4_SENSORS ): hass_isy_data[ISY994_NODES][BINARY_SENSOR].append(node) return True hass_isy_data[ISY994_NODES][platform].append(node) return True return False def _check_for_zwave_cat( hass_isy_data: dict, node: Union[Group, Node], single_platform: str = None ) -> bool: """Check if the node matches the ISY Z-Wave Category for any platforms. This is for (presumably) every version of the ISY firmware, but only works for Z-Wave Devices with the devtype.cat property. """ if not hasattr(node, "protocol") or node.protocol != PROTO_ZWAVE: return False if not hasattr(node, "zwave_props") or node.zwave_props is None: # Node doesn't have a device type category (non-Z-Wave device) return False device_type = node.zwave_props.category platforms = SUPPORTED_PLATFORMS if not single_platform else [single_platform] for platform in platforms: if any( [ device_type.startswith(t) for t in set(NODE_FILTERS[platform][FILTER_ZWAVE_CAT]) ] ): hass_isy_data[ISY994_NODES][platform].append(node) return True return False def _check_for_uom_id( hass_isy_data: dict, node: Union[Group, Node], single_platform: str = None, uom_list: list = None, ) -> bool: """Check if a node's uom matches any of the platforms uom filter. This is used for versions of the ISY firmware that report uoms as a single ID. We can often infer what type of device it is by that ID. """ if not hasattr(node, "uom") or node.uom in [None, ""]: # Node doesn't have a uom (Scenes for example) return False # Backwards compatibility for ISYv4 Firmware: node_uom = node.uom if isinstance(node.uom, list): node_uom = node.uom[0] if uom_list: if node_uom in uom_list: hass_isy_data[ISY994_NODES][single_platform].append(node) return True return False platforms = SUPPORTED_PLATFORMS if not single_platform else [single_platform] for platform in platforms: if node_uom in NODE_FILTERS[platform][FILTER_UOM]: hass_isy_data[ISY994_NODES][platform].append(node) return True return False def _check_for_states_in_uom( hass_isy_data: dict, node: Union[Group, Node], single_platform: str = None, states_list: list = None, ) -> bool: """Check if a list of uoms matches two possible filters. This is for versions of the ISY firmware that report uoms as a list of all possible "human readable" states. This filter passes if all of the possible states fit inside the given filter. 
""" if not hasattr(node, "uom") or node.uom in [None, ""]: # Node doesn't have a uom (Scenes for example) return False # This only works for ISYv4 Firmware where uom is a list of states: if not isinstance(node.uom, list): return False node_uom = set(map(str.lower, node.uom)) if states_list: if node_uom == set(states_list): hass_isy_data[ISY994_NODES][single_platform].append(node) return True return False platforms = SUPPORTED_PLATFORMS if not single_platform else [single_platform] for platform in platforms: if node_uom == set(NODE_FILTERS[platform][FILTER_STATES]): hass_isy_data[ISY994_NODES][platform].append(node) return True return False def _is_sensor_a_binary_sensor(hass_isy_data: dict, node: Union[Group, Node]) -> bool: """Determine if the given sensor node should be a binary_sensor.""" if _check_for_node_def(hass_isy_data, node, single_platform=BINARY_SENSOR): return True if _check_for_insteon_type(hass_isy_data, node, single_platform=BINARY_SENSOR): return True # For the next two checks, we're providing our own set of uoms that # represent on/off devices. This is because we can only depend on these # checks in the context of already knowing that this is definitely a # sensor device. if _check_for_uom_id( hass_isy_data, node, single_platform=BINARY_SENSOR, uom_list=BINARY_SENSOR_UOMS ): return True if _check_for_states_in_uom( hass_isy_data, node, single_platform=BINARY_SENSOR, states_list=BINARY_SENSOR_ISY_STATES, ): return True return False def _categorize_nodes( hass_isy_data: dict, nodes: Nodes, ignore_identifier: str, sensor_identifier: str ) -> None: """Sort the nodes to their proper platforms.""" for (path, node) in nodes: ignored = ignore_identifier in path or ignore_identifier in node.name if ignored: # Don't import this node as a device at all continue if hasattr(node, "protocol") and node.protocol == PROTO_GROUP: hass_isy_data[ISY994_NODES][ISY_GROUP_PLATFORM].append(node) continue if sensor_identifier in path or sensor_identifier in node.name: # User has specified to treat this as a sensor. First we need to # determine if it should be a binary_sensor. if _is_sensor_a_binary_sensor(hass_isy_data, node): continue hass_isy_data[ISY994_NODES][SENSOR].append(node) continue # We have a bunch of different methods for determining the device type, # each of which works with different ISY firmware versions or device # family. The order here is important, from most reliable to least. if _check_for_node_def(hass_isy_data, node): continue if _check_for_insteon_type(hass_isy_data, node): continue if _check_for_zwave_cat(hass_isy_data, node): continue if _check_for_uom_id(hass_isy_data, node): continue if _check_for_states_in_uom(hass_isy_data, node): continue # Fallback as as sensor, e.g. for un-sortable items like NodeServer nodes. 
hass_isy_data[ISY994_NODES][SENSOR].append(node) def _categorize_programs(hass_isy_data: dict, programs: Programs) -> None: """Categorize the ISY994 programs.""" for platform in SUPPORTED_PROGRAM_PLATFORMS: folder = programs.get_by_name(f"{DEFAULT_PROGRAM_STRING}{platform}") if not folder: continue for dtype, _, node_id in folder.children: if dtype != TAG_FOLDER: continue entity_folder = folder[node_id] actions = None status = entity_folder.get_by_name(KEY_STATUS) if not status or not status.protocol == PROTO_PROGRAM: _LOGGER.warning( "Program %s entity '%s' not loaded, invalid/missing status program", platform, entity_folder.name, ) continue if platform != BINARY_SENSOR: actions = entity_folder.get_by_name(KEY_ACTIONS) if not actions or not actions.protocol == PROTO_PROGRAM: _LOGGER.warning( "Program %s entity '%s' not loaded, invalid/missing actions program", platform, entity_folder.name, ) continue entity = (entity_folder.name, status, actions) hass_isy_data[ISY994_PROGRAMS][platform].append(entity) def _categorize_variables( hass_isy_data: dict, variables: Variables, identifier: str ) -> None: """Gather the ISY994 Variables to be added as sensors.""" try: var_to_add = [ (vtype, vname, vid) for (vtype, vname, vid) in variables.children if identifier in vname ] except KeyError as err: _LOGGER.error("Error adding ISY Variables: %s", err) return for vtype, vname, vid in var_to_add: hass_isy_data[ISY994_VARIABLES].append((vname, variables[vtype][vid])) async def migrate_old_unique_ids( hass: HomeAssistantType, platform: str, devices: Optional[List[Any]] ) -> None: """Migrate to new controller-specific unique ids.""" registry = await async_get_registry(hass) for device in devices: old_entity_id = registry.async_get_entity_id( platform, DOMAIN, device.old_unique_id ) if old_entity_id is not None: _LOGGER.debug( "Migrating unique_id from [%s] to [%s]", device.old_unique_id, device.unique_id, ) registry.async_update_entity(old_entity_id, new_unique_id=device.unique_id) old_entity_id_2 = registry.async_get_entity_id( platform, DOMAIN, device.unique_id.replace(":", "") ) if old_entity_id_2 is not None: _LOGGER.debug( "Migrating unique_id from [%s] to [%s]", device.unique_id.replace(":", ""), device.unique_id, ) registry.async_update_entity( old_entity_id_2, new_unique_id=device.unique_id ) def convert_isy_value_to_hass( value: Union[int, float, None], uom: str, precision: str, fallback_precision: Optional[int] = None, ) -> Union[float, int]: """Fix ISY Reported Values. ISY provides float values as an integer and precision component. Correct by shifting the decimal place left by the value of precision. (e.g. value=2345, prec="2" == 23.45) Insteon Thermostats report temperature in 0.5-deg precision as an int by sending a value of 2 times the Temp. Correct by dividing by 2 here. """ if value is None or value == ISY_VALUE_UNKNOWN: return None if uom in [UOM_DOUBLE_TEMP, UOM_ISYV4_DEGREES]: return round(float(value) / 2.0, 1) if precision != "0": return round(float(value) / 10 ** int(precision), int(precision)) if fallback_precision: return round(float(value), fallback_precision) return value
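A standalone sketch of the value conversion documented in convert_isy_value_to_hass above. The "unknown" sentinel and UOM codes are stand-ins for the pyisy/ISY constants imported by the real module, so treat them as assumptions.

# Sketch of the ISY value/precision conversion described above.
ISY_VALUE_UNKNOWN = -1 * float("inf")  # stand-in for pyisy's sentinel value
UOM_DOUBLE_TEMP = "101"                # assumed code for 2x-temperature reports
UOM_ISYV4_DEGREES = "degrees"          # assumed ISYv4 temperature UOM


def convert(value, uom, precision, fallback_precision=None):
    """Shift the ISY integer/precision pair into a float, as described above."""
    if value is None or value == ISY_VALUE_UNKNOWN:
        return None
    if uom in (UOM_DOUBLE_TEMP, UOM_ISYV4_DEGREES):
        return round(float(value) / 2.0, 1)  # thermostats report 2x the temperature
    if precision != "0":
        return round(float(value) / 10 ** int(precision), int(precision))
    if fallback_precision:
        return round(float(value), fallback_precision)
    return value


if __name__ == "__main__":
    assert convert(2345, "17", "2") == 23.45        # value=2345, prec="2" -> 23.45
    assert convert(141, UOM_DOUBLE_TEMP, "0") == 70.5
    assert convert(None, "17", "0") is None
    print("conversion examples ok")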
"""The tests for the Switch component.""" import pytest from homeassistant import core from homeassistant.components import switch from homeassistant.const import CONF_PLATFORM from homeassistant.setup import async_setup_component from tests.components.switch import common @pytest.fixture(autouse=True) def entities(hass): """Initialize the test switch.""" platform = getattr(hass.components, "test.switch") platform.init() yield platform.ENTITIES async def test_methods(hass, entities): """Test is_on, turn_on, turn_off methods.""" switch_1, switch_2, switch_3 = entities assert await async_setup_component( hass, switch.DOMAIN, {switch.DOMAIN: {CONF_PLATFORM: "test"}} ) await hass.async_block_till_done() assert switch.is_on(hass, switch_1.entity_id) assert not switch.is_on(hass, switch_2.entity_id) assert not switch.is_on(hass, switch_3.entity_id) await common.async_turn_off(hass, switch_1.entity_id) await common.async_turn_on(hass, switch_2.entity_id) assert not switch.is_on(hass, switch_1.entity_id) assert switch.is_on(hass, switch_2.entity_id) # Turn all off await common.async_turn_off(hass) assert not switch.is_on(hass, switch_1.entity_id) assert not switch.is_on(hass, switch_2.entity_id) assert not switch.is_on(hass, switch_3.entity_id) # Turn all on await common.async_turn_on(hass) assert switch.is_on(hass, switch_1.entity_id) assert switch.is_on(hass, switch_2.entity_id) assert switch.is_on(hass, switch_3.entity_id) async def test_switch_context(hass, entities, hass_admin_user): """Test that switch context works.""" assert await async_setup_component(hass, "switch", {"switch": {"platform": "test"}}) await hass.async_block_till_done() state = hass.states.get("switch.ac") assert state is not None await hass.services.async_call( "switch", "toggle", {"entity_id": state.entity_id}, True, core.Context(user_id=hass_admin_user.id), ) state2 = hass.states.get("switch.ac") assert state2 is not None assert state.state != state2.state assert state2.context.user_id == hass_admin_user.id def test_deprecated_base_class(caplog): """Test deprecated base class.""" class CustomSwitch(switch.SwitchDevice): pass CustomSwitch() assert "SwitchDevice is deprecated, modify CustomSwitch" in caplog.text
GenericStudent/home-assistant
tests/components/switch/test_init.py
homeassistant/components/isy994/helpers.py
"""Counter for the days until an HTTPS (TLS) certificate will expire.""" from datetime import timedelta import voluptuous as vol from homeassistant.components.sensor import PLATFORM_SCHEMA from homeassistant.config_entries import SOURCE_IMPORT from homeassistant.const import ( CONF_HOST, CONF_PORT, DEVICE_CLASS_TIMESTAMP, EVENT_HOMEASSISTANT_START, TIME_DAYS, ) from homeassistant.core import callback import homeassistant.helpers.config_validation as cv from homeassistant.helpers.event import async_call_later from homeassistant.helpers.update_coordinator import CoordinatorEntity from homeassistant.util import dt from .const import DEFAULT_PORT, DOMAIN SCAN_INTERVAL = timedelta(hours=12) PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend( { vol.Required(CONF_HOST): cv.string, vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port, } ) async def async_setup_platform(hass, config, async_add_entities, discovery_info=None): """Set up certificate expiry sensor.""" @callback def schedule_import(_): """Schedule delayed import after HA is fully started.""" async_call_later(hass, 10, do_import) @callback def do_import(_): """Process YAML import.""" hass.async_create_task( hass.config_entries.flow.async_init( DOMAIN, context={"source": SOURCE_IMPORT}, data=dict(config) ) ) hass.bus.async_listen_once(EVENT_HOMEASSISTANT_START, schedule_import) async def async_setup_entry(hass, entry, async_add_entities): """Add cert-expiry entry.""" coordinator = hass.data[DOMAIN][entry.entry_id] sensors = [ SSLCertificateDays(coordinator), SSLCertificateTimestamp(coordinator), ] async_add_entities(sensors, True) class CertExpiryEntity(CoordinatorEntity): """Defines a base Cert Expiry entity.""" @property def icon(self): """Icon to use in the frontend, if any.""" return "mdi:certificate" @property def device_state_attributes(self): """Return additional sensor state attributes.""" return { "is_valid": self.coordinator.is_cert_valid, "error": str(self.coordinator.cert_error), } class SSLCertificateDays(CertExpiryEntity): """Implementation of the Cert Expiry days sensor.""" @property def name(self): """Return the name of the sensor.""" return f"Cert Expiry ({self.coordinator.name})" @property def state(self): """Return the state of the sensor.""" if not self.coordinator.is_cert_valid: return 0 expiry = self.coordinator.data - dt.utcnow() return expiry.days @property def unique_id(self): """Return a unique id for the sensor.""" return f"{self.coordinator.host}:{self.coordinator.port}" @property def unit_of_measurement(self): """Return the unit this state is expressed in.""" return TIME_DAYS class SSLCertificateTimestamp(CertExpiryEntity): """Implementation of the Cert Expiry timestamp sensor.""" @property def device_class(self): """Return the device class of the sensor.""" return DEVICE_CLASS_TIMESTAMP @property def name(self): """Return the name of the sensor.""" return f"Cert Expiry Timestamp ({self.coordinator.name})" @property def state(self): """Return the state of the sensor.""" if self.coordinator.data: return self.coordinator.data.isoformat() return None @property def unique_id(self): """Return a unique id for the sensor.""" return f"{self.coordinator.host}:{self.coordinator.port}-timestamp"
"""The tests for the Switch component.""" import pytest from homeassistant import core from homeassistant.components import switch from homeassistant.const import CONF_PLATFORM from homeassistant.setup import async_setup_component from tests.components.switch import common @pytest.fixture(autouse=True) def entities(hass): """Initialize the test switch.""" platform = getattr(hass.components, "test.switch") platform.init() yield platform.ENTITIES async def test_methods(hass, entities): """Test is_on, turn_on, turn_off methods.""" switch_1, switch_2, switch_3 = entities assert await async_setup_component( hass, switch.DOMAIN, {switch.DOMAIN: {CONF_PLATFORM: "test"}} ) await hass.async_block_till_done() assert switch.is_on(hass, switch_1.entity_id) assert not switch.is_on(hass, switch_2.entity_id) assert not switch.is_on(hass, switch_3.entity_id) await common.async_turn_off(hass, switch_1.entity_id) await common.async_turn_on(hass, switch_2.entity_id) assert not switch.is_on(hass, switch_1.entity_id) assert switch.is_on(hass, switch_2.entity_id) # Turn all off await common.async_turn_off(hass) assert not switch.is_on(hass, switch_1.entity_id) assert not switch.is_on(hass, switch_2.entity_id) assert not switch.is_on(hass, switch_3.entity_id) # Turn all on await common.async_turn_on(hass) assert switch.is_on(hass, switch_1.entity_id) assert switch.is_on(hass, switch_2.entity_id) assert switch.is_on(hass, switch_3.entity_id) async def test_switch_context(hass, entities, hass_admin_user): """Test that switch context works.""" assert await async_setup_component(hass, "switch", {"switch": {"platform": "test"}}) await hass.async_block_till_done() state = hass.states.get("switch.ac") assert state is not None await hass.services.async_call( "switch", "toggle", {"entity_id": state.entity_id}, True, core.Context(user_id=hass_admin_user.id), ) state2 = hass.states.get("switch.ac") assert state2 is not None assert state.state != state2.state assert state2.context.user_id == hass_admin_user.id def test_deprecated_base_class(caplog): """Test deprecated base class.""" class CustomSwitch(switch.SwitchDevice): pass CustomSwitch() assert "SwitchDevice is deprecated, modify CustomSwitch" in caplog.text
GenericStudent/home-assistant
tests/components/switch/test_init.py
homeassistant/components/cert_expiry/sensor.py
"""Platform for Time of Flight sensor VL53L1X from STMicroelectronics.""" import asyncio from functools import partial from VL53L1X2 import VL53L1X # pylint: disable=import-error import voluptuous as vol from homeassistant.components import rpi_gpio from homeassistant.components.sensor import PLATFORM_SCHEMA from homeassistant.const import CONF_NAME, LENGTH_MILLIMETERS import homeassistant.helpers.config_validation as cv from homeassistant.helpers.entity import Entity CONF_I2C_ADDRESS = "i2c_address" CONF_I2C_BUS = "i2c_bus" CONF_XSHUT = "xshut" DEFAULT_NAME = "VL53L1X" DEFAULT_I2C_ADDRESS = 0x29 DEFAULT_I2C_BUS = 1 DEFAULT_XSHUT = 16 DEFAULT_RANGE = 2 PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend( { vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string, vol.Optional(CONF_I2C_ADDRESS, default=DEFAULT_I2C_ADDRESS): vol.Coerce(int), vol.Optional(CONF_I2C_BUS, default=DEFAULT_I2C_BUS): vol.Coerce(int), vol.Optional(CONF_XSHUT, default=DEFAULT_XSHUT): cv.positive_int, } ) def init_tof_0(xshut, sensor): """XSHUT port LOW resets the device.""" sensor.open() rpi_gpio.setup_output(xshut) rpi_gpio.write_output(xshut, 0) def init_tof_1(xshut): """XSHUT port HIGH enables the device.""" rpi_gpio.setup_output(xshut) rpi_gpio.write_output(xshut, 1) async def async_setup_platform(hass, config, async_add_entities, discovery_info=None): """Reset and initialize the VL53L1X ToF Sensor from STMicroelectronics.""" name = config.get(CONF_NAME) bus_number = config.get(CONF_I2C_BUS) i2c_address = config.get(CONF_I2C_ADDRESS) unit = LENGTH_MILLIMETERS xshut = config.get(CONF_XSHUT) sensor = await hass.async_add_executor_job(partial(VL53L1X, bus_number)) await hass.async_add_executor_job(init_tof_0, xshut, sensor) await asyncio.sleep(0.01) await hass.async_add_executor_job(init_tof_1, xshut) await asyncio.sleep(0.01) dev = [VL53L1XSensor(sensor, name, unit, i2c_address)] async_add_entities(dev, True) class VL53L1XSensor(Entity): """Implementation of VL53L1X sensor.""" def __init__(self, vl53l1x_sensor, name, unit, i2c_address): """Initialize the sensor.""" self._name = name self._unit_of_measurement = unit self.vl53l1x_sensor = vl53l1x_sensor self.i2c_address = i2c_address self._state = None self.init = True @property def name(self) -> str: """Return the name of the sensor.""" return self._name @property def state(self) -> int: """Return the state of the sensor.""" return self._state @property def unit_of_measurement(self) -> str: """Return the unit of measurement.""" return self._unit_of_measurement def update(self): """Get the latest measurement and update state.""" if self.init: self.vl53l1x_sensor.add_sensor(self.i2c_address, self.i2c_address) self.init = False self.vl53l1x_sensor.start_ranging(self.i2c_address, DEFAULT_RANGE) self.vl53l1x_sensor.update(self.i2c_address) self.vl53l1x_sensor.stop_ranging(self.i2c_address) self._state = self.vl53l1x_sensor.distance
"""The tests for the Switch component.""" import pytest from homeassistant import core from homeassistant.components import switch from homeassistant.const import CONF_PLATFORM from homeassistant.setup import async_setup_component from tests.components.switch import common @pytest.fixture(autouse=True) def entities(hass): """Initialize the test switch.""" platform = getattr(hass.components, "test.switch") platform.init() yield platform.ENTITIES async def test_methods(hass, entities): """Test is_on, turn_on, turn_off methods.""" switch_1, switch_2, switch_3 = entities assert await async_setup_component( hass, switch.DOMAIN, {switch.DOMAIN: {CONF_PLATFORM: "test"}} ) await hass.async_block_till_done() assert switch.is_on(hass, switch_1.entity_id) assert not switch.is_on(hass, switch_2.entity_id) assert not switch.is_on(hass, switch_3.entity_id) await common.async_turn_off(hass, switch_1.entity_id) await common.async_turn_on(hass, switch_2.entity_id) assert not switch.is_on(hass, switch_1.entity_id) assert switch.is_on(hass, switch_2.entity_id) # Turn all off await common.async_turn_off(hass) assert not switch.is_on(hass, switch_1.entity_id) assert not switch.is_on(hass, switch_2.entity_id) assert not switch.is_on(hass, switch_3.entity_id) # Turn all on await common.async_turn_on(hass) assert switch.is_on(hass, switch_1.entity_id) assert switch.is_on(hass, switch_2.entity_id) assert switch.is_on(hass, switch_3.entity_id) async def test_switch_context(hass, entities, hass_admin_user): """Test that switch context works.""" assert await async_setup_component(hass, "switch", {"switch": {"platform": "test"}}) await hass.async_block_till_done() state = hass.states.get("switch.ac") assert state is not None await hass.services.async_call( "switch", "toggle", {"entity_id": state.entity_id}, True, core.Context(user_id=hass_admin_user.id), ) state2 = hass.states.get("switch.ac") assert state2 is not None assert state.state != state2.state assert state2.context.user_id == hass_admin_user.id def test_deprecated_base_class(caplog): """Test deprecated base class.""" class CustomSwitch(switch.SwitchDevice): pass CustomSwitch() assert "SwitchDevice is deprecated, modify CustomSwitch" in caplog.text
GenericStudent/home-assistant
tests/components/switch/test_init.py
homeassistant/components/tof/sensor.py
"""Support for MySensors sensors.""" from homeassistant.components import mysensors from homeassistant.components.sensor import DOMAIN from homeassistant.const import ( CONDUCTIVITY, DEGREE, ELECTRICAL_CURRENT_AMPERE, ELECTRICAL_VOLT_AMPERE, ENERGY_KILO_WATT_HOUR, FREQUENCY_HERTZ, LENGTH_METERS, LIGHT_LUX, MASS_KILOGRAMS, PERCENTAGE, POWER_WATT, TEMP_CELSIUS, TEMP_FAHRENHEIT, VOLT, VOLUME_CUBIC_METERS, ) SENSORS = { "V_TEMP": [None, "mdi:thermometer"], "V_HUM": [PERCENTAGE, "mdi:water-percent"], "V_DIMMER": [PERCENTAGE, "mdi:percent"], "V_PERCENTAGE": [PERCENTAGE, "mdi:percent"], "V_PRESSURE": [None, "mdi:gauge"], "V_FORECAST": [None, "mdi:weather-partly-cloudy"], "V_RAIN": [None, "mdi:weather-rainy"], "V_RAINRATE": [None, "mdi:weather-rainy"], "V_WIND": [None, "mdi:weather-windy"], "V_GUST": [None, "mdi:weather-windy"], "V_DIRECTION": [DEGREE, "mdi:compass"], "V_WEIGHT": [MASS_KILOGRAMS, "mdi:weight-kilogram"], "V_DISTANCE": [LENGTH_METERS, "mdi:ruler"], "V_IMPEDANCE": ["ohm", None], "V_WATT": [POWER_WATT, None], "V_KWH": [ENERGY_KILO_WATT_HOUR, None], "V_LIGHT_LEVEL": [PERCENTAGE, "mdi:white-balance-sunny"], "V_FLOW": [LENGTH_METERS, "mdi:gauge"], "V_VOLUME": [f"{VOLUME_CUBIC_METERS}", None], "V_LEVEL": { "S_SOUND": ["dB", "mdi:volume-high"], "S_VIBRATION": [FREQUENCY_HERTZ, None], "S_LIGHT_LEVEL": [LIGHT_LUX, "mdi:white-balance-sunny"], }, "V_VOLTAGE": [VOLT, "mdi:flash"], "V_CURRENT": [ELECTRICAL_CURRENT_AMPERE, "mdi:flash-auto"], "V_PH": ["pH", None], "V_ORP": ["mV", None], "V_EC": [CONDUCTIVITY, None], "V_VAR": ["var", None], "V_VA": [ELECTRICAL_VOLT_AMPERE, None], } async def async_setup_platform(hass, config, async_add_entities, discovery_info=None): """Set up the MySensors platform for sensors.""" mysensors.setup_mysensors_platform( hass, DOMAIN, discovery_info, MySensorsSensor, async_add_entities=async_add_entities, ) class MySensorsSensor(mysensors.device.MySensorsEntity): """Representation of a MySensors Sensor child node.""" @property def force_update(self): """Return True if state updates should be forced. If True, a state change will be triggered anytime the state property is updated, not just when the value changes. """ return True @property def state(self): """Return the state of the device.""" return self._values.get(self.value_type) @property def icon(self): """Return the icon to use in the frontend, if any.""" _, icon = self._get_sensor_type() return icon @property def unit_of_measurement(self): """Return the unit of measurement of this entity.""" set_req = self.gateway.const.SetReq if ( float(self.gateway.protocol_version) >= 1.5 and set_req.V_UNIT_PREFIX in self._values ): return self._values[set_req.V_UNIT_PREFIX] unit, _ = self._get_sensor_type() return unit def _get_sensor_type(self): """Return list with unit and icon of sensor type.""" pres = self.gateway.const.Presentation set_req = self.gateway.const.SetReq SENSORS[set_req.V_TEMP.name][0] = ( TEMP_CELSIUS if self.gateway.metric else TEMP_FAHRENHEIT ) sensor_type = SENSORS.get(set_req(self.value_type).name, [None, None]) if isinstance(sensor_type, dict): sensor_type = sensor_type.get(pres(self.child_type).name, [None, None]) return sensor_type
"""The tests for the Switch component.""" import pytest from homeassistant import core from homeassistant.components import switch from homeassistant.const import CONF_PLATFORM from homeassistant.setup import async_setup_component from tests.components.switch import common @pytest.fixture(autouse=True) def entities(hass): """Initialize the test switch.""" platform = getattr(hass.components, "test.switch") platform.init() yield platform.ENTITIES async def test_methods(hass, entities): """Test is_on, turn_on, turn_off methods.""" switch_1, switch_2, switch_3 = entities assert await async_setup_component( hass, switch.DOMAIN, {switch.DOMAIN: {CONF_PLATFORM: "test"}} ) await hass.async_block_till_done() assert switch.is_on(hass, switch_1.entity_id) assert not switch.is_on(hass, switch_2.entity_id) assert not switch.is_on(hass, switch_3.entity_id) await common.async_turn_off(hass, switch_1.entity_id) await common.async_turn_on(hass, switch_2.entity_id) assert not switch.is_on(hass, switch_1.entity_id) assert switch.is_on(hass, switch_2.entity_id) # Turn all off await common.async_turn_off(hass) assert not switch.is_on(hass, switch_1.entity_id) assert not switch.is_on(hass, switch_2.entity_id) assert not switch.is_on(hass, switch_3.entity_id) # Turn all on await common.async_turn_on(hass) assert switch.is_on(hass, switch_1.entity_id) assert switch.is_on(hass, switch_2.entity_id) assert switch.is_on(hass, switch_3.entity_id) async def test_switch_context(hass, entities, hass_admin_user): """Test that switch context works.""" assert await async_setup_component(hass, "switch", {"switch": {"platform": "test"}}) await hass.async_block_till_done() state = hass.states.get("switch.ac") assert state is not None await hass.services.async_call( "switch", "toggle", {"entity_id": state.entity_id}, True, core.Context(user_id=hass_admin_user.id), ) state2 = hass.states.get("switch.ac") assert state2 is not None assert state.state != state2.state assert state2.context.user_id == hass_admin_user.id def test_deprecated_base_class(caplog): """Test deprecated base class.""" class CustomSwitch(switch.SwitchDevice): pass CustomSwitch() assert "SwitchDevice is deprecated, modify CustomSwitch" in caplog.text
GenericStudent/home-assistant
tests/components/switch/test_init.py
homeassistant/components/mysensors/sensor.py
"""Sensor for Suez Water Consumption data.""" from datetime import timedelta import logging from pysuez import SuezClient from pysuez.client import PySuezError import voluptuous as vol from homeassistant.components.sensor import PLATFORM_SCHEMA from homeassistant.const import CONF_PASSWORD, CONF_USERNAME, VOLUME_LITERS import homeassistant.helpers.config_validation as cv from homeassistant.helpers.entity import Entity _LOGGER = logging.getLogger(__name__) CONF_COUNTER_ID = "counter_id" SCAN_INTERVAL = timedelta(hours=12) COMPONENT_ICON = "mdi:water-pump" COMPONENT_NAME = "Suez Water Client" PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend( { vol.Required(CONF_USERNAME): cv.string, vol.Required(CONF_PASSWORD): cv.string, vol.Required(CONF_COUNTER_ID): cv.string, } ) def setup_platform(hass, config, add_entities, discovery_info=None): """Set up the sensor platform.""" username = config[CONF_USERNAME] password = config[CONF_PASSWORD] counter_id = config[CONF_COUNTER_ID] try: client = SuezClient(username, password, counter_id) if not client.check_credentials(): _LOGGER.warning("Wrong username and/or password") return except PySuezError: _LOGGER.warning("Unable to create Suez Client") return add_entities([SuezSensor(client)], True) class SuezSensor(Entity): """Representation of a Sensor.""" def __init__(self, client): """Initialize the data object.""" self._attributes = {} self._state = None self._available = None self.client = client @property def name(self): """Return the name of the sensor.""" return COMPONENT_NAME @property def state(self): """Return the state of the sensor.""" return self._state @property def unit_of_measurement(self): """Return the unit of measurement.""" return VOLUME_LITERS @property def device_state_attributes(self): """Return the state attributes.""" return self._attributes @property def icon(self): """Return the icon of the sensor.""" return COMPONENT_ICON def _fetch_data(self): """Fetch latest data from Suez.""" try: self.client.update() # _state holds the volume of consumed water during previous day self._state = self.client.state self._available = True self._attributes["attribution"] = self.client.attributes["attribution"] self._attributes["this_month_consumption"] = {} for item in self.client.attributes["thisMonthConsumption"]: self._attributes["this_month_consumption"][ item ] = self.client.attributes["thisMonthConsumption"][item] self._attributes["previous_month_consumption"] = {} for item in self.client.attributes["previousMonthConsumption"]: self._attributes["previous_month_consumption"][ item ] = self.client.attributes["previousMonthConsumption"][item] self._attributes["highest_monthly_consumption"] = self.client.attributes[ "highestMonthlyConsumption" ] self._attributes["last_year_overall"] = self.client.attributes[ "lastYearOverAll" ] self._attributes["this_year_overall"] = self.client.attributes[ "thisYearOverAll" ] self._attributes["history"] = {} for item in self.client.attributes["history"]: self._attributes["history"][item] = self.client.attributes["history"][ item ] except PySuezError: self._available = False _LOGGER.warning("Unable to fetch data") def update(self): """Return the latest collected data from Linky.""" self._fetch_data() _LOGGER.debug("Suez data state is: %s", self._state)
"""The tests for the Switch component.""" import pytest from homeassistant import core from homeassistant.components import switch from homeassistant.const import CONF_PLATFORM from homeassistant.setup import async_setup_component from tests.components.switch import common @pytest.fixture(autouse=True) def entities(hass): """Initialize the test switch.""" platform = getattr(hass.components, "test.switch") platform.init() yield platform.ENTITIES async def test_methods(hass, entities): """Test is_on, turn_on, turn_off methods.""" switch_1, switch_2, switch_3 = entities assert await async_setup_component( hass, switch.DOMAIN, {switch.DOMAIN: {CONF_PLATFORM: "test"}} ) await hass.async_block_till_done() assert switch.is_on(hass, switch_1.entity_id) assert not switch.is_on(hass, switch_2.entity_id) assert not switch.is_on(hass, switch_3.entity_id) await common.async_turn_off(hass, switch_1.entity_id) await common.async_turn_on(hass, switch_2.entity_id) assert not switch.is_on(hass, switch_1.entity_id) assert switch.is_on(hass, switch_2.entity_id) # Turn all off await common.async_turn_off(hass) assert not switch.is_on(hass, switch_1.entity_id) assert not switch.is_on(hass, switch_2.entity_id) assert not switch.is_on(hass, switch_3.entity_id) # Turn all on await common.async_turn_on(hass) assert switch.is_on(hass, switch_1.entity_id) assert switch.is_on(hass, switch_2.entity_id) assert switch.is_on(hass, switch_3.entity_id) async def test_switch_context(hass, entities, hass_admin_user): """Test that switch context works.""" assert await async_setup_component(hass, "switch", {"switch": {"platform": "test"}}) await hass.async_block_till_done() state = hass.states.get("switch.ac") assert state is not None await hass.services.async_call( "switch", "toggle", {"entity_id": state.entity_id}, True, core.Context(user_id=hass_admin_user.id), ) state2 = hass.states.get("switch.ac") assert state2 is not None assert state.state != state2.state assert state2.context.user_id == hass_admin_user.id def test_deprecated_base_class(caplog): """Test deprecated base class.""" class CustomSwitch(switch.SwitchDevice): pass CustomSwitch() assert "SwitchDevice is deprecated, modify CustomSwitch" in caplog.text
GenericStudent/home-assistant
tests/components/switch/test_init.py
homeassistant/components/suez_water/sensor.py
"""Helpers for LCN component.""" import re import voluptuous as vol from homeassistant.const import CONF_NAME from .const import DEFAULT_NAME # Regex for address validation PATTERN_ADDRESS = re.compile( "^((?P<conn_id>\\w+)\\.)?s?(?P<seg_id>\\d+)\\.(?P<type>m|g)?(?P<id>\\d+)$" ) def get_connection(connections, connection_id=None): """Return the connection object from list.""" if connection_id is None: connection = connections[0] else: for connection in connections: if connection.connection_id == connection_id: break else: raise ValueError("Unknown connection_id.") return connection def has_unique_connection_names(connections): """Validate that all connection names are unique. Use 'pchk' as default connection_name (or add a numeric suffix if pchk' is already in use. """ for suffix, connection in enumerate(connections): connection_name = connection.get(CONF_NAME) if connection_name is None: if suffix == 0: connection[CONF_NAME] = DEFAULT_NAME else: connection[CONF_NAME] = f"{DEFAULT_NAME}{suffix:d}" schema = vol.Schema(vol.Unique()) schema([connection.get(CONF_NAME) for connection in connections]) return connections def is_address(value): """Validate the given address string. Examples for S000M005 at myhome: myhome.s000.m005 myhome.s0.m5 myhome.0.5 ("m" is implicit if missing) Examples for s000g011 myhome.0.g11 myhome.s0.g11 """ matcher = PATTERN_ADDRESS.match(value) if matcher: is_group = matcher.group("type") == "g" addr = (int(matcher.group("seg_id")), int(matcher.group("id")), is_group) conn_id = matcher.group("conn_id") return addr, conn_id raise vol.error.Invalid("Not a valid address string.") def is_relays_states_string(states_string): """Validate the given states string and return states list.""" if len(states_string) == 8: states = [] for state_string in states_string: if state_string == "1": state = "ON" elif state_string == "0": state = "OFF" elif state_string == "T": state = "TOGGLE" elif state_string == "-": state = "NOCHANGE" else: raise vol.error.Invalid("Not a valid relay state string.") states.append(state) return states raise vol.error.Invalid("Wrong length of relay state string.") def is_key_lock_states_string(states_string): """Validate the given states string and returns states list.""" if len(states_string) == 8: states = [] for state_string in states_string: if state_string == "1": state = "ON" elif state_string == "0": state = "OFF" elif state_string == "T": state = "TOGGLE" elif state_string == "-": state = "NOCHANGE" else: raise vol.error.Invalid("Not a valid key lock state string.") states.append(state) return states raise vol.error.Invalid("Wrong length of key lock state string.")
"""The tests for the Switch component.""" import pytest from homeassistant import core from homeassistant.components import switch from homeassistant.const import CONF_PLATFORM from homeassistant.setup import async_setup_component from tests.components.switch import common @pytest.fixture(autouse=True) def entities(hass): """Initialize the test switch.""" platform = getattr(hass.components, "test.switch") platform.init() yield platform.ENTITIES async def test_methods(hass, entities): """Test is_on, turn_on, turn_off methods.""" switch_1, switch_2, switch_3 = entities assert await async_setup_component( hass, switch.DOMAIN, {switch.DOMAIN: {CONF_PLATFORM: "test"}} ) await hass.async_block_till_done() assert switch.is_on(hass, switch_1.entity_id) assert not switch.is_on(hass, switch_2.entity_id) assert not switch.is_on(hass, switch_3.entity_id) await common.async_turn_off(hass, switch_1.entity_id) await common.async_turn_on(hass, switch_2.entity_id) assert not switch.is_on(hass, switch_1.entity_id) assert switch.is_on(hass, switch_2.entity_id) # Turn all off await common.async_turn_off(hass) assert not switch.is_on(hass, switch_1.entity_id) assert not switch.is_on(hass, switch_2.entity_id) assert not switch.is_on(hass, switch_3.entity_id) # Turn all on await common.async_turn_on(hass) assert switch.is_on(hass, switch_1.entity_id) assert switch.is_on(hass, switch_2.entity_id) assert switch.is_on(hass, switch_3.entity_id) async def test_switch_context(hass, entities, hass_admin_user): """Test that switch context works.""" assert await async_setup_component(hass, "switch", {"switch": {"platform": "test"}}) await hass.async_block_till_done() state = hass.states.get("switch.ac") assert state is not None await hass.services.async_call( "switch", "toggle", {"entity_id": state.entity_id}, True, core.Context(user_id=hass_admin_user.id), ) state2 = hass.states.get("switch.ac") assert state2 is not None assert state.state != state2.state assert state2.context.user_id == hass_admin_user.id def test_deprecated_base_class(caplog): """Test deprecated base class.""" class CustomSwitch(switch.SwitchDevice): pass CustomSwitch() assert "SwitchDevice is deprecated, modify CustomSwitch" in caplog.text
GenericStudent/home-assistant
tests/components/switch/test_init.py
homeassistant/components/lcn/helpers.py
"""Util to handle processes.""" import subprocess def kill_subprocess(process: subprocess.Popen) -> None: """Force kill a subprocess and wait for it to exit.""" process.kill() process.communicate() process.wait() del process
"""The tests for the Switch component.""" import pytest from homeassistant import core from homeassistant.components import switch from homeassistant.const import CONF_PLATFORM from homeassistant.setup import async_setup_component from tests.components.switch import common @pytest.fixture(autouse=True) def entities(hass): """Initialize the test switch.""" platform = getattr(hass.components, "test.switch") platform.init() yield platform.ENTITIES async def test_methods(hass, entities): """Test is_on, turn_on, turn_off methods.""" switch_1, switch_2, switch_3 = entities assert await async_setup_component( hass, switch.DOMAIN, {switch.DOMAIN: {CONF_PLATFORM: "test"}} ) await hass.async_block_till_done() assert switch.is_on(hass, switch_1.entity_id) assert not switch.is_on(hass, switch_2.entity_id) assert not switch.is_on(hass, switch_3.entity_id) await common.async_turn_off(hass, switch_1.entity_id) await common.async_turn_on(hass, switch_2.entity_id) assert not switch.is_on(hass, switch_1.entity_id) assert switch.is_on(hass, switch_2.entity_id) # Turn all off await common.async_turn_off(hass) assert not switch.is_on(hass, switch_1.entity_id) assert not switch.is_on(hass, switch_2.entity_id) assert not switch.is_on(hass, switch_3.entity_id) # Turn all on await common.async_turn_on(hass) assert switch.is_on(hass, switch_1.entity_id) assert switch.is_on(hass, switch_2.entity_id) assert switch.is_on(hass, switch_3.entity_id) async def test_switch_context(hass, entities, hass_admin_user): """Test that switch context works.""" assert await async_setup_component(hass, "switch", {"switch": {"platform": "test"}}) await hass.async_block_till_done() state = hass.states.get("switch.ac") assert state is not None await hass.services.async_call( "switch", "toggle", {"entity_id": state.entity_id}, True, core.Context(user_id=hass_admin_user.id), ) state2 = hass.states.get("switch.ac") assert state2 is not None assert state.state != state2.state assert state2.context.user_id == hass_admin_user.id def test_deprecated_base_class(caplog): """Test deprecated base class.""" class CustomSwitch(switch.SwitchDevice): pass CustomSwitch() assert "SwitchDevice is deprecated, modify CustomSwitch" in caplog.text
GenericStudent/home-assistant
tests/components/switch/test_init.py
homeassistant/util/process.py
"""Constants for the Coolmaster integration.""" from homeassistant.components.climate.const import ( HVAC_MODE_COOL, HVAC_MODE_DRY, HVAC_MODE_FAN_ONLY, HVAC_MODE_HEAT, HVAC_MODE_HEAT_COOL, HVAC_MODE_OFF, ) DATA_INFO = "info" DATA_COORDINATOR = "coordinator" DOMAIN = "coolmaster" DEFAULT_PORT = 10102 CONF_SUPPORTED_MODES = "supported_modes" AVAILABLE_MODES = [ HVAC_MODE_OFF, HVAC_MODE_HEAT, HVAC_MODE_COOL, HVAC_MODE_DRY, HVAC_MODE_HEAT_COOL, HVAC_MODE_FAN_ONLY, ]
"""The tests for the Switch component.""" import pytest from homeassistant import core from homeassistant.components import switch from homeassistant.const import CONF_PLATFORM from homeassistant.setup import async_setup_component from tests.components.switch import common @pytest.fixture(autouse=True) def entities(hass): """Initialize the test switch.""" platform = getattr(hass.components, "test.switch") platform.init() yield platform.ENTITIES async def test_methods(hass, entities): """Test is_on, turn_on, turn_off methods.""" switch_1, switch_2, switch_3 = entities assert await async_setup_component( hass, switch.DOMAIN, {switch.DOMAIN: {CONF_PLATFORM: "test"}} ) await hass.async_block_till_done() assert switch.is_on(hass, switch_1.entity_id) assert not switch.is_on(hass, switch_2.entity_id) assert not switch.is_on(hass, switch_3.entity_id) await common.async_turn_off(hass, switch_1.entity_id) await common.async_turn_on(hass, switch_2.entity_id) assert not switch.is_on(hass, switch_1.entity_id) assert switch.is_on(hass, switch_2.entity_id) # Turn all off await common.async_turn_off(hass) assert not switch.is_on(hass, switch_1.entity_id) assert not switch.is_on(hass, switch_2.entity_id) assert not switch.is_on(hass, switch_3.entity_id) # Turn all on await common.async_turn_on(hass) assert switch.is_on(hass, switch_1.entity_id) assert switch.is_on(hass, switch_2.entity_id) assert switch.is_on(hass, switch_3.entity_id) async def test_switch_context(hass, entities, hass_admin_user): """Test that switch context works.""" assert await async_setup_component(hass, "switch", {"switch": {"platform": "test"}}) await hass.async_block_till_done() state = hass.states.get("switch.ac") assert state is not None await hass.services.async_call( "switch", "toggle", {"entity_id": state.entity_id}, True, core.Context(user_id=hass_admin_user.id), ) state2 = hass.states.get("switch.ac") assert state2 is not None assert state.state != state2.state assert state2.context.user_id == hass_admin_user.id def test_deprecated_base_class(caplog): """Test deprecated base class.""" class CustomSwitch(switch.SwitchDevice): pass CustomSwitch() assert "SwitchDevice is deprecated, modify CustomSwitch" in caplog.text
GenericStudent/home-assistant
tests/components/switch/test_init.py
homeassistant/components/coolmaster/const.py
"""Adds config flow for AccuWeather.""" import asyncio from accuweather import AccuWeather, ApiError, InvalidApiKeyError, RequestsExceededError from aiohttp import ClientError from aiohttp.client_exceptions import ClientConnectorError from async_timeout import timeout import voluptuous as vol from homeassistant import config_entries from homeassistant.const import CONF_API_KEY, CONF_LATITUDE, CONF_LONGITUDE, CONF_NAME from homeassistant.core import callback from homeassistant.helpers.aiohttp_client import async_get_clientsession import homeassistant.helpers.config_validation as cv from .const import CONF_FORECAST, DOMAIN # pylint:disable=unused-import class AccuWeatherFlowHandler(config_entries.ConfigFlow, domain=DOMAIN): """Config flow for AccuWeather.""" VERSION = 1 CONNECTION_CLASS = config_entries.CONN_CLASS_CLOUD_POLL async def async_step_user(self, user_input=None): """Handle a flow initialized by the user.""" # Under the terms of use of the API, one user can use one free API key. Due to # the small number of requests allowed, we only allow one integration instance. if self._async_current_entries(): return self.async_abort(reason="single_instance_allowed") errors = {} if user_input is not None: websession = async_get_clientsession(self.hass) try: async with timeout(10): accuweather = AccuWeather( user_input[CONF_API_KEY], websession, latitude=user_input[CONF_LATITUDE], longitude=user_input[CONF_LONGITUDE], ) await accuweather.async_get_location() except (ApiError, ClientConnectorError, asyncio.TimeoutError, ClientError): errors["base"] = "cannot_connect" except InvalidApiKeyError: errors[CONF_API_KEY] = "invalid_api_key" except RequestsExceededError: errors[CONF_API_KEY] = "requests_exceeded" else: await self.async_set_unique_id( accuweather.location_key, raise_on_progress=False ) return self.async_create_entry( title=user_input[CONF_NAME], data=user_input ) return self.async_show_form( step_id="user", data_schema=vol.Schema( { vol.Required(CONF_API_KEY): str, vol.Optional( CONF_LATITUDE, default=self.hass.config.latitude ): cv.latitude, vol.Optional( CONF_LONGITUDE, default=self.hass.config.longitude ): cv.longitude, vol.Optional( CONF_NAME, default=self.hass.config.location_name ): str, } ), errors=errors, ) @staticmethod @callback def async_get_options_flow(config_entry): """Options callback for AccuWeather.""" return AccuWeatherOptionsFlowHandler(config_entry) class AccuWeatherOptionsFlowHandler(config_entries.OptionsFlow): """Config flow options for AccuWeather.""" def __init__(self, config_entry): """Initialize AccuWeather options flow.""" self.config_entry = config_entry async def async_step_init(self, user_input=None): """Manage the options.""" return await self.async_step_user() async def async_step_user(self, user_input=None): """Handle a flow initialized by the user.""" if user_input is not None: return self.async_create_entry(title="", data=user_input) return self.async_show_form( step_id="user", data_schema=vol.Schema( { vol.Optional( CONF_FORECAST, default=self.config_entry.options.get(CONF_FORECAST, False), ): bool } ), )
"""The tests for the Switch component.""" import pytest from homeassistant import core from homeassistant.components import switch from homeassistant.const import CONF_PLATFORM from homeassistant.setup import async_setup_component from tests.components.switch import common @pytest.fixture(autouse=True) def entities(hass): """Initialize the test switch.""" platform = getattr(hass.components, "test.switch") platform.init() yield platform.ENTITIES async def test_methods(hass, entities): """Test is_on, turn_on, turn_off methods.""" switch_1, switch_2, switch_3 = entities assert await async_setup_component( hass, switch.DOMAIN, {switch.DOMAIN: {CONF_PLATFORM: "test"}} ) await hass.async_block_till_done() assert switch.is_on(hass, switch_1.entity_id) assert not switch.is_on(hass, switch_2.entity_id) assert not switch.is_on(hass, switch_3.entity_id) await common.async_turn_off(hass, switch_1.entity_id) await common.async_turn_on(hass, switch_2.entity_id) assert not switch.is_on(hass, switch_1.entity_id) assert switch.is_on(hass, switch_2.entity_id) # Turn all off await common.async_turn_off(hass) assert not switch.is_on(hass, switch_1.entity_id) assert not switch.is_on(hass, switch_2.entity_id) assert not switch.is_on(hass, switch_3.entity_id) # Turn all on await common.async_turn_on(hass) assert switch.is_on(hass, switch_1.entity_id) assert switch.is_on(hass, switch_2.entity_id) assert switch.is_on(hass, switch_3.entity_id) async def test_switch_context(hass, entities, hass_admin_user): """Test that switch context works.""" assert await async_setup_component(hass, "switch", {"switch": {"platform": "test"}}) await hass.async_block_till_done() state = hass.states.get("switch.ac") assert state is not None await hass.services.async_call( "switch", "toggle", {"entity_id": state.entity_id}, True, core.Context(user_id=hass_admin_user.id), ) state2 = hass.states.get("switch.ac") assert state2 is not None assert state.state != state2.state assert state2.context.user_id == hass_admin_user.id def test_deprecated_base_class(caplog): """Test deprecated base class.""" class CustomSwitch(switch.SwitchDevice): pass CustomSwitch() assert "SwitchDevice is deprecated, modify CustomSwitch" in caplog.text
GenericStudent/home-assistant
tests/components/switch/test_init.py
homeassistant/components/accuweather/config_flow.py
"""Sensor for the CityBikes data.""" import asyncio from datetime import timedelta import logging import aiohttp import async_timeout import voluptuous as vol from homeassistant.components.sensor import ENTITY_ID_FORMAT, PLATFORM_SCHEMA from homeassistant.const import ( ATTR_ATTRIBUTION, ATTR_ID, ATTR_LATITUDE, ATTR_LOCATION, ATTR_LONGITUDE, ATTR_NAME, CONF_LATITUDE, CONF_LONGITUDE, CONF_NAME, CONF_RADIUS, LENGTH_FEET, LENGTH_METERS, ) from homeassistant.exceptions import PlatformNotReady from homeassistant.helpers.aiohttp_client import async_get_clientsession import homeassistant.helpers.config_validation as cv from homeassistant.helpers.entity import Entity, async_generate_entity_id from homeassistant.helpers.event import async_track_time_interval from homeassistant.util import distance, location _LOGGER = logging.getLogger(__name__) ATTR_EMPTY_SLOTS = "empty_slots" ATTR_EXTRA = "extra" ATTR_FREE_BIKES = "free_bikes" ATTR_NETWORK = "network" ATTR_NETWORKS_LIST = "networks" ATTR_STATIONS_LIST = "stations" ATTR_TIMESTAMP = "timestamp" ATTR_UID = "uid" CONF_NETWORK = "network" CONF_STATIONS_LIST = "stations" DEFAULT_ENDPOINT = "https://api.citybik.es/{uri}" PLATFORM = "citybikes" MONITORED_NETWORKS = "monitored-networks" NETWORKS_URI = "v2/networks" REQUEST_TIMEOUT = 5 # In seconds; argument to asyncio.timeout SCAN_INTERVAL = timedelta(minutes=5) # Timely, and doesn't suffocate the API STATIONS_URI = "v2/networks/{uid}?fields=network.stations" CITYBIKES_ATTRIBUTION = ( "Information provided by the CityBikes Project (https://citybik.es/#about)" ) CITYBIKES_NETWORKS = "citybikes_networks" PLATFORM_SCHEMA = vol.All( cv.has_at_least_one_key(CONF_RADIUS, CONF_STATIONS_LIST), PLATFORM_SCHEMA.extend( { vol.Optional(CONF_NAME, default=""): cv.string, vol.Optional(CONF_NETWORK): cv.string, vol.Inclusive(CONF_LATITUDE, "coordinates"): cv.latitude, vol.Inclusive(CONF_LONGITUDE, "coordinates"): cv.longitude, vol.Optional(CONF_RADIUS, "station_filter"): cv.positive_int, vol.Optional(CONF_STATIONS_LIST, "station_filter"): vol.All( cv.ensure_list, vol.Length(min=1), [cv.string] ), } ), ) NETWORK_SCHEMA = vol.Schema( { vol.Required(ATTR_ID): cv.string, vol.Required(ATTR_NAME): cv.string, vol.Required(ATTR_LOCATION): vol.Schema( { vol.Required(ATTR_LATITUDE): cv.latitude, vol.Required(ATTR_LONGITUDE): cv.longitude, }, extra=vol.REMOVE_EXTRA, ), }, extra=vol.REMOVE_EXTRA, ) NETWORKS_RESPONSE_SCHEMA = vol.Schema( {vol.Required(ATTR_NETWORKS_LIST): [NETWORK_SCHEMA]} ) STATION_SCHEMA = vol.Schema( { vol.Required(ATTR_FREE_BIKES): cv.positive_int, vol.Required(ATTR_EMPTY_SLOTS): vol.Any(cv.positive_int, None), vol.Required(ATTR_LATITUDE): cv.latitude, vol.Required(ATTR_LONGITUDE): cv.longitude, vol.Required(ATTR_ID): cv.string, vol.Required(ATTR_NAME): cv.string, vol.Required(ATTR_TIMESTAMP): cv.string, vol.Optional(ATTR_EXTRA): vol.Schema( {vol.Optional(ATTR_UID): cv.string}, extra=vol.REMOVE_EXTRA ), }, extra=vol.REMOVE_EXTRA, ) STATIONS_RESPONSE_SCHEMA = vol.Schema( { vol.Required(ATTR_NETWORK): vol.Schema( {vol.Required(ATTR_STATIONS_LIST): [STATION_SCHEMA]}, extra=vol.REMOVE_EXTRA ) } ) class CityBikesRequestError(Exception): """Error to indicate a CityBikes API request has failed.""" async def async_citybikes_request(hass, uri, schema): """Perform a request to CityBikes API endpoint, and parse the response.""" try: session = async_get_clientsession(hass) with async_timeout.timeout(REQUEST_TIMEOUT): req = await session.get(DEFAULT_ENDPOINT.format(uri=uri)) json_response = await req.json() return 
schema(json_response) except (asyncio.TimeoutError, aiohttp.ClientError): _LOGGER.error("Could not connect to CityBikes API endpoint") except ValueError: _LOGGER.error("Received non-JSON data from CityBikes API endpoint") except vol.Invalid as err: _LOGGER.error("Received unexpected JSON from CityBikes API endpoint: %s", err) raise CityBikesRequestError async def async_setup_platform(hass, config, async_add_entities, discovery_info=None): """Set up the CityBikes platform.""" if PLATFORM not in hass.data: hass.data[PLATFORM] = {MONITORED_NETWORKS: {}} latitude = config.get(CONF_LATITUDE, hass.config.latitude) longitude = config.get(CONF_LONGITUDE, hass.config.longitude) network_id = config.get(CONF_NETWORK) stations_list = set(config.get(CONF_STATIONS_LIST, [])) radius = config.get(CONF_RADIUS, 0) name = config[CONF_NAME] if not hass.config.units.is_metric: radius = distance.convert(radius, LENGTH_FEET, LENGTH_METERS) # Create a single instance of CityBikesNetworks. networks = hass.data.setdefault(CITYBIKES_NETWORKS, CityBikesNetworks(hass)) if not network_id: network_id = await networks.get_closest_network_id(latitude, longitude) if network_id not in hass.data[PLATFORM][MONITORED_NETWORKS]: network = CityBikesNetwork(hass, network_id) hass.data[PLATFORM][MONITORED_NETWORKS][network_id] = network hass.async_create_task(network.async_refresh()) async_track_time_interval(hass, network.async_refresh, SCAN_INTERVAL) else: network = hass.data[PLATFORM][MONITORED_NETWORKS][network_id] await network.ready.wait() devices = [] for station in network.stations: dist = location.distance( latitude, longitude, station[ATTR_LATITUDE], station[ATTR_LONGITUDE] ) station_id = station[ATTR_ID] station_uid = str(station.get(ATTR_EXTRA, {}).get(ATTR_UID, "")) if radius > dist or stations_list.intersection((station_id, station_uid)): if name: uid = "_".join([network.network_id, name, station_id]) else: uid = "_".join([network.network_id, station_id]) entity_id = async_generate_entity_id(ENTITY_ID_FORMAT, uid, hass=hass) devices.append(CityBikesStation(network, station_id, entity_id)) async_add_entities(devices, True) class CityBikesNetworks: """Represent all CityBikes networks.""" def __init__(self, hass): """Initialize the networks instance.""" self.hass = hass self.networks = None self.networks_loading = asyncio.Condition() async def get_closest_network_id(self, latitude, longitude): """Return the id of the network closest to provided location.""" try: await self.networks_loading.acquire() if self.networks is None: networks = await async_citybikes_request( self.hass, NETWORKS_URI, NETWORKS_RESPONSE_SCHEMA ) self.networks = networks[ATTR_NETWORKS_LIST] result = None minimum_dist = None for network in self.networks: network_latitude = network[ATTR_LOCATION][ATTR_LATITUDE] network_longitude = network[ATTR_LOCATION][ATTR_LONGITUDE] dist = location.distance( latitude, longitude, network_latitude, network_longitude ) if minimum_dist is None or dist < minimum_dist: minimum_dist = dist result = network[ATTR_ID] return result except CityBikesRequestError as err: raise PlatformNotReady from err finally: self.networks_loading.release() class CityBikesNetwork: """Thin wrapper around a CityBikes network object.""" def __init__(self, hass, network_id): """Initialize the network object.""" self.hass = hass self.network_id = network_id self.stations = [] self.ready = asyncio.Event() async def async_refresh(self, now=None): """Refresh the state of the network.""" try: network = await async_citybikes_request( self.hass, 
STATIONS_URI.format(uid=self.network_id), STATIONS_RESPONSE_SCHEMA, ) self.stations = network[ATTR_NETWORK][ATTR_STATIONS_LIST] self.ready.set() except CityBikesRequestError as err: if now is not None: self.ready.clear() else: raise PlatformNotReady from err class CityBikesStation(Entity): """CityBikes API Sensor.""" def __init__(self, network, station_id, entity_id): """Initialize the sensor.""" self._network = network self._station_id = station_id self._station_data = {} self.entity_id = entity_id @property def state(self): """Return the state of the sensor.""" return self._station_data.get(ATTR_FREE_BIKES) @property def name(self): """Return the name of the sensor.""" return self._station_data.get(ATTR_NAME) async def async_update(self): """Update station state.""" for station in self._network.stations: if station[ATTR_ID] == self._station_id: self._station_data = station break @property def device_state_attributes(self): """Return the state attributes.""" if self._station_data: return { ATTR_ATTRIBUTION: CITYBIKES_ATTRIBUTION, ATTR_UID: self._station_data.get(ATTR_EXTRA, {}).get(ATTR_UID), ATTR_LATITUDE: self._station_data[ATTR_LATITUDE], ATTR_LONGITUDE: self._station_data[ATTR_LONGITUDE], ATTR_EMPTY_SLOTS: self._station_data[ATTR_EMPTY_SLOTS], ATTR_TIMESTAMP: self._station_data[ATTR_TIMESTAMP], } return {ATTR_ATTRIBUTION: CITYBIKES_ATTRIBUTION} @property def unit_of_measurement(self): """Return the unit of measurement.""" return "bikes" @property def icon(self): """Return the icon.""" return "mdi:bike"
"""The tests for the Switch component.""" import pytest from homeassistant import core from homeassistant.components import switch from homeassistant.const import CONF_PLATFORM from homeassistant.setup import async_setup_component from tests.components.switch import common @pytest.fixture(autouse=True) def entities(hass): """Initialize the test switch.""" platform = getattr(hass.components, "test.switch") platform.init() yield platform.ENTITIES async def test_methods(hass, entities): """Test is_on, turn_on, turn_off methods.""" switch_1, switch_2, switch_3 = entities assert await async_setup_component( hass, switch.DOMAIN, {switch.DOMAIN: {CONF_PLATFORM: "test"}} ) await hass.async_block_till_done() assert switch.is_on(hass, switch_1.entity_id) assert not switch.is_on(hass, switch_2.entity_id) assert not switch.is_on(hass, switch_3.entity_id) await common.async_turn_off(hass, switch_1.entity_id) await common.async_turn_on(hass, switch_2.entity_id) assert not switch.is_on(hass, switch_1.entity_id) assert switch.is_on(hass, switch_2.entity_id) # Turn all off await common.async_turn_off(hass) assert not switch.is_on(hass, switch_1.entity_id) assert not switch.is_on(hass, switch_2.entity_id) assert not switch.is_on(hass, switch_3.entity_id) # Turn all on await common.async_turn_on(hass) assert switch.is_on(hass, switch_1.entity_id) assert switch.is_on(hass, switch_2.entity_id) assert switch.is_on(hass, switch_3.entity_id) async def test_switch_context(hass, entities, hass_admin_user): """Test that switch context works.""" assert await async_setup_component(hass, "switch", {"switch": {"platform": "test"}}) await hass.async_block_till_done() state = hass.states.get("switch.ac") assert state is not None await hass.services.async_call( "switch", "toggle", {"entity_id": state.entity_id}, True, core.Context(user_id=hass_admin_user.id), ) state2 = hass.states.get("switch.ac") assert state2 is not None assert state.state != state2.state assert state2.context.user_id == hass_admin_user.id def test_deprecated_base_class(caplog): """Test deprecated base class.""" class CustomSwitch(switch.SwitchDevice): pass CustomSwitch() assert "SwitchDevice is deprecated, modify CustomSwitch" in caplog.text
GenericStudent/home-assistant
tests/components/switch/test_init.py
homeassistant/components/citybikes/sensor.py
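A minimal sketch of a configuration that satisfies the PLATFORM_SCHEMA of the citybikes sensor above. Only the key names come from the schema; the network id, coordinates and station ids are illustrative placeholders, and in practice this dict would come from configuration.yaml.

# Hypothetical configuration exercising the citybikes PLATFORM_SCHEMA above.
# Only the key names come from the schema; all values are placeholders.
CITYBIKES_EXAMPLE_CONFIG = {
    "sensor": {
        "platform": "citybikes",
        "name": "docks",
        "network": "example-network-id",  # optional; the closest network is auto-detected if omitted
        "latitude": 48.85,
        "longitude": 2.35,
        "radius": 250,  # interpreted as feet on imperial installs and converted to meters
        "stations": ["12", "34"],  # alternative/additional station filter
    }
}

# In a test harness this would be handed to the sensor platform with:
#   assert await async_setup_component(hass, "sensor", CITYBIKES_EXAMPLE_CONFIG)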
"""Config flow for NZBGet.""" import logging from typing import Any, Dict, Optional import voluptuous as vol from homeassistant.config_entries import CONN_CLASS_LOCAL_POLL, ConfigFlow, OptionsFlow from homeassistant.const import ( CONF_HOST, CONF_NAME, CONF_PASSWORD, CONF_PORT, CONF_SCAN_INTERVAL, CONF_SSL, CONF_USERNAME, CONF_VERIFY_SSL, ) from homeassistant.core import callback from homeassistant.helpers.typing import ConfigType, HomeAssistantType from .const import ( DEFAULT_NAME, DEFAULT_PORT, DEFAULT_SCAN_INTERVAL, DEFAULT_SSL, DEFAULT_VERIFY_SSL, ) from .const import DOMAIN # pylint: disable=unused-import from .coordinator import NZBGetAPI, NZBGetAPIException _LOGGER = logging.getLogger(__name__) def validate_input(hass: HomeAssistantType, data: dict) -> Dict[str, Any]: """Validate the user input allows us to connect. Data has the keys from DATA_SCHEMA with values provided by the user. """ nzbget_api = NZBGetAPI( data[CONF_HOST], data.get(CONF_USERNAME), data.get(CONF_PASSWORD), data[CONF_SSL], data[CONF_VERIFY_SSL], data[CONF_PORT], ) nzbget_api.version() return True class NZBGetConfigFlow(ConfigFlow, domain=DOMAIN): """Handle a config flow for NZBGet.""" VERSION = 1 CONNECTION_CLASS = CONN_CLASS_LOCAL_POLL @staticmethod @callback def async_get_options_flow(config_entry): """Get the options flow for this handler.""" return NZBGetOptionsFlowHandler(config_entry) async def async_step_import( self, user_input: Optional[ConfigType] = None ) -> Dict[str, Any]: """Handle a flow initiated by configuration file.""" if CONF_SCAN_INTERVAL in user_input: user_input[CONF_SCAN_INTERVAL] = user_input[CONF_SCAN_INTERVAL].seconds return await self.async_step_user(user_input) async def async_step_user( self, user_input: Optional[ConfigType] = None ) -> Dict[str, Any]: """Handle a flow initiated by the user.""" if self._async_current_entries(): return self.async_abort(reason="single_instance_allowed") errors = {} if user_input is not None: if CONF_VERIFY_SSL not in user_input: user_input[CONF_VERIFY_SSL] = DEFAULT_VERIFY_SSL try: await self.hass.async_add_executor_job( validate_input, self.hass, user_input ) except NZBGetAPIException: errors["base"] = "cannot_connect" except Exception: # pylint: disable=broad-except _LOGGER.exception("Unexpected exception") return self.async_abort(reason="unknown") else: return self.async_create_entry( title=user_input[CONF_HOST], data=user_input, ) data_schema = { vol.Required(CONF_HOST): str, vol.Optional(CONF_NAME, default=DEFAULT_NAME): str, vol.Optional(CONF_USERNAME): str, vol.Optional(CONF_PASSWORD): str, vol.Optional(CONF_PORT, default=DEFAULT_PORT): int, vol.Optional(CONF_SSL, default=DEFAULT_SSL): bool, } if self.show_advanced_options: data_schema[ vol.Optional(CONF_VERIFY_SSL, default=DEFAULT_VERIFY_SSL) ] = bool return self.async_show_form( step_id="user", data_schema=vol.Schema(data_schema), errors=errors or {}, ) class NZBGetOptionsFlowHandler(OptionsFlow): """Handle NZBGet client options.""" def __init__(self, config_entry): """Initialize options flow.""" self.config_entry = config_entry async def async_step_init(self, user_input: Optional[ConfigType] = None): """Manage NZBGet options.""" if user_input is not None: return self.async_create_entry(title="", data=user_input) options = { vol.Optional( CONF_SCAN_INTERVAL, default=self.config_entry.options.get( CONF_SCAN_INTERVAL, DEFAULT_SCAN_INTERVAL ), ): int, } return self.async_show_form(step_id="init", data_schema=vol.Schema(options))
"""The tests for the Switch component.""" import pytest from homeassistant import core from homeassistant.components import switch from homeassistant.const import CONF_PLATFORM from homeassistant.setup import async_setup_component from tests.components.switch import common @pytest.fixture(autouse=True) def entities(hass): """Initialize the test switch.""" platform = getattr(hass.components, "test.switch") platform.init() yield platform.ENTITIES async def test_methods(hass, entities): """Test is_on, turn_on, turn_off methods.""" switch_1, switch_2, switch_3 = entities assert await async_setup_component( hass, switch.DOMAIN, {switch.DOMAIN: {CONF_PLATFORM: "test"}} ) await hass.async_block_till_done() assert switch.is_on(hass, switch_1.entity_id) assert not switch.is_on(hass, switch_2.entity_id) assert not switch.is_on(hass, switch_3.entity_id) await common.async_turn_off(hass, switch_1.entity_id) await common.async_turn_on(hass, switch_2.entity_id) assert not switch.is_on(hass, switch_1.entity_id) assert switch.is_on(hass, switch_2.entity_id) # Turn all off await common.async_turn_off(hass) assert not switch.is_on(hass, switch_1.entity_id) assert not switch.is_on(hass, switch_2.entity_id) assert not switch.is_on(hass, switch_3.entity_id) # Turn all on await common.async_turn_on(hass) assert switch.is_on(hass, switch_1.entity_id) assert switch.is_on(hass, switch_2.entity_id) assert switch.is_on(hass, switch_3.entity_id) async def test_switch_context(hass, entities, hass_admin_user): """Test that switch context works.""" assert await async_setup_component(hass, "switch", {"switch": {"platform": "test"}}) await hass.async_block_till_done() state = hass.states.get("switch.ac") assert state is not None await hass.services.async_call( "switch", "toggle", {"entity_id": state.entity_id}, True, core.Context(user_id=hass_admin_user.id), ) state2 = hass.states.get("switch.ac") assert state2 is not None assert state.state != state2.state assert state2.context.user_id == hass_admin_user.id def test_deprecated_base_class(caplog): """Test deprecated base class.""" class CustomSwitch(switch.SwitchDevice): pass CustomSwitch() assert "SwitchDevice is deprecated, modify CustomSwitch" in caplog.text
GenericStudent/home-assistant
tests/components/switch/test_init.py
homeassistant/components/nzbget/config_flow.py
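For orientation, a hedged sketch of the user_input dict that the form built in async_step_user would produce, written with the constants imported at the top of that file; the host, credentials and port are invented placeholders.

# Hypothetical user input matching the data_schema assembled in async_step_user.
# Values are placeholders; CONF_VERIFY_SSL defaults in when advanced options are
# hidden, mirroring the branch in the flow above.
EXAMPLE_USER_INPUT = {
    CONF_HOST: "10.0.0.2",
    CONF_NAME: DEFAULT_NAME,
    CONF_USERNAME: "nzbget",
    CONF_PASSWORD: "example-password",
    CONF_PORT: DEFAULT_PORT,
    CONF_SSL: DEFAULT_SSL,
    CONF_VERIFY_SSL: DEFAULT_VERIFY_SSL,
}

# The flow then runs the blocking API probe off the event loop:
#   await hass.async_add_executor_job(validate_input, hass, EXAMPLE_USER_INPUT)
# and on success creates a config entry titled with the host.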
"""Support for LiteJet lights.""" import logging from homeassistant.components import litejet from homeassistant.components.light import ( ATTR_BRIGHTNESS, SUPPORT_BRIGHTNESS, LightEntity, ) _LOGGER = logging.getLogger(__name__) ATTR_NUMBER = "number" def setup_platform(hass, config, add_entities, discovery_info=None): """Set up lights for the LiteJet platform.""" litejet_ = hass.data["litejet_system"] devices = [] for i in litejet_.loads(): name = litejet_.get_load_name(i) if not litejet.is_ignored(hass, name): devices.append(LiteJetLight(hass, litejet_, i, name)) add_entities(devices, True) class LiteJetLight(LightEntity): """Representation of a single LiteJet light.""" def __init__(self, hass, lj, i, name): """Initialize a LiteJet light.""" self._hass = hass self._lj = lj self._index = i self._brightness = 0 self._name = name lj.on_load_activated(i, self._on_load_changed) lj.on_load_deactivated(i, self._on_load_changed) def _on_load_changed(self): """Handle state changes.""" _LOGGER.debug("Updating due to notification for %s", self._name) self.schedule_update_ha_state(True) @property def supported_features(self): """Flag supported features.""" return SUPPORT_BRIGHTNESS @property def name(self): """Return the light's name.""" return self._name @property def brightness(self): """Return the light's brightness.""" return self._brightness @property def is_on(self): """Return if the light is on.""" return self._brightness != 0 @property def should_poll(self): """Return that lights do not require polling.""" return False @property def device_state_attributes(self): """Return the device state attributes.""" return {ATTR_NUMBER: self._index} def turn_on(self, **kwargs): """Turn on the light.""" if ATTR_BRIGHTNESS in kwargs: brightness = int(kwargs[ATTR_BRIGHTNESS] / 255 * 99) self._lj.activate_load_at(self._index, brightness, 0) else: self._lj.activate_load(self._index) def turn_off(self, **kwargs): """Turn off the light.""" self._lj.deactivate_load(self._index) def update(self): """Retrieve the light's brightness from the LiteJet system.""" self._brightness = self._lj.get_load_level(self._index) / 99 * 255
"""The tests for the Switch component.""" import pytest from homeassistant import core from homeassistant.components import switch from homeassistant.const import CONF_PLATFORM from homeassistant.setup import async_setup_component from tests.components.switch import common @pytest.fixture(autouse=True) def entities(hass): """Initialize the test switch.""" platform = getattr(hass.components, "test.switch") platform.init() yield platform.ENTITIES async def test_methods(hass, entities): """Test is_on, turn_on, turn_off methods.""" switch_1, switch_2, switch_3 = entities assert await async_setup_component( hass, switch.DOMAIN, {switch.DOMAIN: {CONF_PLATFORM: "test"}} ) await hass.async_block_till_done() assert switch.is_on(hass, switch_1.entity_id) assert not switch.is_on(hass, switch_2.entity_id) assert not switch.is_on(hass, switch_3.entity_id) await common.async_turn_off(hass, switch_1.entity_id) await common.async_turn_on(hass, switch_2.entity_id) assert not switch.is_on(hass, switch_1.entity_id) assert switch.is_on(hass, switch_2.entity_id) # Turn all off await common.async_turn_off(hass) assert not switch.is_on(hass, switch_1.entity_id) assert not switch.is_on(hass, switch_2.entity_id) assert not switch.is_on(hass, switch_3.entity_id) # Turn all on await common.async_turn_on(hass) assert switch.is_on(hass, switch_1.entity_id) assert switch.is_on(hass, switch_2.entity_id) assert switch.is_on(hass, switch_3.entity_id) async def test_switch_context(hass, entities, hass_admin_user): """Test that switch context works.""" assert await async_setup_component(hass, "switch", {"switch": {"platform": "test"}}) await hass.async_block_till_done() state = hass.states.get("switch.ac") assert state is not None await hass.services.async_call( "switch", "toggle", {"entity_id": state.entity_id}, True, core.Context(user_id=hass_admin_user.id), ) state2 = hass.states.get("switch.ac") assert state2 is not None assert state.state != state2.state assert state2.context.user_id == hass_admin_user.id def test_deprecated_base_class(caplog): """Test deprecated base class.""" class CustomSwitch(switch.SwitchDevice): pass CustomSwitch() assert "SwitchDevice is deprecated, modify CustomSwitch" in caplog.text
GenericStudent/home-assistant
tests/components/switch/test_init.py
homeassistant/components/litejet/light.py
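A small sketch of the brightness scaling used by LiteJetLight above: Home Assistant brightness runs 0-255 while LiteJet load levels run 0-99, so turn_on and update convert between the two ranges. The helper names here are illustrative and not part of the integration.

# Illustrative helpers mirroring the conversions in LiteJetLight.turn_on and update.
def ha_to_litejet(brightness: int) -> int:
    """Convert Home Assistant brightness (0-255) to a LiteJet load level (0-99)."""
    return int(brightness / 255 * 99)

def litejet_to_ha(level: int) -> float:
    """Convert a LiteJet load level (0-99) back to brightness; update() keeps the float."""
    return level / 99 * 255

# The round trip is slightly lossy because of integer truncation:
#   ha_to_litejet(255) == 99 and litejet_to_ha(99) == 255.0,
#   but ha_to_litejet(128) == 49 and litejet_to_ha(49) is about 126.2, not 128.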
"""Support for Tado sensors for each zone.""" import logging from homeassistant.config_entries import ConfigEntry from homeassistant.const import PERCENTAGE, TEMP_CELSIUS from homeassistant.core import HomeAssistant, callback from homeassistant.helpers.dispatcher import async_dispatcher_connect from homeassistant.helpers.entity import Entity from .const import ( DATA, DEFAULT_NAME, DOMAIN, SIGNAL_TADO_UPDATE_RECEIVED, TADO_BRIDGE, TYPE_AIR_CONDITIONING, TYPE_HEATING, TYPE_HOT_WATER, ) from .entity import TadoZoneEntity _LOGGER = logging.getLogger(__name__) ZONE_SENSORS = { TYPE_HEATING: [ "temperature", "humidity", "power", "link", "heating", "tado mode", "overlay", "early start", "open window", ], TYPE_AIR_CONDITIONING: [ "temperature", "humidity", "power", "link", "ac", "tado mode", "overlay", "open window", ], TYPE_HOT_WATER: ["power", "link", "tado mode", "overlay"], } DEVICE_SENSORS = ["tado bridge status"] async def async_setup_entry( hass: HomeAssistant, entry: ConfigEntry, async_add_entities ): """Set up the Tado sensor platform.""" tado = hass.data[DOMAIN][entry.entry_id][DATA] # Create zone sensors zones = tado.zones devices = tado.devices entities = [] for zone in zones: zone_type = zone["type"] if zone_type not in ZONE_SENSORS: _LOGGER.warning("Unknown zone type skipped: %s", zone_type) continue entities.extend( [ TadoZoneSensor( tado, zone["name"], zone["id"], variable, zone["devices"][0] ) for variable in ZONE_SENSORS[zone_type] ] ) # Create device sensors for device in devices: entities.extend( [ TadoDeviceSensor(tado, device["name"], device["id"], variable, device) for variable in DEVICE_SENSORS ] ) if entities: async_add_entities(entities, True) class TadoZoneSensor(TadoZoneEntity, Entity): """Representation of a tado Sensor.""" def __init__(self, tado, zone_name, zone_id, zone_variable, device_info): """Initialize of the Tado Sensor.""" self._tado = tado super().__init__(zone_name, device_info, tado.device_id, zone_id) self.zone_id = zone_id self.zone_variable = zone_variable self._unique_id = f"{zone_variable} {zone_id} {tado.device_id}" self._state = None self._state_attributes = None self._tado_zone_data = None async def async_added_to_hass(self): """Register for sensor updates.""" self.async_on_remove( async_dispatcher_connect( self.hass, SIGNAL_TADO_UPDATE_RECEIVED.format( self._tado.device_id, "zone", self.zone_id ), self._async_update_callback, ) ) self._async_update_zone_data() @property def unique_id(self): """Return the unique id.""" return self._unique_id @property def name(self): """Return the name of the sensor.""" return f"{self.zone_name} {self.zone_variable}" @property def state(self): """Return the state of the sensor.""" return self._state @property def device_state_attributes(self): """Return the state attributes.""" return self._state_attributes @property def unit_of_measurement(self): """Return the unit of measurement.""" if self.zone_variable == "temperature": return self.hass.config.units.temperature_unit if self.zone_variable == "humidity": return PERCENTAGE if self.zone_variable == "heating": return PERCENTAGE if self.zone_variable == "ac": return None @property def icon(self): """Icon for the sensor.""" if self.zone_variable == "temperature": return "mdi:thermometer" if self.zone_variable == "humidity": return "mdi:water-percent" @callback def _async_update_callback(self): """Update and write state.""" self._async_update_zone_data() self.async_write_ha_state() @callback def _async_update_zone_data(self): """Handle update callbacks.""" try: 
self._tado_zone_data = self._tado.data["zone"][self.zone_id] except KeyError: return if self.zone_variable == "temperature": self._state = self.hass.config.units.temperature( self._tado_zone_data.current_temp, TEMP_CELSIUS ) self._state_attributes = { "time": self._tado_zone_data.current_temp_timestamp, "setting": 0, # setting is used in climate device } elif self.zone_variable == "humidity": self._state = self._tado_zone_data.current_humidity self._state_attributes = { "time": self._tado_zone_data.current_humidity_timestamp } elif self.zone_variable == "power": self._state = self._tado_zone_data.power elif self.zone_variable == "link": self._state = self._tado_zone_data.link elif self.zone_variable == "heating": self._state = self._tado_zone_data.heating_power_percentage self._state_attributes = { "time": self._tado_zone_data.heating_power_timestamp } elif self.zone_variable == "ac": self._state = self._tado_zone_data.ac_power self._state_attributes = {"time": self._tado_zone_data.ac_power_timestamp} elif self.zone_variable == "tado bridge status": self._state = self._tado_zone_data.connection elif self.zone_variable == "tado mode": self._state = self._tado_zone_data.tado_mode elif self.zone_variable == "overlay": self._state = self._tado_zone_data.overlay_active self._state_attributes = ( {"termination": self._tado_zone_data.overlay_termination_type} if self._tado_zone_data.overlay_active else {} ) elif self.zone_variable == "early start": self._state = self._tado_zone_data.preparation elif self.zone_variable == "open window": self._state = bool( self._tado_zone_data.open_window or self._tado_zone_data.open_window_detected ) self._state_attributes = self._tado_zone_data.open_window_attr class TadoDeviceSensor(Entity): """Representation of a tado Sensor.""" def __init__(self, tado, device_name, device_id, device_variable, device_info): """Initialize of the Tado Sensor.""" self._tado = tado self._device_info = device_info self.device_name = device_name self.device_id = device_id self.device_variable = device_variable self._unique_id = f"{device_variable} {device_id} {tado.device_id}" self._state = None self._state_attributes = None self._tado_device_data = None async def async_added_to_hass(self): """Register for sensor updates.""" self.async_on_remove( async_dispatcher_connect( self.hass, SIGNAL_TADO_UPDATE_RECEIVED.format( self._tado.device_id, "device", self.device_id ), self._async_update_callback, ) ) self._async_update_device_data() @property def unique_id(self): """Return the unique id.""" return self._unique_id @property def name(self): """Return the name of the sensor.""" return f"{self.device_name} {self.device_variable}" @property def state(self): """Return the state of the sensor.""" return self._state @property def should_poll(self): """Do not poll.""" return False @callback def _async_update_callback(self): """Update and write state.""" self._async_update_device_data() self.async_write_ha_state() @callback def _async_update_device_data(self): """Handle update callbacks.""" try: data = self._tado.data["device"][self.device_id] except KeyError: return if self.device_variable == "tado bridge status": self._state = data.get("connectionState", {}).get("value", False) @property def device_info(self): """Return the device_info of the device.""" return { "identifiers": {(DOMAIN, self.device_id)}, "name": self.device_name, "manufacturer": DEFAULT_NAME, "model": TADO_BRIDGE, }
"""The tests for the Switch component.""" import pytest from homeassistant import core from homeassistant.components import switch from homeassistant.const import CONF_PLATFORM from homeassistant.setup import async_setup_component from tests.components.switch import common @pytest.fixture(autouse=True) def entities(hass): """Initialize the test switch.""" platform = getattr(hass.components, "test.switch") platform.init() yield platform.ENTITIES async def test_methods(hass, entities): """Test is_on, turn_on, turn_off methods.""" switch_1, switch_2, switch_3 = entities assert await async_setup_component( hass, switch.DOMAIN, {switch.DOMAIN: {CONF_PLATFORM: "test"}} ) await hass.async_block_till_done() assert switch.is_on(hass, switch_1.entity_id) assert not switch.is_on(hass, switch_2.entity_id) assert not switch.is_on(hass, switch_3.entity_id) await common.async_turn_off(hass, switch_1.entity_id) await common.async_turn_on(hass, switch_2.entity_id) assert not switch.is_on(hass, switch_1.entity_id) assert switch.is_on(hass, switch_2.entity_id) # Turn all off await common.async_turn_off(hass) assert not switch.is_on(hass, switch_1.entity_id) assert not switch.is_on(hass, switch_2.entity_id) assert not switch.is_on(hass, switch_3.entity_id) # Turn all on await common.async_turn_on(hass) assert switch.is_on(hass, switch_1.entity_id) assert switch.is_on(hass, switch_2.entity_id) assert switch.is_on(hass, switch_3.entity_id) async def test_switch_context(hass, entities, hass_admin_user): """Test that switch context works.""" assert await async_setup_component(hass, "switch", {"switch": {"platform": "test"}}) await hass.async_block_till_done() state = hass.states.get("switch.ac") assert state is not None await hass.services.async_call( "switch", "toggle", {"entity_id": state.entity_id}, True, core.Context(user_id=hass_admin_user.id), ) state2 = hass.states.get("switch.ac") assert state2 is not None assert state.state != state2.state assert state2.context.user_id == hass_admin_user.id def test_deprecated_base_class(caplog): """Test deprecated base class.""" class CustomSwitch(switch.SwitchDevice): pass CustomSwitch() assert "SwitchDevice is deprecated, modify CustomSwitch" in caplog.text
GenericStudent/home-assistant
tests/components/switch/test_init.py
homeassistant/components/tado/sensor.py
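A hedged sketch of how the dispatcher wiring above gets exercised: each sensor subscribes to SIGNAL_TADO_UPDATE_RECEIVED formatted with the device id, the "zone" or "device" scope and its own id, so the data-fetching side (which lives in the Tado connector object, not in this file) is assumed to fire the matching signal after it refreshes tado.data.

# Assumed sending side of the per-zone update signal (the real call sits in the
# Tado connector object); async_dispatcher_send must run inside the event loop.
from homeassistant.helpers.dispatcher import async_dispatcher_send

def notify_zone_update(hass, tado, zone_id):
    """Fire the signal that TadoZoneSensor subscribed to in async_added_to_hass."""
    async_dispatcher_send(
        hass,
        SIGNAL_TADO_UPDATE_RECEIVED.format(tado.device_id, "zone", zone_id),
    )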
"""Support for HomeMatic devices.""" from datetime import datetime from functools import partial import logging from pyhomematic import HMConnection import voluptuous as vol from homeassistant.const import ( ATTR_ENTITY_ID, ATTR_MODE, ATTR_NAME, CONF_HOST, CONF_HOSTS, CONF_PASSWORD, CONF_PLATFORM, CONF_SSL, CONF_USERNAME, CONF_VERIFY_SSL, EVENT_HOMEASSISTANT_STOP, ) from homeassistant.helpers import discovery import homeassistant.helpers.config_validation as cv from .const import ( ATTR_ADDRESS, ATTR_CHANNEL, ATTR_DEVICE_TYPE, ATTR_DISCOVER_DEVICES, ATTR_DISCOVERY_TYPE, ATTR_ERRORCODE, ATTR_INTERFACE, ATTR_LOW_BAT, ATTR_LOWBAT, ATTR_MESSAGE, ATTR_PARAM, ATTR_PARAMSET, ATTR_PARAMSET_KEY, ATTR_RX_MODE, ATTR_TIME, ATTR_UNIQUE_ID, ATTR_VALUE, ATTR_VALUE_TYPE, CONF_CALLBACK_IP, CONF_CALLBACK_PORT, CONF_INTERFACES, CONF_JSONPORT, CONF_LOCAL_IP, CONF_LOCAL_PORT, CONF_PATH, CONF_PORT, CONF_RESOLVENAMES, CONF_RESOLVENAMES_OPTIONS, DATA_CONF, DATA_HOMEMATIC, DATA_STORE, DISCOVER_BATTERY, DISCOVER_BINARY_SENSORS, DISCOVER_CLIMATE, DISCOVER_COVER, DISCOVER_LIGHTS, DISCOVER_LOCKS, DISCOVER_SENSORS, DISCOVER_SWITCHES, DOMAIN, EVENT_ERROR, EVENT_IMPULSE, EVENT_KEYPRESS, HM_DEVICE_TYPES, HM_IGNORE_DISCOVERY_NODE, HM_IGNORE_DISCOVERY_NODE_EXCEPTIONS, HM_IMPULSE_EVENTS, HM_PRESS_EVENTS, SERVICE_PUT_PARAMSET, SERVICE_RECONNECT, SERVICE_SET_DEVICE_VALUE, SERVICE_SET_INSTALL_MODE, SERVICE_SET_VARIABLE_VALUE, SERVICE_VIRTUALKEY, ) from .entity import HMHub _LOGGER = logging.getLogger(__name__) DEFAULT_LOCAL_IP = "0.0.0.0" DEFAULT_LOCAL_PORT = 0 DEFAULT_RESOLVENAMES = False DEFAULT_JSONPORT = 80 DEFAULT_PORT = 2001 DEFAULT_PATH = "" DEFAULT_USERNAME = "Admin" DEFAULT_PASSWORD = "" DEFAULT_SSL = False DEFAULT_VERIFY_SSL = False DEFAULT_CHANNEL = 1 DEVICE_SCHEMA = vol.Schema( { vol.Required(CONF_PLATFORM): "homematic", vol.Required(ATTR_NAME): cv.string, vol.Required(ATTR_ADDRESS): cv.string, vol.Required(ATTR_INTERFACE): cv.string, vol.Optional(ATTR_DEVICE_TYPE): cv.string, vol.Optional(ATTR_CHANNEL, default=DEFAULT_CHANNEL): vol.Coerce(int), vol.Optional(ATTR_PARAM): cv.string, vol.Optional(ATTR_UNIQUE_ID): cv.string, } ) CONFIG_SCHEMA = vol.Schema( { DOMAIN: vol.Schema( { vol.Optional(CONF_INTERFACES, default={}): { cv.match_all: { vol.Required(CONF_HOST): cv.string, vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port, vol.Optional(CONF_PATH, default=DEFAULT_PATH): cv.string, vol.Optional( CONF_RESOLVENAMES, default=DEFAULT_RESOLVENAMES ): vol.In(CONF_RESOLVENAMES_OPTIONS), vol.Optional(CONF_JSONPORT, default=DEFAULT_JSONPORT): cv.port, vol.Optional( CONF_USERNAME, default=DEFAULT_USERNAME ): cv.string, vol.Optional( CONF_PASSWORD, default=DEFAULT_PASSWORD ): cv.string, vol.Optional(CONF_CALLBACK_IP): cv.string, vol.Optional(CONF_CALLBACK_PORT): cv.port, vol.Optional(CONF_SSL, default=DEFAULT_SSL): cv.boolean, vol.Optional( CONF_VERIFY_SSL, default=DEFAULT_VERIFY_SSL ): cv.boolean, } }, vol.Optional(CONF_HOSTS, default={}): { cv.match_all: { vol.Required(CONF_HOST): cv.string, vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port, vol.Optional( CONF_USERNAME, default=DEFAULT_USERNAME ): cv.string, vol.Optional( CONF_PASSWORD, default=DEFAULT_PASSWORD ): cv.string, } }, vol.Optional(CONF_LOCAL_IP, default=DEFAULT_LOCAL_IP): cv.string, vol.Optional(CONF_LOCAL_PORT): cv.port, } ) }, extra=vol.ALLOW_EXTRA, ) SCHEMA_SERVICE_VIRTUALKEY = vol.Schema( { vol.Required(ATTR_ADDRESS): vol.All(cv.string, vol.Upper), vol.Required(ATTR_CHANNEL): vol.Coerce(int), vol.Required(ATTR_PARAM): cv.string, 
vol.Optional(ATTR_INTERFACE): cv.string, } ) SCHEMA_SERVICE_SET_VARIABLE_VALUE = vol.Schema( { vol.Required(ATTR_NAME): cv.string, vol.Required(ATTR_VALUE): cv.match_all, vol.Optional(ATTR_ENTITY_ID): cv.entity_ids, } ) SCHEMA_SERVICE_SET_DEVICE_VALUE = vol.Schema( { vol.Required(ATTR_ADDRESS): vol.All(cv.string, vol.Upper), vol.Required(ATTR_CHANNEL): vol.Coerce(int), vol.Required(ATTR_PARAM): vol.All(cv.string, vol.Upper), vol.Required(ATTR_VALUE): cv.match_all, vol.Optional(ATTR_VALUE_TYPE): vol.In( ["boolean", "dateTime.iso8601", "double", "int", "string"] ), vol.Optional(ATTR_INTERFACE): cv.string, } ) SCHEMA_SERVICE_RECONNECT = vol.Schema({}) SCHEMA_SERVICE_SET_INSTALL_MODE = vol.Schema( { vol.Required(ATTR_INTERFACE): cv.string, vol.Optional(ATTR_TIME, default=60): cv.positive_int, vol.Optional(ATTR_MODE, default=1): vol.All(vol.Coerce(int), vol.In([1, 2])), vol.Optional(ATTR_ADDRESS): vol.All(cv.string, vol.Upper), } ) SCHEMA_SERVICE_PUT_PARAMSET = vol.Schema( { vol.Required(ATTR_INTERFACE): cv.string, vol.Required(ATTR_ADDRESS): vol.All(cv.string, vol.Upper), vol.Required(ATTR_PARAMSET_KEY): vol.All(cv.string, vol.Upper), vol.Required(ATTR_PARAMSET): dict, vol.Optional(ATTR_RX_MODE): vol.All(cv.string, vol.Upper), } ) def setup(hass, config): """Set up the Homematic component.""" conf = config[DOMAIN] hass.data[DATA_CONF] = remotes = {} hass.data[DATA_STORE] = set() # Create hosts-dictionary for pyhomematic for rname, rconfig in conf[CONF_INTERFACES].items(): remotes[rname] = { "ip": rconfig.get(CONF_HOST), "port": rconfig.get(CONF_PORT), "path": rconfig.get(CONF_PATH), "resolvenames": rconfig.get(CONF_RESOLVENAMES), "jsonport": rconfig.get(CONF_JSONPORT), "username": rconfig.get(CONF_USERNAME), "password": rconfig.get(CONF_PASSWORD), "callbackip": rconfig.get(CONF_CALLBACK_IP), "callbackport": rconfig.get(CONF_CALLBACK_PORT), "ssl": rconfig[CONF_SSL], "verify_ssl": rconfig.get(CONF_VERIFY_SSL), "connect": True, } for sname, sconfig in conf[CONF_HOSTS].items(): remotes[sname] = { "ip": sconfig.get(CONF_HOST), "port": sconfig[CONF_PORT], "username": sconfig.get(CONF_USERNAME), "password": sconfig.get(CONF_PASSWORD), "connect": False, } # Create server thread bound_system_callback = partial(_system_callback_handler, hass, config) hass.data[DATA_HOMEMATIC] = homematic = HMConnection( local=config[DOMAIN].get(CONF_LOCAL_IP), localport=config[DOMAIN].get(CONF_LOCAL_PORT, DEFAULT_LOCAL_PORT), remotes=remotes, systemcallback=bound_system_callback, interface_id="homeassistant", ) # Start server thread, connect to hosts, initialize to receive events homematic.start() # Stops server when Home Assistant is shutting down hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, hass.data[DATA_HOMEMATIC].stop) # Init homematic hubs entity_hubs = [] for hub_name in conf[CONF_HOSTS].keys(): entity_hubs.append(HMHub(hass, homematic, hub_name)) def _hm_service_virtualkey(service): """Service to handle virtualkey servicecalls.""" address = service.data.get(ATTR_ADDRESS) channel = service.data.get(ATTR_CHANNEL) param = service.data.get(ATTR_PARAM) # Device not found hmdevice = _device_from_servicecall(hass, service) if hmdevice is None: _LOGGER.error("%s not found for service virtualkey!", address) return # Parameter doesn't exist for device if param not in hmdevice.ACTIONNODE: _LOGGER.error("%s not datapoint in hm device %s", param, address) return # Channel doesn't exist for device if channel not in hmdevice.ACTIONNODE[param]: _LOGGER.error("%i is not a channel in hm device %s", channel, address) return # 
Call parameter hmdevice.actionNodeData(param, True, channel) hass.services.register( DOMAIN, SERVICE_VIRTUALKEY, _hm_service_virtualkey, schema=SCHEMA_SERVICE_VIRTUALKEY, ) def _service_handle_value(service): """Service to call setValue method for HomeMatic system variable.""" entity_ids = service.data.get(ATTR_ENTITY_ID) name = service.data[ATTR_NAME] value = service.data[ATTR_VALUE] if entity_ids: entities = [ entity for entity in entity_hubs if entity.entity_id in entity_ids ] else: entities = entity_hubs if not entities: _LOGGER.error("No HomeMatic hubs available") return for hub in entities: hub.hm_set_variable(name, value) hass.services.register( DOMAIN, SERVICE_SET_VARIABLE_VALUE, _service_handle_value, schema=SCHEMA_SERVICE_SET_VARIABLE_VALUE, ) def _service_handle_reconnect(service): """Service to reconnect all HomeMatic hubs.""" homematic.reconnect() hass.services.register( DOMAIN, SERVICE_RECONNECT, _service_handle_reconnect, schema=SCHEMA_SERVICE_RECONNECT, ) def _service_handle_device(service): """Service to call setValue method for HomeMatic devices.""" address = service.data.get(ATTR_ADDRESS) channel = service.data.get(ATTR_CHANNEL) param = service.data.get(ATTR_PARAM) value = service.data.get(ATTR_VALUE) value_type = service.data.get(ATTR_VALUE_TYPE) # Convert value into correct XML-RPC Type. # https://docs.python.org/3/library/xmlrpc.client.html#xmlrpc.client.ServerProxy if value_type: if value_type == "int": value = int(value) elif value_type == "double": value = float(value) elif value_type == "boolean": value = bool(value) elif value_type == "dateTime.iso8601": value = datetime.strptime(value, "%Y%m%dT%H:%M:%S") else: # Default is 'string' value = str(value) # Device not found hmdevice = _device_from_servicecall(hass, service) if hmdevice is None: _LOGGER.error("%s not found!", address) return hmdevice.setValue(param, value, channel) hass.services.register( DOMAIN, SERVICE_SET_DEVICE_VALUE, _service_handle_device, schema=SCHEMA_SERVICE_SET_DEVICE_VALUE, ) def _service_handle_install_mode(service): """Service to set interface into install mode.""" interface = service.data.get(ATTR_INTERFACE) mode = service.data.get(ATTR_MODE) time = service.data.get(ATTR_TIME) address = service.data.get(ATTR_ADDRESS) homematic.setInstallMode(interface, t=time, mode=mode, address=address) hass.services.register( DOMAIN, SERVICE_SET_INSTALL_MODE, _service_handle_install_mode, schema=SCHEMA_SERVICE_SET_INSTALL_MODE, ) def _service_put_paramset(service): """Service to call the putParamset method on a HomeMatic connection.""" interface = service.data.get(ATTR_INTERFACE) address = service.data.get(ATTR_ADDRESS) paramset_key = service.data.get(ATTR_PARAMSET_KEY) # When passing in the paramset from a YAML file we get an OrderedDict # here instead of a dict, so add this explicit cast. # The service schema makes sure that this cast works. paramset = dict(service.data.get(ATTR_PARAMSET)) rx_mode = service.data.get(ATTR_RX_MODE) _LOGGER.debug( "Calling putParamset: %s, %s, %s, %s, %s", interface, address, paramset_key, paramset, rx_mode, ) homematic.putParamset(interface, address, paramset_key, paramset, rx_mode) hass.services.register( DOMAIN, SERVICE_PUT_PARAMSET, _service_put_paramset, schema=SCHEMA_SERVICE_PUT_PARAMSET, ) return True def _system_callback_handler(hass, config, src, *args): """System callback handler.""" # New devices available at hub if src == "newDevices": (interface_id, dev_descriptions) = args interface = interface_id.split("-")[-1] # Device support active? 
if not hass.data[DATA_CONF][interface]["connect"]: return addresses = [] for dev in dev_descriptions: address = dev["ADDRESS"].split(":")[0] if address not in hass.data[DATA_STORE]: hass.data[DATA_STORE].add(address) addresses.append(address) # Register EVENTS # Search all devices with an EVENTNODE that includes data bound_event_callback = partial(_hm_event_handler, hass, interface) for dev in addresses: hmdevice = hass.data[DATA_HOMEMATIC].devices[interface].get(dev) if hmdevice.EVENTNODE: hmdevice.setEventCallback(callback=bound_event_callback, bequeath=True) # Create Home Assistant entities if addresses: for component_name, discovery_type in ( ("switch", DISCOVER_SWITCHES), ("light", DISCOVER_LIGHTS), ("cover", DISCOVER_COVER), ("binary_sensor", DISCOVER_BINARY_SENSORS), ("sensor", DISCOVER_SENSORS), ("climate", DISCOVER_CLIMATE), ("lock", DISCOVER_LOCKS), ("binary_sensor", DISCOVER_BATTERY), ): # Get all devices of a specific type found_devices = _get_devices(hass, discovery_type, addresses, interface) # When devices of this type are found # they are setup in Home Assistant and a discovery event is fired if found_devices: discovery.load_platform( hass, component_name, DOMAIN, { ATTR_DISCOVER_DEVICES: found_devices, ATTR_DISCOVERY_TYPE: discovery_type, }, config, ) # Homegear error message elif src == "error": _LOGGER.error("Error: %s", args) (interface_id, errorcode, message) = args hass.bus.fire(EVENT_ERROR, {ATTR_ERRORCODE: errorcode, ATTR_MESSAGE: message}) def _get_devices(hass, discovery_type, keys, interface): """Get the HomeMatic devices for given discovery_type.""" device_arr = [] for key in keys: device = hass.data[DATA_HOMEMATIC].devices[interface][key] class_name = device.__class__.__name__ metadata = {} # Class not supported by discovery type if ( discovery_type != DISCOVER_BATTERY and class_name not in HM_DEVICE_TYPES[discovery_type] ): continue # Load metadata needed to generate a parameter list if discovery_type == DISCOVER_SENSORS: metadata.update(device.SENSORNODE) elif discovery_type == DISCOVER_BINARY_SENSORS: metadata.update(device.BINARYNODE) elif discovery_type == DISCOVER_BATTERY: if ATTR_LOWBAT in device.ATTRIBUTENODE: metadata.update({ATTR_LOWBAT: device.ATTRIBUTENODE[ATTR_LOWBAT]}) elif ATTR_LOW_BAT in device.ATTRIBUTENODE: metadata.update({ATTR_LOW_BAT: device.ATTRIBUTENODE[ATTR_LOW_BAT]}) else: continue else: metadata.update({None: device.ELEMENT}) # Generate options for 1...n elements with 1...n parameters for param, channels in metadata.items(): if ( param in HM_IGNORE_DISCOVERY_NODE and class_name not in HM_IGNORE_DISCOVERY_NODE_EXCEPTIONS.get(param, []) ): continue if discovery_type == DISCOVER_SWITCHES and class_name == "IPKeySwitchLevel": channels.remove(8) channels.remove(12) if discovery_type == DISCOVER_LIGHTS and class_name == "IPKeySwitchLevel": channels.remove(4) # Add devices _LOGGER.debug( "%s: Handling %s: %s: %s", discovery_type, key, param, channels ) for channel in channels: name = _create_ha_id( name=device.NAME, channel=channel, param=param, count=len(channels) ) unique_id = _create_ha_id( name=key, channel=channel, param=param, count=len(channels) ) device_dict = { CONF_PLATFORM: "homematic", ATTR_ADDRESS: key, ATTR_INTERFACE: interface, ATTR_NAME: name, ATTR_DEVICE_TYPE: class_name, ATTR_CHANNEL: channel, ATTR_UNIQUE_ID: unique_id, } if param is not None: device_dict[ATTR_PARAM] = param # Add new device try: DEVICE_SCHEMA(device_dict) device_arr.append(device_dict) except vol.MultipleInvalid as err: _LOGGER.error("Invalid device config: 
%s", str(err)) return device_arr def _create_ha_id(name, channel, param, count): """Generate a unique entity id.""" # HMDevice is a simple device if count == 1 and param is None: return name # Has multiple elements/channels if count > 1 and param is None: return f"{name} {channel}" # With multiple parameters on first channel if count == 1 and param is not None: return f"{name} {param}" # Multiple parameters with multiple channels if count > 1 and param is not None: return f"{name} {channel} {param}" def _hm_event_handler(hass, interface, device, caller, attribute, value): """Handle all pyhomematic device events.""" try: channel = int(device.split(":")[1]) address = device.split(":")[0] hmdevice = hass.data[DATA_HOMEMATIC].devices[interface].get(address) except (TypeError, ValueError): _LOGGER.error("Event handling channel convert error!") return # Return if not an event supported by device if attribute not in hmdevice.EVENTNODE: return _LOGGER.debug("Event %s for %s channel %i", attribute, hmdevice.NAME, channel) # Keypress event if attribute in HM_PRESS_EVENTS: hass.bus.fire( EVENT_KEYPRESS, {ATTR_NAME: hmdevice.NAME, ATTR_PARAM: attribute, ATTR_CHANNEL: channel}, ) return # Impulse event if attribute in HM_IMPULSE_EVENTS: hass.bus.fire(EVENT_IMPULSE, {ATTR_NAME: hmdevice.NAME, ATTR_CHANNEL: channel}) return _LOGGER.warning("Event is unknown and not forwarded") def _device_from_servicecall(hass, service): """Extract HomeMatic device from service call.""" address = service.data.get(ATTR_ADDRESS) interface = service.data.get(ATTR_INTERFACE) if address == "BIDCOS-RF": address = "BidCoS-RF" if interface: return hass.data[DATA_HOMEMATIC].devices[interface].get(address) for devices in hass.data[DATA_HOMEMATIC].devices.values(): if address in devices: return devices[address]
"""The tests for the Switch component.""" import pytest from homeassistant import core from homeassistant.components import switch from homeassistant.const import CONF_PLATFORM from homeassistant.setup import async_setup_component from tests.components.switch import common @pytest.fixture(autouse=True) def entities(hass): """Initialize the test switch.""" platform = getattr(hass.components, "test.switch") platform.init() yield platform.ENTITIES async def test_methods(hass, entities): """Test is_on, turn_on, turn_off methods.""" switch_1, switch_2, switch_3 = entities assert await async_setup_component( hass, switch.DOMAIN, {switch.DOMAIN: {CONF_PLATFORM: "test"}} ) await hass.async_block_till_done() assert switch.is_on(hass, switch_1.entity_id) assert not switch.is_on(hass, switch_2.entity_id) assert not switch.is_on(hass, switch_3.entity_id) await common.async_turn_off(hass, switch_1.entity_id) await common.async_turn_on(hass, switch_2.entity_id) assert not switch.is_on(hass, switch_1.entity_id) assert switch.is_on(hass, switch_2.entity_id) # Turn all off await common.async_turn_off(hass) assert not switch.is_on(hass, switch_1.entity_id) assert not switch.is_on(hass, switch_2.entity_id) assert not switch.is_on(hass, switch_3.entity_id) # Turn all on await common.async_turn_on(hass) assert switch.is_on(hass, switch_1.entity_id) assert switch.is_on(hass, switch_2.entity_id) assert switch.is_on(hass, switch_3.entity_id) async def test_switch_context(hass, entities, hass_admin_user): """Test that switch context works.""" assert await async_setup_component(hass, "switch", {"switch": {"platform": "test"}}) await hass.async_block_till_done() state = hass.states.get("switch.ac") assert state is not None await hass.services.async_call( "switch", "toggle", {"entity_id": state.entity_id}, True, core.Context(user_id=hass_admin_user.id), ) state2 = hass.states.get("switch.ac") assert state2 is not None assert state.state != state2.state assert state2.context.user_id == hass_admin_user.id def test_deprecated_base_class(caplog): """Test deprecated base class.""" class CustomSwitch(switch.SwitchDevice): pass CustomSwitch() assert "SwitchDevice is deprecated, modify CustomSwitch" in caplog.text
GenericStudent/home-assistant
tests/components/switch/test_init.py
homeassistant/components/homematic/__init__.py
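For orientation, a hedged sketch of invoking one of the services registered in setup() above, with data that satisfies SCHEMA_SERVICE_SET_DEVICE_VALUE; the device address, channel and parameter are made-up examples, and the service name is referenced through the SERVICE_SET_DEVICE_VALUE constant rather than spelled out.

# Hypothetical service call matching SCHEMA_SERVICE_SET_DEVICE_VALUE above.
# The address, channel and parameter are placeholders for a real HomeMatic device.
async def example_set_device_value(hass):
    """Ask the integration to write STATE on channel 1 of a made-up device."""
    await hass.services.async_call(
        DOMAIN,
        SERVICE_SET_DEVICE_VALUE,
        {
            ATTR_ADDRESS: "ABC0000000",  # uppercased by the schema
            ATTR_CHANNEL: 1,
            ATTR_PARAM: "STATE",  # also uppercased by the schema
            ATTR_VALUE: True,
            ATTR_VALUE_TYPE: "boolean",  # optional explicit XML-RPC type
        },
        blocking=True,
    )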
"""Locks on Zigbee Home Automation networks.""" import functools from zigpy.zcl.foundation import Status from homeassistant.components.lock import ( DOMAIN, STATE_LOCKED, STATE_UNLOCKED, LockEntity, ) from homeassistant.core import callback from homeassistant.helpers.dispatcher import async_dispatcher_connect from .core import discovery from .core.const import ( CHANNEL_DOORLOCK, DATA_ZHA, DATA_ZHA_DISPATCHERS, SIGNAL_ADD_ENTITIES, SIGNAL_ATTR_UPDATED, ) from .core.registries import ZHA_ENTITIES from .entity import ZhaEntity # The first state is Zigbee 'Not fully locked' STATE_LIST = [STATE_UNLOCKED, STATE_LOCKED, STATE_UNLOCKED] STRICT_MATCH = functools.partial(ZHA_ENTITIES.strict_match, DOMAIN) VALUE_TO_STATE = dict(enumerate(STATE_LIST)) async def async_setup_entry(hass, config_entry, async_add_entities): """Set up the Zigbee Home Automation Door Lock from config entry.""" entities_to_create = hass.data[DATA_ZHA][DOMAIN] unsub = async_dispatcher_connect( hass, SIGNAL_ADD_ENTITIES, functools.partial( discovery.async_add_entities, async_add_entities, entities_to_create ), ) hass.data[DATA_ZHA][DATA_ZHA_DISPATCHERS].append(unsub) @STRICT_MATCH(channel_names=CHANNEL_DOORLOCK) class ZhaDoorLock(ZhaEntity, LockEntity): """Representation of a ZHA lock.""" def __init__(self, unique_id, zha_device, channels, **kwargs): """Init this sensor.""" super().__init__(unique_id, zha_device, channels, **kwargs) self._doorlock_channel = self.cluster_channels.get(CHANNEL_DOORLOCK) async def async_added_to_hass(self): """Run when about to be added to hass.""" await super().async_added_to_hass() self.async_accept_signal( self._doorlock_channel, SIGNAL_ATTR_UPDATED, self.async_set_state ) @callback def async_restore_last_state(self, last_state): """Restore previous state.""" self._state = VALUE_TO_STATE.get(last_state.state, last_state.state) @property def is_locked(self) -> bool: """Return true if entity is locked.""" if self._state is None: return False return self._state == STATE_LOCKED @property def device_state_attributes(self): """Return state attributes.""" return self.state_attributes async def async_lock(self, **kwargs): """Lock the lock.""" result = await self._doorlock_channel.lock_door() if not isinstance(result, list) or result[0] is not Status.SUCCESS: self.error("Error with lock_door: %s", result) return self.async_write_ha_state() async def async_unlock(self, **kwargs): """Unlock the lock.""" result = await self._doorlock_channel.unlock_door() if not isinstance(result, list) or result[0] is not Status.SUCCESS: self.error("Error with unlock_door: %s", result) return self.async_write_ha_state() async def async_update(self): """Attempt to retrieve state from the lock.""" await super().async_update() await self.async_get_state() @callback def async_set_state(self, attr_id, attr_name, value): """Handle state update from channel.""" self._state = VALUE_TO_STATE.get(value, self._state) self.async_write_ha_state() async def async_get_state(self, from_cache=True): """Attempt to retrieve state from the lock.""" if self._doorlock_channel: state = await self._doorlock_channel.get_attribute_value( "lock_state", from_cache=from_cache ) if state is not None: self._state = VALUE_TO_STATE.get(state, self._state) async def refresh(self, time): """Call async_get_state at an interval.""" await self.async_get_state(from_cache=False)
"""The tests for the Switch component.""" import pytest from homeassistant import core from homeassistant.components import switch from homeassistant.const import CONF_PLATFORM from homeassistant.setup import async_setup_component from tests.components.switch import common @pytest.fixture(autouse=True) def entities(hass): """Initialize the test switch.""" platform = getattr(hass.components, "test.switch") platform.init() yield platform.ENTITIES async def test_methods(hass, entities): """Test is_on, turn_on, turn_off methods.""" switch_1, switch_2, switch_3 = entities assert await async_setup_component( hass, switch.DOMAIN, {switch.DOMAIN: {CONF_PLATFORM: "test"}} ) await hass.async_block_till_done() assert switch.is_on(hass, switch_1.entity_id) assert not switch.is_on(hass, switch_2.entity_id) assert not switch.is_on(hass, switch_3.entity_id) await common.async_turn_off(hass, switch_1.entity_id) await common.async_turn_on(hass, switch_2.entity_id) assert not switch.is_on(hass, switch_1.entity_id) assert switch.is_on(hass, switch_2.entity_id) # Turn all off await common.async_turn_off(hass) assert not switch.is_on(hass, switch_1.entity_id) assert not switch.is_on(hass, switch_2.entity_id) assert not switch.is_on(hass, switch_3.entity_id) # Turn all on await common.async_turn_on(hass) assert switch.is_on(hass, switch_1.entity_id) assert switch.is_on(hass, switch_2.entity_id) assert switch.is_on(hass, switch_3.entity_id) async def test_switch_context(hass, entities, hass_admin_user): """Test that switch context works.""" assert await async_setup_component(hass, "switch", {"switch": {"platform": "test"}}) await hass.async_block_till_done() state = hass.states.get("switch.ac") assert state is not None await hass.services.async_call( "switch", "toggle", {"entity_id": state.entity_id}, True, core.Context(user_id=hass_admin_user.id), ) state2 = hass.states.get("switch.ac") assert state2 is not None assert state.state != state2.state assert state2.context.user_id == hass_admin_user.id def test_deprecated_base_class(caplog): """Test deprecated base class.""" class CustomSwitch(switch.SwitchDevice): pass CustomSwitch() assert "SwitchDevice is deprecated, modify CustomSwitch" in caplog.text
GenericStudent/home-assistant
tests/components/switch/test_init.py
homeassistant/components/zha/lock.py
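A quick illustration of the VALUE_TO_STATE mapping built above: dict(enumerate(STATE_LIST)) translates the Zigbee lock_state attribute values into Home Assistant lock states, with value 0 ("not fully locked") reported as unlocked and unknown values falling back to the previous state.

# What dict(enumerate(STATE_LIST)) evaluates to with the STATE_LIST defined above.
assert VALUE_TO_STATE == {0: STATE_UNLOCKED, 1: STATE_LOCKED, 2: STATE_UNLOCKED}
# Unknown report values keep the previous state via .get(value, self._state):
assert VALUE_TO_STATE.get(42, STATE_LOCKED) == STATE_LOCKED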
"""Component for the Portuguese weather service - IPMA.""" from homeassistant.core import Config, HomeAssistant from .config_flow import IpmaFlowHandler # noqa: F401 from .const import DOMAIN # noqa: F401 DEFAULT_NAME = "ipma" async def async_setup(hass: HomeAssistant, config: Config) -> bool: """Set up configured IPMA.""" return True async def async_setup_entry(hass, config_entry): """Set up IPMA station as config entry.""" hass.async_create_task( hass.config_entries.async_forward_entry_setup(config_entry, "weather") ) return True async def async_unload_entry(hass, config_entry): """Unload a config entry.""" await hass.config_entries.async_forward_entry_unload(config_entry, "weather") return True
"""The tests for the Switch component.""" import pytest from homeassistant import core from homeassistant.components import switch from homeassistant.const import CONF_PLATFORM from homeassistant.setup import async_setup_component from tests.components.switch import common @pytest.fixture(autouse=True) def entities(hass): """Initialize the test switch.""" platform = getattr(hass.components, "test.switch") platform.init() yield platform.ENTITIES async def test_methods(hass, entities): """Test is_on, turn_on, turn_off methods.""" switch_1, switch_2, switch_3 = entities assert await async_setup_component( hass, switch.DOMAIN, {switch.DOMAIN: {CONF_PLATFORM: "test"}} ) await hass.async_block_till_done() assert switch.is_on(hass, switch_1.entity_id) assert not switch.is_on(hass, switch_2.entity_id) assert not switch.is_on(hass, switch_3.entity_id) await common.async_turn_off(hass, switch_1.entity_id) await common.async_turn_on(hass, switch_2.entity_id) assert not switch.is_on(hass, switch_1.entity_id) assert switch.is_on(hass, switch_2.entity_id) # Turn all off await common.async_turn_off(hass) assert not switch.is_on(hass, switch_1.entity_id) assert not switch.is_on(hass, switch_2.entity_id) assert not switch.is_on(hass, switch_3.entity_id) # Turn all on await common.async_turn_on(hass) assert switch.is_on(hass, switch_1.entity_id) assert switch.is_on(hass, switch_2.entity_id) assert switch.is_on(hass, switch_3.entity_id) async def test_switch_context(hass, entities, hass_admin_user): """Test that switch context works.""" assert await async_setup_component(hass, "switch", {"switch": {"platform": "test"}}) await hass.async_block_till_done() state = hass.states.get("switch.ac") assert state is not None await hass.services.async_call( "switch", "toggle", {"entity_id": state.entity_id}, True, core.Context(user_id=hass_admin_user.id), ) state2 = hass.states.get("switch.ac") assert state2 is not None assert state.state != state2.state assert state2.context.user_id == hass_admin_user.id def test_deprecated_base_class(caplog): """Test deprecated base class.""" class CustomSwitch(switch.SwitchDevice): pass CustomSwitch() assert "SwitchDevice is deprecated, modify CustomSwitch" in caplog.text
GenericStudent/home-assistant
tests/components/switch/test_init.py
homeassistant/components/ipma/__init__.py
"""Support for LIFX Cloud scenes.""" import asyncio import logging from typing import Any import aiohttp from aiohttp.hdrs import AUTHORIZATION import async_timeout import voluptuous as vol from homeassistant.components.scene import Scene from homeassistant.const import ( CONF_PLATFORM, CONF_TIMEOUT, CONF_TOKEN, HTTP_OK, HTTP_UNAUTHORIZED, ) from homeassistant.helpers.aiohttp_client import async_get_clientsession import homeassistant.helpers.config_validation as cv _LOGGER = logging.getLogger(__name__) DEFAULT_TIMEOUT = 10 PLATFORM_SCHEMA = vol.Schema( { vol.Required(CONF_PLATFORM): "lifx_cloud", vol.Required(CONF_TOKEN): cv.string, vol.Optional(CONF_TIMEOUT, default=DEFAULT_TIMEOUT): cv.positive_int, } ) async def async_setup_platform(hass, config, async_add_entities, discovery_info=None): """Set up the scenes stored in the LIFX Cloud.""" token = config.get(CONF_TOKEN) timeout = config.get(CONF_TIMEOUT) headers = {AUTHORIZATION: f"Bearer {token}"} url = "https://api.lifx.com/v1/scenes" try: httpsession = async_get_clientsession(hass) with async_timeout.timeout(timeout): scenes_resp = await httpsession.get(url, headers=headers) except (asyncio.TimeoutError, aiohttp.ClientError): _LOGGER.exception("Error on %s", url) return False status = scenes_resp.status if status == HTTP_OK: data = await scenes_resp.json() devices = [LifxCloudScene(hass, headers, timeout, scene) for scene in data] async_add_entities(devices) return True if status == HTTP_UNAUTHORIZED: _LOGGER.error("Unauthorized (bad token?) on %s", url) return False _LOGGER.error("HTTP error %d on %s", scenes_resp.status, url) return False class LifxCloudScene(Scene): """Representation of a LIFX Cloud scene.""" def __init__(self, hass, headers, timeout, scene_data): """Initialize the scene.""" self.hass = hass self._headers = headers self._timeout = timeout self._name = scene_data["name"] self._uuid = scene_data["uuid"] @property def name(self): """Return the name of the scene.""" return self._name async def async_activate(self, **kwargs: Any) -> None: """Activate the scene.""" url = f"https://api.lifx.com/v1/scenes/scene_id:{self._uuid}/activate" try: httpsession = async_get_clientsession(self.hass) with async_timeout.timeout(self._timeout): await httpsession.put(url, headers=self._headers) except (asyncio.TimeoutError, aiohttp.ClientError): _LOGGER.exception("Error on %s", url)
"""The tests for the Switch component.""" import pytest from homeassistant import core from homeassistant.components import switch from homeassistant.const import CONF_PLATFORM from homeassistant.setup import async_setup_component from tests.components.switch import common @pytest.fixture(autouse=True) def entities(hass): """Initialize the test switch.""" platform = getattr(hass.components, "test.switch") platform.init() yield platform.ENTITIES async def test_methods(hass, entities): """Test is_on, turn_on, turn_off methods.""" switch_1, switch_2, switch_3 = entities assert await async_setup_component( hass, switch.DOMAIN, {switch.DOMAIN: {CONF_PLATFORM: "test"}} ) await hass.async_block_till_done() assert switch.is_on(hass, switch_1.entity_id) assert not switch.is_on(hass, switch_2.entity_id) assert not switch.is_on(hass, switch_3.entity_id) await common.async_turn_off(hass, switch_1.entity_id) await common.async_turn_on(hass, switch_2.entity_id) assert not switch.is_on(hass, switch_1.entity_id) assert switch.is_on(hass, switch_2.entity_id) # Turn all off await common.async_turn_off(hass) assert not switch.is_on(hass, switch_1.entity_id) assert not switch.is_on(hass, switch_2.entity_id) assert not switch.is_on(hass, switch_3.entity_id) # Turn all on await common.async_turn_on(hass) assert switch.is_on(hass, switch_1.entity_id) assert switch.is_on(hass, switch_2.entity_id) assert switch.is_on(hass, switch_3.entity_id) async def test_switch_context(hass, entities, hass_admin_user): """Test that switch context works.""" assert await async_setup_component(hass, "switch", {"switch": {"platform": "test"}}) await hass.async_block_till_done() state = hass.states.get("switch.ac") assert state is not None await hass.services.async_call( "switch", "toggle", {"entity_id": state.entity_id}, True, core.Context(user_id=hass_admin_user.id), ) state2 = hass.states.get("switch.ac") assert state2 is not None assert state.state != state2.state assert state2.context.user_id == hass_admin_user.id def test_deprecated_base_class(caplog): """Test deprecated base class.""" class CustomSwitch(switch.SwitchDevice): pass CustomSwitch() assert "SwitchDevice is deprecated, modify CustomSwitch" in caplog.text
GenericStudent/home-assistant
tests/components/switch/test_init.py
homeassistant/components/lifx_cloud/scene.py
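A hedged, stand-alone sketch of the two LIFX HTTP API calls the scene platform above makes: listing scenes with a bearer token and activating one by UUID. The token is a placeholder; the URLs, header and JSON keys mirror the platform code.

# Stand-alone sketch of the LIFX Cloud requests issued by the scene platform above.
# LIFX_TOKEN is a placeholder; error handling and timeouts are omitted for brevity.
import aiohttp
from aiohttp.hdrs import AUTHORIZATION

LIFX_TOKEN = "replace-me"

async def list_and_activate_first_scene():
    """List cloud scenes and activate the first one returned."""
    headers = {AUTHORIZATION: f"Bearer {LIFX_TOKEN}"}
    async with aiohttp.ClientSession() as session:
        resp = await session.get("https://api.lifx.com/v1/scenes", headers=headers)
        scenes = await resp.json()
        if scenes:
            uuid = scenes[0]["uuid"]
            await session.put(
                f"https://api.lifx.com/v1/scenes/scene_id:{uuid}/activate",
                headers=headers,
            )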
"""Support for Toon switches.""" from typing import Any from toonapi import ( ACTIVE_STATE_AWAY, ACTIVE_STATE_HOLIDAY, PROGRAM_STATE_OFF, PROGRAM_STATE_ON, ) from homeassistant.components.switch import SwitchEntity from homeassistant.config_entries import ConfigEntry from homeassistant.helpers.typing import HomeAssistantType from .const import ( ATTR_DEFAULT_ENABLED, ATTR_ICON, ATTR_INVERTED, ATTR_MEASUREMENT, ATTR_NAME, ATTR_SECTION, DOMAIN, SWITCH_ENTITIES, ) from .coordinator import ToonDataUpdateCoordinator from .helpers import toon_exception_handler from .models import ToonDisplayDeviceEntity, ToonEntity async def async_setup_entry( hass: HomeAssistantType, entry: ConfigEntry, async_add_entities ) -> None: """Set up a Toon switches based on a config entry.""" coordinator = hass.data[DOMAIN][entry.entry_id] async_add_entities( [ToonProgramSwitch(coordinator), ToonHolidayModeSwitch(coordinator)] ) class ToonSwitch(ToonEntity, SwitchEntity): """Defines an Toon switch.""" def __init__(self, coordinator: ToonDataUpdateCoordinator, *, key: str) -> None: """Initialize the Toon switch.""" self.key = key super().__init__( coordinator, enabled_default=SWITCH_ENTITIES[key][ATTR_DEFAULT_ENABLED], icon=SWITCH_ENTITIES[key][ATTR_ICON], name=SWITCH_ENTITIES[key][ATTR_NAME], ) @property def unique_id(self) -> str: """Return the unique ID for this binary sensor.""" agreement_id = self.coordinator.data.agreement.agreement_id return f"{agreement_id}_{self.key}" @property def is_on(self) -> bool: """Return the status of the binary sensor.""" section = getattr( self.coordinator.data, SWITCH_ENTITIES[self.key][ATTR_SECTION] ) value = getattr(section, SWITCH_ENTITIES[self.key][ATTR_MEASUREMENT]) if SWITCH_ENTITIES[self.key][ATTR_INVERTED]: return not value return value class ToonProgramSwitch(ToonSwitch, ToonDisplayDeviceEntity): """Defines a Toon program switch.""" def __init__(self, coordinator: ToonDataUpdateCoordinator) -> None: """Initialize the Toon program switch.""" super().__init__(coordinator, key="thermostat_program") @toon_exception_handler async def async_turn_off(self, **kwargs: Any) -> None: """Turn off the Toon program switch.""" await self.coordinator.toon.set_active_state( ACTIVE_STATE_AWAY, PROGRAM_STATE_OFF ) @toon_exception_handler async def async_turn_on(self, **kwargs: Any) -> None: """Turn on the Toon program switch.""" await self.coordinator.toon.set_active_state( ACTIVE_STATE_AWAY, PROGRAM_STATE_ON ) class ToonHolidayModeSwitch(ToonSwitch, ToonDisplayDeviceEntity): """Defines a Toon Holiday mode switch.""" def __init__(self, coordinator: ToonDataUpdateCoordinator) -> None: """Initialize the Toon holiday switch.""" super().__init__(coordinator, key="thermostat_holiday_mode") @toon_exception_handler async def async_turn_off(self, **kwargs: Any) -> None: """Turn off the Toon holiday mode switch.""" await self.coordinator.toon.set_active_state( ACTIVE_STATE_AWAY, PROGRAM_STATE_ON ) @toon_exception_handler async def async_turn_on(self, **kwargs: Any) -> None: """Turn on the Toon holiday mode switch.""" await self.coordinator.toon.set_active_state( ACTIVE_STATE_HOLIDAY, PROGRAM_STATE_OFF )
"""The tests for the Switch component.""" import pytest from homeassistant import core from homeassistant.components import switch from homeassistant.const import CONF_PLATFORM from homeassistant.setup import async_setup_component from tests.components.switch import common @pytest.fixture(autouse=True) def entities(hass): """Initialize the test switch.""" platform = getattr(hass.components, "test.switch") platform.init() yield platform.ENTITIES async def test_methods(hass, entities): """Test is_on, turn_on, turn_off methods.""" switch_1, switch_2, switch_3 = entities assert await async_setup_component( hass, switch.DOMAIN, {switch.DOMAIN: {CONF_PLATFORM: "test"}} ) await hass.async_block_till_done() assert switch.is_on(hass, switch_1.entity_id) assert not switch.is_on(hass, switch_2.entity_id) assert not switch.is_on(hass, switch_3.entity_id) await common.async_turn_off(hass, switch_1.entity_id) await common.async_turn_on(hass, switch_2.entity_id) assert not switch.is_on(hass, switch_1.entity_id) assert switch.is_on(hass, switch_2.entity_id) # Turn all off await common.async_turn_off(hass) assert not switch.is_on(hass, switch_1.entity_id) assert not switch.is_on(hass, switch_2.entity_id) assert not switch.is_on(hass, switch_3.entity_id) # Turn all on await common.async_turn_on(hass) assert switch.is_on(hass, switch_1.entity_id) assert switch.is_on(hass, switch_2.entity_id) assert switch.is_on(hass, switch_3.entity_id) async def test_switch_context(hass, entities, hass_admin_user): """Test that switch context works.""" assert await async_setup_component(hass, "switch", {"switch": {"platform": "test"}}) await hass.async_block_till_done() state = hass.states.get("switch.ac") assert state is not None await hass.services.async_call( "switch", "toggle", {"entity_id": state.entity_id}, True, core.Context(user_id=hass_admin_user.id), ) state2 = hass.states.get("switch.ac") assert state2 is not None assert state.state != state2.state assert state2.context.user_id == hass_admin_user.id def test_deprecated_base_class(caplog): """Test deprecated base class.""" class CustomSwitch(switch.SwitchDevice): pass CustomSwitch() assert "SwitchDevice is deprecated, modify CustomSwitch" in caplog.text
GenericStudent/home-assistant
tests/components/switch/test_init.py
homeassistant/components/toon/switch.py
"""Support for raspihats board binary sensors.""" import logging import voluptuous as vol from homeassistant.components.binary_sensor import PLATFORM_SCHEMA, BinarySensorEntity from homeassistant.const import ( CONF_ADDRESS, CONF_DEVICE_CLASS, CONF_NAME, DEVICE_DEFAULT_NAME, ) import homeassistant.helpers.config_validation as cv from . import ( CONF_BOARD, CONF_CHANNELS, CONF_I2C_HATS, CONF_INDEX, CONF_INVERT_LOGIC, I2C_HAT_NAMES, I2C_HATS_MANAGER, I2CHatsException, ) _LOGGER = logging.getLogger(__name__) DEFAULT_INVERT_LOGIC = False DEFAULT_DEVICE_CLASS = None _CHANNELS_SCHEMA = vol.Schema( [ { vol.Required(CONF_INDEX): cv.positive_int, vol.Required(CONF_NAME): cv.string, vol.Optional(CONF_INVERT_LOGIC, default=DEFAULT_INVERT_LOGIC): cv.boolean, vol.Optional(CONF_DEVICE_CLASS, default=DEFAULT_DEVICE_CLASS): cv.string, } ] ) _I2C_HATS_SCHEMA = vol.Schema( [ { vol.Required(CONF_BOARD): vol.In(I2C_HAT_NAMES), vol.Required(CONF_ADDRESS): vol.Coerce(int), vol.Required(CONF_CHANNELS): _CHANNELS_SCHEMA, } ] ) PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend( {vol.Optional(CONF_I2C_HATS): _I2C_HATS_SCHEMA} ) def setup_platform(hass, config, add_entities, discovery_info=None): """Set up the raspihats binary_sensor devices.""" I2CHatBinarySensor.I2C_HATS_MANAGER = hass.data[I2C_HATS_MANAGER] binary_sensors = [] i2c_hat_configs = config.get(CONF_I2C_HATS) for i2c_hat_config in i2c_hat_configs: address = i2c_hat_config[CONF_ADDRESS] board = i2c_hat_config[CONF_BOARD] try: I2CHatBinarySensor.I2C_HATS_MANAGER.register_board(board, address) for channel_config in i2c_hat_config[CONF_CHANNELS]: binary_sensors.append( I2CHatBinarySensor( address, channel_config[CONF_INDEX], channel_config[CONF_NAME], channel_config[CONF_INVERT_LOGIC], channel_config[CONF_DEVICE_CLASS], ) ) except I2CHatsException as ex: _LOGGER.error( "Failed to register %s I2CHat@%s %s", board, hex(address), str(ex) ) add_entities(binary_sensors) class I2CHatBinarySensor(BinarySensorEntity): """Representation of a binary sensor that uses a I2C-HAT digital input.""" I2C_HATS_MANAGER = None def __init__(self, address, channel, name, invert_logic, device_class): """Initialize the raspihats sensor.""" self._address = address self._channel = channel self._name = name or DEVICE_DEFAULT_NAME self._invert_logic = invert_logic self._device_class = device_class self._state = self.I2C_HATS_MANAGER.read_di(self._address, self._channel) def online_callback(): """Call fired when board is online.""" self.schedule_update_ha_state() self.I2C_HATS_MANAGER.register_online_callback( self._address, self._channel, online_callback ) def edge_callback(state): """Read digital input state.""" self._state = state self.schedule_update_ha_state() self.I2C_HATS_MANAGER.register_di_callback( self._address, self._channel, edge_callback ) @property def device_class(self): """Return the class of this sensor.""" return self._device_class @property def name(self): """Return the name of this sensor.""" return self._name @property def should_poll(self): """No polling needed for this sensor.""" return False @property def is_on(self): """Return the state of this sensor.""" return self._state != self._invert_logic
"""The tests for the Switch component.""" import pytest from homeassistant import core from homeassistant.components import switch from homeassistant.const import CONF_PLATFORM from homeassistant.setup import async_setup_component from tests.components.switch import common @pytest.fixture(autouse=True) def entities(hass): """Initialize the test switch.""" platform = getattr(hass.components, "test.switch") platform.init() yield platform.ENTITIES async def test_methods(hass, entities): """Test is_on, turn_on, turn_off methods.""" switch_1, switch_2, switch_3 = entities assert await async_setup_component( hass, switch.DOMAIN, {switch.DOMAIN: {CONF_PLATFORM: "test"}} ) await hass.async_block_till_done() assert switch.is_on(hass, switch_1.entity_id) assert not switch.is_on(hass, switch_2.entity_id) assert not switch.is_on(hass, switch_3.entity_id) await common.async_turn_off(hass, switch_1.entity_id) await common.async_turn_on(hass, switch_2.entity_id) assert not switch.is_on(hass, switch_1.entity_id) assert switch.is_on(hass, switch_2.entity_id) # Turn all off await common.async_turn_off(hass) assert not switch.is_on(hass, switch_1.entity_id) assert not switch.is_on(hass, switch_2.entity_id) assert not switch.is_on(hass, switch_3.entity_id) # Turn all on await common.async_turn_on(hass) assert switch.is_on(hass, switch_1.entity_id) assert switch.is_on(hass, switch_2.entity_id) assert switch.is_on(hass, switch_3.entity_id) async def test_switch_context(hass, entities, hass_admin_user): """Test that switch context works.""" assert await async_setup_component(hass, "switch", {"switch": {"platform": "test"}}) await hass.async_block_till_done() state = hass.states.get("switch.ac") assert state is not None await hass.services.async_call( "switch", "toggle", {"entity_id": state.entity_id}, True, core.Context(user_id=hass_admin_user.id), ) state2 = hass.states.get("switch.ac") assert state2 is not None assert state.state != state2.state assert state2.context.user_id == hass_admin_user.id def test_deprecated_base_class(caplog): """Test deprecated base class.""" class CustomSwitch(switch.SwitchDevice): pass CustomSwitch() assert "SwitchDevice is deprecated, modify CustomSwitch" in caplog.text
GenericStudent/home-assistant
tests/components/switch/test_init.py
homeassistant/components/raspihats/binary_sensor.py
"""Support for Android IP Webcam sensors.""" from homeassistant.helpers.icon import icon_for_battery_level from . import ( CONF_HOST, CONF_NAME, CONF_SENSORS, DATA_IP_WEBCAM, ICON_MAP, KEY_MAP, AndroidIPCamEntity, ) async def async_setup_platform(hass, config, async_add_entities, discovery_info=None): """Set up the IP Webcam Sensor.""" if discovery_info is None: return host = discovery_info[CONF_HOST] name = discovery_info[CONF_NAME] sensors = discovery_info[CONF_SENSORS] ipcam = hass.data[DATA_IP_WEBCAM][host] all_sensors = [] for sensor in sensors: all_sensors.append(IPWebcamSensor(name, host, ipcam, sensor)) async_add_entities(all_sensors, True) class IPWebcamSensor(AndroidIPCamEntity): """Representation of a IP Webcam sensor.""" def __init__(self, name, host, ipcam, sensor): """Initialize the sensor.""" super().__init__(host, ipcam) self._sensor = sensor self._mapped_name = KEY_MAP.get(self._sensor, self._sensor) self._name = f"{name} {self._mapped_name}" self._state = None self._unit = None @property def name(self): """Return the name of the sensor, if any.""" return self._name @property def unit_of_measurement(self): """Return the unit the value is expressed in.""" return self._unit @property def state(self): """Return the state of the sensor.""" return self._state async def async_update(self): """Retrieve latest state.""" if self._sensor in ("audio_connections", "video_connections"): if not self._ipcam.status_data: return self._state = self._ipcam.status_data.get(self._sensor) self._unit = "Connections" else: self._state, self._unit = self._ipcam.export_sensor(self._sensor) @property def icon(self): """Return the icon for the sensor.""" if self._sensor == "battery_level" and self._state is not None: return icon_for_battery_level(int(self._state)) return ICON_MAP.get(self._sensor, "mdi:eye")
"""The tests for the Switch component.""" import pytest from homeassistant import core from homeassistant.components import switch from homeassistant.const import CONF_PLATFORM from homeassistant.setup import async_setup_component from tests.components.switch import common @pytest.fixture(autouse=True) def entities(hass): """Initialize the test switch.""" platform = getattr(hass.components, "test.switch") platform.init() yield platform.ENTITIES async def test_methods(hass, entities): """Test is_on, turn_on, turn_off methods.""" switch_1, switch_2, switch_3 = entities assert await async_setup_component( hass, switch.DOMAIN, {switch.DOMAIN: {CONF_PLATFORM: "test"}} ) await hass.async_block_till_done() assert switch.is_on(hass, switch_1.entity_id) assert not switch.is_on(hass, switch_2.entity_id) assert not switch.is_on(hass, switch_3.entity_id) await common.async_turn_off(hass, switch_1.entity_id) await common.async_turn_on(hass, switch_2.entity_id) assert not switch.is_on(hass, switch_1.entity_id) assert switch.is_on(hass, switch_2.entity_id) # Turn all off await common.async_turn_off(hass) assert not switch.is_on(hass, switch_1.entity_id) assert not switch.is_on(hass, switch_2.entity_id) assert not switch.is_on(hass, switch_3.entity_id) # Turn all on await common.async_turn_on(hass) assert switch.is_on(hass, switch_1.entity_id) assert switch.is_on(hass, switch_2.entity_id) assert switch.is_on(hass, switch_3.entity_id) async def test_switch_context(hass, entities, hass_admin_user): """Test that switch context works.""" assert await async_setup_component(hass, "switch", {"switch": {"platform": "test"}}) await hass.async_block_till_done() state = hass.states.get("switch.ac") assert state is not None await hass.services.async_call( "switch", "toggle", {"entity_id": state.entity_id}, True, core.Context(user_id=hass_admin_user.id), ) state2 = hass.states.get("switch.ac") assert state2 is not None assert state.state != state2.state assert state2.context.user_id == hass_admin_user.id def test_deprecated_base_class(caplog): """Test deprecated base class.""" class CustomSwitch(switch.SwitchDevice): pass CustomSwitch() assert "SwitchDevice is deprecated, modify CustomSwitch" in caplog.text
GenericStudent/home-assistant
tests/components/switch/test_init.py
homeassistant/components/android_ip_webcam/sensor.py
"""Support for particulate matter sensors connected to a serial port.""" import logging from pmsensor import serial_pm as pm import voluptuous as vol from homeassistant.components.sensor import PLATFORM_SCHEMA from homeassistant.const import CONCENTRATION_MICROGRAMS_PER_CUBIC_METER, CONF_NAME import homeassistant.helpers.config_validation as cv from homeassistant.helpers.entity import Entity _LOGGER = logging.getLogger(__name__) CONF_BRAND = "brand" CONF_SERIAL_DEVICE = "serial_device" PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend( { vol.Required(CONF_BRAND): cv.string, vol.Required(CONF_SERIAL_DEVICE): cv.string, vol.Optional(CONF_NAME): cv.string, } ) def setup_platform(hass, config, add_entities, discovery_info=None): """Set up the available PM sensors.""" try: coll = pm.PMDataCollector( config.get(CONF_SERIAL_DEVICE), pm.SUPPORTED_SENSORS[config.get(CONF_BRAND)] ) except KeyError: _LOGGER.error( "Brand %s not supported\n supported brands: %s", config.get(CONF_BRAND), pm.SUPPORTED_SENSORS.keys(), ) return except OSError as err: _LOGGER.error( "Could not open serial connection to %s (%s)", config.get(CONF_SERIAL_DEVICE), err, ) return dev = [] for pmname in coll.supported_values(): if config.get(CONF_NAME) is not None: name = "{} PM{}".format(config.get(CONF_NAME), pmname) else: name = f"PM{pmname}" dev.append(ParticulateMatterSensor(coll, name, pmname)) add_entities(dev) class ParticulateMatterSensor(Entity): """Representation of an Particulate matter sensor.""" def __init__(self, pmDataCollector, name, pmname): """Initialize a new PM sensor.""" self._name = name self._pmname = pmname self._state = None self._collector = pmDataCollector @property def name(self): """Return the name of the sensor.""" return self._name @property def state(self): """Return the state of the sensor.""" return self._state @property def unit_of_measurement(self): """Return the unit of measurement of this entity, if any.""" return CONCENTRATION_MICROGRAMS_PER_CUBIC_METER def update(self): """Read from sensor and update the state.""" _LOGGER.debug("Reading data from PM sensor") try: self._state = self._collector.read_data()[self._pmname] except KeyError: _LOGGER.error("Could not read PM%s value", self._pmname)
"""The tests for the Switch component.""" import pytest from homeassistant import core from homeassistant.components import switch from homeassistant.const import CONF_PLATFORM from homeassistant.setup import async_setup_component from tests.components.switch import common @pytest.fixture(autouse=True) def entities(hass): """Initialize the test switch.""" platform = getattr(hass.components, "test.switch") platform.init() yield platform.ENTITIES async def test_methods(hass, entities): """Test is_on, turn_on, turn_off methods.""" switch_1, switch_2, switch_3 = entities assert await async_setup_component( hass, switch.DOMAIN, {switch.DOMAIN: {CONF_PLATFORM: "test"}} ) await hass.async_block_till_done() assert switch.is_on(hass, switch_1.entity_id) assert not switch.is_on(hass, switch_2.entity_id) assert not switch.is_on(hass, switch_3.entity_id) await common.async_turn_off(hass, switch_1.entity_id) await common.async_turn_on(hass, switch_2.entity_id) assert not switch.is_on(hass, switch_1.entity_id) assert switch.is_on(hass, switch_2.entity_id) # Turn all off await common.async_turn_off(hass) assert not switch.is_on(hass, switch_1.entity_id) assert not switch.is_on(hass, switch_2.entity_id) assert not switch.is_on(hass, switch_3.entity_id) # Turn all on await common.async_turn_on(hass) assert switch.is_on(hass, switch_1.entity_id) assert switch.is_on(hass, switch_2.entity_id) assert switch.is_on(hass, switch_3.entity_id) async def test_switch_context(hass, entities, hass_admin_user): """Test that switch context works.""" assert await async_setup_component(hass, "switch", {"switch": {"platform": "test"}}) await hass.async_block_till_done() state = hass.states.get("switch.ac") assert state is not None await hass.services.async_call( "switch", "toggle", {"entity_id": state.entity_id}, True, core.Context(user_id=hass_admin_user.id), ) state2 = hass.states.get("switch.ac") assert state2 is not None assert state.state != state2.state assert state2.context.user_id == hass_admin_user.id def test_deprecated_base_class(caplog): """Test deprecated base class.""" class CustomSwitch(switch.SwitchDevice): pass CustomSwitch() assert "SwitchDevice is deprecated, modify CustomSwitch" in caplog.text
GenericStudent/home-assistant
tests/components/switch/test_init.py
homeassistant/components/serial_pm/sensor.py
"""Support for Vera devices.""" import asyncio from collections import defaultdict import logging from typing import Any, Dict, Generic, List, Optional, Type, TypeVar import pyvera as veraApi from requests.exceptions import RequestException import voluptuous as vol from homeassistant import config_entries from homeassistant.config_entries import ConfigEntry from homeassistant.const import ( ATTR_ARMED, ATTR_BATTERY_LEVEL, ATTR_LAST_TRIP_TIME, ATTR_TRIPPED, CONF_EXCLUDE, CONF_LIGHTS, EVENT_HOMEASSISTANT_STOP, ) from homeassistant.core import HomeAssistant from homeassistant.exceptions import ConfigEntryNotReady from homeassistant.helpers import config_validation as cv from homeassistant.helpers.entity import Entity from homeassistant.util import convert, slugify from homeassistant.util.dt import utc_from_timestamp from .common import ( ControllerData, SubscriptionRegistry, get_configured_platforms, get_controller_data, set_controller_data, ) from .config_flow import fix_device_id_list, new_options from .const import ( ATTR_CURRENT_ENERGY_KWH, ATTR_CURRENT_POWER_W, CONF_CONTROLLER, CONF_LEGACY_UNIQUE_ID, DOMAIN, VERA_ID_FORMAT, ) _LOGGER = logging.getLogger(__name__) VERA_ID_LIST_SCHEMA = vol.Schema([int]) CONFIG_SCHEMA = vol.Schema( { DOMAIN: vol.Schema( { vol.Required(CONF_CONTROLLER): cv.url, vol.Optional(CONF_EXCLUDE, default=[]): VERA_ID_LIST_SCHEMA, vol.Optional(CONF_LIGHTS, default=[]): VERA_ID_LIST_SCHEMA, } ) }, extra=vol.ALLOW_EXTRA, ) async def async_setup(hass: HomeAssistant, base_config: dict) -> bool: """Set up for Vera controllers.""" hass.data[DOMAIN] = {} config = base_config.get(DOMAIN) if not config: return True hass.async_create_task( hass.config_entries.flow.async_init( DOMAIN, context={"source": config_entries.SOURCE_IMPORT}, data=config, ) ) return True async def async_setup_entry(hass: HomeAssistant, config_entry: ConfigEntry) -> bool: """Do setup of vera.""" # Use options entered during initial config flow or provided from configuration.yml if config_entry.data.get(CONF_LIGHTS) or config_entry.data.get(CONF_EXCLUDE): hass.config_entries.async_update_entry( entry=config_entry, data=config_entry.data, options=new_options( config_entry.data.get(CONF_LIGHTS, []), config_entry.data.get(CONF_EXCLUDE, []), ), ) saved_light_ids = config_entry.options.get(CONF_LIGHTS, []) saved_exclude_ids = config_entry.options.get(CONF_EXCLUDE, []) base_url = config_entry.data[CONF_CONTROLLER] light_ids = fix_device_id_list(saved_light_ids) exclude_ids = fix_device_id_list(saved_exclude_ids) # If the ids were corrected. Update the config entry. if light_ids != saved_light_ids or exclude_ids != saved_exclude_ids: hass.config_entries.async_update_entry( entry=config_entry, options=new_options(light_ids, exclude_ids) ) # Initialize the Vera controller. subscription_registry = SubscriptionRegistry(hass) controller = veraApi.VeraController(base_url, subscription_registry) await hass.async_add_executor_job(controller.start) hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, controller.stop) try: all_devices = await hass.async_add_executor_job(controller.get_devices) all_scenes = await hass.async_add_executor_job(controller.get_scenes) except RequestException as exception: # There was a network related error connecting to the Vera controller. _LOGGER.exception("Error communicating with Vera API") raise ConfigEntryNotReady from exception # Exclude devices unwanted by user. 
devices = [device for device in all_devices if device.device_id not in exclude_ids] vera_devices = defaultdict(list) for device in devices: device_type = map_vera_device(device, light_ids) if device_type is not None: vera_devices[device_type].append(device) vera_scenes = [] for scene in all_scenes: vera_scenes.append(scene) controller_data = ControllerData( controller=controller, devices=vera_devices, scenes=vera_scenes, config_entry=config_entry, ) set_controller_data(hass, config_entry, controller_data) # Forward the config data to the necessary platforms. for platform in get_configured_platforms(controller_data): hass.async_create_task( hass.config_entries.async_forward_entry_setup(config_entry, platform) ) return True async def async_unload_entry(hass: HomeAssistant, config_entry: ConfigEntry) -> bool: """Unload Vera config entry.""" controller_data: ControllerData = get_controller_data(hass, config_entry) tasks = [ hass.config_entries.async_forward_entry_unload(config_entry, platform) for platform in get_configured_platforms(controller_data) ] tasks.append(hass.async_add_executor_job(controller_data.controller.stop)) await asyncio.gather(*tasks) return True def map_vera_device(vera_device: veraApi.VeraDevice, remap: List[int]) -> str: """Map Vera classes to Home Assistant types.""" type_map = { veraApi.VeraDimmer: "light", veraApi.VeraBinarySensor: "binary_sensor", veraApi.VeraSensor: "sensor", veraApi.VeraArmableDevice: "switch", veraApi.VeraLock: "lock", veraApi.VeraThermostat: "climate", veraApi.VeraCurtain: "cover", veraApi.VeraSceneController: "sensor", veraApi.VeraSwitch: "switch", } def map_special_case(instance_class: Type, entity_type: str) -> str: if instance_class is veraApi.VeraSwitch and vera_device.device_id in remap: return "light" return entity_type return next( iter( map_special_case(instance_class, entity_type) for instance_class, entity_type in type_map.items() if isinstance(vera_device, instance_class) ), None, ) DeviceType = TypeVar("DeviceType", bound=veraApi.VeraDevice) class VeraDevice(Generic[DeviceType], Entity): """Representation of a Vera device entity.""" def __init__(self, vera_device: DeviceType, controller_data: ControllerData): """Initialize the device.""" self.vera_device = vera_device self.controller = controller_data.controller self._name = self.vera_device.name # Append device id to prevent name clashes in HA.
self.vera_id = VERA_ID_FORMAT.format( slugify(vera_device.name), vera_device.vera_device_id ) if controller_data.config_entry.data.get(CONF_LEGACY_UNIQUE_ID): self._unique_id = str(self.vera_device.vera_device_id) else: self._unique_id = f"vera_{controller_data.config_entry.unique_id}_{self.vera_device.vera_device_id}" async def async_added_to_hass(self) -> None: """Subscribe to updates.""" self.controller.register(self.vera_device, self._update_callback) def _update_callback(self, _device: DeviceType) -> None: """Update the state.""" self.schedule_update_ha_state(True) @property def name(self) -> str: """Return the name of the device.""" return self._name @property def should_poll(self) -> bool: """Get polling requirement from vera device.""" return self.vera_device.should_poll @property def device_state_attributes(self) -> Optional[Dict[str, Any]]: """Return the state attributes of the device.""" attr = {} if self.vera_device.has_battery: attr[ATTR_BATTERY_LEVEL] = self.vera_device.battery_level if self.vera_device.is_armable: armed = self.vera_device.is_armed attr[ATTR_ARMED] = "True" if armed else "False" if self.vera_device.is_trippable: last_tripped = self.vera_device.last_trip if last_tripped is not None: utc_time = utc_from_timestamp(int(last_tripped)) attr[ATTR_LAST_TRIP_TIME] = utc_time.isoformat() else: attr[ATTR_LAST_TRIP_TIME] = None tripped = self.vera_device.is_tripped attr[ATTR_TRIPPED] = "True" if tripped else "False" power = self.vera_device.power if power: attr[ATTR_CURRENT_POWER_W] = convert(power, float, 0.0) energy = self.vera_device.energy if energy: attr[ATTR_CURRENT_ENERGY_KWH] = convert(energy, float, 0.0) attr["Vera Device Id"] = self.vera_device.vera_device_id return attr @property def unique_id(self) -> str: """Return a unique ID. The Vera assigns a unique and immutable ID number to each device. """ return self._unique_id
"""The tests for the Switch component.""" import pytest from homeassistant import core from homeassistant.components import switch from homeassistant.const import CONF_PLATFORM from homeassistant.setup import async_setup_component from tests.components.switch import common @pytest.fixture(autouse=True) def entities(hass): """Initialize the test switch.""" platform = getattr(hass.components, "test.switch") platform.init() yield platform.ENTITIES async def test_methods(hass, entities): """Test is_on, turn_on, turn_off methods.""" switch_1, switch_2, switch_3 = entities assert await async_setup_component( hass, switch.DOMAIN, {switch.DOMAIN: {CONF_PLATFORM: "test"}} ) await hass.async_block_till_done() assert switch.is_on(hass, switch_1.entity_id) assert not switch.is_on(hass, switch_2.entity_id) assert not switch.is_on(hass, switch_3.entity_id) await common.async_turn_off(hass, switch_1.entity_id) await common.async_turn_on(hass, switch_2.entity_id) assert not switch.is_on(hass, switch_1.entity_id) assert switch.is_on(hass, switch_2.entity_id) # Turn all off await common.async_turn_off(hass) assert not switch.is_on(hass, switch_1.entity_id) assert not switch.is_on(hass, switch_2.entity_id) assert not switch.is_on(hass, switch_3.entity_id) # Turn all on await common.async_turn_on(hass) assert switch.is_on(hass, switch_1.entity_id) assert switch.is_on(hass, switch_2.entity_id) assert switch.is_on(hass, switch_3.entity_id) async def test_switch_context(hass, entities, hass_admin_user): """Test that switch context works.""" assert await async_setup_component(hass, "switch", {"switch": {"platform": "test"}}) await hass.async_block_till_done() state = hass.states.get("switch.ac") assert state is not None await hass.services.async_call( "switch", "toggle", {"entity_id": state.entity_id}, True, core.Context(user_id=hass_admin_user.id), ) state2 = hass.states.get("switch.ac") assert state2 is not None assert state.state != state2.state assert state2.context.user_id == hass_admin_user.id def test_deprecated_base_class(caplog): """Test deprecated base class.""" class CustomSwitch(switch.SwitchDevice): pass CustomSwitch() assert "SwitchDevice is deprecated, modify CustomSwitch" in caplog.text
GenericStudent/home-assistant
tests/components/switch/test_init.py
homeassistant/components/vera/__init__.py
"""Config flow to configure the OVO Energy integration.""" import aiohttp from ovoenergy.ovoenergy import OVOEnergy import voluptuous as vol from homeassistant import config_entries from homeassistant.config_entries import ConfigFlow from homeassistant.const import CONF_PASSWORD, CONF_USERNAME from .const import CONF_ACCOUNT_ID, DOMAIN # pylint: disable=unused-import USER_SCHEMA = vol.Schema( {vol.Required(CONF_USERNAME): str, vol.Required(CONF_PASSWORD): str} ) class OVOEnergyFlowHandler(ConfigFlow, domain=DOMAIN): """Handle a OVO Energy config flow.""" VERSION = 1 CONNECTION_CLASS = config_entries.CONN_CLASS_CLOUD_POLL async def async_step_user(self, user_input=None): """Handle a flow initiated by the user.""" errors = {} if user_input is not None: client = OVOEnergy() try: authenticated = await client.authenticate( user_input[CONF_USERNAME], user_input[CONF_PASSWORD] ) except aiohttp.ClientError: errors["base"] = "cannot_connect" else: if authenticated: await self.async_set_unique_id(user_input[CONF_USERNAME]) self._abort_if_unique_id_configured() return self.async_create_entry( title=client.account_id, data={ CONF_USERNAME: user_input[CONF_USERNAME], CONF_PASSWORD: user_input[CONF_PASSWORD], CONF_ACCOUNT_ID: client.account_id, }, ) errors["base"] = "invalid_auth" return self.async_show_form( step_id="user", data_schema=USER_SCHEMA, errors=errors )
"""The tests for the Switch component.""" import pytest from homeassistant import core from homeassistant.components import switch from homeassistant.const import CONF_PLATFORM from homeassistant.setup import async_setup_component from tests.components.switch import common @pytest.fixture(autouse=True) def entities(hass): """Initialize the test switch.""" platform = getattr(hass.components, "test.switch") platform.init() yield platform.ENTITIES async def test_methods(hass, entities): """Test is_on, turn_on, turn_off methods.""" switch_1, switch_2, switch_3 = entities assert await async_setup_component( hass, switch.DOMAIN, {switch.DOMAIN: {CONF_PLATFORM: "test"}} ) await hass.async_block_till_done() assert switch.is_on(hass, switch_1.entity_id) assert not switch.is_on(hass, switch_2.entity_id) assert not switch.is_on(hass, switch_3.entity_id) await common.async_turn_off(hass, switch_1.entity_id) await common.async_turn_on(hass, switch_2.entity_id) assert not switch.is_on(hass, switch_1.entity_id) assert switch.is_on(hass, switch_2.entity_id) # Turn all off await common.async_turn_off(hass) assert not switch.is_on(hass, switch_1.entity_id) assert not switch.is_on(hass, switch_2.entity_id) assert not switch.is_on(hass, switch_3.entity_id) # Turn all on await common.async_turn_on(hass) assert switch.is_on(hass, switch_1.entity_id) assert switch.is_on(hass, switch_2.entity_id) assert switch.is_on(hass, switch_3.entity_id) async def test_switch_context(hass, entities, hass_admin_user): """Test that switch context works.""" assert await async_setup_component(hass, "switch", {"switch": {"platform": "test"}}) await hass.async_block_till_done() state = hass.states.get("switch.ac") assert state is not None await hass.services.async_call( "switch", "toggle", {"entity_id": state.entity_id}, True, core.Context(user_id=hass_admin_user.id), ) state2 = hass.states.get("switch.ac") assert state2 is not None assert state.state != state2.state assert state2.context.user_id == hass_admin_user.id def test_deprecated_base_class(caplog): """Test deprecated base class.""" class CustomSwitch(switch.SwitchDevice): pass CustomSwitch() assert "SwitchDevice is deprecated, modify CustomSwitch" in caplog.text
GenericStudent/home-assistant
tests/components/switch/test_init.py
homeassistant/components/ovo_energy/config_flow.py
"""UpCloud constants.""" from datetime import timedelta DOMAIN = "upcloud" DEFAULT_SCAN_INTERVAL = timedelta(seconds=60) CONFIG_ENTRY_UPDATE_SIGNAL_TEMPLATE = f"{DOMAIN}_config_entry_update:" "{}"
"""The tests for the Switch component.""" import pytest from homeassistant import core from homeassistant.components import switch from homeassistant.const import CONF_PLATFORM from homeassistant.setup import async_setup_component from tests.components.switch import common @pytest.fixture(autouse=True) def entities(hass): """Initialize the test switch.""" platform = getattr(hass.components, "test.switch") platform.init() yield platform.ENTITIES async def test_methods(hass, entities): """Test is_on, turn_on, turn_off methods.""" switch_1, switch_2, switch_3 = entities assert await async_setup_component( hass, switch.DOMAIN, {switch.DOMAIN: {CONF_PLATFORM: "test"}} ) await hass.async_block_till_done() assert switch.is_on(hass, switch_1.entity_id) assert not switch.is_on(hass, switch_2.entity_id) assert not switch.is_on(hass, switch_3.entity_id) await common.async_turn_off(hass, switch_1.entity_id) await common.async_turn_on(hass, switch_2.entity_id) assert not switch.is_on(hass, switch_1.entity_id) assert switch.is_on(hass, switch_2.entity_id) # Turn all off await common.async_turn_off(hass) assert not switch.is_on(hass, switch_1.entity_id) assert not switch.is_on(hass, switch_2.entity_id) assert not switch.is_on(hass, switch_3.entity_id) # Turn all on await common.async_turn_on(hass) assert switch.is_on(hass, switch_1.entity_id) assert switch.is_on(hass, switch_2.entity_id) assert switch.is_on(hass, switch_3.entity_id) async def test_switch_context(hass, entities, hass_admin_user): """Test that switch context works.""" assert await async_setup_component(hass, "switch", {"switch": {"platform": "test"}}) await hass.async_block_till_done() state = hass.states.get("switch.ac") assert state is not None await hass.services.async_call( "switch", "toggle", {"entity_id": state.entity_id}, True, core.Context(user_id=hass_admin_user.id), ) state2 = hass.states.get("switch.ac") assert state2 is not None assert state.state != state2.state assert state2.context.user_id == hass_admin_user.id def test_deprecated_base_class(caplog): """Test deprecated base class.""" class CustomSwitch(switch.SwitchDevice): pass CustomSwitch() assert "SwitchDevice is deprecated, modify CustomSwitch" in caplog.text
GenericStudent/home-assistant
tests/components/switch/test_init.py
homeassistant/components/upcloud/const.py
"""Support for OpenTherm Gateway sensors.""" import logging from homeassistant.components.sensor import ENTITY_ID_FORMAT from homeassistant.const import CONF_ID from homeassistant.core import callback from homeassistant.helpers.dispatcher import async_dispatcher_connect from homeassistant.helpers.entity import Entity, async_generate_entity_id from . import DOMAIN from .const import DATA_GATEWAYS, DATA_OPENTHERM_GW, SENSOR_INFO _LOGGER = logging.getLogger(__name__) async def async_setup_entry(hass, config_entry, async_add_entities): """Set up the OpenTherm Gateway sensors.""" sensors = [] for var, info in SENSOR_INFO.items(): device_class = info[0] unit = info[1] friendly_name_format = info[2] sensors.append( OpenThermSensor( hass.data[DATA_OPENTHERM_GW][DATA_GATEWAYS][config_entry.data[CONF_ID]], var, device_class, unit, friendly_name_format, ) ) async_add_entities(sensors) class OpenThermSensor(Entity): """Representation of an OpenTherm Gateway sensor.""" def __init__(self, gw_dev, var, device_class, unit, friendly_name_format): """Initialize the OpenTherm Gateway sensor.""" self.entity_id = async_generate_entity_id( ENTITY_ID_FORMAT, f"{var}_{gw_dev.gw_id}", hass=gw_dev.hass ) self._gateway = gw_dev self._var = var self._value = None self._device_class = device_class self._unit = unit self._friendly_name = friendly_name_format.format(gw_dev.name) self._unsub_updates = None async def async_added_to_hass(self): """Subscribe to updates from the component.""" _LOGGER.debug("Added OpenTherm Gateway sensor %s", self._friendly_name) self._unsub_updates = async_dispatcher_connect( self.hass, self._gateway.update_signal, self.receive_report ) async def async_will_remove_from_hass(self): """Unsubscribe from updates from the component.""" _LOGGER.debug("Removing OpenTherm Gateway sensor %s", self._friendly_name) self._unsub_updates() @property def available(self): """Return availability of the sensor.""" return self._value is not None @property def entity_registry_enabled_default(self): """Disable sensors by default.""" return False @callback def receive_report(self, status): """Handle status updates from the component.""" value = status.get(self._var) if isinstance(value, float): value = f"{value:2.1f}" self._value = value self.async_write_ha_state() @property def name(self): """Return the friendly name of the sensor.""" return self._friendly_name @property def device_info(self): """Return device info.""" return { "identifiers": {(DOMAIN, self._gateway.gw_id)}, "name": self._gateway.name, "manufacturer": "Schelte Bron", "model": "OpenTherm Gateway", "sw_version": self._gateway.gw_version, } @property def unique_id(self): """Return a unique ID.""" return f"{self._gateway.gw_id}-{self._var}" @property def device_class(self): """Return the device class.""" return self._device_class @property def state(self): """Return the state of the device.""" return self._value @property def unit_of_measurement(self): """Return the unit of measurement.""" return self._unit @property def should_poll(self): """Return False because entity pushes its state.""" return False
"""The tests for the Switch component.""" import pytest from homeassistant import core from homeassistant.components import switch from homeassistant.const import CONF_PLATFORM from homeassistant.setup import async_setup_component from tests.components.switch import common @pytest.fixture(autouse=True) def entities(hass): """Initialize the test switch.""" platform = getattr(hass.components, "test.switch") platform.init() yield platform.ENTITIES async def test_methods(hass, entities): """Test is_on, turn_on, turn_off methods.""" switch_1, switch_2, switch_3 = entities assert await async_setup_component( hass, switch.DOMAIN, {switch.DOMAIN: {CONF_PLATFORM: "test"}} ) await hass.async_block_till_done() assert switch.is_on(hass, switch_1.entity_id) assert not switch.is_on(hass, switch_2.entity_id) assert not switch.is_on(hass, switch_3.entity_id) await common.async_turn_off(hass, switch_1.entity_id) await common.async_turn_on(hass, switch_2.entity_id) assert not switch.is_on(hass, switch_1.entity_id) assert switch.is_on(hass, switch_2.entity_id) # Turn all off await common.async_turn_off(hass) assert not switch.is_on(hass, switch_1.entity_id) assert not switch.is_on(hass, switch_2.entity_id) assert not switch.is_on(hass, switch_3.entity_id) # Turn all on await common.async_turn_on(hass) assert switch.is_on(hass, switch_1.entity_id) assert switch.is_on(hass, switch_2.entity_id) assert switch.is_on(hass, switch_3.entity_id) async def test_switch_context(hass, entities, hass_admin_user): """Test that switch context works.""" assert await async_setup_component(hass, "switch", {"switch": {"platform": "test"}}) await hass.async_block_till_done() state = hass.states.get("switch.ac") assert state is not None await hass.services.async_call( "switch", "toggle", {"entity_id": state.entity_id}, True, core.Context(user_id=hass_admin_user.id), ) state2 = hass.states.get("switch.ac") assert state2 is not None assert state.state != state2.state assert state2.context.user_id == hass_admin_user.id def test_deprecated_base_class(caplog): """Test deprecated base class.""" class CustomSwitch(switch.SwitchDevice): pass CustomSwitch() assert "SwitchDevice is deprecated, modify CustomSwitch" in caplog.text
GenericStudent/home-assistant
tests/components/switch/test_init.py
homeassistant/components/opentherm_gw/sensor.py
"""Test reproduce state for Switch.""" from homeassistant.core import State from tests.common import async_mock_service async def test_reproducing_states(hass, caplog): """Test reproducing Switch states.""" hass.states.async_set("switch.entity_off", "off", {}) hass.states.async_set("switch.entity_on", "on", {}) turn_on_calls = async_mock_service(hass, "switch", "turn_on") turn_off_calls = async_mock_service(hass, "switch", "turn_off") # These calls should do nothing as entities already in desired state await hass.helpers.state.async_reproduce_state( [State("switch.entity_off", "off"), State("switch.entity_on", "on", {})], ) assert len(turn_on_calls) == 0 assert len(turn_off_calls) == 0 # Test invalid state is handled await hass.helpers.state.async_reproduce_state( [State("switch.entity_off", "not_supported")] ) assert "not_supported" in caplog.text assert len(turn_on_calls) == 0 assert len(turn_off_calls) == 0 # Make sure correct services are called await hass.helpers.state.async_reproduce_state( [ State("switch.entity_on", "off"), State("switch.entity_off", "on", {}), # Should not raise State("switch.non_existing", "on"), ] ) assert len(turn_on_calls) == 1 assert turn_on_calls[0].domain == "switch" assert turn_on_calls[0].data == {"entity_id": "switch.entity_off"} assert len(turn_off_calls) == 1 assert turn_off_calls[0].domain == "switch" assert turn_off_calls[0].data == {"entity_id": "switch.entity_on"}
"""The tests for the Switch component.""" import pytest from homeassistant import core from homeassistant.components import switch from homeassistant.const import CONF_PLATFORM from homeassistant.setup import async_setup_component from tests.components.switch import common @pytest.fixture(autouse=True) def entities(hass): """Initialize the test switch.""" platform = getattr(hass.components, "test.switch") platform.init() yield platform.ENTITIES async def test_methods(hass, entities): """Test is_on, turn_on, turn_off methods.""" switch_1, switch_2, switch_3 = entities assert await async_setup_component( hass, switch.DOMAIN, {switch.DOMAIN: {CONF_PLATFORM: "test"}} ) await hass.async_block_till_done() assert switch.is_on(hass, switch_1.entity_id) assert not switch.is_on(hass, switch_2.entity_id) assert not switch.is_on(hass, switch_3.entity_id) await common.async_turn_off(hass, switch_1.entity_id) await common.async_turn_on(hass, switch_2.entity_id) assert not switch.is_on(hass, switch_1.entity_id) assert switch.is_on(hass, switch_2.entity_id) # Turn all off await common.async_turn_off(hass) assert not switch.is_on(hass, switch_1.entity_id) assert not switch.is_on(hass, switch_2.entity_id) assert not switch.is_on(hass, switch_3.entity_id) # Turn all on await common.async_turn_on(hass) assert switch.is_on(hass, switch_1.entity_id) assert switch.is_on(hass, switch_2.entity_id) assert switch.is_on(hass, switch_3.entity_id) async def test_switch_context(hass, entities, hass_admin_user): """Test that switch context works.""" assert await async_setup_component(hass, "switch", {"switch": {"platform": "test"}}) await hass.async_block_till_done() state = hass.states.get("switch.ac") assert state is not None await hass.services.async_call( "switch", "toggle", {"entity_id": state.entity_id}, True, core.Context(user_id=hass_admin_user.id), ) state2 = hass.states.get("switch.ac") assert state2 is not None assert state.state != state2.state assert state2.context.user_id == hass_admin_user.id def test_deprecated_base_class(caplog): """Test deprecated base class.""" class CustomSwitch(switch.SwitchDevice): pass CustomSwitch() assert "SwitchDevice is deprecated, modify CustomSwitch" in caplog.text
GenericStudent/home-assistant
tests/components/switch/test_init.py
tests/components/switch/test_reproduce_state.py
# Licensed under a 3-clause BSD style license - see LICENSE.rst # -*- coding: utf-8 -*- import numpy as np from numpy.testing import assert_array_equal from asdf import yamlutil from astropy import modeling from .basic import TransformType __all__ = ['ShiftType', 'ScaleType', 'PolynomialType'] class ShiftType(TransformType): name = "transform/shift" types = ['astropy.modeling.models.Shift'] @classmethod def from_tree_transform(cls, node, ctx): offset = node['offset'] if not np.isscalar(offset): raise NotImplementedError( "Asdf currently only supports scalar inputs to Shift transform.") return modeling.models.Shift(offset) @classmethod def to_tree_transform(cls, model, ctx): return {'offset': model.offset.value} #return yamlutil.custom_tree_to_tagged_tree(node, ctx) @classmethod def assert_equal(cls, a, b): # TODO: If models become comparable themselves, remove this. TransformType.assert_equal(a, b) assert (isinstance(a, modeling.models.Shift) and isinstance(b, modeling.models.Shift)) assert_array_equal(a.offset.value, b.offset.value) class ScaleType(TransformType): name = "transform/scale" types = ['astropy.modeling.models.Scale'] @classmethod def from_tree_transform(cls, node, ctx): factor = node['factor'] if not np.isscalar(factor): raise NotImplementedError( "Asdf currently only supports scalar inputs to Scale transform.") return modeling.models.Scale(factor) @classmethod def to_tree_transform(cls, model, ctx): node = {'factor': model.factor.value} return yamlutil.custom_tree_to_tagged_tree(node, ctx) @classmethod def assert_equal(cls, a, b): # TODO: If models become comparable themselves, remove this. TransformType.assert_equal(a, b) assert (isinstance(a, modeling.models.Scale) and isinstance(b, modeling.models.Scale)) assert_array_equal(a.factor, b.factor) class PolynomialType(TransformType): name = "transform/polynomial" types = ['astropy.modeling.models.Polynomial1D', 'astropy.modeling.models.Polynomial2D'] @classmethod def from_tree_transform(cls, node, ctx): coefficients = np.asarray(node['coefficients']) n_dim = coefficients.ndim if n_dim == 1: model = modeling.models.Polynomial1D(coefficients.size - 1) model.parameters = coefficients elif n_dim == 2: shape = coefficients.shape degree = shape[0] - 1 if shape[0] != shape[1]: raise TypeError("Coefficients must be an (n+1, n+1) matrix") coeffs = {} for i in range(shape[0]): for j in range(shape[0]): if i + j < degree + 1: name = 'c' + str(i) + '_' +str(j) coeffs[name] = coefficients[i, j] model = modeling.models.Polynomial2D(degree, **coeffs) else: raise NotImplementedError( "Asdf currently only supports 1D or 2D polynomial transform.") return model @classmethod def to_tree_transform(cls, model, ctx): if isinstance(model, modeling.models.Polynomial1D): coefficients = np.array(model.parameters) elif isinstance(model, modeling.models.Polynomial2D): degree = model.degree coefficients = np.zeros((degree + 1, degree + 1)) for i in range(degree + 1): for j in range(degree + 1): if i + j < degree + 1: name = 'c' + str(i) + '_' +str(j) coefficients[i, j] = getattr(model, name).value node = {'coefficients': coefficients} return yamlutil.custom_tree_to_tagged_tree(node, ctx) @classmethod def assert_equal(cls, a, b): # TODO: If models become comparable themselves, remove this. TransformType.assert_equal(a, b) assert (isinstance(a, (modeling.models.Polynomial1D, modeling.models.Polynomial2D)) and isinstance(b, (modeling.models.Polynomial1D, modeling.models.Polynomial2D))) assert_array_equal(a.parameters, b.parameters)
# Licensed under a 3-clause BSD style license - see LICENSE.rst # This module implements the base CCDData class. import textwrap import numpy as np import pytest from ...io import fits from ..nduncertainty import StdDevUncertainty, MissingDataAssociationException from ... import units as u from ... import log from ...wcs import WCS, FITSFixedWarning from ...tests.helper import catch_warnings from ...utils import NumpyRNGContext from ...utils.data import (get_pkg_data_filename, get_pkg_data_filenames, get_pkg_data_contents) from ..ccddata import CCDData # If additional pytest markers are defined the key in the dictionary below # should be the name of the marker. DEFAULTS = { 'seed': 123, 'data_size': 100, 'data_scale': 1.0, 'data_mean': 0.0 } DEFAULT_SEED = 123 DEFAULT_DATA_SIZE = 100 DEFAULT_DATA_SCALE = 1.0 def value_from_markers(key, request): try: val = request.keywords[key].args[0] except KeyError: val = DEFAULTS[key] return val @pytest.fixture def ccd_data(request): """ Return a CCDData object with units of ADU. The size of the data array is 100x100 but can be changed using the marker @pytest.mark.data_size(N) on the test function, where N should be the desired dimension. Data values are initialized to random numbers drawn from a normal distribution with mean of 0 and scale 1. The scale can be changed with the marker @pytest.marker.scale(s) on the test function, where s is the desired scale. The mean can be changed with the marker @pytest.marker.scale(m) on the test function, where m is the desired mean. """ size = value_from_markers('data_size', request) scale = value_from_markers('data_scale', request) mean = value_from_markers('data_mean', request) with NumpyRNGContext(DEFAULTS['seed']): data = np.random.normal(loc=mean, size=[size, size], scale=scale) fake_meta = {'my_key': 42, 'your_key': 'not 42'} ccd = CCDData(data, unit=u.adu) ccd.header = fake_meta return ccd def test_ccddata_empty(): with pytest.raises(TypeError): CCDData() # empty initializer should fail def test_ccddata_must_have_unit(): with pytest.raises(ValueError): CCDData(np.zeros([100, 100])) def test_ccddata_unit_cannot_be_set_to_none(ccd_data): with pytest.raises(TypeError): ccd_data.unit = None def test_ccddata_meta_header_conflict(): with pytest.raises(ValueError) as exc: CCDData([1, 2, 3], unit='', meta={1: 1}, header={2: 2}) assert "can't have both header and meta." in str(exc) @pytest.mark.data_size(10) def test_ccddata_simple(ccd_data): assert ccd_data.shape == (10, 10) assert ccd_data.size == 100 assert ccd_data.dtype == np.dtype(float) def test_ccddata_init_with_string_electron_unit(): ccd = CCDData(np.zeros((10, 10)), unit="electron") assert ccd.unit is u.electron @pytest.mark.data_size(10) def test_initialize_from_FITS(ccd_data, tmpdir): hdu = fits.PrimaryHDU(ccd_data) hdulist = fits.HDUList([hdu]) filename = tmpdir.join('afile.fits').strpath hdulist.writeto(filename) cd = CCDData.read(filename, unit=u.electron) assert cd.shape == (10, 10) assert cd.size == 100 assert np.issubdtype(cd.data.dtype, np.floating) for k, v in hdu.header.items(): assert cd.meta[k] == v def test_initialize_from_fits_with_unit_in_header(tmpdir): fake_img = np.random.random(size=(100, 100)) hdu = fits.PrimaryHDU(fake_img) hdu.header['bunit'] = u.adu.to_string() filename = tmpdir.join('afile.fits').strpath hdu.writeto(filename) ccd = CCDData.read(filename) # ccd should pick up the unit adu from the fits header...did it? 
assert ccd.unit is u.adu # An explicit unit in the read overrides any unit in the FITS file ccd2 = CCDData.read(filename, unit="photon") assert ccd2.unit is u.photon def test_initialize_from_fits_with_ADU_in_header(tmpdir): fake_img = np.random.random(size=(100, 100)) hdu = fits.PrimaryHDU(fake_img) hdu.header['bunit'] = 'ADU' filename = tmpdir.join('afile.fits').strpath hdu.writeto(filename) ccd = CCDData.read(filename) # ccd should pick up the unit adu from the fits header...did it? assert ccd.unit is u.adu def test_initialize_from_fits_with_data_in_different_extension(tmpdir): fake_img = np.random.random(size=(100, 100)) hdu1 = fits.PrimaryHDU() hdu2 = fits.ImageHDU(fake_img) hdus = fits.HDUList([hdu1, hdu2]) filename = tmpdir.join('afile.fits').strpath hdus.writeto(filename) with catch_warnings(FITSFixedWarning) as w: ccd = CCDData.read(filename, unit='adu') assert len(w) == 0 # ccd should pick up the unit adu from the fits header...did it? np.testing.assert_array_equal(ccd.data, fake_img) # check that the header is the combined header assert hdu2.header + hdu1.header == ccd.header def test_initialize_from_fits_with_extension(tmpdir): fake_img1 = np.random.random(size=(100, 100)) fake_img2 = np.random.random(size=(100, 100)) hdu0 = fits.PrimaryHDU() hdu1 = fits.ImageHDU(fake_img1) hdu2 = fits.ImageHDU(fake_img2) hdus = fits.HDUList([hdu0, hdu1, hdu2]) filename = tmpdir.join('afile.fits').strpath hdus.writeto(filename) ccd = CCDData.read(filename, hdu=2, unit='adu') # ccd should pick up the unit adu from the fits header...did it? np.testing.assert_array_equal(ccd.data, fake_img2) def test_write_unit_to_hdu(ccd_data, tmpdir): ccd_unit = ccd_data.unit hdulist = ccd_data.to_hdu() assert 'bunit' in hdulist[0].header assert hdulist[0].header['bunit'] == ccd_unit.to_string() def test_initialize_from_FITS_bad_keyword_raises_error(ccd_data, tmpdir): # There are two fits.open keywords that are not permitted in ccdproc: # do_not_scale_image_data and scale_back filename = tmpdir.join('test.fits').strpath ccd_data.write(filename) with pytest.raises(TypeError): CCDData.read(filename, unit=ccd_data.unit, do_not_scale_image_data=True) with pytest.raises(TypeError): CCDData.read(filename, unit=ccd_data.unit, scale_back=True) def test_ccddata_writer(ccd_data, tmpdir): filename = tmpdir.join('test.fits').strpath ccd_data.write(filename) ccd_disk = CCDData.read(filename, unit=ccd_data.unit) np.testing.assert_array_equal(ccd_data.data, ccd_disk.data) def test_ccddata_meta_is_case_sensitive(ccd_data): key = 'SoMeKEY' ccd_data.meta[key] = 10 assert key.lower() not in ccd_data.meta assert key.upper() not in ccd_data.meta assert key in ccd_data.meta def test_ccddata_meta_is_not_fits_header(ccd_data): ccd_data.meta = {'OBSERVER': 'Edwin Hubble'} assert not isinstance(ccd_data.meta, fits.Header) def test_fromMEF(ccd_data, tmpdir): hdu = fits.PrimaryHDU(ccd_data) hdu2 = fits.PrimaryHDU(2 * ccd_data.data) hdulist = fits.HDUList(hdu) hdulist.append(hdu2) filename = tmpdir.join('afile.fits').strpath hdulist.writeto(filename) # by default, we reading from the first extension cd = CCDData.read(filename, unit=u.electron) np.testing.assert_array_equal(cd.data, ccd_data.data) # but reading from the second should work too cd = CCDData.read(filename, hdu=1, unit=u.electron) np.testing.assert_array_equal(cd.data, 2 * ccd_data.data) def test_metafromheader(ccd_data): hdr = fits.header.Header() hdr['observer'] = 'Edwin Hubble' hdr['exptime'] = '3600' d1 = CCDData(np.ones((5, 5)), meta=hdr, unit=u.electron) assert 
d1.meta['OBSERVER'] == 'Edwin Hubble' assert d1.header['OBSERVER'] == 'Edwin Hubble' def test_metafromdict(): dic = {'OBSERVER': 'Edwin Hubble', 'EXPTIME': 3600} d1 = CCDData(np.ones((5, 5)), meta=dic, unit=u.electron) assert d1.meta['OBSERVER'] == 'Edwin Hubble' def test_header2meta(): hdr = fits.header.Header() hdr['observer'] = 'Edwin Hubble' hdr['exptime'] = '3600' d1 = CCDData(np.ones((5, 5)), unit=u.electron) d1.header = hdr assert d1.meta['OBSERVER'] == 'Edwin Hubble' assert d1.header['OBSERVER'] == 'Edwin Hubble' def test_metafromstring_fail(): hdr = 'this is not a valid header' with pytest.raises(TypeError): CCDData(np.ones((5, 5)), meta=hdr, unit=u.adu) def test_setting_bad_uncertainty_raises_error(ccd_data): with pytest.raises(TypeError): # Uncertainty is supposed to be an instance of NDUncertainty ccd_data.uncertainty = 10 def test_setting_uncertainty_with_array(ccd_data): ccd_data.uncertainty = None fake_uncertainty = np.sqrt(np.abs(ccd_data.data)) ccd_data.uncertainty = fake_uncertainty.copy() np.testing.assert_array_equal(ccd_data.uncertainty.array, fake_uncertainty) def test_setting_uncertainty_wrong_shape_raises_error(ccd_data): with pytest.raises(ValueError): ccd_data.uncertainty = np.random.random(size=(3, 4)) def test_to_hdu(ccd_data): ccd_data.meta = {'observer': 'Edwin Hubble'} fits_hdulist = ccd_data.to_hdu() assert isinstance(fits_hdulist, fits.HDUList) for k, v in ccd_data.meta.items(): assert fits_hdulist[0].header[k] == v np.testing.assert_array_equal(fits_hdulist[0].data, ccd_data.data) def test_copy(ccd_data): ccd_copy = ccd_data.copy() np.testing.assert_array_equal(ccd_copy.data, ccd_data.data) assert ccd_copy.unit == ccd_data.unit assert ccd_copy.meta == ccd_data.meta @pytest.mark.parametrize('operation,affects_uncertainty', [ ("multiply", True), ("divide", True), ]) @pytest.mark.parametrize('operand', [ 2.0, 2 * u.dimensionless_unscaled, 2 * u.photon / u.adu, ]) @pytest.mark.parametrize('with_uncertainty', [ True, False]) @pytest.mark.data_unit(u.adu) def test_mult_div_overload(ccd_data, operand, with_uncertainty, operation, affects_uncertainty): if with_uncertainty: ccd_data.uncertainty = StdDevUncertainty(np.ones_like(ccd_data)) method = ccd_data.__getattribute__(operation) np_method = np.__getattribute__(operation) result = method(operand) assert result is not ccd_data assert isinstance(result, CCDData) assert (result.uncertainty is None or isinstance(result.uncertainty, StdDevUncertainty)) try: op_value = operand.value except AttributeError: op_value = operand np.testing.assert_array_equal(result.data, np_method(ccd_data.data, op_value)) if with_uncertainty: if affects_uncertainty: np.testing.assert_array_equal(result.uncertainty.array, np_method(ccd_data.uncertainty.array, op_value)) else: np.testing.assert_array_equal(result.uncertainty.array, ccd_data.uncertainty.array) else: assert result.uncertainty is None if isinstance(operand, u.Quantity): # Need the "1 *" below to force arguments to be Quantity to work around # astropy/astropy#2377 expected_unit = np_method(1 * ccd_data.unit, 1 * operand.unit).unit assert result.unit == expected_unit else: assert result.unit == ccd_data.unit @pytest.mark.parametrize('operation,affects_uncertainty', [ ("add", False), ("subtract", False), ]) @pytest.mark.parametrize('operand,expect_failure', [ (2.0, u.UnitsError), # fail--units don't match image (2 * u.dimensionless_unscaled, u.UnitsError), # same (2 * u.adu, False), ]) @pytest.mark.parametrize('with_uncertainty', [ True, False]) @pytest.mark.data_unit(u.adu) def 
test_add_sub_overload(ccd_data, operand, expect_failure, with_uncertainty, operation, affects_uncertainty): if with_uncertainty: ccd_data.uncertainty = StdDevUncertainty(np.ones_like(ccd_data)) method = ccd_data.__getattribute__(operation) np_method = np.__getattribute__(operation) if expect_failure: with pytest.raises(expect_failure): result = method(operand) return else: result = method(operand) assert result is not ccd_data assert isinstance(result, CCDData) assert (result.uncertainty is None or isinstance(result.uncertainty, StdDevUncertainty)) try: op_value = operand.value except AttributeError: op_value = operand np.testing.assert_array_equal(result.data, np_method(ccd_data.data, op_value)) if with_uncertainty: if affects_uncertainty: np.testing.assert_array_equal(result.uncertainty.array, np_method(ccd_data.uncertainty.array, op_value)) else: np.testing.assert_array_equal(result.uncertainty.array, ccd_data.uncertainty.array) else: assert result.uncertainty is None if isinstance(operand, u.Quantity): assert (result.unit == ccd_data.unit and result.unit == operand.unit) else: assert result.unit == ccd_data.unit def test_arithmetic_overload_fails(ccd_data): with pytest.raises(TypeError): ccd_data.multiply("five") with pytest.raises(TypeError): ccd_data.divide("five") with pytest.raises(TypeError): ccd_data.add("five") with pytest.raises(TypeError): ccd_data.subtract("five") def test_arithmetic_no_wcs_compare(): ccd = CCDData(np.ones((10, 10)), unit='') assert ccd.add(ccd, compare_wcs=None).wcs is None assert ccd.subtract(ccd, compare_wcs=None).wcs is None assert ccd.multiply(ccd, compare_wcs=None).wcs is None assert ccd.divide(ccd, compare_wcs=None).wcs is None def test_arithmetic_with_wcs_compare(): def return_diff_smaller_3(first, second): return abs(first - second) <= 3 ccd1 = CCDData(np.ones((10, 10)), unit='', wcs=2) ccd2 = CCDData(np.ones((10, 10)), unit='', wcs=5) assert ccd1.add(ccd2, compare_wcs=return_diff_smaller_3).wcs == 2 assert ccd1.subtract(ccd2, compare_wcs=return_diff_smaller_3).wcs == 2 assert ccd1.multiply(ccd2, compare_wcs=return_diff_smaller_3).wcs == 2 assert ccd1.divide(ccd2, compare_wcs=return_diff_smaller_3).wcs == 2 def test_arithmetic_with_wcs_compare_fail(): def return_diff_smaller_1(first, second): return abs(first - second) <= 1 ccd1 = CCDData(np.ones((10, 10)), unit='', wcs=2) ccd2 = CCDData(np.ones((10, 10)), unit='', wcs=5) with pytest.raises(ValueError): ccd1.add(ccd2, compare_wcs=return_diff_smaller_1).wcs with pytest.raises(ValueError): ccd1.subtract(ccd2, compare_wcs=return_diff_smaller_1).wcs with pytest.raises(ValueError): ccd1.multiply(ccd2, compare_wcs=return_diff_smaller_1).wcs with pytest.raises(ValueError): ccd1.divide(ccd2, compare_wcs=return_diff_smaller_1).wcs def test_arithmetic_overload_ccddata_operand(ccd_data): ccd_data.uncertainty = StdDevUncertainty(np.ones_like(ccd_data)) operand = ccd_data.copy() result = ccd_data.add(operand) assert len(result.meta) == 0 np.testing.assert_array_equal(result.data, 2 * ccd_data.data) np.testing.assert_array_equal(result.uncertainty.array, np.sqrt(2) * ccd_data.uncertainty.array) result = ccd_data.subtract(operand) assert len(result.meta) == 0 np.testing.assert_array_equal(result.data, 0 * ccd_data.data) np.testing.assert_array_equal(result.uncertainty.array, np.sqrt(2) * ccd_data.uncertainty.array) result = ccd_data.multiply(operand) assert len(result.meta) == 0 np.testing.assert_array_equal(result.data, ccd_data.data ** 2) expected_uncertainty = (np.sqrt(2) * np.abs(ccd_data.data) * 
ccd_data.uncertainty.array) np.testing.assert_allclose(result.uncertainty.array, expected_uncertainty) result = ccd_data.divide(operand) assert len(result.meta) == 0 np.testing.assert_array_equal(result.data, np.ones_like(ccd_data.data)) expected_uncertainty = (np.sqrt(2) / np.abs(ccd_data.data) * ccd_data.uncertainty.array) np.testing.assert_allclose(result.uncertainty.array, expected_uncertainty) def test_arithmetic_overload_differing_units(): a = np.array([1, 2, 3]) * u.m b = np.array([1, 2, 3]) * u.cm ccddata = CCDData(a) # TODO: Could also be parametrized. res = ccddata.add(b) np.testing.assert_array_almost_equal(res.data, np.add(a, b).value) assert res.unit == np.add(a, b).unit res = ccddata.subtract(b) np.testing.assert_array_almost_equal(res.data, np.subtract(a, b).value) assert res.unit == np.subtract(a, b).unit res = ccddata.multiply(b) np.testing.assert_array_almost_equal(res.data, np.multiply(a, b).value) assert res.unit == np.multiply(a, b).unit res = ccddata.divide(b) np.testing.assert_array_almost_equal(res.data, np.divide(a, b).value) assert res.unit == np.divide(a, b).unit def test_arithmetic_add_with_array(): ccd = CCDData(np.ones((3, 3)), unit='') res = ccd.add(np.arange(3)) np.testing.assert_array_equal(res.data, [[1, 2, 3]] * 3) ccd = CCDData(np.ones((3, 3)), unit='adu') with pytest.raises(ValueError): ccd.add(np.arange(3)) def test_arithmetic_subtract_with_array(): ccd = CCDData(np.ones((3, 3)), unit='') res = ccd.subtract(np.arange(3)) np.testing.assert_array_equal(res.data, [[1, 0, -1]] * 3) ccd = CCDData(np.ones((3, 3)), unit='adu') with pytest.raises(ValueError): ccd.subtract(np.arange(3)) def test_arithmetic_multiply_with_array(): ccd = CCDData(np.ones((3, 3)) * 3, unit=u.m) res = ccd.multiply(np.ones((3, 3)) * 2) np.testing.assert_array_equal(res.data, [[6, 6, 6]] * 3) assert res.unit == ccd.unit def test_arithmetic_divide_with_array(): ccd = CCDData(np.ones((3, 3)), unit=u.m) res = ccd.divide(np.ones((3, 3)) * 2) np.testing.assert_array_equal(res.data, [[0.5, 0.5, 0.5]] * 3) assert res.unit == ccd.unit def test_history_preserved_if_metadata_is_fits_header(tmpdir): fake_img = np.random.random(size=(100, 100)) hdu = fits.PrimaryHDU(fake_img) hdu.header['history'] = 'one' hdu.header['history'] = 'two' hdu.header['history'] = 'three' assert len(hdu.header['history']) == 3 tmp_file = tmpdir.join('temp.fits').strpath hdu.writeto(tmp_file) ccd_read = CCDData.read(tmp_file, unit="adu") assert ccd_read.header['history'] == hdu.header['history'] def test_infol_logged_if_unit_in_fits_header(ccd_data, tmpdir): tmpfile = tmpdir.join('temp.fits') ccd_data.write(tmpfile.strpath) log.setLevel('INFO') explicit_unit_name = "photon" with log.log_to_list() as log_list: ccd_from_disk = CCDData.read(tmpfile.strpath, unit=explicit_unit_name) assert explicit_unit_name in log_list[0].message def test_wcs_attribute(ccd_data, tmpdir): """ Check that WCS attribute gets added to header, and that if a CCDData object is created from a FITS file with a header, and the WCS attribute is modified, then the CCDData object is turned back into an hdu, the WCS object overwrites the old WCS information in the header. """ tmpfile = tmpdir.join('temp.fits') # This wcs example is taken from the astropy.wcs docs. 
wcs = WCS(naxis=2) wcs.wcs.crpix = np.array(ccd_data.shape) / 2 wcs.wcs.cdelt = np.array([-0.066667, 0.066667]) wcs.wcs.crval = [0, -90] wcs.wcs.ctype = ["RA---AIR", "DEC--AIR"] wcs.wcs.set_pv([(2, 1, 45.0)]) ccd_data.header = ccd_data.to_hdu()[0].header ccd_data.header.extend(wcs.to_header(), useblanks=False) ccd_data.write(tmpfile.strpath) # Get the header length after it has been extended by the WCS keywords original_header_length = len(ccd_data.header) ccd_new = CCDData.read(tmpfile.strpath) # WCS attribute should be set for ccd_new assert ccd_new.wcs is not None # WCS attribute should be equal to wcs above. assert ccd_new.wcs.wcs == wcs.wcs # Converting CCDData object with wcs to an hdu shouldn't # create duplicate wcs-related entries in the header. ccd_new_hdu = ccd_new.to_hdu()[0] assert len(ccd_new_hdu.header) == original_header_length # Making a CCDData with WCS (but not WCS in the header) should lead to # WCS information in the header when it is converted to an HDU. ccd_wcs_not_in_header = CCDData(ccd_data.data, wcs=wcs, unit="adu") hdu = ccd_wcs_not_in_header.to_hdu()[0] wcs_header = wcs.to_header() for k in wcs_header.keys(): # Skip these keywords if they are in the WCS header because they are # not WCS-specific. if k in ['', 'COMMENT', 'HISTORY']: continue # No keyword from the WCS should be in the header. assert k not in ccd_wcs_not_in_header.header # Every keyword in the WCS should be in the header of the HDU assert hdu.header[k] == wcs_header[k] # Now check that if WCS of a CCDData is modified, then the CCDData is # converted to an HDU, the WCS keywords in the header are overwritten # with the appropriate keywords from the header. # # ccd_new has a WCS and WCS keywords in the header, so try modifying # the WCS. ccd_new.wcs.wcs.cdelt *= 2 ccd_new_hdu_mod_wcs = ccd_new.to_hdu()[0] assert ccd_new_hdu_mod_wcs.header['CDELT1'] == ccd_new.wcs.wcs.cdelt[0] assert ccd_new_hdu_mod_wcs.header['CDELT2'] == ccd_new.wcs.wcs.cdelt[1] def test_wcs_keywords_removed_from_header(): """ Test, for the file included with the nddata tests, that WCS keywords are properly removed from header. """ from ..ccddata import _KEEP_THESE_KEYWORDS_IN_HEADER keepers = set(_KEEP_THESE_KEYWORDS_IN_HEADER) data_file = get_pkg_data_filename('data/sip-wcs.fits') ccd = CCDData.read(data_file) wcs_header = ccd.wcs.to_header() assert not (set(wcs_header) & set(ccd.meta) - keepers) # Make sure that exceptions are not raised when trying to remove missing # keywords. o4sp040b0_raw.fits of io.fits is missing keyword 'PC1_1'. data_file1 = get_pkg_data_filename('../../io/fits/tests/data/o4sp040b0_raw.fits') ccd = CCDData.read(data_file1, unit='count') def test_wcs_keyword_removal_for_wcs_test_files(): """ Test, for the WCS test files, that keyword removall works as expected. Those cover a much broader range of WCS types than test_wcs_keywords_removed_from_header """ from ..ccddata import _generate_wcs_and_update_header from ..ccddata import _KEEP_THESE_KEYWORDS_IN_HEADER keepers = set(_KEEP_THESE_KEYWORDS_IN_HEADER) wcs_headers = get_pkg_data_filenames('../../wcs/tests/data', pattern='*.hdr') for hdr in wcs_headers: # Skip the files that are expected to be bad... if 'invalid' in hdr or 'nonstandard' in hdr or 'segfault' in hdr: continue header_string = get_pkg_data_contents(hdr) wcs = WCS(header_string) header = wcs.to_header(relax=True) new_header, new_wcs = _generate_wcs_and_update_header(header) # Make sure all of the WCS-related keywords have been removed. 
assert not (set(new_header) & set(new_wcs.to_header(relax=True)) - keepers) # Check that the new wcs is the same as the old. new_wcs_header = new_wcs.to_header(relax=True) for k, v in new_wcs_header.items(): if isinstance(v, str): assert header[k] == v else: np.testing.assert_almost_equal(header[k], v) def test_read_wcs_not_creatable(tmpdir): # The following Header can't be converted to a WCS object. See also #6499. hdr_txt_example_WCS = textwrap.dedent(''' SIMPLE = T / Fits standard BITPIX = 16 / Bits per pixel NAXIS = 2 / Number of axes NAXIS1 = 1104 / Axis length NAXIS2 = 4241 / Axis length CRVAL1 = 164.98110962 / Physical value of the reference pixel X CRVAL2 = 44.34089279 / Physical value of the reference pixel Y CRPIX1 = -34.0 / Reference pixel in X (pixel) CRPIX2 = 2041.0 / Reference pixel in Y (pixel) CDELT1 = 0.10380000 / X Scale projected on detector (#/pix) CDELT2 = 0.10380000 / Y Scale projected on detector (#/pix) CTYPE1 = 'RA---TAN' / Pixel coordinate system CTYPE2 = 'WAVELENGTH' / Pixel coordinate system CUNIT1 = 'degree ' / Units used in both CRVAL1 and CDELT1 CUNIT2 = 'nm ' / Units used in both CRVAL2 and CDELT2 CD1_1 = 0.20760000 / Pixel Coordinate translation matrix CD1_2 = 0.00000000 / Pixel Coordinate translation matrix CD2_1 = 0.00000000 / Pixel Coordinate translation matrix CD2_2 = 0.10380000 / Pixel Coordinate translation matrix C2YPE1 = 'RA---TAN' / Pixel coordinate system C2YPE2 = 'DEC--TAN' / Pixel coordinate system C2NIT1 = 'degree ' / Units used in both C2VAL1 and C2ELT1 C2NIT2 = 'degree ' / Units used in both C2VAL2 and C2ELT2 RADECSYS= 'FK5 ' / The equatorial coordinate system ''') with catch_warnings(FITSFixedWarning): hdr = fits.Header.fromstring(hdr_txt_example_WCS, sep='\n') hdul = fits.HDUList([fits.PrimaryHDU(np.ones((4241, 1104)), header=hdr)]) filename = tmpdir.join('afile.fits').strpath hdul.writeto(filename) # The hdr cannot be converted to a WCS object because of an # InconsistentAxisTypesError but it should still open the file ccd = CCDData.read(filename, unit='adu') assert ccd.wcs is None def test_header(ccd_data): a = {'Observer': 'Hubble'} ccd = CCDData(ccd_data, header=a) assert ccd.meta == a def test_wcs_arithmetic(ccd_data): ccd_data.wcs = 5 result = ccd_data.multiply(1.0) assert result.wcs == 5 @pytest.mark.parametrize('operation', ['multiply', 'divide', 'add', 'subtract']) def test_wcs_arithmetic_ccd(ccd_data, operation): ccd_data2 = ccd_data.copy() ccd_data.wcs = 5 method = ccd_data.__getattribute__(operation) result = method(ccd_data2) assert result.wcs == ccd_data.wcs assert ccd_data2.wcs is None def test_wcs_sip_handling(): """ Check whether the ctypes RA---TAN-SIP and DEC--TAN-SIP survive a roundtrip unchanged. """ data_file = get_pkg_data_filename('data/sip-wcs.fits') def check_wcs_ctypes(header): expected_wcs_ctypes = { 'CTYPE1': 'RA---TAN-SIP', 'CTYPE2': 'DEC--TAN-SIP' } return [header[k] == v for k, v in expected_wcs_ctypes.items()] ccd_original = CCDData.read(data_file) # After initialization the keywords should be in the WCS, not in the # meta. 
with fits.open(data_file) as raw: good_ctype = check_wcs_ctypes(raw[0].header) assert all(good_ctype) ccd_new = ccd_original.to_hdu() good_ctype = check_wcs_ctypes(ccd_new[0].header) assert all(good_ctype) # Try converting to header with wcs_relax=False and # the header should contain the CTYPE keywords without # the -SIP ccd_no_relax = ccd_original.to_hdu(wcs_relax=False) good_ctype = check_wcs_ctypes(ccd_no_relax[0].header) assert not any(good_ctype) assert ccd_no_relax[0].header['CTYPE1'] == 'RA---TAN' assert ccd_no_relax[0].header['CTYPE2'] == 'DEC--TAN' @pytest.mark.parametrize('operation', ['multiply', 'divide', 'add', 'subtract']) def test_mask_arithmetic_ccd(ccd_data, operation): ccd_data2 = ccd_data.copy() ccd_data.mask = (ccd_data.data > 0) method = ccd_data.__getattribute__(operation) result = method(ccd_data2) np.testing.assert_equal(result.mask, ccd_data.mask) def test_write_read_multiextensionfits_mask_default(ccd_data, tmpdir): # Test that if a mask is present the mask is saved and loaded by default. ccd_data.mask = ccd_data.data > 10 filename = tmpdir.join('afile.fits').strpath ccd_data.write(filename) ccd_after = CCDData.read(filename) assert ccd_after.mask is not None np.testing.assert_array_equal(ccd_data.mask, ccd_after.mask) def test_write_read_multiextensionfits_uncertainty_default(ccd_data, tmpdir): # Test that if a uncertainty is present it is saved and loaded by default. ccd_data.uncertainty = StdDevUncertainty(ccd_data.data * 10) filename = tmpdir.join('afile.fits').strpath ccd_data.write(filename) ccd_after = CCDData.read(filename) assert ccd_after.uncertainty is not None np.testing.assert_array_equal(ccd_data.uncertainty.array, ccd_after.uncertainty.array) def test_write_read_multiextensionfits_not(ccd_data, tmpdir): # Test that writing mask and uncertainty can be disabled ccd_data.mask = ccd_data.data > 10 ccd_data.uncertainty = StdDevUncertainty(ccd_data.data * 10) filename = tmpdir.join('afile.fits').strpath ccd_data.write(filename, hdu_mask=None, hdu_uncertainty=None) ccd_after = CCDData.read(filename) assert ccd_after.uncertainty is None assert ccd_after.mask is None def test_write_read_multiextensionfits_custom_ext_names(ccd_data, tmpdir): # Test writing mask, uncertainty in another extension than default ccd_data.mask = ccd_data.data > 10 ccd_data.uncertainty = StdDevUncertainty(ccd_data.data * 10) filename = tmpdir.join('afile.fits').strpath ccd_data.write(filename, hdu_mask='Fun', hdu_uncertainty='NoFun') # Try reading with defaults extension names ccd_after = CCDData.read(filename) assert ccd_after.uncertainty is None assert ccd_after.mask is None # Try reading with custom extension names ccd_after = CCDData.read(filename, hdu_mask='Fun', hdu_uncertainty='NoFun') assert ccd_after.uncertainty is not None assert ccd_after.mask is not None np.testing.assert_array_equal(ccd_data.mask, ccd_after.mask) np.testing.assert_array_equal(ccd_data.uncertainty.array, ccd_after.uncertainty.array) def test_wcs(ccd_data): ccd_data.wcs = 5 assert ccd_data.wcs == 5 def test_recognized_fits_formats_for_read_write(ccd_data, tmpdir): # These are the extensions that are supposed to be supported. 
supported_extensions = ['fit', 'fits', 'fts'] for ext in supported_extensions: path = tmpdir.join("test.{}".format(ext)) ccd_data.write(path.strpath) from_disk = CCDData.read(path.strpath) assert (ccd_data.data == from_disk.data).all() def test_stddevuncertainty_compat_descriptor_no_parent(): with pytest.raises(MissingDataAssociationException): StdDevUncertainty(np.ones((10, 10))).parent_nddata def test_stddevuncertainty_compat_descriptor_no_weakref(): # TODO: Remove this test if astropy 1.0 isn't supported anymore # This test might create a Memoryleak on purpose, so the last lines after # the assert are IMPORTANT cleanup. ccd = CCDData(np.ones((10, 10)), unit='') uncert = StdDevUncertainty(np.ones((10, 10))) uncert._parent_nddata = ccd assert uncert.parent_nddata is ccd uncert._parent_nddata = None
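A minimal usage sketch, separate from the test file above, of the CCDData arithmetic behaviour these tests exercise; it assumes only the public astropy.nddata API and mirrors test_arithmetic_overload_ccddata_operand.

import numpy as np
from astropy import units as u
from astropy.nddata import CCDData, StdDevUncertainty

ccd = CCDData(np.ones((5, 5)), unit=u.adu)
ccd.uncertainty = StdDevUncertainty(np.ones((5, 5)))

# Adding a CCDData operand propagates standard-deviation uncertainties in
# quadrature, so the result's uncertainty is sqrt(2) times the input's.
result = ccd.add(ccd.copy())
np.testing.assert_array_equal(result.data, 2 * ccd.data)
np.testing.assert_allclose(result.uncertainty.array,
                           np.sqrt(2) * ccd.uncertainty.array)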
funbaker/astropy
astropy/nddata/tests/test_ccddata.py
astropy/io/misc/asdf/tags/transform/polynomial.py
# Licensed under a 3-clause BSD style license - see LICENSE.rst """ Here are all the test parameters and values for the each `~astropy.modeling.FittableModel` defined. There is a dictionary for 1D and a dictionary for 2D models. Explanation of keywords of the dictionaries: "parameters" : list or dict Model parameters, the model is tested with. Make sure you keep the right order. For polynomials you can also use a dict to specify the coefficients. See examples below. "x_values" : list x values where the model is evaluated. "y_values" : list Reference y values for the in x_values given positions. "z_values" : list Reference z values for the in x_values and y_values given positions. (2D model option) "x_lim" : list x test range for the model fitter. Depending on the model this can differ e.g. the PowerLaw model should be tested over a few magnitudes. "y_lim" : list y test range for the model fitter. Depending on the model this can differ e.g. the PowerLaw model should be tested over a few magnitudes. (2D model option) "log_fit" : bool PowerLaw models should be tested over a few magnitudes. So log_fit should be true. "requires_scipy" : bool If a model requires scipy (Bessel functions etc.) set this flag. "integral" : float Approximate value of the integral in the range x_lim (and y_lim). "deriv_parameters" : list If given the test of the derivative will use these parameters to create a model (optional) "deriv_initial" : list If given the test of the derivative will use these parameters as initial values for the fit (optional) """ from ..functional_models import ( Gaussian1D, Sine1D, Box1D, Linear1D, Lorentz1D, MexicanHat1D, Trapezoid1D, Const1D, Moffat1D, Gaussian2D, Const2D, Box2D, MexicanHat2D, TrapezoidDisk2D, AiryDisk2D, Moffat2D, Disk2D, Ring2D, Sersic1D, Sersic2D, Voigt1D, Planar2D) from ..polynomial import Polynomial1D, Polynomial2D from ..powerlaws import ( PowerLaw1D, BrokenPowerLaw1D, SmoothlyBrokenPowerLaw1D, ExponentialCutoffPowerLaw1D, LogParabola1D) import numpy as np # 1D Models models_1D = { Gaussian1D: { 'parameters': [1, 0, 1], 'x_values': [0, np.sqrt(2), -np.sqrt(2)], 'y_values': [1.0, 0.367879, 0.367879], 'x_lim': [-10, 10], 'integral': np.sqrt(2 * np.pi) }, Sine1D: { 'parameters': [1, 0.1, 0], 'x_values': [0, 2.5], 'y_values': [0, 1], 'x_lim': [-10, 10], 'integral': 0 }, Box1D: { 'parameters': [1, 0, 10], 'x_values': [-5, 5, 0, -10, 10], 'y_values': [1, 1, 1, 0, 0], 'x_lim': [-10, 10], 'integral': 10 }, Linear1D: { 'parameters': [1, 0], 'x_values': [0, np.pi, 42, -1], 'y_values': [0, np.pi, 42, -1], 'x_lim': [-10, 10], 'integral': 0 }, Lorentz1D: { 'parameters': [1, 0, 1], 'x_values': [0, -1, 1, 0.5, -0.5], 'y_values': [1., 0.2, 0.2, 0.5, 0.5], 'x_lim': [-10, 10], 'integral': 1 }, MexicanHat1D: { 'parameters': [1, 0, 1], 'x_values': [0, 1, -1, 3, -3], 'y_values': [1.0, 0.0, 0.0, -0.088872, -0.088872], 'x_lim': [-20, 20], 'integral': 0 }, Trapezoid1D: { 'parameters': [1, 0, 2, 1], 'x_values': [0, 1, -1, 1.5, -1.5, 2, 2], 'y_values': [1, 1, 1, 0.5, 0.5, 0, 0], 'x_lim': [-10, 10], 'integral': 3 }, Const1D: { 'parameters': [1], 'x_values': [-1, 1, np.pi, -42., 0], 'y_values': [1, 1, 1, 1, 1], 'x_lim': [-10, 10], 'integral': 20 }, Moffat1D: { 'parameters': [1, 0, 1, 2], 'x_values': [0, 1, -1, 3, -3], 'y_values': [1.0, 0.25, 0.25, 0.01, 0.01], 'x_lim': [-10, 10], 'integral': 1, 'deriv_parameters': [23.4, 1.2, 2.1, 2.3], 'deriv_initial': [10, 1, 1, 1] }, PowerLaw1D: { 'parameters': [1, 1, 2], 'constraints': {'fixed': {'x_0': True}}, 'x_values': [1, 10, 100], 'y_values': [1.0, 
0.01, 0.0001], 'x_lim': [1, 10], 'log_fit': True, 'integral': 0.99 }, BrokenPowerLaw1D: { 'parameters': [1, 1, 2, 3], 'constraints': {'fixed': {'x_break': True}}, 'x_values': [0.1, 1, 10, 100], 'y_values': [1e2, 1.0, 1e-3, 1e-6], 'x_lim': [0.1, 100], 'log_fit': True }, SmoothlyBrokenPowerLaw1D: { 'parameters': [1, 1, -2, 2, 0.5], 'constraints': {'fixed': {'x_break': True, 'delta': True}}, 'x_values': [0.01, 1, 100], 'y_values': [3.99920012e-04, 1.0, 3.99920012e-04], 'x_lim': [0.01, 100], 'log_fit': True }, ExponentialCutoffPowerLaw1D: { 'parameters': [1, 1, 2, 3], 'constraints': {'fixed': {'x_0': True}}, 'x_values': [0.1, 1, 10, 100], 'y_values': [9.67216100e+01, 7.16531311e-01, 3.56739933e-04, 3.33823780e-19], 'x_lim': [0.01, 100], 'log_fit': True }, LogParabola1D: { 'parameters': [1, 2, 3, 0.1], 'constraints': {'fixed': {'x_0': True}}, 'x_values': [0.1, 1, 10, 100], 'y_values': [3.26089063e+03, 7.62472488e+00, 6.17440488e-03, 1.73160572e-06], 'x_lim': [0.1, 100], 'log_fit': True }, Polynomial1D: { 'parameters': {'degree': 2, 'c0': 1., 'c1': 1., 'c2': 1.}, 'x_values': [1, 10, 100], 'y_values': [3, 111, 10101], 'x_lim': [-3, 3] }, Sersic1D: { 'parameters': [1, 20, 4], 'x_values': [0.1, 1, 10, 100], 'y_values': [2.78629391e+02, 5.69791430e+01, 3.38788244e+00, 2.23941982e-02], 'requires_scipy': True, 'x_lim': [0, 10], 'log_fit': True }, Voigt1D: { 'parameters': [0, 1, 0.5, 0.9], 'x_values': [0, 2, 4, 8, 10], 'y_values': [0.520935, 0.017205, 0.003998, 0.000983, 0.000628], 'x_lim': [-3, 3] } } # 2D Models models_2D = { Gaussian2D: { 'parameters': [1, 0, 0, 1, 1], 'constraints': {'fixed': {'theta': True}}, 'x_values': [0, np.sqrt(2), -np.sqrt(2)], 'y_values': [0, np.sqrt(2), -np.sqrt(2)], 'z_values': [1, 1. / np.exp(1) ** 2, 1. / np.exp(1) ** 2], 'x_lim': [-10, 10], 'y_lim': [-10, 10], 'integral': 2 * np.pi, 'deriv_parameters': [137., 5.1, 5.4, 1.5, 2., np.pi/4], 'deriv_initial': [10, 5, 5, 4, 4, .5] }, Const2D: { 'parameters': [1], 'x_values': [-1, 1, np.pi, -42., 0], 'y_values': [0, 1, 42, np.pi, -1], 'z_values': [1, 1, 1, 1, 1], 'x_lim': [-10, 10], 'y_lim': [-10, 10], 'integral': 400 }, Box2D: { 'parameters': [1, 0, 0, 10, 10], 'x_values': [-5, 5, -5, 5, 0, -10, 10], 'y_values': [-5, 5, 0, 0, 0, -10, 10], 'z_values': [1, 1, 1, 1, 1, 0, 0], 'x_lim': [-10, 10], 'y_lim': [-10, 10], 'integral': 100 }, MexicanHat2D: { 'parameters': [1, 0, 0, 1], 'x_values': [0, 0, 0, 0, 0, 1, -1, 3, -3], 'y_values': [0, 1, -1, 3, -3, 0, 0, 0, 0], 'z_values': [1.0, 0.303265, 0.303265, -0.038881, -0.038881, 0.303265, 0.303265, -0.038881, -0.038881], 'x_lim': [-10, 11], 'y_lim': [-10, 11], 'integral': 0 }, TrapezoidDisk2D: { 'parameters': [1, 0, 0, 1, 1], 'x_values': [0, 0.5, 0, 1.5], 'y_values': [0, 0.5, 1.5, 0], 'z_values': [1, 1, 0.5, 0.5], 'x_lim': [-3, 3], 'y_lim': [-3, 3] }, AiryDisk2D: { 'parameters': [7, 0, 0, 10], 'x_values': [0, 1, -1, -0.5, -0.5], 'y_values': [0, -1, 0.5, 0.5, -0.5], 'z_values': [7., 6.50158267, 6.68490643, 6.87251093, 6.87251093], 'x_lim': [-10, 10], 'y_lim': [-10, 10], 'requires_scipy': True }, Moffat2D: { 'parameters': [1, 0, 0, 1, 2], 'x_values': [0, 1, -1, 3, -3], 'y_values': [0, -1, 3, 1, -3], 'z_values': [1.0, 0.111111, 0.008264, 0.008264, 0.00277], 'x_lim': [-3, 3], 'y_lim': [-3, 3] }, Polynomial2D: { 'parameters': {'degree': 1, 'c0_0': 1., 'c1_0': 1., 'c0_1': 1.}, 'x_values': [1, 2, 3], 'y_values': [1, 3, 2], 'z_values': [3, 6, 6], 'x_lim': [1, 100], 'y_lim': [1, 100] }, Disk2D: { 'parameters': [1, 0, 0, 5], 'x_values': [-5, 5, -5, 5, 0, -10, 10], 'y_values': [-5, 5, 0, 0, 0, 
-10, 10], 'z_values': [0, 0, 1, 1, 1, 0, 0], 'x_lim': [-10, 10], 'y_lim': [-10, 10], 'integral': np.pi * 5 ** 2 }, Ring2D: { 'parameters': [1, 0, 0, 5, 5], 'x_values': [-5, 5, -5, 5, 0, -10, 10], 'y_values': [-5, 5, 0, 0, 0, -10, 10], 'z_values': [1, 1, 1, 1, 0, 0, 0], 'x_lim': [-10, 10], 'y_lim': [-10, 10], 'integral': np.pi * (10 ** 2 - 5 ** 2) }, Sersic2D: { 'parameters': [1, 25, 4, 50, 50, 0.5, -1], 'x_values': [0.0, 1, 10, 100], 'y_values': [1, 100, 0.0, 10], 'z_values': [1.686398e-02, 9.095221e-02, 2.341879e-02, 9.419231e-02], 'requires_scipy': True, 'x_lim': [1, 1e10], 'y_lim': [1, 1e10] }, Planar2D: { 'parameters': [1, 1, 0], 'x_values': [0, np.pi, 42, -1], 'y_values': [np.pi, 0, -1, 42], 'z_values': [np.pi, np.pi, 41, 41], 'x_lim': [-10, 10], 'y_lim': [-10, 10], 'integral': 0 } }
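A short sketch of how one entry in these dictionaries can be consumed, assuming the public astropy.modeling API; the helper name check_model is purely illustrative and not part of the module.

import numpy as np
from astropy.modeling.models import Gaussian1D

def check_model(model_cls, spec):
    # Build the model from the reference parameters and confirm that
    # evaluating it at x_values reproduces the tabulated y_values.
    model = model_cls(*spec['parameters'])
    np.testing.assert_allclose(model(np.asarray(spec['x_values'], dtype=float)),
                               spec['y_values'], atol=1e-5)

check_model(Gaussian1D, {'parameters': [1, 0, 1],
                         'x_values': [0, np.sqrt(2), -np.sqrt(2)],
                         'y_values': [1.0, 0.367879, 0.367879]})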
# Licensed under a 3-clause BSD style license - see LICENSE.rst # This module implements the base CCDData class. import textwrap import numpy as np import pytest from ...io import fits from ..nduncertainty import StdDevUncertainty, MissingDataAssociationException from ... import units as u from ... import log from ...wcs import WCS, FITSFixedWarning from ...tests.helper import catch_warnings from ...utils import NumpyRNGContext from ...utils.data import (get_pkg_data_filename, get_pkg_data_filenames, get_pkg_data_contents) from ..ccddata import CCDData # If additional pytest markers are defined the key in the dictionary below # should be the name of the marker. DEFAULTS = { 'seed': 123, 'data_size': 100, 'data_scale': 1.0, 'data_mean': 0.0 } DEFAULT_SEED = 123 DEFAULT_DATA_SIZE = 100 DEFAULT_DATA_SCALE = 1.0 def value_from_markers(key, request): try: val = request.keywords[key].args[0] except KeyError: val = DEFAULTS[key] return val @pytest.fixture def ccd_data(request): """ Return a CCDData object with units of ADU. The size of the data array is 100x100 but can be changed using the marker @pytest.mark.data_size(N) on the test function, where N should be the desired dimension. Data values are initialized to random numbers drawn from a normal distribution with mean of 0 and scale 1. The scale can be changed with the marker @pytest.marker.scale(s) on the test function, where s is the desired scale. The mean can be changed with the marker @pytest.marker.scale(m) on the test function, where m is the desired mean. """ size = value_from_markers('data_size', request) scale = value_from_markers('data_scale', request) mean = value_from_markers('data_mean', request) with NumpyRNGContext(DEFAULTS['seed']): data = np.random.normal(loc=mean, size=[size, size], scale=scale) fake_meta = {'my_key': 42, 'your_key': 'not 42'} ccd = CCDData(data, unit=u.adu) ccd.header = fake_meta return ccd def test_ccddata_empty(): with pytest.raises(TypeError): CCDData() # empty initializer should fail def test_ccddata_must_have_unit(): with pytest.raises(ValueError): CCDData(np.zeros([100, 100])) def test_ccddata_unit_cannot_be_set_to_none(ccd_data): with pytest.raises(TypeError): ccd_data.unit = None def test_ccddata_meta_header_conflict(): with pytest.raises(ValueError) as exc: CCDData([1, 2, 3], unit='', meta={1: 1}, header={2: 2}) assert "can't have both header and meta." in str(exc) @pytest.mark.data_size(10) def test_ccddata_simple(ccd_data): assert ccd_data.shape == (10, 10) assert ccd_data.size == 100 assert ccd_data.dtype == np.dtype(float) def test_ccddata_init_with_string_electron_unit(): ccd = CCDData(np.zeros((10, 10)), unit="electron") assert ccd.unit is u.electron @pytest.mark.data_size(10) def test_initialize_from_FITS(ccd_data, tmpdir): hdu = fits.PrimaryHDU(ccd_data) hdulist = fits.HDUList([hdu]) filename = tmpdir.join('afile.fits').strpath hdulist.writeto(filename) cd = CCDData.read(filename, unit=u.electron) assert cd.shape == (10, 10) assert cd.size == 100 assert np.issubdtype(cd.data.dtype, np.floating) for k, v in hdu.header.items(): assert cd.meta[k] == v def test_initialize_from_fits_with_unit_in_header(tmpdir): fake_img = np.random.random(size=(100, 100)) hdu = fits.PrimaryHDU(fake_img) hdu.header['bunit'] = u.adu.to_string() filename = tmpdir.join('afile.fits').strpath hdu.writeto(filename) ccd = CCDData.read(filename) # ccd should pick up the unit adu from the fits header...did it? 
assert ccd.unit is u.adu # An explicit unit in the read overrides any unit in the FITS file ccd2 = CCDData.read(filename, unit="photon") assert ccd2.unit is u.photon def test_initialize_from_fits_with_ADU_in_header(tmpdir): fake_img = np.random.random(size=(100, 100)) hdu = fits.PrimaryHDU(fake_img) hdu.header['bunit'] = 'ADU' filename = tmpdir.join('afile.fits').strpath hdu.writeto(filename) ccd = CCDData.read(filename) # ccd should pick up the unit adu from the fits header...did it? assert ccd.unit is u.adu def test_initialize_from_fits_with_data_in_different_extension(tmpdir): fake_img = np.random.random(size=(100, 100)) hdu1 = fits.PrimaryHDU() hdu2 = fits.ImageHDU(fake_img) hdus = fits.HDUList([hdu1, hdu2]) filename = tmpdir.join('afile.fits').strpath hdus.writeto(filename) with catch_warnings(FITSFixedWarning) as w: ccd = CCDData.read(filename, unit='adu') assert len(w) == 0 # ccd should pick up the unit adu from the fits header...did it? np.testing.assert_array_equal(ccd.data, fake_img) # check that the header is the combined header assert hdu2.header + hdu1.header == ccd.header def test_initialize_from_fits_with_extension(tmpdir): fake_img1 = np.random.random(size=(100, 100)) fake_img2 = np.random.random(size=(100, 100)) hdu0 = fits.PrimaryHDU() hdu1 = fits.ImageHDU(fake_img1) hdu2 = fits.ImageHDU(fake_img2) hdus = fits.HDUList([hdu0, hdu1, hdu2]) filename = tmpdir.join('afile.fits').strpath hdus.writeto(filename) ccd = CCDData.read(filename, hdu=2, unit='adu') # ccd should pick up the unit adu from the fits header...did it? np.testing.assert_array_equal(ccd.data, fake_img2) def test_write_unit_to_hdu(ccd_data, tmpdir): ccd_unit = ccd_data.unit hdulist = ccd_data.to_hdu() assert 'bunit' in hdulist[0].header assert hdulist[0].header['bunit'] == ccd_unit.to_string() def test_initialize_from_FITS_bad_keyword_raises_error(ccd_data, tmpdir): # There are two fits.open keywords that are not permitted in ccdproc: # do_not_scale_image_data and scale_back filename = tmpdir.join('test.fits').strpath ccd_data.write(filename) with pytest.raises(TypeError): CCDData.read(filename, unit=ccd_data.unit, do_not_scale_image_data=True) with pytest.raises(TypeError): CCDData.read(filename, unit=ccd_data.unit, scale_back=True) def test_ccddata_writer(ccd_data, tmpdir): filename = tmpdir.join('test.fits').strpath ccd_data.write(filename) ccd_disk = CCDData.read(filename, unit=ccd_data.unit) np.testing.assert_array_equal(ccd_data.data, ccd_disk.data) def test_ccddata_meta_is_case_sensitive(ccd_data): key = 'SoMeKEY' ccd_data.meta[key] = 10 assert key.lower() not in ccd_data.meta assert key.upper() not in ccd_data.meta assert key in ccd_data.meta def test_ccddata_meta_is_not_fits_header(ccd_data): ccd_data.meta = {'OBSERVER': 'Edwin Hubble'} assert not isinstance(ccd_data.meta, fits.Header) def test_fromMEF(ccd_data, tmpdir): hdu = fits.PrimaryHDU(ccd_data) hdu2 = fits.PrimaryHDU(2 * ccd_data.data) hdulist = fits.HDUList(hdu) hdulist.append(hdu2) filename = tmpdir.join('afile.fits').strpath hdulist.writeto(filename) # by default, we reading from the first extension cd = CCDData.read(filename, unit=u.electron) np.testing.assert_array_equal(cd.data, ccd_data.data) # but reading from the second should work too cd = CCDData.read(filename, hdu=1, unit=u.electron) np.testing.assert_array_equal(cd.data, 2 * ccd_data.data) def test_metafromheader(ccd_data): hdr = fits.header.Header() hdr['observer'] = 'Edwin Hubble' hdr['exptime'] = '3600' d1 = CCDData(np.ones((5, 5)), meta=hdr, unit=u.electron) assert 
d1.meta['OBSERVER'] == 'Edwin Hubble' assert d1.header['OBSERVER'] == 'Edwin Hubble' def test_metafromdict(): dic = {'OBSERVER': 'Edwin Hubble', 'EXPTIME': 3600} d1 = CCDData(np.ones((5, 5)), meta=dic, unit=u.electron) assert d1.meta['OBSERVER'] == 'Edwin Hubble' def test_header2meta(): hdr = fits.header.Header() hdr['observer'] = 'Edwin Hubble' hdr['exptime'] = '3600' d1 = CCDData(np.ones((5, 5)), unit=u.electron) d1.header = hdr assert d1.meta['OBSERVER'] == 'Edwin Hubble' assert d1.header['OBSERVER'] == 'Edwin Hubble' def test_metafromstring_fail(): hdr = 'this is not a valid header' with pytest.raises(TypeError): CCDData(np.ones((5, 5)), meta=hdr, unit=u.adu) def test_setting_bad_uncertainty_raises_error(ccd_data): with pytest.raises(TypeError): # Uncertainty is supposed to be an instance of NDUncertainty ccd_data.uncertainty = 10 def test_setting_uncertainty_with_array(ccd_data): ccd_data.uncertainty = None fake_uncertainty = np.sqrt(np.abs(ccd_data.data)) ccd_data.uncertainty = fake_uncertainty.copy() np.testing.assert_array_equal(ccd_data.uncertainty.array, fake_uncertainty) def test_setting_uncertainty_wrong_shape_raises_error(ccd_data): with pytest.raises(ValueError): ccd_data.uncertainty = np.random.random(size=(3, 4)) def test_to_hdu(ccd_data): ccd_data.meta = {'observer': 'Edwin Hubble'} fits_hdulist = ccd_data.to_hdu() assert isinstance(fits_hdulist, fits.HDUList) for k, v in ccd_data.meta.items(): assert fits_hdulist[0].header[k] == v np.testing.assert_array_equal(fits_hdulist[0].data, ccd_data.data) def test_copy(ccd_data): ccd_copy = ccd_data.copy() np.testing.assert_array_equal(ccd_copy.data, ccd_data.data) assert ccd_copy.unit == ccd_data.unit assert ccd_copy.meta == ccd_data.meta @pytest.mark.parametrize('operation,affects_uncertainty', [ ("multiply", True), ("divide", True), ]) @pytest.mark.parametrize('operand', [ 2.0, 2 * u.dimensionless_unscaled, 2 * u.photon / u.adu, ]) @pytest.mark.parametrize('with_uncertainty', [ True, False]) @pytest.mark.data_unit(u.adu) def test_mult_div_overload(ccd_data, operand, with_uncertainty, operation, affects_uncertainty): if with_uncertainty: ccd_data.uncertainty = StdDevUncertainty(np.ones_like(ccd_data)) method = ccd_data.__getattribute__(operation) np_method = np.__getattribute__(operation) result = method(operand) assert result is not ccd_data assert isinstance(result, CCDData) assert (result.uncertainty is None or isinstance(result.uncertainty, StdDevUncertainty)) try: op_value = operand.value except AttributeError: op_value = operand np.testing.assert_array_equal(result.data, np_method(ccd_data.data, op_value)) if with_uncertainty: if affects_uncertainty: np.testing.assert_array_equal(result.uncertainty.array, np_method(ccd_data.uncertainty.array, op_value)) else: np.testing.assert_array_equal(result.uncertainty.array, ccd_data.uncertainty.array) else: assert result.uncertainty is None if isinstance(operand, u.Quantity): # Need the "1 *" below to force arguments to be Quantity to work around # astropy/astropy#2377 expected_unit = np_method(1 * ccd_data.unit, 1 * operand.unit).unit assert result.unit == expected_unit else: assert result.unit == ccd_data.unit @pytest.mark.parametrize('operation,affects_uncertainty', [ ("add", False), ("subtract", False), ]) @pytest.mark.parametrize('operand,expect_failure', [ (2.0, u.UnitsError), # fail--units don't match image (2 * u.dimensionless_unscaled, u.UnitsError), # same (2 * u.adu, False), ]) @pytest.mark.parametrize('with_uncertainty', [ True, False]) @pytest.mark.data_unit(u.adu) def 
test_add_sub_overload(ccd_data, operand, expect_failure, with_uncertainty, operation, affects_uncertainty): if with_uncertainty: ccd_data.uncertainty = StdDevUncertainty(np.ones_like(ccd_data)) method = ccd_data.__getattribute__(operation) np_method = np.__getattribute__(operation) if expect_failure: with pytest.raises(expect_failure): result = method(operand) return else: result = method(operand) assert result is not ccd_data assert isinstance(result, CCDData) assert (result.uncertainty is None or isinstance(result.uncertainty, StdDevUncertainty)) try: op_value = operand.value except AttributeError: op_value = operand np.testing.assert_array_equal(result.data, np_method(ccd_data.data, op_value)) if with_uncertainty: if affects_uncertainty: np.testing.assert_array_equal(result.uncertainty.array, np_method(ccd_data.uncertainty.array, op_value)) else: np.testing.assert_array_equal(result.uncertainty.array, ccd_data.uncertainty.array) else: assert result.uncertainty is None if isinstance(operand, u.Quantity): assert (result.unit == ccd_data.unit and result.unit == operand.unit) else: assert result.unit == ccd_data.unit def test_arithmetic_overload_fails(ccd_data): with pytest.raises(TypeError): ccd_data.multiply("five") with pytest.raises(TypeError): ccd_data.divide("five") with pytest.raises(TypeError): ccd_data.add("five") with pytest.raises(TypeError): ccd_data.subtract("five") def test_arithmetic_no_wcs_compare(): ccd = CCDData(np.ones((10, 10)), unit='') assert ccd.add(ccd, compare_wcs=None).wcs is None assert ccd.subtract(ccd, compare_wcs=None).wcs is None assert ccd.multiply(ccd, compare_wcs=None).wcs is None assert ccd.divide(ccd, compare_wcs=None).wcs is None def test_arithmetic_with_wcs_compare(): def return_diff_smaller_3(first, second): return abs(first - second) <= 3 ccd1 = CCDData(np.ones((10, 10)), unit='', wcs=2) ccd2 = CCDData(np.ones((10, 10)), unit='', wcs=5) assert ccd1.add(ccd2, compare_wcs=return_diff_smaller_3).wcs == 2 assert ccd1.subtract(ccd2, compare_wcs=return_diff_smaller_3).wcs == 2 assert ccd1.multiply(ccd2, compare_wcs=return_diff_smaller_3).wcs == 2 assert ccd1.divide(ccd2, compare_wcs=return_diff_smaller_3).wcs == 2 def test_arithmetic_with_wcs_compare_fail(): def return_diff_smaller_1(first, second): return abs(first - second) <= 1 ccd1 = CCDData(np.ones((10, 10)), unit='', wcs=2) ccd2 = CCDData(np.ones((10, 10)), unit='', wcs=5) with pytest.raises(ValueError): ccd1.add(ccd2, compare_wcs=return_diff_smaller_1).wcs with pytest.raises(ValueError): ccd1.subtract(ccd2, compare_wcs=return_diff_smaller_1).wcs with pytest.raises(ValueError): ccd1.multiply(ccd2, compare_wcs=return_diff_smaller_1).wcs with pytest.raises(ValueError): ccd1.divide(ccd2, compare_wcs=return_diff_smaller_1).wcs def test_arithmetic_overload_ccddata_operand(ccd_data): ccd_data.uncertainty = StdDevUncertainty(np.ones_like(ccd_data)) operand = ccd_data.copy() result = ccd_data.add(operand) assert len(result.meta) == 0 np.testing.assert_array_equal(result.data, 2 * ccd_data.data) np.testing.assert_array_equal(result.uncertainty.array, np.sqrt(2) * ccd_data.uncertainty.array) result = ccd_data.subtract(operand) assert len(result.meta) == 0 np.testing.assert_array_equal(result.data, 0 * ccd_data.data) np.testing.assert_array_equal(result.uncertainty.array, np.sqrt(2) * ccd_data.uncertainty.array) result = ccd_data.multiply(operand) assert len(result.meta) == 0 np.testing.assert_array_equal(result.data, ccd_data.data ** 2) expected_uncertainty = (np.sqrt(2) * np.abs(ccd_data.data) * 
ccd_data.uncertainty.array) np.testing.assert_allclose(result.uncertainty.array, expected_uncertainty) result = ccd_data.divide(operand) assert len(result.meta) == 0 np.testing.assert_array_equal(result.data, np.ones_like(ccd_data.data)) expected_uncertainty = (np.sqrt(2) / np.abs(ccd_data.data) * ccd_data.uncertainty.array) np.testing.assert_allclose(result.uncertainty.array, expected_uncertainty) def test_arithmetic_overload_differing_units(): a = np.array([1, 2, 3]) * u.m b = np.array([1, 2, 3]) * u.cm ccddata = CCDData(a) # TODO: Could also be parametrized. res = ccddata.add(b) np.testing.assert_array_almost_equal(res.data, np.add(a, b).value) assert res.unit == np.add(a, b).unit res = ccddata.subtract(b) np.testing.assert_array_almost_equal(res.data, np.subtract(a, b).value) assert res.unit == np.subtract(a, b).unit res = ccddata.multiply(b) np.testing.assert_array_almost_equal(res.data, np.multiply(a, b).value) assert res.unit == np.multiply(a, b).unit res = ccddata.divide(b) np.testing.assert_array_almost_equal(res.data, np.divide(a, b).value) assert res.unit == np.divide(a, b).unit def test_arithmetic_add_with_array(): ccd = CCDData(np.ones((3, 3)), unit='') res = ccd.add(np.arange(3)) np.testing.assert_array_equal(res.data, [[1, 2, 3]] * 3) ccd = CCDData(np.ones((3, 3)), unit='adu') with pytest.raises(ValueError): ccd.add(np.arange(3)) def test_arithmetic_subtract_with_array(): ccd = CCDData(np.ones((3, 3)), unit='') res = ccd.subtract(np.arange(3)) np.testing.assert_array_equal(res.data, [[1, 0, -1]] * 3) ccd = CCDData(np.ones((3, 3)), unit='adu') with pytest.raises(ValueError): ccd.subtract(np.arange(3)) def test_arithmetic_multiply_with_array(): ccd = CCDData(np.ones((3, 3)) * 3, unit=u.m) res = ccd.multiply(np.ones((3, 3)) * 2) np.testing.assert_array_equal(res.data, [[6, 6, 6]] * 3) assert res.unit == ccd.unit def test_arithmetic_divide_with_array(): ccd = CCDData(np.ones((3, 3)), unit=u.m) res = ccd.divide(np.ones((3, 3)) * 2) np.testing.assert_array_equal(res.data, [[0.5, 0.5, 0.5]] * 3) assert res.unit == ccd.unit def test_history_preserved_if_metadata_is_fits_header(tmpdir): fake_img = np.random.random(size=(100, 100)) hdu = fits.PrimaryHDU(fake_img) hdu.header['history'] = 'one' hdu.header['history'] = 'two' hdu.header['history'] = 'three' assert len(hdu.header['history']) == 3 tmp_file = tmpdir.join('temp.fits').strpath hdu.writeto(tmp_file) ccd_read = CCDData.read(tmp_file, unit="adu") assert ccd_read.header['history'] == hdu.header['history'] def test_infol_logged_if_unit_in_fits_header(ccd_data, tmpdir): tmpfile = tmpdir.join('temp.fits') ccd_data.write(tmpfile.strpath) log.setLevel('INFO') explicit_unit_name = "photon" with log.log_to_list() as log_list: ccd_from_disk = CCDData.read(tmpfile.strpath, unit=explicit_unit_name) assert explicit_unit_name in log_list[0].message def test_wcs_attribute(ccd_data, tmpdir): """ Check that WCS attribute gets added to header, and that if a CCDData object is created from a FITS file with a header, and the WCS attribute is modified, then the CCDData object is turned back into an hdu, the WCS object overwrites the old WCS information in the header. """ tmpfile = tmpdir.join('temp.fits') # This wcs example is taken from the astropy.wcs docs. 
wcs = WCS(naxis=2) wcs.wcs.crpix = np.array(ccd_data.shape) / 2 wcs.wcs.cdelt = np.array([-0.066667, 0.066667]) wcs.wcs.crval = [0, -90] wcs.wcs.ctype = ["RA---AIR", "DEC--AIR"] wcs.wcs.set_pv([(2, 1, 45.0)]) ccd_data.header = ccd_data.to_hdu()[0].header ccd_data.header.extend(wcs.to_header(), useblanks=False) ccd_data.write(tmpfile.strpath) # Get the header length after it has been extended by the WCS keywords original_header_length = len(ccd_data.header) ccd_new = CCDData.read(tmpfile.strpath) # WCS attribute should be set for ccd_new assert ccd_new.wcs is not None # WCS attribute should be equal to wcs above. assert ccd_new.wcs.wcs == wcs.wcs # Converting CCDData object with wcs to an hdu shouldn't # create duplicate wcs-related entries in the header. ccd_new_hdu = ccd_new.to_hdu()[0] assert len(ccd_new_hdu.header) == original_header_length # Making a CCDData with WCS (but not WCS in the header) should lead to # WCS information in the header when it is converted to an HDU. ccd_wcs_not_in_header = CCDData(ccd_data.data, wcs=wcs, unit="adu") hdu = ccd_wcs_not_in_header.to_hdu()[0] wcs_header = wcs.to_header() for k in wcs_header.keys(): # Skip these keywords if they are in the WCS header because they are # not WCS-specific. if k in ['', 'COMMENT', 'HISTORY']: continue # No keyword from the WCS should be in the header. assert k not in ccd_wcs_not_in_header.header # Every keyword in the WCS should be in the header of the HDU assert hdu.header[k] == wcs_header[k] # Now check that if WCS of a CCDData is modified, then the CCDData is # converted to an HDU, the WCS keywords in the header are overwritten # with the appropriate keywords from the header. # # ccd_new has a WCS and WCS keywords in the header, so try modifying # the WCS. ccd_new.wcs.wcs.cdelt *= 2 ccd_new_hdu_mod_wcs = ccd_new.to_hdu()[0] assert ccd_new_hdu_mod_wcs.header['CDELT1'] == ccd_new.wcs.wcs.cdelt[0] assert ccd_new_hdu_mod_wcs.header['CDELT2'] == ccd_new.wcs.wcs.cdelt[1] def test_wcs_keywords_removed_from_header(): """ Test, for the file included with the nddata tests, that WCS keywords are properly removed from header. """ from ..ccddata import _KEEP_THESE_KEYWORDS_IN_HEADER keepers = set(_KEEP_THESE_KEYWORDS_IN_HEADER) data_file = get_pkg_data_filename('data/sip-wcs.fits') ccd = CCDData.read(data_file) wcs_header = ccd.wcs.to_header() assert not (set(wcs_header) & set(ccd.meta) - keepers) # Make sure that exceptions are not raised when trying to remove missing # keywords. o4sp040b0_raw.fits of io.fits is missing keyword 'PC1_1'. data_file1 = get_pkg_data_filename('../../io/fits/tests/data/o4sp040b0_raw.fits') ccd = CCDData.read(data_file1, unit='count') def test_wcs_keyword_removal_for_wcs_test_files(): """ Test, for the WCS test files, that keyword removall works as expected. Those cover a much broader range of WCS types than test_wcs_keywords_removed_from_header """ from ..ccddata import _generate_wcs_and_update_header from ..ccddata import _KEEP_THESE_KEYWORDS_IN_HEADER keepers = set(_KEEP_THESE_KEYWORDS_IN_HEADER) wcs_headers = get_pkg_data_filenames('../../wcs/tests/data', pattern='*.hdr') for hdr in wcs_headers: # Skip the files that are expected to be bad... if 'invalid' in hdr or 'nonstandard' in hdr or 'segfault' in hdr: continue header_string = get_pkg_data_contents(hdr) wcs = WCS(header_string) header = wcs.to_header(relax=True) new_header, new_wcs = _generate_wcs_and_update_header(header) # Make sure all of the WCS-related keywords have been removed. 
assert not (set(new_header) & set(new_wcs.to_header(relax=True)) - keepers) # Check that the new wcs is the same as the old. new_wcs_header = new_wcs.to_header(relax=True) for k, v in new_wcs_header.items(): if isinstance(v, str): assert header[k] == v else: np.testing.assert_almost_equal(header[k], v) def test_read_wcs_not_creatable(tmpdir): # The following Header can't be converted to a WCS object. See also #6499. hdr_txt_example_WCS = textwrap.dedent(''' SIMPLE = T / Fits standard BITPIX = 16 / Bits per pixel NAXIS = 2 / Number of axes NAXIS1 = 1104 / Axis length NAXIS2 = 4241 / Axis length CRVAL1 = 164.98110962 / Physical value of the reference pixel X CRVAL2 = 44.34089279 / Physical value of the reference pixel Y CRPIX1 = -34.0 / Reference pixel in X (pixel) CRPIX2 = 2041.0 / Reference pixel in Y (pixel) CDELT1 = 0.10380000 / X Scale projected on detector (#/pix) CDELT2 = 0.10380000 / Y Scale projected on detector (#/pix) CTYPE1 = 'RA---TAN' / Pixel coordinate system CTYPE2 = 'WAVELENGTH' / Pixel coordinate system CUNIT1 = 'degree ' / Units used in both CRVAL1 and CDELT1 CUNIT2 = 'nm ' / Units used in both CRVAL2 and CDELT2 CD1_1 = 0.20760000 / Pixel Coordinate translation matrix CD1_2 = 0.00000000 / Pixel Coordinate translation matrix CD2_1 = 0.00000000 / Pixel Coordinate translation matrix CD2_2 = 0.10380000 / Pixel Coordinate translation matrix C2YPE1 = 'RA---TAN' / Pixel coordinate system C2YPE2 = 'DEC--TAN' / Pixel coordinate system C2NIT1 = 'degree ' / Units used in both C2VAL1 and C2ELT1 C2NIT2 = 'degree ' / Units used in both C2VAL2 and C2ELT2 RADECSYS= 'FK5 ' / The equatorial coordinate system ''') with catch_warnings(FITSFixedWarning): hdr = fits.Header.fromstring(hdr_txt_example_WCS, sep='\n') hdul = fits.HDUList([fits.PrimaryHDU(np.ones((4241, 1104)), header=hdr)]) filename = tmpdir.join('afile.fits').strpath hdul.writeto(filename) # The hdr cannot be converted to a WCS object because of an # InconsistentAxisTypesError but it should still open the file ccd = CCDData.read(filename, unit='adu') assert ccd.wcs is None def test_header(ccd_data): a = {'Observer': 'Hubble'} ccd = CCDData(ccd_data, header=a) assert ccd.meta == a def test_wcs_arithmetic(ccd_data): ccd_data.wcs = 5 result = ccd_data.multiply(1.0) assert result.wcs == 5 @pytest.mark.parametrize('operation', ['multiply', 'divide', 'add', 'subtract']) def test_wcs_arithmetic_ccd(ccd_data, operation): ccd_data2 = ccd_data.copy() ccd_data.wcs = 5 method = ccd_data.__getattribute__(operation) result = method(ccd_data2) assert result.wcs == ccd_data.wcs assert ccd_data2.wcs is None def test_wcs_sip_handling(): """ Check whether the ctypes RA---TAN-SIP and DEC--TAN-SIP survive a roundtrip unchanged. """ data_file = get_pkg_data_filename('data/sip-wcs.fits') def check_wcs_ctypes(header): expected_wcs_ctypes = { 'CTYPE1': 'RA---TAN-SIP', 'CTYPE2': 'DEC--TAN-SIP' } return [header[k] == v for k, v in expected_wcs_ctypes.items()] ccd_original = CCDData.read(data_file) # After initialization the keywords should be in the WCS, not in the # meta. 
with fits.open(data_file) as raw: good_ctype = check_wcs_ctypes(raw[0].header) assert all(good_ctype) ccd_new = ccd_original.to_hdu() good_ctype = check_wcs_ctypes(ccd_new[0].header) assert all(good_ctype) # Try converting to header with wcs_relax=False and # the header should contain the CTYPE keywords without # the -SIP ccd_no_relax = ccd_original.to_hdu(wcs_relax=False) good_ctype = check_wcs_ctypes(ccd_no_relax[0].header) assert not any(good_ctype) assert ccd_no_relax[0].header['CTYPE1'] == 'RA---TAN' assert ccd_no_relax[0].header['CTYPE2'] == 'DEC--TAN' @pytest.mark.parametrize('operation', ['multiply', 'divide', 'add', 'subtract']) def test_mask_arithmetic_ccd(ccd_data, operation): ccd_data2 = ccd_data.copy() ccd_data.mask = (ccd_data.data > 0) method = ccd_data.__getattribute__(operation) result = method(ccd_data2) np.testing.assert_equal(result.mask, ccd_data.mask) def test_write_read_multiextensionfits_mask_default(ccd_data, tmpdir): # Test that if a mask is present the mask is saved and loaded by default. ccd_data.mask = ccd_data.data > 10 filename = tmpdir.join('afile.fits').strpath ccd_data.write(filename) ccd_after = CCDData.read(filename) assert ccd_after.mask is not None np.testing.assert_array_equal(ccd_data.mask, ccd_after.mask) def test_write_read_multiextensionfits_uncertainty_default(ccd_data, tmpdir): # Test that if a uncertainty is present it is saved and loaded by default. ccd_data.uncertainty = StdDevUncertainty(ccd_data.data * 10) filename = tmpdir.join('afile.fits').strpath ccd_data.write(filename) ccd_after = CCDData.read(filename) assert ccd_after.uncertainty is not None np.testing.assert_array_equal(ccd_data.uncertainty.array, ccd_after.uncertainty.array) def test_write_read_multiextensionfits_not(ccd_data, tmpdir): # Test that writing mask and uncertainty can be disabled ccd_data.mask = ccd_data.data > 10 ccd_data.uncertainty = StdDevUncertainty(ccd_data.data * 10) filename = tmpdir.join('afile.fits').strpath ccd_data.write(filename, hdu_mask=None, hdu_uncertainty=None) ccd_after = CCDData.read(filename) assert ccd_after.uncertainty is None assert ccd_after.mask is None def test_write_read_multiextensionfits_custom_ext_names(ccd_data, tmpdir): # Test writing mask, uncertainty in another extension than default ccd_data.mask = ccd_data.data > 10 ccd_data.uncertainty = StdDevUncertainty(ccd_data.data * 10) filename = tmpdir.join('afile.fits').strpath ccd_data.write(filename, hdu_mask='Fun', hdu_uncertainty='NoFun') # Try reading with defaults extension names ccd_after = CCDData.read(filename) assert ccd_after.uncertainty is None assert ccd_after.mask is None # Try reading with custom extension names ccd_after = CCDData.read(filename, hdu_mask='Fun', hdu_uncertainty='NoFun') assert ccd_after.uncertainty is not None assert ccd_after.mask is not None np.testing.assert_array_equal(ccd_data.mask, ccd_after.mask) np.testing.assert_array_equal(ccd_data.uncertainty.array, ccd_after.uncertainty.array) def test_wcs(ccd_data): ccd_data.wcs = 5 assert ccd_data.wcs == 5 def test_recognized_fits_formats_for_read_write(ccd_data, tmpdir): # These are the extensions that are supposed to be supported. 
supported_extensions = ['fit', 'fits', 'fts'] for ext in supported_extensions: path = tmpdir.join("test.{}".format(ext)) ccd_data.write(path.strpath) from_disk = CCDData.read(path.strpath) assert (ccd_data.data == from_disk.data).all() def test_stddevuncertainty_compat_descriptor_no_parent(): with pytest.raises(MissingDataAssociationException): StdDevUncertainty(np.ones((10, 10))).parent_nddata def test_stddevuncertainty_compat_descriptor_no_weakref(): # TODO: Remove this test if astropy 1.0 isn't supported anymore # This test might create a Memoryleak on purpose, so the last lines after # the assert are IMPORTANT cleanup. ccd = CCDData(np.ones((10, 10)), unit='') uncert = StdDevUncertainty(np.ones((10, 10))) uncert._parent_nddata = ccd assert uncert.parent_nddata is ccd uncert._parent_nddata = None
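The multi-extension FITS tests above correspond to the following usage sketch, again assuming the public astropy.nddata API; the file name is illustrative. Mask and uncertainty are written to their own HDUs by default and restored on read, and the unit is recovered from the BUNIT keyword.

import numpy as np
from astropy import units as u
from astropy.nddata import CCDData, StdDevUncertainty

ccd = CCDData(np.random.normal(size=(10, 10)), unit=u.adu)
ccd.mask = ccd.data > 1
ccd.uncertainty = StdDevUncertainty(np.abs(ccd.data))

ccd.write('example.fits')                 # mask and uncertainty saved by default
restored = CCDData.read('example.fits')   # unit restored from the BUNIT keyword
np.testing.assert_array_equal(restored.mask, ccd.mask)
np.testing.assert_array_equal(restored.uncertainty.array, ccd.uncertainty.array)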
funbaker/astropy
astropy/nddata/tests/test_ccddata.py
astropy/modeling/tests/example_models.py
""" Utilities for computing periodogram statistics. This is an internal module; users should access this functionality via the ``false_alarm_probability`` and ``false_alarm_level`` methods of the ``astropy.stats.LombScargle`` API. """ from functools import wraps import numpy as np def _weighted_sum(val, dy): if dy is not None: return (val / dy ** 2).sum() else: return val.sum() def _weighted_mean(val, dy): if dy is None: return val.mean() else: return _weighted_sum(val, dy) / _weighted_sum(np.ones_like(val), dy) def _weighted_var(val, dy): return _weighted_mean(val ** 2, dy) - _weighted_mean(val, dy) ** 2 def _gamma(N): from scipy.special import gammaln # Note: this is closely approximated by (1 - 0.75 / N) for large N return np.sqrt(2 / N) * np.exp(gammaln(N / 2) - gammaln((N - 1) / 2)) def _log_gamma(N): from scipy.special import gammaln return 0.5 * np.log(2 / N) + gammaln(N / 2) - gammaln((N - 1) / 2) def vectorize_first_argument(func): @wraps(func) def new_func(x, *args, **kwargs): x = np.asarray(x) return np.array([func(xi, *args, **kwargs) for xi in x.flat]).reshape(x.shape) return new_func def pdf_single(z, N, normalization, dH=1, dK=3): """Probability density function for Lomb-Scargle periodogram Compute the expected probability density function of the periodogram for the null hypothesis - i.e. data consisting of Gaussian noise. Parameters ---------- z : array-like The periodogram value. N : int The number of data points from which the periodogram was computed. normalization : {'standard', 'model', 'log', 'psd'} The periodogram normalization. dH, dK : integers, optional The number of parameters in the null hypothesis and the model. Returns ------- pdf : np.ndarray The expected probability density function. Notes ----- For normalization='psd', the distribution can only be computed for periodograms constructed with errors specified. All expressions used here are adapted from Table 1 of Baluev 2008 [1]_. References ---------- .. [1] Baluev, R.V. MNRAS 385, 1279 (2008) """ z = np.asarray(z) if dK - dH != 2: raise NotImplementedError("Degrees of freedom != 2") Nk = N - dK if normalization == 'psd': return np.exp(-z) elif normalization == 'standard': return 0.5 * Nk * (1 - z) ** (0.5 * Nk - 1) elif normalization == 'model': return 0.5 * Nk * (1 + z) ** (-0.5 * Nk - 1) elif normalization == 'log': return 0.5 * Nk * np.exp(-0.5 * Nk * z) else: raise ValueError("normalization='{0}' is not recognized" "".format(normalization)) def fap_single(z, N, normalization, dH=1, dK=3): """Single-frequency false alarm probability for the Lomb-Scargle periodogram This is equal to 1 - cdf, where cdf is the cumulative distribution. The single-frequency false alarm probability should not be confused with the false alarm probability for the largest peak. Parameters ---------- z : array-like The periodogram value. N : int The number of data points from which the periodogram was computed. normalization : {'standard', 'model', 'log', 'psd'} The periodogram normalization. dH, dK : integers, optional The number of parameters in the null hypothesis and the model. Returns ------- false_alarm_probability : np.ndarray The single-frequency false alarm probability. Notes ----- For normalization='psd', the distribution can only be computed for periodograms constructed with errors specified. All expressions used here are adapted from Table 1 of Baluev 2008 [1]_. References ---------- .. [1] Baluev, R.V. 
MNRAS 385, 1279 (2008) """ z = np.asarray(z) if dK - dH != 2: raise NotImplementedError("Degrees of freedom != 2") Nk = N - dK if normalization == 'psd': return np.exp(-z) elif normalization == 'standard': return (1 - z) ** (0.5 * Nk) elif normalization == 'model': return (1 + z) ** (-0.5 * Nk) elif normalization == 'log': return np.exp(-0.5 * Nk * z) else: raise ValueError("normalization='{0}' is not recognized" "".format(normalization)) def inv_fap_single(fap, N, normalization, dH=1, dK=3): """Single-frequency inverse false alarm probability This function computes the periodogram value associated with the specified single-frequency false alarm probability. This should not be confused with the false alarm level of the largest peak. Parameters ---------- fap : array-like The false alarm probability. N : int The number of data points from which the periodogram was computed. normalization : {'standard', 'model', 'log', 'psd'} The periodogram normalization. dH, dK : integers, optional The number of parameters in the null hypothesis and the model. Returns ------- z : np.ndarray The periodogram power corresponding to the single-peak false alarm probability. Notes ----- For normalization='psd', the distribution can only be computed for periodograms constructed with errors specified. All expressions used here are adapted from Table 1 of Baluev 2008 [1]_. References ---------- .. [1] Baluev, R.V. MNRAS 385, 1279 (2008) """ fap = np.asarray(fap) if dK - dH != 2: raise NotImplementedError("Degrees of freedom != 2") Nk = N - dK if normalization == 'psd': return -np.log(fap) elif normalization == 'standard': return 1 - fap ** (2 / Nk) elif normalization == 'model': return -1 + fap ** (-2 / Nk) elif normalization == 'log': return -2 / Nk * np.log(fap) else: raise ValueError("normalization='{0}' is not recognized" "".format(normalization)) def cdf_single(z, N, normalization, dH=1, dK=3): """Cumulative distribution for the Lomb-Scargle periodogram Compute the expected cumulative distribution of the periodogram for the null hypothesis - i.e. data consisting of Gaussian noise. Parameters ---------- z : array-like The periodogram value. N : int The number of data points from which the periodogram was computed. normalization : {'standard', 'model', 'log', 'psd'} The periodogram normalization. dH, dK : integers, optional The number of parameters in the null hypothesis and the model. Returns ------- cdf : np.ndarray The expected cumulative distribution function. Notes ----- For normalization='psd', the distribution can only be computed for periodograms constructed with errors specified. All expressions used here are adapted from Table 1 of Baluev 2008 [1]_. References ---------- .. [1] Baluev, R.V. 
MNRAS 385, 1279 (2008) """ return 1 - fap_single(z, N, normalization=normalization, dH=dH, dK=dK) def tau_davies(Z, fmax, t, y, dy, normalization='standard', dH=1, dK=3): """tau factor for estimating Davies bound (Baluev 2008, Table 1)""" N = len(t) NH = N - dH # DOF for null hypothesis NK = N - dK # DOF for periodic hypothesis Dt = _weighted_var(t, dy) Teff = np.sqrt(4 * np.pi * Dt) # Effective baseline W = fmax * Teff Z = np.asarray(Z) if normalization == 'psd': # 'psd' normalization is same as Baluev's z return W * np.exp(-Z) * np.sqrt(Z) elif normalization == 'standard': # 'standard' normalization is Z = 2/NH * z_1 return (_gamma(NH) * W * (1 - Z) ** (0.5 * (NK - 1)) * np.sqrt(0.5 * NH * Z)) elif normalization == 'model': # 'model' normalization is Z = 2/NK * z_2 return (_gamma(NK) * W * (1 + Z) ** (-0.5 * NK) * np.sqrt(0.5 * NK * Z)) elif normalization == 'log': # 'log' normalization is Z = 2/NK * z_3 return (_gamma(NK) * W * np.exp(-0.5 * Z * (NK - 0.5)) * np.sqrt(NK * np.sinh(0.5 * Z))) else: raise NotImplementedError("normalization={0}".format(normalization)) def fap_naive(Z, fmax, t, y, dy, normalization='standard'): """False Alarm Probability based on estimated number of indep frequencies""" N = len(t) T = max(t) - min(t) N_eff = fmax * T fap_s = fap_single(Z, N, normalization=normalization) # result is 1 - (1 - fap_s) ** N_eff # this is much more precise for small Z / large N return -np.expm1(N_eff * np.log1p(-fap_s)) def inv_fap_naive(fap, fmax, t, y, dy, normalization='standard'): """Inverse FAP based on estimated number of indep frequencies""" fap = np.asarray(fap) N = len(t) T = max(t) - min(t) N_eff = fmax * T #fap_s = 1 - (1 - fap) ** (1 / N_eff) fap_s = -np.expm1(np.log(1 - fap) / N_eff) return inv_fap_single(fap_s, N, normalization) def fap_davies(Z, fmax, t, y, dy, normalization='standard'): """Davies upper-bound to the false alarm probability (Eqn 5 of Baluev 2008) """ N = len(t) fap_s = fap_single(Z, N, normalization=normalization) tau = tau_davies(Z, fmax, t, y, dy, normalization=normalization) return fap_s + tau @vectorize_first_argument def inv_fap_davies(p, fmax, t, y, dy, normalization='standard'): """Inverse of the davies upper-bound""" from scipy import optimize args = (fmax, t, y, dy, normalization) z0 = inv_fap_naive(p, *args) func = lambda z, *args: fap_davies(z, *args) - p res = optimize.root(func, z0, args=args, method='lm') if not res.success: raise ValueError('inv_fap_baluev did not converge for p={0}'.format(p)) return res.x def fap_baluev(Z, fmax, t, y, dy, normalization='standard'): """Alias-free approximation to false alarm probability (Eqn 6 of Baluev 2008) """ fap_s = fap_single(Z, len(t), normalization) tau = tau_davies(Z, fmax, t, y, dy, normalization=normalization) # result is 1 - (1 - fap_s) * np.exp(-tau) # this is much more precise for small numbers return -np.expm1(-tau) + fap_s * np.exp(-tau) @vectorize_first_argument def inv_fap_baluev(p, fmax, t, y, dy, normalization='standard'): """Inverse of the Baluev alias-free approximation""" from scipy import optimize args = (fmax, t, y, dy, normalization) z0 = inv_fap_naive(p, *args) func = lambda z, *args: fap_baluev(z, *args) - p res = optimize.root(func, z0, args=args, method='lm') if not res.success: raise ValueError('inv_fap_baluev did not converge for p={0}'.format(p)) return res.x def _bootstrap_max(t, y, dy, fmax, normalization, random_seed): """Generate a sequence of bootstrap estimates of the max""" from .core import LombScargle rng = np.random.RandomState(random_seed) while True: s = 
rng.randint(0, len(y), len(y)) # sample with replacement ls_boot = LombScargle(t, y[s], dy if dy is None else dy[s], normalization=normalization) freq, power = ls_boot.autopower(maximum_frequency=fmax) yield power.max() def fap_bootstrap(Z, fmax, t, y, dy, normalization='standard', n_bootstraps=1000, random_seed=None): """Bootstrap estimate of the false alarm probability""" pmax = np.fromiter(_bootstrap_max(t, y, dy, fmax, normalization, random_seed), float, n_bootstraps) pmax.sort() return 1 - np.searchsorted(pmax, Z) / len(pmax) def inv_fap_bootstrap(fap, fmax, t, y, dy, normalization='standard', n_bootstraps=1000, random_seed=None): """Bootstrap estimate of the inverse false alarm probability""" fap = np.asarray(fap) pmax = np.fromiter(_bootstrap_max(t, y, dy, fmax, normalization, random_seed), float, n_bootstraps) pmax.sort() return pmax[np.clip(np.floor((1 - fap) * len(pmax)).astype(int), 0, len(pmax) - 1)] METHODS = {'single': fap_single, 'naive': fap_naive, 'davies': fap_davies, 'baluev': fap_baluev, 'bootstrap': fap_bootstrap} def false_alarm_probability(Z, fmax, t, y, dy, normalization='standard', method='baluev', method_kwds=None): """Compute the approximate false alarm probability for periodogram peaks Z This gives an estimate of the false alarm probability for the largest value in a periodogram, based on the null hypothesis of non-varying data with Gaussian noise. The true probability cannot be computed analytically, so each method available here is an approximation to the true value. Parameters ---------- Z : array-like The periodogram value. fmax : float The maximum frequency of the periodogram. t, y, dy : array-like The data times, values, and errors. normalization : {'standard', 'model', 'log', 'psd'}, optional The periodogram normalization. method : {'baluev', 'davies', 'naive', 'bootstrap'}, optional The approximation method to use. method_kwds : dict, optional Additional method-specific keywords. Returns ------- false_alarm_probability : np.ndarray The false alarm probability. Notes ----- For normalization='psd', the distribution can only be computed for periodograms constructed with errors specified. See Also -------- false_alarm_level : compute the periodogram level for a particular fap References ---------- .. [1] Baluev, R.V. MNRAS 385, 1279 (2008) """ if method == 'single': return fap_single(Z, len(t), normalization) elif method not in METHODS: raise ValueError("Unrecognized method: {0}".format(method)) method = METHODS[method] method_kwds = method_kwds or {} return method(Z, fmax, t, y, dy, normalization, **method_kwds) INV_METHODS = {'single': inv_fap_single, 'naive': inv_fap_naive, 'davies': inv_fap_davies, 'baluev': inv_fap_baluev, 'bootstrap': inv_fap_bootstrap} def false_alarm_level(p, fmax, t, y, dy, normalization, method='baluev', method_kwds=None): """Compute the approximate periodogram level given a false alarm probability This gives an estimate of the periodogram level corresponding to a specified false alarm probability for the largest peak, assuming a null hypothesis of non-varying data with Gaussian noise. The true level cannot be computed analytically, so each method available here is an approximation to the true value. Parameters ---------- p : array-like The false alarm probability (0 < p < 1). fmax : float The maximum frequency of the periodogram. t, y, dy : arrays The data times, values, and errors. normalization : {'standard', 'model', 'log', 'psd'}, optional The periodogram normalization. 
method : {'baluev', 'davies', 'naive', 'bootstrap'}, optional The approximation method to use. method_kwds : dict, optional Additional method-specific keywords. Returns ------- z : np.ndarray The periodogram level. Notes ----- For normalization='psd', the distribution can only be computed for periodograms constructed with errors specified. See Also -------- false_alarm_probability : compute the fap for a given periodogram level References ---------- .. [1] Baluev, R.V. MNRAS 385, 1279 (2008) """ if method == 'single': return inv_fap_single(p, len(t), normalization) elif method not in INV_METHODS: raise ValueError("Unrecognized method: {0}".format(method)) method = INV_METHODS[method] method_kwds = method_kwds or {} return method(p, fmax, t, y, dy, normalization, **method_kwds)
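# ---------------------------------------------------------------------------
# Minimal usage sketch (an illustrative addition, not part of the original
# module).  It assumes the module-level helpers defined above and the
# module's ``numpy as np`` import; the simulated observation times, errors,
# peak value, and frequency limit are made-up inputs.
# ---------------------------------------------------------------------------
if __name__ == '__main__':  # pragma: no cover
    rng = np.random.RandomState(42)
    t = np.sort(100 * rng.rand(60))      # irregular observation times
    dy = 0.1 * np.ones_like(t)           # homoscedastic measurement errors
    y = np.sin(2 * np.pi * t / 7.0) + dy * rng.randn(len(t))

    fmax = 2.0   # maximum frequency searched by the periodogram
    Z = 0.6      # hypothetical peak power in the 'standard' normalization

    # Approximate probability that pure Gaussian noise yields a peak >= Z
    fap = false_alarm_probability(Z, fmax, t, y, dy,
                                  normalization='standard', method='baluev')

    # Periodogram level corresponding to a 1% false alarm probability
    level = false_alarm_level(0.01, fmax, t, y, dy,
                              normalization='standard', method='baluev')

    print('FAP of peak:', fap)
    print('1% false alarm level:', level)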
# Licensed under a 3-clause BSD style license - see LICENSE.rst # This module implements the base CCDData class. import textwrap import numpy as np import pytest from ...io import fits from ..nduncertainty import StdDevUncertainty, MissingDataAssociationException from ... import units as u from ... import log from ...wcs import WCS, FITSFixedWarning from ...tests.helper import catch_warnings from ...utils import NumpyRNGContext from ...utils.data import (get_pkg_data_filename, get_pkg_data_filenames, get_pkg_data_contents) from ..ccddata import CCDData # If additional pytest markers are defined the key in the dictionary below # should be the name of the marker. DEFAULTS = { 'seed': 123, 'data_size': 100, 'data_scale': 1.0, 'data_mean': 0.0 } DEFAULT_SEED = 123 DEFAULT_DATA_SIZE = 100 DEFAULT_DATA_SCALE = 1.0 def value_from_markers(key, request): try: val = request.keywords[key].args[0] except KeyError: val = DEFAULTS[key] return val @pytest.fixture def ccd_data(request): """ Return a CCDData object with units of ADU. The size of the data array is 100x100 but can be changed using the marker @pytest.mark.data_size(N) on the test function, where N should be the desired dimension. Data values are initialized to random numbers drawn from a normal distribution with mean of 0 and scale 1. The scale can be changed with the marker @pytest.marker.scale(s) on the test function, where s is the desired scale. The mean can be changed with the marker @pytest.marker.scale(m) on the test function, where m is the desired mean. """ size = value_from_markers('data_size', request) scale = value_from_markers('data_scale', request) mean = value_from_markers('data_mean', request) with NumpyRNGContext(DEFAULTS['seed']): data = np.random.normal(loc=mean, size=[size, size], scale=scale) fake_meta = {'my_key': 42, 'your_key': 'not 42'} ccd = CCDData(data, unit=u.adu) ccd.header = fake_meta return ccd def test_ccddata_empty(): with pytest.raises(TypeError): CCDData() # empty initializer should fail def test_ccddata_must_have_unit(): with pytest.raises(ValueError): CCDData(np.zeros([100, 100])) def test_ccddata_unit_cannot_be_set_to_none(ccd_data): with pytest.raises(TypeError): ccd_data.unit = None def test_ccddata_meta_header_conflict(): with pytest.raises(ValueError) as exc: CCDData([1, 2, 3], unit='', meta={1: 1}, header={2: 2}) assert "can't have both header and meta." in str(exc) @pytest.mark.data_size(10) def test_ccddata_simple(ccd_data): assert ccd_data.shape == (10, 10) assert ccd_data.size == 100 assert ccd_data.dtype == np.dtype(float) def test_ccddata_init_with_string_electron_unit(): ccd = CCDData(np.zeros((10, 10)), unit="electron") assert ccd.unit is u.electron @pytest.mark.data_size(10) def test_initialize_from_FITS(ccd_data, tmpdir): hdu = fits.PrimaryHDU(ccd_data) hdulist = fits.HDUList([hdu]) filename = tmpdir.join('afile.fits').strpath hdulist.writeto(filename) cd = CCDData.read(filename, unit=u.electron) assert cd.shape == (10, 10) assert cd.size == 100 assert np.issubdtype(cd.data.dtype, np.floating) for k, v in hdu.header.items(): assert cd.meta[k] == v def test_initialize_from_fits_with_unit_in_header(tmpdir): fake_img = np.random.random(size=(100, 100)) hdu = fits.PrimaryHDU(fake_img) hdu.header['bunit'] = u.adu.to_string() filename = tmpdir.join('afile.fits').strpath hdu.writeto(filename) ccd = CCDData.read(filename) # ccd should pick up the unit adu from the fits header...did it? 
assert ccd.unit is u.adu # An explicit unit in the read overrides any unit in the FITS file ccd2 = CCDData.read(filename, unit="photon") assert ccd2.unit is u.photon def test_initialize_from_fits_with_ADU_in_header(tmpdir): fake_img = np.random.random(size=(100, 100)) hdu = fits.PrimaryHDU(fake_img) hdu.header['bunit'] = 'ADU' filename = tmpdir.join('afile.fits').strpath hdu.writeto(filename) ccd = CCDData.read(filename) # ccd should pick up the unit adu from the fits header...did it? assert ccd.unit is u.adu def test_initialize_from_fits_with_data_in_different_extension(tmpdir): fake_img = np.random.random(size=(100, 100)) hdu1 = fits.PrimaryHDU() hdu2 = fits.ImageHDU(fake_img) hdus = fits.HDUList([hdu1, hdu2]) filename = tmpdir.join('afile.fits').strpath hdus.writeto(filename) with catch_warnings(FITSFixedWarning) as w: ccd = CCDData.read(filename, unit='adu') assert len(w) == 0 # ccd should pick up the unit adu from the fits header...did it? np.testing.assert_array_equal(ccd.data, fake_img) # check that the header is the combined header assert hdu2.header + hdu1.header == ccd.header def test_initialize_from_fits_with_extension(tmpdir): fake_img1 = np.random.random(size=(100, 100)) fake_img2 = np.random.random(size=(100, 100)) hdu0 = fits.PrimaryHDU() hdu1 = fits.ImageHDU(fake_img1) hdu2 = fits.ImageHDU(fake_img2) hdus = fits.HDUList([hdu0, hdu1, hdu2]) filename = tmpdir.join('afile.fits').strpath hdus.writeto(filename) ccd = CCDData.read(filename, hdu=2, unit='adu') # ccd should pick up the unit adu from the fits header...did it? np.testing.assert_array_equal(ccd.data, fake_img2) def test_write_unit_to_hdu(ccd_data, tmpdir): ccd_unit = ccd_data.unit hdulist = ccd_data.to_hdu() assert 'bunit' in hdulist[0].header assert hdulist[0].header['bunit'] == ccd_unit.to_string() def test_initialize_from_FITS_bad_keyword_raises_error(ccd_data, tmpdir): # There are two fits.open keywords that are not permitted in ccdproc: # do_not_scale_image_data and scale_back filename = tmpdir.join('test.fits').strpath ccd_data.write(filename) with pytest.raises(TypeError): CCDData.read(filename, unit=ccd_data.unit, do_not_scale_image_data=True) with pytest.raises(TypeError): CCDData.read(filename, unit=ccd_data.unit, scale_back=True) def test_ccddata_writer(ccd_data, tmpdir): filename = tmpdir.join('test.fits').strpath ccd_data.write(filename) ccd_disk = CCDData.read(filename, unit=ccd_data.unit) np.testing.assert_array_equal(ccd_data.data, ccd_disk.data) def test_ccddata_meta_is_case_sensitive(ccd_data): key = 'SoMeKEY' ccd_data.meta[key] = 10 assert key.lower() not in ccd_data.meta assert key.upper() not in ccd_data.meta assert key in ccd_data.meta def test_ccddata_meta_is_not_fits_header(ccd_data): ccd_data.meta = {'OBSERVER': 'Edwin Hubble'} assert not isinstance(ccd_data.meta, fits.Header) def test_fromMEF(ccd_data, tmpdir): hdu = fits.PrimaryHDU(ccd_data) hdu2 = fits.PrimaryHDU(2 * ccd_data.data) hdulist = fits.HDUList(hdu) hdulist.append(hdu2) filename = tmpdir.join('afile.fits').strpath hdulist.writeto(filename) # by default, we reading from the first extension cd = CCDData.read(filename, unit=u.electron) np.testing.assert_array_equal(cd.data, ccd_data.data) # but reading from the second should work too cd = CCDData.read(filename, hdu=1, unit=u.electron) np.testing.assert_array_equal(cd.data, 2 * ccd_data.data) def test_metafromheader(ccd_data): hdr = fits.header.Header() hdr['observer'] = 'Edwin Hubble' hdr['exptime'] = '3600' d1 = CCDData(np.ones((5, 5)), meta=hdr, unit=u.electron) assert 
d1.meta['OBSERVER'] == 'Edwin Hubble' assert d1.header['OBSERVER'] == 'Edwin Hubble' def test_metafromdict(): dic = {'OBSERVER': 'Edwin Hubble', 'EXPTIME': 3600} d1 = CCDData(np.ones((5, 5)), meta=dic, unit=u.electron) assert d1.meta['OBSERVER'] == 'Edwin Hubble' def test_header2meta(): hdr = fits.header.Header() hdr['observer'] = 'Edwin Hubble' hdr['exptime'] = '3600' d1 = CCDData(np.ones((5, 5)), unit=u.electron) d1.header = hdr assert d1.meta['OBSERVER'] == 'Edwin Hubble' assert d1.header['OBSERVER'] == 'Edwin Hubble' def test_metafromstring_fail(): hdr = 'this is not a valid header' with pytest.raises(TypeError): CCDData(np.ones((5, 5)), meta=hdr, unit=u.adu) def test_setting_bad_uncertainty_raises_error(ccd_data): with pytest.raises(TypeError): # Uncertainty is supposed to be an instance of NDUncertainty ccd_data.uncertainty = 10 def test_setting_uncertainty_with_array(ccd_data): ccd_data.uncertainty = None fake_uncertainty = np.sqrt(np.abs(ccd_data.data)) ccd_data.uncertainty = fake_uncertainty.copy() np.testing.assert_array_equal(ccd_data.uncertainty.array, fake_uncertainty) def test_setting_uncertainty_wrong_shape_raises_error(ccd_data): with pytest.raises(ValueError): ccd_data.uncertainty = np.random.random(size=(3, 4)) def test_to_hdu(ccd_data): ccd_data.meta = {'observer': 'Edwin Hubble'} fits_hdulist = ccd_data.to_hdu() assert isinstance(fits_hdulist, fits.HDUList) for k, v in ccd_data.meta.items(): assert fits_hdulist[0].header[k] == v np.testing.assert_array_equal(fits_hdulist[0].data, ccd_data.data) def test_copy(ccd_data): ccd_copy = ccd_data.copy() np.testing.assert_array_equal(ccd_copy.data, ccd_data.data) assert ccd_copy.unit == ccd_data.unit assert ccd_copy.meta == ccd_data.meta @pytest.mark.parametrize('operation,affects_uncertainty', [ ("multiply", True), ("divide", True), ]) @pytest.mark.parametrize('operand', [ 2.0, 2 * u.dimensionless_unscaled, 2 * u.photon / u.adu, ]) @pytest.mark.parametrize('with_uncertainty', [ True, False]) @pytest.mark.data_unit(u.adu) def test_mult_div_overload(ccd_data, operand, with_uncertainty, operation, affects_uncertainty): if with_uncertainty: ccd_data.uncertainty = StdDevUncertainty(np.ones_like(ccd_data)) method = ccd_data.__getattribute__(operation) np_method = np.__getattribute__(operation) result = method(operand) assert result is not ccd_data assert isinstance(result, CCDData) assert (result.uncertainty is None or isinstance(result.uncertainty, StdDevUncertainty)) try: op_value = operand.value except AttributeError: op_value = operand np.testing.assert_array_equal(result.data, np_method(ccd_data.data, op_value)) if with_uncertainty: if affects_uncertainty: np.testing.assert_array_equal(result.uncertainty.array, np_method(ccd_data.uncertainty.array, op_value)) else: np.testing.assert_array_equal(result.uncertainty.array, ccd_data.uncertainty.array) else: assert result.uncertainty is None if isinstance(operand, u.Quantity): # Need the "1 *" below to force arguments to be Quantity to work around # astropy/astropy#2377 expected_unit = np_method(1 * ccd_data.unit, 1 * operand.unit).unit assert result.unit == expected_unit else: assert result.unit == ccd_data.unit @pytest.mark.parametrize('operation,affects_uncertainty', [ ("add", False), ("subtract", False), ]) @pytest.mark.parametrize('operand,expect_failure', [ (2.0, u.UnitsError), # fail--units don't match image (2 * u.dimensionless_unscaled, u.UnitsError), # same (2 * u.adu, False), ]) @pytest.mark.parametrize('with_uncertainty', [ True, False]) @pytest.mark.data_unit(u.adu) def 
test_add_sub_overload(ccd_data, operand, expect_failure, with_uncertainty, operation, affects_uncertainty): if with_uncertainty: ccd_data.uncertainty = StdDevUncertainty(np.ones_like(ccd_data)) method = ccd_data.__getattribute__(operation) np_method = np.__getattribute__(operation) if expect_failure: with pytest.raises(expect_failure): result = method(operand) return else: result = method(operand) assert result is not ccd_data assert isinstance(result, CCDData) assert (result.uncertainty is None or isinstance(result.uncertainty, StdDevUncertainty)) try: op_value = operand.value except AttributeError: op_value = operand np.testing.assert_array_equal(result.data, np_method(ccd_data.data, op_value)) if with_uncertainty: if affects_uncertainty: np.testing.assert_array_equal(result.uncertainty.array, np_method(ccd_data.uncertainty.array, op_value)) else: np.testing.assert_array_equal(result.uncertainty.array, ccd_data.uncertainty.array) else: assert result.uncertainty is None if isinstance(operand, u.Quantity): assert (result.unit == ccd_data.unit and result.unit == operand.unit) else: assert result.unit == ccd_data.unit def test_arithmetic_overload_fails(ccd_data): with pytest.raises(TypeError): ccd_data.multiply("five") with pytest.raises(TypeError): ccd_data.divide("five") with pytest.raises(TypeError): ccd_data.add("five") with pytest.raises(TypeError): ccd_data.subtract("five") def test_arithmetic_no_wcs_compare(): ccd = CCDData(np.ones((10, 10)), unit='') assert ccd.add(ccd, compare_wcs=None).wcs is None assert ccd.subtract(ccd, compare_wcs=None).wcs is None assert ccd.multiply(ccd, compare_wcs=None).wcs is None assert ccd.divide(ccd, compare_wcs=None).wcs is None def test_arithmetic_with_wcs_compare(): def return_diff_smaller_3(first, second): return abs(first - second) <= 3 ccd1 = CCDData(np.ones((10, 10)), unit='', wcs=2) ccd2 = CCDData(np.ones((10, 10)), unit='', wcs=5) assert ccd1.add(ccd2, compare_wcs=return_diff_smaller_3).wcs == 2 assert ccd1.subtract(ccd2, compare_wcs=return_diff_smaller_3).wcs == 2 assert ccd1.multiply(ccd2, compare_wcs=return_diff_smaller_3).wcs == 2 assert ccd1.divide(ccd2, compare_wcs=return_diff_smaller_3).wcs == 2 def test_arithmetic_with_wcs_compare_fail(): def return_diff_smaller_1(first, second): return abs(first - second) <= 1 ccd1 = CCDData(np.ones((10, 10)), unit='', wcs=2) ccd2 = CCDData(np.ones((10, 10)), unit='', wcs=5) with pytest.raises(ValueError): ccd1.add(ccd2, compare_wcs=return_diff_smaller_1).wcs with pytest.raises(ValueError): ccd1.subtract(ccd2, compare_wcs=return_diff_smaller_1).wcs with pytest.raises(ValueError): ccd1.multiply(ccd2, compare_wcs=return_diff_smaller_1).wcs with pytest.raises(ValueError): ccd1.divide(ccd2, compare_wcs=return_diff_smaller_1).wcs def test_arithmetic_overload_ccddata_operand(ccd_data): ccd_data.uncertainty = StdDevUncertainty(np.ones_like(ccd_data)) operand = ccd_data.copy() result = ccd_data.add(operand) assert len(result.meta) == 0 np.testing.assert_array_equal(result.data, 2 * ccd_data.data) np.testing.assert_array_equal(result.uncertainty.array, np.sqrt(2) * ccd_data.uncertainty.array) result = ccd_data.subtract(operand) assert len(result.meta) == 0 np.testing.assert_array_equal(result.data, 0 * ccd_data.data) np.testing.assert_array_equal(result.uncertainty.array, np.sqrt(2) * ccd_data.uncertainty.array) result = ccd_data.multiply(operand) assert len(result.meta) == 0 np.testing.assert_array_equal(result.data, ccd_data.data ** 2) expected_uncertainty = (np.sqrt(2) * np.abs(ccd_data.data) * 
ccd_data.uncertainty.array) np.testing.assert_allclose(result.uncertainty.array, expected_uncertainty) result = ccd_data.divide(operand) assert len(result.meta) == 0 np.testing.assert_array_equal(result.data, np.ones_like(ccd_data.data)) expected_uncertainty = (np.sqrt(2) / np.abs(ccd_data.data) * ccd_data.uncertainty.array) np.testing.assert_allclose(result.uncertainty.array, expected_uncertainty) def test_arithmetic_overload_differing_units(): a = np.array([1, 2, 3]) * u.m b = np.array([1, 2, 3]) * u.cm ccddata = CCDData(a) # TODO: Could also be parametrized. res = ccddata.add(b) np.testing.assert_array_almost_equal(res.data, np.add(a, b).value) assert res.unit == np.add(a, b).unit res = ccddata.subtract(b) np.testing.assert_array_almost_equal(res.data, np.subtract(a, b).value) assert res.unit == np.subtract(a, b).unit res = ccddata.multiply(b) np.testing.assert_array_almost_equal(res.data, np.multiply(a, b).value) assert res.unit == np.multiply(a, b).unit res = ccddata.divide(b) np.testing.assert_array_almost_equal(res.data, np.divide(a, b).value) assert res.unit == np.divide(a, b).unit def test_arithmetic_add_with_array(): ccd = CCDData(np.ones((3, 3)), unit='') res = ccd.add(np.arange(3)) np.testing.assert_array_equal(res.data, [[1, 2, 3]] * 3) ccd = CCDData(np.ones((3, 3)), unit='adu') with pytest.raises(ValueError): ccd.add(np.arange(3)) def test_arithmetic_subtract_with_array(): ccd = CCDData(np.ones((3, 3)), unit='') res = ccd.subtract(np.arange(3)) np.testing.assert_array_equal(res.data, [[1, 0, -1]] * 3) ccd = CCDData(np.ones((3, 3)), unit='adu') with pytest.raises(ValueError): ccd.subtract(np.arange(3)) def test_arithmetic_multiply_with_array(): ccd = CCDData(np.ones((3, 3)) * 3, unit=u.m) res = ccd.multiply(np.ones((3, 3)) * 2) np.testing.assert_array_equal(res.data, [[6, 6, 6]] * 3) assert res.unit == ccd.unit def test_arithmetic_divide_with_array(): ccd = CCDData(np.ones((3, 3)), unit=u.m) res = ccd.divide(np.ones((3, 3)) * 2) np.testing.assert_array_equal(res.data, [[0.5, 0.5, 0.5]] * 3) assert res.unit == ccd.unit def test_history_preserved_if_metadata_is_fits_header(tmpdir): fake_img = np.random.random(size=(100, 100)) hdu = fits.PrimaryHDU(fake_img) hdu.header['history'] = 'one' hdu.header['history'] = 'two' hdu.header['history'] = 'three' assert len(hdu.header['history']) == 3 tmp_file = tmpdir.join('temp.fits').strpath hdu.writeto(tmp_file) ccd_read = CCDData.read(tmp_file, unit="adu") assert ccd_read.header['history'] == hdu.header['history'] def test_infol_logged_if_unit_in_fits_header(ccd_data, tmpdir): tmpfile = tmpdir.join('temp.fits') ccd_data.write(tmpfile.strpath) log.setLevel('INFO') explicit_unit_name = "photon" with log.log_to_list() as log_list: ccd_from_disk = CCDData.read(tmpfile.strpath, unit=explicit_unit_name) assert explicit_unit_name in log_list[0].message def test_wcs_attribute(ccd_data, tmpdir): """ Check that WCS attribute gets added to header, and that if a CCDData object is created from a FITS file with a header, and the WCS attribute is modified, then the CCDData object is turned back into an hdu, the WCS object overwrites the old WCS information in the header. """ tmpfile = tmpdir.join('temp.fits') # This wcs example is taken from the astropy.wcs docs. 
wcs = WCS(naxis=2) wcs.wcs.crpix = np.array(ccd_data.shape) / 2 wcs.wcs.cdelt = np.array([-0.066667, 0.066667]) wcs.wcs.crval = [0, -90] wcs.wcs.ctype = ["RA---AIR", "DEC--AIR"] wcs.wcs.set_pv([(2, 1, 45.0)]) ccd_data.header = ccd_data.to_hdu()[0].header ccd_data.header.extend(wcs.to_header(), useblanks=False) ccd_data.write(tmpfile.strpath) # Get the header length after it has been extended by the WCS keywords original_header_length = len(ccd_data.header) ccd_new = CCDData.read(tmpfile.strpath) # WCS attribute should be set for ccd_new assert ccd_new.wcs is not None # WCS attribute should be equal to wcs above. assert ccd_new.wcs.wcs == wcs.wcs # Converting CCDData object with wcs to an hdu shouldn't # create duplicate wcs-related entries in the header. ccd_new_hdu = ccd_new.to_hdu()[0] assert len(ccd_new_hdu.header) == original_header_length # Making a CCDData with WCS (but not WCS in the header) should lead to # WCS information in the header when it is converted to an HDU. ccd_wcs_not_in_header = CCDData(ccd_data.data, wcs=wcs, unit="adu") hdu = ccd_wcs_not_in_header.to_hdu()[0] wcs_header = wcs.to_header() for k in wcs_header.keys(): # Skip these keywords if they are in the WCS header because they are # not WCS-specific. if k in ['', 'COMMENT', 'HISTORY']: continue # No keyword from the WCS should be in the header. assert k not in ccd_wcs_not_in_header.header # Every keyword in the WCS should be in the header of the HDU assert hdu.header[k] == wcs_header[k] # Now check that if WCS of a CCDData is modified, then the CCDData is # converted to an HDU, the WCS keywords in the header are overwritten # with the appropriate keywords from the header. # # ccd_new has a WCS and WCS keywords in the header, so try modifying # the WCS. ccd_new.wcs.wcs.cdelt *= 2 ccd_new_hdu_mod_wcs = ccd_new.to_hdu()[0] assert ccd_new_hdu_mod_wcs.header['CDELT1'] == ccd_new.wcs.wcs.cdelt[0] assert ccd_new_hdu_mod_wcs.header['CDELT2'] == ccd_new.wcs.wcs.cdelt[1] def test_wcs_keywords_removed_from_header(): """ Test, for the file included with the nddata tests, that WCS keywords are properly removed from header. """ from ..ccddata import _KEEP_THESE_KEYWORDS_IN_HEADER keepers = set(_KEEP_THESE_KEYWORDS_IN_HEADER) data_file = get_pkg_data_filename('data/sip-wcs.fits') ccd = CCDData.read(data_file) wcs_header = ccd.wcs.to_header() assert not (set(wcs_header) & set(ccd.meta) - keepers) # Make sure that exceptions are not raised when trying to remove missing # keywords. o4sp040b0_raw.fits of io.fits is missing keyword 'PC1_1'. data_file1 = get_pkg_data_filename('../../io/fits/tests/data/o4sp040b0_raw.fits') ccd = CCDData.read(data_file1, unit='count') def test_wcs_keyword_removal_for_wcs_test_files(): """ Test, for the WCS test files, that keyword removall works as expected. Those cover a much broader range of WCS types than test_wcs_keywords_removed_from_header """ from ..ccddata import _generate_wcs_and_update_header from ..ccddata import _KEEP_THESE_KEYWORDS_IN_HEADER keepers = set(_KEEP_THESE_KEYWORDS_IN_HEADER) wcs_headers = get_pkg_data_filenames('../../wcs/tests/data', pattern='*.hdr') for hdr in wcs_headers: # Skip the files that are expected to be bad... if 'invalid' in hdr or 'nonstandard' in hdr or 'segfault' in hdr: continue header_string = get_pkg_data_contents(hdr) wcs = WCS(header_string) header = wcs.to_header(relax=True) new_header, new_wcs = _generate_wcs_and_update_header(header) # Make sure all of the WCS-related keywords have been removed. 
assert not (set(new_header) & set(new_wcs.to_header(relax=True)) - keepers) # Check that the new wcs is the same as the old. new_wcs_header = new_wcs.to_header(relax=True) for k, v in new_wcs_header.items(): if isinstance(v, str): assert header[k] == v else: np.testing.assert_almost_equal(header[k], v) def test_read_wcs_not_creatable(tmpdir): # The following Header can't be converted to a WCS object. See also #6499. hdr_txt_example_WCS = textwrap.dedent(''' SIMPLE = T / Fits standard BITPIX = 16 / Bits per pixel NAXIS = 2 / Number of axes NAXIS1 = 1104 / Axis length NAXIS2 = 4241 / Axis length CRVAL1 = 164.98110962 / Physical value of the reference pixel X CRVAL2 = 44.34089279 / Physical value of the reference pixel Y CRPIX1 = -34.0 / Reference pixel in X (pixel) CRPIX2 = 2041.0 / Reference pixel in Y (pixel) CDELT1 = 0.10380000 / X Scale projected on detector (#/pix) CDELT2 = 0.10380000 / Y Scale projected on detector (#/pix) CTYPE1 = 'RA---TAN' / Pixel coordinate system CTYPE2 = 'WAVELENGTH' / Pixel coordinate system CUNIT1 = 'degree ' / Units used in both CRVAL1 and CDELT1 CUNIT2 = 'nm ' / Units used in both CRVAL2 and CDELT2 CD1_1 = 0.20760000 / Pixel Coordinate translation matrix CD1_2 = 0.00000000 / Pixel Coordinate translation matrix CD2_1 = 0.00000000 / Pixel Coordinate translation matrix CD2_2 = 0.10380000 / Pixel Coordinate translation matrix C2YPE1 = 'RA---TAN' / Pixel coordinate system C2YPE2 = 'DEC--TAN' / Pixel coordinate system C2NIT1 = 'degree ' / Units used in both C2VAL1 and C2ELT1 C2NIT2 = 'degree ' / Units used in both C2VAL2 and C2ELT2 RADECSYS= 'FK5 ' / The equatorial coordinate system ''') with catch_warnings(FITSFixedWarning): hdr = fits.Header.fromstring(hdr_txt_example_WCS, sep='\n') hdul = fits.HDUList([fits.PrimaryHDU(np.ones((4241, 1104)), header=hdr)]) filename = tmpdir.join('afile.fits').strpath hdul.writeto(filename) # The hdr cannot be converted to a WCS object because of an # InconsistentAxisTypesError but it should still open the file ccd = CCDData.read(filename, unit='adu') assert ccd.wcs is None def test_header(ccd_data): a = {'Observer': 'Hubble'} ccd = CCDData(ccd_data, header=a) assert ccd.meta == a def test_wcs_arithmetic(ccd_data): ccd_data.wcs = 5 result = ccd_data.multiply(1.0) assert result.wcs == 5 @pytest.mark.parametrize('operation', ['multiply', 'divide', 'add', 'subtract']) def test_wcs_arithmetic_ccd(ccd_data, operation): ccd_data2 = ccd_data.copy() ccd_data.wcs = 5 method = ccd_data.__getattribute__(operation) result = method(ccd_data2) assert result.wcs == ccd_data.wcs assert ccd_data2.wcs is None def test_wcs_sip_handling(): """ Check whether the ctypes RA---TAN-SIP and DEC--TAN-SIP survive a roundtrip unchanged. """ data_file = get_pkg_data_filename('data/sip-wcs.fits') def check_wcs_ctypes(header): expected_wcs_ctypes = { 'CTYPE1': 'RA---TAN-SIP', 'CTYPE2': 'DEC--TAN-SIP' } return [header[k] == v for k, v in expected_wcs_ctypes.items()] ccd_original = CCDData.read(data_file) # After initialization the keywords should be in the WCS, not in the # meta. 
with fits.open(data_file) as raw: good_ctype = check_wcs_ctypes(raw[0].header) assert all(good_ctype) ccd_new = ccd_original.to_hdu() good_ctype = check_wcs_ctypes(ccd_new[0].header) assert all(good_ctype) # Try converting to header with wcs_relax=False and # the header should contain the CTYPE keywords without # the -SIP ccd_no_relax = ccd_original.to_hdu(wcs_relax=False) good_ctype = check_wcs_ctypes(ccd_no_relax[0].header) assert not any(good_ctype) assert ccd_no_relax[0].header['CTYPE1'] == 'RA---TAN' assert ccd_no_relax[0].header['CTYPE2'] == 'DEC--TAN' @pytest.mark.parametrize('operation', ['multiply', 'divide', 'add', 'subtract']) def test_mask_arithmetic_ccd(ccd_data, operation): ccd_data2 = ccd_data.copy() ccd_data.mask = (ccd_data.data > 0) method = ccd_data.__getattribute__(operation) result = method(ccd_data2) np.testing.assert_equal(result.mask, ccd_data.mask) def test_write_read_multiextensionfits_mask_default(ccd_data, tmpdir): # Test that if a mask is present the mask is saved and loaded by default. ccd_data.mask = ccd_data.data > 10 filename = tmpdir.join('afile.fits').strpath ccd_data.write(filename) ccd_after = CCDData.read(filename) assert ccd_after.mask is not None np.testing.assert_array_equal(ccd_data.mask, ccd_after.mask) def test_write_read_multiextensionfits_uncertainty_default(ccd_data, tmpdir): # Test that if a uncertainty is present it is saved and loaded by default. ccd_data.uncertainty = StdDevUncertainty(ccd_data.data * 10) filename = tmpdir.join('afile.fits').strpath ccd_data.write(filename) ccd_after = CCDData.read(filename) assert ccd_after.uncertainty is not None np.testing.assert_array_equal(ccd_data.uncertainty.array, ccd_after.uncertainty.array) def test_write_read_multiextensionfits_not(ccd_data, tmpdir): # Test that writing mask and uncertainty can be disabled ccd_data.mask = ccd_data.data > 10 ccd_data.uncertainty = StdDevUncertainty(ccd_data.data * 10) filename = tmpdir.join('afile.fits').strpath ccd_data.write(filename, hdu_mask=None, hdu_uncertainty=None) ccd_after = CCDData.read(filename) assert ccd_after.uncertainty is None assert ccd_after.mask is None def test_write_read_multiextensionfits_custom_ext_names(ccd_data, tmpdir): # Test writing mask, uncertainty in another extension than default ccd_data.mask = ccd_data.data > 10 ccd_data.uncertainty = StdDevUncertainty(ccd_data.data * 10) filename = tmpdir.join('afile.fits').strpath ccd_data.write(filename, hdu_mask='Fun', hdu_uncertainty='NoFun') # Try reading with defaults extension names ccd_after = CCDData.read(filename) assert ccd_after.uncertainty is None assert ccd_after.mask is None # Try reading with custom extension names ccd_after = CCDData.read(filename, hdu_mask='Fun', hdu_uncertainty='NoFun') assert ccd_after.uncertainty is not None assert ccd_after.mask is not None np.testing.assert_array_equal(ccd_data.mask, ccd_after.mask) np.testing.assert_array_equal(ccd_data.uncertainty.array, ccd_after.uncertainty.array) def test_wcs(ccd_data): ccd_data.wcs = 5 assert ccd_data.wcs == 5 def test_recognized_fits_formats_for_read_write(ccd_data, tmpdir): # These are the extensions that are supposed to be supported. 
supported_extensions = ['fit', 'fits', 'fts'] for ext in supported_extensions: path = tmpdir.join("test.{}".format(ext)) ccd_data.write(path.strpath) from_disk = CCDData.read(path.strpath) assert (ccd_data.data == from_disk.data).all() def test_stddevuncertainty_compat_descriptor_no_parent(): with pytest.raises(MissingDataAssociationException): StdDevUncertainty(np.ones((10, 10))).parent_nddata def test_stddevuncertainty_compat_descriptor_no_weakref(): # TODO: Remove this test if astropy 1.0 isn't supported anymore # This test might create a Memoryleak on purpose, so the last lines after # the assert are IMPORTANT cleanup. ccd = CCDData(np.ones((10, 10)), unit='') uncert = StdDevUncertainty(np.ones((10, 10))) uncert._parent_nddata = ccd assert uncert.parent_nddata is ccd uncert._parent_nddata = None
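# ---------------------------------------------------------------------------
# Illustrative round-trip sketch (an addition, not one of the original
# tests).  It only exercises API calls already used in the tests above:
# unit handling, the default mask/uncertainty extensions, and FITS
# write/read.  The file name and data values are arbitrary choices.
# ---------------------------------------------------------------------------
def test_example_roundtrip_sketch(tmpdir):
    data = np.arange(25, dtype=float).reshape(5, 5)
    ccd = CCDData(data, unit=u.adu, meta={'OBSERVER': 'example'})
    ccd.mask = ccd.data > 20
    ccd.uncertainty = StdDevUncertainty(np.sqrt(np.abs(ccd.data)))

    filename = tmpdir.join('roundtrip.fits').strpath
    ccd.write(filename)

    # By default the mask and uncertainty are written to extra HDUs and
    # restored on read, while the unit is recovered from the BUNIT keyword.
    ccd_back = CCDData.read(filename)
    assert ccd_back.unit is u.adu
    np.testing.assert_array_equal(ccd_back.data, ccd.data)
    np.testing.assert_array_equal(ccd_back.mask, ccd.mask)
    np.testing.assert_allclose(ccd_back.uncertainty.array,
                               ccd.uncertainty.array)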
funbaker/astropy
astropy/nddata/tests/test_ccddata.py
astropy/stats/lombscargle/_statistics.py
"""Implements the Astropy TestRunner which is a thin wrapper around py.test.""" import inspect import os import glob import copy import shlex import sys import tempfile import warnings import importlib from collections import OrderedDict from importlib.util import find_spec from ..config.paths import set_temp_config, set_temp_cache from ..utils import wraps, find_current_module from ..utils.exceptions import AstropyWarning, AstropyDeprecationWarning __all__ = ['TestRunner', 'TestRunnerBase', 'keyword'] def _has_test_dependencies(): # pragma: no cover # Using the test runner will not work without these dependencies, but # pytest-openfiles is optional, so it's not listed here. required = ['pytest', 'pytest_remotedata', 'pytest_doctestplus'] for module in required: spec = find_spec(module) # Checking loader accounts for packages that were uninstalled if spec is None or spec.loader is None: return False return True class keyword: """ A decorator to mark a method as keyword argument for the ``TestRunner``. Parameters ---------- default_value : `object` The default value for the keyword argument. (Default: `None`) priority : `int` keyword argument methods are executed in order of descending priority. """ def __init__(self, default_value=None, priority=0): self.default_value = default_value self.priority = priority def __call__(self, f): def keyword(*args, **kwargs): return f(*args, **kwargs) keyword._default_value = self.default_value keyword._priority = self.priority # Set __doc__ explicitly here rather than using wraps because we want # to keep the function name as keyword so we can inspect it later. keyword.__doc__ = f.__doc__ return keyword class TestRunnerBase: """ The base class for the TestRunner. A test runner can be constructed by creating a subclass of this class and defining 'keyword' methods. These are methods that have the `~astropy.tests.runner.keyword` decorator, these methods are used to construct allowed keyword arguments to the `~astropy.tests.runner.TestRunnerBase.run_tests` method as a way to allow customization of individual keyword arguments (and associated logic) without having to re-implement the whole `~astropy.tests.runner.TestRunnerBase.run_tests` method. Examples -------- A simple keyword method:: class MyRunner(TestRunnerBase): @keyword('default_value'): def spam(self, spam, kwargs): \"\"\" spam : `str` The parameter description for the run_tests docstring. \"\"\" # Return value must be a list with a CLI parameter for pytest. return ['--spam={}'.format(spam)] """ def __init__(self, base_path): self.base_path = os.path.abspath(base_path) def __new__(cls, *args, **kwargs): # Before constructing the class parse all the methods that have been # decorated with ``keyword``. # The objective of this method is to construct a default set of keyword # arguments to the ``run_tests`` method. It does this by inspecting the # methods of the class for functions with the name ``keyword`` which is # the name of the decorator wrapping function. Once it has created this # dictionary, it also formats the docstring of ``run_tests`` to be # comprised of the docstrings for the ``keyword`` methods. # To add a keyword argument to the ``run_tests`` method, define a new # method decorated with ``@keyword`` and with the ``self, name, kwargs`` # signature. 
# Get all 'function' members as the wrapped methods are functions functions = inspect.getmembers(cls, predicate=inspect.isfunction) # Filter out anything that's not got the name 'keyword' keywords = filter(lambda func: func[1].__name__ == 'keyword', functions) # Sort all keywords based on the priority flag. sorted_keywords = sorted(keywords, key=lambda x: x[1]._priority, reverse=True) cls.keywords = OrderedDict() doc_keywords = "" for name, func in sorted_keywords: # Here we test if the function has been overloaded to return # NotImplemented which is the way to disable arguments on # subclasses. If it has been disabled we need to remove it from the # default keywords dict. We do it in the try except block because # we do not have access to an instance of the class, so this is # going to error unless the method is just doing `return # NotImplemented`. try: # Second argument is False, as it is normally a bool. # The other two are placeholders for objects. if func(None, False, None) is NotImplemented: continue except Exception: pass # Construct the default kwargs dict and docstring cls.keywords[name] = func._default_value if func.__doc__: doc_keywords += ' '*8 doc_keywords += func.__doc__.strip() doc_keywords += '\n\n' cls.run_tests.__doc__ = cls.RUN_TESTS_DOCSTRING.format(keywords=doc_keywords) return super(TestRunnerBase, cls).__new__(cls) def _generate_args(self, **kwargs): # Update default values with passed kwargs # but don't modify the defaults keywords = copy.deepcopy(self.keywords) keywords.update(kwargs) # Iterate through the keywords (in order of priority) args = [] for keyword in keywords.keys(): func = getattr(self, keyword) result = func(keywords[keyword], keywords) # Allow disabling of options in a subclass if result is NotImplemented: raise TypeError("run_tests() got an unexpected keyword argument {}".format(keyword)) # keyword methods must return a list if not isinstance(result, list): raise TypeError("{} keyword method must return a list".format(keyword)) args += result return args RUN_TESTS_DOCSTRING = \ """ Run the tests for the package. Parameters ---------- {keywords} See Also -------- pytest.main : This method builds arguments for and then calls this function. """ def run_tests(self, **kwargs): # The following option will include eggs inside a .eggs folder in # sys.path when running the tests. This is possible so that when # runnning python setup.py test, test dependencies installed via e.g. # tests_requires are available here. This is not an advertised option # since it is only for internal use if kwargs.pop('add_local_eggs_to_path', False): # Add each egg to sys.path individually for egg in glob.glob(os.path.join('.eggs', '*.egg')): sys.path.insert(0, egg) # We now need to force reload pkg_resources in case any pytest # plugins were added above, so that their entry points are picked up import pkg_resources importlib.reload(pkg_resources) if not _has_test_dependencies(): # pragma: no cover msg = "Test dependencies are missing. You should install the 'pytest-astropy' package." raise RuntimeError(msg) # The docstring for this method is defined as a class variable. # This allows it to be built for each subclass in __new__. 
# Don't import pytest until it's actually needed to run the tests import pytest # Raise error for undefined kwargs allowed_kwargs = set(self.keywords.keys()) passed_kwargs = set(kwargs.keys()) if not passed_kwargs.issubset(allowed_kwargs): wrong_kwargs = list(passed_kwargs.difference(allowed_kwargs)) raise TypeError("run_tests() got an unexpected keyword argument {}".format(wrong_kwargs[0])) args = self._generate_args(**kwargs) if 'plugins' not in self.keywords or self.keywords['plugins'] is None: self.keywords['plugins'] = [] # Make plugins available to test runner without registering them self.keywords['plugins'].extend([ 'astropy.tests.plugins.display', 'astropy.tests.plugins.config' ]) # override the config locations to not make a new directory nor use # existing cache or config astropy_config = tempfile.mkdtemp('astropy_config') astropy_cache = tempfile.mkdtemp('astropy_cache') # Have to use nested with statements for cross-Python support # Note, using these context managers here is superfluous if the # config_dir or cache_dir options to py.test are in use, but it's # also harmless to nest the contexts with set_temp_config(astropy_config, delete=True): with set_temp_cache(astropy_cache, delete=True): return pytest.main(args=args, plugins=self.keywords['plugins']) @classmethod def make_test_runner_in(cls, path): """ Constructs a `TestRunner` to run in the given path, and returns a ``test()`` function which takes the same arguments as `TestRunner.run_tests`. The returned ``test()`` function will be defined in the module this was called from. This is used to implement the ``astropy.test()`` function (or the equivalent for affiliated packages). """ runner = cls(path) @wraps(runner.run_tests, ('__doc__',), exclude_args=('self',)) def test(**kwargs): return runner.run_tests(**kwargs) module = find_current_module(2) if module is not None: test.__module__ = module.__name__ # A somewhat unusual hack, but delete the attached __wrapped__ # attribute--although this is normally used to tell if the function # was wrapped with wraps, on some version of Python this is also # used to determine the signature to display in help() which is # not useful in this case. We don't really care in this case if the # function was wrapped either if hasattr(test, '__wrapped__'): del test.__wrapped__ return test class TestRunner(TestRunnerBase): """ A test runner for astropy tests """ # Increase priority so this warning is displayed first. @keyword(priority=1000) def coverage(self, coverage, kwargs): if coverage: warnings.warn( "The coverage option is ignored on run_tests, since it " "can not be made to work in that context. Use " "'python setup.py test --coverage' instead.", AstropyWarning) return [] # test_path depends on self.package_path so make sure this runs before # test_path. @keyword(priority=1) def package(self, package, kwargs): """ package : str, optional The name of a specific package to test, e.g. 'io.fits' or 'utils'. If nothing is specified all default Astropy tests are run. """ if package is None: self.package_path = self.base_path else: self.package_path = os.path.join(self.base_path, package.replace('.', os.path.sep)) if not os.path.isdir(self.package_path): raise ValueError('Package not found: {0}'.format(package)) if not kwargs['test_path']: return [self.package_path] return [] @keyword() def test_path(self, test_path, kwargs): """ test_path : str, optional Specify location to test by path. May be a single file or directory. Must be specified absolutely or relative to the calling directory. 
""" all_args = [] # Ensure that the package kwarg has been run. self.package(kwargs['package'], kwargs) if test_path: base, ext = os.path.splitext(test_path) if ext in ('.rst', ''): if kwargs['docs_path'] is None: # This shouldn't happen from "python setup.py test" raise ValueError( "Can not test .rst files without a docs_path " "specified.") abs_docs_path = os.path.abspath(kwargs['docs_path']) abs_test_path = os.path.abspath( os.path.join(abs_docs_path, os.pardir, test_path)) common = os.path.commonprefix((abs_docs_path, abs_test_path)) if os.path.exists(abs_test_path) and common == abs_docs_path: # Turn on the doctest_rst plugin all_args.append('--doctest-rst') test_path = abs_test_path if not (os.path.isdir(test_path) or ext in ('.py', '.rst')): raise ValueError("Test path must be a directory or a path to " "a .py or .rst file") return all_args + [test_path] return [] @keyword() def args(self, args, kwargs): """ args : str, optional Additional arguments to be passed to ``pytest.main`` in the ``args`` keyword argument. """ if args: return shlex.split(args, posix=not sys.platform.startswith('win')) return [] @keyword() def plugins(self, plugins, kwargs): """ plugins : list, optional Plugins to be passed to ``pytest.main`` in the ``plugins`` keyword argument. """ return [] @keyword() def verbose(self, verbose, kwargs): """ verbose : bool, optional Convenience option to turn on verbose output from py.test. Passing True is the same as specifying ``-v`` in ``args``. """ if verbose: return ['-v'] return [] @keyword() def pastebin(self, pastebin, kwargs): """ pastebin : ('failed', 'all', None), optional Convenience option for turning on py.test pastebin output. Set to 'failed' to upload info for failed tests, or 'all' to upload info for all tests. """ if pastebin is not None: if pastebin in ['failed', 'all']: return ['--pastebin={0}'.format(pastebin)] else: raise ValueError("pastebin should be 'failed' or 'all'") return [] @keyword(default_value='none') def remote_data(self, remote_data, kwargs): """ remote_data : {'none', 'astropy', 'any'}, optional Controls whether to run tests marked with @pytest.mark.remote_data. This can be set to run no tests with remote data (``none``), only ones that use data from http://data.astropy.org (``astropy``), or all tests that use remote data (``any``). The default is ``none``. """ if remote_data is True: remote_data = 'any' elif remote_data is False: remote_data = 'none' elif remote_data not in ('none', 'astropy', 'any'): warnings.warn("The remote_data option should be one of " "none/astropy/any (found {0}). For backward-compatibility, " "assuming 'any', but you should change the option to be " "one of the supported ones to avoid issues in " "future.".format(remote_data), AstropyDeprecationWarning) remote_data = 'any' return ['--remote-data={0}'.format(remote_data)] @keyword() def pep8(self, pep8, kwargs): """ pep8 : bool, optional Turn on PEP8 checking via the pytest-pep8 plugin and disable normal tests. Same as specifying ``--pep8 -k pep8`` in ``args``. """ if pep8: try: import pytest_pep8 # pylint: disable=W0611 except ImportError: raise ImportError('PEP8 checking requires pytest-pep8 plugin: ' 'http://pypi.python.org/pypi/pytest-pep8') else: return ['--pep8', '-k', 'pep8'] return [] @keyword() def pdb(self, pdb, kwargs): """ pdb : bool, optional Turn on PDB post-mortem analysis for failing tests. Same as specifying ``--pdb`` in ``args``. 
""" if pdb: return ['--pdb'] return [] @keyword() def open_files(self, open_files, kwargs): """ open_files : bool, optional Fail when any tests leave files open. Off by default, because this adds extra run time to the test suite. Requires the ``psutil`` package. """ if open_files: if kwargs['parallel'] != 0: raise SystemError( "open file detection may not be used in conjunction with " "parallel testing.") try: import psutil # pylint: disable=W0611 except ImportError: raise SystemError( "open file detection requested, but psutil package " "is not installed.") return ['--open-files'] print("Checking for unclosed files") return [] @keyword(0) def parallel(self, parallel, kwargs): """ parallel : int, optional When provided, run the tests in parallel on the specified number of CPUs. If parallel is negative, it will use the all the cores on the machine. Requires the ``pytest-xdist`` plugin. """ if parallel != 0: try: from xdist import plugin # noqa except ImportError: raise SystemError( "running tests in parallel requires the pytest-xdist package") return ['-n', str(parallel)] return [] @keyword() def docs_path(self, docs_path, kwargs): """ docs_path : str, optional The path to the documentation .rst files. """ if docs_path is not None and not kwargs['skip_docs']: if kwargs['package'] is not None: docs_path = os.path.join( docs_path, kwargs['package'].replace('.', os.path.sep)) if not os.path.exists(docs_path): warnings.warn( "Can not test .rst docs, since docs path " "({0}) does not exist.".format(docs_path)) docs_path = None if docs_path and not kwargs['skip_docs'] and not kwargs['test_path']: return [docs_path, '--doctest-rst'] return [] @keyword() def skip_docs(self, skip_docs, kwargs): """ skip_docs : `bool`, optional When `True`, skips running the doctests in the .rst files. """ # Skip docs is a bool used by docs_path only. return [] @keyword() def repeat(self, repeat, kwargs): """ repeat : `int`, optional If set, specifies how many times each test should be run. This is useful for diagnosing sporadic failures. """ if repeat: return ['--repeat={0}'.format(repeat)] return [] # Override run_tests for astropy-specific fixes def run_tests(self, **kwargs): # This prevents cyclical import problems that make it # impossible to test packages that define Table types on their # own. from ..table import Table # pylint: disable=W0611 return super(TestRunner, self).run_tests(**kwargs)
wcs = WCS(naxis=2) wcs.wcs.crpix = np.array(ccd_data.shape) / 2 wcs.wcs.cdelt = np.array([-0.066667, 0.066667]) wcs.wcs.crval = [0, -90] wcs.wcs.ctype = ["RA---AIR", "DEC--AIR"] wcs.wcs.set_pv([(2, 1, 45.0)]) ccd_data.header = ccd_data.to_hdu()[0].header ccd_data.header.extend(wcs.to_header(), useblanks=False) ccd_data.write(tmpfile.strpath) # Get the header length after it has been extended by the WCS keywords original_header_length = len(ccd_data.header) ccd_new = CCDData.read(tmpfile.strpath) # WCS attribute should be set for ccd_new assert ccd_new.wcs is not None # WCS attribute should be equal to wcs above. assert ccd_new.wcs.wcs == wcs.wcs # Converting CCDData object with wcs to an hdu shouldn't # create duplicate wcs-related entries in the header. ccd_new_hdu = ccd_new.to_hdu()[0] assert len(ccd_new_hdu.header) == original_header_length # Making a CCDData with WCS (but not WCS in the header) should lead to # WCS information in the header when it is converted to an HDU. ccd_wcs_not_in_header = CCDData(ccd_data.data, wcs=wcs, unit="adu") hdu = ccd_wcs_not_in_header.to_hdu()[0] wcs_header = wcs.to_header() for k in wcs_header.keys(): # Skip these keywords if they are in the WCS header because they are # not WCS-specific. if k in ['', 'COMMENT', 'HISTORY']: continue # No keyword from the WCS should be in the header. assert k not in ccd_wcs_not_in_header.header # Every keyword in the WCS should be in the header of the HDU assert hdu.header[k] == wcs_header[k] # Now check that if WCS of a CCDData is modified, then the CCDData is # converted to an HDU, the WCS keywords in the header are overwritten # with the appropriate keywords from the header. # # ccd_new has a WCS and WCS keywords in the header, so try modifying # the WCS. ccd_new.wcs.wcs.cdelt *= 2 ccd_new_hdu_mod_wcs = ccd_new.to_hdu()[0] assert ccd_new_hdu_mod_wcs.header['CDELT1'] == ccd_new.wcs.wcs.cdelt[0] assert ccd_new_hdu_mod_wcs.header['CDELT2'] == ccd_new.wcs.wcs.cdelt[1] def test_wcs_keywords_removed_from_header(): """ Test, for the file included with the nddata tests, that WCS keywords are properly removed from header. """ from ..ccddata import _KEEP_THESE_KEYWORDS_IN_HEADER keepers = set(_KEEP_THESE_KEYWORDS_IN_HEADER) data_file = get_pkg_data_filename('data/sip-wcs.fits') ccd = CCDData.read(data_file) wcs_header = ccd.wcs.to_header() assert not (set(wcs_header) & set(ccd.meta) - keepers) # Make sure that exceptions are not raised when trying to remove missing # keywords. o4sp040b0_raw.fits of io.fits is missing keyword 'PC1_1'. data_file1 = get_pkg_data_filename('../../io/fits/tests/data/o4sp040b0_raw.fits') ccd = CCDData.read(data_file1, unit='count') def test_wcs_keyword_removal_for_wcs_test_files(): """ Test, for the WCS test files, that keyword removall works as expected. Those cover a much broader range of WCS types than test_wcs_keywords_removed_from_header """ from ..ccddata import _generate_wcs_and_update_header from ..ccddata import _KEEP_THESE_KEYWORDS_IN_HEADER keepers = set(_KEEP_THESE_KEYWORDS_IN_HEADER) wcs_headers = get_pkg_data_filenames('../../wcs/tests/data', pattern='*.hdr') for hdr in wcs_headers: # Skip the files that are expected to be bad... if 'invalid' in hdr or 'nonstandard' in hdr or 'segfault' in hdr: continue header_string = get_pkg_data_contents(hdr) wcs = WCS(header_string) header = wcs.to_header(relax=True) new_header, new_wcs = _generate_wcs_and_update_header(header) # Make sure all of the WCS-related keywords have been removed. 
assert not (set(new_header) & set(new_wcs.to_header(relax=True)) - keepers) # Check that the new wcs is the same as the old. new_wcs_header = new_wcs.to_header(relax=True) for k, v in new_wcs_header.items(): if isinstance(v, str): assert header[k] == v else: np.testing.assert_almost_equal(header[k], v) def test_read_wcs_not_creatable(tmpdir): # The following Header can't be converted to a WCS object. See also #6499. hdr_txt_example_WCS = textwrap.dedent(''' SIMPLE = T / Fits standard BITPIX = 16 / Bits per pixel NAXIS = 2 / Number of axes NAXIS1 = 1104 / Axis length NAXIS2 = 4241 / Axis length CRVAL1 = 164.98110962 / Physical value of the reference pixel X CRVAL2 = 44.34089279 / Physical value of the reference pixel Y CRPIX1 = -34.0 / Reference pixel in X (pixel) CRPIX2 = 2041.0 / Reference pixel in Y (pixel) CDELT1 = 0.10380000 / X Scale projected on detector (#/pix) CDELT2 = 0.10380000 / Y Scale projected on detector (#/pix) CTYPE1 = 'RA---TAN' / Pixel coordinate system CTYPE2 = 'WAVELENGTH' / Pixel coordinate system CUNIT1 = 'degree ' / Units used in both CRVAL1 and CDELT1 CUNIT2 = 'nm ' / Units used in both CRVAL2 and CDELT2 CD1_1 = 0.20760000 / Pixel Coordinate translation matrix CD1_2 = 0.00000000 / Pixel Coordinate translation matrix CD2_1 = 0.00000000 / Pixel Coordinate translation matrix CD2_2 = 0.10380000 / Pixel Coordinate translation matrix C2YPE1 = 'RA---TAN' / Pixel coordinate system C2YPE2 = 'DEC--TAN' / Pixel coordinate system C2NIT1 = 'degree ' / Units used in both C2VAL1 and C2ELT1 C2NIT2 = 'degree ' / Units used in both C2VAL2 and C2ELT2 RADECSYS= 'FK5 ' / The equatorial coordinate system ''') with catch_warnings(FITSFixedWarning): hdr = fits.Header.fromstring(hdr_txt_example_WCS, sep='\n') hdul = fits.HDUList([fits.PrimaryHDU(np.ones((4241, 1104)), header=hdr)]) filename = tmpdir.join('afile.fits').strpath hdul.writeto(filename) # The hdr cannot be converted to a WCS object because of an # InconsistentAxisTypesError but it should still open the file ccd = CCDData.read(filename, unit='adu') assert ccd.wcs is None def test_header(ccd_data): a = {'Observer': 'Hubble'} ccd = CCDData(ccd_data, header=a) assert ccd.meta == a def test_wcs_arithmetic(ccd_data): ccd_data.wcs = 5 result = ccd_data.multiply(1.0) assert result.wcs == 5 @pytest.mark.parametrize('operation', ['multiply', 'divide', 'add', 'subtract']) def test_wcs_arithmetic_ccd(ccd_data, operation): ccd_data2 = ccd_data.copy() ccd_data.wcs = 5 method = ccd_data.__getattribute__(operation) result = method(ccd_data2) assert result.wcs == ccd_data.wcs assert ccd_data2.wcs is None def test_wcs_sip_handling(): """ Check whether the ctypes RA---TAN-SIP and DEC--TAN-SIP survive a roundtrip unchanged. """ data_file = get_pkg_data_filename('data/sip-wcs.fits') def check_wcs_ctypes(header): expected_wcs_ctypes = { 'CTYPE1': 'RA---TAN-SIP', 'CTYPE2': 'DEC--TAN-SIP' } return [header[k] == v for k, v in expected_wcs_ctypes.items()] ccd_original = CCDData.read(data_file) # After initialization the keywords should be in the WCS, not in the # meta. 
    with fits.open(data_file) as raw:
        good_ctype = check_wcs_ctypes(raw[0].header)
    assert all(good_ctype)

    ccd_new = ccd_original.to_hdu()
    good_ctype = check_wcs_ctypes(ccd_new[0].header)
    assert all(good_ctype)

    # Try converting to header with wcs_relax=False and
    # the header should contain the CTYPE keywords without
    # the -SIP
    ccd_no_relax = ccd_original.to_hdu(wcs_relax=False)
    good_ctype = check_wcs_ctypes(ccd_no_relax[0].header)
    assert not any(good_ctype)
    assert ccd_no_relax[0].header['CTYPE1'] == 'RA---TAN'
    assert ccd_no_relax[0].header['CTYPE2'] == 'DEC--TAN'


@pytest.mark.parametrize('operation',
                         ['multiply', 'divide', 'add', 'subtract'])
def test_mask_arithmetic_ccd(ccd_data, operation):
    ccd_data2 = ccd_data.copy()
    ccd_data.mask = (ccd_data.data > 0)
    method = ccd_data.__getattribute__(operation)
    result = method(ccd_data2)
    np.testing.assert_equal(result.mask, ccd_data.mask)


def test_write_read_multiextensionfits_mask_default(ccd_data, tmpdir):
    # Test that if a mask is present the mask is saved and loaded by default.
    ccd_data.mask = ccd_data.data > 10
    filename = tmpdir.join('afile.fits').strpath
    ccd_data.write(filename)
    ccd_after = CCDData.read(filename)
    assert ccd_after.mask is not None
    np.testing.assert_array_equal(ccd_data.mask, ccd_after.mask)


def test_write_read_multiextensionfits_uncertainty_default(ccd_data, tmpdir):
    # Test that if an uncertainty is present it is saved and loaded by default.
    ccd_data.uncertainty = StdDevUncertainty(ccd_data.data * 10)
    filename = tmpdir.join('afile.fits').strpath
    ccd_data.write(filename)
    ccd_after = CCDData.read(filename)
    assert ccd_after.uncertainty is not None
    np.testing.assert_array_equal(ccd_data.uncertainty.array,
                                  ccd_after.uncertainty.array)


def test_write_read_multiextensionfits_not(ccd_data, tmpdir):
    # Test that writing mask and uncertainty can be disabled
    ccd_data.mask = ccd_data.data > 10
    ccd_data.uncertainty = StdDevUncertainty(ccd_data.data * 10)
    filename = tmpdir.join('afile.fits').strpath
    ccd_data.write(filename, hdu_mask=None, hdu_uncertainty=None)
    ccd_after = CCDData.read(filename)
    assert ccd_after.uncertainty is None
    assert ccd_after.mask is None


def test_write_read_multiextensionfits_custom_ext_names(ccd_data, tmpdir):
    # Test writing mask, uncertainty in another extension than default
    ccd_data.mask = ccd_data.data > 10
    ccd_data.uncertainty = StdDevUncertainty(ccd_data.data * 10)
    filename = tmpdir.join('afile.fits').strpath
    ccd_data.write(filename, hdu_mask='Fun', hdu_uncertainty='NoFun')

    # Try reading with default extension names
    ccd_after = CCDData.read(filename)
    assert ccd_after.uncertainty is None
    assert ccd_after.mask is None

    # Try reading with custom extension names
    ccd_after = CCDData.read(filename, hdu_mask='Fun', hdu_uncertainty='NoFun')
    assert ccd_after.uncertainty is not None
    assert ccd_after.mask is not None
    np.testing.assert_array_equal(ccd_data.mask, ccd_after.mask)
    np.testing.assert_array_equal(ccd_data.uncertainty.array,
                                  ccd_after.uncertainty.array)


def test_wcs(ccd_data):
    ccd_data.wcs = 5
    assert ccd_data.wcs == 5


def test_recognized_fits_formats_for_read_write(ccd_data, tmpdir):
    # These are the extensions that are supposed to be supported.
    supported_extensions = ['fit', 'fits', 'fts']
    for ext in supported_extensions:
        path = tmpdir.join("test.{}".format(ext))
        ccd_data.write(path.strpath)
        from_disk = CCDData.read(path.strpath)
        assert (ccd_data.data == from_disk.data).all()


def test_stddevuncertainty_compat_descriptor_no_parent():
    with pytest.raises(MissingDataAssociationException):
        StdDevUncertainty(np.ones((10, 10))).parent_nddata


def test_stddevuncertainty_compat_descriptor_no_weakref():
    # TODO: Remove this test if astropy 1.0 isn't supported anymore
    # This test might create a memory leak on purpose, so the last lines after
    # the assert are IMPORTANT cleanup.
    ccd = CCDData(np.ones((10, 10)), unit='')
    uncert = StdDevUncertainty(np.ones((10, 10)))
    uncert._parent_nddata = ccd
    assert uncert.parent_nddata is ccd
    uncert._parent_nddata = None
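The tests above exercise the CCDData round trip through FITS. The following is a minimal sketch of that round trip, assuming the public astropy.nddata API; the file name and data values are illustrative and are not taken from the test module.

# Hedged example: create a CCDData with a unit, mask and uncertainty, write it
# to a multi-extension FITS file and read it back.
import numpy as np
from astropy import units as u
from astropy.nddata import CCDData, StdDevUncertainty

ccd = CCDData(np.random.random((10, 10)), unit=u.adu)
ccd.mask = ccd.data > 0.5
ccd.uncertainty = StdDevUncertainty(np.sqrt(ccd.data))

ccd.write('roundtrip.fits', overwrite=True)   # mask/uncertainty go to extra HDUs
restored = CCDData.read('roundtrip.fits')
assert restored.unit == u.adu
assert restored.mask is not None and restored.uncertainty is not None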
funbaker/astropy
astropy/nddata/tests/test_ccddata.py
astropy/tests/runner.py
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This is a collection of monkey patches and workarounds for bugs in
earlier versions of Numpy.
"""
from ...utils import minversion

__all__ = ['NUMPY_LT_1_10_4', 'NUMPY_LT_1_11', 'NUMPY_LT_1_11_2',
           'NUMPY_LT_1_12', 'NUMPY_LT_1_13', 'NUMPY_LT_1_14']

# TODO: It might also be nice to have aliases to these named for specific
# features/bugs we're checking for (ex:
# astropy.table.table._BROKEN_UNICODE_TABLE_SORT)
NUMPY_LT_1_10_4 = not minversion('numpy', '1.10.4')
NUMPY_LT_1_11 = not minversion('numpy', '1.11.0')
NUMPY_LT_1_11_2 = not minversion('numpy', '1.11.2')
NUMPY_LT_1_12 = not minversion('numpy', '1.12')
NUMPY_LT_1_13 = not minversion('numpy', '1.13')
NUMPY_LT_1_14 = not minversion('numpy', '1.14dev')
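A hedged sketch of how such a flag is consumed downstream. The NUMPY_LT_1_15 name and the helper below are hypothetical and only follow the same idiom as the constants defined above.

from astropy.utils import minversion

# Hypothetical flag, computed once at import time with the same idiom as above.
NUMPY_LT_1_15 = not minversion('numpy', '1.15')


def preferred_sort_kind():
    # Call sites branch on the precomputed boolean instead of re-parsing
    # numpy.__version__ themselves ('stable' sort was added in numpy 1.15).
    return 'mergesort' if NUMPY_LT_1_15 else 'stable'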
funbaker/astropy
astropy/nddata/tests/test_ccddata.py
astropy/utils/compat/numpycompat.py
from typing import List, cast import numpy as np from pandas._typing import FilePathOrBuffer, Scalar, StorageOptions from pandas.compat._optional import import_optional_dependency import pandas as pd from pandas.io.excel._base import BaseExcelReader class ODFReader(BaseExcelReader): """ Read tables out of OpenDocument formatted files. Parameters ---------- filepath_or_buffer : string, path to be parsed or an open readable stream. storage_options : dict, optional passed to fsspec for appropriate URLs (see ``_get_filepath_or_buffer``) """ def __init__( self, filepath_or_buffer: FilePathOrBuffer, storage_options: StorageOptions = None, ): import_optional_dependency("odf") super().__init__(filepath_or_buffer, storage_options=storage_options) @property def _workbook_class(self): from odf.opendocument import OpenDocument return OpenDocument def load_workbook(self, filepath_or_buffer: FilePathOrBuffer): from odf.opendocument import load return load(filepath_or_buffer) @property def empty_value(self) -> str: """Property for compat with other readers.""" return "" @property def sheet_names(self) -> List[str]: """Return a list of sheet names present in the document""" from odf.table import Table tables = self.book.getElementsByType(Table) return [t.getAttribute("name") for t in tables] def get_sheet_by_index(self, index: int): from odf.table import Table tables = self.book.getElementsByType(Table) return tables[index] def get_sheet_by_name(self, name: str): from odf.table import Table tables = self.book.getElementsByType(Table) for table in tables: if table.getAttribute("name") == name: return table self.close() raise ValueError(f"sheet {name} not found") def get_sheet_data(self, sheet, convert_float: bool) -> List[List[Scalar]]: """ Parse an ODF Table into a list of lists """ from odf.table import CoveredTableCell, TableCell, TableRow covered_cell_name = CoveredTableCell().qname table_cell_name = TableCell().qname cell_names = {covered_cell_name, table_cell_name} sheet_rows = sheet.getElementsByType(TableRow) empty_rows = 0 max_row_len = 0 table: List[List[Scalar]] = [] for i, sheet_row in enumerate(sheet_rows): sheet_cells = [x for x in sheet_row.childNodes if x.qname in cell_names] empty_cells = 0 table_row: List[Scalar] = [] for j, sheet_cell in enumerate(sheet_cells): if sheet_cell.qname == table_cell_name: value = self._get_cell_value(sheet_cell, convert_float) else: value = self.empty_value column_repeat = self._get_column_repeat(sheet_cell) # Queue up empty values, writing only if content succeeds them if value == self.empty_value: empty_cells += column_repeat else: table_row.extend([self.empty_value] * empty_cells) empty_cells = 0 table_row.extend([value] * column_repeat) if max_row_len < len(table_row): max_row_len = len(table_row) row_repeat = self._get_row_repeat(sheet_row) if self._is_empty_row(sheet_row): empty_rows += row_repeat else: # add blank rows to our table table.extend([[self.empty_value]] * empty_rows) empty_rows = 0 for _ in range(row_repeat): table.append(table_row) # Make our table square for row in table: if len(row) < max_row_len: row.extend([self.empty_value] * (max_row_len - len(row))) return table def _get_row_repeat(self, row) -> int: """ Return number of times this row was repeated Repeating an empty row appeared to be a common way of representing sparse rows in the table. 
""" from odf.namespaces import TABLENS return int(row.attributes.get((TABLENS, "number-rows-repeated"), 1)) def _get_column_repeat(self, cell) -> int: from odf.namespaces import TABLENS return int(cell.attributes.get((TABLENS, "number-columns-repeated"), 1)) def _is_empty_row(self, row) -> bool: """ Helper function to find empty rows """ for column in row.childNodes: if len(column.childNodes) > 0: return False return True def _get_cell_value(self, cell, convert_float: bool) -> Scalar: from odf.namespaces import OFFICENS if str(cell) == "#N/A": return np.nan cell_type = cell.attributes.get((OFFICENS, "value-type")) if cell_type == "boolean": if str(cell) == "TRUE": return True return False if cell_type is None: return self.empty_value elif cell_type == "float": # GH5394 cell_value = float(cell.attributes.get((OFFICENS, "value"))) if convert_float: val = int(cell_value) if val == cell_value: return val return cell_value elif cell_type == "percentage": cell_value = cell.attributes.get((OFFICENS, "value")) return float(cell_value) elif cell_type == "string": return self._get_cell_string_value(cell) elif cell_type == "currency": cell_value = cell.attributes.get((OFFICENS, "value")) return float(cell_value) elif cell_type == "date": cell_value = cell.attributes.get((OFFICENS, "date-value")) return pd.to_datetime(cell_value) elif cell_type == "time": result = pd.to_datetime(str(cell)) result = cast(pd.Timestamp, result) return result.time() else: self.close() raise ValueError(f"Unrecognized type {cell_type}") def _get_cell_string_value(self, cell) -> str: """ Find and decode OpenDocument text:s tags that represent a run length encoded sequence of space characters. """ from odf.element import Element from odf.namespaces import TEXTNS from odf.text import S text_s = S().qname value = [] for fragment in cell.childNodes: if isinstance(fragment, Element): if fragment.qname == text_s: spaces = int(fragment.attributes.get((TEXTNS, "c"), 1)) value.append(" " * spaces) else: # recursive impl needed in case of nested fragments # with multiple spaces # https://github.com/pandas-dev/pandas/pull/36175#discussion_r484639704 value.append(self._get_cell_string_value(fragment)) else: value.append(str(fragment)) return "".join(value)
from datetime import date, datetime

from hypothesis import given, strategies as st
import numpy as np
import pytest

from pandas._libs.tslibs import ccalendar

import pandas as pd


@pytest.mark.parametrize(
    "date_tuple,expected",
    [
        ((2001, 3, 1), 60),
        ((2004, 3, 1), 61),
        ((1907, 12, 31), 365),  # End-of-year, non-leap year.
        ((2004, 12, 31), 366),  # End-of-year, leap year.
    ],
)
def test_get_day_of_year_numeric(date_tuple, expected):
    assert ccalendar.get_day_of_year(*date_tuple) == expected


def test_get_day_of_year_dt():
    dt = datetime.fromordinal(1 + np.random.randint(365 * 4000))
    result = ccalendar.get_day_of_year(dt.year, dt.month, dt.day)

    expected = (dt - dt.replace(month=1, day=1)).days + 1
    assert result == expected


@pytest.mark.parametrize(
    "input_date_tuple, expected_iso_tuple",
    [
        [(2020, 1, 1), (2020, 1, 3)],
        [(2019, 12, 31), (2020, 1, 2)],
        [(2019, 12, 30), (2020, 1, 1)],
        [(2009, 12, 31), (2009, 53, 4)],
        [(2010, 1, 1), (2009, 53, 5)],
        [(2010, 1, 3), (2009, 53, 7)],
        [(2010, 1, 4), (2010, 1, 1)],
        [(2006, 1, 1), (2005, 52, 7)],
        [(2005, 12, 31), (2005, 52, 6)],
        [(2008, 12, 28), (2008, 52, 7)],
        [(2008, 12, 29), (2009, 1, 1)],
    ],
)
def test_dt_correct_iso_8601_year_week_and_day(input_date_tuple, expected_iso_tuple):
    result = ccalendar.get_iso_calendar(*input_date_tuple)
    expected_from_date_isocalendar = date(*input_date_tuple).isocalendar()
    assert result == expected_from_date_isocalendar
    assert result == expected_iso_tuple


@given(
    st.datetimes(
        min_value=pd.Timestamp.min.to_pydatetime(warn=False),
        max_value=pd.Timestamp.max.to_pydatetime(warn=False),
    )
)
def test_isocalendar(dt):
    expected = dt.isocalendar()
    result = ccalendar.get_iso_calendar(dt.year, dt.month, dt.day)
    assert result == expected
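A brief usage sketch for the ODFReader engine shown earlier in this row: pandas dispatches to it when read_excel is called with engine="odf". The file name below is a placeholder, and the optional odfpy dependency must be installed.

# Illustrative only -- "example.ods" is a placeholder path.
import pandas as pd

df = pd.read_excel("example.ods", engine="odf", sheet_name=0)
print(df.head())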
jreback/pandas
pandas/tests/tslibs/test_ccalendar.py
pandas/io/excel/_odfreader.py
"""
Helpers for configuring locale settings.

Name `localization` is chosen to avoid overlap with builtin `locale` module.
"""
from contextlib import contextmanager
import locale
import re
import subprocess

from pandas._config.config import options


@contextmanager
def set_locale(new_locale, lc_var: int = locale.LC_ALL):
    """
    Context manager for temporarily setting a locale.

    Parameters
    ----------
    new_locale : str or tuple
        A string of the form <language_country>.<encoding>. For example to set
        the current locale to US English with a UTF8 encoding, you would pass
        "en_US.UTF-8".
    lc_var : int, default `locale.LC_ALL`
        The category of the locale being set.

    Notes
    -----
    This is useful when you want to run a particular block of code under a
    particular locale, without globally setting the locale. This probably
    isn't thread-safe.
    """
    current_locale = locale.getlocale()

    try:
        locale.setlocale(lc_var, new_locale)
        normalized_locale = locale.getlocale()
        if all(x is not None for x in normalized_locale):
            yield ".".join(normalized_locale)
        else:
            yield new_locale
    finally:
        locale.setlocale(lc_var, current_locale)


def can_set_locale(lc: str, lc_var: int = locale.LC_ALL) -> bool:
    """
    Check to see if we can set a locale, and subsequently get the locale,
    without raising an Exception.

    Parameters
    ----------
    lc : str
        The locale to attempt to set.
    lc_var : int, default `locale.LC_ALL`
        The category of the locale being set.

    Returns
    -------
    bool
        Whether the passed locale can be set
    """
    try:
        with set_locale(lc, lc_var=lc_var):
            pass
    except (ValueError, locale.Error):
        # horrible name for an Exception subclass
        return False
    else:
        return True


def _valid_locales(locales, normalize):
    """
    Return a list of normalized locales that do not throw an ``Exception``
    when set.

    Parameters
    ----------
    locales : str
        A string where each locale is separated by a newline.
    normalize : bool
        Whether to call ``locale.normalize`` on each locale.

    Returns
    -------
    valid_locales : list
        A list of valid locales.
    """
    return [
        loc
        for loc in (
            locale.normalize(loc.strip()) if normalize else loc.strip()
            for loc in locales
        )
        if can_set_locale(loc)
    ]


def _default_locale_getter():
    return subprocess.check_output(["locale -a"], shell=True)


def get_locales(prefix=None, normalize=True, locale_getter=_default_locale_getter):
    """
    Get all the locales that are available on the system.

    Parameters
    ----------
    prefix : str
        If not ``None`` then return only those locales with the prefix
        provided. For example to get all English language locales (those that
        start with ``"en"``), pass ``prefix="en"``.
    normalize : bool
        Call ``locale.normalize`` on the resulting list of available locales.
        If ``True``, only locales that can be set without throwing an
        ``Exception`` are returned.
    locale_getter : callable
        The function to use to retrieve the current locales. This should
        return a string with each locale separated by a newline character.

    Returns
    -------
    locales : list of strings
        A list of locale strings that can be set with ``locale.setlocale()``.
        For example::

            locale.setlocale(locale.LC_ALL, locale_string)

        On error will return None (no locale available, e.g. Windows)
    """
    try:
        raw_locales = locale_getter()
    except subprocess.CalledProcessError:
        # Raised on (some? all?) Windows platforms because "locale -a"
        # is not defined
        return None

    try:
        # raw_locales is "\n" separated list of locales
        # it may contain non-decodable parts, so split
        # extract what we can and then rejoin.
        raw_locales = raw_locales.split(b"\n")
        out_locales = []
        for x in raw_locales:
            try:
                out_locales.append(str(x, encoding=options.display.encoding))
            except UnicodeError:
                # 'locale -a' is used to populate 'raw_locales' and on
                # Redhat 7 Linux (and maybe others) prints locale names
                # using windows-1252 encoding. Bug only triggered by
                # a few special characters and when there is an
                # extensive list of installed locales.
                out_locales.append(str(x, encoding="windows-1252"))
    except TypeError:
        pass

    if prefix is None:
        return _valid_locales(out_locales, normalize)

    pattern = re.compile(f"{prefix}.*")
    found = pattern.findall("\n".join(out_locales))
    return _valid_locales(found, normalize)
from datetime import date, datetime from hypothesis import given, strategies as st import numpy as np import pytest from pandas._libs.tslibs import ccalendar import pandas as pd @pytest.mark.parametrize( "date_tuple,expected", [ ((2001, 3, 1), 60), ((2004, 3, 1), 61), ((1907, 12, 31), 365), # End-of-year, non-leap year. ((2004, 12, 31), 366), # End-of-year, leap year. ], ) def test_get_day_of_year_numeric(date_tuple, expected): assert ccalendar.get_day_of_year(*date_tuple) == expected def test_get_day_of_year_dt(): dt = datetime.fromordinal(1 + np.random.randint(365 * 4000)) result = ccalendar.get_day_of_year(dt.year, dt.month, dt.day) expected = (dt - dt.replace(month=1, day=1)).days + 1 assert result == expected @pytest.mark.parametrize( "input_date_tuple, expected_iso_tuple", [ [(2020, 1, 1), (2020, 1, 3)], [(2019, 12, 31), (2020, 1, 2)], [(2019, 12, 30), (2020, 1, 1)], [(2009, 12, 31), (2009, 53, 4)], [(2010, 1, 1), (2009, 53, 5)], [(2010, 1, 3), (2009, 53, 7)], [(2010, 1, 4), (2010, 1, 1)], [(2006, 1, 1), (2005, 52, 7)], [(2005, 12, 31), (2005, 52, 6)], [(2008, 12, 28), (2008, 52, 7)], [(2008, 12, 29), (2009, 1, 1)], ], ) def test_dt_correct_iso_8601_year_week_and_day(input_date_tuple, expected_iso_tuple): result = ccalendar.get_iso_calendar(*input_date_tuple) expected_from_date_isocalendar = date(*input_date_tuple).isocalendar() assert result == expected_from_date_isocalendar assert result == expected_iso_tuple @given( st.datetimes( min_value=pd.Timestamp.min.to_pydatetime(warn=False), max_value=pd.Timestamp.max.to_pydatetime(warn=False), ) ) def test_isocalendar(dt): expected = dt.isocalendar() result = ccalendar.get_iso_calendar(dt.year, dt.month, dt.day) assert result == expected
jreback/pandas
pandas/tests/tslibs/test_ccalendar.py
pandas/_config/localization.py
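A short usage sketch for the helpers defined in pandas/_config/localization.py above; the concrete locale string is an assumption and depends on what the host system has installed:

import locale
from pandas._config.localization import can_set_locale, get_locales, set_locale

# All installed locales starting with "en"; returns None where `locale -a` is unavailable.
english_locales = get_locales(prefix="en")

# Guard with can_set_locale() so the context manager is only entered when it can succeed.
if can_set_locale("en_US.UTF-8"):
    with set_locale("en_US.UTF-8", lc_var=locale.LC_ALL) as name:
        print(name, locale.getlocale())   # the locale is switched only inside this block
# On exit the previous locale is restored, as the finally clause above guarantees.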
from typing import Optional, Type import pytest import pandas as pd import pandas._testing as tm from pandas.core import ops from .base import BaseExtensionTests class BaseOpsUtil(BaseExtensionTests): def get_op_from_name(self, op_name): return tm.get_op_from_name(op_name) def check_opname(self, s, op_name, other, exc=Exception): op = self.get_op_from_name(op_name) self._check_op(s, op, other, op_name, exc) def _check_op(self, s, op, other, op_name, exc=NotImplementedError): if exc is None: result = op(s, other) if isinstance(s, pd.DataFrame): if len(s.columns) != 1: raise NotImplementedError expected = s.iloc[:, 0].combine(other, op).to_frame() self.assert_frame_equal(result, expected) else: expected = s.combine(other, op) self.assert_series_equal(result, expected) else: with pytest.raises(exc): op(s, other) def _check_divmod_op(self, s, op, other, exc=Exception): # divmod has multiple return values, so check separately if exc is None: result_div, result_mod = op(s, other) if op is divmod: expected_div, expected_mod = s // other, s % other else: expected_div, expected_mod = other // s, other % s self.assert_series_equal(result_div, expected_div) self.assert_series_equal(result_mod, expected_mod) else: with pytest.raises(exc): divmod(s, other) class BaseArithmeticOpsTests(BaseOpsUtil): """ Various Series and DataFrame arithmetic ops methods. Subclasses supporting various ops should set the class variables to indicate that they support ops of that kind * series_scalar_exc = TypeError * frame_scalar_exc = TypeError * series_array_exc = TypeError * divmod_exc = TypeError """ series_scalar_exc: Optional[Type[TypeError]] = TypeError frame_scalar_exc: Optional[Type[TypeError]] = TypeError series_array_exc: Optional[Type[TypeError]] = TypeError divmod_exc: Optional[Type[TypeError]] = TypeError def test_arith_series_with_scalar(self, data, all_arithmetic_operators): # series & scalar op_name = all_arithmetic_operators s = pd.Series(data) self.check_opname(s, op_name, s.iloc[0], exc=self.series_scalar_exc) @pytest.mark.xfail(run=False, reason="_reduce needs implementation") def test_arith_frame_with_scalar(self, data, all_arithmetic_operators): # frame & scalar op_name = all_arithmetic_operators df = pd.DataFrame({"A": data}) self.check_opname(df, op_name, data[0], exc=self.frame_scalar_exc) def test_arith_series_with_array(self, data, all_arithmetic_operators): # ndarray & other series op_name = all_arithmetic_operators s = pd.Series(data) self.check_opname( s, op_name, pd.Series([s.iloc[0]] * len(s)), exc=self.series_array_exc ) def test_divmod(self, data): s = pd.Series(data) self._check_divmod_op(s, divmod, 1, exc=self.divmod_exc) self._check_divmod_op(1, ops.rdivmod, s, exc=self.divmod_exc) def test_divmod_series_array(self, data, data_for_twos): s = pd.Series(data) self._check_divmod_op(s, divmod, data) other = data_for_twos self._check_divmod_op(other, ops.rdivmod, s) other = pd.Series(other) self._check_divmod_op(other, ops.rdivmod, s) def test_add_series_with_extension_array(self, data): s = pd.Series(data) result = s + data expected = pd.Series(data + data) self.assert_series_equal(result, expected) def test_error(self, data, all_arithmetic_operators): # invalid ops op_name = all_arithmetic_operators with pytest.raises(AttributeError): getattr(data, op_name) @pytest.mark.parametrize("box", [pd.Series, pd.DataFrame]) def test_direct_arith_with_ndframe_returns_not_implemented(self, data, box): # EAs should return NotImplemented for ops with Series/DataFrame # Pandas takes care of unboxing 
the series and calling the EA's op. other = pd.Series(data) if box is pd.DataFrame: other = other.to_frame() if hasattr(data, "__add__"): result = data.__add__(other) assert result is NotImplemented else: raise pytest.skip(f"{type(data).__name__} does not implement add") class BaseComparisonOpsTests(BaseOpsUtil): """Various Series and DataFrame comparison ops methods.""" def _compare_other(self, s, data, op_name, other): op = self.get_op_from_name(op_name) if op_name == "__eq__": assert not op(s, other).all() elif op_name == "__ne__": assert op(s, other).all() else: # array assert getattr(data, op_name)(other) is NotImplemented # series s = pd.Series(data) with pytest.raises(TypeError): op(s, other) def test_compare_scalar(self, data, all_compare_operators): op_name = all_compare_operators s = pd.Series(data) self._compare_other(s, data, op_name, 0) def test_compare_array(self, data, all_compare_operators): op_name = all_compare_operators s = pd.Series(data) other = pd.Series([data[0]] * len(data)) self._compare_other(s, data, op_name, other) @pytest.mark.parametrize("box", [pd.Series, pd.DataFrame]) def test_direct_arith_with_ndframe_returns_not_implemented(self, data, box): # EAs should return NotImplemented for ops with Series/DataFrame # Pandas takes care of unboxing the series and calling the EA's op. other = pd.Series(data) if box is pd.DataFrame: other = other.to_frame() if hasattr(data, "__eq__"): result = data.__eq__(other) assert result is NotImplemented else: raise pytest.skip(f"{type(data).__name__} does not implement __eq__") if hasattr(data, "__ne__"): result = data.__ne__(other) assert result is NotImplemented else: raise pytest.skip(f"{type(data).__name__} does not implement __ne__") class BaseUnaryOpsTests(BaseOpsUtil): def test_invert(self, data): s = pd.Series(data, name="name") result = ~s expected = pd.Series(~data, name="name") self.assert_series_equal(result, expected)
from datetime import date, datetime from hypothesis import given, strategies as st import numpy as np import pytest from pandas._libs.tslibs import ccalendar import pandas as pd @pytest.mark.parametrize( "date_tuple,expected", [ ((2001, 3, 1), 60), ((2004, 3, 1), 61), ((1907, 12, 31), 365), # End-of-year, non-leap year. ((2004, 12, 31), 366), # End-of-year, leap year. ], ) def test_get_day_of_year_numeric(date_tuple, expected): assert ccalendar.get_day_of_year(*date_tuple) == expected def test_get_day_of_year_dt(): dt = datetime.fromordinal(1 + np.random.randint(365 * 4000)) result = ccalendar.get_day_of_year(dt.year, dt.month, dt.day) expected = (dt - dt.replace(month=1, day=1)).days + 1 assert result == expected @pytest.mark.parametrize( "input_date_tuple, expected_iso_tuple", [ [(2020, 1, 1), (2020, 1, 3)], [(2019, 12, 31), (2020, 1, 2)], [(2019, 12, 30), (2020, 1, 1)], [(2009, 12, 31), (2009, 53, 4)], [(2010, 1, 1), (2009, 53, 5)], [(2010, 1, 3), (2009, 53, 7)], [(2010, 1, 4), (2010, 1, 1)], [(2006, 1, 1), (2005, 52, 7)], [(2005, 12, 31), (2005, 52, 6)], [(2008, 12, 28), (2008, 52, 7)], [(2008, 12, 29), (2009, 1, 1)], ], ) def test_dt_correct_iso_8601_year_week_and_day(input_date_tuple, expected_iso_tuple): result = ccalendar.get_iso_calendar(*input_date_tuple) expected_from_date_isocalendar = date(*input_date_tuple).isocalendar() assert result == expected_from_date_isocalendar assert result == expected_iso_tuple @given( st.datetimes( min_value=pd.Timestamp.min.to_pydatetime(warn=False), max_value=pd.Timestamp.max.to_pydatetime(warn=False), ) ) def test_isocalendar(dt): expected = dt.isocalendar() result = ccalendar.get_iso_calendar(dt.year, dt.month, dt.day) assert result == expected
jreback/pandas
pandas/tests/tslibs/test_ccalendar.py
pandas/tests/extension/base/ops.py
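The mixins in pandas/tests/extension/base/ops.py above are meant to be subclassed once per extension array; a hedged sketch of that wiring, with the concrete array and its fixtures left to the suite's conftest (the class name is illustrative):

from pandas.tests.extension import base

class TestArithmeticOps(base.BaseArithmeticOpsTests):
    # Per the class docstring above, subclasses tune behaviour via the
    # expected-exception class attributes: None means "this op should succeed",
    # an exception type means check_opname/_check_divmod_op expect it to raise.
    series_scalar_exc = None
    frame_scalar_exc = None
    series_array_exc = None
    divmod_exc = TypeError

# The `data`, `data_for_twos` and `all_arithmetic_operators` fixtures are supplied
# by the extension test suite's conftest for the concrete array under test.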
import enum import warnings from collections import ChainMap from .misc import utils, abc from .. import consts from .accounts.records import Record from . import images, posts from pymongo import MongoClient from bson.objectid import ObjectId from jinja2 import Template from jinja2.filters import do_striptags as striptags from string import ascii_uppercase, ascii_lowercase import threading client = MongoClient() _db, _collection = consts.MONGO['notifications'] not_collection = client[_db][_collection] _types = [ 'mention', 'image_reply', 'post_reply', 'image_share', 'post_share', 'friend', 'follower' ] # Order is stable, this approach is safe NTYPES = enum.IntEnum('NOTIFICATION_TYPES', _types) _mtrans = str.maketrans({u: '_' + l for u, l in zip(ascii_uppercase, ascii_lowercase)}) def _lower_under(clsname): return clsname.translate(_mtrans).lstrip('_') class Notification(abc.Item): ''' This class is different from Post and Image in both purpose and implementation. ''' def __init__(self, notification=None): self._fields = {} self.params = {} if notification: notification = ObjectId(notification) data = not_collection.find_one({self.pk: notification}) or {} self._init_setfields(self, data) def _prepare(self): if self.owner and not isinstance(self.owner, Record): self._setfields(self, {'owner': Record(id=self.owner)}) if self._type_map: for field, init in self._type_map.items(): self.params[field] = init(getattr(self, field)) def get_html(self, tpl): if not isinstance(tpl, Template): raise TypeError('Expected {!r}, got {!r}'.format(Template, type(tpl))) macro = getattr(tpl.module, _lower_under(type(self).__name__)) params = {a: self.params[a] for a in macro.arguments if a in self._type_map} return macro(**params) def get_text(self, tpl=None): if not tpl: return '' return striptags(self.get_html(tpl)) @classmethod def new(cls, acct, **ka): if not isinstance(acct, Record): raise TypeError doc = {'owner': acct.id} if getattr(cls, '_ntype', None) is not None: doc['type'] = cls._ntype doc.update(ka) for field, value in ka.items(): if isinstance(value, abc.Pkeyed): doc[field] = value.id not_collection.insert_one(doc) return cls.fromdata(doc) @classmethod def delete_unread(cls, acct): if not isinstance(acct, Record): raise TypeError filt = {'owner': acct.id} if getattr(cls, '_ntype', None) is not None: filt['type'] = cls._ntype not_collection.delete_many(filt) @classmethod def delete(cls, acct, ids): if not isinstance(acct, Record): raise TypeError if not ids: raise ValueError('Nothing to delete') ins = cls.instances(ids) # Make a list excluding not `acct`'s images valid = [x for x in ins if x.owner == acct] filt = {'owner': acct.id, cls.pk: {'$in': [x.id for x in valid]}} if getattr(cls, '_ntype', None) is not None: filt['type'] = cls._ntype not_collection.delete_many(filt) def __repr__(self): return '' class MentionNotification(Notification): _type_map = { 'other': lambda x: Record(id=x), 'post': posts.Post } _ntype = NTYPES.mention class ReplyPostNotification(Notification): _type_map = { 'other': lambda x: Record(id=x), 'post': posts.Post } _ntype = NTYPES.post_reply class ReplyImageNotification(Notification): _type_map = { 'other': lambda x: Record(id=x), 'image': images.Image } _ntype = NTYPES.image_reply class SharedPostNotification(Notification): _type_map = { 'other': lambda x: Record(id=x), 'post': posts.Post } _ntype = NTYPES.post_share class SharedImageNotification(Notification): _type_map = { 'other': lambda x: Record(id=x), 'image': images.Image } _ntype = NTYPES.image_share class 
FriendNotification(Notification): _type_map = { 'other': lambda x: Record(id=x), } _ntype = NTYPES.friend class FollowerNotification(Notification): _type_map = { 'other': lambda x: Record(id=x), } _ntype = NTYPES.follower _ntypes_map = {x._ntype: x for x in Notification.__subclasses__()} def load(acct, ntype=None, only_unread=True): if not isinstance(acct, Record): raise TypeError if only_unread: filt= {'owner': acct.id} if ntype is not None: filt['type'] = ntype it = not_collection.find(filt) yield from (_ntypes_map[x['type']].fromdata(x) for x in it) else: # TODO return def emit_item(item, tpl, types=None): if not isinstance(item, (posts.Post, images.Image)): raise TypeError if types is None: types = iter(_ntypes_map.values()) for t in types: if not isinstance(item, posts.Post): continue # We don't have notifications about other types yet if item.content and t is MentionNotification: gen = (Record(name=x) for x in utils.mentions(item.content)) for acct in gen: t.new(acct, post=item, other=item.owner) elif item.base: _shared = {SharedImageNotification, SharedPostNotification} _replied = {ReplyImageNotification, ReplyPostNotification} if t not in _shared | _replied: continue base = list(posts.parents(item))[-2] if t is SharedPostNotification and isinstance(base, posts.Post): t.new(base.owner, post=base, other=item.owner) elif t is SharedImageNotification and isinstance(base, images.Image): t.new(base.owner, image=base, other=item.owner) elif t is ReplyPostNotification and isinstance(base, posts.Post): t.new(base.owner, post=base, other=item.owner) elif t is ReplyImageNotification and isinstance(base, images.Image): t.new(base.owner, image=base, other=item.owner) # # TODO: Notification params? # args = ({'action': 'notification', 'notification': str(obj), 'ids': ids},) # threading.Thread(target=wsinter.async_send, args=args, daemon=True).start() def emit_relations(): pass
import pytest from app.el import notifications as no from app.el.accounts.records import Record types = ('MentionNotification', 'ReplyPostNotification', 'ReplyImageNotification', 'SharedPostNotification', 'SharedImageNotification', 'FriendNotification', 'FollowerNotification') @pytest.mark.parametrize('nt', types, ids=types) def test_notifications(nt): this, other = Record.new(), Record.new() klass = getattr(no, nt) o = klass.new(this, other=other) assert o.good() assert o.id assert list(no.load(this)) no.Notification.delete(this, [o.id]) assert not list(no.load(this))
vaultah/L
app/tests/test_notifications.py
app/el/notifications.py
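One detail of the module above worth spelling out: get_html() selects the Jinja macro by lower-casing the notification class name with underscores. The check below reproduces the _lower_under transform from app/el/notifications.py, so templates are expected to define macros such as mention_notification or shared_image_notification:

from string import ascii_uppercase, ascii_lowercase

# Same translation table as _lower_under() in the module above.
_mtrans = str.maketrans({u: '_' + l for u, l in zip(ascii_uppercase, ascii_lowercase)})

def lower_under(clsname: str) -> str:
    return clsname.translate(_mtrans).lstrip('_')

assert lower_under('MentionNotification') == 'mention_notification'
assert lower_under('SharedImageNotification') == 'shared_image_notification'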
# flake8: noqa __docformat__ = "restructuredtext" # Let users know if they're missing any of our hard dependencies hard_dependencies = ("numpy", "pytz", "dateutil") missing_dependencies = [] for dependency in hard_dependencies: try: __import__(dependency) except ImportError as e: missing_dependencies.append(f"{dependency}: {e}") if missing_dependencies: raise ImportError( "Unable to import required dependencies:\n" + "\n".join(missing_dependencies) ) del hard_dependencies, dependency, missing_dependencies # numpy compat from pandas.compat import ( np_version_under1p18 as _np_version_under1p18, is_numpy_dev as _is_numpy_dev, ) try: from pandas._libs import hashtable as _hashtable, lib as _lib, tslib as _tslib except ImportError as e: # pragma: no cover # hack but overkill to use re module = str(e).replace("cannot import name ", "") raise ImportError( f"C extension: {module} not built. If you want to import " "pandas from the source directory, you may need to run " "'python setup.py build_ext --force' to build the C extensions first." ) from e from pandas._config import ( get_option, set_option, reset_option, describe_option, option_context, options, ) # let init-time option registration happen import pandas.core.config_init from pandas.core.api import ( # dtype Int8Dtype, Int16Dtype, Int32Dtype, Int64Dtype, UInt8Dtype, UInt16Dtype, UInt32Dtype, UInt64Dtype, Float32Dtype, Float64Dtype, CategoricalDtype, PeriodDtype, IntervalDtype, DatetimeTZDtype, StringDtype, BooleanDtype, # missing NA, isna, isnull, notna, notnull, # indexes Index, CategoricalIndex, Int64Index, UInt64Index, RangeIndex, Float64Index, MultiIndex, IntervalIndex, TimedeltaIndex, DatetimeIndex, PeriodIndex, IndexSlice, # tseries NaT, Period, period_range, Timedelta, timedelta_range, Timestamp, date_range, bdate_range, Interval, interval_range, DateOffset, # conversion to_numeric, to_datetime, to_timedelta, # misc Flags, Grouper, factorize, unique, value_counts, NamedAgg, array, Categorical, set_eng_float_format, Series, DataFrame, ) from pandas.core.arrays.sparse import SparseDtype from pandas.tseries.api import infer_freq from pandas.tseries import offsets from pandas.core.computation.api import eval from pandas.core.reshape.api import ( concat, lreshape, melt, wide_to_long, merge, merge_asof, merge_ordered, crosstab, pivot, pivot_table, get_dummies, cut, qcut, ) import pandas.api from pandas.util._print_versions import show_versions from pandas.io.api import ( # excel ExcelFile, ExcelWriter, read_excel, # parsers read_csv, read_fwf, read_table, # pickle read_pickle, to_pickle, # pytables HDFStore, read_hdf, # sql read_sql, read_sql_query, read_sql_table, # misc read_clipboard, read_parquet, read_orc, read_feather, read_gbq, read_html, read_xml, read_json, read_stata, read_sas, read_spss, ) from pandas.io.json import _json_normalize as json_normalize from pandas.util._tester import test import pandas.testing import pandas.arrays # use the closest tagged version if possible from pandas._version import get_versions v = get_versions() __version__ = v.get("closest-tag", v["version"]) __git_version__ = v.get("full-revisionid") del get_versions, v # GH 27101 def __getattr__(name): import warnings if name == "datetime": warnings.warn( "The pandas.datetime class is deprecated " "and will be removed from pandas in a future version. 
" "Import from datetime module instead.", FutureWarning, stacklevel=2, ) from datetime import datetime as dt return dt elif name == "np": warnings.warn( "The pandas.np module is deprecated " "and will be removed from pandas in a future version. " "Import numpy directly instead", FutureWarning, stacklevel=2, ) import numpy as np return np elif name in {"SparseSeries", "SparseDataFrame"}: warnings.warn( f"The {name} class is removed from pandas. Accessing it from " "the top-level namespace will also be removed in the next version", FutureWarning, stacklevel=2, ) return type(name, (), {}) elif name == "SparseArray": warnings.warn( "The pandas.SparseArray class is deprecated " "and will be removed from pandas in a future version. " "Use pandas.arrays.SparseArray instead.", FutureWarning, stacklevel=2, ) from pandas.core.arrays.sparse import SparseArray as _SparseArray return _SparseArray raise AttributeError(f"module 'pandas' has no attribute '{name}'") # module level doc-string __doc__ = """ pandas - a powerful data analysis and manipulation library for Python ===================================================================== **pandas** is a Python package providing fast, flexible, and expressive data structures designed to make working with "relational" or "labeled" data both easy and intuitive. It aims to be the fundamental high-level building block for doing practical, **real world** data analysis in Python. Additionally, it has the broader goal of becoming **the most powerful and flexible open source data analysis / manipulation tool available in any language**. It is already well on its way toward this goal. Main Features ------------- Here are just a few of the things that pandas does well: - Easy handling of missing data in floating point as well as non-floating point data. - Size mutability: columns can be inserted and deleted from DataFrame and higher dimensional objects - Automatic and explicit data alignment: objects can be explicitly aligned to a set of labels, or the user can simply ignore the labels and let `Series`, `DataFrame`, etc. automatically align the data for you in computations. - Powerful, flexible group by functionality to perform split-apply-combine operations on data sets, for both aggregating and transforming data. - Make it easy to convert ragged, differently-indexed data in other Python and NumPy data structures into DataFrame objects. - Intelligent label-based slicing, fancy indexing, and subsetting of large data sets. - Intuitive merging and joining data sets. - Flexible reshaping and pivoting of data sets. - Hierarchical labeling of axes (possible to have multiple labels per tick). - Robust IO tools for loading data from flat files (CSV and delimited), Excel files, databases, and saving/loading data from the ultrafast HDF5 format. - Time series-specific functionality: date range generation and frequency conversion, moving window statistics, date shifting and lagging. """
import operator import numpy as np import pytest from pandas import ( DataFrame, Index, Series, ) import pandas._testing as tm class TestMatMul: def test_matmul(self): # matmul test is for GH#10259 a = DataFrame( np.random.randn(3, 4), index=["a", "b", "c"], columns=["p", "q", "r", "s"] ) b = DataFrame( np.random.randn(4, 2), index=["p", "q", "r", "s"], columns=["one", "two"] ) # DataFrame @ DataFrame result = operator.matmul(a, b) expected = DataFrame( np.dot(a.values, b.values), index=["a", "b", "c"], columns=["one", "two"] ) tm.assert_frame_equal(result, expected) # DataFrame @ Series result = operator.matmul(a, b.one) expected = Series(np.dot(a.values, b.one.values), index=["a", "b", "c"]) tm.assert_series_equal(result, expected) # np.array @ DataFrame result = operator.matmul(a.values, b) assert isinstance(result, DataFrame) assert result.columns.equals(b.columns) assert result.index.equals(Index(range(3))) expected = np.dot(a.values, b.values) tm.assert_almost_equal(result.values, expected) # nested list @ DataFrame (__rmatmul__) result = operator.matmul(a.values.tolist(), b) expected = DataFrame( np.dot(a.values, b.values), index=["a", "b", "c"], columns=["one", "two"] ) tm.assert_almost_equal(result.values, expected.values) # mixed dtype DataFrame @ DataFrame a["q"] = a.q.round().astype(int) result = operator.matmul(a, b) expected = DataFrame( np.dot(a.values, b.values), index=["a", "b", "c"], columns=["one", "two"] ) tm.assert_frame_equal(result, expected) # different dtypes DataFrame @ DataFrame a = a.astype(int) result = operator.matmul(a, b) expected = DataFrame( np.dot(a.values, b.values), index=["a", "b", "c"], columns=["one", "two"] ) tm.assert_frame_equal(result, expected) # unaligned df = DataFrame(np.random.randn(3, 4), index=[1, 2, 3], columns=range(4)) df2 = DataFrame(np.random.randn(5, 3), index=range(5), columns=[1, 2, 3]) with pytest.raises(ValueError, match="aligned"): operator.matmul(df, df2) def test_matmul_message_shapes(self): # GH#21581 exception message should reflect original shapes, # not transposed shapes a = np.random.rand(10, 4) b = np.random.rand(5, 3) df = DataFrame(b) msg = r"shapes \(10, 4\) and \(5, 3\) not aligned" with pytest.raises(ValueError, match=msg): a @ df with pytest.raises(ValueError, match=msg): a.tolist() @ df
datapythonista/pandas
pandas/tests/frame/methods/test_matmul.py
pandas/__init__.py
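The module-level __getattr__ in pandas/__init__.py above relies on PEP 562 to deprecate top-level names lazily; below is a self-contained sketch of the same idea using a throwaway module (the module and attribute names are illustrative, not pandas'):

import sys
import types
import warnings

demo = types.ModuleType("demo_pkg")

def _module_getattr(name):
    # Mirrors the pandas shim: warn, then hand back the real object lazily.
    if name == "np":
        warnings.warn(
            "demo_pkg.np is deprecated; import numpy directly instead.",
            FutureWarning,
            stacklevel=2,
        )
        import numpy as np
        return np
    raise AttributeError(f"module 'demo_pkg' has no attribute {name!r}")

demo.__getattr__ = _module_getattr   # PEP 562 hook, like the one above
sys.modules["demo_pkg"] = demo

import demo_pkg
with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    demo_pkg.np                      # triggers the deprecation path
assert any(issubclass(w.category, FutureWarning) for w in caught)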
import numpy as np import pandas as pd from pandas import ( Categorical, DataFrame, Index, Series, Timestamp, ) import pandas._testing as tm from pandas.core.arrays import IntervalArray class TestGetNumericData: def test_get_numeric_data_preserve_dtype(self): # get the numeric data obj = DataFrame({"A": [1, "2", 3.0]}) result = obj._get_numeric_data() expected = DataFrame(index=[0, 1, 2], dtype=object) tm.assert_frame_equal(result, expected) def test_get_numeric_data(self): datetime64name = np.dtype("M8[ns]").name objectname = np.dtype(np.object_).name df = DataFrame( {"a": 1.0, "b": 2, "c": "foo", "f": Timestamp("20010102")}, index=np.arange(10), ) result = df.dtypes expected = Series( [ np.dtype("float64"), np.dtype("int64"), np.dtype(objectname), np.dtype(datetime64name), ], index=["a", "b", "c", "f"], ) tm.assert_series_equal(result, expected) df = DataFrame( { "a": 1.0, "b": 2, "c": "foo", "d": np.array([1.0] * 10, dtype="float32"), "e": np.array([1] * 10, dtype="int32"), "f": np.array([1] * 10, dtype="int16"), "g": Timestamp("20010102"), }, index=np.arange(10), ) result = df._get_numeric_data() expected = df.loc[:, ["a", "b", "d", "e", "f"]] tm.assert_frame_equal(result, expected) only_obj = df.loc[:, ["c", "g"]] result = only_obj._get_numeric_data() expected = df.loc[:, []] tm.assert_frame_equal(result, expected) df = DataFrame.from_dict({"a": [1, 2], "b": ["foo", "bar"], "c": [np.pi, np.e]}) result = df._get_numeric_data() expected = DataFrame.from_dict({"a": [1, 2], "c": [np.pi, np.e]}) tm.assert_frame_equal(result, expected) df = result.copy() result = df._get_numeric_data() expected = df tm.assert_frame_equal(result, expected) def test_get_numeric_data_mixed_dtype(self): # numeric and object columns df = DataFrame( { "a": [1, 2, 3], "b": [True, False, True], "c": ["foo", "bar", "baz"], "d": [None, None, None], "e": [3.14, 0.577, 2.773], } ) result = df._get_numeric_data() tm.assert_index_equal(result.columns, Index(["a", "b", "e"])) def test_get_numeric_data_extension_dtype(self): # GH#22290 df = DataFrame( { "A": pd.array([-10, np.nan, 0, 10, 20, 30], dtype="Int64"), "B": Categorical(list("abcabc")), "C": pd.array([0, 1, 2, 3, np.nan, 5], dtype="UInt8"), "D": IntervalArray.from_breaks(range(7)), } ) result = df._get_numeric_data() expected = df.loc[:, ["A", "C"]] tm.assert_frame_equal(result, expected)
import operator import numpy as np import pytest from pandas import ( DataFrame, Index, Series, ) import pandas._testing as tm class TestMatMul: def test_matmul(self): # matmul test is for GH#10259 a = DataFrame( np.random.randn(3, 4), index=["a", "b", "c"], columns=["p", "q", "r", "s"] ) b = DataFrame( np.random.randn(4, 2), index=["p", "q", "r", "s"], columns=["one", "two"] ) # DataFrame @ DataFrame result = operator.matmul(a, b) expected = DataFrame( np.dot(a.values, b.values), index=["a", "b", "c"], columns=["one", "two"] ) tm.assert_frame_equal(result, expected) # DataFrame @ Series result = operator.matmul(a, b.one) expected = Series(np.dot(a.values, b.one.values), index=["a", "b", "c"]) tm.assert_series_equal(result, expected) # np.array @ DataFrame result = operator.matmul(a.values, b) assert isinstance(result, DataFrame) assert result.columns.equals(b.columns) assert result.index.equals(Index(range(3))) expected = np.dot(a.values, b.values) tm.assert_almost_equal(result.values, expected) # nested list @ DataFrame (__rmatmul__) result = operator.matmul(a.values.tolist(), b) expected = DataFrame( np.dot(a.values, b.values), index=["a", "b", "c"], columns=["one", "two"] ) tm.assert_almost_equal(result.values, expected.values) # mixed dtype DataFrame @ DataFrame a["q"] = a.q.round().astype(int) result = operator.matmul(a, b) expected = DataFrame( np.dot(a.values, b.values), index=["a", "b", "c"], columns=["one", "two"] ) tm.assert_frame_equal(result, expected) # different dtypes DataFrame @ DataFrame a = a.astype(int) result = operator.matmul(a, b) expected = DataFrame( np.dot(a.values, b.values), index=["a", "b", "c"], columns=["one", "two"] ) tm.assert_frame_equal(result, expected) # unaligned df = DataFrame(np.random.randn(3, 4), index=[1, 2, 3], columns=range(4)) df2 = DataFrame(np.random.randn(5, 3), index=range(5), columns=[1, 2, 3]) with pytest.raises(ValueError, match="aligned"): operator.matmul(df, df2) def test_matmul_message_shapes(self): # GH#21581 exception message should reflect original shapes, # not transposed shapes a = np.random.rand(10, 4) b = np.random.rand(5, 3) df = DataFrame(b) msg = r"shapes \(10, 4\) and \(5, 3\) not aligned" with pytest.raises(ValueError, match=msg): a @ df with pytest.raises(ValueError, match=msg): a.tolist() @ df
datapythonista/pandas
pandas/tests/frame/methods/test_matmul.py
pandas/tests/frame/methods/test_get_numeric_data.py
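The _get_numeric_data() method exercised above is private API; the public way to get comparable column filtering is DataFrame.select_dtypes. Note from test_get_numeric_data_mixed_dtype above that the private helper also keeps boolean columns, so the closest public equivalent includes both number and bool:

import pandas as pd

df = pd.DataFrame(
    {
        "a": [1, 2, 3],
        "b": [True, False, True],
        "c": ["foo", "bar", "baz"],
        "e": [3.14, 0.577, 2.773],
    }
)

numeric_only = df.select_dtypes(include="number")                 # columns a, e
numeric_and_bool = df.select_dtypes(include=["number", "bool"])   # columns a, b, e, like _get_numeric_data()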
import warnings import pytest import pandas as pd import pandas._testing as tm from pandas.tests.extension.base.base import BaseExtensionTests class BaseReduceTests(BaseExtensionTests): """ Reduction specific tests. Generally these only make sense for numeric/boolean operations. """ def check_reduce(self, s, op_name, skipna): result = getattr(s, op_name)(skipna=skipna) expected = getattr(s.astype("float64"), op_name)(skipna=skipna) tm.assert_almost_equal(result, expected) class BaseNoReduceTests(BaseReduceTests): """ we don't define any reductions """ @pytest.mark.parametrize("skipna", [True, False]) def test_reduce_series_numeric(self, data, all_numeric_reductions, skipna): op_name = all_numeric_reductions s = pd.Series(data) msg = ( "[Cc]annot perform|Categorical is not ordered for operation|" "'Categorical' does not implement reduction|" ) with pytest.raises(TypeError, match=msg): getattr(s, op_name)(skipna=skipna) @pytest.mark.parametrize("skipna", [True, False]) def test_reduce_series_boolean(self, data, all_boolean_reductions, skipna): op_name = all_boolean_reductions s = pd.Series(data) msg = ( "[Cc]annot perform|Categorical is not ordered for operation|" "'Categorical' does not implement reduction|" ) with pytest.raises(TypeError, match=msg): getattr(s, op_name)(skipna=skipna) class BaseNumericReduceTests(BaseReduceTests): @pytest.mark.parametrize("skipna", [True, False]) def test_reduce_series(self, data, all_numeric_reductions, skipna): op_name = all_numeric_reductions s = pd.Series(data) # min/max with empty produce numpy warnings with warnings.catch_warnings(): warnings.simplefilter("ignore", RuntimeWarning) self.check_reduce(s, op_name, skipna) class BaseBooleanReduceTests(BaseReduceTests): @pytest.mark.parametrize("skipna", [True, False]) def test_reduce_series(self, data, all_boolean_reductions, skipna): op_name = all_boolean_reductions s = pd.Series(data) self.check_reduce(s, op_name, skipna)
import operator import numpy as np import pytest from pandas import ( DataFrame, Index, Series, ) import pandas._testing as tm class TestMatMul: def test_matmul(self): # matmul test is for GH#10259 a = DataFrame( np.random.randn(3, 4), index=["a", "b", "c"], columns=["p", "q", "r", "s"] ) b = DataFrame( np.random.randn(4, 2), index=["p", "q", "r", "s"], columns=["one", "two"] ) # DataFrame @ DataFrame result = operator.matmul(a, b) expected = DataFrame( np.dot(a.values, b.values), index=["a", "b", "c"], columns=["one", "two"] ) tm.assert_frame_equal(result, expected) # DataFrame @ Series result = operator.matmul(a, b.one) expected = Series(np.dot(a.values, b.one.values), index=["a", "b", "c"]) tm.assert_series_equal(result, expected) # np.array @ DataFrame result = operator.matmul(a.values, b) assert isinstance(result, DataFrame) assert result.columns.equals(b.columns) assert result.index.equals(Index(range(3))) expected = np.dot(a.values, b.values) tm.assert_almost_equal(result.values, expected) # nested list @ DataFrame (__rmatmul__) result = operator.matmul(a.values.tolist(), b) expected = DataFrame( np.dot(a.values, b.values), index=["a", "b", "c"], columns=["one", "two"] ) tm.assert_almost_equal(result.values, expected.values) # mixed dtype DataFrame @ DataFrame a["q"] = a.q.round().astype(int) result = operator.matmul(a, b) expected = DataFrame( np.dot(a.values, b.values), index=["a", "b", "c"], columns=["one", "two"] ) tm.assert_frame_equal(result, expected) # different dtypes DataFrame @ DataFrame a = a.astype(int) result = operator.matmul(a, b) expected = DataFrame( np.dot(a.values, b.values), index=["a", "b", "c"], columns=["one", "two"] ) tm.assert_frame_equal(result, expected) # unaligned df = DataFrame(np.random.randn(3, 4), index=[1, 2, 3], columns=range(4)) df2 = DataFrame(np.random.randn(5, 3), index=range(5), columns=[1, 2, 3]) with pytest.raises(ValueError, match="aligned"): operator.matmul(df, df2) def test_matmul_message_shapes(self): # GH#21581 exception message should reflect original shapes, # not transposed shapes a = np.random.rand(10, 4) b = np.random.rand(5, 3) df = DataFrame(b) msg = r"shapes \(10, 4\) and \(5, 3\) not aligned" with pytest.raises(ValueError, match=msg): a @ df with pytest.raises(ValueError, match=msg): a.tolist() @ df
datapythonista/pandas
pandas/tests/frame/methods/test_matmul.py
pandas/tests/extension/base/reduce.py
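check_reduce() above defines correctness for extension reductions as agreement with the float64-cast Series; spelled out for one concrete case (the Int64 example is ours, not taken from the file):

import pandas as pd
import pandas._testing as tm

s = pd.Series([1, 2, None, 4], dtype="Int64")

# What check_reduce(s, "sum", skipna=True) asserts: the masked-integer reduction
# must match the same reduction on the float64 view of the data.
result = s.sum(skipna=True)
expected = s.astype("float64").sum(skipna=True)
tm.assert_almost_equal(result, expected)   # both equal 7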
from __future__ import annotations from contextlib import suppress from typing import ( TYPE_CHECKING, Any, Hashable, Sequence, ) import warnings import numpy as np from pandas._libs.indexing import NDFrameIndexerBase from pandas._libs.lib import item_from_zerodim from pandas.errors import ( AbstractMethodError, InvalidIndexError, ) from pandas.util._decorators import doc from pandas.core.dtypes.common import ( is_array_like, is_bool_dtype, is_hashable, is_integer, is_iterator, is_list_like, is_numeric_dtype, is_object_dtype, is_scalar, is_sequence, ) from pandas.core.dtypes.concat import concat_compat from pandas.core.dtypes.generic import ( ABCDataFrame, ABCSeries, ) from pandas.core.dtypes.missing import ( infer_fill_value, isna, ) import pandas.core.common as com from pandas.core.construction import array as pd_array from pandas.core.indexers import ( check_array_indexer, is_empty_indexer, is_exact_shape_match, is_list_like_indexer, length_of_indexer, ) from pandas.core.indexes.api import ( Index, MultiIndex, ) if TYPE_CHECKING: from pandas import ( DataFrame, Series, ) # "null slice" _NS = slice(None, None) # the public IndexSlicerMaker class _IndexSlice: """ Create an object to more easily perform multi-index slicing. See Also -------- MultiIndex.remove_unused_levels : New MultiIndex with no unused levels. Notes ----- See :ref:`Defined Levels <advanced.shown_levels>` for further info on slicing a MultiIndex. Examples -------- >>> midx = pd.MultiIndex.from_product([['A0','A1'], ['B0','B1','B2','B3']]) >>> columns = ['foo', 'bar'] >>> dfmi = pd.DataFrame(np.arange(16).reshape((len(midx), len(columns))), ... index=midx, columns=columns) Using the default slice command: >>> dfmi.loc[(slice(None), slice('B0', 'B1')), :] foo bar A0 B0 0 1 B1 2 3 A1 B0 8 9 B1 10 11 Using the IndexSlice class for a more intuitive command: >>> idx = pd.IndexSlice >>> dfmi.loc[idx[:, 'B0':'B1'], :] foo bar A0 B0 0 1 B1 2 3 A1 B0 8 9 B1 10 11 """ def __getitem__(self, arg): return arg IndexSlice = _IndexSlice() class IndexingError(Exception): pass class IndexingMixin: """ Mixin for adding .loc/.iloc/.at/.iat to Dataframes and Series. """ @property def iloc(self) -> _iLocIndexer: """ Purely integer-location based indexing for selection by position. ``.iloc[]`` is primarily integer position based (from ``0`` to ``length-1`` of the axis), but may also be used with a boolean array. Allowed inputs are: - An integer, e.g. ``5``. - A list or array of integers, e.g. ``[4, 3, 0]``. - A slice object with ints, e.g. ``1:7``. - A boolean array. - A ``callable`` function with one argument (the calling Series or DataFrame) and that returns valid output for indexing (one of the above). This is useful in method chains, when you don't have a reference to the calling object, but would like to base your selection on some value. ``.iloc`` will raise ``IndexError`` if a requested indexer is out-of-bounds, except *slice* indexers which allow out-of-bounds indexing (this conforms with python/numpy *slice* semantics). See more at :ref:`Selection by Position <indexing.integer>`. See Also -------- DataFrame.iat : Fast integer location scalar accessor. DataFrame.loc : Purely label-location based indexer for selection by label. Series.iloc : Purely integer-location based indexing for selection by position. Examples -------- >>> mydict = [{'a': 1, 'b': 2, 'c': 3, 'd': 4}, ... {'a': 100, 'b': 200, 'c': 300, 'd': 400}, ... 
{'a': 1000, 'b': 2000, 'c': 3000, 'd': 4000 }] >>> df = pd.DataFrame(mydict) >>> df a b c d 0 1 2 3 4 1 100 200 300 400 2 1000 2000 3000 4000 **Indexing just the rows** With a scalar integer. >>> type(df.iloc[0]) <class 'pandas.core.series.Series'> >>> df.iloc[0] a 1 b 2 c 3 d 4 Name: 0, dtype: int64 With a list of integers. >>> df.iloc[[0]] a b c d 0 1 2 3 4 >>> type(df.iloc[[0]]) <class 'pandas.core.frame.DataFrame'> >>> df.iloc[[0, 1]] a b c d 0 1 2 3 4 1 100 200 300 400 With a `slice` object. >>> df.iloc[:3] a b c d 0 1 2 3 4 1 100 200 300 400 2 1000 2000 3000 4000 With a boolean mask the same length as the index. >>> df.iloc[[True, False, True]] a b c d 0 1 2 3 4 2 1000 2000 3000 4000 With a callable, useful in method chains. The `x` passed to the ``lambda`` is the DataFrame being sliced. This selects the rows whose index label even. >>> df.iloc[lambda x: x.index % 2 == 0] a b c d 0 1 2 3 4 2 1000 2000 3000 4000 **Indexing both axes** You can mix the indexer types for the index and columns. Use ``:`` to select the entire axis. With scalar integers. >>> df.iloc[0, 1] 2 With lists of integers. >>> df.iloc[[0, 2], [1, 3]] b d 0 2 4 2 2000 4000 With `slice` objects. >>> df.iloc[1:3, 0:3] a b c 1 100 200 300 2 1000 2000 3000 With a boolean array whose length matches the columns. >>> df.iloc[:, [True, False, True, False]] a c 0 1 3 1 100 300 2 1000 3000 With a callable function that expects the Series or DataFrame. >>> df.iloc[:, lambda df: [0, 2]] a c 0 1 3 1 100 300 2 1000 3000 """ return _iLocIndexer("iloc", self) @property def loc(self) -> _LocIndexer: """ Access a group of rows and columns by label(s) or a boolean array. ``.loc[]`` is primarily label based, but may also be used with a boolean array. Allowed inputs are: - A single label, e.g. ``5`` or ``'a'``, (note that ``5`` is interpreted as a *label* of the index, and **never** as an integer position along the index). - A list or array of labels, e.g. ``['a', 'b', 'c']``. - A slice object with labels, e.g. ``'a':'f'``. .. warning:: Note that contrary to usual python slices, **both** the start and the stop are included - A boolean array of the same length as the axis being sliced, e.g. ``[True, False, True]``. - An alignable boolean Series. The index of the key will be aligned before masking. - An alignable Index. The Index of the returned selection will be the input. - A ``callable`` function with one argument (the calling Series or DataFrame) and that returns valid output for indexing (one of the above) See more at :ref:`Selection by Label <indexing.label>`. Raises ------ KeyError If any items are not found. IndexingError If an indexed key is passed and its index is unalignable to the frame index. See Also -------- DataFrame.at : Access a single value for a row/column label pair. DataFrame.iloc : Access group of rows and columns by integer position(s). DataFrame.xs : Returns a cross-section (row(s) or column(s)) from the Series/DataFrame. Series.loc : Access group of values using labels. Examples -------- **Getting values** >>> df = pd.DataFrame([[1, 2], [4, 5], [7, 8]], ... index=['cobra', 'viper', 'sidewinder'], ... columns=['max_speed', 'shield']) >>> df max_speed shield cobra 1 2 viper 4 5 sidewinder 7 8 Single label. Note this returns the row as a Series. >>> df.loc['viper'] max_speed 4 shield 5 Name: viper, dtype: int64 List of labels. Note using ``[[]]`` returns a DataFrame. 
>>> df.loc[['viper', 'sidewinder']] max_speed shield viper 4 5 sidewinder 7 8 Single label for row and column >>> df.loc['cobra', 'shield'] 2 Slice with labels for row and single label for column. As mentioned above, note that both the start and stop of the slice are included. >>> df.loc['cobra':'viper', 'max_speed'] cobra 1 viper 4 Name: max_speed, dtype: int64 Boolean list with the same length as the row axis >>> df.loc[[False, False, True]] max_speed shield sidewinder 7 8 Alignable boolean Series: >>> df.loc[pd.Series([False, True, False], ... index=['viper', 'sidewinder', 'cobra'])] max_speed shield sidewinder 7 8 Index (same behavior as ``df.reindex``) >>> df.loc[pd.Index(["cobra", "viper"], name="foo")] max_speed shield foo cobra 1 2 viper 4 5 Conditional that returns a boolean Series >>> df.loc[df['shield'] > 6] max_speed shield sidewinder 7 8 Conditional that returns a boolean Series with column labels specified >>> df.loc[df['shield'] > 6, ['max_speed']] max_speed sidewinder 7 Callable that returns a boolean Series >>> df.loc[lambda df: df['shield'] == 8] max_speed shield sidewinder 7 8 **Setting values** Set value for all items matching the list of labels >>> df.loc[['viper', 'sidewinder'], ['shield']] = 50 >>> df max_speed shield cobra 1 2 viper 4 50 sidewinder 7 50 Set value for an entire row >>> df.loc['cobra'] = 10 >>> df max_speed shield cobra 10 10 viper 4 50 sidewinder 7 50 Set value for an entire column >>> df.loc[:, 'max_speed'] = 30 >>> df max_speed shield cobra 30 10 viper 30 50 sidewinder 30 50 Set value for rows matching callable condition >>> df.loc[df['shield'] > 35] = 0 >>> df max_speed shield cobra 30 10 viper 0 0 sidewinder 0 0 **Getting values on a DataFrame with an index that has integer labels** Another example using integers for the index >>> df = pd.DataFrame([[1, 2], [4, 5], [7, 8]], ... index=[7, 8, 9], columns=['max_speed', 'shield']) >>> df max_speed shield 7 1 2 8 4 5 9 7 8 Slice with integer labels for rows. As mentioned above, note that both the start and stop of the slice are included. >>> df.loc[7:9] max_speed shield 7 1 2 8 4 5 9 7 8 **Getting values with a MultiIndex** A number of examples using a DataFrame with a MultiIndex >>> tuples = [ ... ('cobra', 'mark i'), ('cobra', 'mark ii'), ... ('sidewinder', 'mark i'), ('sidewinder', 'mark ii'), ... ('viper', 'mark ii'), ('viper', 'mark iii') ... ] >>> index = pd.MultiIndex.from_tuples(tuples) >>> values = [[12, 2], [0, 4], [10, 20], ... [1, 4], [7, 1], [16, 36]] >>> df = pd.DataFrame(values, columns=['max_speed', 'shield'], index=index) >>> df max_speed shield cobra mark i 12 2 mark ii 0 4 sidewinder mark i 10 20 mark ii 1 4 viper mark ii 7 1 mark iii 16 36 Single label. Note this returns a DataFrame with a single index. >>> df.loc['cobra'] max_speed shield mark i 12 2 mark ii 0 4 Single index tuple. Note this returns a Series. >>> df.loc[('cobra', 'mark ii')] max_speed 0 shield 4 Name: (cobra, mark ii), dtype: int64 Single label for row and column. Similar to passing in a tuple, this returns a Series. >>> df.loc['cobra', 'mark i'] max_speed 12 shield 2 Name: (cobra, mark i), dtype: int64 Single tuple. Note using ``[[]]`` returns a DataFrame. 
>>> df.loc[[('cobra', 'mark ii')]] max_speed shield cobra mark ii 0 4 Single tuple for the index with a single label for the column >>> df.loc[('cobra', 'mark i'), 'shield'] 2 Slice from index tuple to single label >>> df.loc[('cobra', 'mark i'):'viper'] max_speed shield cobra mark i 12 2 mark ii 0 4 sidewinder mark i 10 20 mark ii 1 4 viper mark ii 7 1 mark iii 16 36 Slice from index tuple to index tuple >>> df.loc[('cobra', 'mark i'):('viper', 'mark ii')] max_speed shield cobra mark i 12 2 mark ii 0 4 sidewinder mark i 10 20 mark ii 1 4 viper mark ii 7 1 """ return _LocIndexer("loc", self) @property def at(self) -> _AtIndexer: """ Access a single value for a row/column label pair. Similar to ``loc``, in that both provide label-based lookups. Use ``at`` if you only need to get or set a single value in a DataFrame or Series. Raises ------ KeyError If 'label' does not exist in DataFrame. See Also -------- DataFrame.iat : Access a single value for a row/column pair by integer position. DataFrame.loc : Access a group of rows and columns by label(s). Series.at : Access a single value using a label. Examples -------- >>> df = pd.DataFrame([[0, 2, 3], [0, 4, 1], [10, 20, 30]], ... index=[4, 5, 6], columns=['A', 'B', 'C']) >>> df A B C 4 0 2 3 5 0 4 1 6 10 20 30 Get value at specified row/column pair >>> df.at[4, 'B'] 2 Set value at specified row/column pair >>> df.at[4, 'B'] = 10 >>> df.at[4, 'B'] 10 Get value within a Series >>> df.loc[5].at['B'] 4 """ return _AtIndexer("at", self) @property def iat(self) -> _iAtIndexer: """ Access a single value for a row/column pair by integer position. Similar to ``iloc``, in that both provide integer-based lookups. Use ``iat`` if you only need to get or set a single value in a DataFrame or Series. Raises ------ IndexError When integer position is out of bounds. See Also -------- DataFrame.at : Access a single value for a row/column label pair. DataFrame.loc : Access a group of rows and columns by label(s). DataFrame.iloc : Access a group of rows and columns by integer position(s). Examples -------- >>> df = pd.DataFrame([[0, 2, 3], [0, 4, 1], [10, 20, 30]], ... columns=['A', 'B', 'C']) >>> df A B C 0 0 2 3 1 0 4 1 2 10 20 30 Get value at specified row/column pair >>> df.iat[1, 2] 1 Set value at specified row/column pair >>> df.iat[1, 2] = 10 >>> df.iat[1, 2] 10 Get value within a series >>> df.loc[0].iat[1] 2 """ return _iAtIndexer("iat", self) class _LocationIndexer(NDFrameIndexerBase): _valid_types: str axis = None def __call__(self, axis=None): # we need to return a copy of ourselves new_self = type(self)(self.name, self.obj) if axis is not None: axis = self.obj._get_axis_number(axis) new_self.axis = axis return new_self def _get_setitem_indexer(self, key): """ Convert a potentially-label-based key into a positional indexer. """ if self.name == "loc": self._ensure_listlike_indexer(key) if self.axis is not None: return self._convert_tuple(key, is_setter=True) ax = self.obj._get_axis(0) if isinstance(ax, MultiIndex) and self.name != "iloc": with suppress(TypeError, KeyError, InvalidIndexError): # TypeError e.g. 
passed a bool return ax.get_loc(key) if isinstance(key, tuple): with suppress(IndexingError): return self._convert_tuple(key, is_setter=True) if isinstance(key, range): return list(key) try: return self._convert_to_indexer(key, axis=0, is_setter=True) except TypeError as e: # invalid indexer type vs 'other' indexing errors if "cannot do" in str(e): raise elif "unhashable type" in str(e): raise raise IndexingError(key) from e def _ensure_listlike_indexer(self, key, axis=None, value=None): """ Ensure that a list-like of column labels are all present by adding them if they do not already exist. Parameters ---------- key : list-like of column labels Target labels. axis : key axis if known """ column_axis = 1 # column only exists in 2-dimensional DataFrame if self.ndim != 2: return if isinstance(key, tuple) and len(key) > 1: # key may be a tuple if we are .loc # if length of key is > 1 set key to column part key = key[column_axis] axis = column_axis if ( axis == column_axis and not isinstance(self.obj.columns, MultiIndex) and is_list_like_indexer(key) and not com.is_bool_indexer(key) and all(is_hashable(k) for k in key) ): # GH#38148 keys = self.obj.columns.union(key, sort=False) self.obj._mgr = self.obj._mgr.reindex_axis( keys, axis=0, consolidate=False, only_slice=True ) def __setitem__(self, key, value): if isinstance(key, tuple): key = tuple(list(x) if is_iterator(x) else x for x in key) key = tuple(com.apply_if_callable(x, self.obj) for x in key) else: key = com.apply_if_callable(key, self.obj) indexer = self._get_setitem_indexer(key) self._has_valid_setitem_indexer(key) iloc = self if self.name == "iloc" else self.obj.iloc iloc._setitem_with_indexer(indexer, value, self.name) def _validate_key(self, key, axis: int): """ Ensure that key is valid for current indexer. Parameters ---------- key : scalar, slice or list-like Key requested. axis : int Dimension on which the indexing is being made. Raises ------ TypeError If the key (or some element of it) has wrong type. IndexError If the key (or some element of it) is out of bounds. KeyError If the key was not found. """ raise AbstractMethodError(self) def _has_valid_tuple(self, key: tuple): """ Check the key for valid keys across my indexer. """ self._validate_key_length(key) for i, k in enumerate(key): try: self._validate_key(k, i) except ValueError as err: raise ValueError( "Location based indexing can only have " f"[{self._valid_types}] types" ) from err def _is_nested_tuple_indexer(self, tup: tuple) -> bool: """ Returns ------- bool """ if any(isinstance(ax, MultiIndex) for ax in self.obj.axes): return any(is_nested_tuple(tup, ax) for ax in self.obj.axes) return False def _convert_tuple(self, key, is_setter: bool = False): keyidx = [] if self.axis is not None: axis = self.obj._get_axis_number(self.axis) for i in range(self.ndim): if i == axis: keyidx.append( self._convert_to_indexer(key, axis=axis, is_setter=is_setter) ) else: keyidx.append(slice(None)) else: self._validate_key_length(key) for i, k in enumerate(key): idx = self._convert_to_indexer(k, axis=i, is_setter=is_setter) keyidx.append(idx) return tuple(keyidx) def _validate_key_length(self, key: Sequence[Any]) -> None: if len(key) > self.ndim: raise IndexingError("Too many indexers") def _getitem_tuple_same_dim(self, tup: tuple): """ Index with indexers that should return an object of the same dimension as self.obj. This is only called after a failed call to _getitem_lowerdim. 
""" retval = self.obj for i, key in enumerate(tup): if com.is_null_slice(key): continue retval = getattr(retval, self.name)._getitem_axis(key, axis=i) # We should never have retval.ndim < self.ndim, as that should # be handled by the _getitem_lowerdim call above. assert retval.ndim == self.ndim return retval def _getitem_lowerdim(self, tup: tuple): # we can directly get the axis result since the axis is specified if self.axis is not None: axis = self.obj._get_axis_number(self.axis) return self._getitem_axis(tup, axis=axis) # we may have a nested tuples indexer here if self._is_nested_tuple_indexer(tup): return self._getitem_nested_tuple(tup) # we maybe be using a tuple to represent multiple dimensions here ax0 = self.obj._get_axis(0) # ...but iloc should handle the tuple as simple integer-location # instead of checking it as multiindex representation (GH 13797) if isinstance(ax0, MultiIndex) and self.name != "iloc": with suppress(IndexingError): return self._handle_lowerdim_multi_index_axis0(tup) self._validate_key_length(tup) for i, key in enumerate(tup): if is_label_like(key): # We don't need to check for tuples here because those are # caught by the _is_nested_tuple_indexer check above. section = self._getitem_axis(key, axis=i) # We should never have a scalar section here, because # _getitem_lowerdim is only called after a check for # is_scalar_access, which that would be. if section.ndim == self.ndim: # we're in the middle of slicing through a MultiIndex # revise the key wrt to `section` by inserting an _NS new_key = tup[:i] + (_NS,) + tup[i + 1 :] else: # Note: the section.ndim == self.ndim check above # rules out having DataFrame here, so we dont need to worry # about transposing. new_key = tup[:i] + tup[i + 1 :] if len(new_key) == 1: new_key = new_key[0] # Slices should return views, but calling iloc/loc with a null # slice returns a new object. if com.is_null_slice(new_key): return section # This is an elided recursive call to iloc/loc return getattr(section, self.name)[new_key] raise IndexingError("not applicable") def _getitem_nested_tuple(self, tup: tuple): # we have a nested tuple so have at least 1 multi-index level # we should be able to match up the dimensionality here # we have too many indexers for our dim, but have at least 1 # multi-index dimension, try to see if we have something like # a tuple passed to a series with a multi-index if len(tup) > self.ndim: if self.name != "loc": # This should never be reached, but lets be explicit about it raise ValueError("Too many indices") if isinstance(self.obj, ABCSeries) and any( isinstance(k, tuple) for k in tup ): # GH#35349 Raise if tuple in tuple for series raise ValueError("Too many indices") if self.ndim == 1 or not any(isinstance(x, slice) for x in tup): # GH#10521 Series should reduce MultiIndex dimensions instead of # DataFrame, IndexingError is not raised when slice(None,None,None) # with one row. 
with suppress(IndexingError): return self._handle_lowerdim_multi_index_axis0(tup) # this is a series with a multi-index specified a tuple of # selectors axis = self.axis or 0 return self._getitem_axis(tup, axis=axis) # handle the multi-axis by taking sections and reducing # this is iterative obj = self.obj # GH#41369 Loop in reverse order ensures indexing along columns before rows # which selects only necessary blocks which avoids dtype conversion if possible axis = len(tup) - 1 for key in tup[::-1]: if com.is_null_slice(key): axis -= 1 continue obj = getattr(obj, self.name)._getitem_axis(key, axis=axis) axis -= 1 # if we have a scalar, we are done if is_scalar(obj) or not hasattr(obj, "ndim"): break return obj def _convert_to_indexer(self, key, axis: int, is_setter: bool = False): raise AbstractMethodError(self) def __getitem__(self, key): if type(key) is tuple: key = tuple(list(x) if is_iterator(x) else x for x in key) key = tuple(com.apply_if_callable(x, self.obj) for x in key) if self._is_scalar_access(key): with suppress(KeyError, IndexError, AttributeError): # AttributeError for IntervalTree get_value return self.obj._get_value(*key, takeable=self._takeable) return self._getitem_tuple(key) else: # we by definition only have the 0th axis axis = self.axis or 0 maybe_callable = com.apply_if_callable(key, self.obj) return self._getitem_axis(maybe_callable, axis=axis) def _is_scalar_access(self, key: tuple): raise NotImplementedError() def _getitem_tuple(self, tup: tuple): raise AbstractMethodError(self) def _getitem_axis(self, key, axis: int): raise NotImplementedError() def _has_valid_setitem_indexer(self, indexer) -> bool: raise AbstractMethodError(self) def _getbool_axis(self, key, axis: int): # caller is responsible for ensuring non-None axis labels = self.obj._get_axis(axis) key = check_bool_indexer(labels, key) inds = key.nonzero()[0] return self.obj._take_with_is_copy(inds, axis=axis) @doc(IndexingMixin.loc) class _LocIndexer(_LocationIndexer): _takeable: bool = False _valid_types = ( "labels (MUST BE IN THE INDEX), slices of labels (BOTH " "endpoints included! 
Can be slices of integers if the " "index is integers), listlike of labels, boolean" ) # ------------------------------------------------------------------- # Key Checks @doc(_LocationIndexer._validate_key) def _validate_key(self, key, axis: int): # valid for a collection of labels (we check their presence later) # slice of labels (where start-end in labels) # slice of integers (only if in the labels) # boolean not in slice and with boolean index if isinstance(key, bool) and not is_bool_dtype(self.obj.index): raise KeyError( f"{key}: boolean label can not be used without a boolean index" ) if isinstance(key, slice) and ( isinstance(key.start, bool) or isinstance(key.stop, bool) ): raise TypeError(f"{key}: boolean values can not be used in a slice") def _has_valid_setitem_indexer(self, indexer) -> bool: return True def _is_scalar_access(self, key: tuple) -> bool: """ Returns ------- bool """ # this is a shortcut accessor to both .loc and .iloc # that provide the equivalent access of .at and .iat # a) avoid getting things via sections and (to minimize dtype changes) # b) provide a performant path if len(key) != self.ndim: return False for i, k in enumerate(key): if not is_scalar(k): return False ax = self.obj.axes[i] if isinstance(ax, MultiIndex): return False if isinstance(k, str) and ax._supports_partial_string_indexing: # partial string indexing, df.loc['2000', 'A'] # should not be considered scalar return False if not ax.is_unique: return False return True # ------------------------------------------------------------------- # MultiIndex Handling def _multi_take_opportunity(self, tup: tuple) -> bool: """ Check whether there is the possibility to use ``_multi_take``. Currently the limit is that all axes being indexed, must be indexed with list-likes. Parameters ---------- tup : tuple Tuple of indexers, one per axis. Returns ------- bool Whether the current indexing, can be passed through `_multi_take`. """ if not all(is_list_like_indexer(x) for x in tup): return False # just too complicated return not any(com.is_bool_indexer(x) for x in tup) def _multi_take(self, tup: tuple): """ Create the indexers for the passed tuple of keys, and executes the take operation. This allows the take operation to be executed all at once, rather than once for each dimension. Improving efficiency. Parameters ---------- tup : tuple Tuple of indexers, one per axis. Returns ------- values: same type as the object being indexed """ # GH 836 d = { axis: self._get_listlike_indexer(key, axis) for (key, axis) in zip(tup, self.obj._AXIS_ORDERS) } return self.obj._reindex_with_indexers(d, copy=True, allow_dups=True) # ------------------------------------------------------------------- def _getitem_iterable(self, key, axis: int): """ Index current object with an iterable collection of keys. Parameters ---------- key : iterable Targeted labels. axis : int Dimension on which the indexing is being made. Raises ------ KeyError If no key was found. Will change in the future to raise if not all keys were found. Returns ------- scalar, DataFrame, or Series: indexed value(s). """ # we assume that not com.is_bool_indexer(key), as that is # handled before we get here. 
self._validate_key(key, axis) # A collection of keys keyarr, indexer = self._get_listlike_indexer(key, axis) return self.obj._reindex_with_indexers( {axis: [keyarr, indexer]}, copy=True, allow_dups=True ) def _getitem_tuple(self, tup: tuple): with suppress(IndexingError): return self._getitem_lowerdim(tup) # no multi-index, so validate all of the indexers self._has_valid_tuple(tup) # ugly hack for GH #836 if self._multi_take_opportunity(tup): return self._multi_take(tup) return self._getitem_tuple_same_dim(tup) def _get_label(self, label, axis: int): # GH#5667 this will fail if the label is not present in the axis. return self.obj.xs(label, axis=axis) def _handle_lowerdim_multi_index_axis0(self, tup: tuple): # we have an axis0 multi-index, handle or raise axis = self.axis or 0 try: # fast path for series or for tup devoid of slices return self._get_label(tup, axis=axis) except (TypeError, InvalidIndexError): # slices are unhashable pass except KeyError as ek: # raise KeyError if number of indexers match # else IndexingError will be raised if self.ndim < len(tup) <= self.obj.index.nlevels: raise ek raise IndexingError("No label returned") def _getitem_axis(self, key, axis: int): key = item_from_zerodim(key) if is_iterator(key): key = list(key) labels = self.obj._get_axis(axis) key = labels._get_partial_string_timestamp_match_key(key) if isinstance(key, slice): self._validate_key(key, axis) return self._get_slice_axis(key, axis=axis) elif com.is_bool_indexer(key): return self._getbool_axis(key, axis=axis) elif is_list_like_indexer(key): # an iterable multi-selection if not (isinstance(key, tuple) and isinstance(labels, MultiIndex)): if hasattr(key, "ndim") and key.ndim > 1: raise ValueError("Cannot index with multidimensional key") return self._getitem_iterable(key, axis=axis) # nested tuple slicing if is_nested_tuple(key, labels): locs = labels.get_locs(key) indexer = [slice(None)] * self.ndim indexer[axis] = locs return self.obj.iloc[tuple(indexer)] # fall thru to straight lookup self._validate_key(key, axis) return self._get_label(key, axis=axis) def _get_slice_axis(self, slice_obj: slice, axis: int): """ This is pretty simple as we just have to deal with labels. """ # caller is responsible for ensuring non-None axis obj = self.obj if not need_slice(slice_obj): return obj.copy(deep=False) labels = obj._get_axis(axis) indexer = labels.slice_indexer(slice_obj.start, slice_obj.stop, slice_obj.step) if isinstance(indexer, slice): return self.obj._slice(indexer, axis=axis) else: # DatetimeIndex overrides Index.slice_indexer and may # return a DatetimeIndex instead of a slice object. return self.obj.take(indexer, axis=axis) def _convert_to_indexer(self, key, axis: int, is_setter: bool = False): """ Convert indexing key into something we can use to do actual fancy indexing on a ndarray. Examples ix[:5] -> slice(0, 5) ix[[1,2,3]] -> [1,2,3] ix[['foo', 'bar', 'baz']] -> [i, j, k] (indices of foo, bar, baz) Going by Zen of Python? 'In the face of ambiguity, refuse the temptation to guess.' raise AmbiguousIndexError with integer labels? 
- No, prefer label-based indexing """ labels = self.obj._get_axis(axis) if isinstance(key, slice): return labels._convert_slice_indexer(key, kind="loc") # see if we are positional in nature is_int_index = labels.is_integer() is_int_positional = is_integer(key) and not is_int_index if is_scalar(key) or isinstance(labels, MultiIndex): # Otherwise get_loc will raise InvalidIndexError # if we are a label return me try: return labels.get_loc(key) except LookupError: if isinstance(key, tuple) and isinstance(labels, MultiIndex): if len(key) == labels.nlevels: return {"key": key} raise except InvalidIndexError: # GH35015, using datetime as column indices raises exception if not isinstance(labels, MultiIndex): raise except TypeError: pass except ValueError: if not is_int_positional: raise # a positional if is_int_positional: # if we are setting and its not a valid location # its an insert which fails by definition # always valid return {"key": key} if is_nested_tuple(key, labels): if isinstance(self.obj, ABCSeries) and any( isinstance(k, tuple) for k in key ): # GH#35349 Raise if tuple in tuple for series raise ValueError("Too many indices") return labels.get_locs(key) elif is_list_like_indexer(key): if is_iterator(key): key = list(key) if com.is_bool_indexer(key): key = check_bool_indexer(labels, key) (inds,) = key.nonzero() return inds else: return self._get_listlike_indexer(key, axis)[1] else: try: return labels.get_loc(key) except LookupError: # allow a not found key only if we are a setter if not is_list_like_indexer(key): return {"key": key} raise def _get_listlike_indexer(self, key, axis: int): """ Transform a list-like of keys into a new index and an indexer. Parameters ---------- key : list-like Targeted labels. axis: int Dimension on which the indexing is being made. Raises ------ KeyError If at least one key was requested but none was found. Returns ------- keyarr: Index New index (coinciding with 'key' if the axis is unique). values : array-like Indexer for the return object, -1 denotes keys not found. """ ax = self.obj._get_axis(axis) # Have the index compute an indexer or return None # if it cannot handle: indexer, keyarr = ax._convert_listlike_indexer(key) # We only act on all found values: if indexer is not None and (indexer != -1).all(): # _validate_read_indexer is a no-op if no -1s, so skip return ax[indexer], indexer if ax._index_as_unique: indexer = ax.get_indexer_for(keyarr) keyarr = ax.reindex(keyarr)[0] else: keyarr, indexer, new_indexer = ax._reindex_non_unique(keyarr) self._validate_read_indexer(keyarr, indexer, axis) return keyarr, indexer def _validate_read_indexer(self, key, indexer, axis: int): """ Check that indexer can be used to return a result. e.g. at least one element was found, unless the list of keys was actually empty. Parameters ---------- key : list-like Targeted labels (only used to show correct error message). indexer: array-like of booleans Indices corresponding to the key, (with -1 indicating not found). axis : int Dimension on which the indexing is being made. Raises ------ KeyError If at least one key was requested but none was found. 
""" if len(key) == 0: return # Count missing values: missing_mask = indexer < 0 missing = (missing_mask).sum() if missing: if missing == len(indexer): axis_name = self.obj._get_axis_name(axis) raise KeyError(f"None of [{key}] are in the [{axis_name}]") ax = self.obj._get_axis(axis) not_found = list(set(key) - set(ax)) raise KeyError(f"{not_found} not in index") @doc(IndexingMixin.iloc) class _iLocIndexer(_LocationIndexer): _valid_types = ( "integer, integer slice (START point is INCLUDED, END " "point is EXCLUDED), listlike of integers, boolean array" ) _takeable = True # ------------------------------------------------------------------- # Key Checks def _validate_key(self, key, axis: int): if com.is_bool_indexer(key): if hasattr(key, "index") and isinstance(key.index, Index): if key.index.inferred_type == "integer": raise NotImplementedError( "iLocation based boolean " "indexing on an integer type " "is not available" ) raise ValueError( "iLocation based boolean indexing cannot use " "an indexable as a mask" ) return if isinstance(key, slice): return elif is_integer(key): self._validate_integer(key, axis) elif isinstance(key, tuple): # a tuple should already have been caught by this point # so don't treat a tuple as a valid indexer raise IndexingError("Too many indexers") elif is_list_like_indexer(key): arr = np.array(key) len_axis = len(self.obj._get_axis(axis)) # check that the key has a numeric dtype if not is_numeric_dtype(arr.dtype): raise IndexError(f".iloc requires numeric indexers, got {arr}") # check that the key does not exceed the maximum size of the index if len(arr) and (arr.max() >= len_axis or arr.min() < -len_axis): raise IndexError("positional indexers are out-of-bounds") else: raise ValueError(f"Can only index by location with a [{self._valid_types}]") def _has_valid_setitem_indexer(self, indexer) -> bool: """ Validate that a positional indexer cannot enlarge its target will raise if needed, does not modify the indexer externally. Returns ------- bool """ if isinstance(indexer, dict): raise IndexError("iloc cannot enlarge its target object") if isinstance(indexer, ABCDataFrame): warnings.warn( "DataFrame indexer for .iloc is deprecated and will be removed in" "a future version.\n" "consider using .loc with a DataFrame indexer for automatic alignment.", FutureWarning, stacklevel=3, ) if not isinstance(indexer, tuple): indexer = _tuplify(self.ndim, indexer) for ax, i in zip(self.obj.axes, indexer): if isinstance(i, slice): # should check the stop slice? pass elif is_list_like_indexer(i): # should check the elements? pass elif is_integer(i): if i >= len(ax): raise IndexError("iloc cannot enlarge its target object") elif isinstance(i, dict): raise IndexError("iloc cannot enlarge its target object") return True def _is_scalar_access(self, key: tuple) -> bool: """ Returns ------- bool """ # this is a shortcut accessor to both .loc and .iloc # that provide the equivalent access of .at and .iat # a) avoid getting things via sections and (to minimize dtype changes) # b) provide a performant path if len(key) != self.ndim: return False return all(is_integer(k) for k in key) def _validate_integer(self, key: int, axis: int) -> None: """ Check that 'key' is a valid position in the desired axis. Parameters ---------- key : int Requested position. axis : int Desired axis. Raises ------ IndexError If 'key' is not a valid position in axis 'axis'. 
""" len_axis = len(self.obj._get_axis(axis)) if key >= len_axis or key < -len_axis: raise IndexError("single positional indexer is out-of-bounds") # ------------------------------------------------------------------- def _getitem_tuple(self, tup: tuple): self._has_valid_tuple(tup) with suppress(IndexingError): return self._getitem_lowerdim(tup) return self._getitem_tuple_same_dim(tup) def _get_list_axis(self, key, axis: int): """ Return Series values by list or array of integers. Parameters ---------- key : list-like positional indexer axis : int Returns ------- Series object Notes ----- `axis` can only be zero. """ try: return self.obj._take_with_is_copy(key, axis=axis) except IndexError as err: # re-raise with different error message raise IndexError("positional indexers are out-of-bounds") from err def _getitem_axis(self, key, axis: int): if isinstance(key, ABCDataFrame): raise IndexError( "DataFrame indexer is not allowed for .iloc\n" "Consider using .loc for automatic alignment." ) if isinstance(key, slice): return self._get_slice_axis(key, axis=axis) if is_iterator(key): key = list(key) if isinstance(key, list): key = np.asarray(key) if com.is_bool_indexer(key): self._validate_key(key, axis) return self._getbool_axis(key, axis=axis) # a list of integers elif is_list_like_indexer(key): return self._get_list_axis(key, axis=axis) # a single integer else: key = item_from_zerodim(key) if not is_integer(key): raise TypeError("Cannot index by location index with a non-integer key") # validate the location self._validate_integer(key, axis) return self.obj._ixs(key, axis=axis) def _get_slice_axis(self, slice_obj: slice, axis: int): # caller is responsible for ensuring non-None axis obj = self.obj if not need_slice(slice_obj): return obj.copy(deep=False) labels = obj._get_axis(axis) labels._validate_positional_slice(slice_obj) return self.obj._slice(slice_obj, axis=axis) def _convert_to_indexer(self, key, axis: int, is_setter: bool = False): """ Much simpler as we only have to deal with our valid types. """ return key def _get_setitem_indexer(self, key): # GH#32257 Fall through to let numpy do validation if is_iterator(key): return list(key) return key # ------------------------------------------------------------------- def _setitem_with_indexer(self, indexer, value, name="iloc"): """ _setitem_with_indexer is for setting values on a Series/DataFrame using positional indexers. If the relevant keys are not present, the Series/DataFrame may be expanded. This method is currently broken when dealing with non-unique Indexes, since it goes from positional indexers back to labels when calling BlockManager methods, see GH#12991, GH#22046, GH#15686. 
""" info_axis = self.obj._info_axis_number # maybe partial set take_split_path = not self.obj._mgr.is_single_block # if there is only one block/type, still have to take split path # unless the block is one-dimensional or it can hold the value if ( not take_split_path and getattr(self.obj._mgr, "blocks", False) and self.ndim > 1 ): # in case of dict, keys are indices val = list(value.values()) if isinstance(value, dict) else value blk = self.obj._mgr.blocks[0] take_split_path = not blk._can_hold_element(val) # if we have any multi-indexes that have non-trivial slices # (not null slices) then we must take the split path, xref # GH 10360, GH 27841 if isinstance(indexer, tuple) and len(indexer) == len(self.obj.axes): for i, ax in zip(indexer, self.obj.axes): if isinstance(ax, MultiIndex) and not ( is_integer(i) or com.is_null_slice(i) ): take_split_path = True break if isinstance(indexer, tuple): nindexer = [] for i, idx in enumerate(indexer): if isinstance(idx, dict): # reindex the axis to the new value # and set inplace key, _ = convert_missing_indexer(idx) # if this is the items axes, then take the main missing # path first # this correctly sets the dtype and avoids cache issues # essentially this separates out the block that is needed # to possibly be modified if self.ndim > 1 and i == info_axis: # add the new item, and set the value # must have all defined axes if we have a scalar # or a list-like on the non-info axes if we have a # list-like if not len(self.obj): if not is_list_like_indexer(value): raise ValueError( "cannot set a frame with no " "defined index and a scalar" ) self.obj[key] = value return # add a new item with the dtype setup if com.is_null_slice(indexer[0]): # We are setting an entire column self.obj[key] = value else: self.obj[key] = infer_fill_value(value) new_indexer = convert_from_missing_indexer_tuple( indexer, self.obj.axes ) self._setitem_with_indexer(new_indexer, value, name) return # reindex the axis # make sure to clear the cache because we are # just replacing the block manager here # so the object is the same index = self.obj._get_axis(i) labels = index.insert(len(index), key) # We are expanding the Series/DataFrame values to match # the length of thenew index `labels`. GH#40096 ensure # this is valid even if the index has duplicates. taker = np.arange(len(index) + 1, dtype=np.intp) taker[-1] = -1 reindexers = {i: (labels, taker)} new_obj = self.obj._reindex_with_indexers( reindexers, allow_dups=True ) self.obj._mgr = new_obj._mgr self.obj._maybe_update_cacher(clear=True) self.obj._is_copy = None nindexer.append(labels.get_loc(key)) else: nindexer.append(idx) indexer = tuple(nindexer) else: indexer, missing = convert_missing_indexer(indexer) if missing: self._setitem_with_indexer_missing(indexer, value) return # align and set the values if take_split_path: # We have to operate column-wise self._setitem_with_indexer_split_path(indexer, value, name) else: self._setitem_single_block(indexer, value, name) def _setitem_with_indexer_split_path(self, indexer, value, name: str): """ Setitem column-wise. 
""" # Above we only set take_split_path to True for 2D cases assert self.ndim == 2 if not isinstance(indexer, tuple): indexer = _tuplify(self.ndim, indexer) if len(indexer) > self.ndim: raise IndexError("too many indices for array") if isinstance(indexer[0], np.ndarray) and indexer[0].ndim > 2: raise ValueError(r"Cannot set values with ndim > 2") if (isinstance(value, ABCSeries) and name != "iloc") or isinstance(value, dict): from pandas import Series value = self._align_series(indexer, Series(value)) # Ensure we have something we can iterate over info_axis = indexer[1] ilocs = self._ensure_iterable_column_indexer(info_axis) pi = indexer[0] lplane_indexer = length_of_indexer(pi, self.obj.index) # lplane_indexer gives the expected length of obj[indexer[0]] # we need an iterable, with a ndim of at least 1 # eg. don't pass through np.array(0) if is_list_like_indexer(value) and getattr(value, "ndim", 1) > 0: if isinstance(value, ABCDataFrame): self._setitem_with_indexer_frame_value(indexer, value, name) elif np.ndim(value) == 2: self._setitem_with_indexer_2d_value(indexer, value) elif len(ilocs) == 1 and lplane_indexer == len(value) and not is_scalar(pi): # We are setting multiple rows in a single column. self._setitem_single_column(ilocs[0], value, pi) elif len(ilocs) == 1 and 0 != lplane_indexer != len(value): # We are trying to set N values into M entries of a single # column, which is invalid for N != M # Exclude zero-len for e.g. boolean masking that is all-false if len(value) == 1 and not is_integer(info_axis): # This is a case like df.iloc[:3, [1]] = [0] # where we treat as df.iloc[:3, 1] = 0 return self._setitem_with_indexer((pi, info_axis[0]), value[0]) raise ValueError( "Must have equal len keys and value " "when setting with an iterable" ) elif lplane_indexer == 0 and len(value) == len(self.obj.index): # We get here in one case via .loc with a all-False mask pass elif len(ilocs) == len(value): # We are setting multiple columns in a single row. for loc, v in zip(ilocs, value): self._setitem_single_column(loc, v, pi) elif len(ilocs) == 1 and com.is_null_slice(pi) and len(self.obj) == 0: # This is a setitem-with-expansion, see # test_loc_setitem_empty_append_expands_rows_mixed_dtype # e.g. 
df = DataFrame(columns=["x", "y"]) # df["x"] = df["x"].astype(np.int64) # df.loc[:, "x"] = [1, 2, 3] self._setitem_single_column(ilocs[0], value, pi) else: raise ValueError( "Must have equal len keys and value " "when setting with an iterable" ) else: # scalar value for loc in ilocs: self._setitem_single_column(loc, value, pi) def _setitem_with_indexer_2d_value(self, indexer, value): # We get here with np.ndim(value) == 2, excluding DataFrame, # which goes through _setitem_with_indexer_frame_value pi = indexer[0] ilocs = self._ensure_iterable_column_indexer(indexer[1]) # GH#7551 Note that this coerces the dtype if we are mixed value = np.array(value, dtype=object) if len(ilocs) != value.shape[1]: raise ValueError( "Must have equal len keys and value when setting with an ndarray" ) for i, loc in enumerate(ilocs): # setting with a list, re-coerces self._setitem_single_column(loc, value[:, i].tolist(), pi) def _setitem_with_indexer_frame_value(self, indexer, value: DataFrame, name: str): ilocs = self._ensure_iterable_column_indexer(indexer[1]) sub_indexer = list(indexer) pi = indexer[0] multiindex_indexer = isinstance(self.obj.columns, MultiIndex) unique_cols = value.columns.is_unique # We do not want to align the value in case of iloc GH#37728 if name == "iloc": for i, loc in enumerate(ilocs): val = value.iloc[:, i] self._setitem_single_column(loc, val, pi) elif not unique_cols and value.columns.equals(self.obj.columns): # We assume we are already aligned, see # test_iloc_setitem_frame_duplicate_columns_multiple_blocks for loc in ilocs: item = self.obj.columns[loc] if item in value: sub_indexer[1] = item val = self._align_series( tuple(sub_indexer), value.iloc[:, loc], multiindex_indexer, ) else: val = np.nan self._setitem_single_column(loc, val, pi) elif not unique_cols: raise ValueError("Setting with non-unique columns is not allowed.") else: for loc in ilocs: item = self.obj.columns[loc] if item in value: sub_indexer[1] = item val = self._align_series( tuple(sub_indexer), value[item], multiindex_indexer ) else: val = np.nan self._setitem_single_column(loc, val, pi) def _setitem_single_column(self, loc: int, value, plane_indexer): """ Parameters ---------- loc : int Indexer for column position plane_indexer : int, slice, listlike[int] The indexer we use for setitem along axis=0. """ pi = plane_indexer ser = self.obj._ixs(loc, axis=1) # perform the equivalent of a setitem on the info axis # as we have a null slice or a slice with full bounds # which means essentially reassign to the columns of a # multi-dim object # GH#6149 (null slice), GH#10408 (full bounds) if com.is_null_slice(pi) or com.is_full_slice(pi, len(self.obj)): ser = value elif ( is_array_like(value) and is_exact_shape_match(ser, value) and not is_empty_indexer(pi, value) ): if is_list_like(pi): ser = value[np.argsort(pi)] else: # in case of slice ser = value[pi] else: # set the item, possibly having a dtype change ser = ser.copy() ser._mgr = ser._mgr.setitem(indexer=(pi,), value=value) ser._maybe_update_cacher(clear=True) # reset the sliced object if unique self.obj._iset_item(loc, ser) def _setitem_single_block(self, indexer, value, name: str): """ _setitem_with_indexer for the case when we have a single Block. 
""" from pandas import Series info_axis = self.obj._info_axis_number item_labels = self.obj._get_axis(info_axis) if isinstance(indexer, tuple): # if we are setting on the info axis ONLY # set using those methods to avoid block-splitting # logic here if ( len(indexer) > info_axis and is_integer(indexer[info_axis]) and all( com.is_null_slice(idx) for i, idx in enumerate(indexer) if i != info_axis ) ): selected_item_labels = item_labels[indexer[info_axis]] if len(item_labels.get_indexer_for([selected_item_labels])) == 1: self.obj[selected_item_labels] = value return indexer = maybe_convert_ix(*indexer) if (isinstance(value, ABCSeries) and name != "iloc") or isinstance(value, dict): # TODO(EA): ExtensionBlock.setitem this causes issues with # setting for extensionarrays that store dicts. Need to decide # if it's worth supporting that. value = self._align_series(indexer, Series(value)) elif isinstance(value, ABCDataFrame) and name != "iloc": value = self._align_frame(indexer, value) # check for chained assignment self.obj._check_is_chained_assignment_possible() # actually do the set self.obj._mgr = self.obj._mgr.setitem(indexer=indexer, value=value) self.obj._maybe_update_cacher(clear=True) def _setitem_with_indexer_missing(self, indexer, value): """ Insert new row(s) or column(s) into the Series or DataFrame. """ from pandas import Series # reindex the axis to the new value # and set inplace if self.ndim == 1: index = self.obj.index new_index = index.insert(len(index), indexer) # we have a coerced indexer, e.g. a float # that matches in an Int64Index, so # we will not create a duplicate index, rather # index to that element # e.g. 0.0 -> 0 # GH#12246 if index.is_unique: new_indexer = index.get_indexer([new_index[-1]]) if (new_indexer != -1).any(): # We get only here with loc, so can hard code return self._setitem_with_indexer(new_indexer, value, "loc") # this preserves dtype of the value new_values = Series([value])._values if len(self.obj._values): # GH#22717 handle casting compatibility that np.concatenate # does incorrectly new_values = concat_compat([self.obj._values, new_values]) self.obj._mgr = self.obj._constructor( new_values, index=new_index, name=self.obj.name )._mgr self.obj._maybe_update_cacher(clear=True) elif self.ndim == 2: if not len(self.obj.columns): # no columns and scalar raise ValueError("cannot set a frame with no defined columns") if isinstance(value, ABCSeries): # append a Series value = value.reindex(index=self.obj.columns, copy=True) value.name = indexer elif isinstance(value, dict): value = Series( value, index=self.obj.columns, name=indexer, dtype=object ) else: # a list-list if is_list_like_indexer(value): # must have conforming columns if len(value) != len(self.obj.columns): raise ValueError("cannot set a row with mismatched columns") value = Series(value, index=self.obj.columns, name=indexer) self.obj._mgr = self.obj.append(value)._mgr self.obj._maybe_update_cacher(clear=True) def _ensure_iterable_column_indexer(self, column_indexer): """ Ensure that our column indexer is something that can be iterated over. 
""" if is_integer(column_indexer): ilocs = [column_indexer] elif isinstance(column_indexer, slice): ilocs = np.arange(len(self.obj.columns))[column_indexer] elif isinstance(column_indexer, np.ndarray) and is_bool_dtype( column_indexer.dtype ): ilocs = np.arange(len(column_indexer))[column_indexer] else: ilocs = column_indexer return ilocs def _align_series(self, indexer, ser: Series, multiindex_indexer: bool = False): """ Parameters ---------- indexer : tuple, slice, scalar Indexer used to get the locations that will be set to `ser`. ser : pd.Series Values to assign to the locations specified by `indexer`. multiindex_indexer : bool, optional Defaults to False. Should be set to True if `indexer` was from a `pd.MultiIndex`, to avoid unnecessary broadcasting. Returns ------- `np.array` of `ser` broadcast to the appropriate shape for assignment to the locations selected by `indexer` """ if isinstance(indexer, (slice, np.ndarray, list, Index)): indexer = (indexer,) if isinstance(indexer, tuple): # flatten np.ndarray indexers def ravel(i): return i.ravel() if isinstance(i, np.ndarray) else i indexer = tuple(map(ravel, indexer)) aligners = [not com.is_null_slice(idx) for idx in indexer] sum_aligners = sum(aligners) single_aligner = sum_aligners == 1 is_frame = self.ndim == 2 obj = self.obj # are we a single alignable value on a non-primary # dim (e.g. panel: 1,2, or frame: 0) ? # hence need to align to a single axis dimension # rather that find all valid dims # frame if is_frame: single_aligner = single_aligner and aligners[0] # we have a frame, with multiple indexers on both axes; and a # series, so need to broadcast (see GH5206) if sum_aligners == self.ndim and all(is_sequence(_) for _ in indexer): ser = ser.reindex(obj.axes[0][indexer[0]], copy=True)._values # single indexer if len(indexer) > 1 and not multiindex_indexer: len_indexer = len(indexer[1]) ser = np.tile(ser, len_indexer).reshape(len_indexer, -1).T return ser for i, idx in enumerate(indexer): ax = obj.axes[i] # multiple aligners (or null slices) if is_sequence(idx) or isinstance(idx, slice): if single_aligner and com.is_null_slice(idx): continue new_ix = ax[idx] if not is_list_like_indexer(new_ix): new_ix = Index([new_ix]) else: new_ix = Index(new_ix) if ser.index.equals(new_ix) or not len(new_ix): return ser._values.copy() return ser.reindex(new_ix)._values # 2 dims elif single_aligner: # reindex along index ax = self.obj.axes[1] if ser.index.equals(ax) or not len(ax): return ser._values.copy() return ser.reindex(ax)._values elif is_integer(indexer) and self.ndim == 1: if is_object_dtype(self.obj): return ser ax = self.obj._get_axis(0) if ser.index.equals(ax): return ser._values.copy() return ser.reindex(ax)._values[indexer] elif is_integer(indexer): ax = self.obj._get_axis(1) if ser.index.equals(ax): return ser._values.copy() return ser.reindex(ax)._values raise ValueError("Incompatible indexer with Series") def _align_frame(self, indexer, df: DataFrame): is_frame = self.ndim == 2 if isinstance(indexer, tuple): idx, cols = None, None sindexers = [] for i, ix in enumerate(indexer): ax = self.obj.axes[i] if is_sequence(ix) or isinstance(ix, slice): if isinstance(ix, np.ndarray): ix = ix.ravel() if idx is None: idx = ax[ix] elif cols is None: cols = ax[ix] else: break else: sindexers.append(i) if idx is not None and cols is not None: if df.index.equals(idx) and df.columns.equals(cols): val = df.copy()._values else: val = df.reindex(idx, columns=cols)._values return val elif (isinstance(indexer, slice) or 
is_list_like_indexer(indexer)) and is_frame: ax = self.obj.index[indexer] if df.index.equals(ax): val = df.copy()._values else: # we have a multi-index and are trying to align # with a particular, level GH3738 if ( isinstance(ax, MultiIndex) and isinstance(df.index, MultiIndex) and ax.nlevels != df.index.nlevels ): raise TypeError( "cannot align on a multi-index with out " "specifying the join levels" ) val = df.reindex(index=ax)._values return val raise ValueError("Incompatible indexer with DataFrame") class _ScalarAccessIndexer(NDFrameIndexerBase): """ Access scalars quickly. """ def _convert_key(self, key, is_setter: bool = False): raise AbstractMethodError(self) def __getitem__(self, key): if not isinstance(key, tuple): # we could have a convertible item here (e.g. Timestamp) if not is_list_like_indexer(key): key = (key,) else: raise ValueError("Invalid call for scalar access (getting)!") key = self._convert_key(key) return self.obj._get_value(*key, takeable=self._takeable) def __setitem__(self, key, value): if isinstance(key, tuple): key = tuple(com.apply_if_callable(x, self.obj) for x in key) else: # scalar callable may return tuple key = com.apply_if_callable(key, self.obj) if not isinstance(key, tuple): key = _tuplify(self.ndim, key) key = list(self._convert_key(key, is_setter=True)) if len(key) != self.ndim: raise ValueError("Not enough indexers for scalar access (setting)!") self.obj._set_value(*key, value=value, takeable=self._takeable) @doc(IndexingMixin.at) class _AtIndexer(_ScalarAccessIndexer): _takeable = False def _convert_key(self, key, is_setter: bool = False): """ Require they keys to be the same type as the index. (so we don't fallback) """ # GH 26989 # For series, unpacking key needs to result in the label. # This is already the case for len(key) == 1; e.g. (1,) if self.ndim == 1 and len(key) > 1: key = (key,) # allow arbitrary setting if is_setter: return list(key) return key @property def _axes_are_unique(self) -> bool: # Only relevant for self.ndim == 2 assert self.ndim == 2 return self.obj.index.is_unique and self.obj.columns.is_unique def __getitem__(self, key): if self.ndim == 2 and not self._axes_are_unique: # GH#33041 fall back to .loc if not isinstance(key, tuple) or not all(is_scalar(x) for x in key): raise ValueError("Invalid call for scalar access (getting)!") return self.obj.loc[key] return super().__getitem__(key) def __setitem__(self, key, value): if self.ndim == 2 and not self._axes_are_unique: # GH#33041 fall back to .loc if not isinstance(key, tuple) or not all(is_scalar(x) for x in key): raise ValueError("Invalid call for scalar access (setting)!") self.obj.loc[key] = value return return super().__setitem__(key, value) @doc(IndexingMixin.iat) class _iAtIndexer(_ScalarAccessIndexer): _takeable = True def _convert_key(self, key, is_setter: bool = False): """ Require integer args. (and convert to label arguments) """ for i in key: if not is_integer(i): raise ValueError("iAt based indexing can only have integer indexers") return key def _tuplify(ndim: int, loc: Hashable) -> tuple[Hashable | slice, ...]: """ Given an indexer for the first dimension, create an equivalent tuple for indexing over all dimensions. Parameters ---------- ndim : int loc : object Returns ------- tuple """ _tup: list[Hashable | slice] _tup = [slice(None, None) for _ in range(ndim)] _tup[0] = loc return tuple(_tup) def convert_to_index_sliceable(obj: DataFrame, key): """ If we are index sliceable, then return my slicer, otherwise return None. 
""" idx = obj.index if isinstance(key, slice): return idx._convert_slice_indexer(key, kind="getitem") elif isinstance(key, str): # we are an actual column if key in obj.columns: return None # We might have a datetimelike string that we can translate to a # slice here via partial string indexing if idx._supports_partial_string_indexing: try: res = idx._get_string_slice(str(key)) warnings.warn( "Indexing a DataFrame with a datetimelike index using a single " "string to slice the rows, like `frame[string]`, is deprecated " "and will be removed in a future version. Use `frame.loc[string]` " "instead.", FutureWarning, stacklevel=3, ) return res except (KeyError, ValueError, NotImplementedError): return None return None def check_bool_indexer(index: Index, key) -> np.ndarray: """ Check if key is a valid boolean indexer for an object with such index and perform reindexing or conversion if needed. This function assumes that is_bool_indexer(key) == True. Parameters ---------- index : Index Index of the object on which the indexing is done. key : list-like Boolean indexer to check. Returns ------- np.array Resulting key. Raises ------ IndexError If the key does not have the same length as index. IndexingError If the index of the key is unalignable to index. """ result = key if isinstance(key, ABCSeries) and not key.index.equals(index): result = result.reindex(index) mask = isna(result._values) if mask.any(): raise IndexingError( "Unalignable boolean Series provided as " "indexer (index of the boolean Series and of " "the indexed object do not match)." ) return result.astype(bool)._values if is_object_dtype(key): # key might be object-dtype bool, check_array_indexer needs bool array result = np.asarray(result, dtype=bool) elif not is_array_like(result): # GH 33924 # key may contain nan elements, check_array_indexer needs bool array result = pd_array(result, dtype=bool) return check_array_indexer(index, result) def convert_missing_indexer(indexer): """ Reverse convert a missing indexer, which is a dict return the scalar indexer and a boolean indicating if we converted """ if isinstance(indexer, dict): # a missing key (but not a tuple indexer) indexer = indexer["key"] if isinstance(indexer, bool): raise KeyError("cannot use a single bool to index into setitem") return indexer, True return indexer, False def convert_from_missing_indexer_tuple(indexer, axes): """ Create a filtered indexer that doesn't have any missing indexers. """ def get_indexer(_i, _idx): return axes[_i].get_loc(_idx["key"]) if isinstance(_idx, dict) else _idx return tuple(get_indexer(_i, _idx) for _i, _idx in enumerate(indexer)) def maybe_convert_ix(*args): """ We likely want to take the cross-product. """ for arg in args: if not isinstance(arg, (np.ndarray, list, ABCSeries, Index)): return args return np.ix_(*args) def is_nested_tuple(tup, labels) -> bool: """ Returns ------- bool """ # check for a compatible nested tuple and multiindexes among the axes if not isinstance(tup, tuple): return False for k in tup: if is_list_like(k) or isinstance(k, slice): return isinstance(labels, MultiIndex) return False def is_label_like(key) -> bool: """ Returns ------- bool """ # select a label or row return not isinstance(key, slice) and not is_list_like_indexer(key) def need_slice(obj: slice) -> bool: """ Returns ------- bool """ return ( obj.start is not None or obj.stop is not None or (obj.step is not None and obj.step != 1) )
import operator import numpy as np import pytest from pandas import ( DataFrame, Index, Series, ) import pandas._testing as tm class TestMatMul: def test_matmul(self): # matmul test is for GH#10259 a = DataFrame( np.random.randn(3, 4), index=["a", "b", "c"], columns=["p", "q", "r", "s"] ) b = DataFrame( np.random.randn(4, 2), index=["p", "q", "r", "s"], columns=["one", "two"] ) # DataFrame @ DataFrame result = operator.matmul(a, b) expected = DataFrame( np.dot(a.values, b.values), index=["a", "b", "c"], columns=["one", "two"] ) tm.assert_frame_equal(result, expected) # DataFrame @ Series result = operator.matmul(a, b.one) expected = Series(np.dot(a.values, b.one.values), index=["a", "b", "c"]) tm.assert_series_equal(result, expected) # np.array @ DataFrame result = operator.matmul(a.values, b) assert isinstance(result, DataFrame) assert result.columns.equals(b.columns) assert result.index.equals(Index(range(3))) expected = np.dot(a.values, b.values) tm.assert_almost_equal(result.values, expected) # nested list @ DataFrame (__rmatmul__) result = operator.matmul(a.values.tolist(), b) expected = DataFrame( np.dot(a.values, b.values), index=["a", "b", "c"], columns=["one", "two"] ) tm.assert_almost_equal(result.values, expected.values) # mixed dtype DataFrame @ DataFrame a["q"] = a.q.round().astype(int) result = operator.matmul(a, b) expected = DataFrame( np.dot(a.values, b.values), index=["a", "b", "c"], columns=["one", "two"] ) tm.assert_frame_equal(result, expected) # different dtypes DataFrame @ DataFrame a = a.astype(int) result = operator.matmul(a, b) expected = DataFrame( np.dot(a.values, b.values), index=["a", "b", "c"], columns=["one", "two"] ) tm.assert_frame_equal(result, expected) # unaligned df = DataFrame(np.random.randn(3, 4), index=[1, 2, 3], columns=range(4)) df2 = DataFrame(np.random.randn(5, 3), index=range(5), columns=[1, 2, 3]) with pytest.raises(ValueError, match="aligned"): operator.matmul(df, df2) def test_matmul_message_shapes(self): # GH#21581 exception message should reflect original shapes, # not transposed shapes a = np.random.rand(10, 4) b = np.random.rand(5, 3) df = DataFrame(b) msg = r"shapes \(10, 4\) and \(5, 3\) not aligned" with pytest.raises(ValueError, match=msg): a @ df with pytest.raises(ValueError, match=msg): a.tolist() @ df
datapythonista/pandas
pandas/tests/frame/methods/test_matmul.py
pandas/core/indexing.py
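The indexing module above is easier to follow next to a concrete round trip through its main entry points. The sketch below is illustrative only (it is not part of pandas/core/indexing.py) and assumes a reasonably recent pandas release; it exercises the label-based _LocIndexer, the positional _iLocIndexer, the scalar _AtIndexer/_iAtIndexer fast path, and setitem-with-expansion.

import pandas as pd

df = pd.DataFrame({"a": [1, 2, 3], "b": [4.0, 5.0, 6.0]}, index=["x", "y", "z"])

# _LocIndexer: label-based, slice endpoints are BOTH included
assert list(df.loc["x":"y", "a"]) == [1, 2]

# _iLocIndexer: position-based, the slice end point is EXCLUDED
assert list(df.iloc[0:2, 0]) == [1, 2]

# _AtIndexer / _iAtIndexer: the scalar fast path behind _is_scalar_access
assert df.at["z", "b"] == 6.0
assert df.iat[2, 1] == 6.0

# Setting a missing label through .loc enlarges the frame
# (_setitem_with_indexer_missing); .iloc would raise instead.
df.loc["w"] = [7, 8.0]
assert list(df.index) == ["x", "y", "z", "w"]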
from pandas import ( TimedeltaIndex, timedelta_range, ) import pandas._testing as tm class TestTimedeltaIndexDelete: def test_delete(self): idx = timedelta_range(start="1 Days", periods=5, freq="D", name="idx") # preserve freq expected_0 = timedelta_range(start="2 Days", periods=4, freq="D", name="idx") expected_4 = timedelta_range(start="1 Days", periods=4, freq="D", name="idx") # reset freq to None expected_1 = TimedeltaIndex( ["1 day", "3 day", "4 day", "5 day"], freq=None, name="idx" ) cases = { 0: expected_0, -5: expected_0, -1: expected_4, 4: expected_4, 1: expected_1, } for n, expected in cases.items(): result = idx.delete(n) tm.assert_index_equal(result, expected) assert result.name == expected.name assert result.freq == expected.freq with tm.external_error_raised((IndexError, ValueError)): # either depending on numpy version idx.delete(5) def test_delete_slice(self): idx = timedelta_range(start="1 days", periods=10, freq="D", name="idx") # preserve freq expected_0_2 = timedelta_range(start="4 days", periods=7, freq="D", name="idx") expected_7_9 = timedelta_range(start="1 days", periods=7, freq="D", name="idx") # reset freq to None expected_3_5 = TimedeltaIndex( ["1 d", "2 d", "3 d", "7 d", "8 d", "9 d", "10d"], freq=None, name="idx" ) cases = { (0, 1, 2): expected_0_2, (7, 8, 9): expected_7_9, (3, 4, 5): expected_3_5, } for n, expected in cases.items(): result = idx.delete(n) tm.assert_index_equal(result, expected) assert result.name == expected.name assert result.freq == expected.freq result = idx.delete(slice(n[0], n[-1] + 1)) tm.assert_index_equal(result, expected) assert result.name == expected.name assert result.freq == expected.freq def test_delete_doesnt_infer_freq(self): # GH#30655 behavior matches DatetimeIndex tdi = TimedeltaIndex(["1 Day", "2 Days", None, "3 Days", "4 Days"]) result = tdi.delete(2) assert result.freq is None
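As a compact summary of what the delete tests above check (illustrative only, assuming a recent pandas): deleting an endpoint of a regular TimedeltaIndex keeps its freq, while deleting an interior element resets freq to None rather than re-inferring it.

import pandas as pd

tdi = pd.timedelta_range(start="1 Days", periods=5, freq="D", name="idx")

# Removing an endpoint keeps the regular spacing, so freq is preserved
assert tdi.delete(0).freq == tdi.freq
assert tdi.delete(-1).freq == tdi.freq

# Removing an interior element breaks the spacing; freq is reset to None
assert tdi.delete(2).freq is None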
import operator import numpy as np import pytest from pandas import ( DataFrame, Index, Series, ) import pandas._testing as tm class TestMatMul: def test_matmul(self): # matmul test is for GH#10259 a = DataFrame( np.random.randn(3, 4), index=["a", "b", "c"], columns=["p", "q", "r", "s"] ) b = DataFrame( np.random.randn(4, 2), index=["p", "q", "r", "s"], columns=["one", "two"] ) # DataFrame @ DataFrame result = operator.matmul(a, b) expected = DataFrame( np.dot(a.values, b.values), index=["a", "b", "c"], columns=["one", "two"] ) tm.assert_frame_equal(result, expected) # DataFrame @ Series result = operator.matmul(a, b.one) expected = Series(np.dot(a.values, b.one.values), index=["a", "b", "c"]) tm.assert_series_equal(result, expected) # np.array @ DataFrame result = operator.matmul(a.values, b) assert isinstance(result, DataFrame) assert result.columns.equals(b.columns) assert result.index.equals(Index(range(3))) expected = np.dot(a.values, b.values) tm.assert_almost_equal(result.values, expected) # nested list @ DataFrame (__rmatmul__) result = operator.matmul(a.values.tolist(), b) expected = DataFrame( np.dot(a.values, b.values), index=["a", "b", "c"], columns=["one", "two"] ) tm.assert_almost_equal(result.values, expected.values) # mixed dtype DataFrame @ DataFrame a["q"] = a.q.round().astype(int) result = operator.matmul(a, b) expected = DataFrame( np.dot(a.values, b.values), index=["a", "b", "c"], columns=["one", "two"] ) tm.assert_frame_equal(result, expected) # different dtypes DataFrame @ DataFrame a = a.astype(int) result = operator.matmul(a, b) expected = DataFrame( np.dot(a.values, b.values), index=["a", "b", "c"], columns=["one", "two"] ) tm.assert_frame_equal(result, expected) # unaligned df = DataFrame(np.random.randn(3, 4), index=[1, 2, 3], columns=range(4)) df2 = DataFrame(np.random.randn(5, 3), index=range(5), columns=[1, 2, 3]) with pytest.raises(ValueError, match="aligned"): operator.matmul(df, df2) def test_matmul_message_shapes(self): # GH#21581 exception message should reflect original shapes, # not transposed shapes a = np.random.rand(10, 4) b = np.random.rand(5, 3) df = DataFrame(b) msg = r"shapes \(10, 4\) and \(5, 3\) not aligned" with pytest.raises(ValueError, match=msg): a @ df with pytest.raises(ValueError, match=msg): a.tolist() @ df
datapythonista/pandas
pandas/tests/frame/methods/test_matmul.py
pandas/tests/indexes/timedeltas/test_delete.py
from __future__ import annotations from contextlib import contextmanager import re from typing import ( Sequence, Type, cast, ) import warnings @contextmanager def assert_produces_warning( expected_warning: type[Warning] | bool | None = Warning, filter_level="always", check_stacklevel: bool = True, raise_on_extra_warnings: bool = True, match: str | None = None, ): """ Context manager for running code expected to either raise a specific warning, or not raise any warnings. Verifies that the code raises the expected warning, and that it does not raise any other unexpected warnings. It is basically a wrapper around ``warnings.catch_warnings``. Parameters ---------- expected_warning : {Warning, False, None}, default Warning The type of Exception raised. ``exception.Warning`` is the base class for all warnings. To check that no warning is returned, specify ``False`` or ``None``. filter_level : str or None, default "always" Specifies whether warnings are ignored, displayed, or turned into errors. Valid values are: * "error" - turns matching warnings into exceptions * "ignore" - discard the warning * "always" - always emit a warning * "default" - print the warning the first time it is generated from each location * "module" - print the warning the first time it is generated from each module * "once" - print the warning the first time it is generated check_stacklevel : bool, default True If True, displays the line that called the function containing the warning to show were the function is called. Otherwise, the line that implements the function is displayed. raise_on_extra_warnings : bool, default True Whether extra warnings not of the type `expected_warning` should cause the test to fail. match : str, optional Match warning message. Examples -------- >>> import warnings >>> with assert_produces_warning(): ... warnings.warn(UserWarning()) ... >>> with assert_produces_warning(False): ... warnings.warn(RuntimeWarning()) ... Traceback (most recent call last): ... AssertionError: Caused unexpected warning(s): ['RuntimeWarning']. >>> with assert_produces_warning(UserWarning): ... warnings.warn(RuntimeWarning()) Traceback (most recent call last): ... AssertionError: Did not see expected warning of class 'UserWarning'. ..warn:: This is *not* thread-safe. 
""" __tracebackhide__ = True with warnings.catch_warnings(record=True) as w: warnings.simplefilter(filter_level) yield w if expected_warning: expected_warning = cast(Type[Warning], expected_warning) _assert_caught_expected_warning( caught_warnings=w, expected_warning=expected_warning, match=match, check_stacklevel=check_stacklevel, ) if raise_on_extra_warnings: _assert_caught_no_extra_warnings( caught_warnings=w, expected_warning=expected_warning, ) def _assert_caught_expected_warning( *, caught_warnings: Sequence[warnings.WarningMessage], expected_warning: type[Warning], match: str | None, check_stacklevel: bool, ) -> None: """Assert that there was the expected warning among the caught warnings.""" saw_warning = False matched_message = False for actual_warning in caught_warnings: if issubclass(actual_warning.category, expected_warning): saw_warning = True if check_stacklevel and issubclass( actual_warning.category, (FutureWarning, DeprecationWarning) ): _assert_raised_with_correct_stacklevel(actual_warning) if match is not None and re.search(match, str(actual_warning.message)): matched_message = True if not saw_warning: raise AssertionError( f"Did not see expected warning of class " f"{repr(expected_warning.__name__)}" ) if match and not matched_message: raise AssertionError( f"Did not see warning {repr(expected_warning.__name__)} " f"matching {match}" ) def _assert_caught_no_extra_warnings( *, caught_warnings: Sequence[warnings.WarningMessage], expected_warning: type[Warning] | bool | None, ) -> None: """Assert that no extra warnings apart from the expected ones are caught.""" extra_warnings = [] for actual_warning in caught_warnings: if _is_unexpected_warning(actual_warning, expected_warning): unclosed = "unclosed transport <asyncio.sslproto._SSLProtocolTransport" if actual_warning.category == ResourceWarning and unclosed in str( actual_warning.message ): # FIXME: kludge because pytest.filterwarnings does not # suppress these, xref GH#38630 continue extra_warnings.append( ( actual_warning.category.__name__, actual_warning.message, actual_warning.filename, actual_warning.lineno, ) ) if extra_warnings: raise AssertionError(f"Caused unexpected warning(s): {repr(extra_warnings)}") def _is_unexpected_warning( actual_warning: warnings.WarningMessage, expected_warning: type[Warning] | bool | None, ) -> bool: """Check if the actual warning issued is unexpected.""" if actual_warning and not expected_warning: return True expected_warning = cast(Type[Warning], expected_warning) return bool(not issubclass(actual_warning.category, expected_warning)) def _assert_raised_with_correct_stacklevel( actual_warning: warnings.WarningMessage, ) -> None: from inspect import ( getframeinfo, stack, ) caller = getframeinfo(stack()[4][0]) msg = ( "Warning not set with correct stacklevel. " f"File where warning is raised: {actual_warning.filename} != " f"{caller.filename}. Warning message: {actual_warning.message}" ) assert actual_warning.filename == caller.filename, msg
import operator import numpy as np import pytest from pandas import ( DataFrame, Index, Series, ) import pandas._testing as tm class TestMatMul: def test_matmul(self): # matmul test is for GH#10259 a = DataFrame( np.random.randn(3, 4), index=["a", "b", "c"], columns=["p", "q", "r", "s"] ) b = DataFrame( np.random.randn(4, 2), index=["p", "q", "r", "s"], columns=["one", "two"] ) # DataFrame @ DataFrame result = operator.matmul(a, b) expected = DataFrame( np.dot(a.values, b.values), index=["a", "b", "c"], columns=["one", "two"] ) tm.assert_frame_equal(result, expected) # DataFrame @ Series result = operator.matmul(a, b.one) expected = Series(np.dot(a.values, b.one.values), index=["a", "b", "c"]) tm.assert_series_equal(result, expected) # np.array @ DataFrame result = operator.matmul(a.values, b) assert isinstance(result, DataFrame) assert result.columns.equals(b.columns) assert result.index.equals(Index(range(3))) expected = np.dot(a.values, b.values) tm.assert_almost_equal(result.values, expected) # nested list @ DataFrame (__rmatmul__) result = operator.matmul(a.values.tolist(), b) expected = DataFrame( np.dot(a.values, b.values), index=["a", "b", "c"], columns=["one", "two"] ) tm.assert_almost_equal(result.values, expected.values) # mixed dtype DataFrame @ DataFrame a["q"] = a.q.round().astype(int) result = operator.matmul(a, b) expected = DataFrame( np.dot(a.values, b.values), index=["a", "b", "c"], columns=["one", "two"] ) tm.assert_frame_equal(result, expected) # different dtypes DataFrame @ DataFrame a = a.astype(int) result = operator.matmul(a, b) expected = DataFrame( np.dot(a.values, b.values), index=["a", "b", "c"], columns=["one", "two"] ) tm.assert_frame_equal(result, expected) # unaligned df = DataFrame(np.random.randn(3, 4), index=[1, 2, 3], columns=range(4)) df2 = DataFrame(np.random.randn(5, 3), index=range(5), columns=[1, 2, 3]) with pytest.raises(ValueError, match="aligned"): operator.matmul(df, df2) def test_matmul_message_shapes(self): # GH#21581 exception message should reflect original shapes, # not transposed shapes a = np.random.rand(10, 4) b = np.random.rand(5, 3) df = DataFrame(b) msg = r"shapes \(10, 4\) and \(5, 3\) not aligned" with pytest.raises(ValueError, match=msg): a @ df with pytest.raises(ValueError, match=msg): a.tolist() @ df
datapythonista/pandas
pandas/tests/frame/methods/test_matmul.py
pandas/_testing/_warnings.py
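A short usage sketch for the assert_produces_warning helper defined above (illustrative only; it mirrors how pandas' own tests reach it through the public pandas._testing namespace):

import warnings

import pandas._testing as tm

# Expect a FutureWarning whose message matches the regex; with
# raise_on_extra_warnings=True (the default), unrelated warnings
# raised inside the block also make the assertion fail.
with tm.assert_produces_warning(FutureWarning, match="will be removed"):
    warnings.warn("this option will be removed", FutureWarning)

# Passing None (or False) asserts that the block emits no warnings at all.
with tm.assert_produces_warning(None):
    pass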
from typing import Optional import numpy as np from pandas._libs import lib from pandas.core.dtypes.cast import maybe_downcast_numeric from pandas.core.dtypes.common import ( ensure_object, is_datetime_or_timedelta_dtype, is_decimal, is_integer_dtype, is_number, is_numeric_dtype, is_scalar, needs_i8_conversion, ) from pandas.core.dtypes.generic import ( ABCIndex, ABCSeries, ) import pandas as pd from pandas.core.arrays.numeric import NumericArray def to_numeric(arg, errors="raise", downcast=None): """ Convert argument to a numeric type. The default return dtype is `float64` or `int64` depending on the data supplied. Use the `downcast` parameter to obtain other dtypes. Please note that precision loss may occur if really large numbers are passed in. Due to the internal limitations of `ndarray`, if numbers smaller than `-9223372036854775808` (np.iinfo(np.int64).min) or larger than `18446744073709551615` (np.iinfo(np.uint64).max) are passed in, it is very likely they will be converted to float so that they can stored in an `ndarray`. These warnings apply similarly to `Series` since it internally leverages `ndarray`. Parameters ---------- arg : scalar, list, tuple, 1-d array, or Series Argument to be converted. errors : {'ignore', 'raise', 'coerce'}, default 'raise' - If 'raise', then invalid parsing will raise an exception. - If 'coerce', then invalid parsing will be set as NaN. - If 'ignore', then invalid parsing will return the input. downcast : {'integer', 'signed', 'unsigned', 'float'}, default None If not None, and if the data has been successfully cast to a numerical dtype (or if the data was numeric to begin with), downcast that resulting data to the smallest numerical dtype possible according to the following rules: - 'integer' or 'signed': smallest signed int dtype (min.: np.int8) - 'unsigned': smallest unsigned int dtype (min.: np.uint8) - 'float': smallest float dtype (min.: np.float32) As this behaviour is separate from the core conversion to numeric values, any errors raised during the downcasting will be surfaced regardless of the value of the 'errors' input. In addition, downcasting will only occur if the size of the resulting data's dtype is strictly larger than the dtype it is to be cast to, so if none of the dtypes checked satisfy that specification, no downcasting will be performed on the data. Returns ------- ret Numeric if parsing succeeded. Return type depends on input. Series if Series, otherwise ndarray. See Also -------- DataFrame.astype : Cast argument to a specified dtype. to_datetime : Convert argument to datetime. to_timedelta : Convert argument to timedelta. numpy.ndarray.astype : Cast a numpy array to a specified type. DataFrame.convert_dtypes : Convert dtypes. 
Examples -------- Take separate series and convert to numeric, coercing when told to >>> s = pd.Series(['1.0', '2', -3]) >>> pd.to_numeric(s) 0 1.0 1 2.0 2 -3.0 dtype: float64 >>> pd.to_numeric(s, downcast='float') 0 1.0 1 2.0 2 -3.0 dtype: float32 >>> pd.to_numeric(s, downcast='signed') 0 1 1 2 2 -3 dtype: int8 >>> s = pd.Series(['apple', '1.0', '2', -3]) >>> pd.to_numeric(s, errors='ignore') 0 apple 1 1.0 2 2 3 -3 dtype: object >>> pd.to_numeric(s, errors='coerce') 0 NaN 1 1.0 2 2.0 3 -3.0 dtype: float64 Downcasting of nullable integer and floating dtypes is supported: >>> s = pd.Series([1, 2, 3], dtype="Int64") >>> pd.to_numeric(s, downcast="integer") 0 1 1 2 2 3 dtype: Int8 >>> s = pd.Series([1.0, 2.1, 3.0], dtype="Float64") >>> pd.to_numeric(s, downcast="float") 0 1.0 1 2.1 2 3.0 dtype: Float32 """ if downcast not in (None, "integer", "signed", "unsigned", "float"): raise ValueError("invalid downcasting method provided") if errors not in ("ignore", "raise", "coerce"): raise ValueError("invalid error value specified") is_series = False is_index = False is_scalars = False if isinstance(arg, ABCSeries): is_series = True values = arg.values elif isinstance(arg, ABCIndex): is_index = True if needs_i8_conversion(arg.dtype): values = arg.asi8 else: values = arg.values elif isinstance(arg, (list, tuple)): values = np.array(arg, dtype="O") elif is_scalar(arg): if is_decimal(arg): return float(arg) if is_number(arg): return arg is_scalars = True values = np.array([arg], dtype="O") elif getattr(arg, "ndim", 1) > 1: raise TypeError("arg must be a list, tuple, 1-d array, or Series") else: values = arg # GH33013: for IntegerArray & FloatingArray extract non-null values for casting # save mask to reconstruct the full array after casting mask: Optional[np.ndarray] = None if isinstance(values, NumericArray): mask = values._mask values = values._data[~mask] values_dtype = getattr(values, "dtype", None) if is_numeric_dtype(values_dtype): pass elif is_datetime_or_timedelta_dtype(values_dtype): values = values.view(np.int64) else: values = ensure_object(values) coerce_numeric = errors not in ("ignore", "raise") try: values, _ = lib.maybe_convert_numeric( values, set(), coerce_numeric=coerce_numeric ) except (ValueError, TypeError): if errors == "raise": raise # attempt downcast only if the data has been successfully converted # to a numerical dtype and if a downcast method has been specified if downcast is not None and is_numeric_dtype(values.dtype): typecodes = None if downcast in ("integer", "signed"): typecodes = np.typecodes["Integer"] elif downcast == "unsigned" and (not len(values) or np.min(values) >= 0): typecodes = np.typecodes["UnsignedInteger"] elif downcast == "float": typecodes = np.typecodes["Float"] # pandas support goes only to np.float32, # as float dtypes smaller than that are # extremely rare and not well supported float_32_char = np.dtype(np.float32).char float_32_ind = typecodes.index(float_32_char) typecodes = typecodes[float_32_ind:] if typecodes is not None: # from smallest to largest for dtype in typecodes: dtype = np.dtype(dtype) if dtype.itemsize <= values.dtype.itemsize: values = maybe_downcast_numeric(values, dtype) # successful conversion if values.dtype == dtype: break # GH33013: for IntegerArray & FloatingArray need to reconstruct masked array if mask is not None: data = np.zeros(mask.shape, dtype=values.dtype) data[~mask] = values from pandas.core.arrays import ( FloatingArray, IntegerArray, ) klass = IntegerArray if is_integer_dtype(data.dtype) else FloatingArray values = 
klass(data, mask.copy()) if is_series: return arg._constructor(values, index=arg.index, name=arg.name) elif is_index: # because we want to coerce to numeric if possible, # do not use _shallow_copy return pd.Index(values, name=arg.name) elif is_scalars: return values[0] else: return values
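A brief illustration of the error-handling and downcast branches implemented above (not part of the original module; behaviour as documented for recent pandas):

import numpy as np
import pandas as pd

s = pd.Series(["1", "2", "spam"])

# errors="coerce": unparseable entries become NaN, so the result is float64
coerced = pd.to_numeric(s, errors="coerce")
assert coerced.dtype == np.float64 and coerced.isna().iloc[2]

# errors="ignore": the original object-dtype values come back unchanged
assert pd.to_numeric(s, errors="ignore").dtype == object

# downcast only shrinks the result when a smaller dtype can hold every value
small = pd.to_numeric(pd.Series([1, 2, 3]), downcast="unsigned")
assert small.dtype == np.uint8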
import operator import numpy as np import pytest from pandas import ( DataFrame, Index, Series, ) import pandas._testing as tm class TestMatMul: def test_matmul(self): # matmul test is for GH#10259 a = DataFrame( np.random.randn(3, 4), index=["a", "b", "c"], columns=["p", "q", "r", "s"] ) b = DataFrame( np.random.randn(4, 2), index=["p", "q", "r", "s"], columns=["one", "two"] ) # DataFrame @ DataFrame result = operator.matmul(a, b) expected = DataFrame( np.dot(a.values, b.values), index=["a", "b", "c"], columns=["one", "two"] ) tm.assert_frame_equal(result, expected) # DataFrame @ Series result = operator.matmul(a, b.one) expected = Series(np.dot(a.values, b.one.values), index=["a", "b", "c"]) tm.assert_series_equal(result, expected) # np.array @ DataFrame result = operator.matmul(a.values, b) assert isinstance(result, DataFrame) assert result.columns.equals(b.columns) assert result.index.equals(Index(range(3))) expected = np.dot(a.values, b.values) tm.assert_almost_equal(result.values, expected) # nested list @ DataFrame (__rmatmul__) result = operator.matmul(a.values.tolist(), b) expected = DataFrame( np.dot(a.values, b.values), index=["a", "b", "c"], columns=["one", "two"] ) tm.assert_almost_equal(result.values, expected.values) # mixed dtype DataFrame @ DataFrame a["q"] = a.q.round().astype(int) result = operator.matmul(a, b) expected = DataFrame( np.dot(a.values, b.values), index=["a", "b", "c"], columns=["one", "two"] ) tm.assert_frame_equal(result, expected) # different dtypes DataFrame @ DataFrame a = a.astype(int) result = operator.matmul(a, b) expected = DataFrame( np.dot(a.values, b.values), index=["a", "b", "c"], columns=["one", "two"] ) tm.assert_frame_equal(result, expected) # unaligned df = DataFrame(np.random.randn(3, 4), index=[1, 2, 3], columns=range(4)) df2 = DataFrame(np.random.randn(5, 3), index=range(5), columns=[1, 2, 3]) with pytest.raises(ValueError, match="aligned"): operator.matmul(df, df2) def test_matmul_message_shapes(self): # GH#21581 exception message should reflect original shapes, # not transposed shapes a = np.random.rand(10, 4) b = np.random.rand(5, 3) df = DataFrame(b) msg = r"shapes \(10, 4\) and \(5, 3\) not aligned" with pytest.raises(ValueError, match=msg): a @ df with pytest.raises(ValueError, match=msg): a.tolist() @ df
datapythonista/pandas
pandas/tests/frame/methods/test_matmul.py
pandas/core/tools/numeric.py
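The flash routines in the file that follows repeatedly call an inner Rachford-Rice solve to turn a set of K-values into a vapor fraction and phase compositions. A minimal sketch of that single step (assuming the `chemicals` package, which the module imports below; the feed and K-values here are only illustrative):

from chemicals.rachford_rice import flash_inner_loop

zs = [0.5, 0.3, 0.2]        # overall feed composition
Ks = [1.685, 0.742, 0.532]  # K-values, e.g. from fugacity-coefficient ratios
VF, xs, ys = flash_inner_loop(zs, Ks)
# VF is the vapor fraction; xs and ys are the liquid and vapor compositions.
# Sequential substitution re-evaluates the K-values at xs/ys and repeats this step.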
# -*- coding: utf-8 -*- '''Chemical Engineering Design Library (ChEDL). Utilities for process modeling. Copyright (C) 2019, 2020 Caleb Bell <Caleb.Andrew.Bell@gmail.com> Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ''' from __future__ import division __all__ = [ 'sequential_substitution_2P', 'sequential_substitution_2P_functional', 'sequential_substitution_GDEM3_2P', 'dew_bubble_Michelsen_Mollerup', 'bubble_T_Michelsen_Mollerup', 'dew_T_Michelsen_Mollerup', 'bubble_P_Michelsen_Mollerup', 'dew_P_Michelsen_Mollerup', 'minimize_gibbs_2P_transformed', 'sequential_substitution_Mehra_2P', 'nonlin_2P', 'nonlin_n_2P', 'sequential_substitution_NP', 'minimize_gibbs_NP_transformed', 'TPV_HSGUA_guesses_1P_methods', 'TPV_solve_HSGUA_guesses_1P', 'sequential_substitution_2P_HSGUAbeta', 'sequential_substitution_2P_sat', 'TP_solve_VF_guesses', 'TPV_double_solve_1P', 'nonlin_2P_HSGUAbeta', 'sequential_substitution_2P_double', 'cm_flash_tol', 'nonlin_2P_newton', 'dew_bubble_newton_zs', 'existence_3P_Michelsen_Mollerup', 'SS_VF_simultaneous', 'stability_iteration_Michelsen', 'assert_stab_success_2P', 'nonlin_equilibrium_NP', 'nonlin_spec_NP', 'TPV_solve_HSGUA_guesses_VL', 'solve_P_VF_IG_K_composition_independent', 'solve_T_VF_IG_K_composition_independent' ] from fluids.constants import R from fluids.numerics import (UnconvergedError, trunc_exp, newton, brenth, secant, translate_bound_f_jac, numpy as np, assert_close, assert_close1d, damping_maintain_sign, oscillation_checking_wrapper, OscillationError, NotBoundedError, jacobian, best_bounding_bounds, isclose, newton_system, make_damp_initial, newton_minimize, root, minimize, fsolve) from fluids.numerics import py_solve, trunc_log from chemicals.utils import (exp, log, copysign, normalize, mixing_simple, property_mass_to_molar) from chemicals.heat_capacity import (Dadgostar_Shaw_integral, Dadgostar_Shaw_integral_over_T, Lastovka_Shaw_integral, Lastovka_Shaw_integral_over_T) from chemicals.rachford_rice import (flash_inner_loop, Rachford_Rice_solutionN, Rachford_Rice_flash_error, Rachford_Rice_solution_LN2) from chemicals.phase_change import SMK from chemicals.volume import COSTALD from chemicals.flash_basic import flash_wilson, flash_Tb_Tc_Pc, flash_ideal from chemicals.exceptions import TrivialSolutionError from thermo.phases import Phase, CoolPropPhase, CEOSLiquid, CEOSGas, IAPWS95 from thermo.phases.phase_utils import lnphis_direct from thermo.coolprop import CPiP_min LASTOVKA_SHAW = 'Lastovka Shaw' DADGOSTAR_SHAW_1 = 'Dadgostar Shaw 1' STP_T_GUESS = '298.15 K' LAST_CONVERGED = 'Last converged' 
FIXED_GUESS = 'Fixed guess' IG_ENTHALPY = 'Ideal gas' IDEAL_LIQUID_ENTHALPY = 'Ideal liquid' WILSON_GUESS = 'Wilson' TB_TC_GUESS = 'Tb Tc' IDEAL_PSAT = 'Ideal Psat' PT_SS = 'SS' PT_SS_MEHRA = 'SS Mehra' PT_SS_GDEM3 = 'SS GDEM3' PT_NEWTON_lNKVF = 'Newton lnK VF' IDEAL_WILSON = 'Ideal Wilson' SHAW_ELEMENTAL = 'Shaw Elemental' PH_T_guesses_1P_methods = [LASTOVKA_SHAW, DADGOSTAR_SHAW_1, IG_ENTHALPY, IDEAL_LIQUID_ENTHALPY, FIXED_GUESS, STP_T_GUESS, LAST_CONVERGED] TPV_HSGUA_guesses_1P_methods = PH_T_guesses_1P_methods def sequential_substitution_2P(T, P, V, zs, xs_guess, ys_guess, liquid_phase, gas_phase, maxiter=1000, tol=1E-13, trivial_solution_tol=1e-5, V_over_F_guess=None, check_G=False, check_V=False, dZ_allow=0.1): xs, ys = xs_guess, ys_guess if V_over_F_guess is None: V_over_F = 0.5 else: V_over_F = V_over_F_guess cmps = range(len(zs)) err, err1, err2, err3 = 0.0, 0.0, 0.0, 0.0 G_old = None V_over_F_old = V_over_F restrained = 0 restrained_switch_count = 300 # Code for testing phis at zs l, g = liquid_phase, gas_phase if liquid_phase.T != T or liquid_phase.P != P: liquid_phase = liquid_phase.to_TP_zs(T=T, P=P, zs=xs) if gas_phase.T != T or gas_phase.P != P: gas_phase = gas_phase.to_TP_zs(T=T, P=P, zs=ys) for iteration in range(maxiter): # g = gas_phase.to_TP_zs(T=T, P=P, zs=ys) # l = liquid_phase.to_TP_zs(T=T, P=P, zs=xs) # l = liquid_phase.to(xs, T=T, P=P, V=V) # g = gas_phase.to(ys, T=T, P=P, V=V) # lnphis_g = g.lnphis() # lnphis_l = l.lnphis() lnphis_g = gas_phase.lnphis_at_zs(ys) lnphis_l = liquid_phase.lnphis_at_zs(xs) limited_Z = False try: Ks = [exp(lnphis_l[i] - lnphis_g[i]) for i in cmps] # K_value(phi_l=l, phi_g=g) except OverflowError: Ks = [trunc_exp(lnphis_l[i] - lnphis_g[i]) for i in cmps] # K_value(phi_l=l, phi_g=g) V_over_F_old = V_over_F try: V_over_F, xs_new, ys_new = flash_inner_loop(zs, Ks, guess=V_over_F) except Exception as e: V_over_F, xs_new, ys_new = flash_inner_loop(zs, Ks, guess=V_over_F, check=True) # K_low, K_high = False, False # for zi, Ki in zip(zs, Ks): # if zi != 0.0: # if Ki > 1.0: # K_high = True # else: # K_low = True # if K_high and K_low: # break # if not (K_high and K_low): # raise TrivialSolutionError("Converged to trivial condition, all K same phase", # comp_difference, iteration, err) # else: if check_G: V_over_F_G = min(max(V_over_F_old, 0), 1) G = g.G()*V_over_F_G + (1.0 - V_over_F_G)*l.G() print('new G', G, 'old G', G_old) if G_old is not None: if G > G_old: step = .5 while G > G_old and step > 1e-4: # ys_working = normalize([step*xo + (1.0 - step)*xi for xi, xo in zip(xs, xs_old)]) # xs_working = normalize([step*xo + (1.0 - step)*xi for xi, xo in zip(ys, ys_old)]) # ys_working = normalize([step*xo + (1.0 - step)*xi for xo, xi in zip(xs, xs_old)]) # xs_working = normalize([step*xo + (1.0 - step)*xi for xo, xi in zip(ys, ys_old)]) # g = gas_phase.to(ys_working, T=T, P=P, V=V) # l = liquid_phase.to(xs_working, T=T, P=P, V=V) # lnphis_g = g.lnphis() # lnphis_l = l.lnphis() # try: # Ks = [exp(lnphis_l[i] - lnphis_g[i]) for i in cmps] # except OverflowError: # Ks = [trunc_exp(lnphis_l[i] - lnphis_g[i]) for i in cmps] Ks_working = [step*xo + (1.0 - step)*xi for xo, xi in zip(Ks_old, Ks)] V_over_F, xs_new, ys_new = flash_inner_loop(zs, Ks_working, guess=V_over_F) # V_over_F_G = min(max(V_over_F, 0), 1) g = gas_phase.to(ys_new, T=T, P=P, V=V) l = liquid_phase.to(xs_new, T=T, P=P, V=V) G = g.G()*V_over_F_G + (1.0 - V_over_F_G)*l.G() print('step', step, G, V_over_F, Ks) step *= 0.5 # xs, ys = xs_working, ys_working # print('Gibbs increased', G/G_old) 
G_old = G if check_V and iteration > 2: big_Z_change = (abs(1.0 - l_old.Z()/l.Z()) > dZ_allow or abs(1.0 - g_old.Z()/g.Z()) > dZ_allow) if restrained <= restrained_switch_count and big_Z_change: limited_Z = True step = .5 #.5 while (abs(1.0 - l_old.Z()/l.Z()) > dZ_allow or abs(1.0 - g_old.Z()/g.Z()) > dZ_allow ) and step > 1e-8: # Ks_working = [step*xo + (1.0 - step)*xi for xo, xi in zip(Ks, Ks_old)] # Ks_working = [Ks[i]*(Ks_old[i]/Ks[i])**(1.0 - step) for i in cmps] # step = 0 - all new; step = 1 - all old # Ks_working = [Ks_old[i]*(exp(lnphis_l[i])/exp(lnphis_g[i])/Ks_old[i])**(1.0 - step) for i in cmps] ys_new = normalize([step*xo + (1.0 - step)*xi for xo, xi in zip(ys, ys_old)]) xs_new = normalize([step*xo + (1.0 - step)*xi for xo, xi in zip(xs, xs_old)]) # V_over_F, xs_new, ys_new = flash_inner_loop(zs, Ks_working, guess=V_over_F) l = liquid_phase.to(xs_new, T=T, P=P, V=V) g = gas_phase.to(ys_new, T=T, P=P, V=V) # lnphis_g = g.lnphis() # lnphis_l = l.lnphis() print('step', step, V_over_F, g.Z()) step *= 0.5 xs, ys = xs_new, ys_new lnphis_g = g.lnphis() lnphis_l = l.lnphis() Ks = [exp(lnphis_l[i] - lnphis_g[i]) for i in cmps] V_over_F, xs_new, ys_new = flash_inner_loop(zs, Ks, guess=V_over_F) restrained += 1 elif restrained > restrained_switch_count and big_Z_change: restrained = 0 # Check for negative fractions - normalize only if needed for xi in xs_new: if xi < 0.0: xs_new_sum_inv = 1.0/sum(abs(i) for i in xs_new) for i in cmps: xs_new[i] = abs(xs_new[i])*xs_new_sum_inv break for yi in ys_new: if yi < 0.0: ys_new_sum_inv = 1.0/sum(abs(i) for i in ys_new) for i in cmps: ys_new[i] = abs(ys_new[i])*ys_new_sum_inv break # Calculate the error using the new Ks and old compositions # Claimed error function in CONVENTIONAL AND RAPID FLASH # CALCULATIONS FOR THE SOAVE-REDLICH-KWONG AND PENG-ROBINSON EQUATIONS OF STATE err = 0.0 # Suggested tolerance 1e-15 try: for Ki, xi, yi in zip(Ks, xs, ys): # equivalent of fugacity ratio # Could divide by the old Ks as well. err_i = Ki*xi/yi - 1.0 err += err_i*err_i except ZeroDivisionError: err = 0.0 for Ki, xi, yi in zip(Ks, xs, ys): try: err_i = Ki*xi/yi - 1.0 err += err_i*err_i except ZeroDivisionError: pass if err > 0.0 and err in (err1, err2, err3): raise OscillationError("Converged to cycle in errors, no progress being made") # Accept the new compositions xs_old, ys_old, Ks_old = xs, ys, Ks # if not limited_Z: # assert xs == l.zs # assert ys == g.zs xs, ys = xs_new, ys_new lnphis_g_old, lnphis_l_old = lnphis_g, lnphis_l l_old, g_old = l, g # print(err, V_over_F, Ks) # xs, ys # Check for comp_difference = sum([abs(xi - yi) for xi, yi in zip(xs, ys)]) if comp_difference < trivial_solution_tol: raise TrivialSolutionError("Converged to trivial condition, compositions of both phases equal", comp_difference, iteration, err) if err < tol and not limited_Z: # Temporary! # err_mole_balance = 0.0 # for i in cmps: # err_mole_balance += abs(xs_old[i] * (1.0 - V_over_F_old) + ys_old[i] * V_over_F_old - zs[i]) # if err_mole_balance < mole_balance_tol: # return V_over_F, xs, ys, l, g, iteration, err if iteration == 0: # We are composition independent! 
g = gas_phase.to(ys_new, T=T, P=P, V=V) l = liquid_phase.to(xs_new, T=T, P=P, V=V) return V_over_F, xs_new, ys_new, l, g, iteration, err else: g = gas_phase.to(ys_old, T=T, P=P, V=V) l = liquid_phase.to(xs_old, T=T, P=P, V=V) return V_over_F_old, xs_old, ys_old, l, g, iteration, err # elif err < tol and limited_Z: # print(l.fugacities()/np.array(g.fugacities())) err1, err2, err3 = err, err1, err2 raise UnconvergedError('End of SS without convergence') def sequential_substitution_2P_functional(zs, xs_guess, ys_guess, liquid_args, gas_args, maxiter=1000, tol=1E-13, trivial_solution_tol=1e-5, V_over_F_guess=0.5): xs, ys = xs_guess, ys_guess V_over_F = V_over_F_guess N = len(zs) err = 0.0 V_over_F_old = V_over_F Ks = [0.0]*N for iteration in range(maxiter): lnphis_g = lnphis_direct(ys, *gas_args) lnphis_l = lnphis_direct(xs, *liquid_args) for i in range(N): Ks[i] = exp(lnphis_l[i] - lnphis_g[i]) V_over_F_old = V_over_F try: V_over_F, xs_new, ys_new = flash_inner_loop(zs, Ks, guess=V_over_F) except: V_over_F, xs_new, ys_new = flash_inner_loop(zs, Ks, guess=V_over_F, check=True) for xi in xs_new: if xi < 0.0: # Remove negative mole fractions - may help or may still fail xs_new_sum_inv = 0.0 for xj in xs_new: xs_new_sum_inv += abs(xj) xs_new_sum_inv = 1.0/xs_new_sum_inv for i in range(N): xs_new[i] = abs(xs_new[i])*xs_new_sum_inv break for yi in ys_new: if yi < 0.0: ys_new_sum_inv = 0.0 for yj in ys_new: ys_new_sum_inv += abs(yj) ys_new_sum_inv = 1.0/ys_new_sum_inv for i in range(N): ys_new[i] = abs(ys_new[i])*ys_new_sum_inv break err = 0.0 for Ki, xi, yi in zip(Ks, xs, ys): # equivalent of fugacity ratio # Could divide by the old Ks as well. err_i = Ki*xi/yi - 1.0 err += err_i*err_i xs_old, ys_old = xs, ys xs, ys = xs_new, ys_new comp_difference = 0.0 for xi, yi in zip(xs, ys): comp_difference += abs(xi - yi) if comp_difference < trivial_solution_tol: raise ValueError("Converged to trivial condition, compositions of both phases equal") if err < tol: return V_over_F_old, xs_old, ys_old, iteration, err raise ValueError('End of SS without convergence') def sequential_substitution_NP(T, P, zs, compositions_guesses, betas_guesses, phases, maxiter=1000, tol=1E-13, trivial_solution_tol=1e-5, ref_phase=2): compositions = compositions_guesses cmps = range(len(zs)) phase_count = len(phases) phases_iter = range(phase_count) phase_iter_n1 = range(phase_count - 1) betas = betas_guesses if len(betas) < len(phases): betas.append(1.0 - sum(betas)) compositions_K_order = [compositions[i] for i in phases_iter if i != ref_phase] compositions_ref = compositions_guesses[ref_phase] for iteration in range(maxiter): phases = [phases[i].to_TP_zs(T=T, P=P, zs=compositions[i]) for i in phases_iter] lnphis = [phases[i].lnphis() for i in phases_iter] Ks = [] lnphis_ref = lnphis[ref_phase] for i in phases_iter: if i != ref_phase: lnphis_i = lnphis[i] try: Ks.append([exp(lnphis_ref[j] - lnphis_i[j]) for j in cmps]) except OverflowError: Ks.append([trunc_exp(lnphis_ref[j] - lnphis_i[j]) for j in cmps]) beta_guesses = [betas[i] for i in phases_iter if i != ref_phase] #if phase_count == 3: # Rachford_Rice_solution2(zs, Ks[0], Ks[1], beta_y=beta_guesses[0], beta_z=beta_guesses[1]) betas_new, compositions_new = Rachford_Rice_solutionN(zs, Ks, beta_guesses) # Sort the order back beta_ref_new = betas_new[-1] betas_new = betas_new[:-1] betas_new.insert(ref_phase, beta_ref_new) compositions_ref_new = compositions_new[-1] compositions_K_order_new = compositions_new[:-1] compositions_new = list(compositions_K_order_new) 
compositions_new.insert(ref_phase, compositions_ref_new) err = 0.0 for i in phase_iter_n1: Ks_i = Ks[i] ys = compositions_K_order[i] try: for Ki, xi, yi in zip(Ks_i, compositions_ref, ys): err_i = Ki*xi/yi - 1.0 err += err_i*err_i except ZeroDivisionError: err = 0.0 for Ki, xi, yi in zip(Ks_i, compositions_ref, ys): try: err_i = Ki*xi/yi - 1.0 err += err_i*err_i except ZeroDivisionError: pass # print(betas, Ks, 'calculated', err) # print(err) compositions = compositions_new compositions_K_order = compositions_K_order_new compositions_ref = compositions_ref_new betas = betas_new # TODO trivial solution check - how to handle - drop phase? # Check for # comp_difference = sum([abs(xi - yi) for xi, yi in zip(xs, ys)]) # if comp_difference < trivial_solution_tol: # raise ValueError("Converged to trivial condition, compositions of both phases equal") if err < tol: return betas, compositions, phases, iteration, err # if iteration > 100: # return betas, compositions, phases, iteration, err raise UnconvergedError('End of SS without convergence') def sequential_substitution_Mehra_2P(T, P, zs, xs_guess, ys_guess, liquid_phase, gas_phase, maxiter=1000, tol=1E-13, trivial_solution_tol=1e-5, acc_frequency=3, acc_delay=5, lambda_max=3, lambda_min=0.0, V_over_F_guess=None): xs, ys = xs_guess, ys_guess if V_over_F_guess is None: V_over_F = 0.5 else: V_over_F = V_over_F_guess N = len(zs) cmps = range(N) lambdas = [1.0]*N Ks = [ys[i]/xs[i] for i in cmps] gs = [] import numpy as np for iteration in range(maxiter): g = gas_phase.to_TP_zs(T=T, P=P, zs=ys) l = liquid_phase.to_TP_zs(T=T, P=P, zs=xs) fugacities_g = g.fugacities() fugacities_l = l.fugacities() # Ks = [fugacities_l[i]*ys[i]/(fugacities_g[i]*xs[i]) for i in cmps] lnphis_g = g.lnphis() lnphis_l = l.lnphis() phis_g = g.phis() phis_l = l.phis() # Ks = [Ks[i]*exp(-lnphis_g[i]/lnphis_l[i]) for i in cmps] # Ks = [Ks[i]*(phis_l[i]/phis_g[i]/Ks[i])**lambdas[i] for i in cmps] # Ks = [Ks[i]*fugacities_l[i]/fugacities_g[i] for i in cmps] # Ks = [Ks[i]*exp(-phis_g[i]/phis_l[i]) for i in cmps] # Mehra, R. K., R. A. Heidemann, and K. Aziz. “An Accelerated Successive Substitution Algorithm.” The Canadian Journal of Chemical Engineering 61, no. 4 (August 1, 1983): 590-96. https://doi.org/10.1002/cjce.5450610414. 
# Strongly believed correct gis = np.log(fugacities_g) - np.log(fugacities_l) if not (iteration % acc_frequency) and iteration > acc_delay: gis_old = np.array(gs[-1]) # lambdas = np.abs(gis_old.T*gis_old/(gis_old.T*(gis_old - gis))*lambdas).tolist() # Alrotithm 3 also working # lambdas = np.abs(gis_old.T*(gis_old-gis)/((gis_old-gis).T*(gis_old - gis))*lambdas).tolist() # WORKING lambdas = np.abs(gis.T*gis/(gis_old.T*(gis - gis_old))).tolist() # 34, working lambdas = [min(max(li, lambda_min), lambda_max) for li in lambdas] # print(lambdas[0:5]) print(lambdas) # print('Ks', Ks, ) # print(Ks[-1], phis_l[-1], phis_g[-1], lambdas[-1], gis[-1], gis_old[-1]) Ks = [Ks[i]*(phis_l[i]/phis_g[i]/Ks[i])**lambdas[i] for i in cmps] # print(Ks) else: Ks = [Ks[i]*fugacities_l[i]/fugacities_g[i] for i in cmps] # print(Ks[0:5]) gs.append(gis) # lnKs = [lnKs[i]*1.5 for i in cmps] V_over_F, xs_new, ys_new = flash_inner_loop(zs, Ks, guess=V_over_F) # Check for negative fractions - normalize only if needed for xi in xs_new: if xi < 0.0: xs_new_sum = sum(abs(i) for i in xs_new) xs_new = [abs(i)/xs_new_sum for i in xs_new] break for yi in ys_new: if yi < 0.0: ys_new_sum = sum(abs(i) for i in ys_new) ys_new = [abs(i)/ys_new_sum for i in ys_new] break err = 0.0 # Suggested tolerance 1e-15 for Ki, xi, yi in zip(Ks, xs, ys): # equivalent of fugacity ratio # Could divide by the old Ks as well. err_i = Ki*xi/yi - 1.0 err += err_i*err_i print(err) # Accept the new compositions xs, ys = xs_new, ys_new # Check for comp_difference = sum([abs(xi - yi) for xi, yi in zip(xs, ys)]) if comp_difference < trivial_solution_tol: raise TrivialSolutionError("Converged to trivial condition, compositions of both phases equal", comp_difference, iteration, err) if err < tol: return V_over_F, xs, ys, l, g, iteration, err raise UnconvergedError('End of SS without convergence') def sequential_substitution_GDEM3_2P(T, P, zs, xs_guess, ys_guess, liquid_phase, gas_phase, maxiter=1000, tol=1E-13, trivial_solution_tol=1e-5, V_over_F_guess=None, acc_frequency=3, acc_delay=3, ): xs, ys = xs_guess, ys_guess if V_over_F_guess is None: V_over_F = 0.5 else: V_over_F = V_over_F_guess cmps = range(len(zs)) all_Ks = [] all_lnKs = [] for iteration in range(maxiter): g = gas_phase.to_TP_zs(T=T, P=P, zs=ys) l = liquid_phase.to_TP_zs(T=T, P=P, zs=xs) lnphis_g = g.lnphis() lnphis_l = l.lnphis() # Mehra et al. (1983) is another option # Ks = [exp(l - g) for l, g in zip(lnphis_l, lnphis_g)] # if not (iteration %3) and iteration > 3: # dKs = gdem(Ks, all_Ks[-1], all_Ks[-2], all_Ks[-3]) # print(iteration, dKs) # Ks = [Ks[i] + dKs[i] for i in cmps] # all_Ks.append(Ks) # lnKs = [(l - g) for l, g in zip(lnphis_l, lnphis_g)] # if not (iteration %3) and iteration > 3: ## dlnKs = gdem(lnKs, all_lnKs[-1], all_lnKs[-2], all_lnKs[-3]) # # dlnKs = gdem(lnKs, all_lnKs[-1], all_lnKs[-2], all_lnKs[-3]) # lnKs = [lnKs[i] + dlnKs[i] for i in cmps] # Mehra, R. K., R. A. Heidemann, and K. Aziz. “An Accelerated Successive Substitution Algorithm.” The Canadian Journal of Chemical Engineering 61, no. 4 (August 1, 1983): 590-96. https://doi.org/10.1002/cjce.5450610414. 
lnKs = [(l - g) for l, g in zip(lnphis_l, lnphis_g)] if not (iteration %acc_frequency) and iteration > acc_delay: dlnKs = gdem(lnKs, all_lnKs[-1], all_lnKs[-2], all_lnKs[-3]) print(dlnKs) lnKs = [lnKs[i] + dlnKs[i] for i in cmps] # Try to testaccelerated all_lnKs.append(lnKs) Ks = [exp(lnKi) for lnKi in lnKs] V_over_F, xs_new, ys_new = flash_inner_loop(zs, Ks, guess=V_over_F) # Check for negative fractions - normalize only if needed for xi in xs_new: if xi < 0.0: xs_new_sum = sum(abs(i) for i in xs_new) xs_new = [abs(i)/xs_new_sum for i in xs_new] break for yi in ys_new: if yi < 0.0: ys_new_sum = sum(abs(i) for i in ys_new) ys_new = [abs(i)/ys_new_sum for i in ys_new] break err = 0.0 # Suggested tolerance 1e-15 for Ki, xi, yi in zip(Ks, xs, ys): # equivalent of fugacity ratio # Could divide by the old Ks as well. err_i = Ki*xi/yi - 1.0 err += err_i*err_i # Accept the new compositions xs, ys = xs_new, ys_new # Check for comp_difference = sum([abs(xi - yi) for xi, yi in zip(xs, ys)]) if comp_difference < trivial_solution_tol: raise TrivialSolutionError("Converged to trivial condition, compositions of both phases equal", comp_difference, iteration, err) if err < tol: return V_over_F, xs, ys, l, g, iteration, err raise UnconvergedError('End of SS without convergence') def nonlin_equilibrium_NP(T, P, zs, compositions_guesses, betas_guesses, phases, maxiter=1000, tol=1E-13, trivial_solution_tol=1e-5, ref_phase=-1, method='hybr', solve_kwargs=None, debug=False): if solve_kwargs is None: solve_kwargs = {} compositions = compositions_guesses N = len(zs) Nm1 = N - 1 cmps = range(N) phase_count = len(phases) phase_iter = range(phase_count) if ref_phase < 0: ref_phase = phase_count + ref_phase phase_iter_n1 = [i for i in phase_iter if i != ref_phase] phase_iter_n1_0 = range(phase_count-1) betas = betas_guesses if len(betas) < len(phases): betas.append(1.0 - sum(betas)) flows_guess = [compositions_guesses[j][i]*betas[j] for j in phase_iter_n1 for i in cmps] jac = True if method in ('broyden1', 'broyden2', 'anderson', 'linearmixing', 'diagbroyden', 'excitingmixing', 'krylov'): jac = False global iterations, info iterations = 0 info = [] def to_solve(flows, jac=jac): global iterations, info try: flows = flows.tolist() except: flows = list(flows) iterations += 1 iter_flows = [] iter_comps = [] iter_betas = [] iter_phases = [] jac_arr = None remaining = zs for i in range(len(flows)): if flows[i] < 0.0: flows[i] = 1e-100 for j, k in zip(phase_iter_n1, phase_iter_n1_0): v = flows[k*N:k*N+N] vs = v vs_sum = sum(abs(i) for i in vs) if vs_sum == 0.0: # Handle the case an optimizer takes all of all compounds already ys = zs else: vs_sum_inv = 1.0/vs_sum ys = [abs(vs[i]*vs_sum_inv) for i in cmps] ys = normalize(ys) iter_flows.append(vs) iter_comps.append(ys) iter_betas.append(vs_sum) # Would be divided by feed but feed is zs = 1 iter_phases.append(phases[j].to_TP_zs(T=T, P=P, zs=ys)) remaining = [remaining[i] - vs[i] for i in cmps] flows_ref = remaining iter_flows.insert(ref_phase, remaining) beta_ref = sum(remaining) iter_betas.insert(ref_phase, beta_ref) xs_ref = normalize([abs(i) for i in remaining]) iter_comps.insert(ref_phase, xs_ref) phase_ref = phases[ref_phase].to_TP_zs(T=T, P=P, zs=xs_ref) iter_phases.insert(ref_phase, phase_ref) lnphis_ref = phase_ref.lnphis() dlnfugacities_ref = phase_ref.dlnfugacities_dns() errs = [] for k in phase_iter_n1: phase = iter_phases[k] lnphis = phase.lnphis() xs = iter_comps[k] for i in cmps: # This is identical to lnfugacity(i)^j - lnfugacity(i)^ref gi = 
trunc_log(xs[i]/xs_ref[i]) + lnphis[i] - lnphis_ref[i] errs.append(gi) if jac: jac_arr = [[0.0]*N*(phase_count-1) for i in range(N*(phase_count-1))] for ni, nj in zip(phase_iter_n1, phase_iter_n1_0): p = iter_phases[ni] dlnfugacities = p.dlnfugacities_dns() # Begin with the first row using ni, nj; for i in cmps: for ki, kj in zip(phase_iter_n1, phase_iter_n1_0): for j in cmps: delta = 1.0 if nj == kj else 0.0 v_ref = dlnfugacities_ref[i][j]/beta_ref jac_arr[nj*N + i][kj*N + j] = dlnfugacities[i][j]*delta/iter_betas[ni] + v_ref info[:] = iter_betas, iter_comps, iter_phases, errs, jac_arr, flows if jac: return errs, jac_arr return errs if method == 'newton_system': comp_val, iterations = newton_system(to_solve, flows_guess, jac=True, xtol=tol, damping=1, damping_func=damping_maintain_sign) else: def f_jac_numpy(flows_guess): # needed ans = to_solve(flows_guess) if jac: return np.array(ans[0]), np.array(ans[1]) return np.array(ans) sln = root(f_jac_numpy, flows_guess, tol=tol, jac=(True if jac else None), method=method, **solve_kwargs) iterations = sln['nfev'] betas, compositions, phases, errs, jac, flows = info sln = (betas, compositions, phases, errs, jac, iterations) if debug: return sln, flows, to_solve return sln def nonlin_spec_NP(guess, fixed_val, spec_val, zs, compositions_guesses, betas_guesses, phases, iter_var='T', fixed_var='P', spec='H', maxiter=1000, tol=1E-13, trivial_solution_tol=1e-5, ref_phase=-1, # method='hybr', method='fsolve', solve_kwargs=None, debug=False, analytical_jac=True): if solve_kwargs is None: solve_kwargs = {} phase_kwargs = {fixed_var: fixed_val, iter_var: guess} compositions = compositions_guesses N = len(zs) Nm1 = N - 1 cmps = range(N) phase_count = len(phases) phase_iter = range(phase_count) if ref_phase < 0: ref_phase = phase_count + ref_phase phase_iter_n1 = [i for i in phase_iter if i != ref_phase] phase_iter_n1_0 = range(phase_count-1) betas = betas_guesses if len(betas) < len(phases): betas.append(1.0 - sum(betas)) guesses = [compositions_guesses[j][i]*betas[j] for j in phase_iter_n1 for i in cmps] guesses.append(guess) spec_callables = [getattr(phase.__class__, spec) for phase in phases] dlnphis_diter_s = 'dlnphis_d' + iter_var dlnphis_diter_callables = [getattr(phase.__class__, dlnphis_diter_s) for phase in phases] dspec_diter_s = 'd%s_d%s' %(spec, iter_var) dspec_diter_callables = [getattr(phase.__class__, dspec_diter_s) for phase in phases] dspec_dn_s = 'd%s_dns' %(spec) dspec_dn_callables = [getattr(phase.__class__, dspec_dn_s) for phase in phases] jac = True if method in ('broyden1', 'broyden2', 'anderson', 'linearmixing', 'diagbroyden', 'excitingmixing', 'krylov', 'fsolve'): jac = False global iterations, info iterations = 0 info = [] def to_solve(flows, jac=jac, skip_err=False): global iterations, info try: flows = flows.tolist() except: flows = list(flows) iter_val = flows[-1] phase_kwargs[iter_var] = iter_val flows = flows[:-1] iter_flows = [] iter_comps = [] iter_betas = [] iter_phases = [] jac_arr = None remaining = zs if not skip_err: # print(flows, iter_val) iterations += 1 for i in range(len(flows)): if flows[i] < 0.0: flows[i] = 1e-100 for j, k in zip(phase_iter_n1, phase_iter_n1_0): v = flows[k*N:k*N+N] vs = v vs_sum = sum(abs(i) for i in vs) if vs_sum == 0.0: # Handle the case an optimizer takes all of all compounds already ys = zs else: vs_sum_inv = 1.0/vs_sum ys = [abs(vs[i]*vs_sum_inv) for i in cmps] ys = normalize(ys) iter_flows.append(vs) iter_comps.append(ys) iter_betas.append(vs_sum) # Would be divided by feed but feed is zs 
= 1 iter_phases.append(phases[j].to_TP_zs(zs=ys, **phase_kwargs)) remaining = [remaining[i] - vs[i] for i in cmps] flows_ref = remaining iter_flows.insert(ref_phase, remaining) beta_ref = sum(remaining) iter_betas.insert(ref_phase, beta_ref) xs_ref = normalize([abs(i) for i in remaining]) iter_comps.insert(ref_phase, xs_ref) phase_ref = phases[ref_phase].to_TP_zs(zs=xs_ref, **phase_kwargs) iter_phases.insert(ref_phase, phase_ref) lnphis_ref = phase_ref.lnphis() errs = [] for k in phase_iter_n1: phase = iter_phases[k] lnphis = phase.lnphis() xs = iter_comps[k] for i in cmps: # This is identical to lnfugacity(i)^j - lnfugacity(i)^ref gi = trunc_log(xs[i]/xs_ref[i]) + lnphis[i] - lnphis_ref[i] errs.append(gi) spec_phases = [] spec_calc = 0.0 for k in phase_iter: spec_phase = spec_callables[k](iter_phases[k]) spec_phases.append(spec_phase) spec_calc += spec_phase*iter_betas[k] errs.append(spec_calc - spec_val) else: iter_betas, iter_comps, iter_phases, errs, jac_arr, flows, iter_val_check, spec_phases = info beta_ref = iter_betas[ref_phase] xs_ref = iter_comps[ref_phase] phase_ref = iter_phases[ref_phase] lnphis_ref = phase_ref.lnphis() # print(errs[-1], 'err', iter_val, 'T') if jac: dlnfugacities_ref = phase_ref.dlnfugacities_dns() jac_arr = [[0.0]*(N*(phase_count-1) + 1) for i in range(N*(phase_count-1)+1)] for ni, nj in zip(phase_iter_n1, phase_iter_n1_0): p = iter_phases[ni] dlnfugacities = p.dlnfugacities_dns() # Begin with the first row using ni, nj; for i in cmps: for ki, kj in zip(phase_iter_n1, phase_iter_n1_0): for j in cmps: delta = 1.0 if nj == kj else 0.0 v_ref = dlnfugacities_ref[i][j]/beta_ref jac_arr[nj*N + i][kj*N + j] = dlnfugacities[i][j]*delta/iter_betas[ni] + v_ref dlnphis_dspec = [dlnphis_diter_callables[i](phases[i]) for i in phase_iter] dlnphis_dspec_ref = dlnphis_dspec[ref_phase] for ni, nj in zip(phase_iter_n1, phase_iter_n1_0): p = iter_phases[ni] for i in cmps: jac_arr[nj*N + i][-1] = dlnphis_dspec[ni][i] - dlnphis_dspec_ref[i] # last = dspec_calc = 0.0 for k in phase_iter: dspec_calc += dspec_diter_callables[k](iter_phases[k])*iter_betas[k] jac_arr[-1][-1] = dspec_calc dspec_dns = [dspec_dn_callables[i](phases[i]) for i in phase_iter] dspec_dns_ref = dspec_dns[ref_phase] last_jac_row = jac_arr[-1] for ni, nj in zip(phase_iter_n1, phase_iter_n1_0): for i in cmps: # What is wrong? # H is multiplied by the phase fraction, of which this n is a part of # So there must be two parts here last_jac_row[nj*N + i] = ((iter_betas[ni]*dspec_dns[ni][i]/iter_betas[ni] - beta_ref*dspec_dns_ref[i]/beta_ref) + (spec_phases[ni] - spec_phases[ref_phase])) if skip_err: return jac_arr info[:] = iter_betas, iter_comps, iter_phases, errs, jac_arr, flows, iter_val, spec_phases if jac: return errs, jac_arr return errs if method == 'newton_system': comp_val, iterations = newton_system(to_solve, guesses, jac=True, xtol=tol, damping=1, damping_func=damping_maintain_sign) else: def f_jac_numpy(flows_guess): # needed ans = to_solve(flows_guess) if jac: return np.array(ans[0]), np.array(ans[1]) return np.array(ans) def jac_numpy(flows_guess): if flows_guess.tolist() == info[5] + [info[6]]: a = np.array(to_solve(flows_guess, jac=True, skip_err=True)) # b = np.array(to_solve(flows_guess, jac=True)[1]) # from numpy.testing import assert_allclose # assert_allclose(a, b, rtol=1e-10) return a # print('fail jac', tuple(flows_guess.tolist()), tuple(info[5])) # print('new jac') return np.array(to_solve(flows_guess, jac=True)[1]) if method == 'fsolve': # Need a function cache! 
2 wasted fevals, 1 wasted jaceval if analytical_jac: jac = False sln, infodict, _, _ = fsolve(f_jac_numpy, guesses, fprime=jac_numpy, xtol=tol, full_output=1, **solve_kwargs) else: sln, infodict, _, _ = fsolve(f_jac_numpy, guesses, xtol=tol, full_output=1, **solve_kwargs) iterations = infodict['nfev'] else: sln = root(f_jac_numpy, guesses, tol=tol, jac=(True if jac else None), method=method, **solve_kwargs) iterations = sln['nfev'] betas, compositions, phases, errs, jac, flows, iter_val, spec_phases = info sln = (iter_val, betas, compositions, phases, errs, jac, iterations) if debug: return sln, flows, to_solve return sln def nonlin_2P(T, P, zs, xs_guess, ys_guess, liquid_phase, gas_phase, maxiter=1000, tol=1E-13, trivial_solution_tol=1e-5, V_over_F_guess=None, method='hybr'): # Do with just n? cmps = range(len(zs)) xs, ys = xs_guess, ys_guess if V_over_F_guess is None: V_over_F = 0.5 else: V_over_F = V_over_F_guess Ks_guess = [ys[i]/xs[i] for i in cmps] info = [0, None, None, None] def to_solve(lnKsVFTrans): Ks = [trunc_exp(i) for i in lnKsVFTrans[:-1]] V_over_F = (0.0 + (1.0 - 0.0)/(1.0 + trunc_exp(-lnKsVFTrans[-1]))) # Translation function - keep it zero to 1 xs = [zs[i]/(1.0 + V_over_F*(Ks[i] - 1.0)) for i in cmps] ys = [Ks[i]*xs[i] for i in cmps] g = gas_phase.to_TP_zs(T=T, P=P, zs=ys) l = liquid_phase.to_TP_zs(T=T, P=P, zs=xs) lnphis_g = g.lnphis() lnphis_l = l.lnphis() # print(g.fugacities(), l.fugacities()) new_Ks = [exp(lnphis_l[i] - lnphis_g[i]) for i in cmps] VF_err = Rachford_Rice_flash_error(V_over_F, zs, new_Ks) err = [new_Ks[i] - Ks[i] for i in cmps] + [VF_err] info[1:] = l, g, err info[0] += 1 return err VF_guess_in_basis = -log((1.0-V_over_F)/(V_over_F-0.0)) guesses = [log(i) for i in Ks_guess] guesses.append(VF_guess_in_basis) # try: sol = root(to_solve, guesses, tol=tol, method=method) # No reliable way to get number of iterations from OptimizeResult # solution, infodict, ier, mesg = fsolve(to_solve, guesses, full_output=True) solution = sol.x.tolist() V_over_F = (0.0 + (1.0 - 0.0)/(1.0 + exp(-solution[-1]))) Ks = [exp(solution[i]) for i in cmps] xs = [zs[i]/(1.0 + V_over_F*(Ks[i] - 1.0)) for i in cmps] ys = [Ks[i]*xs[i] for i in cmps] # except Exception as e: # raise UnconvergedError(e) tot_err = 0.0 for i in info[3]: tot_err += abs(i) return V_over_F, xs, ys, info[1], info[2], info[0], tot_err def nonlin_2P_HSGUAbeta(spec, spec_var, iter_val, iter_var, fixed_val, fixed_var, zs, xs_guess, ys_guess, liquid_phase, gas_phase, maxiter=1000, tol=1E-13, trivial_solution_tol=1e-5, V_over_F_guess=None, method='hybr' ): cmps = range(len(zs)) xs, ys = xs_guess, ys_guess if V_over_F_guess is None: V_over_F = 0.5 else: V_over_F = V_over_F_guess Ks_guess = [ys[i]/xs[i] for i in cmps] kwargs_l = {'zs': xs_guess, fixed_var: fixed_val} kwargs_g = {'zs': ys_guess, fixed_var: fixed_val} info = [0, None, None, None, None] def to_solve(lnKsVFTransHSGUABeta): Ks = [trunc_exp(i) for i in lnKsVFTransHSGUABeta[:-2]] V_over_F = (0.0 + (1.0 - 0.0)/(1.0 + trunc_exp(-lnKsVFTransHSGUABeta[-2]))) # Translation function - keep it zero to 1 iter_val = lnKsVFTransHSGUABeta[-1] xs = [zs[i]/(1.0 + V_over_F*(Ks[i] - 1.0)) for i in cmps] ys = [Ks[i]*xs[i] for i in cmps] kwargs_l[iter_var] = iter_val kwargs_l['zs'] = xs kwargs_g[iter_var] = iter_val kwargs_g['zs'] = ys g = gas_phase.to(**kwargs_g) l = liquid_phase.to(**kwargs_l) lnphis_g = g.lnphis() lnphis_l = l.lnphis() new_Ks = [exp(lnphis_l[i] - lnphis_g[i]) for i in cmps] VF_err = Rachford_Rice_flash_error(V_over_F, zs, new_Ks) val_l = getattr(l, 
spec_var)() val_g = getattr(g, spec_var)() val = V_over_F*val_g + (1.0 - V_over_F)*val_l other_err = val - spec err = [new_Ks[i] - Ks[i] for i in cmps] + [VF_err, other_err] info[1:] = l, g, err, other_err info[0] += 1 # print(lnKsVFTransHSGUABeta, err) return err VF_guess_in_basis = -log((1.0-V_over_F)/(V_over_F-0.0)) guesses = [log(i) for i in Ks_guess] guesses.append(VF_guess_in_basis) guesses.append(iter_val) # solution, iterations = broyden2(guesses, fun=to_solve, jac=False, xtol=1e-7, # maxiter=maxiter, jac_has_fun=False, skip_J=True) sol = root(to_solve, guesses, tol=tol, method=method) solution = sol.x.tolist() V_over_F = (0.0 + (1.0 - 0.0)/(1.0 + exp(-solution[-2]))) iter_val = solution[-1] Ks = [exp(solution[i]) for i in cmps] xs = [zs[i]/(1.0 + V_over_F*(Ks[i] - 1.0)) for i in cmps] ys = [Ks[i]*xs[i] for i in cmps] tot_err = 0.0 for v in info[3]: tot_err += abs(v) return V_over_F, solution[-1], xs, ys, info[1], info[2], info[0], tot_err #def broyden2(xs, fun, jac, xtol=1e-7, maxiter=100, jac_has_fun=False, # skip_J=False): def nonlin_n_2P(T, P, zs, xs_guess, ys_guess, liquid_phase, gas_phase, maxiter=1000, tol=1E-13, trivial_solution_tol=1e-5, V_over_F_guess=None, method='hybr'): cmps = range(len(zs)) xs, ys = xs_guess, ys_guess if V_over_F_guess is None: V_over_F = 0.45 else: V_over_F = V_over_F_guess ns = [ys[i]*V_over_F for i in cmps] info = [0, None, None, None] def to_solve(ns): ys = normalize(ns) ns_l = [zs[i] - ns[i] for i in cmps] # print(sum(ns)+sum(ns_l)) xs = normalize(ns_l) # print(ys, xs) g = gas_phase.to_TP_zs(T=T, P=P, zs=ys) l = liquid_phase.to_TP_zs(T=T, P=P, zs=xs) # print(np.array(g.dfugacities_dns()) - np.array(l.dfugacities_dns()) ) fugacities_g = g.fugacities() fugacities_l = l.fugacities() err = [fugacities_g[i] - fugacities_l[i] for i in cmps] info[1:] = l, g, err info[0] += 1 # print(err) return err # print(np.array(jacobian(to_solve, ns, scalar=False))) # print('ignore') sol = root(to_solve, ns, tol=tol, method=method) ns_sln = sol.x.tolist() ys = normalize(ns_sln) xs_sln = [zs[i] - ns_sln[i] for i in cmps] xs = normalize(xs_sln) return xs, ys def nonlin_2P_newton(T, P, zs, xs_guess, ys_guess, liquid_phase, gas_phase, maxiter=1000, xtol=1E-10, trivial_solution_tol=1e-5, V_over_F_guess=None): N = len(zs) cmps = range(N) xs, ys = xs_guess, ys_guess if V_over_F_guess is None: V_over_F = 0.5 else: V_over_F = V_over_F_guess Ks_guess = [ys[i]/xs[i] for i in cmps] info = [] def to_solve(lnKsVF): # Jacobian verified. However, very sketchy - mole fractions may want # to go negative. lnKs = lnKsVF[:-1] Ks = [exp(lnKi) for lnKi in lnKs] VF = float(lnKsVF[-1]) # if VF > 1: # VF = 1-1e-15 # if VF < 0: # VF = 1e-15 xs = [zi/(1.0 + VF*(Ki - 1.0)) for zi, Ki in zip(zs, Ks)] ys = [Ki*xi for Ki, xi in zip(Ks, xs)] g = gas_phase.to_TP_zs(T=T, P=P, zs=ys) l = liquid_phase.to_TP_zs(T=T, P=P, zs=xs) lnphis_g = g.lnphis() lnphis_l = l.lnphis() size = N + 1 J = [[None]*size for i in range(size)] d_lnphi_dxs = l.dlnphis_dzs() d_lnphi_dys = g.dlnphis_dzs() J[N][N] = 1.0 # Last column except last value; believed correct # Was not correct when compared to numerical solution Ksm1 = [Ki - 1.0 for Ki in Ks] RR_denoms_inv2 = [] for i in cmps: t = 1.0 + VF*Ksm1[i] RR_denoms_inv2.append(1.0/(t*t)) RR_terms = [zs[k]*Ksm1[k]*RR_denoms_inv2[k] for k in cmps] for i in cmps: value = 0.0 d_lnphi_dxs_i, d_lnphi_dys_i = d_lnphi_dxs[i], d_lnphi_dys[i] for k in cmps: value += RR_terms[k]*(d_lnphi_dxs_i[k] - Ks[k]*d_lnphi_dys_i[k]) J[i][-1] = value # Main body - expensive to compute! 
Lots of elements zsKsRRinvs2 = [zs[j]*Ks[j]*RR_denoms_inv2[j] for j in cmps] one_m_VF = 1.0 - VF for i in cmps: Ji = J[i] d_lnphi_dxs_is, d_lnphi_dys_is = d_lnphi_dxs[i], d_lnphi_dys[i] for j in cmps: value = 1.0 if i == j else 0.0 value += zsKsRRinvs2[j]*(VF*d_lnphi_dxs_is[j] + one_m_VF*d_lnphi_dys_is[j]) Ji[j] = value # Last row except last value - good, working # Diff of RR w.r.t each log K bottom_row = J[-1] for j in cmps: bottom_row[j] = zsKsRRinvs2[j]*(one_m_VF) + VF*zsKsRRinvs2[j] # Last value - good, working, being overwritten dF_ncp1_dB = 0.0 for i in cmps: dF_ncp1_dB -= RR_terms[i]*Ksm1[i] J[-1][-1] = dF_ncp1_dB err_RR = Rachford_Rice_flash_error(VF, zs, Ks) Fs = [lnKi - lnphi_l + lnphi_g for lnphi_l, lnphi_g, lnKi in zip(lnphis_l, lnphis_g, lnKs)] Fs.append(err_RR) info[:] = VF, xs, ys, l, g, Fs, J return Fs, J guesses = [log(i) for i in Ks_guess] guesses.append(V_over_F) # TODO trust-region sln, iterations = newton_system(to_solve, guesses, jac=True, xtol=xtol, maxiter=maxiter, damping_func=make_damp_initial(steps=3), damping=.5) VF, xs, ys, l, g, Fs, J = info tot_err = 0.0 for Fi in Fs: tot_err += abs(Fi) return VF, xs, ys, l, g, tot_err, J, iterations def gdem(x, x1, x2, x3): cmps = range(len(x)) dx2 = [x[i] - x3[i] for i in cmps] dx1 = [x[i] - x2[i] for i in cmps] dx = [x[i] - x1[i] for i in cmps] b01, b02, b12, b11, b22 = 0.0, 0.0, 0.0, 0.0, 0.0 for i in cmps: b01 += dx[i]*dx1[i] b02 += dx[i]*dx2[i] b12 += dx1[i]*dx2[i] b11 += dx1[i]*dx1[i] b22 += dx2[i]*dx2[i] den_inv = 1.0/(b11*b22 - b12*b12) mu1 = den_inv*(b02*b12 - b01*b22) mu2 = den_inv*(b01*b12 - b02*b11) factor = 1.0/(1.0 + mu1 + mu2) return [factor*(dx[i] - mu2*dx1[i]) for i in cmps] def minimize_gibbs_2P_transformed(T, P, zs, xs_guess, ys_guess, liquid_phase, gas_phase, maxiter=1000, tol=1E-13, trivial_solution_tol=1e-5, V_over_F_guess=None): if V_over_F_guess is None: V_over_F = 0.5 else: V_over_F = V_over_F_guess flows_v = [yi*V_over_F for yi in ys_guess] cmps = range(len(zs)) calc_phases = [] def G(flows_v): vs = [(0.0 + (zs[i] - 0.0)/(1.0 - flows_v[i])) for i in cmps] ls = [zs[i] - vs[i] for i in cmps] xs = normalize(ls) ys = normalize(vs) VF = flows_v[0]/ys[0] g = gas_phase.to_TP_zs(T=T, P=P, zs=ys) l = liquid_phase.to_TP_zs(T=T, P=P, zs=xs) G_l = l.G() G_g = g.G() calc_phases[:] = G_l, G_g GE_calc = (G_g*VF + (1.0 - VF)*G_l)/(R*T) return GE_calc ans = minimize(G, flows_v) flows_v = ans['x'] vs = [(0.0 + (zs[i] - 0.0) / (1.0 - flows_v[i])) for i in cmps] ls = [zs[i] - vs[i] for i in cmps] xs = normalize(ls) ys = normalize(vs) V_over_F = flows_v[0] / ys[0] return V_over_F, xs, ys, calc_phases[0], calc_phases[1], ans['nfev'], ans['fun'] def minimize_gibbs_NP_transformed(T, P, zs, compositions_guesses, phases, betas, tol=1E-13, method='L-BFGS-B', opt_kwargs=None, translate=False): if opt_kwargs is None: opt_kwargs = {} N = len(zs) cmps = range(N) phase_count = len(phases) phase_iter = range(phase_count) phase_iter_n1 = range(phase_count-1) if method == 'differential_evolution': translate = True # RT_inv = 1.0/(R*T) # Only exist for the first n phases # Do not multiply by zs - we are already multiplying by a composition flows_guess = [compositions_guesses[j][i]*betas[j] for j in range(phase_count - 1) for i in cmps] # Convert the flow guesses to the basis used remaining = zs if translate: flows_guess_basis = [] for j in range(phase_count-1): phase_guess = flows_guess[j*N:j*N+N] flows_guess_basis.extend([-trunc_log((remaining[i]-phase_guess[i])/(phase_guess[i]-0.0)) for i in cmps]) remaining = [remaining[i] - 
phase_guess[i] for i in cmps] else: flows_guess_basis = flows_guess global min_G, iterations jac, hess = False, False real_min = False min_G = 1e100 iterations = 0 info = [] last = [] def G(flows): global min_G, iterations try: flows = flows.tolist() except: flows = list(flows) iterations += 1 iter_flows = [] iter_comps = [] iter_betas = [] iter_phases = [] remaining = zs if not translate: for i in range(len(flows)): if flows[i] < 1e-10: flows[i] = 1e-10 for j in phase_iter: v = flows[j*N:j*N+N] # Mole flows of phase0/vapor if j == phase_count - 1: vs = remaining else: if translate: vs = [(0.0 + (remaining[i] - 0.0)/(1.0 + trunc_exp(-v[i]))) for i in cmps] else: vs = v vs_sum = sum(abs(i) for i in vs) if vs_sum == 0.0: # Handle the case an optimizer takes all of all compounds already ys = zs else: vs_sum_inv = 1.0/vs_sum ys = [abs(vs[i]*vs_sum_inv) for i in cmps] ys = normalize(ys) iter_flows.append(vs) iter_comps.append(ys) iter_betas.append(vs_sum) # Would be divided by feed but feed is zs = 1 remaining = [remaining[i] - vs[i] for i in cmps] G = 0.0 jac_array = [] for j in phase_iter: comp = iter_comps[j] phase = phases[j].to_TP_zs(T=T, P=P, zs=comp) lnphis = phase.lnphis() if real_min: # fugacities = phase.fugacities() # fugacities = phase.phis() #G += sum([iter_flows[j][i]*trunc_log(fugacities[i]) for i in cmps]) G += phase.G()*iter_betas[j] else: for i in cmps: G += iter_flows[j][i]*(trunc_log(comp[i]) + lnphis[i]) iter_phases.append(phase) if 0: fugacities_last = iter_phases[-1].fugacities() # G = 0.0 for j in phase_iter_n1: fugacities = iter_phases[j].fugacities() G += sum([abs(fugacities_last[i] - fugacities[i]) for i in cmps]) # lnphis = phase.lnphis() # if real_min: # G += G_base # # if not jac: # for j in phase_iter: # comp = iter_comps[j] # G += phase.G()*iter_betas[j] # if jac: # r = [] # for i in cmps: # v = (log()) # jac_array.append([log()]) jac_arr = [] comp = iter_comps[0] phase = iter_phases[0] lnphis = phase.lnphis() base = [log(xi) + lnphii for xi, lnphii in zip(comp, lnphis)] if jac: for j in range(1, phase_count): comp = iter_comps[j] phase = iter_phases[j] lnphis = phase.lnphis() jac_arr.extend([ref - (log(xi) + lnphii) for ref, xi, lnphii in zip(base, comp, lnphis)]) jac_arr = [] comp_last = iter_comps[-1] phase_last = iter_phases[-1] flows_last = iter_flows[-1] lnphis_last = phase_last.lnphis() dlnphis_dns_last = phase_last.dlnphis_dns() for j in phase_iter_n1: comp = iter_comps[j] phase = iter_phases[j] flows = iter_flows[j] lnphis = phase.lnphis() dlnphis_dns = phase.dlnphis_dns() for i in cmps: v = 0 for k in cmps: v += flows[k][i]*lnphis[k][i] v -= flows_last[i]*dlnphis_dns_last[k][i] v += lnphis[i] + log(comp[i]) if G < min_G: # 'phases', iter_phases print('new min G', G, 'betas', iter_betas, 'comp', iter_comps) info[:] = iter_betas, iter_comps, iter_phases, G min_G = G last[:] = iter_betas, iter_comps, iter_phases, G if hess: base = iter_phases[0].dlnfugacities_dns() p1 = iter_phases[1].dlnfugacities_dns() dlnphis_dns = [i.dlnphis_dns() for i in iter_phases] dlnphis_dns0 = iter_phases[0].dlnphis_dns() dlnphis_dns1 = iter_phases[1].dlnphis_dns() xs, ys = iter_comps[0], iter_comps[1] hess_arr = [] beta = iter_betas[0] hess_arr = [[0.0]*N*(phase_count-1) for i in range(N*(phase_count-1))] for n in range(1, phase_count): for m in range(1, phase_count): for i in cmps: for j in cmps: delta = 1.0 if i == j else 0.0 v = 1.0/iter_betas[n]*(1.0/iter_comps[n][i]*delta - 1.0 + dlnphis_dns[n][i][j]) v += 1.0/iter_betas[0]*(1.0/iter_comps[0][i]*delta - 1.0 + 
dlnphis_dns[0][i][j]) hess_arr[(n-1)*N+i][(m-1)*N+j] = v # # for n in range(1, phase_count): # for i in cmps: # r = [] # for j in cmps: # v = 0.0 # for m in phase_iter: # delta = 1.0 if i ==j else 0.0 # v += 1.0/iter_betas[m]*(1.0/iter_comps[m][i]*delta # - 1.0 + dlnphis_dns[m][i][j]) # # # How the heck to make this multidimensional? # # v = 1.0/(beta*(1.0 - beta))*(zs[i]*delta/(xs[i]*ys[i]) # # - 1.0 + (1.0 - beta)*dlnphis_dns0[i][j] # # + beta*dlnphis_dns1[i][j]) # # # v = base[i][j] + p1[i][j] # r.append(v) # hess_arr.append(r) # Going to be hard to figure out # for j in range(1, phase_count): # comp = iter_comps[j] # phase = iter_phases[j] # dlnfugacities_dns = phase.dlnfugacities_dns() # row = [base[i] + dlnfugacities_dns[i] for i in cmps] # hess_arr = row # hess_arr.append(row) return G, jac_arr, hess_arr if jac: return G, np.array(jac_arr) return G # ans = None if method == 'differential_evolution': from scipy.optimize import differential_evolution real_min = True translate = True G_base = 1e100 for p in phases: G_calc = p.to(T=T,P=P, zs=zs).G() if G_base > G_calc: G_base = G_calc jac = hess = False # print(G(list(flows_guess_basis))) ans = differential_evolution(G, [(-30.0, 30.0) for i in cmps for j in range(phase_count-1)], **opt_kwargs) # ans = differential_evolution(G, [(-100.0, 100.0) for i in cmps for j in range(phase_count-1)], **opt_kwargs) objf = float(ans['fun']) elif method == 'newton_minimize': import numdifftools as nd jac = True hess = True initial_hess = nd.Hessian(lambda x: G(x)[0], step=1e-4)(flows_guess_basis) ans, iters = newton_minimize(G, flows_guess_basis, jac=True, hess=True, xtol=tol, ytol=None, maxiter=100, damping=1.0, damping_func=damping_maintain_sign) objf = None else: jac = True hess = True import numdifftools as nd def hess_fun(flows): return np.array(G(flows)[2]) # hess_fun = lambda flows_guess_basis: np.array(G(flows_guess_basis)[2]) # nd.Jacobian(G, step=1e-5) # trust-constr special handling to add constraints def fun_and_jac(x): x, j, _ = G(x) return x, np.array(j) ans = minimize(fun_and_jac, flows_guess_basis, jac=True, hess=hess_fun, method=method, tol=tol, **opt_kwargs) objf = float(ans['fun']) # G(ans['x']) # Make sure info has right value # ans['fun'] *= R*T betas, compositions, phases, objf = info#info return betas, compositions, phases, iterations, objf def TP_solve_VF_guesses(zs, method, constants, correlations, T=None, P=None, VF=None, maxiter=50, xtol=1E-7, ytol=None, bounded=False, user_guess=None, last_conv=None): if method == IDEAL_PSAT: return flash_ideal(zs=zs, funcs=correlations.VaporPressures, Tcs=constants.Tcs, T=T, P=P, VF=VF) elif method == WILSON_GUESS: return flash_wilson(zs, Tcs=constants.Tcs, Pcs=constants.Pcs, omegas=constants.omegas, T=T, P=P, VF=VF) elif method == TB_TC_GUESS: return flash_Tb_Tc_Pc(zs, Tbs=constants.Tbs, Tcs=constants.Tcs, Pcs=constants.Pcs, T=T, P=P, VF=VF) # Simple return values - not going through a model elif method == STP_T_GUESS: return flash_ideal(zs=zs, funcs=correlations.VaporPressures, Tcs=constants.Tcs, T=298.15, P=101325.0) elif method == LAST_CONVERGED: if last_conv is None: raise ValueError("No last converged") return last_conv else: raise ValueError("Could not converge") def dew_P_newton(P_guess, T, zs, liquid_phase, gas_phase, maxiter=200, xtol=1E-10, xs_guess=None, max_step_damping=1e5, trivial_solution_tol=1e-4): # Trial function only V = None N = len(zs) cmps = range(N) xs = zs if xs_guess is None else xs_guess V_over_F = 1.0 def to_solve(lnKsP): # d(fl_i - fg_i)/d(ln K,i) - # rest is 
less important # d d(fl_i - fg_i)/d(P) should be easy Ks = [trunc_exp(i) for i in lnKsP[:-1]] P = lnKsP[-1] xs = [zs[i]/(1.0 + V_over_F*(Ks[i] - 1.0)) for i in cmps] ys = [Ks[i]*xs[i] for i in cmps] g = gas_phase.to(ys, T=T, P=P, V=V) l = liquid_phase.to(xs, T=T, P=P, V=V) fugacities_l = l.fugacities() fugacities_g = g.fugacities() VF_err = Rachford_Rice_flash_error(V_over_F, zs, Ks) errs = [fi_l - fi_g for fi_l, fi_g in zip(fugacities_l, fugacities_g)] errs.append(VF_err) return errs lnKs_guess = [log(zs[i]/xs[i]) for i in cmps] lnKs_guess.append(P_guess) def jac(lnKsP): j = jacobian(to_solve, lnKsP, scalar=False) return j lnKsP, iterations = newton_system(to_solve, lnKs_guess, jac=jac, xtol=xtol) xs = [zs[i]/(1.0 + V_over_F*(exp(lnKsP[i]) - 1.0)) for i in cmps] # ys = [exp(lnKsP[i])*xs[i] for i in cmps] return lnKsP[-1], xs, zs, iterations def dew_bubble_newton_zs(guess, fixed_val, zs, liquid_phase, gas_phase, iter_var='T', fixed_var='P', V_over_F=1, # 1 = dew, 0 = bubble maxiter=200, xtol=1E-10, comp_guess=None, max_step_damping=1e5, damping=1.0, trivial_solution_tol=1e-4, debug=False, method='newton', opt_kwargs=None): V = None N = len(zs) cmps = range(N) if comp_guess is None: comp_guess = zs if V_over_F == 1.0: iter_phase, const_phase = liquid_phase, gas_phase elif V_over_F == 0.0: iter_phase, const_phase = gas_phase, liquid_phase else: raise ValueError("Supports only VF of 0 or 1") lnKs = [0.0]*N size = N + 1 errs = [0.0]*size comp_invs = [0.0]*N J = [[0.0]*size for i in range(size)] #J[N][N] = 0.0 as well JN = J[N] for i in cmps: JN[i] = -1.0 s = 'dlnphis_d%s' %(iter_var) dlnphis_diter_var_iter = getattr(iter_phase.__class__, s) dlnphis_diter_var_const = getattr(const_phase.__class__, s) dlnphis_dzs = getattr(iter_phase.__class__, 'dlnphis_dzs') info = [] kwargs = {} kwargs[fixed_var] = fixed_val kwargs['V'] = None def to_solve_comp(iter_vals, jac=True): comp = iter_vals[:-1] iter_val = iter_vals[-1] kwargs[iter_var] = iter_val p_iter = iter_phase.to(comp, **kwargs) p_const = const_phase.to(zs, **kwargs) lnphis_iter = p_iter.lnphis() lnphis_const = p_const.lnphis() for i in cmps: comp_invs[i] = comp_inv = 1.0/comp[i] lnKs[i] = log(zs[i]*comp_inv) errs[i] = lnKs[i] - lnphis_iter[i] + lnphis_const[i] errs[-1] = 1.0 - sum(comp) if jac: dlnphis_dxs = dlnphis_dzs(p_iter) dlnphis_dprop_iter = dlnphis_diter_var_iter(p_iter) dlnphis_dprop_const = dlnphis_diter_var_const(p_const) for i in cmps: Ji = J[i] Ji[-1] = dlnphis_dprop_const[i] - dlnphis_dprop_iter[i] for j in cmps: Ji[j] = -dlnphis_dxs[i][j] Ji[i] -= comp_invs[i] info[:] = [p_iter, p_const, errs, J] return errs, J return errs damping = 1.0 guesses = list(comp_guess) guesses.append(guess) if method == 'newton': comp_val, iterations = newton_system(to_solve_comp, guesses, jac=True, xtol=xtol, damping=damping, solve_func=py_solve, # solve_func=lambda x, y:np.linalg.solve(x, y).tolist(), damping_func=damping_maintain_sign) elif method == 'odeint': # Not even close to working # equations are hard from scipy.integrate import odeint def fun_and_jac(x, t): x, j = to_solve_comp(x.tolist() + [t]) return np.array(x), np.array(j) def fun(x, t): x, j = to_solve_comp(x.tolist() +[t]) return np.array(x) def jac(x, t): x, j = to_solve_comp(x.tolist() + [t]) return np.array(j) ans = odeint(func=fun, y0=np.array(guesses), t=np.linspace(guess, guess*2, 5), Dfun=jac) return ans else: if opt_kwargs is None: opt_kwargs = {} # def fun_and_jac(x): # x, j = to_solve_comp(x.tolist()) # return np.array(x), np.array(j) low = [.0]*N low.append(1.0) # guess at 
minimum pressure high = [1.0]*N high.append(1e10) # guess at maximum pressure f_j, into, outof = translate_bound_f_jac(to_solve_comp, jac=True, low=low, high=high, as_np=True) ans = root(f_j, np.array(into(guesses)), jac=True, method=method, tol=xtol, **opt_kwargs) comp_val = outof(ans['x']).tolist() iterations = ans['nfev'] iter_val = comp_val[-1] comp = comp_val[:-1] comp_difference = 0.0 for i in cmps: comp_difference += abs(zs[i] - comp[i]) if comp_difference < trivial_solution_tol: raise ValueError("Converged to trivial condition, compositions of both phases equal") if iter_var == 'P' and iter_val > 1e10: raise ValueError("Converged to unlikely point") sln = [iter_val, comp] sln.append(info[0]) sln.append(info[1]) sln.append(iterations) tot_err = 0.0 for err_i in info[2]: tot_err += abs(err_i) sln.append(tot_err) if debug: return sln, to_solve_comp return sln l_undefined_T_msg = "Could not calculate liquid conditions at provided temperature %s K (mole fracions %s)" g_undefined_T_msg = "Could not calculate vapor conditions at provided temperature %s K (mole fracions %s)" l_undefined_P_msg = "Could not calculate liquid conditions at provided pressure %s Pa (mole fracions %s)" g_undefined_P_msg = "Could not calculate vapor conditions at provided pressure %s Pa (mole fracions %s)" def dew_bubble_Michelsen_Mollerup(guess, fixed_val, zs, liquid_phase, gas_phase, iter_var='T', fixed_var='P', V_over_F=1, maxiter=200, xtol=1E-10, comp_guess=None, max_step_damping=.25, guess_update_frequency=1, trivial_solution_tol=1e-7, V_diff=.00002, damping=1.0): # for near critical, V diff very wrong - .005 seen, both g as or both liquid kwargs = {fixed_var: fixed_val} N = len(zs) cmps = range(N) comp_guess = zs if comp_guess is None else comp_guess damping_orig = damping if V_over_F == 1.0: iter_phase, const_phase, bubble = liquid_phase, gas_phase, False elif V_over_F == 0.0: iter_phase, const_phase, bubble = gas_phase, liquid_phase, True else: raise ValueError("Supports only VF of 0 or 1") if iter_var == 'T': if V_over_F == 1.0: iter_msg, const_msg = l_undefined_T_msg, g_undefined_T_msg else: iter_msg, const_msg = g_undefined_T_msg, l_undefined_T_msg elif iter_var == 'P': if V_over_F == 1.0: iter_msg, const_msg = l_undefined_P_msg, g_undefined_P_msg else: iter_msg, const_msg = g_undefined_P_msg, l_undefined_P_msg s = 'dlnphis_d%s' %(iter_var) dlnphis_diter_var_iter = getattr(iter_phase.__class__, s) dlnphis_diter_var_const = getattr(const_phase.__class__, s) skip = 0 guess_old = None V_ratio, V_ratio_last = None, None V_iter_last, V_const_last = None, None expect_phase = 'g' if V_over_F == 0.0 else 'l' unwanted_phase = 'l' if expect_phase == 'g' else 'g' successive_fails = 0 for iteration in range(maxiter): kwargs[iter_var] = guess try: const_phase = const_phase.to_TP_zs(zs=zs, **kwargs) lnphis_const = const_phase.lnphis() dlnphis_dvar_const = dlnphis_diter_var_const(const_phase) except Exception as e: if guess_old is None: raise ValueError(const_msg %(guess, zs), e) successive_fails += 1 guess = guess_old + copysign(min(max_step_damping*guess, abs(step)), step) continue try: skip -= 1 iter_phase = iter_phase.to_TP_zs(zs=comp_guess, **kwargs) if V_diff is not None: V_iter, V_const = iter_phase.V(), const_phase.V() V_ratio = V_iter/V_const if 1.0 - V_diff < V_ratio < 1.0 + V_diff or skip > 0 or V_iter_last and (abs(min(V_iter, V_iter_last)/max(V_iter, V_iter_last)) < .8): # Relax the constraint for the iterating on variable so two different phases exist #if iter_phase.eos_mix.phase in ('l', 'g') and 
iter_phase.eos_mix.phase == const_phase.eos_mix.phase: # Alternatively, try a stability test here if iter_phase.eos_mix.phase == unwanted_phase: if skip < 0: skip = 4 damping = .15 if iter_var == 'P': split = min(iter_phase.eos_mix.P_discriminant_zeros()) # P_discriminant_zero_l if bubble: split *= 0.999999999 else: split *= 1.000000001 elif iter_var == 'T': split = iter_phase.eos_mix.T_discriminant_zero_l() if bubble: split *= 0.999999999 else: split *= 1.000000001 kwargs[iter_var] = guess = split iter_phase = iter_phase.to(zs=comp_guess, **kwargs) const_phase = const_phase.to(zs=zs, **kwargs) lnphis_const = const_phase.lnphis() dlnphis_dvar_const = dlnphis_diter_var_const(const_phase) print('adj iter phase', split) elif const_phase.eos_mix.phase == expect_phase: if skip < 0: skip = 4 damping = .15 if iter_var == 'P': split = min(const_phase.eos_mix.P_discriminant_zeros()) if bubble: split *= 0.999999999 else: split *= 1.000000001 elif iter_var == 'T': split = const_phase.eos_mix.T_discriminant_zero_l() if bubble: split *= 0.999999999 else: split *= 1.000000001 kwargs[iter_var] = guess = split const_phase = const_phase.to(zs=zs, **kwargs) lnphis_const = const_phase.lnphis() dlnphis_dvar_const = dlnphis_diter_var_const(const_phase) iter_phase = iter_phase.to(zs=comp_guess, **kwargs) # Also need to adjust the other phase to keep it in sync print('adj const phase', split) lnphis_iter = iter_phase.lnphis() dlnphis_dvar_iter = dlnphis_diter_var_iter(iter_phase) except Exception as e: if guess_old is None: raise ValueError(iter_msg %(guess, zs), e) successive_fails += 1 guess = guess_old + copysign(min(max_step_damping*guess, abs(step)), step) continue if successive_fails > 2: raise ValueError("Stopped convergence procedure after multiple bad steps") successive_fails = 0 Ks = [exp(a - b) for a, b in zip(lnphis_const, lnphis_iter)] comp_guess = [zs[i]*Ks[i] for i in cmps] y_sum = sum(comp_guess) comp_guess = [y/y_sum for y in comp_guess] if iteration % guess_update_frequency: # or skip > 0 continue elif skip == 0: damping = damping_orig f_k = sum([zs[i]*Ks[i] for i in cmps]) - 1.0 dfk_dvar = 0.0 for i in cmps: dfk_dvar += zs[i]*Ks[i]*(dlnphis_dvar_const[i] - dlnphis_dvar_iter[i]) guess_old = guess step = -f_k/dfk_dvar # if near_critical: adj_step = copysign(min(max_step_damping*guess, abs(step), abs(step)*damping), step) if guess + adj_step <= 0.0: adj_step *= 0.5 guess = guess + adj_step # else: # guess = guess + step comp_difference = 0.0 for i in cmps: comp_difference += abs(zs[i] - comp_guess[i]) if comp_difference < trivial_solution_tol and iteration: for zi in zs: if zi == 1.0: # Turn off trivial check for pure components trivial_solution_tol = -1.0 if comp_difference < trivial_solution_tol: raise ValueError("Converged to trivial condition, compositions of both phases equal") if abs(guess - guess_old) < xtol: #and not skip: guess = guess_old break if V_diff is not None: V_iter_last, V_const_last, V_ratio_last = V_iter, V_const, V_ratio if abs(guess - guess_old) > xtol: raise ValueError("Did not converge to specified tolerance") return guess, comp_guess, iter_phase, const_phase, iteration, abs(guess - guess_old) l_undefined_T_msg = "Could not calculate liquid conditions at provided temperature %s K (mole fracions %s)" g_undefined_T_msg = "Could not calculate vapor conditions at provided temperature %s K (mole fracions %s)" l_undefined_P_msg = "Could not calculate liquid conditions at provided pressure %s Pa (mole fracions %s)" g_undefined_P_msg = "Could not calculate vapor conditions at 
provided pressure %s Pa (mole fracions %s)" def existence_3P_Michelsen_Mollerup(guess, fixed_val, zs, iter_phase, liquid0, liquid1, iter_var='T', fixed_var='P', maxiter=200, xtol=1E-10, comp_guess=None, liquid0_comp=None, liquid1_comp=None, max_step_damping=.25, SS_tol=1e-10, trivial_solution_tol=1e-7, damping=1.0, beta=0.5): # For convenience call the two phases that exist already liquid0, liquid1 # But one of them can be a gas, solid, etc. kwargs = {fixed_var: fixed_val} N = len(zs) cmps = range(N) comp_guess = zs if comp_guess is None else comp_guess damping_orig = damping if iter_var == 'T': iter_msg, const_msg = g_undefined_T_msg, l_undefined_T_msg elif iter_var == 'P': iter_msg, const_msg = g_undefined_P_msg, l_undefined_P_msg s = 'dlnphis_d%s' %(iter_var) dlnphis_diter_var_iter = getattr(iter_phase.__class__, s) dlnphis_diter_var_liquid0 = getattr(liquid0.__class__, s) # dlnphis_diter_var_liquid1 = getattr(liquid1.__class__, s) skip = 0 guess_old = None successive_fails = 0 for iteration in range(maxiter): kwargs[iter_var] = guess try: liquid0 = liquid0.to_TP_zs(zs=liquid0_comp, **kwargs) lnphis_liquid0 = liquid0.lnphis() dlnphis_dvar_liquid0 = dlnphis_diter_var_liquid0(liquid0) except Exception as e: if guess_old is None: raise ValueError(const_msg %(guess, liquid0_comp), e) successive_fails += 1 guess = guess_old + copysign(min(max_step_damping*guess, abs(step)), step) continue try: liquid1 = liquid1.to_TP_zs(zs=liquid1_comp, **kwargs) lnphis_liquid1 = liquid1.lnphis() # dlnphis_dvar_liquid1 = dlnphis_diter_var_liquid1(liquid1) except Exception as e: if guess_old is None: raise ValueError(const_msg %(guess, liquid0_comp), e) successive_fails += 1 guess = guess_old + copysign(min(max_step_damping*guess, abs(step)), step) continue try: iter_phase = iter_phase.to_TP_zs(zs=comp_guess, **kwargs) lnphis_iter = iter_phase.lnphis() dlnphis_dvar_iter = dlnphis_diter_var_iter(iter_phase) except Exception as e: if guess_old is None: raise ValueError(iter_msg %(guess, zs), e) successive_fails += 1 guess = guess_old + copysign(min(max_step_damping*guess, abs(step)), step) continue if successive_fails > 2: raise ValueError("Stopped convergence procedure after multiple bad steps") successive_fails = 0 Ks = [exp(a - b) for a, b in zip(lnphis_liquid0, lnphis_iter)] comp_guess = [liquid0_comp[i]*Ks[i] for i in cmps] y_sum_inv = 1.0/sum(comp_guess) comp_guess = [y*y_sum_inv for y in comp_guess] f_k = sum([liquid0_comp[i]*Ks[i] for i in cmps]) - 1.0 dfk_dvar = 0.0 for i in cmps: dfk_dvar += liquid0_comp[i]*Ks[i]*(dlnphis_dvar_liquid0[i] - dlnphis_dvar_iter[i]) guess_old = guess step = -f_k/dfk_dvar adj_step = copysign(min(max_step_damping*guess, abs(step), abs(step)*damping), step) if guess + adj_step <= 0.0: adj_step *= 0.5 guess = guess + adj_step comp_difference = 0.0 for i in cmps: comp_difference += abs(liquid0_comp[i] - comp_guess[i]) if comp_difference < trivial_solution_tol and iteration: if comp_difference < trivial_solution_tol: raise ValueError("Converged to trivial condition, compositions of both phases equal") # Do the SS part for the two phases try: Ks_SS = [exp(lnphis_liquid0[i] - lnphis_liquid1[i]) for i in cmps] except OverflowError: Ks_SS = [trunc_exp(lnphis_liquid0[i] - lnphis_liquid1[i]) for i in cmps] beta, liquid0_comp_new, liquid1_comp_new = flash_inner_loop(zs, Ks_SS, guess=beta) for xi in liquid0_comp_new: if xi < 0.0: xs_new_sum_inv = 1.0/sum(abs(i) for i in liquid0_comp_new) for i in cmps: liquid0_comp_new[i] = abs(liquid0_comp_new[i])*xs_new_sum_inv break for xi in 
liquid1_comp_new: if xi < 0.0: xs_new_sum_inv = 1.0/sum(abs(i) for i in liquid1_comp_new) for i in cmps: liquid1_comp_new[i] = abs(liquid1_comp_new[i])*xs_new_sum_inv break err_SS = 0.0 try: for Ki, xi, yi in zip(Ks_SS, liquid0_comp, liquid1_comp): err_i = Ki*xi/yi - 1.0 err_SS += err_i*err_i except ZeroDivisionError: err_SS = 0.0 for Ki, xi, yi in zip(Ks, xs, ys): try: err_i = Ki*xi/yi - 1.0 err_SS += err_i*err_i except ZeroDivisionError: pass liquid0_comp, liquid1_comp = liquid0_comp_new, liquid1_comp_new if abs(guess - guess_old) < xtol and err_SS < SS_tol: err_VF = abs(guess - guess_old) guess = guess_old break if abs(guess - guess_old) > xtol: raise ValueError("Did not converge to specified tolerance") return guess, [iter_phase, liquid0, liquid1], [0.0, 1.0-beta, beta], err_VF, err_SS, iteration def bubble_T_Michelsen_Mollerup(T_guess, P, zs, liquid_phase, gas_phase, maxiter=200, xtol=1E-10, ys_guess=None, max_step_damping=5.0, T_update_frequency=1, trivial_solution_tol=1e-4): N = len(zs) cmps = range(N) ys = zs if ys_guess is None else ys_guess T_guess_old = None successive_fails = 0 for iteration in range(maxiter): try: g = gas_phase.to_TP_zs(T=T_guess, P=P, zs=ys) lnphis_g = g.lnphis() dlnphis_dT_g = g.dlnphis_dT() except Exception as e: if T_guess_old is None: raise ValueError(g_undefined_T_msg %(T_guess, ys), e) successive_fails += 1 T_guess = T_guess_old + copysign(min(max_step_damping, abs(step)), step) continue try: l = liquid_phase.to_TP_zs(T=T_guess, P=P, zs=zs) lnphis_l = l.lnphis() dlnphis_dT_l = l.dlnphis_dT() except Exception as e: if T_guess_old is None: raise ValueError(l_undefined_T_msg %(T_guess, zs), e) successive_fails += 1 T_guess = T_guess_old + copysign(min(max_step_damping, abs(step)), step) continue if successive_fails > 2: raise ValueError("Stopped convergence procedure after multiple bad steps") successive_fails = 0 Ks = [exp(a - b) for a, b in zip(lnphis_l, lnphis_g)] ys = [zs[i]*Ks[i] for i in cmps] if iteration % T_update_frequency: continue f_k = sum([zs[i]*Ks[i] for i in cmps]) - 1.0 dfk_dT = 0.0 for i in cmps: dfk_dT += zs[i]*Ks[i]*(dlnphis_dT_l[i] - dlnphis_dT_g[i]) T_guess_old = T_guess step = -f_k/dfk_dT # if near_critical: T_guess = T_guess + copysign(min(max_step_damping, abs(step)), step) # else: # T_guess = T_guess + step comp_difference = sum([abs(zi - yi) for zi, yi in zip(zs, ys)]) if comp_difference < trivial_solution_tol: raise ValueError("Converged to trivial condition, compositions of both phases equal") y_sum = sum(ys) ys = [y/y_sum for y in ys] if abs(T_guess - T_guess_old) < xtol: T_guess = T_guess_old break if abs(T_guess - T_guess_old) > xtol: raise ValueError("Did not converge to specified tolerance") return T_guess, ys, l, g, iteration, abs(T_guess - T_guess_old) def dew_T_Michelsen_Mollerup(T_guess, P, zs, liquid_phase, gas_phase, maxiter=200, xtol=1E-10, xs_guess=None, max_step_damping=5.0, T_update_frequency=1, trivial_solution_tol=1e-4): N = len(zs) cmps = range(N) xs = zs if xs_guess is None else xs_guess T_guess_old = None successive_fails = 0 for iteration in range(maxiter): try: g = gas_phase.to_TP_zs(T=T_guess, P=P, zs=zs) lnphis_g = g.lnphis() dlnphis_dT_g = g.dlnphis_dT() except Exception as e: if T_guess_old is None: raise ValueError(g_undefined_T_msg %(T_guess, zs), e) successive_fails += 1 T_guess = T_guess_old + copysign(min(max_step_damping, abs(step)), step) continue try: l = liquid_phase.to_TP_zs(T=T_guess, P=P, zs=xs) lnphis_l = l.lnphis() dlnphis_dT_l = l.dlnphis_dT() except Exception as e: if T_guess_old is 
None: raise ValueError(l_undefined_T_msg %(T_guess, xs), e) successive_fails += 1 T_guess = T_guess_old + copysign(min(max_step_damping, abs(step)), step) continue if successive_fails > 2: raise ValueError("Stopped convergence procedure after multiple bad steps") successive_fails = 0 Ks = [exp(a - b) for a, b in zip(lnphis_l, lnphis_g)] xs = [zs[i]/Ks[i] for i in cmps] if iteration % T_update_frequency: continue f_k = sum(xs) - 1.0 dfk_dT = 0.0 for i in cmps: dfk_dT += xs[i]*(dlnphis_dT_g[i] - dlnphis_dT_l[i]) T_guess_old = T_guess step = -f_k/dfk_dT # if near_critical: T_guess = T_guess + copysign(min(max_step_damping, abs(step)), step) # else: # T_guess = T_guess + step comp_difference = sum([abs(zi - xi) for zi, xi in zip(zs, xs)]) if comp_difference < trivial_solution_tol: raise ValueError("Converged to trivial condition, compositions of both phases equal") y_sum = sum(xs) xs = [y/y_sum for y in xs] if abs(T_guess - T_guess_old) < xtol: T_guess = T_guess_old break if abs(T_guess - T_guess_old) > xtol: raise ValueError("Did not converge to specified tolerance") return T_guess, xs, l, g, iteration, abs(T_guess - T_guess_old) def bubble_P_Michelsen_Mollerup(P_guess, T, zs, liquid_phase, gas_phase, maxiter=200, xtol=1E-10, ys_guess=None, max_step_damping=1e5, P_update_frequency=1, trivial_solution_tol=1e-4): N = len(zs) cmps = range(N) ys = zs if ys_guess is None else ys_guess P_guess_old = None successive_fails = 0 for iteration in range(maxiter): try: g = gas_phase = gas_phase.to_TP_zs(T=T, P=P_guess, zs=ys) lnphis_g = g.lnphis() dlnphis_dP_g = g.dlnphis_dP() except Exception as e: if P_guess_old is None: raise ValueError(g_undefined_P_msg %(P_guess, ys), e) successive_fails += 1 P_guess = P_guess_old + copysign(min(max_step_damping, abs(step)), step) continue try: l = liquid_phase= liquid_phase.to_TP_zs(T=T, P=P_guess, zs=zs) lnphis_l = l.lnphis() dlnphis_dP_l = l.dlnphis_dP() except Exception as e: if P_guess_old is None: raise ValueError(l_undefined_P_msg %(P_guess, zs), e) successive_fails += 1 T_guess = P_guess_old + copysign(min(max_step_damping, abs(step)), step) continue if successive_fails > 2: raise ValueError("Stopped convergence procedure after multiple bad steps") successive_fails = 0 Ks = [exp(a - b) for a, b in zip(lnphis_l, lnphis_g)] ys = [zs[i]*Ks[i] for i in cmps] if iteration % P_update_frequency: continue f_k = sum([zs[i]*Ks[i] for i in cmps]) - 1.0 dfk_dP = 0.0 for i in cmps: dfk_dP += zs[i]*Ks[i]*(dlnphis_dP_l[i] - dlnphis_dP_g[i]) P_guess_old = P_guess step = -f_k/dfk_dP P_guess = P_guess + copysign(min(max_step_damping, abs(step)), step) comp_difference = sum([abs(zi - yi) for zi, yi in zip(zs, ys)]) if comp_difference < trivial_solution_tol: raise ValueError("Converged to trivial condition, compositions of both phases equal") y_sum = sum(ys) ys = [y/y_sum for y in ys] if abs(P_guess - P_guess_old) < xtol: P_guess = P_guess_old break if abs(P_guess - P_guess_old) > xtol: raise ValueError("Did not converge to specified tolerance") return P_guess, ys, l, g, iteration, abs(P_guess - P_guess_old) def dew_P_Michelsen_Mollerup(P_guess, T, zs, liquid_phase, gas_phase, maxiter=200, xtol=1E-10, xs_guess=None, max_step_damping=1e5, P_update_frequency=1, trivial_solution_tol=1e-4): N = len(zs) cmps = range(N) xs = zs if xs_guess is None else xs_guess P_guess_old = None successive_fails = 0 for iteration in range(maxiter): try: g = gas_phase = gas_phase.to_TP_zs(T=T, P=P_guess, zs=zs) lnphis_g = g.lnphis() dlnphis_dP_g = g.dlnphis_dP() except Exception as e: if P_guess_old 
is None: raise ValueError(g_undefined_P_msg %(P_guess, zs), e) successive_fails += 1 P_guess = P_guess_old + copysign(min(max_step_damping, abs(step)), step) continue try: l = liquid_phase= liquid_phase.to_TP_zs(T=T, P=P_guess, zs=xs) lnphis_l = l.lnphis() dlnphis_dP_l = l.dlnphis_dP() except Exception as e: if P_guess_old is None: raise ValueError(l_undefined_P_msg %(P_guess, xs), e) successive_fails += 1 T_guess = P_guess_old + copysign(min(max_step_damping, abs(step)), step) continue if successive_fails > 2: raise ValueError("Stopped convergence procedure after multiple bad steps") successive_fails = 0 Ks = [exp(a - b) for a, b in zip(lnphis_l, lnphis_g)] xs = [zs[i]/Ks[i] for i in cmps] if iteration % P_update_frequency: continue f_k = sum(xs) - 1.0 dfk_dP = 0.0 for i in cmps: dfk_dP += xs[i]*(dlnphis_dP_g[i] - dlnphis_dP_l[i]) P_guess_old = P_guess step = -f_k/dfk_dP P_guess = P_guess + copysign(min(max_step_damping, abs(step)), step) comp_difference = sum([abs(zi - xi) for zi, xi in zip(zs, xs)]) if comp_difference < trivial_solution_tol: raise ValueError("Converged to trivial condition, compositions of both phases equal") x_sum_inv = 1.0/sum(xs) xs = [x*x_sum_inv for x in xs] if abs(P_guess - P_guess_old) < xtol: P_guess = P_guess_old break if abs(P_guess - P_guess_old) > xtol: raise ValueError("Did not converge to specified tolerance") return P_guess, xs, l, g, iteration, abs(P_guess - P_guess_old) # spec, iter_var, fixed_var strs_to_ders = {('H', 'T', 'P'): 'dH_dT_P', ('S', 'T', 'P'): 'dS_dT_P', ('G', 'T', 'P'): 'dG_dT_P', ('U', 'T', 'P'): 'dU_dT_P', ('A', 'T', 'P'): 'dA_dT_P', ('H', 'T', 'V'): 'dH_dT_V', ('S', 'T', 'V'): 'dS_dT_V', ('G', 'T', 'V'): 'dG_dT_V', ('U', 'T', 'V'): 'dU_dT_V', ('A', 'T', 'V'): 'dA_dT_V', ('H', 'P', 'T'): 'dH_dP_T', ('S', 'P', 'T'): 'dS_dP_T', ('G', 'P', 'T'): 'dG_dP_T', ('U', 'P', 'T'): 'dU_dP_T', ('A', 'P', 'T'): 'dA_dP_T', ('H', 'P', 'V'): 'dH_dP_V', ('S', 'P', 'V'): 'dS_dP_V', ('G', 'P', 'V'): 'dG_dP_V', ('U', 'P', 'V'): 'dU_dP_V', ('A', 'P', 'V'): 'dA_dP_V', ('H', 'V', 'T'): 'dH_dV_T', ('S', 'V', 'T'): 'dS_dV_T', ('G', 'V', 'T'): 'dG_dV_T', ('U', 'V', 'T'): 'dU_dV_T', ('A', 'V', 'T'): 'dA_dV_T', ('H', 'V', 'P'): 'dH_dV_P', ('S', 'V', 'P'): 'dS_dV_P', ('G', 'V', 'P'): 'dG_dV_P', ('U', 'V', 'P'): 'dU_dV_P', ('A', 'V', 'P'): 'dA_dV_P', } multiple_solution_sets = set([('T', 'S'), ('T', 'H'), ('T', 'U'), ('T', 'A'), ('T', 'G'), ('S', 'T'), ('H', 'T'), ('U', 'T'), ('A', 'T'), ('G', 'T'), ]) def TPV_solve_HSGUA_1P(zs, phase, guess, fixed_var_val, spec_val, iter_var='T', fixed_var='P', spec='H', maxiter=200, xtol=1E-10, ytol=None, fprime=False, minimum_progress=0.3, oscillation_detection=True, bounded=False, min_bound=None, max_bound=None, multi_solution=False): r'''Solve a single-phase flash where one of `T`, `P`, or `V` are specified and one of `H`, `S`, `G`, `U`, or `A` are also specified. The iteration (changed input variable) variable must be specified as be one of `T`, `P`, or `V`, but it cannot be the same as the fixed variable. This method is a secant or newton based solution method, optionally with oscillation detection to bail out of tring to solve the problem to handle the case where the spec cannot be met because of a phase change (as in a cubic eos case). 
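At each iteration the phase is re-created at the trial value of the iteration variable (the fixed variable held constant) and the residual `spec(phase) - spec_val` is driven to zero; with `fprime` True, the analytical derivative of the spec with respect to the iteration variable at constant fixed variable supplies a Newton step, otherwise a secant step is used. If the Newton/secant iteration fails to converge or oscillates, the solver falls back to a bounded Brent-type search between `min_bound` and `max_bound` (when `bounded` is True and both bounds are available).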
Parameters ---------- zs : list[float] Mole fractions of the phase, [-] phase : `Phase` The phase object of the mixture, containing the information for calculating properties at new conditions, [-] guess : float The guessed value for the iteration variable, [K or Pa or m^3/mol] fixed_var_val : float The specified value of the fixed variable (one of T, P, or V); [K or Pa, or m^3/mol] spec_val : float The specified value of H, S, G, U, or A, [J/(mol*K) or J/mol] iter_var : str One of 'T', 'P', 'V', [-] fixed_var : str One of 'T', 'P', 'V', [-] spec : str One of 'H', 'S', 'G', 'U', 'A', [-] maxiter : float Maximum number of iterations, [-] xtol : float Tolerance for secant-style convergence of the iteration variable, [K or Pa, or m^3/mol] ytol : float or None Tolerance for convergence of the spec variable, [J/(mol*K) or J/mol] Returns ------- iter_var_val, phase, iterations, err Notes ----- ''' # Needs lots of work but the idea is here # Can iterate chancing any of T, P, V with a fixed other T, P, V to meet any # H S G U A spec. store = [] global iterations iterations = 0 if fixed_var == iter_var: raise ValueError("Fixed variable cannot be the same as iteration variable") if fixed_var not in ('T', 'P', 'V'): raise ValueError("Fixed variable must be one of `T`, `P`, `V`") if iter_var not in ('T', 'P', 'V'): raise ValueError("Iteration variable must be one of `T`, `P`, `V`") # Little point in enforcing the spec - might want to repurpose the function later if spec not in ('H', 'S', 'G', 'U', 'A'): raise ValueError("Spec variable must be one of `H`, `S`, `G` `U`, `A`") multiple_solutions = (fixed_var, spec) in multiple_solution_sets phase_kwargs = {fixed_var: fixed_var_val, 'zs': zs} spec_fun = getattr(phase.__class__, spec) # print('spec_fun', spec_fun) if fprime: try: # Gotta be a lookup by (spec, iter_var, fixed_var) der_attr = strs_to_ders[(spec, iter_var, fixed_var)] except KeyError: der_attr = 'd' + spec + '_d' + iter_var der_attr_fun = getattr(phase.__class__, der_attr) # print('der_attr_fun', der_attr_fun) def to_solve(guess, solved_phase=None): global iterations iterations += 1 if solved_phase is not None: p = solved_phase else: phase_kwargs[iter_var] = guess p = phase.to(**phase_kwargs) err = spec_fun(p) - spec_val # err = (spec_fun(p) - spec_val)/spec_val store[:] = (p, err) if fprime: # print([err, guess, p.eos_mix.phase, der_attr]) derr = der_attr_fun(p) # derr = der_attr_fun(p)/spec_val return err, derr # print(err) return err arg_fprime = fprime high = None # Optional and not often used bound for newton if fixed_var == 'V': if iter_var == 'T': max_phys = phase.T_max_at_V(fixed_var_val) elif iter_var == 'P': max_phys = phase.P_max_at_V(fixed_var_val) if max_phys is not None: if max_bound is None: max_bound = high = max_phys else: max_bound = high = min(max_phys, max_bound) # TV iterations ignore_bound_fail = (fixed_var == 'T' and iter_var == 'P') if fixed_var in ('T',) and ((fixed_var == 'T' and iter_var == 'P') or (fixed_var == 'P' and iter_var == 'T') or (fixed_var == 'T' and iter_var == 'V') ) and 1: try: fprime = False if iter_var == 'V': dummy_iter = 1e8 else: dummy_iter = guess phase_kwargs[iter_var] = dummy_iter # Dummy pressure does not matter phase_temp = phase.to(**phase_kwargs) lower_phase, higher_phase = None, None delta = 1e-9 if fixed_var == 'T' and iter_var == 'P': transitions = phase_temp.P_transitions() # assert len(transitions) == 1 under_trans, above_trans = transitions[0] * (1.0 - delta), transitions[0] * (1.0 + delta) elif fixed_var == 'P' and iter_var == 'T': 
transitions = phase_temp.T_transitions() under_trans, above_trans = transitions[0] * (1.0 - delta), transitions[0] * (1.0 + delta) assert len(transitions) == 1 elif fixed_var == 'T' and iter_var == 'V': transitions = phase_temp.P_transitions() delta = 1e-11 # not_separated = True # while not_separated: P_higher = transitions[0]*(1.0 + delta) # Dummy pressure does not matter lower_phase = phase.to(T=fixed_var_val, zs=zs, P=P_higher) P_lower = transitions[0]*(1.0 - delta) # Dummy pressure does not matter higher_phase = phase.to(T=fixed_var_val, zs=zs, P=P_lower) under_trans, above_trans = lower_phase.V(), higher_phase.V() not_separated = isclose(under_trans, above_trans, rel_tol=1e-3) # delta *= 10 # TODO is it possible to evaluate each limit at once, so half the work is avoided? bracketed_high, bracketed_low = False, False if min_bound is not None: f_min = to_solve(min_bound) f_low_trans = to_solve(under_trans, lower_phase) if f_min*f_low_trans <= 0.0: bracketed_low = True bounding_pair = (min(min_bound, under_trans), max(min_bound, under_trans)) if max_bound is not None and (not bracketed_low or multiple_solutions): f_max = to_solve(max_bound) f_max_trans = to_solve(above_trans, higher_phase) if f_max*f_max_trans <= 0.0: bracketed_high = True bounding_pair = (min(max_bound, above_trans), max(max_bound, above_trans)) if max_bound is not None and max_bound is not None and not bracketed_low and not bracketed_high: if not ignore_bound_fail: raise NotBoundedError("Between phases") if bracketed_high or bracketed_low: oscillation_detection = False high = bounding_pair[1] # restrict newton/secant just in case min_bound, max_bound = bounding_pair if not (min_bound < guess < max_bound): guess = 0.5*(min_bound + max_bound) else: if min_bound is not None and transitions[0] < min_bound and not ignore_bound_fail: raise NotBoundedError("Not likely to bound") if max_bound is not None and transitions[0] > max_bound and not ignore_bound_fail: raise NotBoundedError("Not likely to bound") except NotBoundedError as e: raise e except Exception: pass fprime = arg_fprime # Plot the objective function # tests = logspace(log10(10.6999), log10(10.70005), 15000) # tests = logspace(log10(10.6), log10(10.8), 15000) # tests = logspace(log10(min_bound), log10(max_bound), 1500) # values = [to_solve(t)[0] for t in tests] # values = [abs(t) for t in values] # import matplotlib.pyplot as plt # plt.loglog(tests, values) # plt.show() if oscillation_detection and ytol is not None: to_solve2, checker = oscillation_checking_wrapper(to_solve, full=True, minimum_progress=minimum_progress, good_err=ytol*1e6) else: to_solve2 = to_solve checker = None solve_bounded = False try: # All three variables P, T, V are positive but can grow unbounded, so # for the secant method, only set the one variable if fprime: iter_var_val = newton(to_solve2, guess, xtol=xtol, ytol=ytol, fprime=True, maxiter=maxiter, bisection=True, low=min_bound, high=high, gap_detection=False) else: iter_var_val = secant(to_solve2, guess, xtol=xtol, ytol=ytol, maxiter=maxiter, bisection=True, low=min_bound, high=high) except (UnconvergedError, OscillationError, NotBoundedError): solve_bounded = True # Unconverged - from newton/secant; oscillation - from the oscillation detector; # NotBounded - from when EOS needs to solve T and there is no solution fprime = False if solve_bounded: if bounded and min_bound is not None and max_bound is not None: if checker: min_bound_prev, max_bound_prev, fa, fb = best_bounding_bounds(min_bound, max_bound, f=to_solve, 
xs_pos=checker.xs_pos, ys_pos=checker.ys_pos, xs_neg=checker.xs_neg, ys_neg=checker.ys_neg) if abs(min_bound_prev/max_bound_prev - 1.0) > 2.5e-4: # If the points are too close, odds are there is a discontinuity in the newton solution min_bound, max_bound = min_bound_prev, max_bound_prev # maxiter = 20 else: fa, fb = None, None else: fa, fb = None, None # try: iter_var_val = brenth(to_solve, min_bound, max_bound, xtol=xtol, ytol=ytol, maxiter=maxiter, fa=fa, fb=fb) # except: # # Not sure at all if good idea # iter_var_val = secant(to_solve, guess, xtol=xtol, ytol=ytol, # maxiter=maxiter, bisection=True, low=min_bound) phase, err = store return iter_var_val, phase, iterations, err def solve_PTV_HSGUA_1P(phase, zs, fixed_var_val, spec_val, fixed_var, spec, iter_var, constants, correlations, last_conv=None, oscillation_detection=True, guess_maxiter=50, guess_xtol=1e-7, maxiter=80, xtol=1e-10): # TODO: replace oscillation detection with bounding parameters and translation # The cost should be less. if iter_var == 'T': if isinstance(phase, CoolPropPhase): min_bound = phase.AS.Tmin() max_bound = phase.AS.Tmax() else: min_bound = phase.T_MIN_FIXED max_bound = phase.T_MAX_FIXED # if isinstance(phase, IAPWS95): # min_bound = 235.0 # max_bound = 5000.0 elif iter_var == 'P': min_bound = Phase.P_MIN_FIXED*(1.0 - 1e-12) max_bound = Phase.P_MAX_FIXED*(1.0 + 1e-12) if isinstance(phase, CoolPropPhase): AS = phase.AS max_bound = AS.pmax()*(1.0 - 1e-7) min_bound = AS.trivial_keyed_output(CPiP_min)*(1.0 + 1e-7) elif iter_var == 'V': min_bound = Phase.V_MIN_FIXED max_bound = Phase.V_MAX_FIXED if isinstance(phase, (CEOSLiquid, CEOSGas)): c2R = phase.eos_class.c2*R Tcs, Pcs = constants.Tcs, constants.Pcs b = sum([c2R*Tcs[i]*zs[i]/Pcs[i] for i in range(constants.N)]) min_bound = b*(1.0 + 1e-15) if phase.is_gas: methods = [LAST_CONVERGED, FIXED_GUESS, STP_T_GUESS, IG_ENTHALPY, LASTOVKA_SHAW] elif phase.is_liquid: methods = [LAST_CONVERGED, FIXED_GUESS, STP_T_GUESS, IDEAL_LIQUID_ENTHALPY, DADGOSTAR_SHAW_1] else: methods = [LAST_CONVERGED, FIXED_GUESS, STP_T_GUESS] for method in methods: try: guess = TPV_solve_HSGUA_guesses_1P(zs, method, constants, correlations, fixed_var_val, spec_val, iter_var=iter_var, fixed_var=fixed_var, spec=spec, maxiter=guess_maxiter, xtol=guess_xtol, ytol=abs(spec_val)*1e-5, bounded=True, min_bound=min_bound, max_bound=max_bound, user_guess=None, last_conv=last_conv, T_ref=298.15, P_ref=101325.0) break except Exception: pass ytol = 1e-8*abs(spec_val) if iter_var == 'T' and spec in ('S', 'H'): ytol = ytol/100 if isinstance(phase, IAPWS95): # Objective function isn't quite as nice and smooth as desired ytol = None _, phase, iterations, err = TPV_solve_HSGUA_1P(zs, phase, guess, fixed_var_val=fixed_var_val, spec_val=spec_val, ytol=ytol, iter_var=iter_var, fixed_var=fixed_var, spec=spec, oscillation_detection=oscillation_detection, minimum_progress=1e-4, maxiter=maxiter, fprime=True, xtol=xtol, bounded=True, min_bound=min_bound, max_bound=max_bound) T, P = phase.T, phase.P return T, P, phase, iterations, err def TPV_solve_HSGUA_guesses_1P(zs, method, constants, correlations, fixed_var_val, spec_val, iter_var='T', fixed_var='P', spec='H', maxiter=20, xtol=1E-7, ytol=None, bounded=False, min_bound=None, max_bound=None, user_guess=None, last_conv=None, T_ref=298.15, P_ref=101325.0): if fixed_var == iter_var: raise ValueError("Fixed variable cannot be the same as iteration variable") if fixed_var not in ('T', 'P', 'V'): raise ValueError("Fixed variable must be one of `T`, `P`, `V`") if iter_var 
not in ('T', 'P', 'V'): raise ValueError("Iteration variable must be one of `T`, `P`, `V`") if spec not in ('H', 'S', 'G', 'U', 'A'): raise ValueError("Spec variable must be one of `H`, `S`, `G` `U`, `A`") cmps = range(len(zs)) iter_T = iter_var == 'T' iter_P = iter_var == 'P' iter_V = iter_var == 'V' fixed_P = fixed_var == 'P' fixed_T = fixed_var == 'T' fixed_V = fixed_var == 'V' always_S = spec in ('S', 'G', 'A') always_H = spec in ('H', 'G', 'U', 'A') always_V = spec in ('U', 'A') if always_S: P_ref_inv = 1.0/P_ref dS_ideal = R*sum([zi*log(zi) for zi in zs if zi > 0.0]) # ideal composition entropy composition def err(guess): # Translate the fixed variable to a local variable if fixed_P: P = fixed_var_val elif fixed_T: T = fixed_var_val elif fixed_V: V = fixed_var_val T = None # Translate the iteration variable to a local variable if iter_P: P = guess if not fixed_V: V = None elif iter_T: T = guess if not fixed_V: V = None elif iter_V: V = guess T = None if T is None: T = T_from_V(V, P) # Compute S, H, V as necessary if always_S: S = S_model(T, P) - dS_ideal - R*log(P*P_ref_inv) if always_H: H = H_model(T, P) if always_V and V is None: V = V_model(T, P) # print(H, S, V, 'hi') # Return the objective function if spec == 'H': err = H - spec_val elif spec == 'S': err = S - spec_val elif spec == 'G': err = (H - T*S) - spec_val elif spec == 'U': err = (H - P*V) - spec_val elif spec == 'A': err = (H - P*V - T*S) - spec_val # print(T, P, V, 'TPV', err) return err # Precompute some things depending on the method if method in (LASTOVKA_SHAW, DADGOSTAR_SHAW_1): MW = mixing_simple(zs, constants.MWs) n_atoms = [sum(i.values()) for i in constants.atomss] sv = mixing_simple(zs, n_atoms)/MW if method == IG_ENTHALPY: HeatCapacityGases = correlations.HeatCapacityGases def H_model(T, P=None): H_calc = 0. for i in cmps: H_calc += zs[i]*HeatCapacityGases[i].T_dependent_property_integral(T_ref, T) return H_calc def S_model(T, P=None): S_calc = 0. for i in cmps: S_calc += zs[i]*HeatCapacityGases[i].T_dependent_property_integral_over_T(T_ref, T) return S_calc def V_model(T, P): return R*T/P def T_from_V(V, P): return P*V/R elif method == LASTOVKA_SHAW: H_ref = Lastovka_Shaw_integral(T_ref, sv) S_ref = Lastovka_Shaw_integral_over_T(T_ref, sv) def H_model(T, P=None): H1 = Lastovka_Shaw_integral(T, sv) dH = H1 - H_ref return property_mass_to_molar(dH, MW) def S_model(T, P=None): S1 = Lastovka_Shaw_integral_over_T(T, sv) dS = S1 - S_ref return property_mass_to_molar(dS, MW) def V_model(T, P): return R*T/P def T_from_V(V, P): return P*V/R elif method == DADGOSTAR_SHAW_1: Tc = mixing_simple(zs, constants.Tcs) omega = mixing_simple(zs, constants.omegas) H_ref = Dadgostar_Shaw_integral(T_ref, sv) S_ref = Dadgostar_Shaw_integral_over_T(T_ref, sv) def H_model(T, P=None): H1 = Dadgostar_Shaw_integral(T, sv) Hvap = SMK(T, Tc, omega) return (property_mass_to_molar(H1 - H_ref, MW) - Hvap) def S_model(T, P=None): S1 = Dadgostar_Shaw_integral_over_T(T, sv) dSvap = SMK(T, Tc, omega)/T return (property_mass_to_molar(S1 - S_ref, MW) - dSvap) Vc = mixing_simple(zs, constants.Vcs) def V_model(T, P=None): return COSTALD(T, Tc, Vc, omega) def T_from_V(V, P): secant(lambda T: COSTALD(T, Tc, Vc, omega), .65*Tc) elif method == IDEAL_LIQUID_ENTHALPY: HeatCapacityGases = correlations.HeatCapacityGases EnthalpyVaporizations = correlations.EnthalpyVaporizations def H_model(T, P=None): H_calc = 0. 
for i in cmps: H_calc += zs[i]*(HeatCapacityGases[i].T_dependent_property_integral(T_ref, T) - EnthalpyVaporizations[i](T)) return H_calc def S_model(T, P=None): S_calc = 0. T_inv = 1.0/T for i in cmps: S_calc += zs[i]*(HeatCapacityGases[i].T_dependent_property_integral_over_T(T_ref, T) - T_inv*EnthalpyVaporizations[i](T)) return S_calc VolumeLiquids = correlations.VolumeLiquids def V_model(T, P=None): V_calc = 0. for i in cmps: V_calc += zs[i]*VolumeLiquids[i].T_dependent_property(T) return V_calc def T_from_V(V, P): T_calc = 0. for i in cmps: T_calc += zs[i]*VolumeLiquids[i].solve_property(V) return T_calc # Simple return values - not going through a model if method == STP_T_GUESS: if iter_T: return 298.15 elif iter_P: return 101325.0 elif iter_V: return 0.024465403697038125 elif method == LAST_CONVERGED: if last_conv is None: raise ValueError("No last converged") return last_conv elif method == FIXED_GUESS: if user_guess is None: raise ValueError("No user guess") return user_guess try: # All three variables P, T, V are positive but can grow unbounded, so # for the secant method, only set the one variable if iter_T: guess = 298.15 elif iter_P: guess = 101325.0 elif iter_V: guess = 0.024465403697038125 return secant(err, guess, xtol=xtol, ytol=ytol, maxiter=maxiter, bisection=True, low=min_bound) except (UnconvergedError,): # G and A specs are NOT MONOTONIC and the brackets will likely NOT BRACKET # THE ROOTS! return brenth(err, min_bound, max_bound, xtol=xtol, ytol=ytol, maxiter=maxiter) def PH_secant_1P(T_guess, P, H, zs, phase, maxiter=200, xtol=1E-10, minimum_progress=0.3, oscillation_detection=True): store = [] global iterations iterations = 0 def to_solve(T): global iterations iterations += 1 p = phase.to_TP_zs(T, P, zs) err = p.H() - H store[:] = (p, err) return err if oscillation_detection: to_solve, checker = oscillation_checking_wrapper(to_solve, full=True, minimum_progress=minimum_progress) T = secant(to_solve, T_guess, xtol=xtol, maxiter=maxiter) phase, err = store return T, phase, iterations, err def PH_newton_1P(T_guess, P, H, zs, phase, maxiter=200, xtol=1E-10, minimum_progress=0.3, oscillation_detection=True): store = [] global iterations iterations = 0 def to_solve(T): global iterations iterations += 1 p = phase.to_TP_zs(T, P, zs) err = p.H() - H derr_dT = p.dH_dT() store[:] = (p, err) return err, derr_dT if oscillation_detection: to_solve, checker = oscillation_checking_wrapper(to_solve, full=True, minimum_progress=minimum_progress) T = newton(to_solve, T_guess, fprime=True, xtol=xtol, maxiter=maxiter) phase, err = store return T, phase, iterations, err def TVF_pure_newton(P_guess, T, liquids, gas, maxiter=200, xtol=1E-10): one_liquid = len(liquids) zs = [1.0] store = [] global iterations iterations = 0 def to_solve_newton(P): global iterations iterations += 1 g = gas.to_TP_zs(T, P, zs) fugacity_gas = g.fugacities()[0] dfugacities_dP_gas = g.dfugacities_dP()[0] if one_liquid: lowest_phase = liquids[0].to_TP_zs(T, P, zs) else: ls = [l.to_TP_zs(T, P, zs) for l in liquids] G_min, lowest_phase = 1e100, None for l in ls: G = l.G() if G < G_min: G_min, lowest_phase = G, l fugacity_liq = lowest_phase.fugacities()[0] dfugacities_dP_liq = lowest_phase.dfugacities_dP()[0] err = fugacity_liq - fugacity_gas derr_dP = dfugacities_dP_liq - dfugacities_dP_gas store[:] = (lowest_phase, g, err) return err, derr_dP Psat = newton(to_solve_newton, P_guess, xtol=xtol, maxiter=maxiter, low=Phase.P_MIN_FIXED, require_eval=True, bisection=False, fprime=True) l, g, err = store return Psat, l, g, 
iterations, err def TVF_pure_secant(P_guess, T, liquids, gas, maxiter=200, xtol=1E-10): one_liquid = len(liquids) zs = [1.0] store = [] global iterations iterations = 0 def to_solve_secant(P): global iterations iterations += 1 g = gas.to_TP_zs(T, P, zs) fugacity_gas = g.fugacities()[0] if one_liquid: lowest_phase = liquids[0].to_TP_zs(T, P, zs) else: ls = [l.to_TP_zs(T, P, zs) for l in liquids] G_min, lowest_phase = 1e100, None for l in ls: G = l.G() if G < G_min: G_min, lowest_phase = G, l fugacity_liq = lowest_phase.fugacities()[0] err = fugacity_liq - fugacity_gas store[:] = (lowest_phase, g, err) return err if P_guess < Phase.P_MIN_FIXED: raise ValueError("Too low.") # if P_guess < Phase.P_MIN_FIXED: # low = None # else: # low = Phase.P_MIN_FIXED Psat = secant(to_solve_secant, P_guess, xtol=xtol, maxiter=maxiter, low=Phase.P_MIN_FIXED*(1-1e-10)) l, g, err = store return Psat, l, g, iterations, err def PVF_pure_newton(T_guess, P, liquids, gas, maxiter=200, xtol=1E-10): one_liquid = len(liquids) zs = [1.0] store = [] global iterations iterations = 0 def to_solve_newton(T): global iterations iterations += 1 g = gas.to_TP_zs(T, P, zs) fugacity_gas = g.fugacities()[0] dfugacities_dT_gas = g.dfugacities_dT()[0] if one_liquid: lowest_phase = liquids[0].to_TP_zs(T, P, zs) else: ls = [l.to_TP_zs(T, P, zs) for l in liquids] G_min, lowest_phase = 1e100, None for l in ls: G = l.G() if G < G_min: G_min, lowest_phase = G, l fugacity_liq = lowest_phase.fugacities()[0] dfugacities_dT_liq = lowest_phase.dfugacities_dT()[0] err = fugacity_liq - fugacity_gas derr_dT = dfugacities_dT_liq - dfugacities_dT_gas store[:] = (lowest_phase, g, err) return err, derr_dT Tsat = newton(to_solve_newton, T_guess, xtol=xtol, maxiter=maxiter, low=Phase.T_MIN_FIXED, require_eval=True, bisection=False, fprime=True) l, g, err = store return Tsat, l, g, iterations, err def PVF_pure_secant(T_guess, P, liquids, gas, maxiter=200, xtol=1E-10): one_liquid = len(liquids) zs = [1.0] store = [] global iterations iterations = 0 def to_solve_secant(T): global iterations iterations += 1 g = gas.to_TP_zs(T, P, zs) fugacity_gas = g.fugacities()[0] if one_liquid: lowest_phase = liquids[0].to_TP_zs(T, P, zs) else: ls = [l.to_TP_zs(T, P, zs) for l in liquids] G_min, lowest_phase = 1e100, None for l in ls: G = l.G() if G < G_min: G_min, lowest_phase = G, l fugacity_liq = lowest_phase.fugacities()[0] err = fugacity_liq - fugacity_gas store[:] = (lowest_phase, g, err) return err Tsat = secant(to_solve_secant, T_guess, xtol=xtol, maxiter=maxiter, low=Phase.T_MIN_FIXED) l, g, err = store return Tsat, l, g, iterations, err def TSF_pure_newton(P_guess, T, other_phases, solids, maxiter=200, xtol=1E-10): one_other = len(other_phases) one_solid = len(solids) zs = [1.0] store = [] global iterations iterations = 0 def to_solve_newton(P): global iterations iterations += 1 if one_solid: lowest_solid = solids[0].to_TP_zs(T, P, zs) else: ss = [s.to_TP_zs(T, P, zs) for s in solids] G_min, lowest_solid = 1e100, None for o in ss: G = o.G() if G < G_min: G_min, lowest_solid = G, o fugacity_solid = lowest_solid.fugacities()[0] dfugacities_dP_solid = lowest_solid.dfugacities_dP()[0] if one_other: lowest_other = other_phases[0].to_TP_zs(T, P, zs) else: others = [l.to_TP_zs(T, P, zs) for l in other_phases] G_min, lowest_other = 1e100, None for o in others: G = o.G() if G < G_min: G_min, lowest_other = G, o fugacity_other = lowest_other.fugacities()[0] dfugacities_dP_other = lowest_other.dfugacities_dP()[0] err = fugacity_other - fugacity_solid derr_dP = 
dfugacities_dP_other - dfugacities_dP_solid store[:] = (lowest_other, lowest_solid, err) return err, derr_dP Psub = newton(to_solve_newton, P_guess, xtol=xtol, maxiter=maxiter, require_eval=True, bisection=False, fprime=True) other, solid, err = store return Psub, other, solid, iterations, err def PSF_pure_newton(T_guess, P, other_phases, solids, maxiter=200, xtol=1E-10): one_other = len(other_phases) one_solid = len(solids) zs = [1.0] store = [] global iterations iterations = 0 def to_solve_newton(T): global iterations iterations += 1 if one_solid: lowest_solid = solids[0].to_TP_zs(T, P, zs) else: ss = [s.to_TP_zs(T, P, zs) for s in solids] G_min, lowest_solid = 1e100, None for o in ss: G = o.G() if G < G_min: G_min, lowest_solid = G, o fugacity_solid = lowest_solid.fugacities()[0] dfugacities_dT_solid = lowest_solid.dfugacities_dT()[0] if one_other: lowest_other = other_phases[0].to_TP_zs(T, P, zs) else: others = [l.to_TP_zs(T, P, zs) for l in other_phases] G_min, lowest_other = 1e100, None for o in others: G = o.G() if G < G_min: G_min, lowest_other = G, o fugacity_other = lowest_other.fugacities()[0] dfugacities_dT_other = lowest_other.dfugacities_dT()[0] err = fugacity_other - fugacity_solid derr_dT = dfugacities_dT_other - dfugacities_dT_solid store[:] = (lowest_other, lowest_solid, err) return err, derr_dT Tsub = newton(to_solve_newton, T_guess, xtol=xtol, maxiter=maxiter, require_eval=True, bisection=False, fprime=True) other, solid, err = store return Tsub, other, solid, iterations, err def solve_T_VF_IG_K_composition_independent(VF, T, zs, gas, liq, xtol=1e-10): '''from sympy import * zi, P, VF = symbols('zi, P, VF') l_phi, g_phi = symbols('l_phi, g_phi', cls=Function) # g_phi = symbols('g_phi') # Ki = l_phi(P)/g_phi(P) Ki = l_phi(P)#/g_phi err = zi*(Ki-1)/(1+VF*(Ki-1)) cse([diff(err, P), err], optimizations='basic')''' # gas phis are all one in IG model # gas.to(T=T, P=P, zs=zs) cmps = range(liq.N) global Ks, iterations, err iterations = 0 err = 0.0 def to_solve(lnP): global Ks, iterations, err iterations += 1 P = exp(lnP) l = liq.to(T=T, P=P, zs=zs) Ks = liquid_phis = l.phis() dlnphis_dP_l = l.dphis_dP() err = derr = 0.0 for i in cmps: x1 = liquid_phis[i] - 1.0 x2 = VF*x1 x3 = 1.0/(x2 + 1.0) x4 = x3*zs[i] err += x1*x4 derr += x4*(1.0 - x2*x3)*dlnphis_dP_l[i] return err, P*derr # estimate bubble point and dew point # Make sure to overwrite the phase so the Psats get cached P_base = 1e5 liq = liq.to(T=T, P=P_base, zs=zs) phis = liq.phis() P_bub, P_dew = 0.0, 0.0 for i in range(liq.N): P_bub += phis[i]*zs[i] P_dew += zs[i]/(phis[i]*P_base) P_bub = P_bub*liq.P P_dew = 1.0/P_dew P_guess = VF*P_dew + (1.0 - VF)*P_bub # When Poynting is on, the are only an estimate; otherwise it is dead on # and there is no need for a solver if liq.use_Poynting or 0.0 < VF < 1.0: lnP = newton(to_solve, log(P_guess), xtol=xtol, fprime=True) P = exp(lnP) else: if VF == 0.0: Ks = liq.to(T=T, P=P_bub, zs=zs).phis() P = P_bub elif VF == 1.0: Ks = liq.to(T=T, P=P_dew, zs=zs).phis() P = P_dew else: raise ValueError("Vapor fraction outside range 0 to 1") xs = [zs[i]/(1.+VF*(Ks[i]-1.)) for i in cmps] for i in cmps: Ks[i] *= xs[i] ys = Ks return P, xs, ys, iterations, err def solve_P_VF_IG_K_composition_independent(VF, P, zs, gas, liq, xtol=1e-10): # gas phis are all one in IG model # gas.to(T=T, P=P, zs=zs) cmps = range(liq.N) global Ks, iterations, err iterations = 0 def to_solve(T): global Ks, iterations, err iterations += 1 dlnphis_dT_l, liquid_phis = liq.dphis_dT_at(T, P, zs, phis_also=True) Ks = 
liquid_phis # l = liq.to(T=T, P=P, zs=zs) # Ks = liquid_phis = l.phis() # dlnphis_dT_l = l.dphis_dT() err = derr = 0.0 for i in cmps: x1 = liquid_phis[i] - 1.0 x2 = VF*x1 x3 = 1.0/(x2 + 1.0) x4 = x3*zs[i] err += x1*x4 derr += x4*(1.0 - x2*x3)*dlnphis_dT_l[i] return err, derr try: T = newton(to_solve, 300.0, xtol=xtol, fprime=True, low=1e-6) except: try: T = brenth(lambda x: to_solve(x)[0], 300, 1000) except: T = newton(to_solve, 400.0, xtol=xtol, fprime=True, low=1e-6) xs = [zs[i]/(1.+VF*(Ks[i]-1.)) for i in cmps] for i in cmps: Ks[i] *= xs[i] ys = Ks return T, xs, ys, iterations, err def sequential_substitution_2P_sat(T, P, V, zs_dry, xs_guess, ys_guess, liquid_phase, gas_phase, idx, z0, z1=None, maxiter=1000, tol=1E-13, trivial_solution_tol=1e-5, damping=1.0): xs, ys = xs_guess, ys_guess V_over_F = 1.0 cmps = range(len(zs_dry)) if z1 is None: z1 = z0*1.0001 + 1e-4 if z1 > 1: z1 = z0*1.0001 - 1e-4 # secant step/solving p0, p1, err0, err1 = None, None, None, None def step(p0, p1, err0, err1): if p0 is None: return z0 if p1 is None: return z1 else: new = p1 - err1*(p1 - p0)/(err1 - err0)*damping return new for iteration in range(maxiter): p0, p1 = step(p0, p1, err0, err1), p0 zs = list(zs_dry) zs[idx] = p0 zs = normalize(zs) # print(zs, p0, p1) g = gas_phase.to(ys, T=T, P=P, V=V) l = liquid_phase.to(xs, T=T, P=P, V=V) lnphis_g = g.lnphis() lnphis_l = l.lnphis() Ks = [exp(lnphis_l[i] - lnphis_g[i]) for i in cmps] V_over_F, xs_new, ys_new = flash_inner_loop(zs, Ks, guess=V_over_F) err0, err1 = 1.0 - V_over_F, err0 # Check for negative fractions - normalize only if needed for xi in xs_new: if xi < 0.0: xs_new_sum = sum(abs(i) for i in xs_new) xs_new = [abs(i)/xs_new_sum for i in xs_new] break for yi in ys_new: if yi < 0.0: ys_new_sum = sum(abs(i) for i in ys_new) ys_new = [abs(i)/ys_new_sum for i in ys_new] break err, comp_diff = 0.0, 0.0 for i in cmps: err_i = Ks[i]*xs[i]/ys[i] - 1.0 err += err_i*err_i + abs(ys[i] - zs[i]) comp_diff += abs(xs[i] - ys[i]) # Accept the new compositions # xs, ys = xs_new, zs # This has worse convergence behavior? 
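# As written, the loop ends only when both the equilibrium error
# sum((K_i*x_i/y_i - 1)^2 + |y_i - z_i|) and the outer secant residual
# err0 = 1 - V_over_F fall below `tol`; the secant step on zs[idx] therefore
# keeps adjusting the dry-basis feed until the converged flash sits at a
# vapor fraction of one, and the adjusted feed `zs` is what gets returned.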
xs, ys = xs_new, ys_new if comp_diff < trivial_solution_tol: raise ValueError("Converged to trivial condition, compositions of both phases equal") if err < tol and abs(err0) < tol: return V_over_F, xs, zs, l, g, iteration, err, err0 raise UnconvergedError('End of SS without convergence') def SS_VF_simultaneous(guess, fixed_val, zs, liquid_phase, gas_phase, iter_var='T', fixed_var='P', V_over_F=1, maxiter=200, xtol=1E-10, comp_guess=None, damping=0.8, tol_eq=1e-12, update_frequency=3): if comp_guess is None: comp_guess = zs if V_over_F == 1 or V_over_F > 0.5: dew = True xs, ys = comp_guess, zs else: dew = False xs, ys = zs, comp_guess sln = sequential_substitution_2P_HSGUAbeta(zs=zs, xs_guess=xs, ys_guess=ys, liquid_phase=liquid_phase, gas_phase=gas_phase, fixed_var_val=fixed_val, spec_val=V_over_F, tol_spec=xtol, iter_var_0=guess, update_frequency=update_frequency, iter_var=iter_var, fixed_var=fixed_var, spec='beta', damping=damping, tol_eq=tol_eq) guess, _, xs, ys, l, g, iteration, err_eq, spec_err = sln if dew: comp_guess = xs iter_phase, const_phase = l, g else: comp_guess = ys iter_phase, const_phase = g, l return guess, comp_guess, iter_phase, const_phase, iteration, {'err_eq': err_eq, 'spec_err': spec_err} def sequential_substitution_2P_HSGUAbeta(zs, xs_guess, ys_guess, liquid_phase, gas_phase, fixed_var_val, spec_val, iter_var_0, iter_var_1=None, iter_var='T', fixed_var='P', spec='H', maxiter=1000, tol_eq=1E-13, tol_spec=1e-9, trivial_solution_tol=1e-5, damping=1.0, V_over_F_guess=None, fprime=True, update_frequency=1, update_eq=1e-7): xs, ys = xs_guess, ys_guess if V_over_F_guess is None: V_over_F = 0.5 else: V_over_F = V_over_F_guess cmps = range(len(zs)) if iter_var_1 is None: iter_var_1 = iter_var_0*1.0001 + 1e-4 tol_spec_abs = tol_spec*abs(spec_val) if tol_spec_abs == 0.0: if spec == 'beta': tol_spec_abs = 1e-9 else: tol_spec_abs = 1e-7 # secant step/solving p0, p1, spec_err, spec_err_old = None, None, None, None def step(p0, p1, spec_err, spec_err_old, step_der): if p0 is None: return iter_var_0 if p1 is None: return iter_var_1 else: secant_step = spec_err_old*(p1 - p0)/(spec_err_old - spec_err)*damping if fprime and step_der is not None: if abs(step_der) < abs(secant_step): step = step_der new = p0 - step else: step = secant_step new = p1 - step else: new = p1 - secant_step if new < 1e-7: # Only handle positive values, damped steps to .5 new = 0.5*(1e-7 + p0) # print(p0, p1, new) return new TPV_args = {fixed_var: fixed_var_val, iter_var: iter_var_0} VF_spec = spec == 'beta' if not VF_spec: spec_fun_l = getattr(liquid_phase.__class__, spec) spec_fun_g = getattr(gas_phase.__class__, spec) s_der = 'd%s_d%s_%s'%(spec, iter_var, fixed_var) spec_der_fun_l = getattr(liquid_phase.__class__, s_der) spec_der_fun_g = getattr(gas_phase.__class__, s_der) else: V_over_F = iter_var_0 step_der = None for iteration in range(maxiter): if (not (iteration % update_frequency) or err_eq < update_eq) or iteration < 2: p0, p1 = step(p0, p1, spec_err, spec_err_old, step_der), p0 TPV_args[iter_var] = p0 g = gas_phase.to(ys, **TPV_args) l = liquid_phase.to(xs, **TPV_args) lnphis_g = g.lnphis() lnphis_l = l.lnphis() Ks = [exp(lnphis_l[i] - lnphis_g[i]) for i in cmps] V_over_F, xs_new, ys_new = flash_inner_loop(zs, Ks, guess=V_over_F) if not VF_spec: spec_calc = spec_fun_l(l)*(1.0 - V_over_F) + spec_fun_g(g)*V_over_F spec_der_calc = spec_der_fun_l(l)*(1.0 - V_over_F) + spec_der_fun_g(g)*V_over_F # print(spec_der_calc) else: spec_calc = V_over_F if (not (iteration % update_frequency) or err_eq < 
update_eq) or iteration < 2: spec_err_old = spec_err # Only update old error on an update iteration spec_err = spec_calc - spec_val try: step_der = spec_err/spec_der_calc # print(spec_err, step_der, p1-p0) except: pass # Check for negative fractions - normalize only if needed for xi in xs_new: if xi < 0.0: xs_new_sum_inv = 1.0/sum(abs(i) for i in xs_new) xs_new = [abs(i)*xs_new_sum_inv for i in xs_new] break for yi in ys_new: if yi < 0.0: ys_new_sum_inv = 1.0/sum(abs(i) for i in ys_new) ys_new = [abs(i)*ys_new_sum_inv for i in ys_new] break err_eq, comp_diff = 0.0, 0.0 for i in cmps: err_i = Ks[i]*xs[i]/ys[i] - 1.0 err_eq += err_i*err_i comp_diff += abs(xs[i] - ys[i]) # Accept the new compositions # xs, ys = xs_new, zs # This has worse convergence behavior; seems to not even converge some of the time xs, ys = xs_new, ys_new if comp_diff < trivial_solution_tol and iteration: # Allow the first iteration to start with the same composition raise ValueError("Converged to trivial condition, compositions of both phases equal") print('Guess: %g, Eq Err: %g, Spec Err: %g, VF: %g' %(p0, err_eq, spec_err, V_over_F)) # print(p0, err_eq, spec_err, V_over_F) # print(p0, err, spec_err, xs, ys, V_over_F) if err_eq < tol_eq and abs(spec_err) < tol_spec_abs: return p0, V_over_F, xs, ys, l, g, iteration, err_eq, spec_err raise UnconvergedError('End of SS without convergence') def sequential_substitution_2P_double(zs, xs_guess, ys_guess, liquid_phase, gas_phase, guess, spec_vals, iter_var0='T', iter_var1='P', spec_vars=['H', 'S'], maxiter=1000, tol_eq=1E-13, tol_specs=1e-9, trivial_solution_tol=1e-5, damping=1.0, V_over_F_guess=None, fprime=True): xs, ys = xs_guess, ys_guess if V_over_F_guess is None: V_over_F = 0.5 else: V_over_F = V_over_F_guess cmps = range(len(zs)) iter0_val = guess[0] iter1_val = guess[1] spec0_val = spec_vals[0] spec1_val = spec_vals[1] spec0_var = spec_vars[0] spec1_var = spec_vars[1] spec0_fun_l = getattr(liquid_phase.__class__, spec0_var) spec0_fun_g = getattr(gas_phase.__class__, spec0_var) spec1_fun_l = getattr(liquid_phase.__class__, spec1_var) spec1_fun_g = getattr(gas_phase.__class__, spec1_var) spec0_der0 = 'd%s_d%s_%s'%(spec0_var, iter_var0, iter_var1) spec1_der0 = 'd%s_d%s_%s'%(spec1_var, iter_var0, iter_var1) spec0_der1 = 'd%s_d%s_%s'%(spec0_var, iter_var1, iter_var0) spec1_der1 = 'd%s_d%s_%s'%(spec1_var, iter_var1, iter_var0) spec0_der0_fun_l = getattr(liquid_phase.__class__, spec0_der0) spec0_der0_fun_g = getattr(gas_phase.__class__, spec0_der0) spec1_der0_fun_l = getattr(liquid_phase.__class__, spec1_der0) spec1_der0_fun_g = getattr(gas_phase.__class__, spec1_der0) spec0_der1_fun_l = getattr(liquid_phase.__class__, spec0_der1) spec0_der1_fun_g = getattr(gas_phase.__class__, spec0_der1) spec1_der1_fun_l = getattr(liquid_phase.__class__, spec1_der1) spec1_der1_fun_g = getattr(gas_phase.__class__, spec1_der1) step_der = None for iteration in range(maxiter): TPV_args[iter_var0] = iter0_val TPV_args[iter_var1] = iter1_val g = gas_phase.to(zs=ys, **TPV_args) l = liquid_phase.to(zs=xs, **TPV_args) lnphis_g = g.lnphis() lnphis_l = l.lnphis() Ks = [exp(lnphis_l[i] - lnphis_g[i]) for i in cmps] V_over_F, xs_new, ys_new = flash_inner_loop(zs, Ks, guess=V_over_F) spec0_calc = spec0_fun_l(l)*(1.0 - V_over_F) + spec0_fun_g(g)*V_over_F spec1_calc = spec1_fun_l(l)*(1.0 - V_over_F) + spec1_fun_g(g)*V_over_F spec0_der0_calc = spec0_der0_fun_l(l)*(1.0 - V_over_F) + spec0_der0_fun_g(g)*V_over_F spec0_der1_calc = spec0_der1_fun_l(l)*(1.0 - V_over_F) + spec0_der1_fun_g(g)*V_over_F 
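# The two phase-fraction-weighted derivatives computed above, together with
# the two computed just below, assemble the 2x2 Jacobian
# d(spec0, spec1)/d(iter_var0, iter_var1); the Newton step then solves
#     J @ [d_iter0, d_iter1] = -[err0, err1]
# with py_solve and applies `damping` to the update.
# A minimal standalone sketch of the same 2x2 update by Cramer's rule,
# with hypothetical H/S-vs-T/P names (illustrative only, not the library API):
#     a, b = dH_dT, dH_dP
#     c, d = dS_dT, dS_dP
#     e0, e1 = H_calc - H_spec, S_calc - S_spec
#     det = a*d - b*c
#     dT = (b*e1 - d*e0)/det
#     dP = (c*e0 - a*e1)/det
#     T, P = T + damping*dT, P + damping*dP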
spec1_der0_calc = spec1_der0_fun_l(l)*(1.0 - V_over_F) + spec1_der0_fun_g(g)*V_over_F spec1_der1_calc = spec1_der1_fun_l(l)*(1.0 - V_over_F) + spec1_der1_fun_g(g)*V_over_F errs = [spec0_calc - spec0_val, spec1_calc - spec1_val] jac = [[spec0_der0_calc, spec0_der1_calc], [spec1_der0_calc, spec1_der1_calc]] # Do the newton step dx = py_solve(jac, [-v for v in errs]) iter0_val, iter1_val = [xi + dxi*damping for xi, dxi in zip([iter0_val, iter1_val], dx)] # Check for negative fractions - normalize only if needed for xi in xs_new: if xi < 0.0: xs_new_sum = sum(abs(i) for i in xs_new) xs_new = [abs(i)/xs_new_sum for i in xs_new] break for yi in ys_new: if yi < 0.0: ys_new_sum = sum(abs(i) for i in ys_new) ys_new = [abs(i)/ys_new_sum for i in ys_new] break err, comp_diff = 0.0, 0.0 for i in cmps: err_i = Ks[i]*xs[i]/ys[i] - 1.0 err += err_i*err_i comp_diff += abs(xs[i] - ys[i]) xs, ys = xs_new, ys_new if comp_diff < trivial_solution_tol: raise ValueError("Converged to trivial condition, compositions of both phases equal") if err < tol_eq and abs(err0) < tol_spec_abs: return p0, V_over_F, xs, ys, l, g, iteration, err, err0 raise UnconvergedError('End of SS without convergence') def stability_iteration_Michelsen(trial_phase, zs_test, test_phase=None, maxiter=20, xtol=1E-12): # So long as for both trial_phase, and test_phase use the lowest Gibbs energy fugacities, no need to test two phases. # Very much no need to converge using acceleration - just keep a low tolerance # At any point, can use the Ks working, assume a drop of the new phase, and evaluate two new phases and see if G drops. # If it does, drop out early! This implementation does not do that. # Should be possible to tell if converging to trivial solution during the process - and bail out then # It is possible to switch this function to operated on lnphis e.g. # corrections[i] = ci = zs[i]/zs_test[i]*trunc_exp(lnphis_trial[i] - lnphis_test[i])*sum_zs_test_inv # however numerical differences seem to be huge and operate better on fugacities with the trunc_exp function # then anything else. # Can this whole function be switched to the functional approach? 
# Should be possible if test_phase is None: test_phase = trial_phase T, P, zs = trial_phase.T, trial_phase.P, trial_phase.zs N = trial_phase.N fugacities_trial = trial_phase.fugacities_lowest_Gibbs() # Go through the feed composition - and the trial composition - if we have zeros, need to make them a trace; zs_test2 = [0.0]*N for i in range(N): zs_test2[i] = zs_test[i] zs_test = zs_test2 for i in range(N): if zs_test[i] == 0.0: zs_test[i] = 1e-50 # break for i in range(N): if zs[i] == 0.0: zs2 = [0.0]*N for i in range(N): if zs[i] == 0.0: zs2[i] = 1e-50 else: zs2[i] = zs[i] zs = zs2 # Requires another evaluation of the trial phase trial_phase = trial_phase.to(T=T, P=P, zs=zs) fugacities_trial = trial_phase.fugacities_lowest_Gibbs() break # Basis of equations is for the test phase being a gas, the trial phase assumed is a liquid # makes no real difference Ks = [0.0]*N corrections = [1.0]*N # Model converges towards fictional K values which, when evaluated, yield the # stationary point composition for i in range(N): Ks[i] = zs_test[i]/zs[i] sum_zs_test = sum_zs_test_inv = 1.0 converged = False for _ in range(maxiter): # test_phase = test_phase.to(T=T, P=P, zs=zs_test) # fugacities_test = test_phase.fugacities_lowest_Gibbs() fugacities_test = test_phase.fugacities_at_zs(zs_test) err = 0.0 try: for i in range(N): corrections[i] = ci = fugacities_trial[i]/fugacities_test[i]*sum_zs_test_inv Ks[i] *= ci err += (ci - 1.0)*(ci - 1.0) except: # A test fugacity became zero # May need special handling for this outside. converged = True break if err < xtol: converged = True break # Update compositions for the next iteration - might as well move this above the break check for i in range(N): zs_test[i] = Ks[i]*zs[i] # new test phase comp # Cannot move the normalization above the error check - returning # unnormalized sum_zs_test is used also to detect a trivial solution sum_zs_test = 0.0 for i in range(N): sum_zs_test += zs_test[i] try: sum_zs_test_inv = 1.0/sum_zs_test except: # Fugacities are all zero converged = True break for i in range(N): zs_test[i] *= sum_zs_test_inv if converged: try: V_over_F, xs, ys = V_over_F, trial_zs, appearing_zs = flash_inner_loop(zs, Ks) except: # Converged to trivial solution so closely the math does not work V_over_F, xs, ys = V_over_F, trial_zs, appearing_zs = 0.0, zs, zs # Calculate the dG of the feed dG_RT = 0.0 if V_over_F != 0.0: lnphis_test = test_phase.lnphis_at_zs(zs_test) #test_phase.lnphis() for i in range(N): dG_RT += zs_test[i]*(log(zs_test[i]) + lnphis_test[i]) dG_RT *= V_over_F # print(dG_RT) return sum_zs_test, Ks, zs_test, V_over_F, trial_zs, appearing_zs, dG_RT else: raise UnconvergedError('End of stability_iteration_Michelsen without convergence') def TPV_double_solve_1P(zs, phase, guesses, spec_vals, goal_specs=('V', 'U'), state_specs=('T', 'P'), maxiter=200, xtol=1E-10, ytol=None, spec_funs=None): kwargs = {'zs': zs} phase_cls = phase.__class__ s00 = 'd%s_d%s_%s' %(goal_specs[0], state_specs[0], state_specs[1]) s01 = 'd%s_d%s_%s' %(goal_specs[0], state_specs[1], state_specs[0]) s10 = 'd%s_d%s_%s' %(goal_specs[1], state_specs[0], state_specs[1]) s11 = 'd%s_d%s_%s' %(goal_specs[1], state_specs[1], state_specs[0]) try: err0_fun = getattr(phase_cls, goal_specs[0]) err1_fun = getattr(phase_cls, goal_specs[1]) j00 = getattr(phase_cls, s00) j01 = getattr(phase_cls, s01) j10 = getattr(phase_cls, s10) j11 = getattr(phase_cls, s11) except: pass cache = [] def to_solve(states): kwargs[state_specs[0]] = float(states[0]) kwargs[state_specs[1]] = float(states[1]) 
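# The two state values proposed by the outer solver are written into `kwargs`
# above; the phase is then re-created at that state and the two goal
# properties plus their cross-derivatives (looked up by name, e.g. 'dV_dT_P'
# for the default V/U vs T/P specs) are evaluated, falling back to
# `phase.value(...)` string lookups when those attributes are not available
# on the phase class.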
new = phase.to(**kwargs) try: v0, v1 = err0_fun(new), err1_fun(new) jac = [[j00(new), j01(new)], [j10(new), j11(new)]] except: v0, v1 = new.value(goal_specs[0]), new.value(goal_specs[1]) jac = [[new.value(s00), new.value(s01)], [new.value(s10), new.value(s11)]] if spec_funs is not None: err0 = v0 - spec_funs[0](new) err1 = v1 - spec_funs[1](new) else: err0 = v0 - spec_vals[0] err1 = v1 - spec_vals[1] errs = [err0, err1] cache[:] = [new, errs, jac] print(kwargs, errs) return errs, jac # states, iterations = newton_system(to_solve, x0=guesses, jac=True, xtol=xtol, ytol=ytol, maxiter=maxiter, damping_func=damping_maintain_sign) phase = cache[0] err = cache[1] jac = cache[2] return states, phase, iterations, err, jac def assert_stab_success_2P(liq, gas, stab, T, P, zs, guess_name, xs=None, ys=None, VF=None, SS_tol=1e-15, rtol=1e-7): r'''Basic function - perform a specified stability test, and then a two-phase flash using it Check on specified variables the method is working. ''' gas = gas.to(T=T, P=P, zs=zs) liq = liq.to(T=T, P=P, zs=zs) trial_comp = stab.incipient_guess_named(T, P, zs, guess_name) if liq.G() < gas.G(): min_phase, other_phase = liq, gas else: min_phase, other_phase = gas, liq _, _, _, V_over_F, trial_zs, appearing_zs, dG_RT = stability_iteration_Michelsen(min_phase, trial_comp, test_phase=other_phase, maxiter=100) V_over_F, xs_calc, ys_calc, l, g, iteration, err = sequential_substitution_2P(T=T, P=P, V=None, zs=zs, xs_guess=trial_zs, ys_guess=appearing_zs, liquid_phase=min_phase, tol=SS_tol, gas_phase=other_phase) if xs_calc is not None: assert_close1d(xs, xs_calc, rtol) if ys_calc is not None: assert_close1d(ys, ys_calc, rtol) if VF is not None: assert_close(V_over_F, VF, rtol) assert_close1d(l.fugacities(), g.fugacities(), rtol) def TPV_solve_HSGUA_guesses_VL(zs, method, constants, correlations, fixed_var_val, spec_val, iter_var='T', fixed_var='P', spec='H', maxiter=20, xtol=1E-7, ytol=None, bounded=False, min_bound=None, max_bound=None, user_guess=None, last_conv=None, T_ref=298.15, P_ref=101325.0): global V_over_F_guess V_over_F_guess = 0.5 cmps = range(constants.N) Tcs, Pcs, omegas = constants.Tcs, constants.Pcs, constants.omegas if fixed_var == iter_var: raise ValueError("Fixed variable cannot be the same as iteration variable") if fixed_var not in ('T', 'P', 'V'): raise ValueError("Fixed variable must be one of `T`, `P`, `V`") if iter_var not in ('T', 'P', 'V'): raise ValueError("Iteration variable must be one of `T`, `P`, `V`") if spec not in ('H', 'S', 'G', 'U', 'A'): raise ValueError("Spec variable must be one of `H`, `S`, `G` `U`, `A`") cmps = range(len(zs)) iter_T = iter_var == 'T' iter_P = iter_var == 'P' iter_V = iter_var == 'V' fixed_P = fixed_var == 'P' fixed_T = fixed_var == 'T' fixed_V = fixed_var == 'V' if fixed_P: P = fixed_var_val elif fixed_T: T = fixed_var_val elif fixed_V: V = fixed_var_val always_S = spec in ('S', 'G', 'A') always_H = spec in ('H', 'G', 'U', 'A') always_V = spec in ('U', 'A') def H_model(T, P, xs, ys, V_over_F): if V_over_F >= 1.0: return H_model_g(T, P, zs) elif V_over_F <= 0.0: return H_model_l(T, P, zs) H_liq = H_model_l(T, P, xs) H_gas = H_model_g(T, P, ys) return H_liq*(1.0 - V_over_F) + V_over_F*H_gas def S_model(T, P, xs, ys, V_over_F): if V_over_F >= 1.0: return S_model_g(T, P, zs) elif V_over_F <= 0.0: return S_model_l(T, P, zs) S_liq = S_model_l(T, P, xs) S_gas = S_model_g(T, P, ys) return S_liq*(1.0 - V_over_F) + V_over_F*S_gas def V_model(T, P, xs, ys, V_over_F): if V_over_F >= 1.0: return V_model_g(T, P, zs) elif V_over_F 
<= 0.0: return V_model_l(T, P, zs) V_liq = V_model_l(T, P, xs) V_gas = V_model_g(T, P, ys) return V_liq*(1.0 - V_over_F) + V_over_F*V_gas # whhat goes in here? if always_S: P_ref_inv = 1.0/P_ref dS_ideal = R*sum([zi*log(zi) for zi in zs if zi > 0.0]) # ideal composition entropy composition info = [] def err(guess): # Translate the fixed variable to a local variable if fixed_P: P = fixed_var_val elif fixed_T: T = fixed_var_val elif fixed_V: V = fixed_var_val T = None # Translate the iteration variable to a local variable if iter_P: P = guess if not fixed_V: V = None elif iter_T: T = guess if not fixed_V: V = None elif iter_V: V = guess T = None if T is None: T = T_from_V(V, P, zs) VF, xs, ys = flash_model(T, P, zs) info[:] = VF, xs, ys # Compute S, H, V as necessary if always_S: S = S_model(T, P, xs, ys, VF) - dS_ideal - R*log(P*P_ref_inv) if always_H: H = H_model(T, P, xs, ys, VF) if always_V and V is None: V = V_model(T, P, xs, ys, VF) # Return the objective function if spec == 'H': err = H - spec_val elif spec == 'S': err = S - spec_val elif spec == 'G': err = (H - T*S) - spec_val elif spec == 'U': err = (H - P*V) - spec_val elif spec == 'A': err = (H - P*V - T*S) - spec_val # print(T, P, V, 'TPV', err) return err # Common models VolumeLiquids = correlations.VolumeLiquids def V_model_l(T, P, zs): V_calc = 0. for i in cmps: V_calc += zs[i]*VolumeLiquids[i].T_dependent_property(T) return V_calc def T_from_V_l(V, P, zs): T_calc = 0. for i in cmps: T_calc += zs[i]*VolumeLiquids[i].solve_property(V) return T_calc def V_model_g(T, P, zs): return R*T/P def T_from_V_g(V, P, zs): return P*V/R if method == IDEAL_WILSON or method == SHAW_ELEMENTAL: if iter_P: if fixed_T: T_inv = 1.0/T Ks_P = [Pcs[i]*exp((5.37*(1.0 + omegas[i])*(1.0 - Tcs[i]*T_inv))) for i in cmps] def flash_model(T, P, zs): global V_over_F_guess P_inv = 1.0/P if not fixed_T: T_inv = 1.0/T Ks_P_local = [Pcs[i]*exp((5.37*(1.0 + omegas[i])*(1.0 - Tcs[i]*T_inv))) for i in cmps] Ks = [Ki*P_inv for Ki in Ks_P_local] else: Ks = [Ki*P_inv for Ki in Ks_P] K_low, K_high = False, False for i in cmps: if zs[i] != 0.0: if Ks[i] > 1.0: K_high = True else: K_low = True if K_high and K_low: break if K_high and K_low: V_over_F_guess, xs, ys = Rachford_Rice_solution_LN2(zs, Ks, V_over_F_guess) return V_over_F_guess, xs, ys elif K_high: return 1.0, zs, zs else: return 0.0, zs, zs else: P_inv = 1.0/P def flash_model(T, P, zs): global V_over_F_guess T_inv = 1.0/T Ks = [Pcs[i]*P_inv*exp((5.37*(1.0 + omegas[i])*(1.0 - Tcs[i]*T_inv))) for i in cmps] K_low, K_high = False, False for i in cmps: if zs[i] != 0.0: if Ks[i] > 1.0: K_high = True else: K_low = True if K_high and K_low: break if K_high and K_low: V_over_F_guess, xs, ys = Rachford_Rice_solution_LN2(zs, Ks, V_over_F_guess) return V_over_F_guess, xs, ys elif K_high: return 1.0, zs, zs else: return 0.0, zs, zs if method == SHAW_ELEMENTAL: VolumeLiquids = correlations.VolumeLiquids MWs, n_atoms = constants.MWs, constants.n_atoms def H_model_g(T, P, zs): MW_g, sv_g = 0.0, 0.0 for i in cmps: MW_g += MWs[i]*zs[i] sv_g += n_atoms[i]*zs[i] sv_g /= MW_g H_ref_LS = Lastovka_Shaw_integral(T_ref, sv_g) H1 = Lastovka_Shaw_integral(T, sv_g) dH = H1 - H_ref_LS H_gas = 1e-3*dH*MW_g #property_mass_to_molar(dH, MW_g) return H_gas def S_model_g(T, P, zs): MW_g, sv_g = 0.0, 0.0 for i in cmps: MW_g += MWs[i]*zs[i] sv_g += n_atoms[i]*zs[i] sv_g /= MW_g S_ref_LS = Lastovka_Shaw_integral_over_T(T_ref, sv_g) S1 = Lastovka_Shaw_integral_over_T(T, sv_g) dS = S1 - S_ref_LS S_gas = 1e-3*dS*MW_g return S_gas def H_model_l(T, P, 
zs): MW_l, sv_l, Tc_l, omega_l = 0.0, 0.0, 0.0, 0.0 for i in cmps: MW_l += MWs[i]*zs[i] sv_l += n_atoms[i]*zs[i] Tc_l += Tcs[i]*zs[i] omega_l += omegas[i]*zs[i] sv_l /= MW_l H_ref_DS = Dadgostar_Shaw_integral(T_ref, sv_l) H1 = Dadgostar_Shaw_integral(T, sv_l) Hvap = SMK(T, Tc_l, omega_l) dH = H1 - H_ref_DS H_liq = 1e-3*dH*MW_l #property_mass_to_molar(dH, MW_l) return (H_liq - Hvap) def S_model_l(T, P, zs): MW_l, sv_l, Tc_l, omega_l = 0.0, 0.0, 0.0, 0.0 for i in cmps: MW_l += MWs[i]*zs[i] sv_l += n_atoms[i]*zs[i] Tc_l += Tcs[i]*zs[i] omega_l += omegas[i]*zs[i] sv_l /= MW_l S_ref_DS = Dadgostar_Shaw_integral_over_T(T_ref, sv_l) S1 = Dadgostar_Shaw_integral_over_T(T, sv_l) Hvap = SMK(T, Tc_l, omega_l) dS = S1 - S_ref_DS S_liq = 1e-3*dS*MW_l return (S_liq - Hvap/T) elif method == IDEAL_WILSON: HeatCapacityGases = correlations.HeatCapacityGases EnthalpyVaporizations = correlations.EnthalpyVaporizations def flash_model(T, P, zs): _, _, VF, xs, ys = flash_wilson(zs, constants.Tcs, constants.Pcs, constants.omegas, T=T, P=P) return VF, xs, ys def H_model_g(T, P, zs): H_calc = 0. for i in cmps: H_calc += zs[i]*HeatCapacityGases[i].T_dependent_property_integral(T_ref, T) return H_calc def S_model_g(T, P, zs): S_calc = 0. for i in cmps: S_calc += zs[i]*HeatCapacityGases[i].T_dependent_property_integral_over_T(T_ref, T) return S_calc def H_model_l(T, P, zs): H_calc = 0. for i in cmps: H_calc += zs[i]*(HeatCapacityGases[i].T_dependent_property_integral(T_ref, T) - EnthalpyVaporizations[i](T)) return H_calc def S_model_l(T, P, zs): S_calc = 0. T_inv = 1.0/T for i in cmps: S_calc += zs[i]*(HeatCapacityGases[i].T_dependent_property_integral_over_T(T_ref, T) - T_inv*EnthalpyVaporizations[i](T)) return S_calc try: # All three variables P, T, V are positive but can grow unbounded, so # for the secant method, only set the one variable if iter_T: guess = 298.15 elif iter_P: guess = 101325.0 elif iter_V: guess = 0.024465403697038125 val = secant(err, guess, xtol=xtol, ytol=ytol, maxiter=maxiter, bisection=True, low=min_bound, require_xtol=False) return val, info[0], info[1], info[2] except (UnconvergedError,) as e: val = brenth(err, min_bound, max_bound, xtol=xtol, ytol=ytol, maxiter=maxiter) return val, info[0], info[1], info[2] global cm_flash cm_flash = None def cm_flash_tol(): global cm_flash if cm_flash is not None: return cm_flash from matplotlib.colors import ListedColormap N = 100 vals = np.zeros((N, 4)) vals[:, 3] = np.ones(N) # Grey for 1e-10 to 1e-7 low = 40 vals[:low, 0] = np.linspace(100/256, 1, low)[::-1] vals[:low, 1] = np.linspace(100/256, 1, low)[::-1] vals[:low, 2] = np.linspace(100/256, 1, low)[::-1] # green 1e-6 to 1e-5 ok = 50 vals[low:ok, 1] = np.linspace(100/256, 1, ok-low)[::-1] # Blue 1e-5 to 1e-3 mid = 70 vals[ok:mid, 2] = np.linspace(100/256, 1, mid-ok)[::-1] # Red 1e-3 and higher vals[mid:101, 0] = np.linspace(100/256, 1, 100-mid)[::-1] newcmp = ListedColormap(vals) cm_flash = newcmp return cm_flash def deduplicate_stab_results(results, tol_frac_err=5e-3): if not results: return results N = len(results[0][0]) cmps = range(N) results.sort(key=lambda x: (x[0][0], x[2])) good_results = [results[0]] for t in results[1:]: xs_last, ys_last = good_results[-1][0], good_results[-1][1] xs, ys = t[0], t[1] diff_x = sum([abs(xs[i] - xs_last[i]) for i in cmps])/N diff_y = sum([abs(ys[i] - ys_last[i]) for i in cmps])/N if diff_x > tol_frac_err or diff_y > tol_frac_err: good_results.append(t) return good_results empty_flash_conv = {'iterations': 0, 'err': 0.0, 'stab_guess_name': None} one_in_list = 
[1.0] empty_list = []
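# The flash_model logic above estimates ideal-solution K-values with the Wilson
# correlation, K_i = (Pc_i/P)*exp(5.37*(1 + omega_i)*(1 - Tc_i/T)), and hands them
# to a Rachford-Rice solve for the vapor fraction. The following is a minimal
# standalone sketch of that idea using only the standard library and a plain
# bisection in place of Rachford_Rice_solution_LN2; the helper names and the
# example component values are illustrative, not the implementation used here.
from math import exp

def wilson_Ks(T, P, Tcs, Pcs, omegas):
    """Ideal-solution K-value estimates from the Wilson correlation."""
    return [Pcs[i]/P*exp(5.37*(1.0 + omegas[i])*(1.0 - Tcs[i]/T))
            for i in range(len(Tcs))]

def rachford_rice_VF(zs, Ks, tol=1e-12):
    """Solve sum_i zs[i]*(Ks[i]-1)/(1 + VF*(Ks[i]-1)) = 0 for VF by bisection."""
    def err(VF):
        return sum(zi*(Ki - 1.0)/(1.0 + VF*(Ki - 1.0)) for zi, Ki in zip(zs, Ks))
    low, high = 1e-12, 1.0 - 1e-12
    if err(low) <= 0.0:
        return 0.0  # err decreases with VF; already negative at VF ~ 0 -> all liquid
    if err(high) >= 0.0:
        return 1.0  # still positive at VF ~ 1 -> all vapor
    while high - low > tol:
        mid = 0.5*(low + high)
        if err(mid) > 0.0:
            low = mid
        else:
            high = mid
    return 0.5*(low + high)

# Illustrative use: a 50/50 methane/n-pentane feed at 300 K and 5 bar
Tcs, Pcs, omegas = [190.564, 469.7], [4599000.0, 3370000.0], [0.008, 0.251]
zs = [0.5, 0.5]
Ks = wilson_Ks(300.0, 5e5, Tcs, Pcs, omegas)
VF = rachford_rice_VF(zs, Ks)
xs = [zi/(1.0 + VF*(Ki - 1.0)) for zi, Ki in zip(zs, Ks)]  # liquid composition
ys = [Ki*xi for Ki, xi in zip(Ks, xs)]                      # vapor composition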
# -*- coding: utf-8 -*-
'''Chemical Engineering Design Library (ChEDL). Utilities for process modeling.
Copyright (C) 2018, Caleb Bell <Caleb.Andrew.Bell@gmail.com>

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.'''

import numpy as np
from numpy.testing import assert_allclose
import pytest
from thermo.utils import TPD
from thermo.eos import *
from thermo.eos_mix import *
from scipy.misc import derivative
from scipy.optimize import minimize, newton
from math import log, exp, sqrt, log10
from thermo import Mixture
from thermo.property_package import *
from fluids.numerics import linspace, logspace, normalize
from thermo.property_package_constants import (PropertyPackageConstants, PR_PKG)


@pytest.mark.deprecated
def test_bubble_T_PR():
    # Copied to VL! Can't get last point to converge.
    Ps = np.logspace(np.log10(1e3), np.log10(8e6), 100).tolist()
    # Value working for sure!
    # A long enough list of points may reveal errors
    # Need to check for singularities in results!
    # Lagrange multiplier is needed.
T_bubbles_expect = [135.77792634341301, 136.56179975223873, 137.35592304111714, 138.1605125904237, 138.97579118069618, 139.80198815378043, 140.63933971310234, 141.48808915266713, 142.34848716775062, 143.22079210796352, 144.10527026879004, 145.00219623035326, 145.9118531621595, 146.8345331709676, 147.77053765471518, 148.7201776796149, 149.68377437184307, 150.66165932879846, 151.65417505244912, 152.6616753977778, 153.68452605664353, 154.72310505184726, 155.7778032642612, 156.8490249894867, 157.937188514101, 159.04272673536184, 160.16608780166473, 161.30773579673297, 162.46815145564204, 163.64783292476886, 164.84729656230823, 166.06707778415586, 167.30773196086088, 168.56983536585116, 169.8539861804285, 171.16080556094636, 172.49093877035423, 173.84505638241404, 175.22385556194536, 176.6280614293828, 178.058428515323, 179.51574231484207, 181.00082094865053, 182.5145169422077, 184.0577191341151, 185.63135472512306, 187.2363914833706, 188.8738401205766, 190.54475685783353, 192.25024620138348, 193.991463951159, 195.76962046909824, 197.5859842371162, 199.4418857394953, 201.33872170960848, 203.27795978657647, 205.26114363572563, 207.28989859303456, 209.36593790645554, 211.49106965667633, 213.66720445521423, 215.89636403432021, 218.18069086349888, 220.52245895198226, 222.92408602593875, 225.3881473051149, 227.91739114691686, 230.5147568796014, 233.18339521130144, 235.92669168167328, 238.74829372436815, 241.65214202994656, 244.64250705759693, 247.7240317371467, 250.90178165300227, 254.18130431821905, 257.5686995555806, 261.07070353354993, 264.69478970158224, 268.44929079409445, 272.3435473154688, 276.3880896135361, 280.59486299764814, 284.9775086709067, 289.5517180159047, 294.3356847958481, 299.35069043485873, 304.62187400558975, 310.17926492998157, 316.059200210731, 322.3063237832385, 328.97650301847204, 336.14126110695065, 343.8948656757251, 352.36642480869347, 361.7423599546769, 372.31333661508177, 384.5907961800425, 399.6948959805394, 422.0030866468656] m = Mixture(['CO2', 'n-hexane'], zs=[.5, .5], T=300, P=1E6) pkg = GceosBase(eos_mix=PRMIX, VaporPressures=m.VaporPressures, Tms=m.Tms, Tbs=m.Tbs, Tcs=m.Tcs, Pcs=m.Pcs, omegas=m.omegas, kijs=[[0,0],[0,0]], eos_kwargs=None) bubs = [] for P in Ps: bubs.append(pkg.bubble_T(P, m.zs, maxiter=20, xtol=1e-10, maxiter_initial=20, xtol_initial=1e-1)[-3]) assert_allclose(bubs, T_bubbles_expect, rtol=5e-6) @pytest.mark.deprecated def test_PR_four_bubble_dew_cases(): m = Mixture(['furfural', 'furfuryl alcohol'], zs=[.5, .5], T=300, P=1E6) pkg = GceosBase(eos_mix=PRMIX, VaporPressures=m.VaporPressures, Tms=[235.9, 250.35], Tbs=[434.65, 441.15], Tcs=[670.0, 632.0], Pcs=[5510000.0, 5350000.0], omegas=[0.4522, 0.734], kijs=[[0,0],[0,0]], eos_kwargs=None, HeatCapacityGases=m.HeatCapacityGases) # Strongly believed to be correct! 
assert_allclose(pkg.bubble_T(P=1e6, zs=m.zs)[-3], 539.1838522423355, atol=.1) assert_allclose(pkg.dew_T(P=1e6, zs=m.zs)[-3], 540.208169750248, atol=.1) assert_allclose(pkg.dew_P(T=600, zs=m.zs)[-3], 2702616.6490743402, rtol=1e-4) assert_allclose(pkg.bubble_P(T=600, zs=m.zs)[-3], 2766476.7473238516, rtol=1e-4) @pytest.mark.deprecated def test_C1_C10_PT_flash(): m = Mixture(['methane', 'C2', 'C3', 'C4', 'C5', 'C6', 'C7', 'C8', 'C9', 'C10'], zs=[.1]*10, T=300, P=1E6) pkg = GceosBase(eos_mix=PRMIX, VaporPressures=m.VaporPressures, Tms=m.Tms, Tbs=m.Tbs, Tcs=m.Tcs, Pcs=m.Pcs, omegas=m.omegas, kijs=None, eos_kwargs=None) pkg.flash(m.zs, T=300, P=1e5) assert_allclose(pkg.V_over_F, 0.3933480636546702, atol=.001) @pytest.mark.deprecated def test_ternary_4_flashes_2_algorithms(): zs = [0.8168, 0.1501, 0.0331] m = Mixture(['n-pentane', 'n-hexane', 'heptane'], zs=zs, T=300, P=1E6) kijs = [[0, 0.00076, 0.00171], [0.00076, 0, 0.00061], [0.00171, 0.00061, 0]] Tcs = [469.7, 507.6, 540.2] Pcs = [3370000.0, 3025000.0, 2740000.0] omegas = [0.251, 0.2975, 0.3457] pkg = GceosBase(eos_mix=PRMIX, VaporPressures=m.VaporPressures, Tms=m.Tms, Tbs=m.Tbs, Tcs=Tcs, Pcs=Pcs, omegas=omegas, kijs=kijs, eos_kwargs=None, HeatCapacityGases=m.HeatCapacityGases) # Test the TVF dew and bubble functions Ts = linspace(160, 473) # Can go up to 474 K in some ones P_dews = [] P_bubbles = [] P_dews_expect = [0.13546805712060028, 0.43921188284030244, 1.2845937999086763, 3.4284388636658223, 8.432934762206317, 19.28325222813278, 41.304893136512625, 83.4340609062355, 159.8684378673581, 292.0910726014042, 511.22209088685435, 860.6178486961774, 1398.6103635474976, 2201.2649842930023, 3365.0325891826838, 5009.183244809382, 7277.928645001543, 10342.16726173515, 14400.815256785363, 19681.7145007659, 26442.1339934372, 34968.90099430364, 45578.21259245172, 58615.18736373379, 74453.22081253518, 93493.20841630214, 116162.69734460281, 142915.02335020038, 174228.483859415, 210605.5927100339, 252572.45694034494, 300678.3120515243, 355495.2497147813, 417618.1714535647, 487665.00399456796, 566277.2176110205, 654120.6992511221, 751887.0498581398, 860295.4040565268, 980094.9175170006, 1112068.1463153881, 1257035.6798685808, 1415862.6391328266, 1589468.1351378255, 1778839.7877655022, 1985057.6930205086, 2209338.09186709, 2453124.6502311486, 2718322.7103527053, 3008161.494037711] P_bubbles_expect = [1.6235349125052008, 4.093581157610554, 9.575333470191898, 20.9520396276609, 43.19443917687544, 84.41963574404814, 157.25506477949756, 280.5086157382652, 481.11896195432473, 796.3336480902728, 1276.039624284318, 1985.1548551180522, 3005.982245837768, 4440.428884348238, 6412.003702807469, 9067.523338810985, 12578.476805989876, 17142.0220330821, 22981.609086464254, 30347.244137353322, 39515.42378295007, 50788.78061416531, 64495.487917554194, 80988.47447946836, 100644.50024948097, 123863.1408495057, 151065.7243154966, 182694.25768005836, 219210.37457192744, 261094.32829701997, 308844.0480934111, 362974.2694768574, 424015.74270421825, 492514.5160789749, 569031.2825613177, 654140.7680276675, 748431.1260918825, 852503.2852406674, 966970.1650283068, 1092455.6319079874, 1229592.988381081, 1379022.655075611, 1541388.4595460077, 1717331.4675977635, 1907479.294701055, 2112426.5546414126, 2332696.247164788, 2568654.2410637783, 2820281.571897286, 3086319.669072729] for T in Ts[2:]: pkg.flash(T=T, VF=0, zs=zs) P_bubbles.append(pkg.P) pkg.flash(T=T, VF=1, zs=zs) P_dews.append(pkg.P) assert_allclose(P_bubbles, P_bubbles_expect[2:], rtol=5e-5) assert_allclose(P_dews, 
P_dews_expect[2:], rtol=5e-5) # For each point, solve it as a T problem. for P, T in zip(P_bubbles, Ts[2:]): pkg.flash(P=P, VF=0, zs=zs) assert_allclose(pkg.T, T, rtol=5e-5) for P, T in zip(P_dews, Ts[2:]): pkg.flash(P=P, VF=1, zs=zs) assert_allclose(pkg.T, T, rtol=5e-5) P_dews_almost = [] P_bubbles_almost = [] for T in Ts[4:]: # Some convergence issues in sequential_substitution_VL at lower pressures pkg.flash(T=T, VF=0+1e-9, zs=zs) P_bubbles_almost.append(pkg.P) pkg.flash(T=T, VF=1-1e-9, zs=zs) P_dews_almost.append(pkg.P) assert_allclose(P_bubbles[2:], P_bubbles_almost, rtol=5e-5) assert_allclose(P_dews[2:], P_dews_almost, rtol=5e-5) # Some points fail here too! for P, T in zip(P_dews_expect[4:-1], Ts[4:-1]): pkg.flash(P=P, VF=1-1e-9, zs=zs) assert_allclose(P, pkg.P) for P, T in zip(P_bubbles_expect[2:-2], Ts[2:-2]): pkg.flash(P=P, VF=0+1e-9, zs=zs) assert_allclose(P, pkg.P) @pytest.mark.deprecated @pytest.mark.slow def test_PVF_parametric_binary_vs_CoolProp(): import CoolProp.CoolProp as CP zs = [0.4, 0.6] m = Mixture(['Ethane', 'Heptane'], zs=zs, T=300, P=1E6) kij = .0067 kijs = [[0,kij],[kij,0]] Tcs = [305.322, 540.13] Pcs = [4872200.0, 2736000.0] omegas = [0.099, 0.349] c1, c2 = PRMIX.c1, PRMIX.c2 PRMIX.c1, PRMIX.c2 = 0.45724, 0.07780 pkg = GceosBase(eos_mix=PRMIX, VaporPressures=m.VaporPressures, Tms=m.Tms, Tbs=m.Tbs, Tcs=Tcs, Pcs=Pcs, omegas=omegas, kijs=kijs, eos_kwargs=None, HeatCapacityGases=m.HeatCapacityGases) pkg.FLASH_VF_TOL = 1e-12 AS = CP.AbstractState("PR", "Ethane&Heptane") AS.set_mole_fractions(zs) AS.set_binary_interaction_double(0,1,"kij", kij) Ps = [10, 100, 1000, 1e4, 5e4, 1e5, 5e5, 1e6, 2e6] for P in Ps: # Up above 2e6, issues arise in thermo VFs = linspace(0, 1) CP_Ts = [] Ts_calc = [] for VF in VFs: try: AS.update(CP.PQ_INPUTS, P, VF); CP_Ts.append(AS.T()) pkg.flash(VF=VF, P=P, zs=zs) Ts_calc.append(pkg.T) except Exception as e: print(VF, e) # print(CP_Ts/np.array(Ts_calc)) # the low pressure and highest pressure regions are the greatest errors # can go down to 1e-6 tol for all, most are 1e-12 assert_allclose(CP_Ts, Ts_calc, rtol=1e-5) PRMIX.c1, PRMIX.c2 = c1, c2 @pytest.mark.deprecated @pytest.mark.slow def test_PVF_parametric_binary_zs_vs_CoolProp(): '''More advanced test of the above. Changes mole fractions. To get more errors, reduce the mole fractions; and wide the P range. 
''' import CoolProp.CoolProp as CP zs = [0.4, 0.6] m = Mixture(['Ethane', 'Heptane'], zs=zs, T=300, P=1E6) kij = .0067 kijs = [[0,kij],[kij,0]] Tcs = [305.322, 540.13] Pcs = [4872200.0, 2736000.0] omegas = [0.099, 0.349] c1, c2 = PRMIX.c1, PRMIX.c2 PRMIX.c1, PRMIX.c2 = 0.45724, 0.07780 pkg = GceosBase(eos_mix=PRMIX, VaporPressures=m.VaporPressures, Tms=m.Tms, Tbs=m.Tbs, Tcs=Tcs, Pcs=Pcs, omegas=omegas, kijs=kijs, eos_kwargs=None, HeatCapacityGases=m.HeatCapacityGases) pkg.FLASH_VF_TOL = 1e-12 AS = CP.AbstractState("PR", "Ethane&Heptane") AS.set_binary_interaction_double(0,1,"kij", kij) zis = linspace(.01, .98, 5) for zi in zis: zs = [1-zi, zi] Ps = [100, 1000, 1e4, 5e4, 1e5, 5e5, 1e6] for P in Ps: # Up above 2e6, issues arise in thermo VFs = linspace(0, 1) CP_Ts = [] Ts_calc = [] for VF in VFs: try: AS.set_mole_fractions(zs) AS.update(CP.PQ_INPUTS, P, VF); CP_Ts.append(AS.T()) pkg.flash(VF=VF, P=P, zs=zs) Ts_calc.append(pkg.T) except Exception as e: print(zi, P, VF, e) # try: # print(CP_Ts/np.array(Ts_calc)) # except: # print('bad shape') assert_allclose(CP_Ts, Ts_calc, rtol=1e-5) PRMIX.c1, PRMIX.c2 = c1, c2 @pytest.mark.deprecated @pytest.mark.xfail def test_failing_sequential_subs(): zs = [0.8168, 0.1501, 0.0331] m = Mixture(['n-pentane', 'n-hexane', 'heptane'], zs=zs, T=300, P=1E6) kijs = [[0, 0.00076, 0.00171], [0.00076, 0, 0.00061], [0.00171, 0.00061, 0]] Tcs = [469.7, 507.6, 540.2] Pcs = [3370000.0, 3025000.0, 2740000.0] omegas = [0.251, 0.2975, 0.3457] pkg = GceosBase(eos_mix=PRMIX, VaporPressures=m.VaporPressures, Tms=m.Tms, Tbs=m.Tbs, Tcs=Tcs, Pcs=Pcs, omegas=omegas, kijs=kijs, eos_kwargs=None, HeatCapacityGases=m.HeatCapacityGases) pkg.to_TP_zs(T=180, P=4, zs=zs).sequential_substitution_VL(maxiter=10,xtol=1E-7) @pytest.mark.deprecated def test_PRMIX_pkg_H(): zs = [0.4, 0.6] m = Mixture(['Ethane', 'Heptane'], zs=zs, T=300, P=1E6) kij = .0 kijs = [[0,kij],[kij,0]] Tcs = [305.322, 540.13] Pcs = [4872200.0, 2736000.0] omegas = [0.099, 0.349] pkg = GceosBase(eos_mix=PRMIX, VaporPressures=m.VaporPressures, Tms=m.Tms, Tbs=m.Tbs, Tcs=Tcs, Pcs=Pcs, omegas=omegas, kijs=kijs, eos_kwargs=None, HeatCapacityGases=m.HeatCapacityGases) pkg.FLASH_VF_TOL = 1e-12 # Case gas -gas pressure difference pkg.flash(T=450, P=400, zs=m.zs) H1 = pkg.Hm assert pkg.phase == 'g' pkg.flash(T=450, P=1e6, zs=m.zs) H2 = pkg.Hm assert pkg.phase == 'g' assert_allclose(H1 - H2, 1638.19303081, rtol=1e-3) # Case gas to VF= = 0 at same T pkg.flash(T=350, P=400, zs=m.zs) assert pkg.phase == 'g' H1 = pkg.Hm pkg.flash(T=350, VF=.5, zs=m.zs) assert pkg.phase == 'l/g' H2 = pkg.Hm assert_allclose(H1 - H2, 16445.143155, rtol=1e-3) # Higher pressure, less matching (gas constant diff probably; gas-liquid difference! No partial phase.) 
pkg.flash(T=450, P=400, zs=m.zs) assert pkg.phase == 'g' H1 = pkg.Hm pkg.flash(T=450, P=1e8, zs=m.zs) assert pkg.phase == 'l' H2 = pkg.Hm H1 - H2 assert_allclose(H1 - H2, 13815.6666172, rtol=1e-3) # low P fluid to saturation pressure (both gas) pkg.flash(T=450, P=400, zs=m.zs) assert pkg.phase == 'g' H1 = pkg.Hm H1 = pkg.Hm pkg.flash(T=450, VF=1, zs=m.zs) assert pkg.phase == 'g' H2 = pkg.Hm H2 = pkg.Hm assert_allclose(H1 - H2, 2003.84468984, rtol=1e-3) # low pressure gas to liquid saturated pkg.flash(T=350, P=400, zs=m.zs) assert pkg.phase == 'g' H1 = pkg.Hm pkg.flash(T=350, VF=0, zs=m.zs) assert pkg.phase == 'l' H2 = pkg.Hm assert_allclose(H1 - H2, 23682.3468207, rtol=1e-3) # High pressure liquid to partial evaporation pkg.flash(T=350, P=3e6, zs=m.zs) assert pkg.phase == 'l' H1 = pkg.Hm pkg.flash(T=350, VF=.25, zs=m.zs) assert pkg.phase == 'l/g' H2 = pkg.Hm assert_allclose(H1 - H2, -2328.21259061, rtol=1e-3) # High pressure temperature change pkg.flash(T=300, P=3e6, zs=m.zs) assert pkg.phase == 'l' H1 = pkg.Hm pkg.flash(T=400, P=1e7, zs=m.zs) assert pkg.phase == 'l' H2 = pkg.Hm assert_allclose(H1 - H2, -18470.2994798, rtol=1e-3) # High pressure temperature change and phase change pkg.flash(T=300, P=3e6, zs=m.zs) assert pkg.phase == 'l' H1 = pkg.Hm pkg.flash(T=400, P=1e5, zs=m.zs) assert pkg.phase == 'g' H2 = pkg.Hm H1 - H2 assert_allclose(H1 - H2, -39430.7145672, rtol=1e-3) @pytest.mark.deprecated def test_PRMIX_pkg_S(): zs = [0.4, 0.6] m = Mixture(['Ethane', 'Heptane'], zs=zs, T=300, P=1E6) kij = .0 kijs = [[0,kij],[kij,0]] Tcs = [305.322, 540.13] Pcs = [4872200.0, 2736000.0] omegas = [0.099, 0.349] pkg = GceosBase(eos_mix=PRMIX, VaporPressures=m.VaporPressures, Tms=m.Tms, Tbs=m.Tbs, Tcs=Tcs, Pcs=Pcs, omegas=omegas, kijs=kijs, eos_kwargs=None, HeatCapacityGases=m.HeatCapacityGases) pkg.FLASH_VF_TOL = 1e-12 # Case gas -gas pressure difference pkg.flash(T=450, P=400, zs=m.zs) S1 = pkg.Sm assert pkg.phase == 'g' pkg.flash(T=450, P=1e6, zs=m.zs) S2 = pkg.Sm assert pkg.phase == 'g' assert_allclose(S1 - S2, 67.59095157604824, rtol=1e-3) # Case gas to VF= = 0 at same T pkg.flash(T=350, P=400, zs=m.zs) assert pkg.phase == 'g' S1 = pkg.Sm pkg.flash(T=350, VF=.5, zs=m.zs) assert pkg.phase == 'l/g' S2 = pkg.Sm assert_allclose(S1 - S2, 96.84959621651315, rtol=1e-3) # Higher pressure, less matching (gas constant diff probably; gas-liquid difference! No partial phase.) 
pkg.flash(T=450, P=400, zs=m.zs) assert pkg.phase == 'g' S1 = pkg.Sm pkg.flash(T=450, P=1e8, zs=m.zs) assert pkg.phase == 'l' S2 = pkg.Sm S1 - S2 assert_allclose(S1 - S2, 128.67194096593366, rtol=1e-3) # low P fluid to saturation pressure (both gas) pkg.flash(T=450, P=400, zs=m.zs) assert pkg.phase == 'g' H1 = pkg.Hm S1 = pkg.Sm pkg.flash(T=450, VF=1, zs=m.zs) assert pkg.phase == 'g' H2 = pkg.Hm S2 = pkg.Sm assert_allclose(S1 - S2, 69.64345358808025, rtol=1e-3) # low pressure gas to liquid saturated pkg.flash(T=350, P=400, zs=m.zs) assert pkg.phase == 'g' S1 = pkg.Sm pkg.flash(T=350, VF=0, zs=m.zs) assert pkg.phase == 'l' S2 = pkg.Sm assert_allclose(S1 - S2, 124.44419797042649, rtol=1e-3) # High pressure liquid to partial evaporation pkg.flash(T=350, P=3e6, zs=m.zs) assert pkg.phase == 'l' S1 = pkg.Sm pkg.flash(T=350, VF=.25, zs=m.zs) assert pkg.phase == 'l/g' S2 = pkg.Sm assert_allclose(S1 - S2, -7.913399921816193, rtol=1e-3) # High pressure temperature change pkg.flash(T=300, P=3e6, zs=m.zs) assert pkg.phase == 'l' S1 = pkg.Sm pkg.flash(T=400, P=1e7, zs=m.zs) assert pkg.phase == 'l' S2 = pkg.Sm assert_allclose(S1 - S2, -50.38050604000216, atol=1) # High pressure temperature change and phase change pkg.flash(T=300, P=3e6, zs=m.zs) assert pkg.phase == 'l' S1 = pkg.Sm pkg.flash(T=400, P=1e5, zs=m.zs) assert pkg.phase == 'g' S2 = pkg.Sm S1 - S2 assert_allclose(S1 - S2, -124.39457107124854, atol=1) @pytest.mark.deprecated def test_PRMIX_pkg_extras(): # TODO add more properties as they are added zs = [0.4, 0.6] m = Mixture(['Ethane', 'Heptane'], zs=zs, T=300, P=1E6) kij = .0 kijs = [[0,kij],[kij,0]] Tcs = [305.322, 540.13] Pcs = [4872200.0, 2736000.0] omegas = [0.099, 0.349] pkg = GceosBase(eos_mix=PRMIX, VaporPressures=m.VaporPressures, Tms=m.Tms, Tbs=m.Tbs, Tcs=Tcs, Pcs=Pcs, omegas=omegas, kijs=kijs, eos_kwargs=None, HeatCapacityGases=m.HeatCapacityGases) pkg.flash(T=400, P=1e5, zs=m.zs) assert 'g' == pkg.phase assert_allclose(pkg.eos_g.H_dep_g, -179.77096245871508, rtol=1e-5) assert_allclose(pkg.eos_g.S_dep_g, -0.2971318950892263, rtol=1e-5) assert_allclose(pkg.Hgm_dep, -179.77096245871508, rtol=5e-5) assert_allclose(pkg.Sgm_dep, -0.2971318950892263, rtol=5e-5) assert_allclose(pkg.Cpgm, 153.32126587681677, rtol=1e-3) assert_allclose(pkg.Cvgm, 144.3920626710827, rtol=1e-3) # :) assert_allclose(pkg.Cpgm_dep, 0.7139646058820279, rtol=1e-5) assert_allclose(pkg.Cvgm_dep, 0.09922120014794993, rtol=1e-5) #? maybe issue pkg.flash(T=300, P=1e7, zs=m.zs) assert 'l' == pkg.phase assert_allclose(pkg.eos_l.H_dep_l, -25490.54123032457, rtol=5e-5) assert_allclose(pkg.eos_l.S_dep_l, -48.47646403887194, rtol=5e-5) assert_allclose(pkg.Hlm_dep, -25490.54123, rtol=1e-4) assert_allclose(pkg.Slm_dep, -48.47646403887194, rtol=1e-4) assert_allclose(pkg.Cplm, 160.5756363050434, rtol=1e-3) assert_allclose(pkg.Cvlm, 133.7943922248561, rtol=1e-3) # :) assert_allclose(pkg.Cplm_dep, 39.8813153015303, rtol=5e-5) assert_allclose(pkg.Cvlm_dep, 21.414531021342995, rtol=5e-5) #? 
maybe issue @pytest.mark.deprecated def test_azeotrope_Txy_PR(): IDs = ['ethanol', 'benzene'] pkg = PropertyPackageConstants(IDs, name=PR_PKG) pkg.pkg.kijs = [[0.0, .0728], [0.0728, 0]] # Test the pressure in the test _, _, Tbubbles, Tdews = pkg.pkg.plot_Txy(P=101325., pts=30, values=True) Tbubbles_expect = [353.1524424999673, 351.21711105215405, 349.63220641849136, 348.3290291072549, 347.2552443556649, 346.37022614955663, 345.6419123814478, 345.0446351984003, 344.55759626315887, 344.16377920005266, 343.84916614883053, 343.60217197943285, 343.41323969870245, 343.2745605540422, 343.1798963139651, 343.12449170081203, 343.1050736632354, 343.1199423771055, 343.169167658216, 343.2549149821879, 343.38193882073034, 343.5582990521058, 343.7963805186986, 344.1143278723936, 344.53804741377195, 345.1039685436253, 345.8627772097754, 346.88426937346605, 348.26317130456636, 350.12491594342015] Tdews_expect = [353.1524424945457, 352.3912913474467, 351.6262944570331, 350.8588218276585, 350.0906535909099, 349.32409993796796, 348.56216098024134, 347.8087416697709, 347.0689431804551, 346.349459873305, 345.6591224986107, 345.00963438553083, 344.4165436003679, 343.90042076441017, 343.4879384830795, 343.21166686886806, 343.10538604291753, 343.1904450269102, 343.4583142995908, 343.8715382698287, 344.38531268086734, 344.96341038590646, 345.5807576414249, 346.22080282099756, 346.8726671468842, 347.52913516661, 348.18536889289476, 348.83809921197854, 349.4851121234294, 350.1249159362295] assert_allclose(Tbubbles, Tbubbles_expect, rtol=5e-5) assert_allclose(Tdews, Tdews_expect, rtol=5e-5) @pytest.mark.deprecated def test_azeotrope_Txy_PR_multiP(): IDs = ['ethanol', 'benzene'] pkg = PropertyPackageConstants(IDs, name=PR_PKG) pkg.pkg.kijs = [[0.0, .0728], [0.0728, 0]] #Test some more pressures for good measure (do not go too far near the boundaries) Tdews_vary_P_expect = [[220.15284322119734, 219.96736090890047, 222.4531025319982, 225.87591713961928, 228.38731541934482, 230.38394741856035, 232.04763019651986, 233.47741573028978, 234.73343380218137, 235.85502051831918, 236.8693632699694, 237.79606282049812, 238.6497311937851, 239.441561771029, 240.18032475929877], [250.29484272442642, 249.8077093695365, 249.42498602603337, 249.28679137901344, 251.96383913156598, 254.37995372490553, 256.413822517376, 258.1732216294687, 259.72617151880036, 261.1180840150342, 262.38075082537034, 263.53729905772974, 264.6050861496727, 265.5974792849115, 266.5249972102388], [291.6640151659878, 290.7705630707953, 289.9138195863271, 289.1364722089608, 288.52440010361823, 288.2911650820978, 289.2699169291151, 291.1074635611929, 292.88383384665804, 294.5345675748379, 296.06323507541447, 297.48280466114016, 298.8066120886574, 300.0464992568524, 301.2125736864664], [352.7187334210476, 351.14318764286776, 349.5582311684951, 347.9830060760723, 346.4505096140636, 345.01844631869784, 343.78971773705734, 342.946979856768, 342.761997697104, 343.3571699367641, 344.44973552643745, 345.7436125329223, 347.09887074851576, 348.456160479165, 349.78950974944104], [452.0244773102955, 448.93347954186527, 445.80843092367013, 442.6587093618919, 439.501298156353, 436.36519982883647, 433.2983005305142, 430.3773519002321, 427.7197877701338, 425.488417876116, 423.8636548918616, 422.9595322281223, 422.7424527930051, 423.0631573964071, 423.755679832123]] Tbubbles_vary_P_expect = [[220.15284322260558, 219.9593754659149, 219.97616818101181, 220.06166994291502, 220.11857644484724, 220.07120074079083, 219.85507103807385, 219.41277280716295, 218.69492646979015, 
217.668864547681, 216.34458685271593, 214.85030366123252, 213.69173078234607, 215.05360231675624, 240.18032476043962], [250.29484272636603, 249.6034873812954, 249.3302912913737, 249.28015720321142, 249.32976951168072, 249.39258345431227, 249.40319499276376, 249.31112373153306, 249.08166823900348, 248.70647278451116, 248.234802114006, 247.86101246500266, 248.19942715491368, 251.37880207972458, 266.5249972119777], [291.6640151695664, 289.98349864091705, 289.03830617940577, 288.5439451223825, 288.32964841249037, 288.2845537334007, 288.33357406890497, 288.4270830783461, 288.5394805139744, 288.6772197151695, 288.90297364622535, 289.39555977182874, 290.6007278361622, 293.62911489553994, 301.2125736895028], [352.71873342667294, 349.111737745725, 346.7112146623439, 345.1062784039534, 344.0464541688307, 343.37007653346484, 342.96929893428904, 342.772655513814, 342.7375808864912, 342.85043503480443, 343.1348957652109, 343.67328993658566, 344.6514600873893, 346.44748552216527, 349.78950975665305], [452.0244773382353, 444.9894726934088, 439.5814848561124, 435.3968758569498, 432.1385100677904, 429.5895584761499, 427.5915768205421, 426.02882099085946, 424.8179436451578, 423.90205997868895, 423.2487129719721, 422.85201924536585, 422.7401871273592, 422.99064457143066, 423.7556798321766]] Tdews_vary_P = [] Tbubbles_vary_P = [] # pkg.pkg.plot_Txy(P=100, pts=100) # values=True for P in logspace(2, 6, 5): _, _, Tbubbles, Tdews = pkg.pkg.plot_Txy(P=P, pts=15, values=True) Tbubbles_vary_P.append(Tbubbles) Tdews_vary_P.append(Tdews) assert_allclose(Tbubbles_vary_P, Tbubbles_vary_P_expect, rtol=1e-5) assert_allclose(Tdews_vary_P, Tdews_vary_P_expect, rtol=1e-5) @pytest.mark.deprecated def test_azeotrope_Pxy_PR_multiT(): IDs = ['ethanol', 'benzene'] pkg = PropertyPackageConstants(IDs, name=PR_PKG) pkg.pkg.kijs = [[0.0, .0728], [0.0728, 0]] Ts = [220, 250, 300, 350, 400, 450, 475, 450, 500, 505, 507.5] Ps_bubble_multi_T_expect = [[2413788.5246687443, 2641798.365260112, 2861327.402278104, 3072725.7261532187, 3276386.430826817, 3472735.47268973, 3662217.055850461, 3845273.4025023617, 4022317.494062474, 4193697.0600327696, 4359647.812662887, 4520233.860544795, 4675273.823398787, 4824253.350470867, 4966230.2319129715, 5099749.351949349, 5222801.450719186, 5332871.261916157, 5427095.16804713, 5502455.831709672], [2413788.5246687443, 2641798.365260112, 2861327.402278104, 3072725.7261532187, 3276386.430826817, 3472735.47268973, 3662217.055850461, 3845273.4025023617, 4022317.494062474, 4193697.0600327696, 4359647.812662887, 4520233.860544795, 4675273.823398787, 4824253.350470867, 4966230.2319129715, 5099749.351949349, 5222801.450719186, 5332871.261916157, 5427095.16804713, 5502455.831709672], [2413788.5246687443, 2641798.365260112, 2861327.402278104, 3072725.7261532187, 3276386.430826817, 3472735.47268973, 3662217.055850461, 3845273.4025023617, 4022317.494062474, 4193697.0600327696, 4359647.812662887, 4520233.860544795, 4675273.823398787, 4824253.350470867, 4966230.2319129715, 5099749.351949349, 5222801.450719186, 5332871.261916157, 5427095.16804713, 5502455.831709672], [2413788.5246687443, 2641798.365260112, 2861327.402278104, 3072725.7261532187, 3276386.430826817, 3472735.47268973, 3662217.055850461, 3845273.4025023617, 4022317.494062474, 4193697.0600327696, 4359647.812662887, 4520233.860544795, 4675273.823398787, 4824253.350470867, 4966230.2319129715, 5099749.351949349, 5222801.450719186, 5332871.261916157, 5427095.16804713, 5502455.831709672], [2413788.5246687443, 2641798.365260112, 2861327.402278104, 
3072725.7261532187, 3276386.430826817, 3472735.47268973, 3662217.055850461, 3845273.4025023617, 4022317.494062474, 4193697.0600327696, 4359647.812662887, 4520233.860544795, 4675273.823398787, 4824253.350470867, 4966230.2319129715, 5099749.351949349, 5222801.450719186, 5332871.261916157, 5427095.16804713, 5502455.831709672], [2413788.5246687443, 2641798.365260112, 2861327.402278104, 3072725.7261532187, 3276386.430826817, 3472735.47268973, 3662217.055850461, 3845273.4025023617, 4022317.494062474, 4193697.0600327696, 4359647.812662887, 4520233.860544795, 4675273.823398787, 4824253.350470867, 4966230.2319129715, 5099749.351949349, 5222801.450719186, 5332871.261916157, 5427095.16804713, 5502455.831709672], [2413788.5246687443, 2641798.365260112, 2861327.402278104, 3072725.7261532187, 3276386.430826817, 3472735.47268973, 3662217.055850461, 3845273.4025023617, 4022317.494062474, 4193697.0600327696, 4359647.812662887, 4520233.860544795, 4675273.823398787, 4824253.350470867, 4966230.2319129715, 5099749.351949349, 5222801.450719186, 5332871.261916157, 5427095.16804713, 5502455.831709672], [2413788.5246687443, 2641798.365260112, 2861327.402278104, 3072725.7261532187, 3276386.430826817, 3472735.47268973, 3662217.055850461, 3845273.4025023617, 4022317.494062474, 4193697.0600327696, 4359647.812662887, 4520233.860544795, 4675273.823398787, 4824253.350470867, 4966230.2319129715, 5099749.351949349, 5222801.450719186, 5332871.261916157, 5427095.16804713, 5502455.831709672], [2413788.5246687443, 2641798.365260112, 2861327.402278104, 3072725.7261532187, 3276386.430826817, 3472735.47268973, 3662217.055850461, 3845273.4025023617, 4022317.494062474, 4193697.0600327696, 4359647.812662887, 4520233.860544795, 4675273.823398787, 4824253.350470867, 4966230.2319129715, 5099749.351949349, 5222801.450719186, 5332871.261916157, 5427095.16804713, 5502455.831709672], [2413788.5246687443, 2641798.365260112, 2861327.402278104, 3072725.7261532187, 3276386.430826817, 3472735.47268973, 3662217.055850461, 3845273.4025023617, 4022317.494062474, 4193697.0600327696, 4359647.812662887, 4520233.860544795, 4675273.823398787, 4824253.350470867, 4966230.2319129715, 5099749.351949349, 5222801.450719186, 5332871.261916157, 5427095.16804713, 5502455.831709672], [2413788.5246687443, 2641798.365260112, 2861327.402278104, 3072725.7261532187, 3276386.430826817, 3472735.47268973, 3662217.055850461, 3845273.4025023617, 4022317.494062474, 4193697.0600327696, 4359647.812662887, 4520233.860544795, 4675273.823398787, 4824253.350470867, 4966230.2319129715, 5099749.351949349, 5222801.450719186, 5332871.261916157, 5427095.16804713, 5502455.831709672]] Ps_dew_multi_T_expect = [[2413788.5246687448, 2528934.820505938, 2653201.630551055, 2787361.571970873, 2932152.5813825456, 3088204.613722783, 3255927.030569813, 3435346.2652946324, 3625890.6578108566, 3826139.6891573607, 4033592.988357248, 4244562.319550634, 4454311.19552944, 4657505.6804501135, 4848882.834798467, 5023886.247109449, 5179004.275036525, 5311703.561411566, 5420071.437922198, 5502455.831709671], [2413788.5246687448, 2528934.820505938, 2653201.630551055, 2787361.571970873, 2932152.5813825456, 3088204.613722783, 3255927.030569813, 3435346.2652946324, 3625890.6578108566, 3826139.6891573607, 4033592.988357248, 4244562.319550634, 4454311.19552944, 4657505.6804501135, 4848882.834798467, 5023886.247109449, 5179004.275036525, 5311703.561411566, 5420071.437922198, 5502455.831709671], [2413788.5246687448, 2528934.820505938, 2653201.630551055, 2787361.571970873, 2932152.5813825456, 3088204.613722783, 
3255927.030569813, 3435346.2652946324, 3625890.6578108566, 3826139.6891573607, 4033592.988357248, 4244562.319550634, 4454311.19552944, 4657505.6804501135, 4848882.834798467, 5023886.247109449, 5179004.275036525, 5311703.561411566, 5420071.437922198, 5502455.831709671], [2413788.5246687448, 2528934.820505938, 2653201.630551055, 2787361.571970873, 2932152.5813825456, 3088204.613722783, 3255927.030569813, 3435346.2652946324, 3625890.6578108566, 3826139.6891573607, 4033592.988357248, 4244562.319550634, 4454311.19552944, 4657505.6804501135, 4848882.834798467, 5023886.247109449, 5179004.275036525, 5311703.561411566, 5420071.437922198, 5502455.831709671], [2413788.5246687448, 2528934.820505938, 2653201.630551055, 2787361.571970873, 2932152.5813825456, 3088204.613722783, 3255927.030569813, 3435346.2652946324, 3625890.6578108566, 3826139.6891573607, 4033592.988357248, 4244562.319550634, 4454311.19552944, 4657505.6804501135, 4848882.834798467, 5023886.247109449, 5179004.275036525, 5311703.561411566, 5420071.437922198, 5502455.831709671], [2413788.5246687448, 2528934.820505938, 2653201.630551055, 2787361.571970873, 2932152.5813825456, 3088204.613722783, 3255927.030569813, 3435346.2652946324, 3625890.6578108566, 3826139.6891573607, 4033592.988357248, 4244562.319550634, 4454311.19552944, 4657505.6804501135, 4848882.834798467, 5023886.247109449, 5179004.275036525, 5311703.561411566, 5420071.437922198, 5502455.831709671], [2413788.5246687448, 2528934.820505938, 2653201.630551055, 2787361.571970873, 2932152.5813825456, 3088204.613722783, 3255927.030569813, 3435346.2652946324, 3625890.6578108566, 3826139.6891573607, 4033592.988357248, 4244562.319550634, 4454311.19552944, 4657505.6804501135, 4848882.834798467, 5023886.247109449, 5179004.275036525, 5311703.561411566, 5420071.437922198, 5502455.831709671], [2413788.5246687448, 2528934.820505938, 2653201.630551055, 2787361.571970873, 2932152.5813825456, 3088204.613722783, 3255927.030569813, 3435346.2652946324, 3625890.6578108566, 3826139.6891573607, 4033592.988357248, 4244562.319550634, 4454311.19552944, 4657505.6804501135, 4848882.834798467, 5023886.247109449, 5179004.275036525, 5311703.561411566, 5420071.437922198, 5502455.831709671], [2413788.5246687448, 2528934.820505938, 2653201.630551055, 2787361.571970873, 2932152.5813825456, 3088204.613722783, 3255927.030569813, 3435346.2652946324, 3625890.6578108566, 3826139.6891573607, 4033592.988357248, 4244562.319550634, 4454311.19552944, 4657505.6804501135, 4848882.834798467, 5023886.247109449, 5179004.275036525, 5311703.561411566, 5420071.437922198, 5502455.831709671], [2413788.5246687448, 2528934.820505938, 2653201.630551055, 2787361.571970873, 2932152.5813825456, 3088204.613722783, 3255927.030569813, 3435346.2652946324, 3625890.6578108566, 3826139.6891573607, 4033592.988357248, 4244562.319550634, 4454311.19552944, 4657505.6804501135, 4848882.834798467, 5023886.247109449, 5179004.275036525, 5311703.561411566, 5420071.437922198, 5502455.831709671], [2413788.5246687448, 2528934.820505938, 2653201.630551055, 2787361.571970873, 2932152.5813825456, 3088204.613722783, 3255927.030569813, 3435346.2652946324, 3625890.6578108566, 3826139.6891573607, 4033592.988357248, 4244562.319550634, 4454311.19552944, 4657505.6804501135, 4848882.834798467, 5023886.247109449, 5179004.275036525, 5311703.561411566, 5420071.437922198, 5502455.831709671]] Ps_bubble_multi_T, Ps_dew_multi_T = [], [] for T in Ts: _, _, Ps_bubble, Ps_dew = pkg.pkg.plot_Pxy(T=507.5, pts=20, ignore_errors=True, values=True) Ps_bubble_multi_T.append(Ps_bubble) 
Ps_dew_multi_T.append(Ps_dew) assert_allclose(Ps_bubble_multi_T_expect, Ps_bubble_multi_T, rtol=1e-6) assert_allclose(Ps_dew_multi_T_expect, Ps_dew_multi_T, rtol=1e-6) @pytest.mark.deprecated def test_phase_envelope_ethane_pentane(): IDs = ['ethane', 'n-pentane'] pkg = PropertyPackageConstants(IDs, PR_PKG, kijs=[[0, 7.609447e-003], [7.609447e-003, 0]]) zs = [0.7058334393128614, 0.2941665606871387] # 50/50 mass basis max_step_damping = 100 P_high = 8e6 factor = 1.2 min_step_termination = 1000 min_factor_termination = 1.001 pkg.pkg.FLASH_VF_TOL = 1e-8 max_P_step = 1e5 P_low = 1e5 spec_points = linspace(1e5, 6.8e6, 68) P_points, Ts_known, xs_known = pkg.pkg.dew_T_envelope(zs, P_low=P_low, P_high=P_high, xtol=1E-10, factor=factor, min_step_termination=min_step_termination, min_factor_termination=min_factor_termination, max_step_damping=max_step_damping, max_P_step=max_P_step, spec_points=spec_points) P_points2, Ts_known2, ys_known = pkg.pkg.bubble_T_envelope(zs, P_low=P_low, P_high=P_high, xtol=1E-10, factor=factor, min_step_termination=min_step_termination, max_step_damping=max_step_damping, min_factor_termination=min_factor_termination, max_P_step=max_P_step, spec_points=spec_points) Ps_dew_check = [] Ts_dew_check = [] Ts_dew_expect = [277.1449361694948, 293.9890986702753, 304.8763147090649, 313.1006603531763, 319.7750626828419, 325.42150966613895, 330.32990856864086, 334.6791912532372, 338.58812791519466, 342.13987634031974, 345.3950895854326, 348.39946023112896, 351.1883247302556, 353.7896091573966, 356.22578835719867, 358.51523195418594, 360.673155009561, 362.71230559820697, 364.64347249145516, 366.47586686424677, 368.21741391309746, 369.8749788315006, 371.4545441481416, 372.96135047685806, 374.40000935978657, 375.774594553273, 377.0887164639959, 378.3455832681606, 379.54805139424366, 380.698667422777, 381.7997029894565, 382.8531839253168, 383.8609145986068, 384.824498212078, 385.74535365141924, 386.6247293397855, 387.4637144549204, 388.2632477701861, 389.02412430395054, 389.7469998909282, 390.4323937166012, 391.08068879770735, 391.69213029960156, 392.2668215113749, 392.8047171889375, 393.30561384008115, 393.76913637985547, 394.19472032380185, 394.58158839582626, 394.9287200011976, 395.2348113354687, 395.4982230372662, 395.71691081859336, 395.8883324260842, 396.0093207511565, 396.0759073750358, 396.0830711573792, 396.024369487178, 395.8913790901176, 395.67280294095485, 395.3529926936849, 394.9092730479461, 394.3067055020046, 393.48636807223045, 392.33342385249546, 390.55261457054587] for P_dew, T_dew in zip(P_points, Ts_known): if abs(P_dew % 1e5) < 1e-5: Ps_dew_check.append(P_dew) Ts_dew_check.append(T_dew) Ps_bubble_check = [] Ts_bubble_check = [] Ts_bubble_expect = [277.1449361694948, 293.9890986702753, 304.8763147090649, 313.1006603531763, 319.7750626828419, 325.42150966613895, 330.32990856864086, 334.6791912532372, 338.58812791519466, 342.13987634031974, 345.3950895854326, 348.39946023112896, 351.1883247302556, 353.7896091573966, 356.22578835719867, 358.51523195418594, 360.673155009561, 362.71230559820697, 364.64347249145516, 366.47586686424677, 368.21741391309746, 369.8749788315006, 371.4545441481416, 372.96135047685806, 374.40000935978657, 375.774594553273, 377.0887164639959, 378.3455832681606, 379.54805139424366, 380.698667422777, 381.7997029894565, 382.8531839253168, 383.8609145986068, 384.824498212078, 385.74535365141924, 386.6247293397855, 387.4637144549204, 388.2632477701861, 389.02412430395054, 389.7469998909282, 390.4323937166012, 391.08068879770735, 391.69213029960156, 
392.2668215113749, 392.8047171889375, 393.30561384008115, 393.76913637985547, 394.19472032380185, 394.58158839582626, 394.9287200011976, 395.2348113354687, 395.4982230372662, 395.71691081859336, 395.8883324260842, 396.0093207511565, 396.0759073750358, 396.0830711573792, 396.024369487178, 395.8913790901176, 395.67280294095485, 395.3529926936849, 394.9092730479461, 394.3067055020046, 393.48636807223045, 392.33342385249546, 390.55261457054587] for P_bubble, T_bubble in zip(P_points, Ts_known): if abs(P_bubble % 1e5) < 1e-5: Ps_bubble_check.append(P_bubble) Ts_bubble_check.append(T_bubble) assert_allclose(Ps_bubble_check, spec_points[:-2]) assert_allclose(Ps_dew_check, spec_points[:-2]) assert_allclose(Ts_dew_check, Ts_dew_expect, rtol=1e-5) assert_allclose(Ts_bubble_check, Ts_bubble_expect, rtol=1e-5) @pytest.mark.deprecated def test_ethane_pentane_TP_Tdew_Tbubble_TP(): # Takes 9 seconds! IDs = ['ethane', 'n-pentane'] pkg = PropertyPackageConstants(IDs, PR_PKG, kijs=[[0, 7.609447e-003], [7.609447e-003, 0]]) zs = [0.7058334393128614, 0.2941665606871387] # 50/50 mass basis pkg = pkg.pkg VFs = [] all_Ts = [] all_Ps = [] P_high = 6.1e6 # goal: 6e6 It worked! P_low = 1e3 Ps = logspace(log10(P_low), log10(P_high), 50) T_lows = [] T_highs = [] for P in Ps: pkg.flash(P=P, VF=0, zs=zs) T_low = pkg.T # 129 K T_lows.append(T_low) pkg.flash(P=P, VF=1, zs=zs) T_high = pkg.T # 203 K T_highs.append(T_high) for Wilson_first in (False, True): VFs_working = [] Ts = linspace(T_low+1e-4, T_high-1e-4, 50) for T in Ts: ans = pkg.flash_TP_zs(P=P, T=T, zs=zs, Wilson_first=Wilson_first) VFs_working.append(ans[-1]) if ans[0] != 'l/g': raise ValueError("Converged to single phase solution at T=%g K, P=%g Pa" %(T, P)) VFs.append(VFs_working) all_Ts.append(Ts) all_Ps.append(Ps) @pytest.mark.deprecated @pytest.mark.slow_envelope def test_phase_envelope_44_components(): IDs = ['nitrogen', 'carbon dioxide', 'H2S', 'methane', 'ethane', 'propane', 'isobutane', 'butane', 'isopentane', 'pentane', 'Hexane', 'Heptane', 'Octane', 'Nonane', 'Decane', 'Undecane', 'Dodecane', 'Tridecane', 'Tetradecane', 'Pentadecane', 'Hexadecane', 'Heptadecane', 'Octadecane', 'Nonadecane', 'Eicosane', 'Heneicosane', 'Docosane', 'Tricosane', 'Tetracosane', 'Pentacosane', 'Hexacosane', 'Heptacosane', 'Octacosane', 'Nonacosane', 'Triacontane', 'Benzene', 'Toluene', 'Ethylbenzene', 'Xylene', '1,2,4-Trimethylbenzene', 'Cyclopentane', 'Methylcyclopentane', 'Cyclohexane', 'Methylcyclohexane'] zs = [9.11975115499676e-05, 9.986813065240533e-05, 0.0010137795304828892, 0.019875879000370657, 0.013528874875432457, 0.021392773691700402, 0.00845450438914824, 0.02500218071904368, 0.016114189201071587, 0.027825798446635016, 0.05583179467176313, 0.0703116540769539, 0.07830577180555454, 0.07236459223729574, 0.0774523322851419, 0.057755091407705975, 0.04030134965162674, 0.03967043780553758, 0.03514481759005302, 0.03175471055284055, 0.025411123554079325, 0.029291866298718154, 0.012084986551713202, 0.01641114551124426, 0.01572454598093482, 0.012145363820829673, 0.01103585282423499, 0.010654818322680342, 0.008777712911254239, 0.008732073853067238, 0.007445155260036595, 0.006402875549212365, 0.0052908087849774296, 0.0048199150683177075, 0.015943943854195963, 0.004452253754752775, 0.01711981267072777, 0.0024032720444511282, 0.032178399403544646, 0.0018219517069058137, 0.003403378548794345, 0.01127516775495176, 0.015133143423489698, 0.029483213283483682] pkg = PropertyPackageConstants(IDs, PR_PKG) max_step_damping = 50 P_low = 1e4 factor = 1.2 min_step_termination = 1000 
min_factor_termination = 1.0002 pkg.pkg.FLASH_VF_TOL = 1e-8 P_high = 2e8 spec_points = linspace(1e5, 4e6, 40) P_points, Ts_known, xs_known = pkg.pkg.dew_T_envelope(zs, P_low=P_low, P_high=P_high, xtol=1E-10, factor=factor, min_step_termination=min_step_termination, min_factor_termination=min_factor_termination, max_step_damping=max_step_damping, spec_points=spec_points ) P_points2, Ts_known2, ys_known = pkg.pkg.bubble_T_envelope(zs, P_low=P_low, P_high=P_high, xtol=1E-10, factor=factor, min_step_termination=min_step_termination, max_step_damping=max_step_damping, min_factor_termination=min_factor_termination, spec_points=spec_points ) Ps_dew_check = [] Ts_dew_check = [] Ts_dew_expect = [585.1745093521665, 609.5133715138915, 624.6944734390993, 635.7991119723131, 644.5334850169733, 651.6941060581852, 657.7213913216676, 662.8858558611348, 667.3660286752593, 671.2860034847065, 674.7354375617153, 677.7810270676093, 680.4734809440047, 682.8519536806468, 684.9469622199979, 686.7823540873131, 688.3766543470003, 689.7439863506575, 690.8946833742955, 691.8356590318011, 692.5705695910872, 693.0997717010517, 693.4200465117376, 693.5240144469666, 693.399082494406, 693.0255964253895, 692.3734715991103, 691.3954910689196, 690.0119359589117, 688.0668235519908, 685.1543692400655, 679.0864243340858] for P_dew, T_dew in zip(P_points, Ts_known): if abs(P_dew % 1e5) < 1e-5: Ps_dew_check.append(P_dew) Ts_dew_check.append(T_dew) Ps_bubble_check = [] Ts_bubble_check = [] Ts_bubble_expect = [585.1745093521665, 609.5133715138915, 624.6944734390993, 635.7991119723131, 644.5334850169733, 651.6941060581852, 657.7213913216676, 662.8858558611348, 667.3660286752593, 671.2860034847065, 674.7354375617153, 677.7810270676093, 680.4734809440047, 682.8519536806468, 684.9469622199979, 686.7823540873131, 688.3766543470003, 689.7439863506575, 690.8946833742955, 691.8356590318011, 692.5705695910872, 693.0997717010517, 693.4200465117376, 693.5240144469666, 693.399082494406, 693.0255964253895, 692.3734715991103, 691.3954910689196, 690.0119359589117, 688.0668235519908, 685.1543692400655, 679.0864243340858] for P_bubble, T_bubble in zip(P_points, Ts_known): if abs(P_bubble % 1e5) < 1e-5: Ps_bubble_check.append(P_bubble) Ts_bubble_check.append(T_bubble) assert_allclose(Ps_bubble_check, spec_points[:-8]) assert_allclose(Ps_dew_check, spec_points[:-8]) assert_allclose(Ts_dew_check, Ts_dew_expect, rtol=1e-5) assert_allclose(Ts_bubble_check, Ts_bubble_expect, rtol=1e-5) @pytest.mark.deprecated def test_TPD_bubble_dew(): IDs = ['ethane', 'n-pentane'] pkg = PropertyPackageConstants(IDs, PR_PKG, kijs=[[0, 7.609447e-003], [7.609447e-003, 0]]) zs = [0.7058334393128614, 0.2941665606871387] # 50/50 mass basis pkg = pkg.pkg pkg.flash(P=1e6, VF=0, zs=zs) pkg.eos_l.fugacities() pkg.eos_g.fugacities() TPD_calc = TPD(pkg.eos_g.T, pkg.eos_l.zs, pkg.eos_l.lnphis_l, pkg.eos_g.zs, pkg.eos_g.lnphis_g,) assert_allclose(TPD_calc, 0, atol=1e-6) pkg.flash(T=200, VF=0, zs=zs) pkg.eos_l.fugacities() pkg.eos_g.fugacities() TPD_calc = TPD(pkg.eos_g.T, pkg.eos_l.zs, pkg.eos_l.lnphis_l, pkg.eos_g.zs, pkg.eos_g.lnphis_g,) assert_allclose(TPD_calc, 0, atol=1e-6) pkg.flash(P=1e6, VF=1, zs=zs) pkg.eos_l.fugacities() pkg.eos_g.fugacities() TPD_calc = TPD(pkg.eos_g.T, pkg.eos_g.zs, pkg.eos_g.lnphis_g, pkg.eos_l.zs, pkg.eos_l.lnphis_l) assert_allclose(TPD_calc, 0, atol=1e-6) pkg.flash(T=300, VF=1, zs=zs) pkg.eos_l.fugacities() pkg.eos_g.fugacities() TPD_calc = TPD(pkg.eos_g.T, pkg.eos_g.zs, pkg.eos_g.lnphis_g, pkg.eos_l.zs, pkg.eos_l.lnphis_l) assert_allclose(TPD_calc, 0, 
atol=1e-6)


@pytest.mark.deprecated
def test_stab_comb_products_need_both_roots():
    comb_IDs = ['N2', 'CO2', 'O2', 'H2O']
    comb_zs = [0.5939849621247668, 0.112781954982051, 0.0676691730155464,
               0.2255639098776358]
    pkg2 = PropertyPackageConstants(comb_IDs, PR_PKG)
    kijs = [[0.0, -0.0122, -0.0159, 0.0], [-0.0122, 0.0, 0.0, 0.0952],
            [-0.0159, 0.0, 0.0, 0.0], [0.0, 0.0952, 0.0, 0.0]]
    pkg2 = PropertyPackageConstants(comb_IDs, PR_PKG, kijs=kijs)
    pkg2.pkg.flash_caloric(P=1e5, T=794.5305048838037, zs=comb_zs)
    assert 'g' == pkg2.pkg.phase
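# test_TPD_bubble_dew above relies on the Michelsen tangent-plane criterion: at a
# converged bubble or dew point the incipient phase lies exactly on the tangent
# plane of the bulk phase, so its tangent-plane distance is zero, which is what
# the atol=1e-6 assertions check. A minimal sketch of that quantity follows,
# assuming the usual molar, R*T-scaled form; thermo's own TPD helper may differ
# in scaling or argument order.
from math import log

R = 8.31446261815324  # J/(mol*K)

def tangent_plane_distance(T, zs, lnphis_bulk, ys, lnphis_trial):
    """TPD = R*T*sum_i y_i*(ln y_i + ln phi_i(y) - ln z_i - ln phi_i(z))."""
    tot = 0.0
    for yi, lnphi_y, zi, lnphi_z in zip(ys, lnphis_trial, zs, lnphis_bulk):
        if yi > 0.0:  # zero-mole-fraction trial components contribute nothing
            tot += yi*(log(yi) + lnphi_y - log(zi) - lnphi_z)
    return R*T*tot

# A negative value flags an unstable bulk composition; zero means the trial phase
# sits on the tangent plane, as expected at a saturation point.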
CalebBell/thermo
tests/test_property_package_eos.py
thermo/flash/flash_utils.py
""" Provides various throttling policies. """ from __future__ import unicode_literals import time from django.core.cache import cache as default_cache from django.core.exceptions import ImproperlyConfigured from rest_framework.settings import api_settings class BaseThrottle(object): """ Rate throttling of requests. """ def allow_request(self, request, view): """ Return `True` if the request should be allowed, `False` otherwise. """ raise NotImplementedError('.allow_request() must be overridden') def get_ident(self, request): """ Identify the machine making the request by parsing HTTP_X_FORWARDED_FOR if present and number of proxies is > 0. If not use all of HTTP_X_FORWARDED_FOR if it is available, if not use REMOTE_ADDR. """ xff = request.META.get('HTTP_X_FORWARDED_FOR') remote_addr = request.META.get('REMOTE_ADDR') num_proxies = api_settings.NUM_PROXIES if num_proxies is not None: if num_proxies == 0 or xff is None: return remote_addr addrs = xff.split(',') client_addr = addrs[-min(num_proxies, len(addrs))] return client_addr.strip() return ''.join(xff.split()) if xff else remote_addr def wait(self): """ Optionally, return a recommended number of seconds to wait before the next request. """ return None class SimpleRateThrottle(BaseThrottle): """ A simple cache implementation, that only requires `.get_cache_key()` to be overridden. The rate (requests / seconds) is set by a `throttle` attribute on the View class. The attribute is a string of the form 'number_of_requests/period'. Period should be one of: ('s', 'sec', 'm', 'min', 'h', 'hour', 'd', 'day') Previous request information used for throttling is stored in the cache. """ cache = default_cache timer = time.time cache_format = 'throttle_%(scope)s_%(ident)s' scope = None THROTTLE_RATES = api_settings.DEFAULT_THROTTLE_RATES def __init__(self): if not getattr(self, 'rate', None): self.rate = self.get_rate() self.num_requests, self.duration = self.parse_rate(self.rate) def get_cache_key(self, request, view): """ Should return a unique cache-key which can be used for throttling. Must be overridden. May return `None` if the request should not be throttled. """ raise NotImplementedError('.get_cache_key() must be overridden') def get_rate(self): """ Determine the string representation of the allowed request rate. """ if not getattr(self, 'scope', None): msg = ("You must set either `.scope` or `.rate` for '%s' throttle" % self.__class__.__name__) raise ImproperlyConfigured(msg) try: return self.THROTTLE_RATES[self.scope] except KeyError: msg = "No default throttle rate set for '%s' scope" % self.scope raise ImproperlyConfigured(msg) def parse_rate(self, rate): """ Given the request rate string, return a two tuple of: <allowed number of requests>, <period of time in seconds> """ if rate is None: return (None, None) num, period = rate.split('/') num_requests = int(num) duration = {'s': 1, 'm': 60, 'h': 3600, 'd': 86400}[period[0]] return (num_requests, duration) def allow_request(self, request, view): """ Implement the check to see if the request should be throttled. On success calls `throttle_success`. On failure calls `throttle_failure`. 
""" if self.rate is None: return True self.key = self.get_cache_key(request, view) if self.key is None: return True self.history = self.cache.get(self.key, []) self.now = self.timer() # Drop any requests from the history which have now passed the # throttle duration while self.history and self.history[-1] <= self.now - self.duration: self.history.pop() if len(self.history) >= self.num_requests: return self.throttle_failure() return self.throttle_success() def throttle_success(self): """ Inserts the current request's timestamp along with the key into the cache. """ self.history.insert(0, self.now) self.cache.set(self.key, self.history, self.duration) return True def throttle_failure(self): """ Called when a request to the API has failed due to throttling. """ return False def wait(self): """ Returns the recommended next request time in seconds. """ if self.history: remaining_duration = self.duration - (self.now - self.history[-1]) else: remaining_duration = self.duration available_requests = self.num_requests - len(self.history) + 1 if available_requests <= 0: return None return remaining_duration / float(available_requests) class AnonRateThrottle(SimpleRateThrottle): """ Limits the rate of API calls that may be made by a anonymous users. The IP address of the request will be used as the unique cache key. """ scope = 'anon' def get_cache_key(self, request, view): if request.user.is_authenticated: return None # Only throttle unauthenticated requests. return self.cache_format % { 'scope': self.scope, 'ident': self.get_ident(request) } class UserRateThrottle(SimpleRateThrottle): """ Limits the rate of API calls that may be made by a given user. The user id will be used as a unique cache key if the user is authenticated. For anonymous requests, the IP address of the request will be used. """ scope = 'user' def get_cache_key(self, request, view): if request.user.is_authenticated: ident = request.user.pk else: ident = self.get_ident(request) return self.cache_format % { 'scope': self.scope, 'ident': ident } class ScopedRateThrottle(SimpleRateThrottle): """ Limits the rate of API calls by different amounts for various parts of the API. Any view that has the `throttle_scope` property set will be throttled. The unique cache key will be generated by concatenating the user id of the request, and the scope of the view being accessed. """ scope_attr = 'throttle_scope' def __init__(self): # Override the usual SimpleRateThrottle, because we can't determine # the rate until called by the view. pass def allow_request(self, request, view): # We can only determine the scope once we're called by the view. self.scope = getattr(view, self.scope_attr, None) # If a view does not have a `throttle_scope` always allow the request if not self.scope: return True # Determine the allowed request rate as we normally would during # the `__init__` call. self.rate = self.get_rate() self.num_requests, self.duration = self.parse_rate(self.rate) # We can now proceed as normal. return super(ScopedRateThrottle, self).allow_request(request, view) def get_cache_key(self, request, view): """ If `view.throttle_scope` is not set, don't apply this throttle. Otherwise generate the unique cache key by concatenating the user id with the '.throttle_scope` property of the view. """ if request.user.is_authenticated: ident = request.user.pk else: ident = self.get_ident(request) return self.cache_format % { 'scope': self.scope, 'ident': ident }
from __future__ import unicode_literals import django.template.loader import pytest from django.conf.urls import url from django.core.exceptions import ImproperlyConfigured, PermissionDenied from django.http import Http404 from django.template import TemplateDoesNotExist, engines from django.test import TestCase, override_settings from django.utils import six from rest_framework import status from rest_framework.decorators import api_view, renderer_classes from rest_framework.renderers import TemplateHTMLRenderer from rest_framework.response import Response @api_view(('GET',)) @renderer_classes((TemplateHTMLRenderer,)) def example(request): """ A view that can returns an HTML representation. """ data = {'object': 'foobar'} return Response(data, template_name='example.html') @api_view(('GET',)) @renderer_classes((TemplateHTMLRenderer,)) def permission_denied(request): raise PermissionDenied() @api_view(('GET',)) @renderer_classes((TemplateHTMLRenderer,)) def not_found(request): raise Http404() urlpatterns = [ url(r'^$', example), url(r'^permission_denied$', permission_denied), url(r'^not_found$', not_found), ] @override_settings(ROOT_URLCONF='tests.test_htmlrenderer') class TemplateHTMLRendererTests(TestCase): def setUp(self): class MockResponse(object): template_name = None self.mock_response = MockResponse() self._monkey_patch_get_template() def _monkey_patch_get_template(self): """ Monkeypatch get_template """ self.get_template = django.template.loader.get_template def get_template(template_name, dirs=None): if template_name == 'example.html': return engines['django'].from_string("example: {{ object }}") raise TemplateDoesNotExist(template_name) def select_template(template_name_list, dirs=None, using=None): if template_name_list == ['example.html']: return engines['django'].from_string("example: {{ object }}") raise TemplateDoesNotExist(template_name_list[0]) django.template.loader.get_template = get_template django.template.loader.select_template = select_template def tearDown(self): """ Revert monkeypatching """ django.template.loader.get_template = self.get_template def test_simple_html_view(self): response = self.client.get('/') self.assertContains(response, "example: foobar") self.assertEqual(response['Content-Type'], 'text/html; charset=utf-8') def test_not_found_html_view(self): response = self.client.get('/not_found') self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND) self.assertEqual(response.content, six.b("404 Not Found")) self.assertEqual(response['Content-Type'], 'text/html; charset=utf-8') def test_permission_denied_html_view(self): response = self.client.get('/permission_denied') self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN) self.assertEqual(response.content, six.b("403 Forbidden")) self.assertEqual(response['Content-Type'], 'text/html; charset=utf-8') # 2 tests below are based on order of if statements in corresponding method # of TemplateHTMLRenderer def test_get_template_names_returns_own_template_name(self): renderer = TemplateHTMLRenderer() renderer.template_name = 'test_template' template_name = renderer.get_template_names(self.mock_response, view={}) assert template_name == ['test_template'] def test_get_template_names_returns_view_template_name(self): renderer = TemplateHTMLRenderer() class MockResponse(object): template_name = None class MockView(object): def get_template_names(self): return ['template from get_template_names method'] class MockView2(object): template_name = 'template from template_name attribute' template_name 
= renderer.get_template_names(self.mock_response, MockView()) assert template_name == ['template from get_template_names method'] template_name = renderer.get_template_names(self.mock_response, MockView2()) assert template_name == ['template from template_name attribute'] def test_get_template_names_raises_error_if_no_template_found(self): renderer = TemplateHTMLRenderer() with pytest.raises(ImproperlyConfigured): renderer.get_template_names(self.mock_response, view=object()) @override_settings(ROOT_URLCONF='tests.test_htmlrenderer') class TemplateHTMLRendererExceptionTests(TestCase): def setUp(self): """ Monkeypatch get_template """ self.get_template = django.template.loader.get_template def get_template(template_name): if template_name == '404.html': return engines['django'].from_string("404: {{ detail }}") if template_name == '403.html': return engines['django'].from_string("403: {{ detail }}") raise TemplateDoesNotExist(template_name) django.template.loader.get_template = get_template def tearDown(self): """ Revert monkeypatching """ django.template.loader.get_template = self.get_template def test_not_found_html_view_with_template(self): response = self.client.get('/not_found') self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND) self.assertTrue(response.content in ( six.b("404: Not found"), six.b("404 Not Found"))) self.assertEqual(response['Content-Type'], 'text/html; charset=utf-8') def test_permission_denied_html_view_with_template(self): response = self.client.get('/permission_denied') self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN) self.assertTrue(response.content in ( six.b("403: Permission denied"), six.b("403 Forbidden"))) self.assertEqual(response['Content-Type'], 'text/html; charset=utf-8')
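The two get_template_names tests above pin down a lookup order. A simplified sketch of one order consistent with those tests, written as a standalone helper for illustration rather than the renderer's actual source (the real method also feeds the exception-page templates exercised further below):

from django.core.exceptions import ImproperlyConfigured

def resolve_template_names(renderer, response, view):
    # Order implied by the tests: explicit response template first, then the
    # renderer's own attribute, then the view hook, then the view attribute;
    # with none of these set the configuration is considered broken.
    if getattr(response, 'template_name', None):
        return [response.template_name]
    if getattr(renderer, 'template_name', None):
        return [renderer.template_name]
    if hasattr(view, 'get_template_names'):
        return view.get_template_names()
    if hasattr(view, 'template_name'):
        return [view.template_name]
    raise ImproperlyConfigured('No template_name on response, renderer or view')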
dmwyatt/django-rest-framework
tests/test_htmlrenderer.py
rest_framework/throttling.py
import itertools import numpy as np import pytest import pandas as pd from pandas.core.internals import ExtensionBlock from .base import BaseExtensionTests class BaseReshapingTests(BaseExtensionTests): """Tests for reshaping and concatenation.""" @pytest.mark.parametrize('in_frame', [True, False]) def test_concat(self, data, in_frame): wrapped = pd.Series(data) if in_frame: wrapped = pd.DataFrame(wrapped) result = pd.concat([wrapped, wrapped], ignore_index=True) assert len(result) == len(data) * 2 if in_frame: dtype = result.dtypes[0] else: dtype = result.dtype assert dtype == data.dtype assert isinstance(result._data.blocks[0], ExtensionBlock) @pytest.mark.parametrize('in_frame', [True, False]) def test_concat_all_na_block(self, data_missing, in_frame): valid_block = pd.Series(data_missing.take([1, 1]), index=[0, 1]) na_block = pd.Series(data_missing.take([0, 0]), index=[2, 3]) if in_frame: valid_block = pd.DataFrame({"a": valid_block}) na_block = pd.DataFrame({"a": na_block}) result = pd.concat([valid_block, na_block]) if in_frame: expected = pd.DataFrame({"a": data_missing.take([1, 1, 0, 0])}) self.assert_frame_equal(result, expected) else: expected = pd.Series(data_missing.take([1, 1, 0, 0])) self.assert_series_equal(result, expected) def test_concat_mixed_dtypes(self, data): # https://github.com/pandas-dev/pandas/issues/20762 df1 = pd.DataFrame({'A': data[:3]}) df2 = pd.DataFrame({"A": [1, 2, 3]}) df3 = pd.DataFrame({"A": ['a', 'b', 'c']}).astype('category') dfs = [df1, df2, df3] # dataframes result = pd.concat(dfs) expected = pd.concat([x.astype(object) for x in dfs]) self.assert_frame_equal(result, expected) # series result = pd.concat([x['A'] for x in dfs]) expected = pd.concat([x['A'].astype(object) for x in dfs]) self.assert_series_equal(result, expected) # simple test for just EA and one other result = pd.concat([df1, df2]) expected = pd.concat([df1.astype('object'), df2.astype('object')]) self.assert_frame_equal(result, expected) result = pd.concat([df1['A'], df2['A']]) expected = pd.concat([df1['A'].astype('object'), df2['A'].astype('object')]) self.assert_series_equal(result, expected) def test_concat_columns(self, data, na_value): df1 = pd.DataFrame({'A': data[:3]}) df2 = pd.DataFrame({'B': [1, 2, 3]}) expected = pd.DataFrame({'A': data[:3], 'B': [1, 2, 3]}) result = pd.concat([df1, df2], axis=1) self.assert_frame_equal(result, expected) result = pd.concat([df1['A'], df2['B']], axis=1) self.assert_frame_equal(result, expected) # non-aligned df2 = pd.DataFrame({'B': [1, 2, 3]}, index=[1, 2, 3]) expected = pd.DataFrame({ 'A': data._from_sequence(list(data[:3]) + [na_value], dtype=data.dtype), 'B': [np.nan, 1, 2, 3]}) result = pd.concat([df1, df2], axis=1) self.assert_frame_equal(result, expected) result = pd.concat([df1['A'], df2['B']], axis=1) self.assert_frame_equal(result, expected) def test_align(self, data, na_value): a = data[:3] b = data[2:5] r1, r2 = pd.Series(a).align(pd.Series(b, index=[1, 2, 3])) # Assumes that the ctor can take a list of scalars of the type e1 = pd.Series(data._from_sequence(list(a) + [na_value], dtype=data.dtype)) e2 = pd.Series(data._from_sequence([na_value] + list(b), dtype=data.dtype)) self.assert_series_equal(r1, e1) self.assert_series_equal(r2, e2) def test_align_frame(self, data, na_value): a = data[:3] b = data[2:5] r1, r2 = pd.DataFrame({'A': a}).align( pd.DataFrame({'A': b}, index=[1, 2, 3]) ) # Assumes that the ctor can take a list of scalars of the type e1 = pd.DataFrame({'A': data._from_sequence(list(a) + [na_value], 
dtype=data.dtype)}) e2 = pd.DataFrame({'A': data._from_sequence([na_value] + list(b), dtype=data.dtype)}) self.assert_frame_equal(r1, e1) self.assert_frame_equal(r2, e2) def test_align_series_frame(self, data, na_value): # https://github.com/pandas-dev/pandas/issues/20576 ser = pd.Series(data, name='a') df = pd.DataFrame({"col": np.arange(len(ser) + 1)}) r1, r2 = ser.align(df) e1 = pd.Series(data._from_sequence(list(data) + [na_value], dtype=data.dtype), name=ser.name) self.assert_series_equal(r1, e1) self.assert_frame_equal(r2, df) def test_set_frame_expand_regular_with_extension(self, data): df = pd.DataFrame({"A": [1] * len(data)}) df['B'] = data expected = pd.DataFrame({"A": [1] * len(data), "B": data}) self.assert_frame_equal(df, expected) def test_set_frame_expand_extension_with_regular(self, data): df = pd.DataFrame({'A': data}) df['B'] = [1] * len(data) expected = pd.DataFrame({"A": data, "B": [1] * len(data)}) self.assert_frame_equal(df, expected) def test_set_frame_overwrite_object(self, data): # https://github.com/pandas-dev/pandas/issues/20555 df = pd.DataFrame({"A": [1] * len(data)}, dtype=object) df['A'] = data assert df.dtypes['A'] == data.dtype def test_merge(self, data, na_value): # GH-20743 df1 = pd.DataFrame({'ext': data[:3], 'int1': [1, 2, 3], 'key': [0, 1, 2]}) df2 = pd.DataFrame({'int2': [1, 2, 3, 4], 'key': [0, 0, 1, 3]}) res = pd.merge(df1, df2) exp = pd.DataFrame( {'int1': [1, 1, 2], 'int2': [1, 2, 3], 'key': [0, 0, 1], 'ext': data._from_sequence([data[0], data[0], data[1]], dtype=data.dtype)}) self.assert_frame_equal(res, exp[['ext', 'int1', 'key', 'int2']]) res = pd.merge(df1, df2, how='outer') exp = pd.DataFrame( {'int1': [1, 1, 2, 3, np.nan], 'int2': [1, 2, 3, np.nan, 4], 'key': [0, 0, 1, 2, 3], 'ext': data._from_sequence( [data[0], data[0], data[1], data[2], na_value], dtype=data.dtype)}) self.assert_frame_equal(res, exp[['ext', 'int1', 'key', 'int2']]) def test_merge_on_extension_array(self, data): # GH 23020 a, b = data[:2] key = type(data)._from_sequence([a, b], dtype=data.dtype) df = pd.DataFrame({"key": key, "val": [1, 2]}) result = pd.merge(df, df, on='key') expected = pd.DataFrame({"key": key, "val_x": [1, 2], "val_y": [1, 2]}) self.assert_frame_equal(result, expected) # order result = pd.merge(df.iloc[[1, 0]], df, on='key') expected = expected.iloc[[1, 0]].reset_index(drop=True) self.assert_frame_equal(result, expected) def test_merge_on_extension_array_duplicates(self, data): # GH 23020 a, b = data[:2] key = type(data)._from_sequence([a, b, a], dtype=data.dtype) df1 = pd.DataFrame({"key": key, "val": [1, 2, 3]}) df2 = pd.DataFrame({"key": key, "val": [1, 2, 3]}) result = pd.merge(df1, df2, on='key') expected = pd.DataFrame({ "key": key.take([0, 0, 0, 0, 1]), "val_x": [1, 1, 3, 3, 2], "val_y": [1, 3, 1, 3, 2], }) self.assert_frame_equal(result, expected) @pytest.mark.parametrize("columns", [ ["A", "B"], pd.MultiIndex.from_tuples([('A', 'a'), ('A', 'b')], names=['outer', 'inner']), ]) def test_stack(self, data, columns): df = pd.DataFrame({"A": data[:5], "B": data[:5]}) df.columns = columns result = df.stack() expected = df.astype(object).stack() # we need a second astype(object), in case the constructor inferred # object -> specialized, as is done for period. expected = expected.astype(object) if isinstance(expected, pd.Series): assert result.dtype == df.iloc[:, 0].dtype else: assert all(result.dtypes == df.iloc[:, 0].dtype) result = result.astype(object) self.assert_equal(result, expected) @pytest.mark.parametrize("index", [ # Two levels, uniform. 
pd.MultiIndex.from_product(([['A', 'B'], ['a', 'b']]), names=['a', 'b']), # non-uniform pd.MultiIndex.from_tuples([('A', 'a'), ('A', 'b'), ('B', 'b')]), # three levels, non-uniform pd.MultiIndex.from_product([('A', 'B'), ('a', 'b', 'c'), (0, 1, 2)]), pd.MultiIndex.from_tuples([ ('A', 'a', 1), ('A', 'b', 0), ('A', 'a', 0), ('B', 'a', 0), ('B', 'c', 1), ]), ]) @pytest.mark.parametrize("obj", ["series", "frame"]) def test_unstack(self, data, index, obj): data = data[:len(index)] if obj == "series": ser = pd.Series(data, index=index) else: ser = pd.DataFrame({"A": data, "B": data}, index=index) n = index.nlevels levels = list(range(n)) # [0, 1, 2] # [(0,), (1,), (2,), (0, 1), (0, 2), (1, 0), (1, 2), (2, 0), (2, 1)] combinations = itertools.chain.from_iterable( itertools.permutations(levels, i) for i in range(1, n) ) for level in combinations: result = ser.unstack(level=level) assert all(isinstance(result[col].array, type(data)) for col in result.columns) expected = ser.astype(object).unstack(level=level) result = result.astype(object) self.assert_frame_equal(result, expected)
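BaseReshapingTests, like the other classes in this base package, is meant to be inherited by an ExtensionArray author's own test suite, with pytest fixtures such as data, data_missing and na_value supplying the concrete array. A minimal sketch, where MyDtype and MyArray are hypothetical stand-ins for a real ExtensionDtype/ExtensionArray pair:

import pytest
from pandas.tests.extension import base

@pytest.fixture
def dtype():
    return MyDtype()  # hypothetical ExtensionDtype

@pytest.fixture
def data(dtype):
    # A length-100 array of valid values, the convention the shared
    # extension-test fixtures expect.
    return MyArray(range(100), dtype=dtype)  # hypothetical ExtensionArray

@pytest.fixture
def data_missing(dtype):
    # [missing, valid], as consumed by e.g. test_concat_all_na_block above.
    return MyArray([None, 1], dtype=dtype)

@pytest.fixture
def na_value():
    return None

class TestReshaping(base.BaseReshapingTests):
    pass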
import datetime import numpy as np import pytest import pandas as pd from pandas import DataFrame, Index, MultiIndex, Series from pandas.core.util.hashing import _hash_scalar, hash_tuple, hash_tuples from pandas.util import hash_array, hash_pandas_object import pandas.util.testing as tm @pytest.fixture(params=[ Series([1, 2, 3] * 3, dtype="int32"), Series([None, 2.5, 3.5] * 3, dtype="float32"), Series(["a", "b", "c"] * 3, dtype="category"), Series(["d", "e", "f"] * 3), Series([True, False, True] * 3), Series(pd.date_range("20130101", periods=9)), Series(pd.date_range("20130101", periods=9, tz="US/Eastern")), Series(pd.timedelta_range("2000", periods=9))]) def series(request): return request.param @pytest.fixture(params=[True, False]) def index(request): return request.param def _check_equal(obj, **kwargs): """ Check that hashing an objects produces the same value each time. Parameters ---------- obj : object The object to hash. kwargs : kwargs Keyword arguments to pass to the hashing function. """ a = hash_pandas_object(obj, **kwargs) b = hash_pandas_object(obj, **kwargs) tm.assert_series_equal(a, b) def _check_not_equal_with_index(obj): """ Check the hash of an object with and without its index is not the same. Parameters ---------- obj : object The object to hash. """ if not isinstance(obj, Index): a = hash_pandas_object(obj, index=True) b = hash_pandas_object(obj, index=False) if len(obj): assert not (a == b).all() def test_consistency(): # Check that our hash doesn't change because of a mistake # in the actual code; this is the ground truth. result = hash_pandas_object(Index(["foo", "bar", "baz"])) expected = Series(np.array([3600424527151052760, 1374399572096150070, 477881037637427054], dtype="uint64"), index=["foo", "bar", "baz"]) tm.assert_series_equal(result, expected) def test_hash_array(series): arr = series.values tm.assert_numpy_array_equal(hash_array(arr), hash_array(arr)) @pytest.mark.parametrize("arr2", [ np.array([3, 4, "All"]), np.array([3, 4, "All"], dtype=object), ]) def test_hash_array_mixed(arr2): result1 = hash_array(np.array(["3", "4", "All"])) result2 = hash_array(arr2) tm.assert_numpy_array_equal(result1, result2) @pytest.mark.parametrize("val", [5, "foo", pd.Timestamp("20130101")]) def test_hash_array_errors(val): msg = "must pass a ndarray-like" with pytest.raises(TypeError, match=msg): hash_array(val) def test_hash_tuples(): tuples = [(1, "one"), (1, "two"), (2, "one")] result = hash_tuples(tuples) expected = hash_pandas_object(MultiIndex.from_tuples(tuples)).values tm.assert_numpy_array_equal(result, expected) result = hash_tuples(tuples[0]) assert result == expected[0] @pytest.mark.parametrize("tup", [ (1, "one"), (1, np.nan), (1.0, pd.NaT, "A"), ("A", pd.Timestamp("2012-01-01"))]) def test_hash_tuple(tup): # Test equivalence between # hash_tuples and hash_tuple. 
result = hash_tuple(tup) expected = hash_tuples([tup])[0] assert result == expected @pytest.mark.parametrize("val", [ 1, 1.4, "A", b"A", u"A", pd.Timestamp("2012-01-01"), pd.Timestamp("2012-01-01", tz="Europe/Brussels"), datetime.datetime(2012, 1, 1), pd.Timestamp("2012-01-01", tz="EST").to_pydatetime(), pd.Timedelta("1 days"), datetime.timedelta(1), pd.Period("2012-01-01", freq="D"), pd.Interval(0, 1), np.nan, pd.NaT, None]) def test_hash_scalar(val): result = _hash_scalar(val) expected = hash_array(np.array([val], dtype=object), categorize=True) assert result[0] == expected[0] @pytest.mark.parametrize("val", [5, "foo", pd.Timestamp("20130101")]) def test_hash_tuples_err(val): msg = "must be convertible to a list-of-tuples" with pytest.raises(TypeError, match=msg): hash_tuples(val) def test_multiindex_unique(): mi = MultiIndex.from_tuples([(118, 472), (236, 118), (51, 204), (102, 51)]) assert mi.is_unique is True result = hash_pandas_object(mi) assert result.is_unique is True def test_multiindex_objects(): mi = MultiIndex(levels=[["b", "d", "a"], [1, 2, 3]], codes=[[0, 1, 0, 2], [2, 0, 0, 1]], names=["col1", "col2"]) recons = mi._sort_levels_monotonic() # These are equal. assert mi.equals(recons) assert Index(mi.values).equals(Index(recons.values)) # _hashed_values and hash_pandas_object(..., index=False) equivalency. expected = hash_pandas_object(mi, index=False).values result = mi._hashed_values tm.assert_numpy_array_equal(result, expected) expected = hash_pandas_object(recons, index=False).values result = recons._hashed_values tm.assert_numpy_array_equal(result, expected) expected = mi._hashed_values result = recons._hashed_values # Values should match, but in different order. tm.assert_numpy_array_equal(np.sort(result), np.sort(expected)) @pytest.mark.parametrize("obj", [ Series([1, 2, 3]), Series([1.0, 1.5, 3.2]), Series([1.0, 1.5, np.nan]), Series([1.0, 1.5, 3.2], index=[1.5, 1.1, 3.3]), Series(["a", "b", "c"]), Series(["a", np.nan, "c"]), Series(["a", None, "c"]), Series([True, False, True]), Series(), Index([1, 2, 3]), Index([True, False, True]), DataFrame({"x": ["a", "b", "c"], "y": [1, 2, 3]}), DataFrame(), tm.makeMissingDataframe(), tm.makeMixedDataFrame(), tm.makeTimeDataFrame(), tm.makeTimeSeries(), tm.makeTimedeltaIndex(), tm.makePeriodIndex(), Series(tm.makePeriodIndex()), Series(pd.date_range("20130101", periods=3, tz="US/Eastern")), MultiIndex.from_product([range(5), ["foo", "bar", "baz"], pd.date_range("20130101", periods=2)]), MultiIndex.from_product([pd.CategoricalIndex(list("aabc")), range(3)]) ]) def test_hash_pandas_object(obj, index): _check_equal(obj, index=index) _check_not_equal_with_index(obj) def test_hash_pandas_object2(series, index): _check_equal(series, index=index) _check_not_equal_with_index(series) @pytest.mark.parametrize("obj", [ Series([], dtype="float64"), Series([], dtype="object"), Index([])]) def test_hash_pandas_empty_object(obj, index): # These are by-definition the same with # or without the index as the data is empty. _check_equal(obj, index=index) @pytest.mark.parametrize("s1", [ Series(["a", "b", "c", "d"]), Series([1000, 2000, 3000, 4000]), Series(pd.date_range(0, periods=4))]) @pytest.mark.parametrize("categorize", [True, False]) def test_categorical_consistency(s1, categorize): # see gh-15143 # # Check that categoricals hash consistent with their values, # not codes. This should work for categoricals of any dtype. 
s2 = s1.astype("category").cat.set_categories(s1) s3 = s2.cat.set_categories(list(reversed(s1))) # These should all hash identically. h1 = hash_pandas_object(s1, categorize=categorize) h2 = hash_pandas_object(s2, categorize=categorize) h3 = hash_pandas_object(s3, categorize=categorize) tm.assert_series_equal(h1, h2) tm.assert_series_equal(h1, h3) def test_categorical_with_nan_consistency(): c = pd.Categorical.from_codes( [-1, 0, 1, 2, 3, 4], categories=pd.date_range("2012-01-01", periods=5, name="B")) expected = hash_array(c, categorize=False) c = pd.Categorical.from_codes( [-1, 0], categories=[pd.Timestamp("2012-01-01")]) result = hash_array(c, categorize=False) assert result[0] in expected assert result[1] in expected @pytest.mark.parametrize("obj", [pd.Timestamp("20130101")]) def test_pandas_errors(obj): msg = "Unexpected type for hashing" with pytest.raises(TypeError, match=msg): hash_pandas_object(obj) def test_hash_keys(): # Using different hash keys, should have # different hashes for the same data. # # This only matters for object dtypes. obj = Series(list("abc")) a = hash_pandas_object(obj, hash_key="9876543210123456") b = hash_pandas_object(obj, hash_key="9876543210123465") assert (a != b).all() def test_invalid_key(): # This only matters for object dtypes. msg = "key should be a 16-byte string encoded" with pytest.raises(ValueError, match=msg): hash_pandas_object(Series(list("abc")), hash_key="foo") def test_already_encoded(index): # If already encoded, then ok. obj = Series(list("abc")).str.encode("utf8") _check_equal(obj, index=index) def test_alternate_encoding(index): obj = Series(list("abc")) _check_equal(obj, index=index, encoding="ascii") @pytest.mark.parametrize("l_exp", range(8)) @pytest.mark.parametrize("l_add", [0, 1]) def test_same_len_hash_collisions(l_exp, l_add): length = 2**(l_exp + 8) + l_add s = tm.rands_array(length, 2) result = hash_array(s, "utf8") assert not result[0] == result[1] def test_hash_collisions(): # Hash collisions are bad. # # https://github.com/pandas-dev/pandas/issues/14711#issuecomment-264885726 hashes = ["Ingrid-9Z9fKIZmkO7i7Cn51Li34pJm44fgX6DYGBNj3VPlOH50m7HnBlPxfIwFMrcNJNMP6PSgLmwWnInciMWrCSAlLEvt7JkJl4IxiMrVbXSa8ZQoVaq5xoQPjltuJEfwdNlO6jo8qRRHvD8sBEBMQASrRa6TsdaPTPCBo3nwIBpE7YzzmyH0vMBhjQZLx1aCT7faSEx7PgFxQhHdKFWROcysamgy9iVj8DO2Fmwg1NNl93rIAqC3mdqfrCxrzfvIY8aJdzin2cHVzy3QUJxZgHvtUtOLxoqnUHsYbNTeq0xcLXpTZEZCxD4PGubIuCNf32c33M7HFsnjWSEjE2yVdWKhmSVodyF8hFYVmhYnMCztQnJrt3O8ZvVRXd5IKwlLexiSp4h888w7SzAIcKgc3g5XQJf6MlSMftDXm9lIsE1mJNiJEv6uY6pgvC3fUPhatlR5JPpVAHNSbSEE73MBzJrhCAbOLXQumyOXigZuPoME7QgJcBalliQol7YZ9", # noqa "Tim-b9MddTxOWW2AT1Py6vtVbZwGAmYCjbp89p8mxsiFoVX4FyDOF3wFiAkyQTUgwg9sVqVYOZo09Dh1AzhFHbgij52ylF0SEwgzjzHH8TGY8Lypart4p4onnDoDvVMBa0kdthVGKl6K0BDVGzyOXPXKpmnMF1H6rJzqHJ0HywfwS4XYpVwlAkoeNsiicHkJUFdUAhG229INzvIAiJuAHeJDUoyO4DCBqtoZ5TDend6TK7Y914yHlfH3g1WZu5LksKv68VQHJriWFYusW5e6ZZ6dKaMjTwEGuRgdT66iU5nqWTHRH8WSzpXoCFwGcTOwyuqPSe0fTe21DVtJn1FKj9F9nEnR9xOvJUO7E0piCIF4Ad9yAIDY4DBimpsTfKXCu1vdHpKYerzbndfuFe5AhfMduLYZJi5iAw8qKSwR5h86ttXV0Mc0QmXz8dsRvDgxjXSmupPxBggdlqUlC828hXiTPD7am0yETBV0F3bEtvPiNJfremszcV8NcqAoARMe"] # noqa # These should be different. 
result1 = hash_array(np.asarray(hashes[0:1], dtype=object), "utf8") expected1 = np.array([14963968704024874985], dtype=np.uint64) tm.assert_numpy_array_equal(result1, expected1) result2 = hash_array(np.asarray(hashes[1:2], dtype=object), "utf8") expected2 = np.array([16428432627716348016], dtype=np.uint64) tm.assert_numpy_array_equal(result2, expected2) result = hash_array(np.asarray(hashes, dtype=object), "utf8") tm.assert_numpy_array_equal(result, np.concatenate([expected1, expected2], axis=0))
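The behaviour these tests assert -- deterministic, index-sensitive, uint64 hashing -- can be reproduced directly with the public helpers; a small illustrative snippet:

import pandas as pd
from pandas.util import hash_array, hash_pandas_object

s = pd.Series(["a", "b", "c"])

# Hashing is deterministic: repeated calls with the same arguments agree.
assert hash_pandas_object(s).equals(hash_pandas_object(s))

# Including the index changes the hashes for non-empty objects.
with_index = hash_pandas_object(s, index=True)
without_index = hash_pandas_object(s, index=False)
assert not (with_index == without_index).all()

# hash_array operates on the underlying ndarray and yields uint64 values.
print(hash_array(s.values).dtype)  # uint64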
MJuddBooth/pandas
pandas/tests/util/test_hashing.py
pandas/tests/extension/base/reshaping.py
# flake8: noqa from .common import ( is_array_like, is_bool, is_bool_dtype, is_categorical, is_categorical_dtype, is_complex, is_complex_dtype, is_datetime64_any_dtype, is_datetime64_dtype, is_datetime64_ns_dtype, is_datetime64tz_dtype, is_datetimetz, is_dict_like, is_dtype_equal, is_extension_array_dtype, is_extension_type, is_file_like, is_float, is_float_dtype, is_hashable, is_int64_dtype, is_integer, is_integer_dtype, is_interval, is_interval_dtype, is_iterator, is_list_like, is_named_tuple, is_number, is_numeric_dtype, is_object_dtype, is_period, is_period_dtype, is_re, is_re_compilable, is_scalar, is_signed_integer_dtype, is_sparse, is_string_dtype, is_timedelta64_dtype, is_timedelta64_ns_dtype, is_unsigned_integer_dtype, pandas_dtype)
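These dtype predicates are re-exported publicly under pandas.api.types; a few illustrative checks:

import pandas as pd
from pandas.api.types import (
    is_bool_dtype, is_datetime64_any_dtype, is_integer_dtype,
    is_list_like, pandas_dtype)

print(is_integer_dtype(pd.Series([1, 2, 3])))                      # True
print(is_bool_dtype(pd.Series([True, False])))                     # True
print(is_datetime64_any_dtype(pd.date_range("2019", periods=3)))   # True
print(is_list_like("a string"))                                    # False: strings are not list-like
print(pandas_dtype("int64"))                                       # dtype('int64')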
MJuddBooth/pandas
pandas/tests/util/test_hashing.py
pandas/core/dtypes/api.py
# -*- coding: utf-8 -*- from collections import defaultdict from functools import partial import itertools import operator import re import numpy as np from pandas._libs import internals as libinternals, lib from pandas.compat import map, range, zip from pandas.util._validators import validate_bool_kwarg from pandas.core.dtypes.cast import ( find_common_type, infer_dtype_from_scalar, maybe_convert_objects, maybe_promote) from pandas.core.dtypes.common import ( _NS_DTYPE, is_datetimelike_v_numeric, is_extension_array_dtype, is_extension_type, is_list_like, is_numeric_v_string_like, is_scalar) import pandas.core.dtypes.concat as _concat from pandas.core.dtypes.generic import ABCExtensionArray, ABCSeries from pandas.core.dtypes.missing import isna import pandas.core.algorithms as algos from pandas.core.arrays.sparse import _maybe_to_sparse from pandas.core.base import PandasObject from pandas.core.index import Index, MultiIndex, ensure_index from pandas.core.indexing import maybe_convert_indices from pandas.io.formats.printing import pprint_thing from .blocks import ( Block, CategoricalBlock, DatetimeTZBlock, ExtensionBlock, ObjectValuesExtensionBlock, _extend_blocks, _merge_blocks, _safe_reshape, get_block_type, make_block) from .concat import ( # all for concatenate_block_managers combine_concat_plans, concatenate_join_units, get_mgr_concatenation_plan, is_uniform_join_units) # TODO: flexible with index=None and/or items=None class BlockManager(PandasObject): """ Core internal data structure to implement DataFrame, Series, Panel, etc. Manage a bunch of labeled 2D mixed-type ndarrays. Essentially it's a lightweight blocked set of labeled data to be manipulated by the DataFrame public API class Attributes ---------- shape ndim axes values items Methods ------- set_axis(axis, new_labels) copy(deep=True) get_dtype_counts get_ftype_counts get_dtypes get_ftypes apply(func, axes, block_filter_fn) get_bool_data get_numeric_data get_slice(slice_like, axis) get(label) iget(loc) take(indexer, axis) reindex_axis(new_labels, axis) reindex_indexer(new_labels, indexer, axis) delete(label) insert(loc, label, value) set(label, value) Parameters ---------- Notes ----- This is *not* a public API class """ __slots__ = ['axes', 'blocks', '_ndim', '_shape', '_known_consolidated', '_is_consolidated', '_blknos', '_blklocs'] def __init__(self, blocks, axes, do_integrity_check=True): self.axes = [ensure_index(ax) for ax in axes] self.blocks = tuple(blocks) for block in blocks: if block.is_sparse: if len(block.mgr_locs) != 1: raise AssertionError("Sparse block refers to multiple " "items") else: if self.ndim != block.ndim: raise AssertionError( 'Number of Block dimensions ({block}) must equal ' 'number of axes ({self})'.format(block=block.ndim, self=self.ndim)) if do_integrity_check: self._verify_integrity() self._consolidate_check() self._rebuild_blknos_and_blklocs() def make_empty(self, axes=None): """ return an empty BlockManager with the items axis of len 0 """ if axes is None: axes = [ensure_index([])] + [ensure_index(a) for a in self.axes[1:]] # preserve dtype if possible if self.ndim == 1: blocks = np.array([], dtype=self.array_dtype) else: blocks = [] return self.__class__(blocks, axes) def __nonzero__(self): return True # Python3 compat __bool__ = __nonzero__ @property def shape(self): return tuple(len(ax) for ax in self.axes) @property def ndim(self): return len(self.axes) def set_axis(self, axis, new_labels): new_labels = ensure_index(new_labels) old_len = len(self.axes[axis]) new_len = len(new_labels) if 
new_len != old_len: raise ValueError( 'Length mismatch: Expected axis has {old} elements, new ' 'values have {new} elements'.format(old=old_len, new=new_len)) self.axes[axis] = new_labels def rename_axis(self, mapper, axis, copy=True, level=None): """ Rename one of axes. Parameters ---------- mapper : unary callable axis : int copy : boolean, default True level : int, default None """ obj = self.copy(deep=copy) obj.set_axis(axis, _transform_index(self.axes[axis], mapper, level)) return obj @property def _is_single_block(self): if self.ndim == 1: return True if len(self.blocks) != 1: return False blk = self.blocks[0] return (blk.mgr_locs.is_slice_like and blk.mgr_locs.as_slice == slice(0, len(self), 1)) def _rebuild_blknos_and_blklocs(self): """ Update mgr._blknos / mgr._blklocs. """ new_blknos = np.empty(self.shape[0], dtype=np.int64) new_blklocs = np.empty(self.shape[0], dtype=np.int64) new_blknos.fill(-1) new_blklocs.fill(-1) for blkno, blk in enumerate(self.blocks): rl = blk.mgr_locs new_blknos[rl.indexer] = blkno new_blklocs[rl.indexer] = np.arange(len(rl)) if (new_blknos == -1).any(): raise AssertionError("Gaps in blk ref_locs") self._blknos = new_blknos self._blklocs = new_blklocs @property def items(self): return self.axes[0] def _get_counts(self, f): """ return a dict of the counts of the function in BlockManager """ self._consolidate_inplace() counts = dict() for b in self.blocks: v = f(b) counts[v] = counts.get(v, 0) + b.shape[0] return counts def get_dtype_counts(self): return self._get_counts(lambda b: b.dtype.name) def get_ftype_counts(self): return self._get_counts(lambda b: b.ftype) def get_dtypes(self): dtypes = np.array([blk.dtype for blk in self.blocks]) return algos.take_1d(dtypes, self._blknos, allow_fill=False) def get_ftypes(self): ftypes = np.array([blk.ftype for blk in self.blocks]) return algos.take_1d(ftypes, self._blknos, allow_fill=False) def __getstate__(self): block_values = [b.values for b in self.blocks] block_items = [self.items[b.mgr_locs.indexer] for b in self.blocks] axes_array = [ax for ax in self.axes] extra_state = { '0.14.1': { 'axes': axes_array, 'blocks': [dict(values=b.values, mgr_locs=b.mgr_locs.indexer) for b in self.blocks] } } # First three elements of the state are to maintain forward # compatibility with 0.13.1. return axes_array, block_values, block_items, extra_state def __setstate__(self, state): def unpickle_block(values, mgr_locs): return make_block(values, placement=mgr_locs) if (isinstance(state, tuple) and len(state) >= 4 and '0.14.1' in state[3]): state = state[3]['0.14.1'] self.axes = [ensure_index(ax) for ax in state['axes']] self.blocks = tuple(unpickle_block(b['values'], b['mgr_locs']) for b in state['blocks']) else: # discard anything after 3rd, support beta pickling format for a # little while longer ax_arrays, bvalues, bitems = state[:3] self.axes = [ensure_index(ax) for ax in ax_arrays] if len(bitems) == 1 and self.axes[0].equals(bitems[0]): # This is a workaround for pre-0.14.1 pickles that didn't # support unpickling multi-block frames/panels with non-unique # columns/items, because given a manager with items ["a", "b", # "a"] there's no way of knowing which block's "a" is where. # # Single-block case can be supported under the assumption that # block items corresponded to manager items 1-to-1. 
all_mgr_locs = [slice(0, len(bitems[0]))] else: all_mgr_locs = [self.axes[0].get_indexer(blk_items) for blk_items in bitems] self.blocks = tuple( unpickle_block(values, mgr_locs) for values, mgr_locs in zip(bvalues, all_mgr_locs)) self._post_setstate() def _post_setstate(self): self._is_consolidated = False self._known_consolidated = False self._rebuild_blknos_and_blklocs() def __len__(self): return len(self.items) def __unicode__(self): output = pprint_thing(self.__class__.__name__) for i, ax in enumerate(self.axes): if i == 0: output += u'\nItems: {ax}'.format(ax=ax) else: output += u'\nAxis {i}: {ax}'.format(i=i, ax=ax) for block in self.blocks: output += u'\n{block}'.format(block=pprint_thing(block)) return output def _verify_integrity(self): mgr_shape = self.shape tot_items = sum(len(x.mgr_locs) for x in self.blocks) for block in self.blocks: if block._verify_integrity and block.shape[1:] != mgr_shape[1:]: construction_error(tot_items, block.shape[1:], self.axes) if len(self.items) != tot_items: raise AssertionError('Number of manager items must equal union of ' 'block items\n# manager items: {0}, # ' 'tot_items: {1}'.format( len(self.items), tot_items)) def apply(self, f, axes=None, filter=None, do_integrity_check=False, consolidate=True, **kwargs): """ iterate over the blocks, collect and create a new block manager Parameters ---------- f : the callable or function name to operate on at the block level axes : optional (if not supplied, use self.axes) filter : list, if supplied, only call the block if the filter is in the block do_integrity_check : boolean, default False. Do the block manager integrity check consolidate: boolean, default True. Join together blocks having same dtype Returns ------- Block Manager (new object) """ result_blocks = [] # filter kwarg is used in replace-* family of methods if filter is not None: filter_locs = set(self.items.get_indexer_for(filter)) if len(filter_locs) == len(self.items): # All items are included, as if there were no filtering filter = None else: kwargs['filter'] = filter_locs if consolidate: self._consolidate_inplace() if f == 'where': align_copy = True if kwargs.get('align', True): align_keys = ['other', 'cond'] else: align_keys = ['cond'] elif f == 'putmask': align_copy = False if kwargs.get('align', True): align_keys = ['new', 'mask'] else: align_keys = ['mask'] elif f == 'fillna': # fillna internally does putmask, maybe it's better to do this # at mgr, not block level? align_copy = False align_keys = ['value'] else: align_keys = [] # TODO(EA): may interfere with ExtensionBlock.setitem for blocks # with a .values attribute. aligned_args = {k: kwargs[k] for k in align_keys if hasattr(kwargs[k], 'values') and not isinstance(kwargs[k], ABCExtensionArray)} for b in self.blocks: if filter is not None: if not b.mgr_locs.isin(filter_locs).any(): result_blocks.append(b) continue if aligned_args: b_items = self.items[b.mgr_locs.indexer] for k, obj in aligned_args.items(): axis = getattr(obj, '_info_axis_number', 0) kwargs[k] = obj.reindex(b_items, axis=axis, copy=align_copy) applied = getattr(b, f)(**kwargs) result_blocks = _extend_blocks(applied, result_blocks) if len(result_blocks) == 0: return self.make_empty(axes or self.axes) bm = self.__class__(result_blocks, axes or self.axes, do_integrity_check=do_integrity_check) bm._consolidate_inplace() return bm def quantile(self, axis=0, consolidate=True, transposed=False, interpolation='linear', qs=None, numeric_only=None): """ Iterate over blocks applying quantile reduction. 
This routine is intended for reduction type operations and will do inference on the generated blocks. Parameters ---------- axis: reduction axis, default 0 consolidate: boolean, default True. Join together blocks having same dtype transposed: boolean, default False we are holding transposed data interpolation : type of interpolation, default 'linear' qs : a scalar or list of the quantiles to be computed numeric_only : ignored Returns ------- Block Manager (new object) """ # Series dispatches to DataFrame for quantile, which allows us to # simplify some of the code here and in the blocks assert self.ndim >= 2 if consolidate: self._consolidate_inplace() def get_axe(block, qs, axes): from pandas import Float64Index if is_list_like(qs): ax = Float64Index(qs) elif block.ndim == 1: ax = Float64Index([qs]) else: ax = axes[0] return ax axes, blocks = [], [] for b in self.blocks: block = b.quantile(axis=axis, qs=qs, interpolation=interpolation) axe = get_axe(b, qs, axes=self.axes) axes.append(axe) blocks.append(block) # note that some DatetimeTZ, Categorical are always ndim==1 ndim = {b.ndim for b in blocks} assert 0 not in ndim, ndim if 2 in ndim: new_axes = list(self.axes) # multiple blocks that are reduced if len(blocks) > 1: new_axes[1] = axes[0] # reset the placement to the original for b, sb in zip(blocks, self.blocks): b.mgr_locs = sb.mgr_locs else: new_axes[axis] = Index(np.concatenate( [ax.values for ax in axes])) if transposed: new_axes = new_axes[::-1] blocks = [b.make_block(b.values.T, placement=np.arange(b.shape[1]) ) for b in blocks] return self.__class__(blocks, new_axes) # single block, i.e. ndim == {1} values = _concat._concat_compat([b.values for b in blocks]) # compute the orderings of our original data if len(self.blocks) > 1: indexer = np.empty(len(self.axes[0]), dtype=np.intp) i = 0 for b in self.blocks: for j in b.mgr_locs: indexer[j] = i i = i + 1 values = values.take(indexer) return SingleBlockManager( [make_block(values, ndim=1, placement=np.arange(len(values)))], axes[0]) def isna(self, func, **kwargs): return self.apply('apply', func=func, **kwargs) def where(self, **kwargs): return self.apply('where', **kwargs) def setitem(self, **kwargs): return self.apply('setitem', **kwargs) def putmask(self, **kwargs): return self.apply('putmask', **kwargs) def diff(self, **kwargs): return self.apply('diff', **kwargs) def interpolate(self, **kwargs): return self.apply('interpolate', **kwargs) def shift(self, **kwargs): return self.apply('shift', **kwargs) def fillna(self, **kwargs): return self.apply('fillna', **kwargs) def downcast(self, **kwargs): return self.apply('downcast', **kwargs) def astype(self, dtype, **kwargs): return self.apply('astype', dtype=dtype, **kwargs) def convert(self, **kwargs): return self.apply('convert', **kwargs) def replace(self, **kwargs): return self.apply('replace', **kwargs) def replace_list(self, src_list, dest_list, inplace=False, regex=False): """ do a list replace """ inplace = validate_bool_kwarg(inplace, 'inplace') # figure out our mask a-priori to avoid repeated replacements values = self.as_array() def comp(s, regex=False): """ Generate a bool array by perform an equality check, or perform an element-wise regular expression matching """ if isna(s): return isna(values) if hasattr(s, 'asm8'): return _compare_or_regex_search(maybe_convert_objects(values), getattr(s, 'asm8'), regex) return _compare_or_regex_search(values, s, regex) masks = [comp(s, regex) for i, s in enumerate(src_list)] result_blocks = [] src_len = len(src_list) - 1 for blk in 
self.blocks: # its possible to get multiple result blocks here # replace ALWAYS will return a list rb = [blk if inplace else blk.copy()] for i, (s, d) in enumerate(zip(src_list, dest_list)): new_rb = [] for b in rb: m = masks[i][b.mgr_locs.indexer] convert = i == src_len result = b._replace_coerce(mask=m, to_replace=s, value=d, inplace=inplace, convert=convert, regex=regex) if m.any(): new_rb = _extend_blocks(result, new_rb) else: new_rb.append(b) rb = new_rb result_blocks.extend(rb) bm = self.__class__(result_blocks, self.axes) bm._consolidate_inplace() return bm def is_consolidated(self): """ Return True if more than one block with the same dtype """ if not self._known_consolidated: self._consolidate_check() return self._is_consolidated def _consolidate_check(self): ftypes = [blk.ftype for blk in self.blocks] self._is_consolidated = len(ftypes) == len(set(ftypes)) self._known_consolidated = True @property def is_mixed_type(self): # Warning, consolidation needs to get checked upstairs self._consolidate_inplace() return len(self.blocks) > 1 @property def is_numeric_mixed_type(self): # Warning, consolidation needs to get checked upstairs self._consolidate_inplace() return all(block.is_numeric for block in self.blocks) @property def is_datelike_mixed_type(self): # Warning, consolidation needs to get checked upstairs self._consolidate_inplace() return any(block.is_datelike for block in self.blocks) @property def any_extension_types(self): """Whether any of the blocks in this manager are extension blocks""" return any(block.is_extension for block in self.blocks) @property def is_view(self): """ return a boolean if we are a single block and are a view """ if len(self.blocks) == 1: return self.blocks[0].is_view # It is technically possible to figure out which blocks are views # e.g. [ b.values.base is not None for b in self.blocks ] # but then we have the case of possibly some blocks being a view # and some blocks not. setting in theory is possible on the non-view # blocks w/o causing a SettingWithCopy raise/warn. 
But this is a bit # complicated return False def get_bool_data(self, copy=False): """ Parameters ---------- copy : boolean, default False Whether to copy the blocks """ self._consolidate_inplace() return self.combine([b for b in self.blocks if b.is_bool], copy) def get_numeric_data(self, copy=False): """ Parameters ---------- copy : boolean, default False Whether to copy the blocks """ self._consolidate_inplace() return self.combine([b for b in self.blocks if b.is_numeric], copy) def combine(self, blocks, copy=True): """ return a new manager with the blocks """ if len(blocks) == 0: return self.make_empty() # FIXME: optimization potential indexer = np.sort(np.concatenate([b.mgr_locs.as_array for b in blocks])) inv_indexer = lib.get_reverse_indexer(indexer, self.shape[0]) new_blocks = [] for b in blocks: b = b.copy(deep=copy) b.mgr_locs = algos.take_1d(inv_indexer, b.mgr_locs.as_array, axis=0, allow_fill=False) new_blocks.append(b) axes = list(self.axes) axes[0] = self.items.take(indexer) return self.__class__(new_blocks, axes, do_integrity_check=False) def get_slice(self, slobj, axis=0): if axis >= self.ndim: raise IndexError("Requested axis not found in manager") if axis == 0: new_blocks = self._slice_take_blocks_ax0(slobj) else: slicer = [slice(None)] * (axis + 1) slicer[axis] = slobj slicer = tuple(slicer) new_blocks = [blk.getitem_block(slicer) for blk in self.blocks] new_axes = list(self.axes) new_axes[axis] = new_axes[axis][slobj] bm = self.__class__(new_blocks, new_axes, do_integrity_check=False) bm._consolidate_inplace() return bm def __contains__(self, item): return item in self.items @property def nblocks(self): return len(self.blocks) def copy(self, deep=True): """ Make deep or shallow copy of BlockManager Parameters ---------- deep : boolean o rstring, default True If False, return shallow copy (do not copy data) If 'all', copy data and a deep copy of the index Returns ------- copy : BlockManager """ # this preserves the notion of view copying of axes if deep: if deep == 'all': copy = lambda ax: ax.copy(deep=True) else: copy = lambda ax: ax.view() new_axes = [copy(ax) for ax in self.axes] else: new_axes = list(self.axes) return self.apply('copy', axes=new_axes, deep=deep, do_integrity_check=False) def as_array(self, transpose=False, items=None): """Convert the blockmanager data into an numpy array. Parameters ---------- transpose : boolean, default False If True, transpose the return array items : list of strings or None Names of block items that will be included in the returned array. ``None`` means that all block items will be used Returns ------- arr : ndarray """ if len(self.blocks) == 0: arr = np.empty(self.shape, dtype=float) return arr.transpose() if transpose else arr if items is not None: mgr = self.reindex_axis(items, axis=0) else: mgr = self if self._is_single_block and mgr.blocks[0].is_datetimetz: # TODO(Block.get_values): Make DatetimeTZBlock.get_values # always be object dtype. Some callers seem to want the # DatetimeArray (previously DTI) arr = mgr.blocks[0].get_values(dtype=object) elif self._is_single_block or not self.is_mixed_type: arr = np.asarray(mgr.blocks[0].get_values()) else: arr = mgr._interleave() return arr.transpose() if transpose else arr def _interleave(self): """ Return ndarray from blocks with specified item order Items must be contained in the blocks """ from pandas.core.dtypes.common import is_sparse dtype = _interleaved_dtype(self.blocks) # TODO: https://github.com/pandas-dev/pandas/issues/22791 # Give EAs some input on what happens here. 
Sparse needs this. if is_sparse(dtype): dtype = dtype.subtype elif is_extension_array_dtype(dtype): dtype = 'object' result = np.empty(self.shape, dtype=dtype) itemmask = np.zeros(self.shape[0]) for blk in self.blocks: rl = blk.mgr_locs result[rl.indexer] = blk.get_values(dtype) itemmask[rl.indexer] = 1 if not itemmask.all(): raise AssertionError('Some items were not contained in blocks') return result def to_dict(self, copy=True): """ Return a dict of str(dtype) -> BlockManager Parameters ---------- copy : boolean, default True Returns ------- values : a dict of dtype -> BlockManager Notes ----- This consolidates based on str(dtype) """ self._consolidate_inplace() bd = {} for b in self.blocks: bd.setdefault(str(b.dtype), []).append(b) return {dtype: self.combine(blocks, copy=copy) for dtype, blocks in bd.items()} def xs(self, key, axis=1, copy=True, takeable=False): if axis < 1: raise AssertionError( 'Can only take xs across axis >= 1, got {ax}'.format(ax=axis)) # take by position if takeable: loc = key else: loc = self.axes[axis].get_loc(key) slicer = [slice(None, None) for _ in range(self.ndim)] slicer[axis] = loc slicer = tuple(slicer) new_axes = list(self.axes) # could be an array indexer! if isinstance(loc, (slice, np.ndarray)): new_axes[axis] = new_axes[axis][loc] else: new_axes.pop(axis) new_blocks = [] if len(self.blocks) > 1: # we must copy here as we are mixed type for blk in self.blocks: newb = make_block(values=blk.values[slicer], klass=blk.__class__, placement=blk.mgr_locs) new_blocks.append(newb) elif len(self.blocks) == 1: block = self.blocks[0] vals = block.values[slicer] if copy: vals = vals.copy() new_blocks = [make_block(values=vals, placement=block.mgr_locs, klass=block.__class__)] return self.__class__(new_blocks, new_axes) def fast_xs(self, loc): """ get a cross sectional for a given location in the items ; handle dups return the result, is *could* be a view in the case of a single block """ if len(self.blocks) == 1: return self.blocks[0].iget((slice(None), loc)) items = self.items # non-unique (GH4726) if not items.is_unique: result = self._interleave() if self.ndim == 2: result = result.T return result[loc] # unique dtype = _interleaved_dtype(self.blocks) n = len(items) if is_extension_array_dtype(dtype): # we'll eventually construct an ExtensionArray. result = np.empty(n, dtype=object) else: result = np.empty(n, dtype=dtype) for blk in self.blocks: # Such assignment may incorrectly coerce NaT to None # result[blk.mgr_locs] = blk._slice((slice(None), loc)) for i, rl in enumerate(blk.mgr_locs): result[rl] = blk._try_coerce_result(blk.iget((i, loc))) if is_extension_array_dtype(dtype): result = dtype.construct_array_type()._from_sequence( result, dtype=dtype ) return result def consolidate(self): """ Join together blocks having same dtype Returns ------- y : BlockManager """ if self.is_consolidated(): return self bm = self.__class__(self.blocks, self.axes) bm._is_consolidated = False bm._consolidate_inplace() return bm def _consolidate_inplace(self): if not self.is_consolidated(): self.blocks = tuple(_consolidate(self.blocks)) self._is_consolidated = True self._known_consolidated = True self._rebuild_blknos_and_blklocs() def get(self, item, fastpath=True): """ Return values for selected item (ndarray or BlockManager). 
""" if self.items.is_unique: if not isna(item): loc = self.items.get_loc(item) else: indexer = np.arange(len(self.items))[isna(self.items)] # allow a single nan location indexer if not is_scalar(indexer): if len(indexer) == 1: loc = indexer.item() else: raise ValueError("cannot label index with a null key") return self.iget(loc, fastpath=fastpath) else: if isna(item): raise TypeError("cannot label index with a null key") indexer = self.items.get_indexer_for([item]) return self.reindex_indexer(new_axis=self.items[indexer], indexer=indexer, axis=0, allow_dups=True) def iget(self, i, fastpath=True): """ Return the data as a SingleBlockManager if fastpath=True and possible Otherwise return as a ndarray """ block = self.blocks[self._blknos[i]] values = block.iget(self._blklocs[i]) if not fastpath or not block._box_to_block_values or values.ndim != 1: return values # fastpath shortcut for select a single-dim from a 2-dim BM return SingleBlockManager( [block.make_block_same_class(values, placement=slice(0, len(values)), ndim=1)], self.axes[1]) def delete(self, item): """ Delete selected item (items if non-unique) in-place. """ indexer = self.items.get_loc(item) is_deleted = np.zeros(self.shape[0], dtype=np.bool_) is_deleted[indexer] = True ref_loc_offset = -is_deleted.cumsum() is_blk_deleted = [False] * len(self.blocks) if isinstance(indexer, int): affected_start = indexer else: affected_start = is_deleted.nonzero()[0][0] for blkno, _ in _fast_count_smallints(self._blknos[affected_start:]): blk = self.blocks[blkno] bml = blk.mgr_locs blk_del = is_deleted[bml.indexer].nonzero()[0] if len(blk_del) == len(bml): is_blk_deleted[blkno] = True continue elif len(blk_del) != 0: blk.delete(blk_del) bml = blk.mgr_locs blk.mgr_locs = bml.add(ref_loc_offset[bml.indexer]) # FIXME: use Index.delete as soon as it uses fastpath=True self.axes[0] = self.items[~is_deleted] self.blocks = tuple(b for blkno, b in enumerate(self.blocks) if not is_blk_deleted[blkno]) self._shape = None self._rebuild_blknos_and_blklocs() def set(self, item, value): """ Set new item in-place. Does not consolidate. 
Adds new Block if not contained in the current set of items """ # FIXME: refactor, clearly separate broadcasting & zip-like assignment # can prob also fix the various if tests for sparse/categorical # TODO(EA): Remove an is_extension_ when all extension types satisfy # the interface value_is_extension_type = (is_extension_type(value) or is_extension_array_dtype(value)) # categorical/spares/datetimetz if value_is_extension_type: def value_getitem(placement): return value else: if value.ndim == self.ndim - 1: value = _safe_reshape(value, (1,) + value.shape) def value_getitem(placement): return value else: def value_getitem(placement): return value[placement.indexer] if value.shape[1:] != self.shape[1:]: raise AssertionError('Shape of new values must be compatible ' 'with manager shape') try: loc = self.items.get_loc(item) except KeyError: # This item wasn't present, just insert at end self.insert(len(self.items), item, value) return if isinstance(loc, int): loc = [loc] blknos = self._blknos[loc] blklocs = self._blklocs[loc].copy() unfit_mgr_locs = [] unfit_val_locs = [] removed_blknos = [] for blkno, val_locs in libinternals.get_blkno_placements(blknos, self.nblocks, group=True): blk = self.blocks[blkno] blk_locs = blklocs[val_locs.indexer] if blk.should_store(value): blk.set(blk_locs, value_getitem(val_locs)) else: unfit_mgr_locs.append(blk.mgr_locs.as_array[blk_locs]) unfit_val_locs.append(val_locs) # If all block items are unfit, schedule the block for removal. if len(val_locs) == len(blk.mgr_locs): removed_blknos.append(blkno) else: self._blklocs[blk.mgr_locs.indexer] = -1 blk.delete(blk_locs) self._blklocs[blk.mgr_locs.indexer] = np.arange(len(blk)) if len(removed_blknos): # Remove blocks & update blknos accordingly is_deleted = np.zeros(self.nblocks, dtype=np.bool_) is_deleted[removed_blknos] = True new_blknos = np.empty(self.nblocks, dtype=np.int64) new_blknos.fill(-1) new_blknos[~is_deleted] = np.arange(self.nblocks - len(removed_blknos)) self._blknos = algos.take_1d(new_blknos, self._blknos, axis=0, allow_fill=False) self.blocks = tuple(blk for i, blk in enumerate(self.blocks) if i not in set(removed_blknos)) if unfit_val_locs: unfit_mgr_locs = np.concatenate(unfit_mgr_locs) unfit_count = len(unfit_mgr_locs) new_blocks = [] if value_is_extension_type: # This code (ab-)uses the fact that sparse blocks contain only # one item. new_blocks.extend( make_block(values=value.copy(), ndim=self.ndim, placement=slice(mgr_loc, mgr_loc + 1)) for mgr_loc in unfit_mgr_locs) self._blknos[unfit_mgr_locs] = (np.arange(unfit_count) + len(self.blocks)) self._blklocs[unfit_mgr_locs] = 0 else: # unfit_val_locs contains BlockPlacement objects unfit_val_items = unfit_val_locs[0].append(unfit_val_locs[1:]) new_blocks.append( make_block(values=value_getitem(unfit_val_items), ndim=self.ndim, placement=unfit_mgr_locs)) self._blknos[unfit_mgr_locs] = len(self.blocks) self._blklocs[unfit_mgr_locs] = np.arange(unfit_count) self.blocks += tuple(new_blocks) # Newly created block's dtype may already be present. self._known_consolidated = False def insert(self, loc, item, value, allow_duplicates=False): """ Insert item at selected position. Parameters ---------- loc : int item : hashable value : array_like allow_duplicates: bool If False, trying to insert non-unique item will raise """ if not allow_duplicates and item in self.items: # Should this be a different kind of error?? 
raise ValueError('cannot insert {}, already exists'.format(item)) if not isinstance(loc, int): raise TypeError("loc must be int") # insert to the axis; this could possibly raise a TypeError new_axis = self.items.insert(loc, item) block = make_block(values=value, ndim=self.ndim, placement=slice(loc, loc + 1)) for blkno, count in _fast_count_smallints(self._blknos[loc:]): blk = self.blocks[blkno] if count == len(blk.mgr_locs): blk.mgr_locs = blk.mgr_locs.add(1) else: new_mgr_locs = blk.mgr_locs.as_array.copy() new_mgr_locs[new_mgr_locs >= loc] += 1 blk.mgr_locs = new_mgr_locs if loc == self._blklocs.shape[0]: # np.append is a lot faster, let's use it if we can. self._blklocs = np.append(self._blklocs, 0) self._blknos = np.append(self._blknos, len(self.blocks)) else: self._blklocs = np.insert(self._blklocs, loc, 0) self._blknos = np.insert(self._blknos, loc, len(self.blocks)) self.axes[0] = new_axis self.blocks += (block,) self._shape = None self._known_consolidated = False if len(self.blocks) > 100: self._consolidate_inplace() def reindex_axis(self, new_index, axis, method=None, limit=None, fill_value=None, copy=True): """ Conform block manager to new index. """ new_index = ensure_index(new_index) new_index, indexer = self.axes[axis].reindex(new_index, method=method, limit=limit) return self.reindex_indexer(new_index, indexer, axis=axis, fill_value=fill_value, copy=copy) def reindex_indexer(self, new_axis, indexer, axis, fill_value=None, allow_dups=False, copy=True): """ Parameters ---------- new_axis : Index indexer : ndarray of int64 or None axis : int fill_value : object allow_dups : bool pandas-indexer with -1's only. """ if indexer is None: if new_axis is self.axes[axis] and not copy: return self result = self.copy(deep=copy) result.axes = list(self.axes) result.axes[axis] = new_axis return result self._consolidate_inplace() # some axes don't allow reindexing with dups if not allow_dups: self.axes[axis]._can_reindex(indexer) if axis >= self.ndim: raise IndexError("Requested axis not found in manager") if axis == 0: new_blocks = self._slice_take_blocks_ax0(indexer, fill_tuple=(fill_value,)) else: new_blocks = [blk.take_nd(indexer, axis=axis, fill_tuple=( fill_value if fill_value is not None else blk.fill_value,)) for blk in self.blocks] new_axes = list(self.axes) new_axes[axis] = new_axis return self.__class__(new_blocks, new_axes) def _slice_take_blocks_ax0(self, slice_or_indexer, fill_tuple=None): """ Slice/take blocks along axis=0. Overloaded for SingleBlock Returns ------- new_blocks : list of Block """ allow_fill = fill_tuple is not None sl_type, slobj, sllen = _preprocess_slice_or_indexer( slice_or_indexer, self.shape[0], allow_fill=allow_fill) if self._is_single_block: blk = self.blocks[0] if sl_type in ('slice', 'mask'): return [blk.getitem_block(slobj, new_mgr_locs=slice(0, sllen))] elif not allow_fill or self.ndim == 1: if allow_fill and fill_tuple[0] is None: _, fill_value = maybe_promote(blk.dtype) fill_tuple = (fill_value, ) return [blk.take_nd(slobj, axis=0, new_mgr_locs=slice(0, sllen), fill_tuple=fill_tuple)] if sl_type in ('slice', 'mask'): blknos = self._blknos[slobj] blklocs = self._blklocs[slobj] else: blknos = algos.take_1d(self._blknos, slobj, fill_value=-1, allow_fill=allow_fill) blklocs = algos.take_1d(self._blklocs, slobj, fill_value=-1, allow_fill=allow_fill) # When filling blknos, make sure blknos is updated before appending to # blocks list, that way new blkno is exactly len(blocks). 
# # FIXME: mgr_groupby_blknos must return mgr_locs in ascending order, # pytables serialization will break otherwise. blocks = [] for blkno, mgr_locs in libinternals.get_blkno_placements(blknos, self.nblocks, group=True): if blkno == -1: # If we've got here, fill_tuple was not None. fill_value = fill_tuple[0] blocks.append(self._make_na_block(placement=mgr_locs, fill_value=fill_value)) else: blk = self.blocks[blkno] # Otherwise, slicing along items axis is necessary. if not blk._can_consolidate: # A non-consolidatable block, it's easy, because there's # only one item and each mgr loc is a copy of that single # item. for mgr_loc in mgr_locs: newblk = blk.copy(deep=True) newblk.mgr_locs = slice(mgr_loc, mgr_loc + 1) blocks.append(newblk) else: blocks.append(blk.take_nd(blklocs[mgr_locs.indexer], axis=0, new_mgr_locs=mgr_locs, fill_tuple=None)) return blocks def _make_na_block(self, placement, fill_value=None): # TODO: infer dtypes other than float64 from fill_value if fill_value is None: fill_value = np.nan block_shape = list(self.shape) block_shape[0] = len(placement) dtype, fill_value = infer_dtype_from_scalar(fill_value) block_values = np.empty(block_shape, dtype=dtype) block_values.fill(fill_value) return make_block(block_values, placement=placement) def take(self, indexer, axis=1, verify=True, convert=True): """ Take items along any axis. """ self._consolidate_inplace() indexer = (np.arange(indexer.start, indexer.stop, indexer.step, dtype='int64') if isinstance(indexer, slice) else np.asanyarray(indexer, dtype='int64')) n = self.shape[axis] if convert: indexer = maybe_convert_indices(indexer, n) if verify: if ((indexer == -1) | (indexer >= n)).any(): raise Exception('Indices must be nonzero and less than ' 'the axis length') new_labels = self.axes[axis].take(indexer) return self.reindex_indexer(new_axis=new_labels, indexer=indexer, axis=axis, allow_dups=True) def merge(self, other, lsuffix='', rsuffix=''): # We assume at this point that the axes of self and other match. # This is only called from Panel.join, which reindexes prior # to calling to ensure this assumption holds. l, r = items_overlap_with_suffix(left=self.items, lsuffix=lsuffix, right=other.items, rsuffix=rsuffix) new_items = _concat_indexes([l, r]) new_blocks = [blk.copy(deep=False) for blk in self.blocks] offset = self.shape[0] for blk in other.blocks: blk = blk.copy(deep=False) blk.mgr_locs = blk.mgr_locs.add(offset) new_blocks.append(blk) new_axes = list(self.axes) new_axes[0] = new_items return self.__class__(_consolidate(new_blocks), new_axes) def equals(self, other): self_axes, other_axes = self.axes, other.axes if len(self_axes) != len(other_axes): return False if not all(ax1.equals(ax2) for ax1, ax2 in zip(self_axes, other_axes)): return False self._consolidate_inplace() other._consolidate_inplace() if len(self.blocks) != len(other.blocks): return False # canonicalize block order, using a tuple combining the type # name and then mgr_locs because there might be unconsolidated # blocks (say, Categorical) which can only be distinguished by # the iteration order def canonicalize(block): return (block.dtype.name, block.mgr_locs.as_array.tolist()) self_blocks = sorted(self.blocks, key=canonicalize) other_blocks = sorted(other.blocks, key=canonicalize) return all(block.equals(oblock) for block, oblock in zip(self_blocks, other_blocks)) def unstack(self, unstacker_func, fill_value): """Return a blockmanager with all blocks unstacked. 
Parameters ---------- unstacker_func : callable A (partially-applied) ``pd.core.reshape._Unstacker`` class. fill_value : Any fill_value for newly introduced missing values. Returns ------- unstacked : BlockManager """ n_rows = self.shape[-1] dummy = unstacker_func(np.empty((0, 0)), value_columns=self.items) new_columns = dummy.get_new_columns() new_index = dummy.get_new_index() new_blocks = [] columns_mask = [] for blk in self.blocks: blocks, mask = blk._unstack( partial(unstacker_func, value_columns=self.items[blk.mgr_locs.indexer]), new_columns, n_rows, fill_value ) new_blocks.extend(blocks) columns_mask.extend(mask) new_columns = new_columns[columns_mask] bm = BlockManager(new_blocks, [new_columns, new_index]) return bm class SingleBlockManager(BlockManager): """ manage a single block with """ ndim = 1 _is_consolidated = True _known_consolidated = True __slots__ = () def __init__(self, block, axis, do_integrity_check=False, fastpath=False): if isinstance(axis, list): if len(axis) != 1: raise ValueError("cannot create SingleBlockManager with more " "than 1 axis") axis = axis[0] # passed from constructor, single block, single axis if fastpath: self.axes = [axis] if isinstance(block, list): # empty block if len(block) == 0: block = [np.array([])] elif len(block) != 1: raise ValueError('Cannot create SingleBlockManager with ' 'more than 1 block') block = block[0] else: self.axes = [ensure_index(axis)] # create the block here if isinstance(block, list): # provide consolidation to the interleaved_dtype if len(block) > 1: dtype = _interleaved_dtype(block) block = [b.astype(dtype) for b in block] block = _consolidate(block) if len(block) != 1: raise ValueError('Cannot create SingleBlockManager with ' 'more than 1 block') block = block[0] if not isinstance(block, Block): block = make_block(block, placement=slice(0, len(axis)), ndim=1) self.blocks = [block] def _post_setstate(self): pass @property def _block(self): return self.blocks[0] @property def _values(self): return self._block.values @property def _blknos(self): """ compat with BlockManager """ return None @property def _blklocs(self): """ compat with BlockManager """ return None def get_slice(self, slobj, axis=0): if axis >= self.ndim: raise IndexError("Requested axis not found in manager") return self.__class__(self._block._slice(slobj), self.index[slobj], fastpath=True) @property def index(self): return self.axes[0] def convert(self, **kwargs): """ convert the whole block as one """ kwargs['by_item'] = False return self.apply('convert', **kwargs) @property def dtype(self): return self._block.dtype @property def array_dtype(self): return self._block.array_dtype @property def ftype(self): return self._block.ftype def get_dtype_counts(self): return {self.dtype.name: 1} def get_ftype_counts(self): return {self.ftype: 1} def get_dtypes(self): return np.array([self._block.dtype]) def get_ftypes(self): return np.array([self._block.ftype]) def external_values(self): return self._block.external_values() def internal_values(self): return self._block.internal_values() def formatting_values(self): """Return the internal values used by the DataFrame/SeriesFormatter""" return self._block.formatting_values() def get_values(self): """ return a dense type view """ return np.array(self._block.to_dense(), copy=False) @property def asobject(self): """ return a object dtype array. datetime/timedelta like values are boxed to Timestamp/Timedelta instances. 
""" return self._block.get_values(dtype=object) @property def _can_hold_na(self): return self._block._can_hold_na def is_consolidated(self): return True def _consolidate_check(self): pass def _consolidate_inplace(self): pass def delete(self, item): """ Delete single item from SingleBlockManager. Ensures that self.blocks doesn't become empty. """ loc = self.items.get_loc(item) self._block.delete(loc) self.axes[0] = self.axes[0].delete(loc) def fast_xs(self, loc): """ fast path for getting a cross-section return a view of the data """ return self._block.values[loc] def concat(self, to_concat, new_axis): """ Concatenate a list of SingleBlockManagers into a single SingleBlockManager. Used for pd.concat of Series objects with axis=0. Parameters ---------- to_concat : list of SingleBlockManagers new_axis : Index of the result Returns ------- SingleBlockManager """ non_empties = [x for x in to_concat if len(x) > 0] # check if all series are of the same block type: if len(non_empties) > 0: blocks = [obj.blocks[0] for obj in non_empties] if len({b.dtype for b in blocks}) == 1: new_block = blocks[0].concat_same_type(blocks) else: values = [x.values for x in blocks] values = _concat._concat_compat(values) new_block = make_block( values, placement=slice(0, len(values), 1)) else: values = [x._block.values for x in to_concat] values = _concat._concat_compat(values) new_block = make_block( values, placement=slice(0, len(values), 1)) mgr = SingleBlockManager(new_block, new_axis) return mgr # -------------------------------------------------------------------- # Constructor Helpers def create_block_manager_from_blocks(blocks, axes): try: if len(blocks) == 1 and not isinstance(blocks[0], Block): # if blocks[0] is of length 0, return empty blocks if not len(blocks[0]): blocks = [] else: # It's OK if a single block is passed as values, its placement # is basically "all items", but if there're many, don't bother # converting, it's an error anyway. blocks = [make_block(values=blocks[0], placement=slice(0, len(axes[0])))] mgr = BlockManager(blocks, axes) mgr._consolidate_inplace() return mgr except (ValueError) as e: blocks = [getattr(b, 'values', b) for b in blocks] tot_items = sum(b.shape[0] for b in blocks) construction_error(tot_items, blocks[0].shape[1:], axes, e) def create_block_manager_from_arrays(arrays, names, axes): try: blocks = form_blocks(arrays, names, axes) mgr = BlockManager(blocks, axes) mgr._consolidate_inplace() return mgr except ValueError as e: construction_error(len(arrays), arrays[0].shape, axes, e) def construction_error(tot_items, block_shape, axes, e=None): """ raise a helpful message about our construction """ passed = tuple(map(int, [tot_items] + list(block_shape))) # Correcting the user facing error message during dataframe construction if len(passed) <= 2: passed = passed[::-1] implied = tuple(len(ax) for ax in axes) # Correcting the user facing error message during dataframe construction if len(implied) <= 2: implied = implied[::-1] if passed == implied and e is not None: raise e if block_shape[0] == 0: raise ValueError("Empty data passed with indices specified.") raise ValueError("Shape of passed values is {0}, indices imply {1}".format( passed, implied)) # ----------------------------------------------------------------------- def form_blocks(arrays, names, axes): # put "leftover" items in float bucket, where else? # generalize? 
items_dict = defaultdict(list) extra_locs = [] names_idx = ensure_index(names) if names_idx.equals(axes[0]): names_indexer = np.arange(len(names_idx)) else: assert names_idx.intersection(axes[0]).is_unique names_indexer = names_idx.get_indexer_for(axes[0]) for i, name_idx in enumerate(names_indexer): if name_idx == -1: extra_locs.append(i) continue k = names[name_idx] v = arrays[name_idx] block_type = get_block_type(v) items_dict[block_type.__name__].append((i, k, v)) blocks = [] if len(items_dict['FloatBlock']): float_blocks = _multi_blockify(items_dict['FloatBlock']) blocks.extend(float_blocks) if len(items_dict['ComplexBlock']): complex_blocks = _multi_blockify(items_dict['ComplexBlock']) blocks.extend(complex_blocks) if len(items_dict['TimeDeltaBlock']): timedelta_blocks = _multi_blockify(items_dict['TimeDeltaBlock']) blocks.extend(timedelta_blocks) if len(items_dict['IntBlock']): int_blocks = _multi_blockify(items_dict['IntBlock']) blocks.extend(int_blocks) if len(items_dict['DatetimeBlock']): datetime_blocks = _simple_blockify(items_dict['DatetimeBlock'], _NS_DTYPE) blocks.extend(datetime_blocks) if len(items_dict['DatetimeTZBlock']): dttz_blocks = [make_block(array, klass=DatetimeTZBlock, placement=[i]) for i, _, array in items_dict['DatetimeTZBlock']] blocks.extend(dttz_blocks) if len(items_dict['BoolBlock']): bool_blocks = _simple_blockify(items_dict['BoolBlock'], np.bool_) blocks.extend(bool_blocks) if len(items_dict['ObjectBlock']) > 0: object_blocks = _simple_blockify(items_dict['ObjectBlock'], np.object_) blocks.extend(object_blocks) if len(items_dict['SparseBlock']) > 0: sparse_blocks = _sparse_blockify(items_dict['SparseBlock']) blocks.extend(sparse_blocks) if len(items_dict['CategoricalBlock']) > 0: cat_blocks = [make_block(array, klass=CategoricalBlock, placement=[i]) for i, _, array in items_dict['CategoricalBlock']] blocks.extend(cat_blocks) if len(items_dict['ExtensionBlock']): external_blocks = [ make_block(array, klass=ExtensionBlock, placement=[i]) for i, _, array in items_dict['ExtensionBlock'] ] blocks.extend(external_blocks) if len(items_dict['ObjectValuesExtensionBlock']): external_blocks = [ make_block(array, klass=ObjectValuesExtensionBlock, placement=[i]) for i, _, array in items_dict['ObjectValuesExtensionBlock'] ] blocks.extend(external_blocks) if len(extra_locs): shape = (len(extra_locs),) + tuple(len(x) for x in axes[1:]) # empty items -> dtype object block_values = np.empty(shape, dtype=object) block_values.fill(np.nan) na_block = make_block(block_values, placement=extra_locs) blocks.append(na_block) return blocks def _simple_blockify(tuples, dtype): """ return a single array of a block that has a single dtype; if dtype is not None, coerce to this dtype """ values, placement = _stack_arrays(tuples, dtype) # CHECK DTYPE? 
if dtype is not None and values.dtype != dtype: # pragma: no cover values = values.astype(dtype) block = make_block(values, placement=placement) return [block] def _multi_blockify(tuples, dtype=None): """ return an array of blocks that potentially have different dtypes """ # group by dtype grouper = itertools.groupby(tuples, lambda x: x[2].dtype) new_blocks = [] for dtype, tup_block in grouper: values, placement = _stack_arrays(list(tup_block), dtype) block = make_block(values, placement=placement) new_blocks.append(block) return new_blocks def _sparse_blockify(tuples, dtype=None): """ return an array of blocks that potentially have different dtypes (and are sparse) """ new_blocks = [] for i, names, array in tuples: array = _maybe_to_sparse(array) block = make_block(array, placement=[i]) new_blocks.append(block) return new_blocks def _stack_arrays(tuples, dtype): # fml def _asarray_compat(x): if isinstance(x, ABCSeries): return x._values else: return np.asarray(x) def _shape_compat(x): if isinstance(x, ABCSeries): return len(x), else: return x.shape placement, names, arrays = zip(*tuples) first = arrays[0] shape = (len(arrays),) + _shape_compat(first) stacked = np.empty(shape, dtype=dtype) for i, arr in enumerate(arrays): stacked[i] = _asarray_compat(arr) return stacked, placement def _interleaved_dtype(blocks): # type: (List[Block]) -> Optional[Union[np.dtype, ExtensionDtype]] """Find the common dtype for `blocks`. Parameters ---------- blocks : List[Block] Returns ------- dtype : Optional[Union[np.dtype, ExtensionDtype]] None is returned when `blocks` is empty. """ if not len(blocks): return None return find_common_type([b.dtype for b in blocks]) def _consolidate(blocks): """ Merge blocks having same dtype, exclude non-consolidating blocks """ # sort by _can_consolidate, dtype gkey = lambda x: x._consolidate_key grouper = itertools.groupby(sorted(blocks, key=gkey), gkey) new_blocks = [] for (_can_consolidate, dtype), group_blocks in grouper: merged_blocks = _merge_blocks(list(group_blocks), dtype=dtype, _can_consolidate=_can_consolidate) new_blocks = _extend_blocks(merged_blocks, new_blocks) return new_blocks def _compare_or_regex_search(a, b, regex=False): """ Compare two array_like inputs of the same shape or two scalar values Calls operator.eq or re.search, depending on regex argument. If regex is True, perform an element-wise regex matching. 
Parameters ---------- a : array_like or scalar b : array_like or scalar regex : bool, default False Returns ------- mask : array_like of bool """ if not regex: op = lambda x: operator.eq(x, b) else: op = np.vectorize(lambda x: bool(re.search(b, x)) if isinstance(x, str) else False) is_a_array = isinstance(a, np.ndarray) is_b_array = isinstance(b, np.ndarray) # numpy deprecation warning to have i8 vs integer comparisons if is_datetimelike_v_numeric(a, b): result = False # numpy deprecation warning if comparing numeric vs string-like elif is_numeric_v_string_like(a, b): result = False else: result = op(a) if is_scalar(result) and (is_a_array or is_b_array): type_names = [type(a).__name__, type(b).__name__] if is_a_array: type_names[0] = 'ndarray(dtype={dtype})'.format(dtype=a.dtype) if is_b_array: type_names[1] = 'ndarray(dtype={dtype})'.format(dtype=b.dtype) raise TypeError( "Cannot compare types {a!r} and {b!r}".format(a=type_names[0], b=type_names[1])) return result def _concat_indexes(indexes): return indexes[0].append(indexes[1:]) def items_overlap_with_suffix(left, lsuffix, right, rsuffix): """ If two indices overlap, add suffixes to overlapping entries. If corresponding suffix is empty, the entry is simply converted to string. """ to_rename = left.intersection(right) if len(to_rename) == 0: return left, right else: if not lsuffix and not rsuffix: raise ValueError('columns overlap but no suffix specified: ' '{rename}'.format(rename=to_rename)) def renamer(x, suffix): """Rename the left and right indices. If there is overlap, and suffix is not None, add suffix, otherwise, leave it as-is. Parameters ---------- x : original column name suffix : str or None Returns ------- x : renamed column name """ if x in to_rename and suffix is not None: return '{x}{suffix}'.format(x=x, suffix=suffix) return x lrenamer = partial(renamer, suffix=lsuffix) rrenamer = partial(renamer, suffix=rsuffix) return (_transform_index(left, lrenamer), _transform_index(right, rrenamer)) def _transform_index(index, func, level=None): """ Apply function to all values found in index. This includes transforming multiindex entries separately. Only apply function to one level of the MultiIndex if level is specified. """ if isinstance(index, MultiIndex): if level is not None: items = [tuple(func(y) if i == level else y for i, y in enumerate(x)) for x in index] else: items = [tuple(func(y) for y in x) for x in index] return MultiIndex.from_tuples(items, names=index.names) else: items = [func(x) for x in index] return Index(items, name=index.name, tupleize_cols=False) def _fast_count_smallints(arr): """Faster version of set(arr) for sequences of small numbers.""" counts = np.bincount(arr.astype(np.int_)) nz = counts.nonzero()[0] return np.c_[nz, counts[nz]] def _preprocess_slice_or_indexer(slice_or_indexer, length, allow_fill): if isinstance(slice_or_indexer, slice): return ('slice', slice_or_indexer, libinternals.slice_len(slice_or_indexer, length)) elif (isinstance(slice_or_indexer, np.ndarray) and slice_or_indexer.dtype == np.bool_): return 'mask', slice_or_indexer, slice_or_indexer.sum() else: indexer = np.asanyarray(slice_or_indexer, dtype=np.int64) if not allow_fill: indexer = maybe_convert_indices(indexer, length) return 'fancy', indexer, len(indexer) def concatenate_block_managers(mgrs_indexers, axes, concat_axis, copy): """ Concatenate block managers into one. 
Parameters ---------- mgrs_indexers : list of (BlockManager, {axis: indexer,...}) tuples axes : list of Index concat_axis : int copy : bool """ concat_plans = [get_mgr_concatenation_plan(mgr, indexers) for mgr, indexers in mgrs_indexers] concat_plan = combine_concat_plans(concat_plans, concat_axis) blocks = [] for placement, join_units in concat_plan: if len(join_units) == 1 and not join_units[0].indexers: b = join_units[0].block values = b.values if copy: values = values.copy() elif not copy: values = values.view() b = b.make_block_same_class(values, placement=placement) elif is_uniform_join_units(join_units): b = join_units[0].block.concat_same_type( [ju.block for ju in join_units], placement=placement) else: b = make_block( concatenate_join_units(join_units, concat_axis, copy=copy), placement=placement) blocks.append(b) return BlockManager(blocks, axes)
import datetime import numpy as np import pytest import pandas as pd from pandas import DataFrame, Index, MultiIndex, Series from pandas.core.util.hashing import _hash_scalar, hash_tuple, hash_tuples from pandas.util import hash_array, hash_pandas_object import pandas.util.testing as tm @pytest.fixture(params=[ Series([1, 2, 3] * 3, dtype="int32"), Series([None, 2.5, 3.5] * 3, dtype="float32"), Series(["a", "b", "c"] * 3, dtype="category"), Series(["d", "e", "f"] * 3), Series([True, False, True] * 3), Series(pd.date_range("20130101", periods=9)), Series(pd.date_range("20130101", periods=9, tz="US/Eastern")), Series(pd.timedelta_range("2000", periods=9))]) def series(request): return request.param @pytest.fixture(params=[True, False]) def index(request): return request.param def _check_equal(obj, **kwargs): """ Check that hashing an objects produces the same value each time. Parameters ---------- obj : object The object to hash. kwargs : kwargs Keyword arguments to pass to the hashing function. """ a = hash_pandas_object(obj, **kwargs) b = hash_pandas_object(obj, **kwargs) tm.assert_series_equal(a, b) def _check_not_equal_with_index(obj): """ Check the hash of an object with and without its index is not the same. Parameters ---------- obj : object The object to hash. """ if not isinstance(obj, Index): a = hash_pandas_object(obj, index=True) b = hash_pandas_object(obj, index=False) if len(obj): assert not (a == b).all() def test_consistency(): # Check that our hash doesn't change because of a mistake # in the actual code; this is the ground truth. result = hash_pandas_object(Index(["foo", "bar", "baz"])) expected = Series(np.array([3600424527151052760, 1374399572096150070, 477881037637427054], dtype="uint64"), index=["foo", "bar", "baz"]) tm.assert_series_equal(result, expected) def test_hash_array(series): arr = series.values tm.assert_numpy_array_equal(hash_array(arr), hash_array(arr)) @pytest.mark.parametrize("arr2", [ np.array([3, 4, "All"]), np.array([3, 4, "All"], dtype=object), ]) def test_hash_array_mixed(arr2): result1 = hash_array(np.array(["3", "4", "All"])) result2 = hash_array(arr2) tm.assert_numpy_array_equal(result1, result2) @pytest.mark.parametrize("val", [5, "foo", pd.Timestamp("20130101")]) def test_hash_array_errors(val): msg = "must pass a ndarray-like" with pytest.raises(TypeError, match=msg): hash_array(val) def test_hash_tuples(): tuples = [(1, "one"), (1, "two"), (2, "one")] result = hash_tuples(tuples) expected = hash_pandas_object(MultiIndex.from_tuples(tuples)).values tm.assert_numpy_array_equal(result, expected) result = hash_tuples(tuples[0]) assert result == expected[0] @pytest.mark.parametrize("tup", [ (1, "one"), (1, np.nan), (1.0, pd.NaT, "A"), ("A", pd.Timestamp("2012-01-01"))]) def test_hash_tuple(tup): # Test equivalence between # hash_tuples and hash_tuple. 
result = hash_tuple(tup) expected = hash_tuples([tup])[0] assert result == expected @pytest.mark.parametrize("val", [ 1, 1.4, "A", b"A", u"A", pd.Timestamp("2012-01-01"), pd.Timestamp("2012-01-01", tz="Europe/Brussels"), datetime.datetime(2012, 1, 1), pd.Timestamp("2012-01-01", tz="EST").to_pydatetime(), pd.Timedelta("1 days"), datetime.timedelta(1), pd.Period("2012-01-01", freq="D"), pd.Interval(0, 1), np.nan, pd.NaT, None]) def test_hash_scalar(val): result = _hash_scalar(val) expected = hash_array(np.array([val], dtype=object), categorize=True) assert result[0] == expected[0] @pytest.mark.parametrize("val", [5, "foo", pd.Timestamp("20130101")]) def test_hash_tuples_err(val): msg = "must be convertible to a list-of-tuples" with pytest.raises(TypeError, match=msg): hash_tuples(val) def test_multiindex_unique(): mi = MultiIndex.from_tuples([(118, 472), (236, 118), (51, 204), (102, 51)]) assert mi.is_unique is True result = hash_pandas_object(mi) assert result.is_unique is True def test_multiindex_objects(): mi = MultiIndex(levels=[["b", "d", "a"], [1, 2, 3]], codes=[[0, 1, 0, 2], [2, 0, 0, 1]], names=["col1", "col2"]) recons = mi._sort_levels_monotonic() # These are equal. assert mi.equals(recons) assert Index(mi.values).equals(Index(recons.values)) # _hashed_values and hash_pandas_object(..., index=False) equivalency. expected = hash_pandas_object(mi, index=False).values result = mi._hashed_values tm.assert_numpy_array_equal(result, expected) expected = hash_pandas_object(recons, index=False).values result = recons._hashed_values tm.assert_numpy_array_equal(result, expected) expected = mi._hashed_values result = recons._hashed_values # Values should match, but in different order. tm.assert_numpy_array_equal(np.sort(result), np.sort(expected)) @pytest.mark.parametrize("obj", [ Series([1, 2, 3]), Series([1.0, 1.5, 3.2]), Series([1.0, 1.5, np.nan]), Series([1.0, 1.5, 3.2], index=[1.5, 1.1, 3.3]), Series(["a", "b", "c"]), Series(["a", np.nan, "c"]), Series(["a", None, "c"]), Series([True, False, True]), Series(), Index([1, 2, 3]), Index([True, False, True]), DataFrame({"x": ["a", "b", "c"], "y": [1, 2, 3]}), DataFrame(), tm.makeMissingDataframe(), tm.makeMixedDataFrame(), tm.makeTimeDataFrame(), tm.makeTimeSeries(), tm.makeTimedeltaIndex(), tm.makePeriodIndex(), Series(tm.makePeriodIndex()), Series(pd.date_range("20130101", periods=3, tz="US/Eastern")), MultiIndex.from_product([range(5), ["foo", "bar", "baz"], pd.date_range("20130101", periods=2)]), MultiIndex.from_product([pd.CategoricalIndex(list("aabc")), range(3)]) ]) def test_hash_pandas_object(obj, index): _check_equal(obj, index=index) _check_not_equal_with_index(obj) def test_hash_pandas_object2(series, index): _check_equal(series, index=index) _check_not_equal_with_index(series) @pytest.mark.parametrize("obj", [ Series([], dtype="float64"), Series([], dtype="object"), Index([])]) def test_hash_pandas_empty_object(obj, index): # These are by-definition the same with # or without the index as the data is empty. _check_equal(obj, index=index) @pytest.mark.parametrize("s1", [ Series(["a", "b", "c", "d"]), Series([1000, 2000, 3000, 4000]), Series(pd.date_range(0, periods=4))]) @pytest.mark.parametrize("categorize", [True, False]) def test_categorical_consistency(s1, categorize): # see gh-15143 # # Check that categoricals hash consistent with their values, # not codes. This should work for categoricals of any dtype. 
s2 = s1.astype("category").cat.set_categories(s1) s3 = s2.cat.set_categories(list(reversed(s1))) # These should all hash identically. h1 = hash_pandas_object(s1, categorize=categorize) h2 = hash_pandas_object(s2, categorize=categorize) h3 = hash_pandas_object(s3, categorize=categorize) tm.assert_series_equal(h1, h2) tm.assert_series_equal(h1, h3) def test_categorical_with_nan_consistency(): c = pd.Categorical.from_codes( [-1, 0, 1, 2, 3, 4], categories=pd.date_range("2012-01-01", periods=5, name="B")) expected = hash_array(c, categorize=False) c = pd.Categorical.from_codes( [-1, 0], categories=[pd.Timestamp("2012-01-01")]) result = hash_array(c, categorize=False) assert result[0] in expected assert result[1] in expected @pytest.mark.parametrize("obj", [pd.Timestamp("20130101")]) def test_pandas_errors(obj): msg = "Unexpected type for hashing" with pytest.raises(TypeError, match=msg): hash_pandas_object(obj) def test_hash_keys(): # Using different hash keys, should have # different hashes for the same data. # # This only matters for object dtypes. obj = Series(list("abc")) a = hash_pandas_object(obj, hash_key="9876543210123456") b = hash_pandas_object(obj, hash_key="9876543210123465") assert (a != b).all() def test_invalid_key(): # This only matters for object dtypes. msg = "key should be a 16-byte string encoded" with pytest.raises(ValueError, match=msg): hash_pandas_object(Series(list("abc")), hash_key="foo") def test_already_encoded(index): # If already encoded, then ok. obj = Series(list("abc")).str.encode("utf8") _check_equal(obj, index=index) def test_alternate_encoding(index): obj = Series(list("abc")) _check_equal(obj, index=index, encoding="ascii") @pytest.mark.parametrize("l_exp", range(8)) @pytest.mark.parametrize("l_add", [0, 1]) def test_same_len_hash_collisions(l_exp, l_add): length = 2**(l_exp + 8) + l_add s = tm.rands_array(length, 2) result = hash_array(s, "utf8") assert not result[0] == result[1] def test_hash_collisions(): # Hash collisions are bad. # # https://github.com/pandas-dev/pandas/issues/14711#issuecomment-264885726 hashes = ["Ingrid-9Z9fKIZmkO7i7Cn51Li34pJm44fgX6DYGBNj3VPlOH50m7HnBlPxfIwFMrcNJNMP6PSgLmwWnInciMWrCSAlLEvt7JkJl4IxiMrVbXSa8ZQoVaq5xoQPjltuJEfwdNlO6jo8qRRHvD8sBEBMQASrRa6TsdaPTPCBo3nwIBpE7YzzmyH0vMBhjQZLx1aCT7faSEx7PgFxQhHdKFWROcysamgy9iVj8DO2Fmwg1NNl93rIAqC3mdqfrCxrzfvIY8aJdzin2cHVzy3QUJxZgHvtUtOLxoqnUHsYbNTeq0xcLXpTZEZCxD4PGubIuCNf32c33M7HFsnjWSEjE2yVdWKhmSVodyF8hFYVmhYnMCztQnJrt3O8ZvVRXd5IKwlLexiSp4h888w7SzAIcKgc3g5XQJf6MlSMftDXm9lIsE1mJNiJEv6uY6pgvC3fUPhatlR5JPpVAHNSbSEE73MBzJrhCAbOLXQumyOXigZuPoME7QgJcBalliQol7YZ9", # noqa "Tim-b9MddTxOWW2AT1Py6vtVbZwGAmYCjbp89p8mxsiFoVX4FyDOF3wFiAkyQTUgwg9sVqVYOZo09Dh1AzhFHbgij52ylF0SEwgzjzHH8TGY8Lypart4p4onnDoDvVMBa0kdthVGKl6K0BDVGzyOXPXKpmnMF1H6rJzqHJ0HywfwS4XYpVwlAkoeNsiicHkJUFdUAhG229INzvIAiJuAHeJDUoyO4DCBqtoZ5TDend6TK7Y914yHlfH3g1WZu5LksKv68VQHJriWFYusW5e6ZZ6dKaMjTwEGuRgdT66iU5nqWTHRH8WSzpXoCFwGcTOwyuqPSe0fTe21DVtJn1FKj9F9nEnR9xOvJUO7E0piCIF4Ad9yAIDY4DBimpsTfKXCu1vdHpKYerzbndfuFe5AhfMduLYZJi5iAw8qKSwR5h86ttXV0Mc0QmXz8dsRvDgxjXSmupPxBggdlqUlC828hXiTPD7am0yETBV0F3bEtvPiNJfremszcV8NcqAoARMe"] # noqa # These should be different. 
result1 = hash_array(np.asarray(hashes[0:1], dtype=object), "utf8") expected1 = np.array([14963968704024874985], dtype=np.uint64) tm.assert_numpy_array_equal(result1, expected1) result2 = hash_array(np.asarray(hashes[1:2], dtype=object), "utf8") expected2 = np.array([16428432627716348016], dtype=np.uint64) tm.assert_numpy_array_equal(result2, expected2) result = hash_array(np.asarray(hashes, dtype=object), "utf8") tm.assert_numpy_array_equal(result, np.concatenate([expected1, expected2], axis=0))
MJuddBooth/pandas
pandas/tests/util/test_hashing.py
pandas/core/internals/managers.py
# -*- coding: utf-8 -*-
from __future__ import absolute_import

import logging
import re
import datetime

from website.identifiers.clients.base import AbstractIdentifierClient
from website import settings
from datacite import DataCiteMDSClient, schema40

logger = logging.getLogger(__name__)


class DataCiteClient(AbstractIdentifierClient):

    def __init__(self, base_url, prefix, client=None):
        self.base_url = base_url
        self.prefix = prefix
        self._client = client or DataCiteMDSClient(
            url=self.base_url,
            username=settings.DATACITE_USERNAME,
            password=settings.DATACITE_PASSWORD,
            prefix=self.prefix
        )

    def build_metadata(self, node):
        """Return the formatted datacite metadata XML as a string.
        """
        data = {
            'identifier': {
                'identifier': self.build_doi(node),
                'identifierType': 'DOI',
            },
            'creators': [
                {'creatorName': user.fullname,
                 'givenName': user.given_name,
                 'familyName': user.family_name}
                for user in node.visible_contributors
            ],
            'titles': [
                {'title': node.title}
            ],
            'publisher': 'Open Science Framework',
            'publicationYear': str(datetime.datetime.now().year),
            'resourceType': {
                'resourceType': 'Project',
                'resourceTypeGeneral': 'Text'
            }
        }

        if node.description:
            data['descriptions'] = [{
                'descriptionType': 'Abstract',
                'description': node.description
            }]

        if node.node_license:
            data['rightsList'] = [{
                'rights': node.node_license.name,
                'rightsURI': node.node_license.url
            }]

        # Validate dictionary
        assert schema40.validate(data)

        # Generate DataCite XML from dictionary.
        return schema40.tostring(data)

    def build_doi(self, object):
        return settings.DOI_FORMAT.format(prefix=self.prefix, guid=object._id)

    def get_identifier(self, identifier):
        self._client.doi_get(identifier)

    def create_identifier(self, node, category):
        if category == 'doi':
            metadata = self.build_metadata(node)
            resp = self._client.metadata_post(metadata)
            # Typical response: 'OK (10.70102/FK2osf.io/cq695)' to doi 10.70102/FK2osf.io/cq695
            doi = re.match(r'OK \((?P<doi>[a-zA-Z0-9 .\/]{0,})\)', resp).groupdict()['doi']
            if settings.DATACITE_MINT_DOIS:
                self._client.doi_post(doi, node.absolute_url)
            return {'doi': doi}
        else:
            raise NotImplementedError('Creating an identifier with category {} is not supported'.format(category))

    def update_identifier(self, node, category):
        if not node.is_public or node.is_deleted:
            if category == 'doi':
                doi = self.build_doi(node)
                self._client.metadata_delete(doi)
                return {'doi': doi}
            else:
                raise NotImplementedError('Updating metadata not supported for {}'.format(category))
        else:
            return self.create_identifier(node, category)
import unittest
from datetime import timedelta

import pytest
from django.utils import timezone

from website import maintenance
from osf.models import MaintenanceState

pytestmark = pytest.mark.django_db


class TestMaintenance(unittest.TestCase):

    def tearDown(self):
        MaintenanceState.objects.all().delete()

    def test_set_maintenance_twice(self):
        assert not MaintenanceState.objects.exists()
        maintenance.set_maintenance(message='')
        assert MaintenanceState.objects.all().count() == 1
        maintenance.set_maintenance(message='')
        assert MaintenanceState.objects.all().count() == 1

    def test_set_maintenance_with_start_date(self):
        start = timezone.now()
        maintenance.set_maintenance(message='', start=start.isoformat())
        current_state = MaintenanceState.objects.all().first()
        assert current_state.start == start
        assert current_state.end == start + timedelta(1)

    def test_set_maintenance_with_end_date(self):
        end = timezone.now()
        maintenance.set_maintenance(message='', end=end.isoformat())
        current_state = MaintenanceState.objects.all().first()
        assert current_state.start == end - timedelta(1)
        assert current_state.end == end

    def test_set_maintenance_in_future(self):
        start = (timezone.now() + timedelta(1))
        maintenance.set_maintenance(message='', start=start.isoformat())
        current_state = MaintenanceState.objects.all().first()
        assert current_state.start == start
        assert current_state.end == start + timedelta(1)

    def test_set_maintenance_level(self):
        maintenance.set_maintenance(message='')
        assert MaintenanceState.objects.all().first().level == 1
        maintenance.unset_maintenance()
        maintenance.set_maintenance(message='', level=3)
        assert MaintenanceState.objects.all().first().level == 3
        maintenance.unset_maintenance()

    def test_unset_maintenance(self):
        maintenance.set_maintenance(message='')
        assert MaintenanceState.objects.exists()
        maintenance.unset_maintenance()
        assert not MaintenanceState.objects.exists()
adlius/osf.io
osf_tests/test_maintenance.py
website/identifiers/clients/datacite.py
from django.db import models
from datetime import datetime

from osf.utils.storage import BannerImageStorage
from osf.exceptions import ValidationValueError
from osf.utils.fields import NonNaiveDateTimeField


def validate_banner_dates(banner_id, start_date, end_date):
    if start_date > end_date:
        raise ValidationValueError('Start date must be before end date.')

    overlapping = ScheduledBanner.objects.filter(
        (models.Q(start_date__gte=start_date) & models.Q(start_date__lte=end_date)) |
        (models.Q(end_date__gte=start_date) & models.Q(end_date__lte=end_date)) |
        (models.Q(start_date__lte=start_date) & models.Q(end_date__gte=end_date))
    ).exclude(id=banner_id).exists()

    if overlapping:
        raise ValidationValueError('Banners dates cannot be overlapping.')


class ScheduledBanner(models.Model):

    class Meta:
        # Custom permissions for use in the OSF Admin App
        permissions = (
            ('view_scheduledbanner', 'Can view scheduled banner details'),
        )

    name = models.CharField(unique=True, max_length=256)

    start_date = NonNaiveDateTimeField()
    end_date = NonNaiveDateTimeField()
    color = models.CharField(max_length=7)
    license = models.CharField(blank=True, null=True, max_length=256)
    link = models.URLField(blank=True, default='https://www.crowdrise.com/centerforopenscience')

    default_photo = models.FileField(storage=BannerImageStorage())
    default_alt_text = models.TextField()
    mobile_photo = models.FileField(storage=BannerImageStorage())
    mobile_alt_text = models.TextField(blank=True, null=True)

    def save(self, *args, **kwargs):
        self.start_date = datetime.combine(self.start_date, datetime.min.time())
        self.end_date = datetime.combine(self.end_date, datetime.max.time())
        validate_banner_dates(self.id, self.start_date, self.end_date)
        super(ScheduledBanner, self).save(*args, **kwargs)
import unittest
from datetime import timedelta

import pytest
from django.utils import timezone

from website import maintenance
from osf.models import MaintenanceState

pytestmark = pytest.mark.django_db


class TestMaintenance(unittest.TestCase):

    def tearDown(self):
        MaintenanceState.objects.all().delete()

    def test_set_maintenance_twice(self):
        assert not MaintenanceState.objects.exists()
        maintenance.set_maintenance(message='')
        assert MaintenanceState.objects.all().count() == 1
        maintenance.set_maintenance(message='')
        assert MaintenanceState.objects.all().count() == 1

    def test_set_maintenance_with_start_date(self):
        start = timezone.now()
        maintenance.set_maintenance(message='', start=start.isoformat())
        current_state = MaintenanceState.objects.all().first()
        assert current_state.start == start
        assert current_state.end == start + timedelta(1)

    def test_set_maintenance_with_end_date(self):
        end = timezone.now()
        maintenance.set_maintenance(message='', end=end.isoformat())
        current_state = MaintenanceState.objects.all().first()
        assert current_state.start == end - timedelta(1)
        assert current_state.end == end

    def test_set_maintenance_in_future(self):
        start = (timezone.now() + timedelta(1))
        maintenance.set_maintenance(message='', start=start.isoformat())
        current_state = MaintenanceState.objects.all().first()
        assert current_state.start == start
        assert current_state.end == start + timedelta(1)

    def test_set_maintenance_level(self):
        maintenance.set_maintenance(message='')
        assert MaintenanceState.objects.all().first().level == 1
        maintenance.unset_maintenance()
        maintenance.set_maintenance(message='', level=3)
        assert MaintenanceState.objects.all().first().level == 3
        maintenance.unset_maintenance()

    def test_unset_maintenance(self):
        maintenance.set_maintenance(message='')
        assert MaintenanceState.objects.exists()
        maintenance.unset_maintenance()
        assert not MaintenanceState.objects.exists()
adlius/osf.io
osf_tests/test_maintenance.py
osf/models/banner.py
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import models
from include import IncludeManager

from osf.models.base import BaseModel, ObjectIDMixin
from osf.utils.workflows import DefaultStates, DefaultTriggers, ReviewStates, ReviewTriggers
from osf.utils import permissions


class BaseAction(ObjectIDMixin, BaseModel):

    class Meta:
        abstract = True

    objects = IncludeManager()

    creator = models.ForeignKey('OSFUser', related_name='+', on_delete=models.CASCADE)

    trigger = models.CharField(max_length=31, choices=DefaultTriggers.choices())
    from_state = models.CharField(max_length=31, choices=DefaultStates.choices())
    to_state = models.CharField(max_length=31, choices=DefaultStates.choices())

    comment = models.TextField(blank=True)

    is_deleted = models.BooleanField(default=False)
    auto = models.BooleanField(default=False)

    @property
    def target(self):
        raise NotImplementedError()


class ReviewAction(BaseAction):
    target = models.ForeignKey('Preprint', related_name='actions', on_delete=models.CASCADE)

    trigger = models.CharField(max_length=31, choices=ReviewTriggers.choices())
    from_state = models.CharField(max_length=31, choices=ReviewStates.choices())
    to_state = models.CharField(max_length=31, choices=ReviewStates.choices())


class NodeRequestAction(BaseAction):
    target = models.ForeignKey('NodeRequest', related_name='actions', on_delete=models.CASCADE)

    permissions = models.CharField(
        max_length=5,
        choices=[(permission, permission.title()) for permission in permissions.API_CONTRIBUTOR_PERMISSIONS],
        default=permissions.READ
    )
    visible = models.BooleanField(default=True)


class PreprintRequestAction(BaseAction):
    target = models.ForeignKey('PreprintRequest', related_name='actions', on_delete=models.CASCADE)
import unittest
from datetime import timedelta

import pytest
from django.utils import timezone

from website import maintenance
from osf.models import MaintenanceState

pytestmark = pytest.mark.django_db


class TestMaintenance(unittest.TestCase):

    def tearDown(self):
        MaintenanceState.objects.all().delete()

    def test_set_maintenance_twice(self):
        assert not MaintenanceState.objects.exists()
        maintenance.set_maintenance(message='')
        assert MaintenanceState.objects.all().count() == 1
        maintenance.set_maintenance(message='')
        assert MaintenanceState.objects.all().count() == 1

    def test_set_maintenance_with_start_date(self):
        start = timezone.now()
        maintenance.set_maintenance(message='', start=start.isoformat())
        current_state = MaintenanceState.objects.all().first()
        assert current_state.start == start
        assert current_state.end == start + timedelta(1)

    def test_set_maintenance_with_end_date(self):
        end = timezone.now()
        maintenance.set_maintenance(message='', end=end.isoformat())
        current_state = MaintenanceState.objects.all().first()
        assert current_state.start == end - timedelta(1)
        assert current_state.end == end

    def test_set_maintenance_in_future(self):
        start = (timezone.now() + timedelta(1))
        maintenance.set_maintenance(message='', start=start.isoformat())
        current_state = MaintenanceState.objects.all().first()
        assert current_state.start == start
        assert current_state.end == start + timedelta(1)

    def test_set_maintenance_level(self):
        maintenance.set_maintenance(message='')
        assert MaintenanceState.objects.all().first().level == 1
        maintenance.unset_maintenance()
        maintenance.set_maintenance(message='', level=3)
        assert MaintenanceState.objects.all().first().level == 3
        maintenance.unset_maintenance()

    def test_unset_maintenance(self):
        maintenance.set_maintenance(message='')
        assert MaintenanceState.objects.exists()
        maintenance.unset_maintenance()
        assert not MaintenanceState.objects.exists()
adlius/osf.io
osf_tests/test_maintenance.py
osf/models/action.py
"""Support for Traccar.""" from aiohttp import web import voluptuous as vol from homeassistant.components.device_tracker import DOMAIN as DEVICE_TRACKER from homeassistant.const import CONF_WEBHOOK_ID, HTTP_OK, HTTP_UNPROCESSABLE_ENTITY from homeassistant.helpers import config_entry_flow import homeassistant.helpers.config_validation as cv from homeassistant.helpers.dispatcher import async_dispatcher_send from .const import ( ATTR_ACCURACY, ATTR_ALTITUDE, ATTR_BATTERY, ATTR_BEARING, ATTR_ID, ATTR_LATITUDE, ATTR_LONGITUDE, ATTR_SPEED, ATTR_TIMESTAMP, DOMAIN, ) TRACKER_UPDATE = f"{DOMAIN}_tracker_update" DEFAULT_ACCURACY = HTTP_OK DEFAULT_BATTERY = -1 def _id(value: str) -> str: """Coerce id by removing '-'.""" return value.replace("-", "") WEBHOOK_SCHEMA = vol.Schema( { vol.Required(ATTR_ID): vol.All(cv.string, _id), vol.Required(ATTR_LATITUDE): cv.latitude, vol.Required(ATTR_LONGITUDE): cv.longitude, vol.Optional(ATTR_ACCURACY, default=DEFAULT_ACCURACY): vol.Coerce(float), vol.Optional(ATTR_ALTITUDE): vol.Coerce(float), vol.Optional(ATTR_BATTERY, default=DEFAULT_BATTERY): vol.Coerce(float), vol.Optional(ATTR_BEARING): vol.Coerce(float), vol.Optional(ATTR_SPEED): vol.Coerce(float), vol.Optional(ATTR_TIMESTAMP): vol.Coerce(int), } ) async def async_setup(hass, hass_config): """Set up the Traccar component.""" hass.data[DOMAIN] = {"devices": set(), "unsub_device_tracker": {}} return True async def handle_webhook(hass, webhook_id, request): """Handle incoming webhook with Traccar request.""" try: data = WEBHOOK_SCHEMA(dict(request.query)) except vol.MultipleInvalid as error: return web.Response(text=error.error_message, status=HTTP_UNPROCESSABLE_ENTITY) attrs = { ATTR_ALTITUDE: data.get(ATTR_ALTITUDE), ATTR_BEARING: data.get(ATTR_BEARING), ATTR_SPEED: data.get(ATTR_SPEED), } device = data[ATTR_ID] async_dispatcher_send( hass, TRACKER_UPDATE, device, data[ATTR_LATITUDE], data[ATTR_LONGITUDE], data[ATTR_BATTERY], data[ATTR_ACCURACY], attrs, ) return web.Response(text=f"Setting location for {device}", status=HTTP_OK) async def async_setup_entry(hass, entry): """Configure based on config entry.""" hass.components.webhook.async_register( DOMAIN, "Traccar", entry.data[CONF_WEBHOOK_ID], handle_webhook ) hass.async_create_task( hass.config_entries.async_forward_entry_setup(entry, DEVICE_TRACKER) ) return True async def async_unload_entry(hass, entry): """Unload a config entry.""" hass.components.webhook.async_unregister(entry.data[CONF_WEBHOOK_ID]) hass.data[DOMAIN]["unsub_device_tracker"].pop(entry.entry_id)() await hass.config_entries.async_forward_entry_unload(entry, DEVICE_TRACKER) return True async_remove_entry = config_entry_flow.webhook_async_remove_entry
# flake8: noqa pylint: skip-file """Tests for the TelldusLive config flow.""" import asyncio from unittest.mock import Mock, patch import pytest from homeassistant import data_entry_flow from homeassistant.components.tellduslive import ( APPLICATION_NAME, DOMAIN, KEY_SCAN_INTERVAL, SCAN_INTERVAL, config_flow, ) from homeassistant.config_entries import SOURCE_DISCOVERY from homeassistant.const import CONF_HOST from tests.common import MockConfigEntry, mock_coro def init_config_flow(hass, side_effect=None): """Init a configuration flow.""" flow = config_flow.FlowHandler() flow.hass = hass if side_effect: flow._get_auth_url = Mock(side_effect=side_effect) return flow @pytest.fixture def supports_local_api(): """Set TelldusLive supports_local_api.""" return True @pytest.fixture def authorize(): """Set TelldusLive authorize.""" return True @pytest.fixture def mock_tellduslive(supports_local_api, authorize): """Mock tellduslive.""" with patch( "homeassistant.components.tellduslive.config_flow.Session" ) as Session, patch( "homeassistant.components.tellduslive.config_flow.supports_local_api" ) as tellduslive_supports_local_api: tellduslive_supports_local_api.return_value = supports_local_api Session().authorize.return_value = authorize Session().access_token = "token" Session().access_token_secret = "token_secret" Session().authorize_url = "https://example.com" yield Session, tellduslive_supports_local_api async def test_abort_if_already_setup(hass): """Test we abort if TelldusLive is already setup.""" flow = init_config_flow(hass) with patch.object(hass.config_entries, "async_entries", return_value=[{}]): result = await flow.async_step_user() assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT assert result["reason"] == "already_setup" with patch.object(hass.config_entries, "async_entries", return_value=[{}]): result = await flow.async_step_import(None) assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT assert result["reason"] == "already_setup" async def test_full_flow_implementation(hass, mock_tellduslive): """Test registering an implementation and finishing flow works.""" flow = init_config_flow(hass) flow.context = {"source": SOURCE_DISCOVERY} result = await flow.async_step_discovery(["localhost", "tellstick"]) assert result["type"] == data_entry_flow.RESULT_TYPE_FORM assert result["step_id"] == "user" assert len(flow._hosts) == 2 result = await flow.async_step_user() assert result["type"] == data_entry_flow.RESULT_TYPE_FORM assert result["step_id"] == "user" result = await flow.async_step_user({"host": "localhost"}) assert result["type"] == data_entry_flow.RESULT_TYPE_FORM assert result["step_id"] == "auth" assert result["description_placeholders"] == { "auth_url": "https://example.com", "app_name": APPLICATION_NAME, } result = await flow.async_step_auth("") assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY assert result["title"] == "localhost" assert result["data"]["host"] == "localhost" assert result["data"]["scan_interval"] == 60 assert result["data"]["session"] == {"token": "token", "host": "localhost"} async def test_step_import(hass, mock_tellduslive): """Test that we trigger auth when configuring from import.""" flow = init_config_flow(hass) result = await flow.async_step_import({CONF_HOST: DOMAIN, KEY_SCAN_INTERVAL: 0}) assert result["type"] == data_entry_flow.RESULT_TYPE_FORM assert result["step_id"] == "auth" async def test_step_import_add_host(hass, mock_tellduslive): """Test that we add host and trigger user when configuring from import.""" flow = 
init_config_flow(hass) result = await flow.async_step_import( {CONF_HOST: "localhost", KEY_SCAN_INTERVAL: 0} ) assert result["type"] == data_entry_flow.RESULT_TYPE_FORM assert result["step_id"] == "user" async def test_step_import_no_config_file(hass, mock_tellduslive): """Test that we trigger user with no config_file configuring from import.""" flow = init_config_flow(hass) result = await flow.async_step_import( {CONF_HOST: "localhost", KEY_SCAN_INTERVAL: 0} ) assert result["type"] == data_entry_flow.RESULT_TYPE_FORM assert result["step_id"] == "user" async def test_step_import_load_json_matching_host(hass, mock_tellduslive): """Test that we add host and trigger user when configuring from import.""" flow = init_config_flow(hass) with patch( "homeassistant.components.tellduslive.config_flow.load_json", return_value={"tellduslive": {}}, ), patch("os.path.isfile"): result = await flow.async_step_import( {CONF_HOST: "Cloud API", KEY_SCAN_INTERVAL: 0} ) assert result["type"] == data_entry_flow.RESULT_TYPE_FORM assert result["step_id"] == "user" async def test_step_import_load_json(hass, mock_tellduslive): """Test that we create entry when configuring from import.""" flow = init_config_flow(hass) with patch( "homeassistant.components.tellduslive.config_flow.load_json", return_value={"localhost": {}}, ), patch("os.path.isfile"): result = await flow.async_step_import( {CONF_HOST: "localhost", KEY_SCAN_INTERVAL: SCAN_INTERVAL} ) assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY assert result["title"] == "localhost" assert result["data"]["host"] == "localhost" assert result["data"]["scan_interval"] == 60 assert result["data"]["session"] == {} @pytest.mark.parametrize("supports_local_api", [False]) async def test_step_disco_no_local_api(hass, mock_tellduslive): """Test that we trigger when configuring from discovery, not supporting local api.""" flow = init_config_flow(hass) flow.context = {"source": SOURCE_DISCOVERY} result = await flow.async_step_discovery(["localhost", "tellstick"]) assert result["type"] == data_entry_flow.RESULT_TYPE_FORM assert result["step_id"] == "auth" assert len(flow._hosts) == 1 async def test_step_auth(hass, mock_tellduslive): """Test that create cloud entity from auth.""" flow = init_config_flow(hass) await flow.async_step_auth() result = await flow.async_step_auth(["localhost", "tellstick"]) assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY assert result["title"] == "Cloud API" assert result["data"]["host"] == "Cloud API" assert result["data"]["scan_interval"] == 60 assert result["data"]["session"] == { "token": "token", "token_secret": "token_secret", } @pytest.mark.parametrize("authorize", [False]) async def test_wrong_auth_flow_implementation(hass, mock_tellduslive): """Test wrong auth.""" flow = init_config_flow(hass) await flow.async_step_auth() result = await flow.async_step_auth("") assert result["type"] == data_entry_flow.RESULT_TYPE_FORM assert result["step_id"] == "auth" assert result["errors"]["base"] == "invalid_auth" async def test_not_pick_host_if_only_one(hass, mock_tellduslive): """Test not picking host if we have just one.""" flow = init_config_flow(hass) result = await flow.async_step_user() assert result["type"] == data_entry_flow.RESULT_TYPE_FORM assert result["step_id"] == "auth" async def test_abort_if_timeout_generating_auth_url(hass, mock_tellduslive): """Test abort if generating authorize url timeout.""" flow = init_config_flow(hass, side_effect=asyncio.TimeoutError) result = await flow.async_step_user() assert 
result["type"] == data_entry_flow.RESULT_TYPE_ABORT assert result["reason"] == "authorize_url_timeout" async def test_abort_no_auth_url(hass, mock_tellduslive): """Test abort if generating authorize url returns none.""" flow = init_config_flow(hass) flow._get_auth_url = Mock(return_value=False) result = await flow.async_step_user() assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT assert result["reason"] == "unknown_authorize_url_generation" async def test_abort_if_exception_generating_auth_url(hass, mock_tellduslive): """Test we abort if generating authorize url blows up.""" flow = init_config_flow(hass, side_effect=ValueError) result = await flow.async_step_user() assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT assert result["reason"] == "unknown_authorize_url_generation" async def test_discovery_already_configured(hass, mock_tellduslive): """Test abort if already configured fires from discovery.""" MockConfigEntry(domain="tellduslive", data={"host": "some-host"}).add_to_hass(hass) flow = init_config_flow(hass) flow.context = {"source": SOURCE_DISCOVERY} with pytest.raises(data_entry_flow.AbortFlow): result = await flow.async_step_discovery(["some-host", ""])
tboyce021/home-assistant
tests/components/tellduslive/test_config_flow.py
homeassistant/components/traccar/__init__.py
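The TelldusLive config flow tests above rely on pytest's documented ability to override a fixture by parametrizing its name: `@pytest.mark.parametrize("supports_local_api", [False])` changes the value that the dependent `mock_tellduslive` fixture sees, even though the test never requests `supports_local_api` directly. A minimal, self-contained sketch of that pattern (the fixture and test names here are illustrative stand-ins, not part of the test file):

import pytest


@pytest.fixture
def supports_local_api():
    """Default value, mirroring the fixture in the tests above."""
    return True


@pytest.fixture
def api_flavour(supports_local_api):
    """A dependent fixture, standing in for mock_tellduslive."""
    return "local" if supports_local_api else "cloud-only"


def test_default(api_flavour):
    # Without parametrization the dependent fixture sees the default value.
    assert api_flavour == "local"


@pytest.mark.parametrize("supports_local_api", [False])
def test_overridden(api_flavour):
    # Parametrizing the fixture name overrides it for the whole fixture
    # closure, even though this test never requests it directly.
    assert api_flavour == "cloud-only"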
"""Support for Tibber sensors.""" import asyncio from datetime import timedelta import logging from random import randrange import aiohttp from homeassistant.components.sensor import DEVICE_CLASS_POWER from homeassistant.const import POWER_WATT from homeassistant.exceptions import PlatformNotReady from homeassistant.helpers.entity import Entity from homeassistant.util import Throttle, dt as dt_util from .const import DOMAIN as TIBBER_DOMAIN, MANUFACTURER _LOGGER = logging.getLogger(__name__) ICON = "mdi:currency-usd" SCAN_INTERVAL = timedelta(minutes=1) MIN_TIME_BETWEEN_UPDATES = timedelta(minutes=5) PARALLEL_UPDATES = 0 async def async_setup_entry(hass, entry, async_add_entities): """Set up the Tibber sensor.""" tibber_connection = hass.data.get(TIBBER_DOMAIN) dev = [] for home in tibber_connection.get_homes(only_active=False): try: await home.update_info() except asyncio.TimeoutError as err: _LOGGER.error("Timeout connecting to Tibber home: %s ", err) raise PlatformNotReady() from err except aiohttp.ClientError as err: _LOGGER.error("Error connecting to Tibber home: %s ", err) raise PlatformNotReady() from err if home.has_active_subscription: dev.append(TibberSensorElPrice(home)) if home.has_real_time_consumption: dev.append(TibberSensorRT(home)) async_add_entities(dev, True) class TibberSensor(Entity): """Representation of a generic Tibber sensor.""" def __init__(self, tibber_home): """Initialize the sensor.""" self._tibber_home = tibber_home self._last_updated = None self._state = None self._is_available = False self._device_state_attributes = {} self._name = tibber_home.info["viewer"]["home"]["appNickname"] if self._name is None: self._name = tibber_home.info["viewer"]["home"]["address"].get( "address1", "" ) self._spread_load_constant = randrange(3600) @property def device_state_attributes(self): """Return the state attributes.""" return self._device_state_attributes @property def model(self): """Return the model of the sensor.""" return None @property def state(self): """Return the state of the device.""" return self._state @property def device_id(self): """Return the ID of the physical device this sensor is part of.""" home = self._tibber_home.info["viewer"]["home"] return home["meteringPointData"]["consumptionEan"] @property def device_info(self): """Return the device_info of the device.""" device_info = { "identifiers": {(TIBBER_DOMAIN, self.device_id)}, "name": self.name, "manufacturer": MANUFACTURER, } if self.model is not None: device_info["model"] = self.model return device_info class TibberSensorElPrice(TibberSensor): """Representation of a Tibber sensor for el price.""" async def async_update(self): """Get the latest data and updates the states.""" now = dt_util.now() if ( self._tibber_home.current_price_total and self._last_updated and self._last_updated.hour == now.hour and self._tibber_home.last_data_timestamp ): return if ( not self._tibber_home.last_data_timestamp or (self._tibber_home.last_data_timestamp - now).total_seconds() < 5 * 3600 + self._spread_load_constant or not self._is_available ): _LOGGER.debug("Asking for new data") await self._fetch_data() res = self._tibber_home.current_price_data() self._state, price_level, self._last_updated = res self._device_state_attributes["price_level"] = price_level attrs = self._tibber_home.current_attributes() self._device_state_attributes.update(attrs) self._is_available = self._state is not None @property def available(self): """Return True if entity is available.""" return self._is_available @property def name(self): 
"""Return the name of the sensor.""" return f"Electricity price {self._name}" @property def model(self): """Return the model of the sensor.""" return "Price Sensor" @property def icon(self): """Return the icon to use in the frontend.""" return ICON @property def unit_of_measurement(self): """Return the unit of measurement of this entity.""" return self._tibber_home.price_unit @property def unique_id(self): """Return a unique ID.""" return self.device_id @Throttle(MIN_TIME_BETWEEN_UPDATES) async def _fetch_data(self): _LOGGER.debug("Fetching data") try: await self._tibber_home.update_info_and_price_info() except (asyncio.TimeoutError, aiohttp.ClientError): return data = self._tibber_home.info["viewer"]["home"] self._device_state_attributes["app_nickname"] = data["appNickname"] self._device_state_attributes["grid_company"] = data["meteringPointData"][ "gridCompany" ] self._device_state_attributes["estimated_annual_consumption"] = data[ "meteringPointData" ]["estimatedAnnualConsumption"] class TibberSensorRT(TibberSensor): """Representation of a Tibber sensor for real time consumption.""" async def async_added_to_hass(self): """Start listen for real time data.""" await self._tibber_home.rt_subscribe(self.hass.loop, self._async_callback) async def _async_callback(self, payload): """Handle received data.""" errors = payload.get("errors") if errors: _LOGGER.error(errors[0]) return data = payload.get("data") if data is None: return live_measurement = data.get("liveMeasurement") if live_measurement is None: return self._state = live_measurement.pop("power", None) for key, value in live_measurement.items(): if value is None: continue self._device_state_attributes[key] = value self.async_write_ha_state() @property def available(self): """Return True if entity is available.""" return self._tibber_home.rt_subscription_running @property def model(self): """Return the model of the sensor.""" return "Tibber Pulse" @property def name(self): """Return the name of the sensor.""" return f"Real time consumption {self._name}" @property def should_poll(self): """Return the polling state.""" return False @property def unit_of_measurement(self): """Return the unit of measurement of this entity.""" return POWER_WATT @property def unique_id(self): """Return a unique ID.""" return f"{self.device_id}_rt_consumption" @property def device_class(self): """Return the device class of the sensor.""" return DEVICE_CLASS_POWER
homeassistant/components/tibber/sensor.py
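The Tibber price sensor above only refetches when its cached prices are about to run out, and it staggers that refetch with a random per-instance offset so many installations do not hit the Tibber API at the same moment. A standalone sketch of that refresh decision (the function name and sample timestamps are illustrative, not part of the integration):

from datetime import datetime, timedelta, timezone
from random import randrange

# Per-instance offset (0-3599 s), as in TibberSensorElPrice.__init__, so that
# a fleet of installations does not all refresh at exactly the same time.
SPREAD_LOAD_CONSTANT = randrange(3600)


def should_fetch(last_data_timestamp, now, available=True):
    """Mirror the refresh condition used in TibberSensorElPrice.async_update."""
    if last_data_timestamp is None or not available:
        return True
    remaining = (last_data_timestamp - now).total_seconds()
    # Refresh when the known price data covers less than ~5 hours
    # (plus the per-instance spread offset) into the future.
    return remaining < 5 * 3600 + SPREAD_LOAD_CONSTANT


now = datetime(2021, 1, 1, 12, 0, tzinfo=timezone.utc)
print(should_fetch(now + timedelta(hours=12), now))  # False: plenty of data left
print(should_fetch(now + timedelta(hours=2), now))   # True: running low on data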
"""Config flow for IFTTT.""" from homeassistant.helpers import config_entry_flow from .const import DOMAIN config_entry_flow.register_webhook_flow( DOMAIN, "IFTTT Webhook", { "applet_url": "https://ifttt.com/maker_webhooks", "docs_url": "https://www.home-assistant.io/integrations/ifttt/", }, )
homeassistant/components/ifttt/config_flow.py
"""Adds config flow for Dune HD integration.""" import ipaddress import logging import re from pdunehd import DuneHDPlayer import voluptuous as vol from homeassistant import config_entries, exceptions from homeassistant.const import CONF_HOST from .const import DOMAIN # pylint:disable=unused-import _LOGGER = logging.getLogger(__name__) def host_valid(host): """Return True if hostname or IP address is valid.""" try: if ipaddress.ip_address(host).version == (4 or 6): return True except ValueError: if len(host) > 253: return False allowed = re.compile(r"(?!-)[A-Z\d\-\_]{1,63}(?<!-)$", re.IGNORECASE) return all(allowed.match(x) for x in host.split(".")) class DuneHDConfigFlow(config_entries.ConfigFlow, domain=DOMAIN): """Handle a config flow for Dune HD integration.""" VERSION = 1 CONNECTION_CLASS = config_entries.CONN_CLASS_LOCAL_POLL def __init__(self): """Initialize.""" self.host = None async def init_device(self, host): """Initialize Dune HD player.""" player = DuneHDPlayer(host) state = await self.hass.async_add_executor_job(player.update_state) if not state: raise CannotConnect() async def async_step_user(self, user_input=None): """Handle the initial step.""" errors = {} if user_input is not None: if host_valid(user_input[CONF_HOST]): self.host = user_input[CONF_HOST] try: if self.host_already_configured(self.host): raise AlreadyConfigured() await self.init_device(self.host) except CannotConnect: errors[CONF_HOST] = "cannot_connect" except AlreadyConfigured: errors[CONF_HOST] = "already_configured" else: return self.async_create_entry(title=self.host, data=user_input) else: errors[CONF_HOST] = "invalid_host" return self.async_show_form( step_id="user", data_schema=vol.Schema({vol.Required(CONF_HOST, default=""): str}), errors=errors, ) async def async_step_import(self, user_input=None): """Handle configuration by yaml file.""" self.host = user_input[CONF_HOST] if self.host_already_configured(self.host): return self.async_abort(reason="already_configured") try: await self.init_device(self.host) except CannotConnect: _LOGGER.error("Import aborted, cannot connect to %s", self.host) return self.async_abort(reason="cannot_connect") else: return self.async_create_entry(title=self.host, data=user_input) def host_already_configured(self, host): """See if we already have a dunehd entry matching user input configured.""" existing_hosts = { entry.data[CONF_HOST] for entry in self._async_current_entries() } return host in existing_hosts class CannotConnect(exceptions.HomeAssistantError): """Error to indicate we cannot connect.""" class AlreadyConfigured(exceptions.HomeAssistantError): """Error to indicate device is already configured."""
homeassistant/components/dunehd/config_flow.py
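The host validator above can be exercised on its own. The following standalone copy of the validation logic (accepting both IPv4 and IPv6 literals; the module-level constant name is introduced here for the example) shows which inputs pass:

import ipaddress
import re

HOSTNAME_LABEL = re.compile(r"(?!-)[A-Z\d\-\_]{1,63}(?<!-)$", re.IGNORECASE)


def host_valid(host):
    """Standalone copy of the Dune HD host check, for illustration only."""
    try:
        # IPv4 and IPv6 literals are accepted directly.
        return ipaddress.ip_address(host).version in (4, 6)
    except ValueError:
        if len(host) > 253:
            return False
        # Every dot-separated label: 1-63 chars, no leading/trailing hyphen.
        return all(HOSTNAME_LABEL.match(label) for label in host.split("."))


assert host_valid("192.168.1.10")
assert host_valid("fe80::1")
assert host_valid("dune-hd.local")
assert not host_valid("-bad-label.example")
assert not host_valid("a" * 254)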
"""Support for the OpenWeatherMap (OWM) service.""" from .abstract_owm_sensor import AbstractOpenWeatherMapSensor from .const import ( ATTR_API_FORECAST, DOMAIN, ENTRY_NAME, ENTRY_WEATHER_COORDINATOR, FORECAST_MONITORED_CONDITIONS, FORECAST_SENSOR_TYPES, MONITORED_CONDITIONS, WEATHER_SENSOR_TYPES, ) from .weather_update_coordinator import WeatherUpdateCoordinator async def async_setup_entry(hass, config_entry, async_add_entities): """Set up OpenWeatherMap sensor entities based on a config entry.""" domain_data = hass.data[DOMAIN][config_entry.entry_id] name = domain_data[ENTRY_NAME] weather_coordinator = domain_data[ENTRY_WEATHER_COORDINATOR] weather_sensor_types = WEATHER_SENSOR_TYPES forecast_sensor_types = FORECAST_SENSOR_TYPES entities = [] for sensor_type in MONITORED_CONDITIONS: unique_id = f"{config_entry.unique_id}-{sensor_type}" entities.append( OpenWeatherMapSensor( name, unique_id, sensor_type, weather_sensor_types[sensor_type], weather_coordinator, ) ) for sensor_type in FORECAST_MONITORED_CONDITIONS: unique_id = f"{config_entry.unique_id}-forecast-{sensor_type}" entities.append( OpenWeatherMapForecastSensor( f"{name} Forecast", unique_id, sensor_type, forecast_sensor_types[sensor_type], weather_coordinator, ) ) async_add_entities(entities) class OpenWeatherMapSensor(AbstractOpenWeatherMapSensor): """Implementation of an OpenWeatherMap sensor.""" def __init__( self, name, unique_id, sensor_type, sensor_configuration, weather_coordinator: WeatherUpdateCoordinator, ): """Initialize the sensor.""" super().__init__( name, unique_id, sensor_type, sensor_configuration, weather_coordinator ) self._weather_coordinator = weather_coordinator @property def state(self): """Return the state of the device.""" return self._weather_coordinator.data.get(self._sensor_type, None) class OpenWeatherMapForecastSensor(AbstractOpenWeatherMapSensor): """Implementation of an OpenWeatherMap this day forecast sensor.""" def __init__( self, name, unique_id, sensor_type, sensor_configuration, weather_coordinator: WeatherUpdateCoordinator, ): """Initialize the sensor.""" super().__init__( name, unique_id, sensor_type, sensor_configuration, weather_coordinator ) self._weather_coordinator = weather_coordinator @property def state(self): """Return the state of the device.""" forecasts = self._weather_coordinator.data.get(ATTR_API_FORECAST) if forecasts is not None and len(forecasts) > 0: return forecasts[0].get(self._sensor_type, None) return None
homeassistant/components/openweathermap/sensor.py
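The two OpenWeatherMap sensor classes above differ only in where they look up their value in the coordinator's data: current conditions come straight from the top-level mapping, while the forecast sensor reads the first entry of the forecast list. A small sketch with made-up coordinator data (the keys and values here are illustrative; the real ones come from WEATHER_SENSOR_TYPES, FORECAST_SENSOR_TYPES, and ATTR_API_FORECAST):

# Illustrative stand-in for weather_coordinator.data.
coordinator_data = {
    "temperature": 12.3,
    "humidity": 81,
    "forecast": [
        {"temperature": 14.0, "precipitation": 0.2},  # today
        {"temperature": 15.5, "precipitation": 0.0},  # tomorrow
    ],
}

# OpenWeatherMapSensor.state: direct lookup of the sensor type.
current_temperature = coordinator_data.get("temperature")

# OpenWeatherMapForecastSensor.state: first forecast entry, if any.
forecasts = coordinator_data.get("forecast")
forecast_temperature = forecasts[0].get("temperature") if forecasts else None

print(current_temperature, forecast_temperature)  # 12.3 14.0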
"""Adds a simulated sensor.""" from datetime import datetime import math from random import Random import voluptuous as vol from homeassistant.components.sensor import PLATFORM_SCHEMA from homeassistant.const import CONF_NAME import homeassistant.helpers.config_validation as cv from homeassistant.helpers.entity import Entity import homeassistant.util.dt as dt_util CONF_AMP = "amplitude" CONF_FWHM = "spread" CONF_MEAN = "mean" CONF_PERIOD = "period" CONF_PHASE = "phase" CONF_SEED = "seed" CONF_UNIT = "unit" CONF_RELATIVE_TO_EPOCH = "relative_to_epoch" DEFAULT_AMP = 1 DEFAULT_FWHM = 0 DEFAULT_MEAN = 0 DEFAULT_NAME = "simulated" DEFAULT_PERIOD = 60 DEFAULT_PHASE = 0 DEFAULT_SEED = 999 DEFAULT_UNIT = "value" DEFAULT_RELATIVE_TO_EPOCH = True ICON = "mdi:chart-line" PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend( { vol.Optional(CONF_AMP, default=DEFAULT_AMP): vol.Coerce(float), vol.Optional(CONF_FWHM, default=DEFAULT_FWHM): vol.Coerce(float), vol.Optional(CONF_MEAN, default=DEFAULT_MEAN): vol.Coerce(float), vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string, vol.Optional(CONF_PERIOD, default=DEFAULT_PERIOD): cv.positive_int, vol.Optional(CONF_PHASE, default=DEFAULT_PHASE): vol.Coerce(float), vol.Optional(CONF_SEED, default=DEFAULT_SEED): cv.positive_int, vol.Optional(CONF_UNIT, default=DEFAULT_UNIT): cv.string, vol.Optional( CONF_RELATIVE_TO_EPOCH, default=DEFAULT_RELATIVE_TO_EPOCH ): cv.boolean, } ) def setup_platform(hass, config, add_entities, discovery_info=None): """Set up the simulated sensor.""" name = config.get(CONF_NAME) unit = config.get(CONF_UNIT) amp = config.get(CONF_AMP) mean = config.get(CONF_MEAN) period = config.get(CONF_PERIOD) phase = config.get(CONF_PHASE) fwhm = config.get(CONF_FWHM) seed = config.get(CONF_SEED) relative_to_epoch = config.get(CONF_RELATIVE_TO_EPOCH) sensor = SimulatedSensor( name, unit, amp, mean, period, phase, fwhm, seed, relative_to_epoch ) add_entities([sensor], True) class SimulatedSensor(Entity): """Class for simulated sensor.""" def __init__( self, name, unit, amp, mean, period, phase, fwhm, seed, relative_to_epoch ): """Init the class.""" self._name = name self._unit = unit self._amp = amp self._mean = mean self._period = period self._phase = phase # phase in degrees self._fwhm = fwhm self._seed = seed self._random = Random(seed) # A local seeded Random self._start_time = ( datetime(1970, 1, 1, tzinfo=dt_util.UTC) if relative_to_epoch else dt_util.utcnow() ) self._relative_to_epoch = relative_to_epoch self._state = None def time_delta(self): """Return the time delta.""" dt0 = self._start_time dt1 = dt_util.utcnow() return dt1 - dt0 def signal_calc(self): """Calculate the signal.""" mean = self._mean amp = self._amp time_delta = self.time_delta().total_seconds() * 1e6 # to milliseconds period = self._period * 1e6 # to milliseconds fwhm = self._fwhm / 2 phase = math.radians(self._phase) if period == 0: periodic = 0 else: periodic = amp * (math.sin((2 * math.pi * time_delta / period) + phase)) noise = self._random.gauss(mu=0, sigma=fwhm) return round(mean + periodic + noise, 3) async def async_update(self): """Update the sensor.""" self._state = self.signal_calc() @property def name(self): """Return the name of the sensor.""" return self._name @property def state(self): """Return the state of the sensor.""" return self._state @property def icon(self): """Icon to use in the frontend, if any.""" return ICON @property def unit_of_measurement(self): """Return the unit this state is expressed in.""" return self._unit @property def 
device_state_attributes(self): """Return other details about the sensor state.""" return { "amplitude": self._amp, "mean": self._mean, "period": self._period, "phase": self._phase, "spread": self._fwhm, "seed": self._seed, "relative_to_epoch": self._relative_to_epoch, }
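The simulated value produced above is a sine wave around a mean with seeded Gaussian noise; because both the elapsed time and the period are scaled by the same factor before dividing, the scaling cancels in the ratio. A standalone sketch of the same formula (the function name, default arguments, and sample values are arbitrary examples):

import math
from random import Random

rng = Random(999)  # seeded, matching the platform's default seed


def simulated_value(elapsed_s, mean=0.0, amp=1.0, period_s=60, phase_deg=0.0, spread=0.0):
    """Mirror SimulatedSensor.signal_calc for a given elapsed time in seconds."""
    if period_s == 0:
        periodic = 0.0
    else:
        periodic = amp * math.sin(2 * math.pi * elapsed_s / period_s + math.radians(phase_deg))
    # The configured spread is halved and used as the Gaussian sigma.
    noise = rng.gauss(mu=0, sigma=spread / 2)
    return round(mean + periodic + noise, 3)


# A quarter period into the cycle the sine term peaks at +amp (no noise here).
print(simulated_value(15, mean=10, amp=2, period_s=60))              # 12.0
print(simulated_value(30, mean=10, amp=2, period_s=60))              # 10.0
print(simulated_value(15, mean=10, amp=2, period_s=60, spread=0.5))  # noisy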
tboyce021/home-assistant
tests/components/tellduslive/test_config_flow.py
homeassistant/components/simulated/sensor.py
"""Platform for Time of Flight sensor VL53L1X from STMicroelectronics.""" import asyncio from functools import partial from VL53L1X2 import VL53L1X # pylint: disable=import-error import voluptuous as vol from homeassistant.components import rpi_gpio from homeassistant.components.sensor import PLATFORM_SCHEMA from homeassistant.const import CONF_NAME, LENGTH_MILLIMETERS import homeassistant.helpers.config_validation as cv from homeassistant.helpers.entity import Entity CONF_I2C_ADDRESS = "i2c_address" CONF_I2C_BUS = "i2c_bus" CONF_XSHUT = "xshut" DEFAULT_NAME = "VL53L1X" DEFAULT_I2C_ADDRESS = 0x29 DEFAULT_I2C_BUS = 1 DEFAULT_XSHUT = 16 DEFAULT_RANGE = 2 PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend( { vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string, vol.Optional(CONF_I2C_ADDRESS, default=DEFAULT_I2C_ADDRESS): vol.Coerce(int), vol.Optional(CONF_I2C_BUS, default=DEFAULT_I2C_BUS): vol.Coerce(int), vol.Optional(CONF_XSHUT, default=DEFAULT_XSHUT): cv.positive_int, } ) def init_tof_0(xshut, sensor): """XSHUT port LOW resets the device.""" sensor.open() rpi_gpio.setup_output(xshut) rpi_gpio.write_output(xshut, 0) def init_tof_1(xshut): """XSHUT port HIGH enables the device.""" rpi_gpio.setup_output(xshut) rpi_gpio.write_output(xshut, 1) async def async_setup_platform(hass, config, async_add_entities, discovery_info=None): """Reset and initialize the VL53L1X ToF Sensor from STMicroelectronics.""" name = config.get(CONF_NAME) bus_number = config.get(CONF_I2C_BUS) i2c_address = config.get(CONF_I2C_ADDRESS) unit = LENGTH_MILLIMETERS xshut = config.get(CONF_XSHUT) sensor = await hass.async_add_executor_job(partial(VL53L1X, bus_number)) await hass.async_add_executor_job(init_tof_0, xshut, sensor) await asyncio.sleep(0.01) await hass.async_add_executor_job(init_tof_1, xshut) await asyncio.sleep(0.01) dev = [VL53L1XSensor(sensor, name, unit, i2c_address)] async_add_entities(dev, True) class VL53L1XSensor(Entity): """Implementation of VL53L1X sensor.""" def __init__(self, vl53l1x_sensor, name, unit, i2c_address): """Initialize the sensor.""" self._name = name self._unit_of_measurement = unit self.vl53l1x_sensor = vl53l1x_sensor self.i2c_address = i2c_address self._state = None self.init = True @property def name(self) -> str: """Return the name of the sensor.""" return self._name @property def state(self) -> int: """Return the state of the sensor.""" return self._state @property def unit_of_measurement(self) -> str: """Return the unit of measurement.""" return self._unit_of_measurement def update(self): """Get the latest measurement and update state.""" if self.init: self.vl53l1x_sensor.add_sensor(self.i2c_address, self.i2c_address) self.init = False self.vl53l1x_sensor.start_ranging(self.i2c_address, DEFAULT_RANGE) self.vl53l1x_sensor.update(self.i2c_address) self.vl53l1x_sensor.stop_ranging(self.i2c_address) self._state = self.vl53l1x_sensor.distance
tboyce021/home-assistant
tests/components/tellduslive/test_config_flow.py
homeassistant/components/tof/sensor.py
"""Support for an exposed aREST RESTful API of a device.""" from datetime import timedelta import logging import requests import voluptuous as vol from homeassistant.components.binary_sensor import ( DEVICE_CLASSES_SCHEMA, PLATFORM_SCHEMA, BinarySensorEntity, ) from homeassistant.const import ( CONF_DEVICE_CLASS, CONF_NAME, CONF_PIN, CONF_RESOURCE, HTTP_OK, ) import homeassistant.helpers.config_validation as cv from homeassistant.util import Throttle _LOGGER = logging.getLogger(__name__) MIN_TIME_BETWEEN_UPDATES = timedelta(seconds=30) PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend( { vol.Required(CONF_RESOURCE): cv.url, vol.Optional(CONF_NAME): cv.string, vol.Required(CONF_PIN): cv.string, vol.Optional(CONF_DEVICE_CLASS): DEVICE_CLASSES_SCHEMA, } ) def setup_platform(hass, config, add_entities, discovery_info=None): """Set up the aREST binary sensor.""" resource = config[CONF_RESOURCE] pin = config[CONF_PIN] device_class = config.get(CONF_DEVICE_CLASS) try: response = requests.get(resource, timeout=10).json() except requests.exceptions.MissingSchema: _LOGGER.error( "Missing resource or schema in configuration. Add http:// to your URL" ) return False except requests.exceptions.ConnectionError: _LOGGER.error("No route to device at %s", resource) return False arest = ArestData(resource, pin) add_entities( [ ArestBinarySensor( arest, resource, config.get(CONF_NAME, response[CONF_NAME]), device_class, pin, ) ], True, ) class ArestBinarySensor(BinarySensorEntity): """Implement an aREST binary sensor for a pin.""" def __init__(self, arest, resource, name, device_class, pin): """Initialize the aREST device.""" self.arest = arest self._resource = resource self._name = name self._device_class = device_class self._pin = pin if self._pin is not None: request = requests.get(f"{self._resource}/mode/{self._pin}/i", timeout=10) if request.status_code != HTTP_OK: _LOGGER.error("Can't set mode of %s", self._resource) @property def name(self): """Return the name of the binary sensor.""" return self._name @property def is_on(self): """Return true if the binary sensor is on.""" return bool(self.arest.data.get("state")) @property def device_class(self): """Return the class of this sensor.""" return self._device_class def update(self): """Get the latest data from aREST API.""" self.arest.update() class ArestData: """Class for handling the data retrieval for pins.""" def __init__(self, resource, pin): """Initialize the aREST data object.""" self._resource = resource self._pin = pin self.data = {} @Throttle(MIN_TIME_BETWEEN_UPDATES) def update(self): """Get the latest data from aREST device.""" try: response = requests.get(f"{self._resource}/digital/{self._pin}", timeout=10) self.data = {"state": response.json()["return_value"]} except requests.exceptions.ConnectionError: _LOGGER.error("No route to device '%s'", self._resource)
tboyce021/home-assistant
tests/components/tellduslive/test_config_flow.py
homeassistant/components/arest/binary_sensor.py
"""Support for Netgear LTE binary sensors.""" from homeassistant.components.binary_sensor import DOMAIN, BinarySensorEntity from homeassistant.exceptions import PlatformNotReady from . import CONF_MONITORED_CONDITIONS, DATA_KEY, LTEEntity from .sensor_types import BINARY_SENSOR_CLASSES async def async_setup_platform(hass, config, async_add_entities, discovery_info): """Set up Netgear LTE binary sensor devices.""" if discovery_info is None: return modem_data = hass.data[DATA_KEY].get_modem_data(discovery_info) if not modem_data or not modem_data.data: raise PlatformNotReady binary_sensor_conf = discovery_info[DOMAIN] monitored_conditions = binary_sensor_conf[CONF_MONITORED_CONDITIONS] binary_sensors = [] for sensor_type in monitored_conditions: binary_sensors.append(LTEBinarySensor(modem_data, sensor_type)) async_add_entities(binary_sensors) class LTEBinarySensor(LTEEntity, BinarySensorEntity): """Netgear LTE binary sensor entity.""" @property def is_on(self): """Return true if the binary sensor is on.""" return getattr(self.modem_data.data, self.sensor_type) @property def device_class(self): """Return the class of binary sensor.""" return BINARY_SENSOR_CLASSES[self.sensor_type]
tboyce021/home-assistant
tests/components/tellduslive/test_config_flow.py
homeassistant/components/netgear_lte/binary_sensor.py
"""Support for powering relays in a DoorBird video doorbell.""" import datetime from homeassistant.components.switch import SwitchEntity from homeassistant.core import callback from homeassistant.helpers.event import async_track_point_in_utc_time import homeassistant.util.dt as dt_util from .const import DOMAIN, DOOR_STATION, DOOR_STATION_INFO from .entity import DoorBirdEntity IR_RELAY = "__ir_light__" async def async_setup_entry(hass, config_entry, async_add_entities): """Set up the DoorBird switch platform.""" entities = [] config_entry_id = config_entry.entry_id doorstation = hass.data[DOMAIN][config_entry_id][DOOR_STATION] doorstation_info = hass.data[DOMAIN][config_entry_id][DOOR_STATION_INFO] relays = doorstation_info["RELAYS"] relays.append(IR_RELAY) for relay in relays: switch = DoorBirdSwitch(doorstation, doorstation_info, relay) entities.append(switch) async_add_entities(entities) class DoorBirdSwitch(DoorBirdEntity, SwitchEntity): """A relay in a DoorBird device.""" def __init__(self, doorstation, doorstation_info, relay): """Initialize a relay in a DoorBird device.""" super().__init__(doorstation, doorstation_info) self._doorstation = doorstation self._relay = relay self._state = False if relay == IR_RELAY: self._time = datetime.timedelta(minutes=5) else: self._time = datetime.timedelta(seconds=5) self._unique_id = f"{self._mac_addr}_{self._relay}" self._reset_sub = None @property def unique_id(self): """Switch unique id.""" return self._unique_id @property def name(self): """Return the name of the switch.""" if self._relay == IR_RELAY: return f"{self._doorstation.name} IR" return f"{self._doorstation.name} Relay {self._relay}" @property def icon(self): """Return the icon to display.""" return "mdi:lightbulb" if self._relay == IR_RELAY else "mdi:dip-switch" @property def should_poll(self): """No need to poll.""" return False @property def is_on(self): """Get the assumed state of the relay.""" return self._state async def async_turn_on(self, **kwargs): """Power the relay.""" if self._reset_sub is not None: self._reset_sub() self._reset_sub = None self._reset_sub = async_track_point_in_utc_time( self.hass, self._async_turn_off, dt_util.utcnow() + self._time ) await self.hass.async_add_executor_job(self._turn_on) self.async_write_ha_state() def _turn_on(self): """Power the relay.""" if self._relay == IR_RELAY: self._state = self._doorstation.device.turn_light_on() else: self._state = self._doorstation.device.energize_relay(self._relay) async def async_turn_off(self, **kwargs): """Turn off the relays is not needed. They are time-based.""" raise NotImplementedError("DoorBird relays cannot be manually turned off.") @callback def _async_turn_off(self, *_): """Wait for the correct amount of assumed time to pass.""" self._state = False self._reset_sub = None self.async_write_ha_state()
tboyce021/home-assistant
tests/components/tellduslive/test_config_flow.py
homeassistant/components/doorbird/switch.py
"""Provides functionality to interact with climate devices.""" from abc import abstractmethod from datetime import timedelta import functools as ft import logging from typing import Any, Dict, List, Optional import voluptuous as vol from homeassistant.const import ( ATTR_TEMPERATURE, PRECISION_TENTHS, PRECISION_WHOLE, SERVICE_TURN_OFF, SERVICE_TURN_ON, STATE_OFF, STATE_ON, TEMP_CELSIUS, ) import homeassistant.helpers.config_validation as cv from homeassistant.helpers.config_validation import ( # noqa: F401 PLATFORM_SCHEMA, PLATFORM_SCHEMA_BASE, make_entity_service_schema, ) from homeassistant.helpers.entity import Entity from homeassistant.helpers.entity_component import EntityComponent from homeassistant.helpers.temperature import display_temp as show_temp from homeassistant.helpers.typing import ConfigType, HomeAssistantType, ServiceDataType from homeassistant.util.temperature import convert as convert_temperature from .const import ( ATTR_AUX_HEAT, ATTR_CURRENT_HUMIDITY, ATTR_CURRENT_TEMPERATURE, ATTR_FAN_MODE, ATTR_FAN_MODES, ATTR_HUMIDITY, ATTR_HVAC_ACTION, ATTR_HVAC_MODE, ATTR_HVAC_MODES, ATTR_MAX_HUMIDITY, ATTR_MAX_TEMP, ATTR_MIN_HUMIDITY, ATTR_MIN_TEMP, ATTR_PRESET_MODE, ATTR_PRESET_MODES, ATTR_SWING_MODE, ATTR_SWING_MODES, ATTR_TARGET_TEMP_HIGH, ATTR_TARGET_TEMP_LOW, ATTR_TARGET_TEMP_STEP, DOMAIN, HVAC_MODE_COOL, HVAC_MODE_HEAT, HVAC_MODE_HEAT_COOL, HVAC_MODE_OFF, HVAC_MODES, SERVICE_SET_AUX_HEAT, SERVICE_SET_FAN_MODE, SERVICE_SET_HUMIDITY, SERVICE_SET_HVAC_MODE, SERVICE_SET_PRESET_MODE, SERVICE_SET_SWING_MODE, SERVICE_SET_TEMPERATURE, SUPPORT_AUX_HEAT, SUPPORT_FAN_MODE, SUPPORT_PRESET_MODE, SUPPORT_SWING_MODE, SUPPORT_TARGET_HUMIDITY, SUPPORT_TARGET_TEMPERATURE, SUPPORT_TARGET_TEMPERATURE_RANGE, ) DEFAULT_MIN_TEMP = 7 DEFAULT_MAX_TEMP = 35 DEFAULT_MIN_HUMIDITY = 30 DEFAULT_MAX_HUMIDITY = 99 ENTITY_ID_FORMAT = DOMAIN + ".{}" SCAN_INTERVAL = timedelta(seconds=60) CONVERTIBLE_ATTRIBUTE = [ATTR_TEMPERATURE, ATTR_TARGET_TEMP_LOW, ATTR_TARGET_TEMP_HIGH] _LOGGER = logging.getLogger(__name__) SET_TEMPERATURE_SCHEMA = vol.All( cv.has_at_least_one_key( ATTR_TEMPERATURE, ATTR_TARGET_TEMP_HIGH, ATTR_TARGET_TEMP_LOW ), make_entity_service_schema( { vol.Exclusive(ATTR_TEMPERATURE, "temperature"): vol.Coerce(float), vol.Inclusive(ATTR_TARGET_TEMP_HIGH, "temperature"): vol.Coerce(float), vol.Inclusive(ATTR_TARGET_TEMP_LOW, "temperature"): vol.Coerce(float), vol.Optional(ATTR_HVAC_MODE): vol.In(HVAC_MODES), } ), ) async def async_setup(hass: HomeAssistantType, config: ConfigType) -> bool: """Set up climate entities.""" component = hass.data[DOMAIN] = EntityComponent( _LOGGER, DOMAIN, hass, SCAN_INTERVAL ) await component.async_setup(config) component.async_register_entity_service(SERVICE_TURN_ON, {}, "async_turn_on") component.async_register_entity_service(SERVICE_TURN_OFF, {}, "async_turn_off") component.async_register_entity_service( SERVICE_SET_HVAC_MODE, {vol.Required(ATTR_HVAC_MODE): vol.In(HVAC_MODES)}, "async_set_hvac_mode", ) component.async_register_entity_service( SERVICE_SET_PRESET_MODE, {vol.Required(ATTR_PRESET_MODE): cv.string}, "async_set_preset_mode", [SUPPORT_PRESET_MODE], ) component.async_register_entity_service( SERVICE_SET_AUX_HEAT, {vol.Required(ATTR_AUX_HEAT): cv.boolean}, async_service_aux_heat, [SUPPORT_AUX_HEAT], ) component.async_register_entity_service( SERVICE_SET_TEMPERATURE, SET_TEMPERATURE_SCHEMA, async_service_temperature_set, [SUPPORT_TARGET_TEMPERATURE, SUPPORT_TARGET_TEMPERATURE_RANGE], ) component.async_register_entity_service( SERVICE_SET_HUMIDITY, 
{vol.Required(ATTR_HUMIDITY): vol.Coerce(float)}, "async_set_humidity", [SUPPORT_TARGET_HUMIDITY], ) component.async_register_entity_service( SERVICE_SET_FAN_MODE, {vol.Required(ATTR_FAN_MODE): cv.string}, "async_set_fan_mode", [SUPPORT_FAN_MODE], ) component.async_register_entity_service( SERVICE_SET_SWING_MODE, {vol.Required(ATTR_SWING_MODE): cv.string}, "async_set_swing_mode", [SUPPORT_SWING_MODE], ) return True async def async_setup_entry(hass: HomeAssistantType, entry): """Set up a config entry.""" return await hass.data[DOMAIN].async_setup_entry(entry) async def async_unload_entry(hass: HomeAssistantType, entry): """Unload a config entry.""" return await hass.data[DOMAIN].async_unload_entry(entry) class ClimateEntity(Entity): """Representation of a climate entity.""" @property def state(self) -> str: """Return the current state.""" return self.hvac_mode @property def precision(self) -> float: """Return the precision of the system.""" if self.hass.config.units.temperature_unit == TEMP_CELSIUS: return PRECISION_TENTHS return PRECISION_WHOLE @property def capability_attributes(self) -> Optional[Dict[str, Any]]: """Return the capability attributes.""" supported_features = self.supported_features data = { ATTR_HVAC_MODES: self.hvac_modes, ATTR_MIN_TEMP: show_temp( self.hass, self.min_temp, self.temperature_unit, self.precision ), ATTR_MAX_TEMP: show_temp( self.hass, self.max_temp, self.temperature_unit, self.precision ), } if self.target_temperature_step: data[ATTR_TARGET_TEMP_STEP] = self.target_temperature_step if supported_features & SUPPORT_TARGET_HUMIDITY: data[ATTR_MIN_HUMIDITY] = self.min_humidity data[ATTR_MAX_HUMIDITY] = self.max_humidity if supported_features & SUPPORT_FAN_MODE: data[ATTR_FAN_MODES] = self.fan_modes if supported_features & SUPPORT_PRESET_MODE: data[ATTR_PRESET_MODES] = self.preset_modes if supported_features & SUPPORT_SWING_MODE: data[ATTR_SWING_MODES] = self.swing_modes return data @property def state_attributes(self) -> Dict[str, Any]: """Return the optional state attributes.""" supported_features = self.supported_features data = { ATTR_CURRENT_TEMPERATURE: show_temp( self.hass, self.current_temperature, self.temperature_unit, self.precision, ), } if supported_features & SUPPORT_TARGET_TEMPERATURE: data[ATTR_TEMPERATURE] = show_temp( self.hass, self.target_temperature, self.temperature_unit, self.precision, ) if supported_features & SUPPORT_TARGET_TEMPERATURE_RANGE: data[ATTR_TARGET_TEMP_HIGH] = show_temp( self.hass, self.target_temperature_high, self.temperature_unit, self.precision, ) data[ATTR_TARGET_TEMP_LOW] = show_temp( self.hass, self.target_temperature_low, self.temperature_unit, self.precision, ) if self.current_humidity is not None: data[ATTR_CURRENT_HUMIDITY] = self.current_humidity if supported_features & SUPPORT_TARGET_HUMIDITY: data[ATTR_HUMIDITY] = self.target_humidity if supported_features & SUPPORT_FAN_MODE: data[ATTR_FAN_MODE] = self.fan_mode if self.hvac_action: data[ATTR_HVAC_ACTION] = self.hvac_action if supported_features & SUPPORT_PRESET_MODE: data[ATTR_PRESET_MODE] = self.preset_mode if supported_features & SUPPORT_SWING_MODE: data[ATTR_SWING_MODE] = self.swing_mode if supported_features & SUPPORT_AUX_HEAT: data[ATTR_AUX_HEAT] = STATE_ON if self.is_aux_heat else STATE_OFF return data @property def temperature_unit(self) -> str: """Return the unit of measurement used by the platform.""" raise NotImplementedError() @property def current_humidity(self) -> Optional[int]: """Return the current humidity.""" return None @property def 
target_humidity(self) -> Optional[int]: """Return the humidity we try to reach.""" return None @property @abstractmethod def hvac_mode(self) -> str: """Return hvac operation ie. heat, cool mode. Need to be one of HVAC_MODE_*. """ @property @abstractmethod def hvac_modes(self) -> List[str]: """Return the list of available hvac operation modes. Need to be a subset of HVAC_MODES. """ @property def hvac_action(self) -> Optional[str]: """Return the current running hvac operation if supported. Need to be one of CURRENT_HVAC_*. """ return None @property def current_temperature(self) -> Optional[float]: """Return the current temperature.""" return None @property def target_temperature(self) -> Optional[float]: """Return the temperature we try to reach.""" return None @property def target_temperature_step(self) -> Optional[float]: """Return the supported step of target temperature.""" return None @property def target_temperature_high(self) -> Optional[float]: """Return the highbound target temperature we try to reach. Requires SUPPORT_TARGET_TEMPERATURE_RANGE. """ raise NotImplementedError @property def target_temperature_low(self) -> Optional[float]: """Return the lowbound target temperature we try to reach. Requires SUPPORT_TARGET_TEMPERATURE_RANGE. """ raise NotImplementedError @property def preset_mode(self) -> Optional[str]: """Return the current preset mode, e.g., home, away, temp. Requires SUPPORT_PRESET_MODE. """ raise NotImplementedError @property def preset_modes(self) -> Optional[List[str]]: """Return a list of available preset modes. Requires SUPPORT_PRESET_MODE. """ raise NotImplementedError @property def is_aux_heat(self) -> Optional[bool]: """Return true if aux heater. Requires SUPPORT_AUX_HEAT. """ raise NotImplementedError @property def fan_mode(self) -> Optional[str]: """Return the fan setting. Requires SUPPORT_FAN_MODE. """ raise NotImplementedError @property def fan_modes(self) -> Optional[List[str]]: """Return the list of available fan modes. Requires SUPPORT_FAN_MODE. """ raise NotImplementedError @property def swing_mode(self) -> Optional[str]: """Return the swing setting. Requires SUPPORT_SWING_MODE. """ raise NotImplementedError @property def swing_modes(self) -> Optional[List[str]]: """Return the list of available swing modes. Requires SUPPORT_SWING_MODE. 
""" raise NotImplementedError def set_temperature(self, **kwargs) -> None: """Set new target temperature.""" raise NotImplementedError() async def async_set_temperature(self, **kwargs) -> None: """Set new target temperature.""" await self.hass.async_add_executor_job( ft.partial(self.set_temperature, **kwargs) ) def set_humidity(self, humidity: int) -> None: """Set new target humidity.""" raise NotImplementedError() async def async_set_humidity(self, humidity: int) -> None: """Set new target humidity.""" await self.hass.async_add_executor_job(self.set_humidity, humidity) def set_fan_mode(self, fan_mode: str) -> None: """Set new target fan mode.""" raise NotImplementedError() async def async_set_fan_mode(self, fan_mode: str) -> None: """Set new target fan mode.""" await self.hass.async_add_executor_job(self.set_fan_mode, fan_mode) def set_hvac_mode(self, hvac_mode: str) -> None: """Set new target hvac mode.""" raise NotImplementedError() async def async_set_hvac_mode(self, hvac_mode: str) -> None: """Set new target hvac mode.""" await self.hass.async_add_executor_job(self.set_hvac_mode, hvac_mode) def set_swing_mode(self, swing_mode: str) -> None: """Set new target swing operation.""" raise NotImplementedError() async def async_set_swing_mode(self, swing_mode: str) -> None: """Set new target swing operation.""" await self.hass.async_add_executor_job(self.set_swing_mode, swing_mode) def set_preset_mode(self, preset_mode: str) -> None: """Set new preset mode.""" raise NotImplementedError() async def async_set_preset_mode(self, preset_mode: str) -> None: """Set new preset mode.""" await self.hass.async_add_executor_job(self.set_preset_mode, preset_mode) def turn_aux_heat_on(self) -> None: """Turn auxiliary heater on.""" raise NotImplementedError() async def async_turn_aux_heat_on(self) -> None: """Turn auxiliary heater on.""" await self.hass.async_add_executor_job(self.turn_aux_heat_on) def turn_aux_heat_off(self) -> None: """Turn auxiliary heater off.""" raise NotImplementedError() async def async_turn_aux_heat_off(self) -> None: """Turn auxiliary heater off.""" await self.hass.async_add_executor_job(self.turn_aux_heat_off) async def async_turn_on(self) -> None: """Turn the entity on.""" if hasattr(self, "turn_on"): # pylint: disable=no-member await self.hass.async_add_executor_job(self.turn_on) return # Fake turn on for mode in (HVAC_MODE_HEAT_COOL, HVAC_MODE_HEAT, HVAC_MODE_COOL): if mode not in self.hvac_modes: continue await self.async_set_hvac_mode(mode) break async def async_turn_off(self) -> None: """Turn the entity off.""" if hasattr(self, "turn_off"): # pylint: disable=no-member await self.hass.async_add_executor_job(self.turn_off) return # Fake turn off if HVAC_MODE_OFF in self.hvac_modes: await self.async_set_hvac_mode(HVAC_MODE_OFF) @property def supported_features(self) -> int: """Return the list of supported features.""" raise NotImplementedError() @property def min_temp(self) -> float: """Return the minimum temperature.""" return convert_temperature( DEFAULT_MIN_TEMP, TEMP_CELSIUS, self.temperature_unit ) @property def max_temp(self) -> float: """Return the maximum temperature.""" return convert_temperature( DEFAULT_MAX_TEMP, TEMP_CELSIUS, self.temperature_unit ) @property def min_humidity(self) -> int: """Return the minimum humidity.""" return DEFAULT_MIN_HUMIDITY @property def max_humidity(self) -> int: """Return the maximum humidity.""" return DEFAULT_MAX_HUMIDITY async def async_service_aux_heat( entity: ClimateEntity, service: ServiceDataType ) -> None: """Handle aux heat 
service.""" if service.data[ATTR_AUX_HEAT]: await entity.async_turn_aux_heat_on() else: await entity.async_turn_aux_heat_off() async def async_service_temperature_set( entity: ClimateEntity, service: ServiceDataType ) -> None: """Handle set temperature service.""" hass = entity.hass kwargs = {} for value, temp in service.data.items(): if value in CONVERTIBLE_ATTRIBUTE: kwargs[value] = convert_temperature( temp, hass.config.units.temperature_unit, entity.temperature_unit ) else: kwargs[value] = temp await entity.async_set_temperature(**kwargs) class ClimateDevice(ClimateEntity): """Representation of a climate entity (for backwards compatibility).""" def __init_subclass__(cls, **kwargs): """Print deprecation warning.""" super().__init_subclass__(**kwargs) _LOGGER.warning( "ClimateDevice is deprecated, modify %s to extend ClimateEntity", cls.__name__, )
# flake8: noqa pylint: skip-file """Tests for the TelldusLive config flow.""" import asyncio from unittest.mock import Mock, patch import pytest from homeassistant import data_entry_flow from homeassistant.components.tellduslive import ( APPLICATION_NAME, DOMAIN, KEY_SCAN_INTERVAL, SCAN_INTERVAL, config_flow, ) from homeassistant.config_entries import SOURCE_DISCOVERY from homeassistant.const import CONF_HOST from tests.common import MockConfigEntry, mock_coro def init_config_flow(hass, side_effect=None): """Init a configuration flow.""" flow = config_flow.FlowHandler() flow.hass = hass if side_effect: flow._get_auth_url = Mock(side_effect=side_effect) return flow @pytest.fixture def supports_local_api(): """Set TelldusLive supports_local_api.""" return True @pytest.fixture def authorize(): """Set TelldusLive authorize.""" return True @pytest.fixture def mock_tellduslive(supports_local_api, authorize): """Mock tellduslive.""" with patch( "homeassistant.components.tellduslive.config_flow.Session" ) as Session, patch( "homeassistant.components.tellduslive.config_flow.supports_local_api" ) as tellduslive_supports_local_api: tellduslive_supports_local_api.return_value = supports_local_api Session().authorize.return_value = authorize Session().access_token = "token" Session().access_token_secret = "token_secret" Session().authorize_url = "https://example.com" yield Session, tellduslive_supports_local_api async def test_abort_if_already_setup(hass): """Test we abort if TelldusLive is already setup.""" flow = init_config_flow(hass) with patch.object(hass.config_entries, "async_entries", return_value=[{}]): result = await flow.async_step_user() assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT assert result["reason"] == "already_setup" with patch.object(hass.config_entries, "async_entries", return_value=[{}]): result = await flow.async_step_import(None) assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT assert result["reason"] == "already_setup" async def test_full_flow_implementation(hass, mock_tellduslive): """Test registering an implementation and finishing flow works.""" flow = init_config_flow(hass) flow.context = {"source": SOURCE_DISCOVERY} result = await flow.async_step_discovery(["localhost", "tellstick"]) assert result["type"] == data_entry_flow.RESULT_TYPE_FORM assert result["step_id"] == "user" assert len(flow._hosts) == 2 result = await flow.async_step_user() assert result["type"] == data_entry_flow.RESULT_TYPE_FORM assert result["step_id"] == "user" result = await flow.async_step_user({"host": "localhost"}) assert result["type"] == data_entry_flow.RESULT_TYPE_FORM assert result["step_id"] == "auth" assert result["description_placeholders"] == { "auth_url": "https://example.com", "app_name": APPLICATION_NAME, } result = await flow.async_step_auth("") assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY assert result["title"] == "localhost" assert result["data"]["host"] == "localhost" assert result["data"]["scan_interval"] == 60 assert result["data"]["session"] == {"token": "token", "host": "localhost"} async def test_step_import(hass, mock_tellduslive): """Test that we trigger auth when configuring from import.""" flow = init_config_flow(hass) result = await flow.async_step_import({CONF_HOST: DOMAIN, KEY_SCAN_INTERVAL: 0}) assert result["type"] == data_entry_flow.RESULT_TYPE_FORM assert result["step_id"] == "auth" async def test_step_import_add_host(hass, mock_tellduslive): """Test that we add host and trigger user when configuring from import.""" flow = 
init_config_flow(hass) result = await flow.async_step_import( {CONF_HOST: "localhost", KEY_SCAN_INTERVAL: 0} ) assert result["type"] == data_entry_flow.RESULT_TYPE_FORM assert result["step_id"] == "user" async def test_step_import_no_config_file(hass, mock_tellduslive): """Test that we trigger user with no config_file configuring from import.""" flow = init_config_flow(hass) result = await flow.async_step_import( {CONF_HOST: "localhost", KEY_SCAN_INTERVAL: 0} ) assert result["type"] == data_entry_flow.RESULT_TYPE_FORM assert result["step_id"] == "user" async def test_step_import_load_json_matching_host(hass, mock_tellduslive): """Test that we add host and trigger user when configuring from import.""" flow = init_config_flow(hass) with patch( "homeassistant.components.tellduslive.config_flow.load_json", return_value={"tellduslive": {}}, ), patch("os.path.isfile"): result = await flow.async_step_import( {CONF_HOST: "Cloud API", KEY_SCAN_INTERVAL: 0} ) assert result["type"] == data_entry_flow.RESULT_TYPE_FORM assert result["step_id"] == "user" async def test_step_import_load_json(hass, mock_tellduslive): """Test that we create entry when configuring from import.""" flow = init_config_flow(hass) with patch( "homeassistant.components.tellduslive.config_flow.load_json", return_value={"localhost": {}}, ), patch("os.path.isfile"): result = await flow.async_step_import( {CONF_HOST: "localhost", KEY_SCAN_INTERVAL: SCAN_INTERVAL} ) assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY assert result["title"] == "localhost" assert result["data"]["host"] == "localhost" assert result["data"]["scan_interval"] == 60 assert result["data"]["session"] == {} @pytest.mark.parametrize("supports_local_api", [False]) async def test_step_disco_no_local_api(hass, mock_tellduslive): """Test that we trigger when configuring from discovery, not supporting local api.""" flow = init_config_flow(hass) flow.context = {"source": SOURCE_DISCOVERY} result = await flow.async_step_discovery(["localhost", "tellstick"]) assert result["type"] == data_entry_flow.RESULT_TYPE_FORM assert result["step_id"] == "auth" assert len(flow._hosts) == 1 async def test_step_auth(hass, mock_tellduslive): """Test that create cloud entity from auth.""" flow = init_config_flow(hass) await flow.async_step_auth() result = await flow.async_step_auth(["localhost", "tellstick"]) assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY assert result["title"] == "Cloud API" assert result["data"]["host"] == "Cloud API" assert result["data"]["scan_interval"] == 60 assert result["data"]["session"] == { "token": "token", "token_secret": "token_secret", } @pytest.mark.parametrize("authorize", [False]) async def test_wrong_auth_flow_implementation(hass, mock_tellduslive): """Test wrong auth.""" flow = init_config_flow(hass) await flow.async_step_auth() result = await flow.async_step_auth("") assert result["type"] == data_entry_flow.RESULT_TYPE_FORM assert result["step_id"] == "auth" assert result["errors"]["base"] == "invalid_auth" async def test_not_pick_host_if_only_one(hass, mock_tellduslive): """Test not picking host if we have just one.""" flow = init_config_flow(hass) result = await flow.async_step_user() assert result["type"] == data_entry_flow.RESULT_TYPE_FORM assert result["step_id"] == "auth" async def test_abort_if_timeout_generating_auth_url(hass, mock_tellduslive): """Test abort if generating authorize url timeout.""" flow = init_config_flow(hass, side_effect=asyncio.TimeoutError) result = await flow.async_step_user() assert 
result["type"] == data_entry_flow.RESULT_TYPE_ABORT assert result["reason"] == "authorize_url_timeout" async def test_abort_no_auth_url(hass, mock_tellduslive): """Test abort if generating authorize url returns none.""" flow = init_config_flow(hass) flow._get_auth_url = Mock(return_value=False) result = await flow.async_step_user() assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT assert result["reason"] == "unknown_authorize_url_generation" async def test_abort_if_exception_generating_auth_url(hass, mock_tellduslive): """Test we abort if generating authorize url blows up.""" flow = init_config_flow(hass, side_effect=ValueError) result = await flow.async_step_user() assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT assert result["reason"] == "unknown_authorize_url_generation" async def test_discovery_already_configured(hass, mock_tellduslive): """Test abort if already configured fires from discovery.""" MockConfigEntry(domain="tellduslive", data={"host": "some-host"}).add_to_hass(hass) flow = init_config_flow(hass) flow.context = {"source": SOURCE_DISCOVERY} with pytest.raises(data_entry_flow.AbortFlow): result = await flow.async_step_discovery(["some-host", ""])
tboyce021/home-assistant
tests/components/tellduslive/test_config_flow.py
homeassistant/components/climate/__init__.py
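To make the ClimateEntity contract in homeassistant/components/climate/__init__.py concrete, here is a minimal sketch of a subclass, assuming only the abstract surface shown above (temperature_unit, hvac_mode, hvac_modes, supported_features) plus the synchronous set_* hooks that the async_set_* wrappers delegate to. ExampleThermostat and all of its values are made up for illustration and are not part of the repository row above.

from homeassistant.components.climate import ClimateEntity
from homeassistant.components.climate.const import (
    HVAC_MODE_HEAT,
    HVAC_MODE_OFF,
    SUPPORT_TARGET_TEMPERATURE,
)
from homeassistant.const import ATTR_TEMPERATURE, TEMP_CELSIUS


class ExampleThermostat(ClimateEntity):
    """Hypothetical thermostat used only to illustrate the entity contract."""

    def __init__(self):
        """Start switched off with a 21 degree setpoint."""
        self._hvac_mode = HVAC_MODE_OFF
        self._target = 21.0

    @property
    def supported_features(self):
        """Advertise only target-temperature control."""
        return SUPPORT_TARGET_TEMPERATURE

    @property
    def temperature_unit(self):
        """Native unit of this device."""
        return TEMP_CELSIUS

    @property
    def hvac_mode(self):
        """Current mode; must be one of hvac_modes."""
        return self._hvac_mode

    @property
    def hvac_modes(self):
        """Modes this device can be set to."""
        return [HVAC_MODE_OFF, HVAC_MODE_HEAT]

    @property
    def target_temperature(self):
        """Setpoint reported back through state_attributes."""
        return self._target

    def set_hvac_mode(self, hvac_mode):
        """Synchronous setter wrapped by async_set_hvac_mode in the base class."""
        self._hvac_mode = hvac_mode

    def set_temperature(self, **kwargs):
        """Handle the kwargs built by async_service_temperature_set above."""
        if ATTR_TEMPERATURE in kwargs:
            self._target = kwargs[ATTR_TEMPERATURE]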
"""Support for binary sensor using Orange Pi GPIO.""" from homeassistant.components.binary_sensor import PLATFORM_SCHEMA, BinarySensorEntity from . import edge_detect, read_input, setup_input, setup_mode from .const import CONF_INVERT_LOGIC, CONF_PIN_MODE, CONF_PORTS, PORT_SCHEMA PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(PORT_SCHEMA) async def async_setup_platform(hass, config, async_add_entities, discovery_info=None): """Set up the Orange Pi GPIO platform.""" binary_sensors = [] invert_logic = config[CONF_INVERT_LOGIC] pin_mode = config[CONF_PIN_MODE] ports = config[CONF_PORTS] setup_mode(pin_mode) for port_num, port_name in ports.items(): binary_sensors.append( OPiGPIOBinarySensor(hass, port_name, port_num, invert_logic) ) async_add_entities(binary_sensors) class OPiGPIOBinarySensor(BinarySensorEntity): """Represent a binary sensor that uses Orange Pi GPIO.""" def __init__(self, hass, name, port, invert_logic): """Initialize the Orange Pi binary sensor.""" self._name = name self._port = port self._invert_logic = invert_logic self._state = None async def async_added_to_hass(self): """Run when entity about to be added to hass.""" def gpio_edge_listener(port): """Update GPIO when edge change is detected.""" self.schedule_update_ha_state(True) def setup_entity(): setup_input(self._port) edge_detect(self._port, gpio_edge_listener) self.schedule_update_ha_state(True) await self.hass.async_add_executor_job(setup_entity) @property def should_poll(self): """No polling needed.""" return False @property def name(self): """Return the name of the sensor.""" return self._name @property def is_on(self): """Return the state of the entity.""" return self._state != self._invert_logic def update(self): """Update state with new GPIO data.""" self._state = read_input(self._port)
tboyce021/home-assistant
tests/components/tellduslive/test_config_flow.py
homeassistant/components/orangepi_gpio/binary_sensor.py
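OPiGPIOBinarySensor.is_on above reports on whenever the raw GPIO reading differs from the configured invert flag. A tiny self-contained check of that rule (illustration only; sensor_is_on is a hypothetical helper, not part of the component):

def sensor_is_on(raw_state: bool, invert_logic: bool) -> bool:
    """Mirror of the `self._state != self._invert_logic` expression above."""
    return raw_state != invert_logic


assert sensor_is_on(True, False) is True   # pin high, normal logic -> on
assert sensor_is_on(True, True) is False   # pin high, inverted logic -> off
assert sensor_is_on(False, True) is True   # pin low, inverted logic -> on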
"""Support for Plum Lightpad lights.""" import asyncio from typing import Callable, List from plumlightpad import Plum from homeassistant.components.light import ( ATTR_BRIGHTNESS, ATTR_HS_COLOR, SUPPORT_BRIGHTNESS, SUPPORT_COLOR, LightEntity, ) from homeassistant.config_entries import ConfigEntry from homeassistant.core import HomeAssistant from homeassistant.helpers.aiohttp_client import async_get_clientsession from homeassistant.helpers.entity import Entity import homeassistant.util.color as color_util from .const import DOMAIN async def async_setup_entry( hass: HomeAssistant, entry: ConfigEntry, async_add_entities: Callable[[List[Entity]], None], ) -> None: """Set up Plum Lightpad dimmer lights and glow rings.""" plum: Plum = hass.data[DOMAIN][entry.entry_id] def setup_entities(device) -> None: entities = [] if "lpid" in device: lightpad = plum.get_lightpad(device["lpid"]) entities.append(GlowRing(lightpad=lightpad)) if "llid" in device: logical_load = plum.get_load(device["llid"]) entities.append(PlumLight(load=logical_load)) if entities: async_add_entities(entities) async def new_load(device): setup_entities(device) async def new_lightpad(device): setup_entities(device) device_web_session = async_get_clientsession(hass, verify_ssl=False) asyncio.create_task( plum.discover( hass.loop, loadListener=new_load, lightpadListener=new_lightpad, websession=device_web_session, ) ) class PlumLight(LightEntity): """Representation of a Plum Lightpad dimmer.""" def __init__(self, load): """Initialize the light.""" self._load = load self._brightness = load.level async def async_added_to_hass(self): """Subscribe to dimmerchange events.""" self._load.add_event_listener("dimmerchange", self.dimmerchange) def dimmerchange(self, event): """Change event handler updating the brightness.""" self._brightness = event["level"] self.schedule_update_ha_state() @property def should_poll(self): """No polling needed.""" return False @property def unique_id(self): """Combine logical load ID with .light to guarantee it is unique.""" return f"{self._load.llid}.light" @property def name(self): """Return the name of the switch if any.""" return self._load.name @property def device_info(self): """Return the device info.""" return { "name": self.name, "identifiers": {(DOMAIN, self.unique_id)}, "model": "Dimmer", "manufacturer": "Plum", } @property def brightness(self) -> int: """Return the brightness of this switch between 0..255.""" return self._brightness @property def is_on(self) -> bool: """Return true if light is on.""" return self._brightness > 0 @property def supported_features(self): """Flag supported features.""" if self._load.dimmable: return SUPPORT_BRIGHTNESS return 0 async def async_turn_on(self, **kwargs): """Turn the light on.""" if ATTR_BRIGHTNESS in kwargs: await self._load.turn_on(kwargs[ATTR_BRIGHTNESS]) else: await self._load.turn_on() async def async_turn_off(self, **kwargs): """Turn the light off.""" await self._load.turn_off() class GlowRing(LightEntity): """Representation of a Plum Lightpad dimmer glow ring.""" def __init__(self, lightpad): """Initialize the light.""" self._lightpad = lightpad self._name = f"{lightpad.friendly_name} Glow Ring" self._state = lightpad.glow_enabled self._glow_intensity = lightpad.glow_intensity self._red = lightpad.glow_color["red"] self._green = lightpad.glow_color["green"] self._blue = lightpad.glow_color["blue"] async def async_added_to_hass(self): """Subscribe to configchange events.""" self._lightpad.add_event_listener("configchange", self.configchange_event) 
def configchange_event(self, event): """Handle Configuration change event.""" config = event["changes"] self._state = config["glowEnabled"] self._glow_intensity = config["glowIntensity"] self._red = config["glowColor"]["red"] self._green = config["glowColor"]["green"] self._blue = config["glowColor"]["blue"] self.schedule_update_ha_state() @property def hs_color(self): """Return the hue and saturation color value [float, float].""" return color_util.color_RGB_to_hs(self._red, self._green, self._blue) @property def should_poll(self): """No polling needed.""" return False @property def unique_id(self): """Combine LightPad ID with .glow to guarantee it is unique.""" return f"{self._lightpad.lpid}.glow" @property def name(self): """Return the name of the switch if any.""" return self._name @property def device_info(self): """Return the device info.""" return { "name": self.name, "identifiers": {(DOMAIN, self.unique_id)}, "model": "Glow Ring", "manufacturer": "Plum", } @property def brightness(self) -> int: """Return the brightness of this switch between 0..255.""" return min(max(int(round(self._glow_intensity * 255, 0)), 0), 255) @property def glow_intensity(self): """Brightness in float form.""" return self._glow_intensity @property def is_on(self) -> bool: """Return true if light is on.""" return self._state @property def icon(self): """Return the crop-portrait icon representing the glow ring.""" return "mdi:crop-portrait" @property def supported_features(self): """Flag supported features.""" return SUPPORT_BRIGHTNESS | SUPPORT_COLOR async def async_turn_on(self, **kwargs): """Turn the light on.""" if ATTR_BRIGHTNESS in kwargs: brightness_pct = kwargs[ATTR_BRIGHTNESS] / 255.0 await self._lightpad.set_config({"glowIntensity": brightness_pct}) elif ATTR_HS_COLOR in kwargs: hs_color = kwargs[ATTR_HS_COLOR] red, green, blue = color_util.color_hs_to_RGB(*hs_color) await self._lightpad.set_glow_color(red, green, blue, 0) else: await self._lightpad.set_config({"glowEnabled": True}) async def async_turn_off(self, **kwargs): """Turn the light off.""" if ATTR_BRIGHTNESS in kwargs: brightness_pct = kwargs[ATTR_BRIGHTNESS] / 255.0 await self._lightpad.set_config({"glowIntensity": brightness_pct}) else: await self._lightpad.set_config({"glowEnabled": False})
tboyce021/home-assistant
tests/components/tellduslive/test_config_flow.py
homeassistant/components/plum_lightpad/light.py
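GlowRing.brightness above scales the pad's 0..1 glow intensity to Home Assistant's 0..255 brightness range and clamps the result, and async_turn_on performs the inverse by dividing the requested ATTR_BRIGHTNESS by 255 before writing glowIntensity back to the pad. A small standalone check of the forward mapping (illustrative, not the component code):

def glow_to_brightness(glow_intensity: float) -> int:
    """Mirror of the clamp-and-scale expression used by GlowRing.brightness."""
    return min(max(int(round(glow_intensity * 255, 0)), 0), 255)


assert glow_to_brightness(0.0) == 0
assert glow_to_brightness(0.5) == 128
assert glow_to_brightness(1.2) == 255  # out-of-range intensities are clamped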
"""Zwave util methods.""" import asyncio import logging import homeassistant.util.dt as dt_util from . import const _LOGGER = logging.getLogger(__name__) def check_node_schema(node, schema): """Check if node matches the passed node schema.""" if const.DISC_NODE_ID in schema and node.node_id not in schema[const.DISC_NODE_ID]: _LOGGER.debug( "node.node_id %s not in node_id %s", node.node_id, schema[const.DISC_NODE_ID], ) return False if ( const.DISC_GENERIC_DEVICE_CLASS in schema and node.generic not in schema[const.DISC_GENERIC_DEVICE_CLASS] ): _LOGGER.debug( "node.generic %s not in generic_device_class %s", node.generic, schema[const.DISC_GENERIC_DEVICE_CLASS], ) return False if ( const.DISC_SPECIFIC_DEVICE_CLASS in schema and node.specific not in schema[const.DISC_SPECIFIC_DEVICE_CLASS] ): _LOGGER.debug( "node.specific %s not in specific_device_class %s", node.specific, schema[const.DISC_SPECIFIC_DEVICE_CLASS], ) return False return True def check_value_schema(value, schema): """Check if the value matches the passed value schema.""" if ( const.DISC_COMMAND_CLASS in schema and value.command_class not in schema[const.DISC_COMMAND_CLASS] ): _LOGGER.debug( "value.command_class %s not in command_class %s", value.command_class, schema[const.DISC_COMMAND_CLASS], ) return False if const.DISC_TYPE in schema and value.type not in schema[const.DISC_TYPE]: _LOGGER.debug( "value.type %s not in type %s", value.type, schema[const.DISC_TYPE] ) return False if const.DISC_GENRE in schema and value.genre not in schema[const.DISC_GENRE]: _LOGGER.debug( "value.genre %s not in genre %s", value.genre, schema[const.DISC_GENRE] ) return False if const.DISC_INDEX in schema and value.index not in schema[const.DISC_INDEX]: _LOGGER.debug( "value.index %s not in index %s", value.index, schema[const.DISC_INDEX] ) return False if ( const.DISC_INSTANCE in schema and value.instance not in schema[const.DISC_INSTANCE] ): _LOGGER.debug( "value.instance %s not in instance %s", value.instance, schema[const.DISC_INSTANCE], ) return False if const.DISC_SCHEMAS in schema: found = False for schema_item in schema[const.DISC_SCHEMAS]: found = found or check_value_schema(value, schema_item) if not found: return False return True def node_name(node): """Return the name of the node.""" if is_node_parsed(node): return node.name or f"{node.manufacturer_name} {node.product_name}" return f"Unknown Node {node.node_id}" def node_device_id_and_name(node, instance=1): """Return the name and device ID for the value with the given index.""" name = node_name(node) if instance == 1: return ((const.DOMAIN, node.node_id), name) name = f"{name} ({instance})" return ((const.DOMAIN, node.node_id, instance), name) async def check_has_unique_id(entity, ready_callback, timeout_callback): """Wait for entity to have unique_id.""" start_time = dt_util.utcnow() while True: waited = int((dt_util.utcnow() - start_time).total_seconds()) if entity.unique_id: ready_callback(waited) return if waited >= const.NODE_READY_WAIT_SECS: # Wait up to NODE_READY_WAIT_SECS seconds for unique_id to appear. timeout_callback(waited) return await asyncio.sleep(1) def is_node_parsed(node): """Check whether the node has been parsed or still waiting to be parsed.""" return bool((node.manufacturer_name and node.product_name) or node.name)
tboyce021/home-assistant
tests/components/tellduslive/test_config_flow.py
homeassistant/components/zwave/util.py
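node_device_id_and_name() above builds a per-instance device identity from node_name(), which itself falls back to "manufacturer product" when the node has no explicit name. The helper below is a standalone mirror of the identity rule, shown only so the expected tuples are easy to read; it is not the component code and the node values are made up:

def device_id_and_name(domain, node_id, display_name, instance=1):
    """Mirror of node_device_id_and_name(): instance 1 keeps the plain id/name."""
    if instance == 1:
        return (domain, node_id), display_name
    return (domain, node_id, instance), f"{display_name} ({instance})"


assert device_id_and_name("zwave", 7, "Aeotec MultiSensor 6") == (
    ("zwave", 7),
    "Aeotec MultiSensor 6",
)
assert device_id_and_name("zwave", 7, "Aeotec MultiSensor 6", instance=2) == (
    ("zwave", 7, 2),
    "Aeotec MultiSensor 6 (2)",
)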
"""Support for Logitech UE Smart Radios.""" import logging import requests import voluptuous as vol from homeassistant.components.media_player import PLATFORM_SCHEMA, MediaPlayerEntity from homeassistant.components.media_player.const import ( MEDIA_TYPE_MUSIC, SUPPORT_NEXT_TRACK, SUPPORT_PAUSE, SUPPORT_PLAY, SUPPORT_PREVIOUS_TRACK, SUPPORT_STOP, SUPPORT_TURN_OFF, SUPPORT_TURN_ON, SUPPORT_VOLUME_MUTE, SUPPORT_VOLUME_SET, ) from homeassistant.const import ( CONF_PASSWORD, CONF_USERNAME, STATE_IDLE, STATE_OFF, STATE_PAUSED, STATE_PLAYING, ) import homeassistant.helpers.config_validation as cv _LOGGER = logging.getLogger(__name__) ICON = "mdi:radio" URL = "http://decibel.logitechmusic.com/jsonrpc.js" SUPPORT_UE_SMART_RADIO = ( SUPPORT_PLAY | SUPPORT_PAUSE | SUPPORT_STOP | SUPPORT_PREVIOUS_TRACK | SUPPORT_NEXT_TRACK | SUPPORT_TURN_ON | SUPPORT_TURN_OFF | SUPPORT_VOLUME_SET | SUPPORT_VOLUME_MUTE ) PLAYBACK_DICT = {"play": STATE_PLAYING, "pause": STATE_PAUSED, "stop": STATE_IDLE} PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend( {vol.Required(CONF_USERNAME): cv.string, vol.Required(CONF_PASSWORD): cv.string} ) def send_request(payload, session): """Send request to radio.""" try: request = requests.post( URL, cookies={"sdi_squeezenetwork_session": session}, json=payload, timeout=5, ) except requests.exceptions.Timeout: _LOGGER.error("Timed out when sending request") except requests.exceptions.ConnectionError: _LOGGER.error("An error occurred while connecting") else: return request.json() def setup_platform(hass, config, add_entities, discovery_info=None): """Set up the Logitech UE Smart Radio platform.""" email = config.get(CONF_USERNAME) password = config.get(CONF_PASSWORD) session_request = requests.post( "https://www.uesmartradio.com/user/login", data={"email": email, "password": password}, timeout=5, ) session = session_request.cookies["sdi_squeezenetwork_session"] player_request = send_request({"params": ["", ["serverstatus"]]}, session) players = [ UERadioDevice(session, player["playerid"], player["name"]) for player in player_request["result"]["players_loop"] ] add_entities(players) class UERadioDevice(MediaPlayerEntity): """Representation of a Logitech UE Smart Radio device.""" def __init__(self, session, player_id, player_name): """Initialize the Logitech UE Smart Radio device.""" self._session = session self._player_id = player_id self._name = player_name self._state = None self._volume = 0 self._last_volume = 0 self._media_title = None self._media_artist = None self._media_artwork_url = None def send_command(self, command): """Send command to radio.""" send_request( {"method": "slim.request", "params": [self._player_id, command]}, self._session, ) def update(self): """Get the latest details from the device.""" request = send_request( { "method": "slim.request", "params": [ self._player_id, ["status", "-", 1, "tags:cgABbehldiqtyrSuoKLN"], ], }, self._session, ) if request["error"] is not None: self._state = None return if request["result"]["power"] == 0: self._state = STATE_OFF else: self._state = PLAYBACK_DICT[request["result"]["mode"]] media_info = request["result"]["playlist_loop"][0] self._volume = request["result"]["mixer volume"] / 100 self._media_artwork_url = media_info["artwork_url"] self._media_title = media_info["title"] if "artist" in media_info: self._media_artist = media_info["artist"] else: self._media_artist = media_info.get("remote_title") @property def name(self): """Return the name of the device.""" return self._name @property def state(self): """Return the state of the 
device.""" return self._state @property def icon(self): """Return the icon to use in the frontend, if any.""" return ICON @property def is_volume_muted(self): """Boolean if volume is currently muted.""" return self._volume <= 0 @property def volume_level(self): """Volume level of the media player (0..1).""" return self._volume @property def supported_features(self): """Flag of features that are supported.""" return SUPPORT_UE_SMART_RADIO @property def media_content_type(self): """Return the media content type.""" return MEDIA_TYPE_MUSIC @property def media_image_url(self): """Image URL of current playing media.""" return self._media_artwork_url @property def media_artist(self): """Artist of current playing media, music track only.""" return self._media_artist @property def media_title(self): """Title of current playing media.""" return self._media_title def turn_on(self): """Turn on specified media player or all.""" self.send_command(["power", 1]) def turn_off(self): """Turn off specified media player or all.""" self.send_command(["power", 0]) def media_play(self): """Send the media player the command for play/pause.""" self.send_command(["play"]) def media_pause(self): """Send the media player the command for pause.""" self.send_command(["pause"]) def media_stop(self): """Send the media player the stop command.""" self.send_command(["stop"]) def media_previous_track(self): """Send the media player the command for prev track.""" self.send_command(["button", "rew"]) def media_next_track(self): """Send the media player the command for next track.""" self.send_command(["button", "fwd"]) def mute_volume(self, mute): """Send mute command.""" if mute: self._last_volume = self._volume self.send_command(["mixer", "volume", 0]) else: self.send_command(["mixer", "volume", self._last_volume * 100]) def set_volume_level(self, volume): """Set volume level, range 0..1.""" self.send_command(["mixer", "volume", volume * 100])
tboyce021/home-assistant
tests/components/tellduslive/test_config_flow.py
homeassistant/components/ue_smart_radio/media_player.py
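# Aside (illustration only, not part of the test file above): the
# mock_tellduslive fixture works because a MagicMock returns the same child
# mock on every call, so attributes assigned via Session() are the very
# objects the flow handler later reads. Minimal standalone sketch:
from unittest.mock import MagicMock

Session = MagicMock()
Session().access_token = "token"  # set on Session.return_value
assert Session() is Session()  # every call yields the same child mock
assert Session().access_token == "token"  # so the code under test sees it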
"""Support to enter a value into a text box.""" import logging import typing import voluptuous as vol from homeassistant.const import ( ATTR_EDITABLE, ATTR_MODE, CONF_ICON, CONF_ID, CONF_MODE, CONF_NAME, CONF_UNIT_OF_MEASUREMENT, SERVICE_RELOAD, ) from homeassistant.core import callback from homeassistant.helpers import collection import homeassistant.helpers.config_validation as cv from homeassistant.helpers.entity_component import EntityComponent from homeassistant.helpers.restore_state import RestoreEntity import homeassistant.helpers.service from homeassistant.helpers.storage import Store from homeassistant.helpers.typing import ConfigType, HomeAssistantType, ServiceCallType _LOGGER = logging.getLogger(__name__) DOMAIN = "input_text" CONF_INITIAL = "initial" CONF_MIN = "min" CONF_MIN_VALUE = 0 CONF_MAX = "max" CONF_MAX_VALUE = 100 CONF_PATTERN = "pattern" CONF_VALUE = "value" MODE_TEXT = "text" MODE_PASSWORD = "password" ATTR_VALUE = CONF_VALUE ATTR_MIN = "min" ATTR_MAX = "max" ATTR_PATTERN = CONF_PATTERN SERVICE_SET_VALUE = "set_value" STORAGE_KEY = DOMAIN STORAGE_VERSION = 1 CREATE_FIELDS = { vol.Required(CONF_NAME): vol.All(str, vol.Length(min=1)), vol.Optional(CONF_MIN, default=CONF_MIN_VALUE): vol.Coerce(int), vol.Optional(CONF_MAX, default=CONF_MAX_VALUE): vol.Coerce(int), vol.Optional(CONF_INITIAL, ""): cv.string, vol.Optional(CONF_ICON): cv.icon, vol.Optional(CONF_UNIT_OF_MEASUREMENT): cv.string, vol.Optional(CONF_PATTERN): cv.string, vol.Optional(CONF_MODE, default=MODE_TEXT): vol.In([MODE_TEXT, MODE_PASSWORD]), } UPDATE_FIELDS = { vol.Optional(CONF_NAME): cv.string, vol.Optional(CONF_MIN): vol.Coerce(int), vol.Optional(CONF_MAX): vol.Coerce(int), vol.Optional(CONF_INITIAL): cv.string, vol.Optional(CONF_ICON): cv.icon, vol.Optional(CONF_UNIT_OF_MEASUREMENT): cv.string, vol.Optional(CONF_PATTERN): cv.string, vol.Optional(CONF_MODE): vol.In([MODE_TEXT, MODE_PASSWORD]), } def _cv_input_text(cfg): """Configure validation helper for input box (voluptuous).""" minimum = cfg.get(CONF_MIN) maximum = cfg.get(CONF_MAX) if minimum > maximum: raise vol.Invalid( f"Max len ({minimum}) is not greater than min len ({maximum})" ) state = cfg.get(CONF_INITIAL) if state is not None and (len(state) < minimum or len(state) > maximum): raise vol.Invalid( f"Initial value {state} length not in range {minimum}-{maximum}" ) return cfg CONFIG_SCHEMA = vol.Schema( { DOMAIN: cv.schema_with_slug_keys( vol.All( lambda value: value or {}, { vol.Optional(CONF_NAME): cv.string, vol.Optional(CONF_MIN, default=CONF_MIN_VALUE): vol.Coerce(int), vol.Optional(CONF_MAX, default=CONF_MAX_VALUE): vol.Coerce(int), vol.Optional(CONF_INITIAL, ""): cv.string, vol.Optional(CONF_ICON): cv.icon, vol.Optional(CONF_UNIT_OF_MEASUREMENT): cv.string, vol.Optional(CONF_PATTERN): cv.string, vol.Optional(CONF_MODE, default=MODE_TEXT): vol.In( [MODE_TEXT, MODE_PASSWORD] ), }, _cv_input_text, ), ) }, extra=vol.ALLOW_EXTRA, ) RELOAD_SERVICE_SCHEMA = vol.Schema({}) async def async_setup(hass: HomeAssistantType, config: ConfigType) -> bool: """Set up an input text.""" component = EntityComponent(_LOGGER, DOMAIN, hass) id_manager = collection.IDManager() yaml_collection = collection.YamlCollection( logging.getLogger(f"{__name__}.yaml_collection"), id_manager ) collection.attach_entity_component_collection( component, yaml_collection, InputText.from_yaml ) storage_collection = InputTextStorageCollection( Store(hass, STORAGE_VERSION, STORAGE_KEY), logging.getLogger(f"{__name__}.storage_collection"), id_manager, ) 
collection.attach_entity_component_collection( component, storage_collection, InputText ) await yaml_collection.async_load( [{CONF_ID: id_, **(conf or {})} for id_, conf in config.get(DOMAIN, {}).items()] ) await storage_collection.async_load() collection.StorageCollectionWebsocket( storage_collection, DOMAIN, DOMAIN, CREATE_FIELDS, UPDATE_FIELDS ).async_setup(hass) collection.attach_entity_registry_cleaner(hass, DOMAIN, DOMAIN, yaml_collection) collection.attach_entity_registry_cleaner(hass, DOMAIN, DOMAIN, storage_collection) async def reload_service_handler(service_call: ServiceCallType) -> None: """Reload yaml entities.""" conf = await component.async_prepare_reload(skip_reset=True) if conf is None: conf = {DOMAIN: {}} await yaml_collection.async_load( [{CONF_ID: id_, **(cfg or {})} for id_, cfg in conf.get(DOMAIN, {}).items()] ) homeassistant.helpers.service.async_register_admin_service( hass, DOMAIN, SERVICE_RELOAD, reload_service_handler, schema=RELOAD_SERVICE_SCHEMA, ) component.async_register_entity_service( SERVICE_SET_VALUE, {vol.Required(ATTR_VALUE): cv.string}, "async_set_value" ) return True class InputTextStorageCollection(collection.StorageCollection): """Input storage based collection.""" CREATE_SCHEMA = vol.Schema(vol.All(CREATE_FIELDS, _cv_input_text)) UPDATE_SCHEMA = vol.Schema(UPDATE_FIELDS) async def _process_create_data(self, data: typing.Dict) -> typing.Dict: """Validate the config is valid.""" return self.CREATE_SCHEMA(data) @callback def _get_suggested_id(self, info: typing.Dict) -> str: """Suggest an ID based on the config.""" return info[CONF_NAME] async def _update_data(self, data: dict, update_data: typing.Dict) -> typing.Dict: """Return a new updated data object.""" update_data = self.UPDATE_SCHEMA(update_data) return _cv_input_text({**data, **update_data}) class InputText(RestoreEntity): """Represent a text box.""" def __init__(self, config: typing.Dict): """Initialize a text input.""" self._config = config self.editable = True self._current_value = config.get(CONF_INITIAL) @classmethod def from_yaml(cls, config: typing.Dict) -> "InputText": """Return entity instance initialized from yaml storage.""" input_text = cls(config) input_text.entity_id = f"{DOMAIN}.{config[CONF_ID]}" input_text.editable = False return input_text @property def should_poll(self): """If entity should be polled.""" return False @property def name(self): """Return the name of the text input entity.""" return self._config.get(CONF_NAME) @property def icon(self): """Return the icon to be used for this entity.""" return self._config.get(CONF_ICON) @property def _maximum(self) -> int: """Return max len of the text.""" return self._config[CONF_MAX] @property def _minimum(self) -> int: """Return min len of the text.""" return self._config[CONF_MIN] @property def state(self): """Return the state of the component.""" return self._current_value @property def unit_of_measurement(self): """Return the unit the value is expressed in.""" return self._config.get(CONF_UNIT_OF_MEASUREMENT) @property def unique_id(self) -> typing.Optional[str]: """Return unique id for the entity.""" return self._config[CONF_ID] @property def state_attributes(self): """Return the state attributes.""" return { ATTR_EDITABLE: self.editable, ATTR_MIN: self._minimum, ATTR_MAX: self._maximum, ATTR_PATTERN: self._config.get(CONF_PATTERN), ATTR_MODE: self._config[CONF_MODE], } async def async_added_to_hass(self): """Run when entity about to be added to hass.""" await super().async_added_to_hass() if self._current_value is not 
None: return state = await self.async_get_last_state() value = state and state.state # Check against None because value can be 0 if value is not None and self._minimum <= len(value) <= self._maximum: self._current_value = value async def async_set_value(self, value): """Select new value.""" if len(value) < self._minimum or len(value) > self._maximum: _LOGGER.warning( "Invalid value: %s (length range %s - %s)", value, self._minimum, self._maximum, ) return self._current_value = value self.async_write_ha_state() async def async_update_config(self, config: typing.Dict) -> None: """Handle when the config is updated.""" self._config = config self.async_write_ha_state()
homeassistant/components/input_text/__init__.py
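# Illustrative sketch only (not part of the component above): the dict form
# of a YAML entry that CONFIG_SCHEMA accepts. The entity key "bedroom_note"
# and its values are invented for the example; the import assumes the module
# above is available as homeassistant.components.input_text.
from homeassistant.components.input_text import CONFIG_SCHEMA

conf = CONFIG_SCHEMA(
    {"input_text": {"bedroom_note": {"name": "Bedroom note", "max": 40, "initial": "hi"}}}
)
# Defaults declared in the schema are filled in during validation.
assert conf["input_text"]["bedroom_note"]["min"] == 0
assert conf["input_text"]["bedroom_note"]["mode"] == "text"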
"""Support for binary sensor using Beaglebone Black GPIO.""" import voluptuous as vol from homeassistant.components import bbb_gpio from homeassistant.components.binary_sensor import PLATFORM_SCHEMA, BinarySensorEntity from homeassistant.const import CONF_NAME, DEVICE_DEFAULT_NAME import homeassistant.helpers.config_validation as cv CONF_PINS = "pins" CONF_BOUNCETIME = "bouncetime" CONF_INVERT_LOGIC = "invert_logic" CONF_PULL_MODE = "pull_mode" DEFAULT_BOUNCETIME = 50 DEFAULT_INVERT_LOGIC = False DEFAULT_PULL_MODE = "UP" PIN_SCHEMA = vol.Schema( { vol.Required(CONF_NAME): cv.string, vol.Optional(CONF_BOUNCETIME, default=DEFAULT_BOUNCETIME): cv.positive_int, vol.Optional(CONF_INVERT_LOGIC, default=DEFAULT_INVERT_LOGIC): cv.boolean, vol.Optional(CONF_PULL_MODE, default=DEFAULT_PULL_MODE): vol.In(["UP", "DOWN"]), } ) PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend( {vol.Required(CONF_PINS, default={}): vol.Schema({cv.string: PIN_SCHEMA})} ) def setup_platform(hass, config, add_entities, discovery_info=None): """Set up the Beaglebone Black GPIO devices.""" pins = config[CONF_PINS] binary_sensors = [] for pin, params in pins.items(): binary_sensors.append(BBBGPIOBinarySensor(pin, params)) add_entities(binary_sensors) class BBBGPIOBinarySensor(BinarySensorEntity): """Representation of a binary sensor that uses Beaglebone Black GPIO.""" def __init__(self, pin, params): """Initialize the Beaglebone Black binary sensor.""" self._pin = pin self._name = params[CONF_NAME] or DEVICE_DEFAULT_NAME self._bouncetime = params[CONF_BOUNCETIME] self._pull_mode = params[CONF_PULL_MODE] self._invert_logic = params[CONF_INVERT_LOGIC] bbb_gpio.setup_input(self._pin, self._pull_mode) self._state = bbb_gpio.read_input(self._pin) def read_gpio(pin): """Read state from GPIO.""" self._state = bbb_gpio.read_input(self._pin) self.schedule_update_ha_state() bbb_gpio.edge_detect(self._pin, read_gpio, self._bouncetime) @property def should_poll(self): """No polling needed.""" return False @property def name(self): """Return the name of the sensor.""" return self._name @property def is_on(self): """Return the state of the entity.""" return self._state != self._invert_logic
homeassistant/components/bbb_gpio/binary_sensor.py
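# Illustration only, assuming PIN_SCHEMA from the module above is in scope:
# the per-pin options dict that sits under a pin key such as "P8_12" in the
# CONF_PINS mapping. The friendly name is made up; unspecified options fall
# back to the declared defaults.
pin_conf = PIN_SCHEMA({"name": "Front door", "invert_logic": True})
assert pin_conf["pull_mode"] == "UP"
assert pin_conf["bouncetime"] == 50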
"""Media Source models.""" from abc import ABC from dataclasses import dataclass from typing import List, Optional, Tuple from homeassistant.components.media_player import BrowseMedia from homeassistant.components.media_player.const import ( MEDIA_CLASS_CHANNEL, MEDIA_CLASS_DIRECTORY, MEDIA_TYPE_CHANNEL, MEDIA_TYPE_CHANNELS, ) from homeassistant.core import HomeAssistant, callback from .const import DOMAIN, URI_SCHEME, URI_SCHEME_REGEX @dataclass class PlayMedia: """Represents a playable media.""" url: str mime_type: str class BrowseMediaSource(BrowseMedia): """Represent a browsable media file.""" children: Optional[List["BrowseMediaSource"]] def __init__(self, *, domain: Optional[str], identifier: Optional[str], **kwargs): """Initialize media source browse media.""" media_content_id = f"{URI_SCHEME}{domain or ''}" if identifier: media_content_id += f"/{identifier}" super().__init__(media_content_id=media_content_id, **kwargs) self.domain = domain self.identifier = identifier @dataclass class MediaSourceItem: """A parsed media item.""" hass: HomeAssistant domain: Optional[str] identifier: str async def async_browse(self) -> BrowseMediaSource: """Browse this item.""" if self.domain is None: base = BrowseMediaSource( domain=None, identifier=None, media_class=MEDIA_CLASS_DIRECTORY, media_content_type=MEDIA_TYPE_CHANNELS, title="Media Sources", can_play=False, can_expand=True, children_media_class=MEDIA_CLASS_CHANNEL, ) base.children = [ BrowseMediaSource( domain=source.domain, identifier=None, media_class=MEDIA_CLASS_CHANNEL, media_content_type=MEDIA_TYPE_CHANNEL, title=source.name, can_play=False, can_expand=True, ) for source in self.hass.data[DOMAIN].values() ] return base return await self.async_media_source().async_browse_media(self) async def async_resolve(self) -> PlayMedia: """Resolve to playable item.""" return await self.async_media_source().async_resolve_media(self) @callback def async_media_source(self) -> "MediaSource": """Return media source that owns this item.""" return self.hass.data[DOMAIN][self.domain] @classmethod def from_uri(cls, hass: HomeAssistant, uri: str) -> "MediaSourceItem": """Create an item from a uri.""" match = URI_SCHEME_REGEX.match(uri) if not match: raise ValueError("Invalid media source URI") domain = match.group("domain") identifier = match.group("identifier") return cls(hass, domain, identifier) class MediaSource(ABC): """Represents a source of media files.""" name: str = None def __init__(self, domain: str): """Initialize a media source.""" self.domain = domain if not self.name: self.name = domain async def async_resolve_media(self, item: MediaSourceItem) -> PlayMedia: """Resolve a media item to a playable item.""" raise NotImplementedError async def async_browse_media( self, item: MediaSourceItem, media_types: Tuple[str] ) -> BrowseMediaSource: """Browse media.""" raise NotImplementedError
homeassistant/components/media_source/models.py
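# Hedged sketch only, assuming the classes above are in scope: one way a
# concrete source could subclass MediaSource. The "example" domain and the
# stream URL are invented; a real source would also override
# async_browse_media.
class ExampleMediaSource(MediaSource):
    """Serve a single hard-coded stream, for illustration."""

    name = "Example"

    def __init__(self) -> None:
        super().__init__("example")

    async def async_resolve_media(self, item: MediaSourceItem) -> PlayMedia:
        # Every identifier resolves to the same made-up URL in this sketch.
        return PlayMedia("https://example.com/stream.mp3", "audio/mpeg")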
"""Climate platform for Advantage Air integration.""" from homeassistant.components.climate import ClimateEntity from homeassistant.components.climate.const import ( FAN_AUTO, FAN_HIGH, FAN_LOW, FAN_MEDIUM, HVAC_MODE_COOL, HVAC_MODE_DRY, HVAC_MODE_FAN_ONLY, HVAC_MODE_HEAT, HVAC_MODE_OFF, SUPPORT_FAN_MODE, SUPPORT_TARGET_TEMPERATURE, ) from homeassistant.const import ATTR_TEMPERATURE, PRECISION_WHOLE, TEMP_CELSIUS from .const import ( ADVANTAGE_AIR_STATE_CLOSE, ADVANTAGE_AIR_STATE_OFF, ADVANTAGE_AIR_STATE_ON, ADVANTAGE_AIR_STATE_OPEN, DOMAIN as ADVANTAGE_AIR_DOMAIN, ) from .entity import AdvantageAirEntity ADVANTAGE_AIR_HVAC_MODES = { "heat": HVAC_MODE_HEAT, "cool": HVAC_MODE_COOL, "vent": HVAC_MODE_FAN_ONLY, "dry": HVAC_MODE_DRY, } HASS_HVAC_MODES = {v: k for k, v in ADVANTAGE_AIR_HVAC_MODES.items()} ADVANTAGE_AIR_FAN_MODES = { "auto": FAN_AUTO, "low": FAN_LOW, "medium": FAN_MEDIUM, "high": FAN_HIGH, } HASS_FAN_MODES = {v: k for k, v in ADVANTAGE_AIR_FAN_MODES.items()} FAN_SPEEDS = {FAN_LOW: 30, FAN_MEDIUM: 60, FAN_HIGH: 100} AC_HVAC_MODES = [ HVAC_MODE_OFF, HVAC_MODE_COOL, HVAC_MODE_HEAT, HVAC_MODE_FAN_ONLY, HVAC_MODE_DRY, ] ZONE_HVAC_MODES = [HVAC_MODE_OFF, HVAC_MODE_FAN_ONLY] PARALLEL_UPDATES = 0 async def async_setup_entry(hass, config_entry, async_add_entities): """Set up AdvantageAir climate platform.""" instance = hass.data[ADVANTAGE_AIR_DOMAIN][config_entry.entry_id] entities = [] for ac_key, ac_device in instance["coordinator"].data["aircons"].items(): entities.append(AdvantageAirAC(instance, ac_key)) for zone_key, zone in ac_device["zones"].items(): # Only add zone climate control when zone is in temperature control if zone["type"] != 0: entities.append(AdvantageAirZone(instance, ac_key, zone_key)) async_add_entities(entities) class AdvantageAirClimateEntity(AdvantageAirEntity, ClimateEntity): """AdvantageAir Climate class.""" @property def temperature_unit(self): """Return the temperature unit.""" return TEMP_CELSIUS @property def target_temperature_step(self): """Return the supported temperature step.""" return PRECISION_WHOLE @property def max_temp(self): """Return the maximum supported temperature.""" return 32 @property def min_temp(self): """Return the minimum supported temperature.""" return 16 class AdvantageAirAC(AdvantageAirClimateEntity): """AdvantageAir AC unit.""" @property def name(self): """Return the name.""" return self._ac["name"] @property def unique_id(self): """Return a unique id.""" return f'{self.coordinator.data["system"]["rid"]}-{self.ac_key}' @property def target_temperature(self): """Return the current target temperature.""" return self._ac["setTemp"] @property def hvac_mode(self): """Return the current HVAC modes.""" if self._ac["state"] == ADVANTAGE_AIR_STATE_ON: return ADVANTAGE_AIR_HVAC_MODES.get(self._ac["mode"]) return HVAC_MODE_OFF @property def hvac_modes(self): """Return the supported HVAC modes.""" return AC_HVAC_MODES @property def fan_mode(self): """Return the current fan modes.""" return ADVANTAGE_AIR_FAN_MODES.get(self._ac["fan"]) @property def fan_modes(self): """Return the supported fan modes.""" return [FAN_AUTO, FAN_LOW, FAN_MEDIUM, FAN_HIGH] @property def supported_features(self): """Return the supported features.""" return SUPPORT_TARGET_TEMPERATURE | SUPPORT_FAN_MODE async def async_set_hvac_mode(self, hvac_mode): """Set the HVAC Mode and State.""" if hvac_mode == HVAC_MODE_OFF: await self.async_change( {self.ac_key: {"info": {"state": ADVANTAGE_AIR_STATE_OFF}}} ) else: await self.async_change( { self.ac_key: { "info": { "state": 
ADVANTAGE_AIR_STATE_ON, "mode": HASS_HVAC_MODES.get(hvac_mode), } } } ) async def async_set_fan_mode(self, fan_mode): """Set the Fan Mode.""" await self.async_change( {self.ac_key: {"info": {"fan": HASS_FAN_MODES.get(fan_mode)}}} ) async def async_set_temperature(self, **kwargs): """Set the Temperature.""" temp = kwargs.get(ATTR_TEMPERATURE) await self.async_change({self.ac_key: {"info": {"setTemp": temp}}}) class AdvantageAirZone(AdvantageAirClimateEntity): """AdvantageAir Zone control.""" @property def name(self): """Return the name.""" return self._zone["name"] @property def unique_id(self): """Return a unique id.""" return f'{self.coordinator.data["system"]["rid"]}-{self.ac_key}-{self.zone_key}' @property def current_temperature(self): """Return the current temperature.""" return self._zone["measuredTemp"] @property def target_temperature(self): """Return the target temperature.""" return self._zone["setTemp"] @property def hvac_mode(self): """Return the current HVAC modes.""" if self._zone["state"] == ADVANTAGE_AIR_STATE_OPEN: return HVAC_MODE_FAN_ONLY return HVAC_MODE_OFF @property def hvac_modes(self): """Return supported HVAC modes.""" return ZONE_HVAC_MODES @property def supported_features(self): """Return the supported features.""" return SUPPORT_TARGET_TEMPERATURE async def async_set_hvac_mode(self, hvac_mode): """Set the HVAC Mode and State.""" if hvac_mode == HVAC_MODE_OFF: await self.async_change( { self.ac_key: { "zones": {self.zone_key: {"state": ADVANTAGE_AIR_STATE_CLOSE}} } } ) else: await self.async_change( { self.ac_key: { "zones": {self.zone_key: {"state": ADVANTAGE_AIR_STATE_OPEN}} } } ) async def async_set_temperature(self, **kwargs): """Set the Temperature.""" temp = kwargs.get(ATTR_TEMPERATURE) await self.async_change( {self.ac_key: {"zones": {self.zone_key: {"setTemp": temp}}}} )
homeassistant/components/advantage_air/climate.py
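# Small sanity sketch (illustration only, names from the module above): the
# HASS_* dicts are built as inverses of the ADVANTAGE_AIR_* dicts, so vendor
# keywords and Home Assistant modes round-trip cleanly.
assert HASS_HVAC_MODES[ADVANTAGE_AIR_HVAC_MODES["cool"]] == "cool"
assert ADVANTAGE_AIR_FAN_MODES[HASS_FAN_MODES[FAN_HIGH]] == FAN_HIGH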
"""Provides the Canary DataUpdateCoordinator.""" from datetime import timedelta import logging from async_timeout import timeout from canary.api import Api from requests import ConnectTimeout, HTTPError from homeassistant.helpers.typing import HomeAssistantType from homeassistant.helpers.update_coordinator import DataUpdateCoordinator, UpdateFailed from .const import DOMAIN _LOGGER = logging.getLogger(__name__) class CanaryDataUpdateCoordinator(DataUpdateCoordinator): """Class to manage fetching Canary data.""" def __init__(self, hass: HomeAssistantType, *, api: Api): """Initialize global Canary data updater.""" self.canary = api update_interval = timedelta(seconds=30) super().__init__( hass, _LOGGER, name=DOMAIN, update_interval=update_interval, ) def _update_data(self) -> dict: """Fetch data from Canary via sync functions.""" locations_by_id = {} readings_by_device_id = {} for location in self.canary.get_locations(): location_id = location.location_id locations_by_id[location_id] = location for device in location.devices: if device.is_online: readings_by_device_id[ device.device_id ] = self.canary.get_latest_readings(device.device_id) return { "locations": locations_by_id, "readings": readings_by_device_id, } async def _async_update_data(self) -> dict: """Fetch data from Canary.""" try: async with timeout(15): return await self.hass.async_add_executor_job(self._update_data) except (ConnectTimeout, HTTPError) as error: raise UpdateFailed(f"Invalid response from API: {error}") from error
tboyce021/home-assistant
tests/components/tellduslive/test_config_flow.py
homeassistant/components/canary/coordinator.py
"""Interfaces with Egardia/Woonveilig alarm control panel.""" import logging import requests import homeassistant.components.alarm_control_panel as alarm from homeassistant.components.alarm_control_panel.const import ( SUPPORT_ALARM_ARM_AWAY, SUPPORT_ALARM_ARM_HOME, ) from homeassistant.const import ( STATE_ALARM_ARMED_AWAY, STATE_ALARM_ARMED_HOME, STATE_ALARM_ARMED_NIGHT, STATE_ALARM_DISARMED, STATE_ALARM_TRIGGERED, ) from . import ( CONF_REPORT_SERVER_CODES, CONF_REPORT_SERVER_ENABLED, CONF_REPORT_SERVER_PORT, EGARDIA_DEVICE, EGARDIA_SERVER, REPORT_SERVER_CODES_IGNORE, ) _LOGGER = logging.getLogger(__name__) STATES = { "ARM": STATE_ALARM_ARMED_AWAY, "DAY HOME": STATE_ALARM_ARMED_HOME, "DISARM": STATE_ALARM_DISARMED, "ARMHOME": STATE_ALARM_ARMED_HOME, "HOME": STATE_ALARM_ARMED_HOME, "NIGHT HOME": STATE_ALARM_ARMED_NIGHT, "TRIGGERED": STATE_ALARM_TRIGGERED, } def setup_platform(hass, config, add_entities, discovery_info=None): """Set up the Egardia Alarm Control Panael platform.""" if discovery_info is None: return device = EgardiaAlarm( discovery_info["name"], hass.data[EGARDIA_DEVICE], discovery_info[CONF_REPORT_SERVER_ENABLED], discovery_info.get(CONF_REPORT_SERVER_CODES), discovery_info[CONF_REPORT_SERVER_PORT], ) add_entities([device], True) class EgardiaAlarm(alarm.AlarmControlPanelEntity): """Representation of a Egardia alarm.""" def __init__( self, name, egardiasystem, rs_enabled=False, rs_codes=None, rs_port=52010 ): """Initialize the Egardia alarm.""" self._name = name self._egardiasystem = egardiasystem self._status = None self._rs_enabled = rs_enabled self._rs_codes = rs_codes self._rs_port = rs_port async def async_added_to_hass(self): """Add Egardiaserver callback if enabled.""" if self._rs_enabled: _LOGGER.debug("Registering callback to Egardiaserver") self.hass.data[EGARDIA_SERVER].register_callback(self.handle_status_event) @property def name(self): """Return the name of the device.""" return self._name @property def state(self): """Return the state of the device.""" return self._status @property def supported_features(self) -> int: """Return the list of supported features.""" return SUPPORT_ALARM_ARM_HOME | SUPPORT_ALARM_ARM_AWAY @property def should_poll(self): """Poll if no report server is enabled.""" if not self._rs_enabled: return True return False def handle_status_event(self, event): """Handle the Egardia system status event.""" statuscode = event.get("status") if statuscode is not None: status = self.lookupstatusfromcode(statuscode) self.parsestatus(status) self.schedule_update_ha_state() def lookupstatusfromcode(self, statuscode): """Look at the rs_codes and returns the status from the code.""" status = next( ( status_group.upper() for status_group, codes in self._rs_codes.items() for code in codes if statuscode == code ), "UNKNOWN", ) return status def parsestatus(self, status): """Parse the status.""" _LOGGER.debug("Parsing status %s", status) # Ignore the statuscode if it is IGNORE if status.lower().strip() != REPORT_SERVER_CODES_IGNORE: _LOGGER.debug("Not ignoring status %s", status) newstatus = STATES.get(status.upper()) _LOGGER.debug("newstatus %s", newstatus) self._status = newstatus else: _LOGGER.error("Ignoring status") def update(self): """Update the alarm status.""" status = self._egardiasystem.getstate() self.parsestatus(status) def alarm_disarm(self, code=None): """Send disarm command.""" try: self._egardiasystem.alarm_disarm() except requests.exceptions.RequestException as err: _LOGGER.error( "Egardia device exception occurred when sending disarm 
command: %s", err, ) def alarm_arm_home(self, code=None): """Send arm home command.""" try: self._egardiasystem.alarm_arm_home() except requests.exceptions.RequestException as err: _LOGGER.error( "Egardia device exception occurred when " "sending arm home command: %s", err, ) def alarm_arm_away(self, code=None): """Send arm away command.""" try: self._egardiasystem.alarm_arm_away() except requests.exceptions.RequestException as err: _LOGGER.error( "Egardia device exception occurred when " "sending arm away command: %s", err, )
tboyce021/home-assistant
tests/components/tellduslive/test_config_flow.py
homeassistant/components/egardia/alarm_control_panel.py
"""Support for the Pico TTS speech service.""" import logging import os import shutil import subprocess import tempfile import voluptuous as vol from homeassistant.components.tts import CONF_LANG, PLATFORM_SCHEMA, Provider _LOGGER = logging.getLogger(__name__) SUPPORT_LANGUAGES = ["en-US", "en-GB", "de-DE", "es-ES", "fr-FR", "it-IT"] DEFAULT_LANG = "en-US" PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend( {vol.Optional(CONF_LANG, default=DEFAULT_LANG): vol.In(SUPPORT_LANGUAGES)} ) def get_engine(hass, config, discovery_info=None): """Set up Pico speech component.""" if shutil.which("pico2wave") is None: _LOGGER.error("'pico2wave' was not found") return False return PicoProvider(config[CONF_LANG]) class PicoProvider(Provider): """The Pico TTS API provider.""" def __init__(self, lang): """Initialize Pico TTS provider.""" self._lang = lang self.name = "PicoTTS" @property def default_language(self): """Return the default language.""" return self._lang @property def supported_languages(self): """Return list of supported languages.""" return SUPPORT_LANGUAGES def get_tts_audio(self, message, language, options=None): """Load TTS using pico2wave.""" with tempfile.NamedTemporaryFile(suffix=".wav", delete=False) as tmpf: fname = tmpf.name cmd = ["pico2wave", "--wave", fname, "-l", language, message] subprocess.call(cmd) data = None try: with open(fname, "rb") as voice: data = voice.read() except OSError: _LOGGER.error("Error trying to read %s", fname) return (None, None) finally: os.remove(fname) if data: return ("wav", data) return (None, None)
tboyce021/home-assistant
tests/components/tellduslive/test_config_flow.py
homeassistant/components/picotts/tts.py