Dataset columns:
input: string (lengths 53 to 297k)
output: string (604 distinct values)
repo_name: string (376 distinct values)
test_path: string (583 distinct values)
code_path: string (lengths 7 to 116)
"""A sensor platform that give you information about the next space launch.""" from datetime import timedelta import logging from typing import Optional from pylaunches import PyLaunches, PyLaunchesException import voluptuous as vol from homeassistant.components.sensor import PLATFORM_SCHEMA from homeassistant.const import ATTR_ATTRIBUTION, CONF_NAME from homeassistant.helpers.aiohttp_client import async_get_clientsession import homeassistant.helpers.config_validation as cv from homeassistant.helpers.entity import Entity from .const import ( ATTR_AGENCY, ATTR_AGENCY_COUNTRY_CODE, ATTR_LAUNCH_TIME, ATTR_STREAM, ATTRIBUTION, DEFAULT_NAME, ) _LOGGER = logging.getLogger(__name__) SCAN_INTERVAL = timedelta(hours=1) PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend( {vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string} ) async def async_setup_platform(hass, config, async_add_entities, discovery_info=None): """Create the launch sensor.""" name = config[CONF_NAME] session = async_get_clientsession(hass) launches = PyLaunches(session) async_add_entities([LaunchLibrarySensor(launches, name)], True) class LaunchLibrarySensor(Entity): """Representation of a launch_library Sensor.""" def __init__(self, launches: PyLaunches, name: str) -> None: """Initialize the sensor.""" self.launches = launches self.next_launch = None self._name = name async def async_update(self) -> None: """Get the latest data.""" try: launches = await self.launches.upcoming_launches() except PyLaunchesException as exception: _LOGGER.error("Error getting data, %s", exception) else: if launches: self.next_launch = launches[0] @property def name(self) -> str: """Return the name of the sensor.""" return self._name @property def state(self) -> Optional[str]: """Return the state of the sensor.""" if self.next_launch: return self.next_launch.name return None @property def icon(self) -> str: """Return the icon of the sensor.""" return "mdi:rocket" @property def device_state_attributes(self) -> Optional[dict]: """Return attributes for the sensor.""" if self.next_launch: return { ATTR_LAUNCH_TIME: self.next_launch.net, ATTR_AGENCY: self.next_launch.launch_service_provider.name, ATTR_AGENCY_COUNTRY_CODE: self.next_launch.pad.location.country_code, ATTR_STREAM: self.next_launch.webcast_live, ATTR_ATTRIBUTION: ATTRIBUTION, } return None
"""Test Hue init with multiple bridges.""" from unittest.mock import Mock, patch from aiohue.groups import Groups from aiohue.lights import Lights from aiohue.scenes import Scenes from aiohue.sensors import Sensors import pytest from homeassistant import config_entries from homeassistant.components import hue from homeassistant.components.hue import sensor_base as hue_sensor_base from homeassistant.setup import async_setup_component async def setup_component(hass): """Hue component.""" with patch.object(hue, "async_setup_entry", return_value=True): assert ( await async_setup_component( hass, hue.DOMAIN, {}, ) is True ) async def test_hue_activate_scene_both_responds( hass, mock_bridge1, mock_bridge2, mock_config_entry1, mock_config_entry2 ): """Test that makes both bridges successfully activate a scene.""" await setup_component(hass) await setup_bridge(hass, mock_bridge1, mock_config_entry1) await setup_bridge(hass, mock_bridge2, mock_config_entry2) with patch.object( mock_bridge1, "hue_activate_scene", return_value=None ) as mock_hue_activate_scene1, patch.object( mock_bridge2, "hue_activate_scene", return_value=None ) as mock_hue_activate_scene2: await hass.services.async_call( "hue", "hue_activate_scene", {"group_name": "group_2", "scene_name": "my_scene"}, blocking=True, ) mock_hue_activate_scene1.assert_called_once() mock_hue_activate_scene2.assert_called_once() async def test_hue_activate_scene_one_responds( hass, mock_bridge1, mock_bridge2, mock_config_entry1, mock_config_entry2 ): """Test that makes only one bridge successfully activate a scene.""" await setup_component(hass) await setup_bridge(hass, mock_bridge1, mock_config_entry1) await setup_bridge(hass, mock_bridge2, mock_config_entry2) with patch.object( mock_bridge1, "hue_activate_scene", return_value=None ) as mock_hue_activate_scene1, patch.object( mock_bridge2, "hue_activate_scene", return_value=False ) as mock_hue_activate_scene2: await hass.services.async_call( "hue", "hue_activate_scene", {"group_name": "group_2", "scene_name": "my_scene"}, blocking=True, ) mock_hue_activate_scene1.assert_called_once() mock_hue_activate_scene2.assert_called_once() async def test_hue_activate_scene_zero_responds( hass, mock_bridge1, mock_bridge2, mock_config_entry1, mock_config_entry2 ): """Test that makes no bridge successfully activate a scene.""" await setup_component(hass) await setup_bridge(hass, mock_bridge1, mock_config_entry1) await setup_bridge(hass, mock_bridge2, mock_config_entry2) with patch.object( mock_bridge1, "hue_activate_scene", return_value=False ) as mock_hue_activate_scene1, patch.object( mock_bridge2, "hue_activate_scene", return_value=False ) as mock_hue_activate_scene2: await hass.services.async_call( "hue", "hue_activate_scene", {"group_name": "group_2", "scene_name": "my_scene"}, blocking=True, ) # both were retried assert mock_hue_activate_scene1.call_count == 2 assert mock_hue_activate_scene2.call_count == 2 async def setup_bridge(hass, mock_bridge, config_entry): """Load the Hue light platform with the provided bridge.""" mock_bridge.config_entry = config_entry hass.data[hue.DOMAIN][config_entry.entry_id] = mock_bridge await hass.config_entries.async_forward_entry_setup(config_entry, "light") # To flush out the service call to update the group await hass.async_block_till_done() @pytest.fixture def mock_config_entry1(hass): """Mock a config entry.""" return create_config_entry() @pytest.fixture def mock_config_entry2(hass): """Mock a config entry.""" return create_config_entry() def create_config_entry(): 
"""Mock a config entry.""" return config_entries.ConfigEntry( 1, hue.DOMAIN, "Mock Title", {"host": "mock-host"}, "test", config_entries.CONN_CLASS_LOCAL_POLL, system_options={}, ) @pytest.fixture def mock_bridge1(hass): """Mock a Hue bridge.""" return create_mock_bridge(hass) @pytest.fixture def mock_bridge2(hass): """Mock a Hue bridge.""" return create_mock_bridge(hass) def create_mock_bridge(hass): """Create a mock Hue bridge.""" bridge = Mock( hass=hass, available=True, authorized=True, allow_unreachable=False, allow_groups=False, api=Mock(), reset_jobs=[], spec=hue.HueBridge, ) bridge.sensor_manager = hue_sensor_base.SensorManager(bridge) bridge.mock_requests = [] async def mock_request(method, path, **kwargs): kwargs["method"] = method kwargs["path"] = path bridge.mock_requests.append(kwargs) return {} async def async_request_call(task): await task() bridge.async_request_call = async_request_call bridge.api.config.apiversion = "9.9.9" bridge.api.lights = Lights({}, mock_request) bridge.api.groups = Groups({}, mock_request) bridge.api.sensors = Sensors({}, mock_request) bridge.api.scenes = Scenes({}, mock_request) return bridge
turbokongen/home-assistant
tests/components/hue/test_init_multiple_bridges.py
homeassistant/components/launch_library/sensor.py
"""Reproduce an Timer state.""" import asyncio import logging from typing import Any, Dict, Iterable, Optional from homeassistant.const import ATTR_ENTITY_ID from homeassistant.core import Context, State from homeassistant.helpers.typing import HomeAssistantType from . import ( ATTR_DURATION, DOMAIN, SERVICE_CANCEL, SERVICE_PAUSE, SERVICE_START, STATUS_ACTIVE, STATUS_IDLE, STATUS_PAUSED, ) _LOGGER = logging.getLogger(__name__) VALID_STATES = {STATUS_IDLE, STATUS_ACTIVE, STATUS_PAUSED} async def _async_reproduce_state( hass: HomeAssistantType, state: State, *, context: Optional[Context] = None, reproduce_options: Optional[Dict[str, Any]] = None, ) -> None: """Reproduce a single state.""" cur_state = hass.states.get(state.entity_id) if cur_state is None: _LOGGER.warning("Unable to find entity %s", state.entity_id) return if state.state not in VALID_STATES: _LOGGER.warning( "Invalid state specified for %s: %s", state.entity_id, state.state ) return # Return if we are already at the right state. if cur_state.state == state.state and cur_state.attributes.get( ATTR_DURATION ) == state.attributes.get(ATTR_DURATION): return service_data = {ATTR_ENTITY_ID: state.entity_id} if state.state == STATUS_ACTIVE: service = SERVICE_START if ATTR_DURATION in state.attributes: service_data[ATTR_DURATION] = state.attributes[ATTR_DURATION] elif state.state == STATUS_PAUSED: service = SERVICE_PAUSE elif state.state == STATUS_IDLE: service = SERVICE_CANCEL await hass.services.async_call( DOMAIN, service, service_data, context=context, blocking=True ) async def async_reproduce_states( hass: HomeAssistantType, states: Iterable[State], *, context: Optional[Context] = None, reproduce_options: Optional[Dict[str, Any]] = None, ) -> None: """Reproduce Timer states.""" await asyncio.gather( *( _async_reproduce_state( hass, state, context=context, reproduce_options=reproduce_options ) for state in states ) )
"""Test Hue init with multiple bridges.""" from unittest.mock import Mock, patch from aiohue.groups import Groups from aiohue.lights import Lights from aiohue.scenes import Scenes from aiohue.sensors import Sensors import pytest from homeassistant import config_entries from homeassistant.components import hue from homeassistant.components.hue import sensor_base as hue_sensor_base from homeassistant.setup import async_setup_component async def setup_component(hass): """Hue component.""" with patch.object(hue, "async_setup_entry", return_value=True): assert ( await async_setup_component( hass, hue.DOMAIN, {}, ) is True ) async def test_hue_activate_scene_both_responds( hass, mock_bridge1, mock_bridge2, mock_config_entry1, mock_config_entry2 ): """Test that makes both bridges successfully activate a scene.""" await setup_component(hass) await setup_bridge(hass, mock_bridge1, mock_config_entry1) await setup_bridge(hass, mock_bridge2, mock_config_entry2) with patch.object( mock_bridge1, "hue_activate_scene", return_value=None ) as mock_hue_activate_scene1, patch.object( mock_bridge2, "hue_activate_scene", return_value=None ) as mock_hue_activate_scene2: await hass.services.async_call( "hue", "hue_activate_scene", {"group_name": "group_2", "scene_name": "my_scene"}, blocking=True, ) mock_hue_activate_scene1.assert_called_once() mock_hue_activate_scene2.assert_called_once() async def test_hue_activate_scene_one_responds( hass, mock_bridge1, mock_bridge2, mock_config_entry1, mock_config_entry2 ): """Test that makes only one bridge successfully activate a scene.""" await setup_component(hass) await setup_bridge(hass, mock_bridge1, mock_config_entry1) await setup_bridge(hass, mock_bridge2, mock_config_entry2) with patch.object( mock_bridge1, "hue_activate_scene", return_value=None ) as mock_hue_activate_scene1, patch.object( mock_bridge2, "hue_activate_scene", return_value=False ) as mock_hue_activate_scene2: await hass.services.async_call( "hue", "hue_activate_scene", {"group_name": "group_2", "scene_name": "my_scene"}, blocking=True, ) mock_hue_activate_scene1.assert_called_once() mock_hue_activate_scene2.assert_called_once() async def test_hue_activate_scene_zero_responds( hass, mock_bridge1, mock_bridge2, mock_config_entry1, mock_config_entry2 ): """Test that makes no bridge successfully activate a scene.""" await setup_component(hass) await setup_bridge(hass, mock_bridge1, mock_config_entry1) await setup_bridge(hass, mock_bridge2, mock_config_entry2) with patch.object( mock_bridge1, "hue_activate_scene", return_value=False ) as mock_hue_activate_scene1, patch.object( mock_bridge2, "hue_activate_scene", return_value=False ) as mock_hue_activate_scene2: await hass.services.async_call( "hue", "hue_activate_scene", {"group_name": "group_2", "scene_name": "my_scene"}, blocking=True, ) # both were retried assert mock_hue_activate_scene1.call_count == 2 assert mock_hue_activate_scene2.call_count == 2 async def setup_bridge(hass, mock_bridge, config_entry): """Load the Hue light platform with the provided bridge.""" mock_bridge.config_entry = config_entry hass.data[hue.DOMAIN][config_entry.entry_id] = mock_bridge await hass.config_entries.async_forward_entry_setup(config_entry, "light") # To flush out the service call to update the group await hass.async_block_till_done() @pytest.fixture def mock_config_entry1(hass): """Mock a config entry.""" return create_config_entry() @pytest.fixture def mock_config_entry2(hass): """Mock a config entry.""" return create_config_entry() def create_config_entry(): 
"""Mock a config entry.""" return config_entries.ConfigEntry( 1, hue.DOMAIN, "Mock Title", {"host": "mock-host"}, "test", config_entries.CONN_CLASS_LOCAL_POLL, system_options={}, ) @pytest.fixture def mock_bridge1(hass): """Mock a Hue bridge.""" return create_mock_bridge(hass) @pytest.fixture def mock_bridge2(hass): """Mock a Hue bridge.""" return create_mock_bridge(hass) def create_mock_bridge(hass): """Create a mock Hue bridge.""" bridge = Mock( hass=hass, available=True, authorized=True, allow_unreachable=False, allow_groups=False, api=Mock(), reset_jobs=[], spec=hue.HueBridge, ) bridge.sensor_manager = hue_sensor_base.SensorManager(bridge) bridge.mock_requests = [] async def mock_request(method, path, **kwargs): kwargs["method"] = method kwargs["path"] = path bridge.mock_requests.append(kwargs) return {} async def async_request_call(task): await task() bridge.async_request_call = async_request_call bridge.api.config.apiversion = "9.9.9" bridge.api.lights = Lights({}, mock_request) bridge.api.groups = Groups({}, mock_request) bridge.api.sensors = Sensors({}, mock_request) bridge.api.scenes = Scenes({}, mock_request) return bridge
turbokongen/home-assistant
tests/components/hue/test_init_multiple_bridges.py
homeassistant/components/timer/reproduce_state.py
"""Support for Neato sensors.""" from datetime import timedelta import logging from pybotvac.exceptions import NeatoRobotException from homeassistant.components.sensor import DEVICE_CLASS_BATTERY from homeassistant.const import PERCENTAGE from homeassistant.helpers.entity import Entity from .const import NEATO_DOMAIN, NEATO_LOGIN, NEATO_ROBOTS, SCAN_INTERVAL_MINUTES _LOGGER = logging.getLogger(__name__) SCAN_INTERVAL = timedelta(minutes=SCAN_INTERVAL_MINUTES) BATTERY = "Battery" async def async_setup_entry(hass, entry, async_add_entities): """Set up the Neato sensor using config entry.""" dev = [] neato = hass.data.get(NEATO_LOGIN) for robot in hass.data[NEATO_ROBOTS]: dev.append(NeatoSensor(neato, robot)) if not dev: return _LOGGER.debug("Adding robots for sensors %s", dev) async_add_entities(dev, True) class NeatoSensor(Entity): """Neato sensor.""" def __init__(self, neato, robot): """Initialize Neato sensor.""" self.robot = robot self._available = False self._robot_name = f"{self.robot.name} {BATTERY}" self._robot_serial = self.robot.serial self._state = None def update(self): """Update Neato Sensor.""" try: self._state = self.robot.state except NeatoRobotException as ex: if self._available: _LOGGER.error( "Neato sensor connection error for '%s': %s", self.entity_id, ex ) self._state = None self._available = False return self._available = True _LOGGER.debug("self._state=%s", self._state) @property def name(self): """Return the name of this sensor.""" return self._robot_name @property def unique_id(self): """Return unique ID.""" return self._robot_serial @property def device_class(self): """Return the device class.""" return DEVICE_CLASS_BATTERY @property def available(self): """Return availability.""" return self._available @property def state(self): """Return the state.""" return self._state["details"]["charge"] @property def unit_of_measurement(self): """Return unit of measurement.""" return PERCENTAGE @property def device_info(self): """Device info for neato robot.""" return {"identifiers": {(NEATO_DOMAIN, self._robot_serial)}}
"""Test Hue init with multiple bridges.""" from unittest.mock import Mock, patch from aiohue.groups import Groups from aiohue.lights import Lights from aiohue.scenes import Scenes from aiohue.sensors import Sensors import pytest from homeassistant import config_entries from homeassistant.components import hue from homeassistant.components.hue import sensor_base as hue_sensor_base from homeassistant.setup import async_setup_component async def setup_component(hass): """Hue component.""" with patch.object(hue, "async_setup_entry", return_value=True): assert ( await async_setup_component( hass, hue.DOMAIN, {}, ) is True ) async def test_hue_activate_scene_both_responds( hass, mock_bridge1, mock_bridge2, mock_config_entry1, mock_config_entry2 ): """Test that makes both bridges successfully activate a scene.""" await setup_component(hass) await setup_bridge(hass, mock_bridge1, mock_config_entry1) await setup_bridge(hass, mock_bridge2, mock_config_entry2) with patch.object( mock_bridge1, "hue_activate_scene", return_value=None ) as mock_hue_activate_scene1, patch.object( mock_bridge2, "hue_activate_scene", return_value=None ) as mock_hue_activate_scene2: await hass.services.async_call( "hue", "hue_activate_scene", {"group_name": "group_2", "scene_name": "my_scene"}, blocking=True, ) mock_hue_activate_scene1.assert_called_once() mock_hue_activate_scene2.assert_called_once() async def test_hue_activate_scene_one_responds( hass, mock_bridge1, mock_bridge2, mock_config_entry1, mock_config_entry2 ): """Test that makes only one bridge successfully activate a scene.""" await setup_component(hass) await setup_bridge(hass, mock_bridge1, mock_config_entry1) await setup_bridge(hass, mock_bridge2, mock_config_entry2) with patch.object( mock_bridge1, "hue_activate_scene", return_value=None ) as mock_hue_activate_scene1, patch.object( mock_bridge2, "hue_activate_scene", return_value=False ) as mock_hue_activate_scene2: await hass.services.async_call( "hue", "hue_activate_scene", {"group_name": "group_2", "scene_name": "my_scene"}, blocking=True, ) mock_hue_activate_scene1.assert_called_once() mock_hue_activate_scene2.assert_called_once() async def test_hue_activate_scene_zero_responds( hass, mock_bridge1, mock_bridge2, mock_config_entry1, mock_config_entry2 ): """Test that makes no bridge successfully activate a scene.""" await setup_component(hass) await setup_bridge(hass, mock_bridge1, mock_config_entry1) await setup_bridge(hass, mock_bridge2, mock_config_entry2) with patch.object( mock_bridge1, "hue_activate_scene", return_value=False ) as mock_hue_activate_scene1, patch.object( mock_bridge2, "hue_activate_scene", return_value=False ) as mock_hue_activate_scene2: await hass.services.async_call( "hue", "hue_activate_scene", {"group_name": "group_2", "scene_name": "my_scene"}, blocking=True, ) # both were retried assert mock_hue_activate_scene1.call_count == 2 assert mock_hue_activate_scene2.call_count == 2 async def setup_bridge(hass, mock_bridge, config_entry): """Load the Hue light platform with the provided bridge.""" mock_bridge.config_entry = config_entry hass.data[hue.DOMAIN][config_entry.entry_id] = mock_bridge await hass.config_entries.async_forward_entry_setup(config_entry, "light") # To flush out the service call to update the group await hass.async_block_till_done() @pytest.fixture def mock_config_entry1(hass): """Mock a config entry.""" return create_config_entry() @pytest.fixture def mock_config_entry2(hass): """Mock a config entry.""" return create_config_entry() def create_config_entry(): 
"""Mock a config entry.""" return config_entries.ConfigEntry( 1, hue.DOMAIN, "Mock Title", {"host": "mock-host"}, "test", config_entries.CONN_CLASS_LOCAL_POLL, system_options={}, ) @pytest.fixture def mock_bridge1(hass): """Mock a Hue bridge.""" return create_mock_bridge(hass) @pytest.fixture def mock_bridge2(hass): """Mock a Hue bridge.""" return create_mock_bridge(hass) def create_mock_bridge(hass): """Create a mock Hue bridge.""" bridge = Mock( hass=hass, available=True, authorized=True, allow_unreachable=False, allow_groups=False, api=Mock(), reset_jobs=[], spec=hue.HueBridge, ) bridge.sensor_manager = hue_sensor_base.SensorManager(bridge) bridge.mock_requests = [] async def mock_request(method, path, **kwargs): kwargs["method"] = method kwargs["path"] = path bridge.mock_requests.append(kwargs) return {} async def async_request_call(task): await task() bridge.async_request_call = async_request_call bridge.api.config.apiversion = "9.9.9" bridge.api.lights = Lights({}, mock_request) bridge.api.groups = Groups({}, mock_request) bridge.api.sensors = Sensors({}, mock_request) bridge.api.scenes = Scenes({}, mock_request) return bridge
turbokongen/home-assistant
tests/components/hue/test_init_multiple_bridges.py
homeassistant/components/neato/sensor.py
"""Support for Synology DSM binary sensors.""" from typing import Dict from homeassistant.components.binary_sensor import BinarySensorEntity from homeassistant.config_entries import ConfigEntry from homeassistant.const import CONF_DISKS from homeassistant.helpers.typing import HomeAssistantType from . import SynologyDSMDeviceEntity, SynologyDSMDispatcherEntity from .const import ( DOMAIN, SECURITY_BINARY_SENSORS, STORAGE_DISK_BINARY_SENSORS, SYNO_API, UPGRADE_BINARY_SENSORS, ) async def async_setup_entry( hass: HomeAssistantType, entry: ConfigEntry, async_add_entities ) -> None: """Set up the Synology NAS binary sensor.""" api = hass.data[DOMAIN][entry.unique_id][SYNO_API] entities = [ SynoDSMSecurityBinarySensor( api, sensor_type, SECURITY_BINARY_SENSORS[sensor_type] ) for sensor_type in SECURITY_BINARY_SENSORS ] entities += [ SynoDSMUpgradeBinarySensor( api, sensor_type, UPGRADE_BINARY_SENSORS[sensor_type] ) for sensor_type in UPGRADE_BINARY_SENSORS ] # Handle all disks if api.storage.disks_ids: for disk in entry.data.get(CONF_DISKS, api.storage.disks_ids): entities += [ SynoDSMStorageBinarySensor( api, sensor_type, STORAGE_DISK_BINARY_SENSORS[sensor_type], disk ) for sensor_type in STORAGE_DISK_BINARY_SENSORS ] async_add_entities(entities) class SynoDSMSecurityBinarySensor(SynologyDSMDispatcherEntity, BinarySensorEntity): """Representation a Synology Security binary sensor.""" @property def is_on(self) -> bool: """Return the state.""" return getattr(self._api.security, self.entity_type) != "safe" @property def available(self) -> bool: """Return True if entity is available.""" return bool(self._api.security) @property def device_state_attributes(self) -> Dict[str, str]: """Return security checks details.""" return self._api.security.status_by_check class SynoDSMStorageBinarySensor(SynologyDSMDeviceEntity, BinarySensorEntity): """Representation a Synology Storage binary sensor.""" @property def is_on(self) -> bool: """Return the state.""" return getattr(self._api.storage, self.entity_type)(self._device_id) class SynoDSMUpgradeBinarySensor(SynologyDSMDispatcherEntity, BinarySensorEntity): """Representation a Synology Upgrade binary sensor.""" @property def is_on(self) -> bool: """Return the state.""" return getattr(self._api.upgrade, self.entity_type) @property def available(self) -> bool: """Return True if entity is available.""" return bool(self._api.upgrade)
"""Test Hue init with multiple bridges.""" from unittest.mock import Mock, patch from aiohue.groups import Groups from aiohue.lights import Lights from aiohue.scenes import Scenes from aiohue.sensors import Sensors import pytest from homeassistant import config_entries from homeassistant.components import hue from homeassistant.components.hue import sensor_base as hue_sensor_base from homeassistant.setup import async_setup_component async def setup_component(hass): """Hue component.""" with patch.object(hue, "async_setup_entry", return_value=True): assert ( await async_setup_component( hass, hue.DOMAIN, {}, ) is True ) async def test_hue_activate_scene_both_responds( hass, mock_bridge1, mock_bridge2, mock_config_entry1, mock_config_entry2 ): """Test that makes both bridges successfully activate a scene.""" await setup_component(hass) await setup_bridge(hass, mock_bridge1, mock_config_entry1) await setup_bridge(hass, mock_bridge2, mock_config_entry2) with patch.object( mock_bridge1, "hue_activate_scene", return_value=None ) as mock_hue_activate_scene1, patch.object( mock_bridge2, "hue_activate_scene", return_value=None ) as mock_hue_activate_scene2: await hass.services.async_call( "hue", "hue_activate_scene", {"group_name": "group_2", "scene_name": "my_scene"}, blocking=True, ) mock_hue_activate_scene1.assert_called_once() mock_hue_activate_scene2.assert_called_once() async def test_hue_activate_scene_one_responds( hass, mock_bridge1, mock_bridge2, mock_config_entry1, mock_config_entry2 ): """Test that makes only one bridge successfully activate a scene.""" await setup_component(hass) await setup_bridge(hass, mock_bridge1, mock_config_entry1) await setup_bridge(hass, mock_bridge2, mock_config_entry2) with patch.object( mock_bridge1, "hue_activate_scene", return_value=None ) as mock_hue_activate_scene1, patch.object( mock_bridge2, "hue_activate_scene", return_value=False ) as mock_hue_activate_scene2: await hass.services.async_call( "hue", "hue_activate_scene", {"group_name": "group_2", "scene_name": "my_scene"}, blocking=True, ) mock_hue_activate_scene1.assert_called_once() mock_hue_activate_scene2.assert_called_once() async def test_hue_activate_scene_zero_responds( hass, mock_bridge1, mock_bridge2, mock_config_entry1, mock_config_entry2 ): """Test that makes no bridge successfully activate a scene.""" await setup_component(hass) await setup_bridge(hass, mock_bridge1, mock_config_entry1) await setup_bridge(hass, mock_bridge2, mock_config_entry2) with patch.object( mock_bridge1, "hue_activate_scene", return_value=False ) as mock_hue_activate_scene1, patch.object( mock_bridge2, "hue_activate_scene", return_value=False ) as mock_hue_activate_scene2: await hass.services.async_call( "hue", "hue_activate_scene", {"group_name": "group_2", "scene_name": "my_scene"}, blocking=True, ) # both were retried assert mock_hue_activate_scene1.call_count == 2 assert mock_hue_activate_scene2.call_count == 2 async def setup_bridge(hass, mock_bridge, config_entry): """Load the Hue light platform with the provided bridge.""" mock_bridge.config_entry = config_entry hass.data[hue.DOMAIN][config_entry.entry_id] = mock_bridge await hass.config_entries.async_forward_entry_setup(config_entry, "light") # To flush out the service call to update the group await hass.async_block_till_done() @pytest.fixture def mock_config_entry1(hass): """Mock a config entry.""" return create_config_entry() @pytest.fixture def mock_config_entry2(hass): """Mock a config entry.""" return create_config_entry() def create_config_entry(): 
"""Mock a config entry.""" return config_entries.ConfigEntry( 1, hue.DOMAIN, "Mock Title", {"host": "mock-host"}, "test", config_entries.CONN_CLASS_LOCAL_POLL, system_options={}, ) @pytest.fixture def mock_bridge1(hass): """Mock a Hue bridge.""" return create_mock_bridge(hass) @pytest.fixture def mock_bridge2(hass): """Mock a Hue bridge.""" return create_mock_bridge(hass) def create_mock_bridge(hass): """Create a mock Hue bridge.""" bridge = Mock( hass=hass, available=True, authorized=True, allow_unreachable=False, allow_groups=False, api=Mock(), reset_jobs=[], spec=hue.HueBridge, ) bridge.sensor_manager = hue_sensor_base.SensorManager(bridge) bridge.mock_requests = [] async def mock_request(method, path, **kwargs): kwargs["method"] = method kwargs["path"] = path bridge.mock_requests.append(kwargs) return {} async def async_request_call(task): await task() bridge.async_request_call = async_request_call bridge.api.config.apiversion = "9.9.9" bridge.api.lights = Lights({}, mock_request) bridge.api.groups = Groups({}, mock_request) bridge.api.sensors = Sensors({}, mock_request) bridge.api.scenes = Scenes({}, mock_request) return bridge
turbokongen/home-assistant
tests/components/hue/test_init_multiple_bridges.py
homeassistant/components/synology_dsm/binary_sensor.py
"""Support for Powerview scenes from a Powerview hub.""" from typing import Any from aiopvapi.resources.scene import Scene as PvScene import voluptuous as vol from homeassistant.components.scene import Scene from homeassistant.config_entries import SOURCE_IMPORT from homeassistant.const import CONF_HOST, CONF_PLATFORM import homeassistant.helpers.config_validation as cv from .const import ( COORDINATOR, DEVICE_INFO, DOMAIN, HUB_ADDRESS, PV_API, PV_ROOM_DATA, PV_SCENE_DATA, ROOM_NAME_UNICODE, STATE_ATTRIBUTE_ROOM_NAME, ) from .entity import HDEntity PLATFORM_SCHEMA = vol.Schema( {vol.Required(CONF_PLATFORM): DOMAIN, vol.Required(HUB_ADDRESS): cv.string} ) async def async_setup_platform(hass, config, async_add_entities, discovery_info=None): """Import platform from yaml.""" hass.async_create_task( hass.config_entries.flow.async_init( DOMAIN, context={"source": SOURCE_IMPORT}, data={CONF_HOST: config[HUB_ADDRESS]}, ) ) async def async_setup_entry(hass, entry, async_add_entities): """Set up powerview scene entries.""" pv_data = hass.data[DOMAIN][entry.entry_id] room_data = pv_data[PV_ROOM_DATA] scene_data = pv_data[PV_SCENE_DATA] pv_request = pv_data[PV_API] coordinator = pv_data[COORDINATOR] device_info = pv_data[DEVICE_INFO] pvscenes = ( PowerViewScene( PvScene(raw_scene, pv_request), room_data, coordinator, device_info ) for scene_id, raw_scene in scene_data.items() ) async_add_entities(pvscenes) class PowerViewScene(HDEntity, Scene): """Representation of a Powerview scene.""" def __init__(self, scene, room_data, coordinator, device_info): """Initialize the scene.""" super().__init__(coordinator, device_info, scene.id) self._scene = scene self._room_name = room_data.get(scene.room_id, {}).get(ROOM_NAME_UNICODE, "") @property def name(self): """Return the name of the scene.""" return self._scene.name @property def device_state_attributes(self): """Return the state attributes.""" return {STATE_ATTRIBUTE_ROOM_NAME: self._room_name} @property def icon(self): """Icon to use in the frontend.""" return "mdi:blinds" async def async_activate(self, **kwargs: Any) -> None: """Activate scene. Try to get entities into requested state.""" await self._scene.activate()
"""Test Hue init with multiple bridges.""" from unittest.mock import Mock, patch from aiohue.groups import Groups from aiohue.lights import Lights from aiohue.scenes import Scenes from aiohue.sensors import Sensors import pytest from homeassistant import config_entries from homeassistant.components import hue from homeassistant.components.hue import sensor_base as hue_sensor_base from homeassistant.setup import async_setup_component async def setup_component(hass): """Hue component.""" with patch.object(hue, "async_setup_entry", return_value=True): assert ( await async_setup_component( hass, hue.DOMAIN, {}, ) is True ) async def test_hue_activate_scene_both_responds( hass, mock_bridge1, mock_bridge2, mock_config_entry1, mock_config_entry2 ): """Test that makes both bridges successfully activate a scene.""" await setup_component(hass) await setup_bridge(hass, mock_bridge1, mock_config_entry1) await setup_bridge(hass, mock_bridge2, mock_config_entry2) with patch.object( mock_bridge1, "hue_activate_scene", return_value=None ) as mock_hue_activate_scene1, patch.object( mock_bridge2, "hue_activate_scene", return_value=None ) as mock_hue_activate_scene2: await hass.services.async_call( "hue", "hue_activate_scene", {"group_name": "group_2", "scene_name": "my_scene"}, blocking=True, ) mock_hue_activate_scene1.assert_called_once() mock_hue_activate_scene2.assert_called_once() async def test_hue_activate_scene_one_responds( hass, mock_bridge1, mock_bridge2, mock_config_entry1, mock_config_entry2 ): """Test that makes only one bridge successfully activate a scene.""" await setup_component(hass) await setup_bridge(hass, mock_bridge1, mock_config_entry1) await setup_bridge(hass, mock_bridge2, mock_config_entry2) with patch.object( mock_bridge1, "hue_activate_scene", return_value=None ) as mock_hue_activate_scene1, patch.object( mock_bridge2, "hue_activate_scene", return_value=False ) as mock_hue_activate_scene2: await hass.services.async_call( "hue", "hue_activate_scene", {"group_name": "group_2", "scene_name": "my_scene"}, blocking=True, ) mock_hue_activate_scene1.assert_called_once() mock_hue_activate_scene2.assert_called_once() async def test_hue_activate_scene_zero_responds( hass, mock_bridge1, mock_bridge2, mock_config_entry1, mock_config_entry2 ): """Test that makes no bridge successfully activate a scene.""" await setup_component(hass) await setup_bridge(hass, mock_bridge1, mock_config_entry1) await setup_bridge(hass, mock_bridge2, mock_config_entry2) with patch.object( mock_bridge1, "hue_activate_scene", return_value=False ) as mock_hue_activate_scene1, patch.object( mock_bridge2, "hue_activate_scene", return_value=False ) as mock_hue_activate_scene2: await hass.services.async_call( "hue", "hue_activate_scene", {"group_name": "group_2", "scene_name": "my_scene"}, blocking=True, ) # both were retried assert mock_hue_activate_scene1.call_count == 2 assert mock_hue_activate_scene2.call_count == 2 async def setup_bridge(hass, mock_bridge, config_entry): """Load the Hue light platform with the provided bridge.""" mock_bridge.config_entry = config_entry hass.data[hue.DOMAIN][config_entry.entry_id] = mock_bridge await hass.config_entries.async_forward_entry_setup(config_entry, "light") # To flush out the service call to update the group await hass.async_block_till_done() @pytest.fixture def mock_config_entry1(hass): """Mock a config entry.""" return create_config_entry() @pytest.fixture def mock_config_entry2(hass): """Mock a config entry.""" return create_config_entry() def create_config_entry(): 
"""Mock a config entry.""" return config_entries.ConfigEntry( 1, hue.DOMAIN, "Mock Title", {"host": "mock-host"}, "test", config_entries.CONN_CLASS_LOCAL_POLL, system_options={}, ) @pytest.fixture def mock_bridge1(hass): """Mock a Hue bridge.""" return create_mock_bridge(hass) @pytest.fixture def mock_bridge2(hass): """Mock a Hue bridge.""" return create_mock_bridge(hass) def create_mock_bridge(hass): """Create a mock Hue bridge.""" bridge = Mock( hass=hass, available=True, authorized=True, allow_unreachable=False, allow_groups=False, api=Mock(), reset_jobs=[], spec=hue.HueBridge, ) bridge.sensor_manager = hue_sensor_base.SensorManager(bridge) bridge.mock_requests = [] async def mock_request(method, path, **kwargs): kwargs["method"] = method kwargs["path"] = path bridge.mock_requests.append(kwargs) return {} async def async_request_call(task): await task() bridge.async_request_call = async_request_call bridge.api.config.apiversion = "9.9.9" bridge.api.lights = Lights({}, mock_request) bridge.api.groups = Groups({}, mock_request) bridge.api.sensors = Sensors({}, mock_request) bridge.api.scenes = Scenes({}, mock_request) return bridge
turbokongen/home-assistant
tests/components/hue/test_init_multiple_bridges.py
homeassistant/components/hunterdouglas_powerview/scene.py
"""The Global Disaster Alert and Coordination System (GDACS) integration.""" import asyncio from datetime import timedelta import logging from aio_georss_gdacs import GdacsFeedManager import voluptuous as vol from homeassistant.config_entries import SOURCE_IMPORT from homeassistant.const import ( CONF_LATITUDE, CONF_LONGITUDE, CONF_RADIUS, CONF_SCAN_INTERVAL, CONF_UNIT_SYSTEM_IMPERIAL, LENGTH_MILES, ) from homeassistant.core import callback from homeassistant.helpers import aiohttp_client, config_validation as cv from homeassistant.helpers.dispatcher import async_dispatcher_send from homeassistant.helpers.event import async_track_time_interval from homeassistant.util.unit_system import METRIC_SYSTEM from .const import ( CONF_CATEGORIES, DEFAULT_RADIUS, DEFAULT_SCAN_INTERVAL, DOMAIN, FEED, PLATFORMS, VALID_CATEGORIES, ) _LOGGER = logging.getLogger(__name__) CONFIG_SCHEMA = vol.Schema( { DOMAIN: vol.Schema( { vol.Inclusive(CONF_LATITUDE, "coordinates"): cv.latitude, vol.Inclusive(CONF_LONGITUDE, "coordinates"): cv.longitude, vol.Optional(CONF_RADIUS, default=DEFAULT_RADIUS): vol.Coerce(float), vol.Optional( CONF_SCAN_INTERVAL, default=DEFAULT_SCAN_INTERVAL ): cv.time_period, vol.Optional(CONF_CATEGORIES, default=[]): vol.All( cv.ensure_list, [vol.In(VALID_CATEGORIES)] ), } ) }, extra=vol.ALLOW_EXTRA, ) async def async_setup(hass, config): """Set up the GDACS component.""" if DOMAIN not in config: return True conf = config[DOMAIN] latitude = conf.get(CONF_LATITUDE, hass.config.latitude) longitude = conf.get(CONF_LONGITUDE, hass.config.longitude) scan_interval = conf[CONF_SCAN_INTERVAL] categories = conf[CONF_CATEGORIES] hass.async_create_task( hass.config_entries.flow.async_init( DOMAIN, context={"source": SOURCE_IMPORT}, data={ CONF_LATITUDE: latitude, CONF_LONGITUDE: longitude, CONF_RADIUS: conf[CONF_RADIUS], CONF_SCAN_INTERVAL: scan_interval, CONF_CATEGORIES: categories, }, ) ) return True async def async_setup_entry(hass, config_entry): """Set up the GDACS component as config entry.""" hass.data.setdefault(DOMAIN, {}) feeds = hass.data[DOMAIN].setdefault(FEED, {}) radius = config_entry.data[CONF_RADIUS] if hass.config.units.name == CONF_UNIT_SYSTEM_IMPERIAL: radius = METRIC_SYSTEM.length(radius, LENGTH_MILES) # Create feed entity manager for all platforms. 
manager = GdacsFeedEntityManager(hass, config_entry, radius) feeds[config_entry.entry_id] = manager _LOGGER.debug("Feed entity manager added for %s", config_entry.entry_id) await manager.async_init() return True async def async_unload_entry(hass, config_entry): """Unload a GDACS component config entry.""" manager = hass.data[DOMAIN][FEED].pop(config_entry.entry_id) await manager.async_stop() await asyncio.wait( [ hass.config_entries.async_forward_entry_unload(config_entry, domain) for domain in PLATFORMS ] ) return True class GdacsFeedEntityManager: """Feed Entity Manager for GDACS feed.""" def __init__(self, hass, config_entry, radius_in_km): """Initialize the Feed Entity Manager.""" self._hass = hass self._config_entry = config_entry coordinates = ( config_entry.data[CONF_LATITUDE], config_entry.data[CONF_LONGITUDE], ) categories = config_entry.data[CONF_CATEGORIES] websession = aiohttp_client.async_get_clientsession(hass) self._feed_manager = GdacsFeedManager( websession, self._generate_entity, self._update_entity, self._remove_entity, coordinates, filter_radius=radius_in_km, filter_categories=categories, status_async_callback=self._status_update, ) self._config_entry_id = config_entry.entry_id self._scan_interval = timedelta(seconds=config_entry.data[CONF_SCAN_INTERVAL]) self._track_time_remove_callback = None self._status_info = None self.listeners = [] async def async_init(self): """Schedule initial and regular updates based on configured time interval.""" for domain in PLATFORMS: self._hass.async_create_task( self._hass.config_entries.async_forward_entry_setup( self._config_entry, domain ) ) async def update(event_time): """Update.""" await self.async_update() # Trigger updates at regular intervals. self._track_time_remove_callback = async_track_time_interval( self._hass, update, self._scan_interval ) _LOGGER.debug("Feed entity manager initialized") async def async_update(self): """Refresh data.""" await self._feed_manager.update() _LOGGER.debug("Feed entity manager updated") async def async_stop(self): """Stop this feed entity manager from refreshing.""" for unsub_dispatcher in self.listeners: unsub_dispatcher() self.listeners = [] if self._track_time_remove_callback: self._track_time_remove_callback() _LOGGER.debug("Feed entity manager stopped") @callback def async_event_new_entity(self): """Return manager specific event to signal new entity.""" return f"gdacs_new_geolocation_{self._config_entry_id}" def get_entry(self, external_id): """Get feed entry by external id.""" return self._feed_manager.feed_entries.get(external_id) def status_info(self): """Return latest status update info received.""" return self._status_info async def _generate_entity(self, external_id): """Generate new entity.""" async_dispatcher_send( self._hass, self.async_event_new_entity(), self, self._config_entry.unique_id, external_id, ) async def _update_entity(self, external_id): """Update entity.""" async_dispatcher_send(self._hass, f"gdacs_update_{external_id}") async def _remove_entity(self, external_id): """Remove entity.""" async_dispatcher_send(self._hass, f"gdacs_delete_{external_id}") async def _status_update(self, status_info): """Propagate status update.""" _LOGGER.debug("Status update received: %s", status_info) self._status_info = status_info async_dispatcher_send(self._hass, f"gdacs_status_{self._config_entry_id}")
"""Test Hue init with multiple bridges.""" from unittest.mock import Mock, patch from aiohue.groups import Groups from aiohue.lights import Lights from aiohue.scenes import Scenes from aiohue.sensors import Sensors import pytest from homeassistant import config_entries from homeassistant.components import hue from homeassistant.components.hue import sensor_base as hue_sensor_base from homeassistant.setup import async_setup_component async def setup_component(hass): """Hue component.""" with patch.object(hue, "async_setup_entry", return_value=True): assert ( await async_setup_component( hass, hue.DOMAIN, {}, ) is True ) async def test_hue_activate_scene_both_responds( hass, mock_bridge1, mock_bridge2, mock_config_entry1, mock_config_entry2 ): """Test that makes both bridges successfully activate a scene.""" await setup_component(hass) await setup_bridge(hass, mock_bridge1, mock_config_entry1) await setup_bridge(hass, mock_bridge2, mock_config_entry2) with patch.object( mock_bridge1, "hue_activate_scene", return_value=None ) as mock_hue_activate_scene1, patch.object( mock_bridge2, "hue_activate_scene", return_value=None ) as mock_hue_activate_scene2: await hass.services.async_call( "hue", "hue_activate_scene", {"group_name": "group_2", "scene_name": "my_scene"}, blocking=True, ) mock_hue_activate_scene1.assert_called_once() mock_hue_activate_scene2.assert_called_once() async def test_hue_activate_scene_one_responds( hass, mock_bridge1, mock_bridge2, mock_config_entry1, mock_config_entry2 ): """Test that makes only one bridge successfully activate a scene.""" await setup_component(hass) await setup_bridge(hass, mock_bridge1, mock_config_entry1) await setup_bridge(hass, mock_bridge2, mock_config_entry2) with patch.object( mock_bridge1, "hue_activate_scene", return_value=None ) as mock_hue_activate_scene1, patch.object( mock_bridge2, "hue_activate_scene", return_value=False ) as mock_hue_activate_scene2: await hass.services.async_call( "hue", "hue_activate_scene", {"group_name": "group_2", "scene_name": "my_scene"}, blocking=True, ) mock_hue_activate_scene1.assert_called_once() mock_hue_activate_scene2.assert_called_once() async def test_hue_activate_scene_zero_responds( hass, mock_bridge1, mock_bridge2, mock_config_entry1, mock_config_entry2 ): """Test that makes no bridge successfully activate a scene.""" await setup_component(hass) await setup_bridge(hass, mock_bridge1, mock_config_entry1) await setup_bridge(hass, mock_bridge2, mock_config_entry2) with patch.object( mock_bridge1, "hue_activate_scene", return_value=False ) as mock_hue_activate_scene1, patch.object( mock_bridge2, "hue_activate_scene", return_value=False ) as mock_hue_activate_scene2: await hass.services.async_call( "hue", "hue_activate_scene", {"group_name": "group_2", "scene_name": "my_scene"}, blocking=True, ) # both were retried assert mock_hue_activate_scene1.call_count == 2 assert mock_hue_activate_scene2.call_count == 2 async def setup_bridge(hass, mock_bridge, config_entry): """Load the Hue light platform with the provided bridge.""" mock_bridge.config_entry = config_entry hass.data[hue.DOMAIN][config_entry.entry_id] = mock_bridge await hass.config_entries.async_forward_entry_setup(config_entry, "light") # To flush out the service call to update the group await hass.async_block_till_done() @pytest.fixture def mock_config_entry1(hass): """Mock a config entry.""" return create_config_entry() @pytest.fixture def mock_config_entry2(hass): """Mock a config entry.""" return create_config_entry() def create_config_entry(): 
"""Mock a config entry.""" return config_entries.ConfigEntry( 1, hue.DOMAIN, "Mock Title", {"host": "mock-host"}, "test", config_entries.CONN_CLASS_LOCAL_POLL, system_options={}, ) @pytest.fixture def mock_bridge1(hass): """Mock a Hue bridge.""" return create_mock_bridge(hass) @pytest.fixture def mock_bridge2(hass): """Mock a Hue bridge.""" return create_mock_bridge(hass) def create_mock_bridge(hass): """Create a mock Hue bridge.""" bridge = Mock( hass=hass, available=True, authorized=True, allow_unreachable=False, allow_groups=False, api=Mock(), reset_jobs=[], spec=hue.HueBridge, ) bridge.sensor_manager = hue_sensor_base.SensorManager(bridge) bridge.mock_requests = [] async def mock_request(method, path, **kwargs): kwargs["method"] = method kwargs["path"] = path bridge.mock_requests.append(kwargs) return {} async def async_request_call(task): await task() bridge.async_request_call = async_request_call bridge.api.config.apiversion = "9.9.9" bridge.api.lights = Lights({}, mock_request) bridge.api.groups = Groups({}, mock_request) bridge.api.sensors = Sensors({}, mock_request) bridge.api.scenes = Scenes({}, mock_request) return bridge
turbokongen/home-assistant
tests/components/hue/test_init_multiple_bridges.py
homeassistant/components/gdacs/__init__.py
"""Support for Nexia / Trane XL Thermostats.""" from homeassistant.components.binary_sensor import BinarySensorEntity from .const import DOMAIN, NEXIA_DEVICE, UPDATE_COORDINATOR from .entity import NexiaThermostatEntity async def async_setup_entry(hass, config_entry, async_add_entities): """Set up sensors for a Nexia device.""" nexia_data = hass.data[DOMAIN][config_entry.entry_id] nexia_home = nexia_data[NEXIA_DEVICE] coordinator = nexia_data[UPDATE_COORDINATOR] entities = [] for thermostat_id in nexia_home.get_thermostat_ids(): thermostat = nexia_home.get_thermostat_by_id(thermostat_id) entities.append( NexiaBinarySensor( coordinator, thermostat, "is_blower_active", "Blower Active" ) ) if thermostat.has_emergency_heat(): entities.append( NexiaBinarySensor( coordinator, thermostat, "is_emergency_heat_active", "Emergency Heat Active", ) ) async_add_entities(entities, True) class NexiaBinarySensor(NexiaThermostatEntity, BinarySensorEntity): """Provices Nexia BinarySensor support.""" def __init__(self, coordinator, thermostat, sensor_call, sensor_name): """Initialize the nexia sensor.""" super().__init__( coordinator, thermostat, name=f"{thermostat.get_name()} {sensor_name}", unique_id=f"{thermostat.thermostat_id}_{sensor_call}", ) self._call = sensor_call self._state = None @property def is_on(self): """Return the status of the sensor.""" return getattr(self._thermostat, self._call)()
"""Test Hue init with multiple bridges.""" from unittest.mock import Mock, patch from aiohue.groups import Groups from aiohue.lights import Lights from aiohue.scenes import Scenes from aiohue.sensors import Sensors import pytest from homeassistant import config_entries from homeassistant.components import hue from homeassistant.components.hue import sensor_base as hue_sensor_base from homeassistant.setup import async_setup_component async def setup_component(hass): """Hue component.""" with patch.object(hue, "async_setup_entry", return_value=True): assert ( await async_setup_component( hass, hue.DOMAIN, {}, ) is True ) async def test_hue_activate_scene_both_responds( hass, mock_bridge1, mock_bridge2, mock_config_entry1, mock_config_entry2 ): """Test that makes both bridges successfully activate a scene.""" await setup_component(hass) await setup_bridge(hass, mock_bridge1, mock_config_entry1) await setup_bridge(hass, mock_bridge2, mock_config_entry2) with patch.object( mock_bridge1, "hue_activate_scene", return_value=None ) as mock_hue_activate_scene1, patch.object( mock_bridge2, "hue_activate_scene", return_value=None ) as mock_hue_activate_scene2: await hass.services.async_call( "hue", "hue_activate_scene", {"group_name": "group_2", "scene_name": "my_scene"}, blocking=True, ) mock_hue_activate_scene1.assert_called_once() mock_hue_activate_scene2.assert_called_once() async def test_hue_activate_scene_one_responds( hass, mock_bridge1, mock_bridge2, mock_config_entry1, mock_config_entry2 ): """Test that makes only one bridge successfully activate a scene.""" await setup_component(hass) await setup_bridge(hass, mock_bridge1, mock_config_entry1) await setup_bridge(hass, mock_bridge2, mock_config_entry2) with patch.object( mock_bridge1, "hue_activate_scene", return_value=None ) as mock_hue_activate_scene1, patch.object( mock_bridge2, "hue_activate_scene", return_value=False ) as mock_hue_activate_scene2: await hass.services.async_call( "hue", "hue_activate_scene", {"group_name": "group_2", "scene_name": "my_scene"}, blocking=True, ) mock_hue_activate_scene1.assert_called_once() mock_hue_activate_scene2.assert_called_once() async def test_hue_activate_scene_zero_responds( hass, mock_bridge1, mock_bridge2, mock_config_entry1, mock_config_entry2 ): """Test that makes no bridge successfully activate a scene.""" await setup_component(hass) await setup_bridge(hass, mock_bridge1, mock_config_entry1) await setup_bridge(hass, mock_bridge2, mock_config_entry2) with patch.object( mock_bridge1, "hue_activate_scene", return_value=False ) as mock_hue_activate_scene1, patch.object( mock_bridge2, "hue_activate_scene", return_value=False ) as mock_hue_activate_scene2: await hass.services.async_call( "hue", "hue_activate_scene", {"group_name": "group_2", "scene_name": "my_scene"}, blocking=True, ) # both were retried assert mock_hue_activate_scene1.call_count == 2 assert mock_hue_activate_scene2.call_count == 2 async def setup_bridge(hass, mock_bridge, config_entry): """Load the Hue light platform with the provided bridge.""" mock_bridge.config_entry = config_entry hass.data[hue.DOMAIN][config_entry.entry_id] = mock_bridge await hass.config_entries.async_forward_entry_setup(config_entry, "light") # To flush out the service call to update the group await hass.async_block_till_done() @pytest.fixture def mock_config_entry1(hass): """Mock a config entry.""" return create_config_entry() @pytest.fixture def mock_config_entry2(hass): """Mock a config entry.""" return create_config_entry() def create_config_entry(): 
"""Mock a config entry.""" return config_entries.ConfigEntry( 1, hue.DOMAIN, "Mock Title", {"host": "mock-host"}, "test", config_entries.CONN_CLASS_LOCAL_POLL, system_options={}, ) @pytest.fixture def mock_bridge1(hass): """Mock a Hue bridge.""" return create_mock_bridge(hass) @pytest.fixture def mock_bridge2(hass): """Mock a Hue bridge.""" return create_mock_bridge(hass) def create_mock_bridge(hass): """Create a mock Hue bridge.""" bridge = Mock( hass=hass, available=True, authorized=True, allow_unreachable=False, allow_groups=False, api=Mock(), reset_jobs=[], spec=hue.HueBridge, ) bridge.sensor_manager = hue_sensor_base.SensorManager(bridge) bridge.mock_requests = [] async def mock_request(method, path, **kwargs): kwargs["method"] = method kwargs["path"] = path bridge.mock_requests.append(kwargs) return {} async def async_request_call(task): await task() bridge.async_request_call = async_request_call bridge.api.config.apiversion = "9.9.9" bridge.api.lights = Lights({}, mock_request) bridge.api.groups = Groups({}, mock_request) bridge.api.sensors = Sensors({}, mock_request) bridge.api.scenes = Scenes({}, mock_request) return bridge
turbokongen/home-assistant
tests/components/hue/test_init_multiple_bridges.py
homeassistant/components/nexia/binary_sensor.py
"""Support for Twilio.""" from twilio.rest import Client from twilio.twiml import TwiML import voluptuous as vol from homeassistant.const import CONF_WEBHOOK_ID from homeassistant.helpers import config_entry_flow import homeassistant.helpers.config_validation as cv from .const import DOMAIN CONF_ACCOUNT_SID = "account_sid" CONF_AUTH_TOKEN = "auth_token" DATA_TWILIO = DOMAIN RECEIVED_DATA = f"{DOMAIN}_data_received" CONFIG_SCHEMA = vol.Schema( { vol.Optional(DOMAIN): vol.Schema( { vol.Required(CONF_ACCOUNT_SID): cv.string, vol.Required(CONF_AUTH_TOKEN): cv.string, } ) }, extra=vol.ALLOW_EXTRA, ) async def async_setup(hass, config): """Set up the Twilio component.""" if DOMAIN not in config: return True conf = config[DOMAIN] hass.data[DATA_TWILIO] = Client( conf.get(CONF_ACCOUNT_SID), conf.get(CONF_AUTH_TOKEN) ) return True async def handle_webhook(hass, webhook_id, request): """Handle incoming webhook from Twilio for inbound messages and calls.""" data = dict(await request.post()) data["webhook_id"] = webhook_id hass.bus.async_fire(RECEIVED_DATA, dict(data)) return TwiML().to_xml() async def async_setup_entry(hass, entry): """Configure based on config entry.""" hass.components.webhook.async_register( DOMAIN, "Twilio", entry.data[CONF_WEBHOOK_ID], handle_webhook ) return True async def async_unload_entry(hass, entry): """Unload a config entry.""" hass.components.webhook.async_unregister(entry.data[CONF_WEBHOOK_ID]) return True async_remove_entry = config_entry_flow.webhook_async_remove_entry
"""Test Hue init with multiple bridges.""" from unittest.mock import Mock, patch from aiohue.groups import Groups from aiohue.lights import Lights from aiohue.scenes import Scenes from aiohue.sensors import Sensors import pytest from homeassistant import config_entries from homeassistant.components import hue from homeassistant.components.hue import sensor_base as hue_sensor_base from homeassistant.setup import async_setup_component async def setup_component(hass): """Hue component.""" with patch.object(hue, "async_setup_entry", return_value=True): assert ( await async_setup_component( hass, hue.DOMAIN, {}, ) is True ) async def test_hue_activate_scene_both_responds( hass, mock_bridge1, mock_bridge2, mock_config_entry1, mock_config_entry2 ): """Test that makes both bridges successfully activate a scene.""" await setup_component(hass) await setup_bridge(hass, mock_bridge1, mock_config_entry1) await setup_bridge(hass, mock_bridge2, mock_config_entry2) with patch.object( mock_bridge1, "hue_activate_scene", return_value=None ) as mock_hue_activate_scene1, patch.object( mock_bridge2, "hue_activate_scene", return_value=None ) as mock_hue_activate_scene2: await hass.services.async_call( "hue", "hue_activate_scene", {"group_name": "group_2", "scene_name": "my_scene"}, blocking=True, ) mock_hue_activate_scene1.assert_called_once() mock_hue_activate_scene2.assert_called_once() async def test_hue_activate_scene_one_responds( hass, mock_bridge1, mock_bridge2, mock_config_entry1, mock_config_entry2 ): """Test that makes only one bridge successfully activate a scene.""" await setup_component(hass) await setup_bridge(hass, mock_bridge1, mock_config_entry1) await setup_bridge(hass, mock_bridge2, mock_config_entry2) with patch.object( mock_bridge1, "hue_activate_scene", return_value=None ) as mock_hue_activate_scene1, patch.object( mock_bridge2, "hue_activate_scene", return_value=False ) as mock_hue_activate_scene2: await hass.services.async_call( "hue", "hue_activate_scene", {"group_name": "group_2", "scene_name": "my_scene"}, blocking=True, ) mock_hue_activate_scene1.assert_called_once() mock_hue_activate_scene2.assert_called_once() async def test_hue_activate_scene_zero_responds( hass, mock_bridge1, mock_bridge2, mock_config_entry1, mock_config_entry2 ): """Test that makes no bridge successfully activate a scene.""" await setup_component(hass) await setup_bridge(hass, mock_bridge1, mock_config_entry1) await setup_bridge(hass, mock_bridge2, mock_config_entry2) with patch.object( mock_bridge1, "hue_activate_scene", return_value=False ) as mock_hue_activate_scene1, patch.object( mock_bridge2, "hue_activate_scene", return_value=False ) as mock_hue_activate_scene2: await hass.services.async_call( "hue", "hue_activate_scene", {"group_name": "group_2", "scene_name": "my_scene"}, blocking=True, ) # both were retried assert mock_hue_activate_scene1.call_count == 2 assert mock_hue_activate_scene2.call_count == 2 async def setup_bridge(hass, mock_bridge, config_entry): """Load the Hue light platform with the provided bridge.""" mock_bridge.config_entry = config_entry hass.data[hue.DOMAIN][config_entry.entry_id] = mock_bridge await hass.config_entries.async_forward_entry_setup(config_entry, "light") # To flush out the service call to update the group await hass.async_block_till_done() @pytest.fixture def mock_config_entry1(hass): """Mock a config entry.""" return create_config_entry() @pytest.fixture def mock_config_entry2(hass): """Mock a config entry.""" return create_config_entry() def create_config_entry(): 
"""Mock a config entry.""" return config_entries.ConfigEntry( 1, hue.DOMAIN, "Mock Title", {"host": "mock-host"}, "test", config_entries.CONN_CLASS_LOCAL_POLL, system_options={}, ) @pytest.fixture def mock_bridge1(hass): """Mock a Hue bridge.""" return create_mock_bridge(hass) @pytest.fixture def mock_bridge2(hass): """Mock a Hue bridge.""" return create_mock_bridge(hass) def create_mock_bridge(hass): """Create a mock Hue bridge.""" bridge = Mock( hass=hass, available=True, authorized=True, allow_unreachable=False, allow_groups=False, api=Mock(), reset_jobs=[], spec=hue.HueBridge, ) bridge.sensor_manager = hue_sensor_base.SensorManager(bridge) bridge.mock_requests = [] async def mock_request(method, path, **kwargs): kwargs["method"] = method kwargs["path"] = path bridge.mock_requests.append(kwargs) return {} async def async_request_call(task): await task() bridge.async_request_call = async_request_call bridge.api.config.apiversion = "9.9.9" bridge.api.lights = Lights({}, mock_request) bridge.api.groups = Groups({}, mock_request) bridge.api.sensors = Sensors({}, mock_request) bridge.api.scenes = Scenes({}, mock_request) return bridge
turbokongen/home-assistant
tests/components/hue/test_init_multiple_bridges.py
homeassistant/components/twilio/__init__.py
"""Support for Lupusec Security System switches.""" from datetime import timedelta import lupupy.constants as CONST from homeassistant.components.switch import SwitchEntity from . import DOMAIN as LUPUSEC_DOMAIN, LupusecDevice SCAN_INTERVAL = timedelta(seconds=2) def setup_platform(hass, config, add_entities, discovery_info=None): """Set up Lupusec switch devices.""" if discovery_info is None: return data = hass.data[LUPUSEC_DOMAIN] devices = [] for device in data.lupusec.get_devices(generic_type=CONST.TYPE_SWITCH): devices.append(LupusecSwitch(data, device)) add_entities(devices) class LupusecSwitch(LupusecDevice, SwitchEntity): """Representation of a Lupusec switch.""" def turn_on(self, **kwargs): """Turn on the device.""" self._device.switch_on() def turn_off(self, **kwargs): """Turn off the device.""" self._device.switch_off() @property def is_on(self): """Return true if device is on.""" return self._device.is_on
"""Test Hue init with multiple bridges.""" from unittest.mock import Mock, patch from aiohue.groups import Groups from aiohue.lights import Lights from aiohue.scenes import Scenes from aiohue.sensors import Sensors import pytest from homeassistant import config_entries from homeassistant.components import hue from homeassistant.components.hue import sensor_base as hue_sensor_base from homeassistant.setup import async_setup_component async def setup_component(hass): """Hue component.""" with patch.object(hue, "async_setup_entry", return_value=True): assert ( await async_setup_component( hass, hue.DOMAIN, {}, ) is True ) async def test_hue_activate_scene_both_responds( hass, mock_bridge1, mock_bridge2, mock_config_entry1, mock_config_entry2 ): """Test that makes both bridges successfully activate a scene.""" await setup_component(hass) await setup_bridge(hass, mock_bridge1, mock_config_entry1) await setup_bridge(hass, mock_bridge2, mock_config_entry2) with patch.object( mock_bridge1, "hue_activate_scene", return_value=None ) as mock_hue_activate_scene1, patch.object( mock_bridge2, "hue_activate_scene", return_value=None ) as mock_hue_activate_scene2: await hass.services.async_call( "hue", "hue_activate_scene", {"group_name": "group_2", "scene_name": "my_scene"}, blocking=True, ) mock_hue_activate_scene1.assert_called_once() mock_hue_activate_scene2.assert_called_once() async def test_hue_activate_scene_one_responds( hass, mock_bridge1, mock_bridge2, mock_config_entry1, mock_config_entry2 ): """Test that makes only one bridge successfully activate a scene.""" await setup_component(hass) await setup_bridge(hass, mock_bridge1, mock_config_entry1) await setup_bridge(hass, mock_bridge2, mock_config_entry2) with patch.object( mock_bridge1, "hue_activate_scene", return_value=None ) as mock_hue_activate_scene1, patch.object( mock_bridge2, "hue_activate_scene", return_value=False ) as mock_hue_activate_scene2: await hass.services.async_call( "hue", "hue_activate_scene", {"group_name": "group_2", "scene_name": "my_scene"}, blocking=True, ) mock_hue_activate_scene1.assert_called_once() mock_hue_activate_scene2.assert_called_once() async def test_hue_activate_scene_zero_responds( hass, mock_bridge1, mock_bridge2, mock_config_entry1, mock_config_entry2 ): """Test that makes no bridge successfully activate a scene.""" await setup_component(hass) await setup_bridge(hass, mock_bridge1, mock_config_entry1) await setup_bridge(hass, mock_bridge2, mock_config_entry2) with patch.object( mock_bridge1, "hue_activate_scene", return_value=False ) as mock_hue_activate_scene1, patch.object( mock_bridge2, "hue_activate_scene", return_value=False ) as mock_hue_activate_scene2: await hass.services.async_call( "hue", "hue_activate_scene", {"group_name": "group_2", "scene_name": "my_scene"}, blocking=True, ) # both were retried assert mock_hue_activate_scene1.call_count == 2 assert mock_hue_activate_scene2.call_count == 2 async def setup_bridge(hass, mock_bridge, config_entry): """Load the Hue light platform with the provided bridge.""" mock_bridge.config_entry = config_entry hass.data[hue.DOMAIN][config_entry.entry_id] = mock_bridge await hass.config_entries.async_forward_entry_setup(config_entry, "light") # To flush out the service call to update the group await hass.async_block_till_done() @pytest.fixture def mock_config_entry1(hass): """Mock a config entry.""" return create_config_entry() @pytest.fixture def mock_config_entry2(hass): """Mock a config entry.""" return create_config_entry() def create_config_entry(): 
"""Mock a config entry.""" return config_entries.ConfigEntry( 1, hue.DOMAIN, "Mock Title", {"host": "mock-host"}, "test", config_entries.CONN_CLASS_LOCAL_POLL, system_options={}, ) @pytest.fixture def mock_bridge1(hass): """Mock a Hue bridge.""" return create_mock_bridge(hass) @pytest.fixture def mock_bridge2(hass): """Mock a Hue bridge.""" return create_mock_bridge(hass) def create_mock_bridge(hass): """Create a mock Hue bridge.""" bridge = Mock( hass=hass, available=True, authorized=True, allow_unreachable=False, allow_groups=False, api=Mock(), reset_jobs=[], spec=hue.HueBridge, ) bridge.sensor_manager = hue_sensor_base.SensorManager(bridge) bridge.mock_requests = [] async def mock_request(method, path, **kwargs): kwargs["method"] = method kwargs["path"] = path bridge.mock_requests.append(kwargs) return {} async def async_request_call(task): await task() bridge.async_request_call = async_request_call bridge.api.config.apiversion = "9.9.9" bridge.api.lights = Lights({}, mock_request) bridge.api.groups = Groups({}, mock_request) bridge.api.sensors = Sensors({}, mock_request) bridge.api.scenes = Scenes({}, mock_request) return bridge
turbokongen/home-assistant
tests/components/hue/test_init_multiple_bridges.py
homeassistant/components/lupusec/switch.py
"""Reproduce an input boolean state.""" import asyncio import logging from typing import Any, Dict, Iterable, Optional from homeassistant.const import ( ATTR_ENTITY_ID, SERVICE_TURN_OFF, SERVICE_TURN_ON, STATE_OFF, STATE_ON, ) from homeassistant.core import Context, State from homeassistant.helpers.typing import HomeAssistantType from . import DOMAIN _LOGGER = logging.getLogger(__name__) async def _async_reproduce_states( hass: HomeAssistantType, state: State, *, context: Optional[Context] = None, reproduce_options: Optional[Dict[str, Any]] = None, ) -> None: """Reproduce input boolean states.""" cur_state = hass.states.get(state.entity_id) if cur_state is None: _LOGGER.warning("Unable to find entity %s", state.entity_id) return if state.state not in (STATE_ON, STATE_OFF): _LOGGER.warning( "Invalid state specified for %s: %s", state.entity_id, state.state ) return if cur_state.state == state.state: return service = SERVICE_TURN_ON if state.state == STATE_ON else SERVICE_TURN_OFF await hass.services.async_call( DOMAIN, service, {ATTR_ENTITY_ID: state.entity_id}, context=context, blocking=True, ) async def async_reproduce_states( hass: HomeAssistantType, states: Iterable[State], *, context: Optional[Context] = None, reproduce_options: Optional[Dict[str, Any]] = None, ) -> None: """Reproduce component states.""" await asyncio.gather( *( _async_reproduce_states( hass, state, context=context, reproduce_options=reproduce_options ) for state in states ) )
"""Test Hue init with multiple bridges.""" from unittest.mock import Mock, patch from aiohue.groups import Groups from aiohue.lights import Lights from aiohue.scenes import Scenes from aiohue.sensors import Sensors import pytest from homeassistant import config_entries from homeassistant.components import hue from homeassistant.components.hue import sensor_base as hue_sensor_base from homeassistant.setup import async_setup_component async def setup_component(hass): """Hue component.""" with patch.object(hue, "async_setup_entry", return_value=True): assert ( await async_setup_component( hass, hue.DOMAIN, {}, ) is True ) async def test_hue_activate_scene_both_responds( hass, mock_bridge1, mock_bridge2, mock_config_entry1, mock_config_entry2 ): """Test that makes both bridges successfully activate a scene.""" await setup_component(hass) await setup_bridge(hass, mock_bridge1, mock_config_entry1) await setup_bridge(hass, mock_bridge2, mock_config_entry2) with patch.object( mock_bridge1, "hue_activate_scene", return_value=None ) as mock_hue_activate_scene1, patch.object( mock_bridge2, "hue_activate_scene", return_value=None ) as mock_hue_activate_scene2: await hass.services.async_call( "hue", "hue_activate_scene", {"group_name": "group_2", "scene_name": "my_scene"}, blocking=True, ) mock_hue_activate_scene1.assert_called_once() mock_hue_activate_scene2.assert_called_once() async def test_hue_activate_scene_one_responds( hass, mock_bridge1, mock_bridge2, mock_config_entry1, mock_config_entry2 ): """Test that makes only one bridge successfully activate a scene.""" await setup_component(hass) await setup_bridge(hass, mock_bridge1, mock_config_entry1) await setup_bridge(hass, mock_bridge2, mock_config_entry2) with patch.object( mock_bridge1, "hue_activate_scene", return_value=None ) as mock_hue_activate_scene1, patch.object( mock_bridge2, "hue_activate_scene", return_value=False ) as mock_hue_activate_scene2: await hass.services.async_call( "hue", "hue_activate_scene", {"group_name": "group_2", "scene_name": "my_scene"}, blocking=True, ) mock_hue_activate_scene1.assert_called_once() mock_hue_activate_scene2.assert_called_once() async def test_hue_activate_scene_zero_responds( hass, mock_bridge1, mock_bridge2, mock_config_entry1, mock_config_entry2 ): """Test that makes no bridge successfully activate a scene.""" await setup_component(hass) await setup_bridge(hass, mock_bridge1, mock_config_entry1) await setup_bridge(hass, mock_bridge2, mock_config_entry2) with patch.object( mock_bridge1, "hue_activate_scene", return_value=False ) as mock_hue_activate_scene1, patch.object( mock_bridge2, "hue_activate_scene", return_value=False ) as mock_hue_activate_scene2: await hass.services.async_call( "hue", "hue_activate_scene", {"group_name": "group_2", "scene_name": "my_scene"}, blocking=True, ) # both were retried assert mock_hue_activate_scene1.call_count == 2 assert mock_hue_activate_scene2.call_count == 2 async def setup_bridge(hass, mock_bridge, config_entry): """Load the Hue light platform with the provided bridge.""" mock_bridge.config_entry = config_entry hass.data[hue.DOMAIN][config_entry.entry_id] = mock_bridge await hass.config_entries.async_forward_entry_setup(config_entry, "light") # To flush out the service call to update the group await hass.async_block_till_done() @pytest.fixture def mock_config_entry1(hass): """Mock a config entry.""" return create_config_entry() @pytest.fixture def mock_config_entry2(hass): """Mock a config entry.""" return create_config_entry() def create_config_entry(): 
"""Mock a config entry.""" return config_entries.ConfigEntry( 1, hue.DOMAIN, "Mock Title", {"host": "mock-host"}, "test", config_entries.CONN_CLASS_LOCAL_POLL, system_options={}, ) @pytest.fixture def mock_bridge1(hass): """Mock a Hue bridge.""" return create_mock_bridge(hass) @pytest.fixture def mock_bridge2(hass): """Mock a Hue bridge.""" return create_mock_bridge(hass) def create_mock_bridge(hass): """Create a mock Hue bridge.""" bridge = Mock( hass=hass, available=True, authorized=True, allow_unreachable=False, allow_groups=False, api=Mock(), reset_jobs=[], spec=hue.HueBridge, ) bridge.sensor_manager = hue_sensor_base.SensorManager(bridge) bridge.mock_requests = [] async def mock_request(method, path, **kwargs): kwargs["method"] = method kwargs["path"] = path bridge.mock_requests.append(kwargs) return {} async def async_request_call(task): await task() bridge.async_request_call = async_request_call bridge.api.config.apiversion = "9.9.9" bridge.api.lights = Lights({}, mock_request) bridge.api.groups = Groups({}, mock_request) bridge.api.sensors = Sensors({}, mock_request) bridge.api.scenes = Scenes({}, mock_request) return bridge
turbokongen/home-assistant
tests/components/hue/test_init_multiple_bridges.py
homeassistant/components/input_boolean/reproduce_state.py
"""The ATAG Integration.""" from datetime import timedelta import logging import async_timeout from pyatag import AtagException, AtagOne from homeassistant.components.climate import DOMAIN as CLIMATE from homeassistant.components.sensor import DOMAIN as SENSOR from homeassistant.components.water_heater import DOMAIN as WATER_HEATER from homeassistant.config_entries import ConfigEntry from homeassistant.core import HomeAssistant, asyncio from homeassistant.exceptions import ConfigEntryNotReady from homeassistant.helpers.aiohttp_client import async_get_clientsession from homeassistant.helpers.update_coordinator import ( CoordinatorEntity, DataUpdateCoordinator, UpdateFailed, ) _LOGGER = logging.getLogger(__name__) DOMAIN = "atag" PLATFORMS = [CLIMATE, WATER_HEATER, SENSOR] async def async_setup(hass: HomeAssistant, config): """Set up the Atag component.""" return True async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry): """Set up Atag integration from a config entry.""" session = async_get_clientsession(hass) coordinator = AtagDataUpdateCoordinator(hass, session, entry) await coordinator.async_refresh() if not coordinator.last_update_success: raise ConfigEntryNotReady hass.data.setdefault(DOMAIN, {}) hass.data[DOMAIN][entry.entry_id] = coordinator if entry.unique_id is None: hass.config_entries.async_update_entry(entry, unique_id=coordinator.atag.id) for platform in PLATFORMS: hass.async_create_task( hass.config_entries.async_forward_entry_setup(entry, platform) ) return True class AtagDataUpdateCoordinator(DataUpdateCoordinator): """Define an object to hold Atag data.""" def __init__(self, hass, session, entry): """Initialize.""" self.atag = AtagOne(session=session, **entry.data) super().__init__( hass, _LOGGER, name=DOMAIN, update_interval=timedelta(seconds=30) ) async def _async_update_data(self): """Update data via library.""" with async_timeout.timeout(20): try: if not await self.atag.update(): raise UpdateFailed("No data received") except AtagException as error: raise UpdateFailed(error) from error return self.atag.report async def async_unload_entry(hass, entry): """Unload Atag config entry.""" unload_ok = all( await asyncio.gather( *[ hass.config_entries.async_forward_entry_unload(entry, component) for component in PLATFORMS ] ) ) if unload_ok: hass.data[DOMAIN].pop(entry.entry_id) return unload_ok class AtagEntity(CoordinatorEntity): """Defines a base Atag entity.""" def __init__(self, coordinator: AtagDataUpdateCoordinator, atag_id: str) -> None: """Initialize the Atag entity.""" super().__init__(coordinator) self._id = atag_id self._name = DOMAIN.title() @property def device_info(self) -> dict: """Return info for device registry.""" device = self.coordinator.atag.id version = self.coordinator.atag.apiversion return { "identifiers": {(DOMAIN, device)}, "name": "Atag Thermostat", "model": "Atag One", "sw_version": version, "manufacturer": "Atag", } @property def name(self) -> str: """Return the name of the entity.""" return self._name @property def unique_id(self): """Return a unique ID to use for this entity.""" return f"{self.coordinator.atag.id}-{self._id}"
"""Test Hue init with multiple bridges.""" from unittest.mock import Mock, patch from aiohue.groups import Groups from aiohue.lights import Lights from aiohue.scenes import Scenes from aiohue.sensors import Sensors import pytest from homeassistant import config_entries from homeassistant.components import hue from homeassistant.components.hue import sensor_base as hue_sensor_base from homeassistant.setup import async_setup_component async def setup_component(hass): """Hue component.""" with patch.object(hue, "async_setup_entry", return_value=True): assert ( await async_setup_component( hass, hue.DOMAIN, {}, ) is True ) async def test_hue_activate_scene_both_responds( hass, mock_bridge1, mock_bridge2, mock_config_entry1, mock_config_entry2 ): """Test that makes both bridges successfully activate a scene.""" await setup_component(hass) await setup_bridge(hass, mock_bridge1, mock_config_entry1) await setup_bridge(hass, mock_bridge2, mock_config_entry2) with patch.object( mock_bridge1, "hue_activate_scene", return_value=None ) as mock_hue_activate_scene1, patch.object( mock_bridge2, "hue_activate_scene", return_value=None ) as mock_hue_activate_scene2: await hass.services.async_call( "hue", "hue_activate_scene", {"group_name": "group_2", "scene_name": "my_scene"}, blocking=True, ) mock_hue_activate_scene1.assert_called_once() mock_hue_activate_scene2.assert_called_once() async def test_hue_activate_scene_one_responds( hass, mock_bridge1, mock_bridge2, mock_config_entry1, mock_config_entry2 ): """Test that makes only one bridge successfully activate a scene.""" await setup_component(hass) await setup_bridge(hass, mock_bridge1, mock_config_entry1) await setup_bridge(hass, mock_bridge2, mock_config_entry2) with patch.object( mock_bridge1, "hue_activate_scene", return_value=None ) as mock_hue_activate_scene1, patch.object( mock_bridge2, "hue_activate_scene", return_value=False ) as mock_hue_activate_scene2: await hass.services.async_call( "hue", "hue_activate_scene", {"group_name": "group_2", "scene_name": "my_scene"}, blocking=True, ) mock_hue_activate_scene1.assert_called_once() mock_hue_activate_scene2.assert_called_once() async def test_hue_activate_scene_zero_responds( hass, mock_bridge1, mock_bridge2, mock_config_entry1, mock_config_entry2 ): """Test that makes no bridge successfully activate a scene.""" await setup_component(hass) await setup_bridge(hass, mock_bridge1, mock_config_entry1) await setup_bridge(hass, mock_bridge2, mock_config_entry2) with patch.object( mock_bridge1, "hue_activate_scene", return_value=False ) as mock_hue_activate_scene1, patch.object( mock_bridge2, "hue_activate_scene", return_value=False ) as mock_hue_activate_scene2: await hass.services.async_call( "hue", "hue_activate_scene", {"group_name": "group_2", "scene_name": "my_scene"}, blocking=True, ) # both were retried assert mock_hue_activate_scene1.call_count == 2 assert mock_hue_activate_scene2.call_count == 2 async def setup_bridge(hass, mock_bridge, config_entry): """Load the Hue light platform with the provided bridge.""" mock_bridge.config_entry = config_entry hass.data[hue.DOMAIN][config_entry.entry_id] = mock_bridge await hass.config_entries.async_forward_entry_setup(config_entry, "light") # To flush out the service call to update the group await hass.async_block_till_done() @pytest.fixture def mock_config_entry1(hass): """Mock a config entry.""" return create_config_entry() @pytest.fixture def mock_config_entry2(hass): """Mock a config entry.""" return create_config_entry() def create_config_entry(): 
"""Mock a config entry.""" return config_entries.ConfigEntry( 1, hue.DOMAIN, "Mock Title", {"host": "mock-host"}, "test", config_entries.CONN_CLASS_LOCAL_POLL, system_options={}, ) @pytest.fixture def mock_bridge1(hass): """Mock a Hue bridge.""" return create_mock_bridge(hass) @pytest.fixture def mock_bridge2(hass): """Mock a Hue bridge.""" return create_mock_bridge(hass) def create_mock_bridge(hass): """Create a mock Hue bridge.""" bridge = Mock( hass=hass, available=True, authorized=True, allow_unreachable=False, allow_groups=False, api=Mock(), reset_jobs=[], spec=hue.HueBridge, ) bridge.sensor_manager = hue_sensor_base.SensorManager(bridge) bridge.mock_requests = [] async def mock_request(method, path, **kwargs): kwargs["method"] = method kwargs["path"] = path bridge.mock_requests.append(kwargs) return {} async def async_request_call(task): await task() bridge.async_request_call = async_request_call bridge.api.config.apiversion = "9.9.9" bridge.api.lights = Lights({}, mock_request) bridge.api.groups = Groups({}, mock_request) bridge.api.sensors = Sensors({}, mock_request) bridge.api.scenes = Scenes({}, mock_request) return bridge
turbokongen/home-assistant
tests/components/hue/test_init_multiple_bridges.py
homeassistant/components/atag/__init__.py
"""The Dune HD component.""" import asyncio from pdunehd import DuneHDPlayer from homeassistant.const import CONF_HOST from .const import DOMAIN PLATFORMS = ["media_player"] async def async_setup(hass, config): """Set up the Dune HD component.""" return True async def async_setup_entry(hass, config_entry): """Set up a config entry.""" host = config_entry.data[CONF_HOST] player = DuneHDPlayer(host) hass.data.setdefault(DOMAIN, {}) hass.data[DOMAIN][config_entry.entry_id] = player for component in PLATFORMS: hass.async_create_task( hass.config_entries.async_forward_entry_setup(config_entry, component) ) return True async def async_unload_entry(hass, config_entry): """Unload a config entry.""" unload_ok = all( await asyncio.gather( *[ hass.config_entries.async_forward_entry_unload(config_entry, component) for component in PLATFORMS ] ) ) if unload_ok: hass.data[DOMAIN].pop(config_entry.entry_id) return unload_ok
"""Test Hue init with multiple bridges.""" from unittest.mock import Mock, patch from aiohue.groups import Groups from aiohue.lights import Lights from aiohue.scenes import Scenes from aiohue.sensors import Sensors import pytest from homeassistant import config_entries from homeassistant.components import hue from homeassistant.components.hue import sensor_base as hue_sensor_base from homeassistant.setup import async_setup_component async def setup_component(hass): """Hue component.""" with patch.object(hue, "async_setup_entry", return_value=True): assert ( await async_setup_component( hass, hue.DOMAIN, {}, ) is True ) async def test_hue_activate_scene_both_responds( hass, mock_bridge1, mock_bridge2, mock_config_entry1, mock_config_entry2 ): """Test that makes both bridges successfully activate a scene.""" await setup_component(hass) await setup_bridge(hass, mock_bridge1, mock_config_entry1) await setup_bridge(hass, mock_bridge2, mock_config_entry2) with patch.object( mock_bridge1, "hue_activate_scene", return_value=None ) as mock_hue_activate_scene1, patch.object( mock_bridge2, "hue_activate_scene", return_value=None ) as mock_hue_activate_scene2: await hass.services.async_call( "hue", "hue_activate_scene", {"group_name": "group_2", "scene_name": "my_scene"}, blocking=True, ) mock_hue_activate_scene1.assert_called_once() mock_hue_activate_scene2.assert_called_once() async def test_hue_activate_scene_one_responds( hass, mock_bridge1, mock_bridge2, mock_config_entry1, mock_config_entry2 ): """Test that makes only one bridge successfully activate a scene.""" await setup_component(hass) await setup_bridge(hass, mock_bridge1, mock_config_entry1) await setup_bridge(hass, mock_bridge2, mock_config_entry2) with patch.object( mock_bridge1, "hue_activate_scene", return_value=None ) as mock_hue_activate_scene1, patch.object( mock_bridge2, "hue_activate_scene", return_value=False ) as mock_hue_activate_scene2: await hass.services.async_call( "hue", "hue_activate_scene", {"group_name": "group_2", "scene_name": "my_scene"}, blocking=True, ) mock_hue_activate_scene1.assert_called_once() mock_hue_activate_scene2.assert_called_once() async def test_hue_activate_scene_zero_responds( hass, mock_bridge1, mock_bridge2, mock_config_entry1, mock_config_entry2 ): """Test that makes no bridge successfully activate a scene.""" await setup_component(hass) await setup_bridge(hass, mock_bridge1, mock_config_entry1) await setup_bridge(hass, mock_bridge2, mock_config_entry2) with patch.object( mock_bridge1, "hue_activate_scene", return_value=False ) as mock_hue_activate_scene1, patch.object( mock_bridge2, "hue_activate_scene", return_value=False ) as mock_hue_activate_scene2: await hass.services.async_call( "hue", "hue_activate_scene", {"group_name": "group_2", "scene_name": "my_scene"}, blocking=True, ) # both were retried assert mock_hue_activate_scene1.call_count == 2 assert mock_hue_activate_scene2.call_count == 2 async def setup_bridge(hass, mock_bridge, config_entry): """Load the Hue light platform with the provided bridge.""" mock_bridge.config_entry = config_entry hass.data[hue.DOMAIN][config_entry.entry_id] = mock_bridge await hass.config_entries.async_forward_entry_setup(config_entry, "light") # To flush out the service call to update the group await hass.async_block_till_done() @pytest.fixture def mock_config_entry1(hass): """Mock a config entry.""" return create_config_entry() @pytest.fixture def mock_config_entry2(hass): """Mock a config entry.""" return create_config_entry() def create_config_entry(): 
"""Mock a config entry.""" return config_entries.ConfigEntry( 1, hue.DOMAIN, "Mock Title", {"host": "mock-host"}, "test", config_entries.CONN_CLASS_LOCAL_POLL, system_options={}, ) @pytest.fixture def mock_bridge1(hass): """Mock a Hue bridge.""" return create_mock_bridge(hass) @pytest.fixture def mock_bridge2(hass): """Mock a Hue bridge.""" return create_mock_bridge(hass) def create_mock_bridge(hass): """Create a mock Hue bridge.""" bridge = Mock( hass=hass, available=True, authorized=True, allow_unreachable=False, allow_groups=False, api=Mock(), reset_jobs=[], spec=hue.HueBridge, ) bridge.sensor_manager = hue_sensor_base.SensorManager(bridge) bridge.mock_requests = [] async def mock_request(method, path, **kwargs): kwargs["method"] = method kwargs["path"] = path bridge.mock_requests.append(kwargs) return {} async def async_request_call(task): await task() bridge.async_request_call = async_request_call bridge.api.config.apiversion = "9.9.9" bridge.api.lights = Lights({}, mock_request) bridge.api.groups = Groups({}, mock_request) bridge.api.sensors = Sensors({}, mock_request) bridge.api.scenes = Scenes({}, mock_request) return bridge
turbokongen/home-assistant
tests/components/hue/test_init_multiple_bridges.py
homeassistant/components/dunehd/__init__.py
"""Provides device trigger for lights.""" from typing import List import voluptuous as vol from homeassistant.components.automation import AutomationActionType from homeassistant.components.device_automation import toggle_entity from homeassistant.const import CONF_DOMAIN from homeassistant.core import CALLBACK_TYPE, HomeAssistant from homeassistant.helpers.typing import ConfigType from . import DOMAIN TRIGGER_SCHEMA = toggle_entity.TRIGGER_SCHEMA.extend( {vol.Required(CONF_DOMAIN): DOMAIN} ) async def async_attach_trigger( hass: HomeAssistant, config: ConfigType, action: AutomationActionType, automation_info: dict, ) -> CALLBACK_TYPE: """Listen for state changes based on configuration.""" return await toggle_entity.async_attach_trigger( hass, config, action, automation_info ) async def async_get_triggers(hass: HomeAssistant, device_id: str) -> List[dict]: """List device triggers.""" return await toggle_entity.async_get_triggers(hass, device_id, DOMAIN) async def async_get_trigger_capabilities(hass: HomeAssistant, config: dict) -> dict: """List trigger capabilities.""" return await toggle_entity.async_get_trigger_capabilities(hass, config)
"""Test Hue init with multiple bridges.""" from unittest.mock import Mock, patch from aiohue.groups import Groups from aiohue.lights import Lights from aiohue.scenes import Scenes from aiohue.sensors import Sensors import pytest from homeassistant import config_entries from homeassistant.components import hue from homeassistant.components.hue import sensor_base as hue_sensor_base from homeassistant.setup import async_setup_component async def setup_component(hass): """Hue component.""" with patch.object(hue, "async_setup_entry", return_value=True): assert ( await async_setup_component( hass, hue.DOMAIN, {}, ) is True ) async def test_hue_activate_scene_both_responds( hass, mock_bridge1, mock_bridge2, mock_config_entry1, mock_config_entry2 ): """Test that makes both bridges successfully activate a scene.""" await setup_component(hass) await setup_bridge(hass, mock_bridge1, mock_config_entry1) await setup_bridge(hass, mock_bridge2, mock_config_entry2) with patch.object( mock_bridge1, "hue_activate_scene", return_value=None ) as mock_hue_activate_scene1, patch.object( mock_bridge2, "hue_activate_scene", return_value=None ) as mock_hue_activate_scene2: await hass.services.async_call( "hue", "hue_activate_scene", {"group_name": "group_2", "scene_name": "my_scene"}, blocking=True, ) mock_hue_activate_scene1.assert_called_once() mock_hue_activate_scene2.assert_called_once() async def test_hue_activate_scene_one_responds( hass, mock_bridge1, mock_bridge2, mock_config_entry1, mock_config_entry2 ): """Test that makes only one bridge successfully activate a scene.""" await setup_component(hass) await setup_bridge(hass, mock_bridge1, mock_config_entry1) await setup_bridge(hass, mock_bridge2, mock_config_entry2) with patch.object( mock_bridge1, "hue_activate_scene", return_value=None ) as mock_hue_activate_scene1, patch.object( mock_bridge2, "hue_activate_scene", return_value=False ) as mock_hue_activate_scene2: await hass.services.async_call( "hue", "hue_activate_scene", {"group_name": "group_2", "scene_name": "my_scene"}, blocking=True, ) mock_hue_activate_scene1.assert_called_once() mock_hue_activate_scene2.assert_called_once() async def test_hue_activate_scene_zero_responds( hass, mock_bridge1, mock_bridge2, mock_config_entry1, mock_config_entry2 ): """Test that makes no bridge successfully activate a scene.""" await setup_component(hass) await setup_bridge(hass, mock_bridge1, mock_config_entry1) await setup_bridge(hass, mock_bridge2, mock_config_entry2) with patch.object( mock_bridge1, "hue_activate_scene", return_value=False ) as mock_hue_activate_scene1, patch.object( mock_bridge2, "hue_activate_scene", return_value=False ) as mock_hue_activate_scene2: await hass.services.async_call( "hue", "hue_activate_scene", {"group_name": "group_2", "scene_name": "my_scene"}, blocking=True, ) # both were retried assert mock_hue_activate_scene1.call_count == 2 assert mock_hue_activate_scene2.call_count == 2 async def setup_bridge(hass, mock_bridge, config_entry): """Load the Hue light platform with the provided bridge.""" mock_bridge.config_entry = config_entry hass.data[hue.DOMAIN][config_entry.entry_id] = mock_bridge await hass.config_entries.async_forward_entry_setup(config_entry, "light") # To flush out the service call to update the group await hass.async_block_till_done() @pytest.fixture def mock_config_entry1(hass): """Mock a config entry.""" return create_config_entry() @pytest.fixture def mock_config_entry2(hass): """Mock a config entry.""" return create_config_entry() def create_config_entry(): 
"""Mock a config entry.""" return config_entries.ConfigEntry( 1, hue.DOMAIN, "Mock Title", {"host": "mock-host"}, "test", config_entries.CONN_CLASS_LOCAL_POLL, system_options={}, ) @pytest.fixture def mock_bridge1(hass): """Mock a Hue bridge.""" return create_mock_bridge(hass) @pytest.fixture def mock_bridge2(hass): """Mock a Hue bridge.""" return create_mock_bridge(hass) def create_mock_bridge(hass): """Create a mock Hue bridge.""" bridge = Mock( hass=hass, available=True, authorized=True, allow_unreachable=False, allow_groups=False, api=Mock(), reset_jobs=[], spec=hue.HueBridge, ) bridge.sensor_manager = hue_sensor_base.SensorManager(bridge) bridge.mock_requests = [] async def mock_request(method, path, **kwargs): kwargs["method"] = method kwargs["path"] = path bridge.mock_requests.append(kwargs) return {} async def async_request_call(task): await task() bridge.async_request_call = async_request_call bridge.api.config.apiversion = "9.9.9" bridge.api.lights = Lights({}, mock_request) bridge.api.groups = Groups({}, mock_request) bridge.api.sensors = Sensors({}, mock_request) bridge.api.scenes = Scenes({}, mock_request) return bridge
turbokongen/home-assistant
tests/components/hue/test_init_multiple_bridges.py
homeassistant/components/light/device_trigger.py
"""Support for switch controlled using a telnet connection.""" from datetime import timedelta import logging import telnetlib import voluptuous as vol from homeassistant.components.switch import ( ENTITY_ID_FORMAT, PLATFORM_SCHEMA, SwitchEntity, ) from homeassistant.const import ( CONF_COMMAND_OFF, CONF_COMMAND_ON, CONF_COMMAND_STATE, CONF_NAME, CONF_PORT, CONF_RESOURCE, CONF_SWITCHES, CONF_TIMEOUT, CONF_VALUE_TEMPLATE, ) import homeassistant.helpers.config_validation as cv _LOGGER = logging.getLogger(__name__) DEFAULT_PORT = 23 DEFAULT_TIMEOUT = 0.2 SWITCH_SCHEMA = vol.Schema( { vol.Required(CONF_COMMAND_OFF): cv.string, vol.Required(CONF_COMMAND_ON): cv.string, vol.Required(CONF_RESOURCE): cv.string, vol.Optional(CONF_VALUE_TEMPLATE): cv.template, vol.Optional(CONF_COMMAND_STATE): cv.string, vol.Optional(CONF_NAME): cv.string, vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port, vol.Optional(CONF_TIMEOUT, default=DEFAULT_TIMEOUT): vol.Coerce(float), } ) PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend( {vol.Required(CONF_SWITCHES): cv.schema_with_slug_keys(SWITCH_SCHEMA)} ) SCAN_INTERVAL = timedelta(seconds=10) def setup_platform(hass, config, add_entities, discovery_info=None): """Find and return switches controlled by telnet commands.""" devices = config.get(CONF_SWITCHES, {}) switches = [] for object_id, device_config in devices.items(): value_template = device_config.get(CONF_VALUE_TEMPLATE) if value_template is not None: value_template.hass = hass switches.append( TelnetSwitch( hass, object_id, device_config.get(CONF_RESOURCE), device_config.get(CONF_PORT), device_config.get(CONF_NAME, object_id), device_config.get(CONF_COMMAND_ON), device_config.get(CONF_COMMAND_OFF), device_config.get(CONF_COMMAND_STATE), value_template, device_config.get(CONF_TIMEOUT), ) ) if not switches: _LOGGER.error("No switches added") return add_entities(switches) class TelnetSwitch(SwitchEntity): """Representation of a switch that can be toggled using telnet commands.""" def __init__( self, hass, object_id, resource, port, friendly_name, command_on, command_off, command_state, value_template, timeout, ): """Initialize the switch.""" self._hass = hass self.entity_id = ENTITY_ID_FORMAT.format(object_id) self._resource = resource self._port = port self._name = friendly_name self._state = False self._command_on = command_on self._command_off = command_off self._command_state = command_state self._value_template = value_template self._timeout = timeout def _telnet_command(self, command): try: telnet = telnetlib.Telnet(self._resource, self._port) telnet.write(command.encode("ASCII") + b"\r") response = telnet.read_until(b"\r", timeout=self._timeout) _LOGGER.debug("telnet response: %s", response.decode("ASCII").strip()) return response.decode("ASCII").strip() except OSError as error: _LOGGER.error( 'Command "%s" failed with exception: %s', command, repr(error) ) return None @property def name(self): """Return the name of the switch.""" return self._name @property def should_poll(self): """Only poll if we have state command.""" return self._command_state is not None @property def is_on(self): """Return true if device is on.""" return self._state @property def assumed_state(self): """Return true if no state command is defined, false otherwise.""" return self._command_state is None def update(self): """Update device state.""" response = self._telnet_command(self._command_state) if response: rendered = self._value_template.render_with_possible_json_value(response) self._state = rendered == "True" else: 
_LOGGER.warning("Empty response for command: %s", self._command_state) def turn_on(self, **kwargs): """Turn the device on.""" self._telnet_command(self._command_on) if self.assumed_state: self._state = True def turn_off(self, **kwargs): """Turn the device off.""" self._telnet_command(self._command_off) if self.assumed_state: self._state = False
"""Test Hue init with multiple bridges.""" from unittest.mock import Mock, patch from aiohue.groups import Groups from aiohue.lights import Lights from aiohue.scenes import Scenes from aiohue.sensors import Sensors import pytest from homeassistant import config_entries from homeassistant.components import hue from homeassistant.components.hue import sensor_base as hue_sensor_base from homeassistant.setup import async_setup_component async def setup_component(hass): """Hue component.""" with patch.object(hue, "async_setup_entry", return_value=True): assert ( await async_setup_component( hass, hue.DOMAIN, {}, ) is True ) async def test_hue_activate_scene_both_responds( hass, mock_bridge1, mock_bridge2, mock_config_entry1, mock_config_entry2 ): """Test that makes both bridges successfully activate a scene.""" await setup_component(hass) await setup_bridge(hass, mock_bridge1, mock_config_entry1) await setup_bridge(hass, mock_bridge2, mock_config_entry2) with patch.object( mock_bridge1, "hue_activate_scene", return_value=None ) as mock_hue_activate_scene1, patch.object( mock_bridge2, "hue_activate_scene", return_value=None ) as mock_hue_activate_scene2: await hass.services.async_call( "hue", "hue_activate_scene", {"group_name": "group_2", "scene_name": "my_scene"}, blocking=True, ) mock_hue_activate_scene1.assert_called_once() mock_hue_activate_scene2.assert_called_once() async def test_hue_activate_scene_one_responds( hass, mock_bridge1, mock_bridge2, mock_config_entry1, mock_config_entry2 ): """Test that makes only one bridge successfully activate a scene.""" await setup_component(hass) await setup_bridge(hass, mock_bridge1, mock_config_entry1) await setup_bridge(hass, mock_bridge2, mock_config_entry2) with patch.object( mock_bridge1, "hue_activate_scene", return_value=None ) as mock_hue_activate_scene1, patch.object( mock_bridge2, "hue_activate_scene", return_value=False ) as mock_hue_activate_scene2: await hass.services.async_call( "hue", "hue_activate_scene", {"group_name": "group_2", "scene_name": "my_scene"}, blocking=True, ) mock_hue_activate_scene1.assert_called_once() mock_hue_activate_scene2.assert_called_once() async def test_hue_activate_scene_zero_responds( hass, mock_bridge1, mock_bridge2, mock_config_entry1, mock_config_entry2 ): """Test that makes no bridge successfully activate a scene.""" await setup_component(hass) await setup_bridge(hass, mock_bridge1, mock_config_entry1) await setup_bridge(hass, mock_bridge2, mock_config_entry2) with patch.object( mock_bridge1, "hue_activate_scene", return_value=False ) as mock_hue_activate_scene1, patch.object( mock_bridge2, "hue_activate_scene", return_value=False ) as mock_hue_activate_scene2: await hass.services.async_call( "hue", "hue_activate_scene", {"group_name": "group_2", "scene_name": "my_scene"}, blocking=True, ) # both were retried assert mock_hue_activate_scene1.call_count == 2 assert mock_hue_activate_scene2.call_count == 2 async def setup_bridge(hass, mock_bridge, config_entry): """Load the Hue light platform with the provided bridge.""" mock_bridge.config_entry = config_entry hass.data[hue.DOMAIN][config_entry.entry_id] = mock_bridge await hass.config_entries.async_forward_entry_setup(config_entry, "light") # To flush out the service call to update the group await hass.async_block_till_done() @pytest.fixture def mock_config_entry1(hass): """Mock a config entry.""" return create_config_entry() @pytest.fixture def mock_config_entry2(hass): """Mock a config entry.""" return create_config_entry() def create_config_entry(): 
"""Mock a config entry.""" return config_entries.ConfigEntry( 1, hue.DOMAIN, "Mock Title", {"host": "mock-host"}, "test", config_entries.CONN_CLASS_LOCAL_POLL, system_options={}, ) @pytest.fixture def mock_bridge1(hass): """Mock a Hue bridge.""" return create_mock_bridge(hass) @pytest.fixture def mock_bridge2(hass): """Mock a Hue bridge.""" return create_mock_bridge(hass) def create_mock_bridge(hass): """Create a mock Hue bridge.""" bridge = Mock( hass=hass, available=True, authorized=True, allow_unreachable=False, allow_groups=False, api=Mock(), reset_jobs=[], spec=hue.HueBridge, ) bridge.sensor_manager = hue_sensor_base.SensorManager(bridge) bridge.mock_requests = [] async def mock_request(method, path, **kwargs): kwargs["method"] = method kwargs["path"] = path bridge.mock_requests.append(kwargs) return {} async def async_request_call(task): await task() bridge.async_request_call = async_request_call bridge.api.config.apiversion = "9.9.9" bridge.api.lights = Lights({}, mock_request) bridge.api.groups = Groups({}, mock_request) bridge.api.sensors = Sensors({}, mock_request) bridge.api.scenes = Scenes({}, mock_request) return bridge
turbokongen/home-assistant
tests/components/hue/test_init_multiple_bridges.py
homeassistant/components/telnet/switch.py
"""Support for Google Nest SDM Cameras.""" import datetime import logging from typing import Optional from google_nest_sdm.camera_traits import ( CameraEventImageTrait, CameraImageTrait, CameraLiveStreamTrait, ) from google_nest_sdm.device import Device from google_nest_sdm.exceptions import GoogleNestException from haffmpeg.tools import IMAGE_JPEG from homeassistant.components.camera import SUPPORT_STREAM, Camera from homeassistant.components.ffmpeg import async_get_image from homeassistant.config_entries import ConfigEntry from homeassistant.exceptions import PlatformNotReady from homeassistant.helpers.event import async_track_point_in_utc_time from homeassistant.helpers.typing import HomeAssistantType from homeassistant.util.dt import utcnow from .const import DATA_SUBSCRIBER, DOMAIN from .device_info import DeviceInfo _LOGGER = logging.getLogger(__name__) # Used to schedule an alarm to refresh the stream before expiration STREAM_EXPIRATION_BUFFER = datetime.timedelta(seconds=30) async def async_setup_sdm_entry( hass: HomeAssistantType, entry: ConfigEntry, async_add_entities ) -> None: """Set up the cameras.""" subscriber = hass.data[DOMAIN][DATA_SUBSCRIBER] try: device_manager = await subscriber.async_get_device_manager() except GoogleNestException as err: raise PlatformNotReady from err # Fetch initial data so we have data when entities subscribe. entities = [] for device in device_manager.devices.values(): if ( CameraImageTrait.NAME in device.traits or CameraLiveStreamTrait.NAME in device.traits ): entities.append(NestCamera(device)) async_add_entities(entities) class NestCamera(Camera): """Devices that support cameras.""" def __init__(self, device: Device): """Initialize the camera.""" super().__init__() self._device = device self._device_info = DeviceInfo(device) self._stream = None self._stream_refresh_unsub = None # Cache of most recent event image self._event_id = None self._event_image_bytes = None self._event_image_cleanup_unsub = None @property def should_poll(self) -> bool: """Disable polling since entities have state pushed via pubsub.""" return False @property def unique_id(self) -> Optional[str]: """Return a unique ID.""" # The API "name" field is a unique device identifier. 
return f"{self._device.name}-camera" @property def name(self): """Return the name of the camera.""" return self._device_info.device_name @property def device_info(self): """Return device specific attributes.""" return self._device_info.device_info @property def brand(self): """Return the camera brand.""" return self._device_info.device_brand @property def model(self): """Return the camera model.""" return self._device_info.device_model @property def supported_features(self): """Flag supported features.""" supported_features = 0 if CameraLiveStreamTrait.NAME in self._device.traits: supported_features |= SUPPORT_STREAM return supported_features async def stream_source(self): """Return the source of the stream.""" if CameraLiveStreamTrait.NAME not in self._device.traits: return None trait = self._device.traits[CameraLiveStreamTrait.NAME] if not self._stream: _LOGGER.debug("Fetching stream url") self._stream = await trait.generate_rtsp_stream() self._schedule_stream_refresh() if self._stream.expires_at < utcnow(): _LOGGER.warning("Stream already expired") return self._stream.rtsp_stream_url def _schedule_stream_refresh(self): """Schedules an alarm to refresh the stream url before expiration.""" _LOGGER.debug("New stream url expires at %s", self._stream.expires_at) refresh_time = self._stream.expires_at - STREAM_EXPIRATION_BUFFER # Schedule an alarm to extend the stream if self._stream_refresh_unsub is not None: self._stream_refresh_unsub() self._stream_refresh_unsub = async_track_point_in_utc_time( self.hass, self._handle_stream_refresh, refresh_time, ) async def _handle_stream_refresh(self, now): """Alarm that fires to check if the stream should be refreshed.""" if not self._stream: return _LOGGER.debug("Extending stream url") try: self._stream = await self._stream.extend_rtsp_stream() except GoogleNestException as err: _LOGGER.debug("Failed to extend stream: %s", err) # Next attempt to catch a url will get a new one self._stream = None return # Update the stream worker with the latest valid url if self.stream: self.stream.update_source(self._stream.rtsp_stream_url) self._schedule_stream_refresh() async def async_will_remove_from_hass(self): """Invalidates the RTSP token when unloaded.""" if self._stream: _LOGGER.debug("Invalidating stream") await self._stream.stop_rtsp_stream() if self._stream_refresh_unsub: self._stream_refresh_unsub() self._event_id = None self._event_image_bytes = None if self._event_image_cleanup_unsub is not None: self._event_image_cleanup_unsub() async def async_added_to_hass(self): """Run when entity is added to register update signal handler.""" self.async_on_remove( self._device.add_update_listener(self.async_write_ha_state) ) async def async_camera_image(self): """Return bytes of camera image.""" # Returns the snapshot of the last event for ~30 seconds after the event active_event_image = await self._async_active_event_image() if active_event_image: return active_event_image # Fetch still image from the live stream stream_url = await self.stream_source() if not stream_url: return None return await async_get_image(self.hass, stream_url, output_format=IMAGE_JPEG) async def _async_active_event_image(self): """Return image from any active events happening.""" if CameraEventImageTrait.NAME not in self._device.traits: return None trait = self._device.active_event_trait if not trait: return None # Reuse image bytes if they have already been fetched event = trait.last_event if self._event_id is not None and self._event_id == event.event_id: return self._event_image_bytes 
_LOGGER.debug("Generating event image URL for event_id %s", event.event_id) image_bytes = await self._async_fetch_active_event_image(trait) if image_bytes is None: return None self._event_id = event.event_id self._event_image_bytes = image_bytes self._schedule_event_image_cleanup(event.expires_at) return image_bytes async def _async_fetch_active_event_image(self, trait): """Return image bytes for an active event.""" try: event_image = await trait.generate_active_event_image() except GoogleNestException as err: _LOGGER.debug("Unable to generate event image URL: %s", err) return None if not event_image: return None try: return await event_image.contents() except GoogleNestException as err: _LOGGER.debug("Unable to fetch event image: %s", err) return None def _schedule_event_image_cleanup(self, point_in_time): """Schedules an alarm to remove the image bytes from memory, honoring expiration.""" if self._event_image_cleanup_unsub is not None: self._event_image_cleanup_unsub() self._event_image_cleanup_unsub = async_track_point_in_utc_time( self.hass, self._handle_event_image_cleanup, point_in_time, ) def _handle_event_image_cleanup(self, now): """Clear images cached from events and scheduled callback.""" self._event_id = None self._event_image_bytes = None self._event_image_cleanup_unsub = None
"""Test Hue init with multiple bridges.""" from unittest.mock import Mock, patch from aiohue.groups import Groups from aiohue.lights import Lights from aiohue.scenes import Scenes from aiohue.sensors import Sensors import pytest from homeassistant import config_entries from homeassistant.components import hue from homeassistant.components.hue import sensor_base as hue_sensor_base from homeassistant.setup import async_setup_component async def setup_component(hass): """Hue component.""" with patch.object(hue, "async_setup_entry", return_value=True): assert ( await async_setup_component( hass, hue.DOMAIN, {}, ) is True ) async def test_hue_activate_scene_both_responds( hass, mock_bridge1, mock_bridge2, mock_config_entry1, mock_config_entry2 ): """Test that makes both bridges successfully activate a scene.""" await setup_component(hass) await setup_bridge(hass, mock_bridge1, mock_config_entry1) await setup_bridge(hass, mock_bridge2, mock_config_entry2) with patch.object( mock_bridge1, "hue_activate_scene", return_value=None ) as mock_hue_activate_scene1, patch.object( mock_bridge2, "hue_activate_scene", return_value=None ) as mock_hue_activate_scene2: await hass.services.async_call( "hue", "hue_activate_scene", {"group_name": "group_2", "scene_name": "my_scene"}, blocking=True, ) mock_hue_activate_scene1.assert_called_once() mock_hue_activate_scene2.assert_called_once() async def test_hue_activate_scene_one_responds( hass, mock_bridge1, mock_bridge2, mock_config_entry1, mock_config_entry2 ): """Test that makes only one bridge successfully activate a scene.""" await setup_component(hass) await setup_bridge(hass, mock_bridge1, mock_config_entry1) await setup_bridge(hass, mock_bridge2, mock_config_entry2) with patch.object( mock_bridge1, "hue_activate_scene", return_value=None ) as mock_hue_activate_scene1, patch.object( mock_bridge2, "hue_activate_scene", return_value=False ) as mock_hue_activate_scene2: await hass.services.async_call( "hue", "hue_activate_scene", {"group_name": "group_2", "scene_name": "my_scene"}, blocking=True, ) mock_hue_activate_scene1.assert_called_once() mock_hue_activate_scene2.assert_called_once() async def test_hue_activate_scene_zero_responds( hass, mock_bridge1, mock_bridge2, mock_config_entry1, mock_config_entry2 ): """Test that makes no bridge successfully activate a scene.""" await setup_component(hass) await setup_bridge(hass, mock_bridge1, mock_config_entry1) await setup_bridge(hass, mock_bridge2, mock_config_entry2) with patch.object( mock_bridge1, "hue_activate_scene", return_value=False ) as mock_hue_activate_scene1, patch.object( mock_bridge2, "hue_activate_scene", return_value=False ) as mock_hue_activate_scene2: await hass.services.async_call( "hue", "hue_activate_scene", {"group_name": "group_2", "scene_name": "my_scene"}, blocking=True, ) # both were retried assert mock_hue_activate_scene1.call_count == 2 assert mock_hue_activate_scene2.call_count == 2 async def setup_bridge(hass, mock_bridge, config_entry): """Load the Hue light platform with the provided bridge.""" mock_bridge.config_entry = config_entry hass.data[hue.DOMAIN][config_entry.entry_id] = mock_bridge await hass.config_entries.async_forward_entry_setup(config_entry, "light") # To flush out the service call to update the group await hass.async_block_till_done() @pytest.fixture def mock_config_entry1(hass): """Mock a config entry.""" return create_config_entry() @pytest.fixture def mock_config_entry2(hass): """Mock a config entry.""" return create_config_entry() def create_config_entry(): 
"""Mock a config entry.""" return config_entries.ConfigEntry( 1, hue.DOMAIN, "Mock Title", {"host": "mock-host"}, "test", config_entries.CONN_CLASS_LOCAL_POLL, system_options={}, ) @pytest.fixture def mock_bridge1(hass): """Mock a Hue bridge.""" return create_mock_bridge(hass) @pytest.fixture def mock_bridge2(hass): """Mock a Hue bridge.""" return create_mock_bridge(hass) def create_mock_bridge(hass): """Create a mock Hue bridge.""" bridge = Mock( hass=hass, available=True, authorized=True, allow_unreachable=False, allow_groups=False, api=Mock(), reset_jobs=[], spec=hue.HueBridge, ) bridge.sensor_manager = hue_sensor_base.SensorManager(bridge) bridge.mock_requests = [] async def mock_request(method, path, **kwargs): kwargs["method"] = method kwargs["path"] = path bridge.mock_requests.append(kwargs) return {} async def async_request_call(task): await task() bridge.async_request_call = async_request_call bridge.api.config.apiversion = "9.9.9" bridge.api.lights = Lights({}, mock_request) bridge.api.groups = Groups({}, mock_request) bridge.api.sensors = Sensors({}, mock_request) bridge.api.scenes = Scenes({}, mock_request) return bridge
turbokongen/home-assistant
tests/components/hue/test_init_multiple_bridges.py
homeassistant/components/nest/camera_sdm.py
"""Support for testing internet speed via Speedtest.net.""" from datetime import timedelta import logging import speedtest import voluptuous as vol from homeassistant.config_entries import SOURCE_IMPORT from homeassistant.const import ( CONF_MONITORED_CONDITIONS, CONF_SCAN_INTERVAL, EVENT_HOMEASSISTANT_STARTED, ) from homeassistant.core import CoreState, callback from homeassistant.exceptions import ConfigEntryNotReady import homeassistant.helpers.config_validation as cv from homeassistant.helpers.update_coordinator import DataUpdateCoordinator, UpdateFailed from .const import ( CONF_MANUAL, CONF_SERVER_ID, DEFAULT_SCAN_INTERVAL, DEFAULT_SERVER, DOMAIN, SENSOR_TYPES, SPEED_TEST_SERVICE, ) _LOGGER = logging.getLogger(__name__) CONFIG_SCHEMA = vol.Schema( { DOMAIN: vol.Schema( { vol.Optional(CONF_SERVER_ID): cv.positive_int, vol.Optional( CONF_SCAN_INTERVAL, default=timedelta(minutes=DEFAULT_SCAN_INTERVAL) ): cv.positive_time_period, vol.Optional(CONF_MANUAL, default=False): cv.boolean, vol.Optional( CONF_MONITORED_CONDITIONS, default=list(SENSOR_TYPES) ): vol.All(cv.ensure_list, [vol.In(list(SENSOR_TYPES))]), } ) }, extra=vol.ALLOW_EXTRA, ) def server_id_valid(server_id): """Check if server_id is valid.""" try: api = speedtest.Speedtest() api.get_servers([int(server_id)]) except (speedtest.ConfigRetrievalError, speedtest.NoMatchedServers): return False return True async def async_setup(hass, config): """Import integration from config.""" if DOMAIN in config: hass.async_create_task( hass.config_entries.flow.async_init( DOMAIN, context={"source": SOURCE_IMPORT}, data=config[DOMAIN] ) ) return True async def async_setup_entry(hass, config_entry): """Set up the Speedtest.net component.""" coordinator = SpeedTestDataCoordinator(hass, config_entry) await coordinator.async_setup() async def _enable_scheduled_speedtests(*_): """Activate the data update coordinator.""" coordinator.update_interval = timedelta( minutes=config_entry.options.get(CONF_SCAN_INTERVAL, DEFAULT_SCAN_INTERVAL) ) await coordinator.async_refresh() if not config_entry.options[CONF_MANUAL]: if hass.state == CoreState.running: await _enable_scheduled_speedtests() if not coordinator.last_update_success: raise ConfigEntryNotReady else: # Running a speed test during startup can prevent # integrations from being able to setup because it # can saturate the network interface. 
hass.bus.async_listen_once( EVENT_HOMEASSISTANT_STARTED, _enable_scheduled_speedtests ) hass.data[DOMAIN] = coordinator hass.async_create_task( hass.config_entries.async_forward_entry_setup(config_entry, "sensor") ) return True async def async_unload_entry(hass, config_entry): """Unload SpeedTest Entry from config_entry.""" hass.services.async_remove(DOMAIN, SPEED_TEST_SERVICE) hass.data[DOMAIN].async_unload() await hass.config_entries.async_forward_entry_unload(config_entry, "sensor") hass.data.pop(DOMAIN) return True class SpeedTestDataCoordinator(DataUpdateCoordinator): """Get the latest data from speedtest.net.""" def __init__(self, hass, config_entry): """Initialize the data object.""" self.hass = hass self.config_entry = config_entry self.api = None self.servers = {} self._unsub_update_listener = None super().__init__( self.hass, _LOGGER, name=DOMAIN, update_method=self.async_update, ) def update_servers(self): """Update list of test servers.""" try: server_list = self.api.get_servers() except speedtest.ConfigRetrievalError: _LOGGER.debug("Error retrieving server list") return self.servers[DEFAULT_SERVER] = {} for server in sorted( server_list.values(), key=lambda server: server[0]["country"] + server[0]["sponsor"], ): self.servers[ f"{server[0]['country']} - {server[0]['sponsor']} - {server[0]['name']}" ] = server[0] def update_data(self): """Get the latest data from speedtest.net.""" self.update_servers() self.api.closest.clear() if self.config_entry.options.get(CONF_SERVER_ID): server_id = self.config_entry.options.get(CONF_SERVER_ID) self.api.get_servers(servers=[server_id]) self.api.get_best_server() _LOGGER.debug( "Executing speedtest.net speed test with server_id: %s", self.api.best["id"] ) self.api.download() self.api.upload() return self.api.results.dict() async def async_update(self, *_): """Update Speedtest data.""" try: return await self.hass.async_add_executor_job(self.update_data) except (speedtest.ConfigRetrievalError, speedtest.NoMatchedServers) as err: raise UpdateFailed from err async def async_set_options(self): """Set options for entry.""" if not self.config_entry.options: data = {**self.config_entry.data} options = { CONF_SCAN_INTERVAL: data.pop(CONF_SCAN_INTERVAL, DEFAULT_SCAN_INTERVAL), CONF_MANUAL: data.pop(CONF_MANUAL, False), CONF_SERVER_ID: str(data.pop(CONF_SERVER_ID, "")), } self.hass.config_entries.async_update_entry( self.config_entry, data=data, options=options ) async def async_setup(self): """Set up SpeedTest.""" try: self.api = await self.hass.async_add_executor_job(speedtest.Speedtest) except speedtest.ConfigRetrievalError as err: raise ConfigEntryNotReady from err async def request_update(call): """Request update.""" await self.async_request_refresh() await self.async_set_options() await self.hass.async_add_executor_job(self.update_servers) self.hass.services.async_register(DOMAIN, SPEED_TEST_SERVICE, request_update) self._unsub_update_listener = self.config_entry.add_update_listener( options_updated_listener ) @callback def async_unload(self): """Unload the coordinator.""" if not self._unsub_update_listener: return self._unsub_update_listener() self._unsub_update_listener = None async def options_updated_listener(hass, entry): """Handle options update.""" if entry.options[CONF_MANUAL]: hass.data[DOMAIN].update_interval = None return hass.data[DOMAIN].update_interval = timedelta( minutes=entry.options[CONF_SCAN_INTERVAL] ) await hass.data[DOMAIN].async_request_refresh()
"""Test Hue init with multiple bridges.""" from unittest.mock import Mock, patch from aiohue.groups import Groups from aiohue.lights import Lights from aiohue.scenes import Scenes from aiohue.sensors import Sensors import pytest from homeassistant import config_entries from homeassistant.components import hue from homeassistant.components.hue import sensor_base as hue_sensor_base from homeassistant.setup import async_setup_component async def setup_component(hass): """Hue component.""" with patch.object(hue, "async_setup_entry", return_value=True): assert ( await async_setup_component( hass, hue.DOMAIN, {}, ) is True ) async def test_hue_activate_scene_both_responds( hass, mock_bridge1, mock_bridge2, mock_config_entry1, mock_config_entry2 ): """Test that makes both bridges successfully activate a scene.""" await setup_component(hass) await setup_bridge(hass, mock_bridge1, mock_config_entry1) await setup_bridge(hass, mock_bridge2, mock_config_entry2) with patch.object( mock_bridge1, "hue_activate_scene", return_value=None ) as mock_hue_activate_scene1, patch.object( mock_bridge2, "hue_activate_scene", return_value=None ) as mock_hue_activate_scene2: await hass.services.async_call( "hue", "hue_activate_scene", {"group_name": "group_2", "scene_name": "my_scene"}, blocking=True, ) mock_hue_activate_scene1.assert_called_once() mock_hue_activate_scene2.assert_called_once() async def test_hue_activate_scene_one_responds( hass, mock_bridge1, mock_bridge2, mock_config_entry1, mock_config_entry2 ): """Test that makes only one bridge successfully activate a scene.""" await setup_component(hass) await setup_bridge(hass, mock_bridge1, mock_config_entry1) await setup_bridge(hass, mock_bridge2, mock_config_entry2) with patch.object( mock_bridge1, "hue_activate_scene", return_value=None ) as mock_hue_activate_scene1, patch.object( mock_bridge2, "hue_activate_scene", return_value=False ) as mock_hue_activate_scene2: await hass.services.async_call( "hue", "hue_activate_scene", {"group_name": "group_2", "scene_name": "my_scene"}, blocking=True, ) mock_hue_activate_scene1.assert_called_once() mock_hue_activate_scene2.assert_called_once() async def test_hue_activate_scene_zero_responds( hass, mock_bridge1, mock_bridge2, mock_config_entry1, mock_config_entry2 ): """Test that makes no bridge successfully activate a scene.""" await setup_component(hass) await setup_bridge(hass, mock_bridge1, mock_config_entry1) await setup_bridge(hass, mock_bridge2, mock_config_entry2) with patch.object( mock_bridge1, "hue_activate_scene", return_value=False ) as mock_hue_activate_scene1, patch.object( mock_bridge2, "hue_activate_scene", return_value=False ) as mock_hue_activate_scene2: await hass.services.async_call( "hue", "hue_activate_scene", {"group_name": "group_2", "scene_name": "my_scene"}, blocking=True, ) # both were retried assert mock_hue_activate_scene1.call_count == 2 assert mock_hue_activate_scene2.call_count == 2 async def setup_bridge(hass, mock_bridge, config_entry): """Load the Hue light platform with the provided bridge.""" mock_bridge.config_entry = config_entry hass.data[hue.DOMAIN][config_entry.entry_id] = mock_bridge await hass.config_entries.async_forward_entry_setup(config_entry, "light") # To flush out the service call to update the group await hass.async_block_till_done() @pytest.fixture def mock_config_entry1(hass): """Mock a config entry.""" return create_config_entry() @pytest.fixture def mock_config_entry2(hass): """Mock a config entry.""" return create_config_entry() def create_config_entry(): 
"""Mock a config entry.""" return config_entries.ConfigEntry( 1, hue.DOMAIN, "Mock Title", {"host": "mock-host"}, "test", config_entries.CONN_CLASS_LOCAL_POLL, system_options={}, ) @pytest.fixture def mock_bridge1(hass): """Mock a Hue bridge.""" return create_mock_bridge(hass) @pytest.fixture def mock_bridge2(hass): """Mock a Hue bridge.""" return create_mock_bridge(hass) def create_mock_bridge(hass): """Create a mock Hue bridge.""" bridge = Mock( hass=hass, available=True, authorized=True, allow_unreachable=False, allow_groups=False, api=Mock(), reset_jobs=[], spec=hue.HueBridge, ) bridge.sensor_manager = hue_sensor_base.SensorManager(bridge) bridge.mock_requests = [] async def mock_request(method, path, **kwargs): kwargs["method"] = method kwargs["path"] = path bridge.mock_requests.append(kwargs) return {} async def async_request_call(task): await task() bridge.async_request_call = async_request_call bridge.api.config.apiversion = "9.9.9" bridge.api.lights = Lights({}, mock_request) bridge.api.groups = Groups({}, mock_request) bridge.api.sensors = Sensors({}, mock_request) bridge.api.scenes = Scenes({}, mock_request) return bridge
turbokongen/home-assistant
tests/components/hue/test_init_multiple_bridges.py
homeassistant/components/speedtestdotnet/__init__.py
""" Module with location helpers. detect_location_info and elevation are mocked by default during tests. """ import asyncio import collections import math from typing import Any, Dict, Optional, Tuple import aiohttp ELEVATION_URL = "https://api.open-elevation.com/api/v1/lookup" IP_API = "http://ip-api.com/json" IPAPI = "https://ipapi.co/json/" # Constants from https://github.com/maurycyp/vincenty # Earth ellipsoid according to WGS 84 # Axis a of the ellipsoid (Radius of the earth in meters) AXIS_A = 6378137 # Flattening f = (a-b) / a FLATTENING = 1 / 298.257223563 # Axis b of the ellipsoid in meters. AXIS_B = 6356752.314245 MILES_PER_KILOMETER = 0.621371 MAX_ITERATIONS = 200 CONVERGENCE_THRESHOLD = 1e-12 LocationInfo = collections.namedtuple( "LocationInfo", [ "ip", "country_code", "country_name", "region_code", "region_name", "city", "zip_code", "time_zone", "latitude", "longitude", "use_metric", ], ) async def async_detect_location_info( session: aiohttp.ClientSession, ) -> Optional[LocationInfo]: """Detect location information.""" data = await _get_ipapi(session) if data is None: data = await _get_ip_api(session) if data is None: return None data["use_metric"] = data["country_code"] not in ("US", "MM", "LR") return LocationInfo(**data) def distance( lat1: Optional[float], lon1: Optional[float], lat2: float, lon2: float ) -> Optional[float]: """Calculate the distance in meters between two points. Async friendly. """ if lat1 is None or lon1 is None: return None result = vincenty((lat1, lon1), (lat2, lon2)) if result is None: return None return result * 1000 # Author: https://github.com/maurycyp # Source: https://github.com/maurycyp/vincenty # License: https://github.com/maurycyp/vincenty/blob/master/LICENSE def vincenty( point1: Tuple[float, float], point2: Tuple[float, float], miles: bool = False ) -> Optional[float]: """ Vincenty formula (inverse method) to calculate the distance. Result in kilometers or miles between two points on the surface of a spheroid. Async friendly. 
""" # short-circuit coincident points if point1[0] == point2[0] and point1[1] == point2[1]: return 0.0 # pylint: disable=invalid-name U1 = math.atan((1 - FLATTENING) * math.tan(math.radians(point1[0]))) U2 = math.atan((1 - FLATTENING) * math.tan(math.radians(point2[0]))) L = math.radians(point2[1] - point1[1]) Lambda = L sinU1 = math.sin(U1) cosU1 = math.cos(U1) sinU2 = math.sin(U2) cosU2 = math.cos(U2) for _ in range(MAX_ITERATIONS): sinLambda = math.sin(Lambda) cosLambda = math.cos(Lambda) sinSigma = math.sqrt( (cosU2 * sinLambda) ** 2 + (cosU1 * sinU2 - sinU1 * cosU2 * cosLambda) ** 2 ) if sinSigma == 0.0: return 0.0 # coincident points cosSigma = sinU1 * sinU2 + cosU1 * cosU2 * cosLambda sigma = math.atan2(sinSigma, cosSigma) sinAlpha = cosU1 * cosU2 * sinLambda / sinSigma cosSqAlpha = 1 - sinAlpha ** 2 try: cos2SigmaM = cosSigma - 2 * sinU1 * sinU2 / cosSqAlpha except ZeroDivisionError: cos2SigmaM = 0 C = FLATTENING / 16 * cosSqAlpha * (4 + FLATTENING * (4 - 3 * cosSqAlpha)) LambdaPrev = Lambda Lambda = L + (1 - C) * FLATTENING * sinAlpha * ( sigma + C * sinSigma * (cos2SigmaM + C * cosSigma * (-1 + 2 * cos2SigmaM ** 2)) ) if abs(Lambda - LambdaPrev) < CONVERGENCE_THRESHOLD: break # successful convergence else: return None # failure to converge uSq = cosSqAlpha * (AXIS_A ** 2 - AXIS_B ** 2) / (AXIS_B ** 2) A = 1 + uSq / 16384 * (4096 + uSq * (-768 + uSq * (320 - 175 * uSq))) B = uSq / 1024 * (256 + uSq * (-128 + uSq * (74 - 47 * uSq))) deltaSigma = ( B * sinSigma * ( cos2SigmaM + B / 4 * ( cosSigma * (-1 + 2 * cos2SigmaM ** 2) - B / 6 * cos2SigmaM * (-3 + 4 * sinSigma ** 2) * (-3 + 4 * cos2SigmaM ** 2) ) ) ) s = AXIS_B * A * (sigma - deltaSigma) s /= 1000 # Conversion of meters to kilometers if miles: s *= MILES_PER_KILOMETER # kilometers to miles return round(s, 6) async def _get_ipapi(session: aiohttp.ClientSession) -> Optional[Dict[str, Any]]: """Query ipapi.co for location data.""" try: resp = await session.get(IPAPI, timeout=5) except (aiohttp.ClientError, asyncio.TimeoutError): return None try: raw_info = await resp.json() except (aiohttp.ClientError, ValueError): return None # ipapi allows 30k free requests/month. Some users exhaust those. if raw_info.get("latitude") == "Sign up to access": return None return { "ip": raw_info.get("ip"), "country_code": raw_info.get("country"), "country_name": raw_info.get("country_name"), "region_code": raw_info.get("region_code"), "region_name": raw_info.get("region"), "city": raw_info.get("city"), "zip_code": raw_info.get("postal"), "time_zone": raw_info.get("timezone"), "latitude": raw_info.get("latitude"), "longitude": raw_info.get("longitude"), } async def _get_ip_api(session: aiohttp.ClientSession) -> Optional[Dict[str, Any]]: """Query ip-api.com for location data.""" try: resp = await session.get(IP_API, timeout=5) except (aiohttp.ClientError, asyncio.TimeoutError): return None try: raw_info = await resp.json() except (aiohttp.ClientError, ValueError): return None return { "ip": raw_info.get("query"), "country_code": raw_info.get("countryCode"), "country_name": raw_info.get("country"), "region_code": raw_info.get("region"), "region_name": raw_info.get("regionName"), "city": raw_info.get("city"), "zip_code": raw_info.get("zip"), "time_zone": raw_info.get("timezone"), "latitude": raw_info.get("lat"), "longitude": raw_info.get("lon"), }
"""Test Hue init with multiple bridges.""" from unittest.mock import Mock, patch from aiohue.groups import Groups from aiohue.lights import Lights from aiohue.scenes import Scenes from aiohue.sensors import Sensors import pytest from homeassistant import config_entries from homeassistant.components import hue from homeassistant.components.hue import sensor_base as hue_sensor_base from homeassistant.setup import async_setup_component async def setup_component(hass): """Hue component.""" with patch.object(hue, "async_setup_entry", return_value=True): assert ( await async_setup_component( hass, hue.DOMAIN, {}, ) is True ) async def test_hue_activate_scene_both_responds( hass, mock_bridge1, mock_bridge2, mock_config_entry1, mock_config_entry2 ): """Test that makes both bridges successfully activate a scene.""" await setup_component(hass) await setup_bridge(hass, mock_bridge1, mock_config_entry1) await setup_bridge(hass, mock_bridge2, mock_config_entry2) with patch.object( mock_bridge1, "hue_activate_scene", return_value=None ) as mock_hue_activate_scene1, patch.object( mock_bridge2, "hue_activate_scene", return_value=None ) as mock_hue_activate_scene2: await hass.services.async_call( "hue", "hue_activate_scene", {"group_name": "group_2", "scene_name": "my_scene"}, blocking=True, ) mock_hue_activate_scene1.assert_called_once() mock_hue_activate_scene2.assert_called_once() async def test_hue_activate_scene_one_responds( hass, mock_bridge1, mock_bridge2, mock_config_entry1, mock_config_entry2 ): """Test that makes only one bridge successfully activate a scene.""" await setup_component(hass) await setup_bridge(hass, mock_bridge1, mock_config_entry1) await setup_bridge(hass, mock_bridge2, mock_config_entry2) with patch.object( mock_bridge1, "hue_activate_scene", return_value=None ) as mock_hue_activate_scene1, patch.object( mock_bridge2, "hue_activate_scene", return_value=False ) as mock_hue_activate_scene2: await hass.services.async_call( "hue", "hue_activate_scene", {"group_name": "group_2", "scene_name": "my_scene"}, blocking=True, ) mock_hue_activate_scene1.assert_called_once() mock_hue_activate_scene2.assert_called_once() async def test_hue_activate_scene_zero_responds( hass, mock_bridge1, mock_bridge2, mock_config_entry1, mock_config_entry2 ): """Test that makes no bridge successfully activate a scene.""" await setup_component(hass) await setup_bridge(hass, mock_bridge1, mock_config_entry1) await setup_bridge(hass, mock_bridge2, mock_config_entry2) with patch.object( mock_bridge1, "hue_activate_scene", return_value=False ) as mock_hue_activate_scene1, patch.object( mock_bridge2, "hue_activate_scene", return_value=False ) as mock_hue_activate_scene2: await hass.services.async_call( "hue", "hue_activate_scene", {"group_name": "group_2", "scene_name": "my_scene"}, blocking=True, ) # both were retried assert mock_hue_activate_scene1.call_count == 2 assert mock_hue_activate_scene2.call_count == 2 async def setup_bridge(hass, mock_bridge, config_entry): """Load the Hue light platform with the provided bridge.""" mock_bridge.config_entry = config_entry hass.data[hue.DOMAIN][config_entry.entry_id] = mock_bridge await hass.config_entries.async_forward_entry_setup(config_entry, "light") # To flush out the service call to update the group await hass.async_block_till_done() @pytest.fixture def mock_config_entry1(hass): """Mock a config entry.""" return create_config_entry() @pytest.fixture def mock_config_entry2(hass): """Mock a config entry.""" return create_config_entry() def create_config_entry(): 
"""Mock a config entry.""" return config_entries.ConfigEntry( 1, hue.DOMAIN, "Mock Title", {"host": "mock-host"}, "test", config_entries.CONN_CLASS_LOCAL_POLL, system_options={}, ) @pytest.fixture def mock_bridge1(hass): """Mock a Hue bridge.""" return create_mock_bridge(hass) @pytest.fixture def mock_bridge2(hass): """Mock a Hue bridge.""" return create_mock_bridge(hass) def create_mock_bridge(hass): """Create a mock Hue bridge.""" bridge = Mock( hass=hass, available=True, authorized=True, allow_unreachable=False, allow_groups=False, api=Mock(), reset_jobs=[], spec=hue.HueBridge, ) bridge.sensor_manager = hue_sensor_base.SensorManager(bridge) bridge.mock_requests = [] async def mock_request(method, path, **kwargs): kwargs["method"] = method kwargs["path"] = path bridge.mock_requests.append(kwargs) return {} async def async_request_call(task): await task() bridge.async_request_call = async_request_call bridge.api.config.apiversion = "9.9.9" bridge.api.lights = Lights({}, mock_request) bridge.api.groups = Groups({}, mock_request) bridge.api.sensors = Sensors({}, mock_request) bridge.api.scenes = Scenes({}, mock_request) return bridge
turbokongen/home-assistant
tests/components/hue/test_init_multiple_bridges.py
homeassistant/util/location.py
import logging import pprint import os import sys import time import numbers from jmbase import get_log, jmprint, bintohex, hextobin from .configure import jm_single, validate_address, is_burn_destination from .schedule import human_readable_schedule_entry, tweak_tumble_schedule,\ schedule_to_text from .wallet import BaseWallet, estimate_tx_fee, compute_tx_locktime, \ FidelityBondMixin from jmbitcoin import make_shuffled_tx, amount_to_str, mk_burn_script,\ PartiallySignedTransaction, CMutableTxOut,\ human_readable_transaction, Hash160 from jmbase.support import EXIT_SUCCESS log = get_log() """ Utility functions for tumbler-style takers; Currently re-used by CLI script tumbler.py and joinmarket-qt """ def direct_send(wallet_service, amount, mixdepth, destination, answeryes=False, accept_callback=None, info_callback=None, error_callback=None, return_transaction=False, with_final_psbt=False, optin_rbf=False, custom_change_addr=None): """Send coins directly from one mixdepth to one destination address; does not need IRC. Sweep as for normal sendpayment (set amount=0). If answeryes is True, callback/command line query is not performed. If optin_rbf is True, the nSequence values are changed as appropriate. If accept_callback is None, command line input for acceptance is assumed, else this callback is called: accept_callback: ==== args: deserialized tx, destination address, amount in satoshis, fee in satoshis, custom change address returns: True if accepted, False if not ==== info_callback and error_callback takes one parameter, the information message (when tx is pushed or error occured), and returns nothing. This function returns: 1. False if there is any failure. 2. The txid if transaction is pushed, and return_transaction is False, and with_final_psbt is False. 3. The full CMutableTransaction if return_transaction is True and with_final_psbt is False. 4. The PSBT object if with_final_psbt is True, and in this case the transaction is *NOT* broadcast. """ #Sanity checks assert validate_address(destination)[0] or is_burn_destination(destination) assert custom_change_addr is None or validate_address(custom_change_addr)[0] assert amount > 0 or custom_change_addr is None assert isinstance(mixdepth, numbers.Integral) assert mixdepth >= 0 assert isinstance(amount, numbers.Integral) assert amount >=0 assert isinstance(wallet_service.wallet, BaseWallet) if is_burn_destination(destination): #Additional checks if not isinstance(wallet_service.wallet, FidelityBondMixin): log.error("Only fidelity bond wallets can burn coins") return if answeryes: log.error("Burning coins not allowed without asking for confirmation") return if mixdepth != FidelityBondMixin.FIDELITY_BOND_MIXDEPTH: log.error("Burning coins only allowed from mixdepth " + str( FidelityBondMixin.FIDELITY_BOND_MIXDEPTH)) return if amount != 0: log.error("Only sweeping allowed when burning coins, to keep the tx " + "small. Tip: use the coin control feature to freeze utxos") return txtype = wallet_service.get_txtype() if amount == 0: #doing a sweep utxos = wallet_service.get_utxos_by_mixdepth()[mixdepth] if utxos == {}: log.error( "There are no available utxos in mixdepth: " + str(mixdepth) + ", quitting.") return total_inputs_val = sum([va['value'] for u, va in utxos.items()]) if is_burn_destination(destination): if len(utxos) > 1: log.error("Only one input allowed when burning coins, to keep " + "the tx small. 
Tip: use the coin control feature to freeze utxos") return address_type = FidelityBondMixin.BIP32_BURN_ID index = wallet_service.wallet.get_next_unused_index(mixdepth, address_type) path = wallet_service.wallet.get_path(mixdepth, address_type, index) privkey, engine = wallet_service.wallet._get_key_from_path(path) pubkey = engine.privkey_to_pubkey(privkey) pubkeyhash = Hash160(pubkey) #size of burn output is slightly different from regular outputs burn_script = mk_burn_script(pubkeyhash) fee_est = estimate_tx_fee(len(utxos), 0, txtype=txtype, extra_bytes=len(burn_script)/2) outs = [{"script": burn_script, "value": total_inputs_val - fee_est}] destination = "BURNER OUTPUT embedding pubkey at " \ + wallet_service.wallet.get_path_repr(path) \ + "\n\nWARNING: This transaction if broadcasted will PERMANENTLY DESTROY your bitcoins\n" else: #regular sweep (non-burn) fee_est = estimate_tx_fee(len(utxos), 1, txtype=txtype) outs = [{"address": destination, "value": total_inputs_val - fee_est}] else: #not doing a sweep; we will have change #8 inputs to be conservative initial_fee_est = estimate_tx_fee(8,2, txtype=txtype) utxos = wallet_service.select_utxos(mixdepth, amount + initial_fee_est) if len(utxos) < 8: fee_est = estimate_tx_fee(len(utxos), 2, txtype=txtype) else: fee_est = initial_fee_est total_inputs_val = sum([va['value'] for u, va in utxos.items()]) changeval = total_inputs_val - fee_est - amount outs = [{"value": amount, "address": destination}] change_addr = wallet_service.get_internal_addr(mixdepth) if custom_change_addr is None \ else custom_change_addr outs.append({"value": changeval, "address": change_addr}) #compute transaction locktime, has special case for spending timelocked coins tx_locktime = compute_tx_locktime() if mixdepth == FidelityBondMixin.FIDELITY_BOND_MIXDEPTH and \ isinstance(wallet_service.wallet, FidelityBondMixin): for outpoint, utxo in utxos.items(): path = wallet_service.script_to_path( wallet_service.addr_to_script(utxo["address"])) if not FidelityBondMixin.is_timelocked_path(path): continue path_locktime = path[-1] tx_locktime = max(tx_locktime, path_locktime+1) #compute_tx_locktime() gives a locktime in terms of block height #timelocked addresses use unix time instead #OP_CHECKLOCKTIMEVERIFY can only compare like with like, so we #must use unix time as the transaction locktime #Now ready to construct transaction log.info("Using a fee of: " + amount_to_str(fee_est) + ".") if amount != 0: log.info("Using a change value of: " + amount_to_str(changeval) + ".") tx = make_shuffled_tx(list(utxos.keys()), outs, 2, tx_locktime) if optin_rbf: for inp in tx.vin: inp.nSequence = 0xffffffff - 2 inscripts = {} spent_outs = [] for i, txinp in enumerate(tx.vin): u = (txinp.prevout.hash[::-1], txinp.prevout.n) inscripts[i] = (utxos[u]["script"], utxos[u]["value"]) spent_outs.append(CMutableTxOut(utxos[u]["value"], utxos[u]["script"])) if with_final_psbt: # here we have the PSBTWalletMixin do the signing stage # for us: new_psbt = wallet_service.create_psbt_from_tx(tx, spent_outs=spent_outs) serialized_psbt, err = wallet_service.sign_psbt(new_psbt.serialize()) if err: log.error("Failed to sign PSBT, quitting. Error message: " + err) return False new_psbt_signed = PartiallySignedTransaction.deserialize(serialized_psbt) print("Completed PSBT created: ") print(wallet_service.human_readable_psbt(new_psbt_signed)) return new_psbt_signed else: success, msg = wallet_service.sign_tx(tx, inscripts) if not success: log.error("Failed to sign transaction, quitting. 
Error msg: " + msg) return log.info("Got signed transaction:\n") log.info(human_readable_transaction(tx)) actual_amount = amount if amount != 0 else total_inputs_val - fee_est sending_info = "Sends: " + amount_to_str(actual_amount) + \ " to destination: " + destination if custom_change_addr: sending_info += ", custom change to: " + custom_change_addr log.info(sending_info) if not answeryes: if not accept_callback: if input('Would you like to push to the network? (y/n):')[0] != 'y': log.info("You chose not to broadcast the transaction, quitting.") return False else: accepted = accept_callback(human_readable_transaction(tx), destination, actual_amount, fee_est, custom_change_addr) if not accepted: return False if jm_single().bc_interface.pushtx(tx.serialize()): txid = bintohex(tx.GetTxid()[::-1]) successmsg = "Transaction sent: " + txid cb = log.info if not info_callback else info_callback cb(successmsg) txinfo = txid if not return_transaction else tx return txinfo else: errormsg = "Transaction broadcast failed!" cb = log.error if not error_callback else error_callback cb(errormsg) return False def get_tumble_log(logsdir): tumble_log = logging.getLogger('tumbler') tumble_log.setLevel(logging.DEBUG) logFormatter = logging.Formatter( ('%(asctime)s %(message)s')) fileHandler = logging.FileHandler(os.path.join(logsdir, 'TUMBLE.log')) fileHandler.setFormatter(logFormatter) tumble_log.addHandler(fileHandler) return tumble_log def restart_wait(txid): """ Returns true only if the transaction txid is seen in the wallet, and confirmed (it must be an in-wallet transaction since it always spends coins from the wallet). """ res = jm_single().bc_interface.get_transaction(hextobin(txid)) if not res: return False if res["confirmations"] == 0: return False if res["confirmations"] < 0: log.warn("Tx: " + txid + " has a conflict, abandoning.") sys.exit(EXIT_SUCCESS) else: log.debug("Tx: " + str(txid) + " has " + str( res["confirmations"]) + " confirmations.") return True def restart_waiter(txid): """Given a txid, wait for confirmation by polling the blockchain interface instance. Note that this is currently blocking, so only used by the CLI version; the Qt/GUI uses the underlying restart_wait() fn. """ ctr = 0 log.info("Waiting for confirmation of last transaction: " + str(txid)) while True: time.sleep(10) ctr += 1 if not (ctr % 12): log.debug("Still waiting for confirmation of last transaction ...") if restart_wait(txid): break log.info("The previous transaction is now in a block; continuing.") def unconf_update(taker, schedulefile, tumble_log, addtolog=False): """Provide a Taker object, a schedulefile path for the current schedule, a logging instance for TUMBLE.log, and a parameter for whether to update TUMBLE.log. Makes the necessary state updates explained below, including to the wallet. Note that this is re-used for confirmation with addtolog=False, to avoid a repeated entry in the log. """ #on taker side, cache index update is only required after tx #push, to avoid potential of address reuse in case of a crash, #because addresses are not public until broadcast (whereas for makers, #they are public *during* negotiation). So updating the cache here #is sufficient taker.wallet_service.save_wallet() #If honest-only was set, and we are going to continue (e.g. Tumbler), #we switch off the honest-only filter. We also wipe the honest maker #list, because the intention is to isolate the source of liquidity #to exactly those that participated, in 1 transaction (i.e. it's a 1 #transaction feature). 
This code is here because it *must* be called #before any continuation, even if confirm_callback happens before #unconfirm_callback taker.set_honest_only(False) taker.honest_makers = [] #We persist the fact that the transaction is complete to the #schedule file. Note that if a tweak to the schedule occurred, #it only affects future (non-complete) transactions, so the final #full record should always be accurate; but TUMBLE.log should be #used for checking what actually happened. completion_flag = 1 if not addtolog else taker.txid taker.schedule[taker.schedule_index][-1] = completion_flag with open(schedulefile, "wb") as f: f.write(schedule_to_text(taker.schedule)) if addtolog: tumble_log.info("Completed successfully this entry:") #the log output depends on if it's to INTERNAL hrdestn = None if taker.schedule[taker.schedule_index][3] in ["INTERNAL", "addrask"]: hrdestn = taker.my_cj_addr #Whether sweep or not, the amt is not in satoshis; use taker data hramt = taker.cjamount tumble_log.info(human_readable_schedule_entry( taker.schedule[taker.schedule_index], hramt, hrdestn)) tumble_log.info("Txid was: " + taker.txid) def tumbler_taker_finished_update(taker, schedulefile, tumble_log, options, res, fromtx=False, waittime=0.0, txdetails=None): """on_finished_callback processing for tumbler. Note that this is *not* the full callback, but provides common processing across command line and other GUI versions. """ if fromtx == "unconfirmed": #unconfirmed event means transaction has been propagated, #we update state to prevent accidentally re-creating it in #any crash/restart condition unconf_update(taker, schedulefile, tumble_log, True) return if fromtx: if res: #this has no effect except in the rare case that confirmation #is immediate; also it does not repeat the log entry. unconf_update(taker, schedulefile, tumble_log, False) #note that Qt does not yet support 'addrask', so this is only #for command line script TODO if taker.schedule[taker.schedule_index+1][3] == 'addrask': jm_single().debug_silence[0] = True jmprint('\n'.join(['=' * 60] * 3)) jmprint('Tumbler requires more addresses to stop amount correlation') jmprint('Obtain a new destination address from your bitcoin recipient') jmprint(' for example click the button that gives a new deposit address') jmprint('\n'.join(['=' * 60] * 1)) while True: destaddr = input('insert new address: ') addr_valid, errormsg = validate_address(destaddr) if addr_valid: break jmprint( 'Address ' + destaddr + ' invalid. ' + errormsg + ' try again', "warning") jm_single().debug_silence[0] = False taker.schedule[taker.schedule_index+1][3] = destaddr taker.tdestaddrs.append(destaddr) waiting_message = "Waiting for: " + str(waittime) + " minutes." tumble_log.info(waiting_message) log.info(waiting_message) else: # a transaction failed, either because insufficient makers # (acording to minimum_makers) responded in Phase 1, or not all # makers responded in Phase 2, or the tx was a mempool conflict. # If the tx was a mempool conflict, we should restart with random # maker choice as usual. If someone didn't respond, we'll try to # repeat without the troublemakers. log.info("Schedule entry: " + str( taker.schedule[taker.schedule_index]) + \ " failed after timeout, trying again") taker.add_ignored_makers(taker.nonrespondants) #Is the failure in Phase 2? if not taker.latest_tx is None: if len(taker.nonrespondants) == 0: # transaction was created validly but conflicted in the # mempool; just try again without honest settings; # i.e. fallback to same as Phase 1 failure. 
log.info("Invalid transaction; possible mempool conflict.") else: #Now we have to set the specific group we want to use, and hopefully #they will respond again as they showed honesty last time. #Note that we must wipe the list first; other honest makers needn't #have the right settings (e.g. max cjamount), so can't be carried #over from earlier transactions. taker.honest_makers = [] taker.add_honest_makers(list(set( taker.maker_utxo_data.keys()).symmetric_difference( set(taker.nonrespondants)))) #If insufficient makers were honest, we can only tweak the schedule. #If enough were, we prefer to restart with them only: log.info("Inside a Phase 2 failure; number of honest " "respondants was: " + str(len(taker.honest_makers))) log.info("They were: " + str(taker.honest_makers)) if len(taker.honest_makers) >= jm_single().config.getint( "POLICY", "minimum_makers"): tumble_log.info("Transaction attempt failed, attempting to " "restart with subset.") tumble_log.info("The paramaters of the failed attempt: ") tumble_log.info(str(taker.schedule[taker.schedule_index])) #we must reset the number of counterparties, as well as fix who they #are; this is because the number is used to e.g. calculate fees. #cleanest way is to reset the number in the schedule before restart. taker.schedule[taker.schedule_index][2] = len(taker.honest_makers) retry_str = "Retrying with: " + str(taker.schedule[ taker.schedule_index][2]) + " counterparties." tumble_log.info(retry_str) log.info(retry_str) taker.set_honest_only(True) taker.schedule_index -= 1 return #There were not enough honest counterparties. #Tumbler is aggressive in trying to complete; we tweak the schedule #from this point in the mixdepth, then try again. tumble_log.info("Transaction attempt failed, tweaking schedule" " and trying again.") tumble_log.info("The paramaters of the failed attempt: ") tumble_log.info(str(taker.schedule[taker.schedule_index])) taker.schedule_index -= 1 taker.schedule = tweak_tumble_schedule(options, taker.schedule, taker.schedule_index, taker.tdestaddrs) tumble_log.info("We tweaked the schedule, the new schedule is:") tumble_log.info(pprint.pformat(taker.schedule)) else: if not res: failure_msg = "Did not complete successfully, shutting down" tumble_log.info(failure_msg) log.info(failure_msg) else: log.info("All transactions completed correctly") tumble_log.info("Completed successfully the last entry:") #Whether sweep or not, the amt is not in satoshis; use taker data hramt = taker.cjamount tumble_log.info(human_readable_schedule_entry( taker.schedule[taker.schedule_index], hramt)) #copy of above, TODO refactor out taker.schedule[taker.schedule_index][5] = 1 with open(schedulefile, "wb") as f: f.write(schedule_to_text(taker.schedule)) def tumbler_filter_orders_callback(orders_fees, cjamount, taker): """Since the tumbler does not use interactive fee checking, we use the -x values from the command line instead. """ orders, total_cj_fee = orders_fees abs_cj_fee = 1.0 * total_cj_fee / taker.n_counterparties rel_cj_fee = abs_cj_fee / cjamount log.info('rel/abs average fee = ' + str(rel_cj_fee) + ' / ' + str( abs_cj_fee)) if rel_cj_fee > taker.max_cj_fee[0] and abs_cj_fee > taker.max_cj_fee[1]: log.info("Rejected fees as too high according to options, will " "retry.") return "retry" return True
'''Wallet functionality tests.''' import os import json from binascii import hexlify, unhexlify import pytest import jmbitcoin as btc from commontest import ensure_bip65_activated from jmbase import get_log, hextobin from jmclient import load_test_config, jm_single, BaseWallet, \ SegwitLegacyWallet,BIP32Wallet, BIP49Wallet, LegacyWallet,\ VolatileStorage, get_network, cryptoengine, WalletError,\ SegwitWallet, WalletService, SegwitLegacyWalletFidelityBonds,\ create_wallet, open_test_wallet_maybe, \ FidelityBondMixin, FidelityBondWatchonlyWallet, wallet_gettimelockaddress from test_blockchaininterface import sync_test_wallet testdir = os.path.dirname(os.path.realpath(__file__)) test_create_wallet_filename = "testwallet_for_create_wallet_test" log = get_log() def signed_tx_is_segwit(tx): return tx.has_witness() def assert_segwit(tx): assert signed_tx_is_segwit(tx) def assert_not_segwit(tx): assert not signed_tx_is_segwit(tx) def get_populated_wallet(amount=10**8, num=3): storage = VolatileStorage() SegwitLegacyWallet.initialize(storage, get_network()) wallet = SegwitLegacyWallet(storage) # fund three wallet addresses at mixdepth 0 for i in range(num): fund_wallet_addr(wallet, wallet.get_internal_addr(0), amount / 10**8) return wallet def fund_wallet_addr(wallet, addr, value_btc=1): # special case, grab_coins returns hex from rpc: txin_id = hextobin(jm_single().bc_interface.grab_coins(addr, value_btc)) txinfo = jm_single().bc_interface.get_transaction(txin_id) txin = btc.CMutableTransaction.deserialize(btc.x(txinfo["hex"])) utxo_in = wallet.add_new_utxos(txin, 1) assert len(utxo_in) == 1 return list(utxo_in.keys())[0] def get_bip39_vectors(): fh = open(os.path.join(testdir, 'bip39vectors.json')) data = json.load(fh)['english'] fh.close() return data @pytest.mark.parametrize('entropy,mnemonic,key,xpriv', get_bip39_vectors()) def test_bip39_seeds(monkeypatch, setup_wallet, entropy, mnemonic, key, xpriv): jm_single().config.set('BLOCKCHAIN', 'network', 'mainnet') created_entropy = SegwitLegacyWallet.entropy_from_mnemonic(mnemonic) assert entropy == hexlify(created_entropy).decode('ascii') storage = VolatileStorage() SegwitLegacyWallet.initialize( storage, get_network(), entropy=created_entropy, entropy_extension='TREZOR', max_mixdepth=4) wallet = SegwitLegacyWallet(storage) assert (mnemonic, b'TREZOR') == wallet.get_mnemonic_words() assert key == hexlify(wallet._create_master_key()).decode('ascii') # need to monkeypatch this, else we'll default to the BIP-49 path monkeypatch.setattr(SegwitLegacyWallet, '_get_bip32_base_path', BIP32Wallet._get_bip32_base_path) assert xpriv == wallet.get_bip32_priv_export() def test_bip49_seed(monkeypatch, setup_wallet): jm_single().config.set('BLOCKCHAIN', 'network', 'testnet') mnemonic = 'abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon about' master_xpriv = 'tprv8ZgxMBicQKsPe5YMU9gHen4Ez3ApihUfykaqUorj9t6FDqy3nP6eoXiAo2ssvpAjoLroQxHqr3R5nE3a5dU3DHTjTgJDd7zrbniJr6nrCzd' account0_xpriv = 'tprv8gRrNu65W2Msef2BdBSUgFdRTGzC8EwVXnV7UGS3faeXtuMVtGfEdidVeGbThs4ELEoayCAzZQ4uUji9DUiAs7erdVskqju7hrBcDvDsdbY' addr0_script_hash = '336caa13e08b96080a32b5d818d59b4ab3b36742' entropy = SegwitLegacyWallet.entropy_from_mnemonic(mnemonic) storage = VolatileStorage() SegwitLegacyWallet.initialize( storage, get_network(), entropy=entropy, max_mixdepth=0) wallet = SegwitLegacyWallet(storage) assert (mnemonic, None) == wallet.get_mnemonic_words() assert account0_xpriv == wallet.get_bip32_priv_export(0) assert addr0_script_hash == 
hexlify(wallet.get_external_script(0)[2:-1]).decode('ascii') # FIXME: is this desired behaviour? BIP49 wallet will not return xpriv for # the root key but only for key after base path monkeypatch.setattr(SegwitLegacyWallet, '_get_bip32_base_path', BIP32Wallet._get_bip32_base_path) assert master_xpriv == wallet.get_bip32_priv_export() def test_bip32_test_vector_1(monkeypatch, setup_wallet): jm_single().config.set('BLOCKCHAIN', 'network', 'mainnet') entropy = unhexlify('000102030405060708090a0b0c0d0e0f') storage = VolatileStorage() LegacyWallet.initialize( storage, get_network(), entropy=entropy, max_mixdepth=0) # test vector 1 is using hardened derivation for the account/mixdepth level monkeypatch.setattr(LegacyWallet, '_get_mixdepth_from_path', BIP49Wallet._get_mixdepth_from_path) monkeypatch.setattr(LegacyWallet, '_get_bip32_mixdepth_path_level', BIP49Wallet._get_bip32_mixdepth_path_level) monkeypatch.setattr(LegacyWallet, '_get_bip32_base_path', BIP32Wallet._get_bip32_base_path) monkeypatch.setattr(LegacyWallet, '_create_master_key', BIP32Wallet._create_master_key) wallet = LegacyWallet(storage) assert wallet.get_bip32_priv_export() == 'xprv9s21ZrQH143K3QTDL4LXw2F7HEK3wJUD2nW2nRk4stbPy6cq3jPPqjiChkVvvNKmPGJxWUtg6LnF5kejMRNNU3TGtRBeJgk33yuGBxrMPHi' assert wallet.get_bip32_pub_export() == 'xpub661MyMwAqRbcFtXgS5sYJABqqG9YLmC4Q1Rdap9gSE8NqtwybGhePY2gZ29ESFjqJoCu1Rupje8YtGqsefD265TMg7usUDFdp6W1EGMcet8' assert wallet.get_bip32_priv_export(0) == 'xprv9uHRZZhk6KAJC1avXpDAp4MDc3sQKNxDiPvvkX8Br5ngLNv1TxvUxt4cV1rGL5hj6KCesnDYUhd7oWgT11eZG7XnxHrnYeSvkzY7d2bhkJ7' assert wallet.get_bip32_pub_export(0) == 'xpub68Gmy5EdvgibQVfPdqkBBCHxA5htiqg55crXYuXoQRKfDBFA1WEjWgP6LHhwBZeNK1VTsfTFUHCdrfp1bgwQ9xv5ski8PX9rL2dZXvgGDnw' assert wallet.get_bip32_priv_export(0, 1) == 'xprv9wTYmMFdV23N2TdNG573QoEsfRrWKQgWeibmLntzniatZvR9BmLnvSxqu53Kw1UmYPxLgboyZQaXwTCg8MSY3H2EU4pWcQDnRnrVA1xe8fs' assert wallet.get_bip32_pub_export(0, 1) == 'xpub6ASuArnXKPbfEwhqN6e3mwBcDTgzisQN1wXN9BJcM47sSikHjJf3UFHKkNAWbWMiGj7Wf5uMash7SyYq527Hqck2AxYysAA7xmALppuCkwQ' # there are more test vectors but those don't match joinmarket's wallet # structure, hence they make litte sense to test here def test_bip32_test_vector_2(monkeypatch, setup_wallet): jm_single().config.set('BLOCKCHAIN', 'network', 'mainnet') entropy = unhexlify('fffcf9f6f3f0edeae7e4e1dedbd8d5d2cfccc9c6c3c0bdbab7b4b1aeaba8a5a29f9c999693908d8a8784817e7b7875726f6c696663605d5a5754514e4b484542') storage = VolatileStorage() LegacyWallet.initialize( storage, get_network(), entropy=entropy, max_mixdepth=0) monkeypatch.setattr(LegacyWallet, '_get_bip32_base_path', BIP32Wallet._get_bip32_base_path) monkeypatch.setattr(LegacyWallet, '_create_master_key', BIP32Wallet._create_master_key) wallet = LegacyWallet(storage) assert wallet.get_bip32_priv_export() == 'xprv9s21ZrQH143K31xYSDQpPDxsXRTUcvj2iNHm5NUtrGiGG5e2DtALGdso3pGz6ssrdK4PFmM8NSpSBHNqPqm55Qn3LqFtT2emdEXVYsCzC2U' assert wallet.get_bip32_pub_export() == 'xpub661MyMwAqRbcFW31YEwpkMuc5THy2PSt5bDMsktWQcFF8syAmRUapSCGu8ED9W6oDMSgv6Zz8idoc4a6mr8BDzTJY47LJhkJ8UB7WEGuduB' assert wallet.get_bip32_priv_export(0) == 'xprv9vHkqa6EV4sPZHYqZznhT2NPtPCjKuDKGY38FBWLvgaDx45zo9WQRUT3dKYnjwih2yJD9mkrocEZXo1ex8G81dwSM1fwqWpWkeS3v86pgKt' assert wallet.get_bip32_pub_export(0) == 'xpub69H7F5d8KSRgmmdJg2KhpAK8SR3DjMwAdkxj3ZuxV27CprR9LgpeyGmXUbC6wb7ERfvrnKZjXoUmmDznezpbZb7ap6r1D3tgFxHmwMkQTPH' # there are more test vectors but those don't match joinmarket's wallet # structure, hence they make litte sense to test here def test_bip32_test_vector_3(monkeypatch, 
setup_wallet): jm_single().config.set('BLOCKCHAIN', 'network', 'mainnet') entropy = unhexlify('4b381541583be4423346c643850da4b320e46a87ae3d2a4e6da11eba819cd4acba45d239319ac14f863b8d5ab5a0d0c64d2e8a1e7d1457df2e5a3c51c73235be') storage = VolatileStorage() LegacyWallet.initialize( storage, get_network(), entropy=entropy, max_mixdepth=0) # test vector 3 is using hardened derivation for the account/mixdepth level monkeypatch.setattr(LegacyWallet, '_get_mixdepth_from_path', BIP49Wallet._get_mixdepth_from_path) monkeypatch.setattr(LegacyWallet, '_get_bip32_mixdepth_path_level', BIP49Wallet._get_bip32_mixdepth_path_level) monkeypatch.setattr(LegacyWallet, '_get_bip32_base_path', BIP32Wallet._get_bip32_base_path) monkeypatch.setattr(LegacyWallet, '_create_master_key', BIP32Wallet._create_master_key) wallet = LegacyWallet(storage) assert wallet.get_bip32_priv_export() == 'xprv9s21ZrQH143K25QhxbucbDDuQ4naNntJRi4KUfWT7xo4EKsHt2QJDu7KXp1A3u7Bi1j8ph3EGsZ9Xvz9dGuVrtHHs7pXeTzjuxBrCmmhgC6' assert wallet.get_bip32_pub_export() == 'xpub661MyMwAqRbcEZVB4dScxMAdx6d4nFc9nvyvH3v4gJL378CSRZiYmhRoP7mBy6gSPSCYk6SzXPTf3ND1cZAceL7SfJ1Z3GC8vBgp2epUt13' assert wallet.get_bip32_priv_export(0) == 'xprv9uPDJpEQgRQfDcW7BkF7eTya6RPxXeJCqCJGHuCJ4GiRVLzkTXBAJMu2qaMWPrS7AANYqdq6vcBcBUdJCVVFceUvJFjaPdGZ2y9WACViL4L' assert wallet.get_bip32_pub_export(0) == 'xpub68NZiKmJWnxxS6aaHmn81bvJeTESw724CRDs6HbuccFQN9Ku14VQrADWgqbhhTHBaohPX4CjNLf9fq9MYo6oDaPPLPxSb7gwQN3ih19Zm4Y' @pytest.mark.parametrize('mixdepth,internal,index,address,wif', [ [0, BaseWallet.ADDRESS_TYPE_EXTERNAL, 0, 'mpCX9EbdXpcrKMtjEe1fqFhvzctkfzMYTX', 'cVqtSSoVxFyPqTRGfeESi31uCYfgTF4tGWRtGeVs84fzybiX5TPk'], [0, BaseWallet.ADDRESS_TYPE_EXTERNAL, 5, 'mtj85a3pFppRhrxNcFig1k7ECshrZjJ9XC', 'cMsFXc4TRw9PTcCTv7x9mr88rDeGXBTLEV67mKaw2cxCkjkhL32G'], [0, BaseWallet.ADDRESS_TYPE_INTERNAL, 3, 'n1EaQuqvTRm719hsSJ7yRsj49JfoG1C86q', 'cUgSTqnAtvYoQRXCYy4wCFfaks2Zrz1d55m6mVhFyVhQbkDi7JGJ'], [2, BaseWallet.ADDRESS_TYPE_INTERNAL, 2, 'mfxkBk7uDhmF5PJGS9d1NonGiAxPwJqQP4', 'cPcZXSiXPuS5eiT4oDrDKi1mFumw5D1RcWzK2gkGdEHjEz99eyXn'] ]) def test_bip32_addresses_p2pkh(monkeypatch, setup_wallet, mixdepth, internal, index, address, wif): """ Test with a random but fixed entropy """ jm_single().config.set('BLOCKCHAIN', 'network', 'testnet') entropy = unhexlify('2e0339ba89b4a1272cdf78b27ee62669ee01992a59e836e2807051be128ca817') storage = VolatileStorage() LegacyWallet.initialize( storage, get_network(), entropy=entropy, max_mixdepth=3) monkeypatch.setattr(LegacyWallet, '_get_bip32_base_path', BIP32Wallet._get_bip32_base_path) monkeypatch.setattr(LegacyWallet, '_create_master_key', BIP32Wallet._create_master_key) wallet = LegacyWallet(storage) # wallet needs to know about all intermediate keys for i in range(index + 1): wallet.get_new_script(mixdepth, internal) assert wif == wallet.get_wif(mixdepth, internal, index) assert address == wallet.get_addr(mixdepth, internal, index) @pytest.mark.parametrize('mixdepth,internal,index,address,wif', [ [0, 0, 0, '2MzY5yyonUY7zpHspg7jB7WQs1uJxKafQe4', 'cRAGLvPmhpzJNgdMT4W2gVwEW3fusfaDqdQWM2vnWLgXKzCWKtcM'], [0, 0, 5, '2MsKvqPGStp3yXT8UivuAaGwfPzT7xYwSWk', 'cSo3h7nRuV4fwhVPXeTDJx6cBCkjAzS9VM8APXViyjoSaMq85ZKn'], [0, 1, 3, '2N7k6wiQqkuMaApwGhk3HKrifprUSDydqUv', 'cTwq3UsZa8STVmwZR94dDphgqgdLFeuaRFD1Ea44qjbjFfKEb1n5'], [2, 1, 2, '2MtE6gzHgmEXeWzKsmCJFEqkrpNuBDvoRnz', 'cPV8FZuCvrRpk4RhmhpjnSucHhaQZUan4Vbyo1NVQtuAxurW9grb'] ]) def test_bip32_addresses_p2sh_p2wpkh(setup_wallet, mixdepth, internal, index, address, wif): """ Test with a random but fixed entropy """ 
jm_single().config.set('BLOCKCHAIN', 'network', 'testnet') entropy = unhexlify('2e0339ba89b4a1272cdf78b27ee62669ee01992a59e836e2807051be128ca817') storage = VolatileStorage() SegwitLegacyWallet.initialize( storage, get_network(), entropy=entropy, max_mixdepth=3) wallet = SegwitLegacyWallet(storage) # wallet needs to know about all intermediate keys for i in range(index + 1): wallet.get_new_script(mixdepth, internal) assert wif == wallet.get_wif(mixdepth, internal, index) assert address == wallet.get_addr(mixdepth, internal, index) @pytest.mark.parametrize('index,timenumber,address,wif', [ [0, 0, 'bcrt1qndcqwedwa4lu77ryqpvp738d6p034a2fv8mufw3pw5smfcn39sgqpesn76', 'cST4g5R3mKp44K4J8PRVyys4XJu6EFavZyssq67PJKCnbhjdEdBY'], [0, 50, 'bcrt1q73zhrfcu0ttkk4er9esrmvnpl6wpzhny5aly97jj9nw52agf8ncqjv8rda', 'cST4g5R3mKp44K4J8PRVyys4XJu6EFavZyssq67PJKCnbhjdEdBY'], [5, 0, 'bcrt1qz5208jdm6399ja309ra28d0a34qlt0859u77uxc94v5mgk7auhtssau4pw', 'cRnUaBYTmyZURPe72YCrtvgxpBMvLKPZaCoXvKuWRPMryeJeAZx2'], [9, 1, 'bcrt1qa7pd6qnadpmlm29vtvqnykalc34tr33eclaz7eeqal59n4gwr28qwnka2r', 'cQCxEPCWMwXVB16zCikDBTXMUccx6ioHQipPhYEp1euihkJUafyD'] ]) def test_bip32_timelocked_addresses(setup_wallet, index, timenumber, address, wif): jm_single().config.set('BLOCKCHAIN', 'network', 'testnet') entropy = unhexlify('2e0339ba89b4a1272cdf78b27ee62669ee01992a59e836e2807051be128ca817') storage = VolatileStorage() SegwitLegacyWalletFidelityBonds.initialize( storage, get_network(), entropy=entropy, max_mixdepth=1) wallet = SegwitLegacyWalletFidelityBonds(storage) mixdepth = FidelityBondMixin.FIDELITY_BOND_MIXDEPTH address_type = FidelityBondMixin.BIP32_TIMELOCK_ID #wallet needs to know about the script beforehand wallet.get_script_and_update_map(mixdepth, address_type, index, timenumber) assert address == wallet.get_addr(mixdepth, address_type, index, timenumber) assert wif == wallet.get_wif_path(wallet.get_path(mixdepth, address_type, index, timenumber)) @pytest.mark.parametrize('timenumber,locktime_string', [ [0, "2020-01"], [20, "2021-09"], [100, "2028-05"], [150, "2032-07"], [350, "2049-03"] ]) def test_gettimelockaddress_method(setup_wallet, timenumber, locktime_string): storage = VolatileStorage() SegwitLegacyWalletFidelityBonds.initialize(storage, get_network()) wallet = SegwitLegacyWalletFidelityBonds(storage) m = FidelityBondMixin.FIDELITY_BOND_MIXDEPTH address_type = FidelityBondMixin.BIP32_TIMELOCK_ID index = wallet.get_next_unused_index(m, address_type) script = wallet.get_script_and_update_map(m, address_type, index, timenumber) addr = wallet.script_to_addr(script) addr_from_method = wallet_gettimelockaddress(wallet, locktime_string) assert addr == addr_from_method @pytest.mark.parametrize('index,wif', [ [0, 'cMg9eH3fW2JDSyggvXucjmECRwiheCMDo2Qik8y1keeYaxynzrYa'], [9, 'cURA1Qgxhd7QnhhwxCnCHD4pZddVrJdu2BkTdzNaTp9owRSkUvPy'], [50, 'cRTaHZ1eezb8s6xsT2V7EAevYToQMi7cxQD9vgFZzaJZDfhMhf3c'] ]) def test_bip32_burn_keys(setup_wallet, index, wif): jm_single().config.set('BLOCKCHAIN', 'network', 'testnet') entropy = unhexlify('2e0339ba89b4a1272cdf78b27ee62669ee01992a59e836e2807051be128ca817') storage = VolatileStorage() SegwitLegacyWalletFidelityBonds.initialize( storage, get_network(), entropy=entropy, max_mixdepth=1) wallet = SegwitLegacyWalletFidelityBonds(storage) mixdepth = FidelityBondMixin.FIDELITY_BOND_MIXDEPTH address_type = FidelityBondMixin.BIP32_BURN_ID #advance index_cache enough wallet.set_next_index(mixdepth, address_type, index, force=True) assert wif == wallet.get_wif_path(wallet.get_path(mixdepth, address_type, index)) 
def test_import_key(setup_wallet): jm_single().config.set('BLOCKCHAIN', 'network', 'testnet') storage = VolatileStorage() SegwitLegacyWallet.initialize(storage, get_network()) wallet = SegwitLegacyWallet(storage) wallet.import_private_key( 0, 'cRAGLvPmhpzJNgdMT4W2gVwEW3fusfaDqdQWM2vnWLgXKzCWKtcM') wallet.import_private_key( 1, 'cVqtSSoVxFyPqTRGfeESi31uCYfgTF4tGWRtGeVs84fzybiX5TPk') with pytest.raises(WalletError): wallet.import_private_key( 1, 'cRAGLvPmhpzJNgdMT4W2gVwEW3fusfaDqdQWM2vnWLgXKzCWKtcM') # test persist imported keys wallet.save() data = storage.file_data del wallet del storage storage = VolatileStorage(data=data) wallet = SegwitLegacyWallet(storage) imported_paths_md0 = list(wallet.yield_imported_paths(0)) imported_paths_md1 = list(wallet.yield_imported_paths(1)) assert len(imported_paths_md0) == 1 assert len(imported_paths_md1) == 1 # verify imported addresses assert wallet.get_address_from_path(imported_paths_md0[0]) == '2MzY5yyonUY7zpHspg7jB7WQs1uJxKafQe4' assert wallet.get_address_from_path(imported_paths_md1[0]) == '2MwbXnJrPP4rnwpgRhvNPP44J6tMokDexZB' # test remove key wallet.remove_imported_key(path=imported_paths_md0[0]) assert not list(wallet.yield_imported_paths(0)) assert wallet.get_details(imported_paths_md1[0]) == (1, 'imported', 0) @pytest.mark.parametrize('wif, type_check', [ ['cRAGLvPmhpzJNgdMT4W2gVwEW3fusfaDqdQWM2vnWLgXKzCWKtcM', assert_segwit] ]) def test_signing_imported(setup_wallet, wif, type_check): jm_single().config.set('BLOCKCHAIN', 'network', 'testnet') storage = VolatileStorage() SegwitLegacyWallet.initialize(storage, get_network()) wallet = SegwitLegacyWallet(storage) MIXDEPTH = 0 path = wallet.import_private_key(MIXDEPTH, wif) utxo = fund_wallet_addr(wallet, wallet.get_address_from_path(path)) # The dummy output is constructed as an unspendable p2sh: tx = btc.mktx([utxo], [{"address": str(btc.CCoinAddress.from_scriptPubKey( btc.CScript(b"\x00").to_p2sh_scriptPubKey())), "value": 10**8 - 9000}]) script = wallet.get_script_from_path(path) success, msg = wallet.sign_tx(tx, {0: (script, 10**8)}) assert success, msg type_check(tx) txout = jm_single().bc_interface.pushtx(tx.serialize()) assert txout @pytest.mark.parametrize('wallet_cls,type_check', [ [LegacyWallet, assert_not_segwit], [SegwitLegacyWallet, assert_segwit], [SegwitWallet, assert_segwit], ]) def test_signing_simple(setup_wallet, wallet_cls, type_check): jm_single().config.set('BLOCKCHAIN', 'network', 'testnet') storage = VolatileStorage() wallet_cls.initialize(storage, get_network(), entropy=b"\xaa"*16) wallet = wallet_cls(storage) utxo = fund_wallet_addr(wallet, wallet.get_internal_addr(0)) # The dummy output is constructed as an unspendable p2sh: tx = btc.mktx([utxo], [{"address": str(btc.CCoinAddress.from_scriptPubKey( btc.CScript(b"\x00").to_p2sh_scriptPubKey())), "value": 10**8 - 9000}]) script = wallet.get_script(0, BaseWallet.ADDRESS_TYPE_INTERNAL, 0) success, msg = wallet.sign_tx(tx, {0: (script, 10**8)}) assert success, msg type_check(tx) txout = jm_single().bc_interface.pushtx(tx.serialize()) assert txout def test_timelocked_output_signing(setup_wallet): jm_single().config.set('BLOCKCHAIN', 'network', 'testnet') ensure_bip65_activated() storage = VolatileStorage() SegwitLegacyWalletFidelityBonds.initialize(storage, get_network()) wallet = SegwitLegacyWalletFidelityBonds(storage) index = 0 timenumber = 0 script = wallet.get_script_and_update_map( FidelityBondMixin.FIDELITY_BOND_MIXDEPTH, FidelityBondMixin.BIP32_TIMELOCK_ID, index, timenumber) utxo = fund_wallet_addr(wallet, 
wallet.script_to_addr(script)) timestamp = wallet._time_number_to_timestamp(timenumber) tx = btc.mktx([utxo], [{"address": str(btc.CCoinAddress.from_scriptPubKey( btc.standard_scripthash_scriptpubkey(btc.Hash160(b"\x00")))), "value":10**8 - 9000}], locktime=timestamp+1) success, msg = wallet.sign_tx(tx, {0: (script, 10**8)}) assert success, msg txout = jm_single().bc_interface.pushtx(tx.serialize()) assert txout def test_get_bbm(setup_wallet): jm_single().config.set('BLOCKCHAIN', 'network', 'testnet') amount = 10**8 num_tx = 3 wallet = get_populated_wallet(amount, num_tx) # disable a utxo and check we can correctly report # balance with the disabled flag off: utxo_1 = list(wallet._utxos.get_utxos_by_mixdepth()[0].keys())[0] wallet.disable_utxo(*utxo_1) balances = wallet.get_balance_by_mixdepth(include_disabled=True) assert balances[0] == num_tx * amount balances = wallet.get_balance_by_mixdepth() assert balances[0] == (num_tx - 1) * amount wallet.toggle_disable_utxo(*utxo_1) balances = wallet.get_balance_by_mixdepth() assert balances[0] == num_tx * amount def test_add_utxos(setup_wallet): jm_single().config.set('BLOCKCHAIN', 'network', 'testnet') amount = 10**8 num_tx = 3 wallet = get_populated_wallet(amount, num_tx) balances = wallet.get_balance_by_mixdepth() assert balances[0] == num_tx * amount for md in range(1, wallet.max_mixdepth + 1): assert balances[md] == 0 utxos = wallet.get_utxos_by_mixdepth() assert len(utxos[0]) == num_tx for md in range(1, wallet.max_mixdepth + 1): assert not utxos[md] with pytest.raises(Exception): # no funds in mixdepth wallet.select_utxos(1, amount) with pytest.raises(Exception): # not enough funds wallet.select_utxos(0, amount * (num_tx + 1)) wallet.reset_utxos() assert wallet.get_balance_by_mixdepth()[0] == 0 def test_select_utxos(setup_wallet): jm_single().config.set('BLOCKCHAIN', 'network', 'testnet') amount = 10**8 wallet = get_populated_wallet(amount) utxos = wallet.select_utxos(0, amount // 2) assert len(utxos) == 1 utxos = list(utxos.keys()) more_utxos = wallet.select_utxos(0, int(amount * 1.5), utxo_filter=utxos) assert len(more_utxos) == 2 assert utxos[0] not in more_utxos def test_add_new_utxos(setup_wallet): jm_single().config.set('BLOCKCHAIN', 'network', 'testnet') wallet = get_populated_wallet(num=1) scripts = [wallet.get_new_script(x, BaseWallet.ADDRESS_TYPE_INTERNAL) for x in range(3)] tx_scripts = list(scripts) tx = btc.mktx( [(b"\x00"*32, 2)], [{"address": wallet.script_to_addr(s), "value": 10**8} for s in tx_scripts]) added = wallet.add_new_utxos(tx, 1) assert len(added) == len(scripts) added_scripts = {x['script'] for x in added.values()} for s in scripts: assert s in added_scripts balances = wallet.get_balance_by_mixdepth() assert balances[0] == 2 * 10**8 assert balances[1] == 10**8 assert balances[2] == 10**8 assert len(balances) == wallet.max_mixdepth + 1 def test_remove_old_utxos(setup_wallet): jm_single().config.set('BLOCKCHAIN', 'network', 'testnet') wallet = get_populated_wallet() # add some more utxos to mixdepth 1 for i in range(3): txin = jm_single().bc_interface.grab_coins( wallet.get_internal_addr(1), 1) wallet.add_utxo(btc.x(txin), 0, wallet.get_script(1, BaseWallet.ADDRESS_TYPE_INTERNAL, i), 10**8, 1) inputs = wallet.select_utxos(0, 10**8) inputs.update(wallet.select_utxos(1, 2 * 10**8)) assert len(inputs) == 3 tx_inputs = list(inputs.keys()) tx_inputs.append((b'\x12'*32, 6)) tx = btc.mktx(tx_inputs, [{"address": "2N9gfkUsFW7Kkb1Eurue7NzUxUt7aNJiS1U", "value": 3 * 10**8 - 1000}]) removed = wallet.remove_old_utxos(tx) 
assert len(removed) == len(inputs) for txid in removed: assert txid in inputs balances = wallet.get_balance_by_mixdepth() assert balances[0] == 2 * 10**8 assert balances[1] == 10**8 assert balances[2] == 0 assert len(balances) == wallet.max_mixdepth + 1 def test_initialize_twice(setup_wallet): wallet = get_populated_wallet(num=0) storage = wallet._storage with pytest.raises(WalletError): SegwitLegacyWallet.initialize(storage, get_network()) def test_is_known(setup_wallet): wallet = get_populated_wallet(num=0) script = wallet.get_new_script(1, BaseWallet.ADDRESS_TYPE_INTERNAL) addr = wallet.get_external_addr(2) assert wallet.is_known_script(script) assert wallet.is_known_addr(addr) assert wallet.is_known_addr(wallet.script_to_addr(script)) assert wallet.is_known_script(wallet.addr_to_script(addr)) assert not wallet.is_known_script(b'\x12' * len(script)) assert not wallet.is_known_addr('2MzY5yyonUY7zpHspg7jB7WQs1uJxKafQe4') def test_wallet_save(setup_wallet): wallet = get_populated_wallet() script = wallet.get_external_script(1) wallet.save() storage = wallet._storage data = storage.file_data del wallet del storage storage = VolatileStorage(data=data) wallet = SegwitLegacyWallet(storage) assert wallet.get_next_unused_index(0, BaseWallet.ADDRESS_TYPE_INTERNAL) == 3 assert wallet.get_next_unused_index(0, BaseWallet.ADDRESS_TYPE_EXTERNAL) == 0 assert wallet.get_next_unused_index(1, BaseWallet.ADDRESS_TYPE_INTERNAL) == 0 assert wallet.get_next_unused_index(1, BaseWallet.ADDRESS_TYPE_EXTERNAL) == 1 assert wallet.is_known_script(script) def test_set_next_index(setup_wallet): wallet = get_populated_wallet() assert wallet.get_next_unused_index(0, BaseWallet.ADDRESS_TYPE_INTERNAL) == 3 with pytest.raises(Exception): # cannot advance index without force=True wallet.set_next_index(0, BaseWallet.ADDRESS_TYPE_INTERNAL, 5) wallet.set_next_index(0, BaseWallet.ADDRESS_TYPE_INTERNAL, 1) assert wallet.get_next_unused_index(0, BaseWallet.ADDRESS_TYPE_INTERNAL) == 1 wallet.set_next_index(0, BaseWallet.ADDRESS_TYPE_INTERNAL, 20, force=True) assert wallet.get_next_unused_index(0, BaseWallet.ADDRESS_TYPE_INTERNAL) == 20 script = wallet.get_new_script(0, BaseWallet.ADDRESS_TYPE_INTERNAL) path = wallet.script_to_path(script) index = wallet.get_details(path)[2] assert index == 20 def test_path_repr(setup_wallet): wallet = get_populated_wallet() path = wallet.get_path(2, BIP32Wallet.ADDRESS_TYPE_EXTERNAL, 0) path_repr = wallet.get_path_repr(path) path_new = wallet.path_repr_to_path(path_repr) assert path_new == path def test_path_repr_imported(setup_wallet): wallet = get_populated_wallet(num=0) path = wallet.import_private_key( 0, 'cRAGLvPmhpzJNgdMT4W2gVwEW3fusfaDqdQWM2vnWLgXKzCWKtcM') path_repr = wallet.get_path_repr(path) path_new = wallet.path_repr_to_path(path_repr) assert path_new == path @pytest.mark.parametrize('timenumber,timestamp', [ [0, 1577836800], [50, 1709251200], [300, 2366841600], [400, None], #too far in the future [-1, None] #before epoch ]) def test_timenumber_to_timestamp(setup_wallet, timenumber, timestamp): try: implied_timestamp = FidelityBondMixin._time_number_to_timestamp( timenumber) assert implied_timestamp == timestamp except ValueError: #None means the timenumber is intentionally invalid assert timestamp == None @pytest.mark.parametrize('timestamp,timenumber', [ [1577836800, 0], [1709251200, 50], [2366841600, 300], [1577836801, None], #not exactly midnight on first of month [2629670400, None], #too far in future [1575158400, None] #before epoch ]) def 
test_timestamp_to_timenumber(setup_wallet, timestamp, timenumber): try: implied_timenumber = FidelityBondMixin.timestamp_to_time_number( timestamp) assert implied_timenumber == timenumber except ValueError: assert timenumber == None def test_wrong_wallet_cls(setup_wallet): storage = VolatileStorage() SegwitLegacyWallet.initialize(storage, get_network()) wallet = SegwitLegacyWallet(storage) wallet.save() data = storage.file_data del wallet del storage storage = VolatileStorage(data=data) with pytest.raises(Exception): LegacyWallet(storage) def test_wallet_id(setup_wallet): storage1 = VolatileStorage() SegwitLegacyWallet.initialize(storage1, get_network()) wallet1 = SegwitLegacyWallet(storage1) storage2 = VolatileStorage() LegacyWallet.initialize(storage2, get_network(), entropy=wallet1._entropy) wallet2 = LegacyWallet(storage2) assert wallet1.get_wallet_id() != wallet2.get_wallet_id() storage2 = VolatileStorage() SegwitLegacyWallet.initialize(storage2, get_network(), entropy=wallet1._entropy) wallet2 = SegwitLegacyWallet(storage2) assert wallet1.get_wallet_id() == wallet2.get_wallet_id() def test_addr_script_conversion(setup_wallet): wallet = get_populated_wallet(num=1) path = wallet.get_path(0, BaseWallet.ADDRESS_TYPE_INTERNAL, 0) script = wallet.get_script_from_path(path) addr = wallet.script_to_addr(script) assert script == wallet.addr_to_script(addr) addr_path = wallet.addr_to_path(addr) assert path == addr_path def test_imported_key_removed(setup_wallet): wif = 'cRAGLvPmhpzJNgdMT4W2gVwEW3fusfaDqdQWM2vnWLgXKzCWKtcM' storage = VolatileStorage() SegwitLegacyWallet.initialize(storage, get_network()) wallet = SegwitLegacyWallet(storage) path = wallet.import_private_key(1, wif) script = wallet.get_script_from_path(path) assert wallet.is_known_script(script) wallet.remove_imported_key(path=path) assert not wallet.is_known_script(script) with pytest.raises(WalletError): wallet.get_script_from_path(path) def test_wallet_mixdepth_simple(setup_wallet): wallet = get_populated_wallet(num=0) mixdepth = wallet.mixdepth assert wallet.max_mixdepth == mixdepth wallet.close() storage_data = wallet._storage.file_data new_wallet = type(wallet)(VolatileStorage(data=storage_data)) assert new_wallet.mixdepth == mixdepth assert new_wallet.max_mixdepth == mixdepth def test_wallet_mixdepth_increase(setup_wallet): wallet = get_populated_wallet(num=0) mixdepth = wallet.mixdepth wallet.close() storage_data = wallet._storage.file_data new_mixdepth = mixdepth + 2 new_wallet = type(wallet)( VolatileStorage(data=storage_data), mixdepth=new_mixdepth) assert new_wallet.mixdepth == new_mixdepth assert new_wallet.max_mixdepth == new_mixdepth def test_wallet_mixdepth_decrease(setup_wallet): wallet = get_populated_wallet(num=1) # setup max_mixdepth = wallet.max_mixdepth assert max_mixdepth >= 1, "bad default value for mixdepth for this test" utxo = fund_wallet_addr(wallet, wallet.get_internal_addr(max_mixdepth), 1) bci = jm_single().bc_interface unspent_list = bci.listunspent(0) # filter on label, but note (a) in certain circumstances (in- # wallet transfer) it is possible for the utxo to be labeled # with the external label, and (b) the wallet will know if it # belongs or not anyway (is_known_addr): our_unspent_list = [x for x in unspent_list if ( bci.is_address_labeled(x, wallet.get_wallet_name()))] assert wallet.get_balance_by_mixdepth()[max_mixdepth] == 10**8 wallet.close() storage_data = wallet._storage.file_data # actual test new_mixdepth = max_mixdepth - 1 new_wallet = type(wallet)( VolatileStorage(data=storage_data), 
mixdepth=new_mixdepth) assert new_wallet.max_mixdepth == max_mixdepth assert new_wallet.mixdepth == new_mixdepth sync_test_wallet(True, WalletService(new_wallet)) assert max_mixdepth not in new_wallet.get_balance_by_mixdepth() assert max_mixdepth not in new_wallet.get_utxos_by_mixdepth() # wallet.select_utxos will still return utxos from higher mixdepths # because we explicitly ask for a specific mixdepth assert utxo in new_wallet.select_utxos(max_mixdepth, 10**7) def test_watchonly_wallet(setup_wallet): jm_single().config.set('BLOCKCHAIN', 'network', 'testnet') storage = VolatileStorage() SegwitLegacyWalletFidelityBonds.initialize(storage, get_network()) wallet = SegwitLegacyWalletFidelityBonds(storage) paths = [ "m/49'/1'/0'/0/0", "m/49'/1'/0'/1/0", "m/49'/1'/0'/2/0:1577836800", "m/49'/1'/0'/2/0:2314051200" ] burn_path = "m/49'/1'/0'/3/0" scripts = [wallet.get_script_from_path(wallet.path_repr_to_path(path)) for path in paths] privkey, engine = wallet._get_key_from_path(wallet.path_repr_to_path(burn_path)) burn_pubkey = engine.privkey_to_pubkey(privkey) master_pub_key = wallet.get_bip32_pub_export( FidelityBondMixin.FIDELITY_BOND_MIXDEPTH) watchonly_storage = VolatileStorage() entropy = FidelityBondMixin.get_xpub_from_fidelity_bond_master_pub_key( master_pub_key).encode() FidelityBondWatchonlyWallet.initialize(watchonly_storage, get_network(), entropy=entropy) watchonly_wallet = FidelityBondWatchonlyWallet(watchonly_storage) watchonly_scripts = [watchonly_wallet.get_script_from_path( watchonly_wallet.path_repr_to_path(path)) for path in paths] privkey, engine = wallet._get_key_from_path(wallet.path_repr_to_path(burn_path)) watchonly_burn_pubkey = engine.privkey_to_pubkey(privkey) for script, watchonly_script in zip(scripts, watchonly_scripts): assert script == watchonly_script assert burn_pubkey == watchonly_burn_pubkey @pytest.mark.parametrize('password, wallet_cls', [ ["hunter2", SegwitLegacyWallet], ["hunter2", SegwitWallet], ]) def test_create_wallet(setup_wallet, password, wallet_cls): wallet_name = test_create_wallet_filename password = password.encode("utf-8") # test mainnet (we are not transacting) btc.select_chain_params("bitcoin") wallet = create_wallet(wallet_name, password, 4, wallet_cls) mnemonic = wallet.get_mnemonic_words()[0] firstkey = wallet.get_key_from_addr(wallet.get_addr(0,0,0)) print("Created mnemonic, firstkey: ", mnemonic, firstkey) wallet.close() # ensure that the wallet file created is openable with the password, # and has the parameters that were claimed on creation: new_wallet = open_test_wallet_maybe(wallet_name, "", 4, password=password, ask_for_password=False) assert new_wallet.get_mnemonic_words()[0] == mnemonic assert new_wallet.get_key_from_addr( new_wallet.get_addr(0,0,0)) == firstkey os.remove(wallet_name) btc.select_chain_params("bitcoin/regtest") @pytest.fixture(scope='module') def setup_wallet(request): load_test_config() btc.select_chain_params("bitcoin/regtest") #see note in cryptoengine.py: cryptoengine.BTC_P2WPKH.VBYTE = 100 jm_single().bc_interface.tick_forward_chain_interval = 2 def teardown(): if os.path.exists(test_create_wallet_filename): os.remove(test_create_wallet_filename) request.addfinalizer(teardown)
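# Illustrative sketch (not jmclient code): the timenumber<->timestamp pairs
# parametrized in the fidelity-bond tests above are consistent with
# "timenumber N = first day of the N-th month counted from 2020-01-01 00:00 UTC",
# with roughly 400 valid timenumbers. A standalone reimplementation of that
# mapping, useful only for sanity-checking the test vectors:
from datetime import datetime, timezone

def timenumber_to_timestamp_sketch(timenumber, max_timenumber=400):
    if not 0 <= timenumber < max_timenumber:
        raise ValueError("timenumber out of range")
    years, months = divmod(timenumber, 12)
    dt = datetime(2020 + years, months + 1, 1, tzinfo=timezone.utc)
    return int(dt.timestamp())

assert timenumber_to_timestamp_sketch(0) == 1577836800
assert timenumber_to_timestamp_sketch(50) == 1709251200
assert timenumber_to_timestamp_sketch(300) == 2366841600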
undeath/joinmarket-clientserver
jmclient/test/test_wallet.py
jmclient/jmclient/taker_utils.py
"""Tests for _sketches.py.""" from __future__ import division, print_function, absolute_import import numpy as np from scipy.linalg import clarkson_woodruff_transform from numpy.testing import assert_ def make_random_dense_gaussian_matrix(n_rows, n_columns, mu=0, sigma=0.01): """ Make some random data with Gaussian distributed values """ np.random.seed(142352345) res = np.random.normal(mu, sigma, n_rows*n_columns) return np.reshape(res, (n_rows, n_columns)) class TestClarksonWoodruffTransform(object): """ Testing the Clarkson Woodruff Transform """ # Big dense matrix dimensions n_matrix_rows = 2000 n_matrix_columns = 100 # Sketch matrix dimensions n_sketch_rows = 100 # Error threshold threshold = 0.1 dense_big_matrix = make_random_dense_gaussian_matrix(n_matrix_rows, n_matrix_columns) def test_sketch_dimensions(self): sketch = clarkson_woodruff_transform(self.dense_big_matrix, self.n_sketch_rows) assert_(sketch.shape == (self.n_sketch_rows, self.dense_big_matrix.shape[1])) def test_sketch_rows_norm(self): # Given the probabilistic nature of the sketches # we run the 'test' multiple times and check that # we pass all/almost all the tries n_errors = 0 seeds = [1755490010, 934377150, 1391612830, 1752708722, 2008891431, 1302443994, 1521083269, 1501189312, 1126232505, 1533465685] for seed_ in seeds: sketch = clarkson_woodruff_transform(self.dense_big_matrix, self.n_sketch_rows, seed_) # We could use other norms (like L2) err = np.linalg.norm(self.dense_big_matrix) - np.linalg.norm(sketch) if err > self.threshold: n_errors += 1 assert_(n_errors == 0)
# Test interfaces to fortran blas. # # The tests are more of interface than they are of the underlying blas. # Only very small matrices checked -- N=3 or so. # # !! Complex calculations really aren't checked that carefully. # !! Only real valued complex numbers are used in tests. from __future__ import division, print_function, absolute_import from numpy import float32, float64, complex64, complex128, arange, array, \ zeros, shape, transpose, newaxis, common_type, conjugate from scipy.linalg import _fblas as fblas from scipy._lib.six import xrange from numpy.testing import assert_array_equal, \ assert_allclose, assert_array_almost_equal, assert_ import pytest # decimal accuracy to require between Python and LAPACK/BLAS calculations accuracy = 5 # Since numpy.dot likely uses the same blas, use this routine # to check. def matrixmultiply(a, b): if len(b.shape) == 1: b_is_vector = True b = b[:, newaxis] else: b_is_vector = False assert_(a.shape[1] == b.shape[0]) c = zeros((a.shape[0], b.shape[1]), common_type(a, b)) for i in xrange(a.shape[0]): for j in xrange(b.shape[1]): s = 0 for k in xrange(a.shape[1]): s += a[i, k] * b[k, j] c[i, j] = s if b_is_vector: c = c.reshape((a.shape[0],)) return c ################################################## # Test blas ?axpy class BaseAxpy(object): ''' Mixin class for axpy tests ''' def test_default_a(self): x = arange(3., dtype=self.dtype) y = arange(3., dtype=x.dtype) real_y = x*1.+y y = self.blas_func(x, y) assert_array_equal(real_y, y) def test_simple(self): x = arange(3., dtype=self.dtype) y = arange(3., dtype=x.dtype) real_y = x*3.+y y = self.blas_func(x, y, a=3.) assert_array_equal(real_y, y) def test_x_stride(self): x = arange(6., dtype=self.dtype) y = zeros(3, x.dtype) y = arange(3., dtype=x.dtype) real_y = x[::2]*3.+y y = self.blas_func(x, y, a=3., n=3, incx=2) assert_array_equal(real_y, y) def test_y_stride(self): x = arange(3., dtype=self.dtype) y = zeros(6, x.dtype) real_y = x*3.+y[::2] y = self.blas_func(x, y, a=3., n=3, incy=2) assert_array_equal(real_y, y[::2]) def test_x_and_y_stride(self): x = arange(12., dtype=self.dtype) y = zeros(6, x.dtype) real_y = x[::4]*3.+y[::2] y = self.blas_func(x, y, a=3., n=3, incx=4, incy=2) assert_array_equal(real_y, y[::2]) def test_x_bad_size(self): x = arange(12., dtype=self.dtype) y = zeros(6, x.dtype) with pytest.raises(Exception, match='failed for 1st keyword'): self.blas_func(x, y, n=4, incx=5) def test_y_bad_size(self): x = arange(12., dtype=self.dtype) y = zeros(6, x.dtype) with pytest.raises(Exception, match='failed for 1st keyword'): self.blas_func(x, y, n=3, incy=5) try: class TestSaxpy(BaseAxpy): blas_func = fblas.saxpy dtype = float32 except AttributeError: class TestSaxpy: pass class TestDaxpy(BaseAxpy): blas_func = fblas.daxpy dtype = float64 try: class TestCaxpy(BaseAxpy): blas_func = fblas.caxpy dtype = complex64 except AttributeError: class TestCaxpy: pass class TestZaxpy(BaseAxpy): blas_func = fblas.zaxpy dtype = complex128 ################################################## # Test blas ?scal class BaseScal(object): ''' Mixin class for scal testing ''' def test_simple(self): x = arange(3., dtype=self.dtype) real_x = x*3. 
x = self.blas_func(3., x) assert_array_equal(real_x, x) def test_x_stride(self): x = arange(6., dtype=self.dtype) real_x = x.copy() real_x[::2] = x[::2]*array(3., self.dtype) x = self.blas_func(3., x, n=3, incx=2) assert_array_equal(real_x, x) def test_x_bad_size(self): x = arange(12., dtype=self.dtype) with pytest.raises(Exception, match='failed for 1st keyword'): self.blas_func(2., x, n=4, incx=5) try: class TestSscal(BaseScal): blas_func = fblas.sscal dtype = float32 except AttributeError: class TestSscal: pass class TestDscal(BaseScal): blas_func = fblas.dscal dtype = float64 try: class TestCscal(BaseScal): blas_func = fblas.cscal dtype = complex64 except AttributeError: class TestCscal: pass class TestZscal(BaseScal): blas_func = fblas.zscal dtype = complex128 ################################################## # Test blas ?copy class BaseCopy(object): ''' Mixin class for copy testing ''' def test_simple(self): x = arange(3., dtype=self.dtype) y = zeros(shape(x), x.dtype) y = self.blas_func(x, y) assert_array_equal(x, y) def test_x_stride(self): x = arange(6., dtype=self.dtype) y = zeros(3, x.dtype) y = self.blas_func(x, y, n=3, incx=2) assert_array_equal(x[::2], y) def test_y_stride(self): x = arange(3., dtype=self.dtype) y = zeros(6, x.dtype) y = self.blas_func(x, y, n=3, incy=2) assert_array_equal(x, y[::2]) def test_x_and_y_stride(self): x = arange(12., dtype=self.dtype) y = zeros(6, x.dtype) y = self.blas_func(x, y, n=3, incx=4, incy=2) assert_array_equal(x[::4], y[::2]) def test_x_bad_size(self): x = arange(12., dtype=self.dtype) y = zeros(6, x.dtype) with pytest.raises(Exception, match='failed for 1st keyword'): self.blas_func(x, y, n=4, incx=5) def test_y_bad_size(self): x = arange(12., dtype=self.dtype) y = zeros(6, x.dtype) with pytest.raises(Exception, match='failed for 1st keyword'): self.blas_func(x, y, n=3, incy=5) # def test_y_bad_type(self): ## Hmmm. Should this work? What should be the output. 
# x = arange(3.,dtype=self.dtype) # y = zeros(shape(x)) # self.blas_func(x,y) # assert_array_equal(x,y) try: class TestScopy(BaseCopy): blas_func = fblas.scopy dtype = float32 except AttributeError: class TestScopy: pass class TestDcopy(BaseCopy): blas_func = fblas.dcopy dtype = float64 try: class TestCcopy(BaseCopy): blas_func = fblas.ccopy dtype = complex64 except AttributeError: class TestCcopy: pass class TestZcopy(BaseCopy): blas_func = fblas.zcopy dtype = complex128 ################################################## # Test blas ?swap class BaseSwap(object): ''' Mixin class for swap tests ''' def test_simple(self): x = arange(3., dtype=self.dtype) y = zeros(shape(x), x.dtype) desired_x = y.copy() desired_y = x.copy() x, y = self.blas_func(x, y) assert_array_equal(desired_x, x) assert_array_equal(desired_y, y) def test_x_stride(self): x = arange(6., dtype=self.dtype) y = zeros(3, x.dtype) desired_x = y.copy() desired_y = x.copy()[::2] x, y = self.blas_func(x, y, n=3, incx=2) assert_array_equal(desired_x, x[::2]) assert_array_equal(desired_y, y) def test_y_stride(self): x = arange(3., dtype=self.dtype) y = zeros(6, x.dtype) desired_x = y.copy()[::2] desired_y = x.copy() x, y = self.blas_func(x, y, n=3, incy=2) assert_array_equal(desired_x, x) assert_array_equal(desired_y, y[::2]) def test_x_and_y_stride(self): x = arange(12., dtype=self.dtype) y = zeros(6, x.dtype) desired_x = y.copy()[::2] desired_y = x.copy()[::4] x, y = self.blas_func(x, y, n=3, incx=4, incy=2) assert_array_equal(desired_x, x[::4]) assert_array_equal(desired_y, y[::2]) def test_x_bad_size(self): x = arange(12., dtype=self.dtype) y = zeros(6, x.dtype) with pytest.raises(Exception, match='failed for 1st keyword'): self.blas_func(x, y, n=4, incx=5) def test_y_bad_size(self): x = arange(12., dtype=self.dtype) y = zeros(6, x.dtype) with pytest.raises(Exception, match='failed for 1st keyword'): self.blas_func(x, y, n=3, incy=5) try: class TestSswap(BaseSwap): blas_func = fblas.sswap dtype = float32 except AttributeError: class TestSswap: pass class TestDswap(BaseSwap): blas_func = fblas.dswap dtype = float64 try: class TestCswap(BaseSwap): blas_func = fblas.cswap dtype = complex64 except AttributeError: class TestCswap: pass class TestZswap(BaseSwap): blas_func = fblas.zswap dtype = complex128 ################################################## # Test blas ?gemv # This will be a mess to test all cases. 
class BaseGemv(object): ''' Mixin class for gemv tests ''' def get_data(self, x_stride=1, y_stride=1): mult = array(1, dtype=self.dtype) if self.dtype in [complex64, complex128]: mult = array(1+1j, dtype=self.dtype) from numpy.random import normal, seed seed(1234) alpha = array(1., dtype=self.dtype) * mult beta = array(1., dtype=self.dtype) * mult a = normal(0., 1., (3, 3)).astype(self.dtype) * mult x = arange(shape(a)[0]*x_stride, dtype=self.dtype) * mult y = arange(shape(a)[1]*y_stride, dtype=self.dtype) * mult return alpha, beta, a, x, y def test_simple(self): alpha, beta, a, x, y = self.get_data() desired_y = alpha*matrixmultiply(a, x)+beta*y y = self.blas_func(alpha, a, x, beta, y) assert_array_almost_equal(desired_y, y) def test_default_beta_y(self): alpha, beta, a, x, y = self.get_data() desired_y = matrixmultiply(a, x) y = self.blas_func(1, a, x) assert_array_almost_equal(desired_y, y) def test_simple_transpose(self): alpha, beta, a, x, y = self.get_data() desired_y = alpha*matrixmultiply(transpose(a), x)+beta*y y = self.blas_func(alpha, a, x, beta, y, trans=1) assert_array_almost_equal(desired_y, y) def test_simple_transpose_conj(self): alpha, beta, a, x, y = self.get_data() desired_y = alpha*matrixmultiply(transpose(conjugate(a)), x)+beta*y y = self.blas_func(alpha, a, x, beta, y, trans=2) assert_array_almost_equal(desired_y, y) def test_x_stride(self): alpha, beta, a, x, y = self.get_data(x_stride=2) desired_y = alpha*matrixmultiply(a, x[::2])+beta*y y = self.blas_func(alpha, a, x, beta, y, incx=2) assert_array_almost_equal(desired_y, y) def test_x_stride_transpose(self): alpha, beta, a, x, y = self.get_data(x_stride=2) desired_y = alpha*matrixmultiply(transpose(a), x[::2])+beta*y y = self.blas_func(alpha, a, x, beta, y, trans=1, incx=2) assert_array_almost_equal(desired_y, y) def test_x_stride_assert(self): # What is the use of this test? alpha, beta, a, x, y = self.get_data(x_stride=2) with pytest.raises(Exception, match='failed for 3rd argument'): y = self.blas_func(1, a, x, 1, y, trans=0, incx=3) with pytest.raises(Exception, match='failed for 3rd argument'): y = self.blas_func(1, a, x, 1, y, trans=1, incx=3) def test_y_stride(self): alpha, beta, a, x, y = self.get_data(y_stride=2) desired_y = y.copy() desired_y[::2] = alpha*matrixmultiply(a, x)+beta*y[::2] y = self.blas_func(alpha, a, x, beta, y, incy=2) assert_array_almost_equal(desired_y, y) def test_y_stride_transpose(self): alpha, beta, a, x, y = self.get_data(y_stride=2) desired_y = y.copy() desired_y[::2] = alpha*matrixmultiply(transpose(a), x)+beta*y[::2] y = self.blas_func(alpha, a, x, beta, y, trans=1, incy=2) assert_array_almost_equal(desired_y, y) def test_y_stride_assert(self): # What is the use of this test? 
alpha, beta, a, x, y = self.get_data(y_stride=2) with pytest.raises(Exception, match='failed for 2nd keyword'): y = self.blas_func(1, a, x, 1, y, trans=0, incy=3) with pytest.raises(Exception, match='failed for 2nd keyword'): y = self.blas_func(1, a, x, 1, y, trans=1, incy=3) try: class TestSgemv(BaseGemv): blas_func = fblas.sgemv dtype = float32 def test_sgemv_on_osx(self): from itertools import product import sys import numpy as np if sys.platform != 'darwin': return def aligned_array(shape, align, dtype, order='C'): # Make array shape `shape` with aligned at `align` bytes d = dtype() # Make array of correct size with `align` extra bytes N = np.prod(shape) tmp = np.zeros(N * d.nbytes + align, dtype=np.uint8) address = tmp.__array_interface__["data"][0] # Find offset into array giving desired alignment for offset in range(align): if (address + offset) % align == 0: break tmp = tmp[offset:offset+N*d.nbytes].view(dtype=dtype) return tmp.reshape(shape, order=order) def as_aligned(arr, align, dtype, order='C'): # Copy `arr` into an aligned array with same shape aligned = aligned_array(arr.shape, align, dtype, order) aligned[:] = arr[:] return aligned def assert_dot_close(A, X, desired): assert_allclose(self.blas_func(1.0, A, X), desired, rtol=1e-5, atol=1e-7) testdata = product((15, 32), (10000,), (200, 89), ('C', 'F')) for align, m, n, a_order in testdata: A_d = np.random.rand(m, n) X_d = np.random.rand(n) desired = np.dot(A_d, X_d) # Calculation with aligned single precision A_f = as_aligned(A_d, align, np.float32, order=a_order) X_f = as_aligned(X_d, align, np.float32, order=a_order) assert_dot_close(A_f, X_f, desired) except AttributeError: class TestSgemv: pass class TestDgemv(BaseGemv): blas_func = fblas.dgemv dtype = float64 try: class TestCgemv(BaseGemv): blas_func = fblas.cgemv dtype = complex64 except AttributeError: class TestCgemv: pass class TestZgemv(BaseGemv): blas_func = fblas.zgemv dtype = complex128 """ ################################################## ### Test blas ?ger ### This will be a mess to test all cases. class BaseGer(object): def get_data(self,x_stride=1,y_stride=1): from numpy.random import normal, seed seed(1234) alpha = array(1., dtype = self.dtype) a = normal(0.,1.,(3,3)).astype(self.dtype) x = arange(shape(a)[0]*x_stride,dtype=self.dtype) y = arange(shape(a)[1]*y_stride,dtype=self.dtype) return alpha,a,x,y def test_simple(self): alpha,a,x,y = self.get_data() # tranpose takes care of Fortran vs. C(and Python) memory layout desired_a = alpha*transpose(x[:,newaxis]*y) + a self.blas_func(x,y,a) assert_array_almost_equal(desired_a,a) def test_x_stride(self): alpha,a,x,y = self.get_data(x_stride=2) desired_a = alpha*transpose(x[::2,newaxis]*y) + a self.blas_func(x,y,a,incx=2) assert_array_almost_equal(desired_a,a) def test_x_stride_assert(self): alpha,a,x,y = self.get_data(x_stride=2) with pytest.raises(ValueError, match='foo'): self.blas_func(x,y,a,incx=3) def test_y_stride(self): alpha,a,x,y = self.get_data(y_stride=2) desired_a = alpha*transpose(x[:,newaxis]*y[::2]) + a self.blas_func(x,y,a,incy=2) assert_array_almost_equal(desired_a,a) def test_y_stride_assert(self): alpha,a,x,y = self.get_data(y_stride=2) with pytest.raises(ValueError, match='foo'): self.blas_func(a,x,y,incy=3) class TestSger(BaseGer): blas_func = fblas.sger dtype = float32 class TestDger(BaseGer): blas_func = fblas.dger dtype = float64 """ ################################################## # Test blas ?gerc # This will be a mess to test all cases. 
""" class BaseGerComplex(BaseGer): def get_data(self,x_stride=1,y_stride=1): from numpy.random import normal, seed seed(1234) alpha = array(1+1j, dtype = self.dtype) a = normal(0.,1.,(3,3)).astype(self.dtype) a = a + normal(0.,1.,(3,3)) * array(1j, dtype = self.dtype) x = normal(0.,1.,shape(a)[0]*x_stride).astype(self.dtype) x = x + x * array(1j, dtype = self.dtype) y = normal(0.,1.,shape(a)[1]*y_stride).astype(self.dtype) y = y + y * array(1j, dtype = self.dtype) return alpha,a,x,y def test_simple(self): alpha,a,x,y = self.get_data() # tranpose takes care of Fortran vs. C(and Python) memory layout a = a * array(0.,dtype = self.dtype) #desired_a = alpha*transpose(x[:,newaxis]*self.transform(y)) + a desired_a = alpha*transpose(x[:,newaxis]*y) + a #self.blas_func(x,y,a,alpha = alpha) fblas.cgeru(x,y,a,alpha = alpha) assert_array_almost_equal(desired_a,a) #def test_x_stride(self): # alpha,a,x,y = self.get_data(x_stride=2) # desired_a = alpha*transpose(x[::2,newaxis]*self.transform(y)) + a # self.blas_func(x,y,a,incx=2) # assert_array_almost_equal(desired_a,a) #def test_y_stride(self): # alpha,a,x,y = self.get_data(y_stride=2) # desired_a = alpha*transpose(x[:,newaxis]*self.transform(y[::2])) + a # self.blas_func(x,y,a,incy=2) # assert_array_almost_equal(desired_a,a) class TestCgeru(BaseGerComplex): blas_func = fblas.cgeru dtype = complex64 def transform(self,x): return x class TestZgeru(BaseGerComplex): blas_func = fblas.zgeru dtype = complex128 def transform(self,x): return x class TestCgerc(BaseGerComplex): blas_func = fblas.cgerc dtype = complex64 def transform(self,x): return conjugate(x) class TestZgerc(BaseGerComplex): blas_func = fblas.zgerc dtype = complex128 def transform(self,x): return conjugate(x) """
Eric89GXL/scipy
scipy/linalg/tests/test_fblas.py
scipy/linalg/tests/test_sketches.py
from pandas.compat import StringIO from pandas import read_sas import pandas.util.testing as tm class TestSas(object): def test_sas_buffer_format(self): # see gh-14947 b = StringIO("") msg = ("If this is a buffer object rather than a string " "name, you must specify a format string") with tm.assert_raises_regex(ValueError, msg): read_sas(b)
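# Companion sketch to the test above: the ValueError disappears once a format
# is passed explicitly, since a bare buffer has no file extension to infer it
# from. "example.xpt" is a placeholder path, not a file shipped with pandas.
from pandas import read_sas

with open("example.xpt", "rb") as fh:      # hypothetical SAS transport file
    df = read_sas(fh, format="xport")      # use format="sas7bdat" for that layout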
# -*- coding: utf-8 -*- from itertools import product import pytest import numpy as np from pandas.compat import range, u from pandas import MultiIndex, DatetimeIndex from pandas._libs import hashtable import pandas.util.testing as tm @pytest.mark.parametrize('names', [None, ['first', 'second']]) def test_unique(names): mi = MultiIndex.from_arrays([[1, 2, 1, 2], [1, 1, 1, 2]], names=names) res = mi.unique() exp = MultiIndex.from_arrays([[1, 2, 2], [1, 1, 2]], names=mi.names) tm.assert_index_equal(res, exp) mi = MultiIndex.from_arrays([list('aaaa'), list('abab')], names=names) res = mi.unique() exp = MultiIndex.from_arrays([list('aa'), list('ab')], names=mi.names) tm.assert_index_equal(res, exp) mi = MultiIndex.from_arrays([list('aaaa'), list('aaaa')], names=names) res = mi.unique() exp = MultiIndex.from_arrays([['a'], ['a']], names=mi.names) tm.assert_index_equal(res, exp) # GH #20568 - empty MI mi = MultiIndex.from_arrays([[], []], names=names) res = mi.unique() tm.assert_index_equal(mi, res) def test_unique_datetimelike(): idx1 = DatetimeIndex(['2015-01-01', '2015-01-01', '2015-01-01', '2015-01-01', 'NaT', 'NaT']) idx2 = DatetimeIndex(['2015-01-01', '2015-01-01', '2015-01-02', '2015-01-02', 'NaT', '2015-01-01'], tz='Asia/Tokyo') result = MultiIndex.from_arrays([idx1, idx2]).unique() eidx1 = DatetimeIndex(['2015-01-01', '2015-01-01', 'NaT', 'NaT']) eidx2 = DatetimeIndex(['2015-01-01', '2015-01-02', 'NaT', '2015-01-01'], tz='Asia/Tokyo') exp = MultiIndex.from_arrays([eidx1, eidx2]) tm.assert_index_equal(result, exp) @pytest.mark.parametrize('level', [0, 'first', 1, 'second']) def test_unique_level(idx, level): # GH #17896 - with level= argument result = idx.unique(level=level) expected = idx.get_level_values(level).unique() tm.assert_index_equal(result, expected) # With already unique level mi = MultiIndex.from_arrays([[1, 3, 2, 4], [1, 3, 2, 5]], names=['first', 'second']) result = mi.unique(level=level) expected = mi.get_level_values(level) tm.assert_index_equal(result, expected) # With empty MI mi = MultiIndex.from_arrays([[], []], names=['first', 'second']) result = mi.unique(level=level) expected = mi.get_level_values(level) @pytest.mark.parametrize('dropna', [True, False]) def test_get_unique_index(idx, dropna): mi = idx[[0, 1, 0, 1, 1, 0, 0]] expected = mi._shallow_copy(mi[[0, 1]]) result = mi._get_unique_index(dropna=dropna) assert result.unique tm.assert_index_equal(result, expected) def test_duplicate_multiindex_labels(): # GH 17464 # Make sure that a MultiIndex with duplicate levels throws a ValueError with pytest.raises(ValueError): mi = MultiIndex([['A'] * 10, range(10)], [[0] * 10, range(10)]) # And that using set_levels with duplicate levels fails mi = MultiIndex.from_arrays([['A', 'A', 'B', 'B', 'B'], [1, 2, 1, 2, 3]]) with pytest.raises(ValueError): mi.set_levels([['A', 'B', 'A', 'A', 'B'], [2, 1, 3, -2, 5]], inplace=True) @pytest.mark.parametrize('names', [['a', 'b', 'a'], [1, 1, 2], [1, 'a', 1]]) def test_duplicate_level_names(names): # GH18872, GH19029 mi = MultiIndex.from_product([[0, 1]] * 3, names=names) assert mi.names == names # With .rename() mi = MultiIndex.from_product([[0, 1]] * 3) mi = mi.rename(names) assert mi.names == names # With .rename(., level=) mi.rename(names[1], level=1, inplace=True) mi = mi.rename([names[0], names[2]], level=[0, 2]) assert mi.names == names def test_duplicate_meta_data(): # GH 10115 mi = MultiIndex( levels=[[0, 1], [0, 1, 2]], labels=[[0, 0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 0, 1, 2]]) for idx in [mi, mi.set_names([None, None]), 
mi.set_names([None, 'Num']), mi.set_names(['Upper', 'Num']), ]: assert idx.has_duplicates assert idx.drop_duplicates().names == idx.names def test_has_duplicates(idx, idx_dup): # see fixtures assert idx.is_unique is True assert idx.has_duplicates is False assert idx_dup.is_unique is False assert idx_dup.has_duplicates is True mi = MultiIndex(levels=[[0, 1], [0, 1, 2]], labels=[[0, 0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 0, 1, 2]]) assert mi.is_unique is False assert mi.has_duplicates is True def test_has_duplicates_from_tuples(): # GH 9075 t = [(u('x'), u('out'), u('z'), 5, u('y'), u('in'), u('z'), 169), (u('x'), u('out'), u('z'), 7, u('y'), u('in'), u('z'), 119), (u('x'), u('out'), u('z'), 9, u('y'), u('in'), u('z'), 135), (u('x'), u('out'), u('z'), 13, u('y'), u('in'), u('z'), 145), (u('x'), u('out'), u('z'), 14, u('y'), u('in'), u('z'), 158), (u('x'), u('out'), u('z'), 16, u('y'), u('in'), u('z'), 122), (u('x'), u('out'), u('z'), 17, u('y'), u('in'), u('z'), 160), (u('x'), u('out'), u('z'), 18, u('y'), u('in'), u('z'), 180), (u('x'), u('out'), u('z'), 20, u('y'), u('in'), u('z'), 143), (u('x'), u('out'), u('z'), 21, u('y'), u('in'), u('z'), 128), (u('x'), u('out'), u('z'), 22, u('y'), u('in'), u('z'), 129), (u('x'), u('out'), u('z'), 25, u('y'), u('in'), u('z'), 111), (u('x'), u('out'), u('z'), 28, u('y'), u('in'), u('z'), 114), (u('x'), u('out'), u('z'), 29, u('y'), u('in'), u('z'), 121), (u('x'), u('out'), u('z'), 31, u('y'), u('in'), u('z'), 126), (u('x'), u('out'), u('z'), 32, u('y'), u('in'), u('z'), 155), (u('x'), u('out'), u('z'), 33, u('y'), u('in'), u('z'), 123), (u('x'), u('out'), u('z'), 12, u('y'), u('in'), u('z'), 144)] mi = MultiIndex.from_tuples(t) assert not mi.has_duplicates def test_has_duplicates_overflow(): # handle int64 overflow if possible def check(nlevels, with_nulls): labels = np.tile(np.arange(500), 2) level = np.arange(500) if with_nulls: # inject some null values labels[500] = -1 # common nan value labels = [labels.copy() for i in range(nlevels)] for i in range(nlevels): labels[i][500 + i - nlevels // 2] = -1 labels += [np.array([-1, 1]).repeat(500)] else: labels = [labels] * nlevels + [np.arange(2).repeat(500)] levels = [level] * nlevels + [[0, 1]] # no dups mi = MultiIndex(levels=levels, labels=labels) assert not mi.has_duplicates # with a dup if with_nulls: def f(a): return np.insert(a, 1000, a[0]) labels = list(map(f, labels)) mi = MultiIndex(levels=levels, labels=labels) else: values = mi.values.tolist() mi = MultiIndex.from_tuples(values + [values[0]]) assert mi.has_duplicates # no overflow check(4, False) check(4, True) # overflow possible check(8, False) check(8, True) @pytest.mark.parametrize('keep, expected', [ ('first', np.array([False, False, False, True, True, False])), ('last', np.array([False, True, True, False, False, False])), (False, np.array([False, True, True, True, True, False])) ]) def test_duplicated(idx_dup, keep, expected): result = idx_dup.duplicated(keep=keep) tm.assert_numpy_array_equal(result, expected) @pytest.mark.parametrize('keep', ['first', 'last', False]) def test_duplicated_large(keep): # GH 9125 n, k = 200, 5000 levels = [np.arange(n), tm.makeStringIndex(n), 1000 + np.arange(n)] labels = [np.random.choice(n, k * n) for lev in levels] mi = MultiIndex(levels=levels, labels=labels) result = mi.duplicated(keep=keep) expected = hashtable.duplicated_object(mi.values, keep=keep) tm.assert_numpy_array_equal(result, expected) def test_get_duplicates(): # GH5873 for a in [101, 102]: mi = MultiIndex.from_arrays([[101, a], [3.5, np.nan]]) 
assert not mi.has_duplicates with tm.assert_produces_warning(FutureWarning): # Deprecated - see GH20239 assert mi.get_duplicates().equals(MultiIndex.from_arrays([[], []])) tm.assert_numpy_array_equal(mi.duplicated(), np.zeros(2, dtype='bool')) for n in range(1, 6): # 1st level shape for m in range(1, 5): # 2nd level shape # all possible unique combinations, including nan lab = product(range(-1, n), range(-1, m)) mi = MultiIndex(levels=[list('abcde')[:n], list('WXYZ')[:m]], labels=np.random.permutation(list(lab)).T) assert len(mi) == (n + 1) * (m + 1) assert not mi.has_duplicates with tm.assert_produces_warning(FutureWarning): # Deprecated - see GH20239 assert mi.get_duplicates().equals(MultiIndex.from_arrays( [[], []])) tm.assert_numpy_array_equal(mi.duplicated(), np.zeros(len(mi), dtype='bool'))
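# Quick illustration of the duplicate-detection behaviour covered above,
# using only public MultiIndex API on a tiny hand-built index:
import numpy as np
from pandas import MultiIndex
import pandas.util.testing as tm

mi = MultiIndex.from_arrays([['a', 'a', 'b'], [1, 1, 2]])
assert mi.has_duplicates
tm.assert_numpy_array_equal(mi.duplicated(keep='first'),
                            np.array([False, True, False]))
assert not mi.drop_duplicates().has_duplicates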
amolkahat/pandas
pandas/tests/indexes/multi/test_duplicates.py
pandas/tests/io/sas/test_sas.py
#!/usr/bin/env python """Top level ``eval`` module. """ import warnings import tokenize from pandas.io.formats.printing import pprint_thing from pandas.core.computation.scope import _ensure_scope from pandas.compat import string_types from pandas.core.computation.engines import _engines from pandas.util._validators import validate_bool_kwarg def _check_engine(engine): """Make sure a valid engine is passed. Parameters ---------- engine : str Raises ------ KeyError * If an invalid engine is passed ImportError * If numexpr was requested but doesn't exist Returns ------- string engine """ from pandas.core.computation.check import _NUMEXPR_INSTALLED if engine is None: if _NUMEXPR_INSTALLED: engine = 'numexpr' else: engine = 'python' if engine not in _engines: valid = list(_engines.keys()) raise KeyError('Invalid engine {engine!r} passed, valid engines are' ' {valid}'.format(engine=engine, valid=valid)) # TODO: validate this in a more general way (thinking of future engines # that won't necessarily be import-able) # Could potentially be done on engine instantiation if engine == 'numexpr': if not _NUMEXPR_INSTALLED: raise ImportError("'numexpr' is not installed or an " "unsupported version. Cannot use " "engine='numexpr' for query/eval " "if 'numexpr' is not installed") return engine def _check_parser(parser): """Make sure a valid parser is passed. Parameters ---------- parser : str Raises ------ KeyError * If an invalid parser is passed """ from pandas.core.computation.expr import _parsers if parser not in _parsers: raise KeyError('Invalid parser {parser!r} passed, valid parsers are' ' {valid}'.format(parser=parser, valid=_parsers.keys())) def _check_resolvers(resolvers): if resolvers is not None: for resolver in resolvers: if not hasattr(resolver, '__getitem__'): name = type(resolver).__name__ raise TypeError('Resolver of type {name!r} does not implement ' 'the __getitem__ method'.format(name=name)) def _check_expression(expr): """Make sure an expression is not an empty string Parameters ---------- expr : object An object that can be converted to a string Raises ------ ValueError * If expr is an empty string """ if not expr: raise ValueError("expr cannot be an empty string") def _convert_expression(expr): """Convert an object to an expression. Thus function converts an object to an expression (a unicode string) and checks to make sure it isn't empty after conversion. This is used to convert operators to their string representation for recursive calls to :func:`~pandas.eval`. Parameters ---------- expr : object The object to be converted to a string. Returns ------- s : unicode The string representation of an object. Raises ------ ValueError * If the expression is empty. 
""" s = pprint_thing(expr) _check_expression(s) return s def _check_for_locals(expr, stack_level, parser): from pandas.core.computation.expr import tokenize_string at_top_of_stack = stack_level == 0 not_pandas_parser = parser != 'pandas' if not_pandas_parser: msg = "The '@' prefix is only supported by the pandas parser" elif at_top_of_stack: msg = ("The '@' prefix is not allowed in " "top-level eval calls, \nplease refer to " "your variables by name without the '@' " "prefix") if at_top_of_stack or not_pandas_parser: for toknum, tokval in tokenize_string(expr): if toknum == tokenize.OP and tokval == '@': raise SyntaxError(msg) def eval(expr, parser='pandas', engine=None, truediv=True, local_dict=None, global_dict=None, resolvers=(), level=0, target=None, inplace=False): """Evaluate a Python expression as a string using various backends. The following arithmetic operations are supported: ``+``, ``-``, ``*``, ``/``, ``**``, ``%``, ``//`` (python engine only) along with the following boolean operations: ``|`` (or), ``&`` (and), and ``~`` (not). Additionally, the ``'pandas'`` parser allows the use of :keyword:`and`, :keyword:`or`, and :keyword:`not` with the same semantics as the corresponding bitwise operators. :class:`~pandas.Series` and :class:`~pandas.DataFrame` objects are supported and behave as they would with plain ol' Python evaluation. Parameters ---------- expr : str or unicode The expression to evaluate. This string cannot contain any Python `statements <https://docs.python.org/3/reference/simple_stmts.html#simple-statements>`__, only Python `expressions <https://docs.python.org/3/reference/simple_stmts.html#expression-statements>`__. parser : string, default 'pandas', {'pandas', 'python'} The parser to use to construct the syntax tree from the expression. The default of ``'pandas'`` parses code slightly different than standard Python. Alternatively, you can parse an expression using the ``'python'`` parser to retain strict Python semantics. See the :ref:`enhancing performance <enhancingperf.eval>` documentation for more details. engine : string or None, default 'numexpr', {'python', 'numexpr'} The engine used to evaluate the expression. Supported engines are - None : tries to use ``numexpr``, falls back to ``python`` - ``'numexpr'``: This default engine evaluates pandas objects using numexpr for large speed ups in complex expressions with large frames. - ``'python'``: Performs operations as if you had ``eval``'d in top level python. This engine is generally not that useful. More backends may be available in the future. truediv : bool, optional Whether to use true division, like in Python >= 3 local_dict : dict or None, optional A dictionary of local variables, taken from locals() by default. global_dict : dict or None, optional A dictionary of global variables, taken from globals() by default. resolvers : list of dict-like or None, optional A list of objects implementing the ``__getitem__`` special method that you can use to inject an additional collection of namespaces to use for variable lookup. For example, this is used in the :meth:`~pandas.DataFrame.query` method to inject the ``DataFrame.index`` and ``DataFrame.columns`` variables that refer to their respective :class:`~pandas.DataFrame` instance attributes. level : int, optional The number of prior stack frames to traverse and add to the current scope. Most users will **not** need to change this parameter. target : object, optional, default None This is the target object for assignment. 
It is used when there is variable assignment in the expression. If so, then `target` must support item assignment with string keys, and if a copy is being returned, it must also support `.copy()`. inplace : bool, default False If `target` is provided, and the expression mutates `target`, whether to modify `target` inplace. Otherwise, return a copy of `target` with the mutation. Returns ------- ndarray, numeric scalar, DataFrame, Series Raises ------ ValueError There are many instances where such an error can be raised: - `target=None`, but the expression is multiline. - The expression is multiline, but not all them have item assignment. An example of such an arrangement is this: a = b + 1 a + 2 Here, there are expressions on different lines, making it multiline, but the last line has no variable assigned to the output of `a + 2`. - `inplace=True`, but the expression is missing item assignment. - Item assignment is provided, but the `target` does not support string item assignment. - Item assignment is provided and `inplace=False`, but the `target` does not support the `.copy()` method Notes ----- The ``dtype`` of any objects involved in an arithmetic ``%`` operation are recursively cast to ``float64``. See the :ref:`enhancing performance <enhancingperf.eval>` documentation for more details. See Also -------- pandas.DataFrame.query pandas.DataFrame.eval """ from pandas.core.computation.expr import Expr inplace = validate_bool_kwarg(inplace, "inplace") if isinstance(expr, string_types): _check_expression(expr) exprs = [e.strip() for e in expr.splitlines() if e.strip() != ''] else: exprs = [expr] multi_line = len(exprs) > 1 if multi_line and target is None: raise ValueError("multi-line expressions are only valid in the " "context of data, use DataFrame.eval") ret = None first_expr = True target_modified = False for expr in exprs: expr = _convert_expression(expr) engine = _check_engine(engine) _check_parser(parser) _check_resolvers(resolvers) _check_for_locals(expr, level, parser) # get our (possibly passed-in) scope env = _ensure_scope(level + 1, global_dict=global_dict, local_dict=local_dict, resolvers=resolvers, target=target) parsed_expr = Expr(expr, engine=engine, parser=parser, env=env, truediv=truediv) # construct the engine and evaluate the parsed expression eng = _engines[engine] eng_inst = eng(parsed_expr) ret = eng_inst.evaluate() if parsed_expr.assigner is None: if multi_line: raise ValueError("Multi-line expressions are only valid" " if all expressions contain an assignment") elif inplace: raise ValueError("Cannot operate inplace " "if there is no assignment") # assign if needed assigner = parsed_expr.assigner if env.target is not None and assigner is not None: target_modified = True # if returning a copy, copy only on the first assignment if not inplace and first_expr: try: target = env.target.copy() except AttributeError: raise ValueError("Cannot return a copy of the target") else: target = env.target # TypeError is most commonly raised (e.g. int, list), but you # get IndexError if you try to do this assignment on np.ndarray. # we will ignore numpy warnings here; e.g. if trying # to use a non-numeric indexer try: with warnings.catch_warnings(record=True): # TODO: Filter the warnings we actually care about here. 
target[assigner] = ret except (TypeError, IndexError): raise ValueError("Cannot assign expression output to target") if not resolvers: resolvers = ({assigner: ret},) else: # existing resolver needs updated to handle # case of mutating existing column in copy for resolver in resolvers: if assigner in resolver: resolver[assigner] = ret break else: resolvers += ({assigner: ret},) ret = None first_expr = False # We want to exclude `inplace=None` as being False. if inplace is False: return target if target_modified else ret
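# Usage sketch for the eval() entry point implemented above. The frame and
# column names are made up for illustration.
import pandas as pd

df = pd.DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6]})

# plain expression evaluation; local variables are resolved automatically
total = pd.eval('df.a + df.b')

# assignment form with a target: returns a modified copy unless inplace=True
df2 = pd.eval('c = df.a + df.b', target=df)
assert 'c' in df2.columns and 'c' not in df.columns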
# -*- coding: utf-8 -*- from itertools import product import pytest import numpy as np from pandas.compat import range, u from pandas import MultiIndex, DatetimeIndex from pandas._libs import hashtable import pandas.util.testing as tm @pytest.mark.parametrize('names', [None, ['first', 'second']]) def test_unique(names): mi = MultiIndex.from_arrays([[1, 2, 1, 2], [1, 1, 1, 2]], names=names) res = mi.unique() exp = MultiIndex.from_arrays([[1, 2, 2], [1, 1, 2]], names=mi.names) tm.assert_index_equal(res, exp) mi = MultiIndex.from_arrays([list('aaaa'), list('abab')], names=names) res = mi.unique() exp = MultiIndex.from_arrays([list('aa'), list('ab')], names=mi.names) tm.assert_index_equal(res, exp) mi = MultiIndex.from_arrays([list('aaaa'), list('aaaa')], names=names) res = mi.unique() exp = MultiIndex.from_arrays([['a'], ['a']], names=mi.names) tm.assert_index_equal(res, exp) # GH #20568 - empty MI mi = MultiIndex.from_arrays([[], []], names=names) res = mi.unique() tm.assert_index_equal(mi, res) def test_unique_datetimelike(): idx1 = DatetimeIndex(['2015-01-01', '2015-01-01', '2015-01-01', '2015-01-01', 'NaT', 'NaT']) idx2 = DatetimeIndex(['2015-01-01', '2015-01-01', '2015-01-02', '2015-01-02', 'NaT', '2015-01-01'], tz='Asia/Tokyo') result = MultiIndex.from_arrays([idx1, idx2]).unique() eidx1 = DatetimeIndex(['2015-01-01', '2015-01-01', 'NaT', 'NaT']) eidx2 = DatetimeIndex(['2015-01-01', '2015-01-02', 'NaT', '2015-01-01'], tz='Asia/Tokyo') exp = MultiIndex.from_arrays([eidx1, eidx2]) tm.assert_index_equal(result, exp) @pytest.mark.parametrize('level', [0, 'first', 1, 'second']) def test_unique_level(idx, level): # GH #17896 - with level= argument result = idx.unique(level=level) expected = idx.get_level_values(level).unique() tm.assert_index_equal(result, expected) # With already unique level mi = MultiIndex.from_arrays([[1, 3, 2, 4], [1, 3, 2, 5]], names=['first', 'second']) result = mi.unique(level=level) expected = mi.get_level_values(level) tm.assert_index_equal(result, expected) # With empty MI mi = MultiIndex.from_arrays([[], []], names=['first', 'second']) result = mi.unique(level=level) expected = mi.get_level_values(level) @pytest.mark.parametrize('dropna', [True, False]) def test_get_unique_index(idx, dropna): mi = idx[[0, 1, 0, 1, 1, 0, 0]] expected = mi._shallow_copy(mi[[0, 1]]) result = mi._get_unique_index(dropna=dropna) assert result.unique tm.assert_index_equal(result, expected) def test_duplicate_multiindex_labels(): # GH 17464 # Make sure that a MultiIndex with duplicate levels throws a ValueError with pytest.raises(ValueError): mi = MultiIndex([['A'] * 10, range(10)], [[0] * 10, range(10)]) # And that using set_levels with duplicate levels fails mi = MultiIndex.from_arrays([['A', 'A', 'B', 'B', 'B'], [1, 2, 1, 2, 3]]) with pytest.raises(ValueError): mi.set_levels([['A', 'B', 'A', 'A', 'B'], [2, 1, 3, -2, 5]], inplace=True) @pytest.mark.parametrize('names', [['a', 'b', 'a'], [1, 1, 2], [1, 'a', 1]]) def test_duplicate_level_names(names): # GH18872, GH19029 mi = MultiIndex.from_product([[0, 1]] * 3, names=names) assert mi.names == names # With .rename() mi = MultiIndex.from_product([[0, 1]] * 3) mi = mi.rename(names) assert mi.names == names # With .rename(., level=) mi.rename(names[1], level=1, inplace=True) mi = mi.rename([names[0], names[2]], level=[0, 2]) assert mi.names == names def test_duplicate_meta_data(): # GH 10115 mi = MultiIndex( levels=[[0, 1], [0, 1, 2]], labels=[[0, 0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 0, 1, 2]]) for idx in [mi, mi.set_names([None, None]), 
mi.set_names([None, 'Num']), mi.set_names(['Upper', 'Num']), ]: assert idx.has_duplicates assert idx.drop_duplicates().names == idx.names def test_has_duplicates(idx, idx_dup): # see fixtures assert idx.is_unique is True assert idx.has_duplicates is False assert idx_dup.is_unique is False assert idx_dup.has_duplicates is True mi = MultiIndex(levels=[[0, 1], [0, 1, 2]], labels=[[0, 0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 0, 1, 2]]) assert mi.is_unique is False assert mi.has_duplicates is True def test_has_duplicates_from_tuples(): # GH 9075 t = [(u('x'), u('out'), u('z'), 5, u('y'), u('in'), u('z'), 169), (u('x'), u('out'), u('z'), 7, u('y'), u('in'), u('z'), 119), (u('x'), u('out'), u('z'), 9, u('y'), u('in'), u('z'), 135), (u('x'), u('out'), u('z'), 13, u('y'), u('in'), u('z'), 145), (u('x'), u('out'), u('z'), 14, u('y'), u('in'), u('z'), 158), (u('x'), u('out'), u('z'), 16, u('y'), u('in'), u('z'), 122), (u('x'), u('out'), u('z'), 17, u('y'), u('in'), u('z'), 160), (u('x'), u('out'), u('z'), 18, u('y'), u('in'), u('z'), 180), (u('x'), u('out'), u('z'), 20, u('y'), u('in'), u('z'), 143), (u('x'), u('out'), u('z'), 21, u('y'), u('in'), u('z'), 128), (u('x'), u('out'), u('z'), 22, u('y'), u('in'), u('z'), 129), (u('x'), u('out'), u('z'), 25, u('y'), u('in'), u('z'), 111), (u('x'), u('out'), u('z'), 28, u('y'), u('in'), u('z'), 114), (u('x'), u('out'), u('z'), 29, u('y'), u('in'), u('z'), 121), (u('x'), u('out'), u('z'), 31, u('y'), u('in'), u('z'), 126), (u('x'), u('out'), u('z'), 32, u('y'), u('in'), u('z'), 155), (u('x'), u('out'), u('z'), 33, u('y'), u('in'), u('z'), 123), (u('x'), u('out'), u('z'), 12, u('y'), u('in'), u('z'), 144)] mi = MultiIndex.from_tuples(t) assert not mi.has_duplicates def test_has_duplicates_overflow(): # handle int64 overflow if possible def check(nlevels, with_nulls): labels = np.tile(np.arange(500), 2) level = np.arange(500) if with_nulls: # inject some null values labels[500] = -1 # common nan value labels = [labels.copy() for i in range(nlevels)] for i in range(nlevels): labels[i][500 + i - nlevels // 2] = -1 labels += [np.array([-1, 1]).repeat(500)] else: labels = [labels] * nlevels + [np.arange(2).repeat(500)] levels = [level] * nlevels + [[0, 1]] # no dups mi = MultiIndex(levels=levels, labels=labels) assert not mi.has_duplicates # with a dup if with_nulls: def f(a): return np.insert(a, 1000, a[0]) labels = list(map(f, labels)) mi = MultiIndex(levels=levels, labels=labels) else: values = mi.values.tolist() mi = MultiIndex.from_tuples(values + [values[0]]) assert mi.has_duplicates # no overflow check(4, False) check(4, True) # overflow possible check(8, False) check(8, True) @pytest.mark.parametrize('keep, expected', [ ('first', np.array([False, False, False, True, True, False])), ('last', np.array([False, True, True, False, False, False])), (False, np.array([False, True, True, True, True, False])) ]) def test_duplicated(idx_dup, keep, expected): result = idx_dup.duplicated(keep=keep) tm.assert_numpy_array_equal(result, expected) @pytest.mark.parametrize('keep', ['first', 'last', False]) def test_duplicated_large(keep): # GH 9125 n, k = 200, 5000 levels = [np.arange(n), tm.makeStringIndex(n), 1000 + np.arange(n)] labels = [np.random.choice(n, k * n) for lev in levels] mi = MultiIndex(levels=levels, labels=labels) result = mi.duplicated(keep=keep) expected = hashtable.duplicated_object(mi.values, keep=keep) tm.assert_numpy_array_equal(result, expected) def test_get_duplicates(): # GH5873 for a in [101, 102]: mi = MultiIndex.from_arrays([[101, a], [3.5, np.nan]]) 
assert not mi.has_duplicates with tm.assert_produces_warning(FutureWarning): # Deprecated - see GH20239 assert mi.get_duplicates().equals(MultiIndex.from_arrays([[], []])) tm.assert_numpy_array_equal(mi.duplicated(), np.zeros(2, dtype='bool')) for n in range(1, 6): # 1st level shape for m in range(1, 5): # 2nd level shape # all possible unique combinations, including nan lab = product(range(-1, n), range(-1, m)) mi = MultiIndex(levels=[list('abcde')[:n], list('WXYZ')[:m]], labels=np.random.permutation(list(lab)).T) assert len(mi) == (n + 1) * (m + 1) assert not mi.has_duplicates with tm.assert_produces_warning(FutureWarning): # Deprecated - see GH20239 assert mi.get_duplicates().equals(MultiIndex.from_arrays( [[], []])) tm.assert_numpy_array_equal(mi.duplicated(), np.zeros(len(mi), dtype='bool'))
amolkahat/pandas
pandas/tests/indexes/multi/test_duplicates.py
pandas/core/computation/eval.py
# Copyright 2015 F5 Networks Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import json import mock class BigIPMock(object): """Mock BIG-IP® object Mocks a BIG-IP® object by substituting a mock icr_session object which returns a user created mock response object. To use, create a mock response object which will get returned by any icr_session HTTP method, then create an interface object, passing in this BIG-IPMock object. Example: # Create a mock response object with status code and JSON. Here # read_json_file() is used to get mock JSON, but you can always pass # in a JSON string, or create a dictionary object and convert to JSON # using json.loads(). response = BIG-IPMock.create_mock_response( 200, BIG-IPMock.read_json_file("f5/BIG-IP/interfaces/test/pool.json") ) # Create BIG-IP® object, passing in mocked response object big_ip = BIG-IPMock(response) # Create interface object test_pool = Pool(big_ip) # Call interface method which will receive mock response object created # above when it calls the icr_session method get(). description = test_pool.get_description("my-Pool") """ def __init__(self, response=mock.Mock()): """Initializes BIG-IPMock object. :param response: Mock response object to return from icr_session calls. :return: """ self.icontrol = self._create_icontrol() self.icr_session = self._create_icr_session() self.icr_uri = 'https://host-abc/mgmt/tm' self.response = response def _create_icontrol(self): return mock.Mock() def _create_icr_session(self): """Creates a mock icr_session object. This mocked icr_session substitutes basic request library methods (get, put, post, etc.) with a method that simply returns a mocked response object. Set the response on the BIG-IPMock object before calling one of the icr_session methods. :rtype object: mock session object. """ def mock_response(url, *args, **kwargs): return self.response icr_session = mock.Mock() icr_session.delete = mock_response icr_session.get = mock_response icr_session.put = mock_response icr_session.post = mock_response icr_session.put = mock_response return icr_session @staticmethod def create_mock_response(status_code, json_str): """Creates a mock HTTP response. :param int status_code: HTTP response code to mock. :param string json: JSON string to mock. :rtype object: mock HTTP response object. """ response = mock.Mock() response.status_code = status_code response.text = json_str response.json.return_value = json.loads(json_str) return response @staticmethod def read_json_file(filename): """Reads JSON file, returning a JSON string. The file must contain a valid JSON object, for example: {"key": "value"...} or {"key": {"key": "value"}...} :param string name: Name of file containing JSON object. :rtype string: JSON object as a string. """ file = open(filename) s = file.read() assert s.__len__() > 0 return s
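# Compact, runnable version of the docstring example above. Note the class is
# named BigIPMock as defined; the import path is assumed from the module path
# listed for this entry (f5/bigip/test/unit/big_ip_mock.py) and may differ.
import json
from f5.bigip.test.unit.big_ip_mock import BigIPMock

response = BigIPMock.create_mock_response(200, json.dumps({"description": "my pool"}))
big_ip = BigIPMock(response)
# every icr_session HTTP method now returns the canned response
assert big_ip.icr_session.get("https://host-abc/mgmt/tm/ltm/pool").json() == \
    {"description": "my pool"}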
# Copyright 2017 F5 Networks Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from f5.bigip import ManagementRoot from f5.bigip.tm.auth.radius import Radius from f5.sdk_exception import InvalidName from f5.sdk_exception import MissingRequiredCreationParameter import mock import pytest @pytest.fixture def FakeRadius(): fake_radius = mock.MagicMock() fake_radobj = Radius(fake_radius) return fake_radobj class TestCreate(object): def test_create_two(self, fakeicontrolsession): b = ManagementRoot('localhost', 'admin', 'admin') r1 = b.tm.auth.radius_s.radius r2 = b.tm.auth.radius_s.radius assert r1 is not r2 def test_create_no_args(self, FakeRadius): with pytest.raises(MissingRequiredCreationParameter): FakeRadius.create() def test_create_bad_name(self, FakeRadius): with pytest.raises(InvalidName): FakeRadius.create(name='testauth')
F5Networks/f5-common-python
f5/bigip/tm/auth/test/unit/test_radius.py
f5/bigip/test/unit/big_ip_mock.py
# coding=utf-8 # # Copyright 2015-2017 F5 Networks Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """BIG-IP® Advanced Firewall Manager™ (AFM®) module. REST URI ``http://localhost/mgmt/tm/security/scrubber`` GUI Path ``Security --> Option --> Network Firewall --> External Redirection --> Scrubbing Profile`` REST Kind ``tm:security:scrubbercollectionstate:*`` """ from f5.bigip.resource import Collection from f5.bigip.resource import OrganizingCollection from f5.bigip.resource import Resource class Scrubber(OrganizingCollection): """BIG-IP® AFM® Scrubber organizing collection.""" def __init__(self, security): super(Scrubber, self).__init__(security) self._meta_data['allowed_lazy_attributes'] = [ Profile_s] class Profile_s(Collection): """BIG-IP® AFM® Scrubber Profile collection""" def __init__(self, scrubber): super(Profile_s, self).__init__(scrubber) self._meta_data['allowed_lazy_attributes'] = [Profile] self._meta_data['attribute_registry'] = \ {'tm:security:scrubber:profile:profilestate': Profile} class Profile(Resource): """BIG-IP® AFM® Scrubber Profile resource""" def __init__(self, profile_s): super(Profile, self).__init__(profile_s) self._meta_data['required_json_kind'] = \ 'tm:security:scrubber:profile:profilestate' self._meta_data['required_load_parameters'].update(('partition', 'name')) self._meta_data['attribute_registry'] = \ {'tm:security:scrubber:profile:scrubber-rt-domain:scrubber_rt_domaincollectionstate': Scrubber_Rt_Domain_s, 'tm:security:scrubber:profile:scrubber-categories:scrubber-categoriescollectionstate': Scrubber_Categories_s, 'tm:security:scrubber:profile:scrubber-virtual-server:scrubber-virtual-servercollectionstate': Scrubber_Virtual_Server_s, 'tm:security:scrubber:profile:scrubber-netflow-protected-server:scrubber-netflow-protected-servercollectionstate': Scrubber_Netflow_Protected_Server_s} self._meta_data['allowed_lazy_attributes'] = [ Scrubber_Rt_Domain_s, Scrubber_Virtual_Server_s, Scrubber_Categories_s, Scrubber_Netflow_Protected_Server_s] class Scrubber_Rt_Domain_s(Collection): """BIG-IP® AFM® Scrubber Profile Route Domain collection""" def __init__(self, profile): super(Scrubber_Rt_Domain_s, self).__init__(profile) self._meta_data['allowed_lazy_attributes'] = [Scrubber_Rt_Domain] self._meta_data['attribute_registry'] = \ {'tm:security:scrubber:profile:scrubber-rt-domain:scrubber-rt-domainstate': Scrubber_Rt_Domain} class Scrubber_Rt_Domain(Resource): """BIG-IP® AFM® Scrubber Profile Route Domain resource""" def __init__(self, scrubber_rt_domain_s): super(Scrubber_Rt_Domain, self).__init__(scrubber_rt_domain_s) self._meta_data['allowed_lazy_attributes'] = [Scrubber_Rd_Network_Prefix_s] self._meta_data['required_json_kind'] = \ 'tm:security:scrubber:profile:scrubber-rt-domain:scrubber-rt-domainstate' self._meta_data['attribute_registry'] = \ {'tm:security:scrubber:profile:scrubber-rt-domain:scrubber-rd-network-prefix:scrubber-rd-network-prefixcollectionstate': Scrubber_Rd_Network_Prefix_s} self._meta_data['required_creation_parameters'].update(('name', 
'routeDomain')) class Scrubber_Rd_Network_Prefix_s(Collection): """BIG-IP® AFM® Scrubber Rd Network Prefix collection""" def __init__(self, scrubber_rt_domain): super(Scrubber_Rd_Network_Prefix_s, self).__init__(scrubber_rt_domain) self._meta_data['allowed_lazy_attributes'] = [Scrubber_Rd_Network_Prefix] self._meta_data['attribute_registry'] = \ {'tm:security:scrubber:profile:scrubber-rt-domain:scrubber-rd-network-prefix:scrubber-rd-network-prefixstate': Scrubber_Rd_Network_Prefix} class Scrubber_Rd_Network_Prefix(Resource): """BIG-IP® AFM® Scrubber Rd Network Prefix resource""" def __init__(self, scrubber_rd_network_prefix_s): super(Scrubber_Rd_Network_Prefix, self).__init__(scrubber_rd_network_prefix_s) self._meta_data['required_json_kind'] = \ 'tm:security:scrubber:profile:scrubber-rt-domain:scrubber-rd-network-prefix:scrubber-rd-network-prefixstate' self._meta_data['required_creation_parameters'].update(('name', 'nextHop', 'dstIp', 'mask')) class Scrubber_Virtual_Server_s(Collection): """BIG-IP® AFM® Scrubber Profile Virtual Server collection""" def __init__(self, profile): super(Scrubber_Virtual_Server_s, self).__init__(profile) self._meta_data['allowed_lazy_attributes'] = [Scrubber_Virtual_Server] self._meta_data['attribute_registry'] = \ {'tm:security:scrubber:profile:scrubber-virtual-server:scrubber-virtual-serverstate': Scrubber_Virtual_Server} class Scrubber_Virtual_Server(Resource): """BIG-IP® AFM® Scrubber Profile Virtual Server resource""" def __init__(self, scrubber_virtual_server_s): super(Scrubber_Virtual_Server, self).__init__(scrubber_virtual_server_s) self._meta_data['required_json_kind'] = \ 'tm:security:scrubber:profile:scrubber-virtual-server:scrubber-virtual-serverstate' self._meta_data['required_creation_parameters'].update(('name', 'vsName')) class Scrubber_Categories_s(Collection): """BIG-IP® AFM® Scrubber Profile Categories collection""" def __init__(self, profile): super(Scrubber_Categories_s, self).__init__(profile) self._meta_data['allowed_lazy_attributes'] = [Scrubber_Categories] self._meta_data['attribute_registry'] = \ {'tm:security:scrubber:profile:scrubber-categories:scrubber-categoriesstate': Scrubber_Categories} class Scrubber_Categories(Resource): """BIG-IP® AFM® Scrubber Profile Categories resource""" def __init__(self, scrubber_categories_s): super(Scrubber_Categories, self).__init__(scrubber_categories_s) self._meta_data['required_json_kind'] = \ 'tm:security:scrubber:profile:scrubber-categories:scrubber-categoriesstate' self._meta_data['required_creation_parameters'].update(('name', 'blacklistCategory', 'routeDomainName')) class Scrubber_Netflow_Protected_Server_s(Collection): """BIG-IP® AFM® Scrubber Profile Netflow Protected Server collection""" def __init__(self, profile): super(Scrubber_Netflow_Protected_Server_s, self).__init__(profile) self._meta_data['allowed_lazy_attributes'] = [Scrubber_Netflow_Protected_Server] self._meta_data['attribute_registry'] = \ {'tm:security:scrubber:profile:scrubber-netflow-protected-server:scrubber-netflow-protected-serverstate': Scrubber_Netflow_Protected_Server} class Scrubber_Netflow_Protected_Server(Resource): """BIG-IP® AFM® Scrubber Profile Netflow Protected Server resource""" def __init__(self, scrubber_netflow_protected_server_s): super(Scrubber_Netflow_Protected_Server, self).__init__(scrubber_netflow_protected_server_s) self._meta_data['required_json_kind'] = \ 'tm:security:scrubber:profile:scrubber-netflow-protected-server:scrubber-netflow-protected-serverstate' 
self._meta_data['required_creation_parameters'].update(('name', 'npsName'))
# Copyright 2017 F5 Networks Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from f5.bigip import ManagementRoot from f5.bigip.tm.auth.radius import Radius from f5.sdk_exception import InvalidName from f5.sdk_exception import MissingRequiredCreationParameter import mock import pytest @pytest.fixture def FakeRadius(): fake_radius = mock.MagicMock() fake_radobj = Radius(fake_radius) return fake_radobj class TestCreate(object): def test_create_two(self, fakeicontrolsession): b = ManagementRoot('localhost', 'admin', 'admin') r1 = b.tm.auth.radius_s.radius r2 = b.tm.auth.radius_s.radius assert r1 is not r2 def test_create_no_args(self, FakeRadius): with pytest.raises(MissingRequiredCreationParameter): FakeRadius.create() def test_create_bad_name(self, FakeRadius): with pytest.raises(InvalidName): FakeRadius.create(name='testauth')
F5Networks/f5-common-python
f5/bigip/tm/auth/test/unit/test_radius.py
f5/bigip/tm/security/scrubber.py
# -*- coding: utf-8 -*- """Some utility functions.""" # Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr> # # License: BSD (3-clause) from collections.abc import Iterable import os import os.path as op import logging import tempfile from threading import Thread import time import numpy as np from .check import _check_option from .config import get_config from ._logging import logger class ProgressBar(object): """Generate a command-line progressbar. Parameters ---------- iterable : iterable | int | None The iterable to use. Can also be an int for backward compatibility (acts like ``max_value``). initial_value : int Initial value of process, useful when resuming process from a specific value, defaults to 0. mesg : str Message to include at end of progress bar. max_total_width : int | str Maximum total message width. Can use "auto" (default) to try to set a sane value based on the current terminal width. max_value : int | None The max value. If None, the length of ``iterable`` will be used. **kwargs : dict Additional keyword arguments for tqdm. """ def __init__(self, iterable=None, initial_value=0, mesg=None, max_total_width='auto', max_value=None, **kwargs): # noqa: D102 # The following mimics this, but with configurable module to use # from ..externals.tqdm import auto from ..externals import tqdm which_tqdm = get_config('MNE_TQDM', 'tqdm.auto') _check_option('MNE_TQDM', which_tqdm[:5], ('tqdm', 'tqdm.', 'off'), extra='beginning') logger.debug(f'Using ProgressBar with {which_tqdm}') if which_tqdm not in ('tqdm', 'off'): tqdm = getattr(tqdm, which_tqdm.split('.', 1)[1]) tqdm = tqdm.tqdm defaults = dict( leave=True, mininterval=0.016, miniters=1, smoothing=0.05, bar_format='{percentage:3.0f}%|{bar}| {desc} : {n_fmt}/{total_fmt} [{elapsed}<{remaining}, {rate_fmt:>11}{postfix}]', # noqa: E501 ) for key, val in defaults.items(): if key not in kwargs: kwargs.update({key: val}) if isinstance(iterable, Iterable): self.iterable = iterable if max_value is None: self.max_value = len(iterable) else: self.max_value = max_value else: # ignore max_value then self.max_value = int(iterable) self.iterable = None if max_total_width == 'auto': max_total_width = None # tqdm's auto with tempfile.NamedTemporaryFile('wb', prefix='tmp_mne_prog') as tf: self._mmap_fname = tf.name del tf # should remove the file self._mmap = None disable = logger.level > logging.INFO or which_tqdm == 'off' self._tqdm = tqdm( iterable=self.iterable, desc=mesg, total=self.max_value, initial=initial_value, ncols=max_total_width, disable=disable, **kwargs) def update(self, cur_value): """Update progressbar with current value of process. Parameters ---------- cur_value : number Current value of process. Should be <= max_value (but this is not enforced). The percent of the progressbar will be computed as ``(cur_value / max_value) * 100``. """ self.update_with_increment_value(cur_value - self._tqdm.n) def update_with_increment_value(self, increment_value): """Update progressbar with an increment. Parameters ---------- increment_value : int Value of the increment of process. The percent of the progressbar will be computed as ``(self.cur_value + increment_value / max_value) * 100``. """ self._tqdm.update(increment_value) def __iter__(self): """Iterate to auto-increment the pbar with 1.""" for x in self._tqdm: yield x def subset(self, idx): """Make a joblib-friendly index subset updater. Parameters ---------- idx : ndarray List of indices for this subset. 
Returns ------- updater : instance of PBSubsetUpdater Class with a ``.update(ii)`` method. """ return _PBSubsetUpdater(self, idx) def __enter__(self): # noqa: D105 # This should only be used with pb.subset and parallelization if op.isfile(self._mmap_fname): os.remove(self._mmap_fname) # prevent corner cases where self.max_value == 0 self._mmap = np.memmap(self._mmap_fname, bool, 'w+', shape=max(self.max_value, 1)) self.update(0) # must be zero as we just created the memmap # We need to control how the pickled bars exit: remove print statements self._thread = _UpdateThread(self) self._thread.start() return self def __exit__(self, type_, value, traceback): # noqa: D105 # Restore exit behavior for our one from the main thread self.update(self._mmap.sum()) self._tqdm.close() self._thread._mne_run = False self._thread.join() self._mmap = None if op.isfile(self._mmap_fname): os.remove(self._mmap_fname) def __del__(self): """Ensure output completes.""" if getattr(self, '_tqdm', None) is not None: self._tqdm.close() class _UpdateThread(Thread): def __init__(self, pb): super(_UpdateThread, self).__init__(daemon=True) self._mne_run = True self._mne_pb = pb def run(self): while self._mne_run: self._mne_pb.update(self._mne_pb._mmap.sum()) time.sleep(1. / 30.) # 30 Hz refresh is plenty class _PBSubsetUpdater(object): def __init__(self, pb, idx): self.mmap = pb._mmap self.idx = idx def update(self, ii): self.mmap[self.idx[ii - 1]] = True
# Author: Yousra Bekhti <yousra.bekhti@gmail.com> # Mark Wronkiewicz <wronk@uw.edu> # # License: BSD (3-clause) import os.path as op import numpy as np from numpy.testing import assert_almost_equal import pytest from mne import read_source_spaces from mne.datasets import testing from mne.simulation import simulate_sparse_stc, source_estimate_quantification from mne.utils import run_tests_if_main data_path = testing.data_path(download=False) src_fname = op.join(data_path, 'subjects', 'sample', 'bem', 'sample-oct-6-src.fif') @testing.requires_testing_data def test_metrics(): """Test simulation metrics.""" src = read_source_spaces(src_fname) times = np.arange(600) / 1000. rng = np.random.RandomState(42) stc1 = simulate_sparse_stc(src, n_dipoles=2, times=times, random_state=rng) stc2 = simulate_sparse_stc(src, n_dipoles=2, times=times, random_state=rng) E1_rms = source_estimate_quantification(stc1, stc1, metric='rms') E2_rms = source_estimate_quantification(stc2, stc2, metric='rms') E1_cos = source_estimate_quantification(stc1, stc1, metric='cosine') E2_cos = source_estimate_quantification(stc2, stc2, metric='cosine') # ### Tests to add assert (E1_rms == 0.) assert (E2_rms == 0.) assert_almost_equal(E1_cos, 0.) assert_almost_equal(E2_cos, 0.) stc_bad = stc2.copy().crop(0, 0.5) pytest.raises(ValueError, source_estimate_quantification, stc1, stc_bad) stc_bad = stc2.copy() stc_bad.tmin -= 0.1 pytest.raises(ValueError, source_estimate_quantification, stc1, stc_bad) pytest.raises(ValueError, source_estimate_quantification, stc1, stc2, metric='foo') run_tests_if_main()
kambysese/mne-python
mne/simulation/tests/test_metrics.py
mne/utils/progressbar.py
"""Compute Linearly constrained minimum variance (LCMV) beamformer.""" # Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr> # Roman Goj <roman.goj@gmail.com> # Britta Westner <britta.wstnr@gmail.com> # # License: BSD (3-clause) import numpy as np from ..rank import compute_rank from ..io.meas_info import _simplify_info from ..io.pick import pick_channels_cov, pick_info from ..forward import _subject_from_forward from ..minimum_norm.inverse import combine_xyz, _check_reference, _check_depth from ..source_estimate import _make_stc, _get_src_type from ..utils import logger, verbose, _check_channels_spatial_filter from ..utils import _check_one_ch_type, _check_info_inv from ._compute_beamformer import ( _prepare_beamformer_input, _compute_power, _compute_beamformer, _check_src_type, Beamformer, _proj_whiten_data) @verbose def make_lcmv(info, forward, data_cov, reg=0.05, noise_cov=None, label=None, pick_ori=None, rank='info', weight_norm='unit-noise-gain-invariant', reduce_rank=False, depth=None, inversion='matrix', verbose=None): """Compute LCMV spatial filter. Parameters ---------- info : instance of Info The measurement info to specify the channels to include. Bad channels in info['bads'] are not used. forward : instance of Forward Forward operator. data_cov : instance of Covariance The data covariance. reg : float The regularization for the whitened data covariance. noise_cov : instance of Covariance The noise covariance. If provided, whitening will be done. Providing a noise covariance is mandatory if you mix sensor types, e.g. gradiometers with magnetometers or EEG with MEG. label : instance of Label Restricts the LCMV solution to a given label. %(bf_pick_ori)s - ``'vector'`` Keeps the currents for each direction separate %(rank_info)s %(weight_norm)s Defaults to ``'unit-noise-gain-invariant'``. %(reduce_rank)s %(depth)s .. versionadded:: 0.18 %(bf_inversion)s .. versionadded:: 0.21 %(verbose)s Returns ------- filters : instance of Beamformer Dictionary containing filter weights from LCMV beamformer. Contains the following keys: 'kind' : str The type of beamformer, in this case 'LCMV'. 'weights' : array The filter weights of the beamformer. 'data_cov' : instance of Covariance The data covariance matrix used to compute the beamformer. 'noise_cov' : instance of Covariance | None The noise covariance matrix used to compute the beamformer. 'whitener' : None | ndarray, shape (n_channels, n_channels) Whitening matrix, provided if whitening was applied to the covariance matrix and leadfield during computation of the beamformer weights. 'weight_norm' : str | None Type of weight normalization used to compute the filter weights. 'pick-ori' : None | 'max-power' | 'normal' | 'vector' The orientation in which the beamformer filters were computed. 'ch_names' : list of str Channels used to compute the beamformer. 'proj' : array Projections used to compute the beamformer. 'is_ssp' : bool If True, projections were applied prior to filter computation. 'vertices' : list Vertices for which the filter weights were computed. 'is_free_ori' : bool If True, the filter was computed with free source orientation. 'n_sources' : int Number of source location for which the filter weight were computed. 'src_type' : str Type of source space. 'source_nn' : ndarray, shape (n_sources, 3) For each source location, the surface normal. 'proj' : ndarray, shape (n_channels, n_channels) Projections used to compute the beamformer. 'subject' : str The subject ID. 
'rank' : int The rank of the data covariance matrix used to compute the beamformer weights. 'max-power-ori' : ndarray, shape (n_sources, 3) | None When pick_ori='max-power', this fields contains the estimated direction of maximum power at each source location. 'inversion' : 'single' | 'matrix' Whether the spatial filters were computed for each dipole separately or jointly for all dipoles at each vertex using a matrix inversion. Notes ----- The original reference is :footcite:`VanVeenEtAl1997`. To obtain the Sekihara unit-noise-gain vector beamformer, you should use ``weight_norm='unit-noise-gain', pick_ori='vector'`` followed by :meth:`vec_stc.project('pca', src) <mne.VectorSourceEstimate.project>`. .. versionchanged:: 0.21 The computations were extensively reworked, and the default for ``weight_norm`` was set to ``'unit-noise-gain-invariant'``. References ---------- .. footbibliography:: """ # check number of sensor types present in the data and ensure a noise cov info = _simplify_info(info) noise_cov, _, allow_mismatch = _check_one_ch_type( 'lcmv', info, forward, data_cov, noise_cov) # XXX we need this extra picking step (can't just rely on minimum norm's # because there can be a mismatch. Should probably add an extra arg to # _prepare_beamformer_input at some point (later) picks = _check_info_inv(info, forward, data_cov, noise_cov) info = pick_info(info, picks) data_rank = compute_rank(data_cov, rank=rank, info=info) noise_rank = compute_rank(noise_cov, rank=rank, info=info) for key in data_rank: if (key not in noise_rank or data_rank[key] != noise_rank[key]) and \ not allow_mismatch: raise ValueError('%s data rank (%s) did not match the noise ' 'rank (%s)' % (key, data_rank[key], noise_rank.get(key, None))) del noise_rank rank = data_rank logger.info('Making LCMV beamformer with rank %s' % (rank,)) del data_rank depth = _check_depth(depth, 'depth_sparse') if inversion == 'single': depth['combine_xyz'] = False is_free_ori, info, proj, vertno, G, whitener, nn, orient_std = \ _prepare_beamformer_input( info, forward, label, pick_ori, noise_cov=noise_cov, rank=rank, pca=False, **depth) ch_names = list(info['ch_names']) data_cov = pick_channels_cov(data_cov, include=ch_names) Cm = data_cov._get_square() if 'estimator' in data_cov: del data_cov['estimator'] rank_int = sum(rank.values()) del rank # compute spatial filter n_orient = 3 if is_free_ori else 1 W, max_power_ori = _compute_beamformer( G, Cm, reg, n_orient, weight_norm, pick_ori, reduce_rank, rank_int, inversion=inversion, nn=nn, orient_std=orient_std, whitener=whitener) # get src type to store with filters for _make_stc src_type = _get_src_type(forward['src'], vertno) # get subject to store with filters subject_from = _subject_from_forward(forward) # Is the computed beamformer a scalar or vector beamformer? 
is_free_ori = is_free_ori if pick_ori in [None, 'vector'] else False is_ssp = bool(info['projs']) filters = Beamformer( kind='LCMV', weights=W, data_cov=data_cov, noise_cov=noise_cov, whitener=whitener, weight_norm=weight_norm, pick_ori=pick_ori, ch_names=ch_names, proj=proj, is_ssp=is_ssp, vertices=vertno, is_free_ori=is_free_ori, n_sources=forward['nsource'], src_type=src_type, source_nn=forward['source_nn'].copy(), subject=subject_from, rank=rank_int, max_power_ori=max_power_ori, inversion=inversion) return filters def _apply_lcmv(data, filters, info, tmin, max_ori_out): """Apply LCMV spatial filter to data for source reconstruction.""" if max_ori_out != 'signed': raise ValueError('max_ori_out must be "signed", got %s' % (max_ori_out,)) if isinstance(data, np.ndarray) and data.ndim == 2: data = [data] return_single = True else: return_single = False W = filters['weights'] for i, M in enumerate(data): if len(M) != len(filters['ch_names']): raise ValueError('data and picks must have the same length') if not return_single: logger.info("Processing epoch : %d" % (i + 1)) M = _proj_whiten_data(M, info['projs'], filters) # project to source space using beamformer weights vector = False if filters['is_free_ori']: sol = np.dot(W, M) if filters['pick_ori'] == 'vector': vector = True else: logger.info('combining the current components...') sol = combine_xyz(sol) else: # Linear inverse: do computation here or delayed if (M.shape[0] < W.shape[0] and filters['pick_ori'] != 'max-power'): sol = (W, M) else: sol = np.dot(W, M) if filters['pick_ori'] == 'max-power' and max_ori_out == 'abs': sol = np.abs(sol) tstep = 1.0 / info['sfreq'] # compatibility with 0.16, add src_type as None if not present: filters, warn_text = _check_src_type(filters) yield _make_stc(sol, vertices=filters['vertices'], tmin=tmin, tstep=tstep, subject=filters['subject'], vector=vector, source_nn=filters['source_nn'], src_type=filters['src_type'], warn_text=warn_text) logger.info('[done]') @verbose def apply_lcmv(evoked, filters, max_ori_out='signed', verbose=None): """Apply Linearly Constrained Minimum Variance (LCMV) beamformer weights. Apply Linearly Constrained Minimum Variance (LCMV) beamformer weights on evoked data. Parameters ---------- evoked : Evoked Evoked data to invert. filters : instance of Beamformer LCMV spatial filter (beamformer weights). Filter weights returned from :func:`make_lcmv`. max_ori_out : 'signed' Specify in case of pick_ori='max-power'. %(verbose)s Returns ------- stc : SourceEstimate | VolSourceEstimate | VectorSourceEstimate Source time courses. See Also -------- make_lcmv, apply_lcmv_raw, apply_lcmv_epochs, apply_lcmv_cov Notes ----- .. versionadded:: 0.18 """ _check_reference(evoked) info = evoked.info data = evoked.data tmin = evoked.times[0] sel = _check_channels_spatial_filter(evoked.ch_names, filters) data = data[sel] stc = _apply_lcmv(data=data, filters=filters, info=info, tmin=tmin, max_ori_out=max_ori_out) return next(stc) @verbose def apply_lcmv_epochs(epochs, filters, max_ori_out='signed', return_generator=False, verbose=None): """Apply Linearly Constrained Minimum Variance (LCMV) beamformer weights. Apply Linearly Constrained Minimum Variance (LCMV) beamformer weights on single trial data. Parameters ---------- epochs : Epochs Single trial epochs. filters : instance of Beamformer LCMV spatial filter (beamformer weights) Filter weights returned from :func:`make_lcmv`. max_ori_out : 'signed' Specify in case of pick_ori='max-power'. 
return_generator : bool Return a generator object instead of a list. This allows iterating over the stcs without having to keep them all in memory. %(verbose)s Returns ------- stc: list | generator of (SourceEstimate | VolSourceEstimate) The source estimates for all epochs. See Also -------- make_lcmv, apply_lcmv_raw, apply_lcmv, apply_lcmv_cov """ _check_reference(epochs) info = epochs.info tmin = epochs.times[0] sel = _check_channels_spatial_filter(epochs.ch_names, filters) data = epochs.get_data()[:, sel, :] stcs = _apply_lcmv(data=data, filters=filters, info=info, tmin=tmin, max_ori_out=max_ori_out) if not return_generator: stcs = [s for s in stcs] return stcs @verbose def apply_lcmv_raw(raw, filters, start=None, stop=None, max_ori_out='signed', verbose=None): """Apply Linearly Constrained Minimum Variance (LCMV) beamformer weights. Apply Linearly Constrained Minimum Variance (LCMV) beamformer weights on raw data. Parameters ---------- raw : mne.io.Raw Raw data to invert. filters : instance of Beamformer LCMV spatial filter (beamformer weights). Filter weights returned from :func:`make_lcmv`. start : int Index of first time sample (index not time is seconds). stop : int Index of first time sample not to include (index not time is seconds). max_ori_out : 'signed' Specify in case of pick_ori='max-power'. %(verbose)s Returns ------- stc : SourceEstimate | VolSourceEstimate Source time courses. See Also -------- make_lcmv, apply_lcmv_epochs, apply_lcmv, apply_lcmv_cov """ _check_reference(raw) info = raw.info sel = _check_channels_spatial_filter(raw.ch_names, filters) data, times = raw[sel, start:stop] tmin = times[0] stc = _apply_lcmv(data=data, filters=filters, info=info, tmin=tmin, max_ori_out=max_ori_out) return next(stc) @verbose def apply_lcmv_cov(data_cov, filters, verbose=None): """Apply Linearly Constrained Minimum Variance (LCMV) beamformer weights. Apply Linearly Constrained Minimum Variance (LCMV) beamformer weights to a data covariance matrix to estimate source power. Parameters ---------- data_cov : instance of Covariance Data covariance matrix. filters : instance of Beamformer LCMV spatial filter (beamformer weights). Filter weights returned from :func:`make_lcmv`. %(verbose)s Returns ------- stc : SourceEstimate | VolSourceEstimate Source power. See Also -------- make_lcmv, apply_lcmv, apply_lcmv_epochs, apply_lcmv_raw """ sel = _check_channels_spatial_filter(data_cov.ch_names, filters) sel_names = [data_cov.ch_names[ii] for ii in sel] data_cov = pick_channels_cov(data_cov, sel_names) n_orient = filters['weights'].shape[0] // filters['n_sources'] # Need to project and whiten along both dimensions data = _proj_whiten_data(data_cov['data'].T, data_cov['projs'], filters) data = _proj_whiten_data(data.T, data_cov['projs'], filters) del data_cov source_power = _compute_power(data, filters['weights'], n_orient) # compatibility with 0.16, add src_type as None if not present: filters, warn_text = _check_src_type(filters) return(_make_stc(source_power, vertices=filters['vertices'], src_type=filters['src_type'], tmin=0., tstep=1., subject=filters['subject'], source_nn=filters['source_nn'], warn_text=warn_text))
# Author: Yousra Bekhti <yousra.bekhti@gmail.com> # Mark Wronkiewicz <wronk@uw.edu> # # License: BSD (3-clause) import os.path as op import numpy as np from numpy.testing import assert_almost_equal import pytest from mne import read_source_spaces from mne.datasets import testing from mne.simulation import simulate_sparse_stc, source_estimate_quantification from mne.utils import run_tests_if_main data_path = testing.data_path(download=False) src_fname = op.join(data_path, 'subjects', 'sample', 'bem', 'sample-oct-6-src.fif') @testing.requires_testing_data def test_metrics(): """Test simulation metrics.""" src = read_source_spaces(src_fname) times = np.arange(600) / 1000. rng = np.random.RandomState(42) stc1 = simulate_sparse_stc(src, n_dipoles=2, times=times, random_state=rng) stc2 = simulate_sparse_stc(src, n_dipoles=2, times=times, random_state=rng) E1_rms = source_estimate_quantification(stc1, stc1, metric='rms') E2_rms = source_estimate_quantification(stc2, stc2, metric='rms') E1_cos = source_estimate_quantification(stc1, stc1, metric='cosine') E2_cos = source_estimate_quantification(stc2, stc2, metric='cosine') # ### Tests to add assert (E1_rms == 0.) assert (E2_rms == 0.) assert_almost_equal(E1_cos, 0.) assert_almost_equal(E2_cos, 0.) stc_bad = stc2.copy().crop(0, 0.5) pytest.raises(ValueError, source_estimate_quantification, stc1, stc_bad) stc_bad = stc2.copy() stc_bad.tmin -= 0.1 pytest.raises(ValueError, source_estimate_quantification, stc1, stc_bad) pytest.raises(ValueError, source_estimate_quantification, stc1, stc2, metric='foo') run_tests_if_main()
kambysese/mne-python
mne/simulation/tests/test_metrics.py
mne/beamformer/_lcmv.py
import pytest # Use pytest verbose asserts # https://stackoverflow.com/questions/41522767/pytest-assert-introspection-in-helper-function pytest.register_assert_rewrite('utils.urls') # Untrusted attachments and samples domains that are indexed INDEXED_ATTACHMENT_DOMAINS = set(( 'mdn.mozillademos.org', # Main attachments domain 'demos-origin.mdn.mozit.cloud', # Attachments origin )) # Kuma web domains that are indexed INDEXED_WEB_DOMAINS = set(( 'developer.mozilla.org', # Main website, CDN origin 'cdn.mdn.mozilla.net', # Assets CDN ))
import re from urlparse import urlsplit import pytest import requests from pyquery import PyQuery from . import INDEXED_WEB_DOMAINS META_ROBOTS_RE = re.compile(r'''(?x) # Verbose regex mode <meta\s+ # meta tag followed by whitespace name="robots"\s* # name=robots content="(?P<content>[^"]+)" # capture the content \s*> # end meta tag ''') @pytest.fixture() def is_indexed(base_url): hostname = urlsplit(base_url).netloc return hostname in INDEXED_WEB_DOMAINS @pytest.mark.headless @pytest.mark.nondestructive def test_document_json(base_url): url = base_url + '/en-US/docs/Web$json' resp = requests.get(url) assert resp.status_code == 200 assert resp.headers['Content-Type'] == 'application/json' assert resp.headers['Access-Control-Allow-Origin'] == '*' @pytest.mark.headless @pytest.mark.nondestructive def test_document(base_url, is_indexed): url = base_url + '/en-US/docs/Web' resp = requests.get(url) assert resp.status_code == 200 assert resp.headers['Content-Type'] == 'text/html; charset=utf-8' meta = META_ROBOTS_RE.search(resp.content) assert meta content = meta.group('content') if is_indexed: assert content == 'index, follow' else: assert content == 'noindex, nofollow' @pytest.mark.headless @pytest.mark.nondestructive def test_document_based_redirection(base_url): """Ensure that content-based redirects properly redirect.""" url = base_url + '/en-US/docs/MDN/Promote' resp = requests.get(url) assert resp.status_code == 200 assert len(resp.history) == 1 assert resp.history[0].status_code == 301 assert resp.url == base_url + '/en-US/docs/MDN/About/Promote' @pytest.mark.headless @pytest.mark.nondestructive def test_document_based_redirection_suppression(base_url): """ Ensure that the redirect directive and not the content of the target page is displayed when content-based redirects are suppressed. """ url = base_url + '/en-US/docs/MDN/Promote?redirect=no' resp = requests.get(url) assert resp.status_code == 200 assert not resp.history body = PyQuery(resp.text)('#wikiArticle') assert body.text().startswith('REDIRECT ') assert body.find('a[href="/en-US/docs/MDN/About/Promote"]') @pytest.mark.smoke @pytest.mark.headless @pytest.mark.nondestructive def test_home(base_url, is_indexed): url = base_url + '/en-US/' resp = requests.get(url) assert resp.status_code == 200 assert resp.headers['Content-Type'] == 'text/html; charset=utf-8' meta = META_ROBOTS_RE.search(resp.content) assert meta content = meta.group('content') if is_indexed: assert content == 'index, follow' else: assert content == 'noindex, nofollow'
jwhitlock/kuma
tests/headless/test_endpoints.py
tests/headless/__init__.py
import time from inspect import isclass import os from cached_property import cached_property from jsmin import jsmin from navmazing import Navigate, NavigateStep from selenium.common.exceptions import NoSuchElementException from widgetastic.browser import Browser, DefaultPlugin from cfme import exceptions from cfme.utils.browser import manager from cfme.utils.log import logger, create_sublogger from cfme.utils.wait import wait_for from fixtures.pytest_store import store from . import Implementation class MiqSSUIBrowser(Browser): def __init__(self, selenium, endpoint, extra_objects=None): extra_objects = extra_objects or {} extra_objects.update({ 'appliance': endpoint.owner, 'endpoint': endpoint, 'store': store, }) super(MiqSSUIBrowser, self).__init__( selenium, plugin_class=MiqSSUIBrowserPlugin, logger=create_sublogger('MiqSSUIBrowser'), extra_objects=extra_objects) self.window_handle = selenium.current_window_handle # TODO: Use the same base class for both UI & SSUI since they are 99% the same self.logger.info( 'Opened browser %s %s', selenium.capabilities.get('browserName', 'unknown'), selenium.capabilities.get('version', 'unknown')) @property def appliance(self): return self.extra_objects['appliance'] def create_view(self, *args, **kwargs): return self.appliance.ssui.create_view(*args, **kwargs) @property def product_version(self): return self.appliance.version class MiqSSUIBrowserPlugin(DefaultPlugin): ENSURE_PAGE_SAFE = jsmin(''' function checkProgressBar() { try { return $('#ngProgress').attr('style').indexOf('width: 0%') > -1; } catch(err) { // Not ready yet return false; } } function checkJquery() { if(typeof $ == 'undefined') { return true; } else { return !($.active > 0); } } return checkProgressBar() && checkJquery();''') def ensure_page_safe(self, timeout='20s'): # THIS ONE SHOULD ALWAYS USE JAVASCRIPT ONLY, NO OTHER SELENIUM INTERACTION def _check(): result = self.browser.execute_script(self.ENSURE_PAGE_SAFE, silent=True) # TODO: Logging return bool(result) wait_for(_check, timeout=timeout, delay=2, silent_failure=True, very_quiet=True) def after_keyboard_input(self, element, keyboard_input): self.browser.plugin.ensure_page_safe() class SSUINavigateStep(NavigateStep): VIEW = None @cached_property def view(self): if self.VIEW is None: raise AttributeError('{} does not have VIEW specified'.format(type(self).__name__)) return self.create_view(self.VIEW, additional_context={'object': self.obj}) @property def appliance(self): return self.obj.appliance def create_view(self, *args, **kwargs): return self.appliance.ssui.create_view(*args, **kwargs) def am_i_here(self): try: return self.view.is_displayed except (AttributeError, NoSuchElementException): return False def pre_navigate(self, *args, **kwargs): self.appliance.browser.open_browser(url_key=self.obj.appliance.server.address()) def do_nav(self, _tries=0, *args, **kwargs): """Describes how the navigation should take place.""" try: self.step(*args, **kwargs) except Exception as e: logger.error(e) raise self.go(_tries, *args, **kwargs) def log_message(self, msg, level="debug"): class_name = self.obj.__name__ if isclass(self.obj) else self.obj.__class__.__name__ str_msg = "[SUI-NAV/{}/{}]: {}".format(class_name, self._name, msg) getattr(logger, level)(str_msg) def construct_message(self, here, resetter, view, duration, waited): str_here = "Already Here" if here else "Needed Navigation" str_resetter = "Resetter Used" if resetter else "No Resetter" str_view = "View Returned" if view else "No View Available" str_waited = "Waited on 
View" if waited else "No Wait on View" return "{}/{}/{}/{} (elapsed {}ms)".format( str_here, str_resetter, str_view, str_waited, duration ) def go(self, _tries=0, *args, **kwargs): nav_args = {'use_resetter': True, 'wait_for_view': False} self.log_message("Beginning SUI Navigation...", level="info") start_time = time.time() if _tries > 2: # Need at least three tries: # 1: login_admin handles an alert or CannotContinueWithNavigation appears. # 2: Everything should work. If not, NavigationError. raise exceptions.NavigationError(self._name) _tries += 1 for arg in nav_args: if arg in kwargs: nav_args[arg] = kwargs.pop(arg) self.pre_navigate(_tries, *args, **kwargs) here = False resetter_used = False waited = False try: here = self.am_i_here() except Exception as e: self.log_message( "Exception raised [{}] whilst checking if already here".format(e), level="error") if not here: self.log_message("Prerequisite Needed") self.prerequisite_view = self.prerequisite() self.do_nav(_tries, *args, **kwargs) if nav_args['use_resetter']: resetter_used = True self.resetter() self.post_navigate(_tries) view = self.view if self.VIEW is not None else None duration = int((time.time() - start_time) * 1000) if view and nav_args['wait_for_view'] and not os.environ.get( 'DISABLE_NAVIGATE_ASSERT', False): waited = True wait_for( lambda: view.is_displayed, num_sec=10, message="Waiting for view [{}] to display".format(view.__class__.__name__) ) self.log_message( self.construct_message(here, resetter_used, view, duration, waited), level="info" ) return view navigator = Navigate() navigate_to = navigator.navigate class ViaSSUI(Implementation): name = "SSUI" def __str__(self): return 'SSUI' @cached_property def widgetastic(self): """This gives us a widgetastic browser.""" # TODO: Make this a property that could watch for browser change? browser = self.open_browser(url_key=self.appliance.server.address()) wt = MiqSSUIBrowser(browser, self) manager.add_cleanup(self._reset_cache) return wt
# -*- coding: utf-8 -*- import fauxfactory import pytest from cfme.common.provider import cleanup_vm from cfme.infrastructure.provider import InfraProvider from cfme.infrastructure.provider.rhevm import RHEVMProvider from cfme.infrastructure.pxe import get_template_from_config, ISODatastore from cfme.provisioning import do_vm_provisioning from cfme.utils import testgen from cfme.utils.blockers import GH from cfme.utils.conf import cfme_data pytestmark = [ pytest.mark.meta(server_roles="+automate"), pytest.mark.usefixtures('uses_infra_providers'), pytest.mark.tier(2) ] def pytest_generate_tests(metafunc): # Filter out providers without provisioning data or hosts defined argnames, argvalues, idlist = testgen.providers_by_class( metafunc, [InfraProvider], required_fields=[ ('iso_datastore', True), ['provisioning', 'host'], ['provisioning', 'datastore'], ['provisioning', 'iso_template'], ['provisioning', 'iso_file'], ['provisioning', 'iso_kickstart'], ['provisioning', 'iso_root_password'], ['provisioning', 'iso_image_type'], ['provisioning', 'vlan'], ]) new_idlist = [] new_argvalues = [] for i, argvalue_tuple in enumerate(argvalues): args = dict(zip(argnames, argvalue_tuple)) if args['provider'].type == "scvmm": continue iso_cust_template = args['provider'].data['provisioning']['iso_kickstart'] if iso_cust_template not in cfme_data.get('customization_templates', {}).keys(): continue new_idlist.append(idlist[i]) new_argvalues.append(argvalues[i]) testgen.parametrize(metafunc, argnames, new_argvalues, ids=new_idlist, scope="module") @pytest.fixture(scope="module") def iso_cust_template(provider, appliance): iso_cust_template = provider.data['provisioning']['iso_kickstart'] return get_template_from_config(iso_cust_template, appliance=appliance) @pytest.fixture(scope="module") def iso_datastore(provider, appliance): return ISODatastore(provider.name, appliance=appliance) @pytest.fixture def datastore_init(iso_cust_template, iso_datastore, provisioning): if not iso_datastore.exists(): iso_datastore.create() # Fails on upstream, BZ1109256 iso_datastore.set_iso_image_type(provisioning['iso_file'], provisioning['iso_image_type']) if not iso_cust_template.exists(): iso_cust_template.create() @pytest.fixture(scope="function") def vm_name(): vm_name = 'test_iso_prov_{}'.format(fauxfactory.gen_alphanumeric(8)) return vm_name @pytest.mark.rhv1 @pytest.mark.tier(2) @pytest.mark.meta(blockers=[GH('ManageIQ/integration_tests:6692', unblock=lambda provider: not provider.one_of(RHEVMProvider))]) def test_iso_provision_from_template(appliance, provider, vm_name, smtp_test, datastore_init, request, setup_provider): """Tests ISO provisioning Metadata: test_flag: iso, provision suite: infra_provisioning """ # generate_tests makes sure these have values iso_template, host, datastore, iso_file, iso_kickstart,\ iso_root_password, iso_image_type, vlan = map(provider.data['provisioning'].get, ('pxe_template', 'host', 'datastore', 'iso_file', 'iso_kickstart', 'iso_root_password', 'iso_image_type', 'vlan')) request.addfinalizer(lambda: cleanup_vm(vm_name, provider)) provisioning_data = { 'catalog': { 'vm_name': vm_name, 'provision_type': 'ISO', 'iso_file': {'name': iso_file}}, 'environment': { 'host_name': {'name': host}, 'datastore_name': {'name': datastore}}, 'customize': { 'custom_template': {'name': iso_kickstart}, 'root_password': iso_root_password}, 'network': { 'vlan': vlan}} do_vm_provisioning(appliance, iso_template, provider, vm_name, provisioning_data, request, smtp_test, num_sec=1500)
akarol/cfme_tests
cfme/tests/infrastructure/test_iso_provisioning.py
cfme/utils/appliance/implementations/ssui.py
# -*- coding: utf-8 -*- import re from bugzilla import Bugzilla as _Bugzilla from collections import Sequence from cached_property import cached_property from cfme.utils.conf import cfme_data, credentials from cfme.utils.log import logger from cfme.utils.version import ( LATEST, Version, current_version, appliance_build_datetime, appliance_is_downstream) NONE_FIELDS = {"---", "undefined", "unspecified"} class Product(object): def __init__(self, data): self._data = data @property def default_release(self): return Version(self._data["default_release"]) @property def name(self): return self._data["name"] @property def milestones(self): return map(lambda ms: ms["name"], self._data["milestones"]) @property def releases(self): return map(lambda release: release["name"], self._data["releases"]) @property def versions(self): versions = [] for version in self._data["versions"]: if version["name"] not in NONE_FIELDS: versions.append(Version(version["name"])) return sorted(versions) @property def latest_version(self): return self.versions[-1] class Bugzilla(object): def __init__(self, **kwargs): self.__product = kwargs.pop("product", None) self.__kwargs = kwargs self.__bug_cache = {} self.__product_cache = {} @property def bug_count(self): return len(self.__bug_cache.keys()) @property def bugs(self): for bug in self.__bug_cache.itervalues(): yield bug def products(self, *names): return map(Product, self.bugzilla._proxy.Product.get({"names": names})["products"]) def product(self, product): if product not in self.__product_cache: self.__product_cache[product] = self.products(product)[0] return self.__product_cache[product] @property def default_product(self): if self.__product is None: return None return self.product(self.__product) @classmethod def from_config(cls): url = cfme_data.get("bugzilla", {}).get("url") product = cfme_data.get("bugzilla", {}).get("product") if url is None: raise Exception("No Bugzilla URL specified!") cr_root = cfme_data.get("bugzilla", {}).get("credentials") username = credentials.get(cr_root, {}).get("username") password = credentials.get(cr_root, {}).get("password") return cls( url=url, user=username, password=password, cookiefile=None, tokenfile=None, product=product) @cached_property def bugzilla(self): return _Bugzilla(**self.__kwargs) @cached_property def loose(self): return cfme_data.get("bugzilla", {}).get("loose", []) @cached_property def open_states(self): return cfme_data.get("bugzilla", {}).get("skip", set([])) @cached_property def upstream_version(self): if self.default_product is not None: return self.default_product.latest_version else: return Version(cfme_data.get("bugzilla", {}).get("upstream_version", "9.9")) def get_bug(self, id): id = int(id) if id not in self.__bug_cache: self.__bug_cache[id] = BugWrapper(self, self.bugzilla.getbug(id)) return self.__bug_cache[id] def get_bug_variants(self, id): if isinstance(id, BugWrapper): bug = id else: bug = self.get_bug(id) expanded = set([]) found = set([]) stack = set([bug]) while stack: b = stack.pop() if b.status == "CLOSED" and b.resolution == "DUPLICATE": b = self.get_bug(b.dupe_of) found.add(b) if b.copy_of: stack.add(self.get_bug(b.copy_of)) if b not in expanded: for cp in map(self.get_bug, b.copies): found.add(cp) stack.add(cp) expanded.add(b) return found def resolve_blocker(self, blocker, version=None, ignore_bugs=None, force_block_streams=None): # ignore_bugs is mutable but is not mutated here! 
Same force_block_streams force_block_streams = force_block_streams or [] ignore_bugs = set([]) if not ignore_bugs else ignore_bugs if isinstance(id, BugWrapper): bug = blocker else: bug = self.get_bug(blocker) if version is None: version = current_version() if version == LATEST: version = bug.product.latest_version is_upstream = version == bug.product.latest_version variants = self.get_bug_variants(bug) filtered = set([]) version_series = ".".join(str(version).split(".")[:2]) for variant in sorted(variants, key=lambda variant: variant.id): if variant.id in ignore_bugs: continue if variant.version is not None and variant.version > version: continue if variant.release_flag is not None and version.is_in_series(variant.release_flag): logger.info('Found matching bug for %d by release - #%d', bug.id, variant.id) filtered.clear() filtered.add(variant) break elif is_upstream and variant.release_flag == 'future': # It is an upstream bug logger.info('Found a matching upstream bug #%d for bug #%d', variant.id, bug.id) return variant elif ((variant.version is not None and variant.target_release is not None) and ( variant.version.is_in_series(version_series) or variant.target_release.is_in_series(version_series))): filtered.add(variant) else: logger.warning( "ATTENTION!!: No release flags, wrong versions, ignoring %s", variant.id) if not filtered: # No appropriate bug was found for forced_stream in force_block_streams: # Find out if we force this bug. if version.is_in_series(forced_stream): return bug else: # No bug, yipee :) return None # First, use versions for bug in filtered: if ((bug.version is not None and bug.target_release is not None) and check_fixed_in(bug.fixed_in, version_series) and ( bug.version.is_in_series(version_series) or bug.target_release.is_in_series(version_series))): return bug # Otherwise prefer release_flag for bug in filtered: if bug.release_flag and version.is_in_series(bug.release_flag): return bug return None def check_fixed_in(fixed_in, version_series): # used to check if the bug belongs to that series if fixed_in is None: return True if not isinstance(fixed_in, Version): fixed_in = Version(fixed_in) return fixed_in.is_in_series(version_series) class BugWrapper(object): _copy_matchers = map(re.compile, [ r'^[+]{3}\s*This bug is a CFME zstream clone. The original bug is:\s*[+]{3}\n[+]{3}\s*' 'https://bugzilla.redhat.com/show_bug.cgi\?id=(\d+)\.\s*[+]{3}', r"^\+\+\+ This bug was initially created as a clone of Bug #([0-9]+) \+\+\+" ]) def __init__(self, bugzilla, bug): self._bug = bug self._bugzilla = bugzilla @property def loose(self): return self._bugzilla.loose @property def bugzilla(self): return self._bugzilla def __getattr__(self, attr): """This proxies the attribute queries to the Bug object and modifies its result. If the field looked up is specified as loose field, it will be converted to Version. If the field is string and it has zero length, or the value is specified as "not specified", it will return None. 
""" value = getattr(self._bug, attr) if attr in self.loose: if isinstance(value, Sequence) and not isinstance(value, basestring): value = value[0] value = value.strip() if not value: return None if value.lower() in NONE_FIELDS: return None # We have to strip any leading non-number characters to correctly match value = re.sub(r"^[^0-9]+", "", value) if not value: return None return Version(value) if isinstance(value, basestring): if len(value.strip()) == 0: return None else: return value else: return value @property def qa_whiteboard(self): """Returns a set of QA Whiteboard markers. It relies on the fact, that our QA Whiteboard uses format foo:bar:baz. Should be able to handle cases like 'foo::bar', or 'abc:'. """ return {x.strip() for x in self._bug.qa_whiteboard.strip().split(":") if x.strip()} @property def copy_of(self): """Returns either id of the bug this is copy of, or None, if it is not a copy.""" try: first_comment = self._bug.comments[0]["text"].lstrip() except IndexError: return None for copy_matcher in self._copy_matchers: copy_match = copy_matcher.match(first_comment) if copy_match is not None: return int(copy_match.groups()[0]) else: return None @property def copies(self): """Returns list of copies of this bug.""" result = [] for bug_id in self._bug.blocks: bug = self._bugzilla.get_bug(bug_id) if bug.copy_of == self._bug.id: result.append(bug_id) return map(int, result) @property def _release_flag_data(self): for flag in self.flags: if flag["name"].startswith("cfme-"): release_flag = flag["name"].split("-", 1)[-1] if release_flag.endswith(".z"): return release_flag.rsplit(".", 1)[0], True else: return release_flag, False else: return None, False @property def release_flag(self): return self._release_flag_data[0] @property def zstream(self): return self._release_flag_data[1] @property def is_opened(self): states = self._bugzilla.open_states if not self.upstream_bug and appliance_is_downstream(): states = self._bugzilla.open_states + ["POST", "MODIFIED"] return self.status in states @property def product(self): return self._bugzilla.product(self._bug.product) @property def upstream_bug(self): if self.version is None: return True return self.version >= self.product.latest_version @property def can_test_on_upstream(self): change_states = {"POST", "MODIFIED"} # With these states, the change is in upstream if self.status not in {"POST", "MODIFIED", "ON_QA", "VERIFIED", "RELEASE_PENDING"}: return False history = self.get_history_raw()["bugs"][0]["history"] changes = [] # We look for status changes in the history for event in history: for change in event["changes"]: if change["field_name"].lower() != "status": continue if change["added"] in change_states: changes.append(event["when"]) return event["when"] < appliance_build_datetime() else: return False def __repr__(self): return repr(self._bug) def __str__(self): return str(self._bug)
# -*- coding: utf-8 -*- import fauxfactory import pytest from cfme.common.provider import cleanup_vm from cfme.infrastructure.provider import InfraProvider from cfme.infrastructure.provider.rhevm import RHEVMProvider from cfme.infrastructure.pxe import get_template_from_config, ISODatastore from cfme.provisioning import do_vm_provisioning from cfme.utils import testgen from cfme.utils.blockers import GH from cfme.utils.conf import cfme_data pytestmark = [ pytest.mark.meta(server_roles="+automate"), pytest.mark.usefixtures('uses_infra_providers'), pytest.mark.tier(2) ] def pytest_generate_tests(metafunc): # Filter out providers without provisioning data or hosts defined argnames, argvalues, idlist = testgen.providers_by_class( metafunc, [InfraProvider], required_fields=[ ('iso_datastore', True), ['provisioning', 'host'], ['provisioning', 'datastore'], ['provisioning', 'iso_template'], ['provisioning', 'iso_file'], ['provisioning', 'iso_kickstart'], ['provisioning', 'iso_root_password'], ['provisioning', 'iso_image_type'], ['provisioning', 'vlan'], ]) new_idlist = [] new_argvalues = [] for i, argvalue_tuple in enumerate(argvalues): args = dict(zip(argnames, argvalue_tuple)) if args['provider'].type == "scvmm": continue iso_cust_template = args['provider'].data['provisioning']['iso_kickstart'] if iso_cust_template not in cfme_data.get('customization_templates', {}).keys(): continue new_idlist.append(idlist[i]) new_argvalues.append(argvalues[i]) testgen.parametrize(metafunc, argnames, new_argvalues, ids=new_idlist, scope="module") @pytest.fixture(scope="module") def iso_cust_template(provider, appliance): iso_cust_template = provider.data['provisioning']['iso_kickstart'] return get_template_from_config(iso_cust_template, appliance=appliance) @pytest.fixture(scope="module") def iso_datastore(provider, appliance): return ISODatastore(provider.name, appliance=appliance) @pytest.fixture def datastore_init(iso_cust_template, iso_datastore, provisioning): if not iso_datastore.exists(): iso_datastore.create() # Fails on upstream, BZ1109256 iso_datastore.set_iso_image_type(provisioning['iso_file'], provisioning['iso_image_type']) if not iso_cust_template.exists(): iso_cust_template.create() @pytest.fixture(scope="function") def vm_name(): vm_name = 'test_iso_prov_{}'.format(fauxfactory.gen_alphanumeric(8)) return vm_name @pytest.mark.rhv1 @pytest.mark.tier(2) @pytest.mark.meta(blockers=[GH('ManageIQ/integration_tests:6692', unblock=lambda provider: not provider.one_of(RHEVMProvider))]) def test_iso_provision_from_template(appliance, provider, vm_name, smtp_test, datastore_init, request, setup_provider): """Tests ISO provisioning Metadata: test_flag: iso, provision suite: infra_provisioning """ # generate_tests makes sure these have values iso_template, host, datastore, iso_file, iso_kickstart,\ iso_root_password, iso_image_type, vlan = map(provider.data['provisioning'].get, ('pxe_template', 'host', 'datastore', 'iso_file', 'iso_kickstart', 'iso_root_password', 'iso_image_type', 'vlan')) request.addfinalizer(lambda: cleanup_vm(vm_name, provider)) provisioning_data = { 'catalog': { 'vm_name': vm_name, 'provision_type': 'ISO', 'iso_file': {'name': iso_file}}, 'environment': { 'host_name': {'name': host}, 'datastore_name': {'name': datastore}}, 'customize': { 'custom_template': {'name': iso_kickstart}, 'root_password': iso_root_password}, 'network': { 'vlan': vlan}} do_vm_provisioning(appliance, iso_template, provider, vm_name, provisioning_data, request, smtp_test, num_sec=1500)
akarol/cfme_tests
cfme/tests/infrastructure/test_iso_provisioning.py
cfme/utils/bz.py
"""Small modules to cope with python 2 vs 3 incompatibilities inside numpy.distutils """ from __future__ import division, absolute_import, print_function import sys def get_exception(): return sys.exc_info()[1]
from __future__ import division, absolute_import, print_function import sys import platform import pytest import numpy as np # import the c-extension module directly since _arg is not exported via umath import numpy.core._multiarray_umath as ncu from numpy.testing import ( assert_raises, assert_equal, assert_array_equal, assert_almost_equal ) # TODO: branch cuts (use Pauli code) # TODO: conj 'symmetry' # TODO: FPU exceptions # At least on Windows the results of many complex functions are not conforming # to the C99 standard. See ticket 1574. # Ditto for Solaris (ticket 1642) and OS X on PowerPC. #FIXME: this will probably change when we require full C99 campatibility with np.errstate(all='ignore'): functions_seem_flaky = ((np.exp(complex(np.inf, 0)).imag != 0) or (np.log(complex(np.NZERO, 0)).imag != np.pi)) # TODO: replace with a check on whether platform-provided C99 funcs are used xfail_complex_tests = (not sys.platform.startswith('linux') or functions_seem_flaky) # TODO This can be xfail when the generator functions are got rid of. platform_skip = pytest.mark.skipif(xfail_complex_tests, reason="Inadequate C99 complex support") class TestCexp(object): def test_simple(self): check = check_complex_value f = np.exp check(f, 1, 0, np.exp(1), 0, False) check(f, 0, 1, np.cos(1), np.sin(1), False) ref = np.exp(1) * complex(np.cos(1), np.sin(1)) check(f, 1, 1, ref.real, ref.imag, False) @platform_skip def test_special_values(self): # C99: Section G 6.3.1 check = check_complex_value f = np.exp # cexp(+-0 + 0i) is 1 + 0i check(f, np.PZERO, 0, 1, 0, False) check(f, np.NZERO, 0, 1, 0, False) # cexp(x + infi) is nan + nani for finite x and raises 'invalid' FPU # exception check(f, 1, np.inf, np.nan, np.nan) check(f, -1, np.inf, np.nan, np.nan) check(f, 0, np.inf, np.nan, np.nan) # cexp(inf + 0i) is inf + 0i check(f, np.inf, 0, np.inf, 0) # cexp(-inf + yi) is +0 * (cos(y) + i sin(y)) for finite y check(f, -np.inf, 1, np.PZERO, np.PZERO) check(f, -np.inf, 0.75 * np.pi, np.NZERO, np.PZERO) # cexp(inf + yi) is +inf * (cos(y) + i sin(y)) for finite y check(f, np.inf, 1, np.inf, np.inf) check(f, np.inf, 0.75 * np.pi, -np.inf, np.inf) # cexp(-inf + inf i) is +-0 +- 0i (signs unspecified) def _check_ninf_inf(dummy): msgform = "cexp(-inf, inf) is (%f, %f), expected (+-0, +-0)" with np.errstate(invalid='ignore'): z = f(np.array(complex(-np.inf, np.inf))) if z.real != 0 or z.imag != 0: raise AssertionError(msgform % (z.real, z.imag)) _check_ninf_inf(None) # cexp(inf + inf i) is +-inf + NaNi and raised invalid FPU ex. 
def _check_inf_inf(dummy): msgform = "cexp(inf, inf) is (%f, %f), expected (+-inf, nan)" with np.errstate(invalid='ignore'): z = f(np.array(complex(np.inf, np.inf))) if not np.isinf(z.real) or not np.isnan(z.imag): raise AssertionError(msgform % (z.real, z.imag)) _check_inf_inf(None) # cexp(-inf + nan i) is +-0 +- 0i def _check_ninf_nan(dummy): msgform = "cexp(-inf, nan) is (%f, %f), expected (+-0, +-0)" with np.errstate(invalid='ignore'): z = f(np.array(complex(-np.inf, np.nan))) if z.real != 0 or z.imag != 0: raise AssertionError(msgform % (z.real, z.imag)) _check_ninf_nan(None) # cexp(inf + nan i) is +-inf + nan def _check_inf_nan(dummy): msgform = "cexp(-inf, nan) is (%f, %f), expected (+-inf, nan)" with np.errstate(invalid='ignore'): z = f(np.array(complex(np.inf, np.nan))) if not np.isinf(z.real) or not np.isnan(z.imag): raise AssertionError(msgform % (z.real, z.imag)) _check_inf_nan(None) # cexp(nan + yi) is nan + nani for y != 0 (optional: raises invalid FPU # ex) check(f, np.nan, 1, np.nan, np.nan) check(f, np.nan, -1, np.nan, np.nan) check(f, np.nan, np.inf, np.nan, np.nan) check(f, np.nan, -np.inf, np.nan, np.nan) # cexp(nan + nani) is nan + nani check(f, np.nan, np.nan, np.nan, np.nan) # TODO This can be xfail when the generator functions are got rid of. @pytest.mark.skip(reason="cexp(nan + 0I) is wrong on most platforms") def test_special_values2(self): # XXX: most implementations get it wrong here (including glibc <= 2.10) # cexp(nan + 0i) is nan + 0i check = check_complex_value f = np.exp check(f, np.nan, 0, np.nan, 0) class TestClog(object): def test_simple(self): x = np.array([1+0j, 1+2j]) y_r = np.log(np.abs(x)) + 1j * np.angle(x) y = np.log(x) for i in range(len(x)): assert_almost_equal(y[i], y_r[i]) @platform_skip @pytest.mark.skipif(platform.machine() == "armv5tel", reason="See gh-413.") def test_special_values(self): xl = [] yl = [] # From C99 std (Sec 6.3.2) # XXX: check exceptions raised # --- raise for invalid fails. # clog(-0 + i0) returns -inf + i pi and raises the 'divide-by-zero' # floating-point exception. with np.errstate(divide='raise'): x = np.array([np.NZERO], dtype=complex) y = complex(-np.inf, np.pi) assert_raises(FloatingPointError, np.log, x) with np.errstate(divide='ignore'): assert_almost_equal(np.log(x), y) xl.append(x) yl.append(y) # clog(+0 + i0) returns -inf + i0 and raises the 'divide-by-zero' # floating-point exception. with np.errstate(divide='raise'): x = np.array([0], dtype=complex) y = complex(-np.inf, 0) assert_raises(FloatingPointError, np.log, x) with np.errstate(divide='ignore'): assert_almost_equal(np.log(x), y) xl.append(x) yl.append(y) # clog(x + i inf returns +inf + i pi /2, for finite x. x = np.array([complex(1, np.inf)], dtype=complex) y = complex(np.inf, 0.5 * np.pi) assert_almost_equal(np.log(x), y) xl.append(x) yl.append(y) x = np.array([complex(-1, np.inf)], dtype=complex) assert_almost_equal(np.log(x), y) xl.append(x) yl.append(y) # clog(x + iNaN) returns NaN + iNaN and optionally raises the # 'invalid' floating- point exception, for finite x. 
with np.errstate(invalid='raise'): x = np.array([complex(1., np.nan)], dtype=complex) y = complex(np.nan, np.nan) #assert_raises(FloatingPointError, np.log, x) with np.errstate(invalid='ignore'): assert_almost_equal(np.log(x), y) xl.append(x) yl.append(y) with np.errstate(invalid='raise'): x = np.array([np.inf + 1j * np.nan], dtype=complex) #assert_raises(FloatingPointError, np.log, x) with np.errstate(invalid='ignore'): assert_almost_equal(np.log(x), y) xl.append(x) yl.append(y) # clog(- inf + iy) returns +inf + ipi , for finite positive-signed y. x = np.array([-np.inf + 1j], dtype=complex) y = complex(np.inf, np.pi) assert_almost_equal(np.log(x), y) xl.append(x) yl.append(y) # clog(+ inf + iy) returns +inf + i0, for finite positive-signed y. x = np.array([np.inf + 1j], dtype=complex) y = complex(np.inf, 0) assert_almost_equal(np.log(x), y) xl.append(x) yl.append(y) # clog(- inf + i inf) returns +inf + i3pi /4. x = np.array([complex(-np.inf, np.inf)], dtype=complex) y = complex(np.inf, 0.75 * np.pi) assert_almost_equal(np.log(x), y) xl.append(x) yl.append(y) # clog(+ inf + i inf) returns +inf + ipi /4. x = np.array([complex(np.inf, np.inf)], dtype=complex) y = complex(np.inf, 0.25 * np.pi) assert_almost_equal(np.log(x), y) xl.append(x) yl.append(y) # clog(+/- inf + iNaN) returns +inf + iNaN. x = np.array([complex(np.inf, np.nan)], dtype=complex) y = complex(np.inf, np.nan) assert_almost_equal(np.log(x), y) xl.append(x) yl.append(y) x = np.array([complex(-np.inf, np.nan)], dtype=complex) assert_almost_equal(np.log(x), y) xl.append(x) yl.append(y) # clog(NaN + iy) returns NaN + iNaN and optionally raises the # 'invalid' floating-point exception, for finite y. x = np.array([complex(np.nan, 1)], dtype=complex) y = complex(np.nan, np.nan) assert_almost_equal(np.log(x), y) xl.append(x) yl.append(y) # clog(NaN + i inf) returns +inf + iNaN. x = np.array([complex(np.nan, np.inf)], dtype=complex) y = complex(np.inf, np.nan) assert_almost_equal(np.log(x), y) xl.append(x) yl.append(y) # clog(NaN + iNaN) returns NaN + iNaN. x = np.array([complex(np.nan, np.nan)], dtype=complex) y = complex(np.nan, np.nan) assert_almost_equal(np.log(x), y) xl.append(x) yl.append(y) # clog(conj(z)) = conj(clog(z)). 
xa = np.array(xl, dtype=complex) ya = np.array(yl, dtype=complex) with np.errstate(divide='ignore'): for i in range(len(xa)): assert_almost_equal(np.log(xa[i].conj()), ya[i].conj()) class TestCsqrt(object): def test_simple(self): # sqrt(1) check_complex_value(np.sqrt, 1, 0, 1, 0) # sqrt(1i) rres = 0.5*np.sqrt(2) ires = rres check_complex_value(np.sqrt, 0, 1, rres, ires, False) # sqrt(-1) check_complex_value(np.sqrt, -1, 0, 0, 1) def test_simple_conjugate(self): ref = np.conj(np.sqrt(complex(1, 1))) def f(z): return np.sqrt(np.conj(z)) check_complex_value(f, 1, 1, ref.real, ref.imag, False) #def test_branch_cut(self): # _check_branch_cut(f, -1, 0, 1, -1) @platform_skip def test_special_values(self): # C99: Sec G 6.4.2 check = check_complex_value f = np.sqrt # csqrt(+-0 + 0i) is 0 + 0i check(f, np.PZERO, 0, 0, 0) check(f, np.NZERO, 0, 0, 0) # csqrt(x + infi) is inf + infi for any x (including NaN) check(f, 1, np.inf, np.inf, np.inf) check(f, -1, np.inf, np.inf, np.inf) check(f, np.PZERO, np.inf, np.inf, np.inf) check(f, np.NZERO, np.inf, np.inf, np.inf) check(f, np.inf, np.inf, np.inf, np.inf) check(f, -np.inf, np.inf, np.inf, np.inf) check(f, -np.nan, np.inf, np.inf, np.inf) # csqrt(x + nani) is nan + nani for any finite x check(f, 1, np.nan, np.nan, np.nan) check(f, -1, np.nan, np.nan, np.nan) check(f, 0, np.nan, np.nan, np.nan) # csqrt(-inf + yi) is +0 + infi for any finite y > 0 check(f, -np.inf, 1, np.PZERO, np.inf) # csqrt(inf + yi) is +inf + 0i for any finite y > 0 check(f, np.inf, 1, np.inf, np.PZERO) # csqrt(-inf + nani) is nan +- infi (both +i infi are valid) def _check_ninf_nan(dummy): msgform = "csqrt(-inf, nan) is (%f, %f), expected (nan, +-inf)" z = np.sqrt(np.array(complex(-np.inf, np.nan))) #Fixme: ugly workaround for isinf bug. with np.errstate(invalid='ignore'): if not (np.isnan(z.real) and np.isinf(z.imag)): raise AssertionError(msgform % (z.real, z.imag)) _check_ninf_nan(None) # csqrt(+inf + nani) is inf + nani check(f, np.inf, np.nan, np.inf, np.nan) # csqrt(nan + yi) is nan + nani for any finite y (infinite handled in x # + nani) check(f, np.nan, 0, np.nan, np.nan) check(f, np.nan, 1, np.nan, np.nan) check(f, np.nan, np.nan, np.nan, np.nan) # XXX: check for conj(csqrt(z)) == csqrt(conj(z)) (need to fix branch # cuts first) class TestCpow(object): def setup(self): self.olderr = np.seterr(invalid='ignore') def teardown(self): np.seterr(**self.olderr) def test_simple(self): x = np.array([1+1j, 0+2j, 1+2j, np.inf, np.nan]) y_r = x ** 2 y = np.power(x, 2) for i in range(len(x)): assert_almost_equal(y[i], y_r[i]) def test_scalar(self): x = np.array([1, 1j, 2, 2.5+.37j, np.inf, np.nan]) y = np.array([1, 1j, -0.5+1.5j, -0.5+1.5j, 2, 3]) lx = list(range(len(x))) # Compute the values for complex type in python p_r = [complex(x[i]) ** complex(y[i]) for i in lx] # Substitute a result allowed by C99 standard p_r[4] = complex(np.inf, np.nan) # Do the same with numpy complex scalars n_r = [x[i] ** y[i] for i in lx] for i in lx: assert_almost_equal(n_r[i], p_r[i], err_msg='Loop %d\n' % i) def test_array(self): x = np.array([1, 1j, 2, 2.5+.37j, np.inf, np.nan]) y = np.array([1, 1j, -0.5+1.5j, -0.5+1.5j, 2, 3]) lx = list(range(len(x))) # Compute the values for complex type in python p_r = [complex(x[i]) ** complex(y[i]) for i in lx] # Substitute a result allowed by C99 standard p_r[4] = complex(np.inf, np.nan) # Do the same with numpy arrays n_r = x ** y for i in lx: assert_almost_equal(n_r[i], p_r[i], err_msg='Loop %d\n' % i) class TestCabs(object): def setup(self): self.olderr = 
np.seterr(invalid='ignore') def teardown(self): np.seterr(**self.olderr) def test_simple(self): x = np.array([1+1j, 0+2j, 1+2j, np.inf, np.nan]) y_r = np.array([np.sqrt(2.), 2, np.sqrt(5), np.inf, np.nan]) y = np.abs(x) for i in range(len(x)): assert_almost_equal(y[i], y_r[i]) def test_fabs(self): # Test that np.abs(x +- 0j) == np.abs(x) (as mandated by C99 for cabs) x = np.array([1+0j], dtype=complex) assert_array_equal(np.abs(x), np.real(x)) x = np.array([complex(1, np.NZERO)], dtype=complex) assert_array_equal(np.abs(x), np.real(x)) x = np.array([complex(np.inf, np.NZERO)], dtype=complex) assert_array_equal(np.abs(x), np.real(x)) x = np.array([complex(np.nan, np.NZERO)], dtype=complex) assert_array_equal(np.abs(x), np.real(x)) def test_cabs_inf_nan(self): x, y = [], [] # cabs(+-nan + nani) returns nan x.append(np.nan) y.append(np.nan) check_real_value(np.abs, np.nan, np.nan, np.nan) x.append(np.nan) y.append(-np.nan) check_real_value(np.abs, -np.nan, np.nan, np.nan) # According to C99 standard, if exactly one of the real/part is inf and # the other nan, then cabs should return inf x.append(np.inf) y.append(np.nan) check_real_value(np.abs, np.inf, np.nan, np.inf) x.append(-np.inf) y.append(np.nan) check_real_value(np.abs, -np.inf, np.nan, np.inf) # cabs(conj(z)) == conj(cabs(z)) (= cabs(z)) def f(a): return np.abs(np.conj(a)) def g(a, b): return np.abs(complex(a, b)) xa = np.array(x, dtype=complex) for i in range(len(xa)): ref = g(x[i], y[i]) check_real_value(f, x[i], y[i], ref) class TestCarg(object): def test_simple(self): check_real_value(ncu._arg, 1, 0, 0, False) check_real_value(ncu._arg, 0, 1, 0.5*np.pi, False) check_real_value(ncu._arg, 1, 1, 0.25*np.pi, False) check_real_value(ncu._arg, np.PZERO, np.PZERO, np.PZERO) # TODO This can be xfail when the generator functions are got rid of. 
@pytest.mark.skip( reason="Complex arithmetic with signed zero fails on most platforms") def test_zero(self): # carg(-0 +- 0i) returns +- pi check_real_value(ncu._arg, np.NZERO, np.PZERO, np.pi, False) check_real_value(ncu._arg, np.NZERO, np.NZERO, -np.pi, False) # carg(+0 +- 0i) returns +- 0 check_real_value(ncu._arg, np.PZERO, np.PZERO, np.PZERO) check_real_value(ncu._arg, np.PZERO, np.NZERO, np.NZERO) # carg(x +- 0i) returns +- 0 for x > 0 check_real_value(ncu._arg, 1, np.PZERO, np.PZERO, False) check_real_value(ncu._arg, 1, np.NZERO, np.NZERO, False) # carg(x +- 0i) returns +- pi for x < 0 check_real_value(ncu._arg, -1, np.PZERO, np.pi, False) check_real_value(ncu._arg, -1, np.NZERO, -np.pi, False) # carg(+- 0 + yi) returns pi/2 for y > 0 check_real_value(ncu._arg, np.PZERO, 1, 0.5 * np.pi, False) check_real_value(ncu._arg, np.NZERO, 1, 0.5 * np.pi, False) # carg(+- 0 + yi) returns -pi/2 for y < 0 check_real_value(ncu._arg, np.PZERO, -1, 0.5 * np.pi, False) check_real_value(ncu._arg, np.NZERO, -1, -0.5 * np.pi, False) #def test_branch_cuts(self): # _check_branch_cut(ncu._arg, -1, 1j, -1, 1) def test_special_values(self): # carg(-np.inf +- yi) returns +-pi for finite y > 0 check_real_value(ncu._arg, -np.inf, 1, np.pi, False) check_real_value(ncu._arg, -np.inf, -1, -np.pi, False) # carg(np.inf +- yi) returns +-0 for finite y > 0 check_real_value(ncu._arg, np.inf, 1, np.PZERO, False) check_real_value(ncu._arg, np.inf, -1, np.NZERO, False) # carg(x +- np.infi) returns +-pi/2 for finite x check_real_value(ncu._arg, 1, np.inf, 0.5 * np.pi, False) check_real_value(ncu._arg, 1, -np.inf, -0.5 * np.pi, False) # carg(-np.inf +- np.infi) returns +-3pi/4 check_real_value(ncu._arg, -np.inf, np.inf, 0.75 * np.pi, False) check_real_value(ncu._arg, -np.inf, -np.inf, -0.75 * np.pi, False) # carg(np.inf +- np.infi) returns +-pi/4 check_real_value(ncu._arg, np.inf, np.inf, 0.25 * np.pi, False) check_real_value(ncu._arg, np.inf, -np.inf, -0.25 * np.pi, False) # carg(x + yi) returns np.nan if x or y is nan check_real_value(ncu._arg, np.nan, 0, np.nan, False) check_real_value(ncu._arg, 0, np.nan, np.nan, False) check_real_value(ncu._arg, np.nan, np.inf, np.nan, False) check_real_value(ncu._arg, np.inf, np.nan, np.nan, False) def check_real_value(f, x1, y1, x, exact=True): z1 = np.array([complex(x1, y1)]) if exact: assert_equal(f(z1), x) else: assert_almost_equal(f(z1), x) def check_complex_value(f, x1, y1, x2, y2, exact=True): z1 = np.array([complex(x1, y1)]) z2 = complex(x2, y2) with np.errstate(invalid='ignore'): if exact: assert_equal(f(z1), z2) else: assert_almost_equal(f(z1), z2)
pizzathief/numpy
numpy/core/tests/test_umath_complex.py
numpy/distutils/compat.py
""" Back compatibility nosetester module. It will import the appropriate set of tools """ from __future__ import division, absolute_import, print_function import warnings # 2018-04-04, numpy 1.15.0 warnings.warn("Importing from numpy.testing.nosetester is deprecated " "since 1.15.0, import from numpy.testing instead.", DeprecationWarning, stacklevel=2) from ._private.nosetester import * __all__ = ['get_package_name', 'run_module_suite', 'NoseTester', '_numpy_tester', 'get_package_name', 'import_nose', 'suppress_warnings']
from __future__ import division, absolute_import, print_function import sys import platform import pytest import numpy as np # import the c-extension module directly since _arg is not exported via umath import numpy.core._multiarray_umath as ncu from numpy.testing import ( assert_raises, assert_equal, assert_array_equal, assert_almost_equal ) # TODO: branch cuts (use Pauli code) # TODO: conj 'symmetry' # TODO: FPU exceptions # At least on Windows the results of many complex functions are not conforming # to the C99 standard. See ticket 1574. # Ditto for Solaris (ticket 1642) and OS X on PowerPC. #FIXME: this will probably change when we require full C99 campatibility with np.errstate(all='ignore'): functions_seem_flaky = ((np.exp(complex(np.inf, 0)).imag != 0) or (np.log(complex(np.NZERO, 0)).imag != np.pi)) # TODO: replace with a check on whether platform-provided C99 funcs are used xfail_complex_tests = (not sys.platform.startswith('linux') or functions_seem_flaky) # TODO This can be xfail when the generator functions are got rid of. platform_skip = pytest.mark.skipif(xfail_complex_tests, reason="Inadequate C99 complex support") class TestCexp(object): def test_simple(self): check = check_complex_value f = np.exp check(f, 1, 0, np.exp(1), 0, False) check(f, 0, 1, np.cos(1), np.sin(1), False) ref = np.exp(1) * complex(np.cos(1), np.sin(1)) check(f, 1, 1, ref.real, ref.imag, False) @platform_skip def test_special_values(self): # C99: Section G 6.3.1 check = check_complex_value f = np.exp # cexp(+-0 + 0i) is 1 + 0i check(f, np.PZERO, 0, 1, 0, False) check(f, np.NZERO, 0, 1, 0, False) # cexp(x + infi) is nan + nani for finite x and raises 'invalid' FPU # exception check(f, 1, np.inf, np.nan, np.nan) check(f, -1, np.inf, np.nan, np.nan) check(f, 0, np.inf, np.nan, np.nan) # cexp(inf + 0i) is inf + 0i check(f, np.inf, 0, np.inf, 0) # cexp(-inf + yi) is +0 * (cos(y) + i sin(y)) for finite y check(f, -np.inf, 1, np.PZERO, np.PZERO) check(f, -np.inf, 0.75 * np.pi, np.NZERO, np.PZERO) # cexp(inf + yi) is +inf * (cos(y) + i sin(y)) for finite y check(f, np.inf, 1, np.inf, np.inf) check(f, np.inf, 0.75 * np.pi, -np.inf, np.inf) # cexp(-inf + inf i) is +-0 +- 0i (signs unspecified) def _check_ninf_inf(dummy): msgform = "cexp(-inf, inf) is (%f, %f), expected (+-0, +-0)" with np.errstate(invalid='ignore'): z = f(np.array(complex(-np.inf, np.inf))) if z.real != 0 or z.imag != 0: raise AssertionError(msgform % (z.real, z.imag)) _check_ninf_inf(None) # cexp(inf + inf i) is +-inf + NaNi and raised invalid FPU ex. 
def _check_inf_inf(dummy): msgform = "cexp(inf, inf) is (%f, %f), expected (+-inf, nan)" with np.errstate(invalid='ignore'): z = f(np.array(complex(np.inf, np.inf))) if not np.isinf(z.real) or not np.isnan(z.imag): raise AssertionError(msgform % (z.real, z.imag)) _check_inf_inf(None) # cexp(-inf + nan i) is +-0 +- 0i def _check_ninf_nan(dummy): msgform = "cexp(-inf, nan) is (%f, %f), expected (+-0, +-0)" with np.errstate(invalid='ignore'): z = f(np.array(complex(-np.inf, np.nan))) if z.real != 0 or z.imag != 0: raise AssertionError(msgform % (z.real, z.imag)) _check_ninf_nan(None) # cexp(inf + nan i) is +-inf + nan def _check_inf_nan(dummy): msgform = "cexp(-inf, nan) is (%f, %f), expected (+-inf, nan)" with np.errstate(invalid='ignore'): z = f(np.array(complex(np.inf, np.nan))) if not np.isinf(z.real) or not np.isnan(z.imag): raise AssertionError(msgform % (z.real, z.imag)) _check_inf_nan(None) # cexp(nan + yi) is nan + nani for y != 0 (optional: raises invalid FPU # ex) check(f, np.nan, 1, np.nan, np.nan) check(f, np.nan, -1, np.nan, np.nan) check(f, np.nan, np.inf, np.nan, np.nan) check(f, np.nan, -np.inf, np.nan, np.nan) # cexp(nan + nani) is nan + nani check(f, np.nan, np.nan, np.nan, np.nan) # TODO This can be xfail when the generator functions are got rid of. @pytest.mark.skip(reason="cexp(nan + 0I) is wrong on most platforms") def test_special_values2(self): # XXX: most implementations get it wrong here (including glibc <= 2.10) # cexp(nan + 0i) is nan + 0i check = check_complex_value f = np.exp check(f, np.nan, 0, np.nan, 0) class TestClog(object): def test_simple(self): x = np.array([1+0j, 1+2j]) y_r = np.log(np.abs(x)) + 1j * np.angle(x) y = np.log(x) for i in range(len(x)): assert_almost_equal(y[i], y_r[i]) @platform_skip @pytest.mark.skipif(platform.machine() == "armv5tel", reason="See gh-413.") def test_special_values(self): xl = [] yl = [] # From C99 std (Sec 6.3.2) # XXX: check exceptions raised # --- raise for invalid fails. # clog(-0 + i0) returns -inf + i pi and raises the 'divide-by-zero' # floating-point exception. with np.errstate(divide='raise'): x = np.array([np.NZERO], dtype=complex) y = complex(-np.inf, np.pi) assert_raises(FloatingPointError, np.log, x) with np.errstate(divide='ignore'): assert_almost_equal(np.log(x), y) xl.append(x) yl.append(y) # clog(+0 + i0) returns -inf + i0 and raises the 'divide-by-zero' # floating-point exception. with np.errstate(divide='raise'): x = np.array([0], dtype=complex) y = complex(-np.inf, 0) assert_raises(FloatingPointError, np.log, x) with np.errstate(divide='ignore'): assert_almost_equal(np.log(x), y) xl.append(x) yl.append(y) # clog(x + i inf returns +inf + i pi /2, for finite x. x = np.array([complex(1, np.inf)], dtype=complex) y = complex(np.inf, 0.5 * np.pi) assert_almost_equal(np.log(x), y) xl.append(x) yl.append(y) x = np.array([complex(-1, np.inf)], dtype=complex) assert_almost_equal(np.log(x), y) xl.append(x) yl.append(y) # clog(x + iNaN) returns NaN + iNaN and optionally raises the # 'invalid' floating- point exception, for finite x. 
with np.errstate(invalid='raise'): x = np.array([complex(1., np.nan)], dtype=complex) y = complex(np.nan, np.nan) #assert_raises(FloatingPointError, np.log, x) with np.errstate(invalid='ignore'): assert_almost_equal(np.log(x), y) xl.append(x) yl.append(y) with np.errstate(invalid='raise'): x = np.array([np.inf + 1j * np.nan], dtype=complex) #assert_raises(FloatingPointError, np.log, x) with np.errstate(invalid='ignore'): assert_almost_equal(np.log(x), y) xl.append(x) yl.append(y) # clog(- inf + iy) returns +inf + ipi , for finite positive-signed y. x = np.array([-np.inf + 1j], dtype=complex) y = complex(np.inf, np.pi) assert_almost_equal(np.log(x), y) xl.append(x) yl.append(y) # clog(+ inf + iy) returns +inf + i0, for finite positive-signed y. x = np.array([np.inf + 1j], dtype=complex) y = complex(np.inf, 0) assert_almost_equal(np.log(x), y) xl.append(x) yl.append(y) # clog(- inf + i inf) returns +inf + i3pi /4. x = np.array([complex(-np.inf, np.inf)], dtype=complex) y = complex(np.inf, 0.75 * np.pi) assert_almost_equal(np.log(x), y) xl.append(x) yl.append(y) # clog(+ inf + i inf) returns +inf + ipi /4. x = np.array([complex(np.inf, np.inf)], dtype=complex) y = complex(np.inf, 0.25 * np.pi) assert_almost_equal(np.log(x), y) xl.append(x) yl.append(y) # clog(+/- inf + iNaN) returns +inf + iNaN. x = np.array([complex(np.inf, np.nan)], dtype=complex) y = complex(np.inf, np.nan) assert_almost_equal(np.log(x), y) xl.append(x) yl.append(y) x = np.array([complex(-np.inf, np.nan)], dtype=complex) assert_almost_equal(np.log(x), y) xl.append(x) yl.append(y) # clog(NaN + iy) returns NaN + iNaN and optionally raises the # 'invalid' floating-point exception, for finite y. x = np.array([complex(np.nan, 1)], dtype=complex) y = complex(np.nan, np.nan) assert_almost_equal(np.log(x), y) xl.append(x) yl.append(y) # clog(NaN + i inf) returns +inf + iNaN. x = np.array([complex(np.nan, np.inf)], dtype=complex) y = complex(np.inf, np.nan) assert_almost_equal(np.log(x), y) xl.append(x) yl.append(y) # clog(NaN + iNaN) returns NaN + iNaN. x = np.array([complex(np.nan, np.nan)], dtype=complex) y = complex(np.nan, np.nan) assert_almost_equal(np.log(x), y) xl.append(x) yl.append(y) # clog(conj(z)) = conj(clog(z)). 
xa = np.array(xl, dtype=complex) ya = np.array(yl, dtype=complex) with np.errstate(divide='ignore'): for i in range(len(xa)): assert_almost_equal(np.log(xa[i].conj()), ya[i].conj()) class TestCsqrt(object): def test_simple(self): # sqrt(1) check_complex_value(np.sqrt, 1, 0, 1, 0) # sqrt(1i) rres = 0.5*np.sqrt(2) ires = rres check_complex_value(np.sqrt, 0, 1, rres, ires, False) # sqrt(-1) check_complex_value(np.sqrt, -1, 0, 0, 1) def test_simple_conjugate(self): ref = np.conj(np.sqrt(complex(1, 1))) def f(z): return np.sqrt(np.conj(z)) check_complex_value(f, 1, 1, ref.real, ref.imag, False) #def test_branch_cut(self): # _check_branch_cut(f, -1, 0, 1, -1) @platform_skip def test_special_values(self): # C99: Sec G 6.4.2 check = check_complex_value f = np.sqrt # csqrt(+-0 + 0i) is 0 + 0i check(f, np.PZERO, 0, 0, 0) check(f, np.NZERO, 0, 0, 0) # csqrt(x + infi) is inf + infi for any x (including NaN) check(f, 1, np.inf, np.inf, np.inf) check(f, -1, np.inf, np.inf, np.inf) check(f, np.PZERO, np.inf, np.inf, np.inf) check(f, np.NZERO, np.inf, np.inf, np.inf) check(f, np.inf, np.inf, np.inf, np.inf) check(f, -np.inf, np.inf, np.inf, np.inf) check(f, -np.nan, np.inf, np.inf, np.inf) # csqrt(x + nani) is nan + nani for any finite x check(f, 1, np.nan, np.nan, np.nan) check(f, -1, np.nan, np.nan, np.nan) check(f, 0, np.nan, np.nan, np.nan) # csqrt(-inf + yi) is +0 + infi for any finite y > 0 check(f, -np.inf, 1, np.PZERO, np.inf) # csqrt(inf + yi) is +inf + 0i for any finite y > 0 check(f, np.inf, 1, np.inf, np.PZERO) # csqrt(-inf + nani) is nan +- infi (both +i infi are valid) def _check_ninf_nan(dummy): msgform = "csqrt(-inf, nan) is (%f, %f), expected (nan, +-inf)" z = np.sqrt(np.array(complex(-np.inf, np.nan))) #Fixme: ugly workaround for isinf bug. with np.errstate(invalid='ignore'): if not (np.isnan(z.real) and np.isinf(z.imag)): raise AssertionError(msgform % (z.real, z.imag)) _check_ninf_nan(None) # csqrt(+inf + nani) is inf + nani check(f, np.inf, np.nan, np.inf, np.nan) # csqrt(nan + yi) is nan + nani for any finite y (infinite handled in x # + nani) check(f, np.nan, 0, np.nan, np.nan) check(f, np.nan, 1, np.nan, np.nan) check(f, np.nan, np.nan, np.nan, np.nan) # XXX: check for conj(csqrt(z)) == csqrt(conj(z)) (need to fix branch # cuts first) class TestCpow(object): def setup(self): self.olderr = np.seterr(invalid='ignore') def teardown(self): np.seterr(**self.olderr) def test_simple(self): x = np.array([1+1j, 0+2j, 1+2j, np.inf, np.nan]) y_r = x ** 2 y = np.power(x, 2) for i in range(len(x)): assert_almost_equal(y[i], y_r[i]) def test_scalar(self): x = np.array([1, 1j, 2, 2.5+.37j, np.inf, np.nan]) y = np.array([1, 1j, -0.5+1.5j, -0.5+1.5j, 2, 3]) lx = list(range(len(x))) # Compute the values for complex type in python p_r = [complex(x[i]) ** complex(y[i]) for i in lx] # Substitute a result allowed by C99 standard p_r[4] = complex(np.inf, np.nan) # Do the same with numpy complex scalars n_r = [x[i] ** y[i] for i in lx] for i in lx: assert_almost_equal(n_r[i], p_r[i], err_msg='Loop %d\n' % i) def test_array(self): x = np.array([1, 1j, 2, 2.5+.37j, np.inf, np.nan]) y = np.array([1, 1j, -0.5+1.5j, -0.5+1.5j, 2, 3]) lx = list(range(len(x))) # Compute the values for complex type in python p_r = [complex(x[i]) ** complex(y[i]) for i in lx] # Substitute a result allowed by C99 standard p_r[4] = complex(np.inf, np.nan) # Do the same with numpy arrays n_r = x ** y for i in lx: assert_almost_equal(n_r[i], p_r[i], err_msg='Loop %d\n' % i) class TestCabs(object): def setup(self): self.olderr = 
np.seterr(invalid='ignore') def teardown(self): np.seterr(**self.olderr) def test_simple(self): x = np.array([1+1j, 0+2j, 1+2j, np.inf, np.nan]) y_r = np.array([np.sqrt(2.), 2, np.sqrt(5), np.inf, np.nan]) y = np.abs(x) for i in range(len(x)): assert_almost_equal(y[i], y_r[i]) def test_fabs(self): # Test that np.abs(x +- 0j) == np.abs(x) (as mandated by C99 for cabs) x = np.array([1+0j], dtype=complex) assert_array_equal(np.abs(x), np.real(x)) x = np.array([complex(1, np.NZERO)], dtype=complex) assert_array_equal(np.abs(x), np.real(x)) x = np.array([complex(np.inf, np.NZERO)], dtype=complex) assert_array_equal(np.abs(x), np.real(x)) x = np.array([complex(np.nan, np.NZERO)], dtype=complex) assert_array_equal(np.abs(x), np.real(x)) def test_cabs_inf_nan(self): x, y = [], [] # cabs(+-nan + nani) returns nan x.append(np.nan) y.append(np.nan) check_real_value(np.abs, np.nan, np.nan, np.nan) x.append(np.nan) y.append(-np.nan) check_real_value(np.abs, -np.nan, np.nan, np.nan) # According to C99 standard, if exactly one of the real/part is inf and # the other nan, then cabs should return inf x.append(np.inf) y.append(np.nan) check_real_value(np.abs, np.inf, np.nan, np.inf) x.append(-np.inf) y.append(np.nan) check_real_value(np.abs, -np.inf, np.nan, np.inf) # cabs(conj(z)) == conj(cabs(z)) (= cabs(z)) def f(a): return np.abs(np.conj(a)) def g(a, b): return np.abs(complex(a, b)) xa = np.array(x, dtype=complex) for i in range(len(xa)): ref = g(x[i], y[i]) check_real_value(f, x[i], y[i], ref) class TestCarg(object): def test_simple(self): check_real_value(ncu._arg, 1, 0, 0, False) check_real_value(ncu._arg, 0, 1, 0.5*np.pi, False) check_real_value(ncu._arg, 1, 1, 0.25*np.pi, False) check_real_value(ncu._arg, np.PZERO, np.PZERO, np.PZERO) # TODO This can be xfail when the generator functions are got rid of. 
@pytest.mark.skip( reason="Complex arithmetic with signed zero fails on most platforms") def test_zero(self): # carg(-0 +- 0i) returns +- pi check_real_value(ncu._arg, np.NZERO, np.PZERO, np.pi, False) check_real_value(ncu._arg, np.NZERO, np.NZERO, -np.pi, False) # carg(+0 +- 0i) returns +- 0 check_real_value(ncu._arg, np.PZERO, np.PZERO, np.PZERO) check_real_value(ncu._arg, np.PZERO, np.NZERO, np.NZERO) # carg(x +- 0i) returns +- 0 for x > 0 check_real_value(ncu._arg, 1, np.PZERO, np.PZERO, False) check_real_value(ncu._arg, 1, np.NZERO, np.NZERO, False) # carg(x +- 0i) returns +- pi for x < 0 check_real_value(ncu._arg, -1, np.PZERO, np.pi, False) check_real_value(ncu._arg, -1, np.NZERO, -np.pi, False) # carg(+- 0 + yi) returns pi/2 for y > 0 check_real_value(ncu._arg, np.PZERO, 1, 0.5 * np.pi, False) check_real_value(ncu._arg, np.NZERO, 1, 0.5 * np.pi, False) # carg(+- 0 + yi) returns -pi/2 for y < 0 check_real_value(ncu._arg, np.PZERO, -1, 0.5 * np.pi, False) check_real_value(ncu._arg, np.NZERO, -1, -0.5 * np.pi, False) #def test_branch_cuts(self): # _check_branch_cut(ncu._arg, -1, 1j, -1, 1) def test_special_values(self): # carg(-np.inf +- yi) returns +-pi for finite y > 0 check_real_value(ncu._arg, -np.inf, 1, np.pi, False) check_real_value(ncu._arg, -np.inf, -1, -np.pi, False) # carg(np.inf +- yi) returns +-0 for finite y > 0 check_real_value(ncu._arg, np.inf, 1, np.PZERO, False) check_real_value(ncu._arg, np.inf, -1, np.NZERO, False) # carg(x +- np.infi) returns +-pi/2 for finite x check_real_value(ncu._arg, 1, np.inf, 0.5 * np.pi, False) check_real_value(ncu._arg, 1, -np.inf, -0.5 * np.pi, False) # carg(-np.inf +- np.infi) returns +-3pi/4 check_real_value(ncu._arg, -np.inf, np.inf, 0.75 * np.pi, False) check_real_value(ncu._arg, -np.inf, -np.inf, -0.75 * np.pi, False) # carg(np.inf +- np.infi) returns +-pi/4 check_real_value(ncu._arg, np.inf, np.inf, 0.25 * np.pi, False) check_real_value(ncu._arg, np.inf, -np.inf, -0.25 * np.pi, False) # carg(x + yi) returns np.nan if x or y is nan check_real_value(ncu._arg, np.nan, 0, np.nan, False) check_real_value(ncu._arg, 0, np.nan, np.nan, False) check_real_value(ncu._arg, np.nan, np.inf, np.nan, False) check_real_value(ncu._arg, np.inf, np.nan, np.nan, False) def check_real_value(f, x1, y1, x, exact=True): z1 = np.array([complex(x1, y1)]) if exact: assert_equal(f(z1), x) else: assert_almost_equal(f(z1), x) def check_complex_value(f, x1, y1, x2, y2, exact=True): z1 = np.array([complex(x1, y1)]) z2 = complex(x2, y2) with np.errstate(invalid='ignore'): if exact: assert_equal(f(z1), z2) else: assert_almost_equal(f(z1), z2)
pizzathief/numpy
numpy/core/tests/test_umath_complex.py
numpy/testing/nosetester.py
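This row pairs the same complex-math tests with numpy/testing/nosetester.py, the back-compatibility shim shown above that re-exports numpy.testing._private.nosetester and emits a DeprecationWarning at import time. A hedged usage sketch of that deprecation pattern follows; it assumes a NumPy version that still ships the shim (deprecated in 1.15.0 and not guaranteed to exist in newer releases) and explicitly forces a re-import, since Python's module cache means the warning only fires when the module body actually runs.

# Hedged sketch: observing the DeprecationWarning emitted by the shim.
# Assumes the installed NumPy still ships numpy.testing.nosetester; the
# sys.modules.pop() is only there to force the module body (and thus the
# warning) to run again if it was already imported.
import sys
import warnings

sys.modules.pop("numpy.testing.nosetester", None)

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    import numpy.testing.nosetester  # noqa: F401

assert any(issubclass(w.category, DeprecationWarning) for w in caught), \
    "expected the shim's deprecation warning on first import"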
""" This is only meant to add docs to objects defined in C-extension modules. The purpose is to allow easier editing of the docstrings without requiring a re-compile. NOTE: Many of the methods of ndarray have corresponding functions. If you update these docstrings, please keep also the ones in core/fromnumeric.py, core/defmatrix.py up-to-date. """ from __future__ import division, absolute_import, print_function import sys from numpy.core import numerictypes as _numerictypes from numpy.core import dtype from numpy.core.function_base import add_newdoc ############################################################################### # # flatiter # # flatiter needs a toplevel description # ############################################################################### add_newdoc('numpy.core', 'flatiter', """ Flat iterator object to iterate over arrays. A `flatiter` iterator is returned by ``x.flat`` for any array `x`. It allows iterating over the array as if it were a 1-D array, either in a for-loop or by calling its `next` method. Iteration is done in row-major, C-style order (the last index varying the fastest). The iterator can also be indexed using basic slicing or advanced indexing. See Also -------- ndarray.flat : Return a flat iterator over an array. ndarray.flatten : Returns a flattened copy of an array. Notes ----- A `flatiter` iterator can not be constructed directly from Python code by calling the `flatiter` constructor. Examples -------- >>> x = np.arange(6).reshape(2, 3) >>> fl = x.flat >>> type(fl) <class 'numpy.flatiter'> >>> for item in fl: ... print(item) ... 0 1 2 3 4 5 >>> fl[2:4] array([2, 3]) """) # flatiter attributes add_newdoc('numpy.core', 'flatiter', ('base', """ A reference to the array that is iterated over. Examples -------- >>> x = np.arange(5) >>> fl = x.flat >>> fl.base is x True """)) add_newdoc('numpy.core', 'flatiter', ('coords', """ An N-dimensional tuple of current coordinates. Examples -------- >>> x = np.arange(6).reshape(2, 3) >>> fl = x.flat >>> fl.coords (0, 0) >>> next(fl) 0 >>> fl.coords (0, 1) """)) add_newdoc('numpy.core', 'flatiter', ('index', """ Current flat index into the array. Examples -------- >>> x = np.arange(6).reshape(2, 3) >>> fl = x.flat >>> fl.index 0 >>> next(fl) 0 >>> fl.index 1 """)) # flatiter functions add_newdoc('numpy.core', 'flatiter', ('__array__', """__array__(type=None) Get array from iterator """)) add_newdoc('numpy.core', 'flatiter', ('copy', """ copy() Get a copy of the iterator as a 1-D array. Examples -------- >>> x = np.arange(6).reshape(2, 3) >>> x array([[0, 1, 2], [3, 4, 5]]) >>> fl = x.flat >>> fl.copy() array([0, 1, 2, 3, 4, 5]) """)) ############################################################################### # # nditer # ############################################################################### add_newdoc('numpy.core', 'nditer', """ Efficient multi-dimensional iterator object to iterate over arrays. To get started using this object, see the :ref:`introductory guide to array iteration <arrays.nditer>`. Parameters ---------- op : ndarray or sequence of array_like The array(s) to iterate over. flags : sequence of str, optional Flags to control the behavior of the iterator. * ``buffered`` enables buffering when required. * ``c_index`` causes a C-order index to be tracked. * ``f_index`` causes a Fortran-order index to be tracked. * ``multi_index`` causes a multi-index, or a tuple of indices with one per iteration dimension, to be tracked. 
* ``common_dtype`` causes all the operands to be converted to a common data type, with copying or buffering as necessary. * ``copy_if_overlap`` causes the iterator to determine if read operands have overlap with write operands, and make temporary copies as necessary to avoid overlap. False positives (needless copying) are possible in some cases. * ``delay_bufalloc`` delays allocation of the buffers until a reset() call is made. Allows ``allocate`` operands to be initialized before their values are copied into the buffers. * ``external_loop`` causes the ``values`` given to be one-dimensional arrays with multiple values instead of zero-dimensional arrays. * ``grow_inner`` allows the ``value`` array sizes to be made larger than the buffer size when both ``buffered`` and ``external_loop`` is used. * ``ranged`` allows the iterator to be restricted to a sub-range of the iterindex values. * ``refs_ok`` enables iteration of reference types, such as object arrays. * ``reduce_ok`` enables iteration of ``readwrite`` operands which are broadcasted, also known as reduction operands. * ``zerosize_ok`` allows `itersize` to be zero. op_flags : list of list of str, optional This is a list of flags for each operand. At minimum, one of ``readonly``, ``readwrite``, or ``writeonly`` must be specified. * ``readonly`` indicates the operand will only be read from. * ``readwrite`` indicates the operand will be read from and written to. * ``writeonly`` indicates the operand will only be written to. * ``no_broadcast`` prevents the operand from being broadcasted. * ``contig`` forces the operand data to be contiguous. * ``aligned`` forces the operand data to be aligned. * ``nbo`` forces the operand data to be in native byte order. * ``copy`` allows a temporary read-only copy if required. * ``updateifcopy`` allows a temporary read-write copy if required. * ``allocate`` causes the array to be allocated if it is None in the ``op`` parameter. * ``no_subtype`` prevents an ``allocate`` operand from using a subtype. * ``arraymask`` indicates that this operand is the mask to use for selecting elements when writing to operands with the 'writemasked' flag set. The iterator does not enforce this, but when writing from a buffer back to the array, it only copies those elements indicated by this mask. * ``writemasked`` indicates that only elements where the chosen ``arraymask`` operand is True will be written to. * ``overlap_assume_elementwise`` can be used to mark operands that are accessed only in the iterator order, to allow less conservative copying when ``copy_if_overlap`` is present. op_dtypes : dtype or tuple of dtype(s), optional The required data type(s) of the operands. If copying or buffering is enabled, the data will be converted to/from their original types. order : {'C', 'F', 'A', 'K'}, optional Controls the iteration order. 'C' means C order, 'F' means Fortran order, 'A' means 'F' order if all the arrays are Fortran contiguous, 'C' order otherwise, and 'K' means as close to the order the array elements appear in memory as possible. This also affects the element memory order of ``allocate`` operands, as they are allocated to be compatible with iteration order. Default is 'K'. casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional Controls what kind of data casting may occur when making a copy or buffering. Setting this to 'unsafe' is not recommended, as it can adversely affect accumulations. * 'no' means the data types should not be cast at all. * 'equiv' means only byte-order changes are allowed. 
* 'safe' means only casts which can preserve values are allowed. * 'same_kind' means only safe casts or casts within a kind, like float64 to float32, are allowed. * 'unsafe' means any data conversions may be done. op_axes : list of list of ints, optional If provided, is a list of ints or None for each operands. The list of axes for an operand is a mapping from the dimensions of the iterator to the dimensions of the operand. A value of -1 can be placed for entries, causing that dimension to be treated as `newaxis`. itershape : tuple of ints, optional The desired shape of the iterator. This allows ``allocate`` operands with a dimension mapped by op_axes not corresponding to a dimension of a different operand to get a value not equal to 1 for that dimension. buffersize : int, optional When buffering is enabled, controls the size of the temporary buffers. Set to 0 for the default value. Attributes ---------- dtypes : tuple of dtype(s) The data types of the values provided in `value`. This may be different from the operand data types if buffering is enabled. Valid only before the iterator is closed. finished : bool Whether the iteration over the operands is finished or not. has_delayed_bufalloc : bool If True, the iterator was created with the ``delay_bufalloc`` flag, and no reset() function was called on it yet. has_index : bool If True, the iterator was created with either the ``c_index`` or the ``f_index`` flag, and the property `index` can be used to retrieve it. has_multi_index : bool If True, the iterator was created with the ``multi_index`` flag, and the property `multi_index` can be used to retrieve it. index When the ``c_index`` or ``f_index`` flag was used, this property provides access to the index. Raises a ValueError if accessed and ``has_index`` is False. iterationneedsapi : bool Whether iteration requires access to the Python API, for example if one of the operands is an object array. iterindex : int An index which matches the order of iteration. itersize : int Size of the iterator. itviews Structured view(s) of `operands` in memory, matching the reordered and optimized iterator access pattern. Valid only before the iterator is closed. multi_index When the ``multi_index`` flag was used, this property provides access to the index. Raises a ValueError if accessed accessed and ``has_multi_index`` is False. ndim : int The dimensions of the iterator. nop : int The number of iterator operands. operands : tuple of operand(s) The array(s) to be iterated over. Valid only before the iterator is closed. shape : tuple of ints Shape tuple, the shape of the iterator. value Value of ``operands`` at current iteration. Normally, this is a tuple of array scalars, but if the flag ``external_loop`` is used, it is a tuple of one dimensional arrays. Notes ----- `nditer` supersedes `flatiter`. The iterator implementation behind `nditer` is also exposed by the NumPy C API. The Python exposure supplies two iteration interfaces, one which follows the Python iterator protocol, and another which mirrors the C-style do-while pattern. The native Python approach is better in most cases, but if you need the coordinates or index of an iterator, use the C-style pattern. Examples -------- Here is how we might write an ``iter_add`` function, using the Python iterator protocol: >>> def iter_add_py(x, y, out=None): ... addop = np.add ... it = np.nditer([x, y, out], [], ... [['readonly'], ['readonly'], ['writeonly','allocate']]) ... with it: ... for (a, b, c) in it: ... addop(a, b, out=c) ... 
return it.operands[2] Here is the same function, but following the C-style pattern: >>> def iter_add(x, y, out=None): ... addop = np.add ... it = np.nditer([x, y, out], [], ... [['readonly'], ['readonly'], ['writeonly','allocate']]) ... with it: ... while not it.finished: ... addop(it[0], it[1], out=it[2]) ... it.iternext() ... return it.operands[2] Here is an example outer product function: >>> def outer_it(x, y, out=None): ... mulop = np.multiply ... it = np.nditer([x, y, out], ['external_loop'], ... [['readonly'], ['readonly'], ['writeonly', 'allocate']], ... op_axes=[list(range(x.ndim)) + [-1] * y.ndim, ... [-1] * x.ndim + list(range(y.ndim)), ... None]) ... with it: ... for (a, b, c) in it: ... mulop(a, b, out=c) ... return it.operands[2] >>> a = np.arange(2)+1 >>> b = np.arange(3)+1 >>> outer_it(a,b) array([[1, 2, 3], [2, 4, 6]]) Here is an example function which operates like a "lambda" ufunc: >>> def luf(lamdaexpr, *args, **kwargs): ... '''luf(lambdaexpr, op1, ..., opn, out=None, order='K', casting='safe', buffersize=0)''' ... nargs = len(args) ... op = (kwargs.get('out',None),) + args ... it = np.nditer(op, ['buffered','external_loop'], ... [['writeonly','allocate','no_broadcast']] + ... [['readonly','nbo','aligned']]*nargs, ... order=kwargs.get('order','K'), ... casting=kwargs.get('casting','safe'), ... buffersize=kwargs.get('buffersize',0)) ... while not it.finished: ... it[0] = lamdaexpr(*it[1:]) ... it.iternext() ... return it.operands[0] >>> a = np.arange(5) >>> b = np.ones(5) >>> luf(lambda i,j:i*i + j/2, a, b) array([ 0.5, 1.5, 4.5, 9.5, 16.5]) If operand flags `"writeonly"` or `"readwrite"` are used the operands may be views into the original data with the `WRITEBACKIFCOPY` flag. In this case nditer must be used as a context manager or the nditer.close method must be called before using the result. The temporary data will be written back to the original data when the `__exit__` function is called but not before: >>> a = np.arange(6, dtype='i4')[::-2] >>> with np.nditer(a, [], ... [['writeonly', 'updateifcopy']], ... casting='unsafe', ... op_dtypes=[np.dtype('f4')]) as i: ... x = i.operands[0] ... x[:] = [-1, -2, -3] ... # a still unchanged here >>> a, x (array([-1, -2, -3], dtype=int32), array([-1., -2., -3.], dtype=float32)) It is important to note that once the iterator is exited, dangling references (like `x` in the example) may or may not share data with the original data `a`. If writeback semantics were active, i.e. if `x.base.flags.writebackifcopy` is `True`, then exiting the iterator will sever the connection between `x` and `a`, writing to `x` will no longer write to `a`. If writeback semantics are not active, then `x.data` will still point at some part of `a.data`, and writing to one will affect the other. """) # nditer methods add_newdoc('numpy.core', 'nditer', ('copy', """ copy() Get a copy of the iterator in its current state. Examples -------- >>> x = np.arange(10) >>> y = x + 1 >>> it = np.nditer([x, y]) >>> next(it) (array(0), array(1)) >>> it2 = it.copy() >>> next(it2) (array(1), array(2)) """)) add_newdoc('numpy.core', 'nditer', ('operands', """ operands[`Slice`] The array(s) to be iterated over. Valid only before the iterator is closed. """)) add_newdoc('numpy.core', 'nditer', ('debug_print', """ debug_print() Print the current state of the `nditer` instance and debug info to stdout. 
""")) add_newdoc('numpy.core', 'nditer', ('enable_external_loop', """ enable_external_loop() When the "external_loop" was not used during construction, but is desired, this modifies the iterator to behave as if the flag was specified. """)) add_newdoc('numpy.core', 'nditer', ('iternext', """ iternext() Check whether iterations are left, and perform a single internal iteration without returning the result. Used in the C-style pattern do-while pattern. For an example, see `nditer`. Returns ------- iternext : bool Whether or not there are iterations left. """)) add_newdoc('numpy.core', 'nditer', ('remove_axis', """ remove_axis(i) Removes axis `i` from the iterator. Requires that the flag "multi_index" be enabled. """)) add_newdoc('numpy.core', 'nditer', ('remove_multi_index', """ remove_multi_index() When the "multi_index" flag was specified, this removes it, allowing the internal iteration structure to be optimized further. """)) add_newdoc('numpy.core', 'nditer', ('reset', """ reset() Reset the iterator to its initial state. """)) add_newdoc('numpy.core', 'nested_iters', """ Create nditers for use in nested loops Create a tuple of `nditer` objects which iterate in nested loops over different axes of the op argument. The first iterator is used in the outermost loop, the last in the innermost loop. Advancing one will change the subsequent iterators to point at its new element. Parameters ---------- op : ndarray or sequence of array_like The array(s) to iterate over. axes : list of list of int Each item is used as an "op_axes" argument to an nditer flags, op_flags, op_dtypes, order, casting, buffersize (optional) See `nditer` parameters of the same name Returns ------- iters : tuple of nditer An nditer for each item in `axes`, outermost first See Also -------- nditer Examples -------- Basic usage. Note how y is the "flattened" version of [a[:, 0, :], a[:, 1, 0], a[:, 2, :]] since we specified the first iter's axes as [1] >>> a = np.arange(12).reshape(2, 3, 2) >>> i, j = np.nested_iters(a, [[1], [0, 2]], flags=["multi_index"]) >>> for x in i: ... print(i.multi_index) ... for y in j: ... print('', j.multi_index, y) (0,) (0, 0) 0 (0, 1) 1 (1, 0) 6 (1, 1) 7 (1,) (0, 0) 2 (0, 1) 3 (1, 0) 8 (1, 1) 9 (2,) (0, 0) 4 (0, 1) 5 (1, 0) 10 (1, 1) 11 """) add_newdoc('numpy.core', 'nditer', ('close', """ close() Resolve all writeback semantics in writeable operands. See Also -------- :ref:`nditer-context-manager` """)) ############################################################################### # # broadcast # ############################################################################### add_newdoc('numpy.core', 'broadcast', """ Produce an object that mimics broadcasting. Parameters ---------- in1, in2, ... : array_like Input parameters. Returns ------- b : broadcast object Broadcast the input parameters against one another, and return an object that encapsulates the result. Amongst others, it has ``shape`` and ``nd`` properties, and may be used as an iterator. 
See Also -------- broadcast_arrays broadcast_to Examples -------- Manually adding two vectors, using broadcasting: >>> x = np.array([[1], [2], [3]]) >>> y = np.array([4, 5, 6]) >>> b = np.broadcast(x, y) >>> out = np.empty(b.shape) >>> out.flat = [u+v for (u,v) in b] >>> out array([[5., 6., 7.], [6., 7., 8.], [7., 8., 9.]]) Compare against built-in broadcasting: >>> x + y array([[5, 6, 7], [6, 7, 8], [7, 8, 9]]) """) # attributes add_newdoc('numpy.core', 'broadcast', ('index', """ current index in broadcasted result Examples -------- >>> x = np.array([[1], [2], [3]]) >>> y = np.array([4, 5, 6]) >>> b = np.broadcast(x, y) >>> b.index 0 >>> next(b), next(b), next(b) ((1, 4), (1, 5), (1, 6)) >>> b.index 3 """)) add_newdoc('numpy.core', 'broadcast', ('iters', """ tuple of iterators along ``self``'s "components." Returns a tuple of `numpy.flatiter` objects, one for each "component" of ``self``. See Also -------- numpy.flatiter Examples -------- >>> x = np.array([1, 2, 3]) >>> y = np.array([[4], [5], [6]]) >>> b = np.broadcast(x, y) >>> row, col = b.iters >>> next(row), next(col) (1, 4) """)) add_newdoc('numpy.core', 'broadcast', ('ndim', """ Number of dimensions of broadcasted result. Alias for `nd`. .. versionadded:: 1.12.0 Examples -------- >>> x = np.array([1, 2, 3]) >>> y = np.array([[4], [5], [6]]) >>> b = np.broadcast(x, y) >>> b.ndim 2 """)) add_newdoc('numpy.core', 'broadcast', ('nd', """ Number of dimensions of broadcasted result. For code intended for NumPy 1.12.0 and later the more consistent `ndim` is preferred. Examples -------- >>> x = np.array([1, 2, 3]) >>> y = np.array([[4], [5], [6]]) >>> b = np.broadcast(x, y) >>> b.nd 2 """)) add_newdoc('numpy.core', 'broadcast', ('numiter', """ Number of iterators possessed by the broadcasted result. Examples -------- >>> x = np.array([1, 2, 3]) >>> y = np.array([[4], [5], [6]]) >>> b = np.broadcast(x, y) >>> b.numiter 2 """)) add_newdoc('numpy.core', 'broadcast', ('shape', """ Shape of broadcasted result. Examples -------- >>> x = np.array([1, 2, 3]) >>> y = np.array([[4], [5], [6]]) >>> b = np.broadcast(x, y) >>> b.shape (3, 3) """)) add_newdoc('numpy.core', 'broadcast', ('size', """ Total size of broadcasted result. Examples -------- >>> x = np.array([1, 2, 3]) >>> y = np.array([[4], [5], [6]]) >>> b = np.broadcast(x, y) >>> b.size 9 """)) add_newdoc('numpy.core', 'broadcast', ('reset', """ reset() Reset the broadcasted result's iterator(s). Parameters ---------- None Returns ------- None Examples -------- >>> x = np.array([1, 2, 3]) >>> y = np.array([[4], [5], [6]]) >>> b = np.broadcast(x, y) >>> b.index 0 >>> next(b), next(b), next(b) ((1, 4), (2, 4), (3, 4)) >>> b.index 3 >>> b.reset() >>> b.index 0 """)) ############################################################################### # # numpy functions # ############################################################################### add_newdoc('numpy.core.multiarray', 'array', """ array(object, dtype=None, copy=True, order='K', subok=False, ndmin=0) Create an array. Parameters ---------- object : array_like An array, any object exposing the array interface, an object whose __array__ method returns an array, or any (nested) sequence. dtype : data-type, optional The desired data-type for the array. If not given, then the type will be determined as the minimum type required to hold the objects in the sequence. copy : bool, optional If true (default), then the object is copied. 
Otherwise, a copy will only be made if __array__ returns a copy, if obj is a nested sequence, or if a copy is needed to satisfy any of the other requirements (`dtype`, `order`, etc.). order : {'K', 'A', 'C', 'F'}, optional Specify the memory layout of the array. If object is not an array, the newly created array will be in C order (row major) unless 'F' is specified, in which case it will be in Fortran order (column major). If object is an array the following holds. ===== ========= =================================================== order no copy copy=True ===== ========= =================================================== 'K' unchanged F & C order preserved, otherwise most similar order 'A' unchanged F order if input is F and not C, otherwise C order 'C' C order C order 'F' F order F order ===== ========= =================================================== When ``copy=False`` and a copy is made for other reasons, the result is the same as if ``copy=True``, with some exceptions for `A`, see the Notes section. The default order is 'K'. subok : bool, optional If True, then sub-classes will be passed-through, otherwise the returned array will be forced to be a base-class array (default). ndmin : int, optional Specifies the minimum number of dimensions that the resulting array should have. Ones will be pre-pended to the shape as needed to meet this requirement. Returns ------- out : ndarray An array object satisfying the specified requirements. See Also -------- empty_like : Return an empty array with shape and type of input. ones_like : Return an array of ones with shape and type of input. zeros_like : Return an array of zeros with shape and type of input. full_like : Return a new array with shape of input filled with value. empty : Return a new uninitialized array. ones : Return a new array setting values to one. zeros : Return a new array setting values to zero. full : Return a new array of given shape filled with value. Notes ----- When order is 'A' and `object` is an array in neither 'C' nor 'F' order, and a copy is forced by a change in dtype, then the order of the result is not necessarily 'C' as expected. This is likely a bug. Examples -------- >>> np.array([1, 2, 3]) array([1, 2, 3]) Upcasting: >>> np.array([1, 2, 3.0]) array([ 1., 2., 3.]) More than one dimension: >>> np.array([[1, 2], [3, 4]]) array([[1, 2], [3, 4]]) Minimum dimensions 2: >>> np.array([1, 2, 3], ndmin=2) array([[1, 2, 3]]) Type provided: >>> np.array([1, 2, 3], dtype=complex) array([ 1.+0.j, 2.+0.j, 3.+0.j]) Data-type consisting of more than one element: >>> x = np.array([(1,2),(3,4)],dtype=[('a','<i4'),('b','<i4')]) >>> x['a'] array([1, 3]) Creating an array from sub-classes: >>> np.array(np.mat('1 2; 3 4')) array([[1, 2], [3, 4]]) >>> np.array(np.mat('1 2; 3 4'), subok=True) matrix([[1, 2], [3, 4]]) """) add_newdoc('numpy.core.multiarray', 'empty', """ empty(shape, dtype=float, order='C') Return a new array of given shape and type, without initializing entries. Parameters ---------- shape : int or tuple of int Shape of the empty array, e.g., ``(2, 3)`` or ``2``. dtype : data-type, optional Desired output data-type for the array, e.g, `numpy.int8`. Default is `numpy.float64`. order : {'C', 'F'}, optional, default: 'C' Whether to store multi-dimensional data in row-major (C-style) or column-major (Fortran-style) order in memory. Returns ------- out : ndarray Array of uninitialized (arbitrary) data of the given shape, dtype, and order. Object arrays will be initialized to None. 
See Also -------- empty_like : Return an empty array with shape and type of input. ones : Return a new array setting values to one. zeros : Return a new array setting values to zero. full : Return a new array of given shape filled with value. Notes ----- `empty`, unlike `zeros`, does not set the array values to zero, and may therefore be marginally faster. On the other hand, it requires the user to manually set all the values in the array, and should be used with caution. Examples -------- >>> np.empty([2, 2]) array([[ -9.74499359e+001, 6.69583040e-309], [ 2.13182611e-314, 3.06959433e-309]]) #uninitialized >>> np.empty([2, 2], dtype=int) array([[-1073741821, -1067949133], [ 496041986, 19249760]]) #uninitialized """) add_newdoc('numpy.core.multiarray', 'scalar', """ scalar(dtype, obj) Return a new scalar array of the given type initialized with obj. This function is meant mainly for pickle support. `dtype` must be a valid data-type descriptor. If `dtype` corresponds to an object descriptor, then `obj` can be any object, otherwise `obj` must be a string. If `obj` is not given, it will be interpreted as None for object type and as zeros for all other types. """) add_newdoc('numpy.core.multiarray', 'zeros', """ zeros(shape, dtype=float, order='C') Return a new array of given shape and type, filled with zeros. Parameters ---------- shape : int or tuple of ints Shape of the new array, e.g., ``(2, 3)`` or ``2``. dtype : data-type, optional The desired data-type for the array, e.g., `numpy.int8`. Default is `numpy.float64`. order : {'C', 'F'}, optional, default: 'C' Whether to store multi-dimensional data in row-major (C-style) or column-major (Fortran-style) order in memory. Returns ------- out : ndarray Array of zeros with the given shape, dtype, and order. See Also -------- zeros_like : Return an array of zeros with shape and type of input. empty : Return a new uninitialized array. ones : Return a new array setting values to one. full : Return a new array of given shape filled with value. Examples -------- >>> np.zeros(5) array([ 0., 0., 0., 0., 0.]) >>> np.zeros((5,), dtype=int) array([0, 0, 0, 0, 0]) >>> np.zeros((2, 1)) array([[ 0.], [ 0.]]) >>> s = (2,2) >>> np.zeros(s) array([[ 0., 0.], [ 0., 0.]]) >>> np.zeros((2,), dtype=[('x', 'i4'), ('y', 'i4')]) # custom dtype array([(0, 0), (0, 0)], dtype=[('x', '<i4'), ('y', '<i4')]) """) add_newdoc('numpy.core.multiarray', 'set_typeDict', """set_typeDict(dict) Set the internal dictionary that can look up an array type using a registered code. """) add_newdoc('numpy.core.multiarray', 'fromstring', """ fromstring(string, dtype=float, count=-1, sep='') A new 1-D array initialized from text data in a string. Parameters ---------- string : str A string containing the data. dtype : data-type, optional The data type of the array; default: float. For binary input data, the data must be in exactly this format. count : int, optional Read this number of `dtype` elements from the data. If this is negative (the default), the count will be determined from the length of the data. sep : str, optional The string separating numbers in the data; extra whitespace between elements is also ignored. .. deprecated:: 1.14 Passing ``sep=''``, the default, is deprecated since it will trigger the deprecated binary mode of this function. This mode interprets `string` as binary bytes, rather than ASCII text with decimal numbers, an operation which is better spelt ``frombuffer(string, dtype, count)``. 
If `string` contains unicode text, the binary mode of `fromstring` will first encode it into bytes using either utf-8 (python 3) or the default encoding (python 2), neither of which produce sane results. Returns ------- arr : ndarray The constructed array. Raises ------ ValueError If the string is not the correct size to satisfy the requested `dtype` and `count`. See Also -------- frombuffer, fromfile, fromiter Examples -------- >>> np.fromstring('1 2', dtype=int, sep=' ') array([1, 2]) >>> np.fromstring('1, 2', dtype=int, sep=',') array([1, 2]) """) add_newdoc('numpy.core.multiarray', 'compare_chararrays', """ compare_chararrays(a, b, cmp_op, rstrip) Performs element-wise comparison of two string arrays using the comparison operator specified by `cmp_op`. Parameters ---------- a, b : array_like Arrays to be compared. cmp_op : {"<", "<=", "==", ">=", ">", "!="} Type of comparison. rstrip : Boolean If True, the spaces at the end of Strings are removed before the comparison. Returns ------- out : ndarray The output array of type Boolean with the same shape as a and b. Raises ------ ValueError If `cmp_op` is not valid. TypeError If at least one of `a` or `b` is a non-string array Examples -------- >>> a = np.array(["a", "b", "cde"]) >>> b = np.array(["a", "a", "dec"]) >>> np.compare_chararrays(a, b, ">", True) array([False, True, False]) """) add_newdoc('numpy.core.multiarray', 'fromiter', """ fromiter(iterable, dtype, count=-1) Create a new 1-dimensional array from an iterable object. Parameters ---------- iterable : iterable object An iterable object providing data for the array. dtype : data-type The data-type of the returned array. count : int, optional The number of items to read from *iterable*. The default is -1, which means all data is read. Returns ------- out : ndarray The output array. Notes ----- Specify `count` to improve performance. It allows ``fromiter`` to pre-allocate the output array, instead of resizing it on demand. Examples -------- >>> iterable = (x*x for x in range(5)) >>> np.fromiter(iterable, float) array([ 0., 1., 4., 9., 16.]) """) add_newdoc('numpy.core.multiarray', 'fromfile', """ fromfile(file, dtype=float, count=-1, sep='', offset=0) Construct an array from data in a text or binary file. A highly efficient way of reading binary data with a known data-type, as well as parsing simply formatted text files. Data written using the `tofile` method can be read using this function. Parameters ---------- file : file or str or Path Open file object or filename. .. versionchanged:: 1.17.0 `pathlib.Path` objects are now accepted. dtype : data-type Data type of the returned array. For binary files, it is used to determine the size and byte-order of the items in the file. count : int Number of items to read. ``-1`` means all items (i.e., the complete file). sep : str Separator between items if file is a text file. Empty ("") separator means the file should be treated as binary. Spaces (" ") in the separator match zero or more whitespace characters. A separator consisting only of spaces must match at least one whitespace. offset : int The offset (in bytes) from the file's current position. Defaults to 0. Only permitted for binary files. .. versionadded:: 1.17.0 See also -------- load, save ndarray.tofile loadtxt : More flexible way of loading data from a text file. Notes ----- Do not rely on the combination of `tofile` and `fromfile` for data storage, as the binary files generated are are not platform independent. 
In particular, no byte-order or data-type information is saved. Data can be stored in the platform independent ``.npy`` format using `save` and `load` instead. Examples -------- Construct an ndarray: >>> dt = np.dtype([('time', [('min', np.int64), ('sec', np.int64)]), ... ('temp', float)]) >>> x = np.zeros((1,), dtype=dt) >>> x['time']['min'] = 10; x['temp'] = 98.25 >>> x array([((10, 0), 98.25)], dtype=[('time', [('min', '<i8'), ('sec', '<i8')]), ('temp', '<f8')]) Save the raw data to disk: >>> import tempfile >>> fname = tempfile.mkstemp()[1] >>> x.tofile(fname) Read the raw data from disk: >>> np.fromfile(fname, dtype=dt) array([((10, 0), 98.25)], dtype=[('time', [('min', '<i8'), ('sec', '<i8')]), ('temp', '<f8')]) The recommended way to store and load data: >>> np.save(fname, x) >>> np.load(fname + '.npy') array([((10, 0), 98.25)], dtype=[('time', [('min', '<i8'), ('sec', '<i8')]), ('temp', '<f8')]) """) add_newdoc('numpy.core.multiarray', 'frombuffer', """ frombuffer(buffer, dtype=float, count=-1, offset=0) Interpret a buffer as a 1-dimensional array. Parameters ---------- buffer : buffer_like An object that exposes the buffer interface. dtype : data-type, optional Data-type of the returned array; default: float. count : int, optional Number of items to read. ``-1`` means all data in the buffer. offset : int, optional Start reading the buffer from this offset (in bytes); default: 0. Notes ----- If the buffer has data that is not in machine byte-order, this should be specified as part of the data-type, e.g.:: >>> dt = np.dtype(int) >>> dt = dt.newbyteorder('>') >>> np.frombuffer(buf, dtype=dt) # doctest: +SKIP The data of the resulting array will not be byteswapped, but will be interpreted correctly. Examples -------- >>> s = b'hello world' >>> np.frombuffer(s, dtype='S1', count=5, offset=6) array([b'w', b'o', b'r', b'l', b'd'], dtype='|S1') >>> np.frombuffer(b'\\x01\\x02', dtype=np.uint8) array([1, 2], dtype=uint8) >>> np.frombuffer(b'\\x01\\x02\\x03\\x04\\x05', dtype=np.uint8, count=3) array([1, 2, 3], dtype=uint8) """) add_newdoc('numpy.core', 'fastCopyAndTranspose', """_fastCopyAndTranspose(a)""") add_newdoc('numpy.core.multiarray', 'correlate', """cross_correlate(a,v, mode=0)""") add_newdoc('numpy.core.multiarray', 'arange', """ arange([start,] stop[, step,], dtype=None) Return evenly spaced values within a given interval. Values are generated within the half-open interval ``[start, stop)`` (in other words, the interval including `start` but excluding `stop`). For integer arguments the function is equivalent to the Python built-in `range` function, but returns an ndarray rather than a list. When using a non-integer step, such as 0.1, the results will often not be consistent. It is better to use `numpy.linspace` for these cases. Parameters ---------- start : number, optional Start of interval. The interval includes this value. The default start value is 0. stop : number End of interval. The interval does not include this value, except in some cases where `step` is not an integer and floating point round-off affects the length of `out`. step : number, optional Spacing between values. For any output `out`, this is the distance between two adjacent values, ``out[i+1] - out[i]``. The default step size is 1. If `step` is specified as a position argument, `start` must also be given. dtype : dtype The type of the output array. If `dtype` is not given, infer the data type from the other input arguments. Returns ------- arange : ndarray Array of evenly spaced values. 
For floating point arguments, the length of the result is ``ceil((stop - start)/step)``. Because of floating point overflow, this rule may result in the last element of `out` being greater than `stop`. See Also -------- linspace : Evenly spaced numbers with careful handling of endpoints. ogrid: Arrays of evenly spaced numbers in N-dimensions. mgrid: Grid-shaped arrays of evenly spaced numbers in N-dimensions. Examples -------- >>> np.arange(3) array([0, 1, 2]) >>> np.arange(3.0) array([ 0., 1., 2.]) >>> np.arange(3,7) array([3, 4, 5, 6]) >>> np.arange(3,7,2) array([3, 5]) """) add_newdoc('numpy.core.multiarray', '_get_ndarray_c_version', """_get_ndarray_c_version() Return the compile time NDARRAY_VERSION number. """) add_newdoc('numpy.core.multiarray', '_reconstruct', """_reconstruct(subtype, shape, dtype) Construct an empty array. Used by Pickles. """) add_newdoc('numpy.core.multiarray', 'set_string_function', """ set_string_function(f, repr=1) Internal method to set a function to be used when pretty printing arrays. """) add_newdoc('numpy.core.multiarray', 'set_numeric_ops', """ set_numeric_ops(op1=func1, op2=func2, ...) Set numerical operators for array objects. .. deprecated:: 1.16 For the general case, use :c:func:`PyUFunc_ReplaceLoopBySignature`. For ndarray subclasses, define the ``__array_ufunc__`` method and override the relevant ufunc. Parameters ---------- op1, op2, ... : callable Each ``op = func`` pair describes an operator to be replaced. For example, ``add = lambda x, y: np.add(x, y) % 5`` would replace addition by modulus 5 addition. Returns ------- saved_ops : list of callables A list of all operators, stored before making replacements. Notes ----- .. WARNING:: Use with care! Incorrect usage may lead to memory errors. A function replacing an operator cannot make use of that operator. For example, when replacing add, you may not use ``+``. Instead, directly call ufuncs. Examples -------- >>> def add_mod5(x, y): ... return np.add(x, y) % 5 ... >>> old_funcs = np.set_numeric_ops(add=add_mod5) >>> x = np.arange(12).reshape((3, 4)) >>> x + x array([[0, 2, 4, 1], [3, 0, 2, 4], [1, 3, 0, 2]]) >>> ignore = np.set_numeric_ops(**old_funcs) # restore operators """) add_newdoc('numpy.core.multiarray', 'promote_types', """ promote_types(type1, type2) Returns the data type with the smallest size and smallest scalar kind to which both ``type1`` and ``type2`` may be safely cast. The returned data type is always in native byte order. This function is symmetric, but rarely associative. Parameters ---------- type1 : dtype or dtype specifier First data type. type2 : dtype or dtype specifier Second data type. Returns ------- out : dtype The promoted data type. Notes ----- .. versionadded:: 1.6.0 Starting in NumPy 1.9, promote_types function now returns a valid string length when given an integer or float dtype as one argument and a string dtype as another argument. Previously it always returned the input string dtype, even if it wasn't long enough to store the max integer/float value converted to a string. 
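A small added illustration of the native-byte-order rule stated above (this sketch is not part of the original docstring; the input dtypes are arbitrary):

>>> np.promote_types('>i8', '<i8')  # same type, differing byte order
dtype('int64')

Both inputs describe 64-bit integers, so the promoted type is simply ``int64`` in native byte order.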
See Also -------- result_type, dtype, can_cast Examples -------- >>> np.promote_types('f4', 'f8') dtype('float64') >>> np.promote_types('i8', 'f4') dtype('float64') >>> np.promote_types('>i8', '<c8') dtype('complex128') >>> np.promote_types('i4', 'S8') dtype('S11') An example of a non-associative case: >>> p = np.promote_types >>> p('S', p('i1', 'u1')) dtype('S6') >>> p(p('S', 'i1'), 'u1') dtype('S4') """) if sys.version_info.major < 3: add_newdoc('numpy.core.multiarray', 'newbuffer', """ newbuffer(size) Return a new uninitialized buffer object. Parameters ---------- size : int Size in bytes of returned buffer object. Returns ------- newbuffer : buffer object Returned, uninitialized buffer object of `size` bytes. """) add_newdoc('numpy.core.multiarray', 'getbuffer', """ getbuffer(obj [,offset[, size]]) Create a buffer object from the given object referencing a slice of length size starting at offset. Default is the entire buffer. A read-write buffer is attempted followed by a read-only buffer. Parameters ---------- obj : object offset : int, optional size : int, optional Returns ------- buffer_obj : buffer Examples -------- >>> buf = np.getbuffer(np.ones(5), 1, 3) >>> len(buf) 3 >>> buf[0] '\\x00' >>> buf <read-write buffer for 0x8af1e70, size 3, offset 1 at 0x8ba4ec0> """) add_newdoc('numpy.core.multiarray', 'c_einsum', """ c_einsum(subscripts, *operands, out=None, dtype=None, order='K', casting='safe') *This documentation shadows that of the native python implementation of the `einsum` function, except all references and examples related to the `optimize` argument (v 0.12.0) have been removed.* Evaluates the Einstein summation convention on the operands. Using the Einstein summation convention, many common multi-dimensional, linear algebraic array operations can be represented in a simple fashion. In *implicit* mode `einsum` computes these values. In *explicit* mode, `einsum` provides further flexibility to compute other array operations that might not be considered classical Einstein summation operations, by disabling, or forcing summation over specified subscript labels. See the notes and examples for clarification. Parameters ---------- subscripts : str Specifies the subscripts for summation as comma separated list of subscript labels. An implicit (classical Einstein summation) calculation is performed unless the explicit indicator '->' is included as well as subscript labels of the precise output form. operands : list of array_like These are the arrays for the operation. out : ndarray, optional If provided, the calculation is done into this array. dtype : {data-type, None}, optional If provided, forces the calculation to use the data type specified. Note that you may have to also give a more liberal `casting` parameter to allow the conversions. Default is None. order : {'C', 'F', 'A', 'K'}, optional Controls the memory layout of the output. 'C' means it should be C contiguous. 'F' means it should be Fortran contiguous, 'A' means it should be 'F' if the inputs are all 'F', 'C' otherwise. 'K' means it should be as close to the layout as the inputs as is possible, including arbitrarily permuted axes. Default is 'K'. casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional Controls what kind of data casting may occur. Setting this to 'unsafe' is not recommended, as it can adversely affect accumulations. * 'no' means the data types should not be cast at all. * 'equiv' means only byte-order changes are allowed. * 'safe' means only casts which can preserve values are allowed. 
* 'same_kind' means only safe casts or casts within a kind, like float64 to float32, are allowed. * 'unsafe' means any data conversions may be done. Default is 'safe'. optimize : {False, True, 'greedy', 'optimal'}, optional Controls if intermediate optimization should occur. No optimization will occur if False and True will default to the 'greedy' algorithm. Also accepts an explicit contraction list from the ``np.einsum_path`` function. See ``np.einsum_path`` for more details. Defaults to False. Returns ------- output : ndarray The calculation based on the Einstein summation convention. See Also -------- einsum_path, dot, inner, outer, tensordot, linalg.multi_dot Notes ----- .. versionadded:: 1.6.0 The Einstein summation convention can be used to compute many multi-dimensional, linear algebraic array operations. `einsum` provides a succinct way of representing these. A non-exhaustive list of these operations, which can be computed by `einsum`, is shown below along with examples: * Trace of an array, :py:func:`numpy.trace`. * Return a diagonal, :py:func:`numpy.diag`. * Array axis summations, :py:func:`numpy.sum`. * Transpositions and permutations, :py:func:`numpy.transpose`. * Matrix multiplication and dot product, :py:func:`numpy.matmul` :py:func:`numpy.dot`. * Vector inner and outer products, :py:func:`numpy.inner` :py:func:`numpy.outer`. * Broadcasting, element-wise and scalar multiplication, :py:func:`numpy.multiply`. * Tensor contractions, :py:func:`numpy.tensordot`. * Chained array operations, in efficient calculation order, :py:func:`numpy.einsum_path`. The subscripts string is a comma-separated list of subscript labels, where each label refers to a dimension of the corresponding operand. Whenever a label is repeated it is summed, so ``np.einsum('i,i', a, b)`` is equivalent to :py:func:`np.inner(a,b) <numpy.inner>`. If a label appears only once, it is not summed, so ``np.einsum('i', a)`` produces a view of ``a`` with no changes. A further example ``np.einsum('ij,jk', a, b)`` describes traditional matrix multiplication and is equivalent to :py:func:`np.matmul(a,b) <numpy.matmul>`. Repeated subscript labels in one operand take the diagonal. For example, ``np.einsum('ii', a)`` is equivalent to :py:func:`np.trace(a) <numpy.trace>`. In *implicit mode*, the chosen subscripts are important since the axes of the output are reordered alphabetically. This means that ``np.einsum('ij', a)`` doesn't affect a 2D array, while ``np.einsum('ji', a)`` takes its transpose. Additionally, ``np.einsum('ij,jk', a, b)`` returns a matrix multiplication, while, ``np.einsum('ij,jh', a, b)`` returns the transpose of the multiplication since subscript 'h' precedes subscript 'i'. In *explicit mode* the output can be directly controlled by specifying output subscript labels. This requires the identifier '->' as well as the list of output subscript labels. This feature increases the flexibility of the function since summing can be disabled or forced when required. The call ``np.einsum('i->', a)`` is like :py:func:`np.sum(a, axis=-1) <numpy.sum>`, and ``np.einsum('ii->i', a)`` is like :py:func:`np.diag(a) <numpy.diag>`. The difference is that `einsum` does not allow broadcasting by default. Additionally ``np.einsum('ij,jh->ih', a, b)`` directly specifies the order of the output subscript labels and therefore returns matrix multiplication, unlike the example above in implicit mode. To enable and control broadcasting, use an ellipsis. 
Default NumPy-style broadcasting is done by adding an ellipsis to the left of each term, like ``np.einsum('...ii->...i', a)``. To take the trace along the first and last axes, you can do ``np.einsum('i...i', a)``, or to do a matrix-matrix product with the left-most indices instead of rightmost, one can do ``np.einsum('ij...,jk...->ik...', a, b)``. When there is only one operand, no axes are summed, and no output parameter is provided, a view into the operand is returned instead of a new array. Thus, taking the diagonal as ``np.einsum('ii->i', a)`` produces a view (changed in version 1.10.0). `einsum` also provides an alternative way to provide the subscripts and operands as ``einsum(op0, sublist0, op1, sublist1, ..., [sublistout])``. If the output shape is not provided in this format `einsum` will be calculated in implicit mode, otherwise it will be performed explicitly. The examples below have corresponding `einsum` calls with the two parameter methods. .. versionadded:: 1.10.0 Views returned from einsum are now writeable whenever the input array is writeable. For example, ``np.einsum('ijk...->kji...', a)`` will now have the same effect as :py:func:`np.swapaxes(a, 0, 2) <numpy.swapaxes>` and ``np.einsum('ii->i', a)`` will return a writeable view of the diagonal of a 2D array. Examples -------- >>> a = np.arange(25).reshape(5,5) >>> b = np.arange(5) >>> c = np.arange(6).reshape(2,3) Trace of a matrix: >>> np.einsum('ii', a) 60 >>> np.einsum(a, [0,0]) 60 >>> np.trace(a) 60 Extract the diagonal (requires explicit form): >>> np.einsum('ii->i', a) array([ 0, 6, 12, 18, 24]) >>> np.einsum(a, [0,0], [0]) array([ 0, 6, 12, 18, 24]) >>> np.diag(a) array([ 0, 6, 12, 18, 24]) Sum over an axis (requires explicit form): >>> np.einsum('ij->i', a) array([ 10, 35, 60, 85, 110]) >>> np.einsum(a, [0,1], [0]) array([ 10, 35, 60, 85, 110]) >>> np.sum(a, axis=1) array([ 10, 35, 60, 85, 110]) For higher dimensional arrays summing a single axis can be done with ellipsis: >>> np.einsum('...j->...', a) array([ 10, 35, 60, 85, 110]) >>> np.einsum(a, [Ellipsis,1], [Ellipsis]) array([ 10, 35, 60, 85, 110]) Compute a matrix transpose, or reorder any number of axes: >>> np.einsum('ji', c) array([[0, 3], [1, 4], [2, 5]]) >>> np.einsum('ij->ji', c) array([[0, 3], [1, 4], [2, 5]]) >>> np.einsum(c, [1,0]) array([[0, 3], [1, 4], [2, 5]]) >>> np.transpose(c) array([[0, 3], [1, 4], [2, 5]]) Vector inner products: >>> np.einsum('i,i', b, b) 30 >>> np.einsum(b, [0], b, [0]) 30 >>> np.inner(b,b) 30 Matrix vector multiplication: >>> np.einsum('ij,j', a, b) array([ 30, 80, 130, 180, 230]) >>> np.einsum(a, [0,1], b, [1]) array([ 30, 80, 130, 180, 230]) >>> np.dot(a, b) array([ 30, 80, 130, 180, 230]) >>> np.einsum('...j,j', a, b) array([ 30, 80, 130, 180, 230]) Broadcasting and scalar multiplication: >>> np.einsum('..., ...', 3, c) array([[ 0, 3, 6], [ 9, 12, 15]]) >>> np.einsum(',ij', 3, c) array([[ 0, 3, 6], [ 9, 12, 15]]) >>> np.einsum(3, [Ellipsis], c, [Ellipsis]) array([[ 0, 3, 6], [ 9, 12, 15]]) >>> np.multiply(3, c) array([[ 0, 3, 6], [ 9, 12, 15]]) Vector outer product: >>> np.einsum('i,j', np.arange(2)+1, b) array([[0, 1, 2, 3, 4], [0, 2, 4, 6, 8]]) >>> np.einsum(np.arange(2)+1, [0], b, [1]) array([[0, 1, 2, 3, 4], [0, 2, 4, 6, 8]]) >>> np.outer(np.arange(2)+1, b) array([[0, 1, 2, 3, 4], [0, 2, 4, 6, 8]]) Tensor contraction: >>> a = np.arange(60.).reshape(3,4,5) >>> b = np.arange(24.).reshape(4,3,2) >>> np.einsum('ijk,jil->kl', a, b) array([[ 4400., 4730.], [ 4532., 4874.], [ 4664., 5018.], [ 4796., 5162.], [ 4928., 
5306.]]) >>> np.einsum(a, [0,1,2], b, [1,0,3], [2,3]) array([[ 4400., 4730.], [ 4532., 4874.], [ 4664., 5018.], [ 4796., 5162.], [ 4928., 5306.]]) >>> np.tensordot(a,b, axes=([1,0],[0,1])) array([[ 4400., 4730.], [ 4532., 4874.], [ 4664., 5018.], [ 4796., 5162.], [ 4928., 5306.]]) Writeable returned arrays (since version 1.10.0): >>> a = np.zeros((3, 3)) >>> np.einsum('ii->i', a)[:] = 1 >>> a array([[ 1., 0., 0.], [ 0., 1., 0.], [ 0., 0., 1.]]) Example of ellipsis use: >>> a = np.arange(6).reshape((3,2)) >>> b = np.arange(12).reshape((4,3)) >>> np.einsum('ki,jk->ij', a, b) array([[10, 28, 46, 64], [13, 40, 67, 94]]) >>> np.einsum('ki,...k->i...', a, b) array([[10, 28, 46, 64], [13, 40, 67, 94]]) >>> np.einsum('k...,jk', a, b) array([[10, 28, 46, 64], [13, 40, 67, 94]]) """) ############################################################################## # # Documentation for ndarray attributes and methods # ############################################################################## ############################################################################## # # ndarray object # ############################################################################## add_newdoc('numpy.core.multiarray', 'ndarray', """ ndarray(shape, dtype=float, buffer=None, offset=0, strides=None, order=None) An array object represents a multidimensional, homogeneous array of fixed-size items. An associated data-type object describes the format of each element in the array (its byte-order, how many bytes it occupies in memory, whether it is an integer, a floating point number, or something else, etc.) Arrays should be constructed using `array`, `zeros` or `empty` (refer to the See Also section below). The parameters given here refer to a low-level method (`ndarray(...)`) for instantiating an array. For more information, refer to the `numpy` module and examine the methods and attributes of an array. Parameters ---------- (for the __new__ method; see Notes below) shape : tuple of ints Shape of created array. dtype : data-type, optional Any object that can be interpreted as a numpy data type. buffer : object exposing buffer interface, optional Used to fill the array with data. offset : int, optional Offset of array data in buffer. strides : tuple of ints, optional Strides of data in memory. order : {'C', 'F'}, optional Row-major (C-style) or column-major (Fortran-style) order. Attributes ---------- T : ndarray Transpose of the array. data : buffer The array's elements, in memory. dtype : dtype object Describes the format of the elements in the array. flags : dict Dictionary containing information related to memory use, e.g., 'C_CONTIGUOUS', 'OWNDATA', 'WRITEABLE', etc. flat : numpy.flatiter object Flattened version of the array as an iterator. The iterator allows assignments, e.g., ``x.flat = 3`` (See `ndarray.flat` for assignment examples; TODO). imag : ndarray Imaginary part of the array. real : ndarray Real part of the array. size : int Number of elements in the array. itemsize : int The memory use of each array element in bytes. nbytes : int The total number of bytes required to store the array data, i.e., ``itemsize * size``. ndim : int The array's number of dimensions. shape : tuple of ints Shape of the array. strides : tuple of ints The step-size required to move from one element to the next in memory. For example, a contiguous ``(3, 4)`` array of type ``int16`` in C-order has strides ``(8, 2)``. This implies that to move from element to element in memory requires jumps of 2 bytes. 
To move from row-to-row, one needs to jump 8 bytes at a time (``2 * 4``). ctypes : ctypes object Class containing properties of the array needed for interaction with ctypes. base : ndarray If the array is a view into another array, that array is its `base` (unless that array is also a view). The `base` array is where the array data is actually stored. See Also -------- array : Construct an array. zeros : Create an array, each element of which is zero. empty : Create an array, but leave its allocated memory unchanged (i.e., it contains "garbage"). dtype : Create a data-type. Notes ----- There are two modes of creating an array using ``__new__``: 1. If `buffer` is None, then only `shape`, `dtype`, and `order` are used. 2. If `buffer` is an object exposing the buffer interface, then all keywords are interpreted. No ``__init__`` method is needed because the array is fully initialized after the ``__new__`` method. Examples -------- These examples illustrate the low-level `ndarray` constructor. Refer to the `See Also` section above for easier ways of constructing an ndarray. First mode, `buffer` is None: >>> np.ndarray(shape=(2,2), dtype=float, order='F') array([[0.0e+000, 0.0e+000], # random [ nan, 2.5e-323]]) Second mode: >>> np.ndarray((2,), buffer=np.array([1,2,3]), ... offset=np.int_().itemsize, ... dtype=int) # offset = 1*itemsize, i.e. skip first element array([2, 3]) """) ############################################################################## # # ndarray attributes # ############################################################################## add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_interface__', """Array protocol: Python side.""")) add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_finalize__', """None.""")) add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_priority__', """Array priority.""")) add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_struct__', """Array protocol: C-struct side.""")) add_newdoc('numpy.core.multiarray', 'ndarray', ('base', """ Base object if memory is from some other object. Examples -------- The base of an array that owns its memory is None: >>> x = np.array([1,2,3,4]) >>> x.base is None True Slicing creates a view, whose memory is shared with x: >>> y = x[2:] >>> y.base is x True """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('ctypes', """ An object to simplify the interaction of the array with the ctypes module. This attribute creates an object that makes it easier to use arrays when calling shared libraries with the ctypes module. The returned object has, among others, data, shape, and strides attributes (see Notes below) which themselves return ctypes objects that can be used as arguments to a shared library. Parameters ---------- None Returns ------- c : Python object Possessing attributes data, shape, strides, etc. See Also -------- numpy.ctypeslib Notes ----- Below are the public attributes of this object which were documented in "Guide to NumPy" (we have omitted undocumented public attributes, as well as documented private attributes): .. autoattribute:: numpy.core._internal._ctypes.data :noindex: .. autoattribute:: numpy.core._internal._ctypes.shape :noindex: .. autoattribute:: numpy.core._internal._ctypes.strides :noindex: .. automethod:: numpy.core._internal._ctypes.data_as :noindex: .. automethod:: numpy.core._internal._ctypes.shape_as :noindex: .. 
automethod:: numpy.core._internal._ctypes.strides_as :noindex: If the ctypes module is not available, then the ctypes attribute of array objects still returns something useful, but ctypes objects are not returned and errors may be raised instead. In particular, the object will still have the ``as_parameter`` attribute which will return an integer equal to the data attribute. Examples -------- >>> import ctypes >>> x array([[0, 1], [2, 3]]) >>> x.ctypes.data 30439712 >>> x.ctypes.data_as(ctypes.POINTER(ctypes.c_long)) <ctypes.LP_c_long object at 0x01F01300> >>> x.ctypes.data_as(ctypes.POINTER(ctypes.c_long)).contents c_long(0) >>> x.ctypes.data_as(ctypes.POINTER(ctypes.c_longlong)).contents c_longlong(4294967296L) >>> x.ctypes.shape <numpy.core._internal.c_long_Array_2 object at 0x01FFD580> >>> x.ctypes.shape_as(ctypes.c_long) <numpy.core._internal.c_long_Array_2 object at 0x01FCE620> >>> x.ctypes.strides <numpy.core._internal.c_long_Array_2 object at 0x01FCE620> >>> x.ctypes.strides_as(ctypes.c_longlong) <numpy.core._internal.c_longlong_Array_2 object at 0x01F01300> """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('data', """Python buffer object pointing to the start of the array's data.""")) add_newdoc('numpy.core.multiarray', 'ndarray', ('dtype', """ Data-type of the array's elements. Parameters ---------- None Returns ------- d : numpy dtype object See Also -------- numpy.dtype Examples -------- >>> x array([[0, 1], [2, 3]]) >>> x.dtype dtype('int32') >>> type(x.dtype) <type 'numpy.dtype'> """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('imag', """ The imaginary part of the array. Examples -------- >>> x = np.sqrt([1+0j, 0+1j]) >>> x.imag array([ 0. , 0.70710678]) >>> x.imag.dtype dtype('float64') """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('itemsize', """ Length of one array element in bytes. Examples -------- >>> x = np.array([1,2,3], dtype=np.float64) >>> x.itemsize 8 >>> x = np.array([1,2,3], dtype=np.complex128) >>> x.itemsize 16 """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('flags', """ Information about the memory layout of the array. Attributes ---------- C_CONTIGUOUS (C) The data is in a single, C-style contiguous segment. F_CONTIGUOUS (F) The data is in a single, Fortran-style contiguous segment. OWNDATA (O) The array owns the memory it uses or borrows it from another object. WRITEABLE (W) The data area can be written to. Setting this to False locks the data, making it read-only. A view (slice, etc.) inherits WRITEABLE from its base array at creation time, but a view of a writeable array may be subsequently locked while the base array remains writeable. (The opposite is not true, in that a view of a locked array may not be made writeable. However, currently, locking a base object does not lock any views that already reference it, so under that circumstance it is possible to alter the contents of a locked array via a previously created writeable view onto it.) Attempting to change a non-writeable array raises a RuntimeError exception. ALIGNED (A) The data and all elements are aligned appropriately for the hardware. WRITEBACKIFCOPY (X) This array is a copy of some other array. The C-API function PyArray_ResolveWritebackIfCopy must be called before deallocating to the base array will be updated with the contents of this array. UPDATEIFCOPY (U) (Deprecated, use WRITEBACKIFCOPY) This array is a copy of some other array. When this array is deallocated, the base array will be updated with the contents of this array. FNC F_CONTIGUOUS and not C_CONTIGUOUS. 
FORC F_CONTIGUOUS or C_CONTIGUOUS (one-segment test). BEHAVED (B) ALIGNED and WRITEABLE. CARRAY (CA) BEHAVED and C_CONTIGUOUS. FARRAY (FA) BEHAVED and F_CONTIGUOUS and not C_CONTIGUOUS. Notes ----- The `flags` object can be accessed dictionary-like (as in ``a.flags['WRITEABLE']``), or by using lowercased attribute names (as in ``a.flags.writeable``). Short flag names are only supported in dictionary access. Only the WRITEBACKIFCOPY, UPDATEIFCOPY, WRITEABLE, and ALIGNED flags can be changed by the user, via direct assignment to the attribute or dictionary entry, or by calling `ndarray.setflags`. The array flags cannot be set arbitrarily: - UPDATEIFCOPY can only be set ``False``. - WRITEBACKIFCOPY can only be set ``False``. - ALIGNED can only be set ``True`` if the data is truly aligned. - WRITEABLE can only be set ``True`` if the array owns its own memory or the ultimate owner of the memory exposes a writeable buffer interface or is a string. Arrays can be both C-style and Fortran-style contiguous simultaneously. This is clear for 1-dimensional arrays, but can also be true for higher dimensional arrays. Even for contiguous arrays a stride for a given dimension ``arr.strides[dim]`` may be *arbitrary* if ``arr.shape[dim] == 1`` or the array has no elements. It does *not* generally hold that ``self.strides[-1] == self.itemsize`` for C-style contiguous arrays or ``self.strides[0] == self.itemsize`` for Fortran-style contiguous arrays is true. """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('flat', """ A 1-D iterator over the array. This is a `numpy.flatiter` instance, which acts similarly to, but is not a subclass of, Python's built-in iterator object. See Also -------- flatten : Return a copy of the array collapsed into one dimension. flatiter Examples -------- >>> x = np.arange(1, 7).reshape(2, 3) >>> x array([[1, 2, 3], [4, 5, 6]]) >>> x.flat[3] 4 >>> x.T array([[1, 4], [2, 5], [3, 6]]) >>> x.T.flat[3] 5 >>> type(x.flat) <class 'numpy.flatiter'> An assignment example: >>> x.flat = 3; x array([[3, 3, 3], [3, 3, 3]]) >>> x.flat[[1,4]] = 1; x array([[3, 1, 3], [3, 1, 3]]) """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('nbytes', """ Total bytes consumed by the elements of the array. Notes ----- Does not include memory consumed by non-element attributes of the array object. Examples -------- >>> x = np.zeros((3,5,2), dtype=np.complex128) >>> x.nbytes 480 >>> np.prod(x.shape) * x.itemsize 480 """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('ndim', """ Number of array dimensions. Examples -------- >>> x = np.array([1, 2, 3]) >>> x.ndim 1 >>> y = np.zeros((2, 3, 4)) >>> y.ndim 3 """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('real', """ The real part of the array. Examples -------- >>> x = np.sqrt([1+0j, 0+1j]) >>> x.real array([ 1. , 0.70710678]) >>> x.real.dtype dtype('float64') See Also -------- numpy.real : equivalent function """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('shape', """ Tuple of array dimensions. The shape property is usually used to get the current shape of an array, but may also be used to reshape the array in-place by assigning a tuple of array dimensions to it. As with `numpy.reshape`, one of the new shape dimensions can be -1, in which case its value is inferred from the size of the array and the remaining dimensions. Reshaping an array in-place will fail if a copy is required. 
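For instance, the ``-1`` inference described above can be sketched as follows (an added illustration, not part of the original docstring; the array ``z`` is arbitrary):

>>> z = np.zeros((2, 3, 4))
>>> z.shape = (6, -1)   # the -1 is inferred from the total size (24 elements)
>>> z.shape
(6, 4)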
Examples -------- >>> x = np.array([1, 2, 3, 4]) >>> x.shape (4,) >>> y = np.zeros((2, 3, 4)) >>> y.shape (2, 3, 4) >>> y.shape = (3, 8) >>> y array([[ 0., 0., 0., 0., 0., 0., 0., 0.], [ 0., 0., 0., 0., 0., 0., 0., 0.], [ 0., 0., 0., 0., 0., 0., 0., 0.]]) >>> y.shape = (3, 6) Traceback (most recent call last): File "<stdin>", line 1, in <module> ValueError: total size of new array must be unchanged >>> np.zeros((4,2))[::2].shape = (-1,) Traceback (most recent call last): File "<stdin>", line 1, in <module> AttributeError: incompatible shape for a non-contiguous array See Also -------- numpy.reshape : similar function ndarray.reshape : similar method """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('size', """ Number of elements in the array. Equal to ``np.prod(a.shape)``, i.e., the product of the array's dimensions. Notes ----- `a.size` returns a standard arbitrary precision Python integer. This may not be the case with other methods of obtaining the same value (like the suggested ``np.prod(a.shape)``, which returns an instance of ``np.int_``), and may be relevant if the value is used further in calculations that may overflow a fixed size integer type. Examples -------- >>> x = np.zeros((3, 5, 2), dtype=np.complex128) >>> x.size 30 >>> np.prod(x.shape) 30 """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('strides', """ Tuple of bytes to step in each dimension when traversing an array. The byte offset of element ``(i[0], i[1], ..., i[n])`` in an array `a` is:: offset = sum(np.array(i) * a.strides) A more detailed explanation of strides can be found in the "ndarray.rst" file in the NumPy reference guide. Notes ----- Imagine an array of 32-bit integers (each 4 bytes):: x = np.array([[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]], dtype=np.int32) This array is stored in memory as 40 bytes, one after the other (known as a contiguous block of memory). The strides of an array tell us how many bytes we have to skip in memory to move to the next position along a certain axis. For example, we have to skip 4 bytes (1 value) to move to the next column, but 20 bytes (5 values) to get to the same position in the next row. As such, the strides for the array `x` will be ``(20, 4)``. See Also -------- numpy.lib.stride_tricks.as_strided Examples -------- >>> y = np.reshape(np.arange(2*3*4), (2,3,4)) >>> y array([[[ 0, 1, 2, 3], [ 4, 5, 6, 7], [ 8, 9, 10, 11]], [[12, 13, 14, 15], [16, 17, 18, 19], [20, 21, 22, 23]]]) >>> y.strides (48, 16, 4) >>> y[1,1,1] 17 >>> offset=sum(y.strides * np.array((1,1,1))) >>> offset/y.itemsize 17 >>> x = np.reshape(np.arange(5*6*7*8), (5,6,7,8)).transpose(2,3,1,0) >>> x.strides (32, 4, 224, 1344) >>> i = np.array([3,5,2,2]) >>> offset = sum(i * x.strides) >>> x[3,5,2,2] 813 >>> offset / x.itemsize 813 """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('T', """ The transposed array. Same as ``self.transpose()``. Examples -------- >>> x = np.array([[1.,2.],[3.,4.]]) >>> x array([[ 1., 2.], [ 3., 4.]]) >>> x.T array([[ 1., 3.], [ 2., 4.]]) >>> x = np.array([1.,2.,3.,4.]) >>> x array([ 1., 2., 3., 4.]) >>> x.T array([ 1., 2., 3., 4.]) See Also -------- transpose """)) ############################################################################## # # ndarray methods # ############################################################################## add_newdoc('numpy.core.multiarray', 'ndarray', ('__array__', """ a.__array__(|dtype) -> reference if type unchanged, copy otherwise. 
Returns either a new reference to self if dtype is not given or a new array of provided data type if dtype is different from the current dtype of the array. """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_prepare__', """a.__array_prepare__(obj) -> Object of same type as ndarray object obj. """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_wrap__', """a.__array_wrap__(obj) -> Object of same type as ndarray object a. """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('__copy__', """a.__copy__() Used if :func:`copy.copy` is called on an array. Returns a copy of the array. Equivalent to ``a.copy(order='K')``. """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('__deepcopy__', """a.__deepcopy__(memo, /) -> Deep copy of array. Used if :func:`copy.deepcopy` is called on an array. """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('__reduce__', """a.__reduce__() For pickling. """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('__setstate__', """a.__setstate__(state, /) For unpickling. The `state` argument must be a sequence that contains the following elements: Parameters ---------- version : int optional pickle version. If omitted defaults to 0. shape : tuple dtype : data-type isFortran : bool rawdata : string or list a binary string with the data (or a list if 'a' is an object array) """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('all', """ a.all(axis=None, out=None, keepdims=False) Returns True if all elements evaluate to True. Refer to `numpy.all` for full documentation. See Also -------- numpy.all : equivalent function """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('any', """ a.any(axis=None, out=None, keepdims=False) Returns True if any of the elements of `a` evaluate to True. Refer to `numpy.any` for full documentation. See Also -------- numpy.any : equivalent function """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('argmax', """ a.argmax(axis=None, out=None) Return indices of the maximum values along the given axis. Refer to `numpy.argmax` for full documentation. See Also -------- numpy.argmax : equivalent function """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('argmin', """ a.argmin(axis=None, out=None) Return indices of the minimum values along the given axis of `a`. Refer to `numpy.argmin` for detailed documentation. See Also -------- numpy.argmin : equivalent function """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('argsort', """ a.argsort(axis=-1, kind=None, order=None) Returns the indices that would sort this array. Refer to `numpy.argsort` for full documentation. See Also -------- numpy.argsort : equivalent function """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('argpartition', """ a.argpartition(kth, axis=-1, kind='introselect', order=None) Returns the indices that would partition this array. Refer to `numpy.argpartition` for full documentation. .. versionadded:: 1.8.0 See Also -------- numpy.argpartition : equivalent function """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('astype', """ a.astype(dtype, order='K', casting='unsafe', subok=True, copy=True) Copy of the array, cast to a specified type. Parameters ---------- dtype : str or dtype Typecode or data-type to which the array is cast. order : {'C', 'F', 'A', 'K'}, optional Controls the memory layout order of the result. 'C' means C order, 'F' means Fortran order, 'A' means 'F' order if all the arrays are Fortran contiguous, 'C' order otherwise, and 'K' means as close to the order the array elements appear in memory as possible. Default is 'K'. 
casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional Controls what kind of data casting may occur. Defaults to 'unsafe' for backwards compatibility. * 'no' means the data types should not be cast at all. * 'equiv' means only byte-order changes are allowed. * 'safe' means only casts which can preserve values are allowed. * 'same_kind' means only safe casts or casts within a kind, like float64 to float32, are allowed. * 'unsafe' means any data conversions may be done. subok : bool, optional If True, then sub-classes will be passed-through (default), otherwise the returned array will be forced to be a base-class array. copy : bool, optional By default, astype always returns a newly allocated array. If this is set to false, and the `dtype`, `order`, and `subok` requirements are satisfied, the input array is returned instead of a copy. Returns ------- arr_t : ndarray Unless `copy` is False and the other conditions for returning the input array are satisfied (see description for `copy` input parameter), `arr_t` is a new array of the same shape as the input array, with dtype, order given by `dtype`, `order`. Notes ----- .. versionchanged:: 1.17.0 Casting between a simple data type and a structured one is possible only for "unsafe" casting. Casting to multiple fields is allowed, but casting from multiple fields is not. .. versionchanged:: 1.9.0 Casting from numeric to string types in 'safe' casting mode requires that the string dtype length is long enough to store the max integer/float value converted. Raises ------ ComplexWarning When casting from complex to float or int. To avoid this, one should use ``a.real.astype(t)``. Examples -------- >>> x = np.array([1, 2, 2.5]) >>> x array([1. , 2. , 2.5]) >>> x.astype(int) array([1, 2, 2]) """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('byteswap', """ a.byteswap(inplace=False) Swap the bytes of the array elements Toggle between low-endian and big-endian data representation by returning a byteswapped array, optionally swapped in-place. Arrays of byte-strings are not swapped. The real and imaginary parts of a complex number are swapped individually. Parameters ---------- inplace : bool, optional If ``True``, swap bytes in-place, default is ``False``. Returns ------- out : ndarray The byteswapped array. If `inplace` is ``True``, this is a view to self. Examples -------- >>> A = np.array([1, 256, 8755], dtype=np.int16) >>> list(map(hex, A)) ['0x1', '0x100', '0x2233'] >>> A.byteswap(inplace=True) array([ 256, 1, 13090], dtype=int16) >>> list(map(hex, A)) ['0x100', '0x1', '0x3322'] Arrays of byte-strings are not swapped >>> A = np.array([b'ceg', b'fac']) >>> A.byteswap() array([b'ceg', b'fac'], dtype='|S3') ``A.newbyteorder().byteswap()`` produces an array with the same values but different representation in memory >>> A = np.array([1, 2, 3]) >>> A.view(np.uint8) array([1, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0], dtype=uint8) >>> A.newbyteorder().byteswap(inplace=True) array([1, 2, 3]) >>> A.view(np.uint8) array([0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 3], dtype=uint8) """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('choose', """ a.choose(choices, out=None, mode='raise') Use an index array to construct a new array from a set of choices. Refer to `numpy.choose` for full documentation. 
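A brief added sketch of the indexing behaviour, mirroring the standard `numpy.choose` example (not part of the original method docstring):

>>> choices = [[0, 1, 2, 3], [10, 11, 12, 13],
...            [20, 21, 22, 23], [30, 31, 32, 33]]
>>> np.array([2, 3, 1, 0]).choose(choices)
array([20, 31, 12,  3])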
See Also -------- numpy.choose : equivalent function """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('clip', """ a.clip(min=None, max=None, out=None, **kwargs) Return an array whose values are limited to ``[min, max]``. One of max or min must be given. Refer to `numpy.clip` for full documentation. See Also -------- numpy.clip : equivalent function """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('compress', """ a.compress(condition, axis=None, out=None) Return selected slices of this array along given axis. Refer to `numpy.compress` for full documentation. See Also -------- numpy.compress : equivalent function """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('conj', """ a.conj() Complex-conjugate all elements. Refer to `numpy.conjugate` for full documentation. See Also -------- numpy.conjugate : equivalent function """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('conjugate', """ a.conjugate() Return the complex conjugate, element-wise. Refer to `numpy.conjugate` for full documentation. See Also -------- numpy.conjugate : equivalent function """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('copy', """ a.copy(order='C') Return a copy of the array. Parameters ---------- order : {'C', 'F', 'A', 'K'}, optional Controls the memory layout of the copy. 'C' means C-order, 'F' means F-order, 'A' means 'F' if `a` is Fortran contiguous, 'C' otherwise. 'K' means match the layout of `a` as closely as possible. (Note that this function and :func:`numpy.copy` are very similar, but have different default values for their order= arguments.) See also -------- numpy.copy numpy.copyto Examples -------- >>> x = np.array([[1,2,3],[4,5,6]], order='F') >>> y = x.copy() >>> x.fill(0) >>> x array([[0, 0, 0], [0, 0, 0]]) >>> y array([[1, 2, 3], [4, 5, 6]]) >>> y.flags['C_CONTIGUOUS'] True """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('cumprod', """ a.cumprod(axis=None, dtype=None, out=None) Return the cumulative product of the elements along the given axis. Refer to `numpy.cumprod` for full documentation. See Also -------- numpy.cumprod : equivalent function """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('cumsum', """ a.cumsum(axis=None, dtype=None, out=None) Return the cumulative sum of the elements along the given axis. Refer to `numpy.cumsum` for full documentation. See Also -------- numpy.cumsum : equivalent function """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('diagonal', """ a.diagonal(offset=0, axis1=0, axis2=1) Return specified diagonals. In NumPy 1.9 the returned array is a read-only view instead of a copy as in previous NumPy versions. In a future version the read-only restriction will be removed. Refer to :func:`numpy.diagonal` for full documentation. See Also -------- numpy.diagonal : equivalent function """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('dot', """ a.dot(b, out=None) Dot product of two arrays. Refer to `numpy.dot` for full documentation. See Also -------- numpy.dot : equivalent function Examples -------- >>> a = np.eye(2) >>> b = np.ones((2, 2)) * 2 >>> a.dot(b) array([[2., 2.], [2., 2.]]) This array method can be conveniently chained: >>> a.dot(b).dot(b) array([[8., 8.], [8., 8.]]) """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('dump', """a.dump(file) Dump a pickle of the array to the specified file. The array can be read back with pickle.load or numpy.load. Parameters ---------- file : str or Path A string naming the dump file. .. versionchanged:: 1.17.0 `pathlib.Path` objects are now accepted. 
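A rough round-trip sketch (added for illustration; the temporary-file handling and file name are assumptions, not part of the original docstring):

>>> import tempfile
>>> fname = tempfile.mkstemp()[1]
>>> a = np.array([1., 2., 3.])
>>> a.dump(fname)
>>> np.load(fname, allow_pickle=True)   # pickled files require allow_pickle
array([1., 2., 3.])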
""")) add_newdoc('numpy.core.multiarray', 'ndarray', ('dumps', """ a.dumps() Returns the pickle of the array as a string. pickle.loads or numpy.loads will convert the string back to an array. Parameters ---------- None """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('fill', """ a.fill(value) Fill the array with a scalar value. Parameters ---------- value : scalar All elements of `a` will be assigned this value. Examples -------- >>> a = np.array([1, 2]) >>> a.fill(0) >>> a array([0, 0]) >>> a = np.empty(2) >>> a.fill(1) >>> a array([1., 1.]) """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('flatten', """ a.flatten(order='C') Return a copy of the array collapsed into one dimension. Parameters ---------- order : {'C', 'F', 'A', 'K'}, optional 'C' means to flatten in row-major (C-style) order. 'F' means to flatten in column-major (Fortran- style) order. 'A' means to flatten in column-major order if `a` is Fortran *contiguous* in memory, row-major order otherwise. 'K' means to flatten `a` in the order the elements occur in memory. The default is 'C'. Returns ------- y : ndarray A copy of the input array, flattened to one dimension. See Also -------- ravel : Return a flattened array. flat : A 1-D flat iterator over the array. Examples -------- >>> a = np.array([[1,2], [3,4]]) >>> a.flatten() array([1, 2, 3, 4]) >>> a.flatten('F') array([1, 3, 2, 4]) """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('getfield', """ a.getfield(dtype, offset=0) Returns a field of the given array as a certain type. A field is a view of the array data with a given data-type. The values in the view are determined by the given type and the offset into the current array in bytes. The offset needs to be such that the view dtype fits in the array dtype; for example an array of dtype complex128 has 16-byte elements. If taking a view with a 32-bit integer (4 bytes), the offset needs to be between 0 and 12 bytes. Parameters ---------- dtype : str or dtype The data type of the view. The dtype size of the view can not be larger than that of the array itself. offset : int Number of bytes to skip before beginning the element view. Examples -------- >>> x = np.diag([1.+1.j]*2) >>> x[1, 1] = 2 + 4.j >>> x array([[1.+1.j, 0.+0.j], [0.+0.j, 2.+4.j]]) >>> x.getfield(np.float64) array([[1., 0.], [0., 2.]]) By choosing an offset of 8 bytes we can select the complex part of the array for our view: >>> x.getfield(np.float64, offset=8) array([[1., 0.], [0., 4.]]) """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('item', """ a.item(*args) Copy an element of an array to a standard Python scalar and return it. Parameters ---------- \\*args : Arguments (variable number and type) * none: in this case, the method only works for arrays with one element (`a.size == 1`), which element is copied into a standard Python scalar object and returned. * int_type: this argument is interpreted as a flat index into the array, specifying which element to copy and return. * tuple of int_types: functions as does a single int_type argument, except that the argument is interpreted as an nd-index into the array. Returns ------- z : Standard Python scalar object A copy of the specified element of the array as a suitable Python scalar Notes ----- When the data type of `a` is longdouble or clongdouble, item() returns a scalar array object because there is no available Python scalar that would not lose information. Void arrays return a buffer object for item(), unless fields are defined, in which case a tuple is returned. 
`item` is very similar to a[args], except, instead of an array scalar, a standard Python scalar is returned. This can be useful for speeding up access to elements of the array and doing arithmetic on elements of the array using Python's optimized math. Examples -------- >>> np.random.seed(123) >>> x = np.random.randint(9, size=(3, 3)) >>> x array([[2, 2, 6], [1, 3, 6], [1, 0, 1]]) >>> x.item(3) 1 >>> x.item(7) 0 >>> x.item((0, 1)) 2 >>> x.item((2, 2)) 1 """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('itemset', """ a.itemset(*args) Insert scalar into an array (scalar is cast to array's dtype, if possible) There must be at least 1 argument, and define the last argument as *item*. Then, ``a.itemset(*args)`` is equivalent to but faster than ``a[args] = item``. The item should be a scalar value and `args` must select a single item in the array `a`. Parameters ---------- \\*args : Arguments If one argument: a scalar, only used in case `a` is of size 1. If two arguments: the last argument is the value to be set and must be a scalar, the first argument specifies a single array element location. It is either an int or a tuple. Notes ----- Compared to indexing syntax, `itemset` provides some speed increase for placing a scalar into a particular location in an `ndarray`, if you must do this. However, generally this is discouraged: among other problems, it complicates the appearance of the code. Also, when using `itemset` (and `item`) inside a loop, be sure to assign the methods to a local variable to avoid the attribute look-up at each loop iteration. Examples -------- >>> np.random.seed(123) >>> x = np.random.randint(9, size=(3, 3)) >>> x array([[2, 2, 6], [1, 3, 6], [1, 0, 1]]) >>> x.itemset(4, 0) >>> x.itemset((2, 2), 9) >>> x array([[2, 2, 6], [1, 0, 6], [1, 0, 9]]) """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('max', """ a.max(axis=None, out=None, keepdims=False, initial=<no value>, where=True) Return the maximum along a given axis. Refer to `numpy.amax` for full documentation. See Also -------- numpy.amax : equivalent function """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('mean', """ a.mean(axis=None, dtype=None, out=None, keepdims=False) Returns the average of the array elements along given axis. Refer to `numpy.mean` for full documentation. See Also -------- numpy.mean : equivalent function """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('min', """ a.min(axis=None, out=None, keepdims=False, initial=<no value>, where=True) Return the minimum along a given axis. Refer to `numpy.amin` for full documentation. See Also -------- numpy.amin : equivalent function """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('newbyteorder', """ arr.newbyteorder(new_order='S') Return the array with the same data viewed with a different byte order. Equivalent to:: arr.view(arr.dtype.newbytorder(new_order)) Changes are also made in all fields and sub-arrays of the array data type. Parameters ---------- new_order : string, optional Byte order to force; a value from the byte order specifications below. `new_order` codes can be any of: * 'S' - swap dtype from current to opposite endian * {'<', 'L'} - little endian * {'>', 'B'} - big endian * {'=', 'N'} - native order * {'|', 'I'} - ignore (no change to byte order) The default value ('S') results in swapping the current byte order. The code does a case-insensitive check on the first letter of `new_order` for the alternatives above. For example, any of 'B' or 'b' or 'biggish' are valid to specify big-endian. 
Returns ------- new_arr : array New array object with the dtype reflecting given change to the byte order. """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('nonzero', """ a.nonzero() Return the indices of the elements that are non-zero. Refer to `numpy.nonzero` for full documentation. See Also -------- numpy.nonzero : equivalent function """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('prod', """ a.prod(axis=None, dtype=None, out=None, keepdims=False, initial=1, where=True) Return the product of the array elements over the given axis Refer to `numpy.prod` for full documentation. See Also -------- numpy.prod : equivalent function """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('ptp', """ a.ptp(axis=None, out=None, keepdims=False) Peak to peak (maximum - minimum) value along a given axis. Refer to `numpy.ptp` for full documentation. See Also -------- numpy.ptp : equivalent function """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('put', """ a.put(indices, values, mode='raise') Set ``a.flat[n] = values[n]`` for all `n` in indices. Refer to `numpy.put` for full documentation. See Also -------- numpy.put : equivalent function """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('ravel', """ a.ravel([order]) Return a flattened array. Refer to `numpy.ravel` for full documentation. See Also -------- numpy.ravel : equivalent function ndarray.flat : a flat iterator on the array. """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('repeat', """ a.repeat(repeats, axis=None) Repeat elements of an array. Refer to `numpy.repeat` for full documentation. See Also -------- numpy.repeat : equivalent function """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('reshape', """ a.reshape(shape, order='C') Returns an array containing the same data with a new shape. Refer to `numpy.reshape` for full documentation. See Also -------- numpy.reshape : equivalent function Notes ----- Unlike the free function `numpy.reshape`, this method on `ndarray` allows the elements of the shape parameter to be passed in as separate arguments. For example, ``a.reshape(10, 11)`` is equivalent to ``a.reshape((10, 11))``. """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('resize', """ a.resize(new_shape, refcheck=True) Change shape and size of array in-place. Parameters ---------- new_shape : tuple of ints, or `n` ints Shape of resized array. refcheck : bool, optional If False, reference count will not be checked. Default is True. Returns ------- None Raises ------ ValueError If `a` does not own its own data or references or views to it exist, and the data memory must be changed. PyPy only: will always raise if the data memory must be changed, since there is no reliable way to determine if references or views to it exist. SystemError If the `order` keyword argument is specified. This behaviour is a bug in NumPy. See Also -------- resize : Return a new array with the specified shape. Notes ----- This reallocates space for the data area if necessary. Only contiguous arrays (data elements consecutive in memory) can be resized. The purpose of the reference count check is to make sure you do not use this array as a buffer for another Python object and then reallocate the memory. However, reference counts can increase in other ways so if you are sure that you have not shared the memory for this array with another Python object, then you may safely set `refcheck` to False. 
Examples -------- Shrinking an array: array is flattened (in the order that the data are stored in memory), resized, and reshaped: >>> a = np.array([[0, 1], [2, 3]], order='C') >>> a.resize((2, 1)) >>> a array([[0], [1]]) >>> a = np.array([[0, 1], [2, 3]], order='F') >>> a.resize((2, 1)) >>> a array([[0], [2]]) Enlarging an array: as above, but missing entries are filled with zeros: >>> b = np.array([[0, 1], [2, 3]]) >>> b.resize(2, 3) # new_shape parameter doesn't have to be a tuple >>> b array([[0, 1, 2], [3, 0, 0]]) Referencing an array prevents resizing... >>> c = a >>> a.resize((1, 1)) Traceback (most recent call last): ... ValueError: cannot resize an array that references or is referenced ... Unless `refcheck` is False: >>> a.resize((1, 1), refcheck=False) >>> a array([[0]]) >>> c array([[0]]) """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('round', """ a.round(decimals=0, out=None) Return `a` with each element rounded to the given number of decimals. Refer to `numpy.around` for full documentation. See Also -------- numpy.around : equivalent function """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('searchsorted', """ a.searchsorted(v, side='left', sorter=None) Find indices where elements of v should be inserted in a to maintain order. For full documentation, see `numpy.searchsorted` See Also -------- numpy.searchsorted : equivalent function """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('setfield', """ a.setfield(val, dtype, offset=0) Put a value into a specified place in a field defined by a data-type. Place `val` into `a`'s field defined by `dtype` and beginning `offset` bytes into the field. Parameters ---------- val : object Value to be placed in field. dtype : dtype object Data-type of the field in which to place `val`. offset : int, optional The number of bytes into the field at which to place `val`. Returns ------- None See Also -------- getfield Examples -------- >>> x = np.eye(3) >>> x.getfield(np.float64) array([[1., 0., 0.], [0., 1., 0.], [0., 0., 1.]]) >>> x.setfield(3, np.int32) >>> x.getfield(np.int32) array([[3, 3, 3], [3, 3, 3], [3, 3, 3]], dtype=int32) >>> x array([[1.0e+000, 1.5e-323, 1.5e-323], [1.5e-323, 1.0e+000, 1.5e-323], [1.5e-323, 1.5e-323, 1.0e+000]]) >>> x.setfield(np.eye(3), np.int32) >>> x array([[1., 0., 0.], [0., 1., 0.], [0., 0., 1.]]) """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('setflags', """ a.setflags(write=None, align=None, uic=None) Set array flags WRITEABLE, ALIGNED, (WRITEBACKIFCOPY and UPDATEIFCOPY), respectively. These Boolean-valued flags affect how numpy interprets the memory area used by `a` (see Notes below). The ALIGNED flag can only be set to True if the data is actually aligned according to the type. The WRITEBACKIFCOPY and (deprecated) UPDATEIFCOPY flags can never be set to True. The flag WRITEABLE can only be set to True if the array owns its own memory, or the ultimate owner of the memory exposes a writeable buffer interface, or is a string. (The exception for string is made so that unpickling can be done without copying memory.) Parameters ---------- write : bool, optional Describes whether or not `a` can be written to. align : bool, optional Describes whether or not `a` is aligned properly for its type. uic : bool, optional Describes whether or not `a` is a copy of another "base" array. Notes ----- Array flags provide information about how the memory area used for the array is to be interpreted. 
There are 7 Boolean flags in use, only four of which can be changed by the user: WRITEBACKIFCOPY, UPDATEIFCOPY, WRITEABLE, and ALIGNED. WRITEABLE (W) the data area can be written to; ALIGNED (A) the data and strides are aligned appropriately for the hardware (as determined by the compiler); UPDATEIFCOPY (U) (deprecated), replaced by WRITEBACKIFCOPY; WRITEBACKIFCOPY (X) this array is a copy of some other array (referenced by .base). When the C-API function PyArray_ResolveWritebackIfCopy is called, the base array will be updated with the contents of this array. All flags can be accessed using the single (upper case) letter as well as the full name. Examples -------- >>> y = np.array([[3, 1, 7], ... [2, 0, 0], ... [8, 5, 9]]) >>> y array([[3, 1, 7], [2, 0, 0], [8, 5, 9]]) >>> y.flags C_CONTIGUOUS : True F_CONTIGUOUS : False OWNDATA : True WRITEABLE : True ALIGNED : True WRITEBACKIFCOPY : False UPDATEIFCOPY : False >>> y.setflags(write=0, align=0) >>> y.flags C_CONTIGUOUS : True F_CONTIGUOUS : False OWNDATA : True WRITEABLE : False ALIGNED : False WRITEBACKIFCOPY : False UPDATEIFCOPY : False >>> y.setflags(uic=1) Traceback (most recent call last): File "<stdin>", line 1, in <module> ValueError: cannot set WRITEBACKIFCOPY flag to True """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('sort', """ a.sort(axis=-1, kind=None, order=None) Sort an array in-place. Refer to `numpy.sort` for full documentation. Parameters ---------- axis : int, optional Axis along which to sort. Default is -1, which means sort along the last axis. kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, optional Sorting algorithm. The default is 'quicksort'. Note that both 'stable' and 'mergesort' use timsort under the covers and, in general, the actual implementation will vary with datatype. The 'mergesort' option is retained for backwards compatibility. .. versionchanged:: 1.15.0. The 'stable' option was added. order : str or list of str, optional When `a` is an array with fields defined, this argument specifies which fields to compare first, second, etc. A single field can be specified as a string, and not all fields need be specified, but unspecified fields will still be used, in the order in which they come up in the dtype, to break ties. See Also -------- numpy.sort : Return a sorted copy of an array. argsort : Indirect sort. lexsort : Indirect stable sort on multiple keys. searchsorted : Find elements in sorted array. partition: Partial sort. Notes ----- See `numpy.sort` for notes on the different sorting algorithms. Examples -------- >>> a = np.array([[1,4], [3,1]]) >>> a.sort(axis=1) >>> a array([[1, 4], [1, 3]]) >>> a.sort(axis=0) >>> a array([[1, 3], [1, 4]]) Use the `order` keyword to specify a field to use when sorting a structured array: >>> a = np.array([('a', 2), ('c', 1)], dtype=[('x', 'S1'), ('y', int)]) >>> a.sort(order='y') >>> a array([(b'c', 1), (b'a', 2)], dtype=[('x', 'S1'), ('y', '<i8')]) """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('partition', """ a.partition(kth, axis=-1, kind='introselect', order=None) Rearranges the elements in the array in such a way that the value of the element in kth position is in the position it would be in a sorted array. All elements smaller than the kth element are moved before this element and all equal or greater are moved behind it. The ordering of the elements in the two partitions is undefined. .. versionadded:: 1.8.0 Parameters ---------- kth : int or sequence of ints Element index to partition by. 
The kth element value will be in its final sorted position and all smaller
elements will be moved before it and all equal or greater elements behind
it. The order of all elements in the partitions is undefined. If provided
with a sequence of kth values, it will partition all elements indexed by
those values into their sorted positions at once.
axis : int, optional
    Axis along which to sort. Default is -1, which means sort along the
    last axis.
kind : {'introselect'}, optional
    Selection algorithm. Default is 'introselect'.
order : str or list of str, optional
    When `a` is an array with fields defined, this argument specifies
    which fields to compare first, second, etc. A single field can
    be specified as a string, and not all fields need to be specified,
    but unspecified fields will still be used, in the order in which
    they come up in the dtype, to break ties.

See Also
--------
numpy.partition : Return a partitioned copy of an array.
argpartition : Indirect partition.
sort : Full sort.

Notes
-----
See ``np.partition`` for notes on the different algorithms.

Examples
--------
>>> a = np.array([3, 4, 2, 1])
>>> a.partition(3)
>>> a
array([2, 1, 3, 4])

>>> a.partition((1, 3))
>>> a
array([1, 2, 3, 4])
"""))


add_newdoc('numpy.core.multiarray', 'ndarray', ('squeeze',
    """
a.squeeze(axis=None)

Remove single-dimensional entries from the shape of `a`.

Refer to `numpy.squeeze` for full documentation.

See Also
--------
numpy.squeeze : equivalent function
"""))


add_newdoc('numpy.core.multiarray', 'ndarray', ('std',
    """
a.std(axis=None, dtype=None, out=None, ddof=0, keepdims=False)

Returns the standard deviation of the array elements along the given axis.

Refer to `numpy.std` for full documentation.

See Also
--------
numpy.std : equivalent function
"""))


add_newdoc('numpy.core.multiarray', 'ndarray', ('sum',
    """
a.sum(axis=None, dtype=None, out=None, keepdims=False, initial=0, where=True)

Return the sum of the array elements over the given axis.

Refer to `numpy.sum` for full documentation.

See Also
--------
numpy.sum : equivalent function
"""))


add_newdoc('numpy.core.multiarray', 'ndarray', ('swapaxes',
    """
a.swapaxes(axis1, axis2)

Return a view of the array with `axis1` and `axis2` interchanged.

Refer to `numpy.swapaxes` for full documentation.

See Also
--------
numpy.swapaxes : equivalent function
"""))


add_newdoc('numpy.core.multiarray', 'ndarray', ('take',
    """
a.take(indices, axis=None, out=None, mode='raise')

Return an array formed from the elements of `a` at the given indices.

Refer to `numpy.take` for full documentation.

See Also
--------
numpy.take : equivalent function
"""))


add_newdoc('numpy.core.multiarray', 'ndarray', ('tofile',
    """
a.tofile(fid, sep="", format="%s")

Write array to a file as text or binary (default).

Data is always written in 'C' order, independent of the order of `a`.
The data produced by this method can be recovered using the function
fromfile().

Parameters
----------
fid : file or str or Path
    An open file object, or a string containing a filename.

    .. versionchanged:: 1.17.0
        `pathlib.Path` objects are now accepted.

sep : str
    Separator between array items for text output.
    If "" (empty), a binary file is written, equivalent to
    ``file.write(a.tobytes())``.
format : str
    Format string for text file output.
    Each entry in the array is formatted to text by first converting
    it to the closest Python type, and then using "format" % item.

Notes
-----
This is a convenience function for quick storage of array data.
Information on endianness and precision is lost, so this method is not a good choice for files intended to archive data or transport data between machines with different endianness. Some of these problems can be overcome by outputting the data as text files, at the expense of speed and file size. When fid is a file object, array contents are directly written to the file, bypassing the file object's ``write`` method. As a result, tofile cannot be used with files objects supporting compression (e.g., GzipFile) or file-like objects that do not support ``fileno()`` (e.g., BytesIO). """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('tolist', """ a.tolist() Return the array as an ``a.ndim``-levels deep nested list of Python scalars. Return a copy of the array data as a (nested) Python list. Data items are converted to the nearest compatible builtin Python type, via the `~numpy.ndarray.item` function. If ``a.ndim`` is 0, then since the depth of the nested list is 0, it will not be a list at all, but a simple Python scalar. Parameters ---------- none Returns ------- y : object, or list of object, or list of list of object, or ... The possibly nested list of array elements. Notes ----- The array may be recreated via ``a = np.array(a.tolist())``, although this may sometimes lose precision. Examples -------- For a 1D array, ``a.tolist()`` is almost the same as ``list(a)``: >>> a = np.array([1, 2]) >>> list(a) [1, 2] >>> a.tolist() [1, 2] However, for a 2D array, ``tolist`` applies recursively: >>> a = np.array([[1, 2], [3, 4]]) >>> list(a) [array([1, 2]), array([3, 4])] >>> a.tolist() [[1, 2], [3, 4]] The base case for this recursion is a 0D array: >>> a = np.array(1) >>> list(a) Traceback (most recent call last): ... TypeError: iteration over a 0-d array >>> a.tolist() 1 """)) tobytesdoc = """ a.{name}(order='C') Construct Python bytes containing the raw data bytes in the array. Constructs Python bytes showing a copy of the raw contents of data memory. The bytes object can be produced in either 'C' or 'Fortran', or 'Any' order (the default is 'C'-order). 'Any' order means C-order unless the F_CONTIGUOUS flag in the array is set, in which case it means 'Fortran' order. {deprecated} Parameters ---------- order : {{'C', 'F', None}}, optional Order of the data for multidimensional arrays: C, Fortran, or the same as for the original array. Returns ------- s : bytes Python bytes exhibiting a copy of `a`'s raw data. Examples -------- >>> x = np.array([[0, 1], [2, 3]], dtype='<u2') >>> x.tobytes() b'\\x00\\x00\\x01\\x00\\x02\\x00\\x03\\x00' >>> x.tobytes('C') == x.tobytes() True >>> x.tobytes('F') b'\\x00\\x00\\x02\\x00\\x01\\x00\\x03\\x00' """ add_newdoc('numpy.core.multiarray', 'ndarray', ('tostring', tobytesdoc.format(name='tostring', deprecated= 'This function is a compatibility ' 'alias for tobytes. Despite its ' 'name it returns bytes not ' 'strings.'))) add_newdoc('numpy.core.multiarray', 'ndarray', ('tobytes', tobytesdoc.format(name='tobytes', deprecated='.. versionadded:: 1.9.0'))) add_newdoc('numpy.core.multiarray', 'ndarray', ('trace', """ a.trace(offset=0, axis1=0, axis2=1, dtype=None, out=None) Return the sum along diagonals of the array. Refer to `numpy.trace` for full documentation. See Also -------- numpy.trace : equivalent function """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('transpose', """ a.transpose(*axes) Returns a view of the array with axes transposed. For a 1-D array this has no effect, as a transposed vector is simply the same vector. 
To convert a 1-D array into a 2-D column vector, an additional
dimension must be added. `np.atleast_2d(a).T` achieves this, as does
`a[:, np.newaxis]`.
For a 2-D array, this is a standard matrix transpose.
For an n-D array, if axes are given, their order indicates how the
axes are permuted (see Examples). If axes are not provided and
``a.shape = (i[0], i[1], ... i[n-2], i[n-1])``, then
``a.transpose().shape = (i[n-1], i[n-2], ... i[1], i[0])``.

Parameters
----------
axes : None, tuple of ints, or `n` ints

 * None or no argument: reverses the order of the axes.

 * tuple of ints: `i` in the `j`-th place in the tuple means `a`'s
   `i`-th axis becomes `a.transpose()`'s `j`-th axis.

 * `n` ints: same as an n-tuple of the same ints (this form is
   intended simply as a "convenience" alternative to the tuple form)

Returns
-------
out : ndarray
    View of `a`, with axes suitably permuted.

See Also
--------
ndarray.T : Array property returning the array transposed.
ndarray.reshape : Give a new shape to an array without changing its data.

Examples
--------
>>> a = np.array([[1, 2], [3, 4]])
>>> a
array([[1, 2],
       [3, 4]])
>>> a.transpose()
array([[1, 3],
       [2, 4]])
>>> a.transpose((1, 0))
array([[1, 3],
       [2, 4]])
>>> a.transpose(1, 0)
array([[1, 3],
       [2, 4]])
"""))


add_newdoc('numpy.core.multiarray', 'ndarray', ('var',
    """
a.var(axis=None, dtype=None, out=None, ddof=0, keepdims=False)

Returns the variance of the array elements along the given axis.

Refer to `numpy.var` for full documentation.

See Also
--------
numpy.var : equivalent function
"""))


add_newdoc('numpy.core.multiarray', 'ndarray', ('view',
    """
a.view(dtype=None, type=None)

New view of array with the same data.

Parameters
----------
dtype : data-type or ndarray sub-class, optional
    Data-type descriptor of the returned view, e.g., float32 or int16.
    The default, None, results in the view having the same data-type
    as `a`. This argument can also be specified as an ndarray
    sub-class, which then specifies the type of the returned object
    (this is equivalent to setting the ``type`` parameter).
type : Python type, optional
    Type of the returned view, e.g., ndarray or matrix. Again, the
    default None results in type preservation.

Notes
-----
``a.view()`` is used two different ways:

``a.view(some_dtype)`` or ``a.view(dtype=some_dtype)`` constructs a view
of the array's memory with a different data-type. This can cause a
reinterpretation of the bytes of memory.

``a.view(ndarray_subclass)`` or ``a.view(type=ndarray_subclass)`` just
returns an instance of `ndarray_subclass` that looks at the same array
(same shape, dtype, etc.) This does not cause a reinterpretation of the
memory.

For ``a.view(some_dtype)``, if ``some_dtype`` has a different number of
bytes per entry than the previous dtype (for example, converting a
regular array to a structured array), then the behavior of the view
cannot be predicted just from the superficial appearance of ``a`` (shown
by ``print(a)``). It also depends on exactly how ``a`` is stored in
memory. Therefore if ``a`` is C-ordered versus fortran-ordered, versus
defined as a slice or transpose, etc., the view may give different
results.
Examples -------- >>> x = np.array([(1, 2)], dtype=[('a', np.int8), ('b', np.int8)]) Viewing array data using a different type and dtype: >>> y = x.view(dtype=np.int16, type=np.matrix) >>> y matrix([[513]], dtype=int16) >>> print(type(y)) <class 'numpy.matrix'> Creating a view on a structured array so it can be used in calculations >>> x = np.array([(1, 2),(3,4)], dtype=[('a', np.int8), ('b', np.int8)]) >>> xv = x.view(dtype=np.int8).reshape(-1,2) >>> xv array([[1, 2], [3, 4]], dtype=int8) >>> xv.mean(0) array([2., 3.]) Making changes to the view changes the underlying array >>> xv[0,1] = 20 >>> x array([(1, 20), (3, 4)], dtype=[('a', 'i1'), ('b', 'i1')]) Using a view to convert an array to a recarray: >>> z = x.view(np.recarray) >>> z.a array([1, 3], dtype=int8) Views share data: >>> x[0] = (9, 10) >>> z[0] (9, 10) Views that change the dtype size (bytes per entry) should normally be avoided on arrays defined by slices, transposes, fortran-ordering, etc.: >>> x = np.array([[1,2,3],[4,5,6]], dtype=np.int16) >>> y = x[:, 0:2] >>> y array([[1, 2], [4, 5]], dtype=int16) >>> y.view(dtype=[('width', np.int16), ('length', np.int16)]) Traceback (most recent call last): ... ValueError: To change to a dtype of a different size, the array must be C-contiguous >>> z = y.copy() >>> z.view(dtype=[('width', np.int16), ('length', np.int16)]) array([[(1, 2)], [(4, 5)]], dtype=[('width', '<i2'), ('length', '<i2')]) """)) ############################################################################## # # umath functions # ############################################################################## add_newdoc('numpy.core.umath', 'frompyfunc', """ frompyfunc(func, nin, nout) Takes an arbitrary Python function and returns a NumPy ufunc. Can be used, for example, to add broadcasting to a built-in Python function (see Examples section). Parameters ---------- func : Python function object An arbitrary Python function. nin : int The number of input arguments. nout : int The number of objects returned by `func`. Returns ------- out : ufunc Returns a NumPy universal function (``ufunc``) object. See Also -------- vectorize : evaluates pyfunc over input arrays using broadcasting rules of numpy Notes ----- The returned ufunc always returns PyObject arrays. Examples -------- Use frompyfunc to add broadcasting to the Python function ``oct``: >>> oct_array = np.frompyfunc(oct, 1, 1) >>> oct_array(np.array((10, 30, 100))) array(['0o12', '0o36', '0o144'], dtype=object) >>> np.array((oct(10), oct(30), oct(100))) # for comparison array(['0o12', '0o36', '0o144'], dtype='<U5') """) add_newdoc('numpy.core.umath', 'geterrobj', """ geterrobj() Return the current object that defines floating-point error handling. The error object contains all information that defines the error handling behavior in NumPy. `geterrobj` is used internally by the other functions that get and set error handling behavior (`geterr`, `seterr`, `geterrcall`, `seterrcall`). Returns ------- errobj : list The error object, a list containing three elements: [internal numpy buffer size, error mask, error callback function]. The error mask is a single integer that holds the treatment information on all four floating point errors. The information for each error type is contained in three bits of the integer. If we print it in base 8, we can see what treatment is set for "invalid", "under", "over", and "divide" (in that order). 
The printed string can be interpreted with * 0 : 'ignore' * 1 : 'warn' * 2 : 'raise' * 3 : 'call' * 4 : 'print' * 5 : 'log' See Also -------- seterrobj, seterr, geterr, seterrcall, geterrcall getbufsize, setbufsize Notes ----- For complete documentation of the types of floating-point exceptions and treatment options, see `seterr`. Examples -------- >>> np.geterrobj() # first get the defaults [8192, 521, None] >>> def err_handler(type, flag): ... print("Floating point error (%s), with flag %s" % (type, flag)) ... >>> old_bufsize = np.setbufsize(20000) >>> old_err = np.seterr(divide='raise') >>> old_handler = np.seterrcall(err_handler) >>> np.geterrobj() [8192, 521, <function err_handler at 0x91dcaac>] >>> old_err = np.seterr(all='ignore') >>> np.base_repr(np.geterrobj()[1], 8) '0' >>> old_err = np.seterr(divide='warn', over='log', under='call', ... invalid='print') >>> np.base_repr(np.geterrobj()[1], 8) '4351' """) add_newdoc('numpy.core.umath', 'seterrobj', """ seterrobj(errobj) Set the object that defines floating-point error handling. The error object contains all information that defines the error handling behavior in NumPy. `seterrobj` is used internally by the other functions that set error handling behavior (`seterr`, `seterrcall`). Parameters ---------- errobj : list The error object, a list containing three elements: [internal numpy buffer size, error mask, error callback function]. The error mask is a single integer that holds the treatment information on all four floating point errors. The information for each error type is contained in three bits of the integer. If we print it in base 8, we can see what treatment is set for "invalid", "under", "over", and "divide" (in that order). The printed string can be interpreted with * 0 : 'ignore' * 1 : 'warn' * 2 : 'raise' * 3 : 'call' * 4 : 'print' * 5 : 'log' See Also -------- geterrobj, seterr, geterr, seterrcall, geterrcall getbufsize, setbufsize Notes ----- For complete documentation of the types of floating-point exceptions and treatment options, see `seterr`. Examples -------- >>> old_errobj = np.geterrobj() # first get the defaults >>> old_errobj [8192, 521, None] >>> def err_handler(type, flag): ... print("Floating point error (%s), with flag %s" % (type, flag)) ... >>> new_errobj = [20000, 12, err_handler] >>> np.seterrobj(new_errobj) >>> np.base_repr(12, 8) # int for divide=4 ('print') and over=1 ('warn') '14' >>> np.geterr() {'over': 'warn', 'divide': 'print', 'invalid': 'ignore', 'under': 'ignore'} >>> np.geterrcall() is err_handler True """) ############################################################################## # # compiled_base functions # ############################################################################## add_newdoc('numpy.core.multiarray', 'add_docstring', """ add_docstring(obj, docstring) Add a docstring to a built-in obj if possible. If the obj already has a docstring raise a RuntimeError If this routine does not know how to add a docstring to the object raise a TypeError """) add_newdoc('numpy.core.umath', '_add_newdoc_ufunc', """ add_ufunc_docstring(ufunc, new_docstring) Replace the docstring for a ufunc with new_docstring. This method will only work if the current docstring for the ufunc is NULL. (At the C level, i.e. when ufunc->doc is NULL.) Parameters ---------- ufunc : numpy.ufunc A ufunc whose current doc is NULL. new_docstring : string The new docstring for the ufunc. Notes ----- This method allocates memory for new_docstring on the heap. 
Technically this creates a mempory leak, since this memory will not be reclaimed until the end of the program even if the ufunc itself is removed. However this will only be a problem if the user is repeatedly creating ufuncs with no documentation, adding documentation via add_newdoc_ufunc, and then throwing away the ufunc. """) add_newdoc('numpy.core._multiarray_tests', 'format_float_OSprintf_g', """ format_float_OSprintf_g(val, precision) Print a floating point scalar using the system's printf function, equivalent to: printf("%.*g", precision, val); for half/float/double, or replacing 'g' by 'Lg' for longdouble. This method is designed to help cross-validate the format_float_* methods. Parameters ---------- val : python float or numpy floating scalar Value to format. precision : non-negative integer, optional Precision given to printf. Returns ------- rep : string The string representation of the floating point value See Also -------- format_float_scientific format_float_positional """) ############################################################################## # # Documentation for ufunc attributes and methods # ############################################################################## ############################################################################## # # ufunc object # ############################################################################## add_newdoc('numpy.core', 'ufunc', """ Functions that operate element by element on whole arrays. To see the documentation for a specific ufunc, use `info`. For example, ``np.info(np.sin)``. Because ufuncs are written in C (for speed) and linked into Python with NumPy's ufunc facility, Python's help() function finds this page whenever help() is called on a ufunc. A detailed explanation of ufuncs can be found in the docs for :ref:`ufuncs`. Calling ufuncs: =============== op(*x[, out], where=True, **kwargs) Apply `op` to the arguments `*x` elementwise, broadcasting the arguments. The broadcasting rules are: * Dimensions of length 1 may be prepended to either array. * Arrays may be repeated along dimensions of length 1. Parameters ---------- *x : array_like Input arrays. out : ndarray, None, or tuple of ndarray and None, optional Alternate array object(s) in which to put the result; if provided, it must have a shape that the inputs broadcast to. A tuple of arrays (possible only as a keyword argument) must have length equal to the number of outputs; use `None` for uninitialized outputs to be allocated by the ufunc. where : array_like, optional This condition is broadcast over the input. At locations where the condition is True, the `out` array will be set to the ufunc result. Elsewhere, the `out` array will retain its original value. Note that if an uninitialized `out` array is created via the default ``out=None``, locations within it where the condition is False will remain uninitialized. **kwargs For other keyword-only arguments, see the :ref:`ufunc docs <ufuncs.kwargs>`. Returns ------- r : ndarray or tuple of ndarray `r` will have the shape that the arrays in `x` broadcast to; if `out` is provided, it will be returned. If not, `r` will be allocated and may contain uninitialized values. If the function has more than one output, then the result will be a tuple of arrays. """) ############################################################################## # # ufunc attributes # ############################################################################## add_newdoc('numpy.core', 'ufunc', ('identity', """ The identity value. 
Data attribute containing the identity element for the ufunc, if it has one. If it does not, the attribute value is None. Examples -------- >>> np.add.identity 0 >>> np.multiply.identity 1 >>> np.power.identity 1 >>> print(np.exp.identity) None """)) add_newdoc('numpy.core', 'ufunc', ('nargs', """ The number of arguments. Data attribute containing the number of arguments the ufunc takes, including optional ones. Notes ----- Typically this value will be one more than what you might expect because all ufuncs take the optional "out" argument. Examples -------- >>> np.add.nargs 3 >>> np.multiply.nargs 3 >>> np.power.nargs 3 >>> np.exp.nargs 2 """)) add_newdoc('numpy.core', 'ufunc', ('nin', """ The number of inputs. Data attribute containing the number of arguments the ufunc treats as input. Examples -------- >>> np.add.nin 2 >>> np.multiply.nin 2 >>> np.power.nin 2 >>> np.exp.nin 1 """)) add_newdoc('numpy.core', 'ufunc', ('nout', """ The number of outputs. Data attribute containing the number of arguments the ufunc treats as output. Notes ----- Since all ufuncs can take output arguments, this will always be (at least) 1. Examples -------- >>> np.add.nout 1 >>> np.multiply.nout 1 >>> np.power.nout 1 >>> np.exp.nout 1 """)) add_newdoc('numpy.core', 'ufunc', ('ntypes', """ The number of types. The number of numerical NumPy types - of which there are 18 total - on which the ufunc can operate. See Also -------- numpy.ufunc.types Examples -------- >>> np.add.ntypes 18 >>> np.multiply.ntypes 18 >>> np.power.ntypes 17 >>> np.exp.ntypes 7 >>> np.remainder.ntypes 14 """)) add_newdoc('numpy.core', 'ufunc', ('types', """ Returns a list with types grouped input->output. Data attribute listing the data-type "Domain-Range" groupings the ufunc can deliver. The data-types are given using the character codes. See Also -------- numpy.ufunc.ntypes Examples -------- >>> np.add.types ['??->?', 'bb->b', 'BB->B', 'hh->h', 'HH->H', 'ii->i', 'II->I', 'll->l', 'LL->L', 'qq->q', 'QQ->Q', 'ff->f', 'dd->d', 'gg->g', 'FF->F', 'DD->D', 'GG->G', 'OO->O'] >>> np.multiply.types ['??->?', 'bb->b', 'BB->B', 'hh->h', 'HH->H', 'ii->i', 'II->I', 'll->l', 'LL->L', 'qq->q', 'QQ->Q', 'ff->f', 'dd->d', 'gg->g', 'FF->F', 'DD->D', 'GG->G', 'OO->O'] >>> np.power.types ['bb->b', 'BB->B', 'hh->h', 'HH->H', 'ii->i', 'II->I', 'll->l', 'LL->L', 'qq->q', 'QQ->Q', 'ff->f', 'dd->d', 'gg->g', 'FF->F', 'DD->D', 'GG->G', 'OO->O'] >>> np.exp.types ['f->f', 'd->d', 'g->g', 'F->F', 'D->D', 'G->G', 'O->O'] >>> np.remainder.types ['bb->b', 'BB->B', 'hh->h', 'HH->H', 'ii->i', 'II->I', 'll->l', 'LL->L', 'qq->q', 'QQ->Q', 'ff->f', 'dd->d', 'gg->g', 'OO->O'] """)) add_newdoc('numpy.core', 'ufunc', ('signature', """ Definition of the core elements a generalized ufunc operates on. The signature determines how the dimensions of each input/output array are split into core and loop dimensions: 1. Each dimension in the signature is matched to a dimension of the corresponding passed-in array, starting from the end of the shape tuple. 2. Core dimensions assigned to the same label in the signature must have exactly matching sizes, no broadcasting is performed. 3. The core dimensions are removed from all inputs and the remaining dimensions are broadcast together, defining the loop dimensions. Notes ----- Generalized ufuncs are used internally in many linalg functions, and in the testing suite; the examples below are taken from these. For ufuncs that operate on scalars, the signature is `None`, which is equivalent to '()' for every argument. 
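As a small illustration of how core dimensions are matched and stripped
before the remaining loop dimensions are broadcast (a sketch reusing the
internal ``np.core.umath_tests.matrix_multiply`` gufunc that also appears
in the Examples below; it is not part of the public API):

>>> mm = np.core.umath_tests.matrix_multiply  # signature '(m,n),(n,p)->(m,p)'
>>> mm(np.ones((10, 2, 3)), np.ones((10, 3, 4))).shape
(10, 2, 4)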
Examples -------- >>> np.core.umath_tests.matrix_multiply.signature '(m,n),(n,p)->(m,p)' >>> np.linalg._umath_linalg.det.signature '(m,m)->()' >>> np.add.signature is None True # equivalent to '(),()->()' """)) ############################################################################## # # ufunc methods # ############################################################################## add_newdoc('numpy.core', 'ufunc', ('reduce', """ reduce(a, axis=0, dtype=None, out=None, keepdims=False, initial=<no value>, where=True) Reduces `a`'s dimension by one, by applying ufunc along one axis. Let :math:`a.shape = (N_0, ..., N_i, ..., N_{M-1})`. Then :math:`ufunc.reduce(a, axis=i)[k_0, ..,k_{i-1}, k_{i+1}, .., k_{M-1}]` = the result of iterating `j` over :math:`range(N_i)`, cumulatively applying ufunc to each :math:`a[k_0, ..,k_{i-1}, j, k_{i+1}, .., k_{M-1}]`. For a one-dimensional array, reduce produces results equivalent to: :: r = op.identity # op = ufunc for i in range(len(A)): r = op(r, A[i]) return r For example, add.reduce() is equivalent to sum(). Parameters ---------- a : array_like The array to act on. axis : None or int or tuple of ints, optional Axis or axes along which a reduction is performed. The default (`axis` = 0) is perform a reduction over the first dimension of the input array. `axis` may be negative, in which case it counts from the last to the first axis. .. versionadded:: 1.7.0 If this is `None`, a reduction is performed over all the axes. If this is a tuple of ints, a reduction is performed on multiple axes, instead of a single axis or all the axes as before. For operations which are either not commutative or not associative, doing a reduction over multiple axes is not well-defined. The ufuncs do not currently raise an exception in this case, but will likely do so in the future. dtype : data-type code, optional The type used to represent the intermediate results. Defaults to the data-type of the output array if this is provided, or the data-type of the input array if no output array is provided. out : ndarray, None, or tuple of ndarray and None, optional A location into which the result is stored. If not provided or `None`, a freshly-allocated array is returned. For consistency with ``ufunc.__call__``, if given as a keyword, this may be wrapped in a 1-element tuple. .. versionchanged:: 1.13.0 Tuples are allowed for keyword argument. keepdims : bool, optional If this is set to True, the axes which are reduced are left in the result as dimensions with size one. With this option, the result will broadcast correctly against the original `arr`. .. versionadded:: 1.7.0 initial : scalar, optional The value with which to start the reduction. If the ufunc has no identity or the dtype is object, this defaults to None - otherwise it defaults to ufunc.identity. If ``None`` is given, the first element of the reduction is used, and an error is thrown if the reduction is empty. .. versionadded:: 1.15.0 where : array_like of bool, optional A boolean array which is broadcasted to match the dimensions of `a`, and selects elements to include in the reduction. Note that for ufuncs like ``minimum`` that do not have an identity defined, one has to pass in also ``initial``. .. versionadded:: 1.17.0 Returns ------- r : ndarray The reduced array. If `out` was supplied, `r` is a reference to it. 
Examples -------- >>> np.multiply.reduce([2,3,5]) 30 A multi-dimensional array example: >>> X = np.arange(8).reshape((2,2,2)) >>> X array([[[0, 1], [2, 3]], [[4, 5], [6, 7]]]) >>> np.add.reduce(X, 0) array([[ 4, 6], [ 8, 10]]) >>> np.add.reduce(X) # confirm: default axis value is 0 array([[ 4, 6], [ 8, 10]]) >>> np.add.reduce(X, 1) array([[ 2, 4], [10, 12]]) >>> np.add.reduce(X, 2) array([[ 1, 5], [ 9, 13]]) You can use the ``initial`` keyword argument to initialize the reduction with a different value, and ``where`` to select specific elements to include: >>> np.add.reduce([10], initial=5) 15 >>> np.add.reduce(np.ones((2, 2, 2)), axis=(0, 2), initial=10) array([14., 14.]) >>> a = np.array([10., np.nan, 10]) >>> np.add.reduce(a, where=~np.isnan(a)) 20.0 Allows reductions of empty arrays where they would normally fail, i.e. for ufuncs without an identity. >>> np.minimum.reduce([], initial=np.inf) inf >>> np.minimum.reduce([[1., 2.], [3., 4.]], initial=10., where=[True, False]) array([ 1., 10.]) >>> np.minimum.reduce([]) Traceback (most recent call last): ... ValueError: zero-size array to reduction operation minimum which has no identity """)) add_newdoc('numpy.core', 'ufunc', ('accumulate', """ accumulate(array, axis=0, dtype=None, out=None) Accumulate the result of applying the operator to all elements. For a one-dimensional array, accumulate produces results equivalent to:: r = np.empty(len(A)) t = op.identity # op = the ufunc being applied to A's elements for i in range(len(A)): t = op(t, A[i]) r[i] = t return r For example, add.accumulate() is equivalent to np.cumsum(). For a multi-dimensional array, accumulate is applied along only one axis (axis zero by default; see Examples below) so repeated use is necessary if one wants to accumulate over multiple axes. Parameters ---------- array : array_like The array to act on. axis : int, optional The axis along which to apply the accumulation; default is zero. dtype : data-type code, optional The data-type used to represent the intermediate results. Defaults to the data-type of the output array if such is provided, or the the data-type of the input array if no output array is provided. out : ndarray, None, or tuple of ndarray and None, optional A location into which the result is stored. If not provided or `None`, a freshly-allocated array is returned. For consistency with ``ufunc.__call__``, if given as a keyword, this may be wrapped in a 1-element tuple. .. versionchanged:: 1.13.0 Tuples are allowed for keyword argument. Returns ------- r : ndarray The accumulated values. If `out` was supplied, `r` is a reference to `out`. Examples -------- 1-D array examples: >>> np.add.accumulate([2, 3, 5]) array([ 2, 5, 10]) >>> np.multiply.accumulate([2, 3, 5]) array([ 2, 6, 30]) 2-D array examples: >>> I = np.eye(2) >>> I array([[1., 0.], [0., 1.]]) Accumulate along axis 0 (rows), down columns: >>> np.add.accumulate(I, 0) array([[1., 0.], [1., 1.]]) >>> np.add.accumulate(I) # no axis specified = axis zero array([[1., 0.], [1., 1.]]) Accumulate along axis 1 (columns), through rows: >>> np.add.accumulate(I, 1) array([[1., 1.], [0., 1.]]) """)) add_newdoc('numpy.core', 'ufunc', ('reduceat', """ reduceat(a, indices, axis=0, dtype=None, out=None) Performs a (local) reduce with specified slices over a single axis. 
For i in ``range(len(indices))``, `reduceat` computes ``ufunc.reduce(a[indices[i]:indices[i+1]])``, which becomes the i-th generalized "row" parallel to `axis` in the final result (i.e., in a 2-D array, for example, if `axis = 0`, it becomes the i-th row, but if `axis = 1`, it becomes the i-th column). There are three exceptions to this: * when ``i = len(indices) - 1`` (so for the last index), ``indices[i+1] = a.shape[axis]``. * if ``indices[i] >= indices[i + 1]``, the i-th generalized "row" is simply ``a[indices[i]]``. * if ``indices[i] >= len(a)`` or ``indices[i] < 0``, an error is raised. The shape of the output depends on the size of `indices`, and may be larger than `a` (this happens if ``len(indices) > a.shape[axis]``). Parameters ---------- a : array_like The array to act on. indices : array_like Paired indices, comma separated (not colon), specifying slices to reduce. axis : int, optional The axis along which to apply the reduceat. dtype : data-type code, optional The type used to represent the intermediate results. Defaults to the data type of the output array if this is provided, or the data type of the input array if no output array is provided. out : ndarray, None, or tuple of ndarray and None, optional A location into which the result is stored. If not provided or `None`, a freshly-allocated array is returned. For consistency with ``ufunc.__call__``, if given as a keyword, this may be wrapped in a 1-element tuple. .. versionchanged:: 1.13.0 Tuples are allowed for keyword argument. Returns ------- r : ndarray The reduced values. If `out` was supplied, `r` is a reference to `out`. Notes ----- A descriptive example: If `a` is 1-D, the function `ufunc.accumulate(a)` is the same as ``ufunc.reduceat(a, indices)[::2]`` where `indices` is ``range(len(array) - 1)`` with a zero placed in every other element: ``indices = zeros(2 * len(a) - 1)``, ``indices[1::2] = range(1, len(a))``. Don't be fooled by this attribute's name: `reduceat(a)` is not necessarily smaller than `a`. Examples -------- To take the running sum of four successive values: >>> np.add.reduceat(np.arange(8),[0,4, 1,5, 2,6, 3,7])[::2] array([ 6, 10, 14, 18]) A 2-D example: >>> x = np.linspace(0, 15, 16).reshape(4,4) >>> x array([[ 0., 1., 2., 3.], [ 4., 5., 6., 7.], [ 8., 9., 10., 11.], [12., 13., 14., 15.]]) :: # reduce such that the result has the following five rows: # [row1 + row2 + row3] # [row4] # [row2] # [row3] # [row1 + row2 + row3 + row4] >>> np.add.reduceat(x, [0, 3, 1, 2, 0]) array([[12., 15., 18., 21.], [12., 13., 14., 15.], [ 4., 5., 6., 7.], [ 8., 9., 10., 11.], [24., 28., 32., 36.]]) :: # reduce such that result has the following two columns: # [col1 * col2 * col3, col4] >>> np.multiply.reduceat(x, [0, 3], 1) array([[ 0., 3.], [ 120., 7.], [ 720., 11.], [2184., 15.]]) """)) add_newdoc('numpy.core', 'ufunc', ('outer', """ outer(A, B, **kwargs) Apply the ufunc `op` to all pairs (a, b) with a in `A` and b in `B`. Let ``M = A.ndim``, ``N = B.ndim``. Then the result, `C`, of ``op.outer(A, B)`` is an array of dimension M + N such that: .. math:: C[i_0, ..., i_{M-1}, j_0, ..., j_{N-1}] = op(A[i_0, ..., i_{M-1}], B[j_0, ..., j_{N-1}]) For `A` and `B` one-dimensional, this is equivalent to:: r = empty(len(A),len(B)) for i in range(len(A)): for j in range(len(B)): r[i,j] = op(A[i], B[j]) # op = ufunc in question Parameters ---------- A : array_like First array B : array_like Second array kwargs : any Arguments to pass on to the ufunc. Typically `dtype` or `out`. 
Returns ------- r : ndarray Output array See Also -------- numpy.outer Examples -------- >>> np.multiply.outer([1, 2, 3], [4, 5, 6]) array([[ 4, 5, 6], [ 8, 10, 12], [12, 15, 18]]) A multi-dimensional example: >>> A = np.array([[1, 2, 3], [4, 5, 6]]) >>> A.shape (2, 3) >>> B = np.array([[1, 2, 3, 4]]) >>> B.shape (1, 4) >>> C = np.multiply.outer(A, B) >>> C.shape; C (2, 3, 1, 4) array([[[[ 1, 2, 3, 4]], [[ 2, 4, 6, 8]], [[ 3, 6, 9, 12]]], [[[ 4, 8, 12, 16]], [[ 5, 10, 15, 20]], [[ 6, 12, 18, 24]]]]) """)) add_newdoc('numpy.core', 'ufunc', ('at', """ at(a, indices, b=None) Performs unbuffered in place operation on operand 'a' for elements specified by 'indices'. For addition ufunc, this method is equivalent to ``a[indices] += b``, except that results are accumulated for elements that are indexed more than once. For example, ``a[[0,0]] += 1`` will only increment the first element once because of buffering, whereas ``add.at(a, [0,0], 1)`` will increment the first element twice. .. versionadded:: 1.8.0 Parameters ---------- a : array_like The array to perform in place operation on. indices : array_like or tuple Array like index object or slice object for indexing into first operand. If first operand has multiple dimensions, indices can be a tuple of array like index objects or slice objects. b : array_like Second operand for ufuncs requiring two operands. Operand must be broadcastable over first operand after indexing or slicing. Examples -------- Set items 0 and 1 to their negative values: >>> a = np.array([1, 2, 3, 4]) >>> np.negative.at(a, [0, 1]) >>> a array([-1, -2, 3, 4]) Increment items 0 and 1, and increment item 2 twice: >>> a = np.array([1, 2, 3, 4]) >>> np.add.at(a, [0, 1, 2, 2], 1) >>> a array([2, 3, 5, 4]) Add items 0 and 1 in first array to second array, and store results in first array: >>> a = np.array([1, 2, 3, 4]) >>> b = np.array([1, 2]) >>> np.add.at(a, [0, 1], b) >>> a array([2, 4, 3, 4]) """)) ############################################################################## # # Documentation for dtype attributes and methods # ############################################################################## ############################################################################## # # dtype object # ############################################################################## add_newdoc('numpy.core.multiarray', 'dtype', """ dtype(obj, align=False, copy=False) Create a data type object. A numpy array is homogeneous, and contains elements described by a dtype object. A dtype object can be constructed from different combinations of fundamental numeric types. Parameters ---------- obj Object to be converted to a data type object. align : bool, optional Add padding to the fields to match what a C compiler would output for a similar C-struct. Can be ``True`` only if `obj` is a dictionary or a comma-separated string. If a struct dtype is being created, this also sets a sticky alignment flag ``isalignedstruct``. copy : bool, optional Make a new copy of the data-type object. If ``False``, the result may just be a reference to a built-in data-type object. 
See also -------- result_type Examples -------- Using array-scalar type: >>> np.dtype(np.int16) dtype('int16') Structured type, one field name 'f1', containing int16: >>> np.dtype([('f1', np.int16)]) dtype([('f1', '<i2')]) Structured type, one field named 'f1', in itself containing a structured type with one field: >>> np.dtype([('f1', [('f1', np.int16)])]) dtype([('f1', [('f1', '<i2')])]) Structured type, two fields: the first field contains an unsigned int, the second an int32: >>> np.dtype([('f1', np.uint64), ('f2', np.int32)]) dtype([('f1', '<u8'), ('f2', '<i4')]) Using array-protocol type strings: >>> np.dtype([('a','f8'),('b','S10')]) dtype([('a', '<f8'), ('b', 'S10')]) Using comma-separated field formats. The shape is (2,3): >>> np.dtype("i4, (2,3)f8") dtype([('f0', '<i4'), ('f1', '<f8', (2, 3))]) Using tuples. ``int`` is a fixed type, 3 the field's shape. ``void`` is a flexible type, here of size 10: >>> np.dtype([('hello',(np.int64,3)),('world',np.void,10)]) dtype([('hello', '<i8', (3,)), ('world', 'V10')]) Subdivide ``int16`` into 2 ``int8``'s, called x and y. 0 and 1 are the offsets in bytes: >>> np.dtype((np.int16, {'x':(np.int8,0), 'y':(np.int8,1)})) dtype((numpy.int16, [('x', 'i1'), ('y', 'i1')])) Using dictionaries. Two fields named 'gender' and 'age': >>> np.dtype({'names':['gender','age'], 'formats':['S1',np.uint8]}) dtype([('gender', 'S1'), ('age', 'u1')]) Offsets in bytes, here 0 and 25: >>> np.dtype({'surname':('S25',0),'age':(np.uint8,25)}) dtype([('surname', 'S25'), ('age', 'u1')]) """) ############################################################################## # # dtype attributes # ############################################################################## add_newdoc('numpy.core.multiarray', 'dtype', ('alignment', """ The required alignment (bytes) of this data-type according to the compiler. More information is available in the C-API section of the manual. Examples -------- >>> x = np.dtype('i4') >>> x.alignment 4 >>> x = np.dtype(float) >>> x.alignment 8 """)) add_newdoc('numpy.core.multiarray', 'dtype', ('byteorder', """ A character indicating the byte-order of this data-type object. One of: === ============== '=' native '<' little-endian '>' big-endian '|' not applicable === ============== All built-in data-type objects have byteorder either '=' or '|'. Examples -------- >>> dt = np.dtype('i2') >>> dt.byteorder '=' >>> # endian is not relevant for 8 bit numbers >>> np.dtype('i1').byteorder '|' >>> # or ASCII strings >>> np.dtype('S2').byteorder '|' >>> # Even if specific code is given, and it is native >>> # '=' is the byteorder >>> import sys >>> sys_is_le = sys.byteorder == 'little' >>> native_code = sys_is_le and '<' or '>' >>> swapped_code = sys_is_le and '>' or '<' >>> dt = np.dtype(native_code + 'i2') >>> dt.byteorder '=' >>> # Swapped code shows up as itself >>> dt = np.dtype(swapped_code + 'i2') >>> dt.byteorder == swapped_code True """)) add_newdoc('numpy.core.multiarray', 'dtype', ('char', """A unique character code for each of the 21 different built-in types. Examples -------- >>> x = np.dtype(float) >>> x.char 'd' """)) add_newdoc('numpy.core.multiarray', 'dtype', ('descr', """ `__array_interface__` description of the data-type. The format is that required by the 'descr' key in the `__array_interface__` attribute. Warning: This attribute exists specifically for `__array_interface__`, and is not a datatype description compatible with `np.dtype`. 
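In particular, padding bytes inside an aligned structured dtype show up in
`descr` as unnamed ``'V'`` (void) entries, which is one reason the output is
not generally a field specification that can simply be passed back to
`np.dtype` (a sketch, assuming a little-endian platform):

>>> dt = np.dtype({'names': ['a', 'b'], 'formats': ['i1', 'f8']}, align=True)
>>> dt.descr
[('a', '|i1'), ('', '|V7'), ('b', '<f8')]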
Examples -------- >>> x = np.dtype(float) >>> x.descr [('', '<f8')] >>> dt = np.dtype([('name', np.str_, 16), ('grades', np.float64, (2,))]) >>> dt.descr [('name', '<U16'), ('grades', '<f8', (2,))] """)) add_newdoc('numpy.core.multiarray', 'dtype', ('fields', """ Dictionary of named fields defined for this data type, or ``None``. The dictionary is indexed by keys that are the names of the fields. Each entry in the dictionary is a tuple fully describing the field:: (dtype, offset[, title]) Offset is limited to C int, which is signed and usually 32 bits. If present, the optional title can be any object (if it is a string or unicode then it will also be a key in the fields dictionary, otherwise it's meta-data). Notice also that the first two elements of the tuple can be passed directly as arguments to the ``ndarray.getfield`` and ``ndarray.setfield`` methods. See Also -------- ndarray.getfield, ndarray.setfield Examples -------- >>> dt = np.dtype([('name', np.str_, 16), ('grades', np.float64, (2,))]) >>> print(dt.fields) {'grades': (dtype(('float64',(2,))), 16), 'name': (dtype('|S16'), 0)} """)) add_newdoc('numpy.core.multiarray', 'dtype', ('flags', """ Bit-flags describing how this data type is to be interpreted. Bit-masks are in `numpy.core.multiarray` as the constants `ITEM_HASOBJECT`, `LIST_PICKLE`, `ITEM_IS_POINTER`, `NEEDS_INIT`, `NEEDS_PYAPI`, `USE_GETITEM`, `USE_SETITEM`. A full explanation of these flags is in C-API documentation; they are largely useful for user-defined data-types. The following example demonstrates that operations on this particular dtype requires Python C-API. Examples -------- >>> x = np.dtype([('a', np.int32, 8), ('b', np.float64, 6)]) >>> x.flags 16 >>> np.core.multiarray.NEEDS_PYAPI 16 """)) add_newdoc('numpy.core.multiarray', 'dtype', ('hasobject', """ Boolean indicating whether this dtype contains any reference-counted objects in any fields or sub-dtypes. Recall that what is actually in the ndarray memory representing the Python object is the memory address of that object (a pointer). Special handling may be required, and this attribute is useful for distinguishing data types that may contain arbitrary Python objects and data-types that won't. """)) add_newdoc('numpy.core.multiarray', 'dtype', ('isbuiltin', """ Integer indicating how this dtype relates to the built-in dtypes. Read-only. = ======================================================================== 0 if this is a structured array type, with fields 1 if this is a dtype compiled into numpy (such as ints, floats etc) 2 if the dtype is for a user-defined numpy type A user-defined type uses the numpy C-API machinery to extend numpy to handle a new array type. See :ref:`user.user-defined-data-types` in the NumPy manual. = ======================================================================== Examples -------- >>> dt = np.dtype('i2') >>> dt.isbuiltin 1 >>> dt = np.dtype('f8') >>> dt.isbuiltin 1 >>> dt = np.dtype([('field1', 'f8')]) >>> dt.isbuiltin 0 """)) add_newdoc('numpy.core.multiarray', 'dtype', ('isnative', """ Boolean indicating whether the byte order of this dtype is native to the platform. """)) add_newdoc('numpy.core.multiarray', 'dtype', ('isalignedstruct', """ Boolean indicating whether the dtype is a struct which maintains field alignment. This flag is sticky, so when combining multiple structs together, it is preserved and produces new dtypes which are also aligned. """)) add_newdoc('numpy.core.multiarray', 'dtype', ('itemsize', """ The element size of this data-type object. 
For 18 of the 21 types this number is fixed by the data-type. For the flexible data-types, this number can be anything. Examples -------- >>> arr = np.array([[1, 2], [3, 4]]) >>> arr.dtype dtype('int64') >>> arr.itemsize 8 >>> dt = np.dtype([('name', np.str_, 16), ('grades', np.float64, (2,))]) >>> dt.itemsize 80 """)) add_newdoc('numpy.core.multiarray', 'dtype', ('kind', """ A character code (one of 'biufcmMOSUV') identifying the general kind of data. = ====================== b boolean i signed integer u unsigned integer f floating-point c complex floating-point m timedelta M datetime O object S (byte-)string U Unicode V void = ====================== Examples -------- >>> dt = np.dtype('i4') >>> dt.kind 'i' >>> dt = np.dtype('f8') >>> dt.kind 'f' >>> dt = np.dtype([('field1', 'f8')]) >>> dt.kind 'V' """)) add_newdoc('numpy.core.multiarray', 'dtype', ('name', """ A bit-width name for this data-type. Un-sized flexible data-type objects do not have this attribute. Examples -------- >>> x = np.dtype(float) >>> x.name 'float64' >>> x = np.dtype([('a', np.int32, 8), ('b', np.float64, 6)]) >>> x.name 'void640' """)) add_newdoc('numpy.core.multiarray', 'dtype', ('names', """ Ordered list of field names, or ``None`` if there are no fields. The names are ordered according to increasing byte offset. This can be used, for example, to walk through all of the named fields in offset order. Examples -------- >>> dt = np.dtype([('name', np.str_, 16), ('grades', np.float64, (2,))]) >>> dt.names ('name', 'grades') """)) add_newdoc('numpy.core.multiarray', 'dtype', ('num', """ A unique number for each of the 21 different built-in types. These are roughly ordered from least-to-most precision. Examples -------- >>> dt = np.dtype(str) >>> dt.num 19 >>> dt = np.dtype(float) >>> dt.num 12 """)) add_newdoc('numpy.core.multiarray', 'dtype', ('shape', """ Shape tuple of the sub-array if this data type describes a sub-array, and ``()`` otherwise. Examples -------- >>> dt = np.dtype(('i4', 4)) >>> dt.shape (4,) >>> dt = np.dtype(('i4', (2, 3))) >>> dt.shape (2, 3) """)) add_newdoc('numpy.core.multiarray', 'dtype', ('ndim', """ Number of dimensions of the sub-array if this data type describes a sub-array, and ``0`` otherwise. .. versionadded:: 1.13.0 Examples -------- >>> x = np.dtype(float) >>> x.ndim 0 >>> x = np.dtype((float, 8)) >>> x.ndim 1 >>> x = np.dtype(('i4', (3, 4))) >>> x.ndim 2 """)) add_newdoc('numpy.core.multiarray', 'dtype', ('str', """The array-protocol typestring of this data-type object.""")) add_newdoc('numpy.core.multiarray', 'dtype', ('subdtype', """ Tuple ``(item_dtype, shape)`` if this `dtype` describes a sub-array, and None otherwise. The *shape* is the fixed shape of the sub-array described by this data type, and *item_dtype* the data type of the array. If a field whose dtype object has this attribute is retrieved, then the extra dimensions implied by *shape* are tacked on to the end of the retrieved array. See Also -------- dtype.base Examples -------- >>> x = numpy.dtype('8f') >>> x.subdtype (dtype('float32'), (8,)) >>> x = numpy.dtype('i2') >>> x.subdtype >>> """)) add_newdoc('numpy.core.multiarray', 'dtype', ('base', """ Returns dtype for the base element of the subarrays, regardless of their dimension or shape. 
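For a multi-dimensional sub-array, all of the sub-array dimensions are
stripped at once (a minimal sketch):

>>> np.dtype(('i4', (2, 3))).base
dtype('int32')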
See Also -------- dtype.subdtype Examples -------- >>> x = numpy.dtype('8f') >>> x.base dtype('float32') >>> x = numpy.dtype('i2') >>> x.base dtype('int16') """)) add_newdoc('numpy.core.multiarray', 'dtype', ('type', """The type object used to instantiate a scalar of this data-type.""")) ############################################################################## # # dtype methods # ############################################################################## add_newdoc('numpy.core.multiarray', 'dtype', ('newbyteorder', """ newbyteorder(new_order='S') Return a new dtype with a different byte order. Changes are also made in all fields and sub-arrays of the data type. Parameters ---------- new_order : string, optional Byte order to force; a value from the byte order specifications below. The default value ('S') results in swapping the current byte order. `new_order` codes can be any of: * 'S' - swap dtype from current to opposite endian * {'<', 'L'} - little endian * {'>', 'B'} - big endian * {'=', 'N'} - native order * {'|', 'I'} - ignore (no change to byte order) The code does a case-insensitive check on the first letter of `new_order` for these alternatives. For example, any of '>' or 'B' or 'b' or 'brian' are valid to specify big-endian. Returns ------- new_dtype : dtype New dtype object with the given change to the byte order. Notes ----- Changes are also made in all fields and sub-arrays of the data type. Examples -------- >>> import sys >>> sys_is_le = sys.byteorder == 'little' >>> native_code = sys_is_le and '<' or '>' >>> swapped_code = sys_is_le and '>' or '<' >>> native_dt = np.dtype(native_code+'i2') >>> swapped_dt = np.dtype(swapped_code+'i2') >>> native_dt.newbyteorder('S') == swapped_dt True >>> native_dt.newbyteorder() == swapped_dt True >>> native_dt == swapped_dt.newbyteorder('S') True >>> native_dt == swapped_dt.newbyteorder('=') True >>> native_dt == swapped_dt.newbyteorder('N') True >>> native_dt == native_dt.newbyteorder('|') True >>> np.dtype('<i2') == native_dt.newbyteorder('<') True >>> np.dtype('<i2') == native_dt.newbyteorder('L') True >>> np.dtype('>i2') == native_dt.newbyteorder('>') True >>> np.dtype('>i2') == native_dt.newbyteorder('B') True """)) ############################################################################## # # Datetime-related Methods # ############################################################################## add_newdoc('numpy.core.multiarray', 'busdaycalendar', """ busdaycalendar(weekmask='1111100', holidays=None) A business day calendar object that efficiently stores information defining valid days for the busday family of functions. The default valid days are Monday through Friday ("business days"). A busdaycalendar object can be specified with any set of weekly valid days, plus an optional "holiday" dates that always will be invalid. Once a busdaycalendar object is created, the weekmask and holidays cannot be modified. .. versionadded:: 1.7.0 Parameters ---------- weekmask : str or array_like of bool, optional A seven-element array indicating which of Monday through Sunday are valid days. May be specified as a length-seven list or array, like [1,1,1,1,1,0,0]; a length-seven string, like '1111100'; or a string like "Mon Tue Wed Thu Fri", made up of 3-character abbreviations for weekdays, optionally separated by white space. Valid abbreviations are: Mon Tue Wed Thu Fri Sat Sun holidays : array_like of datetime64[D], optional An array of dates to consider as invalid dates, no matter which weekday they fall upon. 
Holiday dates may be specified in any order, and NaT (not-a-time) dates are ignored. This list is saved in a normalized form that is suited for fast calculations of valid days. Returns ------- out : busdaycalendar A business day calendar object containing the specified weekmask and holidays values. See Also -------- is_busday : Returns a boolean array indicating valid days. busday_offset : Applies an offset counted in valid days. busday_count : Counts how many valid days are in a half-open date range. Attributes ---------- Note: once a busdaycalendar object is created, you cannot modify the weekmask or holidays. The attributes return copies of internal data. weekmask : (copy) seven-element array of bool holidays : (copy) sorted array of datetime64[D] Examples -------- >>> # Some important days in July ... bdd = np.busdaycalendar( ... holidays=['2011-07-01', '2011-07-04', '2011-07-17']) >>> # Default is Monday to Friday weekdays ... bdd.weekmask array([ True, True, True, True, True, False, False]) >>> # Any holidays already on the weekend are removed ... bdd.holidays array(['2011-07-01', '2011-07-04'], dtype='datetime64[D]') """) add_newdoc('numpy.core.multiarray', 'busdaycalendar', ('weekmask', """A copy of the seven-element boolean mask indicating valid days.""")) add_newdoc('numpy.core.multiarray', 'busdaycalendar', ('holidays', """A copy of the holiday array indicating additional invalid days.""")) add_newdoc('numpy.core.multiarray', 'normalize_axis_index', """ normalize_axis_index(axis, ndim, msg_prefix=None) Normalizes an axis index, `axis`, such that is a valid positive index into the shape of array with `ndim` dimensions. Raises an AxisError with an appropriate message if this is not possible. Used internally by all axis-checking logic. .. versionadded:: 1.13.0 Parameters ---------- axis : int The un-normalized index of the axis. Can be negative ndim : int The number of dimensions of the array that `axis` should be normalized against msg_prefix : str A prefix to put before the message, typically the name of the argument Returns ------- normalized_axis : int The normalized axis index, such that `0 <= normalized_axis < ndim` Raises ------ AxisError If the axis index is invalid, when `-ndim <= axis < ndim` is false. Examples -------- >>> normalize_axis_index(0, ndim=3) 0 >>> normalize_axis_index(1, ndim=3) 1 >>> normalize_axis_index(-1, ndim=3) 2 >>> normalize_axis_index(3, ndim=3) Traceback (most recent call last): ... AxisError: axis 3 is out of bounds for array of dimension 3 >>> normalize_axis_index(-4, ndim=3, msg_prefix='axes_arg') Traceback (most recent call last): ... AxisError: axes_arg: axis -4 is out of bounds for array of dimension 3 """) add_newdoc('numpy.core.multiarray', 'datetime_data', """ datetime_data(dtype, /) Get information about the step size of a date or time type. The returned tuple can be passed as the second argument of `numpy.datetime64` and `numpy.timedelta64`. Parameters ---------- dtype : dtype The dtype object, which must be a `datetime64` or `timedelta64` type. Returns ------- unit : str The :ref:`datetime unit <arrays.dtypes.dateunits>` on which this dtype is based. count : int The number of base units in a step. 
Examples -------- >>> dt_25s = np.dtype('timedelta64[25s]') >>> np.datetime_data(dt_25s) ('s', 25) >>> np.array(10, dt_25s).astype('timedelta64[s]') array(250, dtype='timedelta64[s]') The result can be used to construct a datetime that uses the same units as a timedelta >>> np.datetime64('2010', np.datetime_data(dt_25s)) numpy.datetime64('2010-01-01T00:00:00','25s') """) ############################################################################## # # Documentation for `generic` attributes and methods # ############################################################################## add_newdoc('numpy.core.numerictypes', 'generic', """ Base class for numpy scalar types. Class from which most (all?) numpy scalar types are derived. For consistency, exposes the same API as `ndarray`, despite many consequent attributes being either "get-only," or completely irrelevant. This is the class from which it is strongly suggested users should derive custom scalar types. """) # Attributes add_newdoc('numpy.core.numerictypes', 'generic', ('T', """ Not implemented (virtual attribute) Class generic exists solely to derive numpy scalars from, and possesses, albeit unimplemented, all the attributes of the ndarray class so as to provide a uniform API. See also the corresponding attribute of the derived class of interest. """)) add_newdoc('numpy.core.numerictypes', 'generic', ('base', """ Not implemented (virtual attribute) Class generic exists solely to derive numpy scalars from, and possesses, albeit unimplemented, all the attributes of the ndarray class so as to a uniform API. See also the corresponding attribute of the derived class of interest. """)) add_newdoc('numpy.core.numerictypes', 'generic', ('data', """Pointer to start of data.""")) add_newdoc('numpy.core.numerictypes', 'generic', ('dtype', """Get array data-descriptor.""")) add_newdoc('numpy.core.numerictypes', 'generic', ('flags', """The integer value of flags.""")) add_newdoc('numpy.core.numerictypes', 'generic', ('flat', """A 1-D view of the scalar.""")) add_newdoc('numpy.core.numerictypes', 'generic', ('imag', """The imaginary part of the scalar.""")) add_newdoc('numpy.core.numerictypes', 'generic', ('itemsize', """The length of one element in bytes.""")) add_newdoc('numpy.core.numerictypes', 'generic', ('nbytes', """The length of the scalar in bytes.""")) add_newdoc('numpy.core.numerictypes', 'generic', ('ndim', """The number of array dimensions.""")) add_newdoc('numpy.core.numerictypes', 'generic', ('real', """The real part of the scalar.""")) add_newdoc('numpy.core.numerictypes', 'generic', ('shape', """Tuple of array dimensions.""")) add_newdoc('numpy.core.numerictypes', 'generic', ('size', """The number of elements in the gentype.""")) add_newdoc('numpy.core.numerictypes', 'generic', ('strides', """Tuple of bytes steps in each dimension.""")) # Methods add_newdoc('numpy.core.numerictypes', 'generic', ('all', """ Not implemented (virtual attribute) Class generic exists solely to derive numpy scalars from, and possesses, albeit unimplemented, all the attributes of the ndarray class so as to provide a uniform API. See also the corresponding attribute of the derived class of interest. """)) add_newdoc('numpy.core.numerictypes', 'generic', ('any', """ Not implemented (virtual attribute) Class generic exists solely to derive numpy scalars from, and possesses, albeit unimplemented, all the attributes of the ndarray class so as to provide a uniform API. See also the corresponding attribute of the derived class of interest. 
""")) add_newdoc('numpy.core.numerictypes', 'generic', ('argmax', """ Not implemented (virtual attribute) Class generic exists solely to derive numpy scalars from, and possesses, albeit unimplemented, all the attributes of the ndarray class so as to provide a uniform API. See also the corresponding attribute of the derived class of interest. """)) add_newdoc('numpy.core.numerictypes', 'generic', ('argmin', """ Not implemented (virtual attribute) Class generic exists solely to derive numpy scalars from, and possesses, albeit unimplemented, all the attributes of the ndarray class so as to provide a uniform API. See also the corresponding attribute of the derived class of interest. """)) add_newdoc('numpy.core.numerictypes', 'generic', ('argsort', """ Not implemented (virtual attribute) Class generic exists solely to derive numpy scalars from, and possesses, albeit unimplemented, all the attributes of the ndarray class so as to provide a uniform API. See also the corresponding attribute of the derived class of interest. """)) add_newdoc('numpy.core.numerictypes', 'generic', ('astype', """ Not implemented (virtual attribute) Class generic exists solely to derive numpy scalars from, and possesses, albeit unimplemented, all the attributes of the ndarray class so as to provide a uniform API. See also the corresponding attribute of the derived class of interest. """)) add_newdoc('numpy.core.numerictypes', 'generic', ('byteswap', """ Not implemented (virtual attribute) Class generic exists solely to derive numpy scalars from, and possesses, albeit unimplemented, all the attributes of the ndarray class so as to provide a uniform API. See also the corresponding attribute of the derived class of interest. """)) add_newdoc('numpy.core.numerictypes', 'generic', ('choose', """ Not implemented (virtual attribute) Class generic exists solely to derive numpy scalars from, and possesses, albeit unimplemented, all the attributes of the ndarray class so as to provide a uniform API. See also the corresponding attribute of the derived class of interest. """)) add_newdoc('numpy.core.numerictypes', 'generic', ('clip', """ Not implemented (virtual attribute) Class generic exists solely to derive numpy scalars from, and possesses, albeit unimplemented, all the attributes of the ndarray class so as to provide a uniform API. See also the corresponding attribute of the derived class of interest. """)) add_newdoc('numpy.core.numerictypes', 'generic', ('compress', """ Not implemented (virtual attribute) Class generic exists solely to derive numpy scalars from, and possesses, albeit unimplemented, all the attributes of the ndarray class so as to provide a uniform API. See also the corresponding attribute of the derived class of interest. """)) add_newdoc('numpy.core.numerictypes', 'generic', ('conjugate', """ Not implemented (virtual attribute) Class generic exists solely to derive numpy scalars from, and possesses, albeit unimplemented, all the attributes of the ndarray class so as to provide a uniform API. See also the corresponding attribute of the derived class of interest. """)) add_newdoc('numpy.core.numerictypes', 'generic', ('copy', """ Not implemented (virtual attribute) Class generic exists solely to derive numpy scalars from, and possesses, albeit unimplemented, all the attributes of the ndarray class so as to provide a uniform API. See also the corresponding attribute of the derived class of interest. 
""")) add_newdoc('numpy.core.numerictypes', 'generic', ('cumprod', """ Not implemented (virtual attribute) Class generic exists solely to derive numpy scalars from, and possesses, albeit unimplemented, all the attributes of the ndarray class so as to provide a uniform API. See also the corresponding attribute of the derived class of interest. """)) add_newdoc('numpy.core.numerictypes', 'generic', ('cumsum', """ Not implemented (virtual attribute) Class generic exists solely to derive numpy scalars from, and possesses, albeit unimplemented, all the attributes of the ndarray class so as to provide a uniform API. See also the corresponding attribute of the derived class of interest. """)) add_newdoc('numpy.core.numerictypes', 'generic', ('diagonal', """ Not implemented (virtual attribute) Class generic exists solely to derive numpy scalars from, and possesses, albeit unimplemented, all the attributes of the ndarray class so as to provide a uniform API. See also the corresponding attribute of the derived class of interest. """)) add_newdoc('numpy.core.numerictypes', 'generic', ('dump', """ Not implemented (virtual attribute) Class generic exists solely to derive numpy scalars from, and possesses, albeit unimplemented, all the attributes of the ndarray class so as to provide a uniform API. See also the corresponding attribute of the derived class of interest. """)) add_newdoc('numpy.core.numerictypes', 'generic', ('dumps', """ Not implemented (virtual attribute) Class generic exists solely to derive numpy scalars from, and possesses, albeit unimplemented, all the attributes of the ndarray class so as to provide a uniform API. See also the corresponding attribute of the derived class of interest. """)) add_newdoc('numpy.core.numerictypes', 'generic', ('fill', """ Not implemented (virtual attribute) Class generic exists solely to derive numpy scalars from, and possesses, albeit unimplemented, all the attributes of the ndarray class so as to provide a uniform API. See also the corresponding attribute of the derived class of interest. """)) add_newdoc('numpy.core.numerictypes', 'generic', ('flatten', """ Not implemented (virtual attribute) Class generic exists solely to derive numpy scalars from, and possesses, albeit unimplemented, all the attributes of the ndarray class so as to provide a uniform API. See also the corresponding attribute of the derived class of interest. """)) add_newdoc('numpy.core.numerictypes', 'generic', ('getfield', """ Not implemented (virtual attribute) Class generic exists solely to derive numpy scalars from, and possesses, albeit unimplemented, all the attributes of the ndarray class so as to provide a uniform API. See also the corresponding attribute of the derived class of interest. """)) add_newdoc('numpy.core.numerictypes', 'generic', ('item', """ Not implemented (virtual attribute) Class generic exists solely to derive numpy scalars from, and possesses, albeit unimplemented, all the attributes of the ndarray class so as to provide a uniform API. See also the corresponding attribute of the derived class of interest. """)) add_newdoc('numpy.core.numerictypes', 'generic', ('itemset', """ Not implemented (virtual attribute) Class generic exists solely to derive numpy scalars from, and possesses, albeit unimplemented, all the attributes of the ndarray class so as to provide a uniform API. See also the corresponding attribute of the derived class of interest. 
""")) add_newdoc('numpy.core.numerictypes', 'generic', ('max', """ Not implemented (virtual attribute) Class generic exists solely to derive numpy scalars from, and possesses, albeit unimplemented, all the attributes of the ndarray class so as to provide a uniform API. See also the corresponding attribute of the derived class of interest. """)) add_newdoc('numpy.core.numerictypes', 'generic', ('mean', """ Not implemented (virtual attribute) Class generic exists solely to derive numpy scalars from, and possesses, albeit unimplemented, all the attributes of the ndarray class so as to provide a uniform API. See also the corresponding attribute of the derived class of interest. """)) add_newdoc('numpy.core.numerictypes', 'generic', ('min', """ Not implemented (virtual attribute) Class generic exists solely to derive numpy scalars from, and possesses, albeit unimplemented, all the attributes of the ndarray class so as to provide a uniform API. See also the corresponding attribute of the derived class of interest. """)) add_newdoc('numpy.core.numerictypes', 'generic', ('newbyteorder', """ newbyteorder(new_order='S') Return a new `dtype` with a different byte order. Changes are also made in all fields and sub-arrays of the data type. The `new_order` code can be any from the following: * 'S' - swap dtype from current to opposite endian * {'<', 'L'} - little endian * {'>', 'B'} - big endian * {'=', 'N'} - native order * {'|', 'I'} - ignore (no change to byte order) Parameters ---------- new_order : str, optional Byte order to force; a value from the byte order specifications above. The default value ('S') results in swapping the current byte order. The code does a case-insensitive check on the first letter of `new_order` for the alternatives above. For example, any of 'B' or 'b' or 'biggish' are valid to specify big-endian. Returns ------- new_dtype : dtype New `dtype` object with the given change to the byte order. """)) add_newdoc('numpy.core.numerictypes', 'generic', ('nonzero', """ Not implemented (virtual attribute) Class generic exists solely to derive numpy scalars from, and possesses, albeit unimplemented, all the attributes of the ndarray class so as to provide a uniform API. See also the corresponding attribute of the derived class of interest. """)) add_newdoc('numpy.core.numerictypes', 'generic', ('prod', """ Not implemented (virtual attribute) Class generic exists solely to derive numpy scalars from, and possesses, albeit unimplemented, all the attributes of the ndarray class so as to provide a uniform API. See also the corresponding attribute of the derived class of interest. """)) add_newdoc('numpy.core.numerictypes', 'generic', ('ptp', """ Not implemented (virtual attribute) Class generic exists solely to derive numpy scalars from, and possesses, albeit unimplemented, all the attributes of the ndarray class so as to provide a uniform API. See also the corresponding attribute of the derived class of interest. """)) add_newdoc('numpy.core.numerictypes', 'generic', ('put', """ Not implemented (virtual attribute) Class generic exists solely to derive numpy scalars from, and possesses, albeit unimplemented, all the attributes of the ndarray class so as to provide a uniform API. See also the corresponding attribute of the derived class of interest. 
""")) add_newdoc('numpy.core.numerictypes', 'generic', ('ravel', """ Not implemented (virtual attribute) Class generic exists solely to derive numpy scalars from, and possesses, albeit unimplemented, all the attributes of the ndarray class so as to provide a uniform API. See also the corresponding attribute of the derived class of interest. """)) add_newdoc('numpy.core.numerictypes', 'generic', ('repeat', """ Not implemented (virtual attribute) Class generic exists solely to derive numpy scalars from, and possesses, albeit unimplemented, all the attributes of the ndarray class so as to provide a uniform API. See also the corresponding attribute of the derived class of interest. """)) add_newdoc('numpy.core.numerictypes', 'generic', ('reshape', """ Not implemented (virtual attribute) Class generic exists solely to derive numpy scalars from, and possesses, albeit unimplemented, all the attributes of the ndarray class so as to provide a uniform API. See also the corresponding attribute of the derived class of interest. """)) add_newdoc('numpy.core.numerictypes', 'generic', ('resize', """ Not implemented (virtual attribute) Class generic exists solely to derive numpy scalars from, and possesses, albeit unimplemented, all the attributes of the ndarray class so as to provide a uniform API. See also the corresponding attribute of the derived class of interest. """)) add_newdoc('numpy.core.numerictypes', 'generic', ('round', """ Not implemented (virtual attribute) Class generic exists solely to derive numpy scalars from, and possesses, albeit unimplemented, all the attributes of the ndarray class so as to provide a uniform API. See also the corresponding attribute of the derived class of interest. """)) add_newdoc('numpy.core.numerictypes', 'generic', ('searchsorted', """ Not implemented (virtual attribute) Class generic exists solely to derive numpy scalars from, and possesses, albeit unimplemented, all the attributes of the ndarray class so as to provide a uniform API. See also the corresponding attribute of the derived class of interest. """)) add_newdoc('numpy.core.numerictypes', 'generic', ('setfield', """ Not implemented (virtual attribute) Class generic exists solely to derive numpy scalars from, and possesses, albeit unimplemented, all the attributes of the ndarray class so as to provide a uniform API. See also the corresponding attribute of the derived class of interest. """)) add_newdoc('numpy.core.numerictypes', 'generic', ('setflags', """ Not implemented (virtual attribute) Class generic exists solely to derive numpy scalars from, and possesses, albeit unimplemented, all the attributes of the ndarray class so as to provide a uniform API. See also the corresponding attribute of the derived class of interest. """)) add_newdoc('numpy.core.numerictypes', 'generic', ('sort', """ Not implemented (virtual attribute) Class generic exists solely to derive numpy scalars from, and possesses, albeit unimplemented, all the attributes of the ndarray class so as to provide a uniform API. See also the corresponding attribute of the derived class of interest. """)) add_newdoc('numpy.core.numerictypes', 'generic', ('squeeze', """ Not implemented (virtual attribute) Class generic exists solely to derive numpy scalars from, and possesses, albeit unimplemented, all the attributes of the ndarray class so as to provide a uniform API. See also the corresponding attribute of the derived class of interest. 
""")) add_newdoc('numpy.core.numerictypes', 'generic', ('std', """ Not implemented (virtual attribute) Class generic exists solely to derive numpy scalars from, and possesses, albeit unimplemented, all the attributes of the ndarray class so as to provide a uniform API. See also the corresponding attribute of the derived class of interest. """)) add_newdoc('numpy.core.numerictypes', 'generic', ('sum', """ Not implemented (virtual attribute) Class generic exists solely to derive numpy scalars from, and possesses, albeit unimplemented, all the attributes of the ndarray class so as to provide a uniform API. See also the corresponding attribute of the derived class of interest. """)) add_newdoc('numpy.core.numerictypes', 'generic', ('swapaxes', """ Not implemented (virtual attribute) Class generic exists solely to derive numpy scalars from, and possesses, albeit unimplemented, all the attributes of the ndarray class so as to provide a uniform API. See also the corresponding attribute of the derived class of interest. """)) add_newdoc('numpy.core.numerictypes', 'generic', ('take', """ Not implemented (virtual attribute) Class generic exists solely to derive numpy scalars from, and possesses, albeit unimplemented, all the attributes of the ndarray class so as to provide a uniform API. See also the corresponding attribute of the derived class of interest. """)) add_newdoc('numpy.core.numerictypes', 'generic', ('tofile', """ Not implemented (virtual attribute) Class generic exists solely to derive numpy scalars from, and possesses, albeit unimplemented, all the attributes of the ndarray class so as to provide a uniform API. See also the corresponding attribute of the derived class of interest. """)) add_newdoc('numpy.core.numerictypes', 'generic', ('tolist', """ Not implemented (virtual attribute) Class generic exists solely to derive numpy scalars from, and possesses, albeit unimplemented, all the attributes of the ndarray class so as to provide a uniform API. See also the corresponding attribute of the derived class of interest. """)) add_newdoc('numpy.core.numerictypes', 'generic', ('tostring', """ Not implemented (virtual attribute) Class generic exists solely to derive numpy scalars from, and possesses, albeit unimplemented, all the attributes of the ndarray class so as to provide a uniform API. See also the corresponding attribute of the derived class of interest. """)) add_newdoc('numpy.core.numerictypes', 'generic', ('trace', """ Not implemented (virtual attribute) Class generic exists solely to derive numpy scalars from, and possesses, albeit unimplemented, all the attributes of the ndarray class so as to provide a uniform API. See also the corresponding attribute of the derived class of interest. """)) add_newdoc('numpy.core.numerictypes', 'generic', ('transpose', """ Not implemented (virtual attribute) Class generic exists solely to derive numpy scalars from, and possesses, albeit unimplemented, all the attributes of the ndarray class so as to provide a uniform API. See also the corresponding attribute of the derived class of interest. """)) add_newdoc('numpy.core.numerictypes', 'generic', ('var', """ Not implemented (virtual attribute) Class generic exists solely to derive numpy scalars from, and possesses, albeit unimplemented, all the attributes of the ndarray class so as to provide a uniform API. See also the corresponding attribute of the derived class of interest. 
""")) add_newdoc('numpy.core.numerictypes', 'generic', ('view', """ Not implemented (virtual attribute) Class generic exists solely to derive numpy scalars from, and possesses, albeit unimplemented, all the attributes of the ndarray class so as to provide a uniform API. See also the corresponding attribute of the derived class of interest. """)) ############################################################################## # # Documentation for scalar type abstract base classes in type hierarchy # ############################################################################## add_newdoc('numpy.core.numerictypes', 'number', """ Abstract base class of all numeric scalar types. """) add_newdoc('numpy.core.numerictypes', 'integer', """ Abstract base class of all integer scalar types. """) add_newdoc('numpy.core.numerictypes', 'signedinteger', """ Abstract base class of all signed integer scalar types. """) add_newdoc('numpy.core.numerictypes', 'unsignedinteger', """ Abstract base class of all unsigned integer scalar types. """) add_newdoc('numpy.core.numerictypes', 'inexact', """ Abstract base class of all numeric scalar types with a (potentially) inexact representation of the values in its range, such as floating-point numbers. """) add_newdoc('numpy.core.numerictypes', 'floating', """ Abstract base class of all floating-point scalar types. """) add_newdoc('numpy.core.numerictypes', 'complexfloating', """ Abstract base class of all complex number scalar types that are made up of floating-point numbers. """) add_newdoc('numpy.core.numerictypes', 'flexible', """ Abstract base class of all scalar types without predefined length. The actual size of these types depends on the specific `np.dtype` instantiation. """) add_newdoc('numpy.core.numerictypes', 'character', """ Abstract base class of all character string scalar types. 
""") ############################################################################## # # Documentation for concrete scalar classes # ############################################################################## def numeric_type_aliases(aliases): def type_aliases_gen(): for alias, doc in aliases: try: alias_type = getattr(_numerictypes, alias) except AttributeError: # The set of aliases that actually exist varies between platforms pass else: yield (alias_type, alias, doc) return list(type_aliases_gen()) possible_aliases = numeric_type_aliases([ ('int8', '8-bit signed integer (-128 to 127)'), ('int16', '16-bit signed integer (-32768 to 32767)'), ('int32', '32-bit signed integer (-2147483648 to 2147483647)'), ('int64', '64-bit signed integer (-9223372036854775808 to 9223372036854775807)'), ('intp', 'Signed integer large enough to fit pointer, compatible with C ``intptr_t``'), ('uint8', '8-bit unsigned integer (0 to 255)'), ('uint16', '16-bit unsigned integer (0 to 65535)'), ('uint32', '32-bit unsigned integer (0 to 4294967295)'), ('uint64', '64-bit unsigned integer (0 to 18446744073709551615)'), ('uintp', 'Unsigned integer large enough to fit pointer, compatible with C ``uintptr_t``'), ('float16', '16-bit-precision floating-point number type: sign bit, 5 bits exponent, 10 bits mantissa'), ('float32', '32-bit-precision floating-point number type: sign bit, 8 bits exponent, 23 bits mantissa'), ('float64', '64-bit precision floating-point number type: sign bit, 11 bits exponent, 52 bits mantissa'), ('float96', '96-bit extended-precision floating-point number type'), ('float128', '128-bit extended-precision floating-point number type'), ('complex64', 'Complex number type composed of 2 32-bit-precision floating-point numbers'), ('complex128', 'Complex number type composed of 2 64-bit-precision floating-point numbers'), ('complex192', 'Complex number type composed of 2 96-bit extended-precision floating-point numbers'), ('complex256', 'Complex number type composed of 2 128-bit extended-precision floating-point numbers'), ]) def add_newdoc_for_scalar_type(obj, fixed_aliases, doc): o = getattr(_numerictypes, obj) character_code = dtype(o).char canonical_name_doc = "" if obj == o.__name__ else "Canonical name: ``np.{}``.\n ".format(obj) alias_doc = ''.join("Alias: ``np.{}``.\n ".format(alias) for alias in fixed_aliases) alias_doc += ''.join("Alias *on this platform*: ``np.{}``: {}.\n ".format(alias, doc) for (alias_type, alias, doc) in possible_aliases if alias_type is o) docstring = """ {doc} Character code: ``'{character_code}'``. {canonical_name_doc}{alias_doc} """.format(doc=doc.strip(), character_code=character_code, canonical_name_doc=canonical_name_doc, alias_doc=alias_doc) add_newdoc('numpy.core.numerictypes', obj, docstring) add_newdoc_for_scalar_type('bool_', ['bool8'], """ Boolean type (True or False), stored as a byte. """) add_newdoc_for_scalar_type('byte', [], """ Signed integer type, compatible with C ``char``. """) add_newdoc_for_scalar_type('short', [], """ Signed integer type, compatible with C ``short``. """) add_newdoc_for_scalar_type('intc', [], """ Signed integer type, compatible with C ``int``. """) add_newdoc_for_scalar_type('int_', [], """ Signed integer type, compatible with Python `int` anc C ``long``. """) add_newdoc_for_scalar_type('longlong', [], """ Signed integer type, compatible with C ``long long``. """) add_newdoc_for_scalar_type('ubyte', [], """ Unsigned integer type, compatible with C ``unsigned char``. 
""") add_newdoc_for_scalar_type('ushort', [], """ Unsigned integer type, compatible with C ``unsigned short``. """) add_newdoc_for_scalar_type('uintc', [], """ Unsigned integer type, compatible with C ``unsigned int``. """) add_newdoc_for_scalar_type('uint', [], """ Unsigned integer type, compatible with C ``unsigned long``. """) add_newdoc_for_scalar_type('ulonglong', [], """ Signed integer type, compatible with C ``unsigned long long``. """) add_newdoc_for_scalar_type('half', [], """ Half-precision floating-point number type. """) add_newdoc_for_scalar_type('single', [], """ Single-precision floating-point number type, compatible with C ``float``. """) add_newdoc_for_scalar_type('double', ['float_'], """ Double-precision floating-point number type, compatible with Python `float` and C ``double``. """) add_newdoc_for_scalar_type('longdouble', ['longfloat'], """ Extended-precision floating-point number type, compatible with C ``long double`` but not necessarily with IEEE 754 quadruple-precision. """) add_newdoc_for_scalar_type('csingle', ['singlecomplex'], """ Complex number type composed of two single-precision floating-point numbers. """) add_newdoc_for_scalar_type('cdouble', ['cfloat', 'complex_'], """ Complex number type composed of two double-precision floating-point numbers, compatible with Python `complex`. """) add_newdoc_for_scalar_type('clongdouble', ['clongfloat', 'longcomplex'], """ Complex number type composed of two extended-precision floating-point numbers. """) add_newdoc_for_scalar_type('object_', [], """ Any Python object. """) # TODO: work out how to put this on the base class, np.floating for float_name in ('half', 'single', 'double', 'longdouble'): add_newdoc('numpy.core.numerictypes', float_name, ('as_integer_ratio', """ {ftype}.as_integer_ratio() -> (int, int) Return a pair of integers, whose ratio is exactly equal to the original floating point number, and with a positive denominator. Raise OverflowError on infinities and a ValueError on NaNs. >>> np.{ftype}(10.0).as_integer_ratio() (10, 1) >>> np.{ftype}(0.0).as_integer_ratio() (0, 1) >>> np.{ftype}(-.25).as_integer_ratio() (-1, 4) """.format(ftype=float_name)))
from __future__ import division, absolute_import, print_function import sys import platform import pytest import numpy as np # import the c-extension module directly since _arg is not exported via umath import numpy.core._multiarray_umath as ncu from numpy.testing import ( assert_raises, assert_equal, assert_array_equal, assert_almost_equal ) # TODO: branch cuts (use Pauli code) # TODO: conj 'symmetry' # TODO: FPU exceptions # At least on Windows the results of many complex functions are not conforming # to the C99 standard. See ticket 1574. # Ditto for Solaris (ticket 1642) and OS X on PowerPC. #FIXME: this will probably change when we require full C99 campatibility with np.errstate(all='ignore'): functions_seem_flaky = ((np.exp(complex(np.inf, 0)).imag != 0) or (np.log(complex(np.NZERO, 0)).imag != np.pi)) # TODO: replace with a check on whether platform-provided C99 funcs are used xfail_complex_tests = (not sys.platform.startswith('linux') or functions_seem_flaky) # TODO This can be xfail when the generator functions are got rid of. platform_skip = pytest.mark.skipif(xfail_complex_tests, reason="Inadequate C99 complex support") class TestCexp(object): def test_simple(self): check = check_complex_value f = np.exp check(f, 1, 0, np.exp(1), 0, False) check(f, 0, 1, np.cos(1), np.sin(1), False) ref = np.exp(1) * complex(np.cos(1), np.sin(1)) check(f, 1, 1, ref.real, ref.imag, False) @platform_skip def test_special_values(self): # C99: Section G 6.3.1 check = check_complex_value f = np.exp # cexp(+-0 + 0i) is 1 + 0i check(f, np.PZERO, 0, 1, 0, False) check(f, np.NZERO, 0, 1, 0, False) # cexp(x + infi) is nan + nani for finite x and raises 'invalid' FPU # exception check(f, 1, np.inf, np.nan, np.nan) check(f, -1, np.inf, np.nan, np.nan) check(f, 0, np.inf, np.nan, np.nan) # cexp(inf + 0i) is inf + 0i check(f, np.inf, 0, np.inf, 0) # cexp(-inf + yi) is +0 * (cos(y) + i sin(y)) for finite y check(f, -np.inf, 1, np.PZERO, np.PZERO) check(f, -np.inf, 0.75 * np.pi, np.NZERO, np.PZERO) # cexp(inf + yi) is +inf * (cos(y) + i sin(y)) for finite y check(f, np.inf, 1, np.inf, np.inf) check(f, np.inf, 0.75 * np.pi, -np.inf, np.inf) # cexp(-inf + inf i) is +-0 +- 0i (signs unspecified) def _check_ninf_inf(dummy): msgform = "cexp(-inf, inf) is (%f, %f), expected (+-0, +-0)" with np.errstate(invalid='ignore'): z = f(np.array(complex(-np.inf, np.inf))) if z.real != 0 or z.imag != 0: raise AssertionError(msgform % (z.real, z.imag)) _check_ninf_inf(None) # cexp(inf + inf i) is +-inf + NaNi and raised invalid FPU ex. 
def _check_inf_inf(dummy): msgform = "cexp(inf, inf) is (%f, %f), expected (+-inf, nan)" with np.errstate(invalid='ignore'): z = f(np.array(complex(np.inf, np.inf))) if not np.isinf(z.real) or not np.isnan(z.imag): raise AssertionError(msgform % (z.real, z.imag)) _check_inf_inf(None) # cexp(-inf + nan i) is +-0 +- 0i def _check_ninf_nan(dummy): msgform = "cexp(-inf, nan) is (%f, %f), expected (+-0, +-0)" with np.errstate(invalid='ignore'): z = f(np.array(complex(-np.inf, np.nan))) if z.real != 0 or z.imag != 0: raise AssertionError(msgform % (z.real, z.imag)) _check_ninf_nan(None) # cexp(inf + nan i) is +-inf + nan def _check_inf_nan(dummy): msgform = "cexp(-inf, nan) is (%f, %f), expected (+-inf, nan)" with np.errstate(invalid='ignore'): z = f(np.array(complex(np.inf, np.nan))) if not np.isinf(z.real) or not np.isnan(z.imag): raise AssertionError(msgform % (z.real, z.imag)) _check_inf_nan(None) # cexp(nan + yi) is nan + nani for y != 0 (optional: raises invalid FPU # ex) check(f, np.nan, 1, np.nan, np.nan) check(f, np.nan, -1, np.nan, np.nan) check(f, np.nan, np.inf, np.nan, np.nan) check(f, np.nan, -np.inf, np.nan, np.nan) # cexp(nan + nani) is nan + nani check(f, np.nan, np.nan, np.nan, np.nan) # TODO This can be xfail when the generator functions are got rid of. @pytest.mark.skip(reason="cexp(nan + 0I) is wrong on most platforms") def test_special_values2(self): # XXX: most implementations get it wrong here (including glibc <= 2.10) # cexp(nan + 0i) is nan + 0i check = check_complex_value f = np.exp check(f, np.nan, 0, np.nan, 0) class TestClog(object): def test_simple(self): x = np.array([1+0j, 1+2j]) y_r = np.log(np.abs(x)) + 1j * np.angle(x) y = np.log(x) for i in range(len(x)): assert_almost_equal(y[i], y_r[i]) @platform_skip @pytest.mark.skipif(platform.machine() == "armv5tel", reason="See gh-413.") def test_special_values(self): xl = [] yl = [] # From C99 std (Sec 6.3.2) # XXX: check exceptions raised # --- raise for invalid fails. # clog(-0 + i0) returns -inf + i pi and raises the 'divide-by-zero' # floating-point exception. with np.errstate(divide='raise'): x = np.array([np.NZERO], dtype=complex) y = complex(-np.inf, np.pi) assert_raises(FloatingPointError, np.log, x) with np.errstate(divide='ignore'): assert_almost_equal(np.log(x), y) xl.append(x) yl.append(y) # clog(+0 + i0) returns -inf + i0 and raises the 'divide-by-zero' # floating-point exception. with np.errstate(divide='raise'): x = np.array([0], dtype=complex) y = complex(-np.inf, 0) assert_raises(FloatingPointError, np.log, x) with np.errstate(divide='ignore'): assert_almost_equal(np.log(x), y) xl.append(x) yl.append(y) # clog(x + i inf returns +inf + i pi /2, for finite x. x = np.array([complex(1, np.inf)], dtype=complex) y = complex(np.inf, 0.5 * np.pi) assert_almost_equal(np.log(x), y) xl.append(x) yl.append(y) x = np.array([complex(-1, np.inf)], dtype=complex) assert_almost_equal(np.log(x), y) xl.append(x) yl.append(y) # clog(x + iNaN) returns NaN + iNaN and optionally raises the # 'invalid' floating- point exception, for finite x. 
with np.errstate(invalid='raise'): x = np.array([complex(1., np.nan)], dtype=complex) y = complex(np.nan, np.nan) #assert_raises(FloatingPointError, np.log, x) with np.errstate(invalid='ignore'): assert_almost_equal(np.log(x), y) xl.append(x) yl.append(y) with np.errstate(invalid='raise'): x = np.array([np.inf + 1j * np.nan], dtype=complex) #assert_raises(FloatingPointError, np.log, x) with np.errstate(invalid='ignore'): assert_almost_equal(np.log(x), y) xl.append(x) yl.append(y) # clog(- inf + iy) returns +inf + ipi , for finite positive-signed y. x = np.array([-np.inf + 1j], dtype=complex) y = complex(np.inf, np.pi) assert_almost_equal(np.log(x), y) xl.append(x) yl.append(y) # clog(+ inf + iy) returns +inf + i0, for finite positive-signed y. x = np.array([np.inf + 1j], dtype=complex) y = complex(np.inf, 0) assert_almost_equal(np.log(x), y) xl.append(x) yl.append(y) # clog(- inf + i inf) returns +inf + i3pi /4. x = np.array([complex(-np.inf, np.inf)], dtype=complex) y = complex(np.inf, 0.75 * np.pi) assert_almost_equal(np.log(x), y) xl.append(x) yl.append(y) # clog(+ inf + i inf) returns +inf + ipi /4. x = np.array([complex(np.inf, np.inf)], dtype=complex) y = complex(np.inf, 0.25 * np.pi) assert_almost_equal(np.log(x), y) xl.append(x) yl.append(y) # clog(+/- inf + iNaN) returns +inf + iNaN. x = np.array([complex(np.inf, np.nan)], dtype=complex) y = complex(np.inf, np.nan) assert_almost_equal(np.log(x), y) xl.append(x) yl.append(y) x = np.array([complex(-np.inf, np.nan)], dtype=complex) assert_almost_equal(np.log(x), y) xl.append(x) yl.append(y) # clog(NaN + iy) returns NaN + iNaN and optionally raises the # 'invalid' floating-point exception, for finite y. x = np.array([complex(np.nan, 1)], dtype=complex) y = complex(np.nan, np.nan) assert_almost_equal(np.log(x), y) xl.append(x) yl.append(y) # clog(NaN + i inf) returns +inf + iNaN. x = np.array([complex(np.nan, np.inf)], dtype=complex) y = complex(np.inf, np.nan) assert_almost_equal(np.log(x), y) xl.append(x) yl.append(y) # clog(NaN + iNaN) returns NaN + iNaN. x = np.array([complex(np.nan, np.nan)], dtype=complex) y = complex(np.nan, np.nan) assert_almost_equal(np.log(x), y) xl.append(x) yl.append(y) # clog(conj(z)) = conj(clog(z)). 
xa = np.array(xl, dtype=complex) ya = np.array(yl, dtype=complex) with np.errstate(divide='ignore'): for i in range(len(xa)): assert_almost_equal(np.log(xa[i].conj()), ya[i].conj()) class TestCsqrt(object): def test_simple(self): # sqrt(1) check_complex_value(np.sqrt, 1, 0, 1, 0) # sqrt(1i) rres = 0.5*np.sqrt(2) ires = rres check_complex_value(np.sqrt, 0, 1, rres, ires, False) # sqrt(-1) check_complex_value(np.sqrt, -1, 0, 0, 1) def test_simple_conjugate(self): ref = np.conj(np.sqrt(complex(1, 1))) def f(z): return np.sqrt(np.conj(z)) check_complex_value(f, 1, 1, ref.real, ref.imag, False) #def test_branch_cut(self): # _check_branch_cut(f, -1, 0, 1, -1) @platform_skip def test_special_values(self): # C99: Sec G 6.4.2 check = check_complex_value f = np.sqrt # csqrt(+-0 + 0i) is 0 + 0i check(f, np.PZERO, 0, 0, 0) check(f, np.NZERO, 0, 0, 0) # csqrt(x + infi) is inf + infi for any x (including NaN) check(f, 1, np.inf, np.inf, np.inf) check(f, -1, np.inf, np.inf, np.inf) check(f, np.PZERO, np.inf, np.inf, np.inf) check(f, np.NZERO, np.inf, np.inf, np.inf) check(f, np.inf, np.inf, np.inf, np.inf) check(f, -np.inf, np.inf, np.inf, np.inf) check(f, -np.nan, np.inf, np.inf, np.inf) # csqrt(x + nani) is nan + nani for any finite x check(f, 1, np.nan, np.nan, np.nan) check(f, -1, np.nan, np.nan, np.nan) check(f, 0, np.nan, np.nan, np.nan) # csqrt(-inf + yi) is +0 + infi for any finite y > 0 check(f, -np.inf, 1, np.PZERO, np.inf) # csqrt(inf + yi) is +inf + 0i for any finite y > 0 check(f, np.inf, 1, np.inf, np.PZERO) # csqrt(-inf + nani) is nan +- infi (both +i infi are valid) def _check_ninf_nan(dummy): msgform = "csqrt(-inf, nan) is (%f, %f), expected (nan, +-inf)" z = np.sqrt(np.array(complex(-np.inf, np.nan))) #Fixme: ugly workaround for isinf bug. with np.errstate(invalid='ignore'): if not (np.isnan(z.real) and np.isinf(z.imag)): raise AssertionError(msgform % (z.real, z.imag)) _check_ninf_nan(None) # csqrt(+inf + nani) is inf + nani check(f, np.inf, np.nan, np.inf, np.nan) # csqrt(nan + yi) is nan + nani for any finite y (infinite handled in x # + nani) check(f, np.nan, 0, np.nan, np.nan) check(f, np.nan, 1, np.nan, np.nan) check(f, np.nan, np.nan, np.nan, np.nan) # XXX: check for conj(csqrt(z)) == csqrt(conj(z)) (need to fix branch # cuts first) class TestCpow(object): def setup(self): self.olderr = np.seterr(invalid='ignore') def teardown(self): np.seterr(**self.olderr) def test_simple(self): x = np.array([1+1j, 0+2j, 1+2j, np.inf, np.nan]) y_r = x ** 2 y = np.power(x, 2) for i in range(len(x)): assert_almost_equal(y[i], y_r[i]) def test_scalar(self): x = np.array([1, 1j, 2, 2.5+.37j, np.inf, np.nan]) y = np.array([1, 1j, -0.5+1.5j, -0.5+1.5j, 2, 3]) lx = list(range(len(x))) # Compute the values for complex type in python p_r = [complex(x[i]) ** complex(y[i]) for i in lx] # Substitute a result allowed by C99 standard p_r[4] = complex(np.inf, np.nan) # Do the same with numpy complex scalars n_r = [x[i] ** y[i] for i in lx] for i in lx: assert_almost_equal(n_r[i], p_r[i], err_msg='Loop %d\n' % i) def test_array(self): x = np.array([1, 1j, 2, 2.5+.37j, np.inf, np.nan]) y = np.array([1, 1j, -0.5+1.5j, -0.5+1.5j, 2, 3]) lx = list(range(len(x))) # Compute the values for complex type in python p_r = [complex(x[i]) ** complex(y[i]) for i in lx] # Substitute a result allowed by C99 standard p_r[4] = complex(np.inf, np.nan) # Do the same with numpy arrays n_r = x ** y for i in lx: assert_almost_equal(n_r[i], p_r[i], err_msg='Loop %d\n' % i) class TestCabs(object): def setup(self): self.olderr = 
np.seterr(invalid='ignore') def teardown(self): np.seterr(**self.olderr) def test_simple(self): x = np.array([1+1j, 0+2j, 1+2j, np.inf, np.nan]) y_r = np.array([np.sqrt(2.), 2, np.sqrt(5), np.inf, np.nan]) y = np.abs(x) for i in range(len(x)): assert_almost_equal(y[i], y_r[i]) def test_fabs(self): # Test that np.abs(x +- 0j) == np.abs(x) (as mandated by C99 for cabs) x = np.array([1+0j], dtype=complex) assert_array_equal(np.abs(x), np.real(x)) x = np.array([complex(1, np.NZERO)], dtype=complex) assert_array_equal(np.abs(x), np.real(x)) x = np.array([complex(np.inf, np.NZERO)], dtype=complex) assert_array_equal(np.abs(x), np.real(x)) x = np.array([complex(np.nan, np.NZERO)], dtype=complex) assert_array_equal(np.abs(x), np.real(x)) def test_cabs_inf_nan(self): x, y = [], [] # cabs(+-nan + nani) returns nan x.append(np.nan) y.append(np.nan) check_real_value(np.abs, np.nan, np.nan, np.nan) x.append(np.nan) y.append(-np.nan) check_real_value(np.abs, -np.nan, np.nan, np.nan) # According to C99 standard, if exactly one of the real/part is inf and # the other nan, then cabs should return inf x.append(np.inf) y.append(np.nan) check_real_value(np.abs, np.inf, np.nan, np.inf) x.append(-np.inf) y.append(np.nan) check_real_value(np.abs, -np.inf, np.nan, np.inf) # cabs(conj(z)) == conj(cabs(z)) (= cabs(z)) def f(a): return np.abs(np.conj(a)) def g(a, b): return np.abs(complex(a, b)) xa = np.array(x, dtype=complex) for i in range(len(xa)): ref = g(x[i], y[i]) check_real_value(f, x[i], y[i], ref) class TestCarg(object): def test_simple(self): check_real_value(ncu._arg, 1, 0, 0, False) check_real_value(ncu._arg, 0, 1, 0.5*np.pi, False) check_real_value(ncu._arg, 1, 1, 0.25*np.pi, False) check_real_value(ncu._arg, np.PZERO, np.PZERO, np.PZERO) # TODO This can be xfail when the generator functions are got rid of. 
@pytest.mark.skip( reason="Complex arithmetic with signed zero fails on most platforms") def test_zero(self): # carg(-0 +- 0i) returns +- pi check_real_value(ncu._arg, np.NZERO, np.PZERO, np.pi, False) check_real_value(ncu._arg, np.NZERO, np.NZERO, -np.pi, False) # carg(+0 +- 0i) returns +- 0 check_real_value(ncu._arg, np.PZERO, np.PZERO, np.PZERO) check_real_value(ncu._arg, np.PZERO, np.NZERO, np.NZERO) # carg(x +- 0i) returns +- 0 for x > 0 check_real_value(ncu._arg, 1, np.PZERO, np.PZERO, False) check_real_value(ncu._arg, 1, np.NZERO, np.NZERO, False) # carg(x +- 0i) returns +- pi for x < 0 check_real_value(ncu._arg, -1, np.PZERO, np.pi, False) check_real_value(ncu._arg, -1, np.NZERO, -np.pi, False) # carg(+- 0 + yi) returns pi/2 for y > 0 check_real_value(ncu._arg, np.PZERO, 1, 0.5 * np.pi, False) check_real_value(ncu._arg, np.NZERO, 1, 0.5 * np.pi, False) # carg(+- 0 + yi) returns -pi/2 for y < 0 check_real_value(ncu._arg, np.PZERO, -1, 0.5 * np.pi, False) check_real_value(ncu._arg, np.NZERO, -1, -0.5 * np.pi, False) #def test_branch_cuts(self): # _check_branch_cut(ncu._arg, -1, 1j, -1, 1) def test_special_values(self): # carg(-np.inf +- yi) returns +-pi for finite y > 0 check_real_value(ncu._arg, -np.inf, 1, np.pi, False) check_real_value(ncu._arg, -np.inf, -1, -np.pi, False) # carg(np.inf +- yi) returns +-0 for finite y > 0 check_real_value(ncu._arg, np.inf, 1, np.PZERO, False) check_real_value(ncu._arg, np.inf, -1, np.NZERO, False) # carg(x +- np.infi) returns +-pi/2 for finite x check_real_value(ncu._arg, 1, np.inf, 0.5 * np.pi, False) check_real_value(ncu._arg, 1, -np.inf, -0.5 * np.pi, False) # carg(-np.inf +- np.infi) returns +-3pi/4 check_real_value(ncu._arg, -np.inf, np.inf, 0.75 * np.pi, False) check_real_value(ncu._arg, -np.inf, -np.inf, -0.75 * np.pi, False) # carg(np.inf +- np.infi) returns +-pi/4 check_real_value(ncu._arg, np.inf, np.inf, 0.25 * np.pi, False) check_real_value(ncu._arg, np.inf, -np.inf, -0.25 * np.pi, False) # carg(x + yi) returns np.nan if x or y is nan check_real_value(ncu._arg, np.nan, 0, np.nan, False) check_real_value(ncu._arg, 0, np.nan, np.nan, False) check_real_value(ncu._arg, np.nan, np.inf, np.nan, False) check_real_value(ncu._arg, np.inf, np.nan, np.nan, False) def check_real_value(f, x1, y1, x, exact=True): z1 = np.array([complex(x1, y1)]) if exact: assert_equal(f(z1), x) else: assert_almost_equal(f(z1), x) def check_complex_value(f, x1, y1, x2, y2, exact=True): z1 = np.array([complex(x1, y1)]) z2 = complex(x2, y2) with np.errstate(invalid='ignore'): if exact: assert_equal(f(z1), z2) else: assert_almost_equal(f(z1), z2)
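# A small, hedged usage sketch of the helpers above, assuming
# ``check_real_value``/``check_complex_value`` from this module are in scope:
# each wraps its inputs in a one-element complex array so the ufunc under test
# goes through the array machinery, then compares exactly (``exact=True``) or
# to a few decimal places (``exact=False``).
if __name__ == "__main__":
    # cexp(+-0 + 0i) is 1 + 0i (C99 Annex G, Sec 6.3.1); compare approximately
    check_complex_value(np.exp, np.PZERO, 0, 1, 0, False)
    check_complex_value(np.exp, np.NZERO, 0, 1, 0, False)
    # cabs(inf + nan*i) is +inf even though one component is nan
    check_real_value(np.abs, np.inf, np.nan, np.inf)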
pizzathief/numpy
numpy/core/tests/test_umath_complex.py
numpy/core/_add_newdocs.py
"""============================= Subclassing ndarray in python ============================= Introduction ------------ Subclassing ndarray is relatively simple, but it has some complications compared to other Python objects. On this page we explain the machinery that allows you to subclass ndarray, and the implications for implementing a subclass. ndarrays and object creation ============================ Subclassing ndarray is complicated by the fact that new instances of ndarray classes can come about in three different ways. These are: #. Explicit constructor call - as in ``MySubClass(params)``. This is the usual route to Python instance creation. #. View casting - casting an existing ndarray as a given subclass #. New from template - creating a new instance from a template instance. Examples include returning slices from a subclassed array, creating return types from ufuncs, and copying arrays. See :ref:`new-from-template` for more details The last two are characteristics of ndarrays - in order to support things like array slicing. The complications of subclassing ndarray are due to the mechanisms numpy has to support these latter two routes of instance creation. .. _view-casting: View casting ------------ *View casting* is the standard ndarray mechanism by which you take an ndarray of any subclass, and return a view of the array as another (specified) subclass: >>> import numpy as np >>> # create a completely useless ndarray subclass >>> class C(np.ndarray): pass >>> # create a standard ndarray >>> arr = np.zeros((3,)) >>> # take a view of it, as our useless subclass >>> c_arr = arr.view(C) >>> type(c_arr) <class 'C'> .. _new-from-template: Creating new from template -------------------------- New instances of an ndarray subclass can also come about by a very similar mechanism to :ref:`view-casting`, when numpy finds it needs to create a new instance from a template instance. The most obvious place this has to happen is when you are taking slices of subclassed arrays. For example: >>> v = c_arr[1:] >>> type(v) # the view is of type 'C' <class 'C'> >>> v is c_arr # but it's a new instance False The slice is a *view* onto the original ``c_arr`` data. So, when we take a view from the ndarray, we return a new ndarray, of the same class, that points to the data in the original. There are other points in the use of ndarrays where we need such views, such as copying arrays (``c_arr.copy()``), creating ufunc output arrays (see also :ref:`array-wrap`), and reducing methods (like ``c_arr.mean()``. Relationship of view casting and new-from-template -------------------------------------------------- These paths both use the same machinery. We make the distinction here, because they result in different input to your methods. Specifically, :ref:`view-casting` means you have created a new instance of your array type from any potential subclass of ndarray. :ref:`new-from-template` means you have created a new instance of your class from a pre-existing instance, allowing you - for example - to copy across attributes that are particular to your subclass. Implications for subclassing ---------------------------- If we subclass ndarray, we need to deal not only with explicit construction of our array type, but also :ref:`view-casting` or :ref:`new-from-template`. NumPy has the machinery to do this, and this machinery that makes subclassing slightly non-standard. There are two aspects to the machinery that ndarray uses to support views and new-from-template in subclasses. 
The first is the use of the ``ndarray.__new__`` method for the main work of object initialization, rather than the more usual ``__init__`` method. The second is the use of the ``__array_finalize__`` method to allow subclasses to clean up after the creation of views and new instances from templates. A brief Python primer on ``__new__`` and ``__init__`` ===================================================== ``__new__`` is a standard Python method, and, if present, is called before ``__init__`` when we create a class instance. See the `python __new__ documentation <https://docs.python.org/reference/datamodel.html#object.__new__>`_ for more detail. For example, consider the following Python code: .. testcode:: class C(object): def __new__(cls, *args): print('Cls in __new__:', cls) print('Args in __new__:', args) # The `object` type __new__ method takes a single argument. return object.__new__(cls) def __init__(self, *args): print('type(self) in __init__:', type(self)) print('Args in __init__:', args) meaning that we get: >>> c = C('hello') Cls in __new__: <class 'C'> Args in __new__: ('hello',) type(self) in __init__: <class 'C'> Args in __init__: ('hello',) When we call ``C('hello')``, the ``__new__`` method gets its own class as first argument, and the passed argument, which is the string ``'hello'``. After python calls ``__new__``, it usually (see below) calls our ``__init__`` method, with the output of ``__new__`` as the first argument (now a class instance), and the passed arguments following. As you can see, the object can be initialized in the ``__new__`` method or the ``__init__`` method, or both, and in fact ndarray does not have an ``__init__`` method, because all the initialization is done in the ``__new__`` method. Why use ``__new__`` rather than just the usual ``__init__``? Because in some cases, as for ndarray, we want to be able to return an object of some other class. Consider the following: .. testcode:: class D(C): def __new__(cls, *args): print('D cls is:', cls) print('D args in __new__:', args) return C.__new__(C, *args) def __init__(self, *args): # we never get here print('In D __init__') meaning that: >>> obj = D('hello') D cls is: <class 'D'> D args in __new__: ('hello',) Cls in __new__: <class 'C'> Args in __new__: ('hello',) >>> type(obj) <class 'C'> The definition of ``C`` is the same as before, but for ``D``, the ``__new__`` method returns an instance of class ``C`` rather than ``D``. Note that the ``__init__`` method of ``D`` does not get called. In general, when the ``__new__`` method returns an object of class other than the class in which it is defined, the ``__init__`` method of that class is not called. This is how subclasses of the ndarray class are able to return views that preserve the class type. When taking a view, the standard ndarray machinery creates the new ndarray object with something like:: obj = ndarray.__new__(subtype, shape, ... where ``subtype`` is the subclass. Thus the returned view is of the same class as the subclass, rather than being of class ``ndarray``. That solves the problem of returning views of the same type, but now we have a new problem. The machinery of ndarray can set the class this way, in its standard methods for taking views, but the ndarray ``__new__`` method knows nothing of what we have done in our own ``__new__`` method in order to set attributes, and so on. (Aside - why not call ``obj = subtype.__new__(...`` then? Because we may not have a ``__new__`` method with the same call signature).
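As a quick, illustrative sketch of the point above (using a throwaway subclass ``E`` that is not part of the surrounding examples), one can check interactively that an explicit constructor call does reach ``__init__``, while view casting bypasses both ``__new__`` and ``__init__`` of the subclass:

>>> import numpy as np
>>> class E(np.ndarray):
...     def __init__(self, *args, **kwargs):
...         print('In E.__init__')
>>> e = E((3,))                # explicit constructor: __new__ then __init__
In E.__init__
>>> v = np.arange(3).view(E)   # view casting: neither method is called
>>> type(v)
<class 'E'>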
The role of ``__array_finalize__`` ================================== ``__array_finalize__`` is the mechanism that numpy provides to allow subclasses to handle the various ways that new instances get created. Remember that subclass instances can come about in these three ways: #. explicit constructor call (``obj = MySubClass(params)``). This will call the usual sequence of ``MySubClass.__new__`` then (if it exists) ``MySubClass.__init__``. #. :ref:`view-casting` #. :ref:`new-from-template` Our ``MySubClass.__new__`` method only gets called in the case of the explicit constructor call, so we can't rely on ``MySubClass.__new__`` or ``MySubClass.__init__`` to deal with the view casting and new-from-template. It turns out that ``MySubClass.__array_finalize__`` *does* get called for all three methods of object creation, so this is where our object creation housekeeping usually goes. * For the explicit constructor call, our subclass will need to create a new ndarray instance of its own class. In practice this means that we, the authors of the code, will need to make a call to ``ndarray.__new__(MySubClass,...)``, a class-hierarchy prepared call to ``super(MySubClass, cls).__new__(cls, ...)``, or do view casting of an existing array (see below) * For view casting and new-from-template, the equivalent of ``ndarray.__new__(MySubClass,...`` is called, at the C level. The arguments that ``__array_finalize__`` receives differ for the three methods of instance creation above. The following code allows us to look at the call sequences and arguments: .. testcode:: import numpy as np class C(np.ndarray): def __new__(cls, *args, **kwargs): print('In __new__ with class %s' % cls) return super(C, cls).__new__(cls, *args, **kwargs) def __init__(self, *args, **kwargs): # in practice you probably will not need or want an __init__ # method for your subclass print('In __init__ with class %s' % self.__class__) def __array_finalize__(self, obj): print('In array_finalize:') print(' self type is %s' % type(self)) print(' obj type is %s' % type(obj)) Now: >>> # Explicit constructor >>> c = C((10,)) In __new__ with class <class 'C'> In array_finalize: self type is <class 'C'> obj type is <type 'NoneType'> In __init__ with class <class 'C'> >>> # View casting >>> a = np.arange(10) >>> cast_a = a.view(C) In array_finalize: self type is <class 'C'> obj type is <type 'numpy.ndarray'> >>> # Slicing (example of new-from-template) >>> cv = c[:1] In array_finalize: self type is <class 'C'> obj type is <class 'C'> The signature of ``__array_finalize__`` is:: def __array_finalize__(self, obj): One sees that the ``super`` call, which goes to ``ndarray.__new__``, passes ``__array_finalize__`` the new object, of our own class (``self``) as well as the object from which the view has been taken (``obj``). As you can see from the output above, the ``self`` is always a newly created instance of our subclass, and the type of ``obj`` differs for the three instance creation methods: * When called from the explicit constructor, ``obj`` is ``None`` * When called from view casting, ``obj`` can be an instance of any subclass of ndarray, including our own. * When called in new-from-template, ``obj`` is another instance of our own subclass, that we might use to update the new ``self`` instance. Because ``__array_finalize__`` is the only method that always sees new instances being created, it is the sensible place to fill in instance defaults for new object attributes, among other tasks. This may be clearer with an example. 
Simple example - adding an extra attribute to ndarray ----------------------------------------------------- .. testcode:: import numpy as np class InfoArray(np.ndarray): def __new__(subtype, shape, dtype=float, buffer=None, offset=0, strides=None, order=None, info=None): # Create the ndarray instance of our type, given the usual # ndarray input arguments. This will call the standard # ndarray constructor, but return an object of our type. # It also triggers a call to InfoArray.__array_finalize__ obj = super(InfoArray, subtype).__new__(subtype, shape, dtype, buffer, offset, strides, order) # set the new 'info' attribute to the value passed obj.info = info # Finally, we must return the newly created object: return obj def __array_finalize__(self, obj): # ``self`` is a new object resulting from # ndarray.__new__(InfoArray, ...), therefore it only has # attributes that the ndarray.__new__ constructor gave it - # i.e. those of a standard ndarray. # # We could have got to the ndarray.__new__ call in 3 ways: # From an explicit constructor - e.g. InfoArray(): # obj is None # (we're in the middle of the InfoArray.__new__ # constructor, and self.info will be set when we return to # InfoArray.__new__) if obj is None: return # From view casting - e.g arr.view(InfoArray): # obj is arr # (type(obj) can be InfoArray) # From new-from-template - e.g infoarr[:3] # type(obj) is InfoArray # # Note that it is here, rather than in the __new__ method, # that we set the default value for 'info', because this # method sees all creation of default objects - with the # InfoArray.__new__ constructor, but also with # arr.view(InfoArray). self.info = getattr(obj, 'info', None) # We do not need to return anything Using the object looks like this: >>> obj = InfoArray(shape=(3,)) # explicit constructor >>> type(obj) <class 'InfoArray'> >>> obj.info is None True >>> obj = InfoArray(shape=(3,), info='information') >>> obj.info 'information' >>> v = obj[1:] # new-from-template - here - slicing >>> type(v) <class 'InfoArray'> >>> v.info 'information' >>> arr = np.arange(10) >>> cast_arr = arr.view(InfoArray) # view casting >>> type(cast_arr) <class 'InfoArray'> >>> cast_arr.info is None True This class isn't very useful, because it has the same constructor as the bare ndarray object, including passing in buffers and shapes and so on. We would probably prefer the constructor to be able to take an already formed ndarray from the usual numpy calls to ``np.array`` and return an object. Slightly more realistic example - attribute added to existing array ------------------------------------------------------------------- Here is a class that takes a standard ndarray that already exists, casts as our type, and adds an extra attribute. .. testcode:: import numpy as np class RealisticInfoArray(np.ndarray): def __new__(cls, input_array, info=None): # Input array is an already formed ndarray instance # We first cast to be our class type obj = np.asarray(input_array).view(cls) # add the new attribute to the created instance obj.info = info # Finally, we must return the newly created object: return obj def __array_finalize__(self, obj): # see InfoArray.__array_finalize__ for comments if obj is None: return self.info = getattr(obj, 'info', None) So: >>> arr = np.arange(5) >>> obj = RealisticInfoArray(arr, info='information') >>> type(obj) <class 'RealisticInfoArray'> >>> obj.info 'information' >>> v = obj[1:] >>> type(v) <class 'RealisticInfoArray'> >>> v.info 'information' .. 
.. _array-ufunc:

``__array_ufunc__`` for ufuncs
------------------------------

.. versionadded:: 1.13

A subclass can override what happens when executing numpy ufuncs on it by
overriding the default ``ndarray.__array_ufunc__`` method. This method is
executed *instead* of the ufunc and should return either the result of the
operation, or :obj:`NotImplemented` if the operation requested is not
implemented.

The signature of ``__array_ufunc__`` is::

    def __array_ufunc__(ufunc, method, *inputs, **kwargs):

- *ufunc* is the ufunc object that was called.
- *method* is a string indicating how the Ufunc was called, either
  ``"__call__"`` to indicate it was called directly, or one of its
  :ref:`methods<ufuncs.methods>`: ``"reduce"``, ``"accumulate"``,
  ``"reduceat"``, ``"outer"``, or ``"at"``.
- *inputs* is a tuple of the input arguments to the ``ufunc``
- *kwargs* contains any optional or keyword arguments passed to the
  function. This includes any ``out`` arguments, which are always
  contained in a tuple.

A typical implementation would convert any inputs or outputs that are
instances of one's own class, pass everything on to a superclass using
``super()``, and finally return the results after possible
back-conversion. An example, taken from the test case
``test_ufunc_override_with_super`` in ``core/tests/test_umath.py``, is the
following.

.. testcode::

    import numpy as np

    class A(np.ndarray):
        def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
            args = []
            in_no = []
            for i, input_ in enumerate(inputs):
                if isinstance(input_, A):
                    in_no.append(i)
                    args.append(input_.view(np.ndarray))
                else:
                    args.append(input_)

            outputs = kwargs.pop('out', None)
            out_no = []
            if outputs:
                out_args = []
                for j, output in enumerate(outputs):
                    if isinstance(output, A):
                        out_no.append(j)
                        out_args.append(output.view(np.ndarray))
                    else:
                        out_args.append(output)
                kwargs['out'] = tuple(out_args)
            else:
                outputs = (None,) * ufunc.nout

            info = {}
            if in_no:
                info['inputs'] = in_no
            if out_no:
                info['outputs'] = out_no

            results = super(A, self).__array_ufunc__(ufunc, method,
                                                     *args, **kwargs)
            if results is NotImplemented:
                return NotImplemented

            if method == 'at':
                if isinstance(inputs[0], A):
                    inputs[0].info = info
                return

            if ufunc.nout == 1:
                results = (results,)

            results = tuple((np.asarray(result).view(A)
                             if output is None else output)
                            for result, output in zip(results, outputs))
            if results and isinstance(results[0], A):
                results[0].info = info

            return results[0] if len(results) == 1 else results

So, this class does not actually do anything interesting: it just converts
any instances of its own to regular ndarray (otherwise, we'd get infinite
recursion!), and adds an ``info`` dictionary that tells which inputs and
outputs it converted. Hence, e.g.,

>>> a = np.arange(5.).view(A)
>>> b = np.sin(a)
>>> b.info
{'inputs': [0]}
>>> b = np.sin(np.arange(5.), out=(a,))
>>> b.info
{'outputs': [0]}
>>> a = np.arange(5.).view(A)
>>> b = np.ones(1).view(A)
>>> c = a + b
>>> c.info
{'inputs': [0, 1]}
>>> a += b
>>> a.info
{'inputs': [0, 1], 'outputs': [0]}

Note that another approach would be to use
``getattr(ufunc, method)(*inputs, **kwargs)`` instead of the ``super``
call. For this example, the result would be identical, but there is a
difference if another operand also defines ``__array_ufunc__``. E.g., let's
assume that we evaluate ``np.add(a, b)``, where ``b`` is an instance of
another class ``B`` that has an override.
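To make that scenario concrete, here is a minimal sketch of such a ``B``
(an illustrative assumption for this discussion, not the class used in
numpy's test suite). Like ``A`` above it defers to ``super``, but it
declines any operand it does not recognise; the analysis that follows
assumes a ``B`` along these lines.

.. testcode::

    import numpy as np

    class B(np.ndarray):
        def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
            # Only plain calls without explicit outputs are handled here;
            # anything else is declined so the other operand gets a chance.
            if method != '__call__' or kwargs.get('out') is not None:
                return NotImplemented
            args = []
            for input_ in inputs:
                if isinstance(input_, B):
                    args.append(input_.view(np.ndarray))
                elif type(input_) is np.ndarray or np.isscalar(input_):
                    args.append(input_)
                else:
                    # an operand with its own override (such as A above)
                    return NotImplemented
            result = super(B, self).__array_ufunc__(ufunc, method,
                                                    *args, **kwargs)
            if result is NotImplemented:
                return NotImplemented
            return result.view(B)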
If you use ``super`` as in the example, ``ndarray.__array_ufunc__`` will notice that ``b`` has an override, which means it cannot evaluate the result itself. Thus, it will return `NotImplemented` and so will our class ``A``. Then, control will be passed over to ``b``, which either knows how to deal with us and produces a result, or does not and returns `NotImplemented`, raising a ``TypeError``. If instead, we replace our ``super`` call with ``getattr(ufunc, method)``, we effectively do ``np.add(a.view(np.ndarray), b)``. Again, ``B.__array_ufunc__`` will be called, but now it sees an ``ndarray`` as the other argument. Likely, it will know how to handle this, and return a new instance of the ``B`` class to us. Our example class is not set up to handle this, but it might well be the best approach if, e.g., one were to re-implement ``MaskedArray`` using ``__array_ufunc__``. As a final note: if the ``super`` route is suited to a given class, an advantage of using it is that it helps in constructing class hierarchies. E.g., suppose that our other class ``B`` also used the ``super`` in its ``__array_ufunc__`` implementation, and we created a class ``C`` that depended on both, i.e., ``class C(A, B)`` (with, for simplicity, not another ``__array_ufunc__`` override). Then any ufunc on an instance of ``C`` would pass on to ``A.__array_ufunc__``, the ``super`` call in ``A`` would go to ``B.__array_ufunc__``, and the ``super`` call in ``B`` would go to ``ndarray.__array_ufunc__``, thus allowing ``A`` and ``B`` to collaborate. .. _array-wrap: ``__array_wrap__`` for ufuncs and other functions ------------------------------------------------- Prior to numpy 1.13, the behaviour of ufuncs could only be tuned using ``__array_wrap__`` and ``__array_prepare__``. These two allowed one to change the output type of a ufunc, but, in contrast to ``__array_ufunc__``, did not allow one to make any changes to the inputs. It is hoped to eventually deprecate these, but ``__array_wrap__`` is also used by other numpy functions and methods, such as ``squeeze``, so at the present time is still needed for full functionality. Conceptually, ``__array_wrap__`` "wraps up the action" in the sense of allowing a subclass to set the type of the return value and update attributes and metadata. Let's show how this works with an example. First we return to the simpler example subclass, but with a different name and some print statements: .. 
testcode:: import numpy as np class MySubClass(np.ndarray): def __new__(cls, input_array, info=None): obj = np.asarray(input_array).view(cls) obj.info = info return obj def __array_finalize__(self, obj): print('In __array_finalize__:') print(' self is %s' % repr(self)) print(' obj is %s' % repr(obj)) if obj is None: return self.info = getattr(obj, 'info', None) def __array_wrap__(self, out_arr, context=None): print('In __array_wrap__:') print(' self is %s' % repr(self)) print(' arr is %s' % repr(out_arr)) # then just call the parent return super(MySubClass, self).__array_wrap__(self, out_arr, context) We run a ufunc on an instance of our new array: >>> obj = MySubClass(np.arange(5), info='spam') In __array_finalize__: self is MySubClass([0, 1, 2, 3, 4]) obj is array([0, 1, 2, 3, 4]) >>> arr2 = np.arange(5)+1 >>> ret = np.add(arr2, obj) In __array_wrap__: self is MySubClass([0, 1, 2, 3, 4]) arr is array([1, 3, 5, 7, 9]) In __array_finalize__: self is MySubClass([1, 3, 5, 7, 9]) obj is MySubClass([0, 1, 2, 3, 4]) >>> ret MySubClass([1, 3, 5, 7, 9]) >>> ret.info 'spam' Note that the ufunc (``np.add``) has called the ``__array_wrap__`` method with arguments ``self`` as ``obj``, and ``out_arr`` as the (ndarray) result of the addition. In turn, the default ``__array_wrap__`` (``ndarray.__array_wrap__``) has cast the result to class ``MySubClass``, and called ``__array_finalize__`` - hence the copying of the ``info`` attribute. This has all happened at the C level. But, we could do anything we wanted: .. testcode:: class SillySubClass(np.ndarray): def __array_wrap__(self, arr, context=None): return 'I lost your data' >>> arr1 = np.arange(5) >>> obj = arr1.view(SillySubClass) >>> arr2 = np.arange(5) >>> ret = np.multiply(obj, arr2) >>> ret 'I lost your data' So, by defining a specific ``__array_wrap__`` method for our subclass, we can tweak the output from ufuncs. The ``__array_wrap__`` method requires ``self``, then an argument - which is the result of the ufunc - and an optional parameter *context*. This parameter is returned by ufuncs as a 3-element tuple: (name of the ufunc, arguments of the ufunc, domain of the ufunc), but is not set by other numpy functions. Though, as seen above, it is possible to do otherwise, ``__array_wrap__`` should return an instance of its containing class. See the masked array subclass for an implementation. In addition to ``__array_wrap__``, which is called on the way out of the ufunc, there is also an ``__array_prepare__`` method which is called on the way into the ufunc, after the output arrays are created but before any computation has been performed. The default implementation does nothing but pass through the array. ``__array_prepare__`` should not attempt to access the array data or resize the array, it is intended for setting the output array type, updating attributes and metadata, and performing any checks based on the input that may be desired before computation begins. Like ``__array_wrap__``, ``__array_prepare__`` must return an ndarray or subclass thereof or raise an error. Extra gotchas - custom ``__del__`` methods and ndarray.base ----------------------------------------------------------- One of the problems that ndarray solves is keeping track of memory ownership of ndarrays and their views. Consider the case where we have created an ndarray, ``arr`` and have taken a slice with ``v = arr[1:]``. The two objects are looking at the same memory. 
NumPy keeps track of where the data came from for a particular array or view, with the ``base`` attribute: >>> # A normal ndarray, that owns its own data >>> arr = np.zeros((4,)) >>> # In this case, base is None >>> arr.base is None True >>> # We take a view >>> v1 = arr[1:] >>> # base now points to the array that it derived from >>> v1.base is arr True >>> # Take a view of a view >>> v2 = v1[1:] >>> # base points to the view it derived from >>> v2.base is v1 True In general, if the array owns its own memory, as for ``arr`` in this case, then ``arr.base`` will be None - there are some exceptions to this - see the numpy book for more details. The ``base`` attribute is useful in being able to tell whether we have a view or the original array. This in turn can be useful if we need to know whether or not to do some specific cleanup when the subclassed array is deleted. For example, we may only want to do the cleanup if the original array is deleted, but not the views. For an example of how this can work, have a look at the ``memmap`` class in ``numpy.core``. Subclassing and Downstream Compatibility ---------------------------------------- When sub-classing ``ndarray`` or creating duck-types that mimic the ``ndarray`` interface, it is your responsibility to decide how aligned your APIs will be with those of numpy. For convenience, many numpy functions that have a corresponding ``ndarray`` method (e.g., ``sum``, ``mean``, ``take``, ``reshape``) work by checking if the first argument to a function has a method of the same name. If it exists, the method is called instead of coercing the arguments to a numpy array. For example, if you want your sub-class or duck-type to be compatible with numpy's ``sum`` function, the method signature for this object's ``sum`` method should be the following: .. testcode:: def sum(self, axis=None, dtype=None, out=None, keepdims=False): ... This is the exact same method signature for ``np.sum``, so now if a user calls ``np.sum`` on this object, numpy will call the object's own ``sum`` method and pass in these arguments enumerated above in the signature, and no errors will be raised because the signatures are completely compatible with each other. If, however, you decide to deviate from this signature and do something like this: .. testcode:: def sum(self, axis=None, dtype=None): ... This object is no longer compatible with ``np.sum`` because if you call ``np.sum``, it will pass in unexpected arguments ``out`` and ``keepdims``, causing a TypeError to be raised. If you wish to maintain compatibility with numpy and its subsequent versions (which might add new keyword arguments) but do not want to surface all of numpy's arguments, your function's signature should accept ``**kwargs``. For example: .. testcode:: def sum(self, axis=None, dtype=None, **unused_kwargs): ... This object is now compatible with ``np.sum`` again because any extraneous arguments (i.e. keywords that are not ``axis`` or ``dtype``) will be hidden away in the ``**unused_kwargs`` parameter. """ from __future__ import division, absolute_import, print_function
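As a concrete illustration of the advice above, here is a minimal sketch of
a hypothetical duck-type (the class name and its behaviour are invented for
this example) whose ``sum`` method accepts ``**kwargs``, so that extra
keywords forwarded by ``np.sum``, such as ``out``, are tolerated rather
than causing a ``TypeError``.

.. testcode::

    import numpy as np

    class SummingDuck:
        """A hypothetical duck-type that only implements a numpy-style sum()."""

        def __init__(self, data):
            self.data = list(data)

        def sum(self, axis=None, dtype=None, **unused_kwargs):
            # Extra keywords that np.sum may forward (out=, keepdims=, ...)
            # are accepted and ignored instead of raising a TypeError.
            total = 0
            for value in self.data:
                total = total + value
            if dtype is not None:
                total = dtype(total)
            return total

    d = SummingDuck([1, 2, 3])
    print(np.sum(d))               # dispatches to SummingDuck.sum
    print(np.sum(d, dtype=float))  # dtype is forwarded as well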
from __future__ import division, absolute_import, print_function import sys import platform import pytest import numpy as np # import the c-extension module directly since _arg is not exported via umath import numpy.core._multiarray_umath as ncu from numpy.testing import ( assert_raises, assert_equal, assert_array_equal, assert_almost_equal ) # TODO: branch cuts (use Pauli code) # TODO: conj 'symmetry' # TODO: FPU exceptions # At least on Windows the results of many complex functions are not conforming # to the C99 standard. See ticket 1574. # Ditto for Solaris (ticket 1642) and OS X on PowerPC. #FIXME: this will probably change when we require full C99 campatibility with np.errstate(all='ignore'): functions_seem_flaky = ((np.exp(complex(np.inf, 0)).imag != 0) or (np.log(complex(np.NZERO, 0)).imag != np.pi)) # TODO: replace with a check on whether platform-provided C99 funcs are used xfail_complex_tests = (not sys.platform.startswith('linux') or functions_seem_flaky) # TODO This can be xfail when the generator functions are got rid of. platform_skip = pytest.mark.skipif(xfail_complex_tests, reason="Inadequate C99 complex support") class TestCexp(object): def test_simple(self): check = check_complex_value f = np.exp check(f, 1, 0, np.exp(1), 0, False) check(f, 0, 1, np.cos(1), np.sin(1), False) ref = np.exp(1) * complex(np.cos(1), np.sin(1)) check(f, 1, 1, ref.real, ref.imag, False) @platform_skip def test_special_values(self): # C99: Section G 6.3.1 check = check_complex_value f = np.exp # cexp(+-0 + 0i) is 1 + 0i check(f, np.PZERO, 0, 1, 0, False) check(f, np.NZERO, 0, 1, 0, False) # cexp(x + infi) is nan + nani for finite x and raises 'invalid' FPU # exception check(f, 1, np.inf, np.nan, np.nan) check(f, -1, np.inf, np.nan, np.nan) check(f, 0, np.inf, np.nan, np.nan) # cexp(inf + 0i) is inf + 0i check(f, np.inf, 0, np.inf, 0) # cexp(-inf + yi) is +0 * (cos(y) + i sin(y)) for finite y check(f, -np.inf, 1, np.PZERO, np.PZERO) check(f, -np.inf, 0.75 * np.pi, np.NZERO, np.PZERO) # cexp(inf + yi) is +inf * (cos(y) + i sin(y)) for finite y check(f, np.inf, 1, np.inf, np.inf) check(f, np.inf, 0.75 * np.pi, -np.inf, np.inf) # cexp(-inf + inf i) is +-0 +- 0i (signs unspecified) def _check_ninf_inf(dummy): msgform = "cexp(-inf, inf) is (%f, %f), expected (+-0, +-0)" with np.errstate(invalid='ignore'): z = f(np.array(complex(-np.inf, np.inf))) if z.real != 0 or z.imag != 0: raise AssertionError(msgform % (z.real, z.imag)) _check_ninf_inf(None) # cexp(inf + inf i) is +-inf + NaNi and raised invalid FPU ex. 
def _check_inf_inf(dummy): msgform = "cexp(inf, inf) is (%f, %f), expected (+-inf, nan)" with np.errstate(invalid='ignore'): z = f(np.array(complex(np.inf, np.inf))) if not np.isinf(z.real) or not np.isnan(z.imag): raise AssertionError(msgform % (z.real, z.imag)) _check_inf_inf(None) # cexp(-inf + nan i) is +-0 +- 0i def _check_ninf_nan(dummy): msgform = "cexp(-inf, nan) is (%f, %f), expected (+-0, +-0)" with np.errstate(invalid='ignore'): z = f(np.array(complex(-np.inf, np.nan))) if z.real != 0 or z.imag != 0: raise AssertionError(msgform % (z.real, z.imag)) _check_ninf_nan(None) # cexp(inf + nan i) is +-inf + nan def _check_inf_nan(dummy): msgform = "cexp(-inf, nan) is (%f, %f), expected (+-inf, nan)" with np.errstate(invalid='ignore'): z = f(np.array(complex(np.inf, np.nan))) if not np.isinf(z.real) or not np.isnan(z.imag): raise AssertionError(msgform % (z.real, z.imag)) _check_inf_nan(None) # cexp(nan + yi) is nan + nani for y != 0 (optional: raises invalid FPU # ex) check(f, np.nan, 1, np.nan, np.nan) check(f, np.nan, -1, np.nan, np.nan) check(f, np.nan, np.inf, np.nan, np.nan) check(f, np.nan, -np.inf, np.nan, np.nan) # cexp(nan + nani) is nan + nani check(f, np.nan, np.nan, np.nan, np.nan) # TODO This can be xfail when the generator functions are got rid of. @pytest.mark.skip(reason="cexp(nan + 0I) is wrong on most platforms") def test_special_values2(self): # XXX: most implementations get it wrong here (including glibc <= 2.10) # cexp(nan + 0i) is nan + 0i check = check_complex_value f = np.exp check(f, np.nan, 0, np.nan, 0) class TestClog(object): def test_simple(self): x = np.array([1+0j, 1+2j]) y_r = np.log(np.abs(x)) + 1j * np.angle(x) y = np.log(x) for i in range(len(x)): assert_almost_equal(y[i], y_r[i]) @platform_skip @pytest.mark.skipif(platform.machine() == "armv5tel", reason="See gh-413.") def test_special_values(self): xl = [] yl = [] # From C99 std (Sec 6.3.2) # XXX: check exceptions raised # --- raise for invalid fails. # clog(-0 + i0) returns -inf + i pi and raises the 'divide-by-zero' # floating-point exception. with np.errstate(divide='raise'): x = np.array([np.NZERO], dtype=complex) y = complex(-np.inf, np.pi) assert_raises(FloatingPointError, np.log, x) with np.errstate(divide='ignore'): assert_almost_equal(np.log(x), y) xl.append(x) yl.append(y) # clog(+0 + i0) returns -inf + i0 and raises the 'divide-by-zero' # floating-point exception. with np.errstate(divide='raise'): x = np.array([0], dtype=complex) y = complex(-np.inf, 0) assert_raises(FloatingPointError, np.log, x) with np.errstate(divide='ignore'): assert_almost_equal(np.log(x), y) xl.append(x) yl.append(y) # clog(x + i inf returns +inf + i pi /2, for finite x. x = np.array([complex(1, np.inf)], dtype=complex) y = complex(np.inf, 0.5 * np.pi) assert_almost_equal(np.log(x), y) xl.append(x) yl.append(y) x = np.array([complex(-1, np.inf)], dtype=complex) assert_almost_equal(np.log(x), y) xl.append(x) yl.append(y) # clog(x + iNaN) returns NaN + iNaN and optionally raises the # 'invalid' floating- point exception, for finite x. 
with np.errstate(invalid='raise'): x = np.array([complex(1., np.nan)], dtype=complex) y = complex(np.nan, np.nan) #assert_raises(FloatingPointError, np.log, x) with np.errstate(invalid='ignore'): assert_almost_equal(np.log(x), y) xl.append(x) yl.append(y) with np.errstate(invalid='raise'): x = np.array([np.inf + 1j * np.nan], dtype=complex) #assert_raises(FloatingPointError, np.log, x) with np.errstate(invalid='ignore'): assert_almost_equal(np.log(x), y) xl.append(x) yl.append(y) # clog(- inf + iy) returns +inf + ipi , for finite positive-signed y. x = np.array([-np.inf + 1j], dtype=complex) y = complex(np.inf, np.pi) assert_almost_equal(np.log(x), y) xl.append(x) yl.append(y) # clog(+ inf + iy) returns +inf + i0, for finite positive-signed y. x = np.array([np.inf + 1j], dtype=complex) y = complex(np.inf, 0) assert_almost_equal(np.log(x), y) xl.append(x) yl.append(y) # clog(- inf + i inf) returns +inf + i3pi /4. x = np.array([complex(-np.inf, np.inf)], dtype=complex) y = complex(np.inf, 0.75 * np.pi) assert_almost_equal(np.log(x), y) xl.append(x) yl.append(y) # clog(+ inf + i inf) returns +inf + ipi /4. x = np.array([complex(np.inf, np.inf)], dtype=complex) y = complex(np.inf, 0.25 * np.pi) assert_almost_equal(np.log(x), y) xl.append(x) yl.append(y) # clog(+/- inf + iNaN) returns +inf + iNaN. x = np.array([complex(np.inf, np.nan)], dtype=complex) y = complex(np.inf, np.nan) assert_almost_equal(np.log(x), y) xl.append(x) yl.append(y) x = np.array([complex(-np.inf, np.nan)], dtype=complex) assert_almost_equal(np.log(x), y) xl.append(x) yl.append(y) # clog(NaN + iy) returns NaN + iNaN and optionally raises the # 'invalid' floating-point exception, for finite y. x = np.array([complex(np.nan, 1)], dtype=complex) y = complex(np.nan, np.nan) assert_almost_equal(np.log(x), y) xl.append(x) yl.append(y) # clog(NaN + i inf) returns +inf + iNaN. x = np.array([complex(np.nan, np.inf)], dtype=complex) y = complex(np.inf, np.nan) assert_almost_equal(np.log(x), y) xl.append(x) yl.append(y) # clog(NaN + iNaN) returns NaN + iNaN. x = np.array([complex(np.nan, np.nan)], dtype=complex) y = complex(np.nan, np.nan) assert_almost_equal(np.log(x), y) xl.append(x) yl.append(y) # clog(conj(z)) = conj(clog(z)). 
xa = np.array(xl, dtype=complex) ya = np.array(yl, dtype=complex) with np.errstate(divide='ignore'): for i in range(len(xa)): assert_almost_equal(np.log(xa[i].conj()), ya[i].conj()) class TestCsqrt(object): def test_simple(self): # sqrt(1) check_complex_value(np.sqrt, 1, 0, 1, 0) # sqrt(1i) rres = 0.5*np.sqrt(2) ires = rres check_complex_value(np.sqrt, 0, 1, rres, ires, False) # sqrt(-1) check_complex_value(np.sqrt, -1, 0, 0, 1) def test_simple_conjugate(self): ref = np.conj(np.sqrt(complex(1, 1))) def f(z): return np.sqrt(np.conj(z)) check_complex_value(f, 1, 1, ref.real, ref.imag, False) #def test_branch_cut(self): # _check_branch_cut(f, -1, 0, 1, -1) @platform_skip def test_special_values(self): # C99: Sec G 6.4.2 check = check_complex_value f = np.sqrt # csqrt(+-0 + 0i) is 0 + 0i check(f, np.PZERO, 0, 0, 0) check(f, np.NZERO, 0, 0, 0) # csqrt(x + infi) is inf + infi for any x (including NaN) check(f, 1, np.inf, np.inf, np.inf) check(f, -1, np.inf, np.inf, np.inf) check(f, np.PZERO, np.inf, np.inf, np.inf) check(f, np.NZERO, np.inf, np.inf, np.inf) check(f, np.inf, np.inf, np.inf, np.inf) check(f, -np.inf, np.inf, np.inf, np.inf) check(f, -np.nan, np.inf, np.inf, np.inf) # csqrt(x + nani) is nan + nani for any finite x check(f, 1, np.nan, np.nan, np.nan) check(f, -1, np.nan, np.nan, np.nan) check(f, 0, np.nan, np.nan, np.nan) # csqrt(-inf + yi) is +0 + infi for any finite y > 0 check(f, -np.inf, 1, np.PZERO, np.inf) # csqrt(inf + yi) is +inf + 0i for any finite y > 0 check(f, np.inf, 1, np.inf, np.PZERO) # csqrt(-inf + nani) is nan +- infi (both +i infi are valid) def _check_ninf_nan(dummy): msgform = "csqrt(-inf, nan) is (%f, %f), expected (nan, +-inf)" z = np.sqrt(np.array(complex(-np.inf, np.nan))) #Fixme: ugly workaround for isinf bug. with np.errstate(invalid='ignore'): if not (np.isnan(z.real) and np.isinf(z.imag)): raise AssertionError(msgform % (z.real, z.imag)) _check_ninf_nan(None) # csqrt(+inf + nani) is inf + nani check(f, np.inf, np.nan, np.inf, np.nan) # csqrt(nan + yi) is nan + nani for any finite y (infinite handled in x # + nani) check(f, np.nan, 0, np.nan, np.nan) check(f, np.nan, 1, np.nan, np.nan) check(f, np.nan, np.nan, np.nan, np.nan) # XXX: check for conj(csqrt(z)) == csqrt(conj(z)) (need to fix branch # cuts first) class TestCpow(object): def setup(self): self.olderr = np.seterr(invalid='ignore') def teardown(self): np.seterr(**self.olderr) def test_simple(self): x = np.array([1+1j, 0+2j, 1+2j, np.inf, np.nan]) y_r = x ** 2 y = np.power(x, 2) for i in range(len(x)): assert_almost_equal(y[i], y_r[i]) def test_scalar(self): x = np.array([1, 1j, 2, 2.5+.37j, np.inf, np.nan]) y = np.array([1, 1j, -0.5+1.5j, -0.5+1.5j, 2, 3]) lx = list(range(len(x))) # Compute the values for complex type in python p_r = [complex(x[i]) ** complex(y[i]) for i in lx] # Substitute a result allowed by C99 standard p_r[4] = complex(np.inf, np.nan) # Do the same with numpy complex scalars n_r = [x[i] ** y[i] for i in lx] for i in lx: assert_almost_equal(n_r[i], p_r[i], err_msg='Loop %d\n' % i) def test_array(self): x = np.array([1, 1j, 2, 2.5+.37j, np.inf, np.nan]) y = np.array([1, 1j, -0.5+1.5j, -0.5+1.5j, 2, 3]) lx = list(range(len(x))) # Compute the values for complex type in python p_r = [complex(x[i]) ** complex(y[i]) for i in lx] # Substitute a result allowed by C99 standard p_r[4] = complex(np.inf, np.nan) # Do the same with numpy arrays n_r = x ** y for i in lx: assert_almost_equal(n_r[i], p_r[i], err_msg='Loop %d\n' % i) class TestCabs(object): def setup(self): self.olderr = 
np.seterr(invalid='ignore') def teardown(self): np.seterr(**self.olderr) def test_simple(self): x = np.array([1+1j, 0+2j, 1+2j, np.inf, np.nan]) y_r = np.array([np.sqrt(2.), 2, np.sqrt(5), np.inf, np.nan]) y = np.abs(x) for i in range(len(x)): assert_almost_equal(y[i], y_r[i]) def test_fabs(self): # Test that np.abs(x +- 0j) == np.abs(x) (as mandated by C99 for cabs) x = np.array([1+0j], dtype=complex) assert_array_equal(np.abs(x), np.real(x)) x = np.array([complex(1, np.NZERO)], dtype=complex) assert_array_equal(np.abs(x), np.real(x)) x = np.array([complex(np.inf, np.NZERO)], dtype=complex) assert_array_equal(np.abs(x), np.real(x)) x = np.array([complex(np.nan, np.NZERO)], dtype=complex) assert_array_equal(np.abs(x), np.real(x)) def test_cabs_inf_nan(self): x, y = [], [] # cabs(+-nan + nani) returns nan x.append(np.nan) y.append(np.nan) check_real_value(np.abs, np.nan, np.nan, np.nan) x.append(np.nan) y.append(-np.nan) check_real_value(np.abs, -np.nan, np.nan, np.nan) # According to C99 standard, if exactly one of the real/part is inf and # the other nan, then cabs should return inf x.append(np.inf) y.append(np.nan) check_real_value(np.abs, np.inf, np.nan, np.inf) x.append(-np.inf) y.append(np.nan) check_real_value(np.abs, -np.inf, np.nan, np.inf) # cabs(conj(z)) == conj(cabs(z)) (= cabs(z)) def f(a): return np.abs(np.conj(a)) def g(a, b): return np.abs(complex(a, b)) xa = np.array(x, dtype=complex) for i in range(len(xa)): ref = g(x[i], y[i]) check_real_value(f, x[i], y[i], ref) class TestCarg(object): def test_simple(self): check_real_value(ncu._arg, 1, 0, 0, False) check_real_value(ncu._arg, 0, 1, 0.5*np.pi, False) check_real_value(ncu._arg, 1, 1, 0.25*np.pi, False) check_real_value(ncu._arg, np.PZERO, np.PZERO, np.PZERO) # TODO This can be xfail when the generator functions are got rid of. 
@pytest.mark.skip( reason="Complex arithmetic with signed zero fails on most platforms") def test_zero(self): # carg(-0 +- 0i) returns +- pi check_real_value(ncu._arg, np.NZERO, np.PZERO, np.pi, False) check_real_value(ncu._arg, np.NZERO, np.NZERO, -np.pi, False) # carg(+0 +- 0i) returns +- 0 check_real_value(ncu._arg, np.PZERO, np.PZERO, np.PZERO) check_real_value(ncu._arg, np.PZERO, np.NZERO, np.NZERO) # carg(x +- 0i) returns +- 0 for x > 0 check_real_value(ncu._arg, 1, np.PZERO, np.PZERO, False) check_real_value(ncu._arg, 1, np.NZERO, np.NZERO, False) # carg(x +- 0i) returns +- pi for x < 0 check_real_value(ncu._arg, -1, np.PZERO, np.pi, False) check_real_value(ncu._arg, -1, np.NZERO, -np.pi, False) # carg(+- 0 + yi) returns pi/2 for y > 0 check_real_value(ncu._arg, np.PZERO, 1, 0.5 * np.pi, False) check_real_value(ncu._arg, np.NZERO, 1, 0.5 * np.pi, False) # carg(+- 0 + yi) returns -pi/2 for y < 0 check_real_value(ncu._arg, np.PZERO, -1, 0.5 * np.pi, False) check_real_value(ncu._arg, np.NZERO, -1, -0.5 * np.pi, False) #def test_branch_cuts(self): # _check_branch_cut(ncu._arg, -1, 1j, -1, 1) def test_special_values(self): # carg(-np.inf +- yi) returns +-pi for finite y > 0 check_real_value(ncu._arg, -np.inf, 1, np.pi, False) check_real_value(ncu._arg, -np.inf, -1, -np.pi, False) # carg(np.inf +- yi) returns +-0 for finite y > 0 check_real_value(ncu._arg, np.inf, 1, np.PZERO, False) check_real_value(ncu._arg, np.inf, -1, np.NZERO, False) # carg(x +- np.infi) returns +-pi/2 for finite x check_real_value(ncu._arg, 1, np.inf, 0.5 * np.pi, False) check_real_value(ncu._arg, 1, -np.inf, -0.5 * np.pi, False) # carg(-np.inf +- np.infi) returns +-3pi/4 check_real_value(ncu._arg, -np.inf, np.inf, 0.75 * np.pi, False) check_real_value(ncu._arg, -np.inf, -np.inf, -0.75 * np.pi, False) # carg(np.inf +- np.infi) returns +-pi/4 check_real_value(ncu._arg, np.inf, np.inf, 0.25 * np.pi, False) check_real_value(ncu._arg, np.inf, -np.inf, -0.25 * np.pi, False) # carg(x + yi) returns np.nan if x or y is nan check_real_value(ncu._arg, np.nan, 0, np.nan, False) check_real_value(ncu._arg, 0, np.nan, np.nan, False) check_real_value(ncu._arg, np.nan, np.inf, np.nan, False) check_real_value(ncu._arg, np.inf, np.nan, np.nan, False) def check_real_value(f, x1, y1, x, exact=True): z1 = np.array([complex(x1, y1)]) if exact: assert_equal(f(z1), x) else: assert_almost_equal(f(z1), x) def check_complex_value(f, x1, y1, x2, y2, exact=True): z1 = np.array([complex(x1, y1)]) z2 = complex(x2, y2) with np.errstate(invalid='ignore'): if exact: assert_equal(f(z1), z2) else: assert_almost_equal(f(z1), z2)
pizzathief/numpy
numpy/core/tests/test_umath_complex.py
numpy/doc/subclassing.py
""" Basic functions used by several sub-packages and useful to have in the main name-space. Type Handling ------------- ================ =================== iscomplexobj Test for complex object, scalar result isrealobj Test for real object, scalar result iscomplex Test for complex elements, array result isreal Test for real elements, array result imag Imaginary part real Real part real_if_close Turns complex number with tiny imaginary part to real isneginf Tests for negative infinity, array result isposinf Tests for positive infinity, array result isnan Tests for nans, array result isinf Tests for infinity, array result isfinite Tests for finite numbers, array result isscalar True if argument is a scalar nan_to_num Replaces NaN's with 0 and infinities with large numbers cast Dictionary of functions to force cast to each type common_type Determine the minimum common type code for a group of arrays mintypecode Return minimal allowed common typecode. ================ =================== Index Tricks ------------ ================ =================== mgrid Method which allows easy construction of N-d 'mesh-grids' ``r_`` Append and construct arrays: turns slice objects into ranges and concatenates them, for 2d arrays appends rows. index_exp Konrad Hinsen's index_expression class instance which can be useful for building complicated slicing syntax. ================ =================== Useful Functions ---------------- ================ =================== select Extension of where to multiple conditions and choices extract Extract 1d array from flattened array according to mask insert Insert 1d array of values into Nd array according to mask linspace Evenly spaced samples in linear space logspace Evenly spaced samples in logarithmic space fix Round x to nearest integer towards zero mod Modulo mod(x,y) = x % y except keeps sign of y amax Array maximum along axis amin Array minimum along axis ptp Array max-min along axis cumsum Cumulative sum along axis prod Product of elements along axis cumprod Cumluative product along axis diff Discrete differences along axis angle Returns angle of complex argument unwrap Unwrap phase along given axis (1-d algorithm) sort_complex Sort a complex-array (based on real, then imaginary) trim_zeros Trim the leading and trailing zeros from 1D array. vectorize A class that wraps a Python function taking scalar arguments into a generalized function which can handle arrays of arguments using the broadcast rules of numerix Python. ================ =================== Shape Manipulation ------------------ ================ =================== squeeze Return a with length-one dimensions removed. atleast_1d Force arrays to be >= 1D atleast_2d Force arrays to be >= 2D atleast_3d Force arrays to be >= 3D vstack Stack arrays vertically (row on row) hstack Stack arrays horizontally (column on column) column_stack Stack 1D arrays as columns into 2D array dstack Stack arrays depthwise (along third dimension) stack Stack arrays along a new axis split Divide array into a list of sub-arrays hsplit Split into columns vsplit Split into rows dsplit Split along third dimension ================ =================== Matrix (2D Array) Manipulations ------------------------------- ================ =================== fliplr 2D array with columns flipped flipud 2D array with rows flipped rot90 Rotate a 2D array a multiple of 90 degrees eye Return a 2D array with ones down a given diagonal diag Construct a 2D array from a vector, or return a given diagonal from a 2D array. 
mat Construct a Matrix bmat Build a Matrix from blocks ================ =================== Polynomials ----------- ================ =================== poly1d A one-dimensional polynomial class poly Return polynomial coefficients from roots roots Find roots of polynomial given coefficients polyint Integrate polynomial polyder Differentiate polynomial polyadd Add polynomials polysub Subtract polynomials polymul Multiply polynomials polydiv Divide polynomials polyval Evaluate polynomial at given argument ================ =================== Iterators --------- ================ =================== Arrayterator A buffered iterator for big arrays. ================ =================== Import Tricks ------------- ================ =================== ppimport Postpone module import until trying to use it ppimport_attr Postpone module import until trying to use its attribute ppresolve Import postponed module and return it. ================ =================== Machine Arithmetics ------------------- ================ =================== machar_single Single precision floating point arithmetic parameters machar_double Double precision floating point arithmetic parameters ================ =================== Threading Tricks ---------------- ================ =================== ParallelExec Execute commands in parallel thread. ================ =================== Array Set Operations ----------------------- Set operations for numeric arrays based on sort() function. ================ =================== unique Unique elements of an array. isin Test whether each element of an ND array is present anywhere within a second array. ediff1d Array difference (auxiliary function). intersect1d Intersection of 1D arrays with unique elements. setxor1d Set exclusive-or of 1D arrays with unique elements. in1d Test whether elements in a 1D array are also present in another array. union1d Union of 1D arrays with unique elements. setdiff1d Set difference of 1D arrays with unique elements. ================ =================== """ from __future__ import division, absolute_import, print_function depends = ['core', 'testing'] global_symbols = ['*']
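# A short orientation sketch (not part of the original module): a few of the
# helpers listed in the docstring above, called with their current numpy
# spellings, to show what the "Type Handling", "Useful Functions" and
# "Index Tricks" groups refer to.
import numpy as np

x = np.linspace(0.0, 1.0, 5)                  # evenly spaced samples in linear space
y = np.select([x < 0.3, x > 0.7], [x, 1 - x], default=0.5)  # multi-condition where

z = np.array([3.0 + 0.0j, 1.0 - 2.0j])
print(np.iscomplexobj(z))                     # True: test for a complex object
print(np.real_if_close(z + z.conj()))         # tiny imaginary parts dropped

@np.vectorize
def step(value):                              # wrap a scalar function for array input
    return 1 if value > 0.5 else 0

print(step(x))

print(np.r_[0:3, 5, 9])                       # index trick: slice turned into a range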
pizzathief/numpy
numpy/core/tests/test_umath_complex.py
numpy/lib/info.py
#!/usr/bin/env python """ C declarations, CPP macros, and C functions for f2py2e. Only required declarations/macros/functions will be used. Copyright 1999,2000 Pearu Peterson all rights reserved, Pearu Peterson <pearu@ioc.ee> Permission to use, modify, and distribute this software is given under the terms of the NumPy License. NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. $Date: 2005/05/06 11:42:34 $ Pearu Peterson """ from __future__ import division, absolute_import, print_function import sys import copy from . import __version__ f2py_version = __version__.version errmess = sys.stderr.write ##################### Definitions ################## outneeds = {'includes0': [], 'includes': [], 'typedefs': [], 'typedefs_generated': [], 'userincludes': [], 'cppmacros': [], 'cfuncs': [], 'callbacks': [], 'f90modhooks': [], 'commonhooks': []} needs = {} includes0 = {'includes0': '/*need_includes0*/'} includes = {'includes': '/*need_includes*/'} userincludes = {'userincludes': '/*need_userincludes*/'} typedefs = {'typedefs': '/*need_typedefs*/'} typedefs_generated = {'typedefs_generated': '/*need_typedefs_generated*/'} cppmacros = {'cppmacros': '/*need_cppmacros*/'} cfuncs = {'cfuncs': '/*need_cfuncs*/'} callbacks = {'callbacks': '/*need_callbacks*/'} f90modhooks = {'f90modhooks': '/*need_f90modhooks*/', 'initf90modhooksstatic': '/*initf90modhooksstatic*/', 'initf90modhooksdynamic': '/*initf90modhooksdynamic*/', } commonhooks = {'commonhooks': '/*need_commonhooks*/', 'initcommonhooks': '/*need_initcommonhooks*/', } ############ Includes ################### includes0['math.h'] = '#include <math.h>' includes0['string.h'] = '#include <string.h>' includes0['setjmp.h'] = '#include <setjmp.h>' includes['Python.h'] = '#include "Python.h"' needs['arrayobject.h'] = ['Python.h'] includes['arrayobject.h'] = '''#define PY_ARRAY_UNIQUE_SYMBOL PyArray_API #include "arrayobject.h"''' includes['arrayobject.h'] = '#include "fortranobject.h"' includes['stdarg.h'] = '#include <stdarg.h>' ############# Type definitions ############### typedefs['unsigned_char'] = 'typedef unsigned char unsigned_char;' typedefs['unsigned_short'] = 'typedef unsigned short unsigned_short;' typedefs['unsigned_long'] = 'typedef unsigned long unsigned_long;' typedefs['signed_char'] = 'typedef signed char signed_char;' typedefs['long_long'] = """\ #ifdef _WIN32 typedef __int64 long_long; #else typedef long long long_long; typedef unsigned long long unsigned_long_long; #endif """ typedefs['unsigned_long_long'] = """\ #ifdef _WIN32 typedef __uint64 long_long; #else typedef unsigned long long unsigned_long_long; #endif """ typedefs['long_double'] = """\ #ifndef _LONG_DOUBLE typedef long double long_double; #endif """ typedefs[ 'complex_long_double'] = 'typedef struct {long double r,i;} complex_long_double;' typedefs['complex_float'] = 'typedef struct {float r,i;} complex_float;' typedefs['complex_double'] = 'typedef struct {double r,i;} complex_double;' typedefs['string'] = """typedef char * string;""" ############### CPP macros #################### cppmacros['CFUNCSMESS'] = """\ #ifdef DEBUGCFUNCS #define CFUNCSMESS(mess) fprintf(stderr,\"debug-capi:\"mess); #define CFUNCSMESSPY(mess,obj) CFUNCSMESS(mess) \\ PyObject_Print((PyObject *)obj,stderr,Py_PRINT_RAW);\\ fprintf(stderr,\"\\n\"); #else #define CFUNCSMESS(mess) #define CFUNCSMESSPY(mess,obj) #endif """ cppmacros['F_FUNC'] = """\ #if defined(PREPEND_FORTRAN) #if defined(NO_APPEND_FORTRAN) #if defined(UPPERCASE_FORTRAN) #define F_FUNC(f,F) _##F #else #define F_FUNC(f,F) _##f 
#endif #else #if defined(UPPERCASE_FORTRAN) #define F_FUNC(f,F) _##F##_ #else #define F_FUNC(f,F) _##f##_ #endif #endif #else #if defined(NO_APPEND_FORTRAN) #if defined(UPPERCASE_FORTRAN) #define F_FUNC(f,F) F #else #define F_FUNC(f,F) f #endif #else #if defined(UPPERCASE_FORTRAN) #define F_FUNC(f,F) F##_ #else #define F_FUNC(f,F) f##_ #endif #endif #endif #if defined(UNDERSCORE_G77) #define F_FUNC_US(f,F) F_FUNC(f##_,F##_) #else #define F_FUNC_US(f,F) F_FUNC(f,F) #endif """ cppmacros['F_WRAPPEDFUNC'] = """\ #if defined(PREPEND_FORTRAN) #if defined(NO_APPEND_FORTRAN) #if defined(UPPERCASE_FORTRAN) #define F_WRAPPEDFUNC(f,F) _F2PYWRAP##F #else #define F_WRAPPEDFUNC(f,F) _f2pywrap##f #endif #else #if defined(UPPERCASE_FORTRAN) #define F_WRAPPEDFUNC(f,F) _F2PYWRAP##F##_ #else #define F_WRAPPEDFUNC(f,F) _f2pywrap##f##_ #endif #endif #else #if defined(NO_APPEND_FORTRAN) #if defined(UPPERCASE_FORTRAN) #define F_WRAPPEDFUNC(f,F) F2PYWRAP##F #else #define F_WRAPPEDFUNC(f,F) f2pywrap##f #endif #else #if defined(UPPERCASE_FORTRAN) #define F_WRAPPEDFUNC(f,F) F2PYWRAP##F##_ #else #define F_WRAPPEDFUNC(f,F) f2pywrap##f##_ #endif #endif #endif #if defined(UNDERSCORE_G77) #define F_WRAPPEDFUNC_US(f,F) F_WRAPPEDFUNC(f##_,F##_) #else #define F_WRAPPEDFUNC_US(f,F) F_WRAPPEDFUNC(f,F) #endif """ cppmacros['F_MODFUNC'] = """\ #if defined(F90MOD2CCONV1) /*E.g. Compaq Fortran */ #if defined(NO_APPEND_FORTRAN) #define F_MODFUNCNAME(m,f) $ ## m ## $ ## f #else #define F_MODFUNCNAME(m,f) $ ## m ## $ ## f ## _ #endif #endif #if defined(F90MOD2CCONV2) /*E.g. IBM XL Fortran, not tested though */ #if defined(NO_APPEND_FORTRAN) #define F_MODFUNCNAME(m,f) __ ## m ## _MOD_ ## f #else #define F_MODFUNCNAME(m,f) __ ## m ## _MOD_ ## f ## _ #endif #endif #if defined(F90MOD2CCONV3) /*E.g. MIPSPro Compilers */ #if defined(NO_APPEND_FORTRAN) #define F_MODFUNCNAME(m,f) f ## .in. ## m #else #define F_MODFUNCNAME(m,f) f ## .in. ## m ## _ #endif #endif /* #if defined(UPPERCASE_FORTRAN) #define F_MODFUNC(m,M,f,F) F_MODFUNCNAME(M,F) #else #define F_MODFUNC(m,M,f,F) F_MODFUNCNAME(m,f) #endif */ #define F_MODFUNC(m,f) (*(f2pymodstruct##m##.##f)) """ cppmacros['SWAPUNSAFE'] = """\ #define SWAP(a,b) (size_t)(a) = ((size_t)(a) ^ (size_t)(b));\\ (size_t)(b) = ((size_t)(a) ^ (size_t)(b));\\ (size_t)(a) = ((size_t)(a) ^ (size_t)(b)) """ cppmacros['SWAP'] = """\ #define SWAP(a,b,t) {\\ t *c;\\ c = a;\\ a = b;\\ b = c;} """ # cppmacros['ISCONTIGUOUS']='#define ISCONTIGUOUS(m) (PyArray_FLAGS(m) & # NPY_ARRAY_C_CONTIGUOUS)' cppmacros['PRINTPYOBJERR'] = """\ #define PRINTPYOBJERR(obj)\\ fprintf(stderr,\"#modulename#.error is related to \");\\ PyObject_Print((PyObject *)obj,stderr,Py_PRINT_RAW);\\ fprintf(stderr,\"\\n\"); """ cppmacros['MINMAX'] = """\ #ifndef max #define max(a,b) ((a > b) ? (a) : (b)) #endif #ifndef min #define min(a,b) ((a < b) ? (a) : (b)) #endif #ifndef MAX #define MAX(a,b) ((a > b) ? (a) : (b)) #endif #ifndef MIN #define MIN(a,b) ((a < b) ? 
(a) : (b)) #endif """ needs['len..'] = ['f2py_size'] cppmacros['len..'] = """\ #define rank(var) var ## _Rank #define shape(var,dim) var ## _Dims[dim] #define old_rank(var) (PyArray_NDIM((PyArrayObject *)(capi_ ## var ## _tmp))) #define old_shape(var,dim) PyArray_DIM(((PyArrayObject *)(capi_ ## var ## _tmp)),dim) #define fshape(var,dim) shape(var,rank(var)-dim-1) #define len(var) shape(var,0) #define flen(var) fshape(var,0) #define old_size(var) PyArray_SIZE((PyArrayObject *)(capi_ ## var ## _tmp)) /* #define index(i) capi_i ## i */ #define slen(var) capi_ ## var ## _len #define size(var, ...) f2py_size((PyArrayObject *)(capi_ ## var ## _tmp), ## __VA_ARGS__, -1) """ needs['f2py_size'] = ['stdarg.h'] cfuncs['f2py_size'] = """\ static int f2py_size(PyArrayObject* var, ...) { npy_int sz = 0; npy_int dim; npy_int rank; va_list argp; va_start(argp, var); dim = va_arg(argp, npy_int); if (dim==-1) { sz = PyArray_SIZE(var); } else { rank = PyArray_NDIM(var); if (dim>=1 && dim<=rank) sz = PyArray_DIM(var, dim-1); else fprintf(stderr, \"f2py_size: 2nd argument value=%d fails to satisfy 1<=value<=%d. Result will be 0.\\n\", dim, rank); } va_end(argp); return sz; } """ cppmacros[ 'pyobj_from_char1'] = '#define pyobj_from_char1(v) (PyInt_FromLong(v))' cppmacros[ 'pyobj_from_short1'] = '#define pyobj_from_short1(v) (PyInt_FromLong(v))' needs['pyobj_from_int1'] = ['signed_char'] cppmacros['pyobj_from_int1'] = '#define pyobj_from_int1(v) (PyInt_FromLong(v))' cppmacros[ 'pyobj_from_long1'] = '#define pyobj_from_long1(v) (PyLong_FromLong(v))' needs['pyobj_from_long_long1'] = ['long_long'] cppmacros['pyobj_from_long_long1'] = """\ #ifdef HAVE_LONG_LONG #define pyobj_from_long_long1(v) (PyLong_FromLongLong(v)) #else #warning HAVE_LONG_LONG is not available. Redefining pyobj_from_long_long. 
#define pyobj_from_long_long1(v) (PyLong_FromLong(v)) #endif """ needs['pyobj_from_long_double1'] = ['long_double'] cppmacros[ 'pyobj_from_long_double1'] = '#define pyobj_from_long_double1(v) (PyFloat_FromDouble(v))' cppmacros[ 'pyobj_from_double1'] = '#define pyobj_from_double1(v) (PyFloat_FromDouble(v))' cppmacros[ 'pyobj_from_float1'] = '#define pyobj_from_float1(v) (PyFloat_FromDouble(v))' needs['pyobj_from_complex_long_double1'] = ['complex_long_double'] cppmacros[ 'pyobj_from_complex_long_double1'] = '#define pyobj_from_complex_long_double1(v) (PyComplex_FromDoubles(v.r,v.i))' needs['pyobj_from_complex_double1'] = ['complex_double'] cppmacros[ 'pyobj_from_complex_double1'] = '#define pyobj_from_complex_double1(v) (PyComplex_FromDoubles(v.r,v.i))' needs['pyobj_from_complex_float1'] = ['complex_float'] cppmacros[ 'pyobj_from_complex_float1'] = '#define pyobj_from_complex_float1(v) (PyComplex_FromDoubles(v.r,v.i))' needs['pyobj_from_string1'] = ['string'] cppmacros[ 'pyobj_from_string1'] = '#define pyobj_from_string1(v) (PyString_FromString((char *)v))' needs['pyobj_from_string1size'] = ['string'] cppmacros[ 'pyobj_from_string1size'] = '#define pyobj_from_string1size(v,len) (PyUString_FromStringAndSize((char *)v, len))' needs['TRYPYARRAYTEMPLATE'] = ['PRINTPYOBJERR'] cppmacros['TRYPYARRAYTEMPLATE'] = """\ /* New SciPy */ #define TRYPYARRAYTEMPLATECHAR case NPY_STRING: *(char *)(PyArray_DATA(arr))=*v; break; #define TRYPYARRAYTEMPLATELONG case NPY_LONG: *(long *)(PyArray_DATA(arr))=*v; break; #define TRYPYARRAYTEMPLATEOBJECT case NPY_OBJECT: PyArray_SETITEM(arr,PyArray_DATA(arr),pyobj_from_ ## ctype ## 1(*v)); break; #define TRYPYARRAYTEMPLATE(ctype,typecode) \\ PyArrayObject *arr = NULL;\\ if (!obj) return -2;\\ if (!PyArray_Check(obj)) return -1;\\ if (!(arr=(PyArrayObject *)obj)) {fprintf(stderr,\"TRYPYARRAYTEMPLATE:\");PRINTPYOBJERR(obj);return 0;}\\ if (PyArray_DESCR(arr)->type==typecode) {*(ctype *)(PyArray_DATA(arr))=*v; return 1;}\\ switch (PyArray_TYPE(arr)) {\\ case NPY_DOUBLE: *(double *)(PyArray_DATA(arr))=*v; break;\\ case NPY_INT: *(int *)(PyArray_DATA(arr))=*v; break;\\ case NPY_LONG: *(long *)(PyArray_DATA(arr))=*v; break;\\ case NPY_FLOAT: *(float *)(PyArray_DATA(arr))=*v; break;\\ case NPY_CDOUBLE: *(double *)(PyArray_DATA(arr))=*v; break;\\ case NPY_CFLOAT: *(float *)(PyArray_DATA(arr))=*v; break;\\ case NPY_BOOL: *(npy_bool *)(PyArray_DATA(arr))=(*v!=0); break;\\ case NPY_UBYTE: *(unsigned char *)(PyArray_DATA(arr))=*v; break;\\ case NPY_BYTE: *(signed char *)(PyArray_DATA(arr))=*v; break;\\ case NPY_SHORT: *(short *)(PyArray_DATA(arr))=*v; break;\\ case NPY_USHORT: *(npy_ushort *)(PyArray_DATA(arr))=*v; break;\\ case NPY_UINT: *(npy_uint *)(PyArray_DATA(arr))=*v; break;\\ case NPY_ULONG: *(npy_ulong *)(PyArray_DATA(arr))=*v; break;\\ case NPY_LONGLONG: *(npy_longlong *)(PyArray_DATA(arr))=*v; break;\\ case NPY_ULONGLONG: *(npy_ulonglong *)(PyArray_DATA(arr))=*v; break;\\ case NPY_LONGDOUBLE: *(npy_longdouble *)(PyArray_DATA(arr))=*v; break;\\ case NPY_CLONGDOUBLE: *(npy_longdouble *)(PyArray_DATA(arr))=*v; break;\\ case NPY_OBJECT: PyArray_SETITEM(arr, PyArray_DATA(arr), pyobj_from_ ## ctype ## 1(*v)); break;\\ default: return -2;\\ };\\ return 1 """ needs['TRYCOMPLEXPYARRAYTEMPLATE'] = ['PRINTPYOBJERR'] cppmacros['TRYCOMPLEXPYARRAYTEMPLATE'] = """\ #define TRYCOMPLEXPYARRAYTEMPLATEOBJECT case NPY_OBJECT: PyArray_SETITEM(arr, PyArray_DATA(arr), pyobj_from_complex_ ## ctype ## 1((*v))); break; #define TRYCOMPLEXPYARRAYTEMPLATE(ctype,typecode)\\ PyArrayObject *arr = 
NULL;\\ if (!obj) return -2;\\ if (!PyArray_Check(obj)) return -1;\\ if (!(arr=(PyArrayObject *)obj)) {fprintf(stderr,\"TRYCOMPLEXPYARRAYTEMPLATE:\");PRINTPYOBJERR(obj);return 0;}\\ if (PyArray_DESCR(arr)->type==typecode) {\\ *(ctype *)(PyArray_DATA(arr))=(*v).r;\\ *(ctype *)(PyArray_DATA(arr)+sizeof(ctype))=(*v).i;\\ return 1;\\ }\\ switch (PyArray_TYPE(arr)) {\\ case NPY_CDOUBLE: *(double *)(PyArray_DATA(arr))=(*v).r;*(double *)(PyArray_DATA(arr)+sizeof(double))=(*v).i;break;\\ case NPY_CFLOAT: *(float *)(PyArray_DATA(arr))=(*v).r;*(float *)(PyArray_DATA(arr)+sizeof(float))=(*v).i;break;\\ case NPY_DOUBLE: *(double *)(PyArray_DATA(arr))=(*v).r; break;\\ case NPY_LONG: *(long *)(PyArray_DATA(arr))=(*v).r; break;\\ case NPY_FLOAT: *(float *)(PyArray_DATA(arr))=(*v).r; break;\\ case NPY_INT: *(int *)(PyArray_DATA(arr))=(*v).r; break;\\ case NPY_SHORT: *(short *)(PyArray_DATA(arr))=(*v).r; break;\\ case NPY_UBYTE: *(unsigned char *)(PyArray_DATA(arr))=(*v).r; break;\\ case NPY_BYTE: *(signed char *)(PyArray_DATA(arr))=(*v).r; break;\\ case NPY_BOOL: *(npy_bool *)(PyArray_DATA(arr))=((*v).r!=0 && (*v).i!=0); break;\\ case NPY_USHORT: *(npy_ushort *)(PyArray_DATA(arr))=(*v).r; break;\\ case NPY_UINT: *(npy_uint *)(PyArray_DATA(arr))=(*v).r; break;\\ case NPY_ULONG: *(npy_ulong *)(PyArray_DATA(arr))=(*v).r; break;\\ case NPY_LONGLONG: *(npy_longlong *)(PyArray_DATA(arr))=(*v).r; break;\\ case NPY_ULONGLONG: *(npy_ulonglong *)(PyArray_DATA(arr))=(*v).r; break;\\ case NPY_LONGDOUBLE: *(npy_longdouble *)(PyArray_DATA(arr))=(*v).r; break;\\ case NPY_CLONGDOUBLE: *(npy_longdouble *)(PyArray_DATA(arr))=(*v).r;*(npy_longdouble *)(PyArray_DATA(arr)+sizeof(npy_longdouble))=(*v).i;break;\\ case NPY_OBJECT: PyArray_SETITEM(arr, PyArray_DATA(arr), pyobj_from_complex_ ## ctype ## 1((*v))); break;\\ default: return -2;\\ };\\ return -1; """ # cppmacros['NUMFROMARROBJ']="""\ # define NUMFROMARROBJ(typenum,ctype) \\ # if (PyArray_Check(obj)) arr = (PyArrayObject *)obj;\\ # else arr = (PyArrayObject *)PyArray_ContiguousFromObject(obj,typenum,0,0);\\ # if (arr) {\\ # if (PyArray_TYPE(arr)==NPY_OBJECT) {\\ # if (!ctype ## _from_pyobj(v,(PyArray_DESCR(arr)->getitem)(PyArray_DATA(arr)),\"\"))\\ # goto capi_fail;\\ # } else {\\ # (PyArray_DESCR(arr)->cast[typenum])(PyArray_DATA(arr),1,(char*)v,1,1);\\ # }\\ # if ((PyObject *)arr != obj) { Py_DECREF(arr); }\\ # return 1;\\ # } # """ # XXX: Note that CNUMFROMARROBJ is identical with NUMFROMARROBJ # cppmacros['CNUMFROMARROBJ']="""\ # define CNUMFROMARROBJ(typenum,ctype) \\ # if (PyArray_Check(obj)) arr = (PyArrayObject *)obj;\\ # else arr = (PyArrayObject *)PyArray_ContiguousFromObject(obj,typenum,0,0);\\ # if (arr) {\\ # if (PyArray_TYPE(arr)==NPY_OBJECT) {\\ # if (!ctype ## _from_pyobj(v,(PyArray_DESCR(arr)->getitem)(PyArray_DATA(arr)),\"\"))\\ # goto capi_fail;\\ # } else {\\ # (PyArray_DESCR(arr)->cast[typenum])((void *)(PyArray_DATA(arr)),1,(void *)(v),1,1);\\ # }\\ # if ((PyObject *)arr != obj) { Py_DECREF(arr); }\\ # return 1;\\ # } # """ needs['GETSTRFROMPYTUPLE'] = ['STRINGCOPYN', 'PRINTPYOBJERR'] cppmacros['GETSTRFROMPYTUPLE'] = """\ #define GETSTRFROMPYTUPLE(tuple,index,str,len) {\\ PyObject *rv_cb_str = PyTuple_GetItem((tuple),(index));\\ if (rv_cb_str == NULL)\\ goto capi_fail;\\ if (PyString_Check(rv_cb_str)) {\\ str[len-1]='\\0';\\ STRINGCOPYN((str),PyString_AS_STRING((PyStringObject*)rv_cb_str),(len));\\ } else {\\ PRINTPYOBJERR(rv_cb_str);\\ PyErr_SetString(#modulename#_error,\"string object expected\");\\ goto capi_fail;\\ }\\ } """ 
cppmacros['GETSCALARFROMPYTUPLE'] = """\ #define GETSCALARFROMPYTUPLE(tuple,index,var,ctype,mess) {\\ if ((capi_tmp = PyTuple_GetItem((tuple),(index)))==NULL) goto capi_fail;\\ if (!(ctype ## _from_pyobj((var),capi_tmp,mess)))\\ goto capi_fail;\\ } """ cppmacros['FAILNULL'] = """\\ #define FAILNULL(p) do { \\ if ((p) == NULL) { \\ PyErr_SetString(PyExc_MemoryError, "NULL pointer found"); \\ goto capi_fail; \\ } \\ } while (0) """ needs['MEMCOPY'] = ['string.h', 'FAILNULL'] cppmacros['MEMCOPY'] = """\ #define MEMCOPY(to,from,n)\\ do { FAILNULL(to); FAILNULL(from); (void)memcpy(to,from,n); } while (0) """ cppmacros['STRINGMALLOC'] = """\ #define STRINGMALLOC(str,len)\\ if ((str = (string)malloc(sizeof(char)*(len+1))) == NULL) {\\ PyErr_SetString(PyExc_MemoryError, \"out of memory\");\\ goto capi_fail;\\ } else {\\ (str)[len] = '\\0';\\ } """ cppmacros['STRINGFREE'] = """\ #define STRINGFREE(str) do {if (!(str == NULL)) free(str);} while (0) """ needs['STRINGCOPYN'] = ['string.h', 'FAILNULL'] cppmacros['STRINGCOPYN'] = """\ #define STRINGCOPYN(to,from,buf_size) \\ do { \\ int _m = (buf_size); \\ char *_to = (to); \\ char *_from = (from); \\ FAILNULL(_to); FAILNULL(_from); \\ (void)strncpy(_to, _from, sizeof(char)*_m); \\ _to[_m-1] = '\\0'; \\ /* Padding with spaces instead of nulls */ \\ for (_m -= 2; _m >= 0 && _to[_m] == '\\0'; _m--) { \\ _to[_m] = ' '; \\ } \\ } while (0) """ needs['STRINGCOPY'] = ['string.h', 'FAILNULL'] cppmacros['STRINGCOPY'] = """\ #define STRINGCOPY(to,from)\\ do { FAILNULL(to); FAILNULL(from); (void)strcpy(to,from); } while (0) """ cppmacros['CHECKGENERIC'] = """\ #define CHECKGENERIC(check,tcheck,name) \\ if (!(check)) {\\ PyErr_SetString(#modulename#_error,\"(\"tcheck\") failed for \"name);\\ /*goto capi_fail;*/\\ } else """ cppmacros['CHECKARRAY'] = """\ #define CHECKARRAY(check,tcheck,name) \\ if (!(check)) {\\ PyErr_SetString(#modulename#_error,\"(\"tcheck\") failed for \"name);\\ /*goto capi_fail;*/\\ } else """ cppmacros['CHECKSTRING'] = """\ #define CHECKSTRING(check,tcheck,name,show,var)\\ if (!(check)) {\\ char errstring[256];\\ sprintf(errstring, \"%s: \"show, \"(\"tcheck\") failed for \"name, slen(var), var);\\ PyErr_SetString(#modulename#_error, errstring);\\ /*goto capi_fail;*/\\ } else """ cppmacros['CHECKSCALAR'] = """\ #define CHECKSCALAR(check,tcheck,name,show,var)\\ if (!(check)) {\\ char errstring[256];\\ sprintf(errstring, \"%s: \"show, \"(\"tcheck\") failed for \"name, var);\\ PyErr_SetString(#modulename#_error,errstring);\\ /*goto capi_fail;*/\\ } else """ # cppmacros['CHECKDIMS']="""\ # define CHECKDIMS(dims,rank) \\ # for (int i=0;i<(rank);i++)\\ # if (dims[i]<0) {\\ # fprintf(stderr,\"Unspecified array argument requires a complete dimension specification.\\n\");\\ # goto capi_fail;\\ # } # """ cppmacros[ 'ARRSIZE'] = '#define ARRSIZE(dims,rank) (_PyArray_multiply_list(dims,rank))' cppmacros['OLDPYNUM'] = """\ #ifdef OLDPYNUM #error You need to install NumPy version 13 or higher. 
See https://scipy.org/install.html #endif """ ################# C functions ############### cfuncs['calcarrindex'] = """\ static int calcarrindex(int *i,PyArrayObject *arr) { int k,ii = i[0]; for (k=1; k < PyArray_NDIM(arr); k++) ii += (ii*(PyArray_DIM(arr,k) - 1)+i[k]); /* assuming contiguous arr */ return ii; }""" cfuncs['calcarrindextr'] = """\ static int calcarrindextr(int *i,PyArrayObject *arr) { int k,ii = i[PyArray_NDIM(arr)-1]; for (k=1; k < PyArray_NDIM(arr); k++) ii += (ii*(PyArray_DIM(arr,PyArray_NDIM(arr)-k-1) - 1)+i[PyArray_NDIM(arr)-k-1]); /* assuming contiguous arr */ return ii; }""" cfuncs['forcomb'] = """\ static struct { int nd;npy_intp *d;int *i,*i_tr,tr; } forcombcache; static int initforcomb(npy_intp *dims,int nd,int tr) { int k; if (dims==NULL) return 0; if (nd<0) return 0; forcombcache.nd = nd; forcombcache.d = dims; forcombcache.tr = tr; if ((forcombcache.i = (int *)malloc(sizeof(int)*nd))==NULL) return 0; if ((forcombcache.i_tr = (int *)malloc(sizeof(int)*nd))==NULL) return 0; for (k=1;k<nd;k++) { forcombcache.i[k] = forcombcache.i_tr[nd-k-1] = 0; } forcombcache.i[0] = forcombcache.i_tr[nd-1] = -1; return 1; } static int *nextforcomb(void) { int j,*i,*i_tr,k; int nd=forcombcache.nd; if ((i=forcombcache.i) == NULL) return NULL; if ((i_tr=forcombcache.i_tr) == NULL) return NULL; if (forcombcache.d == NULL) return NULL; i[0]++; if (i[0]==forcombcache.d[0]) { j=1; while ((j<nd) && (i[j]==forcombcache.d[j]-1)) j++; if (j==nd) { free(i); free(i_tr); return NULL; } for (k=0;k<j;k++) i[k] = i_tr[nd-k-1] = 0; i[j]++; i_tr[nd-j-1]++; } else i_tr[nd-1]++; if (forcombcache.tr) return i_tr; return i; }""" needs['try_pyarr_from_string'] = ['STRINGCOPYN', 'PRINTPYOBJERR', 'string'] cfuncs['try_pyarr_from_string'] = """\ static int try_pyarr_from_string(PyObject *obj,const string str) { PyArrayObject *arr = NULL; if (PyArray_Check(obj) && (!((arr = (PyArrayObject *)obj) == NULL))) { STRINGCOPYN(PyArray_DATA(arr),str,PyArray_NBYTES(arr)); } return 1; capi_fail: PRINTPYOBJERR(obj); PyErr_SetString(#modulename#_error,\"try_pyarr_from_string failed\"); return 0; } """ needs['string_from_pyobj'] = ['string', 'STRINGMALLOC', 'STRINGCOPYN'] cfuncs['string_from_pyobj'] = """\ static int string_from_pyobj(string *str,int *len,const string inistr,PyObject *obj,const char *errmess) { PyArrayObject *arr = NULL; PyObject *tmp = NULL; #ifdef DEBUGCFUNCS fprintf(stderr,\"string_from_pyobj(str='%s',len=%d,inistr='%s',obj=%p)\\n\",(char*)str,*len,(char *)inistr,obj); #endif if (obj == Py_None) { if (*len == -1) *len = strlen(inistr); /* Will this cause problems? 
*/ STRINGMALLOC(*str,*len); STRINGCOPYN(*str,inistr,*len+1); return 1; } if (PyArray_Check(obj)) { if ((arr = (PyArrayObject *)obj) == NULL) goto capi_fail; if (!ISCONTIGUOUS(arr)) { PyErr_SetString(PyExc_ValueError,\"array object is non-contiguous.\"); goto capi_fail; } if (*len == -1) *len = (PyArray_ITEMSIZE(arr))*PyArray_SIZE(arr); STRINGMALLOC(*str,*len); STRINGCOPYN(*str,PyArray_DATA(arr),*len+1); return 1; } if (PyString_Check(obj)) { tmp = obj; Py_INCREF(tmp); } #if PY_VERSION_HEX >= 0x03000000 else if (PyUnicode_Check(obj)) { tmp = PyUnicode_AsASCIIString(obj); } else { PyObject *tmp2; tmp2 = PyObject_Str(obj); if (tmp2) { tmp = PyUnicode_AsASCIIString(tmp2); Py_DECREF(tmp2); } else { tmp = NULL; } } #else else { tmp = PyObject_Str(obj); } #endif if (tmp == NULL) goto capi_fail; if (*len == -1) *len = PyString_GET_SIZE(tmp); STRINGMALLOC(*str,*len); STRINGCOPYN(*str,PyString_AS_STRING(tmp),*len+1); Py_DECREF(tmp); return 1; capi_fail: Py_XDECREF(tmp); { PyObject* err = PyErr_Occurred(); if (err==NULL) err = #modulename#_error; PyErr_SetString(err,errmess); } return 0; } """ needs['char_from_pyobj'] = ['int_from_pyobj'] cfuncs['char_from_pyobj'] = """\ static int char_from_pyobj(char* v,PyObject *obj,const char *errmess) { int i=0; if (int_from_pyobj(&i,obj,errmess)) { *v = (char)i; return 1; } return 0; } """ needs['signed_char_from_pyobj'] = ['int_from_pyobj', 'signed_char'] cfuncs['signed_char_from_pyobj'] = """\ static int signed_char_from_pyobj(signed_char* v,PyObject *obj,const char *errmess) { int i=0; if (int_from_pyobj(&i,obj,errmess)) { *v = (signed_char)i; return 1; } return 0; } """ needs['short_from_pyobj'] = ['int_from_pyobj'] cfuncs['short_from_pyobj'] = """\ static int short_from_pyobj(short* v,PyObject *obj,const char *errmess) { int i=0; if (int_from_pyobj(&i,obj,errmess)) { *v = (short)i; return 1; } return 0; } """ cfuncs['int_from_pyobj'] = """\ static int int_from_pyobj(int* v,PyObject *obj,const char *errmess) { PyObject* tmp = NULL; if (PyInt_Check(obj)) { *v = (int)PyInt_AS_LONG(obj); return 1; } tmp = PyNumber_Int(obj); if (tmp) { *v = PyInt_AS_LONG(tmp); Py_DECREF(tmp); return 1; } if (PyComplex_Check(obj)) tmp = PyObject_GetAttrString(obj,\"real\"); else if (PyString_Check(obj) || PyUnicode_Check(obj)) /*pass*/; else if (PySequence_Check(obj)) tmp = PySequence_GetItem(obj,0); if (tmp) { PyErr_Clear(); if (int_from_pyobj(v,tmp,errmess)) {Py_DECREF(tmp); return 1;} Py_DECREF(tmp); } { PyObject* err = PyErr_Occurred(); if (err==NULL) err = #modulename#_error; PyErr_SetString(err,errmess); } return 0; } """ cfuncs['long_from_pyobj'] = """\ static int long_from_pyobj(long* v,PyObject *obj,const char *errmess) { PyObject* tmp = NULL; if (PyInt_Check(obj)) { *v = PyInt_AS_LONG(obj); return 1; } tmp = PyNumber_Int(obj); if (tmp) { *v = PyInt_AS_LONG(tmp); Py_DECREF(tmp); return 1; } if (PyComplex_Check(obj)) tmp = PyObject_GetAttrString(obj,\"real\"); else if (PyString_Check(obj) || PyUnicode_Check(obj)) /*pass*/; else if (PySequence_Check(obj)) tmp = PySequence_GetItem(obj,0); if (tmp) { PyErr_Clear(); if (long_from_pyobj(v,tmp,errmess)) {Py_DECREF(tmp); return 1;} Py_DECREF(tmp); } { PyObject* err = PyErr_Occurred(); if (err==NULL) err = #modulename#_error; PyErr_SetString(err,errmess); } return 0; } """ needs['long_long_from_pyobj'] = ['long_long'] cfuncs['long_long_from_pyobj'] = """\ static int long_long_from_pyobj(long_long* v,PyObject *obj,const char *errmess) { PyObject* tmp = NULL; if (PyLong_Check(obj)) { *v = PyLong_AsLongLong(obj); return 
(!PyErr_Occurred()); } if (PyInt_Check(obj)) { *v = (long_long)PyInt_AS_LONG(obj); return 1; } tmp = PyNumber_Long(obj); if (tmp) { *v = PyLong_AsLongLong(tmp); Py_DECREF(tmp); return (!PyErr_Occurred()); } if (PyComplex_Check(obj)) tmp = PyObject_GetAttrString(obj,\"real\"); else if (PyString_Check(obj) || PyUnicode_Check(obj)) /*pass*/; else if (PySequence_Check(obj)) tmp = PySequence_GetItem(obj,0); if (tmp) { PyErr_Clear(); if (long_long_from_pyobj(v,tmp,errmess)) {Py_DECREF(tmp); return 1;} Py_DECREF(tmp); } { PyObject* err = PyErr_Occurred(); if (err==NULL) err = #modulename#_error; PyErr_SetString(err,errmess); } return 0; } """ needs['long_double_from_pyobj'] = ['double_from_pyobj', 'long_double'] cfuncs['long_double_from_pyobj'] = """\ static int long_double_from_pyobj(long_double* v,PyObject *obj,const char *errmess) { double d=0; if (PyArray_CheckScalar(obj)){ if PyArray_IsScalar(obj, LongDouble) { PyArray_ScalarAsCtype(obj, v); return 1; } else if (PyArray_Check(obj) && PyArray_TYPE(obj)==NPY_LONGDOUBLE) { (*v) = *((npy_longdouble *)PyArray_DATA(obj)); return 1; } } if (double_from_pyobj(&d,obj,errmess)) { *v = (long_double)d; return 1; } return 0; } """ cfuncs['double_from_pyobj'] = """\ static int double_from_pyobj(double* v,PyObject *obj,const char *errmess) { PyObject* tmp = NULL; if (PyFloat_Check(obj)) { #ifdef __sgi *v = PyFloat_AsDouble(obj); #else *v = PyFloat_AS_DOUBLE(obj); #endif return 1; } tmp = PyNumber_Float(obj); if (tmp) { #ifdef __sgi *v = PyFloat_AsDouble(tmp); #else *v = PyFloat_AS_DOUBLE(tmp); #endif Py_DECREF(tmp); return 1; } if (PyComplex_Check(obj)) tmp = PyObject_GetAttrString(obj,\"real\"); else if (PyString_Check(obj) || PyUnicode_Check(obj)) /*pass*/; else if (PySequence_Check(obj)) tmp = PySequence_GetItem(obj,0); if (tmp) { PyErr_Clear(); if (double_from_pyobj(v,tmp,errmess)) {Py_DECREF(tmp); return 1;} Py_DECREF(tmp); } { PyObject* err = PyErr_Occurred(); if (err==NULL) err = #modulename#_error; PyErr_SetString(err,errmess); } return 0; } """ needs['float_from_pyobj'] = ['double_from_pyobj'] cfuncs['float_from_pyobj'] = """\ static int float_from_pyobj(float* v,PyObject *obj,const char *errmess) { double d=0.0; if (double_from_pyobj(&d,obj,errmess)) { *v = (float)d; return 1; } return 0; } """ needs['complex_long_double_from_pyobj'] = ['complex_long_double', 'long_double', 'complex_double_from_pyobj'] cfuncs['complex_long_double_from_pyobj'] = """\ static int complex_long_double_from_pyobj(complex_long_double* v,PyObject *obj,const char *errmess) { complex_double cd={0.0,0.0}; if (PyArray_CheckScalar(obj)){ if PyArray_IsScalar(obj, CLongDouble) { PyArray_ScalarAsCtype(obj, v); return 1; } else if (PyArray_Check(obj) && PyArray_TYPE(obj)==NPY_CLONGDOUBLE) { (*v).r = ((npy_clongdouble *)PyArray_DATA(obj))->real; (*v).i = ((npy_clongdouble *)PyArray_DATA(obj))->imag; return 1; } } if (complex_double_from_pyobj(&cd,obj,errmess)) { (*v).r = (long_double)cd.r; (*v).i = (long_double)cd.i; return 1; } return 0; } """ needs['complex_double_from_pyobj'] = ['complex_double'] cfuncs['complex_double_from_pyobj'] = """\ static int complex_double_from_pyobj(complex_double* v,PyObject *obj,const char *errmess) { Py_complex c; if (PyComplex_Check(obj)) { c=PyComplex_AsCComplex(obj); (*v).r=c.real, (*v).i=c.imag; return 1; } if (PyArray_IsScalar(obj, ComplexFloating)) { if (PyArray_IsScalar(obj, CFloat)) { npy_cfloat new; PyArray_ScalarAsCtype(obj, &new); (*v).r = (double)new.real; (*v).i = (double)new.imag; } else if (PyArray_IsScalar(obj, CLongDouble)) { 
npy_clongdouble new; PyArray_ScalarAsCtype(obj, &new); (*v).r = (double)new.real; (*v).i = (double)new.imag; } else { /* if (PyArray_IsScalar(obj, CDouble)) */ PyArray_ScalarAsCtype(obj, v); } return 1; } if (PyArray_CheckScalar(obj)) { /* 0-dim array or still array scalar */ PyObject *arr; if (PyArray_Check(obj)) { arr = PyArray_Cast((PyArrayObject *)obj, NPY_CDOUBLE); } else { arr = PyArray_FromScalar(obj, PyArray_DescrFromType(NPY_CDOUBLE)); } if (arr==NULL) return 0; (*v).r = ((npy_cdouble *)PyArray_DATA(arr))->real; (*v).i = ((npy_cdouble *)PyArray_DATA(arr))->imag; return 1; } /* Python does not provide PyNumber_Complex function :-( */ (*v).i=0.0; if (PyFloat_Check(obj)) { #ifdef __sgi (*v).r = PyFloat_AsDouble(obj); #else (*v).r = PyFloat_AS_DOUBLE(obj); #endif return 1; } if (PyInt_Check(obj)) { (*v).r = (double)PyInt_AS_LONG(obj); return 1; } if (PyLong_Check(obj)) { (*v).r = PyLong_AsDouble(obj); return (!PyErr_Occurred()); } if (PySequence_Check(obj) && !(PyString_Check(obj) || PyUnicode_Check(obj))) { PyObject *tmp = PySequence_GetItem(obj,0); if (tmp) { if (complex_double_from_pyobj(v,tmp,errmess)) { Py_DECREF(tmp); return 1; } Py_DECREF(tmp); } } { PyObject* err = PyErr_Occurred(); if (err==NULL) err = PyExc_TypeError; PyErr_SetString(err,errmess); } return 0; } """ needs['complex_float_from_pyobj'] = [ 'complex_float', 'complex_double_from_pyobj'] cfuncs['complex_float_from_pyobj'] = """\ static int complex_float_from_pyobj(complex_float* v,PyObject *obj,const char *errmess) { complex_double cd={0.0,0.0}; if (complex_double_from_pyobj(&cd,obj,errmess)) { (*v).r = (float)cd.r; (*v).i = (float)cd.i; return 1; } return 0; } """ needs['try_pyarr_from_char'] = ['pyobj_from_char1', 'TRYPYARRAYTEMPLATE'] cfuncs[ 'try_pyarr_from_char'] = 'static int try_pyarr_from_char(PyObject* obj,char* v) {\n TRYPYARRAYTEMPLATE(char,\'c\');\n}\n' needs['try_pyarr_from_signed_char'] = ['TRYPYARRAYTEMPLATE', 'unsigned_char'] cfuncs[ 'try_pyarr_from_unsigned_char'] = 'static int try_pyarr_from_unsigned_char(PyObject* obj,unsigned_char* v) {\n TRYPYARRAYTEMPLATE(unsigned_char,\'b\');\n}\n' needs['try_pyarr_from_signed_char'] = ['TRYPYARRAYTEMPLATE', 'signed_char'] cfuncs[ 'try_pyarr_from_signed_char'] = 'static int try_pyarr_from_signed_char(PyObject* obj,signed_char* v) {\n TRYPYARRAYTEMPLATE(signed_char,\'1\');\n}\n' needs['try_pyarr_from_short'] = ['pyobj_from_short1', 'TRYPYARRAYTEMPLATE'] cfuncs[ 'try_pyarr_from_short'] = 'static int try_pyarr_from_short(PyObject* obj,short* v) {\n TRYPYARRAYTEMPLATE(short,\'s\');\n}\n' needs['try_pyarr_from_int'] = ['pyobj_from_int1', 'TRYPYARRAYTEMPLATE'] cfuncs[ 'try_pyarr_from_int'] = 'static int try_pyarr_from_int(PyObject* obj,int* v) {\n TRYPYARRAYTEMPLATE(int,\'i\');\n}\n' needs['try_pyarr_from_long'] = ['pyobj_from_long1', 'TRYPYARRAYTEMPLATE'] cfuncs[ 'try_pyarr_from_long'] = 'static int try_pyarr_from_long(PyObject* obj,long* v) {\n TRYPYARRAYTEMPLATE(long,\'l\');\n}\n' needs['try_pyarr_from_long_long'] = [ 'pyobj_from_long_long1', 'TRYPYARRAYTEMPLATE', 'long_long'] cfuncs[ 'try_pyarr_from_long_long'] = 'static int try_pyarr_from_long_long(PyObject* obj,long_long* v) {\n TRYPYARRAYTEMPLATE(long_long,\'L\');\n}\n' needs['try_pyarr_from_float'] = ['pyobj_from_float1', 'TRYPYARRAYTEMPLATE'] cfuncs[ 'try_pyarr_from_float'] = 'static int try_pyarr_from_float(PyObject* obj,float* v) {\n TRYPYARRAYTEMPLATE(float,\'f\');\n}\n' needs['try_pyarr_from_double'] = ['pyobj_from_double1', 'TRYPYARRAYTEMPLATE'] cfuncs[ 'try_pyarr_from_double'] = 'static int 
try_pyarr_from_double(PyObject* obj,double* v) {\n TRYPYARRAYTEMPLATE(double,\'d\');\n}\n' needs['try_pyarr_from_complex_float'] = [ 'pyobj_from_complex_float1', 'TRYCOMPLEXPYARRAYTEMPLATE', 'complex_float'] cfuncs[ 'try_pyarr_from_complex_float'] = 'static int try_pyarr_from_complex_float(PyObject* obj,complex_float* v) {\n TRYCOMPLEXPYARRAYTEMPLATE(float,\'F\');\n}\n' needs['try_pyarr_from_complex_double'] = [ 'pyobj_from_complex_double1', 'TRYCOMPLEXPYARRAYTEMPLATE', 'complex_double'] cfuncs[ 'try_pyarr_from_complex_double'] = 'static int try_pyarr_from_complex_double(PyObject* obj,complex_double* v) {\n TRYCOMPLEXPYARRAYTEMPLATE(double,\'D\');\n}\n' needs['create_cb_arglist'] = ['CFUNCSMESS', 'PRINTPYOBJERR', 'MINMAX'] cfuncs['create_cb_arglist'] = """\ static int create_cb_arglist(PyObject* fun,PyTupleObject* xa,const int maxnofargs,const int nofoptargs,int *nofargs,PyTupleObject **args,const char *errmess) { PyObject *tmp = NULL; PyObject *tmp_fun = NULL; int tot,opt,ext,siz,i,di=0; CFUNCSMESS(\"create_cb_arglist\\n\"); tot=opt=ext=siz=0; /* Get the total number of arguments */ if (PyFunction_Check(fun)) tmp_fun = fun; else { di = 1; if (PyObject_HasAttrString(fun,\"im_func\")) { tmp_fun = PyObject_GetAttrString(fun,\"im_func\"); } else if (PyObject_HasAttrString(fun,\"__call__\")) { tmp = PyObject_GetAttrString(fun,\"__call__\"); if (PyObject_HasAttrString(tmp,\"im_func\")) tmp_fun = PyObject_GetAttrString(tmp,\"im_func\"); else { tmp_fun = fun; /* built-in function */ tot = maxnofargs; if (xa != NULL) tot += PyTuple_Size((PyObject *)xa); } Py_XDECREF(tmp); } else if (PyFortran_Check(fun) || PyFortran_Check1(fun)) { tot = maxnofargs; if (xa != NULL) tot += PyTuple_Size((PyObject *)xa); tmp_fun = fun; } else if (F2PyCapsule_Check(fun)) { tot = maxnofargs; if (xa != NULL) ext = PyTuple_Size((PyObject *)xa); if(ext>0) { fprintf(stderr,\"extra arguments tuple cannot be used with CObject call-back\\n\"); goto capi_fail; } tmp_fun = fun; } } if (tmp_fun==NULL) { fprintf(stderr,\"Call-back argument must be function|instance|instance.__call__|f2py-function but got %s.\\n\",(fun==NULL?\"NULL\":Py_TYPE(fun)->tp_name)); goto capi_fail; } #if PY_VERSION_HEX >= 0x03000000 if (PyObject_HasAttrString(tmp_fun,\"__code__\")) { if (PyObject_HasAttrString(tmp = PyObject_GetAttrString(tmp_fun,\"__code__\"),\"co_argcount\")) #else if (PyObject_HasAttrString(tmp_fun,\"func_code\")) { if (PyObject_HasAttrString(tmp = PyObject_GetAttrString(tmp_fun,\"func_code\"),\"co_argcount\")) #endif tot = PyInt_AsLong(PyObject_GetAttrString(tmp,\"co_argcount\")) - di; Py_XDECREF(tmp); } /* Get the number of optional arguments */ #if PY_VERSION_HEX >= 0x03000000 if (PyObject_HasAttrString(tmp_fun,\"__defaults__\")) { if (PyTuple_Check(tmp = PyObject_GetAttrString(tmp_fun,\"__defaults__\"))) #else if (PyObject_HasAttrString(tmp_fun,\"func_defaults\")) { if (PyTuple_Check(tmp = PyObject_GetAttrString(tmp_fun,\"func_defaults\"))) #endif opt = PyTuple_Size(tmp); Py_XDECREF(tmp); } /* Get the number of extra arguments */ if (xa != NULL) ext = PyTuple_Size((PyObject *)xa); /* Calculate the size of call-backs argument list */ siz = MIN(maxnofargs+ext,tot); *nofargs = MAX(0,siz-ext); #ifdef DEBUGCFUNCS fprintf(stderr,\"debug-capi:create_cb_arglist:maxnofargs(-nofoptargs),tot,opt,ext,siz,nofargs=%d(-%d),%d,%d,%d,%d,%d\\n\",maxnofargs,nofoptargs,tot,opt,ext,siz,*nofargs); #endif if (siz<tot-opt) { fprintf(stderr,\"create_cb_arglist: Failed to build argument list (siz) with enough arguments (tot-opt) required by user-supplied 
function (siz,tot,opt=%d,%d,%d).\\n\",siz,tot,opt); goto capi_fail; } /* Initialize argument list */ *args = (PyTupleObject *)PyTuple_New(siz); for (i=0;i<*nofargs;i++) { Py_INCREF(Py_None); PyTuple_SET_ITEM((PyObject *)(*args),i,Py_None); } if (xa != NULL) for (i=(*nofargs);i<siz;i++) { tmp = PyTuple_GetItem((PyObject *)xa,i-(*nofargs)); Py_INCREF(tmp); PyTuple_SET_ITEM(*args,i,tmp); } CFUNCSMESS(\"create_cb_arglist-end\\n\"); return 1; capi_fail: if ((PyErr_Occurred())==NULL) PyErr_SetString(#modulename#_error,errmess); return 0; } """ def buildcfuncs(): from .capi_maps import c2capi_map for k in c2capi_map.keys(): m = 'pyarr_from_p_%s1' % k cppmacros[ m] = '#define %s(v) (PyArray_SimpleNewFromData(0,NULL,%s,(char *)v))' % (m, c2capi_map[k]) k = 'string' m = 'pyarr_from_p_%s1' % k # NPY_CHAR compatibility, NPY_STRING with itemsize 1 cppmacros[ m] = '#define %s(v,dims) (PyArray_New(&PyArray_Type, 1, dims, NPY_STRING, NULL, v, 1, NPY_ARRAY_CARRAY, NULL))' % (m) ############ Auxiliary functions for sorting needs ################### def append_needs(need, flag=1): global outneeds, needs if isinstance(need, list): for n in need: append_needs(n, flag) elif isinstance(need, str): if not need: return if need in includes0: n = 'includes0' elif need in includes: n = 'includes' elif need in typedefs: n = 'typedefs' elif need in typedefs_generated: n = 'typedefs_generated' elif need in cppmacros: n = 'cppmacros' elif need in cfuncs: n = 'cfuncs' elif need in callbacks: n = 'callbacks' elif need in f90modhooks: n = 'f90modhooks' elif need in commonhooks: n = 'commonhooks' else: errmess('append_needs: unknown need %s\n' % (repr(need))) return if need in outneeds[n]: return if flag: tmp = {} if need in needs: for nn in needs[need]: t = append_needs(nn, 0) if isinstance(t, dict): for nnn in t.keys(): if nnn in tmp: tmp[nnn] = tmp[nnn] + t[nnn] else: tmp[nnn] = t[nnn] for nn in tmp.keys(): for nnn in tmp[nn]: if nnn not in outneeds[nn]: outneeds[nn] = [nnn] + outneeds[nn] outneeds[n].append(need) else: tmp = {} if need in needs: for nn in needs[need]: t = append_needs(nn, flag) if isinstance(t, dict): for nnn in t.keys(): if nnn in tmp: tmp[nnn] = t[nnn] + tmp[nnn] else: tmp[nnn] = t[nnn] if n not in tmp: tmp[n] = [] tmp[n].append(need) return tmp else: errmess('append_needs: expected list or string but got :%s\n' % (repr(need))) def get_needs(): global outneeds, needs res = {} for n in outneeds.keys(): out = [] saveout = copy.copy(outneeds[n]) while len(outneeds[n]) > 0: if outneeds[n][0] not in needs: out.append(outneeds[n][0]) del outneeds[n][0] else: flag = 0 for k in outneeds[n][1:]: if k in needs[outneeds[n][0]]: flag = 1 break if flag: outneeds[n] = outneeds[n][1:] + [outneeds[n][0]] else: out.append(outneeds[n][0]) del outneeds[n][0] if saveout and (0 not in map(lambda x, y: x == y, saveout, outneeds[n])) \ and outneeds[n] != []: print(n, saveout) errmess( 'get_needs: no progress in sorting needs, probably circular dependence, skipping.\n') out = out + saveout break saveout = copy.copy(outneeds[n]) if out == []: out = [n] res[n] = out return res
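The append_needs/get_needs machinery at the end of cfuncs.py is what lets the generator ask for one helper (say string_from_pyobj) and automatically emit everything it transitively depends on, dependencies first. A minimal standalone sketch of that resolution idea, using a hand-picked slice of the real `needs` entries purely for illustration (this is not f2py's actual API):

# Simplified stand-in for the needs dict built up throughout this module.
needs = {
    "string_from_pyobj": ["string", "STRINGMALLOC", "STRINGCOPYN"],
    "STRINGCOPYN": ["string.h", "FAILNULL"],
    "STRINGMALLOC": [],
    "FAILNULL": [],
    "string": [],
    "string.h": [],
}

def resolve(name, ordered=None):
    """Return `name` plus everything it needs, dependencies first."""
    ordered = [] if ordered is None else ordered
    for dep in needs.get(name, []):
        resolve(dep, ordered)
    if name not in ordered:
        ordered.append(name)
    return ordered

print(resolve("string_from_pyobj"))
# ['string', 'STRINGMALLOC', 'string.h', 'FAILNULL', 'STRINGCOPYN', 'string_from_pyobj']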
from __future__ import division, absolute_import, print_function import sys import platform import pytest import numpy as np # import the c-extension module directly since _arg is not exported via umath import numpy.core._multiarray_umath as ncu from numpy.testing import ( assert_raises, assert_equal, assert_array_equal, assert_almost_equal ) # TODO: branch cuts (use Pauli code) # TODO: conj 'symmetry' # TODO: FPU exceptions # At least on Windows the results of many complex functions are not conforming # to the C99 standard. See ticket 1574. # Ditto for Solaris (ticket 1642) and OS X on PowerPC. #FIXME: this will probably change when we require full C99 campatibility with np.errstate(all='ignore'): functions_seem_flaky = ((np.exp(complex(np.inf, 0)).imag != 0) or (np.log(complex(np.NZERO, 0)).imag != np.pi)) # TODO: replace with a check on whether platform-provided C99 funcs are used xfail_complex_tests = (not sys.platform.startswith('linux') or functions_seem_flaky) # TODO This can be xfail when the generator functions are got rid of. platform_skip = pytest.mark.skipif(xfail_complex_tests, reason="Inadequate C99 complex support") class TestCexp(object): def test_simple(self): check = check_complex_value f = np.exp check(f, 1, 0, np.exp(1), 0, False) check(f, 0, 1, np.cos(1), np.sin(1), False) ref = np.exp(1) * complex(np.cos(1), np.sin(1)) check(f, 1, 1, ref.real, ref.imag, False) @platform_skip def test_special_values(self): # C99: Section G 6.3.1 check = check_complex_value f = np.exp # cexp(+-0 + 0i) is 1 + 0i check(f, np.PZERO, 0, 1, 0, False) check(f, np.NZERO, 0, 1, 0, False) # cexp(x + infi) is nan + nani for finite x and raises 'invalid' FPU # exception check(f, 1, np.inf, np.nan, np.nan) check(f, -1, np.inf, np.nan, np.nan) check(f, 0, np.inf, np.nan, np.nan) # cexp(inf + 0i) is inf + 0i check(f, np.inf, 0, np.inf, 0) # cexp(-inf + yi) is +0 * (cos(y) + i sin(y)) for finite y check(f, -np.inf, 1, np.PZERO, np.PZERO) check(f, -np.inf, 0.75 * np.pi, np.NZERO, np.PZERO) # cexp(inf + yi) is +inf * (cos(y) + i sin(y)) for finite y check(f, np.inf, 1, np.inf, np.inf) check(f, np.inf, 0.75 * np.pi, -np.inf, np.inf) # cexp(-inf + inf i) is +-0 +- 0i (signs unspecified) def _check_ninf_inf(dummy): msgform = "cexp(-inf, inf) is (%f, %f), expected (+-0, +-0)" with np.errstate(invalid='ignore'): z = f(np.array(complex(-np.inf, np.inf))) if z.real != 0 or z.imag != 0: raise AssertionError(msgform % (z.real, z.imag)) _check_ninf_inf(None) # cexp(inf + inf i) is +-inf + NaNi and raised invalid FPU ex. 
def _check_inf_inf(dummy): msgform = "cexp(inf, inf) is (%f, %f), expected (+-inf, nan)" with np.errstate(invalid='ignore'): z = f(np.array(complex(np.inf, np.inf))) if not np.isinf(z.real) or not np.isnan(z.imag): raise AssertionError(msgform % (z.real, z.imag)) _check_inf_inf(None) # cexp(-inf + nan i) is +-0 +- 0i def _check_ninf_nan(dummy): msgform = "cexp(-inf, nan) is (%f, %f), expected (+-0, +-0)" with np.errstate(invalid='ignore'): z = f(np.array(complex(-np.inf, np.nan))) if z.real != 0 or z.imag != 0: raise AssertionError(msgform % (z.real, z.imag)) _check_ninf_nan(None) # cexp(inf + nan i) is +-inf + nan def _check_inf_nan(dummy): msgform = "cexp(-inf, nan) is (%f, %f), expected (+-inf, nan)" with np.errstate(invalid='ignore'): z = f(np.array(complex(np.inf, np.nan))) if not np.isinf(z.real) or not np.isnan(z.imag): raise AssertionError(msgform % (z.real, z.imag)) _check_inf_nan(None) # cexp(nan + yi) is nan + nani for y != 0 (optional: raises invalid FPU # ex) check(f, np.nan, 1, np.nan, np.nan) check(f, np.nan, -1, np.nan, np.nan) check(f, np.nan, np.inf, np.nan, np.nan) check(f, np.nan, -np.inf, np.nan, np.nan) # cexp(nan + nani) is nan + nani check(f, np.nan, np.nan, np.nan, np.nan) # TODO This can be xfail when the generator functions are got rid of. @pytest.mark.skip(reason="cexp(nan + 0I) is wrong on most platforms") def test_special_values2(self): # XXX: most implementations get it wrong here (including glibc <= 2.10) # cexp(nan + 0i) is nan + 0i check = check_complex_value f = np.exp check(f, np.nan, 0, np.nan, 0) class TestClog(object): def test_simple(self): x = np.array([1+0j, 1+2j]) y_r = np.log(np.abs(x)) + 1j * np.angle(x) y = np.log(x) for i in range(len(x)): assert_almost_equal(y[i], y_r[i]) @platform_skip @pytest.mark.skipif(platform.machine() == "armv5tel", reason="See gh-413.") def test_special_values(self): xl = [] yl = [] # From C99 std (Sec 6.3.2) # XXX: check exceptions raised # --- raise for invalid fails. # clog(-0 + i0) returns -inf + i pi and raises the 'divide-by-zero' # floating-point exception. with np.errstate(divide='raise'): x = np.array([np.NZERO], dtype=complex) y = complex(-np.inf, np.pi) assert_raises(FloatingPointError, np.log, x) with np.errstate(divide='ignore'): assert_almost_equal(np.log(x), y) xl.append(x) yl.append(y) # clog(+0 + i0) returns -inf + i0 and raises the 'divide-by-zero' # floating-point exception. with np.errstate(divide='raise'): x = np.array([0], dtype=complex) y = complex(-np.inf, 0) assert_raises(FloatingPointError, np.log, x) with np.errstate(divide='ignore'): assert_almost_equal(np.log(x), y) xl.append(x) yl.append(y) # clog(x + i inf returns +inf + i pi /2, for finite x. x = np.array([complex(1, np.inf)], dtype=complex) y = complex(np.inf, 0.5 * np.pi) assert_almost_equal(np.log(x), y) xl.append(x) yl.append(y) x = np.array([complex(-1, np.inf)], dtype=complex) assert_almost_equal(np.log(x), y) xl.append(x) yl.append(y) # clog(x + iNaN) returns NaN + iNaN and optionally raises the # 'invalid' floating- point exception, for finite x. 
with np.errstate(invalid='raise'): x = np.array([complex(1., np.nan)], dtype=complex) y = complex(np.nan, np.nan) #assert_raises(FloatingPointError, np.log, x) with np.errstate(invalid='ignore'): assert_almost_equal(np.log(x), y) xl.append(x) yl.append(y) with np.errstate(invalid='raise'): x = np.array([np.inf + 1j * np.nan], dtype=complex) #assert_raises(FloatingPointError, np.log, x) with np.errstate(invalid='ignore'): assert_almost_equal(np.log(x), y) xl.append(x) yl.append(y) # clog(- inf + iy) returns +inf + ipi , for finite positive-signed y. x = np.array([-np.inf + 1j], dtype=complex) y = complex(np.inf, np.pi) assert_almost_equal(np.log(x), y) xl.append(x) yl.append(y) # clog(+ inf + iy) returns +inf + i0, for finite positive-signed y. x = np.array([np.inf + 1j], dtype=complex) y = complex(np.inf, 0) assert_almost_equal(np.log(x), y) xl.append(x) yl.append(y) # clog(- inf + i inf) returns +inf + i3pi /4. x = np.array([complex(-np.inf, np.inf)], dtype=complex) y = complex(np.inf, 0.75 * np.pi) assert_almost_equal(np.log(x), y) xl.append(x) yl.append(y) # clog(+ inf + i inf) returns +inf + ipi /4. x = np.array([complex(np.inf, np.inf)], dtype=complex) y = complex(np.inf, 0.25 * np.pi) assert_almost_equal(np.log(x), y) xl.append(x) yl.append(y) # clog(+/- inf + iNaN) returns +inf + iNaN. x = np.array([complex(np.inf, np.nan)], dtype=complex) y = complex(np.inf, np.nan) assert_almost_equal(np.log(x), y) xl.append(x) yl.append(y) x = np.array([complex(-np.inf, np.nan)], dtype=complex) assert_almost_equal(np.log(x), y) xl.append(x) yl.append(y) # clog(NaN + iy) returns NaN + iNaN and optionally raises the # 'invalid' floating-point exception, for finite y. x = np.array([complex(np.nan, 1)], dtype=complex) y = complex(np.nan, np.nan) assert_almost_equal(np.log(x), y) xl.append(x) yl.append(y) # clog(NaN + i inf) returns +inf + iNaN. x = np.array([complex(np.nan, np.inf)], dtype=complex) y = complex(np.inf, np.nan) assert_almost_equal(np.log(x), y) xl.append(x) yl.append(y) # clog(NaN + iNaN) returns NaN + iNaN. x = np.array([complex(np.nan, np.nan)], dtype=complex) y = complex(np.nan, np.nan) assert_almost_equal(np.log(x), y) xl.append(x) yl.append(y) # clog(conj(z)) = conj(clog(z)). 
xa = np.array(xl, dtype=complex) ya = np.array(yl, dtype=complex) with np.errstate(divide='ignore'): for i in range(len(xa)): assert_almost_equal(np.log(xa[i].conj()), ya[i].conj()) class TestCsqrt(object): def test_simple(self): # sqrt(1) check_complex_value(np.sqrt, 1, 0, 1, 0) # sqrt(1i) rres = 0.5*np.sqrt(2) ires = rres check_complex_value(np.sqrt, 0, 1, rres, ires, False) # sqrt(-1) check_complex_value(np.sqrt, -1, 0, 0, 1) def test_simple_conjugate(self): ref = np.conj(np.sqrt(complex(1, 1))) def f(z): return np.sqrt(np.conj(z)) check_complex_value(f, 1, 1, ref.real, ref.imag, False) #def test_branch_cut(self): # _check_branch_cut(f, -1, 0, 1, -1) @platform_skip def test_special_values(self): # C99: Sec G 6.4.2 check = check_complex_value f = np.sqrt # csqrt(+-0 + 0i) is 0 + 0i check(f, np.PZERO, 0, 0, 0) check(f, np.NZERO, 0, 0, 0) # csqrt(x + infi) is inf + infi for any x (including NaN) check(f, 1, np.inf, np.inf, np.inf) check(f, -1, np.inf, np.inf, np.inf) check(f, np.PZERO, np.inf, np.inf, np.inf) check(f, np.NZERO, np.inf, np.inf, np.inf) check(f, np.inf, np.inf, np.inf, np.inf) check(f, -np.inf, np.inf, np.inf, np.inf) check(f, -np.nan, np.inf, np.inf, np.inf) # csqrt(x + nani) is nan + nani for any finite x check(f, 1, np.nan, np.nan, np.nan) check(f, -1, np.nan, np.nan, np.nan) check(f, 0, np.nan, np.nan, np.nan) # csqrt(-inf + yi) is +0 + infi for any finite y > 0 check(f, -np.inf, 1, np.PZERO, np.inf) # csqrt(inf + yi) is +inf + 0i for any finite y > 0 check(f, np.inf, 1, np.inf, np.PZERO) # csqrt(-inf + nani) is nan +- infi (both +i infi are valid) def _check_ninf_nan(dummy): msgform = "csqrt(-inf, nan) is (%f, %f), expected (nan, +-inf)" z = np.sqrt(np.array(complex(-np.inf, np.nan))) #Fixme: ugly workaround for isinf bug. with np.errstate(invalid='ignore'): if not (np.isnan(z.real) and np.isinf(z.imag)): raise AssertionError(msgform % (z.real, z.imag)) _check_ninf_nan(None) # csqrt(+inf + nani) is inf + nani check(f, np.inf, np.nan, np.inf, np.nan) # csqrt(nan + yi) is nan + nani for any finite y (infinite handled in x # + nani) check(f, np.nan, 0, np.nan, np.nan) check(f, np.nan, 1, np.nan, np.nan) check(f, np.nan, np.nan, np.nan, np.nan) # XXX: check for conj(csqrt(z)) == csqrt(conj(z)) (need to fix branch # cuts first) class TestCpow(object): def setup(self): self.olderr = np.seterr(invalid='ignore') def teardown(self): np.seterr(**self.olderr) def test_simple(self): x = np.array([1+1j, 0+2j, 1+2j, np.inf, np.nan]) y_r = x ** 2 y = np.power(x, 2) for i in range(len(x)): assert_almost_equal(y[i], y_r[i]) def test_scalar(self): x = np.array([1, 1j, 2, 2.5+.37j, np.inf, np.nan]) y = np.array([1, 1j, -0.5+1.5j, -0.5+1.5j, 2, 3]) lx = list(range(len(x))) # Compute the values for complex type in python p_r = [complex(x[i]) ** complex(y[i]) for i in lx] # Substitute a result allowed by C99 standard p_r[4] = complex(np.inf, np.nan) # Do the same with numpy complex scalars n_r = [x[i] ** y[i] for i in lx] for i in lx: assert_almost_equal(n_r[i], p_r[i], err_msg='Loop %d\n' % i) def test_array(self): x = np.array([1, 1j, 2, 2.5+.37j, np.inf, np.nan]) y = np.array([1, 1j, -0.5+1.5j, -0.5+1.5j, 2, 3]) lx = list(range(len(x))) # Compute the values for complex type in python p_r = [complex(x[i]) ** complex(y[i]) for i in lx] # Substitute a result allowed by C99 standard p_r[4] = complex(np.inf, np.nan) # Do the same with numpy arrays n_r = x ** y for i in lx: assert_almost_equal(n_r[i], p_r[i], err_msg='Loop %d\n' % i) class TestCabs(object): def setup(self): self.olderr = 
np.seterr(invalid='ignore') def teardown(self): np.seterr(**self.olderr) def test_simple(self): x = np.array([1+1j, 0+2j, 1+2j, np.inf, np.nan]) y_r = np.array([np.sqrt(2.), 2, np.sqrt(5), np.inf, np.nan]) y = np.abs(x) for i in range(len(x)): assert_almost_equal(y[i], y_r[i]) def test_fabs(self): # Test that np.abs(x +- 0j) == np.abs(x) (as mandated by C99 for cabs) x = np.array([1+0j], dtype=complex) assert_array_equal(np.abs(x), np.real(x)) x = np.array([complex(1, np.NZERO)], dtype=complex) assert_array_equal(np.abs(x), np.real(x)) x = np.array([complex(np.inf, np.NZERO)], dtype=complex) assert_array_equal(np.abs(x), np.real(x)) x = np.array([complex(np.nan, np.NZERO)], dtype=complex) assert_array_equal(np.abs(x), np.real(x)) def test_cabs_inf_nan(self): x, y = [], [] # cabs(+-nan + nani) returns nan x.append(np.nan) y.append(np.nan) check_real_value(np.abs, np.nan, np.nan, np.nan) x.append(np.nan) y.append(-np.nan) check_real_value(np.abs, -np.nan, np.nan, np.nan) # According to C99 standard, if exactly one of the real/part is inf and # the other nan, then cabs should return inf x.append(np.inf) y.append(np.nan) check_real_value(np.abs, np.inf, np.nan, np.inf) x.append(-np.inf) y.append(np.nan) check_real_value(np.abs, -np.inf, np.nan, np.inf) # cabs(conj(z)) == conj(cabs(z)) (= cabs(z)) def f(a): return np.abs(np.conj(a)) def g(a, b): return np.abs(complex(a, b)) xa = np.array(x, dtype=complex) for i in range(len(xa)): ref = g(x[i], y[i]) check_real_value(f, x[i], y[i], ref) class TestCarg(object): def test_simple(self): check_real_value(ncu._arg, 1, 0, 0, False) check_real_value(ncu._arg, 0, 1, 0.5*np.pi, False) check_real_value(ncu._arg, 1, 1, 0.25*np.pi, False) check_real_value(ncu._arg, np.PZERO, np.PZERO, np.PZERO) # TODO This can be xfail when the generator functions are got rid of. 
@pytest.mark.skip( reason="Complex arithmetic with signed zero fails on most platforms") def test_zero(self): # carg(-0 +- 0i) returns +- pi check_real_value(ncu._arg, np.NZERO, np.PZERO, np.pi, False) check_real_value(ncu._arg, np.NZERO, np.NZERO, -np.pi, False) # carg(+0 +- 0i) returns +- 0 check_real_value(ncu._arg, np.PZERO, np.PZERO, np.PZERO) check_real_value(ncu._arg, np.PZERO, np.NZERO, np.NZERO) # carg(x +- 0i) returns +- 0 for x > 0 check_real_value(ncu._arg, 1, np.PZERO, np.PZERO, False) check_real_value(ncu._arg, 1, np.NZERO, np.NZERO, False) # carg(x +- 0i) returns +- pi for x < 0 check_real_value(ncu._arg, -1, np.PZERO, np.pi, False) check_real_value(ncu._arg, -1, np.NZERO, -np.pi, False) # carg(+- 0 + yi) returns pi/2 for y > 0 check_real_value(ncu._arg, np.PZERO, 1, 0.5 * np.pi, False) check_real_value(ncu._arg, np.NZERO, 1, 0.5 * np.pi, False) # carg(+- 0 + yi) returns -pi/2 for y < 0 check_real_value(ncu._arg, np.PZERO, -1, 0.5 * np.pi, False) check_real_value(ncu._arg, np.NZERO, -1, -0.5 * np.pi, False) #def test_branch_cuts(self): # _check_branch_cut(ncu._arg, -1, 1j, -1, 1) def test_special_values(self): # carg(-np.inf +- yi) returns +-pi for finite y > 0 check_real_value(ncu._arg, -np.inf, 1, np.pi, False) check_real_value(ncu._arg, -np.inf, -1, -np.pi, False) # carg(np.inf +- yi) returns +-0 for finite y > 0 check_real_value(ncu._arg, np.inf, 1, np.PZERO, False) check_real_value(ncu._arg, np.inf, -1, np.NZERO, False) # carg(x +- np.infi) returns +-pi/2 for finite x check_real_value(ncu._arg, 1, np.inf, 0.5 * np.pi, False) check_real_value(ncu._arg, 1, -np.inf, -0.5 * np.pi, False) # carg(-np.inf +- np.infi) returns +-3pi/4 check_real_value(ncu._arg, -np.inf, np.inf, 0.75 * np.pi, False) check_real_value(ncu._arg, -np.inf, -np.inf, -0.75 * np.pi, False) # carg(np.inf +- np.infi) returns +-pi/4 check_real_value(ncu._arg, np.inf, np.inf, 0.25 * np.pi, False) check_real_value(ncu._arg, np.inf, -np.inf, -0.25 * np.pi, False) # carg(x + yi) returns np.nan if x or y is nan check_real_value(ncu._arg, np.nan, 0, np.nan, False) check_real_value(ncu._arg, 0, np.nan, np.nan, False) check_real_value(ncu._arg, np.nan, np.inf, np.nan, False) check_real_value(ncu._arg, np.inf, np.nan, np.nan, False) def check_real_value(f, x1, y1, x, exact=True): z1 = np.array([complex(x1, y1)]) if exact: assert_equal(f(z1), x) else: assert_almost_equal(f(z1), x) def check_complex_value(f, x1, y1, x2, y2, exact=True): z1 = np.array([complex(x1, y1)]) z2 = complex(x2, y2) with np.errstate(invalid='ignore'): if exact: assert_equal(f(z1), z2) else: assert_almost_equal(f(z1), z2)
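check_real_value and check_complex_value above only wrap the inputs in a one-element array and compare either exactly or approximately; the same C99 Annex G spot checks can be written directly against numpy. A small standalone sketch, not part of the test module:

import numpy as np
from numpy.testing import assert_equal, assert_almost_equal

with np.errstate(invalid='ignore'):
    # csqrt(-1 + 0i) is +0 + 1i exactly (also asserted by TestCsqrt above)
    assert_equal(np.sqrt(np.array([complex(-1, 0)])), complex(0, 1))
    # cexp(0 + 1i) is cos(1) + i sin(1), compared approximately
    assert_almost_equal(np.exp(np.array([complex(0, 1)])),
                        complex(np.cos(1), np.sin(1)))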
pizzathief/numpy
numpy/core/tests/test_umath_complex.py
numpy/f2py/cfuncs.py
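The snippets in cfuncs.py are only emitted when a wrapper is actually generated; the quickest way to see them in use is to build a tiny module with numpy.f2py. A hedged sketch (the routine and module names are made up, and a Fortran compiler must be available on the machine):

import numpy.f2py

fsource = '''
      subroutine twice(x, y)
      double precision x, y
cf2py intent(in) x
cf2py intent(out) y
      y = 2*x
      end
'''
# Compiles demo_twice*.so in the current directory; the generated C wrapper
# is where the cppmacros/cfuncs text above gets pasted in.
numpy.f2py.compile(fsource, modulename='demo_twice', verbose=False)

import demo_twice
print(demo_twice.twice(3.0))  # 6.0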
"""Config flow for ONVIF.""" from pprint import pformat from typing import List from urllib.parse import urlparse from onvif.exceptions import ONVIFError import voluptuous as vol from wsdiscovery.discovery import ThreadedWSDiscovery as WSDiscovery from wsdiscovery.scope import Scope from wsdiscovery.service import Service from zeep.exceptions import Fault from homeassistant import config_entries from homeassistant.components.ffmpeg import CONF_EXTRA_ARGUMENTS from homeassistant.const import ( CONF_HOST, CONF_NAME, CONF_PASSWORD, CONF_PORT, CONF_USERNAME, ) from homeassistant.core import callback # pylint: disable=unused-import from .const import ( CONF_DEVICE_ID, CONF_RTSP_TRANSPORT, DEFAULT_ARGUMENTS, DEFAULT_PORT, DOMAIN, LOGGER, RTSP_TRANS_PROTOCOLS, ) from .device import get_device CONF_MANUAL_INPUT = "Manually configure ONVIF device" def wsdiscovery() -> List[Service]: """Get ONVIF Profile S devices from network.""" discovery = WSDiscovery(ttl=4) discovery.start() services = discovery.searchServices( scopes=[Scope("onvif://www.onvif.org/Profile/Streaming")] ) discovery.stop() return services async def async_discovery(hass) -> bool: """Return if there are devices that can be discovered.""" LOGGER.debug("Starting ONVIF discovery...") services = await hass.async_add_executor_job(wsdiscovery) devices = [] for service in services: url = urlparse(service.getXAddrs()[0]) device = { CONF_DEVICE_ID: None, CONF_NAME: service.getEPR(), CONF_HOST: url.hostname, CONF_PORT: url.port or 80, } for scope in service.getScopes(): scope_str = scope.getValue() if scope_str.lower().startswith("onvif://www.onvif.org/name"): device[CONF_NAME] = scope_str.split("/")[-1] if scope_str.lower().startswith("onvif://www.onvif.org/mac"): device[CONF_DEVICE_ID] = scope_str.split("/")[-1] devices.append(device) return devices class OnvifFlowHandler(config_entries.ConfigFlow, domain=DOMAIN): """Handle a ONVIF config flow.""" VERSION = 1 CONNECTION_CLASS = config_entries.CONN_CLASS_LOCAL_POLL @staticmethod @callback def async_get_options_flow(config_entry): """Get the options flow for this handler.""" return OnvifOptionsFlowHandler(config_entry) def __init__(self): """Initialize the ONVIF config flow.""" self.device_id = None self.devices = [] self.onvif_config = {} async def async_step_user(self, user_input=None): """Handle user flow.""" if user_input is not None: return await self.async_step_device() return self.async_show_form(step_id="user") async def async_step_device(self, user_input=None): """Handle WS-Discovery. Let user choose between discovered devices and manual configuration. If no device is found allow user to manually input configuration. 
""" if user_input: if CONF_MANUAL_INPUT == user_input[CONF_HOST]: return await self.async_step_manual_input() for device in self.devices: name = f"{device[CONF_NAME]} ({device[CONF_HOST]})" if name == user_input[CONF_HOST]: self.device_id = device[CONF_DEVICE_ID] self.onvif_config = { CONF_NAME: device[CONF_NAME], CONF_HOST: device[CONF_HOST], CONF_PORT: device[CONF_PORT], } return await self.async_step_auth() discovery = await async_discovery(self.hass) for device in discovery: configured = any( entry.unique_id == device[CONF_DEVICE_ID] for entry in self._async_current_entries() ) if not configured: self.devices.append(device) LOGGER.debug("Discovered ONVIF devices %s", pformat(self.devices)) if self.devices: names = [ f"{device[CONF_NAME]} ({device[CONF_HOST]})" for device in self.devices ] names.append(CONF_MANUAL_INPUT) return self.async_show_form( step_id="device", data_schema=vol.Schema({vol.Optional(CONF_HOST): vol.In(names)}), ) return await self.async_step_manual_input() async def async_step_manual_input(self, user_input=None): """Manual configuration.""" if user_input: self.onvif_config = user_input return await self.async_step_auth() return self.async_show_form( step_id="manual_input", data_schema=vol.Schema( { vol.Required(CONF_NAME): str, vol.Required(CONF_HOST): str, vol.Required(CONF_PORT, default=DEFAULT_PORT): int, } ), ) async def async_step_auth(self, user_input=None): """Username and Password configuration for ONVIF device.""" if user_input: self.onvif_config[CONF_USERNAME] = user_input[CONF_USERNAME] self.onvif_config[CONF_PASSWORD] = user_input[CONF_PASSWORD] return await self.async_step_profiles() # Username and Password are optional and default empty # due to some cameras not allowing you to change ONVIF user settings. # See https://github.com/home-assistant/core/issues/39182 # and https://github.com/home-assistant/core/issues/35904 return self.async_show_form( step_id="auth", data_schema=vol.Schema( { vol.Optional(CONF_USERNAME, default=""): str, vol.Optional(CONF_PASSWORD, default=""): str, } ), ) async def async_step_profiles(self, user_input=None): """Fetch ONVIF device profiles.""" errors = {} LOGGER.debug( "Fetching profiles from ONVIF device %s", pformat(self.onvif_config) ) device = get_device( self.hass, self.onvif_config[CONF_HOST], self.onvif_config[CONF_PORT], self.onvif_config[CONF_USERNAME], self.onvif_config[CONF_PASSWORD], ) try: await device.update_xaddrs() device_mgmt = device.create_devicemgmt_service() # Get the MAC address to use as the unique ID for the config flow if not self.device_id: try: network_interfaces = await device_mgmt.GetNetworkInterfaces() for interface in network_interfaces: if interface.Enabled: self.device_id = interface.Info.HwAddress except Fault as fault: if "not implemented" not in fault.message: raise fault LOGGER.debug( "Couldn't get network interfaces from ONVIF deivice '%s'. 
Error: %s", self.onvif_config[CONF_NAME], fault, ) # If no network interfaces are exposed, fallback to serial number if not self.device_id: device_info = await device_mgmt.GetDeviceInformation() self.device_id = device_info.SerialNumber if not self.device_id: return self.async_abort(reason="no_mac") await self.async_set_unique_id(self.device_id, raise_on_progress=False) self._abort_if_unique_id_configured( updates={ CONF_HOST: self.onvif_config[CONF_HOST], CONF_PORT: self.onvif_config[CONF_PORT], CONF_NAME: self.onvif_config[CONF_NAME], } ) # Verify there is an H264 profile media_service = device.create_media_service() profiles = await media_service.GetProfiles() h264 = any( profile.VideoEncoderConfiguration and profile.VideoEncoderConfiguration.Encoding == "H264" for profile in profiles ) if not h264: return self.async_abort(reason="no_h264") await device.close() title = f"{self.onvif_config[CONF_NAME]} - {self.device_id}" return self.async_create_entry(title=title, data=self.onvif_config) except ONVIFError as err: LOGGER.error( "Couldn't setup ONVIF device '%s'. Error: %s", self.onvif_config[CONF_NAME], err, ) await device.close() return self.async_abort(reason="onvif_error") except Fault: errors["base"] = "cannot_connect" await device.close() return self.async_show_form(step_id="auth", errors=errors) async def async_step_import(self, user_input): """Handle import.""" self.onvif_config = user_input return await self.async_step_profiles() class OnvifOptionsFlowHandler(config_entries.OptionsFlow): """Handle ONVIF options.""" def __init__(self, config_entry): """Initialize ONVIF options flow.""" self.config_entry = config_entry self.options = dict(config_entry.options) async def async_step_init(self, user_input=None): """Manage the ONVIF options.""" return await self.async_step_onvif_devices() async def async_step_onvif_devices(self, user_input=None): """Manage the ONVIF devices options.""" if user_input is not None: self.options[CONF_EXTRA_ARGUMENTS] = user_input[CONF_EXTRA_ARGUMENTS] self.options[CONF_RTSP_TRANSPORT] = user_input[CONF_RTSP_TRANSPORT] return self.async_create_entry(title="", data=self.options) return self.async_show_form( step_id="onvif_devices", data_schema=vol.Schema( { vol.Optional( CONF_EXTRA_ARGUMENTS, default=self.config_entry.options.get( CONF_EXTRA_ARGUMENTS, DEFAULT_ARGUMENTS ), ): str, vol.Optional( CONF_RTSP_TRANSPORT, default=self.config_entry.options.get( CONF_RTSP_TRANSPORT, RTSP_TRANS_PROTOCOLS[0] ), ): vol.In(RTSP_TRANS_PROTOCOLS), } ), )
"""Define tests for the GDACS config flow.""" from datetime import timedelta import pytest from homeassistant import data_entry_flow from homeassistant.components.gdacs import CONF_CATEGORIES, DOMAIN from homeassistant.const import ( CONF_LATITUDE, CONF_LONGITUDE, CONF_RADIUS, CONF_SCAN_INTERVAL, ) from tests.async_mock import patch @pytest.fixture(name="gdacs_setup", autouse=True) def gdacs_setup_fixture(): """Mock gdacs entry setup.""" with patch("homeassistant.components.gdacs.async_setup_entry", return_value=True): yield async def test_duplicate_error(hass, config_entry): """Test that errors are shown when duplicates are added.""" conf = {CONF_LATITUDE: -41.2, CONF_LONGITUDE: 174.7, CONF_RADIUS: 25} config_entry.add_to_hass(hass) result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": "user"}, data=conf ) assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT assert result["reason"] == "already_configured" async def test_show_form(hass): """Test that the form is served with no input.""" result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": "user"} ) assert result["type"] == data_entry_flow.RESULT_TYPE_FORM assert result["step_id"] == "user" async def test_step_import(hass): """Test that the import step works.""" conf = { CONF_LATITUDE: -41.2, CONF_LONGITUDE: 174.7, CONF_RADIUS: 25, CONF_SCAN_INTERVAL: timedelta(minutes=4), CONF_CATEGORIES: ["Drought", "Earthquake"], } result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": "import"}, data=conf ) assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY assert result["title"] == "-41.2, 174.7" assert result["data"] == { CONF_LATITUDE: -41.2, CONF_LONGITUDE: 174.7, CONF_RADIUS: 25, CONF_SCAN_INTERVAL: 240.0, CONF_CATEGORIES: ["Drought", "Earthquake"], } async def test_step_user(hass): """Test that the user step works.""" hass.config.latitude = -41.2 hass.config.longitude = 174.7 conf = {CONF_RADIUS: 25} result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": "user"}, data=conf ) assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY assert result["title"] == "-41.2, 174.7" assert result["data"] == { CONF_LATITUDE: -41.2, CONF_LONGITUDE: 174.7, CONF_RADIUS: 25, CONF_SCAN_INTERVAL: 300.0, CONF_CATEGORIES: [], }
balloob/home-assistant
tests/components/gdacs/test_config_flow.py
homeassistant/components/onvif/config_flow.py
"""Config flow for Hisense AEH-W4A1 integration.""" from pyaehw4a1.aehw4a1 import AehW4a1 from homeassistant import config_entries from homeassistant.helpers import config_entry_flow from .const import DOMAIN async def _async_has_devices(hass): """Return if there are devices that can be discovered.""" aehw4a1_ip_addresses = await AehW4a1().discovery() return len(aehw4a1_ip_addresses) > 0 config_entry_flow.register_discovery_flow( DOMAIN, "Hisense AEH-W4A1", _async_has_devices, config_entries.CONN_CLASS_LOCAL_POLL )
"""Define tests for the GDACS config flow.""" from datetime import timedelta import pytest from homeassistant import data_entry_flow from homeassistant.components.gdacs import CONF_CATEGORIES, DOMAIN from homeassistant.const import ( CONF_LATITUDE, CONF_LONGITUDE, CONF_RADIUS, CONF_SCAN_INTERVAL, ) from tests.async_mock import patch @pytest.fixture(name="gdacs_setup", autouse=True) def gdacs_setup_fixture(): """Mock gdacs entry setup.""" with patch("homeassistant.components.gdacs.async_setup_entry", return_value=True): yield async def test_duplicate_error(hass, config_entry): """Test that errors are shown when duplicates are added.""" conf = {CONF_LATITUDE: -41.2, CONF_LONGITUDE: 174.7, CONF_RADIUS: 25} config_entry.add_to_hass(hass) result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": "user"}, data=conf ) assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT assert result["reason"] == "already_configured" async def test_show_form(hass): """Test that the form is served with no input.""" result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": "user"} ) assert result["type"] == data_entry_flow.RESULT_TYPE_FORM assert result["step_id"] == "user" async def test_step_import(hass): """Test that the import step works.""" conf = { CONF_LATITUDE: -41.2, CONF_LONGITUDE: 174.7, CONF_RADIUS: 25, CONF_SCAN_INTERVAL: timedelta(minutes=4), CONF_CATEGORIES: ["Drought", "Earthquake"], } result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": "import"}, data=conf ) assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY assert result["title"] == "-41.2, 174.7" assert result["data"] == { CONF_LATITUDE: -41.2, CONF_LONGITUDE: 174.7, CONF_RADIUS: 25, CONF_SCAN_INTERVAL: 240.0, CONF_CATEGORIES: ["Drought", "Earthquake"], } async def test_step_user(hass): """Test that the user step works.""" hass.config.latitude = -41.2 hass.config.longitude = 174.7 conf = {CONF_RADIUS: 25} result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": "user"}, data=conf ) assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY assert result["title"] == "-41.2, 174.7" assert result["data"] == { CONF_LATITUDE: -41.2, CONF_LONGITUDE: 174.7, CONF_RADIUS: 25, CONF_SCAN_INTERVAL: 300.0, CONF_CATEGORIES: [], }
balloob/home-assistant
tests/components/gdacs/test_config_flow.py
homeassistant/components/hisense_aehw4a1/config_flow.py
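A standalone sketch of the discovery call that _async_has_devices relies on; it only uses the pyaehw4a1 call already shown above and prints whatever IP addresses are found on the local network.

import asyncio

from pyaehw4a1.aehw4a1 import AehW4a1


async def main() -> None:
    # Same discovery call the flow uses to decide whether any modules are reachable.
    ip_addresses = await AehW4a1().discovery()
    print(f"Found {len(ip_addresses)} Hisense AEH-W4A1 device(s): {ip_addresses}")


if __name__ == "__main__":
    asyncio.run(main())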
"""Support for OpenTherm Gateway binary sensors.""" import logging from homeassistant.components.binary_sensor import ENTITY_ID_FORMAT, BinarySensorEntity from homeassistant.const import CONF_ID from homeassistant.core import callback from homeassistant.helpers.dispatcher import async_dispatcher_connect from homeassistant.helpers.entity import async_generate_entity_id from . import DOMAIN from .const import BINARY_SENSOR_INFO, DATA_GATEWAYS, DATA_OPENTHERM_GW _LOGGER = logging.getLogger(__name__) async def async_setup_entry(hass, config_entry, async_add_entities): """Set up the OpenTherm Gateway binary sensors.""" sensors = [] for var, info in BINARY_SENSOR_INFO.items(): device_class = info[0] friendly_name_format = info[1] sensors.append( OpenThermBinarySensor( hass.data[DATA_OPENTHERM_GW][DATA_GATEWAYS][config_entry.data[CONF_ID]], var, device_class, friendly_name_format, ) ) async_add_entities(sensors) class OpenThermBinarySensor(BinarySensorEntity): """Represent an OpenTherm Gateway binary sensor.""" def __init__(self, gw_dev, var, device_class, friendly_name_format): """Initialize the binary sensor.""" self.entity_id = async_generate_entity_id( ENTITY_ID_FORMAT, f"{var}_{gw_dev.gw_id}", hass=gw_dev.hass ) self._gateway = gw_dev self._var = var self._state = None self._device_class = device_class self._friendly_name = friendly_name_format.format(gw_dev.name) self._unsub_updates = None async def async_added_to_hass(self): """Subscribe to updates from the component.""" _LOGGER.debug("Added OpenTherm Gateway binary sensor %s", self._friendly_name) self._unsub_updates = async_dispatcher_connect( self.hass, self._gateway.update_signal, self.receive_report ) async def async_will_remove_from_hass(self): """Unsubscribe from updates from the component.""" _LOGGER.debug( "Removing OpenTherm Gateway binary sensor %s", self._friendly_name ) self._unsub_updates() @property def available(self): """Return availability of the sensor.""" return self._state is not None @property def entity_registry_enabled_default(self): """Disable binary_sensors by default.""" return False @callback def receive_report(self, status): """Handle status updates from the component.""" state = status.get(self._var) self._state = None if state is None else bool(state) self.async_write_ha_state() @property def name(self): """Return the friendly name.""" return self._friendly_name @property def device_info(self): """Return device info.""" return { "identifiers": {(DOMAIN, self._gateway.gw_id)}, "name": self._gateway.name, "manufacturer": "Schelte Bron", "model": "OpenTherm Gateway", "sw_version": self._gateway.gw_version, } @property def unique_id(self): """Return a unique ID.""" return f"{self._gateway.gw_id}-{self._var}" @property def is_on(self): """Return true if the binary sensor is on.""" return self._state @property def device_class(self): """Return the class of this device.""" return self._device_class @property def should_poll(self): """Return False because entity pushes its state.""" return False
"""Define tests for the GDACS config flow.""" from datetime import timedelta import pytest from homeassistant import data_entry_flow from homeassistant.components.gdacs import CONF_CATEGORIES, DOMAIN from homeassistant.const import ( CONF_LATITUDE, CONF_LONGITUDE, CONF_RADIUS, CONF_SCAN_INTERVAL, ) from tests.async_mock import patch @pytest.fixture(name="gdacs_setup", autouse=True) def gdacs_setup_fixture(): """Mock gdacs entry setup.""" with patch("homeassistant.components.gdacs.async_setup_entry", return_value=True): yield async def test_duplicate_error(hass, config_entry): """Test that errors are shown when duplicates are added.""" conf = {CONF_LATITUDE: -41.2, CONF_LONGITUDE: 174.7, CONF_RADIUS: 25} config_entry.add_to_hass(hass) result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": "user"}, data=conf ) assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT assert result["reason"] == "already_configured" async def test_show_form(hass): """Test that the form is served with no input.""" result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": "user"} ) assert result["type"] == data_entry_flow.RESULT_TYPE_FORM assert result["step_id"] == "user" async def test_step_import(hass): """Test that the import step works.""" conf = { CONF_LATITUDE: -41.2, CONF_LONGITUDE: 174.7, CONF_RADIUS: 25, CONF_SCAN_INTERVAL: timedelta(minutes=4), CONF_CATEGORIES: ["Drought", "Earthquake"], } result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": "import"}, data=conf ) assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY assert result["title"] == "-41.2, 174.7" assert result["data"] == { CONF_LATITUDE: -41.2, CONF_LONGITUDE: 174.7, CONF_RADIUS: 25, CONF_SCAN_INTERVAL: 240.0, CONF_CATEGORIES: ["Drought", "Earthquake"], } async def test_step_user(hass): """Test that the user step works.""" hass.config.latitude = -41.2 hass.config.longitude = 174.7 conf = {CONF_RADIUS: 25} result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": "user"}, data=conf ) assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY assert result["title"] == "-41.2, 174.7" assert result["data"] == { CONF_LATITUDE: -41.2, CONF_LONGITUDE: 174.7, CONF_RADIUS: 25, CONF_SCAN_INTERVAL: 300.0, CONF_CATEGORIES: [], }
balloob/home-assistant
tests/components/gdacs/test_config_flow.py
homeassistant/components/opentherm_gw/binary_sensor.py
"""Config flow to configure the Elgato Key Light integration.""" from typing import Any, Dict, Optional from elgato import Elgato, ElgatoError, Info import voluptuous as vol from homeassistant.config_entries import CONN_CLASS_LOCAL_POLL, ConfigFlow from homeassistant.const import CONF_HOST, CONF_PORT from homeassistant.helpers.aiohttp_client import async_get_clientsession from homeassistant.helpers.typing import ConfigType from .const import CONF_SERIAL_NUMBER, DOMAIN # pylint: disable=unused-import class ElgatoFlowHandler(ConfigFlow, domain=DOMAIN): """Handle a Elgato Key Light config flow.""" VERSION = 1 CONNECTION_CLASS = CONN_CLASS_LOCAL_POLL async def async_step_user( self, user_input: Optional[ConfigType] = None ) -> Dict[str, Any]: """Handle a flow initiated by the user.""" if user_input is None: return self._show_setup_form() try: info = await self._get_elgato_info( user_input[CONF_HOST], user_input[CONF_PORT] ) except ElgatoError: return self._show_setup_form({"base": "cannot_connect"}) # Check if already configured await self.async_set_unique_id(info.serial_number) self._abort_if_unique_id_configured() return self.async_create_entry( title=info.serial_number, data={ CONF_HOST: user_input[CONF_HOST], CONF_PORT: user_input[CONF_PORT], CONF_SERIAL_NUMBER: info.serial_number, }, ) async def async_step_zeroconf( self, user_input: Optional[ConfigType] = None ) -> Dict[str, Any]: """Handle zeroconf discovery.""" if user_input is None: return self.async_abort(reason="cannot_connect") try: info = await self._get_elgato_info( user_input[CONF_HOST], user_input[CONF_PORT] ) except ElgatoError: return self.async_abort(reason="cannot_connect") # Check if already configured await self.async_set_unique_id(info.serial_number) self._abort_if_unique_id_configured(updates={CONF_HOST: user_input[CONF_HOST]}) # pylint: disable=no-member # https://github.com/PyCQA/pylint/issues/3167 self.context.update( { CONF_HOST: user_input[CONF_HOST], CONF_PORT: user_input[CONF_PORT], CONF_SERIAL_NUMBER: info.serial_number, "title_placeholders": {"serial_number": info.serial_number}, } ) # Prepare configuration flow return self._show_confirm_dialog() # pylint: disable=no-member # https://github.com/PyCQA/pylint/issues/3167 async def async_step_zeroconf_confirm( self, user_input: ConfigType = None ) -> Dict[str, Any]: """Handle a flow initiated by zeroconf.""" if user_input is None: return self._show_confirm_dialog() try: info = await self._get_elgato_info( self.context.get(CONF_HOST), self.context.get(CONF_PORT) ) except ElgatoError: return self.async_abort(reason="cannot_connect") # Check if already configured await self.async_set_unique_id(info.serial_number) self._abort_if_unique_id_configured() return self.async_create_entry( title=self.context.get(CONF_SERIAL_NUMBER), data={ CONF_HOST: self.context.get(CONF_HOST), CONF_PORT: self.context.get(CONF_PORT), CONF_SERIAL_NUMBER: self.context.get(CONF_SERIAL_NUMBER), }, ) def _show_setup_form(self, errors: Optional[Dict] = None) -> Dict[str, Any]: """Show the setup form to the user.""" return self.async_show_form( step_id="user", data_schema=vol.Schema( { vol.Required(CONF_HOST): str, vol.Optional(CONF_PORT, default=9123): int, } ), errors=errors or {}, ) def _show_confirm_dialog(self) -> Dict[str, Any]: """Show the confirm dialog to the user.""" # pylint: disable=no-member # https://github.com/PyCQA/pylint/issues/3167 serial_number = self.context.get(CONF_SERIAL_NUMBER) return self.async_show_form( step_id="zeroconf_confirm", 
description_placeholders={"serial_number": serial_number}, ) async def _get_elgato_info(self, host: str, port: int) -> Info: """Get device information from an Elgato Key Light device.""" session = async_get_clientsession(self.hass) elgato = Elgato( host, port=port, session=session, ) return await elgato.info()
"""Define tests for the GDACS config flow.""" from datetime import timedelta import pytest from homeassistant import data_entry_flow from homeassistant.components.gdacs import CONF_CATEGORIES, DOMAIN from homeassistant.const import ( CONF_LATITUDE, CONF_LONGITUDE, CONF_RADIUS, CONF_SCAN_INTERVAL, ) from tests.async_mock import patch @pytest.fixture(name="gdacs_setup", autouse=True) def gdacs_setup_fixture(): """Mock gdacs entry setup.""" with patch("homeassistant.components.gdacs.async_setup_entry", return_value=True): yield async def test_duplicate_error(hass, config_entry): """Test that errors are shown when duplicates are added.""" conf = {CONF_LATITUDE: -41.2, CONF_LONGITUDE: 174.7, CONF_RADIUS: 25} config_entry.add_to_hass(hass) result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": "user"}, data=conf ) assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT assert result["reason"] == "already_configured" async def test_show_form(hass): """Test that the form is served with no input.""" result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": "user"} ) assert result["type"] == data_entry_flow.RESULT_TYPE_FORM assert result["step_id"] == "user" async def test_step_import(hass): """Test that the import step works.""" conf = { CONF_LATITUDE: -41.2, CONF_LONGITUDE: 174.7, CONF_RADIUS: 25, CONF_SCAN_INTERVAL: timedelta(minutes=4), CONF_CATEGORIES: ["Drought", "Earthquake"], } result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": "import"}, data=conf ) assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY assert result["title"] == "-41.2, 174.7" assert result["data"] == { CONF_LATITUDE: -41.2, CONF_LONGITUDE: 174.7, CONF_RADIUS: 25, CONF_SCAN_INTERVAL: 240.0, CONF_CATEGORIES: ["Drought", "Earthquake"], } async def test_step_user(hass): """Test that the user step works.""" hass.config.latitude = -41.2 hass.config.longitude = 174.7 conf = {CONF_RADIUS: 25} result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": "user"}, data=conf ) assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY assert result["title"] == "-41.2, 174.7" assert result["data"] == { CONF_LATITUDE: -41.2, CONF_LONGITUDE: 174.7, CONF_RADIUS: 25, CONF_SCAN_INTERVAL: 300.0, CONF_CATEGORIES: [], }
balloob/home-assistant
tests/components/gdacs/test_config_flow.py
homeassistant/components/elgato/config_flow.py
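A standalone sketch mirroring what _get_elgato_info does, assuming a reachable device: it builds its own aiohttp session, passes the same constructor arguments to Elgato, and prints the serial number. The host is a placeholder and 9123 is the port default from the setup form.

import asyncio

import aiohttp
from elgato import Elgato


async def main() -> None:
    async with aiohttp.ClientSession() as session:
        # Same constructor arguments and info() call used by _get_elgato_info().
        elgato = Elgato("192.168.1.100", port=9123, session=session)
        info = await elgato.info()
        print(info.serial_number)


if __name__ == "__main__":
    asyncio.run(main())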
"""Support for Xiaomi Mi Air Quality Monitor (PM2.5).""" from dataclasses import dataclass import logging from miio import AirQualityMonitor, DeviceException # pylint: disable=import-error from miio.gateway import ( GATEWAY_MODEL_AC_V1, GATEWAY_MODEL_AC_V2, GATEWAY_MODEL_AC_V3, GATEWAY_MODEL_EU, DeviceType, GatewayException, ) import voluptuous as vol from homeassistant.components.sensor import PLATFORM_SCHEMA from homeassistant.const import ( CONF_HOST, CONF_NAME, CONF_TOKEN, DEVICE_CLASS_HUMIDITY, DEVICE_CLASS_ILLUMINANCE, DEVICE_CLASS_PRESSURE, DEVICE_CLASS_TEMPERATURE, LIGHT_LUX, PERCENTAGE, PRESSURE_HPA, TEMP_CELSIUS, ) from homeassistant.exceptions import PlatformNotReady import homeassistant.helpers.config_validation as cv from homeassistant.helpers.entity import Entity from .config_flow import CONF_FLOW_TYPE, CONF_GATEWAY from .const import DOMAIN from .gateway import XiaomiGatewayDevice _LOGGER = logging.getLogger(__name__) DEFAULT_NAME = "Xiaomi Miio Sensor" DATA_KEY = "sensor.xiaomi_miio" PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend( { vol.Required(CONF_HOST): cv.string, vol.Required(CONF_TOKEN): vol.All(cv.string, vol.Length(min=32, max=32)), vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string, } ) ATTR_POWER = "power" ATTR_CHARGING = "charging" ATTR_BATTERY_LEVEL = "battery_level" ATTR_DISPLAY_CLOCK = "display_clock" ATTR_NIGHT_MODE = "night_mode" ATTR_NIGHT_TIME_BEGIN = "night_time_begin" ATTR_NIGHT_TIME_END = "night_time_end" ATTR_SENSOR_STATE = "sensor_state" ATTR_MODEL = "model" SUCCESS = ["ok"] @dataclass class SensorType: """Class that holds device specific info for a xiaomi aqara sensor.""" unit: str = None icon: str = None device_class: str = None GATEWAY_SENSOR_TYPES = { "temperature": SensorType( unit=TEMP_CELSIUS, icon=None, device_class=DEVICE_CLASS_TEMPERATURE ), "humidity": SensorType( unit=PERCENTAGE, icon=None, device_class=DEVICE_CLASS_HUMIDITY ), "pressure": SensorType( unit=PRESSURE_HPA, icon=None, device_class=DEVICE_CLASS_PRESSURE ), } async def async_setup_entry(hass, config_entry, async_add_entities): """Set up the Xiaomi sensor from a config entry.""" entities = [] if config_entry.data[CONF_FLOW_TYPE] == CONF_GATEWAY: gateway = hass.data[DOMAIN][config_entry.entry_id] # Gateway illuminance sensor if gateway.model not in [ GATEWAY_MODEL_AC_V1, GATEWAY_MODEL_AC_V2, GATEWAY_MODEL_AC_V3, GATEWAY_MODEL_EU, ]: entities.append( XiaomiGatewayIlluminanceSensor( gateway, config_entry.title, config_entry.unique_id ) ) # Gateway sub devices sub_devices = gateway.devices for sub_device in sub_devices.values(): sensor_variables = None if sub_device.type == DeviceType.SensorHT: sensor_variables = ["temperature", "humidity"] if sub_device.type == DeviceType.AqaraHT: sensor_variables = ["temperature", "humidity", "pressure"] if sensor_variables is not None: entities.extend( [ XiaomiGatewaySensor(sub_device, config_entry, variable) for variable in sensor_variables ] ) async_add_entities(entities, update_before_add=True) async def async_setup_platform(hass, config, async_add_entities, discovery_info=None): """Set up the sensor from config.""" if DATA_KEY not in hass.data: hass.data[DATA_KEY] = {} host = config[CONF_HOST] token = config[CONF_TOKEN] name = config[CONF_NAME] _LOGGER.info("Initializing with host %s (token %s...)", host, token[:5]) try: air_quality_monitor = AirQualityMonitor(host, token) device_info = await hass.async_add_executor_job(air_quality_monitor.info) model = device_info.model unique_id = f"{model}-{device_info.mac_address}" _LOGGER.info( "%s %s %s 
detected", model, device_info.firmware_version, device_info.hardware_version, ) device = XiaomiAirQualityMonitor(name, air_quality_monitor, model, unique_id) except DeviceException as ex: raise PlatformNotReady from ex hass.data[DATA_KEY][host] = device async_add_entities([device], update_before_add=True) class XiaomiAirQualityMonitor(Entity): """Representation of a Xiaomi Air Quality Monitor.""" def __init__(self, name, device, model, unique_id): """Initialize the entity.""" self._name = name self._device = device self._model = model self._unique_id = unique_id self._icon = "mdi:cloud" self._unit_of_measurement = "AQI" self._available = None self._state = None self._state_attrs = { ATTR_POWER: None, ATTR_BATTERY_LEVEL: None, ATTR_CHARGING: None, ATTR_DISPLAY_CLOCK: None, ATTR_NIGHT_MODE: None, ATTR_NIGHT_TIME_BEGIN: None, ATTR_NIGHT_TIME_END: None, ATTR_SENSOR_STATE: None, ATTR_MODEL: self._model, } @property def unique_id(self): """Return an unique ID.""" return self._unique_id @property def name(self): """Return the name of this entity, if any.""" return self._name @property def unit_of_measurement(self): """Return the unit of measurement of this entity, if any.""" return self._unit_of_measurement @property def icon(self): """Return the icon to use for device if any.""" return self._icon @property def available(self): """Return true when state is known.""" return self._available @property def state(self): """Return the state of the device.""" return self._state @property def device_state_attributes(self): """Return the state attributes of the device.""" return self._state_attrs async def async_update(self): """Fetch state from the miio device.""" try: state = await self.hass.async_add_executor_job(self._device.status) _LOGGER.debug("Got new state: %s", state) self._available = True self._state = state.aqi self._state_attrs.update( { ATTR_POWER: state.power, ATTR_CHARGING: state.usb_power, ATTR_BATTERY_LEVEL: state.battery, ATTR_DISPLAY_CLOCK: state.display_clock, ATTR_NIGHT_MODE: state.night_mode, ATTR_NIGHT_TIME_BEGIN: state.night_time_begin, ATTR_NIGHT_TIME_END: state.night_time_end, ATTR_SENSOR_STATE: state.sensor_state, } ) except DeviceException as ex: if self._available: self._available = False _LOGGER.error("Got exception while fetching the state: %s", ex) class XiaomiGatewaySensor(XiaomiGatewayDevice): """Representation of a XiaomiGatewaySensor.""" def __init__(self, sub_device, entry, data_key): """Initialize the XiaomiSensor.""" super().__init__(sub_device, entry) self._data_key = data_key self._unique_id = f"{sub_device.sid}-{data_key}" self._name = f"{data_key} ({sub_device.sid})".capitalize() @property def icon(self): """Return the icon to use in the frontend.""" return GATEWAY_SENSOR_TYPES[self._data_key].icon @property def unit_of_measurement(self): """Return the unit of measurement of this entity, if any.""" return GATEWAY_SENSOR_TYPES[self._data_key].unit @property def device_class(self): """Return the device class of this entity.""" return GATEWAY_SENSOR_TYPES[self._data_key].device_class @property def state(self): """Return the state of the sensor.""" return self._sub_device.status[self._data_key] class XiaomiGatewayIlluminanceSensor(Entity): """Representation of the gateway device's illuminance sensor.""" def __init__(self, gateway_device, gateway_name, gateway_device_id): """Initialize the entity.""" self._gateway = gateway_device self._name = f"{gateway_name} Illuminance" self._gateway_device_id = gateway_device_id self._unique_id = 
f"{gateway_device_id}-illuminance" self._available = False self._state = None @property def unique_id(self): """Return an unique ID.""" return self._unique_id @property def device_info(self): """Return the device info of the gateway.""" return { "identifiers": {(DOMAIN, self._gateway_device_id)}, } @property def name(self): """Return the name of this entity, if any.""" return self._name @property def available(self): """Return true when state is known.""" return self._available @property def unit_of_measurement(self): """Return the unit of measurement of this entity.""" return LIGHT_LUX @property def device_class(self): """Return the device class of this entity.""" return DEVICE_CLASS_ILLUMINANCE @property def state(self): """Return the state of the device.""" return self._state async def async_update(self): """Fetch state from the device.""" try: self._state = await self.hass.async_add_executor_job( self._gateway.get_illumination ) self._available = True except GatewayException as ex: if self._available: self._available = False _LOGGER.error( "Got exception while fetching the gateway illuminance state: %s", ex )
"""Define tests for the GDACS config flow.""" from datetime import timedelta import pytest from homeassistant import data_entry_flow from homeassistant.components.gdacs import CONF_CATEGORIES, DOMAIN from homeassistant.const import ( CONF_LATITUDE, CONF_LONGITUDE, CONF_RADIUS, CONF_SCAN_INTERVAL, ) from tests.async_mock import patch @pytest.fixture(name="gdacs_setup", autouse=True) def gdacs_setup_fixture(): """Mock gdacs entry setup.""" with patch("homeassistant.components.gdacs.async_setup_entry", return_value=True): yield async def test_duplicate_error(hass, config_entry): """Test that errors are shown when duplicates are added.""" conf = {CONF_LATITUDE: -41.2, CONF_LONGITUDE: 174.7, CONF_RADIUS: 25} config_entry.add_to_hass(hass) result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": "user"}, data=conf ) assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT assert result["reason"] == "already_configured" async def test_show_form(hass): """Test that the form is served with no input.""" result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": "user"} ) assert result["type"] == data_entry_flow.RESULT_TYPE_FORM assert result["step_id"] == "user" async def test_step_import(hass): """Test that the import step works.""" conf = { CONF_LATITUDE: -41.2, CONF_LONGITUDE: 174.7, CONF_RADIUS: 25, CONF_SCAN_INTERVAL: timedelta(minutes=4), CONF_CATEGORIES: ["Drought", "Earthquake"], } result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": "import"}, data=conf ) assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY assert result["title"] == "-41.2, 174.7" assert result["data"] == { CONF_LATITUDE: -41.2, CONF_LONGITUDE: 174.7, CONF_RADIUS: 25, CONF_SCAN_INTERVAL: 240.0, CONF_CATEGORIES: ["Drought", "Earthquake"], } async def test_step_user(hass): """Test that the user step works.""" hass.config.latitude = -41.2 hass.config.longitude = 174.7 conf = {CONF_RADIUS: 25} result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": "user"}, data=conf ) assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY assert result["title"] == "-41.2, 174.7" assert result["data"] == { CONF_LATITUDE: -41.2, CONF_LONGITUDE: 174.7, CONF_RADIUS: 25, CONF_SCAN_INTERVAL: 300.0, CONF_CATEGORIES: [], }
balloob/home-assistant
tests/components/gdacs/test_config_flow.py
homeassistant/components/xiaomi_miio/sensor.py
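A standalone sketch of the python-miio calls the YAML setup path above makes (info() and status()); the host and 32-character token are placeholders, and only attributes the platform already reads are printed.

from miio import AirQualityMonitor

# Placeholder host and 32-character token; replace with real device credentials.
monitor = AirQualityMonitor("192.168.1.50", "0123456789abcdef0123456789abcdef")

device_info = monitor.info()
print(device_info.model, device_info.firmware_version, device_info.hardware_version)

status = monitor.status()
print(status.aqi, status.battery, status.usb_power)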
"""Support for the Tesla sensors.""" from typing import Optional from homeassistant.components.sensor import DEVICE_CLASSES from homeassistant.const import ( LENGTH_KILOMETERS, LENGTH_MILES, TEMP_CELSIUS, TEMP_FAHRENHEIT, ) from homeassistant.helpers.entity import Entity from homeassistant.util.distance import convert from . import DOMAIN as TESLA_DOMAIN, TeslaDevice async def async_setup_entry(hass, config_entry, async_add_entities): """Set up the Tesla binary_sensors by config_entry.""" coordinator = hass.data[TESLA_DOMAIN][config_entry.entry_id]["coordinator"] entities = [] for device in hass.data[TESLA_DOMAIN][config_entry.entry_id]["devices"]["sensor"]: if device.type == "temperature sensor": entities.append(TeslaSensor(device, coordinator, "inside")) entities.append(TeslaSensor(device, coordinator, "outside")) else: entities.append(TeslaSensor(device, coordinator)) async_add_entities(entities, True) class TeslaSensor(TeslaDevice, Entity): """Representation of Tesla sensors.""" def __init__(self, tesla_device, coordinator, sensor_type=None): """Initialize of the sensor.""" super().__init__(tesla_device, coordinator) self.type = sensor_type if self.type: self._name = f"{super().name} ({self.type})" self._unique_id = f"{super().unique_id}_{self.type}" @property def state(self) -> Optional[float]: """Return the state of the sensor.""" if self.tesla_device.type == "temperature sensor": if self.type == "outside": return self.tesla_device.get_outside_temp() return self.tesla_device.get_inside_temp() if self.tesla_device.type in ["range sensor", "mileage sensor"]: units = self.tesla_device.measurement if units == "LENGTH_MILES": return self.tesla_device.get_value() return round( convert(self.tesla_device.get_value(), LENGTH_MILES, LENGTH_KILOMETERS), 2, ) if self.tesla_device.type == "charging rate sensor": return self.tesla_device.charging_rate return self.tesla_device.get_value() @property def unit_of_measurement(self) -> Optional[str]: """Return the unit_of_measurement of the device.""" units = self.tesla_device.measurement if units == "F": return TEMP_FAHRENHEIT if units == "C": return TEMP_CELSIUS if units == "LENGTH_MILES": return LENGTH_MILES if units == "LENGTH_KILOMETERS": return LENGTH_KILOMETERS return units @property def device_class(self) -> Optional[str]: """Return the device_class of the device.""" return ( self.tesla_device.device_class if self.tesla_device.device_class in DEVICE_CLASSES else None ) @property def device_state_attributes(self): """Return the state attributes of the device.""" attr = self._attributes.copy() if self.tesla_device.type == "charging rate sensor": attr.update( { "time_left": self.tesla_device.time_left, "added_range": self.tesla_device.added_range, "charge_energy_added": self.tesla_device.charge_energy_added, "charge_current_request": self.tesla_device.charge_current_request, "charger_actual_current": self.tesla_device.charger_actual_current, "charger_voltage": self.tesla_device.charger_voltage, } ) return attr
"""Define tests for the GDACS config flow.""" from datetime import timedelta import pytest from homeassistant import data_entry_flow from homeassistant.components.gdacs import CONF_CATEGORIES, DOMAIN from homeassistant.const import ( CONF_LATITUDE, CONF_LONGITUDE, CONF_RADIUS, CONF_SCAN_INTERVAL, ) from tests.async_mock import patch @pytest.fixture(name="gdacs_setup", autouse=True) def gdacs_setup_fixture(): """Mock gdacs entry setup.""" with patch("homeassistant.components.gdacs.async_setup_entry", return_value=True): yield async def test_duplicate_error(hass, config_entry): """Test that errors are shown when duplicates are added.""" conf = {CONF_LATITUDE: -41.2, CONF_LONGITUDE: 174.7, CONF_RADIUS: 25} config_entry.add_to_hass(hass) result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": "user"}, data=conf ) assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT assert result["reason"] == "already_configured" async def test_show_form(hass): """Test that the form is served with no input.""" result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": "user"} ) assert result["type"] == data_entry_flow.RESULT_TYPE_FORM assert result["step_id"] == "user" async def test_step_import(hass): """Test that the import step works.""" conf = { CONF_LATITUDE: -41.2, CONF_LONGITUDE: 174.7, CONF_RADIUS: 25, CONF_SCAN_INTERVAL: timedelta(minutes=4), CONF_CATEGORIES: ["Drought", "Earthquake"], } result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": "import"}, data=conf ) assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY assert result["title"] == "-41.2, 174.7" assert result["data"] == { CONF_LATITUDE: -41.2, CONF_LONGITUDE: 174.7, CONF_RADIUS: 25, CONF_SCAN_INTERVAL: 240.0, CONF_CATEGORIES: ["Drought", "Earthquake"], } async def test_step_user(hass): """Test that the user step works.""" hass.config.latitude = -41.2 hass.config.longitude = 174.7 conf = {CONF_RADIUS: 25} result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": "user"}, data=conf ) assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY assert result["title"] == "-41.2, 174.7" assert result["data"] == { CONF_LATITUDE: -41.2, CONF_LONGITUDE: 174.7, CONF_RADIUS: 25, CONF_SCAN_INTERVAL: 300.0, CONF_CATEGORIES: [], }
balloob/home-assistant
tests/components/gdacs/test_config_flow.py
homeassistant/components/tesla/sensor.py
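A worked example of the unit conversion TeslaSensor.state applies to range and mileage sensors when the vehicle does not report miles, using the same convert() helper and rounding as above.

from homeassistant.const import LENGTH_KILOMETERS, LENGTH_MILES
from homeassistant.util.distance import convert

# 250 reported miles, converted and rounded exactly as in TeslaSensor.state.
print(round(convert(250.0, LENGTH_MILES, LENGTH_KILOMETERS), 2))  # 402.34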
"""Helper functions for the Minecraft Server integration.""" from typing import Any, Dict import aiodns from homeassistant.const import CONF_HOST, CONF_PORT from homeassistant.helpers.typing import HomeAssistantType from .const import SRV_RECORD_PREFIX async def async_check_srv_record(hass: HomeAssistantType, host: str) -> Dict[str, Any]: """Check if the given host is a valid Minecraft SRV record.""" # Check if 'host' is a valid SRV record. return_value = None srv_records = None try: srv_records = await aiodns.DNSResolver().query( host=f"{SRV_RECORD_PREFIX}.{host}", qtype="SRV" ) except (aiodns.error.DNSError): # 'host' is not a SRV record. pass else: # 'host' is a valid SRV record, extract the data. return_value = { CONF_HOST: srv_records[0].host, CONF_PORT: srv_records[0].port, } return return_value
"""Define tests for the GDACS config flow.""" from datetime import timedelta import pytest from homeassistant import data_entry_flow from homeassistant.components.gdacs import CONF_CATEGORIES, DOMAIN from homeassistant.const import ( CONF_LATITUDE, CONF_LONGITUDE, CONF_RADIUS, CONF_SCAN_INTERVAL, ) from tests.async_mock import patch @pytest.fixture(name="gdacs_setup", autouse=True) def gdacs_setup_fixture(): """Mock gdacs entry setup.""" with patch("homeassistant.components.gdacs.async_setup_entry", return_value=True): yield async def test_duplicate_error(hass, config_entry): """Test that errors are shown when duplicates are added.""" conf = {CONF_LATITUDE: -41.2, CONF_LONGITUDE: 174.7, CONF_RADIUS: 25} config_entry.add_to_hass(hass) result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": "user"}, data=conf ) assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT assert result["reason"] == "already_configured" async def test_show_form(hass): """Test that the form is served with no input.""" result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": "user"} ) assert result["type"] == data_entry_flow.RESULT_TYPE_FORM assert result["step_id"] == "user" async def test_step_import(hass): """Test that the import step works.""" conf = { CONF_LATITUDE: -41.2, CONF_LONGITUDE: 174.7, CONF_RADIUS: 25, CONF_SCAN_INTERVAL: timedelta(minutes=4), CONF_CATEGORIES: ["Drought", "Earthquake"], } result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": "import"}, data=conf ) assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY assert result["title"] == "-41.2, 174.7" assert result["data"] == { CONF_LATITUDE: -41.2, CONF_LONGITUDE: 174.7, CONF_RADIUS: 25, CONF_SCAN_INTERVAL: 240.0, CONF_CATEGORIES: ["Drought", "Earthquake"], } async def test_step_user(hass): """Test that the user step works.""" hass.config.latitude = -41.2 hass.config.longitude = 174.7 conf = {CONF_RADIUS: 25} result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": "user"}, data=conf ) assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY assert result["title"] == "-41.2, 174.7" assert result["data"] == { CONF_LATITUDE: -41.2, CONF_LONGITUDE: 174.7, CONF_RADIUS: 25, CONF_SCAN_INTERVAL: 300.0, CONF_CATEGORIES: [], }
balloob/home-assistant
tests/components/gdacs/test_config_flow.py
homeassistant/components/minecraft_server/helpers.py
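A standalone sketch of the same aiodns SRV lookup the helper performs. The "_minecraft._tcp" prefix stands in for the SRV_RECORD_PREFIX constant (an assumption, since that constant lives in const.py and is not shown here), and example.com is a placeholder host.

import asyncio

import aiodns


async def lookup(host: str) -> None:
    try:
        # "_minecraft._tcp" stands in for SRV_RECORD_PREFIX (assumed value).
        srv_records = await aiodns.DNSResolver().query(
            host=f"_minecraft._tcp.{host}", qtype="SRV"
        )
    except aiodns.error.DNSError:
        print(f"No Minecraft SRV record found for {host}")
    else:
        print(f"SRV target: {srv_records[0].host}:{srv_records[0].port}")


if __name__ == "__main__":
    asyncio.run(lookup("example.com"))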
"""Decorators for the Websocket API.""" import asyncio from functools import wraps from typing import Awaitable, Callable from homeassistant.core import HomeAssistant, callback from homeassistant.exceptions import Unauthorized from . import const, messages from .connection import ActiveConnection # mypy: allow-untyped-calls, allow-untyped-defs async def _handle_async_response(func, hass, connection, msg): """Create a response and handle exception.""" try: await func(hass, connection, msg) except Exception as err: # pylint: disable=broad-except connection.async_handle_exception(msg, err) def async_response( func: Callable[[HomeAssistant, ActiveConnection, dict], Awaitable[None]] ) -> const.WebSocketCommandHandler: """Decorate an async function to handle WebSocket API messages.""" @callback @wraps(func) def schedule_handler(hass, connection, msg): """Schedule the handler.""" # As the webserver is now started before the start # event we do not want to block for websocket responders asyncio.create_task(_handle_async_response(func, hass, connection, msg)) return schedule_handler def require_admin(func: const.WebSocketCommandHandler) -> const.WebSocketCommandHandler: """Websocket decorator to require user to be an admin.""" @wraps(func) def with_admin(hass, connection, msg): """Check admin and call function.""" user = connection.user if user is None or not user.is_admin: raise Unauthorized() func(hass, connection, msg) return with_admin def ws_require_user( only_owner=False, only_system_user=False, allow_system_user=True, only_active_user=True, only_inactive_user=False, ): """Decorate function validating login user exist in current WS connection. Will write out error message if not authenticated. """ def validator(func): """Decorate func.""" @wraps(func) def check_current_user(hass, connection, msg): """Check current user.""" def output_error(message_id, message): """Output error message.""" connection.send_message( messages.error_message(msg["id"], message_id, message) ) if connection.user is None: output_error("no_user", "Not authenticated as a user") return if only_owner and not connection.user.is_owner: output_error("only_owner", "Only allowed as owner") return if only_system_user and not connection.user.system_generated: output_error("only_system_user", "Only allowed as system user") return if not allow_system_user and connection.user.system_generated: output_error("not_system_user", "Not allowed as system user") return if only_active_user and not connection.user.is_active: output_error("only_active_user", "Only allowed as active user") return if only_inactive_user and connection.user.is_active: output_error("only_inactive_user", "Not allowed as active user") return return func(hass, connection, msg) return check_current_user return validator def websocket_command( schema: dict, ) -> Callable[[const.WebSocketCommandHandler], const.WebSocketCommandHandler]: """Tag a function as a websocket command.""" command = schema["type"] def decorate(func): """Decorate ws command function.""" # pylint: disable=protected-access func._ws_schema = messages.BASE_COMMAND_MESSAGE_SCHEMA.extend(schema) func._ws_command = command return func return decorate
"""Define tests for the GDACS config flow.""" from datetime import timedelta import pytest from homeassistant import data_entry_flow from homeassistant.components.gdacs import CONF_CATEGORIES, DOMAIN from homeassistant.const import ( CONF_LATITUDE, CONF_LONGITUDE, CONF_RADIUS, CONF_SCAN_INTERVAL, ) from tests.async_mock import patch @pytest.fixture(name="gdacs_setup", autouse=True) def gdacs_setup_fixture(): """Mock gdacs entry setup.""" with patch("homeassistant.components.gdacs.async_setup_entry", return_value=True): yield async def test_duplicate_error(hass, config_entry): """Test that errors are shown when duplicates are added.""" conf = {CONF_LATITUDE: -41.2, CONF_LONGITUDE: 174.7, CONF_RADIUS: 25} config_entry.add_to_hass(hass) result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": "user"}, data=conf ) assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT assert result["reason"] == "already_configured" async def test_show_form(hass): """Test that the form is served with no input.""" result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": "user"} ) assert result["type"] == data_entry_flow.RESULT_TYPE_FORM assert result["step_id"] == "user" async def test_step_import(hass): """Test that the import step works.""" conf = { CONF_LATITUDE: -41.2, CONF_LONGITUDE: 174.7, CONF_RADIUS: 25, CONF_SCAN_INTERVAL: timedelta(minutes=4), CONF_CATEGORIES: ["Drought", "Earthquake"], } result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": "import"}, data=conf ) assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY assert result["title"] == "-41.2, 174.7" assert result["data"] == { CONF_LATITUDE: -41.2, CONF_LONGITUDE: 174.7, CONF_RADIUS: 25, CONF_SCAN_INTERVAL: 240.0, CONF_CATEGORIES: ["Drought", "Earthquake"], } async def test_step_user(hass): """Test that the user step works.""" hass.config.latitude = -41.2 hass.config.longitude = 174.7 conf = {CONF_RADIUS: 25} result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": "user"}, data=conf ) assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY assert result["title"] == "-41.2, 174.7" assert result["data"] == { CONF_LATITUDE: -41.2, CONF_LONGITUDE: 174.7, CONF_RADIUS: 25, CONF_SCAN_INTERVAL: 300.0, CONF_CATEGORIES: [], }
balloob/home-assistant
tests/components/gdacs/test_config_flow.py
homeassistant/components/websocket_api/decorators.py
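A hypothetical command handler showing how the decorators above compose. The command type "example/ping" is made up for illustration, and messages.result_message is assumed to be available alongside the error_message helper used above; keeping require_admin outermost preserves the _ws_schema/_ws_command attributes set by websocket_command (functools.wraps copies them onto the wrapper).

import voluptuous as vol

from homeassistant.components.websocket_api import decorators, messages


@decorators.require_admin
@decorators.websocket_command({vol.Required("type"): "example/ping"})
def ws_ping(hass, connection, msg):
    """Reply to the (made-up) example/ping command, admins only."""
    connection.send_message(messages.result_message(msg["id"], {"pong": True}))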
"""Support for Transport NSW (AU) to query next leave event.""" from datetime import timedelta from TransportNSW import TransportNSW import voluptuous as vol from homeassistant.components.sensor import PLATFORM_SCHEMA from homeassistant.const import ( ATTR_ATTRIBUTION, ATTR_MODE, CONF_API_KEY, CONF_NAME, TIME_MINUTES, ) import homeassistant.helpers.config_validation as cv from homeassistant.helpers.entity import Entity ATTR_STOP_ID = "stop_id" ATTR_ROUTE = "route" ATTR_DUE_IN = "due" ATTR_DELAY = "delay" ATTR_REAL_TIME = "real_time" ATTR_DESTINATION = "destination" ATTRIBUTION = "Data provided by Transport NSW" CONF_STOP_ID = "stop_id" CONF_ROUTE = "route" CONF_DESTINATION = "destination" DEFAULT_NAME = "Next Bus" ICONS = { "Train": "mdi:train", "Lightrail": "mdi:tram", "Bus": "mdi:bus", "Coach": "mdi:bus", "Ferry": "mdi:ferry", "Schoolbus": "mdi:bus", "n/a": "mdi:clock", None: "mdi:clock", } SCAN_INTERVAL = timedelta(seconds=60) PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend( { vol.Required(CONF_STOP_ID): cv.string, vol.Required(CONF_API_KEY): cv.string, vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string, vol.Optional(CONF_ROUTE, default=""): cv.string, vol.Optional(CONF_DESTINATION, default=""): cv.string, } ) def setup_platform(hass, config, add_entities, discovery_info=None): """Set up the Transport NSW sensor.""" stop_id = config[CONF_STOP_ID] api_key = config[CONF_API_KEY] route = config.get(CONF_ROUTE) destination = config.get(CONF_DESTINATION) name = config.get(CONF_NAME) data = PublicTransportData(stop_id, route, destination, api_key) add_entities([TransportNSWSensor(data, stop_id, name)], True) class TransportNSWSensor(Entity): """Implementation of an Transport NSW sensor.""" def __init__(self, data, stop_id, name): """Initialize the sensor.""" self.data = data self._name = name self._stop_id = stop_id self._times = self._state = None self._icon = ICONS[None] @property def name(self): """Return the name of the sensor.""" return self._name @property def state(self): """Return the state of the sensor.""" return self._state @property def device_state_attributes(self): """Return the state attributes.""" if self._times is not None: return { ATTR_DUE_IN: self._times[ATTR_DUE_IN], ATTR_STOP_ID: self._stop_id, ATTR_ROUTE: self._times[ATTR_ROUTE], ATTR_DELAY: self._times[ATTR_DELAY], ATTR_REAL_TIME: self._times[ATTR_REAL_TIME], ATTR_DESTINATION: self._times[ATTR_DESTINATION], ATTR_MODE: self._times[ATTR_MODE], ATTR_ATTRIBUTION: ATTRIBUTION, } @property def unit_of_measurement(self): """Return the unit this state is expressed in.""" return TIME_MINUTES @property def icon(self): """Icon to use in the frontend, if any.""" return self._icon def update(self): """Get the latest data from Transport NSW and update the states.""" self.data.update() self._times = self.data.info self._state = self._times[ATTR_DUE_IN] self._icon = ICONS[self._times[ATTR_MODE]] class PublicTransportData: """The Class for handling the data retrieval.""" def __init__(self, stop_id, route, destination, api_key): """Initialize the data object.""" self._stop_id = stop_id self._route = route self._destination = destination self._api_key = api_key self.info = { ATTR_ROUTE: self._route, ATTR_DUE_IN: "n/a", ATTR_DELAY: "n/a", ATTR_REAL_TIME: "n/a", ATTR_DESTINATION: "n/a", ATTR_MODE: None, } self.tnsw = TransportNSW() def update(self): """Get the next leave time.""" _data = self.tnsw.get_departures( self._stop_id, self._route, self._destination, self._api_key ) self.info = { ATTR_ROUTE: _data["route"], ATTR_DUE_IN: 
_data["due"], ATTR_DELAY: _data["delay"], ATTR_REAL_TIME: _data["real_time"], ATTR_DESTINATION: _data["destination"], ATTR_MODE: _data["mode"], }
"""Define tests for the GDACS config flow.""" from datetime import timedelta import pytest from homeassistant import data_entry_flow from homeassistant.components.gdacs import CONF_CATEGORIES, DOMAIN from homeassistant.const import ( CONF_LATITUDE, CONF_LONGITUDE, CONF_RADIUS, CONF_SCAN_INTERVAL, ) from tests.async_mock import patch @pytest.fixture(name="gdacs_setup", autouse=True) def gdacs_setup_fixture(): """Mock gdacs entry setup.""" with patch("homeassistant.components.gdacs.async_setup_entry", return_value=True): yield async def test_duplicate_error(hass, config_entry): """Test that errors are shown when duplicates are added.""" conf = {CONF_LATITUDE: -41.2, CONF_LONGITUDE: 174.7, CONF_RADIUS: 25} config_entry.add_to_hass(hass) result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": "user"}, data=conf ) assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT assert result["reason"] == "already_configured" async def test_show_form(hass): """Test that the form is served with no input.""" result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": "user"} ) assert result["type"] == data_entry_flow.RESULT_TYPE_FORM assert result["step_id"] == "user" async def test_step_import(hass): """Test that the import step works.""" conf = { CONF_LATITUDE: -41.2, CONF_LONGITUDE: 174.7, CONF_RADIUS: 25, CONF_SCAN_INTERVAL: timedelta(minutes=4), CONF_CATEGORIES: ["Drought", "Earthquake"], } result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": "import"}, data=conf ) assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY assert result["title"] == "-41.2, 174.7" assert result["data"] == { CONF_LATITUDE: -41.2, CONF_LONGITUDE: 174.7, CONF_RADIUS: 25, CONF_SCAN_INTERVAL: 240.0, CONF_CATEGORIES: ["Drought", "Earthquake"], } async def test_step_user(hass): """Test that the user step works.""" hass.config.latitude = -41.2 hass.config.longitude = 174.7 conf = {CONF_RADIUS: 25} result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": "user"}, data=conf ) assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY assert result["title"] == "-41.2, 174.7" assert result["data"] == { CONF_LATITUDE: -41.2, CONF_LONGITUDE: 174.7, CONF_RADIUS: 25, CONF_SCAN_INTERVAL: 300.0, CONF_CATEGORIES: [], }
balloob/home-assistant
tests/components/gdacs/test_config_flow.py
homeassistant/components/transport_nsw/sensor.py
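A standalone sketch of the TransportNSW library call made in PublicTransportData.update(); the stop id and API key are placeholders, route and destination are left empty as in the platform defaults, and only keys the sensor above already reads are printed.

from TransportNSW import TransportNSW

# Placeholder stop id and API key; route and destination left empty, matching
# the platform's defaults.
tnsw = TransportNSW()
departure = tnsw.get_departures("209516", "", "", "YOUR_API_KEY")
print(departure["route"], departure["due"], departure["delay"], departure["mode"])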
"""Support for EnOcean sensors.""" import voluptuous as vol from homeassistant.components.sensor import PLATFORM_SCHEMA from homeassistant.const import ( CONF_DEVICE_CLASS, CONF_ID, CONF_NAME, DEVICE_CLASS_HUMIDITY, DEVICE_CLASS_POWER, DEVICE_CLASS_TEMPERATURE, PERCENTAGE, POWER_WATT, STATE_CLOSED, STATE_OPEN, TEMP_CELSIUS, ) import homeassistant.helpers.config_validation as cv from homeassistant.helpers.restore_state import RestoreEntity from .device import EnOceanEntity CONF_MAX_TEMP = "max_temp" CONF_MIN_TEMP = "min_temp" CONF_RANGE_FROM = "range_from" CONF_RANGE_TO = "range_to" DEFAULT_NAME = "EnOcean sensor" SENSOR_TYPE_HUMIDITY = "humidity" SENSOR_TYPE_POWER = "powersensor" SENSOR_TYPE_TEMPERATURE = "temperature" SENSOR_TYPE_WINDOWHANDLE = "windowhandle" SENSOR_TYPES = { SENSOR_TYPE_HUMIDITY: { "name": "Humidity", "unit": PERCENTAGE, "icon": "mdi:water-percent", "class": DEVICE_CLASS_HUMIDITY, }, SENSOR_TYPE_POWER: { "name": "Power", "unit": POWER_WATT, "icon": "mdi:power-plug", "class": DEVICE_CLASS_POWER, }, SENSOR_TYPE_TEMPERATURE: { "name": "Temperature", "unit": TEMP_CELSIUS, "icon": "mdi:thermometer", "class": DEVICE_CLASS_TEMPERATURE, }, SENSOR_TYPE_WINDOWHANDLE: { "name": "WindowHandle", "unit": None, "icon": "mdi:window", "class": None, }, } PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend( { vol.Required(CONF_ID): vol.All(cv.ensure_list, [vol.Coerce(int)]), vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string, vol.Optional(CONF_DEVICE_CLASS, default=SENSOR_TYPE_POWER): cv.string, vol.Optional(CONF_MAX_TEMP, default=40): vol.Coerce(int), vol.Optional(CONF_MIN_TEMP, default=0): vol.Coerce(int), vol.Optional(CONF_RANGE_FROM, default=255): cv.positive_int, vol.Optional(CONF_RANGE_TO, default=0): cv.positive_int, } ) def setup_platform(hass, config, add_entities, discovery_info=None): """Set up an EnOcean sensor device.""" dev_id = config.get(CONF_ID) dev_name = config.get(CONF_NAME) sensor_type = config.get(CONF_DEVICE_CLASS) if sensor_type == SENSOR_TYPE_TEMPERATURE: temp_min = config.get(CONF_MIN_TEMP) temp_max = config.get(CONF_MAX_TEMP) range_from = config.get(CONF_RANGE_FROM) range_to = config.get(CONF_RANGE_TO) add_entities( [ EnOceanTemperatureSensor( dev_id, dev_name, temp_min, temp_max, range_from, range_to ) ] ) elif sensor_type == SENSOR_TYPE_HUMIDITY: add_entities([EnOceanHumiditySensor(dev_id, dev_name)]) elif sensor_type == SENSOR_TYPE_POWER: add_entities([EnOceanPowerSensor(dev_id, dev_name)]) elif sensor_type == SENSOR_TYPE_WINDOWHANDLE: add_entities([EnOceanWindowHandle(dev_id, dev_name)]) class EnOceanSensor(EnOceanEntity, RestoreEntity): """Representation of an EnOcean sensor device such as a power meter.""" def __init__(self, dev_id, dev_name, sensor_type): """Initialize the EnOcean sensor device.""" super().__init__(dev_id, dev_name) self._sensor_type = sensor_type self._device_class = SENSOR_TYPES[self._sensor_type]["class"] self._dev_name = f"{SENSOR_TYPES[self._sensor_type]['name']} {dev_name}" self._unit_of_measurement = SENSOR_TYPES[self._sensor_type]["unit"] self._icon = SENSOR_TYPES[self._sensor_type]["icon"] self._state = None @property def name(self): """Return the name of the device.""" return self._dev_name @property def icon(self): """Icon to use in the frontend.""" return self._icon @property def device_class(self): """Return the device class of the sensor.""" return self._device_class @property def state(self): """Return the state of the device.""" return self._state @property def unit_of_measurement(self): """Return the unit of measurement.""" 
return self._unit_of_measurement async def async_added_to_hass(self): """Call when entity about to be added to hass.""" # If not None, we got an initial value. await super().async_added_to_hass() if self._state is not None: return state = await self.async_get_last_state() if state is not None: self._state = state.state def value_changed(self, packet): """Update the internal state of the sensor.""" class EnOceanPowerSensor(EnOceanSensor): """Representation of an EnOcean power sensor. EEPs (EnOcean Equipment Profiles): - A5-12-01 (Automated Meter Reading, Electricity) """ def __init__(self, dev_id, dev_name): """Initialize the EnOcean power sensor device.""" super().__init__(dev_id, dev_name, SENSOR_TYPE_POWER) def value_changed(self, packet): """Update the internal state of the sensor.""" if packet.rorg != 0xA5: return packet.parse_eep(0x12, 0x01) if packet.parsed["DT"]["raw_value"] == 1: # this packet reports the current value raw_val = packet.parsed["MR"]["raw_value"] divisor = packet.parsed["DIV"]["raw_value"] self._state = raw_val / (10 ** divisor) self.schedule_update_ha_state() class EnOceanTemperatureSensor(EnOceanSensor): """Representation of an EnOcean temperature sensor device. EEPs (EnOcean Equipment Profiles): - A5-02-01 to A5-02-1B All 8 Bit Temperature Sensors of A5-02 - A5-10-01 to A5-10-14 (Room Operating Panels) - A5-04-01 (Temp. and Humidity Sensor, Range 0°C to +40°C and 0% to 100%) - A5-04-02 (Temp. and Humidity Sensor, Range -20°C to +60°C and 0% to 100%) - A5-10-10 (Temp. and Humidity Sensor and Set Point) - A5-10-12 (Temp. and Humidity Sensor, Set Point and Occupancy Control) - 10 Bit Temp. Sensors are not supported (A5-02-20, A5-02-30) For the following EEPs the scales must be set to "0 to 250": - A5-04-01 - A5-04-02 - A5-10-10 to A5-10-14 """ def __init__(self, dev_id, dev_name, scale_min, scale_max, range_from, range_to): """Initialize the EnOcean temperature sensor device.""" super().__init__(dev_id, dev_name, SENSOR_TYPE_TEMPERATURE) self._scale_min = scale_min self._scale_max = scale_max self.range_from = range_from self.range_to = range_to def value_changed(self, packet): """Update the internal state of the sensor.""" if packet.data[0] != 0xA5: return temp_scale = self._scale_max - self._scale_min temp_range = self.range_to - self.range_from raw_val = packet.data[3] temperature = temp_scale / temp_range * (raw_val - self.range_from) temperature += self._scale_min self._state = round(temperature, 1) self.schedule_update_ha_state() class EnOceanHumiditySensor(EnOceanSensor): """Representation of an EnOcean humidity sensor device. EEPs (EnOcean Equipment Profiles): - A5-04-01 (Temp. and Humidity Sensor, Range 0°C to +40°C and 0% to 100%) - A5-04-02 (Temp. and Humidity Sensor, Range -20°C to +60°C and 0% to 100%) - A5-10-10 to A5-10-14 (Room Operating Panels) """ def __init__(self, dev_id, dev_name): """Initialize the EnOcean humidity sensor device.""" super().__init__(dev_id, dev_name, SENSOR_TYPE_HUMIDITY) def value_changed(self, packet): """Update the internal state of the sensor.""" if packet.rorg != 0xA5: return humidity = packet.data[2] * 100 / 250 self._state = round(humidity, 1) self.schedule_update_ha_state() class EnOceanWindowHandle(EnOceanSensor): """Representation of an EnOcean window handle device. 
EEPs (EnOcean Equipment Profiles): - F6-10-00 (Mechanical handle / Hoppe AG) """ def __init__(self, dev_id, dev_name): """Initialize the EnOcean window handle sensor device.""" super().__init__(dev_id, dev_name, SENSOR_TYPE_WINDOWHANDLE) def value_changed(self, packet): """Update the internal state of the sensor.""" action = (packet.data[1] & 0x70) >> 4 if action == 0x07: self._state = STATE_CLOSED if action in (0x04, 0x06): self._state = STATE_OPEN if action == 0x05: self._state = "tilt" self.schedule_update_ha_state()
"""Define tests for the GDACS config flow.""" from datetime import timedelta import pytest from homeassistant import data_entry_flow from homeassistant.components.gdacs import CONF_CATEGORIES, DOMAIN from homeassistant.const import ( CONF_LATITUDE, CONF_LONGITUDE, CONF_RADIUS, CONF_SCAN_INTERVAL, ) from tests.async_mock import patch @pytest.fixture(name="gdacs_setup", autouse=True) def gdacs_setup_fixture(): """Mock gdacs entry setup.""" with patch("homeassistant.components.gdacs.async_setup_entry", return_value=True): yield async def test_duplicate_error(hass, config_entry): """Test that errors are shown when duplicates are added.""" conf = {CONF_LATITUDE: -41.2, CONF_LONGITUDE: 174.7, CONF_RADIUS: 25} config_entry.add_to_hass(hass) result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": "user"}, data=conf ) assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT assert result["reason"] == "already_configured" async def test_show_form(hass): """Test that the form is served with no input.""" result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": "user"} ) assert result["type"] == data_entry_flow.RESULT_TYPE_FORM assert result["step_id"] == "user" async def test_step_import(hass): """Test that the import step works.""" conf = { CONF_LATITUDE: -41.2, CONF_LONGITUDE: 174.7, CONF_RADIUS: 25, CONF_SCAN_INTERVAL: timedelta(minutes=4), CONF_CATEGORIES: ["Drought", "Earthquake"], } result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": "import"}, data=conf ) assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY assert result["title"] == "-41.2, 174.7" assert result["data"] == { CONF_LATITUDE: -41.2, CONF_LONGITUDE: 174.7, CONF_RADIUS: 25, CONF_SCAN_INTERVAL: 240.0, CONF_CATEGORIES: ["Drought", "Earthquake"], } async def test_step_user(hass): """Test that the user step works.""" hass.config.latitude = -41.2 hass.config.longitude = 174.7 conf = {CONF_RADIUS: 25} result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": "user"}, data=conf ) assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY assert result["title"] == "-41.2, 174.7" assert result["data"] == { CONF_LATITUDE: -41.2, CONF_LONGITUDE: 174.7, CONF_RADIUS: 25, CONF_SCAN_INTERVAL: 300.0, CONF_CATEGORIES: [], }
balloob/home-assistant
tests/components/gdacs/test_config_flow.py
homeassistant/components/enocean/sensor.py
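A worked example of the raw-to-temperature mapping in EnOceanTemperatureSensor.value_changed, using the platform's default scale settings from the schema above (min_temp 0, max_temp 40, range_from 255, range_to 0).

# Default platform settings from the schema: min_temp 0, max_temp 40,
# range_from 255, range_to 0.
scale_min, scale_max = 0, 40
range_from, range_to = 255, 0

raw_val = 128  # example value of packet.data[3]
temp_scale = scale_max - scale_min
temp_range = range_to - range_from
temperature = temp_scale / temp_range * (raw_val - range_from) + scale_min
print(round(temperature, 1))  # 19.9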
"""Provides device triggers for binary sensors.""" import voluptuous as vol from homeassistant.components.device_automation import TRIGGER_BASE_SCHEMA from homeassistant.components.device_automation.const import ( CONF_TURNED_OFF, CONF_TURNED_ON, ) from homeassistant.components.homeassistant.triggers import state as state_trigger from homeassistant.const import ATTR_DEVICE_CLASS, CONF_ENTITY_ID, CONF_FOR, CONF_TYPE from homeassistant.helpers import config_validation as cv from homeassistant.helpers.entity_registry import async_entries_for_device from . import ( DEVICE_CLASS_BATTERY, DEVICE_CLASS_BATTERY_CHARGING, DEVICE_CLASS_COLD, DEVICE_CLASS_CONNECTIVITY, DEVICE_CLASS_DOOR, DEVICE_CLASS_GARAGE_DOOR, DEVICE_CLASS_GAS, DEVICE_CLASS_HEAT, DEVICE_CLASS_LIGHT, DEVICE_CLASS_LOCK, DEVICE_CLASS_MOISTURE, DEVICE_CLASS_MOTION, DEVICE_CLASS_MOVING, DEVICE_CLASS_OCCUPANCY, DEVICE_CLASS_OPENING, DEVICE_CLASS_PLUG, DEVICE_CLASS_POWER, DEVICE_CLASS_PRESENCE, DEVICE_CLASS_PROBLEM, DEVICE_CLASS_SAFETY, DEVICE_CLASS_SMOKE, DEVICE_CLASS_SOUND, DEVICE_CLASS_VIBRATION, DEVICE_CLASS_WINDOW, DOMAIN, ) # mypy: allow-untyped-defs, no-check-untyped-defs DEVICE_CLASS_NONE = "none" CONF_BAT_LOW = "bat_low" CONF_NOT_BAT_LOW = "not_bat_low" CONF_CHARGING = "charging" CONF_NOT_CHARGING = "not_charging" CONF_COLD = "cold" CONF_NOT_COLD = "not_cold" CONF_CONNECTED = "connected" CONF_NOT_CONNECTED = "not_connected" CONF_GAS = "gas" CONF_NO_GAS = "no_gas" CONF_HOT = "hot" CONF_NOT_HOT = "not_hot" CONF_LIGHT = "light" CONF_NO_LIGHT = "no_light" CONF_LOCKED = "locked" CONF_NOT_LOCKED = "not_locked" CONF_MOIST = "moist" CONF_NOT_MOIST = "not_moist" CONF_MOTION = "motion" CONF_NO_MOTION = "no_motion" CONF_MOVING = "moving" CONF_NOT_MOVING = "not_moving" CONF_OCCUPIED = "occupied" CONF_NOT_OCCUPIED = "not_occupied" CONF_PLUGGED_IN = "plugged_in" CONF_NOT_PLUGGED_IN = "not_plugged_in" CONF_POWERED = "powered" CONF_NOT_POWERED = "not_powered" CONF_PRESENT = "present" CONF_NOT_PRESENT = "not_present" CONF_PROBLEM = "problem" CONF_NO_PROBLEM = "no_problem" CONF_UNSAFE = "unsafe" CONF_NOT_UNSAFE = "not_unsafe" CONF_SMOKE = "smoke" CONF_NO_SMOKE = "no_smoke" CONF_SOUND = "sound" CONF_NO_SOUND = "no_sound" CONF_VIBRATION = "vibration" CONF_NO_VIBRATION = "no_vibration" CONF_OPENED = "opened" CONF_NOT_OPENED = "not_opened" TURNED_ON = [ CONF_BAT_LOW, CONF_COLD, CONF_CONNECTED, CONF_GAS, CONF_HOT, CONF_LIGHT, CONF_NOT_LOCKED, CONF_MOIST, CONF_MOTION, CONF_MOVING, CONF_OCCUPIED, CONF_OPENED, CONF_PLUGGED_IN, CONF_POWERED, CONF_PRESENT, CONF_PROBLEM, CONF_SMOKE, CONF_SOUND, CONF_UNSAFE, CONF_VIBRATION, CONF_TURNED_ON, ] TURNED_OFF = [ CONF_NOT_BAT_LOW, CONF_NOT_COLD, CONF_NOT_CONNECTED, CONF_NOT_HOT, CONF_LOCKED, CONF_NOT_MOIST, CONF_NOT_MOVING, CONF_NOT_OCCUPIED, CONF_NOT_OPENED, CONF_NOT_PLUGGED_IN, CONF_NOT_POWERED, CONF_NOT_PRESENT, CONF_NOT_UNSAFE, CONF_NO_GAS, CONF_NO_LIGHT, CONF_NO_MOTION, CONF_NO_PROBLEM, CONF_NO_SMOKE, CONF_NO_SOUND, CONF_NO_VIBRATION, CONF_TURNED_OFF, ] ENTITY_TRIGGERS = { DEVICE_CLASS_BATTERY: [{CONF_TYPE: CONF_BAT_LOW}, {CONF_TYPE: CONF_NOT_BAT_LOW}], DEVICE_CLASS_BATTERY_CHARGING: [ {CONF_TYPE: CONF_CHARGING}, {CONF_TYPE: CONF_NOT_CHARGING}, ], DEVICE_CLASS_COLD: [{CONF_TYPE: CONF_COLD}, {CONF_TYPE: CONF_NOT_COLD}], DEVICE_CLASS_CONNECTIVITY: [ {CONF_TYPE: CONF_CONNECTED}, {CONF_TYPE: CONF_NOT_CONNECTED}, ], DEVICE_CLASS_DOOR: [{CONF_TYPE: CONF_OPENED}, {CONF_TYPE: CONF_NOT_OPENED}], DEVICE_CLASS_GARAGE_DOOR: [{CONF_TYPE: CONF_OPENED}, {CONF_TYPE: CONF_NOT_OPENED}], DEVICE_CLASS_GAS: [{CONF_TYPE: CONF_GAS}, 
{CONF_TYPE: CONF_NO_GAS}], DEVICE_CLASS_HEAT: [{CONF_TYPE: CONF_HOT}, {CONF_TYPE: CONF_NOT_HOT}], DEVICE_CLASS_LIGHT: [{CONF_TYPE: CONF_LIGHT}, {CONF_TYPE: CONF_NO_LIGHT}], DEVICE_CLASS_LOCK: [{CONF_TYPE: CONF_LOCKED}, {CONF_TYPE: CONF_NOT_LOCKED}], DEVICE_CLASS_MOISTURE: [{CONF_TYPE: CONF_MOIST}, {CONF_TYPE: CONF_NOT_MOIST}], DEVICE_CLASS_MOTION: [{CONF_TYPE: CONF_MOTION}, {CONF_TYPE: CONF_NO_MOTION}], DEVICE_CLASS_MOVING: [{CONF_TYPE: CONF_MOVING}, {CONF_TYPE: CONF_NOT_MOVING}], DEVICE_CLASS_OCCUPANCY: [ {CONF_TYPE: CONF_OCCUPIED}, {CONF_TYPE: CONF_NOT_OCCUPIED}, ], DEVICE_CLASS_OPENING: [{CONF_TYPE: CONF_OPENED}, {CONF_TYPE: CONF_NOT_OPENED}], DEVICE_CLASS_PLUG: [{CONF_TYPE: CONF_PLUGGED_IN}, {CONF_TYPE: CONF_NOT_PLUGGED_IN}], DEVICE_CLASS_POWER: [{CONF_TYPE: CONF_POWERED}, {CONF_TYPE: CONF_NOT_POWERED}], DEVICE_CLASS_PRESENCE: [{CONF_TYPE: CONF_PRESENT}, {CONF_TYPE: CONF_NOT_PRESENT}], DEVICE_CLASS_PROBLEM: [{CONF_TYPE: CONF_PROBLEM}, {CONF_TYPE: CONF_NO_PROBLEM}], DEVICE_CLASS_SAFETY: [{CONF_TYPE: CONF_UNSAFE}, {CONF_TYPE: CONF_NOT_UNSAFE}], DEVICE_CLASS_SMOKE: [{CONF_TYPE: CONF_SMOKE}, {CONF_TYPE: CONF_NO_SMOKE}], DEVICE_CLASS_SOUND: [{CONF_TYPE: CONF_SOUND}, {CONF_TYPE: CONF_NO_SOUND}], DEVICE_CLASS_VIBRATION: [ {CONF_TYPE: CONF_VIBRATION}, {CONF_TYPE: CONF_NO_VIBRATION}, ], DEVICE_CLASS_WINDOW: [{CONF_TYPE: CONF_OPENED}, {CONF_TYPE: CONF_NOT_OPENED}], DEVICE_CLASS_NONE: [{CONF_TYPE: CONF_TURNED_ON}, {CONF_TYPE: CONF_TURNED_OFF}], } TRIGGER_SCHEMA = TRIGGER_BASE_SCHEMA.extend( { vol.Required(CONF_ENTITY_ID): cv.entity_id, vol.Required(CONF_TYPE): vol.In(TURNED_OFF + TURNED_ON), vol.Optional(CONF_FOR): cv.positive_time_period_dict, } ) async def async_attach_trigger(hass, config, action, automation_info): """Listen for state changes based on configuration.""" trigger_type = config[CONF_TYPE] if trigger_type in TURNED_ON: from_state = "off" to_state = "on" else: from_state = "on" to_state = "off" state_config = { state_trigger.CONF_PLATFORM: "state", state_trigger.CONF_ENTITY_ID: config[CONF_ENTITY_ID], state_trigger.CONF_FROM: from_state, state_trigger.CONF_TO: to_state, } if CONF_FOR in config: state_config[CONF_FOR] = config[CONF_FOR] state_config = state_trigger.TRIGGER_SCHEMA(state_config) return await state_trigger.async_attach_trigger( hass, state_config, action, automation_info, platform_type="device" ) async def async_get_triggers(hass, device_id): """List device triggers.""" triggers = [] entity_registry = await hass.helpers.entity_registry.async_get_registry() entries = [ entry for entry in async_entries_for_device(entity_registry, device_id) if entry.domain == DOMAIN ] for entry in entries: device_class = DEVICE_CLASS_NONE state = hass.states.get(entry.entity_id) if state: device_class = state.attributes.get(ATTR_DEVICE_CLASS) templates = ENTITY_TRIGGERS.get( device_class, ENTITY_TRIGGERS[DEVICE_CLASS_NONE] ) triggers.extend( { **automation, "platform": "device", "device_id": device_id, "entity_id": entry.entity_id, "domain": DOMAIN, } for automation in templates ) return triggers async def async_get_trigger_capabilities(hass, config): """List trigger capabilities.""" return { "extra_fields": vol.Schema( {vol.Optional(CONF_FOR): cv.positive_time_period_dict} ) }
"""Define tests for the GDACS config flow.""" from datetime import timedelta import pytest from homeassistant import data_entry_flow from homeassistant.components.gdacs import CONF_CATEGORIES, DOMAIN from homeassistant.const import ( CONF_LATITUDE, CONF_LONGITUDE, CONF_RADIUS, CONF_SCAN_INTERVAL, ) from tests.async_mock import patch @pytest.fixture(name="gdacs_setup", autouse=True) def gdacs_setup_fixture(): """Mock gdacs entry setup.""" with patch("homeassistant.components.gdacs.async_setup_entry", return_value=True): yield async def test_duplicate_error(hass, config_entry): """Test that errors are shown when duplicates are added.""" conf = {CONF_LATITUDE: -41.2, CONF_LONGITUDE: 174.7, CONF_RADIUS: 25} config_entry.add_to_hass(hass) result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": "user"}, data=conf ) assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT assert result["reason"] == "already_configured" async def test_show_form(hass): """Test that the form is served with no input.""" result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": "user"} ) assert result["type"] == data_entry_flow.RESULT_TYPE_FORM assert result["step_id"] == "user" async def test_step_import(hass): """Test that the import step works.""" conf = { CONF_LATITUDE: -41.2, CONF_LONGITUDE: 174.7, CONF_RADIUS: 25, CONF_SCAN_INTERVAL: timedelta(minutes=4), CONF_CATEGORIES: ["Drought", "Earthquake"], } result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": "import"}, data=conf ) assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY assert result["title"] == "-41.2, 174.7" assert result["data"] == { CONF_LATITUDE: -41.2, CONF_LONGITUDE: 174.7, CONF_RADIUS: 25, CONF_SCAN_INTERVAL: 240.0, CONF_CATEGORIES: ["Drought", "Earthquake"], } async def test_step_user(hass): """Test that the user step works.""" hass.config.latitude = -41.2 hass.config.longitude = 174.7 conf = {CONF_RADIUS: 25} result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": "user"}, data=conf ) assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY assert result["title"] == "-41.2, 174.7" assert result["data"] == { CONF_LATITUDE: -41.2, CONF_LONGITUDE: 174.7, CONF_RADIUS: 25, CONF_SCAN_INTERVAL: 300.0, CONF_CATEGORIES: [], }
balloob/home-assistant
tests/components/gdacs/test_config_flow.py
homeassistant/components/binary_sensor/device_trigger.py
"""Sensor platform for the PoolSense sensor.""" from homeassistant.const import ( ATTR_ATTRIBUTION, CONF_EMAIL, DEVICE_CLASS_BATTERY, DEVICE_CLASS_TEMPERATURE, DEVICE_CLASS_TIMESTAMP, PERCENTAGE, TEMP_CELSIUS, ) from homeassistant.helpers.entity import Entity from . import PoolSenseEntity from .const import ATTRIBUTION, DOMAIN SENSORS = { "Chlorine": { "unit": "mV", "icon": "mdi:pool", "name": "Chlorine", "device_class": None, }, "pH": {"unit": None, "icon": "mdi:pool", "name": "pH", "device_class": None}, "Battery": { "unit": PERCENTAGE, "icon": None, "name": "Battery", "device_class": DEVICE_CLASS_BATTERY, }, "Water Temp": { "unit": TEMP_CELSIUS, "icon": "mdi:coolant-temperature", "name": "Temperature", "device_class": DEVICE_CLASS_TEMPERATURE, }, "Last Seen": { "unit": None, "icon": "mdi:clock", "name": "Last Seen", "device_class": DEVICE_CLASS_TIMESTAMP, }, "Chlorine High": { "unit": "mV", "icon": "mdi:pool", "name": "Chlorine High", "device_class": None, }, "Chlorine Low": { "unit": "mV", "icon": "mdi:pool", "name": "Chlorine Low", "device_class": None, }, "pH High": { "unit": None, "icon": "mdi:pool", "name": "pH High", "device_class": None, }, "pH Low": { "unit": None, "icon": "mdi:pool", "name": "pH Low", "device_class": None, }, } async def async_setup_entry(hass, config_entry, async_add_entities): """Defer sensor setup to the shared sensor module.""" coordinator = hass.data[DOMAIN][config_entry.entry_id] sensors_list = [] for sensor in SENSORS: sensors_list.append( PoolSenseSensor(coordinator, config_entry.data[CONF_EMAIL], sensor) ) async_add_entities(sensors_list, False) class PoolSenseSensor(PoolSenseEntity, Entity): """Sensor representing poolsense data.""" @property def name(self): """Return the name of the particular component.""" return f"PoolSense {SENSORS[self.info_type]['name']}" @property def state(self): """State of the sensor.""" return self.coordinator.data[self.info_type] @property def device_class(self): """Return the device class.""" return SENSORS[self.info_type]["device_class"] @property def icon(self): """Return the icon.""" return SENSORS[self.info_type]["icon"] @property def unit_of_measurement(self): """Return unit of measurement.""" return SENSORS[self.info_type]["unit"] @property def device_state_attributes(self): """Return device attributes.""" return {ATTR_ATTRIBUTION: ATTRIBUTION}
"""Define tests for the GDACS config flow.""" from datetime import timedelta import pytest from homeassistant import data_entry_flow from homeassistant.components.gdacs import CONF_CATEGORIES, DOMAIN from homeassistant.const import ( CONF_LATITUDE, CONF_LONGITUDE, CONF_RADIUS, CONF_SCAN_INTERVAL, ) from tests.async_mock import patch @pytest.fixture(name="gdacs_setup", autouse=True) def gdacs_setup_fixture(): """Mock gdacs entry setup.""" with patch("homeassistant.components.gdacs.async_setup_entry", return_value=True): yield async def test_duplicate_error(hass, config_entry): """Test that errors are shown when duplicates are added.""" conf = {CONF_LATITUDE: -41.2, CONF_LONGITUDE: 174.7, CONF_RADIUS: 25} config_entry.add_to_hass(hass) result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": "user"}, data=conf ) assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT assert result["reason"] == "already_configured" async def test_show_form(hass): """Test that the form is served with no input.""" result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": "user"} ) assert result["type"] == data_entry_flow.RESULT_TYPE_FORM assert result["step_id"] == "user" async def test_step_import(hass): """Test that the import step works.""" conf = { CONF_LATITUDE: -41.2, CONF_LONGITUDE: 174.7, CONF_RADIUS: 25, CONF_SCAN_INTERVAL: timedelta(minutes=4), CONF_CATEGORIES: ["Drought", "Earthquake"], } result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": "import"}, data=conf ) assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY assert result["title"] == "-41.2, 174.7" assert result["data"] == { CONF_LATITUDE: -41.2, CONF_LONGITUDE: 174.7, CONF_RADIUS: 25, CONF_SCAN_INTERVAL: 240.0, CONF_CATEGORIES: ["Drought", "Earthquake"], } async def test_step_user(hass): """Test that the user step works.""" hass.config.latitude = -41.2 hass.config.longitude = 174.7 conf = {CONF_RADIUS: 25} result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": "user"}, data=conf ) assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY assert result["title"] == "-41.2, 174.7" assert result["data"] == { CONF_LATITUDE: -41.2, CONF_LONGITUDE: 174.7, CONF_RADIUS: 25, CONF_SCAN_INTERVAL: 300.0, CONF_CATEGORIES: [], }
balloob/home-assistant
tests/components/gdacs/test_config_flow.py
homeassistant/components/poolsense/sensor.py
"""Support for Freebox devices (Freebox v6 and Freebox mini 4K).""" from typing import Dict from homeassistant.config_entries import ConfigEntry from homeassistant.const import DATA_RATE_KILOBYTES_PER_SECOND from homeassistant.core import callback from homeassistant.helpers.dispatcher import async_dispatcher_connect from homeassistant.helpers.entity import Entity from homeassistant.helpers.typing import HomeAssistantType import homeassistant.util.dt as dt_util from .const import ( CALL_SENSORS, CONNECTION_SENSORS, DOMAIN, SENSOR_DEVICE_CLASS, SENSOR_ICON, SENSOR_NAME, SENSOR_UNIT, TEMPERATURE_SENSOR_TEMPLATE, ) from .router import FreeboxRouter async def async_setup_entry( hass: HomeAssistantType, entry: ConfigEntry, async_add_entities ) -> None: """Set up the sensors.""" router = hass.data[DOMAIN][entry.unique_id] entities = [] for sensor_name in router.sensors_temperature: entities.append( FreeboxSensor( router, sensor_name, {**TEMPERATURE_SENSOR_TEMPLATE, SENSOR_NAME: f"Freebox {sensor_name}"}, ) ) for sensor_key in CONNECTION_SENSORS: entities.append( FreeboxSensor(router, sensor_key, CONNECTION_SENSORS[sensor_key]) ) for sensor_key in CALL_SENSORS: entities.append(FreeboxCallSensor(router, sensor_key, CALL_SENSORS[sensor_key])) async_add_entities(entities, True) class FreeboxSensor(Entity): """Representation of a Freebox sensor.""" def __init__( self, router: FreeboxRouter, sensor_type: str, sensor: Dict[str, any] ) -> None: """Initialize a Freebox sensor.""" self._state = None self._router = router self._sensor_type = sensor_type self._name = sensor[SENSOR_NAME] self._unit = sensor[SENSOR_UNIT] self._icon = sensor[SENSOR_ICON] self._device_class = sensor[SENSOR_DEVICE_CLASS] self._unique_id = f"{self._router.mac} {self._name}" @callback def async_update_state(self) -> None: """Update the Freebox sensor.""" state = self._router.sensors[self._sensor_type] if self._unit == DATA_RATE_KILOBYTES_PER_SECOND: self._state = round(state / 1000, 2) else: self._state = state @property def unique_id(self) -> str: """Return a unique ID.""" return self._unique_id @property def name(self) -> str: """Return the name.""" return self._name @property def state(self) -> str: """Return the state.""" return self._state @property def unit_of_measurement(self) -> str: """Return the unit.""" return self._unit @property def icon(self) -> str: """Return the icon.""" return self._icon @property def device_class(self) -> str: """Return the device_class.""" return self._device_class @property def device_info(self) -> Dict[str, any]: """Return the device information.""" return self._router.device_info @property def should_poll(self) -> bool: """No polling needed.""" return False @callback def async_on_demand_update(self): """Update state.""" self.async_update_state() self.async_write_ha_state() async def async_added_to_hass(self): """Register state update callback.""" self.async_update_state() self.async_on_remove( async_dispatcher_connect( self.hass, self._router.signal_sensor_update, self.async_on_demand_update, ) ) class FreeboxCallSensor(FreeboxSensor): """Representation of a Freebox call sensor.""" def __init__( self, router: FreeboxRouter, sensor_type: str, sensor: Dict[str, any] ) -> None: """Initialize a Freebox call sensor.""" self._call_list_for_type = [] super().__init__(router, sensor_type, sensor) @callback def async_update_state(self) -> None: """Update the Freebox call sensor.""" self._call_list_for_type = [] if self._router.call_list: for call in self._router.call_list: if not call["new"]: continue 
if call["type"] == self._sensor_type: self._call_list_for_type.append(call) self._state = len(self._call_list_for_type) @property def device_state_attributes(self) -> Dict[str, any]: """Return device specific state attributes.""" return { dt_util.utc_from_timestamp(call["datetime"]).isoformat(): call["name"] for call in self._call_list_for_type }
"""Define tests for the GDACS config flow.""" from datetime import timedelta import pytest from homeassistant import data_entry_flow from homeassistant.components.gdacs import CONF_CATEGORIES, DOMAIN from homeassistant.const import ( CONF_LATITUDE, CONF_LONGITUDE, CONF_RADIUS, CONF_SCAN_INTERVAL, ) from tests.async_mock import patch @pytest.fixture(name="gdacs_setup", autouse=True) def gdacs_setup_fixture(): """Mock gdacs entry setup.""" with patch("homeassistant.components.gdacs.async_setup_entry", return_value=True): yield async def test_duplicate_error(hass, config_entry): """Test that errors are shown when duplicates are added.""" conf = {CONF_LATITUDE: -41.2, CONF_LONGITUDE: 174.7, CONF_RADIUS: 25} config_entry.add_to_hass(hass) result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": "user"}, data=conf ) assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT assert result["reason"] == "already_configured" async def test_show_form(hass): """Test that the form is served with no input.""" result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": "user"} ) assert result["type"] == data_entry_flow.RESULT_TYPE_FORM assert result["step_id"] == "user" async def test_step_import(hass): """Test that the import step works.""" conf = { CONF_LATITUDE: -41.2, CONF_LONGITUDE: 174.7, CONF_RADIUS: 25, CONF_SCAN_INTERVAL: timedelta(minutes=4), CONF_CATEGORIES: ["Drought", "Earthquake"], } result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": "import"}, data=conf ) assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY assert result["title"] == "-41.2, 174.7" assert result["data"] == { CONF_LATITUDE: -41.2, CONF_LONGITUDE: 174.7, CONF_RADIUS: 25, CONF_SCAN_INTERVAL: 240.0, CONF_CATEGORIES: ["Drought", "Earthquake"], } async def test_step_user(hass): """Test that the user step works.""" hass.config.latitude = -41.2 hass.config.longitude = 174.7 conf = {CONF_RADIUS: 25} result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": "user"}, data=conf ) assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY assert result["title"] == "-41.2, 174.7" assert result["data"] == { CONF_LATITUDE: -41.2, CONF_LONGITUDE: 174.7, CONF_RADIUS: 25, CONF_SCAN_INTERVAL: 300.0, CONF_CATEGORIES: [], }
balloob/home-assistant
tests/components/gdacs/test_config_flow.py
homeassistant/components/freebox/sensor.py
"""Support for Qwikswitch Sensors.""" import logging from pyqwikswitch.qwikswitch import SENSORS from homeassistant.core import callback from . import DOMAIN as QWIKSWITCH, QSEntity _LOGGER = logging.getLogger(__name__) async def async_setup_platform(hass, _, add_entities, discovery_info=None): """Add sensor from the main Qwikswitch component.""" if discovery_info is None: return qsusb = hass.data[QWIKSWITCH] _LOGGER.debug("Setup qwikswitch.sensor %s, %s", qsusb, discovery_info) devs = [QSSensor(sensor) for sensor in discovery_info[QWIKSWITCH]] add_entities(devs) class QSSensor(QSEntity): """Sensor based on a Qwikswitch relay/dimmer module.""" _val = None def __init__(self, sensor): """Initialize the sensor.""" super().__init__(sensor["id"], sensor["name"]) self.channel = sensor["channel"] sensor_type = sensor["type"] self._decode, self.unit = SENSORS[sensor_type] # this cannot happen because it only happens in bool and this should be redirected to binary_sensor assert not isinstance( self.unit, type ), f"boolean sensor id={sensor['id']} name={sensor['name']}" @callback def update_packet(self, packet): """Receive update packet from QSUSB.""" val = self._decode(packet, channel=self.channel) _LOGGER.debug( "Update %s (%s:%s) decoded as %s: %s", self.entity_id, self.qsid, self.channel, val, packet, ) if val is not None: self._val = val self.async_write_ha_state() @property def state(self): """Return the value of the sensor.""" return str(self._val) @property def unique_id(self): """Return a unique identifier for this sensor.""" return f"qs{self.qsid}:{self.channel}" @property def unit_of_measurement(self): """Return the unit the value is expressed in.""" return self.unit
"""Define tests for the GDACS config flow.""" from datetime import timedelta import pytest from homeassistant import data_entry_flow from homeassistant.components.gdacs import CONF_CATEGORIES, DOMAIN from homeassistant.const import ( CONF_LATITUDE, CONF_LONGITUDE, CONF_RADIUS, CONF_SCAN_INTERVAL, ) from tests.async_mock import patch @pytest.fixture(name="gdacs_setup", autouse=True) def gdacs_setup_fixture(): """Mock gdacs entry setup.""" with patch("homeassistant.components.gdacs.async_setup_entry", return_value=True): yield async def test_duplicate_error(hass, config_entry): """Test that errors are shown when duplicates are added.""" conf = {CONF_LATITUDE: -41.2, CONF_LONGITUDE: 174.7, CONF_RADIUS: 25} config_entry.add_to_hass(hass) result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": "user"}, data=conf ) assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT assert result["reason"] == "already_configured" async def test_show_form(hass): """Test that the form is served with no input.""" result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": "user"} ) assert result["type"] == data_entry_flow.RESULT_TYPE_FORM assert result["step_id"] == "user" async def test_step_import(hass): """Test that the import step works.""" conf = { CONF_LATITUDE: -41.2, CONF_LONGITUDE: 174.7, CONF_RADIUS: 25, CONF_SCAN_INTERVAL: timedelta(minutes=4), CONF_CATEGORIES: ["Drought", "Earthquake"], } result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": "import"}, data=conf ) assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY assert result["title"] == "-41.2, 174.7" assert result["data"] == { CONF_LATITUDE: -41.2, CONF_LONGITUDE: 174.7, CONF_RADIUS: 25, CONF_SCAN_INTERVAL: 240.0, CONF_CATEGORIES: ["Drought", "Earthquake"], } async def test_step_user(hass): """Test that the user step works.""" hass.config.latitude = -41.2 hass.config.longitude = 174.7 conf = {CONF_RADIUS: 25} result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": "user"}, data=conf ) assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY assert result["title"] == "-41.2, 174.7" assert result["data"] == { CONF_LATITUDE: -41.2, CONF_LONGITUDE: 174.7, CONF_RADIUS: 25, CONF_SCAN_INTERVAL: 300.0, CONF_CATEGORIES: [], }
balloob/home-assistant
tests/components/gdacs/test_config_flow.py
homeassistant/components/qwikswitch/sensor.py
"""The flunearyou component.""" import asyncio from datetime import timedelta from pyflunearyou import Client from pyflunearyou.errors import FluNearYouError from homeassistant.const import CONF_LATITUDE, CONF_LONGITUDE from homeassistant.core import callback from homeassistant.helpers import aiohttp_client, config_validation as cv from homeassistant.helpers.dispatcher import async_dispatcher_send from homeassistant.helpers.event import async_track_time_interval from .const import ( CATEGORY_CDC_REPORT, CATEGORY_USER_REPORT, DATA_CLIENT, DOMAIN, LOGGER, SENSORS, TOPIC_UPDATE, ) DATA_LISTENER = "listener" DEFAULT_SCAN_INTERVAL = timedelta(minutes=30) CONFIG_SCHEMA = cv.deprecated(DOMAIN, invalidation_version="0.119") @callback def async_get_api_category(sensor_type): """Get the category that a particular sensor type belongs to.""" try: return next( ( category for category, sensors in SENSORS.items() for sensor in sensors if sensor[0] == sensor_type ) ) except StopIteration as err: raise ValueError(f"Can't find category sensor type: {sensor_type}") from err async def async_setup(hass, config): """Set up the Flu Near You component.""" hass.data[DOMAIN] = {DATA_CLIENT: {}, DATA_LISTENER: {}} return True async def async_setup_entry(hass, config_entry): """Set up Flu Near You as config entry.""" websession = aiohttp_client.async_get_clientsession(hass) fny = FluNearYouData( hass, Client(websession), config_entry.data.get(CONF_LATITUDE, hass.config.latitude), config_entry.data.get(CONF_LONGITUDE, hass.config.longitude), ) await fny.async_update() hass.data[DOMAIN][DATA_CLIENT][config_entry.entry_id] = fny hass.async_create_task( hass.config_entries.async_forward_entry_setup(config_entry, "sensor") ) async def refresh(event_time): """Refresh data from Flu Near You.""" await fny.async_update() hass.data[DOMAIN][DATA_LISTENER][config_entry.entry_id] = async_track_time_interval( hass, refresh, DEFAULT_SCAN_INTERVAL ) return True async def async_unload_entry(hass, config_entry): """Unload an Flu Near You config entry.""" hass.data[DOMAIN][DATA_CLIENT].pop(config_entry.entry_id) remove_listener = hass.data[DOMAIN][DATA_LISTENER].pop(config_entry.entry_id) remove_listener() await hass.config_entries.async_forward_entry_unload(config_entry, "sensor") return True class FluNearYouData: """Define a data object to retrieve info from Flu Near You.""" def __init__(self, hass, client, latitude, longitude): """Initialize.""" self._async_cancel_time_interval_listener = None self._client = client self._hass = hass self.data = {} self.latitude = latitude self.longitude = longitude self._api_category_count = { CATEGORY_CDC_REPORT: 0, CATEGORY_USER_REPORT: 0, } self._api_category_locks = { CATEGORY_CDC_REPORT: asyncio.Lock(), CATEGORY_USER_REPORT: asyncio.Lock(), } async def _async_get_data_from_api(self, api_category): """Update and save data for a particular API category.""" if self._api_category_count[api_category] == 0: return if api_category == CATEGORY_CDC_REPORT: api_coro = self._client.cdc_reports.status_by_coordinates( self.latitude, self.longitude ) else: api_coro = self._client.user_reports.status_by_coordinates( self.latitude, self.longitude ) try: self.data[api_category] = await api_coro except FluNearYouError as err: LOGGER.error("Unable to get %s data: %s", api_category, err) self.data[api_category] = None async def _async_update_listener_action(self, now): """Define an async_track_time_interval action to update data.""" await self.async_update() @callback def async_deregister_api_interest(self, 
sensor_type): """Decrement the number of entities with data needs from an API category.""" # If this deregistration should leave us with no registration at all, remove the # time interval: if sum(self._api_category_count.values()) == 0: if self._async_cancel_time_interval_listener: self._async_cancel_time_interval_listener() self._async_cancel_time_interval_listener = None return api_category = async_get_api_category(sensor_type) self._api_category_count[api_category] -= 1 async def async_register_api_interest(self, sensor_type): """Increment the number of entities with data needs from an API category.""" # If this is the first registration we have, start a time interval: if not self._async_cancel_time_interval_listener: self._async_cancel_time_interval_listener = async_track_time_interval( self._hass, self._async_update_listener_action, DEFAULT_SCAN_INTERVAL, ) api_category = async_get_api_category(sensor_type) self._api_category_count[api_category] += 1 # If a sensor registers interest in a particular API call and the data doesn't # exist for it yet, make the API call and grab the data: async with self._api_category_locks[api_category]: if api_category not in self.data: await self._async_get_data_from_api(api_category) async def async_update(self): """Update Flu Near You data.""" tasks = [ self._async_get_data_from_api(api_category) for api_category in self._api_category_count ] await asyncio.gather(*tasks) LOGGER.debug("Received new data") async_dispatcher_send(self._hass, TOPIC_UPDATE)
"""Define tests for the GDACS config flow.""" from datetime import timedelta import pytest from homeassistant import data_entry_flow from homeassistant.components.gdacs import CONF_CATEGORIES, DOMAIN from homeassistant.const import ( CONF_LATITUDE, CONF_LONGITUDE, CONF_RADIUS, CONF_SCAN_INTERVAL, ) from tests.async_mock import patch @pytest.fixture(name="gdacs_setup", autouse=True) def gdacs_setup_fixture(): """Mock gdacs entry setup.""" with patch("homeassistant.components.gdacs.async_setup_entry", return_value=True): yield async def test_duplicate_error(hass, config_entry): """Test that errors are shown when duplicates are added.""" conf = {CONF_LATITUDE: -41.2, CONF_LONGITUDE: 174.7, CONF_RADIUS: 25} config_entry.add_to_hass(hass) result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": "user"}, data=conf ) assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT assert result["reason"] == "already_configured" async def test_show_form(hass): """Test that the form is served with no input.""" result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": "user"} ) assert result["type"] == data_entry_flow.RESULT_TYPE_FORM assert result["step_id"] == "user" async def test_step_import(hass): """Test that the import step works.""" conf = { CONF_LATITUDE: -41.2, CONF_LONGITUDE: 174.7, CONF_RADIUS: 25, CONF_SCAN_INTERVAL: timedelta(minutes=4), CONF_CATEGORIES: ["Drought", "Earthquake"], } result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": "import"}, data=conf ) assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY assert result["title"] == "-41.2, 174.7" assert result["data"] == { CONF_LATITUDE: -41.2, CONF_LONGITUDE: 174.7, CONF_RADIUS: 25, CONF_SCAN_INTERVAL: 240.0, CONF_CATEGORIES: ["Drought", "Earthquake"], } async def test_step_user(hass): """Test that the user step works.""" hass.config.latitude = -41.2 hass.config.longitude = 174.7 conf = {CONF_RADIUS: 25} result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": "user"}, data=conf ) assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY assert result["title"] == "-41.2, 174.7" assert result["data"] == { CONF_LATITUDE: -41.2, CONF_LONGITUDE: 174.7, CONF_RADIUS: 25, CONF_SCAN_INTERVAL: 300.0, CONF_CATEGORIES: [], }
balloob/home-assistant
tests/components/gdacs/test_config_flow.py
homeassistant/components/flunearyou/__init__.py
"""Config flow to configure Life360 integration.""" from collections import OrderedDict import logging from life360 import Life360Error, LoginError import voluptuous as vol from homeassistant import config_entries from homeassistant.const import CONF_PASSWORD, CONF_USERNAME from .const import CONF_AUTHORIZATION, DOMAIN from .helpers import get_api _LOGGER = logging.getLogger(__name__) DOCS_URL = "https://www.home-assistant.io/integrations/life360" @config_entries.HANDLERS.register(DOMAIN) class Life360ConfigFlow(config_entries.ConfigFlow): """Life360 integration config flow.""" VERSION = 1 CONNECTION_CLASS = config_entries.CONN_CLASS_CLOUD_POLL def __init__(self): """Initialize.""" self._api = get_api() self._username = vol.UNDEFINED self._password = vol.UNDEFINED @property def configured_usernames(self): """Return tuple of configured usernames.""" entries = self.hass.config_entries.async_entries(DOMAIN) if entries: return (entry.data[CONF_USERNAME] for entry in entries) return () async def async_step_user(self, user_input=None): """Handle a user initiated config flow.""" errors = {} if user_input is not None: self._username = user_input[CONF_USERNAME] self._password = user_input[CONF_PASSWORD] try: # pylint: disable=no-value-for-parameter vol.Email()(self._username) authorization = await self.hass.async_add_executor_job( self._api.get_authorization, self._username, self._password ) except vol.Invalid: errors[CONF_USERNAME] = "invalid_username" except LoginError: errors["base"] = "invalid_auth" except Life360Error as error: _LOGGER.error( "Unexpected error communicating with Life360 server: %s", error ) errors["base"] = "unknown" else: if self._username in self.configured_usernames: errors["base"] = "already_configured" else: return self.async_create_entry( title=self._username, data={ CONF_USERNAME: self._username, CONF_PASSWORD: self._password, CONF_AUTHORIZATION: authorization, }, description_placeholders={"docs_url": DOCS_URL}, ) data_schema = OrderedDict() data_schema[vol.Required(CONF_USERNAME, default=self._username)] = str data_schema[vol.Required(CONF_PASSWORD, default=self._password)] = str return self.async_show_form( step_id="user", data_schema=vol.Schema(data_schema), errors=errors, description_placeholders={"docs_url": DOCS_URL}, ) async def async_step_import(self, user_input): """Import a config flow from configuration.""" username = user_input[CONF_USERNAME] password = user_input[CONF_PASSWORD] try: authorization = await self.hass.async_add_executor_job( self._api.get_authorization, username, password ) except LoginError: _LOGGER.error("Invalid credentials for %s", username) return self.async_abort(reason="invalid_auth") except Life360Error as error: _LOGGER.error( "Unexpected error communicating with Life360 server: %s", error ) return self.async_abort(reason="unknown") return self.async_create_entry( title=f"{username} (from configuration)", data={ CONF_USERNAME: username, CONF_PASSWORD: password, CONF_AUTHORIZATION: authorization, }, )
"""Define tests for the GDACS config flow.""" from datetime import timedelta import pytest from homeassistant import data_entry_flow from homeassistant.components.gdacs import CONF_CATEGORIES, DOMAIN from homeassistant.const import ( CONF_LATITUDE, CONF_LONGITUDE, CONF_RADIUS, CONF_SCAN_INTERVAL, ) from tests.async_mock import patch @pytest.fixture(name="gdacs_setup", autouse=True) def gdacs_setup_fixture(): """Mock gdacs entry setup.""" with patch("homeassistant.components.gdacs.async_setup_entry", return_value=True): yield async def test_duplicate_error(hass, config_entry): """Test that errors are shown when duplicates are added.""" conf = {CONF_LATITUDE: -41.2, CONF_LONGITUDE: 174.7, CONF_RADIUS: 25} config_entry.add_to_hass(hass) result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": "user"}, data=conf ) assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT assert result["reason"] == "already_configured" async def test_show_form(hass): """Test that the form is served with no input.""" result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": "user"} ) assert result["type"] == data_entry_flow.RESULT_TYPE_FORM assert result["step_id"] == "user" async def test_step_import(hass): """Test that the import step works.""" conf = { CONF_LATITUDE: -41.2, CONF_LONGITUDE: 174.7, CONF_RADIUS: 25, CONF_SCAN_INTERVAL: timedelta(minutes=4), CONF_CATEGORIES: ["Drought", "Earthquake"], } result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": "import"}, data=conf ) assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY assert result["title"] == "-41.2, 174.7" assert result["data"] == { CONF_LATITUDE: -41.2, CONF_LONGITUDE: 174.7, CONF_RADIUS: 25, CONF_SCAN_INTERVAL: 240.0, CONF_CATEGORIES: ["Drought", "Earthquake"], } async def test_step_user(hass): """Test that the user step works.""" hass.config.latitude = -41.2 hass.config.longitude = 174.7 conf = {CONF_RADIUS: 25} result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": "user"}, data=conf ) assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY assert result["title"] == "-41.2, 174.7" assert result["data"] == { CONF_LATITUDE: -41.2, CONF_LONGITUDE: 174.7, CONF_RADIUS: 25, CONF_SCAN_INTERVAL: 300.0, CONF_CATEGORIES: [], }
balloob/home-assistant
tests/components/gdacs/test_config_flow.py
homeassistant/components/life360/config_flow.py
"""Reads vehicle status from StarLine API.""" from homeassistant.components.sensor import DEVICE_CLASS_TEMPERATURE from homeassistant.const import PERCENTAGE, TEMP_CELSIUS, VOLT from homeassistant.helpers.entity import Entity from homeassistant.helpers.icon import icon_for_battery_level, icon_for_signal_level from .account import StarlineAccount, StarlineDevice from .const import DOMAIN from .entity import StarlineEntity SENSOR_TYPES = { "battery": ["Battery", None, VOLT, None], "balance": ["Balance", None, None, "mdi:cash-multiple"], "ctemp": ["Interior Temperature", DEVICE_CLASS_TEMPERATURE, TEMP_CELSIUS, None], "etemp": ["Engine Temperature", DEVICE_CLASS_TEMPERATURE, TEMP_CELSIUS, None], "gsm_lvl": ["GSM Signal", None, PERCENTAGE, None], } async def async_setup_entry(hass, entry, async_add_entities): """Set up the StarLine sensors.""" account: StarlineAccount = hass.data[DOMAIN][entry.entry_id] entities = [] for device in account.api.devices.values(): for key, value in SENSOR_TYPES.items(): sensor = StarlineSensor(account, device, key, *value) if sensor.state is not None: entities.append(sensor) async_add_entities(entities) class StarlineSensor(StarlineEntity, Entity): """Representation of a StarLine sensor.""" def __init__( self, account: StarlineAccount, device: StarlineDevice, key: str, name: str, device_class: str, unit: str, icon: str, ): """Initialize StarLine sensor.""" super().__init__(account, device, key, name) self._device_class = device_class self._unit = unit self._icon = icon @property def icon(self): """Icon to use in the frontend, if any.""" if self._key == "battery": return icon_for_battery_level( battery_level=self._device.battery_level_percent, charging=self._device.car_state.get("ign", False), ) if self._key == "gsm_lvl": return icon_for_signal_level(signal_level=self._device.gsm_level_percent) return self._icon @property def state(self): """Return the state of the sensor.""" if self._key == "battery": return self._device.battery_level if self._key == "balance": return self._device.balance.get("value") if self._key == "ctemp": return self._device.temp_inner if self._key == "etemp": return self._device.temp_engine if self._key == "gsm_lvl": return self._device.gsm_level_percent return None @property def unit_of_measurement(self): """Get the unit of measurement.""" if self._key == "balance": return self._device.balance.get("currency") or "₽" return self._unit @property def device_class(self): """Return the class of the sensor.""" return self._device_class @property def device_state_attributes(self): """Return the state attributes of the sensor.""" if self._key == "balance": return self._account.balance_attrs(self._device) if self._key == "gsm_lvl": return self._account.gsm_attrs(self._device) return None
"""Define tests for the GDACS config flow.""" from datetime import timedelta import pytest from homeassistant import data_entry_flow from homeassistant.components.gdacs import CONF_CATEGORIES, DOMAIN from homeassistant.const import ( CONF_LATITUDE, CONF_LONGITUDE, CONF_RADIUS, CONF_SCAN_INTERVAL, ) from tests.async_mock import patch @pytest.fixture(name="gdacs_setup", autouse=True) def gdacs_setup_fixture(): """Mock gdacs entry setup.""" with patch("homeassistant.components.gdacs.async_setup_entry", return_value=True): yield async def test_duplicate_error(hass, config_entry): """Test that errors are shown when duplicates are added.""" conf = {CONF_LATITUDE: -41.2, CONF_LONGITUDE: 174.7, CONF_RADIUS: 25} config_entry.add_to_hass(hass) result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": "user"}, data=conf ) assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT assert result["reason"] == "already_configured" async def test_show_form(hass): """Test that the form is served with no input.""" result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": "user"} ) assert result["type"] == data_entry_flow.RESULT_TYPE_FORM assert result["step_id"] == "user" async def test_step_import(hass): """Test that the import step works.""" conf = { CONF_LATITUDE: -41.2, CONF_LONGITUDE: 174.7, CONF_RADIUS: 25, CONF_SCAN_INTERVAL: timedelta(minutes=4), CONF_CATEGORIES: ["Drought", "Earthquake"], } result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": "import"}, data=conf ) assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY assert result["title"] == "-41.2, 174.7" assert result["data"] == { CONF_LATITUDE: -41.2, CONF_LONGITUDE: 174.7, CONF_RADIUS: 25, CONF_SCAN_INTERVAL: 240.0, CONF_CATEGORIES: ["Drought", "Earthquake"], } async def test_step_user(hass): """Test that the user step works.""" hass.config.latitude = -41.2 hass.config.longitude = 174.7 conf = {CONF_RADIUS: 25} result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": "user"}, data=conf ) assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY assert result["title"] == "-41.2, 174.7" assert result["data"] == { CONF_LATITUDE: -41.2, CONF_LONGITUDE: 174.7, CONF_RADIUS: 25, CONF_SCAN_INTERVAL: 300.0, CONF_CATEGORIES: [], }
balloob/home-assistant
tests/components/gdacs/test_config_flow.py
homeassistant/components/starline/sensor.py
"""Support for XBee Zigbee switches.""" import voluptuous as vol from homeassistant.components.switch import SwitchEntity from . import DOMAIN, PLATFORM_SCHEMA, XBeeDigitalOut, XBeeDigitalOutConfig CONF_ON_STATE = "on_state" DEFAULT_ON_STATE = "high" STATES = ["high", "low"] PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({vol.Optional(CONF_ON_STATE): vol.In(STATES)}) def setup_platform(hass, config, add_entities, discovery_info=None): """Set up the XBee Zigbee switch platform.""" zigbee_device = hass.data[DOMAIN] add_entities([XBeeSwitch(XBeeDigitalOutConfig(config), zigbee_device)]) class XBeeSwitch(XBeeDigitalOut, SwitchEntity): """Representation of a XBee Zigbee Digital Out device."""
"""Define tests for the GDACS config flow.""" from datetime import timedelta import pytest from homeassistant import data_entry_flow from homeassistant.components.gdacs import CONF_CATEGORIES, DOMAIN from homeassistant.const import ( CONF_LATITUDE, CONF_LONGITUDE, CONF_RADIUS, CONF_SCAN_INTERVAL, ) from tests.async_mock import patch @pytest.fixture(name="gdacs_setup", autouse=True) def gdacs_setup_fixture(): """Mock gdacs entry setup.""" with patch("homeassistant.components.gdacs.async_setup_entry", return_value=True): yield async def test_duplicate_error(hass, config_entry): """Test that errors are shown when duplicates are added.""" conf = {CONF_LATITUDE: -41.2, CONF_LONGITUDE: 174.7, CONF_RADIUS: 25} config_entry.add_to_hass(hass) result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": "user"}, data=conf ) assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT assert result["reason"] == "already_configured" async def test_show_form(hass): """Test that the form is served with no input.""" result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": "user"} ) assert result["type"] == data_entry_flow.RESULT_TYPE_FORM assert result["step_id"] == "user" async def test_step_import(hass): """Test that the import step works.""" conf = { CONF_LATITUDE: -41.2, CONF_LONGITUDE: 174.7, CONF_RADIUS: 25, CONF_SCAN_INTERVAL: timedelta(minutes=4), CONF_CATEGORIES: ["Drought", "Earthquake"], } result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": "import"}, data=conf ) assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY assert result["title"] == "-41.2, 174.7" assert result["data"] == { CONF_LATITUDE: -41.2, CONF_LONGITUDE: 174.7, CONF_RADIUS: 25, CONF_SCAN_INTERVAL: 240.0, CONF_CATEGORIES: ["Drought", "Earthquake"], } async def test_step_user(hass): """Test that the user step works.""" hass.config.latitude = -41.2 hass.config.longitude = 174.7 conf = {CONF_RADIUS: 25} result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": "user"}, data=conf ) assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY assert result["title"] == "-41.2, 174.7" assert result["data"] == { CONF_LATITUDE: -41.2, CONF_LONGITUDE: 174.7, CONF_RADIUS: 25, CONF_SCAN_INTERVAL: 300.0, CONF_CATEGORIES: [], }
balloob/home-assistant
tests/components/gdacs/test_config_flow.py
homeassistant/components/xbee/switch.py
"""Support for Fibaro locks.""" from homeassistant.components.lock import DOMAIN, LockEntity from . import FIBARO_DEVICES, FibaroDevice def setup_platform(hass, config, add_entities, discovery_info=None): """Set up the Fibaro locks.""" if discovery_info is None: return add_entities( [FibaroLock(device) for device in hass.data[FIBARO_DEVICES]["lock"]], True ) class FibaroLock(FibaroDevice, LockEntity): """Representation of a Fibaro Lock.""" def __init__(self, fibaro_device): """Initialize the Fibaro device.""" self._state = False super().__init__(fibaro_device) self.entity_id = f"{DOMAIN}.{self.ha_id}" def lock(self, **kwargs): """Lock the device.""" self.action("secure") self._state = True def unlock(self, **kwargs): """Unlock the device.""" self.action("unsecure") self._state = False @property def is_locked(self): """Return true if device is locked.""" return self._state def update(self): """Update device state.""" self._state = self.current_binary_state
"""Define tests for the GDACS config flow.""" from datetime import timedelta import pytest from homeassistant import data_entry_flow from homeassistant.components.gdacs import CONF_CATEGORIES, DOMAIN from homeassistant.const import ( CONF_LATITUDE, CONF_LONGITUDE, CONF_RADIUS, CONF_SCAN_INTERVAL, ) from tests.async_mock import patch @pytest.fixture(name="gdacs_setup", autouse=True) def gdacs_setup_fixture(): """Mock gdacs entry setup.""" with patch("homeassistant.components.gdacs.async_setup_entry", return_value=True): yield async def test_duplicate_error(hass, config_entry): """Test that errors are shown when duplicates are added.""" conf = {CONF_LATITUDE: -41.2, CONF_LONGITUDE: 174.7, CONF_RADIUS: 25} config_entry.add_to_hass(hass) result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": "user"}, data=conf ) assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT assert result["reason"] == "already_configured" async def test_show_form(hass): """Test that the form is served with no input.""" result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": "user"} ) assert result["type"] == data_entry_flow.RESULT_TYPE_FORM assert result["step_id"] == "user" async def test_step_import(hass): """Test that the import step works.""" conf = { CONF_LATITUDE: -41.2, CONF_LONGITUDE: 174.7, CONF_RADIUS: 25, CONF_SCAN_INTERVAL: timedelta(minutes=4), CONF_CATEGORIES: ["Drought", "Earthquake"], } result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": "import"}, data=conf ) assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY assert result["title"] == "-41.2, 174.7" assert result["data"] == { CONF_LATITUDE: -41.2, CONF_LONGITUDE: 174.7, CONF_RADIUS: 25, CONF_SCAN_INTERVAL: 240.0, CONF_CATEGORIES: ["Drought", "Earthquake"], } async def test_step_user(hass): """Test that the user step works.""" hass.config.latitude = -41.2 hass.config.longitude = 174.7 conf = {CONF_RADIUS: 25} result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": "user"}, data=conf ) assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY assert result["title"] == "-41.2, 174.7" assert result["data"] == { CONF_LATITUDE: -41.2, CONF_LONGITUDE: 174.7, CONF_RADIUS: 25, CONF_SCAN_INTERVAL: 300.0, CONF_CATEGORIES: [], }
balloob/home-assistant
tests/components/gdacs/test_config_flow.py
homeassistant/components/fibaro/lock.py
"""Support for MelCloud device sensors.""" from pymelcloud import DEVICE_TYPE_ATA, DEVICE_TYPE_ATW from pymelcloud.atw_device import Zone from homeassistant.const import ( DEVICE_CLASS_TEMPERATURE, ENERGY_KILO_WATT_HOUR, TEMP_CELSIUS, ) from homeassistant.helpers.entity import Entity from . import MelCloudDevice from .const import DOMAIN ATTR_MEASUREMENT_NAME = "measurement_name" ATTR_ICON = "icon" ATTR_UNIT = "unit" ATTR_DEVICE_CLASS = "device_class" ATTR_VALUE_FN = "value_fn" ATTR_ENABLED_FN = "enabled" ATA_SENSORS = { "room_temperature": { ATTR_MEASUREMENT_NAME: "Room Temperature", ATTR_ICON: "mdi:thermometer", ATTR_UNIT: TEMP_CELSIUS, ATTR_DEVICE_CLASS: DEVICE_CLASS_TEMPERATURE, ATTR_VALUE_FN: lambda x: x.device.room_temperature, ATTR_ENABLED_FN: lambda x: True, }, "energy": { ATTR_MEASUREMENT_NAME: "Energy", ATTR_ICON: "mdi:factory", ATTR_UNIT: ENERGY_KILO_WATT_HOUR, ATTR_DEVICE_CLASS: None, ATTR_VALUE_FN: lambda x: x.device.total_energy_consumed, ATTR_ENABLED_FN: lambda x: x.device.has_energy_consumed_meter, }, } ATW_SENSORS = { "outside_temperature": { ATTR_MEASUREMENT_NAME: "Outside Temperature", ATTR_ICON: "mdi:thermometer", ATTR_UNIT: TEMP_CELSIUS, ATTR_DEVICE_CLASS: DEVICE_CLASS_TEMPERATURE, ATTR_VALUE_FN: lambda x: x.device.outside_temperature, ATTR_ENABLED_FN: lambda x: True, }, "tank_temperature": { ATTR_MEASUREMENT_NAME: "Tank Temperature", ATTR_ICON: "mdi:thermometer", ATTR_UNIT: TEMP_CELSIUS, ATTR_DEVICE_CLASS: DEVICE_CLASS_TEMPERATURE, ATTR_VALUE_FN: lambda x: x.device.tank_temperature, ATTR_ENABLED_FN: lambda x: True, }, } ATW_ZONE_SENSORS = { "room_temperature": { ATTR_MEASUREMENT_NAME: "Room Temperature", ATTR_ICON: "mdi:thermometer", ATTR_UNIT: TEMP_CELSIUS, ATTR_DEVICE_CLASS: DEVICE_CLASS_TEMPERATURE, ATTR_VALUE_FN: lambda zone: zone.room_temperature, ATTR_ENABLED_FN: lambda x: True, }, "flow_temperature": { ATTR_MEASUREMENT_NAME: "Flow Temperature", ATTR_ICON: "mdi:thermometer", ATTR_UNIT: TEMP_CELSIUS, ATTR_DEVICE_CLASS: DEVICE_CLASS_TEMPERATURE, ATTR_VALUE_FN: lambda zone: zone.flow_temperature, ATTR_ENABLED_FN: lambda x: True, }, "return_temperature": { ATTR_MEASUREMENT_NAME: "Flow Return Temperature", ATTR_ICON: "mdi:thermometer", ATTR_UNIT: TEMP_CELSIUS, ATTR_DEVICE_CLASS: DEVICE_CLASS_TEMPERATURE, ATTR_VALUE_FN: lambda zone: zone.return_temperature, ATTR_ENABLED_FN: lambda x: True, }, } async def async_setup_entry(hass, entry, async_add_entities): """Set up MELCloud device sensors based on config_entry.""" mel_devices = hass.data[DOMAIN].get(entry.entry_id) async_add_entities( [ MelDeviceSensor(mel_device, measurement, definition) for measurement, definition in ATA_SENSORS.items() for mel_device in mel_devices[DEVICE_TYPE_ATA] if definition[ATTR_ENABLED_FN](mel_device) ] + [ MelDeviceSensor(mel_device, measurement, definition) for measurement, definition in ATW_SENSORS.items() for mel_device in mel_devices[DEVICE_TYPE_ATW] if definition[ATTR_ENABLED_FN](mel_device) ] + [ AtwZoneSensor(mel_device, zone, measurement, definition) for mel_device in mel_devices[DEVICE_TYPE_ATW] for zone in mel_device.device.zones for measurement, definition, in ATW_ZONE_SENSORS.items() if definition[ATTR_ENABLED_FN](zone) ], True, ) class MelDeviceSensor(Entity): """Representation of a Sensor.""" def __init__(self, api: MelCloudDevice, measurement, definition): """Initialize the sensor.""" self._api = api self._name_slug = api.name self._measurement = measurement self._def = definition @property def unique_id(self): """Return a unique ID.""" return 
f"{self._api.device.serial}-{self._api.device.mac}-{self._measurement}" @property def icon(self): """Return the icon to use in the frontend, if any.""" return self._def[ATTR_ICON] @property def name(self): """Return the name of the sensor.""" return f"{self._name_slug} {self._def[ATTR_MEASUREMENT_NAME]}" @property def state(self): """Return the state of the sensor.""" return self._def[ATTR_VALUE_FN](self._api) @property def unit_of_measurement(self): """Return the unit of measurement.""" return self._def[ATTR_UNIT] @property def device_class(self): """Return device class.""" return self._def[ATTR_DEVICE_CLASS] async def async_update(self): """Retrieve latest state.""" await self._api.async_update() @property def device_info(self): """Return a device description for device registry.""" return self._api.device_info class AtwZoneSensor(MelDeviceSensor): """Air-to-Air device sensor.""" def __init__(self, api: MelCloudDevice, zone: Zone, measurement, definition): """Initialize the sensor.""" super().__init__(api, measurement, definition) self._zone = zone self._name_slug = f"{api.name} {zone.name}" @property def state(self): """Return zone based state.""" return self._def[ATTR_VALUE_FN](self._zone)
"""Define tests for the GDACS config flow.""" from datetime import timedelta import pytest from homeassistant import data_entry_flow from homeassistant.components.gdacs import CONF_CATEGORIES, DOMAIN from homeassistant.const import ( CONF_LATITUDE, CONF_LONGITUDE, CONF_RADIUS, CONF_SCAN_INTERVAL, ) from tests.async_mock import patch @pytest.fixture(name="gdacs_setup", autouse=True) def gdacs_setup_fixture(): """Mock gdacs entry setup.""" with patch("homeassistant.components.gdacs.async_setup_entry", return_value=True): yield async def test_duplicate_error(hass, config_entry): """Test that errors are shown when duplicates are added.""" conf = {CONF_LATITUDE: -41.2, CONF_LONGITUDE: 174.7, CONF_RADIUS: 25} config_entry.add_to_hass(hass) result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": "user"}, data=conf ) assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT assert result["reason"] == "already_configured" async def test_show_form(hass): """Test that the form is served with no input.""" result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": "user"} ) assert result["type"] == data_entry_flow.RESULT_TYPE_FORM assert result["step_id"] == "user" async def test_step_import(hass): """Test that the import step works.""" conf = { CONF_LATITUDE: -41.2, CONF_LONGITUDE: 174.7, CONF_RADIUS: 25, CONF_SCAN_INTERVAL: timedelta(minutes=4), CONF_CATEGORIES: ["Drought", "Earthquake"], } result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": "import"}, data=conf ) assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY assert result["title"] == "-41.2, 174.7" assert result["data"] == { CONF_LATITUDE: -41.2, CONF_LONGITUDE: 174.7, CONF_RADIUS: 25, CONF_SCAN_INTERVAL: 240.0, CONF_CATEGORIES: ["Drought", "Earthquake"], } async def test_step_user(hass): """Test that the user step works.""" hass.config.latitude = -41.2 hass.config.longitude = 174.7 conf = {CONF_RADIUS: 25} result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": "user"}, data=conf ) assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY assert result["title"] == "-41.2, 174.7" assert result["data"] == { CONF_LATITUDE: -41.2, CONF_LONGITUDE: 174.7, CONF_RADIUS: 25, CONF_SCAN_INTERVAL: 300.0, CONF_CATEGORIES: [], }
balloob/home-assistant
tests/components/gdacs/test_config_flow.py
homeassistant/components/melcloud/sensor.py
"""Provide methods to bootstrap a Home Assistant instance.""" import asyncio import contextlib from datetime import datetime import logging import logging.handlers import os import sys import threading from time import monotonic from typing import TYPE_CHECKING, Any, Dict, Optional, Set import voluptuous as vol import yarl from homeassistant import config as conf_util, config_entries, core, loader from homeassistant.components import http from homeassistant.const import ( EVENT_HOMEASSISTANT_STOP, REQUIRED_NEXT_PYTHON_DATE, REQUIRED_NEXT_PYTHON_VER, ) from homeassistant.exceptions import HomeAssistantError from homeassistant.helpers.typing import ConfigType from homeassistant.setup import ( DATA_SETUP, DATA_SETUP_STARTED, async_set_domains_to_be_loaded, async_setup_component, ) from homeassistant.util.logging import async_activate_log_queue_handler from homeassistant.util.package import async_get_user_site, is_virtual_env from homeassistant.util.yaml import clear_secret_cache if TYPE_CHECKING: from .runner import RuntimeConfig _LOGGER = logging.getLogger(__name__) ERROR_LOG_FILENAME = "home-assistant.log" # hass.data key for logging information. DATA_LOGGING = "logging" LOG_SLOW_STARTUP_INTERVAL = 60 STAGE_1_TIMEOUT = 120 STAGE_2_TIMEOUT = 300 WRAP_UP_TIMEOUT = 300 COOLDOWN_TIME = 60 DEBUGGER_INTEGRATIONS = {"debugpy", "ptvsd"} CORE_INTEGRATIONS = ("homeassistant", "persistent_notification") LOGGING_INTEGRATIONS = { # Set log levels "logger", # Error logging "system_log", "sentry", # To record data "recorder", } STAGE_1_INTEGRATIONS = { # To make sure we forward data to other instances "mqtt_eventstream", # To provide account link implementations "cloud", # Ensure supervisor is available "hassio", # Get the frontend up and running as soon # as possible so problem integrations can # be removed "frontend", } async def async_setup_hass( runtime_config: "RuntimeConfig", ) -> Optional[core.HomeAssistant]: """Set up Home Assistant.""" hass = core.HomeAssistant() hass.config.config_dir = runtime_config.config_dir async_enable_logging( hass, runtime_config.verbose, runtime_config.log_rotate_days, runtime_config.log_file, runtime_config.log_no_color, ) hass.config.skip_pip = runtime_config.skip_pip if runtime_config.skip_pip: _LOGGER.warning( "Skipping pip installation of required modules. This may cause issues" ) if not await conf_util.async_ensure_config_exists(hass): _LOGGER.error("Error getting configuration path") return None _LOGGER.info("Config directory: %s", runtime_config.config_dir) config_dict = None basic_setup_success = False safe_mode = runtime_config.safe_mode if not safe_mode: await hass.async_add_executor_job(conf_util.process_ha_config_upgrade, hass) try: config_dict = await conf_util.async_hass_config_yaml(hass) except HomeAssistantError as err: _LOGGER.error( "Failed to parse configuration.yaml: %s. Activating safe mode", err, ) else: if not is_virtual_env(): await async_mount_local_lib_path(runtime_config.config_dir) basic_setup_success = ( await async_from_config_dict(config_dict, hass) is not None ) finally: clear_secret_cache() if config_dict is None: safe_mode = True elif not basic_setup_success: _LOGGER.warning("Unable to set up core integrations. Activating safe mode") safe_mode = True elif ( "frontend" in hass.data.get(DATA_SETUP, {}) and "frontend" not in hass.config.components ): _LOGGER.warning("Detected that frontend did not load. Activating safe mode") # Ask integrations to shut down. 
It's messy but we can't # do a clean stop without knowing what is broken hass.async_track_tasks() hass.bus.async_fire(EVENT_HOMEASSISTANT_STOP, {}) with contextlib.suppress(asyncio.TimeoutError): async with hass.timeout.async_timeout(10): await hass.async_block_till_done() safe_mode = True old_config = hass.config hass = core.HomeAssistant() hass.config.skip_pip = old_config.skip_pip hass.config.internal_url = old_config.internal_url hass.config.external_url = old_config.external_url hass.config.config_dir = old_config.config_dir if safe_mode: _LOGGER.info("Starting in safe mode") hass.config.safe_mode = True http_conf = (await http.async_get_last_config(hass)) or {} await async_from_config_dict( {"safe_mode": {}, "http": http_conf}, hass, ) if runtime_config.open_ui: hass.add_job(open_hass_ui, hass) return hass def open_hass_ui(hass: core.HomeAssistant) -> None: """Open the UI.""" import webbrowser # pylint: disable=import-outside-toplevel if hass.config.api is None or "frontend" not in hass.config.components: _LOGGER.warning("Cannot launch the UI because frontend not loaded") return scheme = "https" if hass.config.api.use_ssl else "http" url = str( yarl.URL.build(scheme=scheme, host="127.0.0.1", port=hass.config.api.port) ) if not webbrowser.open(url): _LOGGER.warning( "Unable to open the Home Assistant UI in a browser. Open it yourself at %s", url, ) async def async_from_config_dict( config: ConfigType, hass: core.HomeAssistant ) -> Optional[core.HomeAssistant]: """Try to configure Home Assistant from a configuration dictionary. Dynamically loads required components and its dependencies. This method is a coroutine. """ start = monotonic() hass.config_entries = config_entries.ConfigEntries(hass, config) await hass.config_entries.async_initialize() # Set up core. _LOGGER.debug("Setting up %s", CORE_INTEGRATIONS) if not all( await asyncio.gather( *( async_setup_component(hass, domain, config) for domain in CORE_INTEGRATIONS ) ) ): _LOGGER.error("Home Assistant core failed to initialize. ") return None _LOGGER.debug("Home Assistant core initialized") core_config = config.get(core.DOMAIN, {}) try: await conf_util.async_process_ha_core_config(hass, core_config) except vol.Invalid as config_err: conf_util.async_log_exception(config_err, "homeassistant", core_config, hass) return None except HomeAssistantError: _LOGGER.error( "Home Assistant core failed to initialize. " "Further initialization aborted" ) return None await _async_set_up_integrations(hass, config) stop = monotonic() _LOGGER.info("Home Assistant initialized in %.2fs", stop - start) if REQUIRED_NEXT_PYTHON_DATE and sys.version_info[:3] < REQUIRED_NEXT_PYTHON_VER: msg = ( "Support for the running Python version " f"{'.'.join(str(x) for x in sys.version_info[:3])} is deprecated and will " f"be removed in the first release after {REQUIRED_NEXT_PYTHON_DATE}. " "Please upgrade Python to " f"{'.'.join(str(x) for x in REQUIRED_NEXT_PYTHON_VER)} or " "higher." ) _LOGGER.warning(msg) hass.components.persistent_notification.async_create( msg, "Python version", "python_version" ) return hass @core.callback def async_enable_logging( hass: core.HomeAssistant, verbose: bool = False, log_rotate_days: Optional[int] = None, log_file: Optional[str] = None, log_no_color: bool = False, ) -> None: """Set up the logging. This method must be run in the event loop. 
""" fmt = "%(asctime)s %(levelname)s (%(threadName)s) [%(name)s] %(message)s" datefmt = "%Y-%m-%d %H:%M:%S" if not log_no_color: try: # pylint: disable=import-outside-toplevel from colorlog import ColoredFormatter # basicConfig must be called after importing colorlog in order to # ensure that the handlers it sets up wraps the correct streams. logging.basicConfig(level=logging.INFO) colorfmt = f"%(log_color)s{fmt}%(reset)s" logging.getLogger().handlers[0].setFormatter( ColoredFormatter( colorfmt, datefmt=datefmt, reset=True, log_colors={ "DEBUG": "cyan", "INFO": "green", "WARNING": "yellow", "ERROR": "red", "CRITICAL": "red", }, ) ) except ImportError: pass # If the above initialization failed for any reason, setup the default # formatting. If the above succeeds, this will result in a no-op. logging.basicConfig(format=fmt, datefmt=datefmt, level=logging.INFO) # Suppress overly verbose logs from libraries that aren't helpful logging.getLogger("requests").setLevel(logging.WARNING) logging.getLogger("urllib3").setLevel(logging.WARNING) logging.getLogger("aiohttp.access").setLevel(logging.WARNING) sys.excepthook = lambda *args: logging.getLogger(None).exception( "Uncaught exception", exc_info=args # type: ignore ) if sys.version_info[:2] >= (3, 8): threading.excepthook = lambda args: logging.getLogger(None).exception( "Uncaught thread exception", exc_info=(args.exc_type, args.exc_value, args.exc_traceback), ) # Log errors to a file if we have write access to file or config dir if log_file is None: err_log_path = hass.config.path(ERROR_LOG_FILENAME) else: err_log_path = os.path.abspath(log_file) err_path_exists = os.path.isfile(err_log_path) err_dir = os.path.dirname(err_log_path) # Check if we can write to the error log if it exists or that # we can create files in the containing directory if not. if (err_path_exists and os.access(err_log_path, os.W_OK)) or ( not err_path_exists and os.access(err_dir, os.W_OK) ): if log_rotate_days: err_handler: logging.FileHandler = ( logging.handlers.TimedRotatingFileHandler( err_log_path, when="midnight", backupCount=log_rotate_days ) ) else: err_handler = logging.FileHandler(err_log_path, mode="w", delay=True) err_handler.setLevel(logging.INFO if verbose else logging.WARNING) err_handler.setFormatter(logging.Formatter(fmt, datefmt=datefmt)) logger = logging.getLogger("") logger.addHandler(err_handler) logger.setLevel(logging.INFO if verbose else logging.WARNING) # Save the log file location for access by other components. hass.data[DATA_LOGGING] = err_log_path else: _LOGGER.error("Unable to set up error log %s (access denied)", err_log_path) async_activate_log_queue_handler(hass) async def async_mount_local_lib_path(config_dir: str) -> str: """Add local library to Python Path. This function is a coroutine. 
""" deps_dir = os.path.join(config_dir, "deps") lib_dir = await async_get_user_site(deps_dir) if lib_dir not in sys.path: sys.path.insert(0, lib_dir) return deps_dir @core.callback def _get_domains(hass: core.HomeAssistant, config: Dict[str, Any]) -> Set[str]: """Get domains of components to set up.""" # Filter out the repeating and common config section [homeassistant] domains = {key.split(" ")[0] for key in config if key != core.DOMAIN} # Add config entry domains if not hass.config.safe_mode: domains.update(hass.config_entries.async_domains()) # Make sure the Hass.io component is loaded if "HASSIO" in os.environ: domains.add("hassio") return domains async def _async_log_pending_setups( domains: Set[str], setup_started: Dict[str, datetime] ) -> None: """Periodic log of setups that are pending for longer than LOG_SLOW_STARTUP_INTERVAL.""" while True: await asyncio.sleep(LOG_SLOW_STARTUP_INTERVAL) remaining = [domain for domain in domains if domain in setup_started] if remaining: _LOGGER.warning( "Waiting on integrations to complete setup: %s", ", ".join(remaining), ) async def async_setup_multi_components( hass: core.HomeAssistant, domains: Set[str], config: Dict[str, Any], setup_started: Dict[str, datetime], ) -> None: """Set up multiple domains. Log on failure.""" futures = { domain: hass.async_create_task(async_setup_component(hass, domain, config)) for domain in domains } log_task = asyncio.create_task(_async_log_pending_setups(domains, setup_started)) await asyncio.wait(futures.values()) log_task.cancel() errors = [domain for domain in domains if futures[domain].exception()] for domain in errors: exception = futures[domain].exception() assert exception is not None _LOGGER.error( "Error setting up integration %s - received exception", domain, exc_info=(type(exception), exception, exception.__traceback__), ) async def _async_set_up_integrations( hass: core.HomeAssistant, config: Dict[str, Any] ) -> None: """Set up all the integrations.""" setup_started = hass.data[DATA_SETUP_STARTED] = {} domains_to_setup = _get_domains(hass, config) # Resolve all dependencies so we know all integrations # that will have to be loaded and start rightaway integration_cache: Dict[str, loader.Integration] = {} to_resolve = domains_to_setup while to_resolve: old_to_resolve = to_resolve to_resolve = set() integrations_to_process = [ int_or_exc for int_or_exc in await asyncio.gather( *( loader.async_get_integration(hass, domain) for domain in old_to_resolve ), return_exceptions=True, ) if isinstance(int_or_exc, loader.Integration) ] resolve_dependencies_tasks = [ itg.resolve_dependencies() for itg in integrations_to_process if not itg.all_dependencies_resolved ] if resolve_dependencies_tasks: await asyncio.gather(*resolve_dependencies_tasks) for itg in integrations_to_process: integration_cache[itg.domain] = itg for dep in itg.all_dependencies: if dep in domains_to_setup: continue domains_to_setup.add(dep) to_resolve.add(dep) _LOGGER.info("Domains to be set up: %s", domains_to_setup) logging_domains = domains_to_setup & LOGGING_INTEGRATIONS # Load logging as soon as possible if logging_domains: _LOGGER.info("Setting up logging: %s", logging_domains) await async_setup_multi_components(hass, logging_domains, config, setup_started) # Start up debuggers. Start these first in case they want to wait. 
debuggers = domains_to_setup & DEBUGGER_INTEGRATIONS if debuggers: _LOGGER.debug("Setting up debuggers: %s", debuggers) await async_setup_multi_components(hass, debuggers, config, setup_started) # calculate what components to setup in what stage stage_1_domains = set() # Find all dependencies of any dependency of any stage 1 integration that # we plan on loading and promote them to stage 1 deps_promotion = STAGE_1_INTEGRATIONS while deps_promotion: old_deps_promotion = deps_promotion deps_promotion = set() for domain in old_deps_promotion: if domain not in domains_to_setup or domain in stage_1_domains: continue stage_1_domains.add(domain) dep_itg = integration_cache.get(domain) if dep_itg is None: continue deps_promotion.update(dep_itg.all_dependencies) stage_2_domains = domains_to_setup - logging_domains - debuggers - stage_1_domains # Kick off loading the registries. They don't need to be awaited. asyncio.create_task(hass.helpers.device_registry.async_get_registry()) asyncio.create_task(hass.helpers.entity_registry.async_get_registry()) asyncio.create_task(hass.helpers.area_registry.async_get_registry()) # Start setup if stage_1_domains: _LOGGER.info("Setting up stage 1: %s", stage_1_domains) try: async with hass.timeout.async_timeout( STAGE_1_TIMEOUT, cool_down=COOLDOWN_TIME ): await async_setup_multi_components( hass, stage_1_domains, config, setup_started ) except asyncio.TimeoutError: _LOGGER.warning("Setup timed out for stage 1 - moving forward") # Enables after dependencies async_set_domains_to_be_loaded(hass, stage_1_domains | stage_2_domains) if stage_2_domains: _LOGGER.info("Setting up stage 2: %s", stage_2_domains) try: async with hass.timeout.async_timeout( STAGE_2_TIMEOUT, cool_down=COOLDOWN_TIME ): await async_setup_multi_components( hass, stage_2_domains, config, setup_started ) except asyncio.TimeoutError: _LOGGER.warning("Setup timed out for stage 2 - moving forward") # Wrap up startup _LOGGER.debug("Waiting for startup to wrap up") try: async with hass.timeout.async_timeout(WRAP_UP_TIMEOUT, cool_down=COOLDOWN_TIME): await hass.async_block_till_done() except asyncio.TimeoutError: _LOGGER.warning("Setup timed out for bootstrap - moving forward")
"""Define tests for the GDACS config flow.""" from datetime import timedelta import pytest from homeassistant import data_entry_flow from homeassistant.components.gdacs import CONF_CATEGORIES, DOMAIN from homeassistant.const import ( CONF_LATITUDE, CONF_LONGITUDE, CONF_RADIUS, CONF_SCAN_INTERVAL, ) from tests.async_mock import patch @pytest.fixture(name="gdacs_setup", autouse=True) def gdacs_setup_fixture(): """Mock gdacs entry setup.""" with patch("homeassistant.components.gdacs.async_setup_entry", return_value=True): yield async def test_duplicate_error(hass, config_entry): """Test that errors are shown when duplicates are added.""" conf = {CONF_LATITUDE: -41.2, CONF_LONGITUDE: 174.7, CONF_RADIUS: 25} config_entry.add_to_hass(hass) result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": "user"}, data=conf ) assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT assert result["reason"] == "already_configured" async def test_show_form(hass): """Test that the form is served with no input.""" result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": "user"} ) assert result["type"] == data_entry_flow.RESULT_TYPE_FORM assert result["step_id"] == "user" async def test_step_import(hass): """Test that the import step works.""" conf = { CONF_LATITUDE: -41.2, CONF_LONGITUDE: 174.7, CONF_RADIUS: 25, CONF_SCAN_INTERVAL: timedelta(minutes=4), CONF_CATEGORIES: ["Drought", "Earthquake"], } result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": "import"}, data=conf ) assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY assert result["title"] == "-41.2, 174.7" assert result["data"] == { CONF_LATITUDE: -41.2, CONF_LONGITUDE: 174.7, CONF_RADIUS: 25, CONF_SCAN_INTERVAL: 240.0, CONF_CATEGORIES: ["Drought", "Earthquake"], } async def test_step_user(hass): """Test that the user step works.""" hass.config.latitude = -41.2 hass.config.longitude = 174.7 conf = {CONF_RADIUS: 25} result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": "user"}, data=conf ) assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY assert result["title"] == "-41.2, 174.7" assert result["data"] == { CONF_LATITUDE: -41.2, CONF_LONGITUDE: 174.7, CONF_RADIUS: 25, CONF_SCAN_INTERVAL: 300.0, CONF_CATEGORIES: [], }
balloob/home-assistant
tests/components/gdacs/test_config_flow.py
homeassistant/bootstrap.py
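The `_get_domains` helper in the bootstrap module above derives the set of integration domains from the raw configuration keys. A minimal sketch of that derivation, using a made-up configuration dict:

# Illustration only: how _get_domains above extracts domains from config keys.
# The configuration dict here is hypothetical.
config = {"homeassistant": {}, "sensor": [], "sensor 2": [], "light": {}, "automation": []}

# Everything before the first space is the domain; the core section is skipped.
domains = {key.split(" ")[0] for key in config if key != "homeassistant"}
assert domains == {"sensor", "light", "automation"}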
"""Support for Nexia Automations.""" from typing import Any from homeassistant.components.scene import Scene from homeassistant.helpers.event import async_call_later from .const import ATTR_DESCRIPTION, DOMAIN, NEXIA_DEVICE, UPDATE_COORDINATOR from .entity import NexiaEntity SCENE_ACTIVATION_TIME = 5 async def async_setup_entry(hass, config_entry, async_add_entities): """Set up automations for a Nexia device.""" nexia_data = hass.data[DOMAIN][config_entry.entry_id] nexia_home = nexia_data[NEXIA_DEVICE] coordinator = nexia_data[UPDATE_COORDINATOR] entities = [] # Automation switches for automation_id in nexia_home.get_automation_ids(): automation = nexia_home.get_automation_by_id(automation_id) entities.append(NexiaAutomationScene(coordinator, automation)) async_add_entities(entities, True) class NexiaAutomationScene(NexiaEntity, Scene): """Provides Nexia automation support.""" def __init__(self, coordinator, automation): """Initialize the automation scene.""" super().__init__( coordinator, name=automation.name, unique_id=automation.automation_id, ) self._automation = automation @property def device_state_attributes(self): """Return the scene specific state attributes.""" data = super().device_state_attributes data[ATTR_DESCRIPTION] = self._automation.description return data @property def icon(self): """Return the icon of the automation scene.""" return "mdi:script-text-outline" async def async_activate(self, **kwargs: Any) -> None: """Activate an automation scene.""" await self.hass.async_add_executor_job(self._automation.activate) async def refresh_callback(_): await self.coordinator.async_refresh() async_call_later(self.hass, SCENE_ACTIVATION_TIME, refresh_callback)
"""Define tests for the GDACS config flow.""" from datetime import timedelta import pytest from homeassistant import data_entry_flow from homeassistant.components.gdacs import CONF_CATEGORIES, DOMAIN from homeassistant.const import ( CONF_LATITUDE, CONF_LONGITUDE, CONF_RADIUS, CONF_SCAN_INTERVAL, ) from tests.async_mock import patch @pytest.fixture(name="gdacs_setup", autouse=True) def gdacs_setup_fixture(): """Mock gdacs entry setup.""" with patch("homeassistant.components.gdacs.async_setup_entry", return_value=True): yield async def test_duplicate_error(hass, config_entry): """Test that errors are shown when duplicates are added.""" conf = {CONF_LATITUDE: -41.2, CONF_LONGITUDE: 174.7, CONF_RADIUS: 25} config_entry.add_to_hass(hass) result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": "user"}, data=conf ) assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT assert result["reason"] == "already_configured" async def test_show_form(hass): """Test that the form is served with no input.""" result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": "user"} ) assert result["type"] == data_entry_flow.RESULT_TYPE_FORM assert result["step_id"] == "user" async def test_step_import(hass): """Test that the import step works.""" conf = { CONF_LATITUDE: -41.2, CONF_LONGITUDE: 174.7, CONF_RADIUS: 25, CONF_SCAN_INTERVAL: timedelta(minutes=4), CONF_CATEGORIES: ["Drought", "Earthquake"], } result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": "import"}, data=conf ) assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY assert result["title"] == "-41.2, 174.7" assert result["data"] == { CONF_LATITUDE: -41.2, CONF_LONGITUDE: 174.7, CONF_RADIUS: 25, CONF_SCAN_INTERVAL: 240.0, CONF_CATEGORIES: ["Drought", "Earthquake"], } async def test_step_user(hass): """Test that the user step works.""" hass.config.latitude = -41.2 hass.config.longitude = 174.7 conf = {CONF_RADIUS: 25} result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": "user"}, data=conf ) assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY assert result["title"] == "-41.2, 174.7" assert result["data"] == { CONF_LATITUDE: -41.2, CONF_LONGITUDE: 174.7, CONF_RADIUS: 25, CONF_SCAN_INTERVAL: 300.0, CONF_CATEGORIES: [], }
balloob/home-assistant
tests/components/gdacs/test_config_flow.py
homeassistant/components/nexia/scene.py
"""Handle intents with scripts.""" import copy import voluptuous as vol from homeassistant.helpers import config_validation as cv, intent, script, template DOMAIN = "intent_script" CONF_INTENTS = "intents" CONF_SPEECH = "speech" CONF_ACTION = "action" CONF_CARD = "card" CONF_TYPE = "type" CONF_TITLE = "title" CONF_CONTENT = "content" CONF_TEXT = "text" CONF_ASYNC_ACTION = "async_action" DEFAULT_CONF_ASYNC_ACTION = False CONFIG_SCHEMA = vol.Schema( { DOMAIN: { cv.string: { vol.Optional(CONF_ACTION): cv.SCRIPT_SCHEMA, vol.Optional( CONF_ASYNC_ACTION, default=DEFAULT_CONF_ASYNC_ACTION ): cv.boolean, vol.Optional(CONF_CARD): { vol.Optional(CONF_TYPE, default="simple"): cv.string, vol.Required(CONF_TITLE): cv.template, vol.Required(CONF_CONTENT): cv.template, }, vol.Optional(CONF_SPEECH): { vol.Optional(CONF_TYPE, default="plain"): cv.string, vol.Required(CONF_TEXT): cv.template, }, } } }, extra=vol.ALLOW_EXTRA, ) async def async_setup(hass, config): """Activate Alexa component.""" intents = copy.deepcopy(config[DOMAIN]) template.attach(hass, intents) for intent_type, conf in intents.items(): if CONF_ACTION in conf: conf[CONF_ACTION] = script.Script( hass, conf[CONF_ACTION], f"Intent Script {intent_type}", DOMAIN ) intent.async_register(hass, ScriptIntentHandler(intent_type, conf)) return True class ScriptIntentHandler(intent.IntentHandler): """Respond to an intent with a script.""" def __init__(self, intent_type, config): """Initialize the script intent handler.""" self.intent_type = intent_type self.config = config async def async_handle(self, intent_obj): """Handle the intent.""" speech = self.config.get(CONF_SPEECH) card = self.config.get(CONF_CARD) action = self.config.get(CONF_ACTION) is_async_action = self.config.get(CONF_ASYNC_ACTION) slots = {key: value["value"] for key, value in intent_obj.slots.items()} if action is not None: if is_async_action: intent_obj.hass.async_create_task( action.async_run(slots, intent_obj.context) ) else: await action.async_run(slots, intent_obj.context) response = intent_obj.create_response() if speech is not None: response.async_set_speech( speech[CONF_TEXT].async_render(slots, parse_result=False), speech[CONF_TYPE], ) if card is not None: response.async_set_card( card[CONF_TITLE].async_render(slots, parse_result=False), card[CONF_CONTENT].async_render(slots, parse_result=False), card[CONF_TYPE], ) return response
"""Define tests for the GDACS config flow.""" from datetime import timedelta import pytest from homeassistant import data_entry_flow from homeassistant.components.gdacs import CONF_CATEGORIES, DOMAIN from homeassistant.const import ( CONF_LATITUDE, CONF_LONGITUDE, CONF_RADIUS, CONF_SCAN_INTERVAL, ) from tests.async_mock import patch @pytest.fixture(name="gdacs_setup", autouse=True) def gdacs_setup_fixture(): """Mock gdacs entry setup.""" with patch("homeassistant.components.gdacs.async_setup_entry", return_value=True): yield async def test_duplicate_error(hass, config_entry): """Test that errors are shown when duplicates are added.""" conf = {CONF_LATITUDE: -41.2, CONF_LONGITUDE: 174.7, CONF_RADIUS: 25} config_entry.add_to_hass(hass) result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": "user"}, data=conf ) assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT assert result["reason"] == "already_configured" async def test_show_form(hass): """Test that the form is served with no input.""" result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": "user"} ) assert result["type"] == data_entry_flow.RESULT_TYPE_FORM assert result["step_id"] == "user" async def test_step_import(hass): """Test that the import step works.""" conf = { CONF_LATITUDE: -41.2, CONF_LONGITUDE: 174.7, CONF_RADIUS: 25, CONF_SCAN_INTERVAL: timedelta(minutes=4), CONF_CATEGORIES: ["Drought", "Earthquake"], } result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": "import"}, data=conf ) assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY assert result["title"] == "-41.2, 174.7" assert result["data"] == { CONF_LATITUDE: -41.2, CONF_LONGITUDE: 174.7, CONF_RADIUS: 25, CONF_SCAN_INTERVAL: 240.0, CONF_CATEGORIES: ["Drought", "Earthquake"], } async def test_step_user(hass): """Test that the user step works.""" hass.config.latitude = -41.2 hass.config.longitude = 174.7 conf = {CONF_RADIUS: 25} result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": "user"}, data=conf ) assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY assert result["title"] == "-41.2, 174.7" assert result["data"] == { CONF_LATITUDE: -41.2, CONF_LONGITUDE: 174.7, CONF_RADIUS: 25, CONF_SCAN_INTERVAL: 300.0, CONF_CATEGORIES: [], }
balloob/home-assistant
tests/components/gdacs/test_config_flow.py
homeassistant/components/intent_script/__init__.py
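For reference, a configuration that would satisfy the CONFIG_SCHEMA in the intent_script module above could look like the following; the intent name, templates, and service are invented for illustration:

# Hypothetical intent_script configuration expressed as the Python dict the
# schema above would validate; all names, templates, and services are made up.
example_config = {
    "intent_script": {
        "GetTemperature": {
            "speech": {
                "type": "plain",
                "text": "It is {{ states('sensor.temperature') }} degrees",
            },
            "action": [
                {
                    "service": "notify.notify",
                    "data": {"message": "Someone asked for the temperature"},
                }
            ],
        }
    }
}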
"""Support for the Dyson 360 eye vacuum cleaner robot.""" import logging from libpurecool.const import Dyson360EyeMode, PowerMode from libpurecool.dyson_360_eye import Dyson360Eye from homeassistant.components.vacuum import ( SUPPORT_BATTERY, SUPPORT_FAN_SPEED, SUPPORT_PAUSE, SUPPORT_RETURN_HOME, SUPPORT_STATUS, SUPPORT_STOP, SUPPORT_TURN_OFF, SUPPORT_TURN_ON, VacuumEntity, ) from homeassistant.helpers.icon import icon_for_battery_level from . import DYSON_DEVICES _LOGGER = logging.getLogger(__name__) ATTR_CLEAN_ID = "clean_id" ATTR_FULL_CLEAN_TYPE = "full_clean_type" ATTR_POSITION = "position" DYSON_360_EYE_DEVICES = "dyson_360_eye_devices" SUPPORT_DYSON = ( SUPPORT_TURN_ON | SUPPORT_TURN_OFF | SUPPORT_PAUSE | SUPPORT_RETURN_HOME | SUPPORT_FAN_SPEED | SUPPORT_STATUS | SUPPORT_BATTERY | SUPPORT_STOP ) def setup_platform(hass, config, add_entities, discovery_info=None): """Set up the Dyson 360 Eye robot vacuum platform.""" _LOGGER.debug("Creating new Dyson 360 Eye robot vacuum") if DYSON_360_EYE_DEVICES not in hass.data: hass.data[DYSON_360_EYE_DEVICES] = [] # Get Dyson Devices from parent component for device in [d for d in hass.data[DYSON_DEVICES] if isinstance(d, Dyson360Eye)]: dyson_entity = Dyson360EyeDevice(device) hass.data[DYSON_360_EYE_DEVICES].append(dyson_entity) add_entities(hass.data[DYSON_360_EYE_DEVICES]) return True class Dyson360EyeDevice(VacuumEntity): """Dyson 360 Eye robot vacuum device.""" def __init__(self, device): """Dyson 360 Eye robot vacuum device.""" _LOGGER.debug("Creating device %s", device.name) self._device = device async def async_added_to_hass(self): """Call when entity is added to hass.""" self._device.add_message_listener(self.on_message) def on_message(self, message): """Handle a new messages that was received from the vacuum.""" _LOGGER.debug("Message received for %s device: %s", self.name, message) self.schedule_update_ha_state() @property def should_poll(self) -> bool: """Return True if entity has to be polled for state. False if entity pushes its state to HA. 
""" return False @property def name(self): """Return the name of the device.""" return self._device.name @property def status(self): """Return the status of the vacuum cleaner.""" dyson_labels = { Dyson360EyeMode.INACTIVE_CHARGING: "Stopped - Charging", Dyson360EyeMode.INACTIVE_CHARGED: "Stopped - Charged", Dyson360EyeMode.FULL_CLEAN_PAUSED: "Paused", Dyson360EyeMode.FULL_CLEAN_RUNNING: "Cleaning", Dyson360EyeMode.FULL_CLEAN_ABORTED: "Returning home", Dyson360EyeMode.FULL_CLEAN_INITIATED: "Start cleaning", Dyson360EyeMode.FAULT_USER_RECOVERABLE: "Error - device blocked", Dyson360EyeMode.FAULT_REPLACE_ON_DOCK: "Error - Replace device on dock", Dyson360EyeMode.FULL_CLEAN_FINISHED: "Finished", Dyson360EyeMode.FULL_CLEAN_NEEDS_CHARGE: "Need charging", } return dyson_labels.get(self._device.state.state, self._device.state.state) @property def battery_level(self): """Return the battery level of the vacuum cleaner.""" return self._device.state.battery_level @property def fan_speed(self): """Return the fan speed of the vacuum cleaner.""" speed_labels = {PowerMode.MAX: "Max", PowerMode.QUIET: "Quiet"} return speed_labels[self._device.state.power_mode] @property def fan_speed_list(self): """Get the list of available fan speed steps of the vacuum cleaner.""" return ["Quiet", "Max"] @property def device_state_attributes(self): """Return the specific state attributes of this vacuum cleaner.""" return {ATTR_POSITION: str(self._device.state.position)} @property def is_on(self) -> bool: """Return True if entity is on.""" return self._device.state.state in [ Dyson360EyeMode.FULL_CLEAN_INITIATED, Dyson360EyeMode.FULL_CLEAN_ABORTED, Dyson360EyeMode.FULL_CLEAN_RUNNING, ] @property def available(self) -> bool: """Return True if entity is available.""" return True @property def supported_features(self): """Flag vacuum cleaner robot features that are supported.""" return SUPPORT_DYSON @property def battery_icon(self): """Return the battery icon for the vacuum cleaner.""" charging = self._device.state.state in [Dyson360EyeMode.INACTIVE_CHARGING] return icon_for_battery_level( battery_level=self.battery_level, charging=charging ) def turn_on(self, **kwargs): """Turn the vacuum on.""" _LOGGER.debug("Turn on device %s", self.name) if self._device.state.state in [Dyson360EyeMode.FULL_CLEAN_PAUSED]: self._device.resume() else: self._device.start() def turn_off(self, **kwargs): """Turn the vacuum off and return to home.""" _LOGGER.debug("Turn off device %s", self.name) self._device.pause() def stop(self, **kwargs): """Stop the vacuum cleaner.""" _LOGGER.debug("Stop device %s", self.name) self._device.pause() def set_fan_speed(self, fan_speed, **kwargs): """Set fan speed.""" _LOGGER.debug("Set fan speed %s on device %s", fan_speed, self.name) power_modes = {"Quiet": PowerMode.QUIET, "Max": PowerMode.MAX} self._device.set_power_mode(power_modes[fan_speed]) def start_pause(self, **kwargs): """Start, pause or resume the cleaning task.""" if self._device.state.state in [Dyson360EyeMode.FULL_CLEAN_PAUSED]: _LOGGER.debug("Resume device %s", self.name) self._device.resume() elif self._device.state.state in [ Dyson360EyeMode.INACTIVE_CHARGED, Dyson360EyeMode.INACTIVE_CHARGING, ]: _LOGGER.debug("Start device %s", self.name) self._device.start() else: _LOGGER.debug("Pause device %s", self.name) self._device.pause() def return_to_base(self, **kwargs): """Set the vacuum cleaner to return to the dock.""" _LOGGER.debug("Return to base device %s", self.name) self._device.abort()
"""Define tests for the GDACS config flow.""" from datetime import timedelta import pytest from homeassistant import data_entry_flow from homeassistant.components.gdacs import CONF_CATEGORIES, DOMAIN from homeassistant.const import ( CONF_LATITUDE, CONF_LONGITUDE, CONF_RADIUS, CONF_SCAN_INTERVAL, ) from tests.async_mock import patch @pytest.fixture(name="gdacs_setup", autouse=True) def gdacs_setup_fixture(): """Mock gdacs entry setup.""" with patch("homeassistant.components.gdacs.async_setup_entry", return_value=True): yield async def test_duplicate_error(hass, config_entry): """Test that errors are shown when duplicates are added.""" conf = {CONF_LATITUDE: -41.2, CONF_LONGITUDE: 174.7, CONF_RADIUS: 25} config_entry.add_to_hass(hass) result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": "user"}, data=conf ) assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT assert result["reason"] == "already_configured" async def test_show_form(hass): """Test that the form is served with no input.""" result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": "user"} ) assert result["type"] == data_entry_flow.RESULT_TYPE_FORM assert result["step_id"] == "user" async def test_step_import(hass): """Test that the import step works.""" conf = { CONF_LATITUDE: -41.2, CONF_LONGITUDE: 174.7, CONF_RADIUS: 25, CONF_SCAN_INTERVAL: timedelta(minutes=4), CONF_CATEGORIES: ["Drought", "Earthquake"], } result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": "import"}, data=conf ) assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY assert result["title"] == "-41.2, 174.7" assert result["data"] == { CONF_LATITUDE: -41.2, CONF_LONGITUDE: 174.7, CONF_RADIUS: 25, CONF_SCAN_INTERVAL: 240.0, CONF_CATEGORIES: ["Drought", "Earthquake"], } async def test_step_user(hass): """Test that the user step works.""" hass.config.latitude = -41.2 hass.config.longitude = 174.7 conf = {CONF_RADIUS: 25} result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": "user"}, data=conf ) assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY assert result["title"] == "-41.2, 174.7" assert result["data"] == { CONF_LATITUDE: -41.2, CONF_LONGITUDE: 174.7, CONF_RADIUS: 25, CONF_SCAN_INTERVAL: 300.0, CONF_CATEGORIES: [], }
balloob/home-assistant
tests/components/gdacs/test_config_flow.py
homeassistant/components/dyson/vacuum.py
"""Connect two Home Assistant instances via MQTT.""" import json import voluptuous as vol from homeassistant.components.mqtt import valid_publish_topic, valid_subscribe_topic from homeassistant.const import ( ATTR_SERVICE_DATA, EVENT_CALL_SERVICE, EVENT_STATE_CHANGED, EVENT_TIME_CHANGED, MATCH_ALL, ) from homeassistant.core import EventOrigin, State, callback import homeassistant.helpers.config_validation as cv from homeassistant.helpers.json import JSONEncoder DOMAIN = "mqtt_eventstream" CONF_PUBLISH_TOPIC = "publish_topic" CONF_SUBSCRIBE_TOPIC = "subscribe_topic" CONF_PUBLISH_EVENTSTREAM_RECEIVED = "publish_eventstream_received" CONF_IGNORE_EVENT = "ignore_event" CONFIG_SCHEMA = vol.Schema( { DOMAIN: vol.Schema( { vol.Optional(CONF_PUBLISH_TOPIC): valid_publish_topic, vol.Optional(CONF_SUBSCRIBE_TOPIC): valid_subscribe_topic, vol.Optional( CONF_PUBLISH_EVENTSTREAM_RECEIVED, default=False ): cv.boolean, vol.Optional(CONF_IGNORE_EVENT, default=[]): cv.ensure_list, } ) }, extra=vol.ALLOW_EXTRA, ) async def async_setup(hass, config): """Set up the MQTT eventstream component.""" mqtt = hass.components.mqtt conf = config.get(DOMAIN, {}) pub_topic = conf.get(CONF_PUBLISH_TOPIC) sub_topic = conf.get(CONF_SUBSCRIBE_TOPIC) ignore_event = conf.get(CONF_IGNORE_EVENT) @callback def _event_publisher(event): """Handle events by publishing them on the MQTT queue.""" if event.origin != EventOrigin.local: return if event.event_type == EVENT_TIME_CHANGED: return # User-defined events to ignore if event.event_type in ignore_event: return # Filter out the events that were triggered by publishing # to the MQTT topic, or you will end up in an infinite loop. if event.event_type == EVENT_CALL_SERVICE: if ( event.data.get("domain") == mqtt.DOMAIN and event.data.get("service") == mqtt.SERVICE_PUBLISH and event.data[ATTR_SERVICE_DATA].get("topic") == pub_topic ): return event_info = {"event_type": event.event_type, "event_data": event.data} msg = json.dumps(event_info, cls=JSONEncoder) mqtt.async_publish(pub_topic, msg) # Only listen for local events if you are going to publish them. if pub_topic: hass.bus.async_listen(MATCH_ALL, _event_publisher) # Process events from a remote server that are received on a queue. @callback def _event_receiver(msg): """Receive events published by and fire them on this hass instance.""" event = json.loads(msg.payload) event_type = event.get("event_type") event_data = event.get("event_data") # Special case handling for event STATE_CHANGED # We will try to convert state dicts back to State objects # Copied over from the _handle_api_post_events_event method # of the api component. if event_type == EVENT_STATE_CHANGED and event_data: for key in ("old_state", "new_state"): state = State.from_dict(event_data.get(key)) if state: event_data[key] = state hass.bus.async_fire( event_type, event_data=event_data, origin=EventOrigin.remote ) # Only subscribe if you specified a topic. if sub_topic: await mqtt.async_subscribe(sub_topic, _event_receiver) return True
"""Define tests for the GDACS config flow.""" from datetime import timedelta import pytest from homeassistant import data_entry_flow from homeassistant.components.gdacs import CONF_CATEGORIES, DOMAIN from homeassistant.const import ( CONF_LATITUDE, CONF_LONGITUDE, CONF_RADIUS, CONF_SCAN_INTERVAL, ) from tests.async_mock import patch @pytest.fixture(name="gdacs_setup", autouse=True) def gdacs_setup_fixture(): """Mock gdacs entry setup.""" with patch("homeassistant.components.gdacs.async_setup_entry", return_value=True): yield async def test_duplicate_error(hass, config_entry): """Test that errors are shown when duplicates are added.""" conf = {CONF_LATITUDE: -41.2, CONF_LONGITUDE: 174.7, CONF_RADIUS: 25} config_entry.add_to_hass(hass) result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": "user"}, data=conf ) assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT assert result["reason"] == "already_configured" async def test_show_form(hass): """Test that the form is served with no input.""" result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": "user"} ) assert result["type"] == data_entry_flow.RESULT_TYPE_FORM assert result["step_id"] == "user" async def test_step_import(hass): """Test that the import step works.""" conf = { CONF_LATITUDE: -41.2, CONF_LONGITUDE: 174.7, CONF_RADIUS: 25, CONF_SCAN_INTERVAL: timedelta(minutes=4), CONF_CATEGORIES: ["Drought", "Earthquake"], } result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": "import"}, data=conf ) assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY assert result["title"] == "-41.2, 174.7" assert result["data"] == { CONF_LATITUDE: -41.2, CONF_LONGITUDE: 174.7, CONF_RADIUS: 25, CONF_SCAN_INTERVAL: 240.0, CONF_CATEGORIES: ["Drought", "Earthquake"], } async def test_step_user(hass): """Test that the user step works.""" hass.config.latitude = -41.2 hass.config.longitude = 174.7 conf = {CONF_RADIUS: 25} result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": "user"}, data=conf ) assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY assert result["title"] == "-41.2, 174.7" assert result["data"] == { CONF_LATITUDE: -41.2, CONF_LONGITUDE: 174.7, CONF_RADIUS: 25, CONF_SCAN_INTERVAL: 300.0, CONF_CATEGORIES: [], }
balloob/home-assistant
tests/components/gdacs/test_config_flow.py
homeassistant/components/mqtt_eventstream/__init__.py
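The `_event_publisher` callback in the mqtt_eventstream module above serializes each event as JSON with `event_type` and `event_data` keys, and `_event_receiver` expects the same shape on the subscribe topic. A sketch of such a payload; the entity id and state values are illustrative, and State objects are shown as plain dicts rather than through the JSONEncoder used in the component:

# Shape of the message exchanged over MQTT by the eventstream component,
# mirroring the event_info dict built in _event_publisher above.
import json

event_info = {
    "event_type": "state_changed",
    "event_data": {
        "entity_id": "light.kitchen",
        "old_state": None,
        "new_state": {"entity_id": "light.kitchen", "state": "on", "attributes": {}},
    },
}
payload = json.dumps(event_info)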
""" Manage allocation of accessory ID's. HomeKit needs to allocate unique numbers to each accessory. These need to be stable between reboots and upgrades. Using a hash function to generate them means collisions. It also means you can't change the hash without causing breakages for HA users. This module generates and stores them in a HA storage. """ import random from fnvhash import fnv1a_32 from homeassistant.config_entries import ConfigEntry from homeassistant.core import HomeAssistant, callback from homeassistant.helpers.entity_registry import RegistryEntry from homeassistant.helpers.storage import Store from .util import get_aid_storage_filename_for_entry_id AID_MANAGER_STORAGE_VERSION = 1 AID_MANAGER_SAVE_DELAY = 2 ALLOCATIONS_KEY = "allocations" UNIQUE_IDS_KEY = "unique_ids" INVALID_AIDS = (0, 1) AID_MIN = 2 AID_MAX = 18446744073709551615 def get_system_unique_id(entity: RegistryEntry): """Determine the system wide unique_id for an entity.""" return f"{entity.platform}.{entity.domain}.{entity.unique_id}" def _generate_aids(unique_id: str, entity_id: str) -> int: """Generate accessory aid.""" if unique_id: # Use fnv1a_32 of the unique id as # fnv1a_32 has less collisions than # adler32 yield fnv1a_32(unique_id.encode("utf-8")) # If there is no unique id we use # fnv1a_32 as it is unlikely to collide yield fnv1a_32(entity_id.encode("utf-8")) # If called again resort to random allocations. # Given the size of the range its unlikely we'll encounter duplicates # But try a few times regardless for _ in range(5): yield random.randrange(AID_MIN, AID_MAX) class AccessoryAidStorage: """ Holds a map of entity ID to HomeKit ID. Will generate new ID's, ensure they are unique and store them to make sure they persist over reboots. """ def __init__(self, hass: HomeAssistant, entry: ConfigEntry): """Create a new entity map store.""" self.hass = hass self.allocations = {} self.allocated_aids = set() self._entry = entry self.store = None self._entity_registry = None async def async_initialize(self): """Load the latest AID data.""" self._entity_registry = ( await self.hass.helpers.entity_registry.async_get_registry() ) aidstore = get_aid_storage_filename_for_entry_id(self._entry) self.store = Store(self.hass, AID_MANAGER_STORAGE_VERSION, aidstore) raw_storage = await self.store.async_load() if not raw_storage: # There is no data about aid allocations yet return self.allocations = raw_storage.get(ALLOCATIONS_KEY, {}) self.allocated_aids = set(self.allocations.values()) def get_or_allocate_aid_for_entity_id(self, entity_id: str): """Generate a stable aid for an entity id.""" entity = self._entity_registry.async_get(entity_id) if not entity: return self._get_or_allocate_aid(None, entity_id) sys_unique_id = get_system_unique_id(entity) return self._get_or_allocate_aid(sys_unique_id, entity_id) def _get_or_allocate_aid(self, unique_id: str, entity_id: str): """Allocate (and return) a new aid for an accessory.""" if unique_id and unique_id in self.allocations: return self.allocations[unique_id] if entity_id in self.allocations: return self.allocations[entity_id] for aid in _generate_aids(unique_id, entity_id): if aid in INVALID_AIDS: continue if aid not in self.allocated_aids: # Prefer the unique_id over the entitiy_id storage_key = unique_id or entity_id self.allocations[storage_key] = aid self.allocated_aids.add(aid) self.async_schedule_save() return aid raise ValueError( f"Unable to generate unique aid allocation for {entity_id} [{unique_id}]" ) def delete_aid(self, storage_key: str): """Delete an aid 
allocation.""" if storage_key not in self.allocations: return aid = self.allocations.pop(storage_key) self.allocated_aids.discard(aid) self.async_schedule_save() @callback def async_schedule_save(self): """Schedule saving the entity map cache.""" self.store.async_delay_save(self._data_to_save, AID_MANAGER_SAVE_DELAY) async def async_save(self): """Save the entity map cache.""" return await self.store.async_save(self._data_to_save()) @callback def _data_to_save(self): """Return data of entity map to store in a file.""" return {ALLOCATIONS_KEY: self.allocations}
"""Define tests for the GDACS config flow.""" from datetime import timedelta import pytest from homeassistant import data_entry_flow from homeassistant.components.gdacs import CONF_CATEGORIES, DOMAIN from homeassistant.const import ( CONF_LATITUDE, CONF_LONGITUDE, CONF_RADIUS, CONF_SCAN_INTERVAL, ) from tests.async_mock import patch @pytest.fixture(name="gdacs_setup", autouse=True) def gdacs_setup_fixture(): """Mock gdacs entry setup.""" with patch("homeassistant.components.gdacs.async_setup_entry", return_value=True): yield async def test_duplicate_error(hass, config_entry): """Test that errors are shown when duplicates are added.""" conf = {CONF_LATITUDE: -41.2, CONF_LONGITUDE: 174.7, CONF_RADIUS: 25} config_entry.add_to_hass(hass) result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": "user"}, data=conf ) assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT assert result["reason"] == "already_configured" async def test_show_form(hass): """Test that the form is served with no input.""" result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": "user"} ) assert result["type"] == data_entry_flow.RESULT_TYPE_FORM assert result["step_id"] == "user" async def test_step_import(hass): """Test that the import step works.""" conf = { CONF_LATITUDE: -41.2, CONF_LONGITUDE: 174.7, CONF_RADIUS: 25, CONF_SCAN_INTERVAL: timedelta(minutes=4), CONF_CATEGORIES: ["Drought", "Earthquake"], } result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": "import"}, data=conf ) assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY assert result["title"] == "-41.2, 174.7" assert result["data"] == { CONF_LATITUDE: -41.2, CONF_LONGITUDE: 174.7, CONF_RADIUS: 25, CONF_SCAN_INTERVAL: 240.0, CONF_CATEGORIES: ["Drought", "Earthquake"], } async def test_step_user(hass): """Test that the user step works.""" hass.config.latitude = -41.2 hass.config.longitude = 174.7 conf = {CONF_RADIUS: 25} result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": "user"}, data=conf ) assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY assert result["title"] == "-41.2, 174.7" assert result["data"] == { CONF_LATITUDE: -41.2, CONF_LONGITUDE: 174.7, CONF_RADIUS: 25, CONF_SCAN_INTERVAL: 300.0, CONF_CATEGORIES: [], }
balloob/home-assistant
tests/components/gdacs/test_config_flow.py
homeassistant/components/homekit/aidmanager.py
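The aid manager above prefers a deterministic fnv1a_32 hash of the entity's system-wide unique id, falls back to a hash of the entity id, and only then resorts to random values. A minimal sketch of the first candidate, with collision handling and storage omitted and the ids invented:

# Sketch of the first aid candidate produced by _generate_aids above;
# collision handling against allocated aids is omitted, and the ids are made up.
from fnvhash import fnv1a_32


def first_aid_candidate(unique_id, entity_id):
    """Return the first (deterministic) aid candidate for an entity."""
    source = unique_id if unique_id else entity_id
    return fnv1a_32(source.encode("utf-8"))


print(first_aid_candidate("hue.light.00:17:88:01:ab", "light.kitchen"))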
"""Support for Freebox Delta, Revolution and Mini 4K.""" import logging from typing import Dict from aiofreepybox.exceptions import InsufficientPermissionsError from homeassistant.components.switch import SwitchEntity from homeassistant.config_entries import ConfigEntry from homeassistant.helpers.typing import HomeAssistantType from .const import DOMAIN from .router import FreeboxRouter _LOGGER = logging.getLogger(__name__) async def async_setup_entry( hass: HomeAssistantType, entry: ConfigEntry, async_add_entities ) -> None: """Set up the switch.""" router = hass.data[DOMAIN][entry.unique_id] async_add_entities([FreeboxWifiSwitch(router)], True) class FreeboxWifiSwitch(SwitchEntity): """Representation of a freebox wifi switch.""" def __init__(self, router: FreeboxRouter) -> None: """Initialize the Wifi switch.""" self._name = "Freebox WiFi" self._state = None self._router = router self._unique_id = f"{self._router.mac} {self._name}" @property def unique_id(self) -> str: """Return a unique ID.""" return self._unique_id @property def name(self) -> str: """Return the name of the switch.""" return self._name @property def is_on(self) -> bool: """Return true if device is on.""" return self._state @property def device_info(self) -> Dict[str, any]: """Return the device information.""" return self._router.device_info async def _async_set_state(self, enabled: bool): """Turn the switch on or off.""" wifi_config = {"enabled": enabled} try: await self._router.wifi.set_global_config(wifi_config) except InsufficientPermissionsError: _LOGGER.warning( "Home Assistant does not have permissions to modify the Freebox settings. Please refer to documentation" ) async def async_turn_on(self, **kwargs): """Turn the switch on.""" await self._async_set_state(True) async def async_turn_off(self, **kwargs): """Turn the switch off.""" await self._async_set_state(False) async def async_update(self): """Get the state and update it.""" datas = await self._router.wifi.get_global_config() active = datas["enabled"] self._state = bool(active)
"""Define tests for the GDACS config flow.""" from datetime import timedelta import pytest from homeassistant import data_entry_flow from homeassistant.components.gdacs import CONF_CATEGORIES, DOMAIN from homeassistant.const import ( CONF_LATITUDE, CONF_LONGITUDE, CONF_RADIUS, CONF_SCAN_INTERVAL, ) from tests.async_mock import patch @pytest.fixture(name="gdacs_setup", autouse=True) def gdacs_setup_fixture(): """Mock gdacs entry setup.""" with patch("homeassistant.components.gdacs.async_setup_entry", return_value=True): yield async def test_duplicate_error(hass, config_entry): """Test that errors are shown when duplicates are added.""" conf = {CONF_LATITUDE: -41.2, CONF_LONGITUDE: 174.7, CONF_RADIUS: 25} config_entry.add_to_hass(hass) result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": "user"}, data=conf ) assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT assert result["reason"] == "already_configured" async def test_show_form(hass): """Test that the form is served with no input.""" result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": "user"} ) assert result["type"] == data_entry_flow.RESULT_TYPE_FORM assert result["step_id"] == "user" async def test_step_import(hass): """Test that the import step works.""" conf = { CONF_LATITUDE: -41.2, CONF_LONGITUDE: 174.7, CONF_RADIUS: 25, CONF_SCAN_INTERVAL: timedelta(minutes=4), CONF_CATEGORIES: ["Drought", "Earthquake"], } result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": "import"}, data=conf ) assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY assert result["title"] == "-41.2, 174.7" assert result["data"] == { CONF_LATITUDE: -41.2, CONF_LONGITUDE: 174.7, CONF_RADIUS: 25, CONF_SCAN_INTERVAL: 240.0, CONF_CATEGORIES: ["Drought", "Earthquake"], } async def test_step_user(hass): """Test that the user step works.""" hass.config.latitude = -41.2 hass.config.longitude = 174.7 conf = {CONF_RADIUS: 25} result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": "user"}, data=conf ) assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY assert result["title"] == "-41.2, 174.7" assert result["data"] == { CONF_LATITUDE: -41.2, CONF_LONGITUDE: 174.7, CONF_RADIUS: 25, CONF_SCAN_INTERVAL: 300.0, CONF_CATEGORIES: [], }
balloob/home-assistant
tests/components/gdacs/test_config_flow.py
homeassistant/components/freebox/switch.py
"""Support for ZoneMinder camera streaming.""" import logging from homeassistant.components.mjpeg.camera import ( CONF_MJPEG_URL, CONF_STILL_IMAGE_URL, MjpegCamera, filter_urllib3_logging, ) from homeassistant.const import CONF_NAME, CONF_VERIFY_SSL from . import DOMAIN as ZONEMINDER_DOMAIN _LOGGER = logging.getLogger(__name__) def setup_platform(hass, config, add_entities, discovery_info=None): """Set up the ZoneMinder cameras.""" filter_urllib3_logging() cameras = [] for zm_client in hass.data[ZONEMINDER_DOMAIN].values(): monitors = zm_client.get_monitors() if not monitors: _LOGGER.warning("Could not fetch monitors from ZoneMinder host: %s") return for monitor in monitors: _LOGGER.info("Initializing camera %s", monitor.id) cameras.append(ZoneMinderCamera(monitor, zm_client.verify_ssl)) add_entities(cameras) class ZoneMinderCamera(MjpegCamera): """Representation of a ZoneMinder Monitor Stream.""" def __init__(self, monitor, verify_ssl): """Initialize as a subclass of MjpegCamera.""" device_info = { CONF_NAME: monitor.name, CONF_MJPEG_URL: monitor.mjpeg_image_url, CONF_STILL_IMAGE_URL: monitor.still_image_url, CONF_VERIFY_SSL: verify_ssl, } super().__init__(device_info) self._is_recording = None self._is_available = None self._monitor = monitor @property def should_poll(self): """Update the recording state periodically.""" return True def update(self): """Update our recording state from the ZM API.""" _LOGGER.debug("Updating camera state for monitor %i", self._monitor.id) self._is_recording = self._monitor.is_recording self._is_available = self._monitor.is_available @property def is_recording(self): """Return whether the monitor is in alarm mode.""" return self._is_recording @property def available(self): """Return True if entity is available.""" return self._is_available
"""Define tests for the GDACS config flow.""" from datetime import timedelta import pytest from homeassistant import data_entry_flow from homeassistant.components.gdacs import CONF_CATEGORIES, DOMAIN from homeassistant.const import ( CONF_LATITUDE, CONF_LONGITUDE, CONF_RADIUS, CONF_SCAN_INTERVAL, ) from tests.async_mock import patch @pytest.fixture(name="gdacs_setup", autouse=True) def gdacs_setup_fixture(): """Mock gdacs entry setup.""" with patch("homeassistant.components.gdacs.async_setup_entry", return_value=True): yield async def test_duplicate_error(hass, config_entry): """Test that errors are shown when duplicates are added.""" conf = {CONF_LATITUDE: -41.2, CONF_LONGITUDE: 174.7, CONF_RADIUS: 25} config_entry.add_to_hass(hass) result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": "user"}, data=conf ) assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT assert result["reason"] == "already_configured" async def test_show_form(hass): """Test that the form is served with no input.""" result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": "user"} ) assert result["type"] == data_entry_flow.RESULT_TYPE_FORM assert result["step_id"] == "user" async def test_step_import(hass): """Test that the import step works.""" conf = { CONF_LATITUDE: -41.2, CONF_LONGITUDE: 174.7, CONF_RADIUS: 25, CONF_SCAN_INTERVAL: timedelta(minutes=4), CONF_CATEGORIES: ["Drought", "Earthquake"], } result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": "import"}, data=conf ) assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY assert result["title"] == "-41.2, 174.7" assert result["data"] == { CONF_LATITUDE: -41.2, CONF_LONGITUDE: 174.7, CONF_RADIUS: 25, CONF_SCAN_INTERVAL: 240.0, CONF_CATEGORIES: ["Drought", "Earthquake"], } async def test_step_user(hass): """Test that the user step works.""" hass.config.latitude = -41.2 hass.config.longitude = 174.7 conf = {CONF_RADIUS: 25} result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": "user"}, data=conf ) assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY assert result["title"] == "-41.2, 174.7" assert result["data"] == { CONF_LATITUDE: -41.2, CONF_LONGITUDE: 174.7, CONF_RADIUS: 25, CONF_SCAN_INTERVAL: 300.0, CONF_CATEGORIES: [], }
balloob/home-assistant
tests/components/gdacs/test_config_flow.py
homeassistant/components/zoneminder/camera.py
"""Support for Rheem EcoNet water heaters.""" import datetime import logging from pyeconet.api import PyEcoNet import voluptuous as vol from homeassistant.components.water_heater import ( PLATFORM_SCHEMA, STATE_ECO, STATE_ELECTRIC, STATE_GAS, STATE_HEAT_PUMP, STATE_HIGH_DEMAND, STATE_OFF, STATE_PERFORMANCE, SUPPORT_OPERATION_MODE, SUPPORT_TARGET_TEMPERATURE, WaterHeaterEntity, ) from homeassistant.const import ( ATTR_ENTITY_ID, ATTR_TEMPERATURE, CONF_PASSWORD, CONF_USERNAME, TEMP_FAHRENHEIT, ) import homeassistant.helpers.config_validation as cv from .const import DOMAIN, SERVICE_ADD_VACATION, SERVICE_DELETE_VACATION _LOGGER = logging.getLogger(__name__) ATTR_VACATION_START = "next_vacation_start_date" ATTR_VACATION_END = "next_vacation_end_date" ATTR_ON_VACATION = "on_vacation" ATTR_TODAYS_ENERGY_USAGE = "todays_energy_usage" ATTR_IN_USE = "in_use" ATTR_START_DATE = "start_date" ATTR_END_DATE = "end_date" ATTR_LOWER_TEMP = "lower_temp" ATTR_UPPER_TEMP = "upper_temp" ATTR_IS_ENABLED = "is_enabled" SUPPORT_FLAGS_HEATER = SUPPORT_TARGET_TEMPERATURE | SUPPORT_OPERATION_MODE ADD_VACATION_SCHEMA = vol.Schema( { vol.Optional(ATTR_ENTITY_ID): cv.entity_ids, vol.Optional(ATTR_START_DATE): cv.positive_int, vol.Required(ATTR_END_DATE): cv.positive_int, } ) DELETE_VACATION_SCHEMA = vol.Schema({vol.Optional(ATTR_ENTITY_ID): cv.entity_ids}) ECONET_DATA = "econet" ECONET_STATE_TO_HA = { "Energy Saver": STATE_ECO, "gas": STATE_GAS, "High Demand": STATE_HIGH_DEMAND, "Off": STATE_OFF, "Performance": STATE_PERFORMANCE, "Heat Pump Only": STATE_HEAT_PUMP, "Electric-Only": STATE_ELECTRIC, "Electric": STATE_ELECTRIC, "Heat Pump": STATE_HEAT_PUMP, } PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend( {vol.Required(CONF_USERNAME): cv.string, vol.Required(CONF_PASSWORD): cv.string} ) def setup_platform(hass, config, add_entities, discovery_info=None): """Set up the EcoNet water heaters.""" hass.data[ECONET_DATA] = {} hass.data[ECONET_DATA]["water_heaters"] = [] username = config.get(CONF_USERNAME) password = config.get(CONF_PASSWORD) econet = PyEcoNet(username, password) water_heaters = econet.get_water_heaters() hass_water_heaters = [ EcoNetWaterHeater(water_heater) for water_heater in water_heaters ] add_entities(hass_water_heaters) hass.data[ECONET_DATA]["water_heaters"].extend(hass_water_heaters) def service_handle(service): """Handle the service calls.""" entity_ids = service.data.get("entity_id") all_heaters = hass.data[ECONET_DATA]["water_heaters"] _heaters = [ x for x in all_heaters if not entity_ids or x.entity_id in entity_ids ] for _water_heater in _heaters: if service.service == SERVICE_ADD_VACATION: start = service.data.get(ATTR_START_DATE) end = service.data.get(ATTR_END_DATE) _water_heater.add_vacation(start, end) if service.service == SERVICE_DELETE_VACATION: for vacation in _water_heater.water_heater.vacations: vacation.delete() _water_heater.schedule_update_ha_state(True) hass.services.register( DOMAIN, SERVICE_ADD_VACATION, service_handle, schema=ADD_VACATION_SCHEMA ) hass.services.register( DOMAIN, SERVICE_DELETE_VACATION, service_handle, schema=DELETE_VACATION_SCHEMA ) class EcoNetWaterHeater(WaterHeaterEntity): """Representation of an EcoNet water heater.""" def __init__(self, water_heater): """Initialize the water heater.""" self.water_heater = water_heater self.supported_modes = self.water_heater.supported_modes self.econet_state_to_ha = {} self.ha_state_to_econet = {} for mode in ECONET_STATE_TO_HA: if mode in self.supported_modes: self.econet_state_to_ha[mode] = ECONET_STATE_TO_HA.get(mode) for 
key, value in self.econet_state_to_ha.items(): self.ha_state_to_econet[value] = key for mode in self.supported_modes: if mode not in ECONET_STATE_TO_HA: error = f"Invalid operation mode mapping. {mode} doesn't map. Please report this." _LOGGER.error(error) @property def name(self): """Return the device name.""" return self.water_heater.name @property def available(self): """Return if the the device is online or not.""" return self.water_heater.is_connected @property def temperature_unit(self): """Return the unit of measurement.""" return TEMP_FAHRENHEIT @property def device_state_attributes(self): """Return the optional device state attributes.""" data = {} vacations = self.water_heater.get_vacations() if vacations: data[ATTR_VACATION_START] = vacations[0].start_date data[ATTR_VACATION_END] = vacations[0].end_date data[ATTR_ON_VACATION] = self.water_heater.is_on_vacation todays_usage = self.water_heater.total_usage_for_today if todays_usage: data[ATTR_TODAYS_ENERGY_USAGE] = todays_usage data[ATTR_IN_USE] = self.water_heater.in_use if self.water_heater.lower_temp is not None: data[ATTR_LOWER_TEMP] = round(self.water_heater.lower_temp, 2) if self.water_heater.upper_temp is not None: data[ATTR_UPPER_TEMP] = round(self.water_heater.upper_temp, 2) if self.water_heater.is_enabled is not None: data[ATTR_IS_ENABLED] = self.water_heater.is_enabled return data @property def current_operation(self): """ Return current operation as one of the following. ["eco", "heat_pump", "high_demand", "electric_only"] """ current_op = self.econet_state_to_ha.get(self.water_heater.mode) return current_op @property def operation_list(self): """List of available operation modes.""" op_list = [] for mode in self.supported_modes: ha_mode = self.econet_state_to_ha.get(mode) if ha_mode is not None: op_list.append(ha_mode) return op_list @property def supported_features(self): """Return the list of supported features.""" return SUPPORT_FLAGS_HEATER def set_temperature(self, **kwargs): """Set new target temperature.""" target_temp = kwargs.get(ATTR_TEMPERATURE) if target_temp is not None: self.water_heater.set_target_set_point(target_temp) else: _LOGGER.error("A target temperature must be provided") def set_operation_mode(self, operation_mode): """Set operation mode.""" op_mode_to_set = self.ha_state_to_econet.get(operation_mode) if op_mode_to_set is not None: self.water_heater.set_mode(op_mode_to_set) else: _LOGGER.error("An operation mode must be provided") def add_vacation(self, start, end): """Add a vacation to this water heater.""" if not start: start = datetime.datetime.now() else: start = datetime.datetime.fromtimestamp(start) end = datetime.datetime.fromtimestamp(end) self.water_heater.set_vacation_mode(start, end) def update(self): """Get the latest date.""" self.water_heater.update_state() @property def target_temperature(self): """Return the temperature we try to reach.""" return self.water_heater.set_point @property def min_temp(self): """Return the minimum temperature.""" return self.water_heater.min_set_point @property def max_temp(self): """Return the maximum temperature.""" return self.water_heater.max_set_point
"""Define tests for the GDACS config flow.""" from datetime import timedelta import pytest from homeassistant import data_entry_flow from homeassistant.components.gdacs import CONF_CATEGORIES, DOMAIN from homeassistant.const import ( CONF_LATITUDE, CONF_LONGITUDE, CONF_RADIUS, CONF_SCAN_INTERVAL, ) from tests.async_mock import patch @pytest.fixture(name="gdacs_setup", autouse=True) def gdacs_setup_fixture(): """Mock gdacs entry setup.""" with patch("homeassistant.components.gdacs.async_setup_entry", return_value=True): yield async def test_duplicate_error(hass, config_entry): """Test that errors are shown when duplicates are added.""" conf = {CONF_LATITUDE: -41.2, CONF_LONGITUDE: 174.7, CONF_RADIUS: 25} config_entry.add_to_hass(hass) result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": "user"}, data=conf ) assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT assert result["reason"] == "already_configured" async def test_show_form(hass): """Test that the form is served with no input.""" result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": "user"} ) assert result["type"] == data_entry_flow.RESULT_TYPE_FORM assert result["step_id"] == "user" async def test_step_import(hass): """Test that the import step works.""" conf = { CONF_LATITUDE: -41.2, CONF_LONGITUDE: 174.7, CONF_RADIUS: 25, CONF_SCAN_INTERVAL: timedelta(minutes=4), CONF_CATEGORIES: ["Drought", "Earthquake"], } result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": "import"}, data=conf ) assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY assert result["title"] == "-41.2, 174.7" assert result["data"] == { CONF_LATITUDE: -41.2, CONF_LONGITUDE: 174.7, CONF_RADIUS: 25, CONF_SCAN_INTERVAL: 240.0, CONF_CATEGORIES: ["Drought", "Earthquake"], } async def test_step_user(hass): """Test that the user step works.""" hass.config.latitude = -41.2 hass.config.longitude = 174.7 conf = {CONF_RADIUS: 25} result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": "user"}, data=conf ) assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY assert result["title"] == "-41.2, 174.7" assert result["data"] == { CONF_LATITUDE: -41.2, CONF_LONGITUDE: 174.7, CONF_RADIUS: 25, CONF_SCAN_INTERVAL: 300.0, CONF_CATEGORIES: [], }
balloob/home-assistant
tests/components/gdacs/test_config_flow.py
homeassistant/components/econet/water_heater.py
from django.db import models
from datetime import datetime

from osf.utils.storage import BannerImageStorage
from osf.exceptions import ValidationValueError
from osf.utils.fields import NonNaiveDateTimeField


def validate_banner_dates(banner_id, start_date, end_date):
    if start_date > end_date:
        raise ValidationValueError('Start date must be before end date.')

    overlapping = ScheduledBanner.objects.filter(
        (models.Q(start_date__gte=start_date) & models.Q(start_date__lte=end_date)) |
        (models.Q(end_date__gte=start_date) & models.Q(end_date__lte=end_date)) |
        (models.Q(start_date__lte=start_date) & models.Q(end_date__gte=end_date))
    ).exclude(id=banner_id).exists()

    if overlapping:
        raise ValidationValueError('Banners dates cannot be overlapping.')


class ScheduledBanner(models.Model):
    class Meta:
        # Custom permissions for use in the OSF Admin App
        permissions = (
            ('view_scheduledbanner', 'Can view scheduled banner details'),
        )

    name = models.CharField(unique=True, max_length=256)

    start_date = NonNaiveDateTimeField()
    end_date = NonNaiveDateTimeField()

    color = models.CharField(max_length=7)
    license = models.CharField(blank=True, null=True, max_length=256)
    link = models.URLField(blank=True, default='https://www.crowdrise.com/centerforopenscience')

    default_photo = models.FileField(storage=BannerImageStorage())
    default_alt_text = models.TextField()
    mobile_photo = models.FileField(storage=BannerImageStorage())
    mobile_alt_text = models.TextField(blank=True, null=True)

    def save(self, *args, **kwargs):
        self.start_date = datetime.combine(self.start_date, datetime.min.time())
        self.end_date = datetime.combine(self.end_date, datetime.max.time())
        validate_banner_dates(self.id, self.start_date, self.end_date)
        super(ScheduledBanner, self).save(*args, **kwargs)
from __future__ import unicode_literals import itsdangerous import mock import pytest import pytz from django.utils import timezone from addons.base.utils import get_mfr_url from addons.github.models import GithubFileNode from addons.osfstorage import settings as osfstorage_settings from addons.osfstorage.listeners import checkin_files_task from api.base.settings.defaults import API_BASE from api_tests import utils as api_utils from framework.auth.core import Auth from osf.models import NodeLog, Session, QuickFilesNode from osf.utils.permissions import WRITE, READ from osf.utils.workflows import DefaultStates from osf_tests.factories import ( AuthUserFactory, CommentFactory, ProjectFactory, UserFactory, PreprintFactory, ) from website import settings as website_settings # stolen from^W^Winspired by DRF # rest_framework.fields.DateTimeField.to_representation def _dt_to_iso8601(value): iso8601 = value.isoformat() if iso8601.endswith('+00:00'): iso8601 = iso8601[:-6] + 'Z' # microsecond precision return iso8601 @pytest.fixture() def user(): return AuthUserFactory() @pytest.mark.django_db @pytest.mark.enable_quickfiles_creation class TestFileView: @pytest.fixture() def node(self, user): return ProjectFactory(creator=user, comment_level='public') @pytest.fixture() def quickfiles_node(self, user): return QuickFilesNode.objects.get(creator=user) @pytest.fixture() def file(self, user, node): return api_utils.create_test_file(node, user, create_guid=False) @pytest.fixture() def file_url(self, file): return '/{}files/{}/'.format(API_BASE, file._id) def test_must_have_auth_and_be_contributor(self, app, file_url): # test_must_have_auth(self, app, file_url): res = app.get(file_url, expect_errors=True) assert res.status_code == 401 # test_must_be_contributor(self, app, file_url): non_contributor = AuthUserFactory() res = app.get(file_url, auth=non_contributor.auth, expect_errors=True) assert res.status_code == 403 def test_deleted_file_return_410(self, app, node, user): deleted_file = api_utils.create_test_file(node, user, create_guid=True) url_with_guid = '/{}files/{}/'.format( API_BASE, deleted_file.get_guid()._id ) url_with_id = '/{}files/{}/'.format(API_BASE, deleted_file._id) res = app.get(url_with_guid, auth=user.auth) assert res.status_code == 200 res = app.get(url_with_id, auth=user.auth) assert res.status_code == 200 deleted_file.delete(user=user, save=True) res = app.get(url_with_guid, auth=user.auth, expect_errors=True) assert res.status_code == 410 res = app.get(url_with_id, auth=user.auth, expect_errors=True) assert res.status_code == 410 def test_disabled_users_quickfiles_file_detail_gets_410(self, app, quickfiles_node, user): file_node = api_utils.create_test_file(quickfiles_node, user, create_guid=True) url_with_guid = '/{}files/{}/'.format( API_BASE, file_node.get_guid()._id ) url_with_id = '/{}files/{}/'.format(API_BASE, file_node._id) res = app.get(url_with_id) assert res.status_code == 200 res = app.get(url_with_guid, auth=user.auth) assert res.status_code == 200 user.is_disabled = True user.save() res = app.get(url_with_id, expect_errors=True) assert res.json['errors'][0]['detail'] == 'This user has been deactivated and their' \ ' quickfiles are no longer available.' assert res.status_code == 410 res = app.get(url_with_guid, expect_errors=True) assert res.json['errors'][0]['detail'] == 'This user has been deactivated and their' \ ' quickfiles are no longer available.' 
assert res.status_code == 410 def test_file_guid_guid_status(self, app, user, file, file_url): # test_unvisited_file_has_no_guid res = app.get(file_url, auth=user.auth) assert res.status_code == 200 assert res.json['data']['attributes']['guid'] is None # test_visited_file_has_guid guid = file.get_guid(create=True) res = app.get(file_url, auth=user.auth) assert res.status_code == 200 assert guid is not None assert res.json['data']['attributes']['guid'] == guid._id def test_file_with_wrong_guid(self, app, user): url = '/{}files/{}/'.format(API_BASE, user._id) res = app.get(url, auth=user.auth, expect_errors=True) assert res.status_code == 404 @mock.patch('api.base.throttling.CreateGuidThrottle.allow_request') def test_file_guid_not_created_with_basic_auth( self, mock_allow, app, user, file_url): res = app.get('{}?create_guid=1'.format(file_url), auth=user.auth) guid = res.json['data']['attributes'].get('guid', None) assert res.status_code == 200 assert mock_allow.call_count == 1 assert guid is None @mock.patch('api.base.throttling.CreateGuidThrottle.allow_request') def test_file_guid_created_with_cookie( self, mock_allow, app, user, file_url, file): session = Session(data={'auth_user_id': user._id}) session.save() cookie = itsdangerous.Signer( website_settings.SECRET_KEY ).sign(session._id) app.set_cookie(website_settings.COOKIE_NAME, cookie.decode()) res = app.get('{}?create_guid=1'.format(file_url), auth=user.auth) app.reset() # clear cookie assert res.status_code == 200 guid = res.json['data']['attributes'].get('guid', None) assert guid is not None assert guid == file.get_guid()._id assert mock_allow.call_count == 1 def test_get_file(self, app, user, file_url, file): res = app.get(file_url, auth=user.auth) file.versions.first().reload() assert res.status_code == 200 assert set(res.json.keys()) == {'meta', 'data'} attributes = res.json['data']['attributes'] assert attributes['path'] == file.path assert attributes['kind'] == file.kind assert attributes['name'] == file.name assert attributes['materialized_path'] == file.materialized_path assert attributes['last_touched'] is None assert attributes['provider'] == file.provider assert attributes['size'] == file.versions.first().size assert attributes['current_version'] == len(file.history) assert attributes['date_modified'] == _dt_to_iso8601( file.versions.first().created.replace(tzinfo=pytz.utc) ) assert attributes['date_created'] == _dt_to_iso8601( file.versions.last().created.replace(tzinfo=pytz.utc) ) assert attributes['extra']['hashes']['md5'] is None assert attributes['extra']['hashes']['sha256'] is None assert attributes['tags'] == [] # make sure download link has a trailing slash # so that downloads don't 301 assert res.json['data']['links']['download'].endswith('/') def test_file_has_rel_link_to_owning_project( self, app, user, file_url, node): res = app.get(file_url, auth=user.auth) assert res.status_code == 200 assert 'target' in res.json['data']['relationships'].keys() expected_url = node.api_v2_url actual_url = res.json['data']['relationships']['target']['links']['related']['href'] assert expected_url in actual_url def test_file_has_comments_link(self, app, user, file, file_url): file.get_guid(create=True) res = app.get(file_url, auth=user.auth) assert res.status_code == 200 assert 'comments' in res.json['data']['relationships'].keys() url = res.json['data']['relationships']['comments']['links']['related']['href'] assert app.get(url, auth=user.auth).status_code == 200 assert res.json['data']['type'] == 'files' def 
test_file_has_correct_unread_comments_count( self, app, user, file, node): contributor = AuthUserFactory() node.add_contributor(contributor, auth=Auth(user), save=True) CommentFactory( node=node, target=file.get_guid(create=True), user=contributor, page='files' ) res = app.get( '/{}files/{}/?related_counts=True'.format(API_BASE, file._id), auth=user.auth ) assert res.status_code == 200 unread_comments = res.json['data']['relationships']['comments']['links']['related']['meta']['unread'] assert unread_comments == 1 def test_only_project_contrib_can_comment_on_closed_project( self, app, user, node, file_url): node.comment_level = 'private' node.is_public = True node.save() res = app.get(file_url, auth=user.auth) can_comment = res.json['data']['attributes']['current_user_can_comment'] assert res.status_code == 200 assert can_comment is True non_contributor = AuthUserFactory() res = app.get(file_url, auth=non_contributor.auth) can_comment = res.json['data']['attributes']['current_user_can_comment'] assert res.status_code == 200 assert can_comment is False def test_logged_or_not_user_comment_status_on_open_project( self, app, node, file_url): node.is_public = True node.save() # test_any_loggedin_user_can_comment_on_open_project(self, app, node, # file_url): non_contributor = AuthUserFactory() res = app.get(file_url, auth=non_contributor.auth) can_comment = res.json['data']['attributes']['current_user_can_comment'] assert res.status_code == 200 assert can_comment is True # test_non_logged_in_user_cant_comment(self, app, file_url, node): res = app.get(file_url) can_comment = res.json['data']['attributes']['current_user_can_comment'] assert res.status_code == 200 assert can_comment is False def test_checkout(self, app, user, file, file_url, node): assert file.checkout is None res = app.put_json_api( file_url, { 'data': { 'id': file._id, 'type': 'files', 'attributes': { 'checkout': user._id } } }, auth=user.auth) file.reload() file.save() node.reload() assert res.status_code == 200 assert file.checkout == user res = app.get(file_url, auth=user.auth) assert node.logs.count() == 2 assert node.logs.latest().action == NodeLog.CHECKED_OUT assert node.logs.latest().user == user assert user._id == res.json['data']['relationships']['checkout']['links']['related']['meta']['id'] assert '/{}users/{}/'.format( API_BASE, user._id ) in res.json['data']['relationships']['checkout']['links']['related']['href'] res = app.put_json_api( file_url, { 'data': { 'id': file._id, 'type': 'files', 'attributes': { 'checkout': None } } }, auth=user.auth) file.reload() assert file.checkout is None assert res.status_code == 200 def test_checkout_file_error(self, app, user, file_url, file): # test_checkout_file_no_type res = app.put_json_api( file_url, {'data': {'id': file._id, 'attributes': {'checkout': user._id}}}, auth=user.auth, expect_errors=True ) assert res.status_code == 400 # test_checkout_file_no_id res = app.put_json_api( file_url, {'data': {'type': 'files', 'attributes': {'checkout': user._id}}}, auth=user.auth, expect_errors=True ) assert res.status_code == 400 # test_checkout_file_incorrect_type res = app.put_json_api( file_url, { 'data': { 'id': file._id, 'type': 'Wrong type.', 'attributes': { 'checkout': user._id } } }, auth=user.auth, expect_errors=True) assert res.status_code == 409 # test_checkout_file_incorrect_id res = app.put_json_api( file_url, { 'data': { 'id': '12345', 'type': 'files', 'attributes': { 'checkout': user._id } } }, auth=user.auth, expect_errors=True) assert res.status_code == 409 # 
test_checkout_file_no_attributes res = app.put_json_api( file_url, {'data': {'id': file._id, 'type': 'files'}}, auth=user.auth, expect_errors=True ) assert res.status_code == 400 def test_must_set_self(self, app, user, file, file_url): user_unauthorized = UserFactory() assert file.checkout is None res = app.put_json_api( file_url, { 'data': { 'id': file._id, 'type': 'files', 'attributes': { 'checkout': user_unauthorized._id } } }, auth=user.auth, expect_errors=True, ) file.reload() assert res.status_code == 400 assert file.checkout is None def test_must_be_self(self, app, file, file_url): user = AuthUserFactory() file.checkout = user file.save() res = app.put_json_api( file_url, { 'data': { 'id': file._id, 'type': 'files', 'attributes': { 'checkout': user._id } } }, auth=user.auth, expect_errors=True, ) file.reload() assert res.status_code == 403 assert file.checkout == user def test_admin_can_checkin(self, app, user, node, file, file_url): user_unauthorized = UserFactory() node.add_contributor(user_unauthorized) file.checkout = user_unauthorized file.save() res = app.put_json_api( file_url, { 'data': { 'id': file._id, 'type': 'files', 'attributes': { 'checkout': None } } }, auth=user.auth, expect_errors=True, ) file.reload() node.reload() assert res.status_code == 200 assert file.checkout is None assert node.logs.latest().action == NodeLog.CHECKED_IN assert node.logs.latest().user == user def test_admin_can_checkout(self, app, user, file_url, file, node): res = app.put_json_api( file_url, { 'data': { 'id': file._id, 'type': 'files', 'attributes': { 'checkout': user._id } } }, auth=user.auth, expect_errors=True, ) file.reload() node.reload() assert res.status_code == 200 assert file.checkout == user assert node.logs.latest().action == NodeLog.CHECKED_OUT assert node.logs.latest().user == user def test_cannot_checkin_when_already_checked_in( self, app, user, node, file, file_url): count = node.logs.count() assert not file.is_checked_out res = app.put_json_api( file_url, { 'data': { 'id': file._id, 'type': 'files', 'attributes': { 'checkout': None } } }, auth=user.auth, expect_errors=True, ) file.reload() node.reload() assert res.status_code == 200 assert node.logs.count() == count assert file.checkout is None def test_cannot_checkout_when_checked_out( self, app, user, node, file, file_url): user_unauthorized = UserFactory() node.add_contributor(user_unauthorized) file.checkout = user_unauthorized file.save() count = node.logs.count() res = app.put_json_api( file_url, { 'data': { 'id': file._id, 'type': 'files', 'attributes': { 'checkout': user._id } } }, auth=user.auth, expect_errors=True, ) file.reload() node.reload() assert res.status_code == 200 assert file.checkout == user_unauthorized assert node.logs.count() == count def test_noncontrib_and_read_contrib_cannot_checkout( self, app, file, node, file_url): # test_noncontrib_cannot_checkout non_contrib = AuthUserFactory() assert file.checkout is None assert not node.has_permission(non_contrib, READ) res = app.put_json_api( file_url, { 'data': { 'id': file._id, 'type': 'files', 'attributes': { 'checkout': non_contrib._id } } }, auth=non_contrib.auth, expect_errors=True, ) file.reload() node.reload() assert res.status_code == 403 assert file.checkout is None assert node.logs.latest().action != NodeLog.CHECKED_OUT # test_read_contrib_cannot_checkout read_contrib = AuthUserFactory() node.add_contributor(read_contrib, permissions=READ) node.save() assert not node.can_edit(user=read_contrib) res = app.put_json_api( file_url, { 'data': { 'id': 
file._id, 'type': 'files', 'attributes': { 'checkout': None } } }, auth=read_contrib.auth, expect_errors=True) file.reload() assert res.status_code == 403 assert file.checkout is None assert node.logs.latest().action != NodeLog.CHECKED_OUT def test_write_contrib_can_checkin(self, app, node, file, file_url): write_contrib = AuthUserFactory() node.add_contributor(write_contrib, permissions=WRITE) node.save() assert node.can_edit(user=write_contrib) file.checkout = write_contrib file.save() res = app.put_json_api( file_url, { 'data': { 'id': file._id, 'type': 'files', 'attributes': { 'checkout': None } } }, auth=write_contrib.auth, ) file.reload() assert res.status_code == 200 assert file.checkout is None @mock.patch('addons.osfstorage.listeners.enqueue_postcommit_task') def test_removed_contrib_files_checked_in(self, mock_enqueue, app, node, file): write_contrib = AuthUserFactory() node.add_contributor(write_contrib, permissions=WRITE) node.save() assert node.can_edit(user=write_contrib) file.checkout = write_contrib file.save() assert file.is_checked_out node.remove_contributor(write_contrib, auth=Auth(write_contrib)) mock_enqueue.assert_called_with(checkin_files_task, (node._id, write_contrib._id,), {}, celery=True) def test_must_be_osfstorage(self, app, user, file, file_url): file.recast(GithubFileNode._typedmodels_type) file.save() res = app.put_json_api( file_url, { 'data': { 'id': file._id, 'type': 'files', 'attributes': { 'checkout': user._id } } }, auth=user.auth, expect_errors=True, ) assert res.status_code == 403 def test_get_file_guids_misc(self, app, user, file, node): # test_get_file_resolves_guids guid = file.get_guid(create=True) url = '/{}files/{}/'.format(API_BASE, guid._id) res = app.get(url, auth=user.auth) assert res.status_code == 200 assert set(res.json.keys()) == {'meta', 'data'} assert res.json['data']['attributes']['path'] == file.path # test_get_file_invalid_guid_gives_404 url = '/{}files/{}/'.format(API_BASE, 'asdasasd') res = app.get(url, auth=user.auth, expect_errors=True) assert res.status_code == 404 # test_get_file_non_file_guid_gives_404 url = '/{}files/{}/'.format(API_BASE, node._id) res = app.get(url, auth=user.auth, expect_errors=True) assert res.status_code == 404 def test_current_version_is_equal_to_length_of_history( self, app, user, file_url, file): res = app.get(file_url, auth=user.auth) assert res.json['data']['attributes']['current_version'] == 1 for version in range(2, 4): file.create_version(user, { 'object': '06d80e' + str(version), 'service': 'cloud', osfstorage_settings.WATERBUTLER_RESOURCE: 'osf', }, {'size': 1337, 'contentType': 'img/png'}).save() res = app.get(file_url, auth=user.auth) assert res.json['data']['attributes']['current_version'] == version # Regression test for OSF-7758 def test_folder_files_relationships_contains_guid_not_id( self, app, user, node): folder = node.get_addon('osfstorage').get_root( ).append_folder('I\'d be a teacher!!') folder.save() folder_url = '/{}files/{}/'.format(API_BASE, folder._id) res = app.get(folder_url, auth=user.auth) split_href = res.json['data']['relationships']['files']['links']['related']['href'].split( '/') assert node._id in split_href assert node.id not in split_href def test_embed_user_on_quickfiles_detail(self, app, user): quickfiles = QuickFilesNode.objects.get(creator=user) osfstorage = quickfiles.get_addon('osfstorage') root = osfstorage.get_root() test_file = root.append_file('speedyfile.txt') url = '/{}files/{}/?embed=user'.format(API_BASE, test_file._id) res = app.get(url, 
auth=user.auth) assert res.json['data'].get('embeds', None) assert res.json['data']['embeds'].get('user') assert res.json['data']['embeds']['user']['data']['id'] == user._id @pytest.mark.django_db class TestFileVersionView: @pytest.fixture() def node(self, user): return ProjectFactory(creator=user) @pytest.fixture() def osfstorage(self, node): return node.get_addon('osfstorage') @pytest.fixture() def root_node(self, osfstorage): return osfstorage.get_root() @pytest.fixture() def file(self, root_node, user): file = root_node.append_file('test_file') file.create_version(user, { 'object': '06d80e', 'service': 'cloud', osfstorage_settings.WATERBUTLER_RESOURCE: 'osf', }, { 'size': 1337, 'contentType': 'img/png' }).save() return file def test_listing(self, app, user, file): file.create_version(user, { 'object': '0683m38e', 'service': 'cloud', osfstorage_settings.WATERBUTLER_RESOURCE: 'osf', }, { 'size': 1347, 'contentType': 'img/png' }).save() res = app.get( '/{}files/{}/versions/'.format(API_BASE, file._id), auth=user.auth, ) assert res.status_code == 200 assert len(res.json['data']) == 2 assert res.json['data'][0]['id'] == '2' assert res.json['data'][0]['attributes']['name'] == file.name assert res.json['data'][1]['id'] == '1' assert res.json['data'][1]['attributes']['name'] == file.name def test_load_and_property(self, app, user, file): # test_by_id res = app.get( '/{}files/{}/versions/1/'.format(API_BASE, file._id), auth=user.auth, ) assert res.status_code == 200 assert res.json['data']['id'] == '1' mfr_url = get_mfr_url(file, 'osfstorage') render_link = res.json['data']['links']['render'] download_link = res.json['data']['links']['download'] assert mfr_url in render_link assert download_link in render_link assert 'revision=1' in render_link guid = file.get_guid(create=True)._id res = app.get( '/{}files/{}/versions/1/'.format(API_BASE, file._id), auth=user.auth, ) render_link = res.json['data']['links']['render'] download_link = res.json['data']['links']['download'] assert mfr_url in render_link assert download_link in render_link assert guid in render_link assert 'revision=1' in render_link # test_read_only assert app.put( '/{}files/{}/versions/1/'.format(API_BASE, file._id), expect_errors=True, auth=user.auth, ).status_code == 405 assert app.post( '/{}files/{}/versions/1/'.format(API_BASE, file._id), expect_errors=True, auth=user.auth, ).status_code == 405 assert app.delete( '/{}files/{}/versions/1/'.format(API_BASE, file._id), expect_errors=True, auth=user.auth, ).status_code == 405 @pytest.mark.django_db class TestFileTagging: @pytest.fixture() def node(self, user): return ProjectFactory(creator=user) @pytest.fixture() def file_one(self, user, node): return api_utils.create_test_file( node, user, filename='file_one') @pytest.fixture() def payload(self, file_one): payload = { 'data': { 'type': 'files', 'id': file_one._id, 'attributes': { 'checkout': None, 'tags': ['goofy'] } } } return payload @pytest.fixture() def url(self, file_one): return '/{}files/{}/'.format(API_BASE, file_one._id) def test_tags_add_and_update_properly(self, app, user, url, payload): # test_tags_add_properly res = app.put_json_api(url, payload, auth=user.auth) assert res.status_code == 200 # Ensure adding tag data is correct from the PUT response assert len(res.json['data']['attributes']['tags']) == 1 assert res.json['data']['attributes']['tags'][0] == 'goofy' # test_tags_update_properly # Ensure removing and adding tag data is correct from the PUT response payload['data']['attributes']['tags'] = ['goofier'] res = 
app.put_json_api(url, payload, auth=user.auth) assert res.status_code == 200 assert len(res.json['data']['attributes']['tags']) == 1 assert res.json['data']['attributes']['tags'][0] == 'goofier' def test_tags_add_and_remove_properly(self, app, user, url, payload): app.put_json_api(url, payload, auth=user.auth) payload['data']['attributes']['tags'] = [] res = app.put_json_api(url, payload, auth=user.auth) assert res.status_code == 200 assert len(res.json['data']['attributes']['tags']) == 0 def test_put_wo_tags_doesnt_remove_tags(self, app, user, url, payload): app.put_json_api(url, payload, auth=user.auth) payload['data']['attributes'] = {'checkout': None} res = app.put_json_api(url, payload, auth=user.auth) assert res.status_code == 200 # Ensure adding tag data is correct from the PUT response assert len(res.json['data']['attributes']['tags']) == 1 assert res.json['data']['attributes']['tags'][0] == 'goofy' def test_add_and_remove_tag_adds_log(self, app, user, url, payload, node): # test_add_tag_adds_log count = node.logs.count() app.put_json_api(url, payload, auth=user.auth) assert node.logs.count() == count + 1 assert NodeLog.FILE_TAG_ADDED == node.logs.latest().action # test_remove_tag_adds_log payload['data']['attributes']['tags'] = [] count = node.logs.count() app.put_json_api(url, payload, auth=user.auth) assert node.logs.count() == count + 1 assert NodeLog.FILE_TAG_REMOVED == node.logs.latest().action @pytest.mark.django_db class TestPreprintFileView: @pytest.fixture() def preprint(self, user): return PreprintFactory(creator=user) @pytest.fixture() def primary_file(self, preprint): return preprint.primary_file @pytest.fixture() def file_url(self, primary_file): return '/{}files/{}/'.format(API_BASE, primary_file._id) @pytest.fixture() def other_user(self): return AuthUserFactory() def test_published_preprint_file(self, app, file_url, preprint, user, other_user): # Unauthenticated res = app.get(file_url, expect_errors=True) assert res.status_code == 200 # Non contrib res = app.get(file_url, auth=other_user.auth, expect_errors=True) assert res.status_code == 200 # Write contrib preprint.add_contributor(other_user, WRITE, save=True) res = app.get(file_url, auth=other_user.auth, expect_errors=True) assert res.status_code == 200 # Admin contrib res = app.get(file_url, auth=user.auth, expect_errors=True) assert res.status_code == 200 def test_unpublished_preprint_file(self, app, file_url, preprint, user, other_user): preprint.is_published = False preprint.save() # Unauthenticated res = app.get(file_url, expect_errors=True) assert res.status_code == 401 # Non contrib res = app.get(file_url, auth=other_user.auth, expect_errors=True) assert res.status_code == 403 # Write contrib preprint.add_contributor(other_user, WRITE, save=True) res = app.get(file_url, auth=other_user.auth, expect_errors=True) assert res.status_code == 200 # Admin contrib res = app.get(file_url, auth=user.auth, expect_errors=True) assert res.status_code == 200 def test_private_preprint_file(self, app, file_url, preprint, user, other_user): preprint.is_public = False preprint.save() # Unauthenticated res = app.get(file_url, expect_errors=True) assert res.status_code == 401 # Non contrib res = app.get(file_url, auth=other_user.auth, expect_errors=True) assert res.status_code == 403 # Write contrib preprint.add_contributor(other_user, WRITE, save=True) res = app.get(file_url, auth=other_user.auth, expect_errors=True) assert res.status_code == 200 # Admin contrib res = app.get(file_url, auth=user.auth, expect_errors=True) 
assert res.status_code == 200 def test_deleted_preprint_file(self, app, file_url, preprint, user, other_user): preprint.deleted = timezone.now() preprint.save() # Unauthenticated res = app.get(file_url, expect_errors=True) assert res.status_code == 410 # Non contrib res = app.get(file_url, auth=other_user.auth, expect_errors=True) assert res.status_code == 410 # Write contrib preprint.add_contributor(other_user, WRITE, save=True) res = app.get(file_url, auth=other_user.auth, expect_errors=True) assert res.status_code == 410 # Admin contrib res = app.get(file_url, auth=user.auth, expect_errors=True) assert res.status_code == 410 def test_abandoned_preprint_file(self, app, file_url, preprint, user, other_user): preprint.machine_state = DefaultStates.INITIAL.value preprint.save() # Unauthenticated res = app.get(file_url, expect_errors=True) assert res.status_code == 401 # Non contrib res = app.get(file_url, auth=other_user.auth, expect_errors=True) assert res.status_code == 403 # Write contrib preprint.add_contributor(other_user, WRITE, save=True) res = app.get(file_url, auth=other_user.auth, expect_errors=True) assert res.status_code == 403 # Admin contrib res = app.get(file_url, auth=user.auth, expect_errors=True) assert res.status_code == 200 def test_withdrawn_preprint_files(self, app, file_url, preprint, user, other_user): preprint.date_withdrawn = timezone.now() preprint.save() # Unauthenticated res = app.get(file_url, expect_errors=True) assert res.status_code == 401 # Noncontrib res = app.get(file_url, auth=other_user.auth, expect_errors=True) assert res.status_code == 403 # Write contributor preprint.add_contributor(other_user, WRITE, save=True) res = app.get(file_url, auth=other_user.auth, expect_errors=True) assert res.status_code == 403 # Admin contrib res = app.get(file_url, auth=user.auth, expect_errors=True) assert res.status_code == 403
adlius/osf.io
api_tests/files/views/test_file_detail.py
osf/models/banner.py
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import models
from include import IncludeManager

from osf.models.base import BaseModel, ObjectIDMixin
from osf.utils.workflows import DefaultStates, DefaultTriggers, ReviewStates, ReviewTriggers
from osf.utils import permissions


class BaseAction(ObjectIDMixin, BaseModel):
    class Meta:
        abstract = True

    objects = IncludeManager()

    creator = models.ForeignKey('OSFUser', related_name='+', on_delete=models.CASCADE)

    trigger = models.CharField(max_length=31, choices=DefaultTriggers.choices())
    from_state = models.CharField(max_length=31, choices=DefaultStates.choices())
    to_state = models.CharField(max_length=31, choices=DefaultStates.choices())

    comment = models.TextField(blank=True)

    is_deleted = models.BooleanField(default=False)
    auto = models.BooleanField(default=False)

    @property
    def target(self):
        raise NotImplementedError()


class ReviewAction(BaseAction):
    target = models.ForeignKey('Preprint', related_name='actions', on_delete=models.CASCADE)

    trigger = models.CharField(max_length=31, choices=ReviewTriggers.choices())
    from_state = models.CharField(max_length=31, choices=ReviewStates.choices())
    to_state = models.CharField(max_length=31, choices=ReviewStates.choices())


class NodeRequestAction(BaseAction):
    target = models.ForeignKey('NodeRequest', related_name='actions', on_delete=models.CASCADE)
    permissions = models.CharField(
        max_length=5,
        choices=[(permission, permission.title()) for permission in permissions.API_CONTRIBUTOR_PERMISSIONS],
        default=permissions.READ
    )
    visible = models.BooleanField(default=True)


class PreprintRequestAction(BaseAction):
    target = models.ForeignKey('PreprintRequest', related_name='actions', on_delete=models.CASCADE)
from __future__ import unicode_literals import itsdangerous import mock import pytest import pytz from django.utils import timezone from addons.base.utils import get_mfr_url from addons.github.models import GithubFileNode from addons.osfstorage import settings as osfstorage_settings from addons.osfstorage.listeners import checkin_files_task from api.base.settings.defaults import API_BASE from api_tests import utils as api_utils from framework.auth.core import Auth from osf.models import NodeLog, Session, QuickFilesNode from osf.utils.permissions import WRITE, READ from osf.utils.workflows import DefaultStates from osf_tests.factories import ( AuthUserFactory, CommentFactory, ProjectFactory, UserFactory, PreprintFactory, ) from website import settings as website_settings # stolen from^W^Winspired by DRF # rest_framework.fields.DateTimeField.to_representation def _dt_to_iso8601(value): iso8601 = value.isoformat() if iso8601.endswith('+00:00'): iso8601 = iso8601[:-6] + 'Z' # microsecond precision return iso8601 @pytest.fixture() def user(): return AuthUserFactory() @pytest.mark.django_db @pytest.mark.enable_quickfiles_creation class TestFileView: @pytest.fixture() def node(self, user): return ProjectFactory(creator=user, comment_level='public') @pytest.fixture() def quickfiles_node(self, user): return QuickFilesNode.objects.get(creator=user) @pytest.fixture() def file(self, user, node): return api_utils.create_test_file(node, user, create_guid=False) @pytest.fixture() def file_url(self, file): return '/{}files/{}/'.format(API_BASE, file._id) def test_must_have_auth_and_be_contributor(self, app, file_url): # test_must_have_auth(self, app, file_url): res = app.get(file_url, expect_errors=True) assert res.status_code == 401 # test_must_be_contributor(self, app, file_url): non_contributor = AuthUserFactory() res = app.get(file_url, auth=non_contributor.auth, expect_errors=True) assert res.status_code == 403 def test_deleted_file_return_410(self, app, node, user): deleted_file = api_utils.create_test_file(node, user, create_guid=True) url_with_guid = '/{}files/{}/'.format( API_BASE, deleted_file.get_guid()._id ) url_with_id = '/{}files/{}/'.format(API_BASE, deleted_file._id) res = app.get(url_with_guid, auth=user.auth) assert res.status_code == 200 res = app.get(url_with_id, auth=user.auth) assert res.status_code == 200 deleted_file.delete(user=user, save=True) res = app.get(url_with_guid, auth=user.auth, expect_errors=True) assert res.status_code == 410 res = app.get(url_with_id, auth=user.auth, expect_errors=True) assert res.status_code == 410 def test_disabled_users_quickfiles_file_detail_gets_410(self, app, quickfiles_node, user): file_node = api_utils.create_test_file(quickfiles_node, user, create_guid=True) url_with_guid = '/{}files/{}/'.format( API_BASE, file_node.get_guid()._id ) url_with_id = '/{}files/{}/'.format(API_BASE, file_node._id) res = app.get(url_with_id) assert res.status_code == 200 res = app.get(url_with_guid, auth=user.auth) assert res.status_code == 200 user.is_disabled = True user.save() res = app.get(url_with_id, expect_errors=True) assert res.json['errors'][0]['detail'] == 'This user has been deactivated and their' \ ' quickfiles are no longer available.' assert res.status_code == 410 res = app.get(url_with_guid, expect_errors=True) assert res.json['errors'][0]['detail'] == 'This user has been deactivated and their' \ ' quickfiles are no longer available.' 
assert res.status_code == 410 def test_file_guid_guid_status(self, app, user, file, file_url): # test_unvisited_file_has_no_guid res = app.get(file_url, auth=user.auth) assert res.status_code == 200 assert res.json['data']['attributes']['guid'] is None # test_visited_file_has_guid guid = file.get_guid(create=True) res = app.get(file_url, auth=user.auth) assert res.status_code == 200 assert guid is not None assert res.json['data']['attributes']['guid'] == guid._id def test_file_with_wrong_guid(self, app, user): url = '/{}files/{}/'.format(API_BASE, user._id) res = app.get(url, auth=user.auth, expect_errors=True) assert res.status_code == 404 @mock.patch('api.base.throttling.CreateGuidThrottle.allow_request') def test_file_guid_not_created_with_basic_auth( self, mock_allow, app, user, file_url): res = app.get('{}?create_guid=1'.format(file_url), auth=user.auth) guid = res.json['data']['attributes'].get('guid', None) assert res.status_code == 200 assert mock_allow.call_count == 1 assert guid is None @mock.patch('api.base.throttling.CreateGuidThrottle.allow_request') def test_file_guid_created_with_cookie( self, mock_allow, app, user, file_url, file): session = Session(data={'auth_user_id': user._id}) session.save() cookie = itsdangerous.Signer( website_settings.SECRET_KEY ).sign(session._id) app.set_cookie(website_settings.COOKIE_NAME, cookie.decode()) res = app.get('{}?create_guid=1'.format(file_url), auth=user.auth) app.reset() # clear cookie assert res.status_code == 200 guid = res.json['data']['attributes'].get('guid', None) assert guid is not None assert guid == file.get_guid()._id assert mock_allow.call_count == 1 def test_get_file(self, app, user, file_url, file): res = app.get(file_url, auth=user.auth) file.versions.first().reload() assert res.status_code == 200 assert set(res.json.keys()) == {'meta', 'data'} attributes = res.json['data']['attributes'] assert attributes['path'] == file.path assert attributes['kind'] == file.kind assert attributes['name'] == file.name assert attributes['materialized_path'] == file.materialized_path assert attributes['last_touched'] is None assert attributes['provider'] == file.provider assert attributes['size'] == file.versions.first().size assert attributes['current_version'] == len(file.history) assert attributes['date_modified'] == _dt_to_iso8601( file.versions.first().created.replace(tzinfo=pytz.utc) ) assert attributes['date_created'] == _dt_to_iso8601( file.versions.last().created.replace(tzinfo=pytz.utc) ) assert attributes['extra']['hashes']['md5'] is None assert attributes['extra']['hashes']['sha256'] is None assert attributes['tags'] == [] # make sure download link has a trailing slash # so that downloads don't 301 assert res.json['data']['links']['download'].endswith('/') def test_file_has_rel_link_to_owning_project( self, app, user, file_url, node): res = app.get(file_url, auth=user.auth) assert res.status_code == 200 assert 'target' in res.json['data']['relationships'].keys() expected_url = node.api_v2_url actual_url = res.json['data']['relationships']['target']['links']['related']['href'] assert expected_url in actual_url def test_file_has_comments_link(self, app, user, file, file_url): file.get_guid(create=True) res = app.get(file_url, auth=user.auth) assert res.status_code == 200 assert 'comments' in res.json['data']['relationships'].keys() url = res.json['data']['relationships']['comments']['links']['related']['href'] assert app.get(url, auth=user.auth).status_code == 200 assert res.json['data']['type'] == 'files' def 
test_file_has_correct_unread_comments_count( self, app, user, file, node): contributor = AuthUserFactory() node.add_contributor(contributor, auth=Auth(user), save=True) CommentFactory( node=node, target=file.get_guid(create=True), user=contributor, page='files' ) res = app.get( '/{}files/{}/?related_counts=True'.format(API_BASE, file._id), auth=user.auth ) assert res.status_code == 200 unread_comments = res.json['data']['relationships']['comments']['links']['related']['meta']['unread'] assert unread_comments == 1 def test_only_project_contrib_can_comment_on_closed_project( self, app, user, node, file_url): node.comment_level = 'private' node.is_public = True node.save() res = app.get(file_url, auth=user.auth) can_comment = res.json['data']['attributes']['current_user_can_comment'] assert res.status_code == 200 assert can_comment is True non_contributor = AuthUserFactory() res = app.get(file_url, auth=non_contributor.auth) can_comment = res.json['data']['attributes']['current_user_can_comment'] assert res.status_code == 200 assert can_comment is False def test_logged_or_not_user_comment_status_on_open_project( self, app, node, file_url): node.is_public = True node.save() # test_any_loggedin_user_can_comment_on_open_project(self, app, node, # file_url): non_contributor = AuthUserFactory() res = app.get(file_url, auth=non_contributor.auth) can_comment = res.json['data']['attributes']['current_user_can_comment'] assert res.status_code == 200 assert can_comment is True # test_non_logged_in_user_cant_comment(self, app, file_url, node): res = app.get(file_url) can_comment = res.json['data']['attributes']['current_user_can_comment'] assert res.status_code == 200 assert can_comment is False def test_checkout(self, app, user, file, file_url, node): assert file.checkout is None res = app.put_json_api( file_url, { 'data': { 'id': file._id, 'type': 'files', 'attributes': { 'checkout': user._id } } }, auth=user.auth) file.reload() file.save() node.reload() assert res.status_code == 200 assert file.checkout == user res = app.get(file_url, auth=user.auth) assert node.logs.count() == 2 assert node.logs.latest().action == NodeLog.CHECKED_OUT assert node.logs.latest().user == user assert user._id == res.json['data']['relationships']['checkout']['links']['related']['meta']['id'] assert '/{}users/{}/'.format( API_BASE, user._id ) in res.json['data']['relationships']['checkout']['links']['related']['href'] res = app.put_json_api( file_url, { 'data': { 'id': file._id, 'type': 'files', 'attributes': { 'checkout': None } } }, auth=user.auth) file.reload() assert file.checkout is None assert res.status_code == 200 def test_checkout_file_error(self, app, user, file_url, file): # test_checkout_file_no_type res = app.put_json_api( file_url, {'data': {'id': file._id, 'attributes': {'checkout': user._id}}}, auth=user.auth, expect_errors=True ) assert res.status_code == 400 # test_checkout_file_no_id res = app.put_json_api( file_url, {'data': {'type': 'files', 'attributes': {'checkout': user._id}}}, auth=user.auth, expect_errors=True ) assert res.status_code == 400 # test_checkout_file_incorrect_type res = app.put_json_api( file_url, { 'data': { 'id': file._id, 'type': 'Wrong type.', 'attributes': { 'checkout': user._id } } }, auth=user.auth, expect_errors=True) assert res.status_code == 409 # test_checkout_file_incorrect_id res = app.put_json_api( file_url, { 'data': { 'id': '12345', 'type': 'files', 'attributes': { 'checkout': user._id } } }, auth=user.auth, expect_errors=True) assert res.status_code == 409 # 
test_checkout_file_no_attributes res = app.put_json_api( file_url, {'data': {'id': file._id, 'type': 'files'}}, auth=user.auth, expect_errors=True ) assert res.status_code == 400 def test_must_set_self(self, app, user, file, file_url): user_unauthorized = UserFactory() assert file.checkout is None res = app.put_json_api( file_url, { 'data': { 'id': file._id, 'type': 'files', 'attributes': { 'checkout': user_unauthorized._id } } }, auth=user.auth, expect_errors=True, ) file.reload() assert res.status_code == 400 assert file.checkout is None def test_must_be_self(self, app, file, file_url): user = AuthUserFactory() file.checkout = user file.save() res = app.put_json_api( file_url, { 'data': { 'id': file._id, 'type': 'files', 'attributes': { 'checkout': user._id } } }, auth=user.auth, expect_errors=True, ) file.reload() assert res.status_code == 403 assert file.checkout == user def test_admin_can_checkin(self, app, user, node, file, file_url): user_unauthorized = UserFactory() node.add_contributor(user_unauthorized) file.checkout = user_unauthorized file.save() res = app.put_json_api( file_url, { 'data': { 'id': file._id, 'type': 'files', 'attributes': { 'checkout': None } } }, auth=user.auth, expect_errors=True, ) file.reload() node.reload() assert res.status_code == 200 assert file.checkout is None assert node.logs.latest().action == NodeLog.CHECKED_IN assert node.logs.latest().user == user def test_admin_can_checkout(self, app, user, file_url, file, node): res = app.put_json_api( file_url, { 'data': { 'id': file._id, 'type': 'files', 'attributes': { 'checkout': user._id } } }, auth=user.auth, expect_errors=True, ) file.reload() node.reload() assert res.status_code == 200 assert file.checkout == user assert node.logs.latest().action == NodeLog.CHECKED_OUT assert node.logs.latest().user == user def test_cannot_checkin_when_already_checked_in( self, app, user, node, file, file_url): count = node.logs.count() assert not file.is_checked_out res = app.put_json_api( file_url, { 'data': { 'id': file._id, 'type': 'files', 'attributes': { 'checkout': None } } }, auth=user.auth, expect_errors=True, ) file.reload() node.reload() assert res.status_code == 200 assert node.logs.count() == count assert file.checkout is None def test_cannot_checkout_when_checked_out( self, app, user, node, file, file_url): user_unauthorized = UserFactory() node.add_contributor(user_unauthorized) file.checkout = user_unauthorized file.save() count = node.logs.count() res = app.put_json_api( file_url, { 'data': { 'id': file._id, 'type': 'files', 'attributes': { 'checkout': user._id } } }, auth=user.auth, expect_errors=True, ) file.reload() node.reload() assert res.status_code == 200 assert file.checkout == user_unauthorized assert node.logs.count() == count def test_noncontrib_and_read_contrib_cannot_checkout( self, app, file, node, file_url): # test_noncontrib_cannot_checkout non_contrib = AuthUserFactory() assert file.checkout is None assert not node.has_permission(non_contrib, READ) res = app.put_json_api( file_url, { 'data': { 'id': file._id, 'type': 'files', 'attributes': { 'checkout': non_contrib._id } } }, auth=non_contrib.auth, expect_errors=True, ) file.reload() node.reload() assert res.status_code == 403 assert file.checkout is None assert node.logs.latest().action != NodeLog.CHECKED_OUT # test_read_contrib_cannot_checkout read_contrib = AuthUserFactory() node.add_contributor(read_contrib, permissions=READ) node.save() assert not node.can_edit(user=read_contrib) res = app.put_json_api( file_url, { 'data': { 'id': 
file._id, 'type': 'files', 'attributes': { 'checkout': None } } }, auth=read_contrib.auth, expect_errors=True) file.reload() assert res.status_code == 403 assert file.checkout is None assert node.logs.latest().action != NodeLog.CHECKED_OUT def test_write_contrib_can_checkin(self, app, node, file, file_url): write_contrib = AuthUserFactory() node.add_contributor(write_contrib, permissions=WRITE) node.save() assert node.can_edit(user=write_contrib) file.checkout = write_contrib file.save() res = app.put_json_api( file_url, { 'data': { 'id': file._id, 'type': 'files', 'attributes': { 'checkout': None } } }, auth=write_contrib.auth, ) file.reload() assert res.status_code == 200 assert file.checkout is None @mock.patch('addons.osfstorage.listeners.enqueue_postcommit_task') def test_removed_contrib_files_checked_in(self, mock_enqueue, app, node, file): write_contrib = AuthUserFactory() node.add_contributor(write_contrib, permissions=WRITE) node.save() assert node.can_edit(user=write_contrib) file.checkout = write_contrib file.save() assert file.is_checked_out node.remove_contributor(write_contrib, auth=Auth(write_contrib)) mock_enqueue.assert_called_with(checkin_files_task, (node._id, write_contrib._id,), {}, celery=True) def test_must_be_osfstorage(self, app, user, file, file_url): file.recast(GithubFileNode._typedmodels_type) file.save() res = app.put_json_api( file_url, { 'data': { 'id': file._id, 'type': 'files', 'attributes': { 'checkout': user._id } } }, auth=user.auth, expect_errors=True, ) assert res.status_code == 403 def test_get_file_guids_misc(self, app, user, file, node): # test_get_file_resolves_guids guid = file.get_guid(create=True) url = '/{}files/{}/'.format(API_BASE, guid._id) res = app.get(url, auth=user.auth) assert res.status_code == 200 assert set(res.json.keys()) == {'meta', 'data'} assert res.json['data']['attributes']['path'] == file.path # test_get_file_invalid_guid_gives_404 url = '/{}files/{}/'.format(API_BASE, 'asdasasd') res = app.get(url, auth=user.auth, expect_errors=True) assert res.status_code == 404 # test_get_file_non_file_guid_gives_404 url = '/{}files/{}/'.format(API_BASE, node._id) res = app.get(url, auth=user.auth, expect_errors=True) assert res.status_code == 404 def test_current_version_is_equal_to_length_of_history( self, app, user, file_url, file): res = app.get(file_url, auth=user.auth) assert res.json['data']['attributes']['current_version'] == 1 for version in range(2, 4): file.create_version(user, { 'object': '06d80e' + str(version), 'service': 'cloud', osfstorage_settings.WATERBUTLER_RESOURCE: 'osf', }, {'size': 1337, 'contentType': 'img/png'}).save() res = app.get(file_url, auth=user.auth) assert res.json['data']['attributes']['current_version'] == version # Regression test for OSF-7758 def test_folder_files_relationships_contains_guid_not_id( self, app, user, node): folder = node.get_addon('osfstorage').get_root( ).append_folder('I\'d be a teacher!!') folder.save() folder_url = '/{}files/{}/'.format(API_BASE, folder._id) res = app.get(folder_url, auth=user.auth) split_href = res.json['data']['relationships']['files']['links']['related']['href'].split( '/') assert node._id in split_href assert node.id not in split_href def test_embed_user_on_quickfiles_detail(self, app, user): quickfiles = QuickFilesNode.objects.get(creator=user) osfstorage = quickfiles.get_addon('osfstorage') root = osfstorage.get_root() test_file = root.append_file('speedyfile.txt') url = '/{}files/{}/?embed=user'.format(API_BASE, test_file._id) res = app.get(url, 
auth=user.auth) assert res.json['data'].get('embeds', None) assert res.json['data']['embeds'].get('user') assert res.json['data']['embeds']['user']['data']['id'] == user._id @pytest.mark.django_db class TestFileVersionView: @pytest.fixture() def node(self, user): return ProjectFactory(creator=user) @pytest.fixture() def osfstorage(self, node): return node.get_addon('osfstorage') @pytest.fixture() def root_node(self, osfstorage): return osfstorage.get_root() @pytest.fixture() def file(self, root_node, user): file = root_node.append_file('test_file') file.create_version(user, { 'object': '06d80e', 'service': 'cloud', osfstorage_settings.WATERBUTLER_RESOURCE: 'osf', }, { 'size': 1337, 'contentType': 'img/png' }).save() return file def test_listing(self, app, user, file): file.create_version(user, { 'object': '0683m38e', 'service': 'cloud', osfstorage_settings.WATERBUTLER_RESOURCE: 'osf', }, { 'size': 1347, 'contentType': 'img/png' }).save() res = app.get( '/{}files/{}/versions/'.format(API_BASE, file._id), auth=user.auth, ) assert res.status_code == 200 assert len(res.json['data']) == 2 assert res.json['data'][0]['id'] == '2' assert res.json['data'][0]['attributes']['name'] == file.name assert res.json['data'][1]['id'] == '1' assert res.json['data'][1]['attributes']['name'] == file.name def test_load_and_property(self, app, user, file): # test_by_id res = app.get( '/{}files/{}/versions/1/'.format(API_BASE, file._id), auth=user.auth, ) assert res.status_code == 200 assert res.json['data']['id'] == '1' mfr_url = get_mfr_url(file, 'osfstorage') render_link = res.json['data']['links']['render'] download_link = res.json['data']['links']['download'] assert mfr_url in render_link assert download_link in render_link assert 'revision=1' in render_link guid = file.get_guid(create=True)._id res = app.get( '/{}files/{}/versions/1/'.format(API_BASE, file._id), auth=user.auth, ) render_link = res.json['data']['links']['render'] download_link = res.json['data']['links']['download'] assert mfr_url in render_link assert download_link in render_link assert guid in render_link assert 'revision=1' in render_link # test_read_only assert app.put( '/{}files/{}/versions/1/'.format(API_BASE, file._id), expect_errors=True, auth=user.auth, ).status_code == 405 assert app.post( '/{}files/{}/versions/1/'.format(API_BASE, file._id), expect_errors=True, auth=user.auth, ).status_code == 405 assert app.delete( '/{}files/{}/versions/1/'.format(API_BASE, file._id), expect_errors=True, auth=user.auth, ).status_code == 405 @pytest.mark.django_db class TestFileTagging: @pytest.fixture() def node(self, user): return ProjectFactory(creator=user) @pytest.fixture() def file_one(self, user, node): return api_utils.create_test_file( node, user, filename='file_one') @pytest.fixture() def payload(self, file_one): payload = { 'data': { 'type': 'files', 'id': file_one._id, 'attributes': { 'checkout': None, 'tags': ['goofy'] } } } return payload @pytest.fixture() def url(self, file_one): return '/{}files/{}/'.format(API_BASE, file_one._id) def test_tags_add_and_update_properly(self, app, user, url, payload): # test_tags_add_properly res = app.put_json_api(url, payload, auth=user.auth) assert res.status_code == 200 # Ensure adding tag data is correct from the PUT response assert len(res.json['data']['attributes']['tags']) == 1 assert res.json['data']['attributes']['tags'][0] == 'goofy' # test_tags_update_properly # Ensure removing and adding tag data is correct from the PUT response payload['data']['attributes']['tags'] = ['goofier'] res = 
app.put_json_api(url, payload, auth=user.auth) assert res.status_code == 200 assert len(res.json['data']['attributes']['tags']) == 1 assert res.json['data']['attributes']['tags'][0] == 'goofier' def test_tags_add_and_remove_properly(self, app, user, url, payload): app.put_json_api(url, payload, auth=user.auth) payload['data']['attributes']['tags'] = [] res = app.put_json_api(url, payload, auth=user.auth) assert res.status_code == 200 assert len(res.json['data']['attributes']['tags']) == 0 def test_put_wo_tags_doesnt_remove_tags(self, app, user, url, payload): app.put_json_api(url, payload, auth=user.auth) payload['data']['attributes'] = {'checkout': None} res = app.put_json_api(url, payload, auth=user.auth) assert res.status_code == 200 # Ensure adding tag data is correct from the PUT response assert len(res.json['data']['attributes']['tags']) == 1 assert res.json['data']['attributes']['tags'][0] == 'goofy' def test_add_and_remove_tag_adds_log(self, app, user, url, payload, node): # test_add_tag_adds_log count = node.logs.count() app.put_json_api(url, payload, auth=user.auth) assert node.logs.count() == count + 1 assert NodeLog.FILE_TAG_ADDED == node.logs.latest().action # test_remove_tag_adds_log payload['data']['attributes']['tags'] = [] count = node.logs.count() app.put_json_api(url, payload, auth=user.auth) assert node.logs.count() == count + 1 assert NodeLog.FILE_TAG_REMOVED == node.logs.latest().action @pytest.mark.django_db class TestPreprintFileView: @pytest.fixture() def preprint(self, user): return PreprintFactory(creator=user) @pytest.fixture() def primary_file(self, preprint): return preprint.primary_file @pytest.fixture() def file_url(self, primary_file): return '/{}files/{}/'.format(API_BASE, primary_file._id) @pytest.fixture() def other_user(self): return AuthUserFactory() def test_published_preprint_file(self, app, file_url, preprint, user, other_user): # Unauthenticated res = app.get(file_url, expect_errors=True) assert res.status_code == 200 # Non contrib res = app.get(file_url, auth=other_user.auth, expect_errors=True) assert res.status_code == 200 # Write contrib preprint.add_contributor(other_user, WRITE, save=True) res = app.get(file_url, auth=other_user.auth, expect_errors=True) assert res.status_code == 200 # Admin contrib res = app.get(file_url, auth=user.auth, expect_errors=True) assert res.status_code == 200 def test_unpublished_preprint_file(self, app, file_url, preprint, user, other_user): preprint.is_published = False preprint.save() # Unauthenticated res = app.get(file_url, expect_errors=True) assert res.status_code == 401 # Non contrib res = app.get(file_url, auth=other_user.auth, expect_errors=True) assert res.status_code == 403 # Write contrib preprint.add_contributor(other_user, WRITE, save=True) res = app.get(file_url, auth=other_user.auth, expect_errors=True) assert res.status_code == 200 # Admin contrib res = app.get(file_url, auth=user.auth, expect_errors=True) assert res.status_code == 200 def test_private_preprint_file(self, app, file_url, preprint, user, other_user): preprint.is_public = False preprint.save() # Unauthenticated res = app.get(file_url, expect_errors=True) assert res.status_code == 401 # Non contrib res = app.get(file_url, auth=other_user.auth, expect_errors=True) assert res.status_code == 403 # Write contrib preprint.add_contributor(other_user, WRITE, save=True) res = app.get(file_url, auth=other_user.auth, expect_errors=True) assert res.status_code == 200 # Admin contrib res = app.get(file_url, auth=user.auth, expect_errors=True) 
assert res.status_code == 200 def test_deleted_preprint_file(self, app, file_url, preprint, user, other_user): preprint.deleted = timezone.now() preprint.save() # Unauthenticated res = app.get(file_url, expect_errors=True) assert res.status_code == 410 # Non contrib res = app.get(file_url, auth=other_user.auth, expect_errors=True) assert res.status_code == 410 # Write contrib preprint.add_contributor(other_user, WRITE, save=True) res = app.get(file_url, auth=other_user.auth, expect_errors=True) assert res.status_code == 410 # Admin contrib res = app.get(file_url, auth=user.auth, expect_errors=True) assert res.status_code == 410 def test_abandoned_preprint_file(self, app, file_url, preprint, user, other_user): preprint.machine_state = DefaultStates.INITIAL.value preprint.save() # Unauthenticated res = app.get(file_url, expect_errors=True) assert res.status_code == 401 # Non contrib res = app.get(file_url, auth=other_user.auth, expect_errors=True) assert res.status_code == 403 # Write contrib preprint.add_contributor(other_user, WRITE, save=True) res = app.get(file_url, auth=other_user.auth, expect_errors=True) assert res.status_code == 403 # Admin contrib res = app.get(file_url, auth=user.auth, expect_errors=True) assert res.status_code == 200 def test_withdrawn_preprint_files(self, app, file_url, preprint, user, other_user): preprint.date_withdrawn = timezone.now() preprint.save() # Unauthenticated res = app.get(file_url, expect_errors=True) assert res.status_code == 401 # Noncontrib res = app.get(file_url, auth=other_user.auth, expect_errors=True) assert res.status_code == 403 # Write contributor preprint.add_contributor(other_user, WRITE, save=True) res = app.get(file_url, auth=other_user.auth, expect_errors=True) assert res.status_code == 403 # Admin contrib res = app.get(file_url, auth=user.auth, expect_errors=True) assert res.status_code == 403
adlius/osf.io
api_tests/files/views/test_file_detail.py
osf/models/action.py
# -*- coding: utf-8 -*- from __future__ import absolute_import import logging import re import datetime from website.identifiers.clients.base import AbstractIdentifierClient from website import settings from datacite import DataCiteMDSClient, schema40 logger = logging.getLogger(__name__) class DataCiteClient(AbstractIdentifierClient): def __init__(self, base_url, prefix, client=None): self.base_url = base_url self.prefix = prefix self._client = client or DataCiteMDSClient( url=self.base_url, username=settings.DATACITE_USERNAME, password=settings.DATACITE_PASSWORD, prefix=self.prefix ) def build_metadata(self, node): """Return the formatted datacite metadata XML as a string. """ data = { 'identifier': { 'identifier': self.build_doi(node), 'identifierType': 'DOI', }, 'creators': [ {'creatorName': user.fullname, 'givenName': user.given_name, 'familyName': user.family_name} for user in node.visible_contributors ], 'titles': [ {'title': node.title} ], 'publisher': 'Open Science Framework', 'publicationYear': str(datetime.datetime.now().year), 'resourceType': { 'resourceType': 'Project', 'resourceTypeGeneral': 'Text' } } if node.description: data['descriptions'] = [{ 'descriptionType': 'Abstract', 'description': node.description }] if node.node_license: data['rightsList'] = [{ 'rights': node.node_license.name, 'rightsURI': node.node_license.url }] # Validate dictionary assert schema40.validate(data) # Generate DataCite XML from dictionary. return schema40.tostring(data) def build_doi(self, object): return settings.DOI_FORMAT.format(prefix=self.prefix, guid=object._id) def get_identifier(self, identifier): self._client.doi_get(identifier) def create_identifier(self, node, category): if category == 'doi': metadata = self.build_metadata(node) resp = self._client.metadata_post(metadata) # Typical response: 'OK (10.70102/FK2osf.io/cq695)' to doi 10.70102/FK2osf.io/cq695 doi = re.match(r'OK \((?P<doi>[a-zA-Z0-9 .\/]{0,})\)', resp).groupdict()['doi'] if settings.DATACITE_MINT_DOIS: self._client.doi_post(doi, node.absolute_url) return {'doi': doi} else: raise NotImplementedError('Creating an identifier with category {} is not supported'.format(category)) def update_identifier(self, node, category): if not node.is_public or node.is_deleted: if category == 'doi': doi = self.build_doi(node) self._client.metadata_delete(doi) return {'doi': doi} else: raise NotImplementedError('Updating metadata not supported for {}'.format(category)) else: return self.create_identifier(node, category)
from __future__ import unicode_literals import itsdangerous import mock import pytest import pytz from django.utils import timezone from addons.base.utils import get_mfr_url from addons.github.models import GithubFileNode from addons.osfstorage import settings as osfstorage_settings from addons.osfstorage.listeners import checkin_files_task from api.base.settings.defaults import API_BASE from api_tests import utils as api_utils from framework.auth.core import Auth from osf.models import NodeLog, Session, QuickFilesNode from osf.utils.permissions import WRITE, READ from osf.utils.workflows import DefaultStates from osf_tests.factories import ( AuthUserFactory, CommentFactory, ProjectFactory, UserFactory, PreprintFactory, ) from website import settings as website_settings # stolen from^W^Winspired by DRF # rest_framework.fields.DateTimeField.to_representation def _dt_to_iso8601(value): iso8601 = value.isoformat() if iso8601.endswith('+00:00'): iso8601 = iso8601[:-6] + 'Z' # microsecond precision return iso8601 @pytest.fixture() def user(): return AuthUserFactory() @pytest.mark.django_db @pytest.mark.enable_quickfiles_creation class TestFileView: @pytest.fixture() def node(self, user): return ProjectFactory(creator=user, comment_level='public') @pytest.fixture() def quickfiles_node(self, user): return QuickFilesNode.objects.get(creator=user) @pytest.fixture() def file(self, user, node): return api_utils.create_test_file(node, user, create_guid=False) @pytest.fixture() def file_url(self, file): return '/{}files/{}/'.format(API_BASE, file._id) def test_must_have_auth_and_be_contributor(self, app, file_url): # test_must_have_auth(self, app, file_url): res = app.get(file_url, expect_errors=True) assert res.status_code == 401 # test_must_be_contributor(self, app, file_url): non_contributor = AuthUserFactory() res = app.get(file_url, auth=non_contributor.auth, expect_errors=True) assert res.status_code == 403 def test_deleted_file_return_410(self, app, node, user): deleted_file = api_utils.create_test_file(node, user, create_guid=True) url_with_guid = '/{}files/{}/'.format( API_BASE, deleted_file.get_guid()._id ) url_with_id = '/{}files/{}/'.format(API_BASE, deleted_file._id) res = app.get(url_with_guid, auth=user.auth) assert res.status_code == 200 res = app.get(url_with_id, auth=user.auth) assert res.status_code == 200 deleted_file.delete(user=user, save=True) res = app.get(url_with_guid, auth=user.auth, expect_errors=True) assert res.status_code == 410 res = app.get(url_with_id, auth=user.auth, expect_errors=True) assert res.status_code == 410 def test_disabled_users_quickfiles_file_detail_gets_410(self, app, quickfiles_node, user): file_node = api_utils.create_test_file(quickfiles_node, user, create_guid=True) url_with_guid = '/{}files/{}/'.format( API_BASE, file_node.get_guid()._id ) url_with_id = '/{}files/{}/'.format(API_BASE, file_node._id) res = app.get(url_with_id) assert res.status_code == 200 res = app.get(url_with_guid, auth=user.auth) assert res.status_code == 200 user.is_disabled = True user.save() res = app.get(url_with_id, expect_errors=True) assert res.json['errors'][0]['detail'] == 'This user has been deactivated and their' \ ' quickfiles are no longer available.' assert res.status_code == 410 res = app.get(url_with_guid, expect_errors=True) assert res.json['errors'][0]['detail'] == 'This user has been deactivated and their' \ ' quickfiles are no longer available.' 
assert res.status_code == 410 def test_file_guid_guid_status(self, app, user, file, file_url): # test_unvisited_file_has_no_guid res = app.get(file_url, auth=user.auth) assert res.status_code == 200 assert res.json['data']['attributes']['guid'] is None # test_visited_file_has_guid guid = file.get_guid(create=True) res = app.get(file_url, auth=user.auth) assert res.status_code == 200 assert guid is not None assert res.json['data']['attributes']['guid'] == guid._id def test_file_with_wrong_guid(self, app, user): url = '/{}files/{}/'.format(API_BASE, user._id) res = app.get(url, auth=user.auth, expect_errors=True) assert res.status_code == 404 @mock.patch('api.base.throttling.CreateGuidThrottle.allow_request') def test_file_guid_not_created_with_basic_auth( self, mock_allow, app, user, file_url): res = app.get('{}?create_guid=1'.format(file_url), auth=user.auth) guid = res.json['data']['attributes'].get('guid', None) assert res.status_code == 200 assert mock_allow.call_count == 1 assert guid is None @mock.patch('api.base.throttling.CreateGuidThrottle.allow_request') def test_file_guid_created_with_cookie( self, mock_allow, app, user, file_url, file): session = Session(data={'auth_user_id': user._id}) session.save() cookie = itsdangerous.Signer( website_settings.SECRET_KEY ).sign(session._id) app.set_cookie(website_settings.COOKIE_NAME, cookie.decode()) res = app.get('{}?create_guid=1'.format(file_url), auth=user.auth) app.reset() # clear cookie assert res.status_code == 200 guid = res.json['data']['attributes'].get('guid', None) assert guid is not None assert guid == file.get_guid()._id assert mock_allow.call_count == 1 def test_get_file(self, app, user, file_url, file): res = app.get(file_url, auth=user.auth) file.versions.first().reload() assert res.status_code == 200 assert set(res.json.keys()) == {'meta', 'data'} attributes = res.json['data']['attributes'] assert attributes['path'] == file.path assert attributes['kind'] == file.kind assert attributes['name'] == file.name assert attributes['materialized_path'] == file.materialized_path assert attributes['last_touched'] is None assert attributes['provider'] == file.provider assert attributes['size'] == file.versions.first().size assert attributes['current_version'] == len(file.history) assert attributes['date_modified'] == _dt_to_iso8601( file.versions.first().created.replace(tzinfo=pytz.utc) ) assert attributes['date_created'] == _dt_to_iso8601( file.versions.last().created.replace(tzinfo=pytz.utc) ) assert attributes['extra']['hashes']['md5'] is None assert attributes['extra']['hashes']['sha256'] is None assert attributes['tags'] == [] # make sure download link has a trailing slash # so that downloads don't 301 assert res.json['data']['links']['download'].endswith('/') def test_file_has_rel_link_to_owning_project( self, app, user, file_url, node): res = app.get(file_url, auth=user.auth) assert res.status_code == 200 assert 'target' in res.json['data']['relationships'].keys() expected_url = node.api_v2_url actual_url = res.json['data']['relationships']['target']['links']['related']['href'] assert expected_url in actual_url def test_file_has_comments_link(self, app, user, file, file_url): file.get_guid(create=True) res = app.get(file_url, auth=user.auth) assert res.status_code == 200 assert 'comments' in res.json['data']['relationships'].keys() url = res.json['data']['relationships']['comments']['links']['related']['href'] assert app.get(url, auth=user.auth).status_code == 200 assert res.json['data']['type'] == 'files' def 
test_file_has_correct_unread_comments_count( self, app, user, file, node): contributor = AuthUserFactory() node.add_contributor(contributor, auth=Auth(user), save=True) CommentFactory( node=node, target=file.get_guid(create=True), user=contributor, page='files' ) res = app.get( '/{}files/{}/?related_counts=True'.format(API_BASE, file._id), auth=user.auth ) assert res.status_code == 200 unread_comments = res.json['data']['relationships']['comments']['links']['related']['meta']['unread'] assert unread_comments == 1 def test_only_project_contrib_can_comment_on_closed_project( self, app, user, node, file_url): node.comment_level = 'private' node.is_public = True node.save() res = app.get(file_url, auth=user.auth) can_comment = res.json['data']['attributes']['current_user_can_comment'] assert res.status_code == 200 assert can_comment is True non_contributor = AuthUserFactory() res = app.get(file_url, auth=non_contributor.auth) can_comment = res.json['data']['attributes']['current_user_can_comment'] assert res.status_code == 200 assert can_comment is False def test_logged_or_not_user_comment_status_on_open_project( self, app, node, file_url): node.is_public = True node.save() # test_any_loggedin_user_can_comment_on_open_project(self, app, node, # file_url): non_contributor = AuthUserFactory() res = app.get(file_url, auth=non_contributor.auth) can_comment = res.json['data']['attributes']['current_user_can_comment'] assert res.status_code == 200 assert can_comment is True # test_non_logged_in_user_cant_comment(self, app, file_url, node): res = app.get(file_url) can_comment = res.json['data']['attributes']['current_user_can_comment'] assert res.status_code == 200 assert can_comment is False def test_checkout(self, app, user, file, file_url, node): assert file.checkout is None res = app.put_json_api( file_url, { 'data': { 'id': file._id, 'type': 'files', 'attributes': { 'checkout': user._id } } }, auth=user.auth) file.reload() file.save() node.reload() assert res.status_code == 200 assert file.checkout == user res = app.get(file_url, auth=user.auth) assert node.logs.count() == 2 assert node.logs.latest().action == NodeLog.CHECKED_OUT assert node.logs.latest().user == user assert user._id == res.json['data']['relationships']['checkout']['links']['related']['meta']['id'] assert '/{}users/{}/'.format( API_BASE, user._id ) in res.json['data']['relationships']['checkout']['links']['related']['href'] res = app.put_json_api( file_url, { 'data': { 'id': file._id, 'type': 'files', 'attributes': { 'checkout': None } } }, auth=user.auth) file.reload() assert file.checkout is None assert res.status_code == 200 def test_checkout_file_error(self, app, user, file_url, file): # test_checkout_file_no_type res = app.put_json_api( file_url, {'data': {'id': file._id, 'attributes': {'checkout': user._id}}}, auth=user.auth, expect_errors=True ) assert res.status_code == 400 # test_checkout_file_no_id res = app.put_json_api( file_url, {'data': {'type': 'files', 'attributes': {'checkout': user._id}}}, auth=user.auth, expect_errors=True ) assert res.status_code == 400 # test_checkout_file_incorrect_type res = app.put_json_api( file_url, { 'data': { 'id': file._id, 'type': 'Wrong type.', 'attributes': { 'checkout': user._id } } }, auth=user.auth, expect_errors=True) assert res.status_code == 409 # test_checkout_file_incorrect_id res = app.put_json_api( file_url, { 'data': { 'id': '12345', 'type': 'files', 'attributes': { 'checkout': user._id } } }, auth=user.auth, expect_errors=True) assert res.status_code == 409 # 
test_checkout_file_no_attributes res = app.put_json_api( file_url, {'data': {'id': file._id, 'type': 'files'}}, auth=user.auth, expect_errors=True ) assert res.status_code == 400 def test_must_set_self(self, app, user, file, file_url): user_unauthorized = UserFactory() assert file.checkout is None res = app.put_json_api( file_url, { 'data': { 'id': file._id, 'type': 'files', 'attributes': { 'checkout': user_unauthorized._id } } }, auth=user.auth, expect_errors=True, ) file.reload() assert res.status_code == 400 assert file.checkout is None def test_must_be_self(self, app, file, file_url): user = AuthUserFactory() file.checkout = user file.save() res = app.put_json_api( file_url, { 'data': { 'id': file._id, 'type': 'files', 'attributes': { 'checkout': user._id } } }, auth=user.auth, expect_errors=True, ) file.reload() assert res.status_code == 403 assert file.checkout == user def test_admin_can_checkin(self, app, user, node, file, file_url): user_unauthorized = UserFactory() node.add_contributor(user_unauthorized) file.checkout = user_unauthorized file.save() res = app.put_json_api( file_url, { 'data': { 'id': file._id, 'type': 'files', 'attributes': { 'checkout': None } } }, auth=user.auth, expect_errors=True, ) file.reload() node.reload() assert res.status_code == 200 assert file.checkout is None assert node.logs.latest().action == NodeLog.CHECKED_IN assert node.logs.latest().user == user def test_admin_can_checkout(self, app, user, file_url, file, node): res = app.put_json_api( file_url, { 'data': { 'id': file._id, 'type': 'files', 'attributes': { 'checkout': user._id } } }, auth=user.auth, expect_errors=True, ) file.reload() node.reload() assert res.status_code == 200 assert file.checkout == user assert node.logs.latest().action == NodeLog.CHECKED_OUT assert node.logs.latest().user == user def test_cannot_checkin_when_already_checked_in( self, app, user, node, file, file_url): count = node.logs.count() assert not file.is_checked_out res = app.put_json_api( file_url, { 'data': { 'id': file._id, 'type': 'files', 'attributes': { 'checkout': None } } }, auth=user.auth, expect_errors=True, ) file.reload() node.reload() assert res.status_code == 200 assert node.logs.count() == count assert file.checkout is None def test_cannot_checkout_when_checked_out( self, app, user, node, file, file_url): user_unauthorized = UserFactory() node.add_contributor(user_unauthorized) file.checkout = user_unauthorized file.save() count = node.logs.count() res = app.put_json_api( file_url, { 'data': { 'id': file._id, 'type': 'files', 'attributes': { 'checkout': user._id } } }, auth=user.auth, expect_errors=True, ) file.reload() node.reload() assert res.status_code == 200 assert file.checkout == user_unauthorized assert node.logs.count() == count def test_noncontrib_and_read_contrib_cannot_checkout( self, app, file, node, file_url): # test_noncontrib_cannot_checkout non_contrib = AuthUserFactory() assert file.checkout is None assert not node.has_permission(non_contrib, READ) res = app.put_json_api( file_url, { 'data': { 'id': file._id, 'type': 'files', 'attributes': { 'checkout': non_contrib._id } } }, auth=non_contrib.auth, expect_errors=True, ) file.reload() node.reload() assert res.status_code == 403 assert file.checkout is None assert node.logs.latest().action != NodeLog.CHECKED_OUT # test_read_contrib_cannot_checkout read_contrib = AuthUserFactory() node.add_contributor(read_contrib, permissions=READ) node.save() assert not node.can_edit(user=read_contrib) res = app.put_json_api( file_url, { 'data': { 'id': 
file._id, 'type': 'files', 'attributes': { 'checkout': None } } }, auth=read_contrib.auth, expect_errors=True) file.reload() assert res.status_code == 403 assert file.checkout is None assert node.logs.latest().action != NodeLog.CHECKED_OUT def test_write_contrib_can_checkin(self, app, node, file, file_url): write_contrib = AuthUserFactory() node.add_contributor(write_contrib, permissions=WRITE) node.save() assert node.can_edit(user=write_contrib) file.checkout = write_contrib file.save() res = app.put_json_api( file_url, { 'data': { 'id': file._id, 'type': 'files', 'attributes': { 'checkout': None } } }, auth=write_contrib.auth, ) file.reload() assert res.status_code == 200 assert file.checkout is None @mock.patch('addons.osfstorage.listeners.enqueue_postcommit_task') def test_removed_contrib_files_checked_in(self, mock_enqueue, app, node, file): write_contrib = AuthUserFactory() node.add_contributor(write_contrib, permissions=WRITE) node.save() assert node.can_edit(user=write_contrib) file.checkout = write_contrib file.save() assert file.is_checked_out node.remove_contributor(write_contrib, auth=Auth(write_contrib)) mock_enqueue.assert_called_with(checkin_files_task, (node._id, write_contrib._id,), {}, celery=True) def test_must_be_osfstorage(self, app, user, file, file_url): file.recast(GithubFileNode._typedmodels_type) file.save() res = app.put_json_api( file_url, { 'data': { 'id': file._id, 'type': 'files', 'attributes': { 'checkout': user._id } } }, auth=user.auth, expect_errors=True, ) assert res.status_code == 403 def test_get_file_guids_misc(self, app, user, file, node): # test_get_file_resolves_guids guid = file.get_guid(create=True) url = '/{}files/{}/'.format(API_BASE, guid._id) res = app.get(url, auth=user.auth) assert res.status_code == 200 assert set(res.json.keys()) == {'meta', 'data'} assert res.json['data']['attributes']['path'] == file.path # test_get_file_invalid_guid_gives_404 url = '/{}files/{}/'.format(API_BASE, 'asdasasd') res = app.get(url, auth=user.auth, expect_errors=True) assert res.status_code == 404 # test_get_file_non_file_guid_gives_404 url = '/{}files/{}/'.format(API_BASE, node._id) res = app.get(url, auth=user.auth, expect_errors=True) assert res.status_code == 404 def test_current_version_is_equal_to_length_of_history( self, app, user, file_url, file): res = app.get(file_url, auth=user.auth) assert res.json['data']['attributes']['current_version'] == 1 for version in range(2, 4): file.create_version(user, { 'object': '06d80e' + str(version), 'service': 'cloud', osfstorage_settings.WATERBUTLER_RESOURCE: 'osf', }, {'size': 1337, 'contentType': 'img/png'}).save() res = app.get(file_url, auth=user.auth) assert res.json['data']['attributes']['current_version'] == version # Regression test for OSF-7758 def test_folder_files_relationships_contains_guid_not_id( self, app, user, node): folder = node.get_addon('osfstorage').get_root( ).append_folder('I\'d be a teacher!!') folder.save() folder_url = '/{}files/{}/'.format(API_BASE, folder._id) res = app.get(folder_url, auth=user.auth) split_href = res.json['data']['relationships']['files']['links']['related']['href'].split( '/') assert node._id in split_href assert node.id not in split_href def test_embed_user_on_quickfiles_detail(self, app, user): quickfiles = QuickFilesNode.objects.get(creator=user) osfstorage = quickfiles.get_addon('osfstorage') root = osfstorage.get_root() test_file = root.append_file('speedyfile.txt') url = '/{}files/{}/?embed=user'.format(API_BASE, test_file._id) res = app.get(url, 
auth=user.auth) assert res.json['data'].get('embeds', None) assert res.json['data']['embeds'].get('user') assert res.json['data']['embeds']['user']['data']['id'] == user._id @pytest.mark.django_db class TestFileVersionView: @pytest.fixture() def node(self, user): return ProjectFactory(creator=user) @pytest.fixture() def osfstorage(self, node): return node.get_addon('osfstorage') @pytest.fixture() def root_node(self, osfstorage): return osfstorage.get_root() @pytest.fixture() def file(self, root_node, user): file = root_node.append_file('test_file') file.create_version(user, { 'object': '06d80e', 'service': 'cloud', osfstorage_settings.WATERBUTLER_RESOURCE: 'osf', }, { 'size': 1337, 'contentType': 'img/png' }).save() return file def test_listing(self, app, user, file): file.create_version(user, { 'object': '0683m38e', 'service': 'cloud', osfstorage_settings.WATERBUTLER_RESOURCE: 'osf', }, { 'size': 1347, 'contentType': 'img/png' }).save() res = app.get( '/{}files/{}/versions/'.format(API_BASE, file._id), auth=user.auth, ) assert res.status_code == 200 assert len(res.json['data']) == 2 assert res.json['data'][0]['id'] == '2' assert res.json['data'][0]['attributes']['name'] == file.name assert res.json['data'][1]['id'] == '1' assert res.json['data'][1]['attributes']['name'] == file.name def test_load_and_property(self, app, user, file): # test_by_id res = app.get( '/{}files/{}/versions/1/'.format(API_BASE, file._id), auth=user.auth, ) assert res.status_code == 200 assert res.json['data']['id'] == '1' mfr_url = get_mfr_url(file, 'osfstorage') render_link = res.json['data']['links']['render'] download_link = res.json['data']['links']['download'] assert mfr_url in render_link assert download_link in render_link assert 'revision=1' in render_link guid = file.get_guid(create=True)._id res = app.get( '/{}files/{}/versions/1/'.format(API_BASE, file._id), auth=user.auth, ) render_link = res.json['data']['links']['render'] download_link = res.json['data']['links']['download'] assert mfr_url in render_link assert download_link in render_link assert guid in render_link assert 'revision=1' in render_link # test_read_only assert app.put( '/{}files/{}/versions/1/'.format(API_BASE, file._id), expect_errors=True, auth=user.auth, ).status_code == 405 assert app.post( '/{}files/{}/versions/1/'.format(API_BASE, file._id), expect_errors=True, auth=user.auth, ).status_code == 405 assert app.delete( '/{}files/{}/versions/1/'.format(API_BASE, file._id), expect_errors=True, auth=user.auth, ).status_code == 405 @pytest.mark.django_db class TestFileTagging: @pytest.fixture() def node(self, user): return ProjectFactory(creator=user) @pytest.fixture() def file_one(self, user, node): return api_utils.create_test_file( node, user, filename='file_one') @pytest.fixture() def payload(self, file_one): payload = { 'data': { 'type': 'files', 'id': file_one._id, 'attributes': { 'checkout': None, 'tags': ['goofy'] } } } return payload @pytest.fixture() def url(self, file_one): return '/{}files/{}/'.format(API_BASE, file_one._id) def test_tags_add_and_update_properly(self, app, user, url, payload): # test_tags_add_properly res = app.put_json_api(url, payload, auth=user.auth) assert res.status_code == 200 # Ensure adding tag data is correct from the PUT response assert len(res.json['data']['attributes']['tags']) == 1 assert res.json['data']['attributes']['tags'][0] == 'goofy' # test_tags_update_properly # Ensure removing and adding tag data is correct from the PUT response payload['data']['attributes']['tags'] = ['goofier'] res = 
app.put_json_api(url, payload, auth=user.auth) assert res.status_code == 200 assert len(res.json['data']['attributes']['tags']) == 1 assert res.json['data']['attributes']['tags'][0] == 'goofier' def test_tags_add_and_remove_properly(self, app, user, url, payload): app.put_json_api(url, payload, auth=user.auth) payload['data']['attributes']['tags'] = [] res = app.put_json_api(url, payload, auth=user.auth) assert res.status_code == 200 assert len(res.json['data']['attributes']['tags']) == 0 def test_put_wo_tags_doesnt_remove_tags(self, app, user, url, payload): app.put_json_api(url, payload, auth=user.auth) payload['data']['attributes'] = {'checkout': None} res = app.put_json_api(url, payload, auth=user.auth) assert res.status_code == 200 # Ensure adding tag data is correct from the PUT response assert len(res.json['data']['attributes']['tags']) == 1 assert res.json['data']['attributes']['tags'][0] == 'goofy' def test_add_and_remove_tag_adds_log(self, app, user, url, payload, node): # test_add_tag_adds_log count = node.logs.count() app.put_json_api(url, payload, auth=user.auth) assert node.logs.count() == count + 1 assert NodeLog.FILE_TAG_ADDED == node.logs.latest().action # test_remove_tag_adds_log payload['data']['attributes']['tags'] = [] count = node.logs.count() app.put_json_api(url, payload, auth=user.auth) assert node.logs.count() == count + 1 assert NodeLog.FILE_TAG_REMOVED == node.logs.latest().action @pytest.mark.django_db class TestPreprintFileView: @pytest.fixture() def preprint(self, user): return PreprintFactory(creator=user) @pytest.fixture() def primary_file(self, preprint): return preprint.primary_file @pytest.fixture() def file_url(self, primary_file): return '/{}files/{}/'.format(API_BASE, primary_file._id) @pytest.fixture() def other_user(self): return AuthUserFactory() def test_published_preprint_file(self, app, file_url, preprint, user, other_user): # Unauthenticated res = app.get(file_url, expect_errors=True) assert res.status_code == 200 # Non contrib res = app.get(file_url, auth=other_user.auth, expect_errors=True) assert res.status_code == 200 # Write contrib preprint.add_contributor(other_user, WRITE, save=True) res = app.get(file_url, auth=other_user.auth, expect_errors=True) assert res.status_code == 200 # Admin contrib res = app.get(file_url, auth=user.auth, expect_errors=True) assert res.status_code == 200 def test_unpublished_preprint_file(self, app, file_url, preprint, user, other_user): preprint.is_published = False preprint.save() # Unauthenticated res = app.get(file_url, expect_errors=True) assert res.status_code == 401 # Non contrib res = app.get(file_url, auth=other_user.auth, expect_errors=True) assert res.status_code == 403 # Write contrib preprint.add_contributor(other_user, WRITE, save=True) res = app.get(file_url, auth=other_user.auth, expect_errors=True) assert res.status_code == 200 # Admin contrib res = app.get(file_url, auth=user.auth, expect_errors=True) assert res.status_code == 200 def test_private_preprint_file(self, app, file_url, preprint, user, other_user): preprint.is_public = False preprint.save() # Unauthenticated res = app.get(file_url, expect_errors=True) assert res.status_code == 401 # Non contrib res = app.get(file_url, auth=other_user.auth, expect_errors=True) assert res.status_code == 403 # Write contrib preprint.add_contributor(other_user, WRITE, save=True) res = app.get(file_url, auth=other_user.auth, expect_errors=True) assert res.status_code == 200 # Admin contrib res = app.get(file_url, auth=user.auth, expect_errors=True) 
assert res.status_code == 200 def test_deleted_preprint_file(self, app, file_url, preprint, user, other_user): preprint.deleted = timezone.now() preprint.save() # Unauthenticated res = app.get(file_url, expect_errors=True) assert res.status_code == 410 # Non contrib res = app.get(file_url, auth=other_user.auth, expect_errors=True) assert res.status_code == 410 # Write contrib preprint.add_contributor(other_user, WRITE, save=True) res = app.get(file_url, auth=other_user.auth, expect_errors=True) assert res.status_code == 410 # Admin contrib res = app.get(file_url, auth=user.auth, expect_errors=True) assert res.status_code == 410 def test_abandoned_preprint_file(self, app, file_url, preprint, user, other_user): preprint.machine_state = DefaultStates.INITIAL.value preprint.save() # Unauthenticated res = app.get(file_url, expect_errors=True) assert res.status_code == 401 # Non contrib res = app.get(file_url, auth=other_user.auth, expect_errors=True) assert res.status_code == 403 # Write contrib preprint.add_contributor(other_user, WRITE, save=True) res = app.get(file_url, auth=other_user.auth, expect_errors=True) assert res.status_code == 403 # Admin contrib res = app.get(file_url, auth=user.auth, expect_errors=True) assert res.status_code == 200 def test_withdrawn_preprint_files(self, app, file_url, preprint, user, other_user): preprint.date_withdrawn = timezone.now() preprint.save() # Unauthenticated res = app.get(file_url, expect_errors=True) assert res.status_code == 401 # Noncontrib res = app.get(file_url, auth=other_user.auth, expect_errors=True) assert res.status_code == 403 # Write contributor preprint.add_contributor(other_user, WRITE, save=True) res = app.get(file_url, auth=other_user.auth, expect_errors=True) assert res.status_code == 403 # Admin contrib res = app.get(file_url, auth=user.auth, expect_errors=True) assert res.status_code == 403
adlius/osf.io
api_tests/files/views/test_file_detail.py
website/identifiers/clients/datacite.py
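A minimal, hedged sketch of the two string manipulations in the DataCiteClient record above: formatting a DOI from a prefix and GUID (as build_doi does) and parsing the DOI out of a typical metadata_post response (as create_identifier does). The DOI_FORMAT template below is an assumption for illustration only — the real value lives in website.settings — while the sample response string is taken from the inline comment in create_identifier.

import re

# Assumed format string for illustration; the real template is settings.DOI_FORMAT.
DOI_FORMAT = '{prefix}/FK2osf.io/{guid}'

def build_doi(prefix, guid):
    # Mirrors DataCiteClient.build_doi: combine the DataCite prefix with the object GUID.
    return DOI_FORMAT.format(prefix=prefix, guid=guid)

doi = build_doi('10.70102', 'cq695')
assert doi == '10.70102/FK2osf.io/cq695'

# Pull the DOI back out of a typical "OK (...)" response, exactly as create_identifier does.
resp = 'OK (10.70102/FK2osf.io/cq695)'
parsed = re.match(r'OK \((?P<doi>[a-zA-Z0-9 .\/]{0,})\)', resp).groupdict()['doi']
assert parsed == doi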
import os import pytest from cfme.fixtures.terminalreporter import reporter from cfme.utils.datafile import data_path_for_filename from cfme.utils.datafile import load_data_file from cfme.utils.path import data_path from cfme.utils.path import log_path # Collection for storing unique combinations of data file paths # and filenames for usage reporting after a completed test run seen_data_files = set() @pytest.fixture(scope="module") def datafile(request): """datafile(filename, replacements) datafile fixture, with templating support Args: filename: filename to load from the data dir replacements: template replacements Returns: Path to the loaded datafile Usage: Given a filename, it will attempt to open the given file from the test's corresponding data dir. For example, this: datafile('testfile') # in tests/subdir/test_module_name.py Would return a file object representing this file: /path/to/cfme_tests/data/subdir/test_module_name/testfile Given a filename with a leading slash, it will attempt to load the file relative to the root of the data dir. For example, this: datafile('/common/testfile') # in tests/subdir/test_module_name.py Would return a file object representing this file: /path/to/cfme_tests/data/common/testfile Note that the test module name is not used with the leading slash. .. rubric:: Templates: This fixture can also handle template replacements. If the datafile being loaded is a python template, the dictionary of replacements can be passed as the 'replacements' keyword argument. In this case, the returned data file will be a NamedTemporaryFile prepopulated with the interpolated result from combining the template with the replacements mapping. * http://docs.python.org/2/library/string.html#template-strings * http://docs.python.org/2/library/tempfile.html#tempfile.NamedTemporaryFile """ return _FixtureDataFile(request) def pytest_addoption(parser): group = parser.getgroup('cfme') group.addoption('--udf-report', action='store_true', default=False, dest='udf_report', help='flag to generate an unused data files report') def pytest_sessionfinish(session, exitstatus): udf_log_file = log_path.join('unused_data_files.log') if udf_log_file.check(): # Clean up old udf log if it exists udf_log_file.remove() if session.config.option.udf_report is False: # Short out here if not making a report return # Output an unused data files log after a test run data_files = set() for dirpath, dirnames, filenames in os.walk(str(data_path)): for filename in filenames: filepath = os.path.join(dirpath, filename) data_files.add(filepath) unused_data_files = data_files - seen_data_files if unused_data_files: # Write the log of unused data files out, minus the data dir prefix udf_log = ''.join( (line[len(str(data_path)):] + '\n' for line in unused_data_files) ) udf_log_file.write(udf_log + '\n') # Throw a notice into the terminal reporter to check the log tr = reporter() tr.write_line('') tr.write_sep( '-', '%d unused data files after test run, check %s' % ( len(unused_data_files), udf_log_file.basename ) ) class _FixtureDataFile(object): def __init__(self, request): self.base_path = str(request.session.fspath) self.testmod_path = str(request.fspath) def __call__(self, filename, replacements=None): if filename.startswith('/'): complete_path = data_path_for_filename( filename.strip('/'), self.base_path) else: complete_path = data_path_for_filename( filename, self.base_path, self.testmod_path) seen_data_files.add(complete_path) return load_data_file(complete_path, replacements)
import random import pytest from cfme import test_requirements from cfme.cloud.provider.azure import AzureProvider from cfme.cloud.provider.ec2 import EC2Provider from cfme.cloud.provider.gce import GCEProvider from cfme.cloud.provider.openstack import OpenStackProvider from cfme.utils.log import logger from cfme.utils.wait import wait_for pytestmark = [pytest.mark.provider([EC2Provider, AzureProvider, GCEProvider, OpenStackProvider], scope='module')] @pytest.fixture(scope='module') def elements_collection(setup_provider_modscope, appliance, provider): elements_collection_ = appliance.collections.network_topology_elements wait_for(elements_collection_.all, timeout=10) yield elements_collection_ provider.delete_if_exists(cancel=False) provider.wait_for_delete() @test_requirements.filtering def test_topology_search(request, elements_collection): """Testing search functionality in Topology view. Metadata: test_flag: sdn Polarion: assignee: anikifor casecomponent: WebUI initialEstimate: 1/4h """ elements = elements_collection.all() logger.info(str(elements)) element_to_search = random.choice(elements) search_term = element_to_search.name[:len(element_to_search.name) // 2] elements_collection.search(search_term) request.addfinalizer(elements_collection.clear_search) for element in elements: logger.info(str(element)) if search_term in element.name: assert not element.is_opaqued, ( 'Element should be not opaqued. Search: "{}", found: "{}"'.format( search_term, element.name) ) else: assert element.is_opaqued, ( 'Element should be opaqued. search: "{}", found: "{}"'.format( search_term, element.name) ) @test_requirements.sdn def test_topology_toggle_display(elements_collection): """Testing display functionality in Topology view. Metadata: test_flag: sdn Polarion: assignee: mmojzis casecomponent: WebUI initialEstimate: 1/4h """ vis_terms = {True: 'Visible', False: 'Hidden'} for state in (True, False): for legend in elements_collection.legends: if state: elements_collection.disable_legend(legend) else: elements_collection.enable_legend(legend) for element in elements_collection.all(): assert ( element.type != ''.join(legend.split()).rstrip('s') or element.is_displayed != state ), ( 'Element is {} but should be {} since "{}" display is currently {}'.format( vis_terms[not state], vis_terms[state], legend, {True: 'on', False: 'off'}[state]) )
izapolsk/integration_tests
cfme/tests/networks/test_sdn_topology.py
cfme/fixtures/datafile.py
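A hedged usage sketch for the datafile fixture above, following its own docstring; the test module location, file names, and replacement keys are hypothetical. A plain call resolves relative to the test module's data dir, a leading slash resolves from the data-dir root, and passing replacements returns a NamedTemporaryFile with the string.Template substitutions applied.

# Hypothetical test module: cfme/tests/subdir/test_example.py
def test_datafile_usage(datafile):
    # Loads data/subdir/test_example/plain.txt (hypothetical file name).
    plain = datafile('plain.txt')
    contents = plain.read()

    # Leading slash: loads data/common/shared.txt, ignoring the test module path.
    shared = datafile('/common/shared.txt')

    # Template form: returns a NamedTemporaryFile with $hostname interpolated.
    rendered = datafile('template.txt', replacements={'hostname': 'example.com'})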
import attr from riggerlib import recursive_update from cfme.cloud.instance import Instance from cfme.cloud.instance import InstanceCollection @attr.s class GCEInstance(Instance): # CFME & provider power control options START = "Start" POWER_ON = START # For compatibility with the infra objects. STOP = "Stop" DELETE = "Delete" TERMINATE = 'Delete' # CFME-only power control options SOFT_REBOOT = "Soft Reboot" # Provider-only power control options RESTART = "Restart" # CFME power states STATE_ON = "on" STATE_OFF = "off" STATE_SUSPENDED = "suspended" STATE_TERMINATED = "terminated" STATE_ARCHIVED = "archived" STATE_UNKNOWN = "unknown" @property def ui_powerstates_available(self): return { 'on': [self.STOP, self.SOFT_REBOOT, self.TERMINATE], 'off': [self.START, self.TERMINATE]} @property def ui_powerstates_unavailable(self): return { 'on': [self.START], 'off': [self.STOP, self.SOFT_REBOOT]} @property def vm_default_args(self): """Represents dictionary used for Vm/Instance provision with GCE mandatory default args""" inst_args = super(GCEInstance, self).vm_default_args provisioning = self.provider.data['provisioning'] inst_args['properties']['boot_disk_size'] = provisioning.get('boot_disk_size', '10 GB') return inst_args @property def vm_default_args_rest(self): inst_args = super(GCEInstance, self).vm_default_args_rest provisioning = self.provider.data['provisioning'] recursive_update(inst_args, { 'vm_fields': { 'boot_disk_size': provisioning['boot_disk_size'].replace(' ', '.')}}) return inst_args @attr.s class GCEInstanceCollection(InstanceCollection): ENTITY = GCEInstance
import random import pytest from cfme import test_requirements from cfme.cloud.provider.azure import AzureProvider from cfme.cloud.provider.ec2 import EC2Provider from cfme.cloud.provider.gce import GCEProvider from cfme.cloud.provider.openstack import OpenStackProvider from cfme.utils.log import logger from cfme.utils.wait import wait_for pytestmark = [pytest.mark.provider([EC2Provider, AzureProvider, GCEProvider, OpenStackProvider], scope='module')] @pytest.fixture(scope='module') def elements_collection(setup_provider_modscope, appliance, provider): elements_collection_ = appliance.collections.network_topology_elements wait_for(elements_collection_.all, timeout=10) yield elements_collection_ provider.delete_if_exists(cancel=False) provider.wait_for_delete() @test_requirements.filtering def test_topology_search(request, elements_collection): """Testing search functionality in Topology view. Metadata: test_flag: sdn Polarion: assignee: anikifor casecomponent: WebUI initialEstimate: 1/4h """ elements = elements_collection.all() logger.info(str(elements)) element_to_search = random.choice(elements) search_term = element_to_search.name[:len(element_to_search.name) // 2] elements_collection.search(search_term) request.addfinalizer(elements_collection.clear_search) for element in elements: logger.info(str(element)) if search_term in element.name: assert not element.is_opaqued, ( 'Element should be not opaqued. Search: "{}", found: "{}"'.format( search_term, element.name) ) else: assert element.is_opaqued, ( 'Element should be opaqued. search: "{}", found: "{}"'.format( search_term, element.name) ) @test_requirements.sdn def test_topology_toggle_display(elements_collection): """Testing display functionality in Topology view. Metadata: test_flag: sdn Polarion: assignee: mmojzis casecomponent: WebUI initialEstimate: 1/4h """ vis_terms = {True: 'Visible', False: 'Hidden'} for state in (True, False): for legend in elements_collection.legends: if state: elements_collection.disable_legend(legend) else: elements_collection.enable_legend(legend) for element in elements_collection.all(): assert ( element.type != ''.join(legend.split()).rstrip('s') or element.is_displayed != state ), ( 'Element is {} but should be {} since "{}" display is currently {}'.format( vis_terms[not state], vis_terms[state], legend, {True: 'on', False: 'off'}[state]) )
izapolsk/integration_tests
cfme/tests/networks/test_sdn_topology.py
cfme/cloud/instance/gce.py
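A small hedged sketch of the boot-disk-size handling in the GCEInstance record above: the UI provisioning args keep the human-readable size from the provider yaml, while vm_default_args_rest dots the space for the REST payload. The sample value is assumed for illustration.

# Value as it might appear under provisioning.boot_disk_size in the provider yaml (assumed).
boot_disk_size = '10 GB'

# UI default args keep the original string; the REST payload replaces the space with a dot.
ui_value = boot_disk_size
rest_value = boot_disk_size.replace(' ', '.')

assert ui_value == '10 GB'
assert rest_value == '10.GB'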
"""Module handling report menus contents""" from contextlib import contextmanager import attr from navmazing import NavigateToAttribute from widgetastic.widget import Text from widgetastic_patternfly import Button from cfme.intelligence.reports import CloudIntelReportsView from cfme.intelligence.reports import ReportsMultiBoxSelect from cfme.modeling.base import BaseCollection from cfme.modeling.base import BaseEntity from cfme.utils.appliance.implementations.ui import CFMENavigateStep from cfme.utils.appliance.implementations.ui import navigate_to from cfme.utils.appliance.implementations.ui import navigator from widgetastic_manageiq import FolderManager from widgetastic_manageiq import ManageIQTree class AllReportMenusView(CloudIntelReportsView): title = Text("#explorer_title_text") reports_tree = ManageIQTree("menu_roles_treebox") @property def is_displayed(self): return ( self.in_intel_reports and self.title.text == "All EVM Groups" and self.edit_report_menus.is_opened and self.edit_report_menus.tree.currently_selected == ["All EVM Groups"] ) class EditReportMenusView(AllReportMenusView): # Buttons save_button = Button("Save") reset_button = Button("Reset") default_button = Button("Default") cancel_button = Button("Cancel") commit_button = Button("Commit") discard_button = Button("Discard") manager = FolderManager(".//div[@id='folder_lists']/table") report_select = ReportsMultiBoxSelect( move_into="Move selected reports right", move_from="Move selected reports left", available_items="available_reports", chosen_items="selected_reports" ) @property def is_displayed(self): return ( self.in_intel_reports and self.title.text == 'Editing EVM Group "{}"'.format(self.context["object"].group) and self.edit_report_menus.is_opened and self.edit_report_menus.tree.currently_selected == [ "All EVM Groups", self.context["object"].group ] ) @attr.s class ReportMenu(BaseEntity): """ This is a fake class mainly needed for navmazing navigation. """ group = None def go_to_group(self, group_name): self.group = group_name view = navigate_to(self, "Edit") assert view.is_displayed return view def get_folders(self, group): """Returns list of folders for given user group. Args: group: User group to check. """ view = self.go_to_group(group) view.reports_tree.click_path("Top Level") fields = view.manager.fields view.discard_button.click() return fields def get_subfolders(self, group, folder): """Returns list of sub-folders for given user group and folder. Args: group: User group to check. folder: Folder to read. """ view = self.go_to_group(group) view.reports_tree.click_path("Top Level", folder) fields = view.manager.fields view.discard_button.click() return fields def _action(self, action, manager, folder_name): with manager as folder_manager: getattr(folder_manager, action)(folder_name) def add_folder(self, group, folder): """Adds a folder under top-level. Args: group: User group. folder: Name of the new folder. """ self._action("add", self.manage_folder(group), folder) def add_subfolder(self, group, folder, subfolder): """Adds a subfolder under specified folder. Args: group: User group. folder: Name of the folder. subfolder: Name of the new subfolder. """ self._action("add", self.manage_folder(group, folder), subfolder) def remove_folder(self, group, folder): """Removes a folder under top-level. Args: group: User group. folder: Name of the folder. """ self._action("delete", self.manage_folder(group), folder) def remove_subfolder(self, group, folder, subfolder): """Removes a subfolder under specified folder. 
Args: group: User group. folder: Name of the folder. subfolder: Name of the subfolder. """ self._action("delete", self.manage_folder(group, folder), subfolder) def reset_to_default(self, group): """Clicks the `Default` button. Args: group: Group to set to Default """ view = self.go_to_group(group) view.default_button.click() view.save_button.click() flash_view = self.create_view(AllReportMenusView) assert flash_view.flash.assert_message( 'Report Menu for role "{}" was saved'.format(group) ) @contextmanager def manage_subfolder(self, group, folder, subfolder): """Context manager to use when modifying the subfolder contents. You can use manager's :py:meth:`FolderManager.bail_out` classmethod to end and discard the changes done inside the with block. Args: group: User group. folder: Parent folder name. subfolder: Subfolder name to manage. Returns: Context-managed :py:class: `widgetastic_manageiq.MultiBoxSelect` instance """ view = self.go_to_group(group) view.reports_tree.click_path("Top Level", folder, subfolder) try: yield view.report_select except FolderManager._BailOut: view.discard_button.click() except Exception: # In case of any exception, nothing will be saved view.discard_button.click() raise # And reraise the exception else: # If no exception happens, save! view.commit_button.click() view.save_button.click() flash_view = self.create_view(AllReportMenusView) flash_view.flash.assert_message( 'Report Menu for role "{}" was saved'.format(group) ) @contextmanager def manage_folder(self, group, folder=None): """Context manager to use when modifying the folder contents. You can use manager's :py:meth:`FolderManager.bail_out` classmethod to end and discard the changes done inside the with block. This context manager does not give the manager as a value to the with block so you have to import and use the :py:class:`FolderManager` class manually. Args: group: User group. folder: Which folder to manage. If None, top-level will be managed. Returns: Context-managed :py:class:`widgetastic_manageiq.FolderManager` instance """ view = self.go_to_group(group) if folder is None: view.reports_tree.click_path("Top Level") else: view.reports_tree.click_path("Top Level", folder) try: yield view.manager except FolderManager._BailOut: view.manager.discard() except Exception: # In case of any exception, nothing will be saved view.manager.discard() raise # And reraise the exception else: # If no exception happens, save! view.manager.commit() view.save_button.click() flash_view = self.create_view(AllReportMenusView) flash_view.flash.assert_message( 'Report Menu for role "{}" was saved'.format(group) ) def move_reports(self, group, folder, subfolder, *reports): """ Moves a list of reports to a given menu Args: group: User group folder: Parent of the subfolder where reports are to be moved. subfolder: Subfolder under which the reports are to be moved. reports: List of reports that are to be moved. """ reports = list(reports) cancel_view = "" with self.manage_subfolder(group, folder, subfolder) as selected_menu: selected_options = selected_menu.parent_view.report_select.all_options diff = set(selected_options) & set(reports) if diff and (len(diff) == len(reports)): cancel_view = self.create_view(AllReportMenusView) # If all the reports to be moved are already present, raise an exception to exit. raise FolderManager._BailOut # fill method replaces all the options in all_options with the value passed as argument # We do not want to replace any value, we just want to move the new reports to a given # menu. 
This is a work-around for that purpose. reports.extend(selected_options) selected_menu.parent_view.report_select.fill(reports) if cancel_view: cancel_view.flash.assert_message( 'Edit of Report Menu for role "{}" was cancelled by the user'.format( group ) ) @attr.s class ReportMenusCollection(BaseCollection): """Collection object for the :py:class:'cfme.intelligence.reports.ReportMenu'.""" ENTITY = ReportMenu @navigator.register(ReportMenu, "Edit") class EditReportMenus(CFMENavigateStep): VIEW = EditReportMenusView prerequisite = NavigateToAttribute( "appliance.collections.intel_report_menus", "All" ) def step(self, *args, **kwargs): self.prerequisite_view.edit_report_menus.tree.click_path( "All EVM Groups", self.obj.group ) @navigator.register(ReportMenusCollection, "All") class ReportMenus(CFMENavigateStep): VIEW = AllReportMenusView prerequisite = NavigateToAttribute("appliance.server", "CloudIntelReports") def step(self, *args, **kwargs): self.prerequisite_view.edit_report_menus.tree.click_path("All EVM Groups")
import random import pytest from cfme import test_requirements from cfme.cloud.provider.azure import AzureProvider from cfme.cloud.provider.ec2 import EC2Provider from cfme.cloud.provider.gce import GCEProvider from cfme.cloud.provider.openstack import OpenStackProvider from cfme.utils.log import logger from cfme.utils.wait import wait_for pytestmark = [pytest.mark.provider([EC2Provider, AzureProvider, GCEProvider, OpenStackProvider], scope='module')] @pytest.fixture(scope='module') def elements_collection(setup_provider_modscope, appliance, provider): elements_collection_ = appliance.collections.network_topology_elements wait_for(elements_collection_.all, timeout=10) yield elements_collection_ provider.delete_if_exists(cancel=False) provider.wait_for_delete() @test_requirements.filtering def test_topology_search(request, elements_collection): """Testing search functionality in Topology view. Metadata: test_flag: sdn Polarion: assignee: anikifor casecomponent: WebUI initialEstimate: 1/4h """ elements = elements_collection.all() logger.info(str(elements)) element_to_search = random.choice(elements) search_term = element_to_search.name[:len(element_to_search.name) // 2] elements_collection.search(search_term) request.addfinalizer(elements_collection.clear_search) for element in elements: logger.info(str(element)) if search_term in element.name: assert not element.is_opaqued, ( 'Element should be not opaqued. Search: "{}", found: "{}"'.format( search_term, element.name) ) else: assert element.is_opaqued, ( 'Element should be opaqued. search: "{}", found: "{}"'.format( search_term, element.name) ) @test_requirements.sdn def test_topology_toggle_display(elements_collection): """Testing display functionality in Topology view. Metadata: test_flag: sdn Polarion: assignee: mmojzis casecomponent: WebUI initialEstimate: 1/4h """ vis_terms = {True: 'Visible', False: 'Hidden'} for state in (True, False): for legend in elements_collection.legends: if state: elements_collection.disable_legend(legend) else: elements_collection.enable_legend(legend) for element in elements_collection.all(): assert ( element.type != ''.join(legend.split()).rstrip('s') or element.is_displayed != state ), ( 'Element is {} but should be {} since "{}" display is currently {}'.format( vis_terms[not state], vis_terms[state], legend, {True: 'on', False: 'off'}[state]) )
izapolsk/integration_tests
cfme/tests/networks/test_sdn_topology.py
cfme/intelligence/reports/menus.py
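A hedged usage sketch for the ReportMenu helpers defined above. The group and folder names are hypothetical, and instantiating the entity with no arguments is an assumption; the intel_report_menus collection attribute and the manager's add() call follow the navigator registration and the _action helper shown in the record.

def reorganize_reports(appliance):
    # Collection attribute as registered for navigation above; no-arg instantiate is assumed.
    menu = appliance.collections.intel_report_menus.instantiate()

    # Add a top-level folder, then a subfolder beneath it (names are hypothetical).
    menu.add_folder('EvmGroup-administrator', 'My Reports')
    menu.add_subfolder('EvmGroup-administrator', 'My Reports', 'Daily')

    # Direct use of the context manager: changes are committed and saved on a clean
    # exit, or discarded if FolderManager._BailOut is raised inside the block.
    with menu.manage_folder('EvmGroup-administrator', 'My Reports') as manager:
        manager.add('Weekly')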
import attr import importscan import sentaku from cfme.generic_objects.definition.button_groups import GenericObjectButtonGroupsCollection from cfme.generic_objects.definition.button_groups import GenericObjectButtonsCollection from cfme.generic_objects.instance import GenericObjectInstanceCollection from cfme.modeling.base import BaseCollection from cfme.modeling.base import BaseEntity from cfme.utils.update import Updateable @attr.s class GenericObjectDefinition(BaseEntity, Updateable, sentaku.modeling.ElementMixin): """Generic Objects Definition class to context switch between UI and REST. Read/Update/Delete functionality. """ _collections = { 'generic_objects': GenericObjectInstanceCollection, 'generic_object_groups_buttons': GenericObjectButtonGroupsCollection, 'generic_object_buttons': GenericObjectButtonsCollection } update = sentaku.ContextualMethod() delete = sentaku.ContextualMethod() exists = sentaku.ContextualProperty() add_button = sentaku.ContextualMethod() add_button_group = sentaku.ContextualMethod() generic_objects = sentaku.ContextualProperty() generic_object_buttons = sentaku.ContextualProperty() instance_count = sentaku.ContextualProperty() name = attr.ib() description = attr.ib() attributes = attr.ib(default=None) # e.g. {'address': 'string'} associations = attr.ib(default=None) # e.g. {'services': 'Service'} methods = attr.ib(default=None) # e.g. ['method1', 'method2'] custom_image_file_path = attr.ib(default=None) rest_response = attr.ib(default=None, init=False) @attr.s class GenericObjectDefinitionCollection(BaseCollection, sentaku.modeling.ElementMixin): ENTITY = GenericObjectDefinition create = sentaku.ContextualMethod() all = sentaku.ContextualMethod() from cfme.generic_objects.definition import rest, ui # NOQA last for import cycles importscan.scan(rest) importscan.scan(ui)
import random import pytest from cfme import test_requirements from cfme.cloud.provider.azure import AzureProvider from cfme.cloud.provider.ec2 import EC2Provider from cfme.cloud.provider.gce import GCEProvider from cfme.cloud.provider.openstack import OpenStackProvider from cfme.utils.log import logger from cfme.utils.wait import wait_for pytestmark = [pytest.mark.provider([EC2Provider, AzureProvider, GCEProvider, OpenStackProvider], scope='module')] @pytest.fixture(scope='module') def elements_collection(setup_provider_modscope, appliance, provider): elements_collection_ = appliance.collections.network_topology_elements wait_for(elements_collection_.all, timeout=10) yield elements_collection_ provider.delete_if_exists(cancel=False) provider.wait_for_delete() @test_requirements.filtering def test_topology_search(request, elements_collection): """Testing search functionality in Topology view. Metadata: test_flag: sdn Polarion: assignee: anikifor casecomponent: WebUI initialEstimate: 1/4h """ elements = elements_collection.all() logger.info(str(elements)) element_to_search = random.choice(elements) search_term = element_to_search.name[:len(element_to_search.name) // 2] elements_collection.search(search_term) request.addfinalizer(elements_collection.clear_search) for element in elements: logger.info(str(element)) if search_term in element.name: assert not element.is_opaqued, ( 'Element should be not opaqued. Search: "{}", found: "{}"'.format( search_term, element.name) ) else: assert element.is_opaqued, ( 'Element should be opaqued. search: "{}", found: "{}"'.format( search_term, element.name) ) @test_requirements.sdn def test_topology_toggle_display(elements_collection): """Testing display functionality in Topology view. Metadata: test_flag: sdn Polarion: assignee: mmojzis casecomponent: WebUI initialEstimate: 1/4h """ vis_terms = {True: 'Visible', False: 'Hidden'} for state in (True, False): for legend in elements_collection.legends: if state: elements_collection.disable_legend(legend) else: elements_collection.enable_legend(legend) for element in elements_collection.all(): assert ( element.type != ''.join(legend.split()).rstrip('s') or element.is_displayed != state ), ( 'Element is {} but should be {} since "{}" display is currently {}'.format( vis_terms[not state], vis_terms[state], legend, {True: 'on', False: 'off'}[state]) )
izapolsk/integration_tests
cfme/tests/networks/test_sdn_topology.py
cfme/generic_objects/definition/__init__.py
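A hedged sketch of creating a definition through the sentaku-backed collection in the record above. The collection attribute name on the appliance and the definition name are assumptions; the attribute, association, and method values mirror the inline comments on the attr.ib fields.

def create_example_definition(appliance):
    # Collection attribute name is assumed; create() is the ContextualMethod declared above.
    collection = appliance.collections.generic_object_definitions
    definition = collection.create(
        name='example_definition',              # hypothetical
        description='Example generic object definition',
        attributes={'address': 'string'},       # per the inline comment
        associations={'services': 'Service'},   # per the inline comment
        methods=['method1', 'method2'],         # per the inline comment
    )
    assert definition.exists
    return definition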
from os import path from urllib.error import URLError import attr from cached_property import cached_property from wrapanapi.systems.container import Openshift from cfme.common import Taggable from cfme.common.provider import DefaultEndpoint from cfme.common.vm_console import ConsoleMixin from cfme.containers.provider import ContainersProvider from cfme.containers.provider import ContainersProviderDefaultEndpoint from cfme.containers.provider import ContainersProviderEndpointsForm from cfme.control.explorer.alert_profiles import NodeAlertProfile from cfme.control.explorer.alert_profiles import ProviderAlertProfile from cfme.utils import ssh from cfme.utils.appliance.implementations.ui import navigate_to from cfme.utils.log import logger from cfme.utils.ocp_cli import OcpCli from cfme.utils.varmeth import variable from cfme.utils.wait import TimedOutError from cfme.utils.wait import wait_for class CustomAttribute(object): def __init__(self, name, value, field_type=None, href=None): self.name = name self.value = value self.field_type = field_type self.href = href class OpenshiftDefaultEndpoint(ContainersProviderDefaultEndpoint): """Represents Openshift default endpoint""" @staticmethod def get_ca_cert(connection_info): """Getting OpenShift's certificate from the master machine. Args: connection_info (dict): username, password and hostname for OCP returns: certificate's content. """ with ssh.SSHClient(**connection_info) as provider_ssh: _, stdout, _ = provider_ssh.exec_command("cat /etc/origin/master/ca.crt") return str("".join(stdout.readlines())) class ServiceBasedEndpoint(DefaultEndpoint): @property def view_value_mapping(self): out = {'hostname': self.hostname, 'api_port': self.api_port, 'sec_protocol': self.sec_protocol} if out['sec_protocol'] and self.sec_protocol.lower() == 'ssl trusting custom ca': out['trusted_ca_certificates'] = OpenshiftDefaultEndpoint.get_ca_cert( {"username": self.ssh_creds.principal, "password": self.ssh_creds.secret, "hostname": self.master_hostname}) return out class VirtualizationEndpoint(ServiceBasedEndpoint): """Represents virtualization Endpoint""" name = 'virtualization' @property def view_value_mapping(self): # values like host, port are taken from Default endpoint # and not editable in Virtualization endpoint, only token can be added return {'kubevirt_token': self.token} class MetricsEndpoint(ServiceBasedEndpoint): """Represents metrics Endpoint""" name = 'metrics' class AlertsEndpoint(ServiceBasedEndpoint): """Represents Alerts Endpoint""" name = 'alerts' @attr.s(cmp=False) class OpenshiftProvider(ContainersProvider, ConsoleMixin, Taggable): num_route = ['num_route'] STATS_TO_MATCH = ContainersProvider.STATS_TO_MATCH + num_route type_name = "openshift" mgmt_class = Openshift db_types = ["Openshift::ContainerManager"] endpoints_form = ContainersProviderEndpointsForm settings_key = 'ems_openshift' ems_pretty_name = 'OpenShift Container Platform' http_proxy = attr.ib(default=None) adv_http = attr.ib(default=None) adv_https = attr.ib(default=None) no_proxy = attr.ib(default=None) image_repo = attr.ib(default=None) image_reg = attr.ib(default=None) image_tag = attr.ib(default=None) cve_loc = attr.ib(default=None) virt_type = attr.ib(default=None) provider = attr.ib(default=None) def create(self, **kwargs): # Enable alerts collection before adding the provider to avoid missing active # alert after adding the provider # For more info: https://bugzilla.redhat.com/show_bug.cgi?id=1514950 if getattr(self, "alerts_type") == "Prometheus": alert_profiles = 
self.appliance.collections.alert_profiles provider_profile = alert_profiles.instantiate(ProviderAlertProfile, "Prometheus Provider Profile") node_profile = alert_profiles.instantiate(NodeAlertProfile, "Prometheus node Profile") for profile in [provider_profile, node_profile]: profile.assign_to("The Enterprise") super(OpenshiftProvider, self).create(**kwargs) @cached_property def cli(self): return OcpCli(self) def href(self): return self.appliance.rest_api.collections.providers\ .find_by(name=self.name).resources[0].href @property def view_value_mapping(self): mapping = {'name': self.name, 'zone': self.zone, 'prov_type': ('OpenShift Container Platform' if self.appliance.is_downstream else 'OpenShift')} mapping['metrics_type'] = self.metrics_type mapping['alerts_type'] = self.alerts_type mapping['proxy'] = { 'http_proxy': self.http_proxy } mapping['advanced'] = { 'adv_http': self.adv_http, 'adv_https': self.adv_https, 'no_proxy': self.no_proxy, 'image_repo': self.image_repo, 'image_reg': self.image_reg, 'image_tag': self.image_tag, 'cve_loc': self.cve_loc } mapping['virt_type'] = self.virt_type return mapping @property def is_provider_enabled(self): return self.appliance.rest_api.collections.providers.get(name=self.name).enabled @variable(alias='db') def num_route(self): return self._num_db_generic('container_routes') @num_route.variant('ui') def num_route_ui(self): view = navigate_to(self, "Details") return int(view.entities.summary("Relationships").get_text_of('Container Routes')) @variable(alias='db') def num_template(self): return self._num_db_generic('container_templates') @num_template.variant('ui') def num_template_ui(self): view = navigate_to(self, "Details") return int(view.entities.summary("Relationships").get_text_of("Container Templates")) @classmethod def from_config(cls, prov_config, prov_key, appliance=None): appliance = appliance or cls.appliance endpoints = {} token_creds = cls.process_credential_yaml_key(prov_config['credentials'], cred_type='token') master_hostname = prov_config['endpoints']['default'].hostname ssh_creds = cls.process_credential_yaml_key(prov_config['ssh_creds']) for endp in prov_config['endpoints']: # Add ssh_password for each endpoint, so get_ca_cert # will be able to get SSL cert form OCP for each endpoint setattr(prov_config['endpoints'][endp], "master_hostname", master_hostname) setattr(prov_config['endpoints'][endp], "ssh_creds", ssh_creds) if OpenshiftDefaultEndpoint.name == endp: prov_config['endpoints'][endp]['token'] = token_creds.token endpoints[endp] = OpenshiftDefaultEndpoint(**prov_config['endpoints'][endp]) elif MetricsEndpoint.name == endp: endpoints[endp] = MetricsEndpoint(**prov_config['endpoints'][endp]) elif AlertsEndpoint.name == endp: endpoints[endp] = AlertsEndpoint(**prov_config['endpoints'][endp]) else: raise Exception('Unsupported endpoint type "{}".'.format(endp)) settings = prov_config.get('settings', {}) advanced = settings.get('advanced', {}) http_proxy = settings.get('proxy', {}).get('http_proxy') adv_http, adv_https, no_proxy, image_repo, image_reg, image_tag, cve_loc = [ advanced.get(field) for field in ('adv_http', 'adv_https', 'no_proxy', 'image_repo', 'image_reg', 'image_tag', 'cve_loc') ] return appliance.collections.containers_providers.instantiate( prov_class=cls, name=prov_config.get('name'), key=prov_key, zone=prov_config.get('server_zone'), metrics_type=prov_config.get('metrics_type'), alerts_type=prov_config.get('alerts_type'), endpoints=endpoints, provider_data=prov_config, http_proxy=http_proxy, 
adv_http=adv_http, adv_https=adv_https, no_proxy=no_proxy, image_repo=image_repo, image_reg=image_reg, image_tag=image_tag, cve_loc=cve_loc, virt_type=prov_config.get('virt_type')) def custom_attributes(self): """returns custom attributes""" response = self.appliance.rest_api.get( path.join(self.href(), 'custom_attributes')) out = [] for attr_dict in response['resources']: attr = self.appliance.rest_api.get(attr_dict['href']) out.append( CustomAttribute( attr['name'], attr['value'], (attr['field_type'] if 'field_type' in attr else None), attr_dict['href'] ) ) return out def add_custom_attributes(self, *custom_attributes): """Adding static custom attributes to provider. Args: custom_attributes: The custom attributes to add. returns: response. """ if not custom_attributes: raise TypeError('{} takes at least 1 argument.' .format(self.add_custom_attributes.__name__)) for c_attr in custom_attributes: if not isinstance(c_attr, CustomAttribute): raise TypeError('All arguments should be of type {}. ({} != {})' .format(CustomAttribute, type(c_attr), CustomAttribute)) payload = { "action": "add", "resources": [{ "name": ca.name, "value": str(ca.value) } for ca in custom_attributes]} for i, fld_tp in enumerate([c_attr.field_type for c_attr in custom_attributes]): if fld_tp: payload['resources'][i]['field_type'] = fld_tp return self.appliance.rest_api.post( path.join(self.href(), 'custom_attributes'), **payload) def edit_custom_attributes(self, *custom_attributes): """Editing static custom attributes in provider. Args: custom_attributes: The custom attributes to edit. returns: response. """ if not custom_attributes: raise TypeError('{} takes at least 1 argument.' .format(self.edit_custom_attributes.__name__)) for c_attr in custom_attributes: if not isinstance(c_attr, CustomAttribute): raise TypeError('All arguments should be of type {}. ({} != {})' .format(CustomAttribute, type(c_attr), CustomAttribute)) attribs = self.custom_attributes() payload = { "action": "edit", "resources": [{ "href": [c_attr for c_attr in attribs if c_attr.name == ca.name][-1].href, "value": ca.value } for ca in custom_attributes]} return self.appliance.rest_api.post( path.join(self.href(), 'custom_attributes'), **payload) def delete_custom_attributes(self, *custom_attributes): """Deleting static custom attributes from provider. Args: custom_attributes: The custom attributes to delete. (Could be also names (str)) Returns: response. """ names = [] for c_attr in custom_attributes: attr_type = type(c_attr) if attr_type in (str, CustomAttribute): names.append(c_attr if attr_type is str else c_attr.name) else: raise TypeError('Type of arguments should be either' 'str or CustomAttribute. 
({} not in [str, CustomAttribute])' .format(type(c_attr))) attribs = self.custom_attributes() if not names: names = [attrib.name for attrib in attribs] payload = { "action": "delete", "resources": [{ "href": attrib.href, } for attrib in attribs if attrib.name in names]} return self.appliance.rest_api.post( path.join(self.href(), 'custom_attributes'), **payload) def sync_ssl_certificate(self): """ fixture which sync SSL certificate between CFME and OCP Args: provider (OpenShiftProvider): OCP system to sync cert from appliance (IPAppliance): CFME appliance to sync cert with Returns: None """ def _copy_certificate(): is_succeed = True try: # Copy certificate to the appliance provider_ssh.get_file("/etc/origin/master/ca.crt", "/tmp/ca.crt") appliance_ssh.put_file("/tmp/ca.crt", "/etc/pki/ca-trust/source/anchors/{crt}".format( crt=cert_name)) except URLError: logger.debug("Fail to deploy certificate from Openshift to CFME") is_succeed = False finally: return is_succeed provider_ssh = self.cli.ssh_client appliance_ssh = self.appliance.ssh_client() # Connection to the applince in case of dead connection if not appliance_ssh.connected: appliance_ssh.connect() # Checking if SSL is already configured between appliance and provider, # by send a HTTPS request (using SSL) from the appliance to the provider, # hiding the output and sending back the return code of the action _, stdout, stderr = \ appliance_ssh.exec_command( "curl https://{provider}:8443 -sS > /dev/null;echo $?".format( provider=self.provider_data.hostname)) # Do in case of failure (return code is not 0) if stdout.readline().replace('\n', "") != "0": cert_name = "{provider_name}.ca.crt".format( provider_name=self.provider_data.hostname.split(".")[0]) wait_for(_copy_certificate, num_sec=600, delay=30, message="Copy certificate from OCP to CFME") appliance_ssh.exec_command("update-ca-trust") # restarting evemserverd to apply the new SSL certificate self.appliance.evmserverd.restart() self.appliance.evmserverd.wait_for_running() self.appliance.wait_for_web_ui() def get_system_id(self): mgmt_systems_tbl = self.appliance.db.client['ext_management_systems'] return self.appliance.db.client.session.query(mgmt_systems_tbl).filter( mgmt_systems_tbl.name == self.name).first().id def get_metrics(self, **kwargs): """"Returns all the collected metrics for this provider Args: filters: list of dicts with column name and values e.g [{"resource_type": "Container"}, {"parent_ems_id": "1L"}] metrics_table: Metrics table name, there are few metrics table e.g metrics, metric_rollups, etc Returns: Query object with the relevant records """ filters = kwargs.get("filters", {}) metrics_table = kwargs.get("metrics_table", "metric_rollups") metrics_tbl = self.appliance.db.client[metrics_table] mgmt_system_id = self.get_system_id() logger.info("Getting metrics for {name} (parent_ems_id == {id})".format( name=self.name, id=mgmt_system_id)) if filters: logger.info("Filtering by: {f}".format(f=filters)) filters["parent_ems_id"] = mgmt_system_id return self.appliance.db.client.session.query(metrics_tbl).filter_by(**filters) def wait_for_collected_metrics(self, timeout="50m", table_name="metrics"): """Check the db if gathering collection data Args: timeout: timeout in minutes Return: Bool: is collected metrics count is greater than 0 """ def is_collected(): metrics_count = self.get_metrics(table=table_name).count() logger.info("Current metrics found count is {count}".format(count=metrics_count)) return metrics_count > 0 logger.info("Monitoring DB for metrics collection") 
result = True try: wait_for(is_collected, timeout=timeout, delay=30) except TimedOutError: logger.error( "Timeout exceeded, No metrics found in MIQ DB for the provider \"{name}\"".format( name=self.name)) result = False finally: return result def pause(self): """ Pause the OCP provider. Returns: API response. """ return self.appliance.rest_api.collections.providers.get(name=self.name).action.pause() def resume(self): """ Resume the OCP provider. Returns: API response. """ return self.appliance.rest_api.collections.providers.get(name=self.name).action.resume()
import random

import pytest

from cfme import test_requirements
from cfme.cloud.provider.azure import AzureProvider
from cfme.cloud.provider.ec2 import EC2Provider
from cfme.cloud.provider.gce import GCEProvider
from cfme.cloud.provider.openstack import OpenStackProvider
from cfme.utils.log import logger
from cfme.utils.wait import wait_for

pytestmark = [pytest.mark.provider([EC2Provider, AzureProvider, GCEProvider, OpenStackProvider],
                                   scope='module')]


@pytest.fixture(scope='module')
def elements_collection(setup_provider_modscope, appliance, provider):
    elements_collection_ = appliance.collections.network_topology_elements
    wait_for(elements_collection_.all, timeout=10)
    yield elements_collection_
    provider.delete_if_exists(cancel=False)
    provider.wait_for_delete()


@test_requirements.filtering
def test_topology_search(request, elements_collection):
    """Testing search functionality in Topology view.

    Metadata:
        test_flag: sdn

    Polarion:
        assignee: anikifor
        casecomponent: WebUI
        initialEstimate: 1/4h
    """
    elements = elements_collection.all()
    logger.info(str(elements))
    element_to_search = random.choice(elements)
    search_term = element_to_search.name[:len(element_to_search.name) // 2]
    elements_collection.search(search_term)
    request.addfinalizer(elements_collection.clear_search)
    for element in elements:
        logger.info(str(element))
        if search_term in element.name:
            assert not element.is_opaqued, (
                'Element should be not opaqued. Search: "{}", found: "{}"'.format(
                    search_term, element.name)
            )
        else:
            assert element.is_opaqued, (
                'Element should be opaqued. search: "{}", found: "{}"'.format(
                    search_term, element.name)
            )


@test_requirements.sdn
def test_topology_toggle_display(elements_collection):
    """Testing display functionality in Topology view.

    Metadata:
        test_flag: sdn

    Polarion:
        assignee: mmojzis
        casecomponent: WebUI
        initialEstimate: 1/4h
    """
    vis_terms = {True: 'Visible', False: 'Hidden'}
    for state in (True, False):
        for legend in elements_collection.legends:
            if state:
                elements_collection.disable_legend(legend)
            else:
                elements_collection.enable_legend(legend)
            for element in elements_collection.all():
                assert (
                    element.type != ''.join(legend.split()).rstrip('s') or
                    element.is_displayed != state
                ), (
                    'Element is {} but should be {} since "{}" display is currently {}'.format(
                        vis_terms[not state], vis_terms[state], legend,
                        {True: 'on', False: 'off'}[state])
                )
izapolsk/integration_tests
cfme/tests/networks/test_sdn_topology.py
cfme/containers/provider/openshift.py
"""An example config:: artifactor: log_dir: /home/test/workspace/cfme_tests/artiout per_run: test #test, run, None reuse_dir: True squash_exceptions: False threaded: False server_address: 127.0.0.1 server_port: 21212 server_enabled: True plugins: ``log_dir`` is the destination for all artifacts ``per_run`` denotes if the test artifacts should be group by run, test, or None ``reuse_dir`` if this is False and Artifactor comes across a dir that has already been used, it will die """ import atexit import os import subprocess from threading import RLock import diaper import pytest from artifactor import ArtifactorClient from cfme.fixtures.pytest_store import store from cfme.fixtures.pytest_store import write_line from cfme.markers.polarion import extract_polarion_ids from cfme.utils.appliance import find_appliance from cfme.utils.blockers import Blocker from cfme.utils.blockers import BZ from cfme.utils.conf import credentials from cfme.utils.conf import env from cfme.utils.log import logger from cfme.utils.net import net_check from cfme.utils.net import random_port from cfme.utils.wait import wait_for UNDER_TEST = False # set to true for artifactor using tests # Create a list of all our passwords for use with the sanitize request later in this module # Filter out all Nones as it will mess the output up. words = [word for word in {v.get('password') for v in credentials.values()} if word is not None] def get_test_idents(item): try: return item.location[2], item.location[0] except AttributeError: try: return item.fspath.strpath, None except AttributeError: return (None, None) def get_name(obj): return (getattr(obj, '_param_name', None) or getattr(obj, 'name', None) or str(obj)) class DummyClient(object): def fire_hook(self, *args, **kwargs): return def terminate(self): return def task_status(self): return def __bool__(self): # DummyClient is always False, # so it's easy to see if we have an artiactor client return False def get_client(art_config, pytest_config): if art_config and not UNDER_TEST: port = getattr(pytest_config.option, 'artifactor_port', None) or \ art_config.get('server_port') or random_port() pytest_config.option.artifactor_port = port art_config['server_port'] = port return ArtifactorClient( art_config['server_address'], art_config['server_port']) else: return DummyClient() def spawn_server(config, art_client): if store.slave_manager or UNDER_TEST: return None import subprocess cmd = ['miq-artifactor-server', '--port', str(art_client.port)] if config.getvalue('run_id'): cmd.append('--run-id') cmd.append(str(config.getvalue('run_id'))) proc = subprocess.Popen(cmd) return proc session_ver = None session_build = None session_stream = None session_fw_version = None def pytest_addoption(parser): parser.addoption("--run-id", action="store", default=None, help="A run id to assist in logging") @pytest.hookimpl(tryfirst=True) def pytest_configure(config): if config.getoption('--help'): return art_client = get_client( art_config=env.get('artifactor', {}), pytest_config=config) # just in case if not store.slave_manager: with diaper: atexit.register(shutdown, config) if art_client: config._art_proc = spawn_server(config, art_client) wait_for( net_check, func_args=[art_client.port, '127.0.0.1'], func_kwargs={'force': True}, num_sec=10, message="wait for artifactor to start") art_client.ready = True else: config._art_proc = None from cfme.utils.log import artifactor_handler artifactor_handler.artifactor = art_client if store.slave_manager: artifactor_handler.slaveid = store.slaveid 
config._art_client = art_client def fire_art_hook(config, hook, **hook_args): client = getattr(config, '_art_client', None) if client is None: assert UNDER_TEST, 'missing artifactor is only valid for inprocess tests' else: return client.fire_hook(hook, **hook_args) def fire_art_test_hook(node, hook, **hook_args): name, location = get_test_idents(node) return fire_art_hook( node.config, hook, test_name=name, test_location=location, **hook_args) @pytest.hookimpl(hookwrapper=True) def pytest_runtest_protocol(item): global session_ver global session_build global session_stream appliance = find_appliance(item) if not session_ver: session_ver = str(appliance.version) session_build = appliance.build session_stream = appliance.version.stream() if str(session_ver) not in session_build: session_build = "{}-{}".format(str(session_ver), session_build) session_fw_version = None try: proc = subprocess.Popen(['git', 'describe', '--tags'], stdout=subprocess.PIPE) proc.wait() session_fw_version = proc.stdout.read().strip() except Exception: pass # already set session_fw_version to None fire_art_hook( item.config, 'session_info', version=session_ver, build=session_build, stream=session_stream, fw_version=session_fw_version ) tier = item.get_closest_marker('tier') if tier: tier = tier.args[0] requirement = item.get_closest_marker('requirement') if requirement: requirement = requirement.args[0] param_dict = {} try: params = item.callspec.params param_dict = {p: get_name(v) for p, v in params.items()} except Exception: pass # already set param_dict ip = appliance.hostname # This pre_start_test hook is needed so that filedump is able to make get the test # object set up before the logger starts logging. As the logger fires a nested hook # to the filedumper, and we can't specify order inriggerlib. 
meta = item.get_closest_marker('meta') if meta and 'blockers' in meta.kwargs: blocker_spec = meta.kwargs['blockers'] blockers = [] for blocker in blocker_spec: if isinstance(blocker, int): blockers.append(BZ(blocker).url) else: blockers.append(Blocker.parse(blocker).url) else: blockers = [] fire_art_test_hook( item, 'pre_start_test', slaveid=store.slaveid, ip=ip) fire_art_test_hook( item, 'start_test', slaveid=store.slaveid, ip=ip, tier=tier, requirement=requirement, param_dict=param_dict, issues=blockers) yield def pytest_runtest_teardown(item, nextitem): name, location = get_test_idents(item) app = find_appliance(item) ip = app.hostname fire_art_test_hook( item, 'finish_test', slaveid=store.slaveid, ip=ip, wait_for_task=True) fire_art_test_hook(item, 'sanitize', words=words) jenkins_data = { 'build_url': os.environ.get('BUILD_URL'), 'build_number': os.environ.get('BUILD_NUMBER'), 'git_commit': os.environ.get('GIT_COMMIT'), 'job_name': os.environ.get('JOB_NAME') } param_dict = None try: caps = app.browser.widgetastic.selenium.capabilities param_dict = { 'browserName': caps.get('browserName', 'Unknown'), 'browserPlatform': caps.get('platformName', caps.get('platform', 'Unknown')), 'browserVersion': caps.get('browserVersion', caps.get('version', 'Unknown')) } except Exception: logger.exception("Couldn't grab browser env_vars") pass # already set param_dict fire_art_test_hook( item, 'ostriz_send', env_params=param_dict, slaveid=store.slaveid, polarion_ids=extract_polarion_ids(item), jenkins=jenkins_data) def pytest_runtest_logreport(report): if store.slave_manager: return # each node does its own reporting config = store.config # tech debt name, location = get_test_idents(report) xfail = hasattr(report, 'wasxfail') if hasattr(report, 'skipped'): if report.skipped: fire_art_hook( config, 'filedump', test_location=location, test_name=name, description="Short traceback", contents=report.longreprtext, file_type="short_tb", group_id="skipped") fire_art_hook( config, 'report_test', test_location=location, test_name=name, test_xfail=xfail, test_when=report.when, test_outcome=report.outcome, test_phase_duration=report.duration) fire_art_hook(config, 'build_report') @pytest.hookimpl(hookwrapper=True) def pytest_unconfigure(config): yield shutdown(config) lock = RLock() def shutdown(config): app = find_appliance(config, require=False) if app is not None: with lock: proc = config._art_proc if proc and proc.returncode is None: if not store.slave_manager: write_line('collecting artifacts') fire_art_hook(config, 'finish_session') if not store.slave_manager: config._art_client.terminate() proc.wait()
cfme/fixtures/artifactor_plugin.py
# -*- encoding: utf-8 -*-
from abjad import *


def test_indicatortools_Clef_middle_c_position_01():

    assert Clef('treble').middle_c_position == pitchtools.StaffPosition(-6)
    assert Clef('alto').middle_c_position == pitchtools.StaffPosition(0)
    assert Clef('tenor').middle_c_position == pitchtools.StaffPosition(2)
    assert Clef('bass').middle_c_position == pitchtools.StaffPosition(6)
    assert Clef('treble^8').middle_c_position == pitchtools.StaffPosition(-13)
    assert Clef('alto^15').middle_c_position == pitchtools.StaffPosition(-13)
    assert Clef('tenor_8').middle_c_position == pitchtools.StaffPosition(9)
    assert Clef('bass_15').middle_c_position == pitchtools.StaffPosition(19)
# -*- encoding: utf-8 -*-
import pytest
from abjad import *


def test_datastructuretools_CyclicTuplet___getslice___01():

    cyclic_tuple = datastructuretools.CyclicTuple(range(3))

    assert cyclic_tuple[:2] == (0, 1)
    assert cyclic_tuple[:10] == (0, 1, 2, 0, 1, 2, 0, 1, 2, 0)
    assert cyclic_tuple[2:10] == (2, 0, 1, 2, 0, 1, 2, 0)
mscuthbert/abjad
abjad/tools/datastructuretools/test/test_datastructuretools_CyclicTuplet___getslice__.py
abjad/tools/indicatortools/test/test_indicatortools_Clef_middle_c_position.py
# -*- encoding: utf-8 -*-
from abjad.tools import indicatortools
from abjad.tools import pitchtools
from abjad.tools import scoretools
from abjad.tools.topleveltools import iterate


def iterate_out_of_range_notes_and_chords(expr):
    '''Iterates notes and chords in `expr` outside traditional instrument
    ranges:

    ::

        >>> staff = Staff("c'8 r8 <d fs>8 r8")
        >>> violin = instrumenttools.Violin()
        >>> attach(violin, staff)

    ::

        >>> list(
        ...     instrumenttools.iterate_out_of_range_notes_and_chords(
        ...     staff))
        [Chord('<d fs>8')]

    Returns generator.
    '''
    from abjad.tools import instrumenttools
    prototype = (scoretools.Note, scoretools.Chord)
    for note_or_chord in iterate(expr).by_class(prototype):
        instrument = note_or_chord._get_effective(
            instrumenttools.Instrument)
        if instrument is None:
            message = 'no instrument found.'
            raise ValueError(message)
        if note_or_chord not in instrument.pitch_range:
            yield note_or_chord
abjad/tools/instrumenttools/iterate_out_of_range_notes_and_chords.py
# -*- encoding: utf-8 -*-
from abjad.tools.datastructuretools.TreeNode import TreeNode


class ReSTHorizontalRule(TreeNode):
    r'''A ReST horizontal rule.

    ::

        >>> rule = documentationtools.ReSTHorizontalRule()
        >>> rule
        ReSTHorizontalRule()

    ::

        >>> print(rule.rest_format)
        --------

    '''

    ### CLASS VARIABLES ###

    __documentation_section__ = 'reStructuredText'

    ### PRIVATE PROPERTIES ###

    @property
    def _rest_format_contributions(self):
        return ['--------']

    ### PUBLIC PROPERTIES ###

    @property
    def rest_format(self):
        r'''ReST format of ReSt horizontal rule.

        Returns text.
        '''
        return '\n'.join(self._rest_format_contributions)
abjad/tools/documentationtools/ReSTHorizontalRule.py
# -*- encoding: utf-8 -*-
from abjad import *


def test_pitchtools_NumberedPitch_pitch_number_01():

    assert pitchtools.NumberedPitch("cff''").pitch_number == 10
    assert pitchtools.NumberedPitch("ctqf''").pitch_number == 10.5
    assert pitchtools.NumberedPitch("cf''").pitch_number == 11
    assert pitchtools.NumberedPitch("cqf''").pitch_number == 11.5
    assert pitchtools.NumberedPitch("c''").pitch_number == 12
    assert pitchtools.NumberedPitch("cqs''").pitch_number == 12.5
    assert pitchtools.NumberedPitch("cs''").pitch_number == 13
    assert pitchtools.NumberedPitch("ctqs''").pitch_number == 13.5
    assert pitchtools.NumberedPitch("css''").pitch_number == 14
    assert pitchtools.NumberedPitch("d''").pitch_number == 14
abjad/tools/pitchtools/test/test_pitchtools_NumberedPitch_pitch_number.py
# -*- encoding: utf-8 -*- import functools from abjad.tools import durationtools from abjad.tools.schemetools.Scheme import Scheme @functools.total_ordering class SchemeMoment(Scheme): r'''A LilyPond scheme moment. Initializes with two integers: :: >>> moment = schemetools.SchemeMoment(1, 68) >>> moment SchemeMoment(1, 68) Scheme moments are immutable. ''' ### CLASS VARIABLES ### __slots__ = ( ) ### INITIALIZER ### def __init__(self, *args, **kwargs): if len(args) == 1 and durationtools.Duration.is_token(args[0]): args = durationtools.Duration(args[0]) elif len(args) == 1 and isinstance(args[0], type(self)): args = args[0].duration elif len(args) == 2 and \ isinstance(args[0], int) and isinstance(args[1], int): args = durationtools.Duration(args) elif len(args) == 0: args = durationtools.Duration((1, 4)) else: message = 'can not intialize {}: {!r}.' message = message.format(type(self).__name__, args) raise TypeError(message) Scheme.__init__(self, args, **kwargs) ### SPECIAL METHODS ### def __eq__(self, arg): r'''Is true when `arg` is a scheme moment with the same value as that of this scheme moment. :: >>> moment == schemetools.SchemeMoment(1, 68) True Otherwise false. >>> moment == schemetools.SchemeMoment(1, 54) False Returns boolean. ''' if isinstance(arg, type(self)): if self._value == arg._value: return True return False def __getnewargs__(self): r'''Gets new arguments. Returns tuple. ''' return (self._value,) def __hash__(self): r'''Hashes scheme moment. Required to be explicitly re-defined on Python 3 if __eq__ changes. Returns integer. ''' return super(SchemeMoment, self).__hash__() def __lt__(self, arg): r'''Is true when `arg` is a scheme moment with value greater than that of this scheme moment. :: >>> moment < schemetools.SchemeMoment(1, 32) True Otherwise false: :: >>> moment < schemetools.SchemeMoment(1, 78) False Returns boolean. ''' if isinstance(arg, type(self)): if self._value < arg._value: return True return False ### PRIVATE PROPERTIES ### @property def _formatted_value(self): numerator, denominator = self._value.numerator, self._value.denominator return '(ly:make-moment {} {})'.format(numerator, denominator) @property def _storage_format_specification(self): from abjad.tools import systemtools return systemtools.StorageFormatSpecification( self, positional_argument_values=( self._value.numerator, self._value.denominator, ), ) ### PUBLIC PROPERTIES ### @property def duration(self): r'''Duration of scheme moment. :: >>> scheme_moment = schemetools.SchemeMoment(1, 68) >>> scheme_moment.duration Duration(1, 68) Returns duration. ''' return self._value
abjad/tools/schemetools/SchemeMoment.py
# -*- encoding: utf-8 -*-
from abjad import *


def configure_lilypond_file(lilypond_file):
    r'''Configures LilyPond file.
    '''

    lilypond_file.global_staff_size = 8
    context_block = lilypondfiletools.ContextBlock(
        source_context_name=r'Staff \RemoveEmptyStaves',
        )
    override(context_block).vertical_axis_group.remove_first = True
    lilypond_file.layout_block.items.append(context_block)
    slash_separator = indicatortools.LilyPondCommand('slashSeparator')
    lilypond_file.paper_block.system_separator_markup = slash_separator
    bottom_margin = lilypondfiletools.LilyPondDimension(0.5, 'in')
    lilypond_file.paper_block.bottom_margin = bottom_margin
    top_margin = lilypondfiletools.LilyPondDimension(0.5, 'in')
    lilypond_file.paper_block.top_margin = top_margin
    left_margin = lilypondfiletools.LilyPondDimension(0.75, 'in')
    lilypond_file.paper_block.left_margin = left_margin
    right_margin = lilypondfiletools.LilyPondDimension(0.5, 'in')
    lilypond_file.paper_block.right_margin = right_margin
    paper_width = lilypondfiletools.LilyPondDimension(5.25, 'in')
    lilypond_file.paper_block.paper_width = paper_width
    paper_height = lilypondfiletools.LilyPondDimension(7.25, 'in')
    lilypond_file.paper_block.paper_height = paper_height
    lilypond_file.header_block.composer = markuptools.Markup('Arvo Pärt')
    title = 'Cantus in Memory of Benjamin Britten (1980)'
    lilypond_file.header_block.title = markuptools.Markup(title)
abjad/demos/part/configure_lilypond_file.py
# -*- encoding: utf-8 -*-
from abjad import *


def test_selectiontools_Selection__get_component_01():

    staff = Staff("abj: | 2/8 c'8 d'8 || 2/8 e'8 f'8 || 2/8 g'8 a'8 |")

    assert select(staff)._get_component(Measure, 0) is staff[0]
    assert select(staff)._get_component(Measure, 1) is staff[1]
    assert select(staff)._get_component(Measure, 2) is staff[2]


def test_selectiontools_Selection__get_component_02():

    staff = Staff("abj: | 2/8 c'8 d'8 || 2/8 e'8 f'8 || 2/8 g'8 a'8 |")

    assert select(staff)._get_component(Measure, -1) is staff[2]
    assert select(staff)._get_component(Measure, -2) is staff[1]
    assert select(staff)._get_component(Measure, -3) is staff[0]


def test_selectiontools_Selection__get_component_03():
    r'''Read forwards for positive n.
    '''

    staff = Staff("abj: | 2/8 c'8 d'8 || 2/8 e'8 f'8 || 2/8 g'8 a'8 |")

    r'''
    \new Staff {
        {
            \time 2/8
            c'8
            d'8
        }
        {
            \time 2/8
            e'8
            f'8
        }
        {
            \time 2/8
            g'8
            a'8
        }
    }
    '''

    assert select(staff)._get_component(scoretools.Leaf, 0) is staff[0][0]
    assert select(staff)._get_component(scoretools.Leaf, 1) is staff[0][1]
    assert select(staff)._get_component(scoretools.Leaf, 2) is staff[1][0]
    assert select(staff)._get_component(scoretools.Leaf, 3) is staff[1][1]
    assert select(staff)._get_component(scoretools.Leaf, 4) is staff[2][0]
    assert select(staff)._get_component(scoretools.Leaf, 5) is staff[2][1]


def test_selectiontools_Selection__get_component_04():
    r'''Read backwards for negative n.
    '''

    staff = Staff("abj: | 2/8 c'8 d'8 || 2/8 e'8 f'8 || 2/8 g'8 a'8 |")

    r'''
    \new Staff {
        {
            \time 2/8
            c'8
            d'8
        }
        {
            \time 2/8
            e'8
            f'8
        }
        {
            \time 2/8
            g'8
            a'8
        }
    }
    '''

    assert select(staff)._get_component(scoretools.Leaf, -1) is staff[2][1]
    assert select(staff)._get_component(scoretools.Leaf, -2) is staff[2][0]
    assert select(staff)._get_component(scoretools.Leaf, -3) is staff[1][1]
    assert select(staff)._get_component(scoretools.Leaf, -4) is staff[1][0]
    assert select(staff)._get_component(scoretools.Leaf, -5) is staff[0][1]
    assert select(staff)._get_component(scoretools.Leaf, -6) is staff[0][0]


def test_selectiontools_Selection__get_component_05():

    staff = Staff(r'''
        c'16 r16
        d'8 r8
        e'8. r8.
        f'4 r4
        ''')
    notes = [staff[0], staff[2], staff[4], staff[6]]
    rests = [staff[1], staff[3], staff[5], staff[7]]

    assert select(staff)._get_component(Note, 0) is notes[0]
    assert select(staff)._get_component(Note, 1) is notes[1]
    assert select(staff)._get_component(Note, 2) is notes[2]
    assert select(staff)._get_component(Note, 3) is notes[3]

    assert select(staff)._get_component(Rest, 0) is rests[0]
    assert select(staff)._get_component(Rest, 1) is rests[1]
    assert select(staff)._get_component(Rest, 2) is rests[2]
    assert select(staff)._get_component(Rest, 3) is rests[3]

    assert select(staff)._get_component(Staff, 0) is staff


def test_selectiontools_Selection__get_component_06():
    r'''Iterates backwards with negative values of n.
    '''

    staff = Staff(r'''
        c'16 r16
        d'8 r8
        e'8. r8.
        f'4 r4
        ''')
    notes = [staff[0], staff[2], staff[4], staff[6]]
    rests = [staff[1], staff[3], staff[5], staff[7]]

    assert select(staff)._get_component(Note, -1) is notes[3]
    assert select(staff)._get_component(Note, -2) is notes[2]
    assert select(staff)._get_component(Note, -3) is notes[1]
    assert select(staff)._get_component(Note, -4) is notes[0]

    assert select(staff)._get_component(Rest, -1) is rests[3]
    assert select(staff)._get_component(Rest, -2) is rests[2]
    assert select(staff)._get_component(Rest, -3) is rests[1]
    assert select(staff)._get_component(Rest, -4) is rests[0]
abjad/tools/selectiontools/test/test_selectiontools_Selection__get_component.py
# -*- encoding: utf-8 -*-
import sys
from abjad import *


def test_stringtools_strip_diacritics_01():

    if sys.version_info[0] == 2:
        binary_string = 'Dvo\xc5\x99\xc3\xa1k'
    else:
        binary_string = 'Dvořák'
    ascii_string = stringtools.strip_diacritics(binary_string)

    assert ascii_string == 'Dvorak'
abjad/tools/stringtools/test/test_stringtools_strip_diacritics.py
# -*- encoding: utf-8 -*- from abjad.tools import durationtools from abjad.tools import mathtools from abjad.tools import sequencetools from abjad.tools.pitchtools.Segment import Segment from abjad.tools.topleveltools import new class IntervalSegment(Segment): r'''An interval segment. :: >>> intervals = 'm2 M10 -aug4 P5' >>> pitchtools.IntervalSegment(intervals) IntervalSegment(['+m2', '+M10', '-aug4', '+P5']) :: >>> pitch_segment = pitchtools.PitchSegment("c d e f g a b c'") >>> pitchtools.IntervalSegment(pitch_segment) IntervalSegment(['+M2', '+M2', '+m2', '+M2', '+M2', '+M2', '+m2']) ''' ### CLASS VARIABLES ### __slots__ = () ### INITIALIZER ### def __init__( self, items=None, item_class=None, ): from abjad.tools import pitchtools if isinstance(items, pitchtools.PitchSegment): intervals = [] for one, two in sequencetools.iterate_sequence_nwise(items): intervals.append(one - two) items = intervals Segment.__init__( self, items=items, item_class=item_class, ) ### PRIVATE PROPERTIES ### @property def _named_item_class(self): from abjad.tools import pitchtools return pitchtools.NamedInterval @property def _numbered_item_class(self): from abjad.tools import pitchtools return pitchtools.NumberedInterval @property def _parent_item_class(self): from abjad.tools import pitchtools return pitchtools.Interval @property def _repr_specification(self): items = [] if self.item_class.__name__.startswith('Named'): items = [str(x) for x in self] else: items = [x.number for x in self] return new( self._storage_format_specification, is_indented=False, keyword_argument_names=(), positional_argument_values=( items, ), ) ### PUBLIC METHODS ### @classmethod def from_selection( cls, selection, item_class=None, ): r'''Makes interval segment from component `selection`. :: >>> staff = Staff("c'8 d'8 e'8 f'8 g'8 a'8 b'8 c''8") >>> pitchtools.IntervalSegment.from_selection( ... staff, item_class=pitchtools.NumberedInterval) IntervalSegment([2, 2, 1, 2, 2, 2, 1]) Returns interval segment. ''' from abjad.tools import pitchtools pitch_segment = pitchtools.PitchSegment.from_selection(selection) intervals = (-x for x in mathtools.difference_series(pitch_segment)) return cls( items=intervals, item_class=item_class, ) def rotate(self, n): r'''Rotates interval segment by `n`. Returns new interval segment. ''' return new(self, self[-n:] + self[:-n]) ### PUBLIC PROPERTIES ### @property def has_duplicates(self): r'''True if segment has duplicate items. Otherwise false. :: >>> intervals = 'm2 M3 -aug4 m2 P5' >>> segment = pitchtools.IntervalSegment(intervals) >>> segment.has_duplicates True :: >>> intervals = 'M3 -aug4 m2 P5' >>> segment = pitchtools.IntervalSegment(intervals) >>> segment.has_duplicates False Returns boolean. ''' from abjad.tools import pitchtools return len(pitchtools.IntervalSet(self)) < len(self) @property def slope(self): r'''Slope of interval segment. The slope of a interval segment is the sum of its intervals divided by its length: :: >>> pitchtools.IntervalSegment([1, 2]).slope Multiplier(3, 2) Returns multiplier. ''' return durationtools.Multiplier.from_float( sum([x.number for x in self])) / len(self) @property def spread(self): r'''Spread of interval segment. The maximum interval spanned by any combination of the intervals within a numbered interval segment. :: >>> pitchtools.IntervalSegment([1, 2, -3, 1, -2, 1]).spread NumberedInterval(4.0) :: >>> pitchtools.IntervalSegment([1, 1, 1, 2, -3, -2]).spread NumberedInterval(5.0) Returns numbered interval. 
''' from abjad.tools import pitchtools current = maximum = minimum = 0 for x in self: current += float(x) if maximum < current: maximum = current if current < minimum: minimum = current return pitchtools.NumberedInterval(maximum - minimum)
abjad/tools/pitchtools/IntervalSegment.py
# -*- encoding: utf-8 -*-
from abjad import *


def test_pitchtools_yield_all_pitch_class_sets_01():

    U_star = pitchtools.yield_all_pitch_class_sets()

    assert len(U_star) == 4096
    assert pitchtools.PitchClassSet([0, 1, 2]) in U_star
    assert pitchtools.PitchClassSet([1, 2, 3]) in U_star
    assert pitchtools.PitchClassSet([3, 4, 8, 9, 11]) in U_star
    assert pitchtools.PitchClassSet(range(12)) in U_star
abjad/tools/pitchtools/test/test_pitchtools_yield_all_pitch_class_sets.py
# -*- encoding: utf-8 -*-
from abjad import *


def test_pitchtools_NumberedPitchClass___add___01():
    r'''Ascending numbered interval added to pitch-class.
    '''

    pc = pitchtools.NumberedPitchClass(0)
    MCI = pitchtools.NumberedInterval

    assert pc + MCI(1) == pitchtools.NumberedPitchClass(1)
    assert pc + MCI(2) == pitchtools.NumberedPitchClass(2)
    assert pc + MCI(3) == pitchtools.NumberedPitchClass(3)
    assert pc + MCI(4) == pitchtools.NumberedPitchClass(4)
    assert pc + MCI(5) == pitchtools.NumberedPitchClass(5)
    assert pc + MCI(6) == pitchtools.NumberedPitchClass(6)
    assert pc + MCI(7) == pitchtools.NumberedPitchClass(7)
    assert pc + MCI(8) == pitchtools.NumberedPitchClass(8)
    assert pc + MCI(9) == pitchtools.NumberedPitchClass(9)
    assert pc + MCI(10) == pitchtools.NumberedPitchClass(10)
    assert pc + MCI(11) == pitchtools.NumberedPitchClass(11)


def test_pitchtools_NumberedPitchClass___add___02():
    r'''Ascending numbered interval added to pitch-class.
    '''

    pc = pitchtools.NumberedPitchClass(0)
    MCI = pitchtools.NumberedInterval

    assert pc + MCI(12) == pitchtools.NumberedPitchClass(0)
    assert pc + MCI(13) == pitchtools.NumberedPitchClass(1)
    assert pc + MCI(14) == pitchtools.NumberedPitchClass(2)
    assert pc + MCI(15) == pitchtools.NumberedPitchClass(3)
    assert pc + MCI(16) == pitchtools.NumberedPitchClass(4)
    assert pc + MCI(17) == pitchtools.NumberedPitchClass(5)
    assert pc + MCI(18) == pitchtools.NumberedPitchClass(6)
    assert pc + MCI(19) == pitchtools.NumberedPitchClass(7)
    assert pc + MCI(20) == pitchtools.NumberedPitchClass(8)
    assert pc + MCI(21) == pitchtools.NumberedPitchClass(9)
    assert pc + MCI(22) == pitchtools.NumberedPitchClass(10)
    assert pc + MCI(23) == pitchtools.NumberedPitchClass(11)


def test_pitchtools_NumberedPitchClass___add___03():
    r'''Descending numbered interval added to pitch-class.
    '''

    pc = pitchtools.NumberedPitchClass(0)
    MCI = pitchtools.NumberedInterval

    assert pc + MCI(-1) == pitchtools.NumberedPitchClass(11)
    assert pc + MCI(-2) == pitchtools.NumberedPitchClass(10)
    assert pc + MCI(-3) == pitchtools.NumberedPitchClass(9)
    assert pc + MCI(-4) == pitchtools.NumberedPitchClass(8)
    assert pc + MCI(-5) == pitchtools.NumberedPitchClass(7)
    assert pc + MCI(-6) == pitchtools.NumberedPitchClass(6)
    assert pc + MCI(-7) == pitchtools.NumberedPitchClass(5)
    assert pc + MCI(-8) == pitchtools.NumberedPitchClass(4)
    assert pc + MCI(-9) == pitchtools.NumberedPitchClass(3)
    assert pc + MCI(-10) == pitchtools.NumberedPitchClass(2)
    assert pc + MCI(-11) == pitchtools.NumberedPitchClass(1)


def test_pitchtools_NumberedPitchClass___add___04():
    r'''Descending numbered interval added to pitch-class.
    '''

    pc = pitchtools.NumberedPitchClass(0)
    MCI = pitchtools.NumberedInterval

    assert pc + MCI(-12) == pitchtools.NumberedPitchClass(0)
    assert pc + MCI(-13) == pitchtools.NumberedPitchClass(11)
    assert pc + MCI(-14) == pitchtools.NumberedPitchClass(10)
    assert pc + MCI(-15) == pitchtools.NumberedPitchClass(9)
    assert pc + MCI(-16) == pitchtools.NumberedPitchClass(8)
    assert pc + MCI(-17) == pitchtools.NumberedPitchClass(7)
    assert pc + MCI(-18) == pitchtools.NumberedPitchClass(6)
    assert pc + MCI(-19) == pitchtools.NumberedPitchClass(5)
    assert pc + MCI(-20) == pitchtools.NumberedPitchClass(4)
    assert pc + MCI(-21) == pitchtools.NumberedPitchClass(3)
    assert pc + MCI(-22) == pitchtools.NumberedPitchClass(2)
    assert pc + MCI(-23) == pitchtools.NumberedPitchClass(1)


def test_pitchtools_NumberedPitchClass___add___05():
    r'''numbered unison added to pitch-class.
    '''

    pc = pitchtools.NumberedPitchClass(0)
    MCI = pitchtools.NumberedInterval

    assert pc + MCI(0) == pitchtools.NumberedPitchClass(0)
abjad/tools/pitchtools/test/test_pitchtools_NumberedPitchClass___add__.py
# -*- encoding: utf-8 -*-
from abjad import *


def test_schemetools_Scheme_format_scheme_value_01():

    assert schemetools.Scheme.format_scheme_value(1) == '1'
    assert schemetools.Scheme.format_scheme_value(True) == '#t'
    assert schemetools.Scheme.format_scheme_value(False) == '#f'
    assert schemetools.Scheme.format_scheme_value('foo bar') == '"foo bar"'
    assert schemetools.Scheme.format_scheme_value('baz') == 'baz'
    assert schemetools.Scheme.format_scheme_value([1, 2, 3]) == '(1 2 3)'
abjad/tools/schemetools/test/test_schemetools_Scheme_format_scheme_value.py
# -*- encoding: utf-8 -*- import abc from abjad.tools.abctools.AbjadObject import AbjadObject class TypedCollection(AbjadObject): r'''Abstract base class for typed collections. ''' ### CLASS VARIABLES ### __slots__ = ( '_collection', '_item_class', ) ### INITIALIZER ### @abc.abstractmethod def __init__(self, items=None, item_class=None): assert isinstance(item_class, (type(None), type)) self._item_class = item_class ### SPECIAL METHODS ### def __contains__(self, item): r'''Is true when typed collection container `item`. Otherwise false. Returns boolean. ''' try: item = self._item_coercer(item) except ValueError: return False return self._collection.__contains__(item) def __eq__(self, expr): r'''Is true when `expr` is a typed collection with items that compare equal to those of this typed collection. Otherwise false. Returns boolean. ''' if isinstance(expr, type(self)): return self._collection == expr._collection elif isinstance(expr, type(self._collection)): return self._collection == expr return False def __format__(self, format_specification=''): r'''Formats typed collection. Set `format_specification` to `''` or `'storage'`. Interprets `''` equal to `'storage'`. Returns string. ''' from abjad.tools import systemtools if format_specification in ('', 'storage'): return systemtools.StorageFormatManager.get_storage_format(self) return str(self) def __getnewargs__(self): r'''Gets new arguments. Returns tuple. ''' return (self._collection, self.item_class) def __hash__(self): r'''Hashes typed collection. Required to be explicitly re-defined on Python 3 if __eq__ changes. Returns integer. ''' return super(TypedCollection, self).__hash__() def __iter__(self): r'''Iterates typed collection. Returns generator. ''' return self._collection.__iter__() def __len__(self): r'''Length of typed collection. Returns nonnegative integer. ''' return len(self._collection) def __ne__(self, expr): r'''Is true when `expr` is not a typed collection with items equal to this typed collection. Otherwise false. Returns boolean. ''' return not self.__eq__(expr) ### PRIVATE METHODS ### def _on_insertion(self, item): r'''Override to operate on item after insertion into collection. ''' pass def _on_removal(self, item): r'''Override to operate on item after removal from collection. 
''' pass ### PRIVATE PROPERTIES ### @property def _item_coercer(self): def coerce_(x): if isinstance(x, self._item_class): return x return self._item_class(x) if self._item_class is None: return lambda x: x return coerce_ @property def _repr_specification(self): from abjad.tools import systemtools manager = systemtools.StorageFormatManager names = manager.get_signature_keyword_argument_names(self) keyword_argument_names = list(names) if 'items' in keyword_argument_names: keyword_argument_names.remove('items') keyword_argument_names = tuple(keyword_argument_names) positional_argument_values = ( self._collection, ) return systemtools.StorageFormatSpecification( self, is_indented=False, keyword_argument_names=keyword_argument_names, positional_argument_values=positional_argument_values, ) @property def _storage_format_specification(self): from abjad.tools import systemtools manager = systemtools.StorageFormatManager names = manager.get_signature_keyword_argument_names(self) keyword_argument_names = list(names) if 'items' in keyword_argument_names: keyword_argument_names.remove('items') keyword_argument_names = tuple(keyword_argument_names) positional_argument_values = ( self._collection, ) return systemtools.StorageFormatSpecification( self, keyword_argument_names=keyword_argument_names, positional_argument_values=positional_argument_values, ) ### PUBLIC PROPERTIES ### @property def item_class(self): r'''Item class to coerce items into. ''' return self._item_class @property def items(self): r'''Gets collection items. ''' return [x for x in self]
abjad/tools/datastructuretools/TypedCollection.py
# -*- encoding: utf-8 -*-


def timespan_2_overlaps_all_of_timespan_1(
    timespan_1=None,
    timespan_2=None,
    hold=False,
    ):
    r'''Makes time relation indicating that `timespan_2` overlaps
    all of `timespan_1`.

    ::

        >>> relation = timespantools.timespan_2_overlaps_all_of_timespan_1()
        >>> print(format(relation))
        timespantools.TimespanTimespanTimeRelation(
            inequality=timespantools.CompoundInequality(
                [
                    timespantools.SimpleInequality('timespan_2.start_offset < timespan_1.start_offset'),
                    timespantools.SimpleInequality('timespan_1.stop_offset < timespan_2.stop_offset'),
                    ],
                logical_operator='and',
                ),
            )

    Returns time relation or boolean.
    '''
    from abjad.tools import timespantools

    inequality = timespantools.CompoundInequality([
        'timespan_2.start_offset < timespan_1.start_offset',
        'timespan_1.stop_offset < timespan_2.stop_offset',
        ])

    time_relation = timespantools.TimespanTimespanTimeRelation(
        inequality,
        timespan_1=timespan_1,
        timespan_2=timespan_2,
        )

    if time_relation.is_fully_loaded and not hold:
        return time_relation()
    else:
        return time_relation
abjad/tools/timespantools/timespan_2_overlaps_all_of_timespan_1.py
# -*- encoding: utf-8 -*-
from abjad import *


def test_pitchtools_PitchClassSet_multiply_01():

    assert pitchtools.PitchClassSet([0, 1, 5]).multiply(5) == \
        pitchtools.PitchClassSet([0, 1, 5])
    assert pitchtools.PitchClassSet([1, 2, 6]).multiply(5) == \
        pitchtools.PitchClassSet([5, 6, 10])
    assert pitchtools.PitchClassSet([2, 3, 7]).multiply(5) == \
        pitchtools.PitchClassSet([3, 10, 11])
abjad/tools/pitchtools/test/test_pitchtools_PitchClassSet_multiply.py
# -*- encoding: utf-8 -*-
from abjad import *


def test_scoretools_Container_index_01():
    r'''Elements that compare equal return different indices in container.
    '''

    container = Container(4 * Note("c'4"))

    assert container.index(container[0]) == 0
    assert container.index(container[1]) == 1
    assert container.index(container[2]) == 2
    assert container.index(container[3]) == 3
abjad/tools/scoretools/test/test_scoretools_Container_index.py
# -*- encoding: utf-8 -*-
from abjad import *


def test_pitchtools_list_pitch_numbers_in_expr_01():

    tuplet = scoretools.FixedDurationTuplet(Duration(2, 8), "c'8 d'8 e'8")

    assert pitchtools.list_pitch_numbers_in_expr(tuplet) == (0, 2, 4)


def test_pitchtools_list_pitch_numbers_in_expr_02():

    staff = Staff("c'8 d'8 e'8 f'8")

    assert pitchtools.list_pitch_numbers_in_expr(staff) == (0, 2, 4, 5)
abjad/tools/pitchtools/test/test_pitchtools_list_pitch_numbers_in_expr.py
# -*- encoding: utf-8 -*-
from abjad import *


def test_rhythmtreetools_RhythmTreeContainer_insert_01():

    leaf_a = rhythmtreetools.RhythmTreeLeaf(preprolated_duration=3)
    leaf_b = rhythmtreetools.RhythmTreeLeaf(preprolated_duration=3)
    leaf_c = rhythmtreetools.RhythmTreeLeaf(preprolated_duration=2)

    container = rhythmtreetools.RhythmTreeContainer()

    assert container.children == ()

    container.insert(0, leaf_a)
    assert container.children == (leaf_a,)

    container.insert(0, leaf_b)
    assert container.children == (leaf_b, leaf_a)

    container.insert(1, leaf_c)
    assert container.children == (leaf_b, leaf_c, leaf_a)
# -*- encoding: utf-8 -*- import pytest from abjad import * def test_datastructuretools_CyclicTuplet___getslice___01(): cyclic_tuple = datastructuretools.CyclicTuple(range(3)) assert cyclic_tuple[:2] == (0, 1) assert cyclic_tuple[:10] == (0, 1, 2, 0, 1, 2, 0, 1, 2, 0) assert cyclic_tuple[2:10] == (2, 0, 1, 2, 0, 1, 2, 0)
mscuthbert/abjad
abjad/tools/datastructuretools/test/test_datastructuretools_CyclicTuplet___getslice__.py
abjad/tools/rhythmtreetools/test/test_rhythmtreetools_RhythmTreeContainer_insert.py
# -*- encoding: utf-8 -*-
import six
from abjad.tools.stringtools.strip_diacritics import strip_diacritics


def to_accent_free_snake_case(string):
    '''Changes `string` to accent-free snake case.

    ..  container:: example

        ::

            >>> stringtools.to_accent_free_snake_case('Déja vu')
            'deja_vu'

    Strips accents from accented characters.

    Changes all punctuation (including spaces) to underscore.

    Sets to lowercase.

    Returns string.
    '''
    assert isinstance(string, six.string_types)
    result = strip_diacritics(string)
    result = result.replace(' ', '_')
    result = result.replace("'", '_')
    result = result.lower()
    return result
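strip_diacritics is imported from abjad rather than defined here. As an illustration of the accent-stripping step only, one common approach (an assumption, not necessarily abjad's code) is NFKD decomposition followed by dropping combining marks:

# Possible accent-stripping approach; strip_diacritics_sketch is an invented
# stand-in for abjad's strip_diacritics.
import unicodedata

def strip_diacritics_sketch(string):
    decomposed = unicodedata.normalize('NFKD', string)
    return ''.join(ch for ch in decomposed if not unicodedata.combining(ch))

assert strip_diacritics_sketch(u'D\xe9ja vu') == u'Deja vu'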
# -*- encoding: utf-8 -*- import pytest from abjad import * def test_datastructuretools_CyclicTuplet___getslice___01(): cyclic_tuple = datastructuretools.CyclicTuple(range(3)) assert cyclic_tuple[:2] == (0, 1) assert cyclic_tuple[:10] == (0, 1, 2, 0, 1, 2, 0, 1, 2, 0) assert cyclic_tuple[2:10] == (2, 0, 1, 2, 0, 1, 2, 0)
mscuthbert/abjad
abjad/tools/datastructuretools/test/test_datastructuretools_CyclicTuplet___getslice__.py
abjad/tools/stringtools/to_accent_free_snake_case.py
# -*- encoding: utf-8 -*-
import copy
from abjad import *


def test_pitchtools_NamedPitch___copy___01():
    pitch = NamedPitch(13)
    new = copy.copy(pitch)

    assert new is not pitch
    assert new.accidental is not pitch.accidental
# -*- encoding: utf-8 -*- import pytest from abjad import * def test_datastructuretools_CyclicTuplet___getslice___01(): cyclic_tuple = datastructuretools.CyclicTuple(range(3)) assert cyclic_tuple[:2] == (0, 1) assert cyclic_tuple[:10] == (0, 1, 2, 0, 1, 2, 0, 1, 2, 0) assert cyclic_tuple[2:10] == (2, 0, 1, 2, 0, 1, 2, 0)
mscuthbert/abjad
abjad/tools/datastructuretools/test/test_datastructuretools_CyclicTuplet___getslice__.py
abjad/tools/pitchtools/test/test_pitchtools_NamedPitch___copy__.py
"""Helpers that help with state related things.""" import asyncio from collections import defaultdict import datetime as dt import logging from types import ModuleType, TracebackType from typing import Any, Dict, Iterable, List, Optional, Type, Union from homeassistant.components.sun import STATE_ABOVE_HORIZON, STATE_BELOW_HORIZON from homeassistant.const import ( STATE_CLOSED, STATE_HOME, STATE_LOCKED, STATE_NOT_HOME, STATE_OFF, STATE_ON, STATE_OPEN, STATE_UNKNOWN, STATE_UNLOCKED, ) from homeassistant.core import Context, State from homeassistant.loader import IntegrationNotFound, async_get_integration, bind_hass import homeassistant.util.dt as dt_util from .typing import HomeAssistantType _LOGGER = logging.getLogger(__name__) class AsyncTrackStates: """ Record the time when the with-block is entered. Add all states that have changed since the start time to the return list when with-block is exited. Must be run within the event loop. """ def __init__(self, hass: HomeAssistantType) -> None: """Initialize a TrackStates block.""" self.hass = hass self.states: List[State] = [] # pylint: disable=attribute-defined-outside-init def __enter__(self) -> List[State]: """Record time from which to track changes.""" self.now = dt_util.utcnow() return self.states def __exit__( self, exc_type: Optional[Type[BaseException]], exc_value: Optional[BaseException], traceback: Optional[TracebackType], ) -> None: """Add changes states to changes list.""" self.states.extend(get_changed_since(self.hass.states.async_all(), self.now)) def get_changed_since( states: Iterable[State], utc_point_in_time: dt.datetime ) -> List[State]: """Return list of states that have been changed since utc_point_in_time.""" return [state for state in states if state.last_updated >= utc_point_in_time] @bind_hass async def async_reproduce_state( hass: HomeAssistantType, states: Union[State, Iterable[State]], *, context: Optional[Context] = None, reproduce_options: Optional[Dict[str, Any]] = None, ) -> None: """Reproduce a list of states on multiple domains.""" if isinstance(states, State): states = [states] to_call: Dict[str, List[State]] = defaultdict(list) for state in states: to_call[state.domain].append(state) async def worker(domain: str, states_by_domain: List[State]) -> None: try: integration = await async_get_integration(hass, domain) except IntegrationNotFound: _LOGGER.warning( "Trying to reproduce state for unknown integration: %s", domain ) return try: platform: Optional[ModuleType] = integration.get_platform("reproduce_state") except ImportError: _LOGGER.warning("Integration %s does not support reproduce state", domain) return await platform.async_reproduce_states( # type: ignore hass, states_by_domain, context=context, reproduce_options=reproduce_options ) if to_call: # run all domains in parallel await asyncio.gather( *(worker(domain, data) for domain, data in to_call.items()) ) def state_as_number(state: State) -> float: """ Try to coerce our state to a number. Raises ValueError if this is not possible. """ if state.state in ( STATE_ON, STATE_LOCKED, STATE_ABOVE_HORIZON, STATE_OPEN, STATE_HOME, ): return 1 if state.state in ( STATE_OFF, STATE_UNLOCKED, STATE_UNKNOWN, STATE_BELOW_HORIZON, STATE_CLOSED, STATE_NOT_HOME, ): return 0 return float(state.state)
"""Test significant change helper.""" import pytest from homeassistant.components.sensor import DEVICE_CLASS_BATTERY from homeassistant.const import ATTR_DEVICE_CLASS, STATE_UNAVAILABLE, STATE_UNKNOWN from homeassistant.core import State from homeassistant.helpers import significant_change @pytest.fixture(name="checker") async def checker_fixture(hass): """Checker fixture.""" checker = await significant_change.create_checker(hass, "test") def async_check_significant_change( _hass, old_state, _old_attrs, new_state, _new_attrs, **kwargs ): return abs(float(old_state) - float(new_state)) > 4 hass.data[significant_change.DATA_FUNCTIONS][ "test_domain" ] = async_check_significant_change return checker async def test_signicant_change(hass, checker): """Test initialize helper works.""" ent_id = "test_domain.test_entity" attrs = {ATTR_DEVICE_CLASS: DEVICE_CLASS_BATTERY} assert checker.async_is_significant_change(State(ent_id, "100", attrs)) # Same state is not significant. assert not checker.async_is_significant_change(State(ent_id, "100", attrs)) # State under 5 difference is not significant. (per test mock) assert not checker.async_is_significant_change(State(ent_id, "96", attrs)) # Make sure we always compare against last significant change assert checker.async_is_significant_change(State(ent_id, "95", attrs)) # State turned unknown assert checker.async_is_significant_change(State(ent_id, STATE_UNKNOWN, attrs)) # State turned unavailable assert checker.async_is_significant_change(State(ent_id, "100", attrs)) assert checker.async_is_significant_change(State(ent_id, STATE_UNAVAILABLE, attrs)) async def test_significant_change_extra(hass, checker): """Test extra significant checker works.""" ent_id = "test_domain.test_entity" attrs = {ATTR_DEVICE_CLASS: DEVICE_CLASS_BATTERY} assert checker.async_is_significant_change(State(ent_id, "100", attrs), extra_arg=1) assert checker.async_is_significant_change(State(ent_id, "200", attrs), extra_arg=1) # Reset the last significiant change to 100 to repeat test but with # extra checker installed. assert checker.async_is_significant_change(State(ent_id, "100", attrs), extra_arg=1) def extra_significant_check( hass, old_state, old_attrs, old_extra_arg, new_state, new_attrs, new_extra_arg ): return old_extra_arg != new_extra_arg checker.extra_significant_check = extra_significant_check # This is normally a significant change (100 -> 200), but the extra arg check marks it # as insignificant. assert not checker.async_is_significant_change( State(ent_id, "200", attrs), extra_arg=1 ) assert checker.async_is_significant_change(State(ent_id, "200", attrs), extra_arg=2)
partofthething/home-assistant
tests/helpers/test_significant_change.py
homeassistant/helpers/state.py
from streamcorpus_pipeline._get_name_info import get_name_info
from streamcorpus import Chunk, make_stream_item


def test_get_name_info(tmpdir):
    path = str(tmpdir.join('test_path'))
    c = Chunk(path, mode='wb')
    c.add(make_stream_item(28491, 'abs_url'))
    name_info = get_name_info(path, i_str='foo')
    assert name_info['date_now'] == name_info['date_time_now'][:10]
    assert name_info['date_now'] + '-' + name_info['time_now'] == name_info['date_time_now']
from __future__ import absolute_import import logging import os import time import uuid import pytest import streamcorpus from streamcorpus import make_stream_item, StreamItem, ContentItem, OffsetType, Chunk import streamcorpus_pipeline from streamcorpus_pipeline._clean_visible import clean_visible from streamcorpus_pipeline._hyperlink_labels import anchors_re, hyperlink_labels from streamcorpus_pipeline.tests._test_data import get_test_chunk_path logger = logging.getLogger(__name__) def make_test_stream_item(test_data_dir): stream_item = make_stream_item(None, 'http://nytimes.com/') stream_item.body = ContentItem() path = os.path.join(test_data_dir, 'test', 'nytimes-index-clean-stable.html') stream_item.body.clean_html = open(str(path)).read() return stream_item def make_hyperlink_labeled_test_stream_item(test_data_dir): context = {} si = make_test_stream_item(test_data_dir) assert len(si.body.clean_html) > 200 hl = hyperlink_labels(config={ 'require_abs_url': True, 'all_domains': True, 'offset_types': ['BYTES'], }) hl(si, context) cv = clean_visible(config={}) cv(si, context) assert len(si.body.clean_visible) > 200 return si def make_hyperlink_labeled_test_chunk(tmpdir): ''' returns a path to a temporary chunk that has been hyperlink labeled ''' tpath = tmpdir.join(str(uuid.uuid1()) + '.sc') o_chunk = Chunk(tpath, mode='wb') ipath = get_test_chunk_path() hl = hyperlink_labels(config={ 'require_abs_url': True, 'all_domains': True, 'offset_types': [BYTES], }) cv = make_clean_visible(config={}) for si in Chunk(path=ipath, message=streamcorpus.StreamItem_v0_2_0): ## clear out existing labels and tokens si.body.labels = {} si.body.sentences = {} context = {} hl(si, context) cv(si, context) o_chunk.add(si) o_chunk.close() return tpath def test_basics(test_data_dir): start = time.time() ## run it with a byte regex si1 = make_test_stream_item(test_data_dir) context = {} hl1 = hyperlink_labels(config={ 'require_abs_url': True, 'all_domains': False, 'domain_substrings': ['nytimes.com'], 'offset_types': ['BYTES'], }) hl1(si1,context) elapsed_bytes = time.time() - start assert si1.body.labels['author'][0].offsets.keys() == [OffsetType.BYTES] ## run it with regex start = time.time() si2 = make_test_stream_item(test_data_dir) hl2 = hyperlink_labels(config={ 'require_abs_url': True, 'all_domains': False, 'domain_substrings': ['nytimes.com'], 'offset_types': ['LINES'], }) hl2(si2,context) elapsed_lines = time.time() - start assert si2.body.labels['author'][0].offsets.keys() == [OffsetType.LINES] byte_labels = set() for annotator_id in si1.body.labels: for label in si1.body.labels[annotator_id]: assert OffsetType.BYTES in label.offsets byte_labels.add(label.target.target_id) line_labels = set() for annotator_id in si2.body.labels: for label in si2.body.labels[annotator_id]: assert OffsetType.LINES in label.offsets line_labels.add(label.target.target_id) assert line_labels == byte_labels logger.info('{0:.5f} bytes, {1:.5f} lines' .format(elapsed_bytes, elapsed_lines)) @pytest.mark.parametrize(('parser_type',), [ # pylint: disable=E1101 ('BYTES',), ('CHARS',), ]) def test_speed(parser_type, test_data_dir): stream_items = [] for i in xrange(10): stream_item = StreamItem() stream_item.body = ContentItem() path = os.path.join(test_data_dir, 'test' ) stream_item.body.clean_html = open( os.path.join(path, 'nytimes-index-clean.html')).read() stream_items.append( stream_item ) context = {} start = time.time() hl = hyperlink_labels(config={ 'require_abs_url': True, 'all_domains': False, 'domain_substrings': 
['nytimes.com'], 'offset_types': [parser_type], }) for si in stream_items: si = hl(si, context) elapsed = time.time() - start rate = len(stream_items) / elapsed logger.debug('OffsetType: {0}'.format(OffsetType)) logger.info('{0:.1f} per second for {1}'.format(rate, parser_type)) sample_text = u''' This is a normal <a href="http://en.wikipedia.org/wiki/Hello">link</a>. This is a funky <a href ='http://en.wikipedia.org/wiki/Bogus' asdf=4 >the Bogus</a> Obviously intrigued by anything named Munn I sought <a href="http://en.wikipedia.org/wiki/Munny">more info</a>.</font></p> ''' def test_anchors_re(): parts = sample_text.split('</a>') matches = list(anchors_re.match(part) for part in parts) ## now we check more of the output assert len(matches) == 4 for m in matches: if not m: continue before = m.group('before') href = m.group('href') ahref = m.group('ahref') posthref = m.group('posthref') preequals = m.group('preequals') postequals = m.group('postequals') # logger.debug(m.groups('href') assert sum(map(int, map(bool, matches))) == 3 @pytest.mark.parametrize(('parser_type',), [ # pylint: disable=E1101 ('BYTES',), ('CHARS',), ]) def test_long_doc(parser_type, test_data_dir): stream_item = StreamItem() stream_item.body = ContentItem() path = os.path.join(test_data_dir, 'test' ) stream_item.body.clean_html = open( os.path.join(path, 'company-test.html')).read() context = {} hl = hyperlink_labels(config={ 'require_abs_url': True, 'all_domains': True, 'offset_types': [parser_type], }) hl(stream_item, context)
trec-kba/streamcorpus-pipeline
streamcorpus_pipeline/tests/test_hyperlink_labels.py
streamcorpus_pipeline/tests/test_get_name_info.py
#!/usr/bin/env python ''' streamcorpus_pipeline.TaggerBatchTransform for LingPipe This software is released under an MIT/X11 open source license. Copyright 2012-2014 Diffeo, Inc. ''' from __future__ import absolute_import import hashlib import itertools import logging import os import re import sys import time import traceback import uuid from nltk.tokenize import WhitespaceTokenizer from streamcorpus import Token, Sentence, EntityType, Chunk, Offset, \ OffsetType, Gender, MentionType, Attribute, AttributeType from streamcorpus_pipeline.stages import Configured from streamcorpus_pipeline._taggers import TaggerBatchTransform logger = logging.getLogger(__name__) ## map LingPipe's specific strings to streamcorpus.EntityType _ENTITY_TYPES = { 'PERSON': EntityType.PER, 'FEMALE_PRONOUN': EntityType.PER, 'MALE_PRONOUN': EntityType.PER, 'ORGANIZATION': EntityType.ORG, 'LOCATION': EntityType.LOC, } _PRONOUNS = { 'FEMALE_PRONOUN': Gender.FEMALE, 'MALE_PRONOUN': Gender.MALE, } filename_re = re.compile('''.*?<FILENAME docid="(?P<stream_id>.*?)">(?P<tagged_doc>(.|\n)*?)</FILENAME>''') sentence_re = re.compile('''(?P<before>(.|\n)*?)<s i="(?P<sentence_id>.*?)">(?P<tagged_sentence>(.|\n)*?)</s>''') ner_re = re.compile('''(?P<before>(.|\n)*?)(<ENAMEX ID="(?P<chain_id>.*?)" TYPE="(?P<entity_type>.*?)">(?P<entity_string>(.|\n)*?)</ENAMEX>)?''') ## detect any kind of whitespace, including unicode whitespace, which ## should include \u200b, even though python2.7 apparently lost this ## knowledge: http://bugs.python.org/issue10567 only_whitespace = re.compile(u'''^(\s|\n|\u200b)*$''', re.UNICODE) def files(text): ''' Iterate over <FILENAME> XML-like tags and tokenize with nltk ''' for f_match in filename_re.finditer(text): yield f_match.group('stream_id'), f_match.group('tagged_doc') class LingPipeParser(object): def __init__(self, config): self.config = config self.clear() def clear(self): self.tok_num = 0 self.byte_idx = 0 self.line_idx = 0 self.word_tokenizer = WhitespaceTokenizer() def set(self, ner_dom): self.clear() ## nltk wants a unicode string, so decode, it and then we will ## re-encode it to carefully recover the byte offsets. We ## must take care not to use any nltk components that insert ## new whitespace, such ## nltk.tokenize.treebank.TreebankTokenizer self.ner_dom = ner_dom self.attributes = [] self.relations = [] def sentences(self): ''' Iterate over <s> XML-like tags and tokenize with nltk ''' for sentence_id, node in enumerate(self.ner_dom.childNodes): ## increment the char index with any text before the <s> ## tag. Crucial assumption here is that the LingPipe XML ## tags are inserted into the original byte array without ## modifying the portions that are not inside the ## LingPipe-added tags themselves. if node.nodeType == node.TEXT_NODE: ## we expect to only see TEXT_NODE instances with whitespace assert only_whitespace.match(node.data), repr(node.data) ## must convert back to utf-8 to have expected byte offsets self.byte_idx += len(node.data.encode('utf-8')) ## count full lines, i.e. 
only those that end with a \n # 'True' here means keep the trailing newlines for line in node.data.splitlines(True): if line.endswith('\n'): self.line_idx += 1 else: logger.debug('getting tokens for sentence_id=%d' % sentence_id) more_sentence_remains = True while more_sentence_remains: ## always a sentence sent = Sentence() ## this "node" came from for loop above, and it's ## childNodes list might have been popped by a ## previous pass through this while loop tokens = iter( self.tokens( node ) ) while 1: try: tok = tokens.next() sent.tokens.append(tok) #logger.debug('got token: %r %d %d' % (tok.token, tok.mention_id, tok.sentence_pos)) except StopIteration: yield sent more_sentence_remains = False break def _make_token(self, start, end): ''' Instantiates a Token from self._input_string[start:end] ''' ## all thfift strings must be encoded first tok_string = self._input_string[start:end].encode('utf-8') if only_whitespace.match(tok_string): ## drop any tokens with only whitespace return None tok = Token() tok.token = tok_string tok.token_num = self.tok_num if 'BYTES' in self.config['offset_types']: tok.offsets[OffsetType.BYTES] = Offset( type = OffsetType.BYTES, first=self.byte_idx + len(self._input_string[:start].encode('utf-8')), length=len(tok_string), value=self.config['offset_debugging'] and tok_string or None, ) if 'LINES' in self.config['offset_types']: tok.offsets[OffsetType.LINES] = Offset( type = OffsetType.LINES, first=self.line_idx, length=1, value=self.config['offset_debugging'] and tok_string or None, ) self.tok_num += 1 ## keep track of position within a sentence tok.sentence_pos = self.sent_pos self.sent_pos += 1 return tok def tokens(self, sentence_dom): ''' Tokenize all the words and preserve NER labels from ENAMEX tags ''' ## keep track of sentence position, which is reset for each ## sentence, and used above in _make_token self.sent_pos = 0 ## keep track of mention_id, so we can distinguish adjacent ## multi-token mentions within the same coref chain mention_id = 0 while len(sentence_dom.childNodes) > 0: ## shrink the sentence_dom's child nodes. In v0_2_0 this ## was required to cope with HitMaxi16. Now it is just to ## save memory. 
node = sentence_dom.childNodes.pop(0) if node.nodeType == node.TEXT_NODE: ## process portion before an ENAMEX tag for line in node.data.splitlines(True): self._input_string = line for start, end in self.word_tokenizer.span_tokenize(line): tok = self._make_token(start, end) if tok: yield tok if line.endswith('\n'): ## maintain the index to the current line self.line_idx += 1 ## increment index pasat the 'before' portion self.byte_idx += len(line.encode('utf-8')) else: ## process text inside an ENAMEX tag assert node.nodeName == 'ENAMEX', node.nodeName chain_id = node.attributes.get('ID').value entity_type = node.attributes.get('TYPE').value for node in node.childNodes: assert node.nodeType == node.TEXT_NODE, node.nodeType for line in node.data.splitlines(True): self._input_string = line for start, end in self.word_tokenizer.span_tokenize(line): tok = self._make_token(start, end) if tok: if entity_type in _PRONOUNS: tok.mention_type = MentionType.PRO tok.entity_type = _ENTITY_TYPES[entity_type] ## create an attribute attr = Attribute( attribute_type=AttributeType.PER_GENDER, value=str(_PRONOUNS[entity_type]) ) self.attributes.append(attr) else: ## regular entity_type tok.mention_type = MentionType.NAME tok.entity_type = _ENTITY_TYPES[entity_type] tok.equiv_id = int(chain_id) tok.mention_id = mention_id yield tok if line.endswith('\n'): ## maintain the index to the current line self.line_idx += 1 ## increment index pasat the 'before' portion self.byte_idx += len(line.encode('utf-8')) ## increment mention_id within this sentence mention_id += 1 class lingpipe(TaggerBatchTransform): ''' a streamcorpus_pipeline batch transform that converts a chunk into a new chunk with data generated by LingPipe ''' config_name = 'lingpipe' tagger_id = 'lingpipe' template = \ '''cd %(tagger_root_path)s/demos/generic/bin && ''' + \ '''cat %(clean_visible_path)s | ./cmd_coref_en_news_muc6.sh ''' + \ ''' "-contentType=text/html" "-includeElts=FILENAME" ''' + \ ''' %(java_heap_size)s 1> %(ner_xml_path)s''' def get_sentences(self, ner_dom): '''parse the sentences and tokens out of the XML''' lp_parser = LingPipeParser(self.config) lp_parser.set(ner_dom) sentences = list( lp_parser.sentences() ) return sentences, lp_parser.relations, lp_parser.attributes if __name__ == '__main__': import argparse parser = argparse.ArgumentParser() parser.add_argument('action', metavar='postproc|align', help='postproc') parser.add_argument('input_file', help='XML file from LingPipe') parser.add_argument('output_file', help='XML file to generate with OWPL data') parser.add_argument('--source_chunk', help='source chunk file that was input to the pipeline data') args = parser.parse_args() if args.action == 'postproc': text = open(args.input_file).read() print 'read %d bytes from %s' % (len(text), args.input_file) raise NotImplementedError('need to instantiate a LingPipeParser object here') for stream_id, tagged_doc in files(text): for sent in sentences(tagged_doc): # pylint: disable=E0602 for tok in sent.tokens: if tok.entity_type is not None: print tok, EntityType._VALUES_TO_NAMES[tok.entity_type] elif args.action == 'align': i_chunk = Chunk(path=args.source_chunk, mode='rb') o_chunk = Chunk(path=args.output_file, mode='wb') align_chunk_with_ner(args.input_file, i_chunk, o_chunk) # pylint: disable=E0602
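A detail worth pulling out of the LingPipe parser above: byte offsets are computed by re-encoding the already-consumed text back to UTF-8 and measuring its length, so multi-byte characters shift byte positions even though character positions do not. A small standalone illustration of that bookkeeping (not the module's own code):

# Byte-offset bookkeeping sketch: offsets count UTF-8 bytes, not characters.
text = u'caf\xe9 bar'      # the accented character occupies two UTF-8 bytes
token_start_char = 5       # character index where 'bar' begins

byte_first = len(text[:token_start_char].encode('utf-8'))
byte_length = len(text[token_start_char:].encode('utf-8'))

assert byte_first == 6     # one more than the character index, due to the accent
assert byte_length == 3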
from __future__ import absolute_import import logging import os import time import uuid import pytest import streamcorpus from streamcorpus import make_stream_item, StreamItem, ContentItem, OffsetType, Chunk import streamcorpus_pipeline from streamcorpus_pipeline._clean_visible import clean_visible from streamcorpus_pipeline._hyperlink_labels import anchors_re, hyperlink_labels from streamcorpus_pipeline.tests._test_data import get_test_chunk_path logger = logging.getLogger(__name__) def make_test_stream_item(test_data_dir): stream_item = make_stream_item(None, 'http://nytimes.com/') stream_item.body = ContentItem() path = os.path.join(test_data_dir, 'test', 'nytimes-index-clean-stable.html') stream_item.body.clean_html = open(str(path)).read() return stream_item def make_hyperlink_labeled_test_stream_item(test_data_dir): context = {} si = make_test_stream_item(test_data_dir) assert len(si.body.clean_html) > 200 hl = hyperlink_labels(config={ 'require_abs_url': True, 'all_domains': True, 'offset_types': ['BYTES'], }) hl(si, context) cv = clean_visible(config={}) cv(si, context) assert len(si.body.clean_visible) > 200 return si def make_hyperlink_labeled_test_chunk(tmpdir): ''' returns a path to a temporary chunk that has been hyperlink labeled ''' tpath = tmpdir.join(str(uuid.uuid1()) + '.sc') o_chunk = Chunk(tpath, mode='wb') ipath = get_test_chunk_path() hl = hyperlink_labels(config={ 'require_abs_url': True, 'all_domains': True, 'offset_types': [BYTES], }) cv = make_clean_visible(config={}) for si in Chunk(path=ipath, message=streamcorpus.StreamItem_v0_2_0): ## clear out existing labels and tokens si.body.labels = {} si.body.sentences = {} context = {} hl(si, context) cv(si, context) o_chunk.add(si) o_chunk.close() return tpath def test_basics(test_data_dir): start = time.time() ## run it with a byte regex si1 = make_test_stream_item(test_data_dir) context = {} hl1 = hyperlink_labels(config={ 'require_abs_url': True, 'all_domains': False, 'domain_substrings': ['nytimes.com'], 'offset_types': ['BYTES'], }) hl1(si1,context) elapsed_bytes = time.time() - start assert si1.body.labels['author'][0].offsets.keys() == [OffsetType.BYTES] ## run it with regex start = time.time() si2 = make_test_stream_item(test_data_dir) hl2 = hyperlink_labels(config={ 'require_abs_url': True, 'all_domains': False, 'domain_substrings': ['nytimes.com'], 'offset_types': ['LINES'], }) hl2(si2,context) elapsed_lines = time.time() - start assert si2.body.labels['author'][0].offsets.keys() == [OffsetType.LINES] byte_labels = set() for annotator_id in si1.body.labels: for label in si1.body.labels[annotator_id]: assert OffsetType.BYTES in label.offsets byte_labels.add(label.target.target_id) line_labels = set() for annotator_id in si2.body.labels: for label in si2.body.labels[annotator_id]: assert OffsetType.LINES in label.offsets line_labels.add(label.target.target_id) assert line_labels == byte_labels logger.info('{0:.5f} bytes, {1:.5f} lines' .format(elapsed_bytes, elapsed_lines)) @pytest.mark.parametrize(('parser_type',), [ # pylint: disable=E1101 ('BYTES',), ('CHARS',), ]) def test_speed(parser_type, test_data_dir): stream_items = [] for i in xrange(10): stream_item = StreamItem() stream_item.body = ContentItem() path = os.path.join(test_data_dir, 'test' ) stream_item.body.clean_html = open( os.path.join(path, 'nytimes-index-clean.html')).read() stream_items.append( stream_item ) context = {} start = time.time() hl = hyperlink_labels(config={ 'require_abs_url': True, 'all_domains': False, 'domain_substrings': 
['nytimes.com'], 'offset_types': [parser_type], }) for si in stream_items: si = hl(si, context) elapsed = time.time() - start rate = len(stream_items) / elapsed logger.debug('OffsetType: {0}'.format(OffsetType)) logger.info('{0:.1f} per second for {1}'.format(rate, parser_type)) sample_text = u''' This is a normal <a href="http://en.wikipedia.org/wiki/Hello">link</a>. This is a funky <a href ='http://en.wikipedia.org/wiki/Bogus' asdf=4 >the Bogus</a> Obviously intrigued by anything named Munn I sought <a href="http://en.wikipedia.org/wiki/Munny">more info</a>.</font></p> ''' def test_anchors_re(): parts = sample_text.split('</a>') matches = list(anchors_re.match(part) for part in parts) ## now we check more of the output assert len(matches) == 4 for m in matches: if not m: continue before = m.group('before') href = m.group('href') ahref = m.group('ahref') posthref = m.group('posthref') preequals = m.group('preequals') postequals = m.group('postequals') # logger.debug(m.groups('href') assert sum(map(int, map(bool, matches))) == 3 @pytest.mark.parametrize(('parser_type',), [ # pylint: disable=E1101 ('BYTES',), ('CHARS',), ]) def test_long_doc(parser_type, test_data_dir): stream_item = StreamItem() stream_item.body = ContentItem() path = os.path.join(test_data_dir, 'test' ) stream_item.body.clean_html = open( os.path.join(path, 'company-test.html')).read() context = {} hl = hyperlink_labels(config={ 'require_abs_url': True, 'all_domains': True, 'offset_types': [parser_type], }) hl(stream_item, context)
trec-kba/streamcorpus-pipeline
streamcorpus_pipeline/tests/test_hyperlink_labels.py
streamcorpus_pipeline/_lingpipe.py
# flake8: noqa
from pandas.core.reshape.concat import concat
from pandas.core.reshape.melt import lreshape, melt, wide_to_long
from pandas.core.reshape.merge import merge, merge_asof, merge_ordered
from pandas.core.reshape.pivot import crosstab, pivot, pivot_table
from pandas.core.reshape.reshape import get_dummies
from pandas.core.reshape.tile import cut, qcut
from datetime import datetime, timedelta import operator import numpy as np import pytest import pytz from pandas._libs.tslibs import iNaT import pandas.compat as compat from pandas.core.dtypes.common import is_datetime64_any_dtype from pandas import ( DatetimeIndex, DatetimeTZDtype, Index, NaT, Period, Series, Timedelta, TimedeltaIndex, Timestamp, isna, offsets, ) import pandas._testing as tm from pandas.core.arrays import DatetimeArray, PeriodArray, TimedeltaArray from pandas.core.ops import roperator @pytest.mark.parametrize( "nat,idx", [ (Timestamp("NaT"), DatetimeIndex), (Timedelta("NaT"), TimedeltaIndex), (Period("NaT", freq="M"), PeriodArray), ], ) def test_nat_fields(nat, idx): for field in idx._field_ops: # weekday is a property of DTI, but a method # on NaT/Timestamp for compat with datetime if field == "weekday": continue result = getattr(NaT, field) assert np.isnan(result) result = getattr(nat, field) assert np.isnan(result) for field in idx._bool_ops: result = getattr(NaT, field) assert result is False result = getattr(nat, field) assert result is False def test_nat_vector_field_access(): idx = DatetimeIndex(["1/1/2000", None, None, "1/4/2000"]) for field in DatetimeIndex._field_ops: # weekday is a property of DTI, but a method # on NaT/Timestamp for compat with datetime if field == "weekday": continue if field in ["week", "weekofyear"]: # GH#33595 Deprecate week and weekofyear continue result = getattr(idx, field) expected = Index([getattr(x, field) for x in idx]) tm.assert_index_equal(result, expected) ser = Series(idx) for field in DatetimeIndex._field_ops: # weekday is a property of DTI, but a method # on NaT/Timestamp for compat with datetime if field == "weekday": continue if field in ["week", "weekofyear"]: # GH#33595 Deprecate week and weekofyear continue result = getattr(ser.dt, field) expected = [getattr(x, field) for x in idx] tm.assert_series_equal(result, Series(expected)) for field in DatetimeIndex._bool_ops: result = getattr(ser.dt, field) expected = [getattr(x, field) for x in idx] tm.assert_series_equal(result, Series(expected)) @pytest.mark.parametrize("klass", [Timestamp, Timedelta, Period]) @pytest.mark.parametrize("value", [None, np.nan, iNaT, float("nan"), NaT, "NaT", "nat"]) def test_identity(klass, value): assert klass(value) is NaT @pytest.mark.parametrize("klass", [Timestamp, Timedelta, Period]) @pytest.mark.parametrize("value", ["", "nat", "NAT", None, np.nan]) def test_equality(klass, value): if klass is Period and value == "": pytest.skip("Period cannot parse empty string") assert klass(value).value == iNaT @pytest.mark.parametrize("klass", [Timestamp, Timedelta]) @pytest.mark.parametrize("method", ["round", "floor", "ceil"]) @pytest.mark.parametrize("freq", ["s", "5s", "min", "5min", "h", "5h"]) def test_round_nat(klass, method, freq): # see gh-14940 ts = klass("nat") round_method = getattr(ts, method) assert round_method(freq) is ts @pytest.mark.parametrize( "method", [ "astimezone", "combine", "ctime", "dst", "fromordinal", "fromtimestamp", pytest.param( "fromisocalendar", marks=pytest.mark.skipif( not compat.PY38, reason="'fromisocalendar' was added in stdlib datetime in python 3.8", ), ), "isocalendar", "strftime", "strptime", "time", "timestamp", "timetuple", "timetz", "toordinal", "tzname", "utcfromtimestamp", "utcnow", "utcoffset", "utctimetuple", "timestamp", ], ) def test_nat_methods_raise(method): # see gh-9513, gh-17329 msg = f"NaTType does not support {method}" with pytest.raises(ValueError, match=msg): getattr(NaT, method)() 
@pytest.mark.parametrize("method", ["weekday", "isoweekday"]) def test_nat_methods_nan(method): # see gh-9513, gh-17329 assert np.isnan(getattr(NaT, method)()) @pytest.mark.parametrize( "method", ["date", "now", "replace", "today", "tz_convert", "tz_localize"] ) def test_nat_methods_nat(method): # see gh-8254, gh-9513, gh-17329 assert getattr(NaT, method)() is NaT @pytest.mark.parametrize( "get_nat", [lambda x: NaT, lambda x: Timedelta(x), lambda x: Timestamp(x)] ) def test_nat_iso_format(get_nat): # see gh-12300 assert get_nat("NaT").isoformat() == "NaT" @pytest.mark.parametrize( "klass,expected", [ (Timestamp, ["freqstr", "normalize", "to_julian_date", "to_period", "tz"]), ( Timedelta, [ "components", "delta", "is_populated", "resolution_string", "to_pytimedelta", "to_timedelta64", "view", ], ), ], ) def test_missing_public_nat_methods(klass, expected): # see gh-17327 # # NaT should have *most* of the Timestamp and Timedelta methods. # Here, we check which public methods NaT does not have. We # ignore any missing private methods. nat_names = dir(NaT) klass_names = dir(klass) missing = [x for x in klass_names if x not in nat_names and not x.startswith("_")] missing.sort() assert missing == expected def _get_overlap_public_nat_methods(klass, as_tuple=False): """ Get overlapping public methods between NaT and another class. Parameters ---------- klass : type The class to compare with NaT as_tuple : bool, default False Whether to return a list of tuples of the form (klass, method). Returns ------- overlap : list """ nat_names = dir(NaT) klass_names = dir(klass) overlap = [ x for x in nat_names if x in klass_names and not x.startswith("_") and callable(getattr(klass, x)) ] # Timestamp takes precedence over Timedelta in terms of overlap. if klass is Timedelta: ts_names = dir(Timestamp) overlap = [x for x in overlap if x not in ts_names] if as_tuple: overlap = [(klass, method) for method in overlap] overlap.sort() return overlap @pytest.mark.parametrize( "klass,expected", [ ( Timestamp, [ "astimezone", "ceil", "combine", "ctime", "date", "day_name", "dst", "floor", "fromisocalendar", "fromisoformat", "fromordinal", "fromtimestamp", "isocalendar", "isoformat", "isoweekday", "month_name", "now", "replace", "round", "strftime", "strptime", "time", "timestamp", "timetuple", "timetz", "to_datetime64", "to_numpy", "to_pydatetime", "today", "toordinal", "tz_convert", "tz_localize", "tzname", "utcfromtimestamp", "utcnow", "utcoffset", "utctimetuple", "weekday", ], ), (Timedelta, ["total_seconds"]), ], ) def test_overlap_public_nat_methods(klass, expected): # see gh-17327 # # NaT should have *most* of the Timestamp and Timedelta methods. # In case when Timestamp, Timedelta, and NaT are overlap, the overlap # is considered to be with Timestamp and NaT, not Timedelta. # "fromisocalendar" was introduced in 3.8 if klass is Timestamp and not compat.PY38: expected.remove("fromisocalendar") assert _get_overlap_public_nat_methods(klass) == expected @pytest.mark.parametrize( "compare", ( _get_overlap_public_nat_methods(Timestamp, True) + _get_overlap_public_nat_methods(Timedelta, True) ), ) def test_nat_doc_strings(compare): # see gh-17327 # # The docstrings for overlapping methods should match. 
klass, method = compare klass_doc = getattr(klass, method).__doc__ nat_doc = getattr(NaT, method).__doc__ assert klass_doc == nat_doc _ops = { "left_plus_right": lambda a, b: a + b, "right_plus_left": lambda a, b: b + a, "left_minus_right": lambda a, b: a - b, "right_minus_left": lambda a, b: b - a, "left_times_right": lambda a, b: a * b, "right_times_left": lambda a, b: b * a, "left_div_right": lambda a, b: a / b, "right_div_left": lambda a, b: b / a, } @pytest.mark.parametrize("op_name", list(_ops.keys())) @pytest.mark.parametrize( "value,val_type", [ (2, "scalar"), (1.5, "floating"), (np.nan, "floating"), ("foo", "str"), (timedelta(3600), "timedelta"), (Timedelta("5s"), "timedelta"), (datetime(2014, 1, 1), "timestamp"), (Timestamp("2014-01-01"), "timestamp"), (Timestamp("2014-01-01", tz="UTC"), "timestamp"), (Timestamp("2014-01-01", tz="US/Eastern"), "timestamp"), (pytz.timezone("Asia/Tokyo").localize(datetime(2014, 1, 1)), "timestamp"), ], ) def test_nat_arithmetic_scalar(op_name, value, val_type): # see gh-6873 invalid_ops = { "scalar": {"right_div_left"}, "floating": { "right_div_left", "left_minus_right", "right_minus_left", "left_plus_right", "right_plus_left", }, "str": set(_ops.keys()), "timedelta": {"left_times_right", "right_times_left"}, "timestamp": { "left_times_right", "right_times_left", "left_div_right", "right_div_left", }, } op = _ops[op_name] if op_name in invalid_ops.get(val_type, set()): if ( val_type == "timedelta" and "times" in op_name and isinstance(value, Timedelta) ): typs = "(Timedelta|NaTType)" msg = rf"unsupported operand type\(s\) for \*: '{typs}' and '{typs}'" elif val_type == "str": # un-specific check here because the message comes from str # and varies by method msg = "|".join( [ "can only concatenate str", "unsupported operand type", "can't multiply sequence", "Can't convert 'NaTType'", "must be str, not NaTType", ] ) else: msg = "unsupported operand type" with pytest.raises(TypeError, match=msg): op(NaT, value) else: if val_type == "timedelta" and "div" in op_name: expected = np.nan else: expected = NaT assert op(NaT, value) is expected @pytest.mark.parametrize( "val,expected", [(np.nan, NaT), (NaT, np.nan), (np.timedelta64("NaT"), np.nan)] ) def test_nat_rfloordiv_timedelta(val, expected): # see gh-#18846 # # See also test_timedelta.TestTimedeltaArithmetic.test_floordiv td = Timedelta(hours=3, minutes=4) assert td // val is expected @pytest.mark.parametrize( "op_name", ["left_plus_right", "right_plus_left", "left_minus_right", "right_minus_left"], ) @pytest.mark.parametrize( "value", [ DatetimeIndex(["2011-01-01", "2011-01-02"], name="x"), DatetimeIndex(["2011-01-01", "2011-01-02"], tz="US/Eastern", name="x"), DatetimeArray._from_sequence(["2011-01-01", "2011-01-02"]), DatetimeArray._from_sequence( ["2011-01-01", "2011-01-02"], dtype=DatetimeTZDtype(tz="US/Pacific") ), TimedeltaIndex(["1 day", "2 day"], name="x"), ], ) def test_nat_arithmetic_index(op_name, value): # see gh-11718 exp_name = "x" exp_data = [NaT] * 2 if is_datetime64_any_dtype(value.dtype) and "plus" in op_name: expected = DatetimeIndex(exp_data, tz=value.tz, name=exp_name) else: expected = TimedeltaIndex(exp_data, name=exp_name) if not isinstance(value, Index): expected = expected.array op = _ops[op_name] result = op(NaT, value) tm.assert_equal(result, expected) @pytest.mark.parametrize( "op_name", ["left_plus_right", "right_plus_left", "left_minus_right", "right_minus_left"], ) @pytest.mark.parametrize("box", [TimedeltaIndex, Series, TimedeltaArray._from_sequence]) def 
test_nat_arithmetic_td64_vector(op_name, box): # see gh-19124 vec = box(["1 day", "2 day"], dtype="timedelta64[ns]") box_nat = box([NaT, NaT], dtype="timedelta64[ns]") tm.assert_equal(_ops[op_name](vec, NaT), box_nat) @pytest.mark.parametrize( "dtype,op,out_dtype", [ ("datetime64[ns]", operator.add, "datetime64[ns]"), ("datetime64[ns]", roperator.radd, "datetime64[ns]"), ("datetime64[ns]", operator.sub, "timedelta64[ns]"), ("datetime64[ns]", roperator.rsub, "timedelta64[ns]"), ("timedelta64[ns]", operator.add, "datetime64[ns]"), ("timedelta64[ns]", roperator.radd, "datetime64[ns]"), ("timedelta64[ns]", operator.sub, "datetime64[ns]"), ("timedelta64[ns]", roperator.rsub, "timedelta64[ns]"), ], ) def test_nat_arithmetic_ndarray(dtype, op, out_dtype): other = np.arange(10).astype(dtype) result = op(NaT, other) expected = np.empty(other.shape, dtype=out_dtype) expected.fill("NaT") tm.assert_numpy_array_equal(result, expected) def test_nat_pinned_docstrings(): # see gh-17327 assert NaT.ctime.__doc__ == datetime.ctime.__doc__ def test_to_numpy_alias(): # GH 24653: alias .to_numpy() for scalars expected = NaT.to_datetime64() result = NaT.to_numpy() assert isna(expected) and isna(result) @pytest.mark.parametrize( "other", [ Timedelta(0), Timedelta(0).to_pytimedelta(), pytest.param( Timedelta(0).to_timedelta64(), marks=pytest.mark.xfail( reason="td64 doesnt return NotImplemented, see numpy#17017" ), ), Timestamp(0), Timestamp(0).to_pydatetime(), pytest.param( Timestamp(0).to_datetime64(), marks=pytest.mark.xfail( reason="dt64 doesnt return NotImplemented, see numpy#17017" ), ), Timestamp(0).tz_localize("UTC"), NaT, ], ) def test_nat_comparisons(compare_operators_no_eq_ne, other): # GH 26039 opname = compare_operators_no_eq_ne assert getattr(NaT, opname)(other) is False op = getattr(operator, opname.strip("_")) assert op(NaT, other) is False assert op(other, NaT) is False @pytest.mark.parametrize("other", [np.timedelta64(0, "ns"), np.datetime64("now", "ns")]) def test_nat_comparisons_numpy(other): # Once numpy#17017 is fixed and the xfailed cases in test_nat_comparisons # pass, this test can be removed assert not NaT == other assert NaT != other assert not NaT < other assert not NaT > other assert not NaT <= other assert not NaT >= other @pytest.mark.parametrize("other_and_type", [("foo", "str"), (2, "int"), (2.0, "float")]) @pytest.mark.parametrize( "symbol_and_op", [("<=", operator.le), ("<", operator.lt), (">=", operator.ge), (">", operator.gt)], ) def test_nat_comparisons_invalid(other_and_type, symbol_and_op): # GH#35585 other, other_type = other_and_type symbol, op = symbol_and_op assert not NaT == other assert not other == NaT assert NaT != other assert other != NaT msg = f"'{symbol}' not supported between instances of 'NaTType' and '{other_type}'" with pytest.raises(TypeError, match=msg): op(NaT, other) msg = f"'{symbol}' not supported between instances of '{other_type}' and 'NaTType'" with pytest.raises(TypeError, match=msg): op(other, NaT) def test_compare_date(): # GH#39151 comparing NaT with date object is deprecated # See also: tests.scalar.timestamps.test_comparisons::test_compare_date dt = Timestamp.now().to_pydatetime().date() for left, right in [(NaT, dt), (dt, NaT)]: assert not left == right assert left != right with tm.assert_produces_warning(FutureWarning): assert not left < right with tm.assert_produces_warning(FutureWarning): assert not left <= right with tm.assert_produces_warning(FutureWarning): assert not left > right with tm.assert_produces_warning(FutureWarning): assert 
not left >= right # Once the deprecation is enforced, the following assertions # can be enabled: # assert not left == right # assert left != right # # with pytest.raises(TypeError): # left < right # with pytest.raises(TypeError): # left <= right # with pytest.raises(TypeError): # left > right # with pytest.raises(TypeError): # left >= right @pytest.mark.parametrize( "obj", [ offsets.YearEnd(2), offsets.YearBegin(2), offsets.MonthBegin(1), offsets.MonthEnd(2), offsets.MonthEnd(12), offsets.Day(2), offsets.Day(5), offsets.Hour(24), offsets.Hour(3), offsets.Minute(), np.timedelta64(3, "h"), np.timedelta64(4, "h"), np.timedelta64(3200, "s"), np.timedelta64(3600, "s"), np.timedelta64(3600 * 24, "s"), np.timedelta64(2, "D"), np.timedelta64(365, "D"), timedelta(-2), timedelta(365), timedelta(minutes=120), timedelta(days=4, minutes=180), timedelta(hours=23), timedelta(hours=23, minutes=30), timedelta(hours=48), ], ) def test_nat_addsub_tdlike_scalar(obj): assert NaT + obj is NaT assert obj + NaT is NaT assert NaT - obj is NaT def test_pickle(): # GH#4606 p = tm.round_trip_pickle(NaT) assert p is NaT
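Two behaviours that the test file above asserts repeatedly are worth stating compactly: NaT absorbs datetime/timedelta arithmetic, and ordered comparisons against NaT quietly return False for datetime-like operands instead of raising. A condensed restatement (requires pandas and numpy):

# Condensed restatement of behaviours asserted in the tests above.
import numpy as np
from pandas import NaT, Timedelta, Timestamp

assert NaT + Timedelta(hours=1) is NaT
assert NaT - Timestamp("2014-01-01") is NaT
assert (NaT == NaT) is False            # NaT never compares equal, even to itself
assert (NaT < Timestamp("2014-01-01")) is False
assert np.isnan(NaT.weekday())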
gfyoung/pandas
pandas/tests/scalar/test_nat.py
pandas/core/reshape/api.py
from pandas.core.arrays.base import (
    ExtensionArray,
    ExtensionOpsMixin,
    ExtensionScalarOpsMixin,
)
from pandas.core.arrays.boolean import BooleanArray
from pandas.core.arrays.categorical import Categorical
from pandas.core.arrays.datetimes import DatetimeArray
from pandas.core.arrays.floating import FloatingArray
from pandas.core.arrays.integer import IntegerArray
from pandas.core.arrays.interval import IntervalArray
from pandas.core.arrays.masked import BaseMaskedArray
from pandas.core.arrays.numpy_ import PandasArray, PandasDtype
from pandas.core.arrays.period import PeriodArray, period_array
from pandas.core.arrays.sparse import SparseArray
from pandas.core.arrays.string_ import StringArray
from pandas.core.arrays.timedeltas import TimedeltaArray

__all__ = [
    "ExtensionArray",
    "ExtensionOpsMixin",
    "ExtensionScalarOpsMixin",
    "BaseMaskedArray",
    "BooleanArray",
    "Categorical",
    "DatetimeArray",
    "FloatingArray",
    "IntegerArray",
    "IntervalArray",
    "PandasArray",
    "PandasDtype",
    "PeriodArray",
    "period_array",
    "SparseArray",
    "StringArray",
    "TimedeltaArray",
]
from datetime import datetime, timedelta import operator import numpy as np import pytest import pytz from pandas._libs.tslibs import iNaT import pandas.compat as compat from pandas.core.dtypes.common import is_datetime64_any_dtype from pandas import ( DatetimeIndex, DatetimeTZDtype, Index, NaT, Period, Series, Timedelta, TimedeltaIndex, Timestamp, isna, offsets, ) import pandas._testing as tm from pandas.core.arrays import DatetimeArray, PeriodArray, TimedeltaArray from pandas.core.ops import roperator @pytest.mark.parametrize( "nat,idx", [ (Timestamp("NaT"), DatetimeIndex), (Timedelta("NaT"), TimedeltaIndex), (Period("NaT", freq="M"), PeriodArray), ], ) def test_nat_fields(nat, idx): for field in idx._field_ops: # weekday is a property of DTI, but a method # on NaT/Timestamp for compat with datetime if field == "weekday": continue result = getattr(NaT, field) assert np.isnan(result) result = getattr(nat, field) assert np.isnan(result) for field in idx._bool_ops: result = getattr(NaT, field) assert result is False result = getattr(nat, field) assert result is False def test_nat_vector_field_access(): idx = DatetimeIndex(["1/1/2000", None, None, "1/4/2000"]) for field in DatetimeIndex._field_ops: # weekday is a property of DTI, but a method # on NaT/Timestamp for compat with datetime if field == "weekday": continue if field in ["week", "weekofyear"]: # GH#33595 Deprecate week and weekofyear continue result = getattr(idx, field) expected = Index([getattr(x, field) for x in idx]) tm.assert_index_equal(result, expected) ser = Series(idx) for field in DatetimeIndex._field_ops: # weekday is a property of DTI, but a method # on NaT/Timestamp for compat with datetime if field == "weekday": continue if field in ["week", "weekofyear"]: # GH#33595 Deprecate week and weekofyear continue result = getattr(ser.dt, field) expected = [getattr(x, field) for x in idx] tm.assert_series_equal(result, Series(expected)) for field in DatetimeIndex._bool_ops: result = getattr(ser.dt, field) expected = [getattr(x, field) for x in idx] tm.assert_series_equal(result, Series(expected)) @pytest.mark.parametrize("klass", [Timestamp, Timedelta, Period]) @pytest.mark.parametrize("value", [None, np.nan, iNaT, float("nan"), NaT, "NaT", "nat"]) def test_identity(klass, value): assert klass(value) is NaT @pytest.mark.parametrize("klass", [Timestamp, Timedelta, Period]) @pytest.mark.parametrize("value", ["", "nat", "NAT", None, np.nan]) def test_equality(klass, value): if klass is Period and value == "": pytest.skip("Period cannot parse empty string") assert klass(value).value == iNaT @pytest.mark.parametrize("klass", [Timestamp, Timedelta]) @pytest.mark.parametrize("method", ["round", "floor", "ceil"]) @pytest.mark.parametrize("freq", ["s", "5s", "min", "5min", "h", "5h"]) def test_round_nat(klass, method, freq): # see gh-14940 ts = klass("nat") round_method = getattr(ts, method) assert round_method(freq) is ts @pytest.mark.parametrize( "method", [ "astimezone", "combine", "ctime", "dst", "fromordinal", "fromtimestamp", pytest.param( "fromisocalendar", marks=pytest.mark.skipif( not compat.PY38, reason="'fromisocalendar' was added in stdlib datetime in python 3.8", ), ), "isocalendar", "strftime", "strptime", "time", "timestamp", "timetuple", "timetz", "toordinal", "tzname", "utcfromtimestamp", "utcnow", "utcoffset", "utctimetuple", "timestamp", ], ) def test_nat_methods_raise(method): # see gh-9513, gh-17329 msg = f"NaTType does not support {method}" with pytest.raises(ValueError, match=msg): getattr(NaT, method)() 
@pytest.mark.parametrize("method", ["weekday", "isoweekday"]) def test_nat_methods_nan(method): # see gh-9513, gh-17329 assert np.isnan(getattr(NaT, method)()) @pytest.mark.parametrize( "method", ["date", "now", "replace", "today", "tz_convert", "tz_localize"] ) def test_nat_methods_nat(method): # see gh-8254, gh-9513, gh-17329 assert getattr(NaT, method)() is NaT @pytest.mark.parametrize( "get_nat", [lambda x: NaT, lambda x: Timedelta(x), lambda x: Timestamp(x)] ) def test_nat_iso_format(get_nat): # see gh-12300 assert get_nat("NaT").isoformat() == "NaT" @pytest.mark.parametrize( "klass,expected", [ (Timestamp, ["freqstr", "normalize", "to_julian_date", "to_period", "tz"]), ( Timedelta, [ "components", "delta", "is_populated", "resolution_string", "to_pytimedelta", "to_timedelta64", "view", ], ), ], ) def test_missing_public_nat_methods(klass, expected): # see gh-17327 # # NaT should have *most* of the Timestamp and Timedelta methods. # Here, we check which public methods NaT does not have. We # ignore any missing private methods. nat_names = dir(NaT) klass_names = dir(klass) missing = [x for x in klass_names if x not in nat_names and not x.startswith("_")] missing.sort() assert missing == expected def _get_overlap_public_nat_methods(klass, as_tuple=False): """ Get overlapping public methods between NaT and another class. Parameters ---------- klass : type The class to compare with NaT as_tuple : bool, default False Whether to return a list of tuples of the form (klass, method). Returns ------- overlap : list """ nat_names = dir(NaT) klass_names = dir(klass) overlap = [ x for x in nat_names if x in klass_names and not x.startswith("_") and callable(getattr(klass, x)) ] # Timestamp takes precedence over Timedelta in terms of overlap. if klass is Timedelta: ts_names = dir(Timestamp) overlap = [x for x in overlap if x not in ts_names] if as_tuple: overlap = [(klass, method) for method in overlap] overlap.sort() return overlap @pytest.mark.parametrize( "klass,expected", [ ( Timestamp, [ "astimezone", "ceil", "combine", "ctime", "date", "day_name", "dst", "floor", "fromisocalendar", "fromisoformat", "fromordinal", "fromtimestamp", "isocalendar", "isoformat", "isoweekday", "month_name", "now", "replace", "round", "strftime", "strptime", "time", "timestamp", "timetuple", "timetz", "to_datetime64", "to_numpy", "to_pydatetime", "today", "toordinal", "tz_convert", "tz_localize", "tzname", "utcfromtimestamp", "utcnow", "utcoffset", "utctimetuple", "weekday", ], ), (Timedelta, ["total_seconds"]), ], ) def test_overlap_public_nat_methods(klass, expected): # see gh-17327 # # NaT should have *most* of the Timestamp and Timedelta methods. # In case when Timestamp, Timedelta, and NaT are overlap, the overlap # is considered to be with Timestamp and NaT, not Timedelta. # "fromisocalendar" was introduced in 3.8 if klass is Timestamp and not compat.PY38: expected.remove("fromisocalendar") assert _get_overlap_public_nat_methods(klass) == expected @pytest.mark.parametrize( "compare", ( _get_overlap_public_nat_methods(Timestamp, True) + _get_overlap_public_nat_methods(Timedelta, True) ), ) def test_nat_doc_strings(compare): # see gh-17327 # # The docstrings for overlapping methods should match. 
klass, method = compare klass_doc = getattr(klass, method).__doc__ nat_doc = getattr(NaT, method).__doc__ assert klass_doc == nat_doc _ops = { "left_plus_right": lambda a, b: a + b, "right_plus_left": lambda a, b: b + a, "left_minus_right": lambda a, b: a - b, "right_minus_left": lambda a, b: b - a, "left_times_right": lambda a, b: a * b, "right_times_left": lambda a, b: b * a, "left_div_right": lambda a, b: a / b, "right_div_left": lambda a, b: b / a, } @pytest.mark.parametrize("op_name", list(_ops.keys())) @pytest.mark.parametrize( "value,val_type", [ (2, "scalar"), (1.5, "floating"), (np.nan, "floating"), ("foo", "str"), (timedelta(3600), "timedelta"), (Timedelta("5s"), "timedelta"), (datetime(2014, 1, 1), "timestamp"), (Timestamp("2014-01-01"), "timestamp"), (Timestamp("2014-01-01", tz="UTC"), "timestamp"), (Timestamp("2014-01-01", tz="US/Eastern"), "timestamp"), (pytz.timezone("Asia/Tokyo").localize(datetime(2014, 1, 1)), "timestamp"), ], ) def test_nat_arithmetic_scalar(op_name, value, val_type): # see gh-6873 invalid_ops = { "scalar": {"right_div_left"}, "floating": { "right_div_left", "left_minus_right", "right_minus_left", "left_plus_right", "right_plus_left", }, "str": set(_ops.keys()), "timedelta": {"left_times_right", "right_times_left"}, "timestamp": { "left_times_right", "right_times_left", "left_div_right", "right_div_left", }, } op = _ops[op_name] if op_name in invalid_ops.get(val_type, set()): if ( val_type == "timedelta" and "times" in op_name and isinstance(value, Timedelta) ): typs = "(Timedelta|NaTType)" msg = rf"unsupported operand type\(s\) for \*: '{typs}' and '{typs}'" elif val_type == "str": # un-specific check here because the message comes from str # and varies by method msg = "|".join( [ "can only concatenate str", "unsupported operand type", "can't multiply sequence", "Can't convert 'NaTType'", "must be str, not NaTType", ] ) else: msg = "unsupported operand type" with pytest.raises(TypeError, match=msg): op(NaT, value) else: if val_type == "timedelta" and "div" in op_name: expected = np.nan else: expected = NaT assert op(NaT, value) is expected @pytest.mark.parametrize( "val,expected", [(np.nan, NaT), (NaT, np.nan), (np.timedelta64("NaT"), np.nan)] ) def test_nat_rfloordiv_timedelta(val, expected): # see gh-#18846 # # See also test_timedelta.TestTimedeltaArithmetic.test_floordiv td = Timedelta(hours=3, minutes=4) assert td // val is expected @pytest.mark.parametrize( "op_name", ["left_plus_right", "right_plus_left", "left_minus_right", "right_minus_left"], ) @pytest.mark.parametrize( "value", [ DatetimeIndex(["2011-01-01", "2011-01-02"], name="x"), DatetimeIndex(["2011-01-01", "2011-01-02"], tz="US/Eastern", name="x"), DatetimeArray._from_sequence(["2011-01-01", "2011-01-02"]), DatetimeArray._from_sequence( ["2011-01-01", "2011-01-02"], dtype=DatetimeTZDtype(tz="US/Pacific") ), TimedeltaIndex(["1 day", "2 day"], name="x"), ], ) def test_nat_arithmetic_index(op_name, value): # see gh-11718 exp_name = "x" exp_data = [NaT] * 2 if is_datetime64_any_dtype(value.dtype) and "plus" in op_name: expected = DatetimeIndex(exp_data, tz=value.tz, name=exp_name) else: expected = TimedeltaIndex(exp_data, name=exp_name) if not isinstance(value, Index): expected = expected.array op = _ops[op_name] result = op(NaT, value) tm.assert_equal(result, expected) @pytest.mark.parametrize( "op_name", ["left_plus_right", "right_plus_left", "left_minus_right", "right_minus_left"], ) @pytest.mark.parametrize("box", [TimedeltaIndex, Series, TimedeltaArray._from_sequence]) def 
test_nat_arithmetic_td64_vector(op_name, box): # see gh-19124 vec = box(["1 day", "2 day"], dtype="timedelta64[ns]") box_nat = box([NaT, NaT], dtype="timedelta64[ns]") tm.assert_equal(_ops[op_name](vec, NaT), box_nat) @pytest.mark.parametrize( "dtype,op,out_dtype", [ ("datetime64[ns]", operator.add, "datetime64[ns]"), ("datetime64[ns]", roperator.radd, "datetime64[ns]"), ("datetime64[ns]", operator.sub, "timedelta64[ns]"), ("datetime64[ns]", roperator.rsub, "timedelta64[ns]"), ("timedelta64[ns]", operator.add, "datetime64[ns]"), ("timedelta64[ns]", roperator.radd, "datetime64[ns]"), ("timedelta64[ns]", operator.sub, "datetime64[ns]"), ("timedelta64[ns]", roperator.rsub, "timedelta64[ns]"), ], ) def test_nat_arithmetic_ndarray(dtype, op, out_dtype): other = np.arange(10).astype(dtype) result = op(NaT, other) expected = np.empty(other.shape, dtype=out_dtype) expected.fill("NaT") tm.assert_numpy_array_equal(result, expected) def test_nat_pinned_docstrings(): # see gh-17327 assert NaT.ctime.__doc__ == datetime.ctime.__doc__ def test_to_numpy_alias(): # GH 24653: alias .to_numpy() for scalars expected = NaT.to_datetime64() result = NaT.to_numpy() assert isna(expected) and isna(result) @pytest.mark.parametrize( "other", [ Timedelta(0), Timedelta(0).to_pytimedelta(), pytest.param( Timedelta(0).to_timedelta64(), marks=pytest.mark.xfail( reason="td64 doesnt return NotImplemented, see numpy#17017" ), ), Timestamp(0), Timestamp(0).to_pydatetime(), pytest.param( Timestamp(0).to_datetime64(), marks=pytest.mark.xfail( reason="dt64 doesnt return NotImplemented, see numpy#17017" ), ), Timestamp(0).tz_localize("UTC"), NaT, ], ) def test_nat_comparisons(compare_operators_no_eq_ne, other): # GH 26039 opname = compare_operators_no_eq_ne assert getattr(NaT, opname)(other) is False op = getattr(operator, opname.strip("_")) assert op(NaT, other) is False assert op(other, NaT) is False @pytest.mark.parametrize("other", [np.timedelta64(0, "ns"), np.datetime64("now", "ns")]) def test_nat_comparisons_numpy(other): # Once numpy#17017 is fixed and the xfailed cases in test_nat_comparisons # pass, this test can be removed assert not NaT == other assert NaT != other assert not NaT < other assert not NaT > other assert not NaT <= other assert not NaT >= other @pytest.mark.parametrize("other_and_type", [("foo", "str"), (2, "int"), (2.0, "float")]) @pytest.mark.parametrize( "symbol_and_op", [("<=", operator.le), ("<", operator.lt), (">=", operator.ge), (">", operator.gt)], ) def test_nat_comparisons_invalid(other_and_type, symbol_and_op): # GH#35585 other, other_type = other_and_type symbol, op = symbol_and_op assert not NaT == other assert not other == NaT assert NaT != other assert other != NaT msg = f"'{symbol}' not supported between instances of 'NaTType' and '{other_type}'" with pytest.raises(TypeError, match=msg): op(NaT, other) msg = f"'{symbol}' not supported between instances of '{other_type}' and 'NaTType'" with pytest.raises(TypeError, match=msg): op(other, NaT) def test_compare_date(): # GH#39151 comparing NaT with date object is deprecated # See also: tests.scalar.timestamps.test_comparisons::test_compare_date dt = Timestamp.now().to_pydatetime().date() for left, right in [(NaT, dt), (dt, NaT)]: assert not left == right assert left != right with tm.assert_produces_warning(FutureWarning): assert not left < right with tm.assert_produces_warning(FutureWarning): assert not left <= right with tm.assert_produces_warning(FutureWarning): assert not left > right with tm.assert_produces_warning(FutureWarning): assert 
not left >= right # Once the deprecation is enforced, the following assertions # can be enabled: # assert not left == right # assert left != right # # with pytest.raises(TypeError): # left < right # with pytest.raises(TypeError): # left <= right # with pytest.raises(TypeError): # left > right # with pytest.raises(TypeError): # left >= right @pytest.mark.parametrize( "obj", [ offsets.YearEnd(2), offsets.YearBegin(2), offsets.MonthBegin(1), offsets.MonthEnd(2), offsets.MonthEnd(12), offsets.Day(2), offsets.Day(5), offsets.Hour(24), offsets.Hour(3), offsets.Minute(), np.timedelta64(3, "h"), np.timedelta64(4, "h"), np.timedelta64(3200, "s"), np.timedelta64(3600, "s"), np.timedelta64(3600 * 24, "s"), np.timedelta64(2, "D"), np.timedelta64(365, "D"), timedelta(-2), timedelta(365), timedelta(minutes=120), timedelta(days=4, minutes=180), timedelta(hours=23), timedelta(hours=23, minutes=30), timedelta(hours=48), ], ) def test_nat_addsub_tdlike_scalar(obj): assert NaT + obj is NaT assert obj + NaT is NaT assert NaT - obj is NaT def test_pickle(): # GH#4606 p = tm.round_trip_pickle(NaT) assert p is NaT
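# --- Illustrative sketch (editor addition, not part of the upstream suite) ---
# A compact restatement of the propagation rules the parametrized tests above
# exercise: NaT absorbs datetime/timedelta addition and subtraction, and dividing
# NaT by a timedelta yields NaN. Uses only names already imported in this module.
def test_nat_propagation_sketch():
    assert NaT + Timedelta("1 day") is NaT
    assert Timestamp("2014-01-01") - NaT is NaT
    assert isna(NaT / Timedelta("1 hour"))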
gfyoung/pandas
pandas/tests/scalar/test_nat.py
pandas/core/arrays/__init__.py
import atexit import os import re import subprocess import threading from contextlib import contextmanager from functools import partial import diaper from cached_property import cached_property from werkzeug.local import LocalProxy # import diaper for backward compatibility on_rtd = os.environ.get('READTHEDOCS') == 'True'
class TriesExceeded(Exception): """Default exception raised when tries() method doesn't catch a func exception""" pass
class FakeObject: def __init__(self, **kwargs): self.__dict__ = kwargs
def fakeobject_or_object(obj, attr, default=None): if isinstance(obj, str): return FakeObject(**{attr: obj}) elif not obj: return FakeObject(**{attr: default}) else: return obj
def clear_property_cache(obj, *names): """ Clear a cached property regardless of whether it was cached previously """ if isinstance(obj, LocalProxy): obj = obj._get_current_object() for name in names: assert isinstance(getattr(type(obj), name), cached_property) obj.__dict__.pop(name, None)
class _classproperty(property): """Subclass property to make classmethod properties possible""" def __get__(self, cls, owner): return self.fget.__get__(None, owner)()
def classproperty(f): """Enables properties for whole classes: Usage: >>> class Foo(object): ... @classproperty ... def bar(cls): ... return "bar" ... >>> print(Foo.bar) bar """ return _classproperty(classmethod(f))
def at_exit(f, *args, **kwargs): """Diaper-protected atexit handler registering. Same syntax as atexit.register()""" return atexit.register(lambda: diaper(f, *args, **kwargs))
def _prenormalize_text(text): """Makes the text lowercase and removes all characters that are not digits, alphas, or spaces""" # _'s represent spaces so convert those to spaces too return re.sub(r"[^a-z0-9 ]", "", text.strip().lower().replace('_', ' '))
def _replace_spaces_with(text, delim): """Contracts spaces into one character and replaces it with a custom character.""" return re.sub(r"\s+", delim, text)
def normalize_text(text): """Converts a string to a lowercase string containing only letters, digits and spaces. The space is always one character long if it is present. """ return _replace_spaces_with(_prenormalize_text(text), ' ')
def attributize_string(text): """Converts a string to a lowercase string containing only letters, digits and underscores. Usable e.g. for generating object key names. The underscore is always one character long if it is present. """ return _replace_spaces_with(_prenormalize_text(text), '_')
def normalize_space(text): """Works in accordance with the XPath's normalize-space() operator. `Description <https://developer.mozilla.org/en-US/docs/Web/XPath/Functions/normalize-space>`_: *The normalize-space function strips leading and trailing white-space from a string, replaces sequences of whitespace characters by a single space, and returns the resulting string.* """ return _replace_spaces_with(text.strip(), ' ')
def tries(num_tries, exceptions, f, *args, **kwargs): """ Tries to call the function multiple times if specific exceptions occur. Args: num_tries: How many times to try if an exception is raised exceptions: Tuple (or just a single one) of exceptions that should be treated as a signal to retry. f: Callable to be called. *args: Arguments to be passed through to the callable **kwargs: Keyword arguments to be passed through to the callable Returns: What ``f`` returns. Raises: What ``f`` raises if the try count is exceeded. 
""" caught_exception = TriesExceeded('Tries were exhausted without a func exception') tries = 0 while tries < num_tries: tries += 1 try: return f(*args, **kwargs) except exceptions as e: caught_exception = e pass else: raise caught_exception # There are some environment variables that get smuggled in anyway. # If there is yet another one that will be possibly smuggled in, update this entry. READ_ENV_UNWANTED = {'SHLVL', '_', 'PWD'} def read_env(file): """Given a :py:class:`py.path.Local` file name, return a dict of exported shell vars and their values. Args: file: A :py:class:`py.path.Local` instance. Note: This will only include shell variables that are exported from the file being parsed Returns: A :py:class:`dict` of key/value pairs. If the file does not exist or bash could not parse the file, this dict will be empty. """ env_vars = {} if file.check(): # parse the file with bash, since it's pretty good at it, and dump the env # Use env -i to clean up the env (except the very few variables provider by bash itself) command = ['env', '-i', 'bash', '-c', f'source {file.strpath} && env'] proc = subprocess.Popen(command, stdout=subprocess.PIPE, bufsize=1) # filter out the remaining unwanted things for line in iter(proc.stdout.readline, b''): try: key, value = line.split("=", 1) except ValueError: continue if key not in READ_ENV_UNWANTED: try: value = int(value.strip()) except (ValueError, TypeError): value = value.strip() env_vars[key] = value stdout, stderr = proc.communicate() return env_vars def safe_string(o): """This will make string out of ANYTHING without having to worry about the stupid Unicode errors This function tries to make str/unicode out of ``o`` unless it already is one of those and then it processes it so in the end there is a harmless ascii string. Args: o: Anything. """ if not isinstance(o, str): o = str(o) if isinstance(o, bytes): o = o.decode('utf-8', "ignore") if not isinstance(o, str): o = o.encode("ascii", "xmlcharrefreplace") else: o = o.encode("ascii", "xmlcharrefreplace").decode('ascii') return o def process_pytest_path(path): # Processes the path elements with regards to [] path = path.lstrip("/") if len(path) == 0: return [] try: seg_end = path.index("/") except ValueError: seg_end = None try: param_start = path.index("[") except ValueError: param_start = None try: param_end = path.index("]") except ValueError: param_end = None if seg_end is None: # Definitely a final segment return [path] else: if (param_start is not None and param_end is not None and seg_end > param_start and seg_end < param_end): # The / inside [] segment = path[:param_end + 1] rest = path[param_end + 1:] return [segment] + process_pytest_path(rest) else: # The / that is not inside [] segment = path[:seg_end] rest = path[seg_end + 1:] return [segment] + process_pytest_path(rest) def process_shell_output(value): """This function allows you to unify the behaviour when you putput some values to stdout. You can check the code of the function how exactly does it behave for the particular types of variables. If no output is expected, it returns None. Args: value: Value to be outputted. Returns: A tuple consisting of returncode and the output to be printed. 
""" result_lines = [] exit = 0 if isinstance(value, (list, tuple, set)): for entry in sorted(value): result_lines.append(entry) elif isinstance(value, dict): for key, value in value.items(): result_lines.append(f'{key}={value}') elif isinstance(value, str): result_lines.append(value) elif isinstance(value, bool): # 'True' result becomes flipped exit 0, and vice versa for False exit = int(not value) else: # Unknown type, print it result_lines.append(str(value)) return exit, '\n'.join(result_lines) if result_lines else None def iterate_pairs(iterable): """Iterates over iterable, always taking two items at time. Eg. ``[1, 2, 3, 4, 5, 6]`` will yield ``(1, 2)``, then ``(3, 4)`` ... Must have even number of items. Args: iterable: An iterable with even number of items to be iterated over. """ if len(iterable) % 2 != 0: raise ValueError('Iterable must have even number of items.') it = iter(iterable) for i in it: yield i, next(it) def icastmap(t, i, *args, **kwargs): """Works like the map() but is made specially to map classes on iterables. A generator version. This function only applies the ``t`` to the item of ``i`` if it is not of that type. Args: t: The class that you want all the yielded items to be type of. i: Iterable with items to be cast. Returns: A generator. """ for item in i: if isinstance(item, t): yield item else: yield t(item, *args, **kwargs) def castmap(t, i, *args, **kwargs): """Works like the map() but is made specially to map classes on iterables. This function only applies the ``t`` to the item of ``i`` if it is not of that type. Args: t: The class that you want all theitems in the list to be type of. i: Iterable with items to be cast. Returns: A list. """ return list(icastmap(t, i, *args, **kwargs)) class InstanceClassMethod: """ Decorator-descriptor that enables you to use any method both as class and instance one Usage: .. code-block:: python class SomeClass(object): @InstanceClassMethod def a_method(self): the_instance_variant() @a_method.classmethod def a_method(cls): the_class_variant() i = SomeClass() i.a_method() SomeClass.a_method() # Both are possible If you don't pass ``classmethod`` the "instance" method, the one that was passed first will be called for both kinds of invocation. """ def __init__(self, instance_or_class_method): self.instance_or_class_method = instance_or_class_method self.class_method = None def classmethod(self, class_method): self.class_method = class_method return self def __get__(self, o, t): if o is None: # classmethod return partial(self.class_method or self.instance_or_class_method, t) else: # instancemethod return partial(self.instance_or_class_method, o) class ParamClassName: """ ParamClassName is a Descriptor to help when using classes and instances as parameters Note: This descriptor is a hack until collections are implemented everywhere Usage: .. code-block:: python class Provider(object): _param_name = ParamClassName('name') def __init__(self, name): self.name = name When accessing the ``_param_name`` on the class object it will return the ``__name__`` of the class by default. When accessing the ``_param_name`` on an instance of the class, it will return the attribute that is passed in. 
""" def __init__(self, instance_attr, class_attr='__name__'): self.instance_attr = instance_attr self.class_attr = class_attr def __get__(self, instance, owner): if instance: return getattr(instance, self.instance_attr) else: return getattr(owner, self.class_attr) @contextmanager def periodic_call(period_seconds, call, args=None, kwargs=None): timer = None args = args or [] kwargs = kwargs or {} def timer_event(): call(*args, **kwargs) reschedule() def reschedule(): nonlocal timer timer = threading.Timer(period_seconds, timer_event) timer.start() reschedule() try: yield finally: timer.cancel()
""" This test generate one default report for each category under reports accordion """ import pytest from cfme import test_requirements from cfme.infrastructure.provider import InfraProvider from cfme.markers.env_markers.provider import ONE # from selenium.common.exceptions import NoSuchElementException # from utils.log import logger pytestmark = [ pytest.mark.tier(3), test_requirements.report, pytest.mark.usefixtures('setup_provider_modscope'), pytest.mark.provider([InfraProvider], scope='module', selector=ONE), ] report_path = [ ["Configuration Management", "Virtual Machines", "Guest OS Information - any OS"], ["Migration Readiness", "Virtual Machines", "Summary - VMs migration ready"], ["Operations", "Virtual Machines", "VMs not Powered On"], ["VM Sprawl", "Candidates", "Summary of VM Create and Deletes"], ["Relationships", "Virtual Machines, Folders, Clusters", "VM Relationships"], ["Events", "Operations", "Events for VM prod_webserver"], ["Performance by Asset Type", "Virtual Machines", "Top CPU Consumers (weekly)"], ["Running Processes", "Virtual Machines", "Processes for prod VMs sort by CPU Time"], ["Trending", "Clusters", "Cluster CPU Trends (last week)"], ["Tenants", "Tenant Quotas", "Tenant Quotas"], ["Provisioning", "Activity Reports", "Provisioning Activity - by VM"], ] @pytest.mark.rhel_testing @pytest.mark.parametrize('path', report_path, scope="module", ids=lambda param: '/'.join(param[:2])) def test_reports_generate_report(request, path, appliance): """ This Tests run one default report for each category Steps: *Run one default report *Delete this Saved Report from the Database Polarion: assignee: pvala casecomponent: Reporting caseimportance: high initialEstimate: 1/16h """ report = appliance.collections.reports.instantiate( type=path[0], subtype=path[1], menu_name=path[2] ).queue(wait_for_finish=True) request.addfinalizer(report.delete_if_exists) assert report.exists
ManageIQ/integration_tests
cfme/tests/intelligence/reports/test_generate_report.py
cfme/utils/__init__.py
# Common stuff for custom button testing from widgetastic.widget import ParametrizedLocator from widgetastic.widget import ParametrizedView from widgetastic.widget import Text from widgetastic.widget import TextInput from widgetastic.widget import View from widgetastic_patternfly import BootstrapSelect from widgetastic_patternfly import Button from widgetastic_patternfly import Dropdown
OBJ_TYPE = [ "AZONE", "CLOUD_NETWORK", "CLOUD_OBJECT_STORE_CONTAINER", "CLOUD_SUBNET", "CLOUD_TENANT", "CLOUD_VOLUME", "CLUSTERS", "CONTAINER_IMAGES", "CONTAINER_NODES", "CONTAINER_PODS", "CONTAINER_PROJECTS", "CONTAINER_TEMPLATES", "CONTAINER_VOLUMES", "DATASTORES", "GROUP", "USER", "GENERIC", "HOSTS", "LOAD_BALANCER", "ROUTER", "ORCHESTRATION_STACK", "PROVIDER", "SECURITY_GROUP", "SERVICE", "SWITCH", "TENANT", "TEMPLATE_IMAGE", "VM_INSTANCE", ]
CLASS_MAP = { "AZONE": {"ui": "Availability Zone", "rest": "AvailabilityZone"}, "CLOUD_NETWORK": {"ui": "Cloud Network", "rest": "CloudNetwork"}, "CLOUD_OBJECT_STORE_CONTAINER": { "ui": "Cloud Object Store Container", "rest": "CloudObjectStoreContainer", }, "CLOUD_SUBNET": {"ui": "Cloud Subnet", "rest": "CloudSubnet"}, "CLOUD_TENANT": {"ui": "Cloud Tenant", "rest": "CloudTenant"}, "CLOUD_VOLUME": {"ui": "Cloud Volume", "rest": "CloudVolume"}, "CLUSTERS": {"ui": "Cluster / Deployment Role", "rest": "EmsCluster"}, "CONTAINER_IMAGES": {"ui": "Container Image", "rest": "ContainerImage"}, "CONTAINER_NODES": {"ui": "Container Node", "rest": "ContainerNode"}, "CONTAINER_PODS": {"ui": "Container Pod", "rest": "ContainerGroup"}, "CONTAINER_PROJECTS": {"ui": "Container Project", "rest": "ContainerProject"}, "CONTAINER_TEMPLATES": {"ui": "Container Template", "rest": "ContainerTemplate"}, "CONTAINER_VOLUMES": {"ui": "Container Volume", "rest": "ContainerVolume"}, "DATASTORES": {"ui": "Datastore", "rest": "Storage"}, "GROUP": {"ui": "Group", "rest": "MiqGroup"}, "USER": {"ui": "User", "rest": "User"}, "GENERIC": {"ui": "Generic Object", "rest": "GenericObject"}, "HOSTS": {"ui": "Host / Node", "rest": "Host"}, "LOAD_BALANCER": {"ui": "Load Balancer", "rest": "LoadBalancer"}, "ROUTER": {"ui": "Network Router", "rest": "NetworkRouter"}, "ORCHESTRATION_STACK": {"ui": "Orchestration Stack", "rest": "OrchestrationStack"}, "PROVIDER": {"ui": "Provider", "rest": "ExtManagementSystem"}, "SECURITY_GROUP": {"ui": "Security Group", "rest": "SecurityGroup"}, "SERVICE": {"ui": "Service", "rest": "Service"}, "SWITCH": {"ui": "Virtual Infra Switch", "rest": "Switch"}, "TENANT": {"ui": "Tenant", "rest": "Tenant"}, "TEMPLATE_IMAGE": {"ui": "VM Template and Image", "rest": "MiqTemplate"}, "VM_INSTANCE": {"ui": "VM and Instance", "rest": "Vm"}, }
def check_log_requests_count(appliance, parse_str=None): """ Method for checking the number of requests in the automation log Args: appliance: an appliance for ssh access parse_str: string to check for in the automation log Return: request count as an integer """ if not parse_str: parse_str = "Attributes - Begin" count = appliance.ssh_client.run_command( f"grep -c -w '{parse_str}' /var/www/miq/vmdb/log/automation.log" ) return int(count.output)
def log_request_check(appliance, expected_count): """ Method for checking the expected request count in the automation log Args: appliance: an appliance for ssh access expected_count: expected request count in the automation log """ return check_log_requests_count(appliance=appliance) == expected_count
class TextInputDialogView(View): """ This view appears on different custom button objects for dialog execution""" title = Text("#explorer_title_text")
service_name = TextInput(id="service_name") submit = Button("Submit") cancel = Button("Cancel") @property def is_displayed(self): # This is only used to wait for the view return self.submit.is_displayed and self.service_name.is_displayed
class TextInputAutomateView(View): """This view appears on clicking a custom button""" title = Text("#explorer_title_text") text_box1 = TextInput(id="text_box_1") text_box2 = TextInput(id="text_box_2") submit = Button("Submit") cancel = Button("Cancel") @property def is_displayed(self): return (self.submit.is_displayed and self.text_box1.is_displayed and self.text_box2.is_displayed)
class CredsHostsDialogView(View): """This is the view for the custom button's default Ansible playbook dialog""" machine_credential = BootstrapSelect(locator=".//select[@id='credential']//parent::div") hosts = TextInput(id="hosts") submit = Button("Submit") cancel = Button("Cancel") @property def is_displayed(self): return self.submit.is_displayed and self.machine_credential.is_displayed
class TextInputDialogSSUIView(TextInputDialogView): """ This view appears on SSUI custom button dialog execution""" submit = Button("Submit Request")
class DropdownDialogView(ParametrizedView): """ This is a custom view for custom button dropdown dialog execution""" title = Text("#explorer_title_text") class service_name(ParametrizedView): # noqa PARAMETERS = ("dialog_id",) dropdown = BootstrapSelect( locator=ParametrizedLocator("//select[@id={dialog_id|quote}]/..") ) submit = Button("Submit") submit_request = Button("Submit Request") cancel = Button("Cancel")
class CustomButtonSSUIDropdwon(Dropdown): """This is a workaround for the custom button Dropdown item_enabled method in SSUI""" def item_enabled(self, item): self._verify_enabled() el = self.item_element(item) return "disabled" not in self.browser.classes(el)
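# --- Illustrative usage sketch (editor addition, not part of the original module) ---
# Typical pattern in the custom button tests: note the current request count,
# trigger the button, then wait until the automation log shows one more
# "Attributes - Begin" entry. ``appliance`` is the usual test fixture and the
# helper name ``wait_for_new_request`` is hypothetical; both are assumed here
# for illustration only.
def wait_for_new_request(appliance, timeout=300):
    from cfme.utils.wait import wait_for

    initial_count = check_log_requests_count(appliance)
    # ... trigger the custom button here ...
    wait_for(
        log_request_check,
        func_args=[appliance, initial_count + 1],
        num_sec=timeout,
        message="Waiting for expected request count in automation log",
    )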
""" This test generate one default report for each category under reports accordion """ import pytest from cfme import test_requirements from cfme.infrastructure.provider import InfraProvider from cfme.markers.env_markers.provider import ONE # from selenium.common.exceptions import NoSuchElementException # from utils.log import logger pytestmark = [ pytest.mark.tier(3), test_requirements.report, pytest.mark.usefixtures('setup_provider_modscope'), pytest.mark.provider([InfraProvider], scope='module', selector=ONE), ] report_path = [ ["Configuration Management", "Virtual Machines", "Guest OS Information - any OS"], ["Migration Readiness", "Virtual Machines", "Summary - VMs migration ready"], ["Operations", "Virtual Machines", "VMs not Powered On"], ["VM Sprawl", "Candidates", "Summary of VM Create and Deletes"], ["Relationships", "Virtual Machines, Folders, Clusters", "VM Relationships"], ["Events", "Operations", "Events for VM prod_webserver"], ["Performance by Asset Type", "Virtual Machines", "Top CPU Consumers (weekly)"], ["Running Processes", "Virtual Machines", "Processes for prod VMs sort by CPU Time"], ["Trending", "Clusters", "Cluster CPU Trends (last week)"], ["Tenants", "Tenant Quotas", "Tenant Quotas"], ["Provisioning", "Activity Reports", "Provisioning Activity - by VM"], ] @pytest.mark.rhel_testing @pytest.mark.parametrize('path', report_path, scope="module", ids=lambda param: '/'.join(param[:2])) def test_reports_generate_report(request, path, appliance): """ This Tests run one default report for each category Steps: *Run one default report *Delete this Saved Report from the Database Polarion: assignee: pvala casecomponent: Reporting caseimportance: high initialEstimate: 1/16h """ report = appliance.collections.reports.instantiate( type=path[0], subtype=path[1], menu_name=path[2] ).queue(wait_for_finish=True) request.addfinalizer(report.delete_if_exists) assert report.exists
ManageIQ/integration_tests
cfme/tests/intelligence/reports/test_generate_report.py
cfme/tests/automate/custom_button/__init__.py
"""A model of an Infrastructure PhysicalServer in CFME.""" import attr from cached_property import cached_property from navmazing import NavigateToAttribute from navmazing import NavigateToSibling from varmeth import variable from wrapanapi.systems import LenovoSystem from cfme.common import PolicyProfileAssignable from cfme.common import Taggable from cfme.common.physical_server_views import PhysicalServerDetailsView from cfme.common.physical_server_views import PhysicalServerEditTagsView from cfme.common.physical_server_views import PhysicalServerManagePoliciesView from cfme.common.physical_server_views import PhysicalServerNetworkDevicesView from cfme.common.physical_server_views import PhysicalServerProvisionView from cfme.common.physical_server_views import PhysicalServerStorageDevicesView from cfme.common.physical_server_views import PhysicalServersView from cfme.common.physical_server_views import PhysicalServerTimelinesView from cfme.exceptions import HostStatsNotContains from cfme.exceptions import ItemNotFound from cfme.exceptions import ProviderHasNoProperty from cfme.exceptions import StatsDoNotMatch from cfme.modeling.base import BaseCollection from cfme.modeling.base import BaseEntity from cfme.utils.appliance.implementations.ui import CFMENavigateStep from cfme.utils.appliance.implementations.ui import navigate_to from cfme.utils.appliance.implementations.ui import navigator from cfme.utils.log import logger from cfme.utils.pretty import Pretty from cfme.utils.providers import get_crud_by_name from cfme.utils.update import Updateable from cfme.utils.wait import wait_for @attr.s class PhysicalServer(BaseEntity, Updateable, Pretty, PolicyProfileAssignable, Taggable): """Model of an Physical Server in cfme. Args: name: Name of the physical server. hostname: hostname of the physical server. ip_address: The IP address as a string. custom_ident: The custom identifiter. Usage: myhost = PhysicalServer(name='vmware') myhost.create() """ pretty_attrs = ['name', 'hostname', 'ip_address', 'custom_ident'] name = attr.ib() ems_ref = attr.ib(default=None) provider = attr.ib(default=None) hostname = attr.ib(default=None) ip_address = attr.ib(default=None) custom_ident = attr.ib(default=None) db_id = None mgmt_class = LenovoSystem INVENTORY_TO_MATCH = ['power_state'] STATS_TO_MATCH = ['cores_capacity', 'memory_capacity', 'num_network_devices', 'num_storage_devices'] def load_details(self, refresh=False): """To be compatible with the Taggable and PolicyProfileAssignable mixins. 
Args: refresh (bool): Whether to perform the page refresh, defaults to False """ view = navigate_to(self, "Details") if refresh: view.browser.refresh() view.flush_widget_cache()
def _execute_button(self, button, option, handle_alert=False): view = navigate_to(self, "Details") view.toolbar.custom_button(button).item_select(option, handle_alert=handle_alert) return view
def _execute_action_button(self, button, option, handle_alert=True, **kwargs): target = kwargs.get("target", None) provider = kwargs.get("provider", None) desired_state = kwargs.get("desired_state", None) view = self._execute_button(button, option, handle_alert=handle_alert) if desired_state: self._wait_for_state_change(desired_state, target, provider, view) elif handle_alert: wait_for( lambda: view.flash.is_displayed, message="Wait for the handle alert to appear...", num_sec=5, delay=2 )
def power_on(self, **kwargs): self._execute_action_button("Power", "Power On", **kwargs)
def power_off(self, **kwargs): self._execute_action_button("Power", "Power Off", **kwargs)
def power_off_immediately(self, **kwargs): self._execute_action_button("Power", "Power Off Immediately", **kwargs)
def restart(self, **kwargs): self._execute_action_button("Power", "Restart", **kwargs)
def restart_immediately(self, **kwargs): self._execute_action_button("Power", "Restart Immediately", **kwargs)
def refresh(self, provider, handle_alert=False): last_refresh = provider.last_refresh_date() self._execute_button("Configuration", "Refresh Relationships and Power States", handle_alert) wait_for( lambda: last_refresh != provider.last_refresh_date(), message="Wait for the server to be refreshed...", num_sec=300, delay=5 )
def turn_on_led(self, **kwargs): self._execute_action_button('Identify', 'Turn On LED', **kwargs)
def turn_off_led(self, **kwargs): self._execute_action_button('Identify', 'Turn Off LED', **kwargs)
def turn_blink_led(self, **kwargs): self._execute_action_button('Identify', 'Blink LED', **kwargs)
@variable(alias='ui') def power_state(self): view = navigate_to(self, "Details") return view.entities.power_management.get_text_of("Power State")
@variable(alias='ui') def cores_capacity(self): view = navigate_to(self, "Details") return view.entities.properties.get_text_of("CPU total cores")
@variable(alias='ui') def memory_capacity(self): view = navigate_to(self, "Details") return view.entities.properties.get_text_of("Total memory (mb)")
@variable(alias='ui') def num_network_devices(self): view = navigate_to(self, "Details") return view.entities.properties.get_text_of("Network Devices")
@variable(alias='ui') def num_storage_devices(self): view = navigate_to(self, "Details") return view.entities.properties.get_text_of("Storage Devices")
def _wait_for_state_change(self, desired_state, target, provider, view, timeout=300, delay=10): """Wait for PhysicalServer to come to the desired state. This function waits just the needed amount of time thanks to wait_for. Args: desired_state (str): 'on' or 'off' target (str): The name of the method that must be used to compare with the desired_state view (object): The view that must be refreshed to verify whether the value was changed provider (object): 'LenovoProvider' timeout (int): Amount of time (in seconds) to wait until TimedOutError is raised delay (int): Amount of time (in seconds) to wait between retries. 
""" def _is_state_changed(): self.refresh(provider, handle_alert=True) return desired_state == getattr(self, target)() wait_for(_is_state_changed, fail_func=view.browser.refresh, num_sec=timeout, delay=delay) @property def exists(self): """Checks if the physical_server exists in the UI. Returns: :py:class:`bool` """ view = navigate_to(self.parent, "All") try: view.entities.get_entity(name=self.name, surf_pages=True) except ItemNotFound: return False else: return True @cached_property def get_db_id(self): if self.db_id is None: self.db_id = self.appliance.physical_server_id(self.name) return self.db_id else: return self.db_id def wait_to_appear(self): """Waits for the server to appear in the UI.""" view = navigate_to(self.parent, "All") logger.info("Waiting for the server to appear...") wait_for( lambda: self.exists, message="Wait for the server to appear", num_sec=1000, fail_func=view.browser.refresh ) def wait_for_delete(self): """Waits for the server to remove from the UI.""" view = navigate_to(self.parent, "All") logger.info("Waiting for the server to delete...") wait_for( lambda: not self.exists, message="Wait for the server to disappear", num_sec=500, fail_func=view.browser.refresh ) def validate_stats(self, ui=False): """ Validates that the detail page matches the physical server's information. This method logs into the provider using the mgmt_system interface and collects a set of statistics to be matched against the UI. An exception will be raised if the stats retrieved from the UI do not match those retrieved from wrapanapi. """ # Make sure we are on the physical server detail page if ui: self.load_details() # Retrieve the client and the stats and inventory to match client = self.provider.mgmt stats_to_match = self.STATS_TO_MATCH inventory_to_match = self.INVENTORY_TO_MATCH # Retrieve the stats and inventory from wrapanapi server_stats = client.server_stats(self, stats_to_match) server_inventory = client.server_inventory(self, inventory_to_match) # Refresh the browser if ui: self.browser.selenium.refresh() # Verify that the stats retrieved from wrapanapi match those retrieved # from the UI for stat in stats_to_match: try: cfme_stat = int(getattr(self, stat)(method='ui' if ui else None)) server_stat = int(server_stats[stat]) if server_stat != cfme_stat: msg = "The {} stat does not match. (server: {}, server stat: {}, cfme stat: {})" raise StatsDoNotMatch(msg.format(stat, self.name, server_stat, cfme_stat)) except KeyError: raise HostStatsNotContains( f"Server stats information does not contain '{stat}'") except AttributeError: raise ProviderHasNoProperty(f"Provider does not know how to get '{stat}'") # Verify that the inventory retrieved from wrapanapi match those retrieved # from the UI for inventory in inventory_to_match: try: cfme_inventory = getattr(self, inventory)(method='ui' if ui else None) server_inventory = server_inventory[inventory] if server_inventory != cfme_inventory: msg = "The {} inventory does not match. 
(server: {}, server inventory: {}, " \ "cfme inventory: {})" raise StatsDoNotMatch(msg.format(inventory, self.name, server_inventory, cfme_inventory)) except KeyError: raise HostStatsNotContains( f"Server inventory information does not contain '{inventory}'") except AttributeError: msg = "Provider does not know how to get '{}'" raise ProviderHasNoProperty(msg.format(inventory)) @attr.s class PhysicalServerCollection(BaseCollection): """Collection object for the :py:class:`cfme.infrastructure.host.PhysicalServer`.""" ENTITY = PhysicalServer def select_entity_rows(self, physical_servers): """ Select all physical server objects """ physical_servers = list(physical_servers) checked_physical_servers = list() view = navigate_to(self, 'All') for physical_server in physical_servers: view.entities.get_entity(name=physical_server.name, surf_pages=True).ensure_checked() checked_physical_servers.append(physical_server) return view def all(self, provider): """returning all physical_servers objects""" physical_server_table = self.appliance.db.client['physical_servers'] ems_table = self.appliance.db.client['ext_management_systems'] physical_server_query = ( self.appliance.db.client.session .query(physical_server_table.name, physical_server_table.ems_ref, ems_table.name) .join(ems_table, physical_server_table.ems_id == ems_table.id)) provider = None if self.filters.get('provider'): provider = self.filters.get('provider') physical_server_query = physical_server_query.filter(ems_table.name == provider.name) physical_servers = [] for name, ems_ref, ems_name in physical_server_query.all(): physical_servers.append(self.instantiate(name=name, ems_ref=ems_ref, provider=provider or get_crud_by_name(ems_name))) return physical_servers def find_by(self, provider, ph_name): """returning all physical_servers objects""" physical_server_table = self.appliance.db.client['physical_servers'] ems_table = self.appliance.db.client['ext_management_systems'] physical_server_query = ( self.appliance.db.client.session .query(physical_server_table.name, ems_table.name) .join(ems_table, physical_server_table.ems_id == ems_table.id)) provider = None if self.filters.get('provider'): provider = self.filters.get('provider') physical_server_query = physical_server_query.filter(ems_table.name == provider.name) for name, ems_name in physical_server_query.all(): if ph_name == name: return self.instantiate(name=name, provider=provider or get_crud_by_name(ems_name)) def power_on(self, *physical_servers): view = self.select_entity_rows(physical_servers) view.toolbar.power.item_select("Power On", handle_alert=True) def power_off(self, *physical_servers): view = self.select_entity_rows(physical_servers) view.toolbar.power.item_select("Power Off", handle_alert=True) def custom_button_action(self, button, option, physical_servers, handle_alert=True): view = self.select_entity_rows(physical_servers) view.toolbar.custom_button(button).item_select(option, handle_alert=handle_alert) @navigator.register(PhysicalServerCollection) class All(CFMENavigateStep): VIEW = PhysicalServersView prerequisite = NavigateToAttribute("appliance.server", "LoggedIn") def step(self, *args, **kwargs): self.prerequisite_view.navigation.select("Compute", "Physical Infrastructure", "Servers") @navigator.register(PhysicalServerCollection) class ManagePoliciesCollection(CFMENavigateStep): VIEW = PhysicalServerManagePoliciesView prerequisite = NavigateToSibling("All") def step(self, *args, **kwargs): self.prerequisite_view.toolbar.policy.item_select("Manage Policies") 
@navigator.register(PhysicalServerCollection) class EditTagsCollection(CFMENavigateStep): VIEW = PhysicalServerEditTagsView prerequisite = NavigateToSibling("All") def step(self, *args, **kwargs): self.prerequisite_view.toolbar.policy.item_select("Edit Tags") @navigator.register(PhysicalServerCollection) class ProvisionCollection(CFMENavigateStep): VIEW = PhysicalServerProvisionView prerequisite = NavigateToSibling("All") def step(self, *args, **kwargs): self.prerequisite_view.toolbar.lifecycle.item_select("Provision Physical Server") @navigator.register(PhysicalServer) class Details(CFMENavigateStep): VIEW = PhysicalServerDetailsView prerequisite = NavigateToAttribute("parent", "All") def step(self, *args, **kwargs): self.prerequisite_view.entities.get_entity(name=self.obj.name, surf_pages=True).click() @navigator.register(PhysicalServer) class ManagePolicies(CFMENavigateStep): VIEW = PhysicalServerManagePoliciesView prerequisite = NavigateToSibling("Details") def step(self, *args, **kwargs): self.prerequisite_view.toolbar.policy.item_select("Manage Policies") @navigator.register(PhysicalServer) class Provision(CFMENavigateStep): VIEW = PhysicalServerProvisionView prerequisite = NavigateToSibling("Details") def step(self, *args, **kwargs): self.prerequisite_view.toolbar.lifecycle.item_select("Provision Physical Server") @navigator.register(PhysicalServer) class Timelines(CFMENavigateStep): VIEW = PhysicalServerTimelinesView prerequisite = NavigateToSibling("Details") def step(self, *args, **kwargs): self.prerequisite_view.toolbar.monitoring.item_select("Timelines") @navigator.register(PhysicalServer) class NetworkDevices(CFMENavigateStep): VIEW = PhysicalServerNetworkDevicesView prerequisite = NavigateToSibling("Details") def step(self, *args, **kwargs): self.prerequisite_view.entities.properties.click_at("Network Devices") @navigator.register(PhysicalServer) class StorageDevices(CFMENavigateStep): VIEW = PhysicalServerStorageDevicesView prerequisite = NavigateToSibling("Details") def step(self, *args, **kwargs): self.prerequisite_view.entities.properties.click_at("Storage Devices")
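# --- Illustrative usage sketch (editor addition, not part of the original module) ---
# How a test would typically drive the classes above. The collection attribute
# ``physical_servers`` and the ``appliance``/``provider`` fixtures are assumptions
# made for illustration only.
#
#     servers = appliance.collections.physical_servers
#     server = servers.find_by(provider, "physical-server-1")
#     server.power_on(desired_state="on", target="power_state", provider=provider)
#     assert server.power_state() == "on"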
""" This test generate one default report for each category under reports accordion """ import pytest from cfme import test_requirements from cfme.infrastructure.provider import InfraProvider from cfme.markers.env_markers.provider import ONE # from selenium.common.exceptions import NoSuchElementException # from utils.log import logger pytestmark = [ pytest.mark.tier(3), test_requirements.report, pytest.mark.usefixtures('setup_provider_modscope'), pytest.mark.provider([InfraProvider], scope='module', selector=ONE), ] report_path = [ ["Configuration Management", "Virtual Machines", "Guest OS Information - any OS"], ["Migration Readiness", "Virtual Machines", "Summary - VMs migration ready"], ["Operations", "Virtual Machines", "VMs not Powered On"], ["VM Sprawl", "Candidates", "Summary of VM Create and Deletes"], ["Relationships", "Virtual Machines, Folders, Clusters", "VM Relationships"], ["Events", "Operations", "Events for VM prod_webserver"], ["Performance by Asset Type", "Virtual Machines", "Top CPU Consumers (weekly)"], ["Running Processes", "Virtual Machines", "Processes for prod VMs sort by CPU Time"], ["Trending", "Clusters", "Cluster CPU Trends (last week)"], ["Tenants", "Tenant Quotas", "Tenant Quotas"], ["Provisioning", "Activity Reports", "Provisioning Activity - by VM"], ] @pytest.mark.rhel_testing @pytest.mark.parametrize('path', report_path, scope="module", ids=lambda param: '/'.join(param[:2])) def test_reports_generate_report(request, path, appliance): """ This Tests run one default report for each category Steps: *Run one default report *Delete this Saved Report from the Database Polarion: assignee: pvala casecomponent: Reporting caseimportance: high initialEstimate: 1/16h """ report = appliance.collections.reports.instantiate( type=path[0], subtype=path[1], menu_name=path[2] ).queue(wait_for_finish=True) request.addfinalizer(report.delete_if_exists) assert report.exists
ManageIQ/integration_tests
cfme/tests/intelligence/reports/test_generate_report.py
cfme/physical/physical_server.py