Dataset columns:
    input       string (lengths 53 to 297k)
    output      string (604 distinct values)
    repo_name   string (376 distinct values)
    test_path   string (583 distinct values)
    code_path   string (lengths 7 to 116)
# coding: utf-8 from __future__ import absolute_import, unicode_literals from datetime import datetime, timedelta import json import random import string import time from cryptography.hazmat.backends import default_backend from cryptography.hazmat.primitives import serialization from cryptography.hazmat.primitives.asymmetric.rsa import RSAPrivateKey import jwt from six import binary_type, string_types, raise_from, text_type from ..config import API from ..exception import BoxOAuthException from .oauth2 import OAuth2 from ..object.user import User from ..util.compat import NoneType class JWTAuth(OAuth2): """ Responsible for handling JWT Auth for Box Developer Edition. Can authenticate enterprise instances or app users. """ _GRANT_TYPE = 'urn:ietf:params:oauth:grant-type:jwt-bearer' def __init__( self, client_id, client_secret, enterprise_id, jwt_key_id, rsa_private_key_file_sys_path=None, rsa_private_key_passphrase=None, user=None, store_tokens=None, box_device_id='0', box_device_name='', access_token=None, session=None, jwt_algorithm='RS256', rsa_private_key_data=None, **kwargs ): """Extends baseclass method. Must pass exactly one of either `rsa_private_key_file_sys_path` or `rsa_private_key_data`. If both `enterprise_id` and `user` are non-`None`, the `user` takes precedence when `refresh()` is called. This can be overruled with a call to `authenticate_instance()`. :param client_id: Box API key used for identifying the application the user is authenticating with. :type client_id: `unicode` :param client_secret: Box API secret used for making OAuth2 requests. :type client_secret: `unicode` :param enterprise_id: The ID of the Box Developer Edition enterprise. May be `None`, if the caller knows that it will not be authenticating as an enterprise instance / service account. If `user` is passed, this value is not used, unless `authenticate_instance()` is called to clear the user and authenticate as the enterprise instance. :type enterprise_id: `unicode` or `None` :param jwt_key_id: Key ID for the JWT assertion. :type jwt_key_id: `unicode` :param rsa_private_key_file_sys_path: (optional) Path to an RSA private key file, used for signing the JWT assertion. :type rsa_private_key_file_sys_path: `unicode` :param rsa_private_key_passphrase: Passphrase used to unlock the private key. Do not pass a unicode string - this must be bytes. :type rsa_private_key_passphrase: `bytes` or None :param user: (optional) The user to authenticate, expressed as a Box User ID or as a :class:`User` instance. This value is not required. But if it is provided, then the user will be auto-authenticated at the time of the first API call or when calling `authenticate_user()` without any arguments. Should be `None` if the intention is to authenticate as the enterprise instance / service account. If both `enterprise_id` and `user` are non-`None`, the `user` takes precedense when `refresh()` is called. May be one of this application's created App User. Depending on the configured User Access Level, may also be any other App User or Managed User in the enterprise. <https://developer.box.com/en/guides/applications/> <https://developer.box.com/en/guides/authentication/select/> :type user: `unicode` or :class:`User` or `None` :param store_tokens: Optional callback for getting access to tokens for storing them. :type store_tokens: `callable` :param box_device_id: Optional unique ID of this device. Used for applications that want to support device-pinning. 
:type box_device_id: `unicode` :param box_device_name: Optional human readable name for this device. :type box_device_name: `unicode` :param access_token: Access token to use for auth until it expires. :type access_token: `unicode` :param network_layer: If specified, use it to make network requests. If not, the default network implementation will be used. :type network_layer: :class:`Network` :param jwt_algorithm: Which algorithm to use for signing the JWT assertion. Must be one of 'RS256', 'RS384', 'RS512'. :type jwt_algorithm: `unicode` :param rsa_private_key_data: (optional) Contents of RSA private key, used for signing the JWT assertion. Do not pass a unicode string. Can pass a byte string, or a file-like object that returns bytes, or an already-loaded `RSAPrivateKey` object. :type rsa_private_key_data: `bytes` or :class:`io.IOBase` or :class:`RSAPrivateKey` """ user_id = self._normalize_user_id(user) rsa_private_key = self._normalize_rsa_private_key( file_sys_path=rsa_private_key_file_sys_path, data=rsa_private_key_data, passphrase=rsa_private_key_passphrase, ) del rsa_private_key_data del rsa_private_key_file_sys_path super(JWTAuth, self).__init__( client_id, client_secret, store_tokens=store_tokens, box_device_id=box_device_id, box_device_name=box_device_name, access_token=access_token, refresh_token=None, session=session, **kwargs ) self._rsa_private_key = rsa_private_key self._enterprise_id = enterprise_id self._jwt_algorithm = jwt_algorithm self._jwt_key_id = jwt_key_id self._user_id = user_id def _construct_and_send_jwt_auth(self, sub, sub_type, now_time=None): """ Construct the claims used for JWT auth and send a request to get a JWT. Pass an enterprise ID to get an enterprise token (which can be used to provision/deprovision users), or a user ID to get a user token. :param sub: The enterprise ID or user ID to auth. :type sub: `unicode` :param sub_type: Either 'enterprise' or 'user' :type sub_type: `unicode` :param now_time: Optional. The current UTC time is needed in order to construct the expiration time of the JWT claim. If None, `datetime.utcnow()` will be used. :type now_time: `datetime` or None :return: The access token for the enterprise or app user. :rtype: `unicode` """ system_random = random.SystemRandom() jti_length = system_random.randint(16, 128) ascii_alphabet = string.ascii_letters + string.digits ascii_len = len(ascii_alphabet) jti = ''.join(ascii_alphabet[int(system_random.random() * ascii_len)] for _ in range(jti_length)) if now_time is None: now_time = datetime.utcnow() now_plus_30 = now_time + timedelta(seconds=30) assertion = jwt.encode( { 'iss': self._client_id, 'sub': sub, 'box_sub_type': sub_type, 'aud': 'https://api.box.com/oauth2/token', 'jti': jti, 'exp': int((now_plus_30 - datetime(1970, 1, 1)).total_seconds()), }, self._rsa_private_key, algorithm=self._jwt_algorithm, headers={ 'kid': self._jwt_key_id, }, ) data = { 'grant_type': self._GRANT_TYPE, 'client_id': self._client_id, 'client_secret': self._client_secret, 'assertion': assertion, } if self._box_device_id: data['box_device_id'] = self._box_device_id if self._box_device_name: data['box_device_name'] = self._box_device_name return self.send_token_request(data, access_token=None, expect_refresh_token=False)[0] def _auth_with_jwt(self, sub, sub_type): """ Auth with JWT. If authorization fails because the expiration time is out of sync with the Box servers, retry using the time returned in the error response. 
Pass an enterprise ID to get an enterprise token (which can be used to provision/deprovision users), or a user ID to get a user token. :param sub: The enterprise ID or user ID to auth. :type sub: `unicode` :param sub_type: Either 'enterprise' or 'user' :type sub_type: `unicode` :return: The access token for the enterprise or app user. :rtype: `unicode` """ attempt_number = 0 jwt_time = None while True: try: return self._construct_and_send_jwt_auth(sub, sub_type, jwt_time) except BoxOAuthException as ex: network_response = ex.network_response code = network_response.status_code # pylint: disable=maybe-no-member box_datetime = self._get_date_header(network_response) if attempt_number >= API.MAX_RETRY_ATTEMPTS: raise ex if (code == 429 or code >= 500): jwt_time = None elif box_datetime is not None and self._was_exp_claim_rejected_due_to_clock_skew(network_response): jwt_time = box_datetime else: raise ex time_delay = self._session.get_retry_after_time(attempt_number, network_response.headers.get('Retry-After', None)) # pylint: disable=maybe-no-member time.sleep(time_delay) attempt_number += 1 self._logger.debug('Retrying JWT request') @staticmethod def _get_date_header(network_response): """ Get datetime object for Date header, if the Date header is available. :param network_response: The response from the Box API that should include a Date header. :type network_response: :class:`Response` :return: The datetime parsed from the Date header, or None if the header is absent or if it couldn't be parsed. :rtype: `datetime` or `None` """ box_date_header = network_response.headers.get('Date', None) if box_date_header is not None: try: return datetime.strptime(box_date_header, '%a, %d %b %Y %H:%M:%S %Z') except ValueError: pass return None @staticmethod def _was_exp_claim_rejected_due_to_clock_skew(network_response): """ Determine whether the network response indicates that the authorization request was rejected because of the exp claim. This can happen if the current system time is too different from the Box server time. Returns True if the status code is 400, the error code is invalid_grant, and the error description indicates a problem with the exp claim; False, otherwise. :param network_response: :type network_response: :class:`Response` :rtype: `bool` """ status_code = network_response.status_code try: json_response = network_response.json() except ValueError: return False error_code = json_response.get('error', '') error_description = json_response.get('error_description', '') return status_code == 400 and error_code == 'invalid_grant' and 'exp' in error_description def authenticate_user(self, user=None): """ Get an access token for a User. May be one of this application's created App User. Depending on the configured User Access Level, may also be any other App User or Managed User in the enterprise. <https://developer.box.com/en/guides/applications/> <https://developer.box.com/en/guides/authentication/select/> :param user: (optional) The user to authenticate, expressed as a Box User ID or as a :class:`User` instance. If not given, then the most recently provided user ID, if available, will be used. :type user: `unicode` or :class:`User` :raises: :exc:`ValueError` if no user ID was passed and the object is not currently configured with one. :return: The access token for the user. 
:rtype: `unicode` """ sub = self._normalize_user_id(user) or self._user_id if not sub: raise ValueError("authenticate_user: Requires the user ID, but it was not provided.") self._user_id = sub return self._auth_with_jwt(sub, 'user') authenticate_app_user = authenticate_user @classmethod def _normalize_user_id(cls, user): """Get a Box user ID from a selection of supported param types. :param user: An object representing the user or user ID. Currently supported types are `unicode` (which represents the user ID) and :class:`User`. If `None`, returns `None`. :raises: :exc:`TypeError` for unsupported types. :rtype: `unicode` or `None` """ if user is None: return None if isinstance(user, User): return user.object_id if isinstance(user, string_types): return text_type(user) raise TypeError("Got unsupported type {0!r} for user.".format(user.__class__.__name__)) def authenticate_instance(self, enterprise=None): """ Get an access token for a Box Developer Edition enterprise. :param enterprise: The ID of the Box Developer Edition enterprise. Optional if the value was already given to `__init__`, otherwise required. :type enterprise: `unicode` or `None` :raises: :exc:`ValueError` if `None` was passed for the enterprise ID here and in `__init__`, or if the non-`None` value passed here does not match the non-`None` value passed to `__init__`. :return: The access token for the enterprise which can provision/deprovision app users. :rtype: `unicode` """ enterprises = [enterprise, self._enterprise_id] if not any(enterprises): raise ValueError("authenticate_instance: Requires the enterprise ID, but it was not provided.") if all(enterprises) and (enterprise != self._enterprise_id): raise ValueError( "authenticate_instance: Given enterprise ID {given_enterprise!r}, but {auth} already has ID {existing_enterprise!r}" .format(auth=self, given_enterprise=enterprise, existing_enterprise=self._enterprise_id) ) if not self._enterprise_id: self._enterprise_id = enterprise self._user_id = None return self._auth_with_jwt(self._enterprise_id, 'enterprise') def _refresh(self, access_token): """ Base class override. Instead of refreshing an access token using a refresh token, we just issue a new JWT request. 
""" # pylint:disable=unused-argument if self._user_id is None: new_access_token = self.authenticate_instance() else: new_access_token = self.authenticate_user() return new_access_token, None @classmethod def _normalize_rsa_private_key(cls, file_sys_path, data, passphrase=None): if len(list(filter(None, [file_sys_path, data]))) != 1: raise TypeError("must pass exactly one of either rsa_private_key_file_sys_path or rsa_private_key_data") if file_sys_path: with open(file_sys_path, 'rb') as key_file: data = key_file.read() if hasattr(data, 'read') and callable(data.read): data = data.read() if isinstance(data, text_type): try: data = data.encode('ascii') except UnicodeError: raise_from( TypeError("rsa_private_key_data must contain binary data (bytes/str), not a text/unicode string"), None, ) if isinstance(data, binary_type): passphrase = cls._normalize_rsa_private_key_passphrase(passphrase) return serialization.load_pem_private_key( data, password=passphrase, backend=default_backend(), ) if isinstance(data, RSAPrivateKey): return data raise TypeError( 'rsa_private_key_data must be binary data (bytes/str), ' 'a file-like object with a read() method, ' 'or an instance of RSAPrivateKey, ' 'but got {0!r}' .format(data.__class__.__name__) ) @staticmethod def _normalize_rsa_private_key_passphrase(passphrase): if isinstance(passphrase, text_type): try: return passphrase.encode('ascii') except UnicodeError: raise_from( TypeError("rsa_private_key_passphrase must contain binary data (bytes/str), not a text/unicode string"), None, ) if not isinstance(passphrase, (binary_type, NoneType)): raise TypeError( "rsa_private_key_passphrase must contain binary data (bytes/str), got {0!r}" .format(passphrase.__class__.__name__) ) return passphrase @classmethod def from_settings_dictionary(cls, settings_dictionary, **kwargs): """ Create an auth instance as defined by the given settings dictionary. The dictionary should have the structure of the JSON file downloaded from the Box Developer Console. :param settings_dictionary: Dictionary containing settings for configuring app auth. :type settings_dictionary: `dict` :return: Auth instance configured as specified by the config dictionary. :rtype: :class:`JWTAuth` """ if 'boxAppSettings' not in settings_dictionary: raise ValueError('boxAppSettings not present in configuration') return cls( client_id=settings_dictionary['boxAppSettings']['clientID'], client_secret=settings_dictionary['boxAppSettings']['clientSecret'], enterprise_id=settings_dictionary.get('enterpriseID', None), jwt_key_id=settings_dictionary['boxAppSettings']['appAuth'].get('publicKeyID', None), rsa_private_key_data=settings_dictionary['boxAppSettings']['appAuth'].get('privateKey', None), rsa_private_key_passphrase=settings_dictionary['boxAppSettings']['appAuth'].get('passphrase', None), **kwargs ) @classmethod def from_settings_file(cls, settings_file_sys_path, **kwargs): """ Create an auth instance as defined by a JSON file downloaded from the Box Developer Console. See https://developer.box.com/en/guides/authentication/jwt/ for more information. :param settings_file_sys_path: Path to the JSON file containing the configuration. :type settings_file_sys_path: `unicode` :return: Auth instance configured as specified by the JSON file. :rtype: :class:`JWTAuth` """ with open(settings_file_sys_path) as config_file: config_dictionary = json.load(config_file) return cls.from_settings_dictionary(config_dictionary, **kwargs)
# coding: utf-8 from __future__ import absolute_import, unicode_literals from contextlib import contextmanager from datetime import datetime, timedelta import io from itertools import cycle, product import json import random import string from cryptography.hazmat.backends import default_backend from cryptography.hazmat.primitives.asymmetric.rsa import RSAPrivateKey, generate_private_key as generate_rsa_private_key from cryptography.hazmat.primitives import serialization from mock import Mock, mock_open, patch, sentinel, call import pytest import pytz import requests from six import binary_type, string_types, text_type from boxsdk.auth.jwt_auth import JWTAuth from boxsdk.exception import BoxOAuthException from boxsdk.config import API from boxsdk.object.user import User @pytest.fixture(params=[16, 32, 128]) def jti_length(request): return request.param @pytest.fixture(params=('RS256', 'RS512')) def jwt_algorithm(request): return request.param @pytest.fixture(scope='module') def jwt_key_id(): return 'jwt_key_id_1' @pytest.fixture(scope='module') def rsa_private_key_object(): return generate_rsa_private_key(public_exponent=65537, key_size=4096, backend=default_backend()) @pytest.fixture(params=(None, b'strong_password')) def rsa_passphrase(request): return request.param @pytest.fixture def rsa_private_key_bytes(rsa_private_key_object, rsa_passphrase): encryption = serialization.BestAvailableEncryption(rsa_passphrase) if rsa_passphrase else serialization.NoEncryption() return rsa_private_key_object.private_bytes( encoding=serialization.Encoding.PEM, format=serialization.PrivateFormat.PKCS8, encryption_algorithm=encryption, ) @pytest.fixture(scope='function') def successful_token_response(successful_token_mock, successful_token_json_response): # pylint:disable=redefined-outer-name response = successful_token_json_response.copy() del response['refresh_token'] successful_token_mock.json = Mock(return_value=response) successful_token_mock.ok = True successful_token_mock.content = json.dumps(response) successful_token_mock.status_code = 200 return successful_token_mock @pytest.mark.parametrize(('key_file', 'key_data'), [(None, None), ('fake sys path', 'fake key data')]) @pytest.mark.parametrize('rsa_passphrase', [None]) def test_jwt_auth_init_raises_type_error_unless_exactly_one_of_rsa_private_key_file_or_data_is_given(key_file, key_data, rsa_private_key_bytes): kwargs = dict( rsa_private_key_data=rsa_private_key_bytes, client_id=None, client_secret=None, jwt_key_id=None, enterprise_id=None, ) JWTAuth(**kwargs) kwargs.update(rsa_private_key_file_sys_path=key_file, rsa_private_key_data=key_data) with pytest.raises(TypeError): JWTAuth(**kwargs) @pytest.mark.parametrize('key_data', [object(), u'ƒøø']) @pytest.mark.parametrize('rsa_passphrase', [None]) def test_jwt_auth_init_raises_type_error_if_rsa_private_key_data_has_unexpected_type(key_data, rsa_private_key_bytes): kwargs = dict( rsa_private_key_data=rsa_private_key_bytes, client_id=None, client_secret=None, jwt_key_id=None, enterprise_id=None, ) JWTAuth(**kwargs) kwargs.update(rsa_private_key_data=key_data) with pytest.raises(TypeError): JWTAuth(**kwargs) @pytest.mark.parametrize('rsa_private_key_data_type', [io.BytesIO, text_type, binary_type, RSAPrivateKey]) def test_jwt_auth_init_accepts_rsa_private_key_data(rsa_private_key_bytes, rsa_passphrase, rsa_private_key_data_type): if rsa_private_key_data_type is text_type: rsa_private_key_data = text_type(rsa_private_key_bytes.decode('ascii')) elif rsa_private_key_data_type is RSAPrivateKey: 
rsa_private_key_data = serialization.load_pem_private_key( rsa_private_key_bytes, password=rsa_passphrase, backend=default_backend(), ) else: rsa_private_key_data = rsa_private_key_data_type(rsa_private_key_bytes) JWTAuth( rsa_private_key_data=rsa_private_key_data, rsa_private_key_passphrase=rsa_passphrase, client_id=None, client_secret=None, jwt_key_id=None, enterprise_id=None, ) @pytest.fixture(params=[False, True]) def pass_private_key_by_path(request): """For jwt_auth_init_mocks, whether to pass the private key via sys_path (True) or pass the data directly (False).""" return request.param @pytest.fixture def jwt_auth_init_mocks( mock_box_session, successful_token_response, jwt_algorithm, jwt_key_id, rsa_passphrase, rsa_private_key_bytes, pass_private_key_by_path, ): # pylint:disable=redefined-outer-name @contextmanager def _jwt_auth_init_mocks(**kwargs): assert_authed = kwargs.pop('assert_authed', True) fake_client_id = 'fake_client_id' fake_client_secret = 'fake_client_secret' assertion = Mock() data = { 'grant_type': JWTAuth._GRANT_TYPE, # pylint:disable=protected-access 'client_id': fake_client_id, 'client_secret': fake_client_secret, 'assertion': assertion, 'box_device_id': '0', 'box_device_name': 'my_awesome_device', } mock_box_session.request.return_value = successful_token_response with patch('boxsdk.auth.jwt_auth.open', mock_open(read_data=rsa_private_key_bytes), create=True) as jwt_auth_open: with patch('cryptography.hazmat.primitives.serialization.load_pem_private_key') as load_pem_private_key: oauth = JWTAuth( client_id=fake_client_id, client_secret=fake_client_secret, rsa_private_key_file_sys_path=(sentinel.rsa_path if pass_private_key_by_path else None), rsa_private_key_data=(None if pass_private_key_by_path else rsa_private_key_bytes), rsa_private_key_passphrase=rsa_passphrase, session=mock_box_session, box_device_name='my_awesome_device', jwt_algorithm=jwt_algorithm, jwt_key_id=jwt_key_id, enterprise_id=kwargs.pop('enterprise_id', None), **kwargs ) if pass_private_key_by_path: jwt_auth_open.assert_called_once_with(sentinel.rsa_path, 'rb') jwt_auth_open.return_value.read.assert_called_once_with() # pylint:disable=no-member else: jwt_auth_open.assert_not_called() load_pem_private_key.assert_called_once_with( rsa_private_key_bytes, password=rsa_passphrase, backend=default_backend(), ) yield oauth, assertion, fake_client_id, load_pem_private_key.return_value if assert_authed: mock_box_session.request.assert_called_once_with( 'POST', '{0}/token'.format(API.OAUTH2_API_URL), data=data, headers={'content-type': 'application/x-www-form-urlencoded'}, access_token=None, ) assert oauth.access_token == successful_token_response.json()['access_token'] return _jwt_auth_init_mocks def test_refresh_authenticates_with_user_if_enterprise_id_and_user_both_passed_to_constructor(jwt_auth_init_and_auth_mocks): user = 'fake_user_id' with jwt_auth_init_and_auth_mocks(sub=user, sub_type='user', enterprise_id='fake_enterprise_id', user=user) as oauth: oauth.refresh(None) @pytest.mark.parametrize('jwt_auth_method_name', ['authenticate_user', 'authenticate_instance']) def test_authenticate_raises_value_error_if_sub_was_never_given(jwt_auth_init_mocks, jwt_auth_method_name): with jwt_auth_init_mocks(assert_authed=False) as params: auth = params[0] authenticate_method = getattr(auth, jwt_auth_method_name) with pytest.raises(ValueError): authenticate_method() def test_jwt_auth_constructor_raises_type_error_if_user_is_unsupported_type(jwt_auth_init_mocks): with pytest.raises(TypeError): with 
jwt_auth_init_mocks(user=object()): assert False def test_authenticate_user_raises_type_error_if_user_is_unsupported_type(jwt_auth_init_mocks): with jwt_auth_init_mocks(assert_authed=False) as params: auth = params[0] with pytest.raises(TypeError): auth.authenticate_user(object()) @pytest.mark.parametrize('user_id_for_init', [None, 'fake_user_id_1']) def test_authenticate_user_saves_user_id_for_future_calls(jwt_auth_init_and_auth_mocks, user_id_for_init, jwt_encode): def assert_jwt_encode_call_args(user_id): assert jwt_encode.call_args[0][0]['sub'] == user_id assert jwt_encode.call_args[0][0]['box_sub_type'] == 'user' jwt_encode.call_args = None with jwt_auth_init_and_auth_mocks(sub=None, sub_type=None, assert_authed=False, user=user_id_for_init) as auth: for new_user_id in ['fake_user_id_2', 'fake_user_id_3']: auth.authenticate_user(new_user_id) assert_jwt_encode_call_args(new_user_id) auth.authenticate_user() assert_jwt_encode_call_args(new_user_id) def test_authenticate_instance_raises_value_error_if_different_enterprise_id_is_given(jwt_auth_init_mocks): with jwt_auth_init_mocks(enterprise_id='fake_enterprise_id_1', assert_authed=False) as params: auth = params[0] with pytest.raises(ValueError): auth.authenticate_instance('fake_enterprise_id_2') def test_authenticate_instance_saves_enterprise_id_for_future_calls(jwt_auth_init_and_auth_mocks): enterprise_id = 'fake_enterprise_id' with jwt_auth_init_and_auth_mocks(sub=enterprise_id, sub_type='enterprise', assert_authed=False) as auth: auth.authenticate_instance(enterprise_id) auth.authenticate_instance() auth.authenticate_instance(enterprise_id) with pytest.raises(ValueError): auth.authenticate_instance('fake_enterprise_id_2') @pytest.yield_fixture def jwt_encode(): with patch('jwt.encode') as patched_jwt_encode: yield patched_jwt_encode @pytest.fixture def jwt_auth_auth_mocks(jti_length, jwt_algorithm, jwt_key_id, jwt_encode): @contextmanager def _jwt_auth_auth_mocks(sub, sub_type, oauth, assertion, client_id, secret, assert_authed=True): # pylint:disable=redefined-outer-name with patch('boxsdk.auth.jwt_auth.datetime') as mock_datetime: with patch('boxsdk.auth.jwt_auth.random.SystemRandom') as mock_system_random: jwt_encode.return_value = assertion mock_datetime.utcnow.return_value = datetime(2015, 7, 6, 12, 1, 2) mock_datetime.return_value = datetime(1970, 1, 1) now_plus_30 = mock_datetime.utcnow.return_value + timedelta(seconds=30) exp = int((now_plus_30 - datetime(1970, 1, 1)).total_seconds()) system_random = mock_system_random.return_value system_random.randint.return_value = jti_length random_choices = [random.random() for _ in range(jti_length)] # Use cycle so that we can do auth more than once inside the context manager. 
system_random.random.side_effect = cycle(random_choices) ascii_alphabet = string.ascii_letters + string.digits ascii_len = len(ascii_alphabet) jti = ''.join(ascii_alphabet[int(r * ascii_len)] for r in random_choices) yield oauth if assert_authed: system_random.randint.assert_called_once_with(16, 128) assert len(system_random.random.mock_calls) == jti_length jwt_encode.assert_called_once_with({ 'iss': client_id, 'sub': sub, 'box_sub_type': sub_type, 'aud': 'https://api.box.com/oauth2/token', 'jti': jti, 'exp': exp, }, secret, algorithm=jwt_algorithm, headers={'kid': jwt_key_id}) return _jwt_auth_auth_mocks @pytest.fixture def jwt_auth_init_and_auth_mocks(jwt_auth_init_mocks, jwt_auth_auth_mocks): @contextmanager def _jwt_auth_init_and_auth_mocks(sub, sub_type, *jwt_auth_init_mocks_args, **jwt_auth_init_mocks_kwargs): assert_authed = jwt_auth_init_mocks_kwargs.pop('assert_authed', True) with jwt_auth_init_mocks(*jwt_auth_init_mocks_args, assert_authed=assert_authed, **jwt_auth_init_mocks_kwargs) as params: with jwt_auth_auth_mocks(sub, sub_type, *params, assert_authed=assert_authed) as oauth: yield oauth return _jwt_auth_init_and_auth_mocks @pytest.mark.parametrize( ('user', 'pass_in_init'), list(product([str('fake_user_id'), text_type('fake_user_id'), User(None, 'fake_user_id')], [False, True])), ) def test_authenticate_user_sends_post_request_with_correct_params(jwt_auth_init_and_auth_mocks, user, pass_in_init): # pylint:disable=redefined-outer-name if isinstance(user, User): user_id = user.object_id elif isinstance(user, string_types): user_id = user else: raise NotImplementedError init_kwargs = {} authenticate_params = [] if pass_in_init: init_kwargs['user'] = user else: authenticate_params.append(user) with jwt_auth_init_and_auth_mocks(user_id, 'user', **init_kwargs) as oauth: oauth.authenticate_user(*authenticate_params) @pytest.mark.parametrize(('pass_in_init', 'pass_in_auth'), [(True, False), (False, True), (True, True)]) def test_authenticate_instance_sends_post_request_with_correct_params(jwt_auth_init_and_auth_mocks, pass_in_init, pass_in_auth): # pylint:disable=redefined-outer-name enterprise_id = 'fake_enterprise_id' init_kwargs = {} auth_params = [] if pass_in_init: init_kwargs['enterprise_id'] = enterprise_id if pass_in_auth: auth_params.append(enterprise_id) with jwt_auth_init_and_auth_mocks(enterprise_id, 'enterprise', **init_kwargs) as oauth: oauth.authenticate_instance(*auth_params) def test_refresh_app_user_sends_post_request_with_correct_params(jwt_auth_init_and_auth_mocks): # pylint:disable=redefined-outer-name fake_user_id = 'fake_user_id' with jwt_auth_init_and_auth_mocks(fake_user_id, 'user', user=fake_user_id) as oauth: oauth.refresh(None) def test_refresh_instance_sends_post_request_with_correct_params(jwt_auth_init_and_auth_mocks): # pylint:disable=redefined-outer-name enterprise_id = 'fake_enterprise_id' with jwt_auth_init_and_auth_mocks(enterprise_id, 'enterprise', enterprise_id=enterprise_id) as oauth: oauth.refresh(None) @pytest.fixture() def jwt_subclass_that_just_stores_params(): class StoreParamJWTAuth(JWTAuth): def __init__(self, **kwargs): self.kwargs = kwargs super(StoreParamJWTAuth, self).__init__(**kwargs) return StoreParamJWTAuth @pytest.fixture def fake_client_id(): return 'fake_client_id' @pytest.fixture def fake_client_secret(): return 'fake_client_secret' @pytest.fixture def fake_enterprise_id(): return 'fake_enterprise_id' @pytest.fixture def app_config_json_content( fake_client_id, fake_client_secret, fake_enterprise_id, jwt_key_id, 
rsa_private_key_bytes, rsa_passphrase, ): template = r""" {{ "boxAppSettings": {{ "clientID": "{client_id}", "clientSecret": "{client_secret}", "appAuth": {{ "publicKeyID": "{jwt_key_id}", "privateKey": "{private_key}", "passphrase": {passphrase} }} }}, "enterpriseID": {enterprise_id} }}""" return template.format( client_id=fake_client_id, client_secret=fake_client_secret, jwt_key_id=jwt_key_id, private_key=rsa_private_key_bytes.replace(b"\n", b"\\n").decode(), passphrase=json.dumps(rsa_passphrase and rsa_passphrase.decode()), enterprise_id=json.dumps(fake_enterprise_id), ) @pytest.fixture() def assert_jwt_kwargs_expected( fake_client_id, fake_client_secret, fake_enterprise_id, jwt_key_id, rsa_private_key_bytes, rsa_passphrase, ): def _assert_jwt_kwargs_expected(jwt_auth): assert jwt_auth.kwargs['client_id'] == fake_client_id assert jwt_auth.kwargs['client_secret'] == fake_client_secret assert jwt_auth.kwargs['enterprise_id'] == fake_enterprise_id assert jwt_auth.kwargs['jwt_key_id'] == jwt_key_id assert jwt_auth.kwargs['rsa_private_key_data'] == rsa_private_key_bytes.decode() assert jwt_auth.kwargs['rsa_private_key_passphrase'] == (rsa_passphrase and rsa_passphrase.decode()) return _assert_jwt_kwargs_expected def test_from_config_file( jwt_subclass_that_just_stores_params, app_config_json_content, assert_jwt_kwargs_expected, ): # pylint:disable=redefined-outer-name with patch('boxsdk.auth.jwt_auth.open', mock_open(read_data=app_config_json_content), create=True): jwt_auth_from_config_file = jwt_subclass_that_just_stores_params.from_settings_file('fake_config_file_sys_path') assert_jwt_kwargs_expected(jwt_auth_from_config_file) def test_from_settings_dictionary( jwt_subclass_that_just_stores_params, app_config_json_content, assert_jwt_kwargs_expected, ): jwt_auth_from_dictionary = jwt_subclass_that_just_stores_params.from_settings_dictionary(json.loads(app_config_json_content)) assert_jwt_kwargs_expected(jwt_auth_from_dictionary) @pytest.fixture def expect_auth_retry(status_code, error_description, include_date_header, error_code): return status_code == 400 and 'exp' in error_description and include_date_header and error_code == 'invalid_grant' @pytest.fixture def box_datetime(): return datetime.now(tz=pytz.utc) - timedelta(100) @pytest.fixture def unsuccessful_jwt_response(box_datetime, status_code, error_description, include_date_header, error_code): headers = {'Date': box_datetime.strftime('%a, %d %b %Y %H:%M:%S %Z')} if include_date_header else {} unsuccessful_response = Mock(requests.Response(), headers=headers) unsuccessful_response.json.return_value = {'error_description': error_description, 'error': error_code} unsuccessful_response.status_code = status_code unsuccessful_response.ok = False return unsuccessful_response @pytest.mark.parametrize('jwt_algorithm', ('RS512',)) @pytest.mark.parametrize('rsa_passphrase', (None,)) @pytest.mark.parametrize('pass_private_key_by_path', (False,)) @pytest.mark.parametrize('status_code', (400, 401)) @pytest.mark.parametrize('error_description', ('invalid box_sub_type claim', 'invalid kid', "check the 'exp' claim")) @pytest.mark.parametrize('error_code', ('invalid_grant', 'bad_request')) @pytest.mark.parametrize('include_date_header', (True, False)) def test_auth_retry_for_invalid_exp_claim( jwt_auth_init_mocks, expect_auth_retry, unsuccessful_jwt_response, box_datetime, ): # pylint:disable=redefined-outer-name enterprise_id = 'fake_enterprise_id' with jwt_auth_init_mocks(assert_authed=False) as params: auth = params[0] with patch.object(auth, 
'_construct_and_send_jwt_auth') as mock_send_jwt: mock_send_jwt.side_effect = [BoxOAuthException(400, network_response=unsuccessful_jwt_response), 'jwt_token'] if not expect_auth_retry: with pytest.raises(BoxOAuthException): auth.authenticate_instance(enterprise_id) else: auth.authenticate_instance(enterprise_id) expected_calls = [call(enterprise_id, 'enterprise', None)] if expect_auth_retry: expected_calls.append(call(enterprise_id, 'enterprise', box_datetime.replace(microsecond=0, tzinfo=None))) assert len(mock_send_jwt.mock_calls) == len(expected_calls) mock_send_jwt.assert_has_calls(expected_calls) @pytest.mark.parametrize('jwt_algorithm', ('RS512',)) @pytest.mark.parametrize('rsa_passphrase', (None,)) @pytest.mark.parametrize('pass_private_key_by_path', (False,)) @pytest.mark.parametrize('status_code', (429,)) @pytest.mark.parametrize('error_description', ('Request rate limit exceeded',)) @pytest.mark.parametrize('error_code', ('rate_limit_exceeded',)) @pytest.mark.parametrize('include_date_header', (False,)) def test_auth_retry_for_rate_limit_error( jwt_auth_init_mocks, unsuccessful_jwt_response, ): # pylint:disable=redefined-outer-name enterprise_id = 'fake_enterprise_id' with jwt_auth_init_mocks(assert_authed=False) as params: auth = params[0] with patch.object(auth, '_construct_and_send_jwt_auth') as mock_send_jwt: side_effect = [] expected_calls = [] # Retries multiple times, but less than max retries. Then succeeds when it gets a token. for _ in range(API.MAX_RETRY_ATTEMPTS - 2): side_effect.append(BoxOAuthException(429, network_response=unsuccessful_jwt_response)) expected_calls.append(call(enterprise_id, 'enterprise', None)) side_effect.append('jwt_token') expected_calls.append(call(enterprise_id, 'enterprise', None)) mock_send_jwt.side_effect = side_effect auth.authenticate_instance(enterprise_id) assert len(mock_send_jwt.mock_calls) == len(expected_calls) mock_send_jwt.assert_has_calls(expected_calls) @pytest.mark.parametrize('jwt_algorithm', ('RS512',)) @pytest.mark.parametrize('rsa_passphrase', (None,)) @pytest.mark.parametrize('pass_private_key_by_path', (False,)) @pytest.mark.parametrize('status_code', (429,)) @pytest.mark.parametrize('error_description', ('Request rate limit exceeded',)) @pytest.mark.parametrize('error_code', ('rate_limit_exceeded',)) @pytest.mark.parametrize('include_date_header', (False,)) def test_auth_max_retries_for_rate_limit_error( jwt_auth_init_mocks, unsuccessful_jwt_response, ): # pylint:disable=redefined-outer-name enterprise_id = 'fake_enterprise_id' with jwt_auth_init_mocks(assert_authed=False) as params: auth = params[0] with patch.object(auth, '_construct_and_send_jwt_auth') as mock_send_jwt: side_effect = [] expected_calls = [] # Retries max number of times, then throws the error for _ in range(API.MAX_RETRY_ATTEMPTS + 1): side_effect.append(BoxOAuthException(429, network_response=unsuccessful_jwt_response)) expected_calls.append(call(enterprise_id, 'enterprise', None)) mock_send_jwt.side_effect = side_effect with pytest.raises(BoxOAuthException) as error: auth.authenticate_instance(enterprise_id) assert error.value.status == 429 assert len(mock_send_jwt.mock_calls) == len(expected_calls) mock_send_jwt.assert_has_calls(expected_calls) @pytest.mark.parametrize('jwt_algorithm', ('RS512',)) @pytest.mark.parametrize('rsa_passphrase', (None,)) @pytest.mark.parametrize('pass_private_key_by_path', (False,)) @pytest.mark.parametrize('status_code', (500,)) @pytest.mark.parametrize('error_description', ('Internal Server Error',)) 
@pytest.mark.parametrize('error_code', ('internal_server_error',)) @pytest.mark.parametrize('include_date_header', (False,)) def test_auth_retry_for_internal_server_error( jwt_auth_init_mocks, unsuccessful_jwt_response, ): # pylint:disable=redefined-outer-name enterprise_id = 'fake_enterprise_id' with jwt_auth_init_mocks(assert_authed=False) as params: auth = params[0] with patch.object(auth, '_construct_and_send_jwt_auth') as mock_send_jwt: side_effect = [] expected_calls = [] # Retries multiple times, but less than max retries. Then succeeds when it gets a token. for _ in range(API.MAX_RETRY_ATTEMPTS - 2): side_effect.append(BoxOAuthException(500, network_response=unsuccessful_jwt_response)) expected_calls.append(call(enterprise_id, 'enterprise', None)) side_effect.append('jwt_token') expected_calls.append(call(enterprise_id, 'enterprise', None)) mock_send_jwt.side_effect = side_effect auth.authenticate_instance(enterprise_id) assert len(mock_send_jwt.mock_calls) == len(expected_calls) mock_send_jwt.assert_has_calls(expected_calls) @pytest.mark.parametrize('jwt_algorithm', ('RS512',)) @pytest.mark.parametrize('rsa_passphrase', (None,)) @pytest.mark.parametrize('pass_private_key_by_path', (False,)) @pytest.mark.parametrize('status_code', (500,)) @pytest.mark.parametrize('error_description', ('Internal Server Error',)) @pytest.mark.parametrize('error_code', ('internal_server_error',)) @pytest.mark.parametrize('include_date_header', (False,)) def test_auth_max_retries_for_internal_server_error( jwt_auth_init_mocks, unsuccessful_jwt_response, ): # pylint:disable=redefined-outer-name enterprise_id = 'fake_enterprise_id' with jwt_auth_init_mocks(assert_authed=False) as params: auth = params[0] with patch.object(auth, '_construct_and_send_jwt_auth') as mock_send_jwt: side_effect = [] expected_calls = [] # Retries max number of times, then throws the error for _ in range(API.MAX_RETRY_ATTEMPTS + 1): side_effect.append(BoxOAuthException(500, network_response=unsuccessful_jwt_response)) expected_calls.append(call(enterprise_id, 'enterprise', None)) mock_send_jwt.side_effect = side_effect with pytest.raises(BoxOAuthException) as error: auth.authenticate_instance(enterprise_id) assert error.value.status == 500 assert len(mock_send_jwt.mock_calls) == len(expected_calls) mock_send_jwt.assert_has_calls(expected_calls)
box/box-python-sdk
test/unit/auth/test_jwt_auth.py
boxsdk/auth/jwt_auth.py
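The JWTAuth class above is normally constructed from the JSON settings file downloaded from the Box Developer Console and then used to request enterprise or user tokens. The following is a minimal usage sketch, assuming a local config.json that contains an enterpriseID; the file name and the user ID '12345' are placeholders, not values from the source above.

from boxsdk.auth.jwt_auth import JWTAuth

# Build auth from the app settings JSON downloaded from the Box Developer Console.
auth = JWTAuth.from_settings_file('config.json')

# Authenticate as the enterprise service account (uses the enterpriseID from the file)...
enterprise_token = auth.authenticate_instance()

# ...or as a specific app/managed user by Box user ID; the ID is remembered for later refresh() calls.
user_token = auth.authenticate_user('12345')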
from typing import List, Iterator, Union from collections import Counter from KINCluster.core.item import Item from KINCluster.lib.tokenizer import tokenizer from KINCluster import settings import numpy as np from gensim.models import Doc2Vec from gensim.models.doc2vec import TaggedDocument from scipy.cluster import hierarchy as hcluster class Cluster: def __init__(self, **kwargs): """hyperparameters :alpha = learning rate :min_alph = minimum learning reate :window = max value of vector :size = vector size :tokenizer = lambda document: str -> list or words: List[str] """ def getattrs(module): keys = [k for k in dir(module) if not k.startswith('__')] return {key: getattr(module, key) for key in keys} if 'settings' not in kwargs: self.settings = getattrs(settings) else: self.settings = kwargs['settings'] alpha = kwargs.get("alpha", self.settings['LEARNING_RATE']) min_alpha = kwargs.get("min_alpha", self.settings['LEARNING_RATE_MIN']) window = kwargs.get("window", self.settings['WINDOW']) size = kwargs.get("size", self.settings['SIZE']) self.trate = kwargs.get("trate", self.settings['TRANING_RATE']) self.epoch = kwargs.get("epoch", self.settings['EPOCH']) self.thresh = kwargs.get("thresh", self.settings['THRESHOLD']) self.tokenizer = tokenizer.s[kwargs.get("tokenizer", self.settings['TOKENIZER'])] self.model = Doc2Vec(alpha=alpha, min_alpha=min_alpha, window=window, size=size) self._items = [] self._counters = [] self._vectors = [] self._clusters = [] self._dumps = [] def put_item(self, item: Item): self._items.append(item) def __vocabs(self) -> Iterator[TaggedDocument]: for idx, item in enumerate(self._items): token = self.tokenizer(repr(item)) self._counters.append(Counter(token)) yield TaggedDocument(token, ['line_%s' % idx]) def __documents(self) -> Iterator[TaggedDocument]: for idx, item in enumerate(self._items): yield TaggedDocument(self.tokenizer(str(item)), ['line_%s' % idx]) def __cluster(self, method, metric, criterion) -> np.ndarray: return hcluster.fclusterdata(self._vectors, self.thresh, method=method, metric=metric, criterion=criterion) def cluster(self): # COMMENT: Top keyword 만 잘라서 분류해보기 """cluster process : build vocab, using repr of item : train items, using str of item : get _vectors and _clusters """ self.model.build_vocab(self.__vocabs()) documents = list(self.__documents()) for _ in range(self.epoch): self.model.train(documents) self.model.alpha *= self.trate self.model.min_alpha = self.model.alpha self._vectors = np.array(self.model.docvecs) self._clusters = self.__cluster(self.settings['METHOD'], self.settings['METRIC'], self.settings['CRITERION']) dumps = {c: [] for c in self.unique} for cluster, item, vector, counter in zip(self._clusters, self._items, self._vectors, self._counters): dumps[cluster].append((item, vector, counter)) self._dumps = list(dumps.values()) def similar(self, pos, neg=[], top=10): return self.model.most_similar(positive=pos, negative=neg, topn=top) @property def items(self) -> List[Item]: return self._items @property def vocab(self) -> List[str]: return self.model.vocab @property def vocab_count(self) -> List[Counter]: return self._counters @property def dumps(self) -> List[List[Union[Item, np.ndarray]]]: return self._dumps @property def vectors(self) -> np.ndarray: return self._vectors @property def unique(self) -> np.ndarray: return np.unique(self._clusters) @property def clusters(self) -> np.ndarray: return self._clusters @property def distribution(self) -> np.ndarray: return Counter(self._clusters) def __len__(self): return 
len(self._clusters)
# -*- coding: utf-8 -*-
"""
tests.cluster
---------------

Test cluster of KINCluster

:author: MaybeS(maytryark@gmail.com)
"""

import pytest

from KINCluster.core.extractor import Extractor, extractable
from KINCluster.core.cluster import Cluster
from KINCluster.core.pipeline import Pipeline
from KINCluster.core.item import Item
from KINCluster.lib.tokenizer import tokenize, stemize

import codecs

test_text = ['2016헌나1.txt', '2014헌나1.txt']
test_keyword = ['헌법판결문', '헌법판결문']


class Pipeline(Pipeline):
    def capture_item(self):
        for text, keyword in zip(test_text, test_keyword):
            with codecs.open('tests/data/' + text, 'r', 'utf-8') as f:
                content = f.read()
            yield Item(title=text, content=content, keyword=keyword, date='')


def test_extractor1():
    cluster = Cluster(epoch=32, tokenizer="tokenize")
    pipeline = Pipeline()
    for item in pipeline.capture_item():
        cluster.put_item(item)
    cluster.cluster()

    extractor = Extractor(cluster)
    for idx, dump in enumerate(cluster.dumps):
        items, vectors, counter = map(list, zip(*dump))
        assert set(['items', 'vectors', 'counter', 'center', 'keywords']) == set(extractable.s.keys())
        extracted = extractor.dump(idx)
        assert isinstance(extracted, Item)
        assert isinstance(extracted.keywords, list)
        assert 32 == len(extracted.keywords)
memento7/KINCluster
tests/test_extractor.py
KINCluster/core/cluster.py
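The Cluster class above is driven by feeding it Item objects, calling cluster(), and reading the grouped dumps. Below is a minimal sketch of that flow, mirroring the test above; the titles and contents are placeholder strings (real corpora would be full documents), and the epoch/tokenizer values are taken from the test.

from KINCluster.core.cluster import Cluster
from KINCluster.core.item import Item

cluster = Cluster(epoch=32, tokenizer="tokenize")
for title, content in [("doc-1", "first sample document text"), ("doc-2", "second sample document text")]:
    cluster.put_item(Item(title=title, content=content, keyword="", date=""))

# Builds the Doc2Vec vocabulary, trains for `epoch` passes, then runs hierarchical clustering.
cluster.cluster()

# Each dump is the list of (item, vector, counter) tuples that fell into one cluster.
for dump in cluster.dumps:
    items, vectors, counters = map(list, zip(*dump))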
# Copyright 2015 Cloudera Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from impala.error import Error as ImpylaError  # noqa
from impala.error import HiveServer2Error as HS2Error  # noqa
import impala.dbapi as impyla  # noqa
# Copyright 2015 Cloudera Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import numpy as np import pandas as pd import pytest import ibis import ibis.expr.datatypes as dt import ibis.expr.types as ir from ibis.compat import unittest from ibis.common import IbisTypeError from ibis.impala.client import pandas_to_ibis_schema from ibis.impala.tests.common import ImpalaE2E functional_alltypes_with_nulls = pd.DataFrame({ 'bigint_col': np.int64([0, 10, 20, 30, 40, 50, 60, 70, 80, 90]), 'bool_col': np.bool_([True, False, True, False, True, None, True, False, True, False]), 'date_string_col': ['11/01/10', None, '11/01/10', '11/01/10', '11/01/10', '11/01/10', '11/01/10', '11/01/10', '11/01/10', '11/01/10'], 'double_col': np.float64([0.0, 10.1, None, 30.299999999999997, 40.399999999999999, 50.5, 60.599999999999994, 70.700000000000003, 80.799999999999997, 90.899999999999991]), 'float_col': np.float32([None, 1.1000000238418579, 2.2000000476837158, 3.2999999523162842, 4.4000000953674316, 5.5, 6.5999999046325684, 7.6999998092651367, 8.8000001907348633, 9.8999996185302734]), 'int_col': np.int32([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]), 'month': [11, 11, 11, 11, 2, 11, 11, 11, 11, 11], 'smallint_col': np.int16([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]), 'string_col': ['0', '1', None, '3', '4', '5', '6', '7', '8', '9'], 'timestamp_col': [pd.Timestamp('2010-11-01 00:00:00'), None, pd.Timestamp('2010-11-01 00:02:00.100000'), pd.Timestamp('2010-11-01 00:03:00.300000'), pd.Timestamp('2010-11-01 00:04:00.600000'), pd.Timestamp('2010-11-01 00:05:00.100000'), pd.Timestamp('2010-11-01 00:06:00.150000'), pd.Timestamp('2010-11-01 00:07:00.210000'), pd.Timestamp('2010-11-01 00:08:00.280000'), pd.Timestamp('2010-11-01 00:09:00.360000')], 'tinyint_col': np.int8([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]), 'year': [2010, 2010, 2010, 2010, 2010, 2010, 2010, 2010, 2010, 2010]}) class TestPandasTypeInterop(unittest.TestCase): def test_series_to_ibis_literal(self): values = [1, 2, 3, 4] s = pd.Series(values) expr = ir.as_value_expr(s) expected = ir.sequence(list(s)) assert expr.equals(expected) class TestPandasSchemaInference(unittest.TestCase): def test_dtype_bool(self): df = pd.DataFrame({'col': [True, False, False]}) inferred = pandas_to_ibis_schema(df) expected = ibis.schema([('col', 'boolean')]) assert inferred == expected def test_dtype_int8(self): df = pd.DataFrame({'col': np.int8([-3, 9, 17])}) inferred = pandas_to_ibis_schema(df) expected = ibis.schema([('col', 'int8')]) assert inferred == expected def test_dtype_int16(self): df = pd.DataFrame({'col': np.int16([-5, 0, 12])}) inferred = pandas_to_ibis_schema(df) expected = ibis.schema([('col', 'int16')]) assert inferred == expected def test_dtype_int32(self): df = pd.DataFrame({'col': np.int32([-12, 3, 25000])}) inferred = pandas_to_ibis_schema(df) expected = ibis.schema([('col', 'int32')]) assert inferred == expected def test_dtype_int64(self): df = pd.DataFrame({'col': np.int64([102, 67228734, -0])}) inferred = pandas_to_ibis_schema(df) expected = ibis.schema([('col', 'int64')]) assert inferred == expected 
def test_dtype_float32(self): df = pd.DataFrame({'col': np.float32([45e-3, -0.4, 99.])}) inferred = pandas_to_ibis_schema(df) expected = ibis.schema([('col', 'float')]) assert inferred == expected def test_dtype_float64(self): df = pd.DataFrame({'col': np.float64([-3e43, 43., 10000000.])}) inferred = pandas_to_ibis_schema(df) expected = ibis.schema([('col', 'double')]) assert inferred == expected def test_dtype_uint8(self): df = pd.DataFrame({'col': np.uint8([3, 0, 16])}) inferred = pandas_to_ibis_schema(df) expected = ibis.schema([('col', 'int16')]) assert inferred == expected def test_dtype_uint16(self): df = pd.DataFrame({'col': np.uint16([5569, 1, 33])}) inferred = pandas_to_ibis_schema(df) expected = ibis.schema([('col', 'int32')]) assert inferred == expected def test_dtype_uint32(self): df = pd.DataFrame({'col': np.uint32([100, 0, 6])}) inferred = pandas_to_ibis_schema(df) expected = ibis.schema([('col', 'int64')]) assert inferred == expected def test_dtype_uint64(self): df = pd.DataFrame({'col': np.uint64([666, 2, 3])}) with self.assertRaises(IbisTypeError): inferred = pandas_to_ibis_schema(df) # noqa def test_dtype_datetime64(self): df = pd.DataFrame({ 'col': [pd.Timestamp('2010-11-01 00:01:00'), pd.Timestamp('2010-11-01 00:02:00.1000'), pd.Timestamp('2010-11-01 00:03:00.300000')]}) inferred = pandas_to_ibis_schema(df) expected = ibis.schema([('col', 'timestamp')]) assert inferred == expected def test_dtype_timedelta64(self): df = pd.DataFrame({ 'col': [pd.Timedelta('1 days'), pd.Timedelta('-1 days 2 min 3us'), pd.Timedelta('-2 days +23:57:59.999997')]}) inferred = pandas_to_ibis_schema(df) expected = ibis.schema([('col', 'int64')]) assert inferred == expected def test_dtype_string(self): df = pd.DataFrame({'col': ['foo', 'bar', 'hello']}) inferred = pandas_to_ibis_schema(df) expected = ibis.schema([('col', 'string')]) assert inferred == expected def test_dtype_categorical(self): df = pd.DataFrame({'col': ['a', 'b', 'c', 'a']}, dtype='category') inferred = pandas_to_ibis_schema(df) expected = ibis.schema([('col', dt.Category(3))]) assert inferred == expected class TestPandasRoundTrip(ImpalaE2E, unittest.TestCase): def test_round_trip(self): pytest.skip('fails') df1 = self.alltypes.execute() df2 = self.con.pandas(df1, 'bamboo', database=self.tmp_db).execute() assert (df1.columns == df2.columns).all() assert (df1.dtypes == df2.dtypes).all() assert (df1 == df2).all().all() def test_round_trip_non_int_missing_data(self): pytest.skip('WM: hangs -- will investigate later') df1 = functional_alltypes_with_nulls table = self.con.pandas(df1, 'fawn', database=self.tmp_db) df2 = table.execute() assert (df1.columns == df2.columns).all() assert (df1.dtypes == df2.dtypes).all() # bool/int cols should be exact assert (df1.bool_col == df2.bool_col).all() assert (df1.tinyint_col == df2.tinyint_col).all() assert (df1.smallint_col == df2.smallint_col).all() assert (df1.int_col == df2.int_col).all() assert (df1.bigint_col == df2.bigint_col).all() assert (df1.month == df2.month).all() assert (df1.year == df2.year).all() # string cols should be equal everywhere except for the NULLs assert ((df1.string_col == df2.string_col) == [1, 1, 0, 1, 1, 1, 1, 1, 1, 1]).all() assert ((df1.date_string_col == df2.date_string_col) == [1, 0, 1, 1, 1, 1, 1, 1, 1, 1]).all() # float cols within tolerance, and NULLs should be False assert ((df1.double_col - df2.double_col < 1e-9) == [1, 1, 0, 1, 1, 1, 1, 1, 1, 1]).all() assert ((df1.float_col - df2.float_col < 1e-9) == [0, 1, 1, 1, 1, 1, 1, 1, 1, 1]).all() def 
test_round_trip_missing_type_promotion(self): pytest.skip('unfinished') # prepare Impala table with missing ints # TODO: switch to self.con.raw_sql once #412 is fixed create_query = ('CREATE TABLE {0}.missing_ints ' ' (tinyint_col TINYINT, bigint_col BIGINT) ' 'STORED AS PARQUET'.format(self.tmp_db)) insert_query = ('INSERT INTO {0}.missing_ints ' 'VALUES (NULL, 3), (-5, NULL), (19, 444444)'.format( self.tmp_db)) self.con.con.cursor.execute(create_query) self.con.con.cursor.execute(insert_query) table = self.con.table('missing_ints', database=self.tmp_db) df = table.execute() # noqa # REMOVE LATER # WHAT NOW?
aslihandincer/ibis
ibis/impala/tests/test_pandas_interop.py
ibis/impala/compat.py
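Most of the test module above exercises pandas_to_ibis_schema, which maps pandas/NumPy dtypes onto Ibis types, widening unsigned integers to the next signed type. The sketch below illustrates that mapping using the same expectations as the tests; the DataFrame contents are placeholders.

import numpy as np
import pandas as pd

import ibis
from ibis.impala.client import pandas_to_ibis_schema

df = pd.DataFrame({
    'a': np.int8([1, 2, 3]),            # int8    -> int8
    'b': np.uint8([1, 2, 3]),           # uint8   -> int16 (widened to fit)
    'c': np.float32([0.1, 0.2, 0.3]),   # float32 -> float
    'd': ['x', 'y', 'z'],               # object  -> string
})

assert pandas_to_ibis_schema(df) == ibis.schema(
    [('a', 'int8'), ('b', 'int16'), ('c', 'float'), ('d', 'string')]
)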
"""Component to integrate the Home Assistant cloud.""" from hass_nabucasa import Cloud import voluptuous as vol from homeassistant.components.alexa import const as alexa_const from homeassistant.components.google_assistant import const as ga_c from homeassistant.const import ( CONF_DESCRIPTION, CONF_MODE, CONF_NAME, CONF_REGION, EVENT_HOMEASSISTANT_STOP, ) from homeassistant.core import callback from homeassistant.exceptions import HomeAssistantError from homeassistant.helpers import config_validation as cv, entityfilter from homeassistant.loader import bind_hass from homeassistant.util.aiohttp import MockRequest from . import account_link, http_api from .client import CloudClient from .const import ( CONF_ACCOUNT_LINK_URL, CONF_ACME_DIRECTORY_SERVER, CONF_ALEXA, CONF_ALEXA_ACCESS_TOKEN_URL, CONF_ALIASES, CONF_CLOUDHOOK_CREATE_URL, CONF_COGNITO_CLIENT_ID, CONF_ENTITY_CONFIG, CONF_FILTER, CONF_GOOGLE_ACTIONS, CONF_GOOGLE_ACTIONS_REPORT_STATE_URL, CONF_RELAYER, CONF_REMOTE_API_URL, CONF_SUBSCRIPTION_INFO_URL, CONF_USER_POOL_ID, CONF_VOICE_API_URL, DOMAIN, MODE_DEV, MODE_PROD, ) from .prefs import CloudPreferences DEFAULT_MODE = MODE_PROD SERVICE_REMOTE_CONNECT = "remote_connect" SERVICE_REMOTE_DISCONNECT = "remote_disconnect" ALEXA_ENTITY_SCHEMA = vol.Schema( { vol.Optional(CONF_DESCRIPTION): cv.string, vol.Optional(alexa_const.CONF_DISPLAY_CATEGORIES): cv.string, vol.Optional(CONF_NAME): cv.string, } ) GOOGLE_ENTITY_SCHEMA = vol.Schema( { vol.Optional(CONF_NAME): cv.string, vol.Optional(CONF_ALIASES): vol.All(cv.ensure_list, [cv.string]), vol.Optional(ga_c.CONF_ROOM_HINT): cv.string, } ) ASSISTANT_SCHEMA = vol.Schema( {vol.Optional(CONF_FILTER, default=dict): entityfilter.FILTER_SCHEMA} ) ALEXA_SCHEMA = ASSISTANT_SCHEMA.extend( {vol.Optional(CONF_ENTITY_CONFIG): {cv.entity_id: ALEXA_ENTITY_SCHEMA}} ) GACTIONS_SCHEMA = ASSISTANT_SCHEMA.extend( {vol.Optional(CONF_ENTITY_CONFIG): {cv.entity_id: GOOGLE_ENTITY_SCHEMA}} ) # pylint: disable=no-value-for-parameter CONFIG_SCHEMA = vol.Schema( { DOMAIN: vol.Schema( { vol.Optional(CONF_MODE, default=DEFAULT_MODE): vol.In( [MODE_DEV, MODE_PROD] ), vol.Optional(CONF_COGNITO_CLIENT_ID): str, vol.Optional(CONF_USER_POOL_ID): str, vol.Optional(CONF_REGION): str, vol.Optional(CONF_RELAYER): str, vol.Optional(CONF_SUBSCRIPTION_INFO_URL): vol.Url(), vol.Optional(CONF_CLOUDHOOK_CREATE_URL): vol.Url(), vol.Optional(CONF_REMOTE_API_URL): vol.Url(), vol.Optional(CONF_ACME_DIRECTORY_SERVER): vol.Url(), vol.Optional(CONF_ALEXA): ALEXA_SCHEMA, vol.Optional(CONF_GOOGLE_ACTIONS): GACTIONS_SCHEMA, vol.Optional(CONF_ALEXA_ACCESS_TOKEN_URL): vol.Url(), vol.Optional(CONF_GOOGLE_ACTIONS_REPORT_STATE_URL): vol.Url(), vol.Optional(CONF_ACCOUNT_LINK_URL): vol.Url(), vol.Optional(CONF_VOICE_API_URL): vol.Url(), } ) }, extra=vol.ALLOW_EXTRA, ) class CloudNotAvailable(HomeAssistantError): """Raised when an action requires the cloud but it's not available.""" @bind_hass @callback def async_is_logged_in(hass) -> bool: """Test if user is logged in.""" return DOMAIN in hass.data and hass.data[DOMAIN].is_logged_in @bind_hass @callback def async_active_subscription(hass) -> bool: """Test if user has an active subscription.""" return async_is_logged_in(hass) and not hass.data[DOMAIN].subscription_expired @bind_hass async def async_create_cloudhook(hass, webhook_id: str) -> str: """Create a cloudhook.""" if not async_is_logged_in(hass): raise CloudNotAvailable hook = await hass.data[DOMAIN].cloudhooks.async_create(webhook_id, True) return hook["cloudhook_url"] @bind_hass async def 
async_delete_cloudhook(hass, webhook_id: str) -> None: """Delete a cloudhook.""" if DOMAIN not in hass.data: raise CloudNotAvailable await hass.data[DOMAIN].cloudhooks.async_delete(webhook_id) @bind_hass @callback def async_remote_ui_url(hass) -> str: """Get the remote UI URL.""" if not async_is_logged_in(hass): raise CloudNotAvailable if not hass.data[DOMAIN].client.prefs.remote_enabled: raise CloudNotAvailable if not hass.data[DOMAIN].remote.instance_domain: raise CloudNotAvailable return f"https://{hass.data[DOMAIN].remote.instance_domain}" def is_cloudhook_request(request): """Test if a request came from a cloudhook. Async friendly. """ return isinstance(request, MockRequest) async def async_setup(hass, config): """Initialize the Home Assistant cloud.""" # Process configs if DOMAIN in config: kwargs = dict(config[DOMAIN]) else: kwargs = {CONF_MODE: DEFAULT_MODE} # Alexa/Google custom config alexa_conf = kwargs.pop(CONF_ALEXA, None) or ALEXA_SCHEMA({}) google_conf = kwargs.pop(CONF_GOOGLE_ACTIONS, None) or GACTIONS_SCHEMA({}) # Cloud settings prefs = CloudPreferences(hass) await prefs.async_initialize() # Initialize Cloud websession = hass.helpers.aiohttp_client.async_get_clientsession() client = CloudClient(hass, prefs, websession, alexa_conf, google_conf) cloud = hass.data[DOMAIN] = Cloud(client, **kwargs) async def _shutdown(event): """Shutdown event.""" await cloud.stop() hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, _shutdown) async def _service_handler(service): """Handle service for cloud.""" if service.service == SERVICE_REMOTE_CONNECT: await cloud.remote.connect() await prefs.async_update(remote_enabled=True) elif service.service == SERVICE_REMOTE_DISCONNECT: await cloud.remote.disconnect() await prefs.async_update(remote_enabled=False) hass.helpers.service.async_register_admin_service( DOMAIN, SERVICE_REMOTE_CONNECT, _service_handler ) hass.helpers.service.async_register_admin_service( DOMAIN, SERVICE_REMOTE_DISCONNECT, _service_handler ) loaded = False async def _on_connect(): """Discover RemoteUI binary sensor.""" nonlocal loaded # Prevent multiple discovery if loaded: return loaded = True await hass.helpers.discovery.async_load_platform( "binary_sensor", DOMAIN, {}, config ) await hass.helpers.discovery.async_load_platform("stt", DOMAIN, {}, config) await hass.helpers.discovery.async_load_platform("tts", DOMAIN, {}, config) cloud.iot.register_on_connect(_on_connect) await cloud.start() await http_api.async_setup(hass) account_link.async_setup(hass) return True
"""The tests for the mochad light platform.""" import unittest.mock as mock import pytest from homeassistant.components import light from homeassistant.components.mochad import light as mochad from homeassistant.setup import async_setup_component @pytest.fixture(autouse=True) def pymochad_mock(): """Mock pymochad.""" with mock.patch("homeassistant.components.mochad.light.device") as device: yield device @pytest.fixture def light_mock(hass, brightness): """Mock light.""" controller_mock = mock.MagicMock() dev_dict = {"address": "a1", "name": "fake_light", "brightness_levels": brightness} return mochad.MochadLight(hass, controller_mock, dev_dict) async def test_setup_adds_proper_devices(hass): """Test if setup adds devices.""" good_config = { "mochad": {}, "light": { "platform": "mochad", "devices": [{"name": "Light1", "address": "a1"}], }, } assert await async_setup_component(hass, light.DOMAIN, good_config) @pytest.mark.parametrize( "brightness,expected", [(32, "on"), (256, "xdim 255"), (64, "xdim 63")] ) async def test_turn_on_with_no_brightness(light_mock, expected): """Test turn_on.""" light_mock.turn_on() light_mock.light.send_cmd.assert_called_once_with(expected) @pytest.mark.parametrize( "brightness,expected", [ (32, [mock.call("on"), mock.call("dim 25")]), (256, [mock.call("xdim 45")]), (64, [mock.call("xdim 11")]), ], ) async def test_turn_on_with_brightness(light_mock, expected): """Test turn_on.""" light_mock.turn_on(brightness=45) light_mock.light.send_cmd.assert_has_calls(expected) @pytest.mark.parametrize("brightness", [32]) async def test_turn_off(light_mock): """Test turn_off.""" light_mock.turn_off() light_mock.light.send_cmd.assert_called_once_with("off")
w1ll1am23/home-assistant
tests/components/mochad/test_light.py
homeassistant/components/cloud/__init__.py
"""Support for hydrological data from the Fed. Office for the Environment.""" from datetime import timedelta import logging from swisshydrodata import SwissHydroData import voluptuous as vol from homeassistant.components.sensor import PLATFORM_SCHEMA, SensorEntity from homeassistant.const import ATTR_ATTRIBUTION, CONF_MONITORED_CONDITIONS import homeassistant.helpers.config_validation as cv from homeassistant.util import Throttle _LOGGER = logging.getLogger(__name__) ATTRIBUTION = "Data provided by the Swiss Federal Office for the Environment FOEN" ATTR_DELTA_24H = "delta-24h" ATTR_MAX_1H = "max-1h" ATTR_MAX_24H = "max-24h" ATTR_MEAN_1H = "mean-1h" ATTR_MEAN_24H = "mean-24h" ATTR_MIN_1H = "min-1h" ATTR_MIN_24H = "min-24h" ATTR_PREVIOUS_24H = "previous-24h" ATTR_STATION = "station" ATTR_STATION_UPDATE = "station_update" ATTR_WATER_BODY = "water_body" ATTR_WATER_BODY_TYPE = "water_body_type" CONF_STATION = "station" MIN_TIME_BETWEEN_UPDATES = timedelta(seconds=60) SENSOR_DISCHARGE = "discharge" SENSOR_LEVEL = "level" SENSOR_TEMPERATURE = "temperature" CONDITIONS = { SENSOR_DISCHARGE: "mdi:waves", SENSOR_LEVEL: "mdi:zodiac-aquarius", SENSOR_TEMPERATURE: "mdi:oil-temperature", } CONDITION_DETAILS = [ ATTR_DELTA_24H, ATTR_MAX_1H, ATTR_MAX_24H, ATTR_MEAN_1H, ATTR_MEAN_24H, ATTR_MIN_1H, ATTR_MIN_24H, ATTR_PREVIOUS_24H, ] PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend( { vol.Required(CONF_STATION): vol.Coerce(int), vol.Optional(CONF_MONITORED_CONDITIONS, default=[SENSOR_TEMPERATURE]): vol.All( cv.ensure_list, [vol.In(CONDITIONS)] ), } ) def setup_platform(hass, config, add_entities, discovery_info=None): """Set up the Swiss hydrological sensor.""" station = config.get(CONF_STATION) monitored_conditions = config.get(CONF_MONITORED_CONDITIONS) hydro_data = HydrologicalData(station) hydro_data.update() if hydro_data.data is None: _LOGGER.error("The station doesn't exists: %s", station) return entities = [] for condition in monitored_conditions: entities.append(SwissHydrologicalDataSensor(hydro_data, station, condition)) add_entities(entities, True) class SwissHydrologicalDataSensor(SensorEntity): """Implementation of a Swiss hydrological sensor.""" def __init__(self, hydro_data, station, condition): """Initialize the Swiss hydrological sensor.""" self.hydro_data = hydro_data self._condition = condition self._data = self._state = self._unit_of_measurement = None self._icon = CONDITIONS[condition] self._station = station @property def name(self): """Return the name of the sensor.""" return "{} {}".format(self._data["water-body-name"], self._condition) @property def unique_id(self) -> str: """Return a unique, friendly identifier for this entity.""" return f"{self._station}_{self._condition}" @property def unit_of_measurement(self): """Return the unit of measurement of this entity, if any.""" if self._state is not None: return self.hydro_data.data["parameters"][self._condition]["unit"] return None @property def state(self): """Return the state of the sensor.""" if isinstance(self._state, (int, float)): return round(self._state, 2) return None @property def extra_state_attributes(self): """Return the device state attributes.""" attrs = {} if not self._data: attrs[ATTR_ATTRIBUTION] = ATTRIBUTION return attrs attrs[ATTR_WATER_BODY_TYPE] = self._data["water-body-type"] attrs[ATTR_STATION] = self._data["name"] attrs[ATTR_STATION_UPDATE] = self._data["parameters"][self._condition][ "datetime" ] attrs[ATTR_ATTRIBUTION] = ATTRIBUTION for entry in CONDITION_DETAILS: attrs[entry.replace("-", "_")] = 
self._data["parameters"][self._condition][ entry ] return attrs @property def icon(self): """Icon to use in the frontend.""" return self._icon def update(self): """Get the latest data and update the state.""" self.hydro_data.update() self._data = self.hydro_data.data if self._data is None: self._state = None else: self._state = self._data["parameters"][self._condition]["value"] class HydrologicalData: """The Class for handling the data retrieval.""" def __init__(self, station): """Initialize the data object.""" self.station = station self.data = None @Throttle(MIN_TIME_BETWEEN_UPDATES) def update(self): """Get the latest data.""" shd = SwissHydroData() self.data = shd.get_station(self.station)
"""The tests for the mochad light platform.""" import unittest.mock as mock import pytest from homeassistant.components import light from homeassistant.components.mochad import light as mochad from homeassistant.setup import async_setup_component @pytest.fixture(autouse=True) def pymochad_mock(): """Mock pymochad.""" with mock.patch("homeassistant.components.mochad.light.device") as device: yield device @pytest.fixture def light_mock(hass, brightness): """Mock light.""" controller_mock = mock.MagicMock() dev_dict = {"address": "a1", "name": "fake_light", "brightness_levels": brightness} return mochad.MochadLight(hass, controller_mock, dev_dict) async def test_setup_adds_proper_devices(hass): """Test if setup adds devices.""" good_config = { "mochad": {}, "light": { "platform": "mochad", "devices": [{"name": "Light1", "address": "a1"}], }, } assert await async_setup_component(hass, light.DOMAIN, good_config) @pytest.mark.parametrize( "brightness,expected", [(32, "on"), (256, "xdim 255"), (64, "xdim 63")] ) async def test_turn_on_with_no_brightness(light_mock, expected): """Test turn_on.""" light_mock.turn_on() light_mock.light.send_cmd.assert_called_once_with(expected) @pytest.mark.parametrize( "brightness,expected", [ (32, [mock.call("on"), mock.call("dim 25")]), (256, [mock.call("xdim 45")]), (64, [mock.call("xdim 11")]), ], ) async def test_turn_on_with_brightness(light_mock, expected): """Test turn_on.""" light_mock.turn_on(brightness=45) light_mock.light.send_cmd.assert_has_calls(expected) @pytest.mark.parametrize("brightness", [32]) async def test_turn_off(light_mock): """Test turn_off.""" light_mock.turn_off() light_mock.light.send_cmd.assert_called_once_with("off")
w1ll1am23/home-assistant
tests/components/mochad/test_light.py
homeassistant/components/swiss_hydrological_data/sensor.py
"""Support for MQTT vacuums.""" import functools import voluptuous as vol from homeassistant.components.vacuum import DOMAIN from homeassistant.helpers.reload import async_setup_reload_service from .. import DOMAIN as MQTT_DOMAIN, PLATFORMS from ..mixins import async_setup_entry_helper from .schema import CONF_SCHEMA, LEGACY, MQTT_VACUUM_SCHEMA, STATE from .schema_legacy import PLATFORM_SCHEMA_LEGACY, async_setup_entity_legacy from .schema_state import PLATFORM_SCHEMA_STATE, async_setup_entity_state def validate_mqtt_vacuum(value): """Validate MQTT vacuum schema.""" schemas = {LEGACY: PLATFORM_SCHEMA_LEGACY, STATE: PLATFORM_SCHEMA_STATE} return schemas[value[CONF_SCHEMA]](value) PLATFORM_SCHEMA = vol.All( MQTT_VACUUM_SCHEMA.extend({}, extra=vol.ALLOW_EXTRA), validate_mqtt_vacuum ) async def async_setup_platform(hass, config, async_add_entities, discovery_info=None): """Set up MQTT vacuum through configuration.yaml.""" await async_setup_reload_service(hass, MQTT_DOMAIN, PLATFORMS) await _async_setup_entity(async_add_entities, config) async def async_setup_entry(hass, config_entry, async_add_entities): """Set up MQTT vacuum dynamically through MQTT discovery.""" setup = functools.partial( _async_setup_entity, async_add_entities, config_entry=config_entry ) await async_setup_entry_helper(hass, DOMAIN, setup, PLATFORM_SCHEMA) async def _async_setup_entity( async_add_entities, config, config_entry=None, discovery_data=None ): """Set up the MQTT vacuum.""" setup_entity = {LEGACY: async_setup_entity_legacy, STATE: async_setup_entity_state} await setup_entity[config[CONF_SCHEMA]]( config, async_add_entities, config_entry, discovery_data )
"""The tests for the mochad light platform.""" import unittest.mock as mock import pytest from homeassistant.components import light from homeassistant.components.mochad import light as mochad from homeassistant.setup import async_setup_component @pytest.fixture(autouse=True) def pymochad_mock(): """Mock pymochad.""" with mock.patch("homeassistant.components.mochad.light.device") as device: yield device @pytest.fixture def light_mock(hass, brightness): """Mock light.""" controller_mock = mock.MagicMock() dev_dict = {"address": "a1", "name": "fake_light", "brightness_levels": brightness} return mochad.MochadLight(hass, controller_mock, dev_dict) async def test_setup_adds_proper_devices(hass): """Test if setup adds devices.""" good_config = { "mochad": {}, "light": { "platform": "mochad", "devices": [{"name": "Light1", "address": "a1"}], }, } assert await async_setup_component(hass, light.DOMAIN, good_config) @pytest.mark.parametrize( "brightness,expected", [(32, "on"), (256, "xdim 255"), (64, "xdim 63")] ) async def test_turn_on_with_no_brightness(light_mock, expected): """Test turn_on.""" light_mock.turn_on() light_mock.light.send_cmd.assert_called_once_with(expected) @pytest.mark.parametrize( "brightness,expected", [ (32, [mock.call("on"), mock.call("dim 25")]), (256, [mock.call("xdim 45")]), (64, [mock.call("xdim 11")]), ], ) async def test_turn_on_with_brightness(light_mock, expected): """Test turn_on.""" light_mock.turn_on(brightness=45) light_mock.light.send_cmd.assert_has_calls(expected) @pytest.mark.parametrize("brightness", [32]) async def test_turn_off(light_mock): """Test turn_off.""" light_mock.turn_off() light_mock.light.send_cmd.assert_called_once_with("off")
w1ll1am23/home-assistant
tests/components/mochad/test_light.py
homeassistant/components/mqtt/vacuum/__init__.py
"""Reproduce an Input select state.""" from __future__ import annotations import asyncio import logging from types import MappingProxyType from typing import Any, Iterable from homeassistant.const import ATTR_ENTITY_ID from homeassistant.core import Context, HomeAssistant, State from . import ( ATTR_OPTION, ATTR_OPTIONS, DOMAIN, SERVICE_SELECT_OPTION, SERVICE_SET_OPTIONS, ) ATTR_GROUP = [ATTR_OPTION, ATTR_OPTIONS] _LOGGER = logging.getLogger(__name__) async def _async_reproduce_state( hass: HomeAssistant, state: State, *, context: Context | None = None, reproduce_options: dict[str, Any] | None = None, ) -> None: """Reproduce a single state.""" cur_state = hass.states.get(state.entity_id) # Return if we can't find entity if cur_state is None: _LOGGER.warning("Unable to find entity %s", state.entity_id) return # Return if we are already at the right state. if cur_state.state == state.state and all( check_attr_equal(cur_state.attributes, state.attributes, attr) for attr in ATTR_GROUP ): return # Set service data service_data = {ATTR_ENTITY_ID: state.entity_id} # If options are specified, call SERVICE_SET_OPTIONS if ATTR_OPTIONS in state.attributes: service = SERVICE_SET_OPTIONS service_data[ATTR_OPTIONS] = state.attributes[ATTR_OPTIONS] await hass.services.async_call( DOMAIN, service, service_data, context=context, blocking=True ) # Remove ATTR_OPTIONS from service_data so we can reuse service_data in next call del service_data[ATTR_OPTIONS] # Call SERVICE_SELECT_OPTION service = SERVICE_SELECT_OPTION service_data[ATTR_OPTION] = state.state await hass.services.async_call( DOMAIN, service, service_data, context=context, blocking=True ) async def async_reproduce_states( hass: HomeAssistant, states: Iterable[State], *, context: Context | None = None, reproduce_options: dict[str, Any] | None = None, ) -> None: """Reproduce Input select states.""" # Reproduce states in parallel. await asyncio.gather( *( _async_reproduce_state( hass, state, context=context, reproduce_options=reproduce_options ) for state in states ) ) def check_attr_equal( attr1: MappingProxyType, attr2: MappingProxyType, attr_str: str ) -> bool: """Return true if the given attributes are equal.""" return attr1.get(attr_str) == attr2.get(attr_str)
"""The tests for the mochad light platform.""" import unittest.mock as mock import pytest from homeassistant.components import light from homeassistant.components.mochad import light as mochad from homeassistant.setup import async_setup_component @pytest.fixture(autouse=True) def pymochad_mock(): """Mock pymochad.""" with mock.patch("homeassistant.components.mochad.light.device") as device: yield device @pytest.fixture def light_mock(hass, brightness): """Mock light.""" controller_mock = mock.MagicMock() dev_dict = {"address": "a1", "name": "fake_light", "brightness_levels": brightness} return mochad.MochadLight(hass, controller_mock, dev_dict) async def test_setup_adds_proper_devices(hass): """Test if setup adds devices.""" good_config = { "mochad": {}, "light": { "platform": "mochad", "devices": [{"name": "Light1", "address": "a1"}], }, } assert await async_setup_component(hass, light.DOMAIN, good_config) @pytest.mark.parametrize( "brightness,expected", [(32, "on"), (256, "xdim 255"), (64, "xdim 63")] ) async def test_turn_on_with_no_brightness(light_mock, expected): """Test turn_on.""" light_mock.turn_on() light_mock.light.send_cmd.assert_called_once_with(expected) @pytest.mark.parametrize( "brightness,expected", [ (32, [mock.call("on"), mock.call("dim 25")]), (256, [mock.call("xdim 45")]), (64, [mock.call("xdim 11")]), ], ) async def test_turn_on_with_brightness(light_mock, expected): """Test turn_on.""" light_mock.turn_on(brightness=45) light_mock.light.send_cmd.assert_has_calls(expected) @pytest.mark.parametrize("brightness", [32]) async def test_turn_off(light_mock): """Test turn_off.""" light_mock.turn_off() light_mock.light.send_cmd.assert_called_once_with("off")
w1ll1am23/home-assistant
tests/components/mochad/test_light.py
homeassistant/components/input_select/reproduce_state.py
"""Support for OpenTherm Gateway devices.""" import asyncio from datetime import date, datetime import logging import pyotgw import pyotgw.vars as gw_vars import voluptuous as vol from homeassistant.components.binary_sensor import DOMAIN as COMP_BINARY_SENSOR from homeassistant.components.climate import DOMAIN as COMP_CLIMATE from homeassistant.components.sensor import DOMAIN as COMP_SENSOR from homeassistant.config_entries import SOURCE_IMPORT from homeassistant.const import ( ATTR_DATE, ATTR_ID, ATTR_MODE, ATTR_TEMPERATURE, ATTR_TIME, CONF_DEVICE, CONF_ID, CONF_NAME, EVENT_HOMEASSISTANT_STOP, PRECISION_HALVES, PRECISION_TENTHS, PRECISION_WHOLE, ) import homeassistant.helpers.config_validation as cv from homeassistant.helpers.device_registry import ( async_get_registry as async_get_dev_reg, ) from homeassistant.helpers.dispatcher import async_dispatcher_send from .const import ( ATTR_CH_OVRD, ATTR_DHW_OVRD, ATTR_GW_ID, ATTR_LEVEL, CONF_CLIMATE, CONF_FLOOR_TEMP, CONF_PRECISION, CONF_READ_PRECISION, CONF_SET_PRECISION, DATA_GATEWAYS, DATA_OPENTHERM_GW, DOMAIN, SERVICE_RESET_GATEWAY, SERVICE_SET_CH_OVRD, SERVICE_SET_CLOCK, SERVICE_SET_CONTROL_SETPOINT, SERVICE_SET_GPIO_MODE, SERVICE_SET_HOT_WATER_OVRD, SERVICE_SET_HOT_WATER_SETPOINT, SERVICE_SET_LED_MODE, SERVICE_SET_MAX_MOD, SERVICE_SET_OAT, SERVICE_SET_SB_TEMP, ) _LOGGER = logging.getLogger(__name__) CLIMATE_SCHEMA = vol.Schema( { vol.Optional(CONF_PRECISION): vol.In( [PRECISION_TENTHS, PRECISION_HALVES, PRECISION_WHOLE] ), vol.Optional(CONF_FLOOR_TEMP, default=False): cv.boolean, } ) CONFIG_SCHEMA = vol.Schema( { DOMAIN: cv.schema_with_slug_keys( { vol.Required(CONF_DEVICE): cv.string, vol.Optional(CONF_CLIMATE, default={}): CLIMATE_SCHEMA, vol.Optional(CONF_NAME): cv.string, } ) }, extra=vol.ALLOW_EXTRA, ) async def options_updated(hass, entry): """Handle options update.""" gateway = hass.data[DATA_OPENTHERM_GW][DATA_GATEWAYS][entry.data[CONF_ID]] async_dispatcher_send(hass, gateway.options_update_signal, entry) async def async_setup_entry(hass, config_entry): """Set up the OpenTherm Gateway component.""" if DATA_OPENTHERM_GW not in hass.data: hass.data[DATA_OPENTHERM_GW] = {DATA_GATEWAYS: {}} gateway = OpenThermGatewayDevice(hass, config_entry) hass.data[DATA_OPENTHERM_GW][DATA_GATEWAYS][config_entry.data[CONF_ID]] = gateway if config_entry.options.get(CONF_PRECISION): migrate_options = dict(config_entry.options) migrate_options.update( { CONF_READ_PRECISION: config_entry.options[CONF_PRECISION], CONF_SET_PRECISION: config_entry.options[CONF_PRECISION], } ) del migrate_options[CONF_PRECISION] hass.config_entries.async_update_entry(config_entry, options=migrate_options) config_entry.add_update_listener(options_updated) # Schedule directly on the loop to avoid blocking HA startup. 
hass.loop.create_task(gateway.connect_and_subscribe()) for comp in [COMP_BINARY_SENSOR, COMP_CLIMATE, COMP_SENSOR]: hass.async_create_task( hass.config_entries.async_forward_entry_setup(config_entry, comp) ) register_services(hass) return True async def async_setup(hass, config): """Set up the OpenTherm Gateway component.""" if not hass.config_entries.async_entries(DOMAIN) and DOMAIN in config: conf = config[DOMAIN] for device_id, device_config in conf.items(): device_config[CONF_ID] = device_id hass.async_create_task( hass.config_entries.flow.async_init( DOMAIN, context={"source": SOURCE_IMPORT}, data=device_config ) ) return True def register_services(hass): """Register services for the component.""" service_reset_schema = vol.Schema( { vol.Required(ATTR_GW_ID): vol.All( cv.string, vol.In(hass.data[DATA_OPENTHERM_GW][DATA_GATEWAYS]) ) } ) service_set_central_heating_ovrd_schema = vol.Schema( { vol.Required(ATTR_GW_ID): vol.All( cv.string, vol.In(hass.data[DATA_OPENTHERM_GW][DATA_GATEWAYS]) ), vol.Required(ATTR_CH_OVRD): cv.boolean, } ) service_set_clock_schema = vol.Schema( { vol.Required(ATTR_GW_ID): vol.All( cv.string, vol.In(hass.data[DATA_OPENTHERM_GW][DATA_GATEWAYS]) ), vol.Optional(ATTR_DATE, default=date.today()): cv.date, vol.Optional(ATTR_TIME, default=datetime.now().time()): cv.time, } ) service_set_control_setpoint_schema = vol.Schema( { vol.Required(ATTR_GW_ID): vol.All( cv.string, vol.In(hass.data[DATA_OPENTHERM_GW][DATA_GATEWAYS]) ), vol.Required(ATTR_TEMPERATURE): vol.All( vol.Coerce(float), vol.Range(min=0, max=90) ), } ) service_set_hot_water_setpoint_schema = service_set_control_setpoint_schema service_set_hot_water_ovrd_schema = vol.Schema( { vol.Required(ATTR_GW_ID): vol.All( cv.string, vol.In(hass.data[DATA_OPENTHERM_GW][DATA_GATEWAYS]) ), vol.Required(ATTR_DHW_OVRD): vol.Any( vol.Equal("A"), vol.All(vol.Coerce(int), vol.Range(min=0, max=1)) ), } ) service_set_gpio_mode_schema = vol.Schema( vol.Any( vol.Schema( { vol.Required(ATTR_GW_ID): vol.All( cv.string, vol.In(hass.data[DATA_OPENTHERM_GW][DATA_GATEWAYS]) ), vol.Required(ATTR_ID): vol.Equal("A"), vol.Required(ATTR_MODE): vol.All( vol.Coerce(int), vol.Range(min=0, max=6) ), } ), vol.Schema( { vol.Required(ATTR_GW_ID): vol.All( cv.string, vol.In(hass.data[DATA_OPENTHERM_GW][DATA_GATEWAYS]) ), vol.Required(ATTR_ID): vol.Equal("B"), vol.Required(ATTR_MODE): vol.All( vol.Coerce(int), vol.Range(min=0, max=7) ), } ), ) ) service_set_led_mode_schema = vol.Schema( { vol.Required(ATTR_GW_ID): vol.All( cv.string, vol.In(hass.data[DATA_OPENTHERM_GW][DATA_GATEWAYS]) ), vol.Required(ATTR_ID): vol.In("ABCDEF"), vol.Required(ATTR_MODE): vol.In("RXTBOFHWCEMP"), } ) service_set_max_mod_schema = vol.Schema( { vol.Required(ATTR_GW_ID): vol.All( cv.string, vol.In(hass.data[DATA_OPENTHERM_GW][DATA_GATEWAYS]) ), vol.Required(ATTR_LEVEL): vol.All( vol.Coerce(int), vol.Range(min=-1, max=100) ), } ) service_set_oat_schema = vol.Schema( { vol.Required(ATTR_GW_ID): vol.All( cv.string, vol.In(hass.data[DATA_OPENTHERM_GW][DATA_GATEWAYS]) ), vol.Required(ATTR_TEMPERATURE): vol.All( vol.Coerce(float), vol.Range(min=-40, max=99) ), } ) service_set_sb_temp_schema = vol.Schema( { vol.Required(ATTR_GW_ID): vol.All( cv.string, vol.In(hass.data[DATA_OPENTHERM_GW][DATA_GATEWAYS]) ), vol.Required(ATTR_TEMPERATURE): vol.All( vol.Coerce(float), vol.Range(min=0, max=30) ), } ) async def reset_gateway(call): """Reset the OpenTherm Gateway.""" gw_dev = hass.data[DATA_OPENTHERM_GW][DATA_GATEWAYS][call.data[ATTR_GW_ID]] mode_rst = gw_vars.OTGW_MODE_RESET 
status = await gw_dev.gateway.set_mode(mode_rst) gw_dev.status = status async_dispatcher_send(hass, gw_dev.update_signal, gw_dev.status) hass.services.async_register( DOMAIN, SERVICE_RESET_GATEWAY, reset_gateway, service_reset_schema ) async def set_ch_ovrd(call): """Set the central heating override on the OpenTherm Gateway.""" gw_dev = hass.data[DATA_OPENTHERM_GW][DATA_GATEWAYS][call.data[ATTR_GW_ID]] await gw_dev.gateway.set_ch_enable_bit(1 if call.data[ATTR_CH_OVRD] else 0) hass.services.async_register( DOMAIN, SERVICE_SET_CH_OVRD, set_ch_ovrd, service_set_central_heating_ovrd_schema, ) async def set_control_setpoint(call): """Set the control setpoint on the OpenTherm Gateway.""" gw_dev = hass.data[DATA_OPENTHERM_GW][DATA_GATEWAYS][call.data[ATTR_GW_ID]] gw_var = gw_vars.DATA_CONTROL_SETPOINT value = await gw_dev.gateway.set_control_setpoint(call.data[ATTR_TEMPERATURE]) gw_dev.status.update({gw_var: value}) async_dispatcher_send(hass, gw_dev.update_signal, gw_dev.status) hass.services.async_register( DOMAIN, SERVICE_SET_CONTROL_SETPOINT, set_control_setpoint, service_set_control_setpoint_schema, ) async def set_dhw_ovrd(call): """Set the domestic hot water override on the OpenTherm Gateway.""" gw_dev = hass.data[DATA_OPENTHERM_GW][DATA_GATEWAYS][call.data[ATTR_GW_ID]] gw_var = gw_vars.OTGW_DHW_OVRD value = await gw_dev.gateway.set_hot_water_ovrd(call.data[ATTR_DHW_OVRD]) gw_dev.status.update({gw_var: value}) async_dispatcher_send(hass, gw_dev.update_signal, gw_dev.status) hass.services.async_register( DOMAIN, SERVICE_SET_HOT_WATER_OVRD, set_dhw_ovrd, service_set_hot_water_ovrd_schema, ) async def set_dhw_setpoint(call): """Set the domestic hot water setpoint on the OpenTherm Gateway.""" gw_dev = hass.data[DATA_OPENTHERM_GW][DATA_GATEWAYS][call.data[ATTR_GW_ID]] gw_var = gw_vars.DATA_DHW_SETPOINT value = await gw_dev.gateway.set_dhw_setpoint(call.data[ATTR_TEMPERATURE]) gw_dev.status.update({gw_var: value}) async_dispatcher_send(hass, gw_dev.update_signal, gw_dev.status) hass.services.async_register( DOMAIN, SERVICE_SET_HOT_WATER_SETPOINT, set_dhw_setpoint, service_set_hot_water_setpoint_schema, ) async def set_device_clock(call): """Set the clock on the OpenTherm Gateway.""" gw_dev = hass.data[DATA_OPENTHERM_GW][DATA_GATEWAYS][call.data[ATTR_GW_ID]] attr_date = call.data[ATTR_DATE] attr_time = call.data[ATTR_TIME] await gw_dev.gateway.set_clock(datetime.combine(attr_date, attr_time)) hass.services.async_register( DOMAIN, SERVICE_SET_CLOCK, set_device_clock, service_set_clock_schema ) async def set_gpio_mode(call): """Set the OpenTherm Gateway GPIO modes.""" gw_dev = hass.data[DATA_OPENTHERM_GW][DATA_GATEWAYS][call.data[ATTR_GW_ID]] gpio_id = call.data[ATTR_ID] gpio_mode = call.data[ATTR_MODE] mode = await gw_dev.gateway.set_gpio_mode(gpio_id, gpio_mode) gpio_var = getattr(gw_vars, f"OTGW_GPIO_{gpio_id}") gw_dev.status.update({gpio_var: mode}) async_dispatcher_send(hass, gw_dev.update_signal, gw_dev.status) hass.services.async_register( DOMAIN, SERVICE_SET_GPIO_MODE, set_gpio_mode, service_set_gpio_mode_schema ) async def set_led_mode(call): """Set the OpenTherm Gateway LED modes.""" gw_dev = hass.data[DATA_OPENTHERM_GW][DATA_GATEWAYS][call.data[ATTR_GW_ID]] led_id = call.data[ATTR_ID] led_mode = call.data[ATTR_MODE] mode = await gw_dev.gateway.set_led_mode(led_id, led_mode) led_var = getattr(gw_vars, f"OTGW_LED_{led_id}") gw_dev.status.update({led_var: mode}) async_dispatcher_send(hass, gw_dev.update_signal, gw_dev.status) hass.services.async_register( DOMAIN, SERVICE_SET_LED_MODE, 
set_led_mode, service_set_led_mode_schema ) async def set_max_mod(call): """Set the max modulation level.""" gw_dev = hass.data[DATA_OPENTHERM_GW][DATA_GATEWAYS][call.data[ATTR_GW_ID]] gw_var = gw_vars.DATA_SLAVE_MAX_RELATIVE_MOD level = call.data[ATTR_LEVEL] if level == -1: # Backend only clears setting on non-numeric values. level = "-" value = await gw_dev.gateway.set_max_relative_mod(level) gw_dev.status.update({gw_var: value}) async_dispatcher_send(hass, gw_dev.update_signal, gw_dev.status) hass.services.async_register( DOMAIN, SERVICE_SET_MAX_MOD, set_max_mod, service_set_max_mod_schema ) async def set_outside_temp(call): """Provide the outside temperature to the OpenTherm Gateway.""" gw_dev = hass.data[DATA_OPENTHERM_GW][DATA_GATEWAYS][call.data[ATTR_GW_ID]] gw_var = gw_vars.DATA_OUTSIDE_TEMP value = await gw_dev.gateway.set_outside_temp(call.data[ATTR_TEMPERATURE]) gw_dev.status.update({gw_var: value}) async_dispatcher_send(hass, gw_dev.update_signal, gw_dev.status) hass.services.async_register( DOMAIN, SERVICE_SET_OAT, set_outside_temp, service_set_oat_schema ) async def set_setback_temp(call): """Set the OpenTherm Gateway SetBack temperature.""" gw_dev = hass.data[DATA_OPENTHERM_GW][DATA_GATEWAYS][call.data[ATTR_GW_ID]] gw_var = gw_vars.OTGW_SB_TEMP value = await gw_dev.gateway.set_setback_temp(call.data[ATTR_TEMPERATURE]) gw_dev.status.update({gw_var: value}) async_dispatcher_send(hass, gw_dev.update_signal, gw_dev.status) hass.services.async_register( DOMAIN, SERVICE_SET_SB_TEMP, set_setback_temp, service_set_sb_temp_schema ) async def async_unload_entry(hass, entry): """Cleanup and disconnect from gateway.""" await asyncio.gather( hass.config_entries.async_forward_entry_unload(entry, COMP_BINARY_SENSOR), hass.config_entries.async_forward_entry_unload(entry, COMP_CLIMATE), hass.config_entries.async_forward_entry_unload(entry, COMP_SENSOR), ) gateway = hass.data[DATA_OPENTHERM_GW][DATA_GATEWAYS][entry.data[CONF_ID]] await gateway.cleanup() return True class OpenThermGatewayDevice: """OpenTherm Gateway device class.""" def __init__(self, hass, config_entry): """Initialize the OpenTherm Gateway.""" self.hass = hass self.device_path = config_entry.data[CONF_DEVICE] self.gw_id = config_entry.data[CONF_ID] self.name = config_entry.data[CONF_NAME] self.climate_config = config_entry.options self.config_entry_id = config_entry.entry_id self.status = {} self.update_signal = f"{DATA_OPENTHERM_GW}_{self.gw_id}_update" self.options_update_signal = f"{DATA_OPENTHERM_GW}_{self.gw_id}_options_update" self.gateway = pyotgw.pyotgw() self.gw_version = None async def cleanup(self, event=None): """Reset overrides on the gateway.""" await self.gateway.set_control_setpoint(0) await self.gateway.set_max_relative_mod("-") await self.gateway.disconnect() async def connect_and_subscribe(self): """Connect to serial device and subscribe report handler.""" self.status = await self.gateway.connect(self.hass.loop, self.device_path) version_string = self.status[gw_vars.OTGW].get(gw_vars.OTGW_ABOUT) self.gw_version = version_string[18:] if version_string else None _LOGGER.debug( "Connected to OpenTherm Gateway %s at %s", self.gw_version, self.device_path ) dev_reg = await async_get_dev_reg(self.hass) gw_dev = dev_reg.async_get_or_create( config_entry_id=self.config_entry_id, identifiers={(DOMAIN, self.gw_id)}, name=self.name, manufacturer="Schelte Bron", model="OpenTherm Gateway", sw_version=self.gw_version, ) if gw_dev.sw_version != self.gw_version: dev_reg.async_update_device(gw_dev.id, 
sw_version=self.gw_version) self.hass.bus.async_listen(EVENT_HOMEASSISTANT_STOP, self.cleanup) async def handle_report(status): """Handle reports from the OpenTherm Gateway.""" _LOGGER.debug("Received report: %s", status) self.status = status async_dispatcher_send(self.hass, self.update_signal, status) self.gateway.subscribe(handle_report)
"""The tests for the mochad light platform.""" import unittest.mock as mock import pytest from homeassistant.components import light from homeassistant.components.mochad import light as mochad from homeassistant.setup import async_setup_component @pytest.fixture(autouse=True) def pymochad_mock(): """Mock pymochad.""" with mock.patch("homeassistant.components.mochad.light.device") as device: yield device @pytest.fixture def light_mock(hass, brightness): """Mock light.""" controller_mock = mock.MagicMock() dev_dict = {"address": "a1", "name": "fake_light", "brightness_levels": brightness} return mochad.MochadLight(hass, controller_mock, dev_dict) async def test_setup_adds_proper_devices(hass): """Test if setup adds devices.""" good_config = { "mochad": {}, "light": { "platform": "mochad", "devices": [{"name": "Light1", "address": "a1"}], }, } assert await async_setup_component(hass, light.DOMAIN, good_config) @pytest.mark.parametrize( "brightness,expected", [(32, "on"), (256, "xdim 255"), (64, "xdim 63")] ) async def test_turn_on_with_no_brightness(light_mock, expected): """Test turn_on.""" light_mock.turn_on() light_mock.light.send_cmd.assert_called_once_with(expected) @pytest.mark.parametrize( "brightness,expected", [ (32, [mock.call("on"), mock.call("dim 25")]), (256, [mock.call("xdim 45")]), (64, [mock.call("xdim 11")]), ], ) async def test_turn_on_with_brightness(light_mock, expected): """Test turn_on.""" light_mock.turn_on(brightness=45) light_mock.light.send_cmd.assert_has_calls(expected) @pytest.mark.parametrize("brightness", [32]) async def test_turn_off(light_mock): """Test turn_off.""" light_mock.turn_off() light_mock.light.send_cmd.assert_called_once_with("off")
w1ll1am23/home-assistant
tests/components/mochad/test_light.py
homeassistant/components/opentherm_gw/__init__.py
"""Constants for the AVM Fritz!Box integration.""" import logging ATTR_STATE_BATTERY_LOW = "battery_low" ATTR_STATE_DEVICE_LOCKED = "device_locked" ATTR_STATE_HOLIDAY_MODE = "holiday_mode" ATTR_STATE_LOCKED = "locked" ATTR_STATE_SUMMER_MODE = "summer_mode" ATTR_STATE_WINDOW_OPEN = "window_open" ATTR_TEMPERATURE_UNIT = "temperature_unit" ATTR_TOTAL_CONSUMPTION = "total_consumption" ATTR_TOTAL_CONSUMPTION_UNIT = "total_consumption_unit" CONF_CONNECTIONS = "connections" DEFAULT_HOST = "fritz.box" DEFAULT_USERNAME = "admin" DOMAIN = "fritzbox" LOGGER = logging.getLogger(__package__) PLATFORMS = ["binary_sensor", "climate", "switch", "sensor"]
"""The tests for the mochad light platform.""" import unittest.mock as mock import pytest from homeassistant.components import light from homeassistant.components.mochad import light as mochad from homeassistant.setup import async_setup_component @pytest.fixture(autouse=True) def pymochad_mock(): """Mock pymochad.""" with mock.patch("homeassistant.components.mochad.light.device") as device: yield device @pytest.fixture def light_mock(hass, brightness): """Mock light.""" controller_mock = mock.MagicMock() dev_dict = {"address": "a1", "name": "fake_light", "brightness_levels": brightness} return mochad.MochadLight(hass, controller_mock, dev_dict) async def test_setup_adds_proper_devices(hass): """Test if setup adds devices.""" good_config = { "mochad": {}, "light": { "platform": "mochad", "devices": [{"name": "Light1", "address": "a1"}], }, } assert await async_setup_component(hass, light.DOMAIN, good_config) @pytest.mark.parametrize( "brightness,expected", [(32, "on"), (256, "xdim 255"), (64, "xdim 63")] ) async def test_turn_on_with_no_brightness(light_mock, expected): """Test turn_on.""" light_mock.turn_on() light_mock.light.send_cmd.assert_called_once_with(expected) @pytest.mark.parametrize( "brightness,expected", [ (32, [mock.call("on"), mock.call("dim 25")]), (256, [mock.call("xdim 45")]), (64, [mock.call("xdim 11")]), ], ) async def test_turn_on_with_brightness(light_mock, expected): """Test turn_on.""" light_mock.turn_on(brightness=45) light_mock.light.send_cmd.assert_has_calls(expected) @pytest.mark.parametrize("brightness", [32]) async def test_turn_off(light_mock): """Test turn_off.""" light_mock.turn_off() light_mock.light.send_cmd.assert_called_once_with("off")
w1ll1am23/home-assistant
tests/components/mochad/test_light.py
homeassistant/components/fritzbox/const.py
"""Support for ESPHome lights.""" from __future__ import annotations from aioesphomeapi import LightInfo, LightState from homeassistant.components.light import ( ATTR_BRIGHTNESS, ATTR_COLOR_TEMP, ATTR_EFFECT, ATTR_FLASH, ATTR_HS_COLOR, ATTR_TRANSITION, ATTR_WHITE_VALUE, FLASH_LONG, FLASH_SHORT, SUPPORT_BRIGHTNESS, SUPPORT_COLOR, SUPPORT_COLOR_TEMP, SUPPORT_EFFECT, SUPPORT_FLASH, SUPPORT_TRANSITION, SUPPORT_WHITE_VALUE, LightEntity, ) from homeassistant.config_entries import ConfigEntry from homeassistant.helpers.typing import HomeAssistantType import homeassistant.util.color as color_util from . import EsphomeEntity, esphome_state_property, platform_async_setup_entry FLASH_LENGTHS = {FLASH_SHORT: 2, FLASH_LONG: 10} async def async_setup_entry( hass: HomeAssistantType, entry: ConfigEntry, async_add_entities ) -> None: """Set up ESPHome lights based on a config entry.""" await platform_async_setup_entry( hass, entry, async_add_entities, component_key="light", info_type=LightInfo, entity_type=EsphomeLight, state_type=LightState, ) class EsphomeLight(EsphomeEntity, LightEntity): """A switch implementation for ESPHome.""" @property def _static_info(self) -> LightInfo: return super()._static_info @property def _state(self) -> LightState | None: return super()._state # https://github.com/PyCQA/pylint/issues/3150 for all @esphome_state_property # pylint: disable=invalid-overridden-method @esphome_state_property def is_on(self) -> bool | None: """Return true if the switch is on.""" return self._state.state async def async_turn_on(self, **kwargs) -> None: """Turn the entity on.""" data = {"key": self._static_info.key, "state": True} if ATTR_HS_COLOR in kwargs: hue, sat = kwargs[ATTR_HS_COLOR] red, green, blue = color_util.color_hsv_to_RGB(hue, sat, 100) data["rgb"] = (red / 255, green / 255, blue / 255) if ATTR_FLASH in kwargs: data["flash_length"] = FLASH_LENGTHS[kwargs[ATTR_FLASH]] if ATTR_TRANSITION in kwargs: data["transition_length"] = kwargs[ATTR_TRANSITION] if ATTR_BRIGHTNESS in kwargs: data["brightness"] = kwargs[ATTR_BRIGHTNESS] / 255 if ATTR_COLOR_TEMP in kwargs: data["color_temperature"] = kwargs[ATTR_COLOR_TEMP] if ATTR_EFFECT in kwargs: data["effect"] = kwargs[ATTR_EFFECT] if ATTR_WHITE_VALUE in kwargs: data["white"] = kwargs[ATTR_WHITE_VALUE] / 255 await self._client.light_command(**data) async def async_turn_off(self, **kwargs) -> None: """Turn the entity off.""" data = {"key": self._static_info.key, "state": False} if ATTR_FLASH in kwargs: data["flash_length"] = FLASH_LENGTHS[kwargs[ATTR_FLASH]] if ATTR_TRANSITION in kwargs: data["transition_length"] = kwargs[ATTR_TRANSITION] await self._client.light_command(**data) @esphome_state_property def brightness(self) -> int | None: """Return the brightness of this light between 0..255.""" return round(self._state.brightness * 255) @esphome_state_property def hs_color(self) -> tuple[float, float] | None: """Return the hue and saturation color value [float, float].""" return color_util.color_RGB_to_hs( self._state.red * 255, self._state.green * 255, self._state.blue * 255 ) @esphome_state_property def color_temp(self) -> float | None: """Return the CT color value in mireds.""" return self._state.color_temperature @esphome_state_property def white_value(self) -> int | None: """Return the white value of this light between 0..255.""" return round(self._state.white * 255) @esphome_state_property def effect(self) -> str | None: """Return the current effect.""" return self._state.effect @property def supported_features(self) -> int: """Flag 
supported features.""" flags = SUPPORT_FLASH if self._static_info.supports_brightness: flags |= SUPPORT_BRIGHTNESS flags |= SUPPORT_TRANSITION if self._static_info.supports_rgb: flags |= SUPPORT_COLOR if self._static_info.supports_white_value: flags |= SUPPORT_WHITE_VALUE if self._static_info.supports_color_temperature: flags |= SUPPORT_COLOR_TEMP if self._static_info.effects: flags |= SUPPORT_EFFECT return flags @property def effect_list(self) -> list[str]: """Return the list of supported effects.""" return self._static_info.effects @property def min_mireds(self) -> float: """Return the coldest color_temp that this light supports.""" return self._static_info.min_mireds @property def max_mireds(self) -> float: """Return the warmest color_temp that this light supports.""" return self._static_info.max_mireds
"""The tests for the mochad light platform.""" import unittest.mock as mock import pytest from homeassistant.components import light from homeassistant.components.mochad import light as mochad from homeassistant.setup import async_setup_component @pytest.fixture(autouse=True) def pymochad_mock(): """Mock pymochad.""" with mock.patch("homeassistant.components.mochad.light.device") as device: yield device @pytest.fixture def light_mock(hass, brightness): """Mock light.""" controller_mock = mock.MagicMock() dev_dict = {"address": "a1", "name": "fake_light", "brightness_levels": brightness} return mochad.MochadLight(hass, controller_mock, dev_dict) async def test_setup_adds_proper_devices(hass): """Test if setup adds devices.""" good_config = { "mochad": {}, "light": { "platform": "mochad", "devices": [{"name": "Light1", "address": "a1"}], }, } assert await async_setup_component(hass, light.DOMAIN, good_config) @pytest.mark.parametrize( "brightness,expected", [(32, "on"), (256, "xdim 255"), (64, "xdim 63")] ) async def test_turn_on_with_no_brightness(light_mock, expected): """Test turn_on.""" light_mock.turn_on() light_mock.light.send_cmd.assert_called_once_with(expected) @pytest.mark.parametrize( "brightness,expected", [ (32, [mock.call("on"), mock.call("dim 25")]), (256, [mock.call("xdim 45")]), (64, [mock.call("xdim 11")]), ], ) async def test_turn_on_with_brightness(light_mock, expected): """Test turn_on.""" light_mock.turn_on(brightness=45) light_mock.light.send_cmd.assert_has_calls(expected) @pytest.mark.parametrize("brightness", [32]) async def test_turn_off(light_mock): """Test turn_off.""" light_mock.turn_off() light_mock.light.send_cmd.assert_called_once_with("off")
w1ll1am23/home-assistant
tests/components/mochad/test_light.py
homeassistant/components/esphome/light.py
"""This platform provides support for sensor data from RainMachine.""" from functools import partial from typing import Callable from regenmaschine.controller import Controller from homeassistant.components.sensor import SensorEntity from homeassistant.config_entries import ConfigEntry from homeassistant.const import TEMP_CELSIUS, VOLUME_CUBIC_METERS from homeassistant.core import HomeAssistant, callback from homeassistant.helpers.update_coordinator import DataUpdateCoordinator from . import RainMachineEntity from .const import ( DATA_CONTROLLER, DATA_COORDINATOR, DATA_PROVISION_SETTINGS, DATA_RESTRICTIONS_UNIVERSAL, DOMAIN, ) TYPE_FLOW_SENSOR_CLICK_M3 = "flow_sensor_clicks_cubic_meter" TYPE_FLOW_SENSOR_CONSUMED_LITERS = "flow_sensor_consumed_liters" TYPE_FLOW_SENSOR_START_INDEX = "flow_sensor_start_index" TYPE_FLOW_SENSOR_WATERING_CLICKS = "flow_sensor_watering_clicks" TYPE_FREEZE_TEMP = "freeze_protect_temp" SENSORS = { TYPE_FLOW_SENSOR_CLICK_M3: ( "Flow Sensor Clicks", "mdi:water-pump", f"clicks/{VOLUME_CUBIC_METERS}", None, False, DATA_PROVISION_SETTINGS, ), TYPE_FLOW_SENSOR_CONSUMED_LITERS: ( "Flow Sensor Consumed Liters", "mdi:water-pump", "liter", None, False, DATA_PROVISION_SETTINGS, ), TYPE_FLOW_SENSOR_START_INDEX: ( "Flow Sensor Start Index", "mdi:water-pump", "index", None, False, DATA_PROVISION_SETTINGS, ), TYPE_FLOW_SENSOR_WATERING_CLICKS: ( "Flow Sensor Clicks", "mdi:water-pump", "clicks", None, False, DATA_PROVISION_SETTINGS, ), TYPE_FREEZE_TEMP: ( "Freeze Protect Temperature", "mdi:thermometer", TEMP_CELSIUS, "temperature", True, DATA_RESTRICTIONS_UNIVERSAL, ), } async def async_setup_entry( hass: HomeAssistant, entry: ConfigEntry, async_add_entities: Callable ) -> None: """Set up RainMachine sensors based on a config entry.""" controller = hass.data[DOMAIN][DATA_CONTROLLER][entry.entry_id] coordinators = hass.data[DOMAIN][DATA_COORDINATOR][entry.entry_id] @callback def async_get_sensor(api_category: str) -> partial: """Generate the appropriate sensor object for an API category.""" if api_category == DATA_PROVISION_SETTINGS: return partial( ProvisionSettingsSensor, coordinators[DATA_PROVISION_SETTINGS], ) return partial( UniversalRestrictionsSensor, coordinators[DATA_RESTRICTIONS_UNIVERSAL], ) async_add_entities( [ async_get_sensor(api_category)( controller, sensor_type, name, icon, unit, device_class, enabled_by_default, ) for ( sensor_type, (name, icon, unit, device_class, enabled_by_default, api_category), ) in SENSORS.items() ] ) class RainMachineSensor(RainMachineEntity, SensorEntity): """Define a general RainMachine sensor.""" def __init__( self, coordinator: DataUpdateCoordinator, controller: Controller, sensor_type: str, name: str, icon: str, unit: str, device_class: str, enabled_by_default: bool, ) -> None: """Initialize.""" super().__init__(coordinator, controller) self._device_class = device_class self._enabled_by_default = enabled_by_default self._icon = icon self._name = name self._sensor_type = sensor_type self._state = None self._unit = unit @property def entity_registry_enabled_default(self) -> bool: """Determine whether an entity is enabled by default.""" return self._enabled_by_default @property def icon(self) -> str: """Return the icon.""" return self._icon @property def state(self) -> str: """Return the name of the entity.""" return self._state @property def unique_id(self) -> str: """Return a unique, Home Assistant friendly identifier for this entity.""" return f"{self._unique_id}_{self._sensor_type}" @property def unit_of_measurement(self) -> str: 
"""Return the unit the value is expressed in.""" return self._unit class ProvisionSettingsSensor(RainMachineSensor): """Define a sensor that handles provisioning data.""" @callback def update_from_latest_data(self) -> None: """Update the state.""" if self._sensor_type == TYPE_FLOW_SENSOR_CLICK_M3: self._state = self.coordinator.data["system"].get( "flowSensorClicksPerCubicMeter" ) elif self._sensor_type == TYPE_FLOW_SENSOR_CONSUMED_LITERS: clicks = self.coordinator.data["system"].get("flowSensorWateringClicks") clicks_per_m3 = self.coordinator.data["system"].get( "flowSensorClicksPerCubicMeter" ) if clicks and clicks_per_m3: self._state = (clicks * 1000) / clicks_per_m3 else: self._state = None elif self._sensor_type == TYPE_FLOW_SENSOR_START_INDEX: self._state = self.coordinator.data["system"].get("flowSensorStartIndex") elif self._sensor_type == TYPE_FLOW_SENSOR_WATERING_CLICKS: self._state = self.coordinator.data["system"].get( "flowSensorWateringClicks" ) class UniversalRestrictionsSensor(RainMachineSensor): """Define a sensor that handles universal restrictions data.""" @callback def update_from_latest_data(self) -> None: """Update the state.""" if self._sensor_type == TYPE_FREEZE_TEMP: self._state = self.coordinator.data["freezeProtectTemp"]
"""The tests for the mochad light platform.""" import unittest.mock as mock import pytest from homeassistant.components import light from homeassistant.components.mochad import light as mochad from homeassistant.setup import async_setup_component @pytest.fixture(autouse=True) def pymochad_mock(): """Mock pymochad.""" with mock.patch("homeassistant.components.mochad.light.device") as device: yield device @pytest.fixture def light_mock(hass, brightness): """Mock light.""" controller_mock = mock.MagicMock() dev_dict = {"address": "a1", "name": "fake_light", "brightness_levels": brightness} return mochad.MochadLight(hass, controller_mock, dev_dict) async def test_setup_adds_proper_devices(hass): """Test if setup adds devices.""" good_config = { "mochad": {}, "light": { "platform": "mochad", "devices": [{"name": "Light1", "address": "a1"}], }, } assert await async_setup_component(hass, light.DOMAIN, good_config) @pytest.mark.parametrize( "brightness,expected", [(32, "on"), (256, "xdim 255"), (64, "xdim 63")] ) async def test_turn_on_with_no_brightness(light_mock, expected): """Test turn_on.""" light_mock.turn_on() light_mock.light.send_cmd.assert_called_once_with(expected) @pytest.mark.parametrize( "brightness,expected", [ (32, [mock.call("on"), mock.call("dim 25")]), (256, [mock.call("xdim 45")]), (64, [mock.call("xdim 11")]), ], ) async def test_turn_on_with_brightness(light_mock, expected): """Test turn_on.""" light_mock.turn_on(brightness=45) light_mock.light.send_cmd.assert_has_calls(expected) @pytest.mark.parametrize("brightness", [32]) async def test_turn_off(light_mock): """Test turn_off.""" light_mock.turn_off() light_mock.light.send_cmd.assert_called_once_with("off")
w1ll1am23/home-assistant
tests/components/mochad/test_light.py
homeassistant/components/rainmachine/sensor.py
"""Define a config flow manager for AirVisual.""" import asyncio from pyairvisual import CloudAPI, NodeSamba from pyairvisual.errors import ( AirVisualError, InvalidKeyError, NodeProError, NotFoundError, ) import voluptuous as vol from homeassistant import config_entries from homeassistant.const import ( CONF_API_KEY, CONF_IP_ADDRESS, CONF_LATITUDE, CONF_LONGITUDE, CONF_PASSWORD, CONF_SHOW_ON_MAP, CONF_STATE, ) from homeassistant.core import callback from homeassistant.helpers import aiohttp_client, config_validation as cv from . import async_get_geography_id from .const import ( CONF_CITY, CONF_COUNTRY, CONF_INTEGRATION_TYPE, DOMAIN, INTEGRATION_TYPE_GEOGRAPHY_COORDS, INTEGRATION_TYPE_GEOGRAPHY_NAME, INTEGRATION_TYPE_NODE_PRO, LOGGER, ) API_KEY_DATA_SCHEMA = vol.Schema({vol.Required(CONF_API_KEY): cv.string}) GEOGRAPHY_NAME_SCHEMA = API_KEY_DATA_SCHEMA.extend( { vol.Required(CONF_CITY): cv.string, vol.Required(CONF_STATE): cv.string, vol.Required(CONF_COUNTRY): cv.string, } ) NODE_PRO_SCHEMA = vol.Schema( {vol.Required(CONF_IP_ADDRESS): str, vol.Required(CONF_PASSWORD): cv.string} ) PICK_INTEGRATION_TYPE_SCHEMA = vol.Schema( { vol.Required("type"): vol.In( [ INTEGRATION_TYPE_GEOGRAPHY_COORDS, INTEGRATION_TYPE_GEOGRAPHY_NAME, INTEGRATION_TYPE_NODE_PRO, ] ) } ) class AirVisualFlowHandler(config_entries.ConfigFlow, domain=DOMAIN): """Handle an AirVisual config flow.""" VERSION = 2 CONNECTION_CLASS = config_entries.CONN_CLASS_CLOUD_POLL def __init__(self): """Initialize the config flow.""" self._entry_data_for_reauth = None self._geo_id = None @property def geography_coords_schema(self): """Return the data schema for the cloud API.""" return API_KEY_DATA_SCHEMA.extend( { vol.Required( CONF_LATITUDE, default=self.hass.config.latitude ): cv.latitude, vol.Required( CONF_LONGITUDE, default=self.hass.config.longitude ): cv.longitude, } ) async def _async_finish_geography(self, user_input, integration_type): """Validate a Cloud API key.""" websession = aiohttp_client.async_get_clientsession(self.hass) cloud_api = CloudAPI(user_input[CONF_API_KEY], session=websession) # If this is the first (and only the first) time we've seen this API key, check # that it's valid: valid_keys = self.hass.data.setdefault("airvisual_checked_api_keys", set()) valid_keys_lock = self.hass.data.setdefault( "airvisual_checked_api_keys_lock", asyncio.Lock() ) if integration_type == INTEGRATION_TYPE_GEOGRAPHY_COORDS: coro = cloud_api.air_quality.nearest_city() error_schema = self.geography_coords_schema error_step = "geography_by_coords" else: coro = cloud_api.air_quality.city( user_input[CONF_CITY], user_input[CONF_STATE], user_input[CONF_COUNTRY] ) error_schema = GEOGRAPHY_NAME_SCHEMA error_step = "geography_by_name" async with valid_keys_lock: if user_input[CONF_API_KEY] not in valid_keys: try: await coro except InvalidKeyError: return self.async_show_form( step_id=error_step, data_schema=error_schema, errors={CONF_API_KEY: "invalid_api_key"}, ) except NotFoundError: return self.async_show_form( step_id=error_step, data_schema=error_schema, errors={CONF_CITY: "location_not_found"}, ) except AirVisualError as err: LOGGER.error(err) return self.async_show_form( step_id=error_step, data_schema=error_schema, errors={"base": "unknown"}, ) valid_keys.add(user_input[CONF_API_KEY]) existing_entry = await self.async_set_unique_id(self._geo_id) if existing_entry: self.hass.config_entries.async_update_entry(existing_entry, data=user_input) return self.async_abort(reason="reauth_successful") return self.async_create_entry( 
title=f"Cloud API ({self._geo_id})", data={**user_input, CONF_INTEGRATION_TYPE: integration_type}, ) async def _async_init_geography(self, user_input, integration_type): """Handle the initialization of the integration via the cloud API.""" self._geo_id = async_get_geography_id(user_input) await self._async_set_unique_id(self._geo_id) self._abort_if_unique_id_configured() return await self._async_finish_geography(user_input, integration_type) async def _async_set_unique_id(self, unique_id): """Set the unique ID of the config flow and abort if it already exists.""" await self.async_set_unique_id(unique_id) self._abort_if_unique_id_configured() @staticmethod @callback def async_get_options_flow(config_entry): """Define the config flow to handle options.""" return AirVisualOptionsFlowHandler(config_entry) async def async_step_geography_by_coords(self, user_input=None): """Handle the initialization of the cloud API based on latitude/longitude.""" if not user_input: return self.async_show_form( step_id="geography_by_coords", data_schema=self.geography_coords_schema ) return await self._async_init_geography( user_input, INTEGRATION_TYPE_GEOGRAPHY_COORDS ) async def async_step_geography_by_name(self, user_input=None): """Handle the initialization of the cloud API based on city/state/country.""" if not user_input: return self.async_show_form( step_id="geography_by_name", data_schema=GEOGRAPHY_NAME_SCHEMA ) return await self._async_init_geography( user_input, INTEGRATION_TYPE_GEOGRAPHY_NAME ) async def async_step_node_pro(self, user_input=None): """Handle the initialization of the integration with a Node/Pro.""" if not user_input: return self.async_show_form(step_id="node_pro", data_schema=NODE_PRO_SCHEMA) await self._async_set_unique_id(user_input[CONF_IP_ADDRESS]) node = NodeSamba(user_input[CONF_IP_ADDRESS], user_input[CONF_PASSWORD]) try: await node.async_connect() except NodeProError as err: LOGGER.error("Error connecting to Node/Pro unit: %s", err) return self.async_show_form( step_id="node_pro", data_schema=NODE_PRO_SCHEMA, errors={CONF_IP_ADDRESS: "cannot_connect"}, ) await node.async_disconnect() return self.async_create_entry( title=f"Node/Pro ({user_input[CONF_IP_ADDRESS]})", data={**user_input, CONF_INTEGRATION_TYPE: INTEGRATION_TYPE_NODE_PRO}, ) async def async_step_reauth(self, data): """Handle configuration by re-auth.""" self._entry_data_for_reauth = data self._geo_id = async_get_geography_id(data) return await self.async_step_reauth_confirm() async def async_step_reauth_confirm(self, user_input=None): """Handle re-auth completion.""" if not user_input: return self.async_show_form( step_id="reauth_confirm", data_schema=API_KEY_DATA_SCHEMA ) conf = {CONF_API_KEY: user_input[CONF_API_KEY], **self._entry_data_for_reauth} return await self._async_finish_geography( conf, self._entry_data_for_reauth[CONF_INTEGRATION_TYPE] ) async def async_step_user(self, user_input=None): """Handle the start of the config flow.""" if not user_input: return self.async_show_form( step_id="user", data_schema=PICK_INTEGRATION_TYPE_SCHEMA ) if user_input["type"] == INTEGRATION_TYPE_GEOGRAPHY_COORDS: return await self.async_step_geography_by_coords() if user_input["type"] == INTEGRATION_TYPE_GEOGRAPHY_NAME: return await self.async_step_geography_by_name() return await self.async_step_node_pro() class AirVisualOptionsFlowHandler(config_entries.OptionsFlow): """Handle an AirVisual options flow.""" def __init__(self, config_entry): """Initialize.""" self.config_entry = config_entry async def async_step_init(self, 
user_input=None): """Manage the options.""" if user_input is not None: return self.async_create_entry(title="", data=user_input) return self.async_show_form( step_id="init", data_schema=vol.Schema( { vol.Required( CONF_SHOW_ON_MAP, default=self.config_entry.options.get(CONF_SHOW_ON_MAP), ): bool } ), )
"""The tests for the mochad light platform.""" import unittest.mock as mock import pytest from homeassistant.components import light from homeassistant.components.mochad import light as mochad from homeassistant.setup import async_setup_component @pytest.fixture(autouse=True) def pymochad_mock(): """Mock pymochad.""" with mock.patch("homeassistant.components.mochad.light.device") as device: yield device @pytest.fixture def light_mock(hass, brightness): """Mock light.""" controller_mock = mock.MagicMock() dev_dict = {"address": "a1", "name": "fake_light", "brightness_levels": brightness} return mochad.MochadLight(hass, controller_mock, dev_dict) async def test_setup_adds_proper_devices(hass): """Test if setup adds devices.""" good_config = { "mochad": {}, "light": { "platform": "mochad", "devices": [{"name": "Light1", "address": "a1"}], }, } assert await async_setup_component(hass, light.DOMAIN, good_config) @pytest.mark.parametrize( "brightness,expected", [(32, "on"), (256, "xdim 255"), (64, "xdim 63")] ) async def test_turn_on_with_no_brightness(light_mock, expected): """Test turn_on.""" light_mock.turn_on() light_mock.light.send_cmd.assert_called_once_with(expected) @pytest.mark.parametrize( "brightness,expected", [ (32, [mock.call("on"), mock.call("dim 25")]), (256, [mock.call("xdim 45")]), (64, [mock.call("xdim 11")]), ], ) async def test_turn_on_with_brightness(light_mock, expected): """Test turn_on.""" light_mock.turn_on(brightness=45) light_mock.light.send_cmd.assert_has_calls(expected) @pytest.mark.parametrize("brightness", [32]) async def test_turn_off(light_mock): """Test turn_off.""" light_mock.turn_off() light_mock.light.send_cmd.assert_called_once_with("off")
w1ll1am23/home-assistant
tests/components/mochad/test_light.py
homeassistant/components/airvisual/config_flow.py
"""Brother helpers functions.""" import logging import pysnmp.hlapi.asyncio as hlapi from pysnmp.hlapi.asyncio.cmdgen import lcd from homeassistant.const import EVENT_HOMEASSISTANT_STOP from homeassistant.core import callback from homeassistant.helpers import singleton from .const import DOMAIN, SNMP _LOGGER = logging.getLogger(__name__) @singleton.singleton("snmp_engine") def get_snmp_engine(hass): """Get SNMP engine.""" _LOGGER.debug("Creating SNMP engine") snmp_engine = hlapi.SnmpEngine() @callback def shutdown_listener(ev): if hass.data.get(DOMAIN): _LOGGER.debug("Unconfiguring SNMP engine") lcd.unconfigure(hass.data[DOMAIN][SNMP], None) hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, shutdown_listener) return snmp_engine
"""The tests for the mochad light platform.""" import unittest.mock as mock import pytest from homeassistant.components import light from homeassistant.components.mochad import light as mochad from homeassistant.setup import async_setup_component @pytest.fixture(autouse=True) def pymochad_mock(): """Mock pymochad.""" with mock.patch("homeassistant.components.mochad.light.device") as device: yield device @pytest.fixture def light_mock(hass, brightness): """Mock light.""" controller_mock = mock.MagicMock() dev_dict = {"address": "a1", "name": "fake_light", "brightness_levels": brightness} return mochad.MochadLight(hass, controller_mock, dev_dict) async def test_setup_adds_proper_devices(hass): """Test if setup adds devices.""" good_config = { "mochad": {}, "light": { "platform": "mochad", "devices": [{"name": "Light1", "address": "a1"}], }, } assert await async_setup_component(hass, light.DOMAIN, good_config) @pytest.mark.parametrize( "brightness,expected", [(32, "on"), (256, "xdim 255"), (64, "xdim 63")] ) async def test_turn_on_with_no_brightness(light_mock, expected): """Test turn_on.""" light_mock.turn_on() light_mock.light.send_cmd.assert_called_once_with(expected) @pytest.mark.parametrize( "brightness,expected", [ (32, [mock.call("on"), mock.call("dim 25")]), (256, [mock.call("xdim 45")]), (64, [mock.call("xdim 11")]), ], ) async def test_turn_on_with_brightness(light_mock, expected): """Test turn_on.""" light_mock.turn_on(brightness=45) light_mock.light.send_cmd.assert_has_calls(expected) @pytest.mark.parametrize("brightness", [32]) async def test_turn_off(light_mock): """Test turn_off.""" light_mock.turn_off() light_mock.light.send_cmd.assert_called_once_with("off")
w1ll1am23/home-assistant
tests/components/mochad/test_light.py
homeassistant/components/brother/utils.py
"""The DirecTV integration.""" from __future__ import annotations import asyncio from datetime import timedelta from typing import Any from directv import DIRECTV, DIRECTVError from homeassistant.config_entries import ConfigEntry from homeassistant.const import ATTR_NAME, CONF_HOST from homeassistant.core import HomeAssistant from homeassistant.exceptions import ConfigEntryNotReady from homeassistant.helpers import config_validation as cv from homeassistant.helpers.aiohttp_client import async_get_clientsession from homeassistant.helpers.entity import Entity from .const import ( ATTR_IDENTIFIERS, ATTR_MANUFACTURER, ATTR_MODEL, ATTR_SOFTWARE_VERSION, ATTR_VIA_DEVICE, DOMAIN, ) CONFIG_SCHEMA = cv.deprecated(DOMAIN) PLATFORMS = ["media_player", "remote"] SCAN_INTERVAL = timedelta(seconds=30) async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool: """Set up DirecTV from a config entry.""" dtv = DIRECTV(entry.data[CONF_HOST], session=async_get_clientsession(hass)) try: await dtv.update() except DIRECTVError as err: raise ConfigEntryNotReady from err hass.data.setdefault(DOMAIN, {}) hass.data[DOMAIN][entry.entry_id] = dtv for platform in PLATFORMS: hass.async_create_task( hass.config_entries.async_forward_entry_setup(entry, platform) ) return True async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool: """Unload a config entry.""" unload_ok = all( await asyncio.gather( *[ hass.config_entries.async_forward_entry_unload(entry, platform) for platform in PLATFORMS ] ) ) if unload_ok: hass.data[DOMAIN].pop(entry.entry_id) return unload_ok class DIRECTVEntity(Entity): """Defines a base DirecTV entity.""" def __init__(self, *, dtv: DIRECTV, name: str, address: str = "0") -> None: """Initialize the DirecTV entity.""" self._address = address self._device_id = address if address != "0" else dtv.device.info.receiver_id self._is_client = address != "0" self._name = name self.dtv = dtv @property def name(self) -> str: """Return the name of the entity.""" return self._name @property def device_info(self) -> dict[str, Any]: """Return device information about this DirecTV receiver.""" return { ATTR_IDENTIFIERS: {(DOMAIN, self._device_id)}, ATTR_NAME: self.name, ATTR_MANUFACTURER: self.dtv.device.info.brand, ATTR_MODEL: None, ATTR_SOFTWARE_VERSION: self.dtv.device.info.version, ATTR_VIA_DEVICE: (DOMAIN, self.dtv.device.info.receiver_id), }
"""The tests for the mochad light platform.""" import unittest.mock as mock import pytest from homeassistant.components import light from homeassistant.components.mochad import light as mochad from homeassistant.setup import async_setup_component @pytest.fixture(autouse=True) def pymochad_mock(): """Mock pymochad.""" with mock.patch("homeassistant.components.mochad.light.device") as device: yield device @pytest.fixture def light_mock(hass, brightness): """Mock light.""" controller_mock = mock.MagicMock() dev_dict = {"address": "a1", "name": "fake_light", "brightness_levels": brightness} return mochad.MochadLight(hass, controller_mock, dev_dict) async def test_setup_adds_proper_devices(hass): """Test if setup adds devices.""" good_config = { "mochad": {}, "light": { "platform": "mochad", "devices": [{"name": "Light1", "address": "a1"}], }, } assert await async_setup_component(hass, light.DOMAIN, good_config) @pytest.mark.parametrize( "brightness,expected", [(32, "on"), (256, "xdim 255"), (64, "xdim 63")] ) async def test_turn_on_with_no_brightness(light_mock, expected): """Test turn_on.""" light_mock.turn_on() light_mock.light.send_cmd.assert_called_once_with(expected) @pytest.mark.parametrize( "brightness,expected", [ (32, [mock.call("on"), mock.call("dim 25")]), (256, [mock.call("xdim 45")]), (64, [mock.call("xdim 11")]), ], ) async def test_turn_on_with_brightness(light_mock, expected): """Test turn_on.""" light_mock.turn_on(brightness=45) light_mock.light.send_cmd.assert_has_calls(expected) @pytest.mark.parametrize("brightness", [32]) async def test_turn_off(light_mock): """Test turn_off.""" light_mock.turn_off() light_mock.light.send_cmd.assert_called_once_with("off")
w1ll1am23/home-assistant
tests/components/mochad/test_light.py
homeassistant/components/directv/__init__.py
"""Support for interface with a Gree climate systems.""" from __future__ import annotations from homeassistant.components.switch import DEVICE_CLASS_SWITCH, SwitchEntity from homeassistant.helpers.device_registry import CONNECTION_NETWORK_MAC from homeassistant.helpers.update_coordinator import CoordinatorEntity from .const import COORDINATOR, DOMAIN async def async_setup_entry(hass, config_entry, async_add_entities): """Set up the Gree HVAC device from a config entry.""" async_add_entities( [ GreeSwitchEntity(coordinator) for coordinator in hass.data[DOMAIN][COORDINATOR] ] ) class GreeSwitchEntity(CoordinatorEntity, SwitchEntity): """Representation of a Gree HVAC device.""" def __init__(self, coordinator): """Initialize the Gree device.""" super().__init__(coordinator) self._name = coordinator.device.device_info.name + " Panel Light" self._mac = coordinator.device.device_info.mac @property def name(self) -> str: """Return the name of the device.""" return self._name @property def unique_id(self) -> str: """Return a unique id for the device.""" return f"{self._mac}-panel-light" @property def icon(self) -> str | None: """Return the icon for the device.""" return "mdi:lightbulb" @property def device_info(self): """Return device specific attributes.""" return { "name": self._name, "identifiers": {(DOMAIN, self._mac)}, "manufacturer": "Gree", "connections": {(CONNECTION_NETWORK_MAC, self._mac)}, } @property def device_class(self): """Return the class of this device, from component DEVICE_CLASSES.""" return DEVICE_CLASS_SWITCH @property def is_on(self) -> bool: """Return if the light is turned on.""" return self.coordinator.device.light async def async_turn_on(self, **kwargs): """Turn the entity on.""" self.coordinator.device.light = True await self.coordinator.push_state_update() self.async_write_ha_state() async def async_turn_off(self, **kwargs): """Turn the entity off.""" self.coordinator.device.light = False await self.coordinator.push_state_update() self.async_write_ha_state()
"""The tests for the mochad light platform.""" import unittest.mock as mock import pytest from homeassistant.components import light from homeassistant.components.mochad import light as mochad from homeassistant.setup import async_setup_component @pytest.fixture(autouse=True) def pymochad_mock(): """Mock pymochad.""" with mock.patch("homeassistant.components.mochad.light.device") as device: yield device @pytest.fixture def light_mock(hass, brightness): """Mock light.""" controller_mock = mock.MagicMock() dev_dict = {"address": "a1", "name": "fake_light", "brightness_levels": brightness} return mochad.MochadLight(hass, controller_mock, dev_dict) async def test_setup_adds_proper_devices(hass): """Test if setup adds devices.""" good_config = { "mochad": {}, "light": { "platform": "mochad", "devices": [{"name": "Light1", "address": "a1"}], }, } assert await async_setup_component(hass, light.DOMAIN, good_config) @pytest.mark.parametrize( "brightness,expected", [(32, "on"), (256, "xdim 255"), (64, "xdim 63")] ) async def test_turn_on_with_no_brightness(light_mock, expected): """Test turn_on.""" light_mock.turn_on() light_mock.light.send_cmd.assert_called_once_with(expected) @pytest.mark.parametrize( "brightness,expected", [ (32, [mock.call("on"), mock.call("dim 25")]), (256, [mock.call("xdim 45")]), (64, [mock.call("xdim 11")]), ], ) async def test_turn_on_with_brightness(light_mock, expected): """Test turn_on.""" light_mock.turn_on(brightness=45) light_mock.light.send_cmd.assert_has_calls(expected) @pytest.mark.parametrize("brightness", [32]) async def test_turn_off(light_mock): """Test turn_off.""" light_mock.turn_off() light_mock.light.send_cmd.assert_called_once_with("off")
w1ll1am23/home-assistant
tests/components/mochad/test_light.py
homeassistant/components/gree/switch.py
"""Implement the Google Smart Home traits.""" from __future__ import annotations import logging from homeassistant.components import ( alarm_control_panel, binary_sensor, camera, cover, fan, group, input_boolean, input_select, light, lock, media_player, scene, script, sensor, switch, vacuum, ) from homeassistant.components.climate import const as climate from homeassistant.components.humidifier import const as humidifier from homeassistant.const import ( ATTR_ASSUMED_STATE, ATTR_CODE, ATTR_DEVICE_CLASS, ATTR_ENTITY_ID, ATTR_MODE, ATTR_SUPPORTED_FEATURES, ATTR_TEMPERATURE, CAST_APP_ID_HOMEASSISTANT, SERVICE_ALARM_ARM_AWAY, SERVICE_ALARM_ARM_CUSTOM_BYPASS, SERVICE_ALARM_ARM_HOME, SERVICE_ALARM_ARM_NIGHT, SERVICE_ALARM_DISARM, SERVICE_ALARM_TRIGGER, SERVICE_TURN_OFF, SERVICE_TURN_ON, STATE_ALARM_ARMED_AWAY, STATE_ALARM_ARMED_CUSTOM_BYPASS, STATE_ALARM_ARMED_HOME, STATE_ALARM_ARMED_NIGHT, STATE_ALARM_DISARMED, STATE_ALARM_PENDING, STATE_ALARM_TRIGGERED, STATE_IDLE, STATE_LOCKED, STATE_OFF, STATE_ON, STATE_PAUSED, STATE_PLAYING, STATE_STANDBY, STATE_UNAVAILABLE, STATE_UNKNOWN, TEMP_CELSIUS, TEMP_FAHRENHEIT, ) from homeassistant.core import DOMAIN as HA_DOMAIN from homeassistant.helpers.network import get_url from homeassistant.util import color as color_util, dt, temperature as temp_util from .const import ( CHALLENGE_ACK_NEEDED, CHALLENGE_FAILED_PIN_NEEDED, CHALLENGE_PIN_NEEDED, ERR_ALREADY_ARMED, ERR_ALREADY_DISARMED, ERR_ALREADY_STOPPED, ERR_CHALLENGE_NOT_SETUP, ERR_NOT_SUPPORTED, ERR_UNSUPPORTED_INPUT, ERR_VALUE_OUT_OF_RANGE, ) from .error import ChallengeNeeded, SmartHomeError _LOGGER = logging.getLogger(__name__) PREFIX_TRAITS = "action.devices.traits." TRAIT_CAMERA_STREAM = f"{PREFIX_TRAITS}CameraStream" TRAIT_ONOFF = f"{PREFIX_TRAITS}OnOff" TRAIT_DOCK = f"{PREFIX_TRAITS}Dock" TRAIT_STARTSTOP = f"{PREFIX_TRAITS}StartStop" TRAIT_BRIGHTNESS = f"{PREFIX_TRAITS}Brightness" TRAIT_COLOR_SETTING = f"{PREFIX_TRAITS}ColorSetting" TRAIT_SCENE = f"{PREFIX_TRAITS}Scene" TRAIT_TEMPERATURE_SETTING = f"{PREFIX_TRAITS}TemperatureSetting" TRAIT_LOCKUNLOCK = f"{PREFIX_TRAITS}LockUnlock" TRAIT_FANSPEED = f"{PREFIX_TRAITS}FanSpeed" TRAIT_MODES = f"{PREFIX_TRAITS}Modes" TRAIT_INPUTSELECTOR = f"{PREFIX_TRAITS}InputSelector" TRAIT_OPENCLOSE = f"{PREFIX_TRAITS}OpenClose" TRAIT_VOLUME = f"{PREFIX_TRAITS}Volume" TRAIT_ARMDISARM = f"{PREFIX_TRAITS}ArmDisarm" TRAIT_HUMIDITY_SETTING = f"{PREFIX_TRAITS}HumiditySetting" TRAIT_TRANSPORT_CONTROL = f"{PREFIX_TRAITS}TransportControl" TRAIT_MEDIA_STATE = f"{PREFIX_TRAITS}MediaState" PREFIX_COMMANDS = "action.devices.commands." 
COMMAND_ONOFF = f"{PREFIX_COMMANDS}OnOff" COMMAND_GET_CAMERA_STREAM = f"{PREFIX_COMMANDS}GetCameraStream" COMMAND_DOCK = f"{PREFIX_COMMANDS}Dock" COMMAND_STARTSTOP = f"{PREFIX_COMMANDS}StartStop" COMMAND_PAUSEUNPAUSE = f"{PREFIX_COMMANDS}PauseUnpause" COMMAND_BRIGHTNESS_ABSOLUTE = f"{PREFIX_COMMANDS}BrightnessAbsolute" COMMAND_COLOR_ABSOLUTE = f"{PREFIX_COMMANDS}ColorAbsolute" COMMAND_ACTIVATE_SCENE = f"{PREFIX_COMMANDS}ActivateScene" COMMAND_THERMOSTAT_TEMPERATURE_SETPOINT = ( f"{PREFIX_COMMANDS}ThermostatTemperatureSetpoint" ) COMMAND_THERMOSTAT_TEMPERATURE_SET_RANGE = ( f"{PREFIX_COMMANDS}ThermostatTemperatureSetRange" ) COMMAND_THERMOSTAT_SET_MODE = f"{PREFIX_COMMANDS}ThermostatSetMode" COMMAND_LOCKUNLOCK = f"{PREFIX_COMMANDS}LockUnlock" COMMAND_FANSPEED = f"{PREFIX_COMMANDS}SetFanSpeed" COMMAND_MODES = f"{PREFIX_COMMANDS}SetModes" COMMAND_INPUT = f"{PREFIX_COMMANDS}SetInput" COMMAND_NEXT_INPUT = f"{PREFIX_COMMANDS}NextInput" COMMAND_PREVIOUS_INPUT = f"{PREFIX_COMMANDS}PreviousInput" COMMAND_OPENCLOSE = f"{PREFIX_COMMANDS}OpenClose" COMMAND_OPENCLOSE_RELATIVE = f"{PREFIX_COMMANDS}OpenCloseRelative" COMMAND_SET_VOLUME = f"{PREFIX_COMMANDS}setVolume" COMMAND_VOLUME_RELATIVE = f"{PREFIX_COMMANDS}volumeRelative" COMMAND_MUTE = f"{PREFIX_COMMANDS}mute" COMMAND_ARMDISARM = f"{PREFIX_COMMANDS}ArmDisarm" COMMAND_MEDIA_NEXT = f"{PREFIX_COMMANDS}mediaNext" COMMAND_MEDIA_PAUSE = f"{PREFIX_COMMANDS}mediaPause" COMMAND_MEDIA_PREVIOUS = f"{PREFIX_COMMANDS}mediaPrevious" COMMAND_MEDIA_RESUME = f"{PREFIX_COMMANDS}mediaResume" COMMAND_MEDIA_SEEK_RELATIVE = f"{PREFIX_COMMANDS}mediaSeekRelative" COMMAND_MEDIA_SEEK_TO_POSITION = f"{PREFIX_COMMANDS}mediaSeekToPosition" COMMAND_MEDIA_SHUFFLE = f"{PREFIX_COMMANDS}mediaShuffle" COMMAND_MEDIA_STOP = f"{PREFIX_COMMANDS}mediaStop" COMMAND_SET_HUMIDITY = f"{PREFIX_COMMANDS}SetHumidity" TRAITS = [] def register_trait(trait): """Decorate a function to register a trait.""" TRAITS.append(trait) return trait def _google_temp_unit(units): """Return Google temperature unit.""" if units == TEMP_FAHRENHEIT: return "F" return "C" def _next_selected(items: list[str], selected: str | None) -> str | None: """Return the next item in a item list starting at given value. If selected is missing in items, None is returned """ try: index = items.index(selected) except ValueError: return None next_item = 0 if index == len(items) - 1 else index + 1 return items[next_item] class _Trait: """Represents a Trait inside Google Assistant skill.""" commands = [] @staticmethod def might_2fa(domain, features, device_class): """Return if the trait might ask for 2FA.""" return False def __init__(self, hass, state, config): """Initialize a trait for a state.""" self.hass = hass self.state = state self.config = config def sync_attributes(self): """Return attributes for a sync request.""" raise NotImplementedError def query_attributes(self): """Return the attributes of this trait for this entity.""" raise NotImplementedError def can_execute(self, command, params): """Test if command can be executed.""" return command in self.commands async def execute(self, command, data, params, challenge): """Execute a trait command.""" raise NotImplementedError @register_trait class BrightnessTrait(_Trait): """Trait to control brightness of a device. 
https://developers.google.com/actions/smarthome/traits/brightness """ name = TRAIT_BRIGHTNESS commands = [COMMAND_BRIGHTNESS_ABSOLUTE] @staticmethod def supported(domain, features, device_class): """Test if state is supported.""" if domain == light.DOMAIN: return features & light.SUPPORT_BRIGHTNESS return False def sync_attributes(self): """Return brightness attributes for a sync request.""" return {} def query_attributes(self): """Return brightness query attributes.""" domain = self.state.domain response = {} if domain == light.DOMAIN: brightness = self.state.attributes.get(light.ATTR_BRIGHTNESS) if brightness is not None: response["brightness"] = int(100 * (brightness / 255)) else: response["brightness"] = 0 return response async def execute(self, command, data, params, challenge): """Execute a brightness command.""" domain = self.state.domain if domain == light.DOMAIN: await self.hass.services.async_call( light.DOMAIN, light.SERVICE_TURN_ON, { ATTR_ENTITY_ID: self.state.entity_id, light.ATTR_BRIGHTNESS_PCT: params["brightness"], }, blocking=True, context=data.context, ) @register_trait class CameraStreamTrait(_Trait): """Trait to stream from cameras. https://developers.google.com/actions/smarthome/traits/camerastream """ name = TRAIT_CAMERA_STREAM commands = [COMMAND_GET_CAMERA_STREAM] stream_info = None @staticmethod def supported(domain, features, device_class): """Test if state is supported.""" if domain == camera.DOMAIN: return features & camera.SUPPORT_STREAM return False def sync_attributes(self): """Return stream attributes for a sync request.""" return { "cameraStreamSupportedProtocols": ["hls"], "cameraStreamNeedAuthToken": False, "cameraStreamNeedDrmEncryption": False, } def query_attributes(self): """Return camera stream attributes.""" return self.stream_info or {} async def execute(self, command, data, params, challenge): """Execute a get camera stream command.""" url = await self.hass.components.camera.async_request_stream( self.state.entity_id, "hls" ) self.stream_info = { "cameraStreamAccessUrl": f"{get_url(self.hass)}{url}", "cameraStreamReceiverAppId": CAST_APP_ID_HOMEASSISTANT, } @register_trait class OnOffTrait(_Trait): """Trait to offer basic on and off functionality. https://developers.google.com/actions/smarthome/traits/onoff """ name = TRAIT_ONOFF commands = [COMMAND_ONOFF] @staticmethod def supported(domain, features, device_class): """Test if state is supported.""" return domain in ( group.DOMAIN, input_boolean.DOMAIN, switch.DOMAIN, fan.DOMAIN, light.DOMAIN, media_player.DOMAIN, humidifier.DOMAIN, ) def sync_attributes(self): """Return OnOff attributes for a sync request.""" if self.state.attributes.get(ATTR_ASSUMED_STATE, False): return {"commandOnlyOnOff": True} return {} def query_attributes(self): """Return OnOff query attributes.""" return {"on": self.state.state not in (STATE_OFF, STATE_UNKNOWN)} async def execute(self, command, data, params, challenge): """Execute an OnOff command.""" domain = self.state.domain if domain == group.DOMAIN: service_domain = HA_DOMAIN service = SERVICE_TURN_ON if params["on"] else SERVICE_TURN_OFF else: service_domain = domain service = SERVICE_TURN_ON if params["on"] else SERVICE_TURN_OFF await self.hass.services.async_call( service_domain, service, {ATTR_ENTITY_ID: self.state.entity_id}, blocking=True, context=data.context, ) @register_trait class ColorSettingTrait(_Trait): """Trait to offer color temperature functionality. 
https://developers.google.com/actions/smarthome/traits/colortemperature """ name = TRAIT_COLOR_SETTING commands = [COMMAND_COLOR_ABSOLUTE] @staticmethod def supported(domain, features, device_class): """Test if state is supported.""" if domain != light.DOMAIN: return False return features & light.SUPPORT_COLOR_TEMP or features & light.SUPPORT_COLOR def sync_attributes(self): """Return color temperature attributes for a sync request.""" attrs = self.state.attributes features = attrs.get(ATTR_SUPPORTED_FEATURES, 0) response = {} if features & light.SUPPORT_COLOR: response["colorModel"] = "hsv" if features & light.SUPPORT_COLOR_TEMP: # Max Kelvin is Min Mireds K = 1000000 / mireds # Min Kelvin is Max Mireds K = 1000000 / mireds response["colorTemperatureRange"] = { "temperatureMaxK": color_util.color_temperature_mired_to_kelvin( attrs.get(light.ATTR_MIN_MIREDS) ), "temperatureMinK": color_util.color_temperature_mired_to_kelvin( attrs.get(light.ATTR_MAX_MIREDS) ), } return response def query_attributes(self): """Return color temperature query attributes.""" features = self.state.attributes.get(ATTR_SUPPORTED_FEATURES, 0) color = {} if features & light.SUPPORT_COLOR: color_hs = self.state.attributes.get(light.ATTR_HS_COLOR) brightness = self.state.attributes.get(light.ATTR_BRIGHTNESS, 1) if color_hs is not None: color["spectrumHsv"] = { "hue": color_hs[0], "saturation": color_hs[1] / 100, "value": brightness / 255, } if features & light.SUPPORT_COLOR_TEMP: temp = self.state.attributes.get(light.ATTR_COLOR_TEMP) # Some faulty integrations might put 0 in here, raising exception. if temp == 0: _LOGGER.warning( "Entity %s has incorrect color temperature %s", self.state.entity_id, temp, ) elif temp is not None: color["temperatureK"] = color_util.color_temperature_mired_to_kelvin( temp ) response = {} if color: response["color"] = color return response async def execute(self, command, data, params, challenge): """Execute a color temperature command.""" if "temperature" in params["color"]: temp = color_util.color_temperature_kelvin_to_mired( params["color"]["temperature"] ) min_temp = self.state.attributes[light.ATTR_MIN_MIREDS] max_temp = self.state.attributes[light.ATTR_MAX_MIREDS] if temp < min_temp or temp > max_temp: raise SmartHomeError( ERR_VALUE_OUT_OF_RANGE, f"Temperature should be between {min_temp} and {max_temp}", ) await self.hass.services.async_call( light.DOMAIN, SERVICE_TURN_ON, {ATTR_ENTITY_ID: self.state.entity_id, light.ATTR_COLOR_TEMP: temp}, blocking=True, context=data.context, ) elif "spectrumRGB" in params["color"]: # Convert integer to hex format and left pad with 0's till length 6 hex_value = f"{params['color']['spectrumRGB']:06x}" color = color_util.color_RGB_to_hs( *color_util.rgb_hex_to_rgb_list(hex_value) ) await self.hass.services.async_call( light.DOMAIN, SERVICE_TURN_ON, {ATTR_ENTITY_ID: self.state.entity_id, light.ATTR_HS_COLOR: color}, blocking=True, context=data.context, ) elif "spectrumHSV" in params["color"]: color = params["color"]["spectrumHSV"] saturation = color["saturation"] * 100 brightness = color["value"] * 255 await self.hass.services.async_call( light.DOMAIN, SERVICE_TURN_ON, { ATTR_ENTITY_ID: self.state.entity_id, light.ATTR_HS_COLOR: [color["hue"], saturation], light.ATTR_BRIGHTNESS: brightness, }, blocking=True, context=data.context, ) @register_trait class SceneTrait(_Trait): """Trait to offer scene functionality. 
https://developers.google.com/actions/smarthome/traits/scene """ name = TRAIT_SCENE commands = [COMMAND_ACTIVATE_SCENE] @staticmethod def supported(domain, features, device_class): """Test if state is supported.""" return domain in (scene.DOMAIN, script.DOMAIN) def sync_attributes(self): """Return scene attributes for a sync request.""" # Neither supported domain can support sceneReversible return {} def query_attributes(self): """Return scene query attributes.""" return {} async def execute(self, command, data, params, challenge): """Execute a scene command.""" # Don't block for scripts as they can be slow. await self.hass.services.async_call( self.state.domain, SERVICE_TURN_ON, {ATTR_ENTITY_ID: self.state.entity_id}, blocking=self.state.domain != script.DOMAIN, context=data.context, ) @register_trait class DockTrait(_Trait): """Trait to offer dock functionality. https://developers.google.com/actions/smarthome/traits/dock """ name = TRAIT_DOCK commands = [COMMAND_DOCK] @staticmethod def supported(domain, features, device_class): """Test if state is supported.""" return domain == vacuum.DOMAIN def sync_attributes(self): """Return dock attributes for a sync request.""" return {} def query_attributes(self): """Return dock query attributes.""" return {"isDocked": self.state.state == vacuum.STATE_DOCKED} async def execute(self, command, data, params, challenge): """Execute a dock command.""" await self.hass.services.async_call( self.state.domain, vacuum.SERVICE_RETURN_TO_BASE, {ATTR_ENTITY_ID: self.state.entity_id}, blocking=True, context=data.context, ) @register_trait class StartStopTrait(_Trait): """Trait to offer StartStop functionality. https://developers.google.com/actions/smarthome/traits/startstop """ name = TRAIT_STARTSTOP commands = [COMMAND_STARTSTOP, COMMAND_PAUSEUNPAUSE] @staticmethod def supported(domain, features, device_class): """Test if state is supported.""" if domain == vacuum.DOMAIN: return True if domain == cover.DOMAIN and features & cover.SUPPORT_STOP: return True return False def sync_attributes(self): """Return StartStop attributes for a sync request.""" domain = self.state.domain if domain == vacuum.DOMAIN: return { "pausable": self.state.attributes.get(ATTR_SUPPORTED_FEATURES, 0) & vacuum.SUPPORT_PAUSE != 0 } if domain == cover.DOMAIN: return {} def query_attributes(self): """Return StartStop query attributes.""" domain = self.state.domain state = self.state.state if domain == vacuum.DOMAIN: return { "isRunning": state == vacuum.STATE_CLEANING, "isPaused": state == vacuum.STATE_PAUSED, } if domain == cover.DOMAIN: return {"isRunning": state in (cover.STATE_CLOSING, cover.STATE_OPENING)} async def execute(self, command, data, params, challenge): """Execute a StartStop command.""" domain = self.state.domain if domain == vacuum.DOMAIN: return await self._execute_vacuum(command, data, params, challenge) if domain == cover.DOMAIN: return await self._execute_cover(command, data, params, challenge) async def _execute_vacuum(self, command, data, params, challenge): """Execute a StartStop command.""" if command == COMMAND_STARTSTOP: if params["start"]: await self.hass.services.async_call( self.state.domain, vacuum.SERVICE_START, {ATTR_ENTITY_ID: self.state.entity_id}, blocking=True, context=data.context, ) else: await self.hass.services.async_call( self.state.domain, vacuum.SERVICE_STOP, {ATTR_ENTITY_ID: self.state.entity_id}, blocking=True, context=data.context, ) elif command == COMMAND_PAUSEUNPAUSE: if params["pause"]: await self.hass.services.async_call( 
self.state.domain, vacuum.SERVICE_PAUSE, {ATTR_ENTITY_ID: self.state.entity_id}, blocking=True, context=data.context, ) else: await self.hass.services.async_call( self.state.domain, vacuum.SERVICE_START, {ATTR_ENTITY_ID: self.state.entity_id}, blocking=True, context=data.context, ) async def _execute_cover(self, command, data, params, challenge): """Execute a StartStop command.""" if command == COMMAND_STARTSTOP: if params["start"] is False: if ( self.state.state in ( cover.STATE_CLOSING, cover.STATE_OPENING, ) or self.state.attributes.get(ATTR_ASSUMED_STATE) ): await self.hass.services.async_call( self.state.domain, cover.SERVICE_STOP_COVER, {ATTR_ENTITY_ID: self.state.entity_id}, blocking=True, context=data.context, ) else: raise SmartHomeError( ERR_ALREADY_STOPPED, "Cover is already stopped" ) else: raise SmartHomeError( ERR_NOT_SUPPORTED, "Starting a cover is not supported" ) else: raise SmartHomeError( ERR_NOT_SUPPORTED, f"Command {command} is not supported" ) @register_trait class TemperatureSettingTrait(_Trait): """Trait to offer handling both temperature point and modes functionality. https://developers.google.com/actions/smarthome/traits/temperaturesetting """ name = TRAIT_TEMPERATURE_SETTING commands = [ COMMAND_THERMOSTAT_TEMPERATURE_SETPOINT, COMMAND_THERMOSTAT_TEMPERATURE_SET_RANGE, COMMAND_THERMOSTAT_SET_MODE, ] # We do not support "on" as we are unable to know how to restore # the last mode. hvac_to_google = { climate.HVAC_MODE_HEAT: "heat", climate.HVAC_MODE_COOL: "cool", climate.HVAC_MODE_OFF: "off", climate.HVAC_MODE_AUTO: "auto", climate.HVAC_MODE_HEAT_COOL: "heatcool", climate.HVAC_MODE_FAN_ONLY: "fan-only", climate.HVAC_MODE_DRY: "dry", } google_to_hvac = {value: key for key, value in hvac_to_google.items()} preset_to_google = {climate.PRESET_ECO: "eco"} google_to_preset = {value: key for key, value in preset_to_google.items()} @staticmethod def supported(domain, features, device_class): """Test if state is supported.""" if domain == climate.DOMAIN: return True return ( domain == sensor.DOMAIN and device_class == sensor.DEVICE_CLASS_TEMPERATURE ) @property def climate_google_modes(self): """Return supported Google modes.""" modes = [] attrs = self.state.attributes for mode in attrs.get(climate.ATTR_HVAC_MODES, []): google_mode = self.hvac_to_google.get(mode) if google_mode and google_mode not in modes: modes.append(google_mode) for preset in attrs.get(climate.ATTR_PRESET_MODES, []): google_mode = self.preset_to_google.get(preset) if google_mode and google_mode not in modes: modes.append(google_mode) return modes def sync_attributes(self): """Return temperature point and modes attributes for a sync request.""" response = {} attrs = self.state.attributes domain = self.state.domain response["thermostatTemperatureUnit"] = _google_temp_unit( self.hass.config.units.temperature_unit ) if domain == sensor.DOMAIN: device_class = attrs.get(ATTR_DEVICE_CLASS) if device_class == sensor.DEVICE_CLASS_TEMPERATURE: response["queryOnlyTemperatureSetting"] = True elif domain == climate.DOMAIN: modes = self.climate_google_modes # Some integrations don't support modes (e.g. opentherm), but Google doesn't # support changing the temperature if we don't have any modes. If there's # only one Google doesn't support changing it, so the default mode here is # only cosmetic. 
if len(modes) == 0: modes.append("heat") if "off" in modes and any( mode in modes for mode in ("heatcool", "heat", "cool") ): modes.append("on") response["availableThermostatModes"] = modes return response def query_attributes(self): """Return temperature point and modes query attributes.""" response = {} attrs = self.state.attributes domain = self.state.domain unit = self.hass.config.units.temperature_unit if domain == sensor.DOMAIN: device_class = attrs.get(ATTR_DEVICE_CLASS) if device_class == sensor.DEVICE_CLASS_TEMPERATURE: current_temp = self.state.state if current_temp not in (STATE_UNKNOWN, STATE_UNAVAILABLE): response["thermostatTemperatureAmbient"] = round( temp_util.convert(float(current_temp), unit, TEMP_CELSIUS), 1 ) elif domain == climate.DOMAIN: operation = self.state.state preset = attrs.get(climate.ATTR_PRESET_MODE) supported = attrs.get(ATTR_SUPPORTED_FEATURES, 0) if preset in self.preset_to_google: response["thermostatMode"] = self.preset_to_google[preset] else: response["thermostatMode"] = self.hvac_to_google.get(operation) current_temp = attrs.get(climate.ATTR_CURRENT_TEMPERATURE) if current_temp is not None: response["thermostatTemperatureAmbient"] = round( temp_util.convert(current_temp, unit, TEMP_CELSIUS), 1 ) current_humidity = attrs.get(climate.ATTR_CURRENT_HUMIDITY) if current_humidity is not None: response["thermostatHumidityAmbient"] = current_humidity if operation in (climate.HVAC_MODE_AUTO, climate.HVAC_MODE_HEAT_COOL): if supported & climate.SUPPORT_TARGET_TEMPERATURE_RANGE: response["thermostatTemperatureSetpointHigh"] = round( temp_util.convert( attrs[climate.ATTR_TARGET_TEMP_HIGH], unit, TEMP_CELSIUS ), 1, ) response["thermostatTemperatureSetpointLow"] = round( temp_util.convert( attrs[climate.ATTR_TARGET_TEMP_LOW], unit, TEMP_CELSIUS ), 1, ) else: target_temp = attrs.get(ATTR_TEMPERATURE) if target_temp is not None: target_temp = round( temp_util.convert(target_temp, unit, TEMP_CELSIUS), 1 ) response["thermostatTemperatureSetpointHigh"] = target_temp response["thermostatTemperatureSetpointLow"] = target_temp else: target_temp = attrs.get(ATTR_TEMPERATURE) if target_temp is not None: response["thermostatTemperatureSetpoint"] = round( temp_util.convert(target_temp, unit, TEMP_CELSIUS), 1 ) return response async def execute(self, command, data, params, challenge): """Execute a temperature point or mode command.""" domain = self.state.domain if domain == sensor.DOMAIN: raise SmartHomeError( ERR_NOT_SUPPORTED, "Execute is not supported by sensor" ) # All sent in temperatures are always in Celsius unit = self.hass.config.units.temperature_unit min_temp = self.state.attributes[climate.ATTR_MIN_TEMP] max_temp = self.state.attributes[climate.ATTR_MAX_TEMP] if command == COMMAND_THERMOSTAT_TEMPERATURE_SETPOINT: temp = temp_util.convert( params["thermostatTemperatureSetpoint"], TEMP_CELSIUS, unit ) if unit == TEMP_FAHRENHEIT: temp = round(temp) if temp < min_temp or temp > max_temp: raise SmartHomeError( ERR_VALUE_OUT_OF_RANGE, f"Temperature should be between {min_temp} and {max_temp}", ) await self.hass.services.async_call( climate.DOMAIN, climate.SERVICE_SET_TEMPERATURE, {ATTR_ENTITY_ID: self.state.entity_id, ATTR_TEMPERATURE: temp}, blocking=True, context=data.context, ) elif command == COMMAND_THERMOSTAT_TEMPERATURE_SET_RANGE: temp_high = temp_util.convert( params["thermostatTemperatureSetpointHigh"], TEMP_CELSIUS, unit ) if unit == TEMP_FAHRENHEIT: temp_high = round(temp_high) if temp_high < min_temp or temp_high > max_temp: raise SmartHomeError( 
ERR_VALUE_OUT_OF_RANGE, ( f"Upper bound for temperature range should be between " f"{min_temp} and {max_temp}" ), ) temp_low = temp_util.convert( params["thermostatTemperatureSetpointLow"], TEMP_CELSIUS, unit ) if unit == TEMP_FAHRENHEIT: temp_low = round(temp_low) if temp_low < min_temp or temp_low > max_temp: raise SmartHomeError( ERR_VALUE_OUT_OF_RANGE, ( f"Lower bound for temperature range should be between " f"{min_temp} and {max_temp}" ), ) supported = self.state.attributes.get(ATTR_SUPPORTED_FEATURES) svc_data = {ATTR_ENTITY_ID: self.state.entity_id} if supported & climate.SUPPORT_TARGET_TEMPERATURE_RANGE: svc_data[climate.ATTR_TARGET_TEMP_HIGH] = temp_high svc_data[climate.ATTR_TARGET_TEMP_LOW] = temp_low else: svc_data[ATTR_TEMPERATURE] = (temp_high + temp_low) / 2 await self.hass.services.async_call( climate.DOMAIN, climate.SERVICE_SET_TEMPERATURE, svc_data, blocking=True, context=data.context, ) elif command == COMMAND_THERMOSTAT_SET_MODE: target_mode = params["thermostatMode"] supported = self.state.attributes.get(ATTR_SUPPORTED_FEATURES) if target_mode == "on": await self.hass.services.async_call( climate.DOMAIN, SERVICE_TURN_ON, {ATTR_ENTITY_ID: self.state.entity_id}, blocking=True, context=data.context, ) return if target_mode == "off": await self.hass.services.async_call( climate.DOMAIN, SERVICE_TURN_OFF, {ATTR_ENTITY_ID: self.state.entity_id}, blocking=True, context=data.context, ) return if target_mode in self.google_to_preset: await self.hass.services.async_call( climate.DOMAIN, climate.SERVICE_SET_PRESET_MODE, { climate.ATTR_PRESET_MODE: self.google_to_preset[target_mode], ATTR_ENTITY_ID: self.state.entity_id, }, blocking=True, context=data.context, ) return await self.hass.services.async_call( climate.DOMAIN, climate.SERVICE_SET_HVAC_MODE, { ATTR_ENTITY_ID: self.state.entity_id, climate.ATTR_HVAC_MODE: self.google_to_hvac[target_mode], }, blocking=True, context=data.context, ) @register_trait class HumiditySettingTrait(_Trait): """Trait to offer humidity setting functionality. 
https://developers.google.com/actions/smarthome/traits/humiditysetting """ name = TRAIT_HUMIDITY_SETTING commands = [COMMAND_SET_HUMIDITY] @staticmethod def supported(domain, features, device_class): """Test if state is supported.""" if domain == humidifier.DOMAIN: return True return domain == sensor.DOMAIN and device_class == sensor.DEVICE_CLASS_HUMIDITY def sync_attributes(self): """Return humidity attributes for a sync request.""" response = {} attrs = self.state.attributes domain = self.state.domain if domain == sensor.DOMAIN: device_class = attrs.get(ATTR_DEVICE_CLASS) if device_class == sensor.DEVICE_CLASS_HUMIDITY: response["queryOnlyHumiditySetting"] = True elif domain == humidifier.DOMAIN: response["humiditySetpointRange"] = { "minPercent": round( float(self.state.attributes[humidifier.ATTR_MIN_HUMIDITY]) ), "maxPercent": round( float(self.state.attributes[humidifier.ATTR_MAX_HUMIDITY]) ), } return response def query_attributes(self): """Return humidity query attributes.""" response = {} attrs = self.state.attributes domain = self.state.domain if domain == sensor.DOMAIN: device_class = attrs.get(ATTR_DEVICE_CLASS) if device_class == sensor.DEVICE_CLASS_HUMIDITY: current_humidity = self.state.state if current_humidity not in (STATE_UNKNOWN, STATE_UNAVAILABLE): response["humidityAmbientPercent"] = round(float(current_humidity)) elif domain == humidifier.DOMAIN: target_humidity = attrs.get(humidifier.ATTR_HUMIDITY) if target_humidity is not None: response["humiditySetpointPercent"] = round(float(target_humidity)) return response async def execute(self, command, data, params, challenge): """Execute a humidity command.""" domain = self.state.domain if domain == sensor.DOMAIN: raise SmartHomeError( ERR_NOT_SUPPORTED, "Execute is not supported by sensor" ) if command == COMMAND_SET_HUMIDITY: await self.hass.services.async_call( humidifier.DOMAIN, humidifier.SERVICE_SET_HUMIDITY, { ATTR_ENTITY_ID: self.state.entity_id, humidifier.ATTR_HUMIDITY: params["humidity"], }, blocking=True, context=data.context, ) @register_trait class LockUnlockTrait(_Trait): """Trait to lock or unlock a lock. https://developers.google.com/actions/smarthome/traits/lockunlock """ name = TRAIT_LOCKUNLOCK commands = [COMMAND_LOCKUNLOCK] @staticmethod def supported(domain, features, device_class): """Test if state is supported.""" return domain == lock.DOMAIN @staticmethod def might_2fa(domain, features, device_class): """Return if the trait might ask for 2FA.""" return True def sync_attributes(self): """Return LockUnlock attributes for a sync request.""" return {} def query_attributes(self): """Return LockUnlock query attributes.""" return {"isLocked": self.state.state == STATE_LOCKED} async def execute(self, command, data, params, challenge): """Execute an LockUnlock command.""" if params["lock"]: service = lock.SERVICE_LOCK else: _verify_pin_challenge(data, self.state, challenge) service = lock.SERVICE_UNLOCK await self.hass.services.async_call( lock.DOMAIN, service, {ATTR_ENTITY_ID: self.state.entity_id}, blocking=True, context=data.context, ) @register_trait class ArmDisArmTrait(_Trait): """Trait to Arm or Disarm a Security System. 
https://developers.google.com/actions/smarthome/traits/armdisarm """ name = TRAIT_ARMDISARM commands = [COMMAND_ARMDISARM] state_to_service = { STATE_ALARM_ARMED_HOME: SERVICE_ALARM_ARM_HOME, STATE_ALARM_ARMED_AWAY: SERVICE_ALARM_ARM_AWAY, STATE_ALARM_ARMED_NIGHT: SERVICE_ALARM_ARM_NIGHT, STATE_ALARM_ARMED_CUSTOM_BYPASS: SERVICE_ALARM_ARM_CUSTOM_BYPASS, STATE_ALARM_TRIGGERED: SERVICE_ALARM_TRIGGER, } state_to_support = { STATE_ALARM_ARMED_HOME: alarm_control_panel.const.SUPPORT_ALARM_ARM_HOME, STATE_ALARM_ARMED_AWAY: alarm_control_panel.const.SUPPORT_ALARM_ARM_AWAY, STATE_ALARM_ARMED_NIGHT: alarm_control_panel.const.SUPPORT_ALARM_ARM_NIGHT, STATE_ALARM_ARMED_CUSTOM_BYPASS: alarm_control_panel.const.SUPPORT_ALARM_ARM_CUSTOM_BYPASS, STATE_ALARM_TRIGGERED: alarm_control_panel.const.SUPPORT_ALARM_TRIGGER, } @staticmethod def supported(domain, features, device_class): """Test if state is supported.""" return domain == alarm_control_panel.DOMAIN @staticmethod def might_2fa(domain, features, device_class): """Return if the trait might ask for 2FA.""" return True def _supported_states(self): """Return supported states.""" features = self.state.attributes.get(ATTR_SUPPORTED_FEATURES, 0) return [ state for state, required_feature in self.state_to_support.items() if features & required_feature != 0 ] def sync_attributes(self): """Return ArmDisarm attributes for a sync request.""" response = {} levels = [] for state in self._supported_states(): # level synonyms are generated from state names # 'armed_away' becomes 'armed away' or 'away' level_synonym = [state.replace("_", " ")] if state != STATE_ALARM_TRIGGERED: level_synonym.append(state.split("_")[1]) level = { "level_name": state, "level_values": [{"level_synonym": level_synonym, "lang": "en"}], } levels.append(level) response["availableArmLevels"] = {"levels": levels, "ordered": False} return response def query_attributes(self): """Return ArmDisarm query attributes.""" if "next_state" in self.state.attributes: armed_state = self.state.attributes["next_state"] else: armed_state = self.state.state response = {"isArmed": armed_state in self.state_to_service} if response["isArmed"]: response.update({"currentArmLevel": armed_state}) return response async def execute(self, command, data, params, challenge): """Execute an ArmDisarm command.""" if params["arm"] and not params.get("cancel"): arm_level = params.get("armLevel") # If no arm level given, we can only arm it if there is # only one supported arm type. We never default to triggered. 
if not arm_level: states = self._supported_states() if STATE_ALARM_TRIGGERED in states: states.remove(STATE_ALARM_TRIGGERED) if len(states) != 1: raise SmartHomeError(ERR_NOT_SUPPORTED, "ArmLevel missing") arm_level = states[0] if self.state.state == arm_level: raise SmartHomeError(ERR_ALREADY_ARMED, "System is already armed") if self.state.attributes["code_arm_required"]: _verify_pin_challenge(data, self.state, challenge) service = self.state_to_service[arm_level] # disarm the system without asking for code when # 'cancel' arming action is received while current status is pending elif ( params["arm"] and params.get("cancel") and self.state.state == STATE_ALARM_PENDING ): service = SERVICE_ALARM_DISARM else: if self.state.state == STATE_ALARM_DISARMED: raise SmartHomeError(ERR_ALREADY_DISARMED, "System is already disarmed") _verify_pin_challenge(data, self.state, challenge) service = SERVICE_ALARM_DISARM await self.hass.services.async_call( alarm_control_panel.DOMAIN, service, { ATTR_ENTITY_ID: self.state.entity_id, ATTR_CODE: data.config.secure_devices_pin, }, blocking=True, context=data.context, ) @register_trait class FanSpeedTrait(_Trait): """Trait to control speed of Fan. https://developers.google.com/actions/smarthome/traits/fanspeed """ name = TRAIT_FANSPEED commands = [COMMAND_FANSPEED] speed_synonyms = { fan.SPEED_OFF: ["stop", "off"], fan.SPEED_LOW: ["slow", "low", "slowest", "lowest"], fan.SPEED_MEDIUM: ["medium", "mid", "middle"], fan.SPEED_HIGH: ["high", "max", "fast", "highest", "fastest", "maximum"], } @staticmethod def supported(domain, features, device_class): """Test if state is supported.""" if domain == fan.DOMAIN: return features & fan.SUPPORT_SET_SPEED if domain == climate.DOMAIN: return features & climate.SUPPORT_FAN_MODE return False def sync_attributes(self): """Return speed point and modes attributes for a sync request.""" domain = self.state.domain speeds = [] reversible = False if domain == fan.DOMAIN: modes = self.state.attributes.get(fan.ATTR_SPEED_LIST, []) for mode in modes: if mode not in self.speed_synonyms: continue speed = { "speed_name": mode, "speed_values": [ {"speed_synonym": self.speed_synonyms.get(mode), "lang": "en"} ], } speeds.append(speed) reversible = bool( self.state.attributes.get(ATTR_SUPPORTED_FEATURES, 0) & fan.SUPPORT_DIRECTION ) elif domain == climate.DOMAIN: modes = self.state.attributes.get(climate.ATTR_FAN_MODES, []) for mode in modes: speed = { "speed_name": mode, "speed_values": [{"speed_synonym": [mode], "lang": "en"}], } speeds.append(speed) return { "availableFanSpeeds": {"speeds": speeds, "ordered": True}, "reversible": reversible, "supportsFanSpeedPercent": True, } def query_attributes(self): """Return speed point and modes query attributes.""" attrs = self.state.attributes domain = self.state.domain response = {} if domain == climate.DOMAIN: speed = attrs.get(climate.ATTR_FAN_MODE) if speed is not None: response["currentFanSpeedSetting"] = speed if domain == fan.DOMAIN: speed = attrs.get(fan.ATTR_SPEED) percent = attrs.get(fan.ATTR_PERCENTAGE) or 0 if speed is not None: response["on"] = speed != fan.SPEED_OFF response["currentFanSpeedSetting"] = speed response["currentFanSpeedPercent"] = percent return response async def execute(self, command, data, params, challenge): """Execute an SetFanSpeed command.""" domain = self.state.domain if domain == climate.DOMAIN: await self.hass.services.async_call( climate.DOMAIN, climate.SERVICE_SET_FAN_MODE, { ATTR_ENTITY_ID: self.state.entity_id, climate.ATTR_FAN_MODE: params["fanSpeed"], 
}, blocking=True, context=data.context, ) if domain == fan.DOMAIN: service_params = { ATTR_ENTITY_ID: self.state.entity_id, } if "fanSpeedPercent" in params: service = fan.SERVICE_SET_PERCENTAGE service_params[fan.ATTR_PERCENTAGE] = params["fanSpeedPercent"] else: service = fan.SERVICE_SET_SPEED service_params[fan.ATTR_SPEED] = params["fanSpeed"] await self.hass.services.async_call( fan.DOMAIN, service, service_params, blocking=True, context=data.context, ) @register_trait class ModesTrait(_Trait): """Trait to set modes. https://developers.google.com/actions/smarthome/traits/modes """ name = TRAIT_MODES commands = [COMMAND_MODES] SYNONYMS = { "sound mode": ["sound mode", "effects"], "option": ["option", "setting", "mode", "value"], } @staticmethod def supported(domain, features, device_class): """Test if state is supported.""" if domain == input_select.DOMAIN: return True if domain == humidifier.DOMAIN and features & humidifier.SUPPORT_MODES: return True if domain == light.DOMAIN and features & light.SUPPORT_EFFECT: return True if domain != media_player.DOMAIN: return False return features & media_player.SUPPORT_SELECT_SOUND_MODE def _generate(self, name, settings): """Generate a list of modes.""" mode = { "name": name, "name_values": [ {"name_synonym": self.SYNONYMS.get(name, [name]), "lang": "en"} ], "settings": [], "ordered": False, } for setting in settings: mode["settings"].append( { "setting_name": setting, "setting_values": [ { "setting_synonym": self.SYNONYMS.get(setting, [setting]), "lang": "en", } ], } ) return mode def sync_attributes(self): """Return mode attributes for a sync request.""" modes = [] for domain, attr, name in ( (media_player.DOMAIN, media_player.ATTR_SOUND_MODE_LIST, "sound mode"), (input_select.DOMAIN, input_select.ATTR_OPTIONS, "option"), (humidifier.DOMAIN, humidifier.ATTR_AVAILABLE_MODES, "mode"), (light.DOMAIN, light.ATTR_EFFECT_LIST, "effect"), ): if self.state.domain != domain: continue items = self.state.attributes.get(attr) if items is not None: modes.append(self._generate(name, items)) # Shortcut since all domains are currently unique break payload = {"availableModes": modes} return payload def query_attributes(self): """Return current modes.""" attrs = self.state.attributes response = {} mode_settings = {} if self.state.domain == media_player.DOMAIN: if media_player.ATTR_SOUND_MODE_LIST in attrs: mode_settings["sound mode"] = attrs.get(media_player.ATTR_SOUND_MODE) elif self.state.domain == input_select.DOMAIN: mode_settings["option"] = self.state.state elif self.state.domain == humidifier.DOMAIN: if ATTR_MODE in attrs: mode_settings["mode"] = attrs.get(ATTR_MODE) elif self.state.domain == light.DOMAIN and light.ATTR_EFFECT in attrs: mode_settings["effect"] = attrs.get(light.ATTR_EFFECT) if mode_settings: response["on"] = self.state.state not in (STATE_OFF, STATE_UNKNOWN) response["currentModeSettings"] = mode_settings return response async def execute(self, command, data, params, challenge): """Execute a SetModes command.""" settings = params.get("updateModeSettings") if self.state.domain == input_select.DOMAIN: option = params["updateModeSettings"]["option"] await self.hass.services.async_call( input_select.DOMAIN, input_select.SERVICE_SELECT_OPTION, { ATTR_ENTITY_ID: self.state.entity_id, input_select.ATTR_OPTION: option, }, blocking=True, context=data.context, ) return if self.state.domain == humidifier.DOMAIN: requested_mode = settings["mode"] await self.hass.services.async_call( humidifier.DOMAIN, humidifier.SERVICE_SET_MODE, { ATTR_MODE: 
requested_mode, ATTR_ENTITY_ID: self.state.entity_id, }, blocking=True, context=data.context, ) return if self.state.domain == light.DOMAIN: requested_effect = settings["effect"] await self.hass.services.async_call( light.DOMAIN, SERVICE_TURN_ON, { ATTR_ENTITY_ID: self.state.entity_id, light.ATTR_EFFECT: requested_effect, }, blocking=True, context=data.context, ) return if self.state.domain != media_player.DOMAIN: _LOGGER.info( "Received an Options command for unrecognised domain %s", self.state.domain, ) return sound_mode = settings.get("sound mode") if sound_mode: await self.hass.services.async_call( media_player.DOMAIN, media_player.SERVICE_SELECT_SOUND_MODE, { ATTR_ENTITY_ID: self.state.entity_id, media_player.ATTR_SOUND_MODE: sound_mode, }, blocking=True, context=data.context, ) @register_trait class InputSelectorTrait(_Trait): """Trait to set modes. https://developers.google.com/assistant/smarthome/traits/inputselector """ name = TRAIT_INPUTSELECTOR commands = [COMMAND_INPUT, COMMAND_NEXT_INPUT, COMMAND_PREVIOUS_INPUT] SYNONYMS = {} @staticmethod def supported(domain, features, device_class): """Test if state is supported.""" if domain == media_player.DOMAIN and ( features & media_player.SUPPORT_SELECT_SOURCE ): return True return False def sync_attributes(self): """Return mode attributes for a sync request.""" attrs = self.state.attributes inputs = [ {"key": source, "names": [{"name_synonym": [source], "lang": "en"}]} for source in attrs.get(media_player.ATTR_INPUT_SOURCE_LIST, []) ] payload = {"availableInputs": inputs, "orderedInputs": True} return payload def query_attributes(self): """Return current modes.""" attrs = self.state.attributes return {"currentInput": attrs.get(media_player.ATTR_INPUT_SOURCE, "")} async def execute(self, command, data, params, challenge): """Execute an SetInputSource command.""" sources = self.state.attributes.get(media_player.ATTR_INPUT_SOURCE_LIST) or [] source = self.state.attributes.get(media_player.ATTR_INPUT_SOURCE) if command == COMMAND_INPUT: requested_source = params.get("newInput") elif command == COMMAND_NEXT_INPUT: requested_source = _next_selected(sources, source) elif command == COMMAND_PREVIOUS_INPUT: requested_source = _next_selected(list(reversed(sources)), source) else: raise SmartHomeError(ERR_NOT_SUPPORTED, "Unsupported command") if requested_source not in sources: raise SmartHomeError(ERR_UNSUPPORTED_INPUT, "Unsupported input") await self.hass.services.async_call( media_player.DOMAIN, media_player.SERVICE_SELECT_SOURCE, { ATTR_ENTITY_ID: self.state.entity_id, media_player.ATTR_INPUT_SOURCE: requested_source, }, blocking=True, context=data.context, ) @register_trait class OpenCloseTrait(_Trait): """Trait to open and close a cover. 
https://developers.google.com/actions/smarthome/traits/openclose """ # Cover device classes that require 2FA COVER_2FA = ( cover.DEVICE_CLASS_DOOR, cover.DEVICE_CLASS_GARAGE, cover.DEVICE_CLASS_GATE, ) name = TRAIT_OPENCLOSE commands = [COMMAND_OPENCLOSE, COMMAND_OPENCLOSE_RELATIVE] @staticmethod def supported(domain, features, device_class): """Test if state is supported.""" if domain == cover.DOMAIN: return True return domain == binary_sensor.DOMAIN and device_class in ( binary_sensor.DEVICE_CLASS_DOOR, binary_sensor.DEVICE_CLASS_GARAGE_DOOR, binary_sensor.DEVICE_CLASS_LOCK, binary_sensor.DEVICE_CLASS_OPENING, binary_sensor.DEVICE_CLASS_WINDOW, ) @staticmethod def might_2fa(domain, features, device_class): """Return if the trait might ask for 2FA.""" return domain == cover.DOMAIN and device_class in OpenCloseTrait.COVER_2FA def sync_attributes(self): """Return opening direction.""" response = {} features = self.state.attributes.get(ATTR_SUPPORTED_FEATURES, 0) if self.state.domain == binary_sensor.DOMAIN: response["queryOnlyOpenClose"] = True response["discreteOnlyOpenClose"] = True elif ( self.state.domain == cover.DOMAIN and features & cover.SUPPORT_SET_POSITION == 0 ): response["discreteOnlyOpenClose"] = True if ( features & cover.SUPPORT_OPEN == 0 and features & cover.SUPPORT_CLOSE == 0 ): response["queryOnlyOpenClose"] = True if self.state.attributes.get(ATTR_ASSUMED_STATE): response["commandOnlyOpenClose"] = True return response def query_attributes(self): """Return state query attributes.""" domain = self.state.domain response = {} # When it's an assumed state, we will return empty state # This shouldn't happen because we set `commandOnlyOpenClose` # but Google still queries. Erroring here will cause device # to show up offline. if self.state.attributes.get(ATTR_ASSUMED_STATE): return response if domain == cover.DOMAIN: if self.state.state == STATE_UNKNOWN: raise SmartHomeError( ERR_NOT_SUPPORTED, "Querying state is not supported" ) position = self.state.attributes.get(cover.ATTR_CURRENT_POSITION) if position is not None: response["openPercent"] = position elif self.state.state != cover.STATE_CLOSED: response["openPercent"] = 100 else: response["openPercent"] = 0 elif domain == binary_sensor.DOMAIN: if self.state.state == STATE_ON: response["openPercent"] = 100 else: response["openPercent"] = 0 return response async def execute(self, command, data, params, challenge): """Execute an Open, close, Set position command.""" domain = self.state.domain features = self.state.attributes.get(ATTR_SUPPORTED_FEATURES, 0) if domain == cover.DOMAIN: svc_params = {ATTR_ENTITY_ID: self.state.entity_id} should_verify = False if command == COMMAND_OPENCLOSE_RELATIVE: position = self.state.attributes.get(cover.ATTR_CURRENT_POSITION) if position is None: raise SmartHomeError( ERR_NOT_SUPPORTED, "Current position not know for relative command", ) position = max(0, min(100, position + params["openRelativePercent"])) else: position = params["openPercent"] if position == 0: service = cover.SERVICE_CLOSE_COVER should_verify = False elif position == 100: service = cover.SERVICE_OPEN_COVER should_verify = True elif features & cover.SUPPORT_SET_POSITION: service = cover.SERVICE_SET_COVER_POSITION if position > 0: should_verify = True svc_params[cover.ATTR_POSITION] = position else: raise SmartHomeError( ERR_NOT_SUPPORTED, "No support for partial open close" ) if ( should_verify and self.state.attributes.get(ATTR_DEVICE_CLASS) in OpenCloseTrait.COVER_2FA ): _verify_pin_challenge(data, self.state, challenge) 
await self.hass.services.async_call( cover.DOMAIN, service, svc_params, blocking=True, context=data.context ) @register_trait class VolumeTrait(_Trait): """Trait to control volume of a device. https://developers.google.com/actions/smarthome/traits/volume """ name = TRAIT_VOLUME commands = [COMMAND_SET_VOLUME, COMMAND_VOLUME_RELATIVE, COMMAND_MUTE] @staticmethod def supported(domain, features, device_class): """Test if trait is supported.""" if domain == media_player.DOMAIN: return features & ( media_player.SUPPORT_VOLUME_SET | media_player.SUPPORT_VOLUME_STEP ) return False def sync_attributes(self): """Return volume attributes for a sync request.""" features = self.state.attributes.get(ATTR_SUPPORTED_FEATURES, 0) return { "volumeCanMuteAndUnmute": bool(features & media_player.SUPPORT_VOLUME_MUTE), "commandOnlyVolume": self.state.attributes.get(ATTR_ASSUMED_STATE, False), # Volume amounts in SET_VOLUME and VOLUME_RELATIVE are on a scale # from 0 to this value. "volumeMaxLevel": 100, # Default change for queries like "Hey Google, volume up". # 10% corresponds to the default behavior for the # media_player.volume{up,down} services. "levelStepSize": 10, } def query_attributes(self): """Return volume query attributes.""" response = {} level = self.state.attributes.get(media_player.ATTR_MEDIA_VOLUME_LEVEL) if level is not None: # Convert 0.0-1.0 to 0-100 response["currentVolume"] = int(level * 100) muted = self.state.attributes.get(media_player.ATTR_MEDIA_VOLUME_MUTED) if muted is not None: response["isMuted"] = bool(muted) return response async def _set_volume_absolute(self, data, level): await self.hass.services.async_call( media_player.DOMAIN, media_player.SERVICE_VOLUME_SET, { ATTR_ENTITY_ID: self.state.entity_id, media_player.ATTR_MEDIA_VOLUME_LEVEL: level, }, blocking=True, context=data.context, ) async def _execute_set_volume(self, data, params): level = max(0, min(100, params["volumeLevel"])) if not ( self.state.attributes.get(ATTR_SUPPORTED_FEATURES, 0) & media_player.SUPPORT_VOLUME_SET ): raise SmartHomeError(ERR_NOT_SUPPORTED, "Command not supported") await self._set_volume_absolute(data, level / 100) async def _execute_volume_relative(self, data, params): relative = params["relativeSteps"] features = self.state.attributes.get(ATTR_SUPPORTED_FEATURES, 0) if features & media_player.SUPPORT_VOLUME_SET: current = self.state.attributes.get(media_player.ATTR_MEDIA_VOLUME_LEVEL) target = max(0.0, min(1.0, current + relative / 100)) await self._set_volume_absolute(data, target) elif features & media_player.SUPPORT_VOLUME_STEP: svc = media_player.SERVICE_VOLUME_UP if relative < 0: svc = media_player.SERVICE_VOLUME_DOWN relative = -relative for _ in range(relative): await self.hass.services.async_call( media_player.DOMAIN, svc, {ATTR_ENTITY_ID: self.state.entity_id}, blocking=True, context=data.context, ) else: raise SmartHomeError(ERR_NOT_SUPPORTED, "Command not supported") async def _execute_mute(self, data, params): mute = params["mute"] if not ( self.state.attributes.get(ATTR_SUPPORTED_FEATURES, 0) & media_player.SUPPORT_VOLUME_MUTE ): raise SmartHomeError(ERR_NOT_SUPPORTED, "Command not supported") await self.hass.services.async_call( media_player.DOMAIN, media_player.SERVICE_VOLUME_MUTE, { ATTR_ENTITY_ID: self.state.entity_id, media_player.ATTR_MEDIA_VOLUME_MUTED: mute, }, blocking=True, context=data.context, ) async def execute(self, command, data, params, challenge): """Execute a volume command.""" if command == COMMAND_SET_VOLUME: await self._execute_set_volume(data, params) elif 
command == COMMAND_VOLUME_RELATIVE: await self._execute_volume_relative(data, params) elif command == COMMAND_MUTE: await self._execute_mute(data, params) else: raise SmartHomeError(ERR_NOT_SUPPORTED, "Command not supported") def _verify_pin_challenge(data, state, challenge): """Verify a pin challenge.""" if not data.config.should_2fa(state): return if not data.config.secure_devices_pin: raise SmartHomeError(ERR_CHALLENGE_NOT_SETUP, "Challenge is not set up") if not challenge: raise ChallengeNeeded(CHALLENGE_PIN_NEEDED) pin = challenge.get("pin") if pin != data.config.secure_devices_pin: raise ChallengeNeeded(CHALLENGE_FAILED_PIN_NEEDED) def _verify_ack_challenge(data, state, challenge): """Verify an ack challenge.""" if not data.config.should_2fa(state): return if not challenge or not challenge.get("ack"): raise ChallengeNeeded(CHALLENGE_ACK_NEEDED) MEDIA_COMMAND_SUPPORT_MAPPING = { COMMAND_MEDIA_NEXT: media_player.SUPPORT_NEXT_TRACK, COMMAND_MEDIA_PAUSE: media_player.SUPPORT_PAUSE, COMMAND_MEDIA_PREVIOUS: media_player.SUPPORT_PREVIOUS_TRACK, COMMAND_MEDIA_RESUME: media_player.SUPPORT_PLAY, COMMAND_MEDIA_SEEK_RELATIVE: media_player.SUPPORT_SEEK, COMMAND_MEDIA_SEEK_TO_POSITION: media_player.SUPPORT_SEEK, COMMAND_MEDIA_SHUFFLE: media_player.SUPPORT_SHUFFLE_SET, COMMAND_MEDIA_STOP: media_player.SUPPORT_STOP, } MEDIA_COMMAND_ATTRIBUTES = { COMMAND_MEDIA_NEXT: "NEXT", COMMAND_MEDIA_PAUSE: "PAUSE", COMMAND_MEDIA_PREVIOUS: "PREVIOUS", COMMAND_MEDIA_RESUME: "RESUME", COMMAND_MEDIA_SEEK_RELATIVE: "SEEK_RELATIVE", COMMAND_MEDIA_SEEK_TO_POSITION: "SEEK_TO_POSITION", COMMAND_MEDIA_SHUFFLE: "SHUFFLE", COMMAND_MEDIA_STOP: "STOP", } @register_trait class TransportControlTrait(_Trait): """Trait to control media playback. https://developers.google.com/actions/smarthome/traits/transportcontrol """ name = TRAIT_TRANSPORT_CONTROL commands = [ COMMAND_MEDIA_NEXT, COMMAND_MEDIA_PAUSE, COMMAND_MEDIA_PREVIOUS, COMMAND_MEDIA_RESUME, COMMAND_MEDIA_SEEK_RELATIVE, COMMAND_MEDIA_SEEK_TO_POSITION, COMMAND_MEDIA_SHUFFLE, COMMAND_MEDIA_STOP, ] @staticmethod def supported(domain, features, device_class): """Test if state is supported.""" if domain == media_player.DOMAIN: for feature in MEDIA_COMMAND_SUPPORT_MAPPING.values(): if features & feature: return True return False def sync_attributes(self): """Return opening direction.""" response = {} if self.state.domain == media_player.DOMAIN: features = self.state.attributes.get(ATTR_SUPPORTED_FEATURES, 0) support = [] for command, feature in MEDIA_COMMAND_SUPPORT_MAPPING.items(): if features & feature: support.append(MEDIA_COMMAND_ATTRIBUTES[command]) response["transportControlSupportedCommands"] = support return response def query_attributes(self): """Return the attributes of this trait for this entity.""" return {} async def execute(self, command, data, params, challenge): """Execute a media command.""" service_attrs = {ATTR_ENTITY_ID: self.state.entity_id} if command == COMMAND_MEDIA_SEEK_RELATIVE: service = media_player.SERVICE_MEDIA_SEEK rel_position = params["relativePositionMs"] / 1000 seconds_since = 0 # Default to 0 seconds if self.state.state == STATE_PLAYING: now = dt.utcnow() upd_at = self.state.attributes.get( media_player.ATTR_MEDIA_POSITION_UPDATED_AT, now ) seconds_since = (now - upd_at).total_seconds() position = self.state.attributes.get(media_player.ATTR_MEDIA_POSITION, 0) max_position = self.state.attributes.get( media_player.ATTR_MEDIA_DURATION, 0 ) service_attrs[media_player.ATTR_MEDIA_SEEK_POSITION] = min( max(position + seconds_since + rel_position, 
0), max_position ) elif command == COMMAND_MEDIA_SEEK_TO_POSITION: service = media_player.SERVICE_MEDIA_SEEK max_position = self.state.attributes.get( media_player.ATTR_MEDIA_DURATION, 0 ) service_attrs[media_player.ATTR_MEDIA_SEEK_POSITION] = min( max(params["absPositionMs"] / 1000, 0), max_position ) elif command == COMMAND_MEDIA_NEXT: service = media_player.SERVICE_MEDIA_NEXT_TRACK elif command == COMMAND_MEDIA_PAUSE: service = media_player.SERVICE_MEDIA_PAUSE elif command == COMMAND_MEDIA_PREVIOUS: service = media_player.SERVICE_MEDIA_PREVIOUS_TRACK elif command == COMMAND_MEDIA_RESUME: service = media_player.SERVICE_MEDIA_PLAY elif command == COMMAND_MEDIA_SHUFFLE: service = media_player.SERVICE_SHUFFLE_SET # Google Assistant only supports enabling shuffle service_attrs[media_player.ATTR_MEDIA_SHUFFLE] = True elif command == COMMAND_MEDIA_STOP: service = media_player.SERVICE_MEDIA_STOP else: raise SmartHomeError(ERR_NOT_SUPPORTED, "Command not supported") await self.hass.services.async_call( media_player.DOMAIN, service, service_attrs, blocking=True, context=data.context, ) @register_trait class MediaStateTrait(_Trait): """Trait to get media playback state. https://developers.google.com/actions/smarthome/traits/mediastate """ name = TRAIT_MEDIA_STATE commands = [] activity_lookup = { STATE_OFF: "INACTIVE", STATE_IDLE: "STANDBY", STATE_PLAYING: "ACTIVE", STATE_ON: "STANDBY", STATE_PAUSED: "STANDBY", STATE_STANDBY: "STANDBY", STATE_UNAVAILABLE: "INACTIVE", STATE_UNKNOWN: "INACTIVE", } playback_lookup = { STATE_OFF: "STOPPED", STATE_IDLE: "STOPPED", STATE_PLAYING: "PLAYING", STATE_ON: "STOPPED", STATE_PAUSED: "PAUSED", STATE_STANDBY: "STOPPED", STATE_UNAVAILABLE: "STOPPED", STATE_UNKNOWN: "STOPPED", } @staticmethod def supported(domain, features, device_class): """Test if state is supported.""" return domain == media_player.DOMAIN def sync_attributes(self): """Return attributes for a sync request.""" return {"supportActivityState": True, "supportPlaybackState": True} def query_attributes(self): """Return the attributes of this trait for this entity.""" return { "activityState": self.activity_lookup.get(self.state.state, "INACTIVE"), "playbackState": self.playback_lookup.get(self.state.state, "STOPPED"), }
"""The tests for the mochad light platform.""" import unittest.mock as mock import pytest from homeassistant.components import light from homeassistant.components.mochad import light as mochad from homeassistant.setup import async_setup_component @pytest.fixture(autouse=True) def pymochad_mock(): """Mock pymochad.""" with mock.patch("homeassistant.components.mochad.light.device") as device: yield device @pytest.fixture def light_mock(hass, brightness): """Mock light.""" controller_mock = mock.MagicMock() dev_dict = {"address": "a1", "name": "fake_light", "brightness_levels": brightness} return mochad.MochadLight(hass, controller_mock, dev_dict) async def test_setup_adds_proper_devices(hass): """Test if setup adds devices.""" good_config = { "mochad": {}, "light": { "platform": "mochad", "devices": [{"name": "Light1", "address": "a1"}], }, } assert await async_setup_component(hass, light.DOMAIN, good_config) @pytest.mark.parametrize( "brightness,expected", [(32, "on"), (256, "xdim 255"), (64, "xdim 63")] ) async def test_turn_on_with_no_brightness(light_mock, expected): """Test turn_on.""" light_mock.turn_on() light_mock.light.send_cmd.assert_called_once_with(expected) @pytest.mark.parametrize( "brightness,expected", [ (32, [mock.call("on"), mock.call("dim 25")]), (256, [mock.call("xdim 45")]), (64, [mock.call("xdim 11")]), ], ) async def test_turn_on_with_brightness(light_mock, expected): """Test turn_on.""" light_mock.turn_on(brightness=45) light_mock.light.send_cmd.assert_has_calls(expected) @pytest.mark.parametrize("brightness", [32]) async def test_turn_off(light_mock): """Test turn_off.""" light_mock.turn_off() light_mock.light.send_cmd.assert_called_once_with("off")
w1ll1am23/home-assistant
tests/components/mochad/test_light.py
homeassistant/components/google_assistant/trait.py
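# Minimal standalone sketch of the volume-scaling arithmetic used by VolumeTrait
# in the trait.py cell above: Home Assistant exposes volume_level on a 0.0-1.0
# scale, while Google expects 0-100 and sends relativeSteps on that same scale.
# The helper names below are illustrative only; they are not part of trait.py.

def ha_level_to_google(level: float) -> int:
    """Convert a media_player volume_level (0.0-1.0) to a Google volume (0-100)."""
    return int(level * 100)


def apply_relative_steps(current_level: float, relative_steps: int) -> float:
    """Apply a relativeSteps delta (on the 0-100 scale) to a 0.0-1.0 level, clamped."""
    return max(0.0, min(1.0, current_level + relative_steps / 100))


assert ha_level_to_google(0.5) == 50
assert apply_relative_steps(0.95, 10) == 1.0  # clamped at full volume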
"""Support for Z-Wave lights.""" from __future__ import annotations import logging from typing import Any, Callable from zwave_js_server.client import Client as ZwaveClient from zwave_js_server.const import ColorComponent, CommandClass from homeassistant.components.light import ( ATTR_BRIGHTNESS, ATTR_COLOR_TEMP, ATTR_HS_COLOR, ATTR_TRANSITION, ATTR_WHITE_VALUE, DOMAIN as LIGHT_DOMAIN, SUPPORT_BRIGHTNESS, SUPPORT_COLOR, SUPPORT_COLOR_TEMP, SUPPORT_TRANSITION, SUPPORT_WHITE_VALUE, LightEntity, ) from homeassistant.config_entries import ConfigEntry from homeassistant.core import HomeAssistant, callback from homeassistant.helpers.dispatcher import async_dispatcher_connect import homeassistant.util.color as color_util from .const import DATA_CLIENT, DATA_UNSUBSCRIBE, DOMAIN from .discovery import ZwaveDiscoveryInfo from .entity import ZWaveBaseEntity LOGGER = logging.getLogger(__name__) MULTI_COLOR_MAP = { ColorComponent.WARM_WHITE: "warmWhite", ColorComponent.COLD_WHITE: "coldWhite", ColorComponent.RED: "red", ColorComponent.GREEN: "green", ColorComponent.BLUE: "blue", ColorComponent.AMBER: "amber", ColorComponent.CYAN: "cyan", ColorComponent.PURPLE: "purple", } async def async_setup_entry( hass: HomeAssistant, config_entry: ConfigEntry, async_add_entities: Callable ) -> None: """Set up Z-Wave Light from Config Entry.""" client: ZwaveClient = hass.data[DOMAIN][config_entry.entry_id][DATA_CLIENT] @callback def async_add_light(info: ZwaveDiscoveryInfo) -> None: """Add Z-Wave Light.""" light = ZwaveLight(config_entry, client, info) async_add_entities([light]) hass.data[DOMAIN][config_entry.entry_id][DATA_UNSUBSCRIBE].append( async_dispatcher_connect( hass, f"{DOMAIN}_{config_entry.entry_id}_add_{LIGHT_DOMAIN}", async_add_light, ) ) def byte_to_zwave_brightness(value: int) -> int: """Convert brightness in 0-255 scale to 0-99 scale. `value` -- (int) Brightness byte value from 0-255. """ if value > 0: return max(1, round((value / 255) * 99)) return 0 class ZwaveLight(ZWaveBaseEntity, LightEntity): """Representation of a Z-Wave light.""" def __init__( self, config_entry: ConfigEntry, client: ZwaveClient, info: ZwaveDiscoveryInfo ) -> None: """Initialize the light.""" super().__init__(config_entry, client, info) self._supports_color = False self._supports_white_value = False self._supports_color_temp = False self._hs_color: tuple[float, float] | None = None self._white_value: int | None = None self._color_temp: int | None = None self._min_mireds = 153 # 6500K as a safe default self._max_mireds = 370 # 2700K as a safe default self._supported_features = SUPPORT_BRIGHTNESS # get additional (optional) values and set features self._target_value = self.get_zwave_value("targetValue") self._dimming_duration = self.get_zwave_value("duration") if self._dimming_duration is not None: self._supported_features |= SUPPORT_TRANSITION self._calculate_color_values() if self._supports_color: self._supported_features |= SUPPORT_COLOR if self._supports_color_temp: self._supported_features |= SUPPORT_COLOR_TEMP if self._supports_white_value: self._supported_features |= SUPPORT_WHITE_VALUE @callback def on_value_update(self) -> None: """Call when a watched value is added or updated.""" self._calculate_color_values() @property def brightness(self) -> int: """Return the brightness of this light between 0..255. Z-Wave multilevel switches use a range of [0, 99] to control brightness. 
""" if self.info.primary_value.value is not None: return round((self.info.primary_value.value / 99) * 255) return 0 @property def is_on(self) -> bool: """Return true if device is on (brightness above 0).""" return self.brightness > 0 @property def hs_color(self) -> tuple[float, float] | None: """Return the hs color.""" return self._hs_color @property def white_value(self) -> int | None: """Return the white value of this light between 0..255.""" return self._white_value @property def color_temp(self) -> int | None: """Return the color temperature.""" return self._color_temp @property def min_mireds(self) -> int: """Return the coldest color_temp that this light supports.""" return self._min_mireds @property def max_mireds(self) -> int: """Return the warmest color_temp that this light supports.""" return self._max_mireds @property def supported_features(self) -> int: """Flag supported features.""" return self._supported_features async def async_turn_on(self, **kwargs: Any) -> None: """Turn the device on.""" # RGB/HS color hs_color = kwargs.get(ATTR_HS_COLOR) if hs_color is not None and self._supports_color: red, green, blue = color_util.color_hs_to_RGB(*hs_color) colors = { ColorComponent.RED: red, ColorComponent.GREEN: green, ColorComponent.BLUE: blue, } if self._supports_color_temp: # turn of white leds when setting rgb colors[ColorComponent.WARM_WHITE] = 0 colors[ColorComponent.COLD_WHITE] = 0 await self._async_set_colors(colors) # Color temperature color_temp = kwargs.get(ATTR_COLOR_TEMP) if color_temp is not None and self._supports_color_temp: # Limit color temp to min/max values cold = max( 0, min( 255, round( (self._max_mireds - color_temp) / (self._max_mireds - self._min_mireds) * 255 ), ), ) warm = 255 - cold await self._async_set_colors( { # turn off color leds when setting color temperature ColorComponent.RED: 0, ColorComponent.GREEN: 0, ColorComponent.BLUE: 0, ColorComponent.WARM_WHITE: warm, ColorComponent.COLD_WHITE: cold, } ) # White value white_value = kwargs.get(ATTR_WHITE_VALUE) if white_value is not None and self._supports_white_value: # white led brightness is controlled by white level # rgb leds (if any) can be on at the same time await self._async_set_colors( { ColorComponent.WARM_WHITE: white_value, ColorComponent.COLD_WHITE: white_value, } ) # set brightness await self._async_set_brightness( kwargs.get(ATTR_BRIGHTNESS), kwargs.get(ATTR_TRANSITION) ) async def async_turn_off(self, **kwargs: Any) -> None: """Turn the light off.""" await self._async_set_brightness(0, kwargs.get(ATTR_TRANSITION)) async def _async_set_colors(self, colors: dict[ColorComponent, int]) -> None: """Set (multiple) defined colors to given value(s).""" # prefer the (new) combined color property # https://github.com/zwave-js/node-zwave-js/pull/1782 combined_color_val = self.get_zwave_value( "targetColor", CommandClass.SWITCH_COLOR, value_property_key=None, ) if combined_color_val and isinstance(combined_color_val.value, dict): colors_dict = {} for color, value in colors.items(): color_name = MULTI_COLOR_MAP[color] colors_dict[color_name] = value # set updated color object await self.info.node.async_set_value(combined_color_val, colors_dict) return # fallback to setting the color(s) one by one if multicolor fails # not sure this is needed at all, but just in case for color, value in colors.items(): await self._async_set_color(color, value) async def _async_set_color(self, color: ColorComponent, new_value: int) -> None: """Set defined color to given value.""" # actually set the new color value 
target_zwave_value = self.get_zwave_value( "targetColor", CommandClass.SWITCH_COLOR, value_property_key=color.value, ) if target_zwave_value is None: # guard for unsupported color return await self.info.node.async_set_value(target_zwave_value, new_value) async def _async_set_brightness( self, brightness: int | None, transition: int | None = None ) -> None: """Set new brightness to light.""" if brightness is None: # Level 255 means to set it to previous value. zwave_brightness = 255 else: # Zwave multilevel switches use a range of [0, 99] to control brightness. zwave_brightness = byte_to_zwave_brightness(brightness) # set transition value before sending new brightness await self._async_set_transition_duration(transition) # setting a value requires setting targetValue await self.info.node.async_set_value(self._target_value, zwave_brightness) async def _async_set_transition_duration(self, duration: int | None = None) -> None: """Set the transition time for the brightness value.""" if self._dimming_duration is None: return # pylint: disable=fixme,unreachable # TODO: setting duration needs to be fixed upstream # https://github.com/zwave-js/node-zwave-js/issues/1321 return if duration is None: # type: ignore # no transition specified by user, use defaults duration = 7621 # anything over 7620 uses the factory default else: # pragma: no cover # transition specified by user transition = duration if transition <= 127: duration = transition else: minutes = round(transition / 60) LOGGER.debug( "Transition rounded to %d minutes for %s", minutes, self.entity_id, ) duration = minutes + 128 # only send value if it differs from current # this prevents sending a command for nothing if self._dimming_duration.value != duration: # pragma: no cover await self.info.node.async_set_value(self._dimming_duration, duration) @callback def _calculate_color_values(self) -> None: """Calculate light colors.""" # NOTE: We lookup all values here (instead of relying on the multicolor one) # to find out what colors are supported # as this is a simple lookup by key, this not heavy red_val = self.get_zwave_value( "currentColor", CommandClass.SWITCH_COLOR, value_property_key=ColorComponent.RED.value, ) green_val = self.get_zwave_value( "currentColor", CommandClass.SWITCH_COLOR, value_property_key=ColorComponent.GREEN.value, ) blue_val = self.get_zwave_value( "currentColor", CommandClass.SWITCH_COLOR, value_property_key=ColorComponent.BLUE.value, ) ww_val = self.get_zwave_value( "currentColor", CommandClass.SWITCH_COLOR, value_property_key=ColorComponent.WARM_WHITE.value, ) cw_val = self.get_zwave_value( "currentColor", CommandClass.SWITCH_COLOR, value_property_key=ColorComponent.COLD_WHITE.value, ) # prefer the (new) combined color property # https://github.com/zwave-js/node-zwave-js/pull/1782 combined_color_val = self.get_zwave_value( "currentColor", CommandClass.SWITCH_COLOR, value_property_key=None, ) if combined_color_val and isinstance(combined_color_val.value, dict): multi_color = combined_color_val.value else: multi_color = {} # RGB support if red_val and green_val and blue_val: # prefer values from the multicolor property red = multi_color.get("red", red_val.value) green = multi_color.get("green", green_val.value) blue = multi_color.get("blue", blue_val.value) self._supports_color = True # convert to HS self._hs_color = color_util.color_RGB_to_hs(red, green, blue) # color temperature support if ww_val and cw_val: self._supports_color_temp = True warm_white = multi_color.get("warmWhite", ww_val.value) cold_white = 
multi_color.get("coldWhite", cw_val.value) # Calculate color temps based on whites if cold_white or warm_white: self._color_temp = round( self._max_mireds - ((cold_white / 255) * (self._max_mireds - self._min_mireds)) ) else: self._color_temp = None # only one white channel (warm white) = white_level support elif ww_val: self._supports_white_value = True self._white_value = multi_color.get("warmWhite", ww_val.value) # only one white channel (cool white) = white_level support elif cw_val: self._supports_white_value = True self._white_value = multi_color.get("coldWhite", cw_val.value)
"""The tests for the mochad light platform.""" import unittest.mock as mock import pytest from homeassistant.components import light from homeassistant.components.mochad import light as mochad from homeassistant.setup import async_setup_component @pytest.fixture(autouse=True) def pymochad_mock(): """Mock pymochad.""" with mock.patch("homeassistant.components.mochad.light.device") as device: yield device @pytest.fixture def light_mock(hass, brightness): """Mock light.""" controller_mock = mock.MagicMock() dev_dict = {"address": "a1", "name": "fake_light", "brightness_levels": brightness} return mochad.MochadLight(hass, controller_mock, dev_dict) async def test_setup_adds_proper_devices(hass): """Test if setup adds devices.""" good_config = { "mochad": {}, "light": { "platform": "mochad", "devices": [{"name": "Light1", "address": "a1"}], }, } assert await async_setup_component(hass, light.DOMAIN, good_config) @pytest.mark.parametrize( "brightness,expected", [(32, "on"), (256, "xdim 255"), (64, "xdim 63")] ) async def test_turn_on_with_no_brightness(light_mock, expected): """Test turn_on.""" light_mock.turn_on() light_mock.light.send_cmd.assert_called_once_with(expected) @pytest.mark.parametrize( "brightness,expected", [ (32, [mock.call("on"), mock.call("dim 25")]), (256, [mock.call("xdim 45")]), (64, [mock.call("xdim 11")]), ], ) async def test_turn_on_with_brightness(light_mock, expected): """Test turn_on.""" light_mock.turn_on(brightness=45) light_mock.light.send_cmd.assert_has_calls(expected) @pytest.mark.parametrize("brightness", [32]) async def test_turn_off(light_mock): """Test turn_off.""" light_mock.turn_off() light_mock.light.send_cmd.assert_called_once_with("off")
w1ll1am23/home-assistant
tests/components/mochad/test_light.py
homeassistant/components/zwave_js/light.py
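# Minimal standalone sketch of the brightness and color-temperature conversions
# used by ZwaveLight in the light.py cell above. byte_to_zwave_brightness is the
# same helper defined in that file; mireds_to_white_channels is an illustrative
# name for the warm/cold white split done in async_turn_on, assuming the same
# 153-370 mired defaults used there.

def byte_to_zwave_brightness(value: int) -> int:
    """Convert brightness from the 0-255 scale to the Z-Wave 0-99 scale."""
    if value > 0:
        return max(1, round((value / 255) * 99))
    return 0


def mireds_to_white_channels(color_temp: int, min_mireds: int = 153, max_mireds: int = 370):
    """Split a mired color temperature into (cold, warm) white levels, each 0-255."""
    cold = max(
        0,
        min(255, round((max_mireds - color_temp) / (max_mireds - min_mireds) * 255)),
    )
    return cold, 255 - cold


assert byte_to_zwave_brightness(255) == 99
assert mireds_to_white_channels(370) == (0, 255)  # warmest supported -> warm white only
assert mireds_to_white_channels(153) == (255, 0)  # coldest supported -> cold white only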
"""Support for Acmeda Roller Blinds.""" from homeassistant.components.cover import ( ATTR_POSITION, SUPPORT_CLOSE, SUPPORT_CLOSE_TILT, SUPPORT_OPEN, SUPPORT_OPEN_TILT, SUPPORT_SET_POSITION, SUPPORT_SET_TILT_POSITION, SUPPORT_STOP, SUPPORT_STOP_TILT, CoverEntity, ) from homeassistant.core import callback from homeassistant.helpers.dispatcher import async_dispatcher_connect from .base import AcmedaBase from .const import ACMEDA_HUB_UPDATE, DOMAIN from .helpers import async_add_acmeda_entities async def async_setup_entry(hass, config_entry, async_add_entities): """Set up the Acmeda Rollers from a config entry.""" hub = hass.data[DOMAIN][config_entry.entry_id] current = set() @callback def async_add_acmeda_covers(): async_add_acmeda_entities( hass, AcmedaCover, config_entry, current, async_add_entities ) hub.cleanup_callbacks.append( async_dispatcher_connect( hass, ACMEDA_HUB_UPDATE.format(config_entry.entry_id), async_add_acmeda_covers, ) ) class AcmedaCover(AcmedaBase, CoverEntity): """Representation of a Acmeda cover device.""" @property def current_cover_position(self): """Return the current position of the roller blind. None is unknown, 0 is closed, 100 is fully open. """ position = None if self.roller.type != 7: position = 100 - self.roller.closed_percent return position @property def current_cover_tilt_position(self): """Return the current tilt of the roller blind. None is unknown, 0 is closed, 100 is fully open. """ position = None if self.roller.type in [7, 10]: position = 100 - self.roller.closed_percent return position @property def supported_features(self): """Flag supported features.""" supported_features = 0 if self.current_cover_position is not None: supported_features |= ( SUPPORT_OPEN | SUPPORT_CLOSE | SUPPORT_STOP | SUPPORT_SET_POSITION ) if self.current_cover_tilt_position is not None: supported_features |= ( SUPPORT_OPEN_TILT | SUPPORT_CLOSE_TILT | SUPPORT_STOP_TILT | SUPPORT_SET_TILT_POSITION ) return supported_features @property def is_closed(self): """Return if the cover is closed.""" return self.roller.closed_percent == 100 async def async_close_cover(self, **kwargs): """Close the roller.""" await self.roller.move_down() async def async_open_cover(self, **kwargs): """Open the roller.""" await self.roller.move_up() async def async_stop_cover(self, **kwargs): """Stop the roller.""" await self.roller.move_stop() async def async_set_cover_position(self, **kwargs): """Move the roller shutter to a specific position.""" await self.roller.move_to(100 - kwargs[ATTR_POSITION]) async def async_close_cover_tilt(self, **kwargs): """Close the roller.""" await self.roller.move_down() async def async_open_cover_tilt(self, **kwargs): """Open the roller.""" await self.roller.move_up() async def async_stop_cover_tilt(self, **kwargs): """Stop the roller.""" await self.roller.move_stop() async def async_set_cover_tilt(self, **kwargs): """Tilt the roller shutter to a specific position.""" await self.roller.move_to(100 - kwargs[ATTR_POSITION])
"""The tests for the mochad light platform.""" import unittest.mock as mock import pytest from homeassistant.components import light from homeassistant.components.mochad import light as mochad from homeassistant.setup import async_setup_component @pytest.fixture(autouse=True) def pymochad_mock(): """Mock pymochad.""" with mock.patch("homeassistant.components.mochad.light.device") as device: yield device @pytest.fixture def light_mock(hass, brightness): """Mock light.""" controller_mock = mock.MagicMock() dev_dict = {"address": "a1", "name": "fake_light", "brightness_levels": brightness} return mochad.MochadLight(hass, controller_mock, dev_dict) async def test_setup_adds_proper_devices(hass): """Test if setup adds devices.""" good_config = { "mochad": {}, "light": { "platform": "mochad", "devices": [{"name": "Light1", "address": "a1"}], }, } assert await async_setup_component(hass, light.DOMAIN, good_config) @pytest.mark.parametrize( "brightness,expected", [(32, "on"), (256, "xdim 255"), (64, "xdim 63")] ) async def test_turn_on_with_no_brightness(light_mock, expected): """Test turn_on.""" light_mock.turn_on() light_mock.light.send_cmd.assert_called_once_with(expected) @pytest.mark.parametrize( "brightness,expected", [ (32, [mock.call("on"), mock.call("dim 25")]), (256, [mock.call("xdim 45")]), (64, [mock.call("xdim 11")]), ], ) async def test_turn_on_with_brightness(light_mock, expected): """Test turn_on.""" light_mock.turn_on(brightness=45) light_mock.light.send_cmd.assert_has_calls(expected) @pytest.mark.parametrize("brightness", [32]) async def test_turn_off(light_mock): """Test turn_off.""" light_mock.turn_off() light_mock.light.send_cmd.assert_called_once_with("off")
w1ll1am23/home-assistant
tests/components/mochad/test_light.py
homeassistant/components/acmeda/cover.py
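# Minimal standalone sketch of the position convention handled by AcmedaCover in
# the cover.py cell above: the Acmeda hub reports closed_percent (100 = fully
# closed), while Home Assistant covers treat 100 as fully open, so both
# directions invert the value. The function name is illustrative, not part of
# cover.py.

def closed_percent_to_ha_position(closed_percent: int) -> int:
    """Convert the roller's closed_percent into a Home Assistant cover position."""
    return 100 - closed_percent


assert closed_percent_to_ha_position(100) == 0    # fully closed
assert closed_percent_to_ha_position(0) == 100    # fully open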
"""Support for Nest Cameras.""" from datetime import timedelta import logging import requests from homeassistant.components.camera import PLATFORM_SCHEMA, SUPPORT_ON_OFF, Camera from homeassistant.util.dt import utcnow from .const import DATA_NEST, DOMAIN _LOGGER = logging.getLogger(__name__) NEST_BRAND = "Nest" PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({}) def setup_platform(hass, config, add_entities, discovery_info=None): """Set up a Nest Cam. No longer in use. """ async def async_setup_legacy_entry(hass, entry, async_add_entities): """Set up a Nest sensor based on a config entry.""" camera_devices = await hass.async_add_executor_job(hass.data[DATA_NEST].cameras) cameras = [NestCamera(structure, device) for structure, device in camera_devices] async_add_entities(cameras, True) class NestCamera(Camera): """Representation of a Nest Camera.""" def __init__(self, structure, device): """Initialize a Nest Camera.""" super().__init__() self.structure = structure self.device = device self._location = None self._name = None self._online = None self._is_streaming = None self._is_video_history_enabled = False # Default to non-NestAware subscribed, but will be fixed during update self._time_between_snapshots = timedelta(seconds=30) self._last_image = None self._next_snapshot_at = None @property def name(self): """Return the name of the nest, if any.""" return self._name @property def unique_id(self): """Return the serial number.""" return self.device.device_id @property def device_info(self): """Return information about the device.""" return { "identifiers": {(DOMAIN, self.device.device_id)}, "name": self.device.name_long, "manufacturer": "Nest Labs", "model": "Camera", } @property def should_poll(self): """Nest camera should poll periodically.""" return True @property def is_recording(self): """Return true if the device is recording.""" return self._is_streaming @property def brand(self): """Return the brand of the camera.""" return NEST_BRAND @property def supported_features(self): """Nest Cam support turn on and off.""" return SUPPORT_ON_OFF @property def is_on(self): """Return true if on.""" return self._online and self._is_streaming def turn_off(self): """Turn off camera.""" _LOGGER.debug("Turn off camera %s", self._name) # Calling Nest API in is_streaming setter. # device.is_streaming would not immediately change until the process # finished in Nest Cam. self.device.is_streaming = False def turn_on(self): """Turn on camera.""" if not self._online: _LOGGER.error("Camera %s is offline", self._name) return _LOGGER.debug("Turn on camera %s", self._name) # Calling Nest API in is_streaming setter. # device.is_streaming would not immediately change until the process # finished in Nest Cam. 
self.device.is_streaming = True def update(self): """Cache value from Python-nest.""" self._location = self.device.where self._name = self.device.name self._online = self.device.online self._is_streaming = self.device.is_streaming self._is_video_history_enabled = self.device.is_video_history_enabled if self._is_video_history_enabled: # NestAware allowed 10/min self._time_between_snapshots = timedelta(seconds=6) else: # Otherwise, 2/min self._time_between_snapshots = timedelta(seconds=30) def _ready_for_snapshot(self, now): return self._next_snapshot_at is None or now > self._next_snapshot_at def camera_image(self): """Return a still image response from the camera.""" now = utcnow() if self._ready_for_snapshot(now): url = self.device.snapshot_url try: response = requests.get(url) except requests.exceptions.RequestException as error: _LOGGER.error("Error getting camera image: %s", error) return None self._next_snapshot_at = now + self._time_between_snapshots self._last_image = response.content return self._last_image
"""The tests for the mochad light platform.""" import unittest.mock as mock import pytest from homeassistant.components import light from homeassistant.components.mochad import light as mochad from homeassistant.setup import async_setup_component @pytest.fixture(autouse=True) def pymochad_mock(): """Mock pymochad.""" with mock.patch("homeassistant.components.mochad.light.device") as device: yield device @pytest.fixture def light_mock(hass, brightness): """Mock light.""" controller_mock = mock.MagicMock() dev_dict = {"address": "a1", "name": "fake_light", "brightness_levels": brightness} return mochad.MochadLight(hass, controller_mock, dev_dict) async def test_setup_adds_proper_devices(hass): """Test if setup adds devices.""" good_config = { "mochad": {}, "light": { "platform": "mochad", "devices": [{"name": "Light1", "address": "a1"}], }, } assert await async_setup_component(hass, light.DOMAIN, good_config) @pytest.mark.parametrize( "brightness,expected", [(32, "on"), (256, "xdim 255"), (64, "xdim 63")] ) async def test_turn_on_with_no_brightness(light_mock, expected): """Test turn_on.""" light_mock.turn_on() light_mock.light.send_cmd.assert_called_once_with(expected) @pytest.mark.parametrize( "brightness,expected", [ (32, [mock.call("on"), mock.call("dim 25")]), (256, [mock.call("xdim 45")]), (64, [mock.call("xdim 11")]), ], ) async def test_turn_on_with_brightness(light_mock, expected): """Test turn_on.""" light_mock.turn_on(brightness=45) light_mock.light.send_cmd.assert_has_calls(expected) @pytest.mark.parametrize("brightness", [32]) async def test_turn_off(light_mock): """Test turn_off.""" light_mock.turn_off() light_mock.light.send_cmd.assert_called_once_with("off")
w1ll1am23/home-assistant
tests/components/mochad/test_light.py
homeassistant/components/nest/legacy/camera.py
"""KIRA interface to receive UDP packets from an IR-IP bridge.""" import logging import os import pykira import voluptuous as vol from voluptuous.error import Error as VoluptuousError import yaml from homeassistant.const import ( CONF_CODE, CONF_DEVICE, CONF_HOST, CONF_NAME, CONF_PORT, CONF_REPEAT, CONF_SENSORS, CONF_TYPE, EVENT_HOMEASSISTANT_STOP, STATE_UNKNOWN, ) from homeassistant.helpers import discovery import homeassistant.helpers.config_validation as cv DOMAIN = "kira" _LOGGER = logging.getLogger(__name__) DEFAULT_HOST = "0.0.0.0" DEFAULT_PORT = 65432 CONF_REMOTES = "remotes" CONF_SENSOR = "sensor" CONF_REMOTE = "remote" CODES_YAML = f"{DOMAIN}_codes.yaml" CODE_SCHEMA = vol.Schema( { vol.Required(CONF_NAME): cv.string, vol.Required(CONF_CODE): cv.string, vol.Optional(CONF_TYPE): cv.string, vol.Optional(CONF_DEVICE): cv.string, vol.Optional(CONF_REPEAT): cv.positive_int, } ) SENSOR_SCHEMA = vol.Schema( { vol.Optional(CONF_NAME, default=DOMAIN): vol.Exclusive(cv.string, "sensors"), vol.Optional(CONF_HOST, default=DEFAULT_HOST): cv.string, vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port, } ) REMOTE_SCHEMA = vol.Schema( { vol.Optional(CONF_NAME, default=DOMAIN): vol.Exclusive(cv.string, "remotes"), vol.Required(CONF_HOST): cv.string, vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port, } ) CONFIG_SCHEMA = vol.Schema( { DOMAIN: vol.Schema( { vol.Optional(CONF_SENSORS): [SENSOR_SCHEMA], vol.Optional(CONF_REMOTES): [REMOTE_SCHEMA], } ) }, extra=vol.ALLOW_EXTRA, ) def load_codes(path): """Load KIRA codes from specified file.""" codes = [] if os.path.exists(path): with open(path) as code_file: data = yaml.safe_load(code_file) or [] for code in data: try: codes.append(CODE_SCHEMA(code)) except VoluptuousError as exception: # keep going _LOGGER.warning("KIRA code invalid data: %s", exception) else: with open(path, "w") as code_file: code_file.write("") return codes def setup(hass, config): """Set up the KIRA component.""" sensors = config.get(DOMAIN, {}).get(CONF_SENSORS, []) remotes = config.get(DOMAIN, {}).get(CONF_REMOTES, []) # If no sensors or remotes were specified, add a sensor if not (sensors or remotes): sensors.append({}) codes = load_codes(hass.config.path(CODES_YAML)) hass.data[DOMAIN] = {CONF_SENSOR: {}, CONF_REMOTE: {}} def load_module(platform, idx, module_conf): """Set up the KIRA module and load platform.""" # note: module_name is not the HA device name. it's just a unique name # to ensure the component and platform can share information module_name = ("%s_%d" % (DOMAIN, idx)) if idx else DOMAIN device_name = module_conf.get(CONF_NAME, DOMAIN) port = module_conf.get(CONF_PORT, DEFAULT_PORT) host = module_conf.get(CONF_HOST, DEFAULT_HOST) if platform == CONF_SENSOR: module = pykira.KiraReceiver(host, port) module.start() else: module = pykira.KiraModule(host, port) hass.data[DOMAIN][platform][module_name] = module for code in codes: code_tuple = (code.get(CONF_NAME), code.get(CONF_DEVICE, STATE_UNKNOWN)) module.registerCode(code_tuple, code.get(CONF_CODE)) discovery.load_platform( hass, platform, DOMAIN, {"name": module_name, "device": device_name}, config ) for idx, module_conf in enumerate(sensors): load_module(CONF_SENSOR, idx, module_conf) for idx, module_conf in enumerate(remotes): load_module(CONF_REMOTE, idx, module_conf) def _stop_kira(_event): """Stop the KIRA receiver.""" for receiver in hass.data[DOMAIN][CONF_SENSOR].values(): receiver.stop() _LOGGER.info("Terminated receivers") hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, _stop_kira) return True
"""The tests for the mochad light platform.""" import unittest.mock as mock import pytest from homeassistant.components import light from homeassistant.components.mochad import light as mochad from homeassistant.setup import async_setup_component @pytest.fixture(autouse=True) def pymochad_mock(): """Mock pymochad.""" with mock.patch("homeassistant.components.mochad.light.device") as device: yield device @pytest.fixture def light_mock(hass, brightness): """Mock light.""" controller_mock = mock.MagicMock() dev_dict = {"address": "a1", "name": "fake_light", "brightness_levels": brightness} return mochad.MochadLight(hass, controller_mock, dev_dict) async def test_setup_adds_proper_devices(hass): """Test if setup adds devices.""" good_config = { "mochad": {}, "light": { "platform": "mochad", "devices": [{"name": "Light1", "address": "a1"}], }, } assert await async_setup_component(hass, light.DOMAIN, good_config) @pytest.mark.parametrize( "brightness,expected", [(32, "on"), (256, "xdim 255"), (64, "xdim 63")] ) async def test_turn_on_with_no_brightness(light_mock, expected): """Test turn_on.""" light_mock.turn_on() light_mock.light.send_cmd.assert_called_once_with(expected) @pytest.mark.parametrize( "brightness,expected", [ (32, [mock.call("on"), mock.call("dim 25")]), (256, [mock.call("xdim 45")]), (64, [mock.call("xdim 11")]), ], ) async def test_turn_on_with_brightness(light_mock, expected): """Test turn_on.""" light_mock.turn_on(brightness=45) light_mock.light.send_cmd.assert_has_calls(expected) @pytest.mark.parametrize("brightness", [32]) async def test_turn_off(light_mock): """Test turn_off.""" light_mock.turn_off() light_mock.light.send_cmd.assert_called_once_with("off")
w1ll1am23/home-assistant
tests/components/mochad/test_light.py
homeassistant/components/kira/__init__.py
"""Errors for the cert_expiry integration.""" from homeassistant.exceptions import HomeAssistantError class CertExpiryException(HomeAssistantError): """Base class for cert_expiry exceptions.""" class TemporaryFailure(CertExpiryException): """Temporary failure has occurred.""" class ValidationFailure(CertExpiryException): """Certificate validation failure has occurred.""" class ResolveFailed(TemporaryFailure): """Name resolution failed.""" class ConnectionTimeout(TemporaryFailure): """Network connection timed out.""" class ConnectionRefused(TemporaryFailure): """Network connection refused."""
"""The tests for the mochad light platform.""" import unittest.mock as mock import pytest from homeassistant.components import light from homeassistant.components.mochad import light as mochad from homeassistant.setup import async_setup_component @pytest.fixture(autouse=True) def pymochad_mock(): """Mock pymochad.""" with mock.patch("homeassistant.components.mochad.light.device") as device: yield device @pytest.fixture def light_mock(hass, brightness): """Mock light.""" controller_mock = mock.MagicMock() dev_dict = {"address": "a1", "name": "fake_light", "brightness_levels": brightness} return mochad.MochadLight(hass, controller_mock, dev_dict) async def test_setup_adds_proper_devices(hass): """Test if setup adds devices.""" good_config = { "mochad": {}, "light": { "platform": "mochad", "devices": [{"name": "Light1", "address": "a1"}], }, } assert await async_setup_component(hass, light.DOMAIN, good_config) @pytest.mark.parametrize( "brightness,expected", [(32, "on"), (256, "xdim 255"), (64, "xdim 63")] ) async def test_turn_on_with_no_brightness(light_mock, expected): """Test turn_on.""" light_mock.turn_on() light_mock.light.send_cmd.assert_called_once_with(expected) @pytest.mark.parametrize( "brightness,expected", [ (32, [mock.call("on"), mock.call("dim 25")]), (256, [mock.call("xdim 45")]), (64, [mock.call("xdim 11")]), ], ) async def test_turn_on_with_brightness(light_mock, expected): """Test turn_on.""" light_mock.turn_on(brightness=45) light_mock.light.send_cmd.assert_has_calls(expected) @pytest.mark.parametrize("brightness", [32]) async def test_turn_off(light_mock): """Test turn_off.""" light_mock.turn_off() light_mock.light.send_cmd.assert_called_once_with("off")
w1ll1am23/home-assistant
tests/components/mochad/test_light.py
homeassistant/components/cert_expiry/errors.py
"""Config flow for UpCloud.""" import logging import requests.exceptions import upcloud_api import voluptuous as vol from homeassistant import config_entries from homeassistant.const import CONF_PASSWORD, CONF_SCAN_INTERVAL, CONF_USERNAME from homeassistant.core import callback from .const import DEFAULT_SCAN_INTERVAL, DOMAIN _LOGGER = logging.getLogger(__name__) class UpCloudConfigFlow(config_entries.ConfigFlow, domain=DOMAIN): """UpCloud config flow.""" VERSION = 1 CONNECTION_CLASS = config_entries.CONN_CLASS_CLOUD_POLL username: str password: str async def async_step_user(self, user_input=None): """Handle user initiated flow.""" if user_input is None: return self._async_show_form(step_id="user") await self.async_set_unique_id(user_input[CONF_USERNAME]) manager = upcloud_api.CloudManager( user_input[CONF_USERNAME], user_input[CONF_PASSWORD] ) errors = {} try: await self.hass.async_add_executor_job(manager.authenticate) except upcloud_api.UpCloudAPIError: errors["base"] = "invalid_auth" _LOGGER.debug("invalid_auth", exc_info=True) except requests.exceptions.RequestException: errors["base"] = "cannot_connect" _LOGGER.debug("cannot_connect", exc_info=True) if errors: return self._async_show_form( step_id="user", user_input=user_input, errors=errors ) return self.async_create_entry(title=user_input[CONF_USERNAME], data=user_input) async def async_step_import(self, user_input=None): """Handle import initiated flow.""" await self.async_set_unique_id(user_input[CONF_USERNAME]) self._abort_if_unique_id_configured() return await self.async_step_user(user_input=user_input) @callback def _async_show_form(self, step_id, user_input=None, errors=None): """Show our form.""" if user_input is None: user_input = {} return self.async_show_form( step_id=step_id, data_schema=vol.Schema( { vol.Required( CONF_USERNAME, default=user_input.get(CONF_USERNAME, "") ): str, vol.Required( CONF_PASSWORD, default=user_input.get(CONF_PASSWORD, "") ): str, } ), errors=errors or {}, ) @staticmethod @callback def async_get_options_flow(config_entry): """Get options flow.""" return UpCloudOptionsFlow(config_entry) class UpCloudOptionsFlow(config_entries.OptionsFlow): """UpCloud options flow.""" def __init__(self, config_entry: config_entries.ConfigEntry): """Initialize options flow.""" self.config_entry = config_entry async def async_step_init(self, user_input=None): """Handle options flow.""" if user_input is not None: return self.async_create_entry(title="", data=user_input) data_schema = vol.Schema( { vol.Optional( CONF_SCAN_INTERVAL, default=self.config_entry.options.get(CONF_SCAN_INTERVAL) or DEFAULT_SCAN_INTERVAL.seconds, ): vol.All(vol.Coerce(int), vol.Range(min=30)), } ) return self.async_show_form(step_id="init", data_schema=data_schema)
"""The tests for the mochad light platform.""" import unittest.mock as mock import pytest from homeassistant.components import light from homeassistant.components.mochad import light as mochad from homeassistant.setup import async_setup_component @pytest.fixture(autouse=True) def pymochad_mock(): """Mock pymochad.""" with mock.patch("homeassistant.components.mochad.light.device") as device: yield device @pytest.fixture def light_mock(hass, brightness): """Mock light.""" controller_mock = mock.MagicMock() dev_dict = {"address": "a1", "name": "fake_light", "brightness_levels": brightness} return mochad.MochadLight(hass, controller_mock, dev_dict) async def test_setup_adds_proper_devices(hass): """Test if setup adds devices.""" good_config = { "mochad": {}, "light": { "platform": "mochad", "devices": [{"name": "Light1", "address": "a1"}], }, } assert await async_setup_component(hass, light.DOMAIN, good_config) @pytest.mark.parametrize( "brightness,expected", [(32, "on"), (256, "xdim 255"), (64, "xdim 63")] ) async def test_turn_on_with_no_brightness(light_mock, expected): """Test turn_on.""" light_mock.turn_on() light_mock.light.send_cmd.assert_called_once_with(expected) @pytest.mark.parametrize( "brightness,expected", [ (32, [mock.call("on"), mock.call("dim 25")]), (256, [mock.call("xdim 45")]), (64, [mock.call("xdim 11")]), ], ) async def test_turn_on_with_brightness(light_mock, expected): """Test turn_on.""" light_mock.turn_on(brightness=45) light_mock.light.send_cmd.assert_has_calls(expected) @pytest.mark.parametrize("brightness", [32]) async def test_turn_off(light_mock): """Test turn_off.""" light_mock.turn_off() light_mock.light.send_cmd.assert_called_once_with("off")
w1ll1am23/home-assistant
tests/components/mochad/test_light.py
homeassistant/components/upcloud/config_flow.py
"""Support for Snips on-device ASR and NLU.""" from datetime import timedelta import json import logging import voluptuous as vol from homeassistant.components import mqtt from homeassistant.core import callback from homeassistant.helpers import config_validation as cv, intent DOMAIN = "snips" CONF_INTENTS = "intents" CONF_ACTION = "action" CONF_FEEDBACK = "feedback_sounds" CONF_PROBABILITY = "probability_threshold" CONF_SITE_IDS = "site_ids" SERVICE_SAY = "say" SERVICE_SAY_ACTION = "say_action" SERVICE_FEEDBACK_ON = "feedback_on" SERVICE_FEEDBACK_OFF = "feedback_off" INTENT_TOPIC = "hermes/intent/#" FEEDBACK_ON_TOPIC = "hermes/feedback/sound/toggleOn" FEEDBACK_OFF_TOPIC = "hermes/feedback/sound/toggleOff" ATTR_TEXT = "text" ATTR_SITE_ID = "site_id" ATTR_CUSTOM_DATA = "custom_data" ATTR_CAN_BE_ENQUEUED = "can_be_enqueued" ATTR_INTENT_FILTER = "intent_filter" _LOGGER = logging.getLogger(__name__) CONFIG_SCHEMA = vol.Schema( { DOMAIN: vol.Schema( { vol.Optional(CONF_FEEDBACK): cv.boolean, vol.Optional(CONF_PROBABILITY, default=0): vol.Coerce(float), vol.Optional(CONF_SITE_IDS, default=["default"]): vol.All( cv.ensure_list, [cv.string] ), } ) }, extra=vol.ALLOW_EXTRA, ) INTENT_SCHEMA = vol.Schema( { vol.Required("input"): str, vol.Required("intent"): {vol.Required("intentName"): str}, vol.Optional("slots"): [ { vol.Required("slotName"): str, vol.Required("value"): { vol.Required("kind"): str, vol.Optional("value"): cv.match_all, vol.Optional("rawValue"): cv.match_all, }, } ], }, extra=vol.ALLOW_EXTRA, ) SERVICE_SCHEMA_SAY = vol.Schema( { vol.Required(ATTR_TEXT): str, vol.Optional(ATTR_SITE_ID, default="default"): str, vol.Optional(ATTR_CUSTOM_DATA, default=""): str, } ) SERVICE_SCHEMA_SAY_ACTION = vol.Schema( { vol.Required(ATTR_TEXT): str, vol.Optional(ATTR_SITE_ID, default="default"): str, vol.Optional(ATTR_CUSTOM_DATA, default=""): str, vol.Optional(ATTR_CAN_BE_ENQUEUED, default=True): cv.boolean, vol.Optional(ATTR_INTENT_FILTER): vol.All(cv.ensure_list), } ) SERVICE_SCHEMA_FEEDBACK = vol.Schema( {vol.Optional(ATTR_SITE_ID, default="default"): str} ) async def async_setup(hass, config): """Activate Snips component.""" @callback def async_set_feedback(site_ids, state): """Set Feedback sound state.""" site_ids = site_ids if site_ids else config[DOMAIN].get(CONF_SITE_IDS) topic = FEEDBACK_ON_TOPIC if state else FEEDBACK_OFF_TOPIC for site_id in site_ids: payload = json.dumps({"siteId": site_id}) hass.components.mqtt.async_publish( FEEDBACK_ON_TOPIC, "", qos=0, retain=False ) hass.components.mqtt.async_publish( topic, payload, qos=int(state), retain=state ) if CONF_FEEDBACK in config[DOMAIN]: async_set_feedback(None, config[DOMAIN][CONF_FEEDBACK]) async def message_received(msg): """Handle new messages on MQTT.""" _LOGGER.debug("New intent: %s", msg.payload) try: request = json.loads(msg.payload) except TypeError: _LOGGER.error("Received invalid JSON: %s", msg.payload) return if request["intent"]["confidenceScore"] < config[DOMAIN].get(CONF_PROBABILITY): _LOGGER.warning( "Intent below probaility threshold %s < %s", request["intent"]["confidenceScore"], config[DOMAIN].get(CONF_PROBABILITY), ) return try: request = INTENT_SCHEMA(request) except vol.Invalid as err: _LOGGER.error("Intent has invalid schema: %s. 
%s", err, request) return if request["intent"]["intentName"].startswith("user_"): intent_type = request["intent"]["intentName"].split("__")[-1] else: intent_type = request["intent"]["intentName"].split(":")[-1] slots = {} for slot in request.get("slots", []): slots[slot["slotName"]] = {"value": resolve_slot_values(slot)} slots["{}_raw".format(slot["slotName"])] = {"value": slot["rawValue"]} slots["site_id"] = {"value": request.get("siteId")} slots["session_id"] = {"value": request.get("sessionId")} slots["confidenceScore"] = {"value": request["intent"]["confidenceScore"]} try: intent_response = await intent.async_handle( hass, DOMAIN, intent_type, slots, request["input"] ) notification = {"sessionId": request.get("sessionId", "default")} if "plain" in intent_response.speech: notification["text"] = intent_response.speech["plain"]["speech"] _LOGGER.debug("send_response %s", json.dumps(notification)) mqtt.async_publish( hass, "hermes/dialogueManager/endSession", json.dumps(notification) ) except intent.UnknownIntent: _LOGGER.warning( "Received unknown intent %s", request["intent"]["intentName"] ) except intent.IntentError: _LOGGER.exception("Error while handling intent: %s", intent_type) await hass.components.mqtt.async_subscribe(INTENT_TOPIC, message_received) async def snips_say(call): """Send a Snips notification message.""" notification = { "siteId": call.data.get(ATTR_SITE_ID, "default"), "customData": call.data.get(ATTR_CUSTOM_DATA, ""), "init": {"type": "notification", "text": call.data.get(ATTR_TEXT)}, } mqtt.async_publish( hass, "hermes/dialogueManager/startSession", json.dumps(notification) ) return async def snips_say_action(call): """Send a Snips action message.""" notification = { "siteId": call.data.get(ATTR_SITE_ID, "default"), "customData": call.data.get(ATTR_CUSTOM_DATA, ""), "init": { "type": "action", "text": call.data.get(ATTR_TEXT), "canBeEnqueued": call.data.get(ATTR_CAN_BE_ENQUEUED, True), "intentFilter": call.data.get(ATTR_INTENT_FILTER, []), }, } mqtt.async_publish( hass, "hermes/dialogueManager/startSession", json.dumps(notification) ) return async def feedback_on(call): """Turn feedback sounds on.""" async_set_feedback(call.data.get(ATTR_SITE_ID), True) async def feedback_off(call): """Turn feedback sounds off.""" async_set_feedback(call.data.get(ATTR_SITE_ID), False) hass.services.async_register( DOMAIN, SERVICE_SAY, snips_say, schema=SERVICE_SCHEMA_SAY ) hass.services.async_register( DOMAIN, SERVICE_SAY_ACTION, snips_say_action, schema=SERVICE_SCHEMA_SAY_ACTION ) hass.services.async_register( DOMAIN, SERVICE_FEEDBACK_ON, feedback_on, schema=SERVICE_SCHEMA_FEEDBACK ) hass.services.async_register( DOMAIN, SERVICE_FEEDBACK_OFF, feedback_off, schema=SERVICE_SCHEMA_FEEDBACK ) return True def resolve_slot_values(slot): """Convert snips builtin types to usable values.""" if "value" in slot["value"]: value = slot["value"]["value"] else: value = slot["rawValue"] if slot.get("entity") == "snips/duration": delta = timedelta( weeks=slot["value"]["weeks"], days=slot["value"]["days"], hours=slot["value"]["hours"], minutes=slot["value"]["minutes"], seconds=slot["value"]["seconds"], ) value = delta.seconds return value
"""The tests for the mochad light platform.""" import unittest.mock as mock import pytest from homeassistant.components import light from homeassistant.components.mochad import light as mochad from homeassistant.setup import async_setup_component @pytest.fixture(autouse=True) def pymochad_mock(): """Mock pymochad.""" with mock.patch("homeassistant.components.mochad.light.device") as device: yield device @pytest.fixture def light_mock(hass, brightness): """Mock light.""" controller_mock = mock.MagicMock() dev_dict = {"address": "a1", "name": "fake_light", "brightness_levels": brightness} return mochad.MochadLight(hass, controller_mock, dev_dict) async def test_setup_adds_proper_devices(hass): """Test if setup adds devices.""" good_config = { "mochad": {}, "light": { "platform": "mochad", "devices": [{"name": "Light1", "address": "a1"}], }, } assert await async_setup_component(hass, light.DOMAIN, good_config) @pytest.mark.parametrize( "brightness,expected", [(32, "on"), (256, "xdim 255"), (64, "xdim 63")] ) async def test_turn_on_with_no_brightness(light_mock, expected): """Test turn_on.""" light_mock.turn_on() light_mock.light.send_cmd.assert_called_once_with(expected) @pytest.mark.parametrize( "brightness,expected", [ (32, [mock.call("on"), mock.call("dim 25")]), (256, [mock.call("xdim 45")]), (64, [mock.call("xdim 11")]), ], ) async def test_turn_on_with_brightness(light_mock, expected): """Test turn_on.""" light_mock.turn_on(brightness=45) light_mock.light.send_cmd.assert_has_calls(expected) @pytest.mark.parametrize("brightness", [32]) async def test_turn_off(light_mock): """Test turn_off.""" light_mock.turn_off() light_mock.light.send_cmd.assert_called_once_with("off")
w1ll1am23/home-assistant
tests/components/mochad/test_light.py
homeassistant/components/snips/__init__.py
"""Config flow for Network UPS Tools (NUT) integration.""" import logging import voluptuous as vol from homeassistant import config_entries, core, exceptions from homeassistant.const import ( CONF_ALIAS, CONF_BASE, CONF_HOST, CONF_PASSWORD, CONF_PORT, CONF_RESOURCES, CONF_SCAN_INTERVAL, CONF_USERNAME, ) from homeassistant.core import callback import homeassistant.helpers.config_validation as cv from . import PyNUTData, find_resources_in_config_entry from .const import ( DEFAULT_HOST, DEFAULT_PORT, DEFAULT_SCAN_INTERVAL, DOMAIN, KEY_STATUS, KEY_STATUS_DISPLAY, SENSOR_NAME, SENSOR_TYPES, ) _LOGGER = logging.getLogger(__name__) SENSOR_DICT = { sensor_id: sensor_spec[SENSOR_NAME] for sensor_id, sensor_spec in SENSOR_TYPES.items() } def _base_schema(discovery_info): """Generate base schema.""" base_schema = {} if not discovery_info: base_schema.update( { vol.Optional(CONF_HOST, default=DEFAULT_HOST): str, vol.Optional(CONF_PORT, default=DEFAULT_PORT): int, } ) base_schema.update( {vol.Optional(CONF_USERNAME): str, vol.Optional(CONF_PASSWORD): str} ) return vol.Schema(base_schema) def _resource_schema_base(available_resources, selected_resources): """Resource selection schema.""" known_available_resources = { sensor_id: sensor[SENSOR_NAME] for sensor_id, sensor in SENSOR_TYPES.items() if sensor_id in available_resources } if KEY_STATUS in known_available_resources: known_available_resources[KEY_STATUS_DISPLAY] = SENSOR_TYPES[ KEY_STATUS_DISPLAY ][SENSOR_NAME] return { vol.Required(CONF_RESOURCES, default=selected_resources): cv.multi_select( known_available_resources ) } def _ups_schema(ups_list): """UPS selection schema.""" return vol.Schema({vol.Required(CONF_ALIAS): vol.In(ups_list)}) async def validate_input(hass: core.HomeAssistant, data): """Validate the user input allows us to connect. Data has the keys from _base_schema with values provided by the user. 
""" host = data[CONF_HOST] port = data[CONF_PORT] alias = data.get(CONF_ALIAS) username = data.get(CONF_USERNAME) password = data.get(CONF_PASSWORD) data = PyNUTData(host, port, alias, username, password) await hass.async_add_executor_job(data.update) status = data.status if not status: raise CannotConnect return {"ups_list": data.ups_list, "available_resources": status} def _format_host_port_alias(user_input): """Format a host, port, and alias so it can be used for comparison or display.""" host = user_input[CONF_HOST] port = user_input[CONF_PORT] alias = user_input.get(CONF_ALIAS) if alias: return f"{alias}@{host}:{port}" return f"{host}:{port}" class ConfigFlow(config_entries.ConfigFlow, domain=DOMAIN): """Handle a config flow for Network UPS Tools (NUT).""" VERSION = 1 CONNECTION_CLASS = config_entries.CONN_CLASS_LOCAL_POLL def __init__(self): """Initialize the nut config flow.""" self.nut_config = {} self.available_resources = {} self.discovery_info = {} self.ups_list = None self.title = None async def async_step_zeroconf(self, discovery_info): """Prepare configuration for a discovered nut device.""" self.discovery_info = discovery_info await self._async_handle_discovery_without_unique_id() self.context["title_placeholders"] = { CONF_PORT: discovery_info.get(CONF_PORT, DEFAULT_PORT), CONF_HOST: discovery_info[CONF_HOST], } return await self.async_step_user() async def async_step_user(self, user_input=None): """Handle the user input.""" errors = {} if user_input is not None: if self.discovery_info: user_input.update( { CONF_HOST: self.discovery_info[CONF_HOST], CONF_PORT: self.discovery_info.get(CONF_PORT, DEFAULT_PORT), } ) info, errors = await self._async_validate_or_error(user_input) if not errors: self.nut_config.update(user_input) if len(info["ups_list"]) > 1: self.ups_list = info["ups_list"] return await self.async_step_ups() if self._host_port_alias_already_configured(self.nut_config): return self.async_abort(reason="already_configured") self.available_resources.update(info["available_resources"]) return await self.async_step_resources() return self.async_show_form( step_id="user", data_schema=_base_schema(self.discovery_info), errors=errors ) async def async_step_ups(self, user_input=None): """Handle the picking the ups.""" errors = {} if user_input is not None: self.nut_config.update(user_input) if self._host_port_alias_already_configured(self.nut_config): return self.async_abort(reason="already_configured") info, errors = await self._async_validate_or_error(self.nut_config) if not errors: self.available_resources.update(info["available_resources"]) return await self.async_step_resources() return self.async_show_form( step_id="ups", data_schema=_ups_schema(self.ups_list), errors=errors, ) async def async_step_resources(self, user_input=None): """Handle the picking the resources.""" if user_input is None: return self.async_show_form( step_id="resources", data_schema=vol.Schema( _resource_schema_base(self.available_resources, []) ), ) self.nut_config.update(user_input) title = _format_host_port_alias(self.nut_config) return self.async_create_entry(title=title, data=self.nut_config) def _host_port_alias_already_configured(self, user_input): """See if we already have a nut entry matching user input configured.""" existing_host_port_aliases = { _format_host_port_alias(entry.data) for entry in self._async_current_entries() if CONF_HOST in entry.data } return _format_host_port_alias(user_input) in existing_host_port_aliases async def _async_validate_or_error(self, config): errors = 
{} info = {} try: info = await validate_input(self.hass, config) except CannotConnect: errors[CONF_BASE] = "cannot_connect" except Exception: # pylint: disable=broad-except _LOGGER.exception("Unexpected exception") errors[CONF_BASE] = "unknown" return info, errors @staticmethod @callback def async_get_options_flow(config_entry): """Get the options flow for this handler.""" return OptionsFlowHandler(config_entry) class OptionsFlowHandler(config_entries.OptionsFlow): """Handle a option flow for nut.""" def __init__(self, config_entry: config_entries.ConfigEntry): """Initialize options flow.""" self.config_entry = config_entry async def async_step_init(self, user_input=None): """Handle options flow.""" if user_input is not None: return self.async_create_entry(title="", data=user_input) resources = find_resources_in_config_entry(self.config_entry) scan_interval = self.config_entry.options.get( CONF_SCAN_INTERVAL, DEFAULT_SCAN_INTERVAL ) errors = {} try: info = await validate_input(self.hass, self.config_entry.data) except CannotConnect: errors[CONF_BASE] = "cannot_connect" except Exception: # pylint: disable=broad-except _LOGGER.exception("Unexpected exception") errors[CONF_BASE] = "unknown" if errors: return self.async_show_form(step_id="abort", errors=errors) base_schema = _resource_schema_base(info["available_resources"], resources) base_schema[ vol.Optional(CONF_SCAN_INTERVAL, default=scan_interval) ] = cv.positive_int return self.async_show_form( step_id="init", data_schema=vol.Schema(base_schema), errors=errors ) async def async_step_abort(self, user_input=None): """Abort options flow.""" return self.async_create_entry(title="", data=self.config_entry.options) class CannotConnect(exceptions.HomeAssistantError): """Error to indicate we cannot connect."""
"""The tests for the mochad light platform.""" import unittest.mock as mock import pytest from homeassistant.components import light from homeassistant.components.mochad import light as mochad from homeassistant.setup import async_setup_component @pytest.fixture(autouse=True) def pymochad_mock(): """Mock pymochad.""" with mock.patch("homeassistant.components.mochad.light.device") as device: yield device @pytest.fixture def light_mock(hass, brightness): """Mock light.""" controller_mock = mock.MagicMock() dev_dict = {"address": "a1", "name": "fake_light", "brightness_levels": brightness} return mochad.MochadLight(hass, controller_mock, dev_dict) async def test_setup_adds_proper_devices(hass): """Test if setup adds devices.""" good_config = { "mochad": {}, "light": { "platform": "mochad", "devices": [{"name": "Light1", "address": "a1"}], }, } assert await async_setup_component(hass, light.DOMAIN, good_config) @pytest.mark.parametrize( "brightness,expected", [(32, "on"), (256, "xdim 255"), (64, "xdim 63")] ) async def test_turn_on_with_no_brightness(light_mock, expected): """Test turn_on.""" light_mock.turn_on() light_mock.light.send_cmd.assert_called_once_with(expected) @pytest.mark.parametrize( "brightness,expected", [ (32, [mock.call("on"), mock.call("dim 25")]), (256, [mock.call("xdim 45")]), (64, [mock.call("xdim 11")]), ], ) async def test_turn_on_with_brightness(light_mock, expected): """Test turn_on.""" light_mock.turn_on(brightness=45) light_mock.light.send_cmd.assert_has_calls(expected) @pytest.mark.parametrize("brightness", [32]) async def test_turn_off(light_mock): """Test turn_off.""" light_mock.turn_off() light_mock.light.send_cmd.assert_called_once_with("off")
w1ll1am23/home-assistant
tests/components/mochad/test_light.py
homeassistant/components/nut/config_flow.py
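The NUT config flow above keys duplicate detection on a formatted host/port/alias string. Below is a minimal standalone sketch of that formatting logic, re-implemented outside Home Assistant for illustration; the function name and example addresses are hypothetical.

# Standalone sketch (not the component's code) of the host/port/alias key the
# NUT config flow uses to detect already-configured servers.
def format_host_port_alias(host, port, alias=None):
    """Return "alias@host:port" when an alias is set, else "host:port"."""
    if alias:
        return f"{alias}@{host}:{port}"
    return f"{host}:{port}"


# Two UPS aliases on the same server produce distinct keys, so both can be
# added without tripping the "already_configured" abort.
print(format_host_port_alias("192.168.1.10", 3493))          # 192.168.1.10:3493
print(format_host_port_alias("192.168.1.10", 3493, "ups1"))  # ups1@192.168.1.10:3493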
"""The fritzbox_callmonitor integration.""" from asyncio import gather import logging from fritzconnection.core.exceptions import FritzConnectionException, FritzSecurityError from requests.exceptions import ConnectionError as RequestsConnectionError from homeassistant.const import CONF_HOST, CONF_PASSWORD, CONF_USERNAME from homeassistant.exceptions import ConfigEntryNotReady from .base import FritzBoxPhonebook from .const import ( CONF_PHONEBOOK, CONF_PREFIXES, DOMAIN, FRITZBOX_PHONEBOOK, PLATFORMS, UNDO_UPDATE_LISTENER, ) _LOGGER = logging.getLogger(__name__) async def async_setup_entry(hass, config_entry): """Set up the fritzbox_callmonitor platforms.""" fritzbox_phonebook = FritzBoxPhonebook( host=config_entry.data[CONF_HOST], username=config_entry.data[CONF_USERNAME], password=config_entry.data[CONF_PASSWORD], phonebook_id=config_entry.data[CONF_PHONEBOOK], prefixes=config_entry.options.get(CONF_PREFIXES), ) try: await hass.async_add_executor_job(fritzbox_phonebook.init_phonebook) except FritzSecurityError as ex: _LOGGER.error( "User has insufficient permissions to access AVM FRITZ!Box settings and its phonebooks: %s", ex, ) return False except FritzConnectionException as ex: _LOGGER.error("Invalid authentication: %s", ex) return False except RequestsConnectionError as ex: _LOGGER.error("Unable to connect to AVM FRITZ!Box call monitor: %s", ex) raise ConfigEntryNotReady from ex undo_listener = config_entry.add_update_listener(update_listener) hass.data.setdefault(DOMAIN, {}) hass.data[DOMAIN][config_entry.entry_id] = { FRITZBOX_PHONEBOOK: fritzbox_phonebook, UNDO_UPDATE_LISTENER: undo_listener, } for platform in PLATFORMS: hass.async_create_task( hass.config_entries.async_forward_entry_setup(config_entry, platform) ) return True async def async_unload_entry(hass, config_entry): """Unloading the fritzbox_callmonitor platforms.""" unload_ok = all( await gather( *[ hass.config_entries.async_forward_entry_unload(config_entry, platform) for platform in PLATFORMS ] ) ) hass.data[DOMAIN][config_entry.entry_id][UNDO_UPDATE_LISTENER]() if unload_ok: hass.data[DOMAIN].pop(config_entry.entry_id) return unload_ok async def update_listener(hass, config_entry): """Update listener to reload after option has changed.""" await hass.config_entries.async_reload(config_entry.entry_id)
"""The tests for the mochad light platform.""" import unittest.mock as mock import pytest from homeassistant.components import light from homeassistant.components.mochad import light as mochad from homeassistant.setup import async_setup_component @pytest.fixture(autouse=True) def pymochad_mock(): """Mock pymochad.""" with mock.patch("homeassistant.components.mochad.light.device") as device: yield device @pytest.fixture def light_mock(hass, brightness): """Mock light.""" controller_mock = mock.MagicMock() dev_dict = {"address": "a1", "name": "fake_light", "brightness_levels": brightness} return mochad.MochadLight(hass, controller_mock, dev_dict) async def test_setup_adds_proper_devices(hass): """Test if setup adds devices.""" good_config = { "mochad": {}, "light": { "platform": "mochad", "devices": [{"name": "Light1", "address": "a1"}], }, } assert await async_setup_component(hass, light.DOMAIN, good_config) @pytest.mark.parametrize( "brightness,expected", [(32, "on"), (256, "xdim 255"), (64, "xdim 63")] ) async def test_turn_on_with_no_brightness(light_mock, expected): """Test turn_on.""" light_mock.turn_on() light_mock.light.send_cmd.assert_called_once_with(expected) @pytest.mark.parametrize( "brightness,expected", [ (32, [mock.call("on"), mock.call("dim 25")]), (256, [mock.call("xdim 45")]), (64, [mock.call("xdim 11")]), ], ) async def test_turn_on_with_brightness(light_mock, expected): """Test turn_on.""" light_mock.turn_on(brightness=45) light_mock.light.send_cmd.assert_has_calls(expected) @pytest.mark.parametrize("brightness", [32]) async def test_turn_off(light_mock): """Test turn_off.""" light_mock.turn_off() light_mock.light.send_cmd.assert_called_once_with("off")
w1ll1am23/home-assistant
tests/components/mochad/test_light.py
homeassistant/components/fritzbox_callmonitor/__init__.py
"""Support for Axis binary sensors.""" from datetime import timedelta from axis.event_stream import ( CLASS_INPUT, CLASS_LIGHT, CLASS_MOTION, CLASS_OUTPUT, CLASS_PTZ, CLASS_SOUND, FenceGuard, LoiteringGuard, MotionGuard, ObjectAnalytics, Vmd4, ) from homeassistant.components.binary_sensor import ( DEVICE_CLASS_CONNECTIVITY, DEVICE_CLASS_LIGHT, DEVICE_CLASS_MOTION, DEVICE_CLASS_SOUND, BinarySensorEntity, ) from homeassistant.core import callback from homeassistant.helpers.dispatcher import async_dispatcher_connect from homeassistant.helpers.event import async_track_point_in_utc_time from homeassistant.util.dt import utcnow from .axis_base import AxisEventBase from .const import DOMAIN as AXIS_DOMAIN DEVICE_CLASS = { CLASS_INPUT: DEVICE_CLASS_CONNECTIVITY, CLASS_LIGHT: DEVICE_CLASS_LIGHT, CLASS_MOTION: DEVICE_CLASS_MOTION, CLASS_SOUND: DEVICE_CLASS_SOUND, } async def async_setup_entry(hass, config_entry, async_add_entities): """Set up a Axis binary sensor.""" device = hass.data[AXIS_DOMAIN][config_entry.unique_id] @callback def async_add_sensor(event_id): """Add binary sensor from Axis device.""" event = device.api.event[event_id] if event.CLASS not in (CLASS_OUTPUT, CLASS_PTZ) and not ( event.CLASS == CLASS_LIGHT and event.TYPE == "Light" ): async_add_entities([AxisBinarySensor(event, device)]) device.listeners.append( async_dispatcher_connect(hass, device.signal_new_event, async_add_sensor) ) class AxisBinarySensor(AxisEventBase, BinarySensorEntity): """Representation of a binary Axis event.""" def __init__(self, event, device): """Initialize the Axis binary sensor.""" super().__init__(event, device) self.cancel_scheduled_update = None @callback def update_callback(self, no_delay=False): """Update the sensor's state, if needed. Parameter no_delay is True when device_event_reachable is sent. """ @callback def scheduled_update(now): """Timer callback for sensor update.""" self.cancel_scheduled_update = None self.async_write_ha_state() if self.cancel_scheduled_update is not None: self.cancel_scheduled_update() self.cancel_scheduled_update = None if self.is_on or self.device.option_trigger_time == 0 or no_delay: self.async_write_ha_state() return self.cancel_scheduled_update = async_track_point_in_utc_time( self.hass, scheduled_update, utcnow() + timedelta(seconds=self.device.option_trigger_time), ) @property def is_on(self): """Return true if event is active.""" return self.event.is_tripped @property def name(self): """Return the name of the event.""" if ( self.event.CLASS == CLASS_INPUT and self.event.id in self.device.api.vapix.ports and self.device.api.vapix.ports[self.event.id].name ): return ( f"{self.device.name} {self.device.api.vapix.ports[self.event.id].name}" ) if self.event.CLASS == CLASS_MOTION: for event_class, event_data in ( (FenceGuard, self.device.api.vapix.fence_guard), (LoiteringGuard, self.device.api.vapix.loitering_guard), (MotionGuard, self.device.api.vapix.motion_guard), (ObjectAnalytics, self.device.api.vapix.object_analytics), (Vmd4, self.device.api.vapix.vmd4), ): if ( isinstance(self.event, event_class) and event_data and self.event.id in event_data ): return f"{self.device.name} {self.event.TYPE} {event_data[self.event.id].name}" return super().name @property def device_class(self): """Return the class of the sensor.""" return DEVICE_CLASS.get(self.event.CLASS)
"""The tests for the mochad light platform.""" import unittest.mock as mock import pytest from homeassistant.components import light from homeassistant.components.mochad import light as mochad from homeassistant.setup import async_setup_component @pytest.fixture(autouse=True) def pymochad_mock(): """Mock pymochad.""" with mock.patch("homeassistant.components.mochad.light.device") as device: yield device @pytest.fixture def light_mock(hass, brightness): """Mock light.""" controller_mock = mock.MagicMock() dev_dict = {"address": "a1", "name": "fake_light", "brightness_levels": brightness} return mochad.MochadLight(hass, controller_mock, dev_dict) async def test_setup_adds_proper_devices(hass): """Test if setup adds devices.""" good_config = { "mochad": {}, "light": { "platform": "mochad", "devices": [{"name": "Light1", "address": "a1"}], }, } assert await async_setup_component(hass, light.DOMAIN, good_config) @pytest.mark.parametrize( "brightness,expected", [(32, "on"), (256, "xdim 255"), (64, "xdim 63")] ) async def test_turn_on_with_no_brightness(light_mock, expected): """Test turn_on.""" light_mock.turn_on() light_mock.light.send_cmd.assert_called_once_with(expected) @pytest.mark.parametrize( "brightness,expected", [ (32, [mock.call("on"), mock.call("dim 25")]), (256, [mock.call("xdim 45")]), (64, [mock.call("xdim 11")]), ], ) async def test_turn_on_with_brightness(light_mock, expected): """Test turn_on.""" light_mock.turn_on(brightness=45) light_mock.light.send_cmd.assert_has_calls(expected) @pytest.mark.parametrize("brightness", [32]) async def test_turn_off(light_mock): """Test turn_off.""" light_mock.turn_off() light_mock.light.send_cmd.assert_called_once_with("off")
w1ll1am23/home-assistant
tests/components/mochad/test_light.py
homeassistant/components/axis/binary_sensor.py
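The Axis binary sensor above writes "on" states immediately but defers "off" states by a configurable trigger time, cancelling any pending write when a newer event arrives. The following is a self-contained asyncio sketch of that debounce shape, with a hypothetical class name and timings; it is not the entity's actual implementation.

# Standalone sketch of the delayed-off behaviour: immediate writes for "on",
# deferred writes for "off", and cancellation of a pending deferred write.
import asyncio


class DelayedOffSensor:
    def __init__(self, trigger_time: float):
        self.trigger_time = trigger_time
        self._pending = None
        self.state = False

    def _write(self, value: bool):
        self.state = value
        print(f"state written: {value}")

    def update(self, value: bool):
        # Cancel any scheduled write, mirroring cancel_scheduled_update above.
        if self._pending is not None:
            self._pending.cancel()
            self._pending = None
        if value or self.trigger_time == 0:
            self._write(value)
            return
        loop = asyncio.get_running_loop()
        self._pending = loop.call_later(self.trigger_time, self._write, value)


async def main():
    sensor = DelayedOffSensor(trigger_time=0.2)
    sensor.update(True)    # written immediately
    sensor.update(False)   # deferred by 0.2 s
    await asyncio.sleep(0.3)


asyncio.run(main())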
"""Support for Axis lights.""" from axis.event_stream import CLASS_LIGHT from homeassistant.components.light import ( ATTR_BRIGHTNESS, SUPPORT_BRIGHTNESS, LightEntity, ) from homeassistant.core import callback from homeassistant.helpers.dispatcher import async_dispatcher_connect from .axis_base import AxisEventBase from .const import DOMAIN as AXIS_DOMAIN async def async_setup_entry(hass, config_entry, async_add_entities): """Set up a Axis light.""" device = hass.data[AXIS_DOMAIN][config_entry.unique_id] if ( device.api.vapix.light_control is None or len(device.api.vapix.light_control) == 0 ): return @callback def async_add_sensor(event_id): """Add light from Axis device.""" event = device.api.event[event_id] if event.CLASS == CLASS_LIGHT and event.TYPE == "Light": async_add_entities([AxisLight(event, device)]) device.listeners.append( async_dispatcher_connect(hass, device.signal_new_event, async_add_sensor) ) class AxisLight(AxisEventBase, LightEntity): """Representation of a light Axis event.""" def __init__(self, event, device): """Initialize the Axis light.""" super().__init__(event, device) self.light_id = f"led{self.event.id}" self.current_intensity = 0 self.max_intensity = 0 self._features = SUPPORT_BRIGHTNESS async def async_added_to_hass(self) -> None: """Subscribe lights events.""" await super().async_added_to_hass() current_intensity = ( await self.device.api.vapix.light_control.get_current_intensity( self.light_id ) ) self.current_intensity = current_intensity["data"]["intensity"] max_intensity = await self.device.api.vapix.light_control.get_valid_intensity( self.light_id ) self.max_intensity = max_intensity["data"]["ranges"][0]["high"] @property def supported_features(self): """Flag supported features.""" return self._features @property def name(self): """Return the name of the light.""" light_type = self.device.api.vapix.light_control[self.light_id].light_type return f"{self.device.name} {light_type} {self.event.TYPE} {self.event.id}" @property def is_on(self): """Return true if light is on.""" return self.event.is_tripped @property def brightness(self): """Return the brightness of this light between 0..255.""" return int((self.current_intensity / self.max_intensity) * 255) async def async_turn_on(self, **kwargs): """Turn on light.""" if not self.is_on: await self.device.api.vapix.light_control.activate_light(self.light_id) if ATTR_BRIGHTNESS in kwargs: intensity = int((kwargs[ATTR_BRIGHTNESS] / 255) * self.max_intensity) await self.device.api.vapix.light_control.set_manual_intensity( self.light_id, intensity ) async def async_turn_off(self, **kwargs): """Turn off light.""" if self.is_on: await self.device.api.vapix.light_control.deactivate_light(self.light_id) async def async_update(self): """Update brightness.""" current_intensity = ( await self.device.api.vapix.light_control.get_current_intensity( self.light_id ) ) self.current_intensity = current_intensity["data"]["intensity"] @property def should_poll(self): """Brightness needs polling.""" return True
"""The tests for the mochad light platform.""" import unittest.mock as mock import pytest from homeassistant.components import light from homeassistant.components.mochad import light as mochad from homeassistant.setup import async_setup_component @pytest.fixture(autouse=True) def pymochad_mock(): """Mock pymochad.""" with mock.patch("homeassistant.components.mochad.light.device") as device: yield device @pytest.fixture def light_mock(hass, brightness): """Mock light.""" controller_mock = mock.MagicMock() dev_dict = {"address": "a1", "name": "fake_light", "brightness_levels": brightness} return mochad.MochadLight(hass, controller_mock, dev_dict) async def test_setup_adds_proper_devices(hass): """Test if setup adds devices.""" good_config = { "mochad": {}, "light": { "platform": "mochad", "devices": [{"name": "Light1", "address": "a1"}], }, } assert await async_setup_component(hass, light.DOMAIN, good_config) @pytest.mark.parametrize( "brightness,expected", [(32, "on"), (256, "xdim 255"), (64, "xdim 63")] ) async def test_turn_on_with_no_brightness(light_mock, expected): """Test turn_on.""" light_mock.turn_on() light_mock.light.send_cmd.assert_called_once_with(expected) @pytest.mark.parametrize( "brightness,expected", [ (32, [mock.call("on"), mock.call("dim 25")]), (256, [mock.call("xdim 45")]), (64, [mock.call("xdim 11")]), ], ) async def test_turn_on_with_brightness(light_mock, expected): """Test turn_on.""" light_mock.turn_on(brightness=45) light_mock.light.send_cmd.assert_has_calls(expected) @pytest.mark.parametrize("brightness", [32]) async def test_turn_off(light_mock): """Test turn_off.""" light_mock.turn_off() light_mock.light.send_cmd.assert_called_once_with("off")
w1ll1am23/home-assistant
tests/components/mochad/test_light.py
homeassistant/components/axis/light.py
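The Axis light entity above converts between Home Assistant's 0..255 brightness scale and the device's own intensity range. A minimal standalone sketch of that scaling, with hypothetical function names and an assumed device maximum of 1000:

# Sketch of the brightness scaling used by the Axis light entity:
# HA brightness (0..255) <-> device intensity (0..max_intensity).
def intensity_to_brightness(current: int, maximum: int) -> int:
    return int((current / maximum) * 255)


def brightness_to_intensity(brightness: int, maximum: int) -> int:
    return int((brightness / 255) * maximum)


max_intensity = 1000  # hypothetical device maximum
print(intensity_to_brightness(500, max_intensity))   # 127
print(brightness_to_intensity(255, max_intensity))   # 1000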
"""The Keenetic Client class.""" import logging from homeassistant.components.binary_sensor import ( DEVICE_CLASS_CONNECTIVITY, BinarySensorEntity, ) from homeassistant.config_entries import ConfigEntry from homeassistant.core import HomeAssistant from homeassistant.helpers.dispatcher import async_dispatcher_connect from . import KeeneticRouter from .const import DOMAIN, ROUTER _LOGGER = logging.getLogger(__name__) async def async_setup_entry( hass: HomeAssistant, config_entry: ConfigEntry, async_add_entities ): """Set up device tracker for Keenetic NDMS2 component.""" router: KeeneticRouter = hass.data[DOMAIN][config_entry.entry_id][ROUTER] async_add_entities([RouterOnlineBinarySensor(router)]) class RouterOnlineBinarySensor(BinarySensorEntity): """Representation router connection status.""" def __init__(self, router: KeeneticRouter): """Initialize the APCUPSd binary device.""" self._router = router @property def name(self): """Return the name of the online status sensor.""" return f"{self._router.name} Online" @property def unique_id(self) -> str: """Return a unique identifier for this device.""" return f"online_{self._router.config_entry.entry_id}" @property def is_on(self): """Return true if the UPS is online, else false.""" return self._router.available @property def device_class(self): """Return the class of this device, from component DEVICE_CLASSES.""" return DEVICE_CLASS_CONNECTIVITY @property def should_poll(self) -> bool: """Return False since entity pushes its state to HA.""" return False @property def device_info(self): """Return a client description for device registry.""" return self._router.device_info async def async_added_to_hass(self): """Client entity created.""" self.async_on_remove( async_dispatcher_connect( self.hass, self._router.signal_update, self.async_write_ha_state, ) )
"""The tests for the mochad light platform.""" import unittest.mock as mock import pytest from homeassistant.components import light from homeassistant.components.mochad import light as mochad from homeassistant.setup import async_setup_component @pytest.fixture(autouse=True) def pymochad_mock(): """Mock pymochad.""" with mock.patch("homeassistant.components.mochad.light.device") as device: yield device @pytest.fixture def light_mock(hass, brightness): """Mock light.""" controller_mock = mock.MagicMock() dev_dict = {"address": "a1", "name": "fake_light", "brightness_levels": brightness} return mochad.MochadLight(hass, controller_mock, dev_dict) async def test_setup_adds_proper_devices(hass): """Test if setup adds devices.""" good_config = { "mochad": {}, "light": { "platform": "mochad", "devices": [{"name": "Light1", "address": "a1"}], }, } assert await async_setup_component(hass, light.DOMAIN, good_config) @pytest.mark.parametrize( "brightness,expected", [(32, "on"), (256, "xdim 255"), (64, "xdim 63")] ) async def test_turn_on_with_no_brightness(light_mock, expected): """Test turn_on.""" light_mock.turn_on() light_mock.light.send_cmd.assert_called_once_with(expected) @pytest.mark.parametrize( "brightness,expected", [ (32, [mock.call("on"), mock.call("dim 25")]), (256, [mock.call("xdim 45")]), (64, [mock.call("xdim 11")]), ], ) async def test_turn_on_with_brightness(light_mock, expected): """Test turn_on.""" light_mock.turn_on(brightness=45) light_mock.light.send_cmd.assert_has_calls(expected) @pytest.mark.parametrize("brightness", [32]) async def test_turn_off(light_mock): """Test turn_off.""" light_mock.turn_off() light_mock.light.send_cmd.assert_called_once_with("off")
w1ll1am23/home-assistant
tests/components/mochad/test_light.py
homeassistant/components/keenetic_ndms2/binary_sensor.py
"""Handler for Hass.io.""" import asyncio import logging import os import aiohttp from homeassistant.components.http import ( CONF_SERVER_HOST, CONF_SERVER_PORT, CONF_SSL_CERTIFICATE, ) from homeassistant.const import HTTP_BAD_REQUEST, HTTP_OK, SERVER_PORT from .const import X_HASSIO _LOGGER = logging.getLogger(__name__) class HassioAPIError(RuntimeError): """Return if a API trow a error.""" def _api_bool(funct): """Return a boolean.""" async def _wrapper(*argv, **kwargs): """Wrap function.""" try: data = await funct(*argv, **kwargs) return data["result"] == "ok" except HassioAPIError: return False return _wrapper def api_data(funct): """Return data of an api.""" async def _wrapper(*argv, **kwargs): """Wrap function.""" data = await funct(*argv, **kwargs) if data["result"] == "ok": return data["data"] raise HassioAPIError(data["message"]) return _wrapper class HassIO: """Small API wrapper for Hass.io.""" def __init__( self, loop: asyncio.AbstractEventLoop, websession: aiohttp.ClientSession, ip: str, ) -> None: """Initialize Hass.io API.""" self.loop = loop self.websession = websession self._ip = ip @_api_bool def is_connected(self): """Return true if it connected to Hass.io supervisor. This method return a coroutine. """ return self.send_command("/supervisor/ping", method="get", timeout=15) @api_data def get_info(self): """Return generic Supervisor information. This method return a coroutine. """ return self.send_command("/info", method="get") @api_data def get_host_info(self): """Return data for Host. This method return a coroutine. """ return self.send_command("/host/info", method="get") @api_data def get_os_info(self): """Return data for the OS. This method return a coroutine. """ return self.send_command("/os/info", method="get") @api_data def get_core_info(self): """Return data for Home Asssistant Core. This method returns a coroutine. """ return self.send_command("/core/info", method="get") @api_data def get_supervisor_info(self): """Return data for the Supervisor. This method returns a coroutine. """ return self.send_command("/supervisor/info", method="get") @api_data def get_addon_info(self, addon): """Return data for a Add-on. This method return a coroutine. """ return self.send_command(f"/addons/{addon}/info", method="get") @api_data def get_ingress_panels(self): """Return data for Add-on ingress panels. This method return a coroutine. """ return self.send_command("/ingress/panels", method="get") @_api_bool def restart_homeassistant(self): """Restart Home-Assistant container. This method return a coroutine. """ return self.send_command("/homeassistant/restart") @_api_bool def stop_homeassistant(self): """Stop Home-Assistant container. This method return a coroutine. """ return self.send_command("/homeassistant/stop") @api_data def retrieve_discovery_messages(self): """Return all discovery data from Hass.io API. This method return a coroutine. """ return self.send_command("/discovery", method="get", timeout=60) @api_data def get_discovery_message(self, uuid): """Return a single discovery data message. This method return a coroutine. 
""" return self.send_command(f"/discovery/{uuid}", method="get") @_api_bool async def update_hass_api(self, http_config, refresh_token): """Update Home Assistant API data on Hass.io.""" port = http_config.get(CONF_SERVER_PORT) or SERVER_PORT options = { "ssl": CONF_SSL_CERTIFICATE in http_config, "port": port, "watchdog": True, "refresh_token": refresh_token.token, } if http_config.get(CONF_SERVER_HOST) is not None: options["watchdog"] = False _LOGGER.warning( "Found incompatible HTTP option 'server_host'. Watchdog feature disabled" ) return await self.send_command("/homeassistant/options", payload=options) @_api_bool def update_hass_timezone(self, timezone): """Update Home-Assistant timezone data on Hass.io. This method return a coroutine. """ return self.send_command("/supervisor/options", payload={"timezone": timezone}) @_api_bool def update_diagnostics(self, diagnostics: bool): """Update Supervisor diagnostics setting. This method return a coroutine. """ return self.send_command( "/supervisor/options", payload={"diagnostics": diagnostics} ) async def send_command(self, command, method="post", payload=None, timeout=10): """Send API command to Hass.io. This method is a coroutine. """ try: request = await self.websession.request( method, f"http://{self._ip}{command}", json=payload, headers={X_HASSIO: os.environ.get("HASSIO_TOKEN", "")}, timeout=aiohttp.ClientTimeout(total=timeout), ) if request.status not in (HTTP_OK, HTTP_BAD_REQUEST): _LOGGER.error("%s return code %d", command, request.status) raise HassioAPIError() answer = await request.json() return answer except asyncio.TimeoutError: _LOGGER.error("Timeout on %s request", command) except aiohttp.ClientError as err: _LOGGER.error("Client error on %s request %s", command, err) raise HassioAPIError()
"""The tests for the mochad light platform.""" import unittest.mock as mock import pytest from homeassistant.components import light from homeassistant.components.mochad import light as mochad from homeassistant.setup import async_setup_component @pytest.fixture(autouse=True) def pymochad_mock(): """Mock pymochad.""" with mock.patch("homeassistant.components.mochad.light.device") as device: yield device @pytest.fixture def light_mock(hass, brightness): """Mock light.""" controller_mock = mock.MagicMock() dev_dict = {"address": "a1", "name": "fake_light", "brightness_levels": brightness} return mochad.MochadLight(hass, controller_mock, dev_dict) async def test_setup_adds_proper_devices(hass): """Test if setup adds devices.""" good_config = { "mochad": {}, "light": { "platform": "mochad", "devices": [{"name": "Light1", "address": "a1"}], }, } assert await async_setup_component(hass, light.DOMAIN, good_config) @pytest.mark.parametrize( "brightness,expected", [(32, "on"), (256, "xdim 255"), (64, "xdim 63")] ) async def test_turn_on_with_no_brightness(light_mock, expected): """Test turn_on.""" light_mock.turn_on() light_mock.light.send_cmd.assert_called_once_with(expected) @pytest.mark.parametrize( "brightness,expected", [ (32, [mock.call("on"), mock.call("dim 25")]), (256, [mock.call("xdim 45")]), (64, [mock.call("xdim 11")]), ], ) async def test_turn_on_with_brightness(light_mock, expected): """Test turn_on.""" light_mock.turn_on(brightness=45) light_mock.light.send_cmd.assert_has_calls(expected) @pytest.mark.parametrize("brightness", [32]) async def test_turn_off(light_mock): """Test turn_off.""" light_mock.turn_off() light_mock.light.send_cmd.assert_called_once_with("off")
w1ll1am23/home-assistant
tests/components/mochad/test_light.py
homeassistant/components/hassio/handler.py
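The Hass.io handler above wraps its API calls in two decorators: one collapses the supervisor's response to a boolean, the other unwraps the "data" field or raises. The sketch below reproduces that pattern in isolation; the fake payloads and the ping/get_info stubs are fabricated for illustration and do not talk to a real supervisor.

# Minimal sketch of the _api_bool / api_data decorator pattern.
import asyncio


class HassioAPIError(RuntimeError):
    """Raised when the supervisor API reports an error."""


def api_bool(funct):
    async def _wrapper(*args, **kwargs):
        try:
            data = await funct(*args, **kwargs)
            return data["result"] == "ok"
        except HassioAPIError:
            return False
    return _wrapper


def api_data(funct):
    async def _wrapper(*args, **kwargs):
        data = await funct(*args, **kwargs)
        if data["result"] == "ok":
            return data["data"]
        raise HassioAPIError(data["message"])
    return _wrapper


@api_bool
async def ping():
    # Stand-in for send_command("/supervisor/ping", ...).
    return {"result": "ok"}


@api_data
async def get_info():
    # Stand-in for send_command("/info", ...).
    return {"result": "ok", "data": {"version": "example"}}


print(asyncio.run(ping()))      # True
print(asyncio.run(get_info()))  # {'version': 'example'}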
"""Support to select an option from a list.""" from __future__ import annotations import logging import voluptuous as vol from homeassistant.const import ( ATTR_EDITABLE, ATTR_OPTION, CONF_ICON, CONF_ID, CONF_NAME, SERVICE_RELOAD, ) from homeassistant.core import HomeAssistant, callback from homeassistant.helpers import collection import homeassistant.helpers.config_validation as cv from homeassistant.helpers.entity_component import EntityComponent from homeassistant.helpers.restore_state import RestoreEntity import homeassistant.helpers.service from homeassistant.helpers.storage import Store from homeassistant.helpers.typing import ConfigType, ServiceCallType _LOGGER = logging.getLogger(__name__) DOMAIN = "input_select" CONF_INITIAL = "initial" CONF_OPTIONS = "options" ATTR_OPTIONS = "options" ATTR_CYCLE = "cycle" SERVICE_SELECT_OPTION = "select_option" SERVICE_SELECT_NEXT = "select_next" SERVICE_SELECT_PREVIOUS = "select_previous" SERVICE_SELECT_FIRST = "select_first" SERVICE_SELECT_LAST = "select_last" SERVICE_SET_OPTIONS = "set_options" STORAGE_KEY = DOMAIN STORAGE_VERSION = 1 CREATE_FIELDS = { vol.Required(CONF_NAME): vol.All(str, vol.Length(min=1)), vol.Required(CONF_OPTIONS): vol.All(cv.ensure_list, vol.Length(min=1), [cv.string]), vol.Optional(CONF_INITIAL): cv.string, vol.Optional(CONF_ICON): cv.icon, } UPDATE_FIELDS = { vol.Optional(CONF_NAME): cv.string, vol.Optional(CONF_OPTIONS): vol.All(cv.ensure_list, vol.Length(min=1), [cv.string]), vol.Optional(CONF_INITIAL): cv.string, vol.Optional(CONF_ICON): cv.icon, } def _cv_input_select(cfg): """Configure validation helper for input select (voluptuous).""" options = cfg[CONF_OPTIONS] initial = cfg.get(CONF_INITIAL) if initial is not None and initial not in options: raise vol.Invalid( f"initial state {initial} is not part of the options: {','.join(options)}" ) return cfg CONFIG_SCHEMA = vol.Schema( { DOMAIN: cv.schema_with_slug_keys( vol.All( { vol.Optional(CONF_NAME): cv.string, vol.Required(CONF_OPTIONS): vol.All( cv.ensure_list, vol.Length(min=1), [cv.string] ), vol.Optional(CONF_INITIAL): cv.string, vol.Optional(CONF_ICON): cv.icon, }, _cv_input_select, ) ) }, extra=vol.ALLOW_EXTRA, ) RELOAD_SERVICE_SCHEMA = vol.Schema({}) async def async_setup(hass: HomeAssistant, config: ConfigType) -> bool: """Set up an input select.""" component = EntityComponent(_LOGGER, DOMAIN, hass) id_manager = collection.IDManager() yaml_collection = collection.YamlCollection( logging.getLogger(f"{__name__}.yaml_collection"), id_manager ) collection.sync_entity_lifecycle( hass, DOMAIN, DOMAIN, component, yaml_collection, InputSelect.from_yaml ) storage_collection = InputSelectStorageCollection( Store(hass, STORAGE_VERSION, STORAGE_KEY), logging.getLogger(f"{__name__}.storage_collection"), id_manager, ) collection.sync_entity_lifecycle( hass, DOMAIN, DOMAIN, component, storage_collection, InputSelect ) await yaml_collection.async_load( [{CONF_ID: id_, **cfg} for id_, cfg in config.get(DOMAIN, {}).items()] ) await storage_collection.async_load() collection.StorageCollectionWebsocket( storage_collection, DOMAIN, DOMAIN, CREATE_FIELDS, UPDATE_FIELDS ).async_setup(hass) async def reload_service_handler(service_call: ServiceCallType) -> None: """Reload yaml entities.""" conf = await component.async_prepare_reload(skip_reset=True) if conf is None: conf = {DOMAIN: {}} await yaml_collection.async_load( [{CONF_ID: id_, **cfg} for id_, cfg in conf.get(DOMAIN, {}).items()] ) homeassistant.helpers.service.async_register_admin_service( hass, DOMAIN, SERVICE_RELOAD, 
reload_service_handler, schema=RELOAD_SERVICE_SCHEMA, ) component.async_register_entity_service( SERVICE_SELECT_OPTION, {vol.Required(ATTR_OPTION): cv.string}, "async_select_option", ) component.async_register_entity_service( SERVICE_SELECT_NEXT, {vol.Optional(ATTR_CYCLE, default=True): bool}, "async_next", ) component.async_register_entity_service( SERVICE_SELECT_PREVIOUS, {vol.Optional(ATTR_CYCLE, default=True): bool}, "async_previous", ) component.async_register_entity_service( SERVICE_SELECT_FIRST, {}, callback(lambda entity, call: entity.async_select_index(0)), ) component.async_register_entity_service( SERVICE_SELECT_LAST, {}, callback(lambda entity, call: entity.async_select_index(-1)), ) component.async_register_entity_service( SERVICE_SET_OPTIONS, { vol.Required(ATTR_OPTIONS): vol.All( cv.ensure_list, vol.Length(min=1), [cv.string] ) }, "async_set_options", ) return True class InputSelectStorageCollection(collection.StorageCollection): """Input storage based collection.""" CREATE_SCHEMA = vol.Schema(vol.All(CREATE_FIELDS, _cv_input_select)) UPDATE_SCHEMA = vol.Schema(UPDATE_FIELDS) async def _process_create_data(self, data: dict) -> dict: """Validate the config is valid.""" return self.CREATE_SCHEMA(data) @callback def _get_suggested_id(self, info: dict) -> str: """Suggest an ID based on the config.""" return info[CONF_NAME] async def _update_data(self, data: dict, update_data: dict) -> dict: """Return a new updated data object.""" update_data = self.UPDATE_SCHEMA(update_data) return _cv_input_select({**data, **update_data}) class InputSelect(RestoreEntity): """Representation of a select input.""" def __init__(self, config: dict): """Initialize a select input.""" self._config = config self.editable = True self._current_option = config.get(CONF_INITIAL) @classmethod def from_yaml(cls, config: dict) -> InputSelect: """Return entity instance initialized from yaml storage.""" input_select = cls(config) input_select.entity_id = f"{DOMAIN}.{config[CONF_ID]}" input_select.editable = False return input_select async def async_added_to_hass(self): """Run when entity about to be added.""" await super().async_added_to_hass() if self._current_option is not None: return state = await self.async_get_last_state() if not state or state.state not in self._options: self._current_option = self._options[0] else: self._current_option = state.state @property def should_poll(self): """If entity should be polled.""" return False @property def name(self): """Return the name of the select input.""" return self._config.get(CONF_NAME) @property def icon(self): """Return the icon to be used for this entity.""" return self._config.get(CONF_ICON) @property def _options(self) -> list[str]: """Return a list of selection options.""" return self._config[CONF_OPTIONS] @property def state(self): """Return the state of the component.""" return self._current_option @property def extra_state_attributes(self): """Return the state attributes.""" return {ATTR_OPTIONS: self._config[ATTR_OPTIONS], ATTR_EDITABLE: self.editable} @property def unique_id(self) -> str | None: """Return unique id for the entity.""" return self._config[CONF_ID] @callback def async_select_option(self, option): """Select new option.""" if option not in self._options: _LOGGER.warning( "Invalid option: %s (possible options: %s)", option, ", ".join(self._options), ) return self._current_option = option self.async_write_ha_state() @callback def async_select_index(self, idx): """Select new option by index.""" new_index = idx % len(self._options) 
self._current_option = self._options[new_index] self.async_write_ha_state() @callback def async_offset_index(self, offset, cycle): """Offset current index.""" current_index = self._options.index(self._current_option) new_index = current_index + offset if cycle: new_index = new_index % len(self._options) else: if new_index < 0: new_index = 0 elif new_index >= len(self._options): new_index = len(self._options) - 1 self._current_option = self._options[new_index] self.async_write_ha_state() @callback def async_next(self, cycle): """Select next option.""" self.async_offset_index(1, cycle) @callback def async_previous(self, cycle): """Select previous option.""" self.async_offset_index(-1, cycle) @callback def async_set_options(self, options): """Set options.""" self._current_option = options[0] self._config[CONF_OPTIONS] = options self.async_write_ha_state() async def async_update_config(self, config: dict) -> None: """Handle when the config is updated.""" self._config = config self.async_write_ha_state()
"""The tests for the mochad light platform.""" import unittest.mock as mock import pytest from homeassistant.components import light from homeassistant.components.mochad import light as mochad from homeassistant.setup import async_setup_component @pytest.fixture(autouse=True) def pymochad_mock(): """Mock pymochad.""" with mock.patch("homeassistant.components.mochad.light.device") as device: yield device @pytest.fixture def light_mock(hass, brightness): """Mock light.""" controller_mock = mock.MagicMock() dev_dict = {"address": "a1", "name": "fake_light", "brightness_levels": brightness} return mochad.MochadLight(hass, controller_mock, dev_dict) async def test_setup_adds_proper_devices(hass): """Test if setup adds devices.""" good_config = { "mochad": {}, "light": { "platform": "mochad", "devices": [{"name": "Light1", "address": "a1"}], }, } assert await async_setup_component(hass, light.DOMAIN, good_config) @pytest.mark.parametrize( "brightness,expected", [(32, "on"), (256, "xdim 255"), (64, "xdim 63")] ) async def test_turn_on_with_no_brightness(light_mock, expected): """Test turn_on.""" light_mock.turn_on() light_mock.light.send_cmd.assert_called_once_with(expected) @pytest.mark.parametrize( "brightness,expected", [ (32, [mock.call("on"), mock.call("dim 25")]), (256, [mock.call("xdim 45")]), (64, [mock.call("xdim 11")]), ], ) async def test_turn_on_with_brightness(light_mock, expected): """Test turn_on.""" light_mock.turn_on(brightness=45) light_mock.light.send_cmd.assert_has_calls(expected) @pytest.mark.parametrize("brightness", [32]) async def test_turn_off(light_mock): """Test turn_off.""" light_mock.turn_off() light_mock.light.send_cmd.assert_called_once_with("off")
w1ll1am23/home-assistant
tests/components/mochad/test_light.py
homeassistant/components/input_select/__init__.py
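The input_select entity above moves through its options by offsetting the current index, wrapping when cycle is true and clamping at the ends otherwise. A standalone sketch of that arithmetic, with a hypothetical helper name and example options:

# Sketch of the option-cycling arithmetic from async_offset_index above.
def offset_index(options, current, offset, cycle):
    index = options.index(current) + offset
    if cycle:
        index %= len(options)
    else:
        index = max(0, min(index, len(options) - 1))
    return options[index]


options = ["low", "medium", "high"]
print(offset_index(options, "high", 1, cycle=True))   # low  (wraps around)
print(offset_index(options, "high", 1, cycle=False))  # high (clamped)
print(offset_index(options, "low", -1, cycle=True))   # high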
"""BleBox climate entity.""" from homeassistant.components.climate import ClimateEntity from homeassistant.components.climate.const import ( CURRENT_HVAC_HEAT, CURRENT_HVAC_IDLE, CURRENT_HVAC_OFF, HVAC_MODE_HEAT, HVAC_MODE_OFF, SUPPORT_TARGET_TEMPERATURE, ) from homeassistant.const import ATTR_TEMPERATURE, TEMP_CELSIUS from . import BleBoxEntity, create_blebox_entities async def async_setup_entry(hass, config_entry, async_add_entities): """Set up a BleBox climate entity.""" create_blebox_entities( hass, config_entry, async_add_entities, BleBoxClimateEntity, "climates" ) class BleBoxClimateEntity(BleBoxEntity, ClimateEntity): """Representation of a BleBox climate feature (saunaBox).""" @property def supported_features(self): """Return the supported climate features.""" return SUPPORT_TARGET_TEMPERATURE @property def hvac_mode(self): """Return the desired HVAC mode.""" if self._feature.is_on is None: return None return HVAC_MODE_HEAT if self._feature.is_on else HVAC_MODE_OFF @property def hvac_action(self): """Return the actual current HVAC action.""" is_on = self._feature.is_on if not is_on: return None if is_on is None else CURRENT_HVAC_OFF # NOTE: In practice, there's no need to handle case when is_heating is None return CURRENT_HVAC_HEAT if self._feature.is_heating else CURRENT_HVAC_IDLE @property def hvac_modes(self): """Return a list of possible HVAC modes.""" return [HVAC_MODE_OFF, HVAC_MODE_HEAT] @property def temperature_unit(self): """Return the temperature unit.""" return TEMP_CELSIUS @property def max_temp(self): """Return the maximum temperature supported.""" return self._feature.max_temp @property def min_temp(self): """Return the maximum temperature supported.""" return self._feature.min_temp @property def current_temperature(self): """Return the current temperature.""" return self._feature.current @property def target_temperature(self): """Return the desired thermostat temperature.""" return self._feature.desired async def async_set_hvac_mode(self, hvac_mode): """Set the climate entity mode.""" if hvac_mode == HVAC_MODE_HEAT: await self._feature.async_on() return await self._feature.async_off() async def async_set_temperature(self, **kwargs): """Set the thermostat temperature.""" value = kwargs[ATTR_TEMPERATURE] await self._feature.async_set_temperature(value)
"""The tests for the mochad light platform.""" import unittest.mock as mock import pytest from homeassistant.components import light from homeassistant.components.mochad import light as mochad from homeassistant.setup import async_setup_component @pytest.fixture(autouse=True) def pymochad_mock(): """Mock pymochad.""" with mock.patch("homeassistant.components.mochad.light.device") as device: yield device @pytest.fixture def light_mock(hass, brightness): """Mock light.""" controller_mock = mock.MagicMock() dev_dict = {"address": "a1", "name": "fake_light", "brightness_levels": brightness} return mochad.MochadLight(hass, controller_mock, dev_dict) async def test_setup_adds_proper_devices(hass): """Test if setup adds devices.""" good_config = { "mochad": {}, "light": { "platform": "mochad", "devices": [{"name": "Light1", "address": "a1"}], }, } assert await async_setup_component(hass, light.DOMAIN, good_config) @pytest.mark.parametrize( "brightness,expected", [(32, "on"), (256, "xdim 255"), (64, "xdim 63")] ) async def test_turn_on_with_no_brightness(light_mock, expected): """Test turn_on.""" light_mock.turn_on() light_mock.light.send_cmd.assert_called_once_with(expected) @pytest.mark.parametrize( "brightness,expected", [ (32, [mock.call("on"), mock.call("dim 25")]), (256, [mock.call("xdim 45")]), (64, [mock.call("xdim 11")]), ], ) async def test_turn_on_with_brightness(light_mock, expected): """Test turn_on.""" light_mock.turn_on(brightness=45) light_mock.light.send_cmd.assert_has_calls(expected) @pytest.mark.parametrize("brightness", [32]) async def test_turn_off(light_mock): """Test turn_off.""" light_mock.turn_off() light_mock.light.send_cmd.assert_called_once_with("off")
w1ll1am23/home-assistant
tests/components/mochad/test_light.py
homeassistant/components/blebox/climate.py
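The BleBox climate entity above distinguishes three cases when reporting hvac_action: an unknown on/off state propagates as None, a known "off" maps to the off action, and a known "on" maps to heating or idle depending on is_heating. A small standalone sketch of that decision; the string literals stand in for the homeassistant CURRENT_HVAC_* constants and are assumptions of this sketch.

# Sketch of the hvac_action decision in the BleBox climate entity.
CURRENT_HVAC_OFF = "off"
CURRENT_HVAC_HEAT = "heating"
CURRENT_HVAC_IDLE = "idle"


def hvac_action(is_on, is_heating):
    if not is_on:
        # Unknown state propagates as None; a known "off" maps to the off action.
        return None if is_on is None else CURRENT_HVAC_OFF
    return CURRENT_HVAC_HEAT if is_heating else CURRENT_HVAC_IDLE


print(hvac_action(None, None))    # None  (state not known yet)
print(hvac_action(False, False))  # off
print(hvac_action(True, True))    # heating
print(hvac_action(True, False))   # idle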
"""Component to embed TP-Link smart home devices.""" import logging import voluptuous as vol from homeassistant import config_entries from homeassistant.const import CONF_HOST import homeassistant.helpers.config_validation as cv from homeassistant.helpers.typing import ConfigType, HomeAssistantType from .common import ( ATTR_CONFIG, CONF_DIMMER, CONF_DISCOVERY, CONF_LIGHT, CONF_STRIP, CONF_SWITCH, SmartDevices, async_discover_devices, get_static_devices, ) _LOGGER = logging.getLogger(__name__) DOMAIN = "tplink" TPLINK_HOST_SCHEMA = vol.Schema({vol.Required(CONF_HOST): cv.string}) CONFIG_SCHEMA = vol.Schema( { DOMAIN: vol.Schema( { vol.Optional(CONF_LIGHT, default=[]): vol.All( cv.ensure_list, [TPLINK_HOST_SCHEMA] ), vol.Optional(CONF_SWITCH, default=[]): vol.All( cv.ensure_list, [TPLINK_HOST_SCHEMA] ), vol.Optional(CONF_STRIP, default=[]): vol.All( cv.ensure_list, [TPLINK_HOST_SCHEMA] ), vol.Optional(CONF_DIMMER, default=[]): vol.All( cv.ensure_list, [TPLINK_HOST_SCHEMA] ), vol.Optional(CONF_DISCOVERY, default=True): cv.boolean, } ) }, extra=vol.ALLOW_EXTRA, ) async def async_setup(hass, config): """Set up the TP-Link component.""" conf = config.get(DOMAIN) hass.data[DOMAIN] = {} hass.data[DOMAIN][ATTR_CONFIG] = conf if conf is not None: hass.async_create_task( hass.config_entries.flow.async_init( DOMAIN, context={"source": config_entries.SOURCE_IMPORT} ) ) return True async def async_setup_entry(hass: HomeAssistantType, config_entry: ConfigType): """Set up TPLink from a config entry.""" config_data = hass.data[DOMAIN].get(ATTR_CONFIG) # These will contain the initialized devices lights = hass.data[DOMAIN][CONF_LIGHT] = [] switches = hass.data[DOMAIN][CONF_SWITCH] = [] # Add static devices static_devices = SmartDevices() if config_data is not None: static_devices = get_static_devices(config_data) lights.extend(static_devices.lights) switches.extend(static_devices.switches) # Add discovered devices if config_data is None or config_data[CONF_DISCOVERY]: discovered_devices = await async_discover_devices(hass, static_devices) lights.extend(discovered_devices.lights) switches.extend(discovered_devices.switches) forward_setup = hass.config_entries.async_forward_entry_setup if lights: _LOGGER.debug( "Got %s lights: %s", len(lights), ", ".join([d.host for d in lights]) ) hass.async_create_task(forward_setup(config_entry, "light")) if switches: _LOGGER.debug( "Got %s switches: %s", len(switches), ", ".join([d.host for d in switches]) ) hass.async_create_task(forward_setup(config_entry, "switch")) return True async def async_unload_entry(hass, entry): """Unload a config entry.""" forward_unload = hass.config_entries.async_forward_entry_unload remove_lights = remove_switches = False if hass.data[DOMAIN][CONF_LIGHT]: remove_lights = await forward_unload(entry, "light") if hass.data[DOMAIN][CONF_SWITCH]: remove_switches = await forward_unload(entry, "switch") if remove_lights or remove_switches: hass.data[DOMAIN].clear() return True # We were not able to unload the platforms, either because there # were none or one of the forward_unloads failed. return False
"""The tests for the mochad light platform.""" import unittest.mock as mock import pytest from homeassistant.components import light from homeassistant.components.mochad import light as mochad from homeassistant.setup import async_setup_component @pytest.fixture(autouse=True) def pymochad_mock(): """Mock pymochad.""" with mock.patch("homeassistant.components.mochad.light.device") as device: yield device @pytest.fixture def light_mock(hass, brightness): """Mock light.""" controller_mock = mock.MagicMock() dev_dict = {"address": "a1", "name": "fake_light", "brightness_levels": brightness} return mochad.MochadLight(hass, controller_mock, dev_dict) async def test_setup_adds_proper_devices(hass): """Test if setup adds devices.""" good_config = { "mochad": {}, "light": { "platform": "mochad", "devices": [{"name": "Light1", "address": "a1"}], }, } assert await async_setup_component(hass, light.DOMAIN, good_config) @pytest.mark.parametrize( "brightness,expected", [(32, "on"), (256, "xdim 255"), (64, "xdim 63")] ) async def test_turn_on_with_no_brightness(light_mock, expected): """Test turn_on.""" light_mock.turn_on() light_mock.light.send_cmd.assert_called_once_with(expected) @pytest.mark.parametrize( "brightness,expected", [ (32, [mock.call("on"), mock.call("dim 25")]), (256, [mock.call("xdim 45")]), (64, [mock.call("xdim 11")]), ], ) async def test_turn_on_with_brightness(light_mock, expected): """Test turn_on.""" light_mock.turn_on(brightness=45) light_mock.light.send_cmd.assert_has_calls(expected) @pytest.mark.parametrize("brightness", [32]) async def test_turn_off(light_mock): """Test turn_off.""" light_mock.turn_off() light_mock.light.send_cmd.assert_called_once_with("off")
w1ll1am23/home-assistant
tests/components/mochad/test_light.py
homeassistant/components/tplink/__init__.py
"""Support for the Daikin HVAC.""" import logging import voluptuous as vol from homeassistant.components.climate import PLATFORM_SCHEMA, ClimateEntity from homeassistant.components.climate.const import ( ATTR_FAN_MODE, ATTR_HVAC_MODE, ATTR_PRESET_MODE, ATTR_SWING_MODE, HVAC_MODE_COOL, HVAC_MODE_DRY, HVAC_MODE_FAN_ONLY, HVAC_MODE_HEAT, HVAC_MODE_HEAT_COOL, HVAC_MODE_OFF, PRESET_AWAY, PRESET_BOOST, PRESET_ECO, PRESET_NONE, SUPPORT_FAN_MODE, SUPPORT_PRESET_MODE, SUPPORT_SWING_MODE, SUPPORT_TARGET_TEMPERATURE, ) from homeassistant.const import ATTR_TEMPERATURE, CONF_HOST, CONF_NAME, TEMP_CELSIUS import homeassistant.helpers.config_validation as cv from . import DOMAIN as DAIKIN_DOMAIN from .const import ( ATTR_INSIDE_TEMPERATURE, ATTR_OUTSIDE_TEMPERATURE, ATTR_STATE_OFF, ATTR_STATE_ON, ATTR_TARGET_TEMPERATURE, ) _LOGGER = logging.getLogger(__name__) PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend( {vol.Required(CONF_HOST): cv.string, vol.Optional(CONF_NAME): cv.string} ) HA_STATE_TO_DAIKIN = { HVAC_MODE_FAN_ONLY: "fan", HVAC_MODE_DRY: "dry", HVAC_MODE_COOL: "cool", HVAC_MODE_HEAT: "hot", HVAC_MODE_HEAT_COOL: "auto", HVAC_MODE_OFF: "off", } DAIKIN_TO_HA_STATE = { "fan": HVAC_MODE_FAN_ONLY, "dry": HVAC_MODE_DRY, "cool": HVAC_MODE_COOL, "hot": HVAC_MODE_HEAT, "auto": HVAC_MODE_HEAT_COOL, "off": HVAC_MODE_OFF, } HA_PRESET_TO_DAIKIN = { PRESET_AWAY: "on", PRESET_NONE: "off", PRESET_BOOST: "powerful", PRESET_ECO: "econo", } HA_ATTR_TO_DAIKIN = { ATTR_PRESET_MODE: "en_hol", ATTR_HVAC_MODE: "mode", ATTR_FAN_MODE: "f_rate", ATTR_SWING_MODE: "f_dir", ATTR_INSIDE_TEMPERATURE: "htemp", ATTR_OUTSIDE_TEMPERATURE: "otemp", ATTR_TARGET_TEMPERATURE: "stemp", } DAIKIN_ATTR_ADVANCED = "adv" async def async_setup_platform(hass, config, async_add_entities, discovery_info=None): """Old way of setting up the Daikin HVAC platform. Can only be called when a user accidentally mentions the platform in their config. But even in that case it would have been ignored. 
""" async def async_setup_entry(hass, entry, async_add_entities): """Set up Daikin climate based on config_entry.""" daikin_api = hass.data[DAIKIN_DOMAIN].get(entry.entry_id) async_add_entities([DaikinClimate(daikin_api)], update_before_add=True) class DaikinClimate(ClimateEntity): """Representation of a Daikin HVAC.""" def __init__(self, api): """Initialize the climate device.""" self._api = api self._list = { ATTR_HVAC_MODE: list(HA_STATE_TO_DAIKIN), ATTR_FAN_MODE: self._api.device.fan_rate, ATTR_SWING_MODE: self._api.device.swing_modes, } self._supported_features = SUPPORT_TARGET_TEMPERATURE if ( self._api.device.support_away_mode or self._api.device.support_advanced_modes ): self._supported_features |= SUPPORT_PRESET_MODE if self._api.device.support_fan_rate: self._supported_features |= SUPPORT_FAN_MODE if self._api.device.support_swing_mode: self._supported_features |= SUPPORT_SWING_MODE async def _set(self, settings): """Set device settings using API.""" values = {} for attr in [ATTR_TEMPERATURE, ATTR_FAN_MODE, ATTR_SWING_MODE, ATTR_HVAC_MODE]: value = settings.get(attr) if value is None: continue daikin_attr = HA_ATTR_TO_DAIKIN.get(attr) if daikin_attr is not None: if attr == ATTR_HVAC_MODE: values[daikin_attr] = HA_STATE_TO_DAIKIN[value] elif value in self._list[attr]: values[daikin_attr] = value.lower() else: _LOGGER.error("Invalid value %s for %s", attr, value) # temperature elif attr == ATTR_TEMPERATURE: try: values[HA_ATTR_TO_DAIKIN[ATTR_TARGET_TEMPERATURE]] = str(int(value)) except ValueError: _LOGGER.error("Invalid temperature %s", value) if values: await self._api.device.set(values) @property def supported_features(self): """Return the list of supported features.""" return self._supported_features @property def name(self): """Return the name of the thermostat, if any.""" return self._api.name @property def unique_id(self): """Return a unique ID.""" return self._api.device.mac @property def temperature_unit(self): """Return the unit of measurement which this thermostat uses.""" return TEMP_CELSIUS @property def current_temperature(self): """Return the current temperature.""" return self._api.device.inside_temperature @property def target_temperature(self): """Return the temperature we try to reach.""" return self._api.device.target_temperature @property def target_temperature_step(self): """Return the supported step of target temperature.""" return 1 async def async_set_temperature(self, **kwargs): """Set new target temperature.""" await self._set(kwargs) @property def hvac_mode(self): """Return current operation ie. 
heat, cool, idle.""" daikin_mode = self._api.device.represent(HA_ATTR_TO_DAIKIN[ATTR_HVAC_MODE])[1] return DAIKIN_TO_HA_STATE.get(daikin_mode, HVAC_MODE_HEAT_COOL) @property def hvac_modes(self): """Return the list of available operation modes.""" return self._list.get(ATTR_HVAC_MODE) async def async_set_hvac_mode(self, hvac_mode): """Set HVAC mode.""" await self._set({ATTR_HVAC_MODE: hvac_mode}) @property def fan_mode(self): """Return the fan setting.""" return self._api.device.represent(HA_ATTR_TO_DAIKIN[ATTR_FAN_MODE])[1].title() async def async_set_fan_mode(self, fan_mode): """Set fan mode.""" await self._set({ATTR_FAN_MODE: fan_mode}) @property def fan_modes(self): """List of available fan modes.""" return self._list.get(ATTR_FAN_MODE) @property def swing_mode(self): """Return the fan setting.""" return self._api.device.represent(HA_ATTR_TO_DAIKIN[ATTR_SWING_MODE])[1].title() async def async_set_swing_mode(self, swing_mode): """Set new target temperature.""" await self._set({ATTR_SWING_MODE: swing_mode}) @property def swing_modes(self): """List of available swing modes.""" return self._list.get(ATTR_SWING_MODE) @property def preset_mode(self): """Return the preset_mode.""" if ( self._api.device.represent(HA_ATTR_TO_DAIKIN[ATTR_PRESET_MODE])[1] == HA_PRESET_TO_DAIKIN[PRESET_AWAY] ): return PRESET_AWAY if ( HA_PRESET_TO_DAIKIN[PRESET_BOOST] in self._api.device.represent(DAIKIN_ATTR_ADVANCED)[1] ): return PRESET_BOOST if ( HA_PRESET_TO_DAIKIN[PRESET_ECO] in self._api.device.represent(DAIKIN_ATTR_ADVANCED)[1] ): return PRESET_ECO return PRESET_NONE async def async_set_preset_mode(self, preset_mode): """Set preset mode.""" if preset_mode == PRESET_AWAY: await self._api.device.set_holiday(ATTR_STATE_ON) elif preset_mode == PRESET_BOOST: await self._api.device.set_advanced_mode( HA_PRESET_TO_DAIKIN[PRESET_BOOST], ATTR_STATE_ON ) elif preset_mode == PRESET_ECO: await self._api.device.set_advanced_mode( HA_PRESET_TO_DAIKIN[PRESET_ECO], ATTR_STATE_ON ) else: if self.preset_mode == PRESET_AWAY: await self._api.device.set_holiday(ATTR_STATE_OFF) elif self.preset_mode == PRESET_BOOST: await self._api.device.set_advanced_mode( HA_PRESET_TO_DAIKIN[PRESET_BOOST], ATTR_STATE_OFF ) elif self.preset_mode == PRESET_ECO: await self._api.device.set_advanced_mode( HA_PRESET_TO_DAIKIN[PRESET_ECO], ATTR_STATE_OFF ) @property def preset_modes(self): """List of available preset modes.""" ret = [PRESET_NONE] if self._api.device.support_away_mode: ret.append(PRESET_AWAY) if self._api.device.support_advanced_modes: ret += [PRESET_ECO, PRESET_BOOST] return ret async def async_update(self): """Retrieve latest state.""" await self._api.async_update() async def async_turn_on(self): """Turn device on.""" await self._api.device.set({}) async def async_turn_off(self): """Turn device off.""" await self._api.device.set( {HA_ATTR_TO_DAIKIN[ATTR_HVAC_MODE]: HA_STATE_TO_DAIKIN[HVAC_MODE_OFF]} ) @property def device_info(self): """Return a device description for device registry.""" return self._api.device_info
"""The tests for the mochad light platform.""" import unittest.mock as mock import pytest from homeassistant.components import light from homeassistant.components.mochad import light as mochad from homeassistant.setup import async_setup_component @pytest.fixture(autouse=True) def pymochad_mock(): """Mock pymochad.""" with mock.patch("homeassistant.components.mochad.light.device") as device: yield device @pytest.fixture def light_mock(hass, brightness): """Mock light.""" controller_mock = mock.MagicMock() dev_dict = {"address": "a1", "name": "fake_light", "brightness_levels": brightness} return mochad.MochadLight(hass, controller_mock, dev_dict) async def test_setup_adds_proper_devices(hass): """Test if setup adds devices.""" good_config = { "mochad": {}, "light": { "platform": "mochad", "devices": [{"name": "Light1", "address": "a1"}], }, } assert await async_setup_component(hass, light.DOMAIN, good_config) @pytest.mark.parametrize( "brightness,expected", [(32, "on"), (256, "xdim 255"), (64, "xdim 63")] ) async def test_turn_on_with_no_brightness(light_mock, expected): """Test turn_on.""" light_mock.turn_on() light_mock.light.send_cmd.assert_called_once_with(expected) @pytest.mark.parametrize( "brightness,expected", [ (32, [mock.call("on"), mock.call("dim 25")]), (256, [mock.call("xdim 45")]), (64, [mock.call("xdim 11")]), ], ) async def test_turn_on_with_brightness(light_mock, expected): """Test turn_on.""" light_mock.turn_on(brightness=45) light_mock.light.send_cmd.assert_has_calls(expected) @pytest.mark.parametrize("brightness", [32]) async def test_turn_off(light_mock): """Test turn_off.""" light_mock.turn_off() light_mock.light.send_cmd.assert_called_once_with("off")
w1ll1am23/home-assistant
tests/components/mochad/test_light.py
homeassistant/components/daikin/climate.py
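The Daikin climate entity above translates between Home Assistant HVAC modes and the device's own mode strings via the HA_STATE_TO_DAIKIN and DAIKIN_TO_HA_STATE tables. The sketch below shows that round trip in isolation; the plain string keys mirror the values of the HVAC_MODE_* constants, and deriving the reverse map (rather than writing it by hand, as the component does) is a choice of this sketch.

# Sketch of the two-way HVAC mode translation used by the Daikin entity.
HA_STATE_TO_DAIKIN = {
    "fan_only": "fan",
    "dry": "dry",
    "cool": "cool",
    "heat": "hot",
    "heat_cool": "auto",
    "off": "off",
}
# Derived reverse map; keeps both directions in sync by construction.
DAIKIN_TO_HA_STATE = {v: k for k, v in HA_STATE_TO_DAIKIN.items()}

print(HA_STATE_TO_DAIKIN["heat"])                       # hot
print(DAIKIN_TO_HA_STATE.get("auto"))                   # heat_cool
print(DAIKIN_TO_HA_STATE.get("unknown", "heat_cool"))   # falls back like the entity does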
"""Describe group states.""" from homeassistant.components.group import GroupIntegrationRegistry from homeassistant.const import STATE_OFF from homeassistant.core import callback from homeassistant.helpers.typing import HomeAssistantType from . import ( STATE_ECO, STATE_ELECTRIC, STATE_GAS, STATE_HEAT_PUMP, STATE_HIGH_DEMAND, STATE_PERFORMANCE, ) @callback def async_describe_on_off_states( hass: HomeAssistantType, registry: GroupIntegrationRegistry ) -> None: """Describe group on off states.""" registry.on_off_states( { STATE_ECO, STATE_ELECTRIC, STATE_PERFORMANCE, STATE_HIGH_DEMAND, STATE_HEAT_PUMP, STATE_GAS, }, STATE_OFF, )
"""The tests for the mochad light platform.""" import unittest.mock as mock import pytest from homeassistant.components import light from homeassistant.components.mochad import light as mochad from homeassistant.setup import async_setup_component @pytest.fixture(autouse=True) def pymochad_mock(): """Mock pymochad.""" with mock.patch("homeassistant.components.mochad.light.device") as device: yield device @pytest.fixture def light_mock(hass, brightness): """Mock light.""" controller_mock = mock.MagicMock() dev_dict = {"address": "a1", "name": "fake_light", "brightness_levels": brightness} return mochad.MochadLight(hass, controller_mock, dev_dict) async def test_setup_adds_proper_devices(hass): """Test if setup adds devices.""" good_config = { "mochad": {}, "light": { "platform": "mochad", "devices": [{"name": "Light1", "address": "a1"}], }, } assert await async_setup_component(hass, light.DOMAIN, good_config) @pytest.mark.parametrize( "brightness,expected", [(32, "on"), (256, "xdim 255"), (64, "xdim 63")] ) async def test_turn_on_with_no_brightness(light_mock, expected): """Test turn_on.""" light_mock.turn_on() light_mock.light.send_cmd.assert_called_once_with(expected) @pytest.mark.parametrize( "brightness,expected", [ (32, [mock.call("on"), mock.call("dim 25")]), (256, [mock.call("xdim 45")]), (64, [mock.call("xdim 11")]), ], ) async def test_turn_on_with_brightness(light_mock, expected): """Test turn_on.""" light_mock.turn_on(brightness=45) light_mock.light.send_cmd.assert_has_calls(expected) @pytest.mark.parametrize("brightness", [32]) async def test_turn_off(light_mock): """Test turn_off.""" light_mock.turn_off() light_mock.light.send_cmd.assert_called_once_with("off")
w1ll1am23/home-assistant
tests/components/mochad/test_light.py
homeassistant/components/water_heater/group.py
"""Describe logbook events.""" from homeassistant.core import callback from .const import DOMAIN, EVENT_ALEXA_SMART_HOME @callback def async_describe_events(hass, async_describe_event): """Describe logbook events.""" @callback def async_describe_logbook_event(event): """Describe a logbook event.""" data = event.data entity_id = data["request"].get("entity_id") if entity_id: state = hass.states.get(entity_id) name = state.name if state else entity_id message = f"send command {data['request']['namespace']}/{data['request']['name']} for {name}" else: message = ( f"send command {data['request']['namespace']}/{data['request']['name']}" ) return {"name": "Amazon Alexa", "message": message, "entity_id": entity_id} async_describe_event(DOMAIN, EVENT_ALEXA_SMART_HOME, async_describe_logbook_event)
"""The tests for the mochad light platform.""" import unittest.mock as mock import pytest from homeassistant.components import light from homeassistant.components.mochad import light as mochad from homeassistant.setup import async_setup_component @pytest.fixture(autouse=True) def pymochad_mock(): """Mock pymochad.""" with mock.patch("homeassistant.components.mochad.light.device") as device: yield device @pytest.fixture def light_mock(hass, brightness): """Mock light.""" controller_mock = mock.MagicMock() dev_dict = {"address": "a1", "name": "fake_light", "brightness_levels": brightness} return mochad.MochadLight(hass, controller_mock, dev_dict) async def test_setup_adds_proper_devices(hass): """Test if setup adds devices.""" good_config = { "mochad": {}, "light": { "platform": "mochad", "devices": [{"name": "Light1", "address": "a1"}], }, } assert await async_setup_component(hass, light.DOMAIN, good_config) @pytest.mark.parametrize( "brightness,expected", [(32, "on"), (256, "xdim 255"), (64, "xdim 63")] ) async def test_turn_on_with_no_brightness(light_mock, expected): """Test turn_on.""" light_mock.turn_on() light_mock.light.send_cmd.assert_called_once_with(expected) @pytest.mark.parametrize( "brightness,expected", [ (32, [mock.call("on"), mock.call("dim 25")]), (256, [mock.call("xdim 45")]), (64, [mock.call("xdim 11")]), ], ) async def test_turn_on_with_brightness(light_mock, expected): """Test turn_on.""" light_mock.turn_on(brightness=45) light_mock.light.send_cmd.assert_has_calls(expected) @pytest.mark.parametrize("brightness", [32]) async def test_turn_off(light_mock): """Test turn_off.""" light_mock.turn_off() light_mock.light.send_cmd.assert_called_once_with("off")
w1ll1am23/home-assistant
tests/components/mochad/test_light.py
homeassistant/components/alexa/logbook.py
# -*- coding: utf-8 -*-
# Copyright (C) 2014-2016 Andrey Antukh <niwi@niwi.be>
# Copyright (C) 2014-2016 Jesús Espino <jespinog@gmail.com>
# Copyright (C) 2014-2016 David Barragán <bameda@dbarragan.com>
# Copyright (C) 2014-2016 Alejandro Alonso <alejandro.alonso@kaleidos.net>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

import os

from django.db.models.fields.files import FieldFile

from taiga.base.utils.urls import get_absolute_url

from easy_thumbnails.files import get_thumbnailer
from easy_thumbnails.exceptions import InvalidImageFormatError


def get_thumbnail_url(file_obj, thumbnailer_size):
    # Ugly hack to temporarily ignore tiff files
    relative_name = file_obj
    if isinstance(file_obj, FieldFile):
        relative_name = file_obj.name
    source_extension = os.path.splitext(relative_name)[1][1:]
    if source_extension == "tiff":
        return None

    try:
        path_url = get_thumbnailer(file_obj)[thumbnailer_size].url
        thumb_url = get_absolute_url(path_url)
    except InvalidImageFormatError:
        thumb_url = None

    return thumb_url
# -*- coding: utf-8 -*-
# Copyright (C) 2014-2016 Andrey Antukh <niwi@niwi.nz>
# Copyright (C) 2014-2016 Jesús Espino <jespinog@gmail.com>
# Copyright (C) 2014-2016 David Barragán <bameda@dbarragan.com>
# Copyright (C) 2014-2016 Alejandro Alonso <alejandro.alonso@kaleidos.net>
# Copyright (C) 2014-2016 Anler Hernández <hello@anler.me>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

import pytest

from unittest.mock import patch
from unittest.mock import Mock

from .. import factories as f

from taiga.projects.history import services

pytestmark = pytest.mark.django_db(transaction=True)

from taiga.base.utils import json


def test_webhooks_when_create_wiki_page(settings):
    settings.WEBHOOKS_ENABLED = True
    project = f.ProjectFactory()
    f.WebhookFactory.create(project=project)
    f.WebhookFactory.create(project=project)

    obj = f.WikiPageFactory.create(project=project)

    with patch('taiga.webhooks.tasks._send_request') as send_request_mock:
        services.take_snapshot(obj, user=obj.owner)
        assert send_request_mock.call_count == 2

    (webhook_id, url, key, data) = send_request_mock.call_args[0]
    assert data["action"] == "create"
    assert data["type"] == "wikipage"
    assert data["by"]["id"] == obj.owner.id
    assert "date" in data
    assert data["data"]["id"] == obj.id


def test_webhooks_when_update_wiki_page(settings):
    settings.WEBHOOKS_ENABLED = True
    project = f.ProjectFactory()
    f.WebhookFactory.create(project=project)
    f.WebhookFactory.create(project=project)

    obj = f.WikiPageFactory.create(project=project)

    with patch('taiga.webhooks.tasks._send_request') as send_request_mock:
        services.take_snapshot(obj, user=obj.owner)
        assert send_request_mock.call_count == 2

    obj.content = "test webhook update"
    obj.save()

    with patch('taiga.webhooks.tasks._send_request') as send_request_mock:
        services.take_snapshot(obj, user=obj.owner, comment="test_comment")
        assert send_request_mock.call_count == 2

    (webhook_id, url, key, data) = send_request_mock.call_args[0]
    assert data["action"] == "change"
    assert data["type"] == "wikipage"
    assert data["by"]["id"] == obj.owner.id
    assert "date" in data
    assert data["data"]["id"] == obj.id
    assert data["data"]["content"] == obj.content
    assert data["change"]["comment"] == "test_comment"
    assert data["change"]["diff"]["content_html"]["from"] != data["change"]["diff"]["content_html"]["to"]
    assert obj.content in data["change"]["diff"]["content_html"]["to"]


def test_webhooks_when_delete_wiki_page(settings):
    settings.WEBHOOKS_ENABLED = True
    project = f.ProjectFactory()
    f.WebhookFactory.create(project=project)
    f.WebhookFactory.create(project=project)

    obj = f.WikiPageFactory.create(project=project)

    with patch('taiga.webhooks.tasks._send_request') as send_request_mock:
        services.take_snapshot(obj, user=obj.owner, delete=True)
        assert send_request_mock.call_count == 2

    (webhook_id, url, key, data) = send_request_mock.call_args[0]
    assert data["action"] == "delete"
    assert data["type"] == "wikipage"
    assert data["by"]["id"] == obj.owner.id
    assert "date" in data
    assert "data" in data


def test_webhooks_when_update_wiki_page_attachments(settings):
    settings.WEBHOOKS_ENABLED = True
    project = f.ProjectFactory()
    f.WebhookFactory.create(project=project)
    f.WebhookFactory.create(project=project)

    obj = f.WikiPageFactory.create(project=project)

    with patch('taiga.webhooks.tasks._send_request') as send_request_mock:
        services.take_snapshot(obj, user=obj.owner)
        assert send_request_mock.call_count == 2

    # Create attachments
    attachment1 = f.WikiAttachmentFactory(project=obj.project, content_object=obj, owner=obj.owner)
    attachment2 = f.WikiAttachmentFactory(project=obj.project, content_object=obj, owner=obj.owner)

    with patch('taiga.webhooks.tasks._send_request') as send_request_mock:
        services.take_snapshot(obj, user=obj.owner, comment="test_comment")
        assert send_request_mock.call_count == 2

    (webhook_id, url, key, data) = send_request_mock.call_args[0]
    assert data["action"] == "change"
    assert data["type"] == "wikipage"
    assert data["by"]["id"] == obj.owner.id
    assert "date" in data
    assert data["data"]["id"] == obj.id
    assert data["change"]["comment"] == "test_comment"
    assert len(data["change"]["diff"]["attachments"]["new"]) == 2
    assert len(data["change"]["diff"]["attachments"]["changed"]) == 0
    assert len(data["change"]["diff"]["attachments"]["deleted"]) == 0

    # Update attachment
    attachment1.description = "new attachment description"
    attachment1.save()

    with patch('taiga.webhooks.tasks._send_request') as send_request_mock:
        services.take_snapshot(obj, user=obj.owner, comment="test_comment")
        assert send_request_mock.call_count == 2

    (webhook_id, url, key, data) = send_request_mock.call_args[0]
    assert data["action"] == "change"
    assert data["type"] == "wikipage"
    assert data["by"]["id"] == obj.owner.id
    assert "date" in data
    assert data["data"]["id"] == obj.id
    assert data["change"]["comment"] == "test_comment"
    assert len(data["change"]["diff"]["attachments"]["new"]) == 0
    assert len(data["change"]["diff"]["attachments"]["changed"]) == 1
    assert len(data["change"]["diff"]["attachments"]["deleted"]) == 0

    # Delete attachment
    attachment2.delete()

    with patch('taiga.webhooks.tasks._send_request') as send_request_mock:
        services.take_snapshot(obj, user=obj.owner, comment="test_comment")
        assert send_request_mock.call_count == 2

    (webhook_id, url, key, data) = send_request_mock.call_args[0]
    assert data["action"] == "change"
    assert data["type"] == "wikipage"
    assert data["by"]["id"] == obj.owner.id
    assert "date" in data
    assert data["data"]["id"] == obj.id
    assert data["change"]["comment"] == "test_comment"
    assert len(data["change"]["diff"]["attachments"]["new"]) == 0
    assert len(data["change"]["diff"]["attachments"]["changed"]) == 0
    assert len(data["change"]["diff"]["attachments"]["deleted"]) == 1
xdevelsistemas/taiga-back-community
tests/integration/test_webhooks_wikipages.py
taiga/base/utils/thumbnails.py
from collections.abc import Iterable from numbers import Integral import subprocess import openmc def _run(args, output, cwd): # Launch a subprocess p = subprocess.Popen(args, cwd=cwd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, universal_newlines=True) # Capture and re-print OpenMC output in real-time lines = [] while True: # If OpenMC is finished, break loop line = p.stdout.readline() if not line and p.poll() is not None: break lines.append(line) if output: # If user requested output, print to screen print(line, end='') # Raise an exception if return status is non-zero if p.returncode != 0: raise subprocess.CalledProcessError(p.returncode, ' '.join(args), ''.join(lines)) def plot_geometry(output=True, openmc_exec='openmc', cwd='.'): """Run OpenMC in plotting mode Parameters ---------- output : bool, optional Capture OpenMC output from standard out openmc_exec : str, optional Path to OpenMC executable cwd : str, optional Path to working directory to run in Raises ------ subprocess.CalledProcessError If the `openmc` executable returns a non-zero status """ _run([openmc_exec, '-p'], output, cwd) def plot_inline(plots, openmc_exec='openmc', cwd='.', convert_exec='convert'): """Display plots inline in a Jupyter notebook. This function requires that you have a program installed to convert PPM files to PNG files. Typically, that would be `ImageMagick <https://www.imagemagick.org>`_ which includes a `convert` command. Parameters ---------- plots : Iterable of openmc.Plot Plots to display openmc_exec : str Path to OpenMC executable cwd : str, optional Path to working directory to run in convert_exec : str, optional Command that can convert PPM files into PNG files Raises ------ subprocess.CalledProcessError If the `openmc` executable returns a non-zero status """ from IPython.display import Image, display if not isinstance(plots, Iterable): plots = [plots] # Create plots.xml openmc.Plots(plots).export_to_xml() # Run OpenMC in geometry plotting mode plot_geometry(False, openmc_exec, cwd) images = [] if plots is not None: for p in plots: if p.filename is not None: ppm_file = '{}.ppm'.format(p.filename) else: ppm_file = 'plot_{}.ppm'.format(p.id) png_file = ppm_file.replace('.ppm', '.png') subprocess.check_call([convert_exec, ppm_file, png_file]) images.append(Image(png_file)) display(*images) def calculate_volumes(threads=None, output=True, cwd='.', openmc_exec='openmc', mpi_args=None): """Run stochastic volume calculations in OpenMC. This function runs OpenMC in stochastic volume calculation mode. To specify the parameters of a volume calculation, one must first create a :class:`openmc.VolumeCalculation` instance and assign it to :attr:`openmc.Settings.volume_calculations`. For example: >>> vol = openmc.VolumeCalculation(domains=[cell1, cell2], samples=100000) >>> settings = openmc.Settings() >>> settings.volume_calculations = [vol] >>> settings.export_to_xml() >>> openmc.calculate_volumes() Parameters ---------- threads : int, optional Number of OpenMP threads. If OpenMC is compiled with OpenMP threading enabled, the default is implementation-dependent but is usually equal to the number of hardware threads available (or a value set by the :envvar:`OMP_NUM_THREADS` environment variable). output : bool, optional Capture OpenMC output from standard out openmc_exec : str, optional Path to OpenMC executable. Defaults to 'openmc'. mpi_args : list of str, optional MPI execute command and any additional MPI arguments to pass, e.g. ['mpiexec', '-n', '8']. 
cwd : str, optional Path to working directory to run in. Defaults to the current working directory. Raises ------ subprocess.CalledProcessError If the `openmc` executable returns a non-zero status See Also -------- openmc.VolumeCalculation """ args = [openmc_exec, '--volume'] if isinstance(threads, Integral) and threads > 0: args += ['-s', str(threads)] if mpi_args is not None: args = mpi_args + args _run(args, output, cwd) def run(particles=None, threads=None, geometry_debug=False, restart_file=None, tracks=False, output=True, cwd='.', openmc_exec='openmc', mpi_args=None, event_based=False): """Run an OpenMC simulation. Parameters ---------- particles : int, optional Number of particles to simulate per generation. threads : int, optional Number of OpenMP threads. If OpenMC is compiled with OpenMP threading enabled, the default is implementation-dependent but is usually equal to the number of hardware threads available (or a value set by the :envvar:`OMP_NUM_THREADS` environment variable). geometry_debug : bool, optional Turn on geometry debugging during simulation. Defaults to False. restart_file : str, optional Path to restart file to use tracks : bool, optional Write tracks for all particles. Defaults to False. output : bool Capture OpenMC output from standard out cwd : str, optional Path to working directory to run in. Defaults to the current working directory. openmc_exec : str, optional Path to OpenMC executable. Defaults to 'openmc'. mpi_args : list of str, optional MPI execute command and any additional MPI arguments to pass, e.g. ['mpiexec', '-n', '8']. event_based : bool, optional Turns on event-based parallelism, instead of default history-based .. versionadded:: 0.12 Raises ------ subprocess.CalledProcessError If the `openmc` executable returns a non-zero status """ args = [openmc_exec] if isinstance(particles, Integral) and particles > 0: args += ['-n', str(particles)] if isinstance(threads, Integral) and threads > 0: args += ['-s', str(threads)] if geometry_debug: args.append('-g') if event_based: args.append('-e') if isinstance(restart_file, str): args += ['-r', restart_file] if tracks: args.append('-t') if mpi_args is not None: args = mpi_args + args _run(args, output, cwd)
from random import uniform

import numpy as np
import openmc
import pytest


def test_rectangular_parallelepiped():
    xmin = uniform(-5., 5.)
    xmax = xmin + uniform(0., 5.)
    ymin = uniform(-5., 5.)
    ymax = ymin + uniform(0., 5.)
    zmin = uniform(-5., 5.)
    zmax = zmin + uniform(0., 5.)
    s = openmc.model.RectangularParallelepiped(xmin, xmax, ymin, ymax, zmin, zmax)
    assert isinstance(s.xmin, openmc.XPlane)
    assert isinstance(s.xmax, openmc.XPlane)
    assert isinstance(s.ymin, openmc.YPlane)
    assert isinstance(s.ymax, openmc.YPlane)
    assert isinstance(s.zmin, openmc.ZPlane)
    assert isinstance(s.zmax, openmc.ZPlane)

    # Make sure boundary condition propagates
    s.boundary_type = 'reflective'
    assert s.boundary_type == 'reflective'
    for axis in 'xyz':
        assert getattr(s, '{}min'.format(axis)).boundary_type == 'reflective'
        assert getattr(s, '{}max'.format(axis)).boundary_type == 'reflective'

    # Check bounding box
    ll, ur = (+s).bounding_box
    assert np.all(np.isinf(ll))
    assert np.all(np.isinf(ur))
    ll, ur = (-s).bounding_box
    assert ur == pytest.approx((xmax, ymax, zmax))
    assert ll == pytest.approx((xmin, ymin, zmin))

    # __contains__ on associated half-spaces
    assert (xmin - 0.1, 0., 0.) in +s
    assert (xmin - 0.1, 0., 0.) not in -s
    dx, dy, dz = xmax - xmin, ymax - ymin, zmax - zmin
    assert (xmin + dx/2, ymin + dy/2, zmin + dz/2) in -s
    assert (xmin + dx/2, ymin + dy/2, zmin + dz/2) not in +s

    # translate method
    t = uniform(-5.0, 5.0)
    s_t = s.translate((t, t, t))
    ll_t, ur_t = (-s_t).bounding_box
    assert ur_t == pytest.approx(ur + t)
    assert ll_t == pytest.approx(ll + t)

    # Make sure repr works
    repr(s)


@pytest.mark.parametrize(
    "axis, indices", [
        ("X", [0, 1, 2]),
        ("Y", [1, 2, 0]),
        ("Z", [2, 0, 1]),
    ]
)
def test_right_circular_cylinder(axis, indices):
    x, y, z = 1.0, -2.5, 3.0
    h, r = 5.0, 3.0
    s = openmc.model.RightCircularCylinder((x, y, z), h, r, axis=axis.lower())
    assert isinstance(s.cyl, getattr(openmc, axis + "Cylinder"))
    assert isinstance(s.top, getattr(openmc, axis + "Plane"))
    assert isinstance(s.bottom, getattr(openmc, axis + "Plane"))

    # Make sure boundary condition propagates
    s.boundary_type = 'reflective'
    assert s.boundary_type == 'reflective'
    assert s.cyl.boundary_type == 'reflective'
    assert s.bottom.boundary_type == 'reflective'
    assert s.top.boundary_type == 'reflective'

    # Check bounding box
    ll, ur = (+s).bounding_box
    assert np.all(np.isinf(ll))
    assert np.all(np.isinf(ur))
    ll, ur = (-s).bounding_box
    assert ll == pytest.approx((x, y, z) + np.roll([0, -r, -r], indices[0]))
    assert ur == pytest.approx((x, y, z) + np.roll([h, r, r], indices[0]))

    # __contains__ on associated half-spaces
    point_pos = (x, y, z) + np.roll([h/2, r+1, r+1], indices[0])
    assert point_pos in +s
    assert point_pos not in -s
    point_neg = (x, y, z) + np.roll([h/2, 0, 0], indices[0])
    assert point_neg in -s
    assert point_neg not in +s

    # translate method
    t = uniform(-5.0, 5.0)
    s_t = s.translate((t, t, t))
    ll_t, ur_t = (-s_t).bounding_box
    assert ur_t == pytest.approx(ur + t)
    assert ll_t == pytest.approx(ll + t)

    # Make sure repr works
    repr(s)


@pytest.mark.parametrize(
    "axis, point_pos, point_neg, ll_true", [
        ("X", (8., 0., 0.), (12., 0., 0.), (10., -np.inf, -np.inf)),
        ("Y", (10., -2., 0.), (10., 2., 0.), (-np.inf, 0., -np.inf)),
        ("Z", (10., 0., -3.), (10., 0., 3.), (-np.inf, -np.inf, 0.))
    ]
)
def test_cone_one_sided(axis, point_pos, point_neg, ll_true):
    cone_oneside = getattr(openmc.model, axis + "ConeOneSided")
    cone_twoside = getattr(openmc, axis + "Cone")
    plane = getattr(openmc, axis + "Plane")

    x, y, z = 10., 0., 0.
    r2 = 4.
    s = cone_oneside(x, y, z, r2, True)
    assert isinstance(s.cone, cone_twoside)
    assert isinstance(s.plane, plane)
    assert s.up

    # Make sure boundary condition propagates
    s.boundary_type = 'reflective'
    assert s.boundary_type == 'reflective'
    assert s.cone.boundary_type == 'reflective'
    assert s.plane.boundary_type == 'transmission'

    # Check bounding box
    ll, ur = (+s).bounding_box
    assert np.all(np.isinf(ll))
    assert np.all(np.isinf(ur))
    ll, ur = (-s).bounding_box
    assert np.all(np.isinf(ur))
    assert ll == pytest.approx(ll_true)

    # __contains__ on associated half-spaces
    assert point_pos in +s
    assert point_pos not in -s
    assert point_neg in -s
    assert point_neg not in +s

    # translate method
    t = uniform(-5.0, 5.0)
    s_t = s.translate((t, t, t))
    ll_t, ur_t = (-s_t).bounding_box
    assert ur_t == pytest.approx(ur + t)
    assert ll_t == pytest.approx(ll + t)

    # Make sure repr works
    repr(s)
liangjg/openmc
tests/unit_tests/test_surface_composite.py
openmc/executor.py
from collections import OrderedDict from collections.abc import Iterable from copy import deepcopy from math import cos, sin, pi from numbers import Real from xml.etree import ElementTree as ET import numpy as np from uncertainties import UFloat import openmc import openmc.checkvalue as cv from ._xml import get_text from .mixin import IDManagerMixin from .region import Region, Complement from .surface import Halfspace class Cell(IDManagerMixin): r"""A region of space defined as the intersection of half-space created by quadric surfaces. Parameters ---------- cell_id : int, optional Unique identifier for the cell. If not specified, an identifier will automatically be assigned. name : str, optional Name of the cell. If not specified, the name is the empty string. fill : openmc.Material or openmc.Universe or openmc.Lattice or None or iterable of openmc.Material, optional Indicates what the region of space is filled with region : openmc.Region, optional Region of space that is assigned to the cell. Attributes ---------- id : int Unique identifier for the cell name : str Name of the cell fill : openmc.Material or openmc.Universe or openmc.Lattice or None or iterable of openmc.Material Indicates what the region of space is filled with. If None, the cell is treated as a void. An iterable of materials is used to fill repeated instances of a cell with different materials. fill_type : {'material', 'universe', 'lattice', 'distribmat', 'void'} Indicates what the cell is filled with. region : openmc.Region or None Region of space that is assigned to the cell. rotation : Iterable of float If the cell is filled with a universe, this array specifies the angles in degrees about the x, y, and z axes that the filled universe should be rotated. The rotation applied is an intrinsic rotation with specified Tait-Bryan angles. That is to say, if the angles are :math:`(\phi, \theta, \psi)`, then the rotation matrix applied is :math:`R_z(\psi) R_y(\theta) R_x(\phi)` or .. math:: \left [ \begin{array}{ccc} \cos\theta \cos\psi & -\cos\phi \sin\psi + \sin\phi \sin\theta \cos\psi & \sin\phi \sin\psi + \cos\phi \sin\theta \cos\psi \\ \cos\theta \sin\psi & \cos\phi \cos\psi + \sin\phi \sin\theta \sin\psi & -\sin\phi \cos\psi + \cos\phi \sin\theta \sin\psi \\ -\sin\theta & \sin\phi \cos\theta & \cos\phi \cos\theta \end{array} \right ] A rotation matrix can also be specified directly by setting this attribute to a nested list (or 2D numpy array) that specifies each element of the matrix. rotation_matrix : numpy.ndarray The rotation matrix defined by the angles specified in the :attr:`Cell.rotation` property. temperature : float or iterable of float Temperature of the cell in Kelvin. Multiple temperatures can be given to give each distributed cell instance a unique temperature. translation : Iterable of float If the cell is filled with a universe, this array specifies a vector that is used to translate (shift) the universe. paths : list of str The paths traversed through the CSG tree to reach each cell instance. This property is initialized by calling the :meth:`Geometry.determine_paths` method. num_instances : int The number of instances of this cell throughout the geometry. volume : float Volume of the cell in cm^3. This can either be set manually or calculated in a stochastic volume calculation and added via the :meth:`Cell.add_volume_information` method. For 'distribmat' cells it is the total volume of all instances. 
atoms : collections.OrderedDict Mapping of nuclides to the total number of atoms for each nuclide present in the cell, or in all of its instances for a 'distribmat' fill. For example, {'U235': 1.0e22, 'U238': 5.0e22, ...}. .. versionadded:: 0.12 """ next_id = 1 used_ids = set() def __init__(self, cell_id=None, name='', fill=None, region=None): # Initialize Cell class attributes self.id = cell_id self.name = name self.fill = fill self.region = region self._rotation = None self._rotation_matrix = None self._temperature = None self._translation = None self._paths = None self._num_instances = None self._volume = None self._atoms = None def __contains__(self, point): if self.region is None: return True else: return point in self.region def __repr__(self): string = 'Cell\n' string += '{: <16}=\t{}\n'.format('\tID', self.id) string += '{: <16}=\t{}\n'.format('\tName', self.name) if self.fill_type == 'material': string += '{: <16}=\tMaterial {}\n'.format('\tFill', self.fill.id) elif self.fill_type == 'void': string += '{: <16}=\tNone\n'.format('\tFill') elif self.fill_type == 'distribmat': string += '{: <16}=\t{}\n'.format('\tFill', list(map( lambda m: m if m is None else m.id, self.fill))) else: string += '{: <16}=\t{}\n'.format('\tFill', self.fill.id) string += '{: <16}=\t{}\n'.format('\tRegion', self.region) string += '{: <16}=\t{}\n'.format('\tRotation', self.rotation) if self.fill_type == 'material': string += '\t{0: <15}=\t{1}\n'.format('Temperature', self.temperature) string += '{: <16}=\t{}\n'.format('\tTranslation', self.translation) string += '{: <16}=\t{}\n'.format('\tVolume', self.volume) return string @property def name(self): return self._name @property def fill(self): return self._fill @property def fill_type(self): if isinstance(self.fill, openmc.Material): return 'material' elif isinstance(self.fill, openmc.Universe): return 'universe' elif isinstance(self.fill, openmc.Lattice): return 'lattice' elif isinstance(self.fill, Iterable): return 'distribmat' else: return 'void' @property def region(self): return self._region @property def rotation(self): return self._rotation @property def rotation_matrix(self): return self._rotation_matrix @property def temperature(self): return self._temperature @property def translation(self): return self._translation @property def volume(self): return self._volume @property def atoms(self): if self._atoms is None: if self._volume is None: msg = ('Cannot calculate atom content becouse no volume ' 'is set. Use Cell.volume to provide it or perform ' 'a stochastic volume calculation.') raise ValueError(msg) elif self.fill_type == 'void': msg = ('Cell is filled with void. It contains no atoms. ' 'Material must be set to calculate atom content.') raise ValueError(msg) elif self.fill_type in ['lattice', 'universe']: msg = ('Universe and Lattice cells can contain multiple ' 'materials in diffrent proportions. 
Atom content must ' 'be calculated with stochastic volume calculation.') raise ValueError(msg) elif self.fill_type == 'material': # Get atomic densities self._atoms = self._fill.get_nuclide_atom_densities() # Convert to total number of atoms for key, nuclide in self._atoms.items(): atom = nuclide[1] * self._volume * 1.0e+24 self._atoms[key] = atom elif self.fill_type == 'distribmat': # Assumes that volume is total volume of all instances # Also assumes that all instances have the same volume partial_volume = self.volume / len(self.fill) self._atoms = OrderedDict() for mat in self.fill: for key, nuclide in mat.get_nuclide_atom_densities().items(): # To account for overlap of nuclides between distribmat # we need to append new atoms to any existing value # hence it is necessary to ask for default. atom = self._atoms.setdefault(key, 0) atom += nuclide[1] * partial_volume * 1.0e+24 self._atoms[key] = atom else: msg = 'Unrecognized fill_type: {}'.format(self.fill_type) raise ValueError(msg) return self._atoms @property def paths(self): if self._paths is None: raise ValueError('Cell instance paths have not been determined. ' 'Call the Geometry.determine_paths() method.') return self._paths @property def bounding_box(self): if self.region is not None: return self.region.bounding_box else: return (np.array([-np.inf, -np.inf, -np.inf]), np.array([np.inf, np.inf, np.inf])) @property def num_instances(self): if self._num_instances is None: raise ValueError( 'Number of cell instances have not been determined. Call the ' 'Geometry.determine_paths() method.') return self._num_instances @name.setter def name(self, name): if name is not None: cv.check_type('cell name', name, str) self._name = name else: self._name = '' @fill.setter def fill(self, fill): if fill is not None: if isinstance(fill, Iterable): for i, f in enumerate(fill): if f is not None: cv.check_type('cell.fill[i]', f, openmc.Material) elif not isinstance(fill, (openmc.Material, openmc.Lattice, openmc.Universe)): msg = ('Unable to set Cell ID="{0}" to use a non-Material or ' 'Universe fill "{1}"'.format(self._id, fill)) raise ValueError(msg) self._fill = fill # Info about atom content can now be invalid # (since fill has just changed) self._atoms = None @rotation.setter def rotation(self, rotation): cv.check_length('cell rotation', rotation, 3) self._rotation = np.asarray(rotation) # Save rotation matrix -- the reason we do this instead of having it be # automatically calculated when the rotation_matrix property is accessed # is so that plotting on a rotated geometry can be done faster. if self._rotation.ndim == 2: # User specified rotation matrix directly self._rotation_matrix = self._rotation else: phi, theta, psi = self.rotation*(-pi/180.) 
c3, s3 = cos(phi), sin(phi) c2, s2 = cos(theta), sin(theta) c1, s1 = cos(psi), sin(psi) self._rotation_matrix = np.array([ [c1*c2, c1*s2*s3 - c3*s1, s1*s3 + c1*c3*s2], [c2*s1, c1*c3 + s1*s2*s3, c3*s1*s2 - c1*s3], [-s2, c2*s3, c2*c3]]) @translation.setter def translation(self, translation): cv.check_type('cell translation', translation, Iterable, Real) cv.check_length('cell translation', translation, 3) self._translation = np.asarray(translation) @temperature.setter def temperature(self, temperature): # Make sure temperatures are positive cv.check_type('cell temperature', temperature, (Iterable, Real)) if isinstance(temperature, Iterable): cv.check_type('cell temperature', temperature, Iterable, Real) for T in temperature: cv.check_greater_than('cell temperature', T, 0.0, True) else: cv.check_greater_than('cell temperature', temperature, 0.0, True) # If this cell is filled with a universe or lattice, propagate # temperatures to all cells contained. Otherwise, simply assign it. if self.fill_type in ('universe', 'lattice'): for c in self.get_all_cells().values(): if c.fill_type == 'material': c._temperature = temperature else: self._temperature = temperature @region.setter def region(self, region): if region is not None: cv.check_type('cell region', region, Region) self._region = region @volume.setter def volume(self, volume): if volume is not None: cv.check_type('cell volume', volume, (Real, UFloat)) cv.check_greater_than('cell volume', volume, 0.0, equality=True) self._volume = volume # Info about atom content can now be invalid # (sice volume has just changed) self._atoms = None def add_volume_information(self, volume_calc): """Add volume information to a cell. Parameters ---------- volume_calc : openmc.VolumeCalculation Results from a stochastic volume calculation """ if volume_calc.domain_type == 'cell': if self.id in volume_calc.volumes: self._volume = volume_calc.volumes[self.id].n self._atoms = volume_calc.atoms[self.id] else: raise ValueError('No volume information found for this cell.') else: raise ValueError('No volume information found for this cell.') def get_nuclides(self): """Returns all nuclides in the cell Returns ------- nuclides : list of str List of nuclide names """ return self.fill.get_nuclides() if self.fill_type != 'void' else [] def get_nuclide_densities(self): """Return all nuclides contained in the cell and their densities Returns ------- nuclides : collections.OrderedDict Dictionary whose keys are nuclide names and values are 2-tuples of (nuclide, density) """ nuclides = OrderedDict() if self.fill_type == 'material': nuclides.update(self.fill.get_nuclide_densities()) elif self.fill_type == 'void': pass else: if self._atoms is not None: volume = self.volume for name, atoms in self._atoms.items(): nuclide = openmc.Nuclide(name) density = 1.0e-24 * atoms.n/volume # density in atoms/b-cm nuclides[name] = (nuclide, density) else: raise RuntimeError( 'Volume information is needed to calculate microscopic cross ' 'sections for cell {}. 
This can be done by running a ' 'stochastic volume calculation via the ' 'openmc.VolumeCalculation object'.format(self.id)) return nuclides def get_all_cells(self, memo=None): """Return all cells that are contained within this one if it is filled with a universe or lattice Returns ------- cells : collections.orderedDict Dictionary whose keys are cell IDs and values are :class:`Cell` instances """ cells = OrderedDict() if memo and self in memo: return cells if memo is not None: memo.add(self) if self.fill_type in ('universe', 'lattice'): cells.update(self.fill.get_all_cells(memo)) return cells def get_all_materials(self, memo=None): """Return all materials that are contained within the cell Returns ------- materials : collections.OrderedDict Dictionary whose keys are material IDs and values are :class:`Material` instances """ materials = OrderedDict() if self.fill_type == 'material': materials[self.fill.id] = self.fill elif self.fill_type == 'distribmat': for m in self.fill: if m is not None: materials[m.id] = m else: # Append all Cells in each Cell in the Universe to the dictionary cells = self.get_all_cells(memo) for cell in cells.values(): materials.update(cell.get_all_materials(memo)) return materials def get_all_universes(self): """Return all universes that are contained within this one if any of its cells are filled with a universe or lattice. Returns ------- universes : collections.OrderedDict Dictionary whose keys are universe IDs and values are :class:`Universe` instances """ universes = OrderedDict() if self.fill_type == 'universe': universes[self.fill.id] = self.fill universes.update(self.fill.get_all_universes()) elif self.fill_type == 'lattice': universes.update(self.fill.get_all_universes()) return universes def clone(self, clone_materials=True, clone_regions=True, memo=None): """Create a copy of this cell with a new unique ID, and clones the cell's region and fill. Parameters ---------- clone_materials : bool Whether to create separate copies of the materials filling cells contained in this cell, or the material filling this cell. clone_regions : bool Whether to create separate copies of the regions bounding cells contained in this cell, and the region bounding this cell. memo : dict or None A nested dictionary of previously cloned objects. This parameter is used internally and should not be specified by the user. 
Returns ------- clone : openmc.Cell The clone of this cell """ if memo is None: memo = {} # If no memoize'd clone exists, instantiate one if self not in memo: # Temporarily remove paths paths = self._paths self._paths = None clone = deepcopy(self) clone.id = None clone._num_instances = None # Restore paths on original instance self._paths = paths if self.region is not None: if clone_regions: clone.region = self.region.clone(memo) else: clone.region = self.region if self.fill is not None: if self.fill_type == 'distribmat': if not clone_materials: clone.fill = self.fill else: clone.fill = [fill.clone(memo) if fill is not None else None for fill in self.fill] elif self.fill_type == 'material': if not clone_materials: clone.fill = self.fill else: clone.fill = self.fill.clone(memo) else: clone.fill = self.fill.clone(clone_materials, clone_regions, memo) # Memoize the clone memo[self] = clone return memo[self] def create_xml_subelement(self, xml_element, memo=None): """Add the cell's xml representation to an incoming xml element Parameters ---------- xml_element : xml.etree.ElementTree.Element XML element to be added to memo : set or None A set of object IDs representing geometry entities already written to ``xml_element``. This parameter is used internally and should not be specified by users. Returns ------- None """ element = ET.Element("cell") element.set("id", str(self.id)) if len(self._name) > 0: element.set("name", str(self.name)) if self.fill_type == 'void': element.set("material", "void") elif self.fill_type == 'material': element.set("material", str(self.fill.id)) elif self.fill_type == 'distribmat': element.set("material", ' '.join(['void' if m is None else str(m.id) for m in self.fill])) elif self.fill_type in ('universe', 'lattice'): element.set("fill", str(self.fill.id)) self.fill.create_xml_subelement(xml_element, memo) if self.region is not None: # Set the region attribute with the region specification region = str(self.region) if region.startswith('('): region = region[1:-1] if len(region) > 0: element.set("region", region) # Only surfaces that appear in a region are added to the geometry # file, so the appropriate check is performed here. First we create # a function which is called recursively to navigate through the CSG # tree. When it reaches a leaf (a Halfspace), it creates a <surface> # element for the corresponding surface if none has been created # thus far. 
def create_surface_elements(node, element, memo=None): if isinstance(node, Halfspace): if memo and node.surface in memo: return if memo is not None: memo.add(node.surface) xml_element.append(node.surface.to_xml_element()) elif isinstance(node, Complement): create_surface_elements(node.node, element, memo) else: for subnode in node: create_surface_elements(subnode, element, memo) # Call the recursive function from the top node create_surface_elements(self.region, xml_element, memo) if self.temperature is not None: if isinstance(self.temperature, Iterable): element.set("temperature", ' '.join( str(t) for t in self.temperature)) else: element.set("temperature", str(self.temperature)) if self.translation is not None: element.set("translation", ' '.join(map(str, self.translation))) if self.rotation is not None: element.set("rotation", ' '.join(map(str, self.rotation.ravel()))) return element @classmethod def from_xml_element(cls, elem, surfaces, materials, get_universe): """Generate cell from XML element Parameters ---------- elem : xml.etree.ElementTree.Element `<cell>` element surfaces : dict Dictionary mapping surface IDs to :class:`openmc.Surface` instances materials : dict Dictionary mapping material IDs to :class:`openmc.Material` instances (defined in :math:`openmc.Geometry.from_xml`) get_universe : function Function returning universe (defined in :meth:`openmc.Geometry.from_xml`) Returns ------- openmc.Cell Cell instance """ cell_id = int(get_text(elem, 'id')) name = get_text(elem, 'name') c = cls(cell_id, name) # Assign material/distributed materials or fill mat_text = get_text(elem, 'material') if mat_text is not None: mat_ids = mat_text.split() if len(mat_ids) > 1: c.fill = [materials[i] for i in mat_ids] else: c.fill = materials[mat_ids[0]] else: fill_id = int(get_text(elem, 'fill')) c.fill = get_universe(fill_id) # Assign region region = get_text(elem, 'region') if region is not None: c.region = Region.from_expression(region, surfaces) # Check for other attributes t = get_text(elem, 'temperature') if t is not None: if ' ' in t: c.temperature = [float(t_i) for t_i in t.split()] else: c.temperature = float(t) for key in ('temperature', 'rotation', 'translation'): value = get_text(elem, key) if value is not None: setattr(c, key, [float(x) for x in value.split()]) # Add this cell to appropriate universe univ_id = int(get_text(elem, 'universe', 0)) get_universe(univ_id).add_cell(c) return c
liangjg/openmc
tests/unit_tests/test_surface_composite.py
openmc/cell.py
# -*- coding: utf-8 -*- '''Base TestCase class for OSF unittests. Uses a temporary MongoDB database.''' import abc import datetime as dt import functools import logging import os import re import shutil import tempfile import unittest import uuid import blinker import httpretty import mock import pytest from addons.wiki.models import NodeWikiPage from django.test.utils import override_settings from django.test import TestCase as DjangoTestCase from faker import Factory from framework.auth import User from framework.auth.core import Auth from framework.celery_tasks.handlers import celery_before_request from framework.django.handlers import handlers as django_handlers from framework.flask import rm_handlers from framework.guid.model import Guid from framework.mongo import client as client_proxy from framework.mongo import database as database_proxy from framework.sessions.model import Session from framework.transactions import commands, messages, utils from pymongo.errors import OperationFailure from website import settings from website.app import init_app from website.notifications.listeners import (subscribe_contributor, subscribe_creator) from website.project.model import (MetaSchema, Node, NodeLog, Tag, WatchConfig, ensure_schemas) from website.project.signals import contributor_added, project_created from website.project.views.contributor import notify_added_contributor from website.signals import ALL_SIGNALS from webtest_plus import TestApp from .json_api_test_app import JSONAPITestApp from nose.tools import * # noqa (PEP8 asserts); noqa (PEP8 asserts) logger = logging.getLogger(__name__) def get_default_metaschema(): """This needs to be a method so it gets called after the test database is set up""" try: return MetaSchema.find()[0] except IndexError: ensure_schemas() return MetaSchema.find()[0] try: test_app = init_app(routes=True, set_backends=False) except AssertionError: # Routes have already been set up test_app = init_app(routes=False, set_backends=False) rm_handlers(test_app, django_handlers) test_app.testing = True # Silence some 3rd-party logging and some "loud" internal loggers SILENT_LOGGERS = [ 'api.caching.tasks', 'factory.generate', 'factory.containers', 'framework.analytics', 'framework.auth.core', 'framework.celery_tasks.signals', 'website.app', 'website.archiver.tasks', 'website.mails', 'website.notifications.listeners', 'website.search.elastic_search', 'website.search_migration.migrate', 'website.util.paths', 'requests_oauthlib.oauth2_session', 'raven.base.Client', 'raven.contrib.django.client.DjangoClient', ] for logger_name in SILENT_LOGGERS: logging.getLogger(logger_name).setLevel(logging.CRITICAL) # Fake factory fake = Factory.create() # All Models MODELS = (User, Node, NodeLog, NodeWikiPage, Tag, WatchConfig, Session, Guid) def teardown_database(client=None, database=None): client = client or client_proxy database = database or database_proxy try: commands.rollback(database) except OperationFailure as error: message = utils.get_error_message(error) if messages.NO_TRANSACTION_ERROR not in message: raise client.drop_database(database) @pytest.mark.django_db class DbTestCase(unittest.TestCase): """Base `TestCase` for tests that require a scratch database. 
""" DB_NAME = getattr(settings, 'TEST_DB_NAME', 'osf_test') @classmethod def setUpClass(cls): super(DbTestCase, cls).setUpClass() # cls._original_db_name = settings.DB_NAME # settings.DB_NAME = cls.DB_NAME cls._original_enable_email_subscriptions = settings.ENABLE_EMAIL_SUBSCRIPTIONS settings.ENABLE_EMAIL_SUBSCRIPTIONS = False cls._original_bcrypt_log_rounds = settings.BCRYPT_LOG_ROUNDS settings.BCRYPT_LOG_ROUNDS = 4 # teardown_database(database=database_proxy._get_current_object()) # TODO: With `database` as a `LocalProxy`, we should be able to simply # this logic # set_up_storage( # website.models.MODELS, # storage.MongoStorage, # addons=settings.ADDONS_AVAILABLE, # ) # cls.db = database_proxy @classmethod def tearDownClass(cls): super(DbTestCase, cls).tearDownClass() # teardown_database(database=database_proxy._get_current_object()) # settings.DB_NAME = cls._original_db_name settings.ENABLE_EMAIL_SUBSCRIPTIONS = cls._original_enable_email_subscriptions settings.BCRYPT_LOG_ROUNDS = cls._original_bcrypt_log_rounds class AppTestCase(unittest.TestCase): """Base `TestCase` for OSF tests that require the WSGI app (but no database). """ PUSH_CONTEXT = True DISCONNECTED_SIGNALS = { # disconnect notify_add_contributor so that add_contributor does not send "fake" emails in tests contributor_added: [notify_added_contributor] } def setUp(self): super(AppTestCase, self).setUp() self.app = TestApp(test_app) if not self.PUSH_CONTEXT: return self.context = test_app.test_request_context(headers={ 'Remote-Addr': '146.9.219.56', 'User-Agent': 'Mozilla/5.0 (X11; U; SunOS sun4u; en-US; rv:0.9.4.1) Gecko/20020518 Netscape6/6.2.3' }) self.context.push() with self.context: celery_before_request() for signal in self.DISCONNECTED_SIGNALS: for receiver in self.DISCONNECTED_SIGNALS[signal]: signal.disconnect(receiver) def tearDown(self): super(AppTestCase, self).tearDown() if not self.PUSH_CONTEXT: return with mock.patch('website.mailchimp_utils.get_mailchimp_api'): self.context.pop() for signal in self.DISCONNECTED_SIGNALS: for receiver in self.DISCONNECTED_SIGNALS[signal]: signal.connect(receiver) class ApiAppTestCase(unittest.TestCase): """Base `TestCase` for OSF API v2 tests that require the WSGI app (but no database). """ allow_database_queries = True def setUp(self): super(ApiAppTestCase, self).setUp() self.app = JSONAPITestApp() class SearchTestCase(unittest.TestCase): def setUp(self): settings.ELASTIC_INDEX = uuid.uuid4().hex settings.ELASTIC_TIMEOUT = 60 from website.search import elastic_search elastic_search.INDEX = settings.ELASTIC_INDEX elastic_search.create_index(settings.ELASTIC_INDEX) # NOTE: Super is called last to ensure the ES connection can be established before # the httpretty module patches the socket. 
super(SearchTestCase, self).setUp() def tearDown(self): super(SearchTestCase, self).tearDown() from website.search import elastic_search elastic_search.delete_index(settings.ELASTIC_INDEX) methods = [ httpretty.GET, httpretty.PUT, httpretty.HEAD, httpretty.POST, httpretty.PATCH, httpretty.DELETE, ] def kill(*args, **kwargs): logger.error('httppretty.kill: %s - %s', args, kwargs) raise httpretty.errors.UnmockedError() class MockRequestTestCase(unittest.TestCase): DISABLE_OUTGOING_CONNECTIONS = False def setUp(self): super(MockRequestTestCase, self).setUp() if self.DISABLE_OUTGOING_CONNECTIONS: httpretty.enable() for method in methods: httpretty.register_uri( method, re.compile(r'.*'), body=kill, priority=-1, ) def tearDown(self): super(MockRequestTestCase, self).tearDown() httpretty.reset() httpretty.disable() class OsfTestCase(DbTestCase, AppTestCase, SearchTestCase, MockRequestTestCase): """Base `TestCase` for tests that require both scratch databases and the OSF application. Note: superclasses must call `super` in order for all setup and teardown methods to be called correctly. """ pass class ApiTestCase(DbTestCase, ApiAppTestCase, SearchTestCase, MockRequestTestCase): """Base `TestCase` for tests that require both scratch databases and the OSF API application. Note: superclasses must call `super` in order for all setup and teardown methods to be called correctly. """ def setUp(self): super(ApiTestCase, self).setUp() settings.USE_EMAIL = False class ApiAddonTestCase(ApiTestCase): """Base `TestCase` for tests that require interaction with addons. """ DISABLE_OUTGOING_CONNECTIONS = True @abc.abstractproperty def short_name(self): pass @abc.abstractproperty def addon_type(self): pass @abc.abstractmethod def _apply_auth_configuration(self): pass @abc.abstractmethod def _set_urls(self): pass def _settings_kwargs(self, node, user_settings): return { 'user_settings': self.user_settings, 'folder_id': '1234567890', 'owner': self.node } def setUp(self): super(ApiAddonTestCase, self).setUp() from osf_tests.factories import ( ProjectFactory, AuthUserFactory, ) from addons.base.models import ( BaseOAuthNodeSettings, BaseOAuthUserSettings ) assert self.addon_type in ('CONFIGURABLE', 'OAUTH', 'UNMANAGEABLE', 'INVALID') self.account = None self.node_settings = None self.user_settings = None self.user = AuthUserFactory() self.auth = Auth(self.user) self.node = ProjectFactory(creator=self.user) if self.addon_type not in ('UNMANAGEABLE', 'INVALID'): if self.addon_type in ('OAUTH', 'CONFIGURABLE'): self.account = self.AccountFactory() self.user.external_accounts.add(self.account) self.user.save() self.user_settings = self.user.get_or_add_addon(self.short_name) self.node_settings = self.node.get_or_add_addon(self.short_name, auth=self.auth) if self.addon_type in ('OAUTH', 'CONFIGURABLE'): self.node_settings.set_auth(self.account, self.user) self._apply_auth_configuration() if self.addon_type in ('OAUTH', 'CONFIGURABLE'): assert isinstance(self.node_settings, BaseOAuthNodeSettings) assert isinstance(self.user_settings, BaseOAuthUserSettings) self.node_settings.reload() self.user_settings.reload() self.account_id = self.account._id if self.account else None self.set_urls() def tearDown(self): super(ApiAddonTestCase, self).tearDown() self.user.remove() self.node.remove() if self.node_settings: self.node_settings.remove() if self.user_settings: self.user_settings.remove() if self.account: self.account.remove() @override_settings(ROOT_URLCONF='admin.base.urls') class AdminTestCase(DbTestCase, DjangoTestCase, 
SearchTestCase, MockRequestTestCase): pass class NotificationTestCase(OsfTestCase): """An `OsfTestCase` to use when testing specific subscription behavior. Use when you'd like to manually create all Node subscriptions and subscriptions for added contributors yourself, and not rely on automatically added ones. """ DISCONNECTED_SIGNALS = { # disconnect signals so that add_contributor does not send "fake" emails in tests contributor_added: [notify_added_contributor, subscribe_contributor], project_created: [subscribe_creator] } def setUp(self): super(NotificationTestCase, self).setUp() def tearDown(self): super(NotificationTestCase, self).tearDown() class ApiWikiTestCase(ApiTestCase): def setUp(self): from osf_tests.factories import AuthUserFactory super(ApiWikiTestCase, self).setUp() self.user = AuthUserFactory() self.non_contributor = AuthUserFactory() def _add_project_wiki_page(self, node, user): from addons.wiki.tests.factories import NodeWikiFactory # API will only return current wiki pages # Mock out update_search. TODO: Remove when StoredFileNode is implemented with mock.patch('osf.models.AbstractNode.update_search'): return NodeWikiFactory(node=node, user=user) # From Flask-Security: https://github.com/mattupstate/flask-security/blob/develop/flask_security/utils.py class CaptureSignals(object): """Testing utility for capturing blinker signals. Context manager which mocks out selected signals and registers which are `sent` on and what arguments were sent. Instantiate with a list of blinker `NamedSignals` to patch. Each signal has its `send` mocked out. """ def __init__(self, signals): """Patch all given signals and make them available as attributes. :param signals: list of signals """ self._records = {} self._receivers = {} for signal in signals: self._records[signal] = [] self._receivers[signal] = functools.partial(self._record, signal) def __getitem__(self, signal): """All captured signals are available via `ctxt[signal]`. """ if isinstance(signal, blinker.base.NamedSignal): return self._records[signal] else: super(CaptureSignals, self).__setitem__(signal) def _record(self, signal, *args, **kwargs): self._records[signal].append((args, kwargs)) def __enter__(self): for signal, receiver in self._receivers.items(): signal.connect(receiver) return self def __exit__(self, type, value, traceback): for signal, receiver in self._receivers.items(): signal.disconnect(receiver) def signals_sent(self): """Return a set of the signals sent. :rtype: list of blinker `NamedSignals`. """ return set([signal for signal, _ in self._records.items() if self._records[signal]]) def capture_signals(): """Factory method that creates a ``CaptureSignals`` with all OSF signals.""" return CaptureSignals(ALL_SIGNALS) def assert_is_redirect(response, msg='Response is a redirect.'): assert 300 <= response.status_code < 400, msg def assert_before(lst, item1, item2): """Assert that item1 appears before item2 in lst.""" assert_less(lst.index(item1), lst.index(item2), '{0!r} appears before {1!r}'.format(item1, item2)) def assert_datetime_equal(dt1, dt2, allowance=500): """Assert that two datetimes are about equal.""" assert abs(dt1 - dt2) < dt.timedelta(milliseconds=allowance)
#!/usr/bin/env python # -*- coding: utf-8 -*- """Views tests for the OSF.""" from __future__ import absolute_import import datetime as dt import httplib as http import json import time import pytz import unittest from flask import request import mock import pytest from nose.tools import * # noqa PEP8 asserts from django.utils import timezone from django.apps import apps from modularodm import Q from modularodm.exceptions import ValidationError from addons.github.tests.factories import GitHubAccountFactory from framework.auth import cas from framework.auth.core import generate_verification_key from framework import auth from framework.auth.campaigns import get_campaigns, is_institution_login, is_native_login, is_proxy_login, campaign_url_for from framework.auth import Auth from framework.auth.cas import get_login_url from framework.auth.core import generate_verification_key from framework.auth.exceptions import InvalidTokenError from framework.auth.utils import impute_names_model, ensure_external_identity_uniqueness from framework.auth.views import login_and_register_handler from framework.celery_tasks import handlers from framework.exceptions import HTTPError from framework.transactions.handlers import no_auto_transaction from tests.factories import MockAddonNodeSettings from website import mailchimp_utils from website import mails, settings from addons.osfstorage import settings as osfstorage_settings from website.models import Node, NodeLog, Pointer from website.profile.utils import add_contributor_json, serialize_unregistered from website.profile.views import fmt_date_or_none, update_osf_help_mails_subscription from website.project.decorators import check_can_access from website.project.model import has_anonymous_link from website.project.signals import contributor_added from website.project.views.contributor import ( deserialize_contributors, notify_added_contributor, send_claim_email, send_claim_registered_email, ) from website.project.views.node import _should_show_wiki_widget, _view_project, abbrev_authors from website.util import api_url_for, web_url_for from website.util import permissions, rubeus from website.views import index from osf.models import Comment from osf.models import OSFUser as User from tests.base import ( assert_is_redirect, capture_signals, fake, get_default_metaschema, OsfTestCase, assert_datetime_equal, ) from tests.base import test_app as mock_app pytestmark = pytest.mark.django_db from osf.models import NodeRelation from osf_tests.factories import ( UserFactory, UnconfirmedUserFactory, UnregUserFactory, AuthUserFactory, PrivateLinkFactory, ProjectFactory, NodeFactory, CommentFactory, CollectionFactory, InstitutionFactory, RegistrationFactory, ApiOAuth2ApplicationFactory, ApiOAuth2PersonalTokenFactory, ProjectWithAddonFactory, PreprintFactory, PreprintProviderFactory, ) class Addon(MockAddonNodeSettings): @property def complete(self): return True def archive_errors(self): return 'Error' class Addon2(MockAddonNodeSettings): @property def complete(self): return True def archive_errors(self): return 'Error' @mock_app.route('/errorexc') def error_exc(): UserFactory() raise RuntimeError @mock_app.route('/error500') def error500(): UserFactory() return 'error', 500 @mock_app.route('/noautotransact') @no_auto_transaction def no_auto_transact(): UserFactory() return 'error', 500 class TestViewsAreAtomic(OsfTestCase): def test_error_response_rolls_back_transaction(self): original_user_count = User.objects.count() self.app.get('/error500', expect_errors=True) 
assert_equal(User.objects.count(), original_user_count) # Need to set debug = False in order to rollback transactions in transaction_teardown_request mock_app.debug = False try: self.app.get('/errorexc', expect_errors=True) except RuntimeError: pass mock_app.debug = True self.app.get('/noautotransact', expect_errors=True) assert_equal(User.objects.count(), original_user_count + 1) class TestViewingProjectWithPrivateLink(OsfTestCase): def setUp(self): super(TestViewingProjectWithPrivateLink, self).setUp() self.user = AuthUserFactory() # Is NOT a contributor self.project = ProjectFactory(is_public=False) self.link = PrivateLinkFactory() self.link.nodes.add(self.project) self.link.save() self.project_url = self.project.web_url_for('view_project') def test_edit_private_link_empty(self): node = ProjectFactory(creator=self.user) link = PrivateLinkFactory() link.nodes.add(node) link.save() url = node.api_url_for('project_private_link_edit') res = self.app.put_json(url, {'pk': link._id, 'value': ''}, auth=self.user.auth, expect_errors=True) assert_equal(res.status_code, 400) assert_in('Title cannot be blank', res.body) def test_edit_private_link_invalid(self): node = ProjectFactory(creator=self.user) link = PrivateLinkFactory() link.nodes.add(node) link.save() url = node.api_url_for('project_private_link_edit') res = self.app.put_json(url, {'pk': link._id, 'value': '<a></a>'}, auth=self.user.auth, expect_errors=True) assert_equal(res.status_code, 400) assert_in('Invalid link name.', res.body) @mock.patch('framework.auth.core.Auth.private_link') def test_can_be_anonymous_for_public_project(self, mock_property): mock_property.return_value(mock.MagicMock()) mock_property.anonymous = True anonymous_link = PrivateLinkFactory(anonymous=True) anonymous_link.nodes.add(self.project) anonymous_link.save() self.project.set_privacy('public') self.project.save() self.project.reload() auth = Auth(user=self.user, private_key=anonymous_link.key) assert_true(has_anonymous_link(self.project, auth)) def test_has_private_link_key(self): res = self.app.get(self.project_url, {'view_only': self.link.key}) assert_equal(res.status_code, 200) def test_not_logged_in_no_key(self): res = self.app.get(self.project_url, {'view_only': None}) assert_is_redirect(res) res = res.follow(expect_errors=True) assert_equal(res.status_code, 301) assert_equal( res.request.path, '/login' ) def test_logged_in_no_private_key(self): res = self.app.get(self.project_url, {'view_only': None}, auth=self.user.auth, expect_errors=True) assert_equal(res.status_code, http.FORBIDDEN) def test_logged_in_has_key(self): res = self.app.get( self.project_url, {'view_only': self.link.key}, auth=self.user.auth) assert_equal(res.status_code, 200) @unittest.skip('Skipping for now until we find a way to mock/set the referrer') def test_prepare_private_key(self): res = self.app.get(self.project_url, {'key': self.link.key}) res = res.click('Registrations') assert_is_redirect(res) res = res.follow() assert_equal(res.status_code, 200) assert_equal(res.request.GET['key'], self.link.key) def test_cannot_access_registrations_or_forks_with_anon_key(self): anonymous_link = PrivateLinkFactory(anonymous=True) anonymous_link.nodes.add(self.project) anonymous_link.save() self.project.is_public = False self.project.save() url = self.project_url + 'registrations/?view_only={}'.format(anonymous_link.key) res = self.app.get(url, expect_errors=True) assert_equal(res.status_code, 401) url = self.project_url + 'forks/?view_only={}'.format(anonymous_link.key) res = self.app.get(url, 
expect_errors=True) assert_equal(res.status_code, 401) def test_can_access_registrations_and_forks_with_not_anon_key(self): link = PrivateLinkFactory(anonymous=False) link.nodes.add(self.project) link.save() self.project.is_public = False self.project.save() url = self.project_url + 'registrations/?view_only={}'.format(self.link.key) res = self.app.get(url) assert_equal(res.status_code, 200) url = self.project_url + 'forks/?view_only={}'.format(self.link.key) res = self.app.get(url) assert_equal(res.status_code, 200) def test_check_can_access_valid(self): contributor = AuthUserFactory() self.project.add_contributor(contributor, auth=Auth(self.project.creator)) self.project.save() assert_true(check_can_access(self.project, contributor)) def test_check_user_access_invalid(self): noncontrib = AuthUserFactory() with assert_raises(HTTPError): check_can_access(self.project, noncontrib) def test_check_user_access_if_user_is_None(self): assert_false(check_can_access(self.project, None)) class TestProjectViews(OsfTestCase): def setUp(self): super(TestProjectViews, self).setUp() self.user1 = AuthUserFactory() self.user1.save() self.consolidate_auth1 = Auth(user=self.user1) self.auth = self.user1.auth self.user2 = AuthUserFactory() self.auth2 = self.user2.auth # A project has 2 contributors self.project = ProjectFactory( title='Ham', description='Honey-baked', creator=self.user1 ) self.project.add_contributor(self.user2, auth=Auth(self.user1)) self.project.save() self.project2 = ProjectFactory( title='Tofu', description='Glazed', creator=self.user1 ) self.project2.add_contributor(self.user2, auth=Auth(self.user1)) self.project2.save() def test_node_setting_with_multiple_matched_institution_email_domains(self): # User has alternate emails matching more than one institution's email domains inst1 = InstitutionFactory(email_domains=['foo.bar']) inst2 = InstitutionFactory(email_domains=['baz.qux']) user = AuthUserFactory() user.emails.append('queen@foo.bar') user.emails.append('brian@baz.qux') user.save() project = ProjectFactory(creator=user) # node settings page loads without error url = project.web_url_for('node_setting') res = self.app.get(url, auth=user.auth) assert_equal(res.status_code, 200) # user is automatically affiliated with institutions # that matched email domains user.reload() assert_in(inst1, user.affiliated_institutions.all()) assert_in(inst2, user.affiliated_institutions.all()) def test_edit_title_empty(self): node = ProjectFactory(creator=self.user1) url = node.api_url_for('edit_node') res = self.app.post_json(url, {'name': 'title', 'value': ''}, auth=self.user1.auth, expect_errors=True) assert_equal(res.status_code, 400) assert_in('Title cannot be blank', res.body) def test_edit_title_invalid(self): node = ProjectFactory(creator=self.user1) url = node.api_url_for('edit_node') res = self.app.post_json(url, {'name': 'title', 'value': '<a></a>'}, auth=self.user1.auth, expect_errors=True) assert_equal(res.status_code, 400) assert_in('Invalid title.', res.body) def test_cannot_remove_only_visible_contributor(self): user1_contrib = self.project.contributor_set.get(user=self.user1) user1_contrib.visible = False user1_contrib.save() url = self.project.api_url_for('project_remove_contributor') res = self.app.post_json( url, {'contributorID': self.user2._id, 'nodeIDs': [self.project._id]}, auth=self.auth, expect_errors=True ) assert_equal(res.status_code, http.FORBIDDEN) assert_equal(res.json['message_long'], 'Must have at least one bibliographic contributor') 
assert_true(self.project.is_contributor(self.user2)) def test_remove_only_visible_contributor_return_false(self): user1_contrib = self.project.contributor_set.get(user=self.user1) user1_contrib.visible = False user1_contrib.save() ret = self.project.remove_contributor(contributor=self.user2, auth=self.consolidate_auth1) assert_false(ret) self.project.reload() assert_true(self.project.is_contributor(self.user2)) def test_can_view_nested_project_as_admin(self): self.parent_project = NodeFactory( title='parent project', category='project', parent=self.project, is_public=False ) self.parent_project.save() self.child_project = NodeFactory( title='child project', category='project', parent=self.parent_project, is_public=False ) self.child_project.save() url = self.child_project.web_url_for('view_project') res = self.app.get(url, auth=self.auth) assert_not_in('Private Project', res.body) assert_in('parent project', res.body) def test_edit_description(self): url = '/api/v1/project/{0}/edit/'.format(self.project._id) self.app.post_json(url, {'name': 'description', 'value': 'Deep-fried'}, auth=self.auth) self.project.reload() assert_equal(self.project.description, 'Deep-fried') def test_project_api_url(self): url = self.project.api_url res = self.app.get(url, auth=self.auth) data = res.json assert_equal(data['node']['category'], 'Project') assert_equal(data['node']['node_type'], 'project') assert_equal(data['node']['title'], self.project.title) assert_equal(data['node']['is_public'], self.project.is_public) assert_equal(data['node']['is_registration'], False) assert_equal(data['node']['id'], self.project._primary_key) assert_true(data['user']['is_contributor']) assert_equal(data['node']['description'], self.project.description) assert_equal(data['node']['url'], self.project.url) assert_equal(data['node']['tags'], list(self.project.tags.values_list('name', flat=True))) assert_in('forked_date', data['node']) assert_in('registered_from_url', data['node']) # TODO: Test "parent" and "user" output def test_add_contributor_post(self): # Two users are added as a contributor via a POST request project = ProjectFactory(creator=self.user1, is_public=True) user2 = UserFactory() user3 = UserFactory() url = '/api/v1/project/{0}/contributors/'.format(project._id) dict2 = add_contributor_json(user2) dict3 = add_contributor_json(user3) dict2.update({ 'permission': 'admin', 'visible': True, }) dict3.update({ 'permission': 'write', 'visible': False, }) self.app.post_json( url, { 'users': [dict2, dict3], 'node_ids': [project._id], }, content_type='application/json', auth=self.auth, ).maybe_follow() project.reload() assert_in(user2, project.contributors) # A log event was added assert_equal(project.logs.latest().action, 'contributor_added') assert_equal(len(project.contributors), 3) assert_equal(project.get_permissions(user2), ['read', 'write', 'admin']) assert_equal(project.get_permissions(user3), ['read', 'write']) def test_manage_permissions(self): url = self.project.api_url + 'contributors/manage/' self.app.post_json( url, { 'contributors': [ {'id': self.project.creator._id, 'permission': 'admin', 'registered': True, 'visible': True}, {'id': self.user1._id, 'permission': 'read', 'registered': True, 'visible': True}, {'id': self.user2._id, 'permission': 'admin', 'registered': True, 'visible': True}, ] }, auth=self.auth, ) self.project.reload() assert_equal(self.project.get_permissions(self.user1), ['read']) assert_equal(self.project.get_permissions(self.user2), ['read', 'write', 'admin']) def 
test_manage_permissions_again(self): url = self.project.api_url + 'contributors/manage/' self.app.post_json( url, { 'contributors': [ {'id': self.user1._id, 'permission': 'admin', 'registered': True, 'visible': True}, {'id': self.user2._id, 'permission': 'admin', 'registered': True, 'visible': True}, ] }, auth=self.auth, ) self.project.reload() self.app.post_json( url, { 'contributors': [ {'id': self.user1._id, 'permission': 'admin', 'registered': True, 'visible': True}, {'id': self.user2._id, 'permission': 'read', 'registered': True, 'visible': True}, ] }, auth=self.auth, ) self.project.reload() assert_equal(self.project.get_permissions(self.user2), ['read']) assert_equal(self.project.get_permissions(self.user1), ['read', 'write', 'admin']) def test_contributor_manage_reorder(self): # Two users are added as a contributor via a POST request project = ProjectFactory(creator=self.user1, is_public=True) reg_user1, reg_user2 = UserFactory(), UserFactory() project.add_contributors( [ {'user': reg_user1, 'permissions': [ 'read', 'write', 'admin'], 'visible': True}, {'user': reg_user2, 'permissions': [ 'read', 'write', 'admin'], 'visible': False}, ] ) # Add a non-registered user unregistered_user = project.add_unregistered_contributor( fullname=fake.name(), email=fake.email(), auth=self.consolidate_auth1, save=True, ) url = project.api_url + 'contributors/manage/' self.app.post_json( url, { 'contributors': [ {'id': reg_user2._id, 'permission': 'admin', 'registered': True, 'visible': False}, {'id': project.creator._id, 'permission': 'admin', 'registered': True, 'visible': True}, {'id': unregistered_user._id, 'permission': 'admin', 'registered': False, 'visible': True}, {'id': reg_user1._id, 'permission': 'admin', 'registered': True, 'visible': True}, ] }, auth=self.auth, ) project.reload() assert_equal( # Note: Cast ForeignList to list for comparison list(project.contributors), [reg_user2, project.creator, unregistered_user, reg_user1] ) assert_equal( list(project.visible_contributors), [project.creator, unregistered_user, reg_user1] ) def test_project_remove_contributor(self): url = self.project.api_url_for('project_remove_contributor') # User 1 removes user2 payload = {'contributorID': self.user2._id, 'nodeIDs': [self.project._id]} self.app.post(url, json.dumps(payload), content_type='application/json', auth=self.auth).maybe_follow() self.project.reload() assert_not_in(self.user2._id, self.project.contributors) # A log event was added assert_equal(self.project.logs.latest().action, 'contributor_removed') def test_multiple_project_remove_contributor(self): url = self.project.api_url_for('project_remove_contributor') # User 1 removes user2 payload = {'contributorID': self.user2._id, 'nodeIDs': [self.project._id, self.project2._id]} res = self.app.post(url, json.dumps(payload), content_type='application/json', auth=self.auth).maybe_follow() self.project.reload() self.project2.reload() assert_not_in(self.user2._id, self.project.contributors) assert_not_in('/dashboard/', res.json) assert_not_in(self.user2._id, self.project2.contributors) # A log event was added assert_equal(self.project.logs.latest().action, 'contributor_removed') def test_private_project_remove_self_not_admin(self): url = self.project.api_url_for('project_remove_contributor') # user2 removes self payload = {"contributorID": self.user2._id, "nodeIDs": [self.project._id]} res = self.app.post(url, json.dumps(payload), content_type="application/json", auth=self.auth2).maybe_follow() self.project.reload() assert_equal(res.status_code, 
200) assert_equal(res.json['redirectUrl'], '/dashboard/') assert_not_in(self.user2._id, self.project.contributors) def test_public_project_remove_self_not_admin(self): url = self.project.api_url_for('project_remove_contributor') # user2 removes self self.public_project = ProjectFactory(creator=self.user1, is_public=True) self.public_project.add_contributor(self.user2, auth=Auth(self.user1)) self.public_project.save() payload = {"contributorID": self.user2._id, "nodeIDs": [self.public_project._id]} res = self.app.post(url, json.dumps(payload), content_type="application/json", auth=self.auth2).maybe_follow() self.public_project.reload() assert_equal(res.status_code, 200) assert_equal(res.json['redirectUrl'], '/' + self.public_project._id + '/') assert_not_in(self.user2._id, self.public_project.contributors) def test_project_remove_other_not_admin(self): url = self.project.api_url_for('project_remove_contributor') # User 1 removes user2 payload = {"contributorID": self.user1._id, "nodeIDs": [self.project._id]} res = self.app.post(url, json.dumps(payload), content_type="application/json", expect_errors=True, auth=self.auth2).maybe_follow() self.project.reload() assert_equal(res.status_code, 403) assert_equal(res.json['message_long'], 'You do not have permission to perform this action. ' 'If this should not have occurred and the issue persists, ' 'please report it to <a href="mailto:support@osf.io">support@osf.io</a>.' ) assert_in(self.user1, self.project.contributors) def test_project_remove_fake_contributor(self): url = self.project.api_url_for('project_remove_contributor') # User 1 removes user2 payload = {'contributorID': 'badid', 'nodeIDs': [self.project._id]} res = self.app.post(url, json.dumps(payload), content_type='application/json', expect_errors=True, auth=self.auth).maybe_follow() self.project.reload() # Assert the contributor id was invalid assert_equal(res.status_code, 400) assert_equal(res.json['message_long'], 'Contributor not found.') assert_not_in('badid', self.project.contributors) def test_project_remove_self_only_admin(self): url = self.project.api_url_for('project_remove_contributor') # User 1 removes user2 payload = {'contributorID': self.user1._id, 'nodeIDs': [self.project._id]} res = self.app.post(url, json.dumps(payload), content_type='application/json', expect_errors=True, auth=self.auth).maybe_follow() self.project.reload() assert_equal(res.status_code, 400) assert_equal(res.json['message_long'], 'Could not remove contributor.') assert_in(self.user1, self.project.contributors) def test_get_contributors_abbrev(self): # create a project with 3 registered contributors project = ProjectFactory(creator=self.user1, is_public=True) reg_user1, reg_user2 = UserFactory(), UserFactory() project.add_contributors( [ {'user': reg_user1, 'permissions': [ 'read', 'write', 'admin'], 'visible': True}, {'user': reg_user2, 'permissions': [ 'read', 'write', 'admin'], 'visible': True}, ] ) # add an unregistered contributor project.add_unregistered_contributor( fullname=fake.name(), email=fake.email(), auth=self.consolidate_auth1, save=True, ) url = project.api_url_for('get_node_contributors_abbrev') res = self.app.get(url, auth=self.auth) assert_equal(len(project.contributors), 4) assert_equal(len(res.json['contributors']), 3) assert_equal(len(res.json['others_count']), 1) assert_equal(res.json['contributors'][0]['separator'], ',') assert_equal(res.json['contributors'][1]['separator'], ',') assert_equal(res.json['contributors'][2]['separator'], ' &') def test_edit_node_title(self): url = 
'/api/v1/project/{0}/edit/'.format(self.project._id) # The title is changed though posting form data self.app.post_json(url, {'name': 'title', 'value': 'Bacon'}, auth=self.auth).maybe_follow() self.project.reload() # The title was changed assert_equal(self.project.title, 'Bacon') # A log event was saved assert_equal(self.project.logs.latest().action, 'edit_title') def test_make_public(self): self.project.is_public = False self.project.save() url = "/api/v1/project/{0}/permissions/public/".format(self.project._id) res = self.app.post_json(url, {}, auth=self.auth) self.project.reload() assert_true(self.project.is_public) assert_equal(res.json['status'], 'success') def test_make_private(self): self.project.is_public = True self.project.save() url = "/api/v1/project/{0}/permissions/private/".format(self.project._id) res = self.app.post_json(url, {}, auth=self.auth) self.project.reload() assert_false(self.project.is_public) assert_equal(res.json['status'], 'success') def test_cant_make_public_if_not_admin(self): non_admin = AuthUserFactory() self.project.add_contributor(non_admin, permissions=['read', 'write']) self.project.is_public = False self.project.save() url = "/api/v1/project/{0}/permissions/public/".format(self.project._id) res = self.app.post_json( url, {}, auth=non_admin.auth, expect_errors=True, ) assert_equal(res.status_code, http.FORBIDDEN) assert_false(self.project.is_public) def test_cant_make_private_if_not_admin(self): non_admin = AuthUserFactory() self.project.add_contributor(non_admin, permissions=['read', 'write']) self.project.is_public = True self.project.save() url = "/api/v1/project/{0}/permissions/private/".format(self.project._id) res = self.app.post_json( url, {}, auth=non_admin.auth, expect_errors=True, ) assert_equal(res.status_code, http.FORBIDDEN) assert_true(self.project.is_public) def test_add_tag(self): url = self.project.api_url_for('project_add_tag') self.app.post_json(url, {'tag': "foo'ta#@%#%^&g?"}, auth=self.auth) self.project.reload() assert_in("foo'ta#@%#%^&g?", self.project.tags.values_list('name', flat=True)) assert_equal("foo'ta#@%#%^&g?", self.project.logs.latest().params['tag']) def test_remove_tag(self): self.project.add_tag("foo'ta#@%#%^&g?", auth=self.consolidate_auth1, save=True) assert_in("foo'ta#@%#%^&g?", self.project.tags.values_list('name', flat=True)) url = self.project.api_url_for('project_remove_tag') self.app.delete_json(url, {'tag': "foo'ta#@%#%^&g?"}, auth=self.auth) self.project.reload() assert_not_in("foo'ta#@%#%^&g?", self.project.tags.values_list('name', flat=True)) latest_log = self.project.logs.latest() assert_equal('tag_removed', latest_log.action) assert_equal("foo'ta#@%#%^&g?", latest_log.params['tag']) # Regression test for #OSF-5257 def test_removal_empty_tag_throws_error(self): url = self.project.api_url_for('project_remove_tag') res = self.app.delete_json(url, {'tag': ''}, auth=self.auth, expect_errors=True) assert_equal(res.status_code, http.BAD_REQUEST) # Regression test for #OSF-5257 def test_removal_unknown_tag_throws_error(self): self.project.add_tag('narf', auth=self.consolidate_auth1, save=True) url = self.project.api_url_for('project_remove_tag') res = self.app.delete_json(url, {'tag': 'troz'}, auth=self.auth, expect_errors=True) assert_equal(res.status_code, http.CONFLICT) def test_remove_project(self): url = self.project.api_url res = self.app.delete_json(url, {}, auth=self.auth).maybe_follow() self.project.reload() assert_equal(self.project.is_deleted, True) assert_in('url', res.json) 
assert_equal(res.json['url'], '/dashboard/') def test_suspended_project(self): node = NodeFactory(parent=self.project, creator=self.user1) node.remove_node(Auth(self.user1)) node.suspended = True node.save() url = node.api_url res = self.app.get(url, auth=Auth(self.user1), expect_errors=True) assert_equal(res.status_code, 451) def test_private_link_edit_name(self): link = PrivateLinkFactory(name='link') link.nodes.add(self.project) link.save() assert_equal(link.name, 'link') url = self.project.api_url + 'private_link/edit/' self.app.put_json( url, {'pk': link._id, 'value': 'new name'}, auth=self.auth, ).maybe_follow() self.project.reload() link.reload() assert_equal(link.name, 'new name') def test_remove_private_link(self): link = PrivateLinkFactory() link.nodes.add(self.project) link.save() url = self.project.api_url_for('remove_private_link') self.app.delete_json( url, {'private_link_id': link._id}, auth=self.auth, ).maybe_follow() self.project.reload() link.reload() assert_true(link.is_deleted) def test_remove_component(self): node = NodeFactory(parent=self.project, creator=self.user1) url = node.api_url res = self.app.delete_json(url, {}, auth=self.auth).maybe_follow() node.reload() assert_equal(node.is_deleted, True) assert_in('url', res.json) assert_equal(res.json['url'], self.project.url) def test_cant_remove_component_if_not_admin(self): node = NodeFactory(parent=self.project, creator=self.user1) non_admin = AuthUserFactory() node.add_contributor( non_admin, permissions=['read', 'write'], save=True, ) url = node.api_url res = self.app.delete_json( url, {}, auth=non_admin.auth, expect_errors=True, ).maybe_follow() assert_equal(res.status_code, http.FORBIDDEN) assert_false(node.is_deleted) def test_view_project_returns_whether_to_show_wiki_widget(self): user = AuthUserFactory() project = ProjectFactory(creator=user, is_public=True) project.add_contributor(user) project.save() url = project.api_url_for('view_project') res = self.app.get(url, auth=user.auth) assert_equal(res.status_code, http.OK) assert_in('show_wiki_widget', res.json['user']) def test_fork_count_does_not_include_deleted_forks(self): user = AuthUserFactory() project = ProjectFactory(creator=user) auth = Auth(project.creator) fork = project.fork_node(auth) project.save() fork.remove_node(auth) fork.save() url = project.api_url_for('view_project') res = self.app.get(url, auth=user.auth) assert_in('fork_count', res.json['node']) assert_equal(0, res.json['node']['fork_count']) def test_statistic_page_redirect(self): url = self.project.web_url_for('project_statistics_redirect') res = self.app.get(url, auth=self.auth) assert_equal(res.status_code, 302) assert_in(self.project.web_url_for('project_statistics', _guid=True), res.location) def test_registration_retraction_redirect(self): url = self.project.web_url_for('node_registration_retraction_redirect') res = self.app.get(url, auth=self.auth) assert_equal(res.status_code, 302) assert_in(self.project.web_url_for('node_registration_retraction_get', _guid=True), res.location) def test_update_node(self): url = self.project.api_url_for('update_node') res = self.app.put_json(url, {'title': 'newtitle'}, auth=self.auth) assert_equal(res.status_code, 200) self.project.reload() assert_equal(self.project.title, 'newtitle') # Regression test def test_update_node_with_tags(self): self.project.add_tag('cheezebørger', auth=Auth(self.project.creator), save=True) url = self.project.api_url_for('update_node') res = self.app.put_json(url, {'title': 'newtitle'}, auth=self.auth) 
assert_equal(res.status_code, 200) self.project.reload() assert_equal(self.project.title, 'newtitle') class TestEditableChildrenViews(OsfTestCase): def setUp(self): OsfTestCase.setUp(self) self.user = AuthUserFactory() self.project = ProjectFactory(creator=self.user, is_public=False) self.child = ProjectFactory(parent=self.project, creator=self.user, is_public=True) self.grandchild = ProjectFactory(parent=self.child, creator=self.user, is_public=False) self.great_grandchild = ProjectFactory(parent=self.grandchild, creator=self.user, is_public=True) self.great_great_grandchild = ProjectFactory(parent=self.great_grandchild, creator=self.user, is_public=False) url = self.project.api_url_for('get_editable_children') self.project_results = self.app.get(url, auth=self.user.auth).json def test_get_editable_children(self): assert_equal(len(self.project_results['children']), 4) assert_equal(self.project_results['node']['id'], self.project._id) def test_editable_children_order(self): assert_equal(self.project_results['children'][0]['id'], self.child._id) assert_equal(self.project_results['children'][1]['id'], self.grandchild._id) assert_equal(self.project_results['children'][2]['id'], self.great_grandchild._id) assert_equal(self.project_results['children'][3]['id'], self.great_great_grandchild._id) def test_editable_children_indents(self): assert_equal(self.project_results['children'][0]['indent'], 0) assert_equal(self.project_results['children'][1]['indent'], 1) assert_equal(self.project_results['children'][2]['indent'], 2) assert_equal(self.project_results['children'][3]['indent'], 3) def test_editable_children_parents(self): assert_equal(self.project_results['children'][0]['parent_id'], self.project._id) assert_equal(self.project_results['children'][1]['parent_id'], self.child._id) assert_equal(self.project_results['children'][2]['parent_id'], self.grandchild._id) assert_equal(self.project_results['children'][3]['parent_id'], self.great_grandchild._id) def test_editable_children_privacy(self): assert_false(self.project_results['node']['is_public']) assert_true(self.project_results['children'][0]['is_public']) assert_false(self.project_results['children'][1]['is_public']) assert_true(self.project_results['children'][2]['is_public']) assert_false(self.project_results['children'][3]['is_public']) def test_editable_children_titles(self): assert_equal(self.project_results['node']['title'], self.project.title) assert_equal(self.project_results['children'][0]['title'], self.child.title) assert_equal(self.project_results['children'][1]['title'], self.grandchild.title) assert_equal(self.project_results['children'][2]['title'], self.great_grandchild.title) assert_equal(self.project_results['children'][3]['title'], self.great_great_grandchild.title) class TestGetNodeTree(OsfTestCase): def setUp(self): OsfTestCase.setUp(self) self.user = AuthUserFactory() self.user2 = AuthUserFactory() def test_get_single_node(self): project = ProjectFactory(creator=self.user) # child = NodeFactory(parent=project, creator=self.user) url = project.api_url_for('get_node_tree') res = self.app.get(url, auth=self.user.auth) node_id = res.json[0]['node']['id'] assert_equal(node_id, project._primary_key) def test_get_node_with_children(self): project = ProjectFactory(creator=self.user) child1 = NodeFactory(parent=project, creator=self.user) child2 = NodeFactory(parent=project, creator=self.user2) child3 = NodeFactory(parent=project, creator=self.user) url = project.api_url_for('get_node_tree') res = self.app.get(url, 
auth=self.user.auth) tree = res.json[0] parent_node_id = tree['node']['id'] child1_id = tree['children'][0]['node']['id'] child2_id = tree['children'][1]['node']['id'] child3_id = tree['children'][2]['node']['id'] assert_equal(parent_node_id, project._primary_key) assert_equal(child1_id, child1._primary_key) assert_equal(child2_id, child2._primary_key) assert_equal(child3_id, child3._primary_key) def test_get_node_with_child_linked_to_parent(self): project = ProjectFactory(creator=self.user) child1 = NodeFactory(parent=project, creator=self.user) child1.add_pointer(project, Auth(self.user)) child1.save() url = project.api_url_for('get_node_tree') res = self.app.get(url, auth=self.user.auth) tree = res.json[0] parent_node_id = tree['node']['id'] child1_id = tree['children'][0]['node']['id'] assert_equal(child1_id, child1._primary_key) def test_get_node_not_parent_owner(self): project = ProjectFactory(creator=self.user2) child = NodeFactory(parent=project, creator=self.user2) url = project.api_url_for('get_node_tree') res = self.app.get(url, auth=self.user.auth, expect_errors=True) assert_equal(res.status_code, 200) assert_equal(res.json, []) # Parent node should show because of user2 read access, the children should not def test_get_node_parent_not_admin(self): project = ProjectFactory(creator=self.user) project.add_contributor(self.user2, auth=Auth(self.user)) project.save() child1 = NodeFactory(parent=project, creator=self.user) child2 = NodeFactory(parent=project, creator=self.user) child3 = NodeFactory(parent=project, creator=self.user) url = project.api_url_for('get_node_tree') res = self.app.get(url, auth=self.user2.auth) tree = res.json[0] parent_node_id = tree['node']['id'] children = tree['children'] assert_equal(parent_node_id, project._primary_key) assert_equal(children, []) class TestUserProfile(OsfTestCase): def setUp(self): super(TestUserProfile, self).setUp() self.user = AuthUserFactory() def test_sanitization_of_edit_profile(self): url = api_url_for('edit_profile', uid=self.user._id) post_data = {'name': 'fullname', 'value': 'new<b> name</b> '} request = self.app.post(url, post_data, auth=self.user.auth) assert_equal('new name', request.json['name']) def test_fmt_date_or_none(self): with assert_raises(HTTPError) as cm: #enter a date before 1900 fmt_date_or_none(dt.datetime(1890, 10, 31, 18, 23, 29, 227)) # error should be raised because date is before 1900 assert_equal(cm.exception.code, http.BAD_REQUEST) def test_unserialize_social(self): url = api_url_for('unserialize_social') payload = { 'profileWebsites': ['http://frozen.pizza.com/reviews'], 'twitter': 'howtopizza', 'github': 'frozenpizzacode', } self.app.put_json( url, payload, auth=self.user.auth, ) self.user.reload() for key, value in payload.iteritems(): assert_equal(self.user.social[key], value) assert_true(self.user.social['researcherId'] is None) # Regression test for help-desk ticket def test_making_email_primary_is_not_case_sensitive(self): user = AuthUserFactory(username='fred@queen.test') # make confirmed email have different casing user.emails[0] = user.emails[0].capitalize() user.save() url = api_url_for('update_user') res = self.app.put_json( url, {'id': user._id, 'emails': [{'address': 'fred@queen.test', 'primary': True, 'confirmed': True}]}, auth=user.auth ) assert_equal(res.status_code, 200) def test_unserialize_social_validation_failure(self): url = api_url_for('unserialize_social') # profileWebsites URL is invalid payload = { 'profileWebsites': ['http://goodurl.com', 'http://invalidurl'], 'twitter': 
'howtopizza', 'github': 'frozenpizzacode', } res = self.app.put_json( url, payload, auth=self.user.auth, expect_errors=True ) assert_equal(res.status_code, 400) assert_equal(res.json['message_long'], 'Invalid personal URL.') def test_serialize_social_editable(self): self.user.social['twitter'] = 'howtopizza' self.user.social['profileWebsites'] = ['http://www.cos.io', 'http://www.osf.io', 'http://www.wordup.com'] self.user.save() url = api_url_for('serialize_social') res = self.app.get( url, auth=self.user.auth, ) assert_equal(res.json.get('twitter'), 'howtopizza') assert_equal(res.json.get('profileWebsites'), ['http://www.cos.io', 'http://www.osf.io', 'http://www.wordup.com']) assert_true(res.json.get('github') is None) assert_true(res.json['editable']) def test_serialize_social_not_editable(self): user2 = AuthUserFactory() self.user.social['twitter'] = 'howtopizza' self.user.social['profileWebsites'] = ['http://www.cos.io', 'http://www.osf.io', 'http://www.wordup.com'] self.user.save() url = api_url_for('serialize_social', uid=self.user._id) res = self.app.get( url, auth=user2.auth, ) assert_equal(res.json.get('twitter'), 'howtopizza') assert_equal(res.json.get('profileWebsites'), ['http://www.cos.io', 'http://www.osf.io', 'http://www.wordup.com']) assert_true(res.json.get('github') is None) assert_false(res.json['editable']) def test_serialize_social_addons_editable(self): self.user.add_addon('github') github_account = GitHubAccountFactory() github_account.save() self.user.external_accounts.add(github_account) self.user.save() url = api_url_for('serialize_social') res = self.app.get( url, auth=self.user.auth, ) assert_equal( res.json['addons']['github'], 'abc' ) def test_serialize_social_addons_not_editable(self): user2 = AuthUserFactory() self.user.add_addon('github') github_account = GitHubAccountFactory() github_account.save() self.user.external_accounts.add(github_account) self.user.save() url = api_url_for('serialize_social', uid=self.user._id) res = self.app.get( url, auth=user2.auth, ) assert_not_in('addons', res.json) def test_unserialize_and_serialize_jobs(self): jobs = [{ 'institution': 'an institution', 'department': 'a department', 'title': 'a title', 'startMonth': 'January', 'startYear': '2001', 'endMonth': 'March', 'endYear': '2001', 'ongoing': False, }, { 'institution': 'another institution', 'department': None, 'title': None, 'startMonth': 'May', 'startYear': '2001', 'endMonth': None, 'endYear': None, 'ongoing': True, }] payload = {'contents': jobs} url = api_url_for('unserialize_jobs') self.app.put_json(url, payload, auth=self.user.auth) self.user.reload() assert_equal(len(self.user.jobs), 2) url = api_url_for('serialize_jobs') res = self.app.get( url, auth=self.user.auth, ) for i, job in enumerate(jobs): assert_equal(job, res.json['contents'][i]) def test_unserialize_and_serialize_schools(self): schools = [{ 'institution': 'an institution', 'department': 'a department', 'degree': 'a degree', 'startMonth': 1, 'startYear': '2001', 'endMonth': 5, 'endYear': '2001', 'ongoing': False, }, { 'institution': 'another institution', 'department': None, 'degree': None, 'startMonth': 5, 'startYear': '2001', 'endMonth': None, 'endYear': None, 'ongoing': True, }] payload = {'contents': schools} url = api_url_for('unserialize_schools') self.app.put_json(url, payload, auth=self.user.auth) self.user.reload() assert_equal(len(self.user.schools), 2) url = api_url_for('serialize_schools') res = self.app.get( url, auth=self.user.auth, ) for i, job in enumerate(schools): assert_equal(job, 
res.json['contents'][i]) def test_unserialize_jobs(self): jobs = [ { 'institution': fake.company(), 'department': fake.catch_phrase(), 'title': fake.bs(), 'startMonth': 5, 'startYear': '2013', 'endMonth': 3, 'endYear': '2014', 'ongoing': False, } ] payload = {'contents': jobs} url = api_url_for('unserialize_jobs') res = self.app.put_json(url, payload, auth=self.user.auth) assert_equal(res.status_code, 200) self.user.reload() # jobs field is updated assert_equal(self.user.jobs, jobs) def test_unserialize_names(self): fake_fullname_w_spaces = ' {} '.format(fake.name()) names = { 'full': fake_fullname_w_spaces, 'given': 'Tea', 'middle': 'Gray', 'family': 'Pot', 'suffix': 'Ms.', } url = api_url_for('unserialize_names') res = self.app.put_json(url, names, auth=self.user.auth) assert_equal(res.status_code, 200) self.user.reload() # user is updated assert_equal(self.user.fullname, fake_fullname_w_spaces.strip()) assert_equal(self.user.given_name, names['given']) assert_equal(self.user.middle_names, names['middle']) assert_equal(self.user.family_name, names['family']) assert_equal(self.user.suffix, names['suffix']) def test_unserialize_schools(self): schools = [ { 'institution': fake.company(), 'department': fake.catch_phrase(), 'degree': fake.bs(), 'startMonth': 5, 'startYear': '2013', 'endMonth': 3, 'endYear': '2014', 'ongoing': False, } ] payload = {'contents': schools} url = api_url_for('unserialize_schools') res = self.app.put_json(url, payload, auth=self.user.auth) assert_equal(res.status_code, 200) self.user.reload() # schools field is updated assert_equal(self.user.schools, schools) def test_unserialize_jobs_valid(self): jobs = [ { 'institution': fake.company(), 'department': fake.catch_phrase(), 'title': fake.bs(), 'startMonth': 5, 'startYear': '2013', 'endMonth': 3, 'endYear': '2014', 'ongoing': False, } ] payload = {'contents': jobs} url = api_url_for('unserialize_jobs') res = self.app.put_json(url, payload, auth=self.user.auth) assert_equal(res.status_code, 200) def test_get_current_user_gravatar_default_size(self): url = api_url_for('current_user_gravatar') res = self.app.get(url, auth=self.user.auth) current_user_gravatar = res.json['gravatar_url'] assert_true(current_user_gravatar is not None) url = api_url_for('get_gravatar', uid=self.user._id) res = self.app.get(url, auth=self.user.auth) my_user_gravatar = res.json['gravatar_url'] assert_equal(current_user_gravatar, my_user_gravatar) def test_get_other_user_gravatar_default_size(self): user2 = AuthUserFactory() url = api_url_for('current_user_gravatar') res = self.app.get(url, auth=self.user.auth) current_user_gravatar = res.json['gravatar_url'] url = api_url_for('get_gravatar', uid=user2._id) res = self.app.get(url, auth=self.user.auth) user2_gravatar = res.json['gravatar_url'] assert_true(user2_gravatar is not None) assert_not_equal(current_user_gravatar, user2_gravatar) def test_get_current_user_gravatar_specific_size(self): url = api_url_for('current_user_gravatar') res = self.app.get(url, auth=self.user.auth) current_user_default_gravatar = res.json['gravatar_url'] url = api_url_for('current_user_gravatar', size=11) res = self.app.get(url, auth=self.user.auth) current_user_small_gravatar = res.json['gravatar_url'] assert_true(current_user_small_gravatar is not None) assert_not_equal(current_user_default_gravatar, current_user_small_gravatar) def test_get_other_user_gravatar_specific_size(self): user2 = AuthUserFactory() url = api_url_for('get_gravatar', uid=user2._id) res = self.app.get(url, auth=self.user.auth) 
gravatar_default_size = res.json['gravatar_url'] url = api_url_for('get_gravatar', uid=user2._id, size=11) res = self.app.get(url, auth=self.user.auth) gravatar_small = res.json['gravatar_url'] assert_true(gravatar_small is not None) assert_not_equal(gravatar_default_size, gravatar_small) def test_update_user_timezone(self): assert_equal(self.user.timezone, 'Etc/UTC') payload = {'timezone': 'America/New_York', 'id': self.user._id} url = api_url_for('update_user', uid=self.user._id) self.app.put_json(url, payload, auth=self.user.auth) self.user.reload() assert_equal(self.user.timezone, 'America/New_York') def test_update_user_locale(self): assert_equal(self.user.locale, 'en_US') payload = {'locale': 'de_DE', 'id': self.user._id} url = api_url_for('update_user', uid=self.user._id) self.app.put_json(url, payload, auth=self.user.auth) self.user.reload() assert_equal(self.user.locale, 'de_DE') def test_update_user_locale_none(self): assert_equal(self.user.locale, 'en_US') payload = {'locale': None, 'id': self.user._id} url = api_url_for('update_user', uid=self.user._id) self.app.put_json(url, payload, auth=self.user.auth) self.user.reload() assert_equal(self.user.locale, 'en_US') def test_update_user_locale_empty_string(self): assert_equal(self.user.locale, 'en_US') payload = {'locale': '', 'id': self.user._id} url = api_url_for('update_user', uid=self.user._id) self.app.put_json(url, payload, auth=self.user.auth) self.user.reload() assert_equal(self.user.locale, 'en_US') def test_cannot_update_user_without_user_id(self): user1 = AuthUserFactory() url = api_url_for('update_user') header = {'emails': [{'address': user1.username}]} res = self.app.put_json(url, header, auth=user1.auth, expect_errors=True) assert_equal(res.status_code, 400) assert_equal(res.json['message_long'], '"id" is required') @mock.patch('framework.auth.views.mails.send_mail') def test_add_emails_return_emails(self, send_mail): user1 = AuthUserFactory() url = api_url_for('update_user') email = 'test@cos.io' header = {'id': user1._id, 'emails': [{'address': user1.username, 'primary': True, 'confirmed': True}, {'address': email, 'primary': False, 'confirmed': False} ]} res = self.app.put_json(url, header, auth=user1.auth) assert_equal(res.status_code, 200) assert_in('emails', res.json['profile']) assert_equal(len(res.json['profile']['emails']), 2) @mock.patch('framework.auth.views.mails.send_mail') def test_resend_confirmation_return_emails(self, send_mail): user1 = AuthUserFactory() url = api_url_for('resend_confirmation') email = 'test@cos.io' header = {'id': user1._id, 'email': {'address': email, 'primary': False, 'confirmed': False} } res = self.app.put_json(url, header, auth=user1.auth) assert_equal(res.status_code, 200) assert_in('emails', res.json['profile']) assert_equal(len(res.json['profile']['emails']), 2) @mock.patch('framework.auth.views.mails.send_mail') @mock.patch('website.mailchimp_utils.get_mailchimp_api') def test_update_user_mailing_lists(self, mock_get_mailchimp_api, send_mail): email = fake.email() self.user.emails.append(email) list_name = 'foo' self.user.mailchimp_mailing_lists[list_name] = True self.user.save() mock_client = mock.MagicMock() mock_get_mailchimp_api.return_value = mock_client mock_client.lists.list.return_value = {'data': [{'id': 1, 'list_name': list_name}]} list_id = mailchimp_utils.get_list_id_from_name(list_name) url = api_url_for('update_user', uid=self.user._id) emails = [ {'address': self.user.username, 'primary': False, 'confirmed': True}, {'address': email, 'primary': True, 
'confirmed': True}] payload = {'locale': '', 'id': self.user._id, 'emails': emails} self.app.put_json(url, payload, auth=self.user.auth) assert mock_client.lists.unsubscribe.called mock_client.lists.unsubscribe.assert_called_with( id=list_id, email={'email': self.user.username}, send_goodbye=True ) mock_client.lists.subscribe.assert_called_with( id=list_id, email={'email': email}, merge_vars={ 'fname': self.user.given_name, 'lname': self.user.family_name, }, double_optin=False, update_existing=True ) handlers.celery_teardown_request() @mock.patch('framework.auth.views.mails.send_mail') @mock.patch('website.mailchimp_utils.get_mailchimp_api') def test_unsubscribe_mailchimp_not_called_if_user_not_subscribed(self, mock_get_mailchimp_api, send_mail): email = fake.email() self.user.emails.append(email) list_name = 'foo' self.user.mailchimp_mailing_lists[list_name] = False self.user.save() mock_client = mock.MagicMock() mock_get_mailchimp_api.return_value = mock_client mock_client.lists.list.return_value = {'data': [{'id': 1, 'list_name': list_name}]} url = api_url_for('update_user', uid=self.user._id) emails = [ {'address': self.user.username, 'primary': False, 'confirmed': True}, {'address': email, 'primary': True, 'confirmed': True}] payload = {'locale': '', 'id': self.user._id, 'emails': emails} self.app.put_json(url, payload, auth=self.user.auth) assert_equal(mock_client.lists.unsubscribe.call_count, 0) assert_equal(mock_client.lists.subscribe.call_count, 0) handlers.celery_teardown_request() # TODO: Uncomment once outstanding issues with this feature are addressed # def test_twitter_redirect_success(self): # self.user.social['twitter'] = fake.last_name() # self.user.save() # res = self.app.get(web_url_for('redirect_to_twitter', twitter_handle=self.user.social['twitter'])) # assert_equals(res.status_code, http.FOUND) # assert_in(self.user.url, res.location) # def test_twitter_redirect_is_case_insensitive(self): # self.user.social['twitter'] = fake.last_name() # self.user.save() # res1 = self.app.get(web_url_for('redirect_to_twitter', twitter_handle=self.user.social['twitter'])) # res2 = self.app.get(web_url_for('redirect_to_twitter', twitter_handle=self.user.social['twitter'].lower())) # assert_equal(res1.location, res2.location) # def test_twitter_redirect_unassociated_twitter_handle_returns_404(self): # unassociated_handle = fake.last_name() # expected_error = 'There is no active user associated with the Twitter handle: {0}.'.format(unassociated_handle) # res = self.app.get( # web_url_for('redirect_to_twitter', twitter_handle=unassociated_handle), # expect_errors=True # ) # assert_equal(res.status_code, http.NOT_FOUND) # assert_true(expected_error in res.body) # def test_twitter_redirect_handle_with_multiple_associated_accounts_redirects_to_selection_page(self): # self.user.social['twitter'] = fake.last_name() # self.user.save() # user2 = AuthUserFactory() # user2.social['twitter'] = self.user.social['twitter'] # user2.save() # expected_error = 'There are multiple OSF accounts associated with the Twitter handle: <strong>{0}</strong>.'.format(self.user.social['twitter']) # res = self.app.get( # web_url_for( # 'redirect_to_twitter', # twitter_handle=self.user.social['twitter'], # expect_error=True # ) # ) # assert_equal(res.status_code, http.MULTIPLE_CHOICES) # assert_true(expected_error in res.body) # assert_true(self.user.url in res.body) # assert_true(user2.url in res.body) class TestUserProfileApplicationsPage(OsfTestCase): def setUp(self): super(TestUserProfileApplicationsPage, 
self).setUp()
        self.user = AuthUserFactory()
        self.user2 = AuthUserFactory()
        self.platform_app = ApiOAuth2ApplicationFactory(owner=self.user)
        self.detail_url = web_url_for('oauth_application_detail', client_id=self.platform_app.client_id)

    def test_non_owner_cant_access_detail_page(self):
        res = self.app.get(self.detail_url, auth=self.user2.auth, expect_errors=True)
        assert_equal(res.status_code, http.FORBIDDEN)

    def test_owner_cant_access_deleted_application(self):
        self.platform_app.is_active = False
        self.platform_app.save()
        res = self.app.get(self.detail_url, auth=self.user.auth, expect_errors=True)
        assert_equal(res.status_code, http.GONE)

    def test_owner_cant_access_nonexistent_application(self):
        url = web_url_for('oauth_application_detail', client_id='nonexistent')
        res = self.app.get(url, auth=self.user.auth, expect_errors=True)
        assert_equal(res.status_code, http.NOT_FOUND)

    def test_url_has_not_broken(self):
        assert_equal(self.platform_app.url, self.detail_url)


class TestUserProfileTokensPage(OsfTestCase):

    def setUp(self):
        super(TestUserProfileTokensPage, self).setUp()
        self.user = AuthUserFactory()
        self.token = ApiOAuth2PersonalTokenFactory()
        self.detail_url = web_url_for('personal_access_token_detail', _id=self.token._id)

    def test_url_has_not_broken(self):
        assert_equal(self.token.url, self.detail_url)


class TestUserAccount(OsfTestCase):

    def setUp(self):
        super(TestUserAccount, self).setUp()
        self.user = AuthUserFactory()
        self.user.set_password('password')
        self.user.auth = (self.user.username, 'password')
        self.user.save()

    @mock.patch('website.profile.views.push_status_message')
    def test_password_change_valid(self,
                                   mock_push_status_message,
                                   old_password='password',
                                   new_password='Pa$$w0rd',
                                   confirm_password='Pa$$w0rd'):
        url = web_url_for('user_account_password')
        post_data = {
            'old_password': old_password,
            'new_password': new_password,
            'confirm_password': confirm_password,
        }
        res = self.app.post(url, post_data, auth=(self.user.username, old_password))
        assert_equal(302, res.status_code)
        res = res.follow(auth=(self.user.username, new_password))
        assert_equal(200, res.status_code)
        self.user.reload()
        assert_true(self.user.check_password(new_password))
        assert_true(mock_push_status_message.called)
        assert_in('Password updated successfully', mock_push_status_message.mock_calls[0][1][0])

    @mock.patch('website.profile.views.push_status_message')
    def test_password_change_invalid(self, mock_push_status_message, old_password='', new_password='',
                                     confirm_password='', error_message='Old password is invalid'):
        url = web_url_for('user_account_password')
        post_data = {
            'old_password': old_password,
            'new_password': new_password,
            'confirm_password': confirm_password,
        }
        res = self.app.post(url, post_data, auth=self.user.auth)
        assert_equal(302, res.status_code)
        res = res.follow(auth=self.user.auth)
        assert_equal(200, res.status_code)
        self.user.reload()
        assert_false(self.user.check_password(new_password))
        assert_true(mock_push_status_message.called)
        error_strings = [e[1][0] for e in mock_push_status_message.mock_calls]
        assert_in(error_message, error_strings)

    def test_password_change_invalid_old_password(self):
        self.test_password_change_invalid(
            old_password='invalid old password',
            new_password='new password',
            confirm_password='new password',
            error_message='Old password is invalid',
        )

    def test_password_change_invalid_confirm_password(self):
        self.test_password_change_invalid(
            old_password='password',
            new_password='new password',
            confirm_password='invalid confirm password',
            error_message='Password does not match the confirmation',
        )
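    # -----------------------------------------------------------------------
    # Illustration (added; not one of the original tests): the password-change
    # tests above read flashed messages back out of the patched
    # `push_status_message` through `mock_calls`. Each `mock_calls` entry
    # behaves like a (name, args, kwargs) triple, so `call[1][0]` is the first
    # positional argument -- the message text. A minimal, self-contained
    # sketch of that access pattern (the message and `kind` kwarg here are
    # hypothetical):
    def _mock_calls_extraction_sketch(self):
        patched = mock.MagicMock()
        patched('Password updated successfully', kind='success')
        messages = [call[1][0] for call in patched.mock_calls]
        assert_in('Password updated successfully', messages)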
def test_password_change_invalid_new_password_length(self): self.test_password_change_invalid( old_password='password', new_password='1234567', confirm_password='1234567', error_message='Password should be at least eight characters', ) def test_password_change_valid_new_password_length(self): self.test_password_change_valid( old_password='password', new_password='12345678', confirm_password='12345678', ) def test_password_change_invalid_blank_password(self, old_password='', new_password='', confirm_password=''): self.test_password_change_invalid( old_password=old_password, new_password=new_password, confirm_password=confirm_password, error_message='Passwords cannot be blank', ) def test_password_change_invalid_blank_new_password(self): for password in ('', ' '): self.test_password_change_invalid_blank_password('password', password, 'new password') def test_password_change_invalid_blank_confirm_password(self): for password in ('', ' '): self.test_password_change_invalid_blank_password('password', 'new password', password) @mock.patch('framework.auth.views.mails.send_mail') def test_user_cannot_request_account_export_before_throttle_expires(self, send_mail): url = api_url_for('request_export') self.app.post(url, auth=self.user.auth) assert_true(send_mail.called) res = self.app.post(url, auth=self.user.auth, expect_errors=True) assert_equal(res.status_code, 400) assert_equal(send_mail.call_count, 1) @mock.patch('framework.auth.views.mails.send_mail') def test_user_cannot_request_account_deactivation_before_throttle_expires(self, send_mail): url = api_url_for('request_deactivation') self.app.post(url, auth=self.user.auth) assert_true(send_mail.called) res = self.app.post(url, auth=self.user.auth, expect_errors=True) assert_equal(res.status_code, 400) assert_equal(send_mail.call_count, 1) def test_get_unconfirmed_emails_exclude_external_identity(self): external_identity = { 'service': { 'AFI': 'LINK' } } self.user.add_unconfirmed_email("james@steward.com") self.user.add_unconfirmed_email("steward@james.com", external_identity=external_identity) self.user.save() unconfirmed_emails = self.user.get_unconfirmed_emails_exclude_external_identity() assert_in("james@steward.com", unconfirmed_emails) assert_not_in("steward@james.com", unconfirmed_emails) class TestAddingContributorViews(OsfTestCase): def setUp(self): super(TestAddingContributorViews, self).setUp() self.creator = AuthUserFactory() self.project = ProjectFactory(creator=self.creator) self.auth = Auth(self.project.creator) # Authenticate all requests self.app.authenticate(*self.creator.auth) contributor_added.connect(notify_added_contributor) def test_serialize_unregistered_without_record(self): name, email = fake.name(), fake.email() res = serialize_unregistered(fullname=name, email=email) assert_equal(res['fullname'], name) assert_equal(res['email'], email) assert_equal(res['id'], None) assert_false(res['registered']) assert_true(res['gravatar']) assert_false(res['active']) def test_deserialize_contributors(self): contrib = UserFactory() unreg = UnregUserFactory() name, email = fake.name(), fake.email() unreg_no_record = serialize_unregistered(name, email) contrib_data = [ add_contributor_json(contrib), serialize_unregistered(fake.name(), unreg.username), unreg_no_record ] contrib_data[0]['permission'] = 'admin' contrib_data[1]['permission'] = 'write' contrib_data[2]['permission'] = 'read' contrib_data[0]['visible'] = True contrib_data[1]['visible'] = True contrib_data[2]['visible'] = True res = deserialize_contributors( self.project, 
contrib_data, auth=Auth(self.creator)) assert_equal(len(res), len(contrib_data)) assert_true(res[0]['user'].is_registered) assert_false(res[1]['user'].is_registered) assert_true(res[1]['user']._id) assert_false(res[2]['user'].is_registered) assert_true(res[2]['user']._id) def test_deserialize_contributors_validates_fullname(self): name = "<img src=1 onerror=console.log(1)>" email = fake.email() unreg_no_record = serialize_unregistered(name, email) contrib_data = [unreg_no_record] contrib_data[0]['permission'] = 'admin' contrib_data[0]['visible'] = True with assert_raises(ValidationError): deserialize_contributors( self.project, contrib_data, auth=Auth(self.creator), validate=True) def test_deserialize_contributors_validates_email(self): name = fake.name() email = "!@#$%%^&*" unreg_no_record = serialize_unregistered(name, email) contrib_data = [unreg_no_record] contrib_data[0]['permission'] = 'admin' contrib_data[0]['visible'] = True with assert_raises(ValidationError): deserialize_contributors( self.project, contrib_data, auth=Auth(self.creator), validate=True) def test_serialize_unregistered_with_record(self): name, email = fake.name(), fake.email() user = self.project.add_unregistered_contributor(fullname=name, email=email, auth=Auth(self.project.creator)) self.project.save() res = serialize_unregistered( fullname=name, email=email ) assert_false(res['active']) assert_false(res['registered']) assert_equal(res['id'], user._primary_key) assert_true(res['gravatar_url']) assert_equal(res['fullname'], name) assert_equal(res['email'], email) def test_add_contributor_with_unreg_contribs_and_reg_contribs(self): n_contributors_pre = len(self.project.contributors) reg_user = UserFactory() name, email = fake.name(), fake.email() pseudouser = { 'id': None, 'registered': False, 'fullname': name, 'email': email, 'permission': 'admin', 'visible': True, } reg_dict = add_contributor_json(reg_user) reg_dict['permission'] = 'admin' reg_dict['visible'] = True payload = { 'users': [reg_dict, pseudouser], 'node_ids': [] } url = self.project.api_url_for('project_contributors_post') self.app.post_json(url, payload).maybe_follow() self.project.reload() assert_equal(len(self.project.contributors), n_contributors_pre + len(payload['users'])) new_unreg = auth.get_user(email=email) assert_false(new_unreg.is_registered) # unclaimed record was added new_unreg.reload() assert_in(self.project._primary_key, new_unreg.unclaimed_records) rec = new_unreg.get_unclaimed_record(self.project._primary_key) assert_equal(rec['name'], name) assert_equal(rec['email'], email) @mock.patch('website.project.views.contributor.send_claim_email') def test_add_contributors_post_only_sends_one_email_to_unreg_user( self, mock_send_claim_email): # Project has components comp1, comp2 = NodeFactory( creator=self.creator), NodeFactory(creator=self.creator) NodeRelation.objects.create(parent=self.project, child=comp1) NodeRelation.objects.create(parent=self.project, child=comp2) self.project.save() # An unreg user is added to the project AND its components unreg_user = { # dict because user has not previous unreg record 'id': None, 'registered': False, 'fullname': fake.name(), 'email': fake.email(), 'permission': 'admin', 'visible': True, } payload = { 'users': [unreg_user], 'node_ids': [comp1._primary_key, comp2._primary_key] } # send request url = self.project.api_url_for('project_contributors_post') assert_true(self.project.can_edit(user=self.creator)) self.app.post_json(url, payload, auth=self.creator.auth) # finalize_invitation should only 
have been called once assert_equal(mock_send_claim_email.call_count, 1) @mock.patch('website.mails.send_mail') def test_add_contributors_post_only_sends_one_email_to_registered_user(self, mock_send_mail): # Project has components comp1 = NodeFactory(creator=self.creator, parent=self.project) comp2 = NodeFactory(creator=self.creator, parent=self.project) # A registered user is added to the project AND its components user = UserFactory() user_dict = { 'id': user._id, 'fullname': user.fullname, 'email': user.username, 'permission': 'write', 'visible': True} payload = { 'users': [user_dict], 'node_ids': [comp1._primary_key, comp2._primary_key] } # send request url = self.project.api_url_for('project_contributors_post') assert self.project.can_edit(user=self.creator) self.app.post_json(url, payload, auth=self.creator.auth) # send_mail should only have been called once assert_equal(mock_send_mail.call_count, 1) @mock.patch('website.mails.send_mail') def test_add_contributors_post_sends_email_if_user_not_contributor_on_parent_node(self, mock_send_mail): # Project has a component with a sub-component component = NodeFactory(creator=self.creator, parent=self.project) sub_component = NodeFactory(creator=self.creator, parent=component) # A registered user is added to the project and the sub-component, but NOT the component user = UserFactory() user_dict = { 'id': user._id, 'fullname': user.fullname, 'email': user.username, 'permission': 'write', 'visible': True} payload = { 'users': [user_dict], 'node_ids': [sub_component._primary_key] } # send request url = self.project.api_url_for('project_contributors_post') assert self.project.can_edit(user=self.creator) self.app.post_json(url, payload, auth=self.creator.auth) # send_mail is called for both the project and the sub-component assert_equal(mock_send_mail.call_count, 2) @mock.patch('website.project.views.contributor.send_claim_email') def test_email_sent_when_unreg_user_is_added(self, send_mail): name, email = fake.name(), fake.email() pseudouser = { 'id': None, 'registered': False, 'fullname': name, 'email': email, 'permission': 'admin', 'visible': True, } payload = { 'users': [pseudouser], 'node_ids': [] } url = self.project.api_url_for('project_contributors_post') self.app.post_json(url, payload).maybe_follow() assert_true(send_mail.called) assert_true(send_mail.called_with(email=email)) @mock.patch('website.mails.send_mail') def test_email_sent_when_reg_user_is_added(self, send_mail): contributor = UserFactory() contributors = [{ 'user': contributor, 'visible': True, 'permissions': ['read', 'write'] }] project = ProjectFactory(creator=self.auth.user) project.add_contributors(contributors, auth=self.auth) project.save() assert_true(send_mail.called) send_mail.assert_called_with( contributor.username, mails.CONTRIBUTOR_ADDED_DEFAULT, user=contributor, node=project, referrer_name=self.auth.user.fullname, all_global_subscriptions_none=False, branded_service_name=None, ) assert_almost_equal(contributor.contributor_added_email_records[project._id]['last_sent'], int(time.time()), delta=1) @mock.patch('website.mails.send_mail') def test_contributor_added_email_sent_to_unreg_user(self, send_mail): unreg_user = UnregUserFactory() project = ProjectFactory() project.add_unregistered_contributor(fullname=unreg_user.fullname, email=unreg_user.email, auth=Auth(project.creator)) project.save() assert_true(send_mail.called) @mock.patch('website.mails.send_mail') def test_forking_project_does_not_send_contributor_added_email(self, send_mail): project = 
ProjectFactory() project.fork_node(auth=Auth(project.creator)) assert_false(send_mail.called) @mock.patch('website.mails.send_mail') def test_templating_project_does_not_send_contributor_added_email(self, send_mail): project = ProjectFactory() project.use_as_template(auth=Auth(project.creator)) assert_false(send_mail.called) @mock.patch('website.archiver.tasks.archive') @mock.patch('website.mails.send_mail') def test_registering_project_does_not_send_contributor_added_email(self, send_mail, mock_archive): project = ProjectFactory() project.register_node(get_default_metaschema(), Auth(user=project.creator), '', None) assert_false(send_mail.called) @mock.patch('website.mails.send_mail') def test_notify_contributor_email_does_not_send_before_throttle_expires(self, send_mail): contributor = UserFactory() project = ProjectFactory() auth = Auth(project.creator) notify_added_contributor(project, contributor, auth) assert_true(send_mail.called) # 2nd call does not send email because throttle period has not expired notify_added_contributor(project, contributor, auth) assert_equal(send_mail.call_count, 1) @mock.patch('website.mails.send_mail') def test_notify_contributor_email_sends_after_throttle_expires(self, send_mail): throttle = 0.5 contributor = UserFactory() project = ProjectFactory() auth = Auth(project.creator) notify_added_contributor(project, contributor, auth, throttle=throttle) assert_true(send_mail.called) time.sleep(1) # throttle period expires notify_added_contributor(project, contributor, auth, throttle=throttle) assert_equal(send_mail.call_count, 2) def test_add_multiple_contributors_only_adds_one_log(self): n_logs_pre = self.project.logs.count() reg_user = UserFactory() name = fake.name() pseudouser = { 'id': None, 'registered': False, 'fullname': name, 'email': fake.email(), 'permission': 'write', 'visible': True, } reg_dict = add_contributor_json(reg_user) reg_dict['permission'] = 'admin' reg_dict['visible'] = True payload = { 'users': [reg_dict, pseudouser], 'node_ids': [] } url = self.project.api_url_for('project_contributors_post') self.app.post_json(url, payload).maybe_follow() self.project.reload() assert_equal(self.project.logs.count(), n_logs_pre + 1) def test_add_contribs_to_multiple_nodes(self): child = NodeFactory(parent=self.project, creator=self.creator) n_contributors_pre = child.contributors.count() reg_user = UserFactory() name, email = fake.name(), fake.email() pseudouser = { 'id': None, 'registered': False, 'fullname': name, 'email': email, 'permission': 'admin', 'visible': True, } reg_dict = add_contributor_json(reg_user) reg_dict['permission'] = 'admin' reg_dict['visible'] = True payload = { 'users': [reg_dict, pseudouser], 'node_ids': [self.project._primary_key, child._primary_key] } url = '/api/v1/project/{0}/contributors/'.format(self.project._id) self.app.post_json(url, payload).maybe_follow() child.reload() assert_equal(child.contributors.count(), n_contributors_pre + len(payload['users'])) def tearDown(self): super(TestAddingContributorViews, self).tearDown() contributor_added.disconnect(notify_added_contributor) class TestUserInviteViews(OsfTestCase): def setUp(self): super(TestUserInviteViews, self).setUp() self.user = AuthUserFactory() self.project = ProjectFactory(creator=self.user) self.invite_url = '/api/v1/project/{0}/invite_contributor/'.format( self.project._primary_key) def test_invite_contributor_post_if_not_in_db(self): name, email = fake.name(), fake.email() res = self.app.post_json( self.invite_url, {'fullname': name, 'email': email}, 
auth=self.user.auth, ) contrib = res.json['contributor'] assert_true(contrib['id'] is None) assert_equal(contrib['fullname'], name) assert_equal(contrib['email'], email) def test_invite_contributor_post_if_unreg_already_in_db(self): # A n unreg user is added to a different project name, email = fake.name(), fake.email() project2 = ProjectFactory() unreg_user = project2.add_unregistered_contributor(fullname=name, email=email, auth=Auth(project2.creator)) project2.save() res = self.app.post_json(self.invite_url, {'fullname': name, 'email': email}, auth=self.user.auth) expected = add_contributor_json(unreg_user) expected['fullname'] = name expected['email'] = email assert_equal(res.json['contributor'], expected) def test_invite_contributor_post_if_emaiL_already_registered(self): reg_user = UserFactory() # Tries to invite user that is already regiestered res = self.app.post_json(self.invite_url, {'fullname': fake.name(), 'email': reg_user.username}, auth=self.user.auth, expect_errors=True) assert_equal(res.status_code, http.BAD_REQUEST) def test_invite_contributor_post_if_user_is_already_contributor(self): unreg_user = self.project.add_unregistered_contributor( fullname=fake.name(), email=fake.email(), auth=Auth(self.project.creator) ) self.project.save() # Tries to invite unreg user that is already a contributor res = self.app.post_json(self.invite_url, {'fullname': fake.name(), 'email': unreg_user.username}, auth=self.user.auth, expect_errors=True) assert_equal(res.status_code, http.BAD_REQUEST) def test_invite_contributor_with_no_email(self): name = fake.name() res = self.app.post_json(self.invite_url, {'fullname': name, 'email': None}, auth=self.user.auth) assert_equal(res.status_code, http.OK) data = res.json assert_equal(data['status'], 'success') assert_equal(data['contributor']['fullname'], name) assert_true(data['contributor']['email'] is None) assert_false(data['contributor']['registered']) def test_invite_contributor_requires_fullname(self): res = self.app.post_json(self.invite_url, {'email': 'brian@queen.com', 'fullname': ''}, auth=self.user.auth, expect_errors=True) assert_equal(res.status_code, http.BAD_REQUEST) @mock.patch('website.project.views.contributor.mails.send_mail') def test_send_claim_email_to_given_email(self, send_mail): project = ProjectFactory() given_email = fake.email() unreg_user = project.add_unregistered_contributor( fullname=fake.name(), email=given_email, auth=Auth(project.creator), ) project.save() send_claim_email(email=given_email, unclaimed_user=unreg_user, node=project) assert_true(send_mail.called) assert_true(send_mail.called_with( to_addr=given_email, mail=mails.INVITE_DEFAULT )) @mock.patch('website.project.views.contributor.mails.send_mail') def test_send_claim_email_to_referrer(self, send_mail): project = ProjectFactory() referrer = project.creator given_email, real_email = fake.email(), fake.email() unreg_user = project.add_unregistered_contributor(fullname=fake.name(), email=given_email, auth=Auth( referrer) ) project.save() send_claim_email(email=real_email, unclaimed_user=unreg_user, node=project) assert_true(send_mail.called) # email was sent to referrer send_mail.assert_called_with( referrer.username, mails.FORWARD_INVITE, user=unreg_user, referrer=referrer, claim_url=unreg_user.get_claim_url(project._id, external=True), email=real_email.lower().strip(), fullname=unreg_user.get_unclaimed_record(project._id)['name'], node=project, branded_service_name=None ) @mock.patch('website.project.views.contributor.mails.send_mail') def 
test_send_claim_email_before_throttle_expires(self, send_mail): project = ProjectFactory() given_email = fake.email() unreg_user = project.add_unregistered_contributor( fullname=fake.name(), email=given_email, auth=Auth(project.creator), ) project.save() send_claim_email(email=fake.email(), unclaimed_user=unreg_user, node=project) send_mail.reset_mock() # 2nd call raises error because throttle hasn't expired with assert_raises(HTTPError): send_claim_email(email=fake.email(), unclaimed_user=unreg_user, node=project) assert_false(send_mail.called) class TestClaimViews(OsfTestCase): def setUp(self): super(TestClaimViews, self).setUp() self.referrer = AuthUserFactory() self.project = ProjectFactory(creator=self.referrer, is_public=True) self.given_name = fake.name() self.given_email = fake.email() self.user = self.project.add_unregistered_contributor( fullname=self.given_name, email=self.given_email, auth=Auth(user=self.referrer) ) self.project.save() @mock.patch('website.project.views.contributor.send_claim_email') def test_claim_user_already_registered_redirects_to_claim_user_registered(self, claim_email): name = fake.name() email = fake.email() # project contributor adds an unregistered contributor (without an email) on public project unregistered_user = self.project.add_unregistered_contributor( fullname=name, email=None, auth=Auth(user=self.referrer) ) assert_in(unregistered_user, self.project.contributors) # unregistered user comes along and claims themselves on the public project, entering an email invite_url = self.project.api_url_for('claim_user_post', uid='undefined') self.app.post_json(invite_url, { 'pk': unregistered_user._primary_key, 'value': email }) assert_equal(claim_email.call_count, 1) # set unregistered record email since we are mocking send_claim_email() unclaimed_record = unregistered_user.get_unclaimed_record(self.project._primary_key) unclaimed_record.update({'email': email}) unregistered_user.save() # unregistered user then goes and makes an account with same email, before claiming themselves as contributor UserFactory(username=email, fullname=name) # claim link for the now registered email is accessed while not logged in token = unregistered_user.get_unclaimed_record(self.project._primary_key)['token'] claim_url = '/user/{uid}/{pid}/claim/?token={token}'.format( uid=unregistered_user._id, pid=self.project._id, token=token ) res = self.app.get(claim_url) # should redirect to 'claim_user_registered' view claim_registered_url = '/user/{uid}/{pid}/claim/verify/{token}/'.format( uid=unregistered_user._id, pid=self.project._id, token=token ) assert_equal(res.status_code, 302) assert_in(claim_registered_url, res.headers.get('Location')) @mock.patch('website.project.views.contributor.send_claim_email') def test_claim_user_already_registered_secondary_email_redirects_to_claim_user_registered(self, claim_email): name = fake.name() email = fake.email() secondary_email = fake.email() # project contributor adds an unregistered contributor (without an email) on public project unregistered_user = self.project.add_unregistered_contributor( fullname=name, email=None, auth=Auth(user=self.referrer) ) assert_in(unregistered_user, self.project.contributors) # unregistered user comes along and claims themselves on the public project, entering an email invite_url = self.project.api_url_for('claim_user_post', uid='undefined') self.app.post_json(invite_url, { 'pk': unregistered_user._primary_key, 'value': secondary_email }) assert_equal(claim_email.call_count, 1) # set unregistered record 
email since we are mocking send_claim_email() unclaimed_record = unregistered_user.get_unclaimed_record(self.project._primary_key) unclaimed_record.update({'email': secondary_email}) unregistered_user.save() # unregistered user then goes and makes an account with same email, before claiming themselves as contributor registered_user = UserFactory(username=email, fullname=name) registered_user.emails.append(secondary_email) registered_user.save() # claim link for the now registered email is accessed while not logged in token = unregistered_user.get_unclaimed_record(self.project._primary_key)['token'] claim_url = '/user/{uid}/{pid}/claim/?token={token}'.format( uid=unregistered_user._id, pid=self.project._id, token=token ) res = self.app.get(claim_url) # should redirect to 'claim_user_registered' view claim_registered_url = '/user/{uid}/{pid}/claim/verify/{token}/'.format( uid=unregistered_user._id, pid=self.project._id, token=token ) assert_equal(res.status_code, 302) assert_in(claim_registered_url, res.headers.get('Location')) def test_claim_user_invited_with_no_email_posts_to_claim_form(self): given_name = fake.name() invited_user = self.project.add_unregistered_contributor( fullname=given_name, email=None, auth=Auth(user=self.referrer) ) self.project.save() url = invited_user.get_claim_url(self.project._primary_key) res = self.app.post(url, { 'password': 'bohemianrhap', 'password2': 'bohemianrhap' }, expect_errors=True) assert_equal(res.status_code, 400) @mock.patch('website.project.views.contributor.mails.send_mail') def test_claim_user_post_with_registered_user_id(self, send_mail): # registered user who is attempting to claim the unclaimed contributor reg_user = UserFactory() payload = { # pk of unreg user record 'pk': self.user._primary_key, 'claimerId': reg_user._primary_key } url = '/api/v1/user/{uid}/{pid}/claim/email/'.format( uid=self.user._primary_key, pid=self.project._primary_key, ) res = self.app.post_json(url, payload) # mail was sent assert_equal(send_mail.call_count, 2) # ... 
to the correct address referrer_call = send_mail.call_args_list[0] claimer_call = send_mail.call_args_list[1] args, _ = referrer_call assert_equal(args[0], self.referrer.username) args, _ = claimer_call assert_equal(args[0], reg_user.username) # view returns the correct JSON assert_equal(res.json, { 'status': 'success', 'email': reg_user.username, 'fullname': self.given_name, }) @mock.patch('website.project.views.contributor.mails.send_mail') def test_send_claim_registered_email(self, mock_send_mail): reg_user = UserFactory() send_claim_registered_email( claimer=reg_user, unclaimed_user=self.user, node=self.project ) assert_equal(mock_send_mail.call_count, 2) first_call_args = mock_send_mail.call_args_list[0][0] assert_equal(first_call_args[0], self.referrer.username) second_call_args = mock_send_mail.call_args_list[1][0] assert_equal(second_call_args[0], reg_user.username) @mock.patch('website.project.views.contributor.mails.send_mail') def test_send_claim_registered_email_before_throttle_expires(self, mock_send_mail): reg_user = UserFactory() send_claim_registered_email( claimer=reg_user, unclaimed_user=self.user, node=self.project, ) mock_send_mail.reset_mock() # second call raises error because it was called before throttle period with assert_raises(HTTPError): send_claim_registered_email( claimer=reg_user, unclaimed_user=self.user, node=self.project, ) assert_false(mock_send_mail.called) @mock.patch('website.project.views.contributor.send_claim_registered_email') def test_claim_user_post_with_email_already_registered_sends_correct_email( self, send_claim_registered_email): reg_user = UserFactory() payload = { 'value': reg_user.username, 'pk': self.user._primary_key } url = self.project.api_url_for('claim_user_post', uid=self.user._id) self.app.post_json(url, payload) assert_true(send_claim_registered_email.called) def test_user_with_removed_unclaimed_url_claiming(self): """ Tests that when an unclaimed user is removed from a project, the unregistered user object does not retain the token. """ self.project.remove_contributor(self.user, Auth(user=self.referrer)) assert_not_in( self.project._primary_key, self.user.unclaimed_records.keys() ) def test_user_with_claim_url_cannot_claim_twice(self): """ Tests that when an unclaimed user is replaced on a project with a claimed user, the unregistered user object does not retain the token. 
""" reg_user = AuthUserFactory() self.project.replace_contributor(self.user, reg_user) assert_not_in( self.project._primary_key, self.user.unclaimed_records.keys() ) def test_claim_user_form_redirects_to_password_confirm_page_if_user_is_logged_in(self): reg_user = AuthUserFactory() url = self.user.get_claim_url(self.project._primary_key) res = self.app.get(url, auth=reg_user.auth) assert_equal(res.status_code, 302) res = res.follow(auth=reg_user.auth) token = self.user.get_unclaimed_record(self.project._primary_key)['token'] expected = self.project.web_url_for( 'claim_user_registered', uid=self.user._id, token=token, ) assert_equal(res.request.path, expected) def test_get_valid_form(self): url = self.user.get_claim_url(self.project._primary_key) res = self.app.get(url).maybe_follow() assert_equal(res.status_code, 200) def test_invalid_claim_form_raise_400(self): uid = self.user._primary_key pid = self.project._primary_key url = '/user/{uid}/{pid}/claim/?token=badtoken'.format(**locals()) res = self.app.get(url, expect_errors=True).maybe_follow() assert_equal(res.status_code, 400) @mock.patch('framework.auth.core.User.update_search_nodes') def test_posting_to_claim_form_with_valid_data(self, mock_update_search_nodes): url = self.user.get_claim_url(self.project._primary_key) res = self.app.post(url, { 'username': self.user.username, 'password': 'killerqueen', 'password2': 'killerqueen' }) assert_equal(res.status_code, 302) location = res.headers.get('Location') assert_in('login?service=', location) assert_in('username', location) assert_in('verification_key', location) assert_in(self.project._primary_key, location) self.user.reload() assert_true(self.user.is_registered) assert_true(self.user.is_active) assert_not_in(self.project._primary_key, self.user.unclaimed_records) @mock.patch('framework.auth.core.User.update_search_nodes') def test_posting_to_claim_form_removes_all_unclaimed_data(self, mock_update_search_nodes): # user has multiple unclaimed records p2 = ProjectFactory(creator=self.referrer) self.user.add_unclaimed_record(node=p2, referrer=self.referrer, given_name=fake.name()) self.user.save() assert_true(len(self.user.unclaimed_records.keys()) > 1) # sanity check url = self.user.get_claim_url(self.project._primary_key) self.app.post(url, { 'username': self.given_email, 'password': 'bohemianrhap', 'password2': 'bohemianrhap' }) self.user.reload() assert_equal(self.user.unclaimed_records, {}) @mock.patch('framework.auth.core.User.update_search_nodes') def test_posting_to_claim_form_sets_fullname_to_given_name(self, mock_update_search_nodes): # User is created with a full name original_name = fake.name() unreg = UnregUserFactory(fullname=original_name) # User invited with a different name different_name = fake.name() new_user = self.project.add_unregistered_contributor( email=unreg.username, fullname=different_name, auth=Auth(self.project.creator), ) self.project.save() # Goes to claim url claim_url = new_user.get_claim_url(self.project._id) self.app.post(claim_url, { 'username': unreg.username, 'password': 'killerqueen', 'password2': 'killerqueen' }) unreg.reload() # Full name was set correctly assert_equal(unreg.fullname, different_name) # CSL names were set correctly parsed_name = impute_names_model(different_name) assert_equal(unreg.given_name, parsed_name['given_name']) assert_equal(unreg.family_name, parsed_name['family_name']) @mock.patch('website.project.views.contributor.mails.send_mail') def test_claim_user_post_returns_fullname(self, send_mail): url = 
'/api/v1/user/{0}/{1}/claim/email/'.format(self.user._primary_key, self.project._primary_key) res = self.app.post_json(url, {'value': self.given_email, 'pk': self.user._primary_key}, auth=self.referrer.auth) assert_equal(res.json['fullname'], self.given_name) assert_true(send_mail.called) assert_true(send_mail.called_with(to_addr=self.given_email)) @mock.patch('website.project.views.contributor.mails.send_mail') def test_claim_user_post_if_email_is_different_from_given_email(self, send_mail): email = fake.email() # email that is different from the one the referrer gave url = '/api/v1/user/{0}/{1}/claim/email/'.format(self.user._primary_key, self.project._primary_key) self.app.post_json(url, {'value': email, 'pk': self.user._primary_key} ) assert_true(send_mail.called) assert_equal(send_mail.call_count, 2) call_to_invited = send_mail.mock_calls[0] assert_true(call_to_invited.called_with( to_addr=email )) call_to_referrer = send_mail.mock_calls[1] assert_true(call_to_referrer.called_with( to_addr=self.given_email )) def test_claim_url_with_bad_token_returns_400(self): url = self.project.web_url_for( 'claim_user_registered', uid=self.user._id, token='badtoken', ) res = self.app.get(url, auth=self.referrer.auth, expect_errors=400) assert_equal(res.status_code, 400) def test_cannot_claim_user_with_user_who_is_already_contributor(self): # user who is already a contirbutor to the project contrib = AuthUserFactory() self.project.add_contributor(contrib, auth=Auth(self.project.creator)) self.project.save() # Claiming user goes to claim url, but contrib is already logged in url = self.user.get_claim_url(self.project._primary_key) res = self.app.get( url, auth=contrib.auth, ).follow( auth=contrib.auth, expect_errors=True, ) # Response is a 400 assert_equal(res.status_code, 400) @pytest.mark.skip('Watching no longer supported') class TestWatchViews(OsfTestCase): def setUp(self): super(TestWatchViews, self).setUp() self.user = AuthUserFactory() self.consolidate_auth = Auth(user=self.user) self.auth = self.user.auth # used for requests auth # A public project self.project = ProjectFactory(is_public=True) self.project.save() # Manually reset log date to 100 days ago so it won't show up in feed latest_log = self.project.logs.latest() latest_log.date = timezone.now() - dt.timedelta(days=100) latest_log.save() # A log added now self.last_log = self.project.add_log( NodeLog.TAG_ADDED, params={'node': self.project._primary_key}, auth=self.consolidate_auth, log_date=timezone.now(), save=True, ) # Clear watched list WatchConfig = apps.get_model('osf.WatchConfig') WatchConfig.objects.filter(user=self.user).delete() def test_watching_a_project_appends_to_users_watched_list(self): n_watched_then = self.user.watched.count() url = '/api/v1/project/{0}/watch/'.format(self.project._id) res = self.app.post_json(url, params={'digest': True}, auth=self.auth) assert_equal(res.json['watchCount'], 1) self.user.reload() n_watched_now = self.user.watched.count() assert_equal(res.status_code, 200) assert_equal(n_watched_now, n_watched_then + 1) assert_true(self.user.watched.last().digest) def test_watching_project_twice_returns_400(self): url = '/api/v1/project/{0}/watch/'.format(self.project._id) res = self.app.post_json(url, params={}, auth=self.auth) assert_equal(res.status_code, 200) # User tries to watch a node she's already watching res2 = self.app.post_json(url, params={}, auth=self.auth, expect_errors=True) assert_equal(res2.status_code, http.BAD_REQUEST) def test_unwatching_a_project_removes_from_watched_list(self): # 
The user has already watched a project watch_config = WatchConfigFactory(node=self.project) self.user.watch(watch_config) self.user.save() n_watched_then = len(self.user.watched) url = '/api/v1/project/{0}/unwatch/'.format(self.project._id) res = self.app.post_json(url, {}, auth=self.auth) self.user.reload() n_watched_now = len(self.user.watched) assert_equal(res.status_code, 200) assert_equal(n_watched_now, n_watched_then - 1) assert_false(self.user.is_watching(self.project)) def test_toggle_watch(self): # The user is not watching project assert_false(self.user.is_watching(self.project)) url = '/api/v1/project/{0}/togglewatch/'.format(self.project._id) res = self.app.post_json(url, {}, auth=self.auth) # The response json has a watchcount and watched property assert_equal(res.json['watchCount'], 1) assert_true(res.json['watched']) assert_equal(res.status_code, 200) self.user.reload() # The user is now watching the project assert_true(res.json['watched']) assert_true(self.user.is_watching(self.project)) def test_toggle_watch_node(self): # The project has a public sub-node node = NodeFactory(creator=self.user, parent=self.project, is_public=True) url = "/api/v1/project/{}/node/{}/togglewatch/".format(self.project._id, node._id) res = self.app.post_json(url, {}, auth=self.auth) assert_equal(res.status_code, 200) self.user.reload() # The user is now watching the sub-node assert_true(res.json['watched']) assert_true(self.user.is_watching(node)) class TestPointerViews(OsfTestCase): def setUp(self): super(TestPointerViews, self).setUp() self.user = AuthUserFactory() self.consolidate_auth = Auth(user=self.user) self.project = ProjectFactory(creator=self.user) def _make_pointer_only_user_can_see(self, user, project, save=False): node = ProjectFactory(creator=user) project.add_pointer(node, auth=Auth(user=user), save=save) def test_pointer_list_write_contributor_can_remove_private_component_entry(self): """Ensure that write contributors see the button to delete a pointer, even if they cannot see what it is pointing at""" url = web_url_for('view_project', pid=self.project._id) user2 = AuthUserFactory() self.project.add_contributor(user2, auth=Auth(self.project.creator), permissions=permissions.DEFAULT_CONTRIBUTOR_PERMISSIONS) self._make_pointer_only_user_can_see(user2, self.project) self.project.save() res = self.app.get(url, auth=self.user.auth).maybe_follow() assert_equal(res.status_code, 200) has_controls = res.lxml.xpath('//li[@node_id]/p[starts-with(normalize-space(text()), "Private Link")]//i[contains(@class, "remove-pointer")]') assert_true(has_controls) def test_pointer_list_write_contributor_can_remove_public_component_entry(self): url = web_url_for('view_project', pid=self.project._id) for i in xrange(3): self.project.add_pointer(ProjectFactory(creator=self.user), auth=Auth(user=self.user)) self.project.save() res = self.app.get(url, auth=self.user.auth).maybe_follow() assert_equal(res.status_code, 200) has_controls = res.lxml.xpath( '//li[@node_id]//i[contains(@class, "remove-pointer")]') assert_equal(len(has_controls), 3) def test_pointer_list_read_contributor_cannot_remove_private_component_entry(self): url = web_url_for('view_project', pid=self.project._id) user2 = AuthUserFactory() self.project.add_contributor(user2, auth=Auth(self.project.creator), permissions=[permissions.READ]) self._make_pointer_only_user_can_see(user2, self.project) self.project.save() res = self.app.get(url, auth=user2.auth).maybe_follow() assert_equal(res.status_code, 200) pointer_nodes = 
res.lxml.xpath('//li[@node_id]') has_controls = res.lxml.xpath('//li[@node_id]/p[starts-with(normalize-space(text()), "Private Link")]//i[contains(@class, "remove-pointer")]') assert_equal(len(pointer_nodes), 1) assert_false(has_controls) def test_pointer_list_read_contributor_cannot_remove_public_component_entry(self): url = web_url_for('view_project', pid=self.project._id) self.project.add_pointer(ProjectFactory(creator=self.user, is_public=True), auth=Auth(user=self.user)) user2 = AuthUserFactory() self.project.add_contributor(user2, auth=Auth(self.project.creator), permissions=[permissions.READ]) self.project.save() res = self.app.get(url, auth=user2.auth).maybe_follow() assert_equal(res.status_code, 200) pointer_nodes = res.lxml.xpath('//li[@node_id]') has_controls = res.lxml.xpath( '//li[@node_id]//i[contains(@class, "remove-pointer")]') assert_equal(len(pointer_nodes), 1) assert_equal(len(has_controls), 0) # https://github.com/CenterForOpenScience/openscienceframework.org/issues/1109 def test_get_pointed_excludes_folders(self): pointer_project = ProjectFactory(is_public=True) # project that points to another project pointed_project = ProjectFactory(creator=self.user) # project that other project points to pointer_project.add_pointer(pointed_project, Auth(pointer_project.creator), save=True) # Project is in an organizer collection collection = CollectionFactory(creator=pointed_project.creator) collection.add_pointer(pointed_project, Auth(pointed_project.creator), save=True) url = pointed_project.api_url_for('get_pointed') res = self.app.get(url, auth=self.user.auth) assert_equal(res.status_code, 200) # pointer_project's id is included in response, but folder's id is not pointer_ids = [each['id'] for each in res.json['pointed']] assert_in(pointer_project._id, pointer_ids) assert_not_in(collection._id, pointer_ids) def test_add_pointers(self): url = self.project.api_url + 'pointer/' node_ids = [ NodeFactory()._id for _ in range(5) ] self.app.post_json( url, {'nodeIds': node_ids}, auth=self.user.auth, ).maybe_follow() self.project.reload() assert_equal( self.project.nodes_active.count(), 5 ) def test_add_the_same_pointer_more_than_once(self): url = self.project.api_url + 'pointer/' double_node = NodeFactory() self.app.post_json( url, {'nodeIds': [double_node._id]}, auth=self.user.auth, ) res = self.app.post_json( url, {'nodeIds': [double_node._id]}, auth=self.user.auth, expect_errors=True ) assert_equal(res.status_code, 400) def test_add_pointers_no_user_logg_in(self): url = self.project.api_url_for('add_pointers') node_ids = [ NodeFactory()._id for _ in range(5) ] res = self.app.post_json( url, {'nodeIds': node_ids}, auth=None, expect_errors=True ) assert_equal(res.status_code, 401) def test_add_pointers_public_non_contributor(self): project2 = ProjectFactory() project2.set_privacy('public') project2.save() url = self.project.api_url_for('add_pointers') self.app.post_json( url, {'nodeIds': [project2._id]}, auth=self.user.auth, ).maybe_follow() self.project.reload() assert_equal( self.project.nodes_active.count(), 1 ) def test_add_pointers_contributor(self): user2 = AuthUserFactory() self.project.add_contributor(user2) self.project.save() url = self.project.api_url_for('add_pointers') node_ids = [ NodeFactory()._id for _ in range(5) ] self.app.post_json( url, {'nodeIds': node_ids}, auth=user2.auth, ).maybe_follow() self.project.reload() assert_equal( self.project.linked_nodes.count(), 5 ) def test_add_pointers_not_provided(self): url = self.project.api_url + 'pointer/' res = 
self.app.post_json(url, {}, auth=self.user.auth, expect_errors=True) assert_equal(res.status_code, 400) def test_move_pointers(self): project_two = ProjectFactory(creator=self.user) url = api_url_for('move_pointers') node = NodeFactory() pointer = self.project.add_pointer(node, auth=self.consolidate_auth) assert_equal(self.project.linked_nodes.count(), 1) assert_equal(project_two.linked_nodes.count(), 0) user_auth = self.user.auth move_request = \ { 'fromNodeId': self.project._id, 'toNodeId': project_two._id, 'pointerIds': [pointer._id], } self.app.post_json( url, move_request, auth=user_auth, ).maybe_follow() self.project.reload() project_two.reload() assert_equal(self.project.linked_nodes.count(), 0) assert_equal(project_two.linked_nodes.count(), 1) def test_remove_pointer(self): url = self.project.api_url + 'pointer/' node = NodeFactory() pointer = self.project.add_pointer(node, auth=self.consolidate_auth) self.app.delete_json( url, {'pointerId': pointer.node._id}, auth=self.user.auth, ) self.project.reload() assert_equal( len(list(self.project.nodes)), 0 ) def test_remove_pointer_not_provided(self): url = self.project.api_url + 'pointer/' res = self.app.delete_json(url, {}, auth=self.user.auth, expect_errors=True) assert_equal(res.status_code, 400) def test_remove_pointer_not_found(self): url = self.project.api_url + 'pointer/' res = self.app.delete_json( url, {'pointerId': None}, auth=self.user.auth, expect_errors=True ) assert_equal(res.status_code, 400) def test_remove_pointer_not_in_nodes(self): url = self.project.api_url + 'pointer/' node = NodeFactory() pointer = Pointer() res = self.app.delete_json( url, {'pointerId': pointer._id}, auth=self.user.auth, expect_errors=True ) assert_equal(res.status_code, 400) def test_fork_pointer(self): url = self.project.api_url + 'pointer/fork/' node = NodeFactory(creator=self.user) pointer = self.project.add_pointer(node, auth=self.consolidate_auth) self.app.post_json( url, {'pointerId': pointer._id}, auth=self.user.auth ) def test_fork_pointer_not_provided(self): url = self.project.api_url + 'pointer/fork/' res = self.app.post_json(url, {}, auth=self.user.auth, expect_errors=True) assert_equal(res.status_code, 400) def test_fork_pointer_not_found(self): url = self.project.api_url + 'pointer/fork/' res = self.app.post_json( url, {'pointerId': None}, auth=self.user.auth, expect_errors=True ) assert_equal(res.status_code, 400) def test_fork_pointer_not_in_nodes(self): url = self.project.api_url + 'pointer/fork/' node = NodeFactory() pointer = Pointer() res = self.app.post_json( url, {'pointerId': pointer._id}, auth=self.user.auth, expect_errors=True ) assert_equal(res.status_code, 400) def test_before_register_with_pointer(self): # Assert that link warning appears in before register callback. 
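# (this test posts to the 'fork/before/' endpoint and looks for the fork-link prompt; test_before_fork_with_pointer below posts to 'beforeregister/' and looks for the registration-link prompt)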
node = NodeFactory() self.project.add_pointer(node, auth=self.consolidate_auth) url = self.project.api_url + 'fork/before/' res = self.app.get(url, auth=self.user.auth).maybe_follow() prompts = [ prompt for prompt in res.json['prompts'] if 'Links will be copied into your fork' in prompt ] assert_equal(len(prompts), 1) def test_before_fork_with_pointer(self): """Assert that link warning appears in before fork callback.""" node = NodeFactory() self.project.add_pointer(node, auth=self.consolidate_auth) url = self.project.api_url + 'beforeregister/' res = self.app.get(url, auth=self.user.auth).maybe_follow() prompts = [ prompt for prompt in res.json['prompts'] if 'Links will be copied into your registration' in prompt ] assert_equal(len(prompts), 1) def test_before_register_no_pointer(self): """Assert that link warning does not appear in before register callback.""" url = self.project.api_url + 'fork/before/' res = self.app.get(url, auth=self.user.auth).maybe_follow() prompts = [ prompt for prompt in res.json['prompts'] if 'Links will be copied into your fork' in prompt ] assert_equal(len(prompts), 0) def test_before_fork_no_pointer(self): """Assert that link warning does not appear in before fork callback.""" url = self.project.api_url + 'beforeregister/' res = self.app.get(url, auth=self.user.auth).maybe_follow() prompts = [ prompt for prompt in res.json['prompts'] if 'Links will be copied into your registration' in prompt ] assert_equal(len(prompts), 0) def test_get_pointed(self): pointing_node = ProjectFactory(creator=self.user) pointing_node.add_pointer(self.project, auth=Auth(self.user)) url = self.project.api_url_for('get_pointed') res = self.app.get(url, auth=self.user.auth) pointed = res.json['pointed'] assert_equal(len(pointed), 1) assert_equal(pointed[0]['url'], pointing_node.url) assert_equal(pointed[0]['title'], pointing_node.title) assert_equal(pointed[0]['authorShort'], abbrev_authors(pointing_node)) def test_get_pointed_private(self): secret_user = UserFactory() pointing_node = ProjectFactory(creator=secret_user) pointing_node.add_pointer(self.project, auth=Auth(secret_user)) url = self.project.api_url_for('get_pointed') res = self.app.get(url, auth=self.user.auth) pointed = res.json['pointed'] assert_equal(len(pointed), 1) assert_equal(pointed[0]['url'], None) assert_equal(pointed[0]['title'], 'Private Component') assert_equal(pointed[0]['authorShort'], 'Private Author(s)') class TestPublicViews(OsfTestCase): def test_explore(self): res = self.app.get("/explore/").maybe_follow() assert_equal(res.status_code, 200) class TestAuthViews(OsfTestCase): def setUp(self): super(TestAuthViews, self).setUp() self.user = AuthUserFactory() self.auth = self.user.auth @mock.patch('framework.auth.views.mails.send_mail') def test_register_ok(self, _): url = api_url_for('register_user') name, email, password = fake.name(), fake.email(), 'underpressure' self.app.post_json( url, { 'fullName': name, 'email1': email, 'email2': email, 'password': password, } ) user = User.find_one(Q('username', 'eq', email)) assert_equal(user.fullname, name) # Regression test for https://github.com/CenterForOpenScience/osf.io/issues/2902 @mock.patch('framework.auth.views.mails.send_mail') def test_register_email_case_insensitive(self, _): url = api_url_for('register_user') name, email, password = fake.name(), fake.email(), 'underpressure' self.app.post_json( url, { 'fullName': name, 'email1': email, 'email2': str(email).upper(), 'password': password, } ) user = User.find_one(Q('username', 'eq', email)) 
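# the account registered with an upper-cased 'email2' is still retrievable by the lower-cased address, and the submitted full name is preserved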
assert_equal(user.fullname, name) @mock.patch('framework.auth.views.send_confirm_email') def test_register_scrubs_username(self, _): url = api_url_for('register_user') name = "<i>Eunice</i> O' \"Cornwallis\"<script type='text/javascript' src='http://www.cornify.com/js/cornify.js'></script><script type='text/javascript'>cornify_add()</script>" email, password = fake.email(), 'underpressure' res = self.app.post_json( url, { 'fullName': name, 'email1': email, 'email2': email, 'password': password, } ) expected_scrub_username = "Eunice O' \"Cornwallis\"cornify_add()" user = User.find_one(Q('username', 'eq', email)) assert_equal(res.status_code, http.OK) assert_equal(user.fullname, expected_scrub_username) def test_register_email_mismatch(self): url = api_url_for('register_user') name, email, password = fake.name(), fake.email(), 'underpressure' res = self.app.post_json( url, { 'fullName': name, 'email1': email, 'email2': email + 'lol', 'password': password, }, expect_errors=True, ) assert_equal(res.status_code, http.BAD_REQUEST) users = User.find(Q('username', 'eq', email)) assert_equal(users.count(), 0) def test_register_blacklisted_email_domain(self): url = api_url_for('register_user') name, email, password = fake.name(), 'bad@mailinator.com', 'agreatpasswordobviously' res = self.app.post_json( url, { 'fullName': name, 'email1': email, 'email2': email, 'password': password }, expect_errors=True ) assert_equal(res.status_code, http.BAD_REQUEST) users = User.find(Q('username', 'eq', email)) assert_equal(users.count(), 0) @mock.patch('framework.auth.views.validate_recaptcha', return_value=True) @mock.patch('framework.auth.views.mails.send_mail') def test_register_good_captcha(self, _, validate_recaptcha): url = api_url_for('register_user') name, email, password = fake.name(), fake.email(), 'underpressure' captcha = 'some valid captcha' with mock.patch.object(settings, 'RECAPTCHA_SITE_KEY', 'some_value'): resp = self.app.post_json( url, { 'fullName': name, 'email1': email, 'email2': str(email).upper(), 'password': password, 'g-recaptcha-response': captcha, } ) validate_recaptcha.assert_called_with(captcha, remote_ip=None) assert_equal(resp.status_code, http.OK) user = User.find_one(Q('username', 'eq', email)) assert_equal(user.fullname, name) @mock.patch('framework.auth.views.validate_recaptcha', return_value=False) @mock.patch('framework.auth.views.mails.send_mail') def test_register_missing_captcha(self, _, validate_recaptcha): url = api_url_for('register_user') name, email, password = fake.name(), fake.email(), 'underpressure' with mock.patch.object(settings, 'RECAPTCHA_SITE_KEY', 'some_value'): resp = self.app.post_json( url, { 'fullName': name, 'email1': email, 'email2': str(email).upper(), 'password': password, # 'g-recaptcha-response': 'supposed to be None', }, expect_errors=True ) validate_recaptcha.assert_called_with(None, remote_ip=None) assert_equal(resp.status_code, http.BAD_REQUEST) @mock.patch('framework.auth.views.validate_recaptcha', return_value=False) @mock.patch('framework.auth.views.mails.send_mail') def test_register_bad_captcha(self, _, validate_recaptcha): url = api_url_for('register_user') name, email, password = fake.name(), fake.email(), 'underpressure' with mock.patch.object(settings, 'RECAPTCHA_SITE_KEY', 'some_value'): resp = self.app.post_json( url, { 'fullName': name, 'email1': email, 'email2': str(email).upper(), 'password': password, 'g-recaptcha-response': 'bad captcha', }, expect_errors=True ) assert_equal(resp.status_code, http.BAD_REQUEST) 
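# The three captcha tests above follow one pattern: framework.auth.views.validate_recaptcha is
# patched at its point of use, and settings.RECAPTCHA_SITE_KEY is temporarily set with
# mock.patch.object so register_user takes the captcha branch. A minimal sketch of that
# pattern (names such as 'payload' are illustrative, not taken from the suite):
#
#     with mock.patch.object(settings, 'RECAPTCHA_SITE_KEY', 'some_value'):
#         resp = self.app.post_json(url, payload, expect_errors=True)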
@mock.patch('framework.auth.core.User.update_search_nodes') def test_register_after_being_invited_as_unreg_contributor(self, mock_update_search_nodes): # Regression test for: # https://github.com/CenterForOpenScience/openscienceframework.org/issues/861 # https://github.com/CenterForOpenScience/openscienceframework.org/issues/1021 # https://github.com/CenterForOpenScience/openscienceframework.org/issues/1026 # A user is invited as an unregistered contributor project = ProjectFactory() name, email = fake.name(), fake.email() project.add_unregistered_contributor(fullname=name, email=email, auth=Auth(project.creator)) project.save() # The new, unregistered user new_user = User.find_one(Q('username', 'eq', email)) # Instead of following the invitation link, they register at the regular # registration page # They use a different name when they register, but same email real_name = fake.name() password = 'myprecious' url = api_url_for('register_user') payload = { 'fullName': real_name, 'email1': email, 'email2': email, 'password': password, } # Send registration request self.app.post_json(url, payload) new_user.reload() # New user confirms by following confirmation link confirm_url = new_user.get_confirmation_url(email, external=False) self.app.get(confirm_url) new_user.reload() # Password and fullname should be updated assert_true(new_user.is_confirmed) assert_true(new_user.check_password(password)) assert_equal(new_user.fullname, real_name) @mock.patch('framework.auth.views.send_confirm_email') def test_register_sends_user_registered_signal(self, mock_send_confirm_email): url = api_url_for('register_user') name, email, password = fake.name(), fake.email(), 'underpressure' with capture_signals() as mock_signals: self.app.post_json( url, { 'fullName': name, 'email1': email, 'email2': email, 'password': password, } ) assert_equal(mock_signals.signals_sent(), set([auth.signals.user_registered, auth.signals.unconfirmed_user_created])) assert_true(mock_send_confirm_email.called) @mock.patch('framework.auth.views.mails.send_mail') def test_resend_confirmation(self, send_mail): email = 'test@mail.com' token = self.user.add_unconfirmed_email(email) self.user.save() url = api_url_for('resend_confirmation') header = {'address': email, 'primary': False, 'confirmed': False} self.app.put_json(url, {'id': self.user._id, 'email': header}, auth=self.user.auth) assert_true(send_mail.called) assert_true(send_mail.called_with( to_addr=email )) self.user.reload() assert_not_equal(token, self.user.get_confirmation_token(email)) with assert_raises(InvalidTokenError): self.user.get_unconfirmed_email_for_token(token) @mock.patch('framework.auth.views.mails.send_mail') def test_click_confirmation_email(self, send_mail): email = 'test@mail.com' token = self.user.add_unconfirmed_email(email) self.user.save() self.user.reload() assert_equal(self.user.email_verifications[token]['confirmed'], False) url = '/confirm/{}/{}/?logout=1'.format(self.user._id, token, self.user.username) res = self.app.get(url) self.user.reload() assert_equal(self.user.email_verifications[token]['confirmed'], True) assert_equal(res.status_code, 302) login_url = 'login?service' assert_in(login_url, res.body) def test_get_email_to_add_no_email(self): email_verifications = self.user.unconfirmed_email_info assert_equal(email_verifications, []) def test_get_unconfirmed_email(self): email = 'test@mail.com' self.user.add_unconfirmed_email(email) self.user.save() self.user.reload() email_verifications = self.user.unconfirmed_email_info 
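# an address that has only been added (its confirmation link not yet visited) is not reported by unconfirmed_email_info; test_get_email_to_add below visits the link and expects the address to appear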
assert_equal(email_verifications, []) def test_get_email_to_add(self): email = 'test@mail.com' token = self.user.add_unconfirmed_email(email) self.user.save() self.user.reload() assert_equal(self.user.email_verifications[token]['confirmed'], False) url = '/confirm/{}/{}/?logout=1'.format(self.user._id, token, self.user.username) self.app.get(url) self.user.reload() assert_equal(self.user.email_verifications[token]['confirmed'], True) email_verifications = self.user.unconfirmed_email_info assert_equal(email_verifications[0]['address'], 'test@mail.com') def test_add_email(self): email = 'test@mail.com' token = self.user.add_unconfirmed_email(email) self.user.save() self.user.reload() assert_equal(self.user.email_verifications[token]['confirmed'], False) url = '/confirm/{}/{}/?logout=1'.format(self.user._id, token) self.app.get(url) self.user.reload() email_verifications = self.user.unconfirmed_email_info put_email_url = api_url_for('unconfirmed_email_add') res = self.app.put_json(put_email_url, email_verifications[0], auth=self.user.auth) self.user.reload() assert_equal(res.json_body['status'], 'success') assert_equal(self.user.emails[1], 'test@mail.com') def test_remove_email(self): email = 'test@mail.com' token = self.user.add_unconfirmed_email(email) self.user.save() self.user.reload() url = '/confirm/{}/{}/?logout=1'.format(self.user._id, token) self.app.get(url) self.user.reload() email_verifications = self.user.unconfirmed_email_info remove_email_url = api_url_for('unconfirmed_email_remove') remove_res = self.app.delete_json(remove_email_url, email_verifications[0], auth=self.user.auth) self.user.reload() assert_equal(remove_res.json_body['status'], 'success') assert_equal(self.user.unconfirmed_email_info, []) def test_add_expired_email(self): # Do not return expired token and removes it from user.email_verifications email = 'test@mail.com' token = self.user.add_unconfirmed_email(email) self.user.email_verifications[token]['expiration'] = timezone.now() - dt.timedelta(days=100) self.user.save() self.user.reload() assert_equal(self.user.email_verifications[token]['email'], email) self.user.clean_email_verifications(given_token=token) unconfirmed_emails = self.user.unconfirmed_email_info assert_equal(unconfirmed_emails, []) assert_equal(self.user.email_verifications, {}) def test_clean_email_verifications(self): # Do not return bad token and removes it from user.email_verifications email = 'test@mail.com' token = 'blahblahblah' self.user.email_verifications[token] = {'expiration': timezone.now() + dt.timedelta(days=1), 'email': email, 'confirmed': False } self.user.save() self.user.reload() assert_equal(self.user.email_verifications[token]['email'], email) self.user.clean_email_verifications(given_token=token) unconfirmed_emails = self.user.unconfirmed_email_info assert_equal(unconfirmed_emails, []) assert_equal(self.user.email_verifications, {}) def test_clean_email_verifications_when_email_verifications_is_an_empty_dict(self): self.user.email_verifications = {} self.user.save() ret = self.user.clean_email_verifications() assert_equal(ret, None) assert_equal(self.user.email_verifications, {}) def test_add_invalid_email(self): # Do not return expired token and removes it from user.email_verifications email = u'\u0000\u0008\u000b\u000c\u000e\u001f\ufffe\uffffHello@yourmom.com' # illegal_str = u'\u0000\u0008\u000b\u000c\u000e\u001f\ufffe\uffffHello' # illegal_str += unichr(0xd800) + unichr(0xdbff) + ' World' # email = 'test@mail.com' with assert_raises(ValidationError): 
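# addresses containing control characters, such as the one built above, are rejected by the email validator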
self.user.add_unconfirmed_email(email) def test_add_email_merge(self): email = "copy@cat.com" dupe = UserFactory( username=email, emails=[email] ) dupe.save() token = self.user.add_unconfirmed_email(email) self.user.save() self.user.reload() assert_equal(self.user.email_verifications[token]['confirmed'], False) url = '/confirm/{}/{}/?logout=1'.format(self.user._id, token) self.app.get(url) self.user.reload() email_verifications = self.user.unconfirmed_email_info put_email_url = api_url_for('unconfirmed_email_add') res = self.app.put_json(put_email_url, email_verifications[0], auth=self.user.auth) self.user.reload() assert_equal(res.json_body['status'], 'success') assert_equal(self.user.emails[1], 'copy@cat.com') def test_resend_confirmation_without_user_id(self): email = 'test@mail.com' url = api_url_for('resend_confirmation') header = {'address': email, 'primary': False, 'confirmed': False} res = self.app.put_json(url, {'email': header}, auth=self.user.auth, expect_errors=True) assert_equal(res.status_code, 400) assert_equal(res.json['message_long'], '"id" is required') def test_resend_confirmation_without_email(self): url = api_url_for('resend_confirmation') res = self.app.put_json(url, {'id': self.user._id}, auth=self.user.auth, expect_errors=True) assert_equal(res.status_code, 400) def test_resend_confirmation_not_work_for_primary_email(self): email = 'test@mail.com' url = api_url_for('resend_confirmation') header = {'address': email, 'primary': True, 'confirmed': False} res = self.app.put_json(url, {'id': self.user._id, 'email': header}, auth=self.user.auth, expect_errors=True) assert_equal(res.status_code, 400) assert_equal(res.json['message_long'], 'Cannnot resend confirmation for confirmed emails') def test_resend_confirmation_not_work_for_confirmed_email(self): email = 'test@mail.com' url = api_url_for('resend_confirmation') header = {'address': email, 'primary': False, 'confirmed': True} res = self.app.put_json(url, {'id': self.user._id, 'email': header}, auth=self.user.auth, expect_errors=True) assert_equal(res.status_code, 400) assert_equal(res.json['message_long'], 'Cannnot resend confirmation for confirmed emails') @mock.patch('framework.auth.views.mails.send_mail') def test_resend_confirmation_does_not_send_before_throttle_expires(self, send_mail): email = 'test@mail.com' self.user.save() url = api_url_for('resend_confirmation') header = {'address': email, 'primary': False, 'confirmed': False} self.app.put_json(url, {'id': self.user._id, 'email': header}, auth=self.user.auth) assert_true(send_mail.called) # 2nd call does not send email because throttle period has not expired res = self.app.put_json(url, {'id': self.user._id, 'email': header}, auth=self.user.auth, expect_errors=True) assert_equal(res.status_code, 400) def test_confirm_email_clears_unclaimed_records_and_revokes_token(self): unclaimed_user = UnconfirmedUserFactory() # unclaimed user has been invited to a project. 
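# visiting the confirmation link later in this test is expected to clear both the unclaimed record added below and the pending email-verification token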
referrer = UserFactory() project = ProjectFactory(creator=referrer) unclaimed_user.add_unclaimed_record(project, referrer, 'foo') unclaimed_user.save() # sanity check assert_equal(len(unclaimed_user.email_verifications.keys()), 1) # user goes to email confirmation link token = unclaimed_user.get_confirmation_token(unclaimed_user.username) url = web_url_for('confirm_email_get', uid=unclaimed_user._id, token=token) res = self.app.get(url) assert_equal(res.status_code, 302) # unclaimed records and token are cleared unclaimed_user.reload() assert_equal(unclaimed_user.unclaimed_records, {}) assert_equal(len(unclaimed_user.email_verifications.keys()), 0) def test_confirmation_link_registers_user(self): user = User.create_unconfirmed('brian@queen.com', 'bicycle123', 'Brian May') assert_false(user.is_registered) # sanity check user.save() confirmation_url = user.get_confirmation_url('brian@queen.com', external=False) res = self.app.get(confirmation_url) assert_equal(res.status_code, 302, 'redirects to settings page') res = res.follow() user.reload() assert_true(user.is_registered) class TestAuthLoginAndRegisterLogic(OsfTestCase): def setUp(self): super(TestAuthLoginAndRegisterLogic, self).setUp() self.no_auth = Auth() self.user_auth = AuthUserFactory() self.auth = Auth(user=self.user_auth) self.next_url = web_url_for('my_projects', _absolute=True) self.invalid_campaign = 'invalid_campaign' def test_osf_login_with_auth(self): # login: user with auth data = login_and_register_handler(self.auth) assert_equal(data.get('status_code'), http.FOUND) assert_equal(data.get('next_url'), web_url_for('dashboard', _absolute=True)) def test_osf_login_without_auth(self): # login: user without auth data = login_and_register_handler(self.no_auth) assert_equal(data.get('status_code'), http.FOUND) assert_equal(data.get('next_url'), web_url_for('dashboard', _absolute=True)) def test_osf_register_with_auth(self): # register: user with auth data = login_and_register_handler(self.auth, login=False) assert_equal(data.get('status_code'), http.FOUND) assert_equal(data.get('next_url'), web_url_for('dashboard', _absolute=True)) def test_osf_register_without_auth(self): # register: user without auth data = login_and_register_handler(self.no_auth, login=False) assert_equal(data.get('status_code'), http.OK) assert_equal(data.get('next_url'), web_url_for('dashboard', _absolute=True)) def test_next_url_login_with_auth(self): # next_url login: user with auth data = login_and_register_handler(self.auth, next_url=self.next_url) assert_equal(data.get('status_code'), http.FOUND) assert_equal(data.get('next_url'), self.next_url) def test_next_url_login_without_auth(self): # login: user without auth request.url = web_url_for('auth_login', next=self.next_url, _absolute=True) data = login_and_register_handler(self.no_auth, next_url=self.next_url) assert_equal(data.get('status_code'), http.FOUND) assert_equal(data.get('next_url'), get_login_url(request.url)) def test_next_url_register_with_auth(self): # register: user with auth data = login_and_register_handler(self.auth, login=False, next_url=self.next_url) assert_equal(data.get('status_code'), http.FOUND) assert_equal(data.get('next_url'), self.next_url) def test_next_url_register_without_auth(self): # register: user without auth data = login_and_register_handler(self.no_auth, login=False, next_url=self.next_url) assert_equal(data.get('status_code'), http.OK) assert_equal(data.get('next_url'), request.url) def test_institution_login_and_register(self): pass def 
test_institution_login_with_auth(self): # institution login: user with auth data = login_and_register_handler(self.auth, campaign='institution') assert_equal(data.get('status_code'), http.FOUND) assert_equal(data.get('next_url'), web_url_for('dashboard', _absolute=True)) def test_institution_login_without_auth(self): # institution login: user without auth data = login_and_register_handler(self.no_auth, campaign='institution') assert_equal(data.get('status_code'), http.FOUND) assert_equal( data.get('next_url'), get_login_url(web_url_for('dashboard', _absolute=True), campaign='institution')) def test_institution_regsiter_with_auth(self): # institution register: user with auth data = login_and_register_handler(self.auth, login=False, campaign='institution') assert_equal(data.get('status_code'), http.FOUND) assert_equal(data.get('next_url'), web_url_for('dashboard', _absolute=True)) def test_institution_register_without_auth(self): # institution register: user without auth data = login_and_register_handler(self.no_auth, login=False, campaign='institution') assert_equal(data.get('status_code'), http.FOUND) assert_equal( data.get('next_url'), get_login_url(web_url_for('dashboard', _absolute=True), campaign='institution') ) def test_campaign_login_with_auth(self): for campaign in get_campaigns(): if is_institution_login(campaign): continue # campaign login: user with auth data = login_and_register_handler(self.auth, campaign=campaign) assert_equal(data.get('status_code'), http.FOUND) assert_equal(data.get('next_url'), campaign_url_for(campaign)) def test_campaign_login_without_auth(self): for campaign in get_campaigns(): if is_institution_login(campaign): continue # campaign login: user without auth data = login_and_register_handler(self.no_auth, campaign=campaign) assert_equal(data.get('status_code'), http.FOUND) assert_equal( data.get('next_url'), web_url_for('auth_register', campaign=campaign, next=campaign_url_for(campaign)) ) def test_campaign_register_with_auth(self): for campaign in get_campaigns(): if is_institution_login(campaign): continue # campaign register: user with auth data = login_and_register_handler(self.auth, login=False, campaign=campaign) assert_equal(data.get('status_code'), http.FOUND) assert_equal(data.get('next_url'), campaign_url_for(campaign)) def test_campaign_register_without_auth(self): for campaign in get_campaigns(): if is_institution_login(campaign): continue # campaign register: user without auth data = login_and_register_handler(self.no_auth, login=False, campaign=campaign) assert_equal(data.get('status_code'), http.OK) if is_native_login(campaign): # native campaign: prereg and erpc assert_equal(data.get('next_url'), campaign_url_for(campaign)) elif is_proxy_login(campaign): # proxy campaign: preprints and branded ones assert_equal( data.get('next_url'), web_url_for('auth_login', next=campaign_url_for(campaign), _absolute=True) ) def test_campaign_next_url_login_with_auth(self): for campaign in get_campaigns(): if is_institution_login(campaign): continue # campaign login: user with auth next_url = campaign_url_for(campaign) data = login_and_register_handler(self.auth, campaign=campaign, next_url=next_url) assert_equal(data.get('status_code'), http.FOUND) assert_equal(data.get('next_url'), next_url) def test_campaign_next_url_login_without_auth(self): for campaign in get_campaigns(): if is_institution_login(campaign): continue # campaign login: user without auth next_url = campaign_url_for(campaign) data = login_and_register_handler(self.no_auth, 
campaign=campaign, next_url=next_url) assert_equal(data.get('status_code'), http.FOUND) assert_equal( data.get('next_url'), web_url_for('auth_register', campaign=campaign, next=next_url) ) def test_campaign_next_url_register_with_auth(self): for campaign in get_campaigns(): if is_institution_login(campaign): continue # campaign register: user with auth next_url = campaign_url_for(campaign) data = login_and_register_handler(self.auth, login=False, campaign=campaign, next_url=next_url) assert_equal(data.get('status_code'), http.FOUND) assert_equal(data.get('next_url'), next_url) def test_campaign_next_url_register_without_auth(self): for campaign in get_campaigns(): if is_institution_login(campaign): continue # campaign register: user without auth next_url = campaign_url_for(campaign) data = login_and_register_handler(self.no_auth, login=False, campaign=campaign, next_url=next_url) assert_equal(data.get('status_code'), http.OK) if is_native_login(campaign): # native campaign: prereg and erpc assert_equal(data.get('next_url'), next_url) elif is_proxy_login(campaign): # proxy campaign: preprints and branded ones assert_equal( data.get('next_url'), web_url_for('auth_login', next= next_url, _absolute=True) ) def test_invalid_campaign_login_without_auth(self): data = login_and_register_handler( self.no_auth, login=True, campaign=self.invalid_campaign, next_url=self.next_url ) redirect_url = web_url_for('auth_login', campaigns=None, next=self.next_url) assert_equal(data['status_code'], http.FOUND) assert_equal(data['next_url'], redirect_url) assert_equal(data['campaign'], None) def test_invalid_campaign_register_without_auth(self): data = login_and_register_handler( self.no_auth, login=False, campaign=self.invalid_campaign, next_url=self.next_url ) redirect_url = web_url_for('auth_register', campaigns=None, next=self.next_url) assert_equal(data['status_code'], http.FOUND) assert_equal(data['next_url'], redirect_url) assert_equal(data['campaign'], None) # The following two tests handles the special case for `claim_user_registered` # When an authenticated user clicks the claim confirmation clink, there are two ways to trigger this flow: # 1. If the authenticated user is already a contributor to the project, OSF will ask the user to sign out # by providing a "logout" link. # 2. If the authenticated user is not a contributor but decides not to claim contributor under this account, # OSF provides a link "not <username>?" for the user to logout. # Both links will land user onto the register page with "MUST LOGIN" push notification. def test_register_logout_flag_with_auth(self): # when user click the "logout" or "not <username>?" 
link, first step is to log user out data = login_and_register_handler(self.auth, login=False, campaign=None, next_url=self.next_url, logout=True) assert_equal(data.get('status_code'), 'auth_logout') assert_equal(data.get('next_url'), self.next_url) def test_register_logout_flage_without(self): # the second step is to land user on register page with "MUST LOGIN" warning data = login_and_register_handler(self.no_auth, login=False, campaign=None, next_url=self.next_url, logout=True) assert_equal(data.get('status_code'), http.OK) assert_equal(data.get('next_url'), self.next_url) assert_true(data.get('must_login_warning')) class TestAuthLogout(OsfTestCase): def setUp(self): super(TestAuthLogout, self).setUp() self.goodbye_url = web_url_for('goodbye', _absolute=True) self.redirect_url = web_url_for('forgot_password_get', _absolute=True) self.valid_next_url = web_url_for('dashboard', _absolute=True) self.invalid_next_url = 'http://localhost:1234/abcde' self.auth_user = AuthUserFactory() def tearDown(self): super(TestAuthLogout, self).tearDown() User.objects.all().delete() assert_equal(User.objects.count(), 0) def test_logout_with_valid_next_url_logged_in(self): logout_url = web_url_for('auth_logout', _absolute=True, next=self.valid_next_url) resp = self.app.get(logout_url, auth=self.auth_user.auth) assert_equal(resp.status_code, http.FOUND) assert_equal(cas.get_logout_url(logout_url), resp.headers['Location']) def test_logout_with_valid_next_url_logged_out(self): logout_url = web_url_for('auth_logout', _absolute=True, next=self.valid_next_url) resp = self.app.get(logout_url, auth=None) assert_equal(resp.status_code, http.FOUND) assert_equal(self.valid_next_url, resp.headers['Location']) def test_logout_with_invalid_next_url_logged_in(self): logout_url = web_url_for('auth_logout', _absolute=True, next=self.invalid_next_url) resp = self.app.get(logout_url, auth=self.auth_user.auth) assert_equal(resp.status_code, http.FOUND) assert_equal(cas.get_logout_url(self.goodbye_url), resp.headers['Location']) def test_logout_with_invalid_next_url_logged_out(self): logout_url = web_url_for('auth_logout', _absolute=True, next=self.invalid_next_url) resp = self.app.get(logout_url, auth=None) assert_equal(resp.status_code, http.FOUND) assert_equal(cas.get_logout_url(self.goodbye_url), resp.headers['Location']) def test_logout_with_redirect_url(self): logout_url = web_url_for('auth_logout', _absolute=True, redirect_url=self.redirect_url) resp = self.app.get(logout_url, auth=self.auth_user.auth) assert_equal(resp.status_code, http.FOUND) assert_equal(cas.get_logout_url(self.redirect_url), resp.headers['Location']) def test_logout_with_no_parameter(self): logout_url = web_url_for('auth_logout', _absolute=True) resp = self.app.get(logout_url, auth=None) assert_equal(resp.status_code, http.FOUND) assert_equal(cas.get_logout_url(self.goodbye_url), resp.headers['Location']) class TestExternalAuthViews(OsfTestCase): def setUp(self): super(TestExternalAuthViews, self).setUp() name, email = fake.name(), fake.email() self.provider_id = fake.ean() external_identity = { 'service': { self.provider_id: 'CREATE' } } self.user = User.create_unconfirmed( username=email, password=str(fake.password()), fullname=name, external_identity=external_identity, ) self.user.save() self.auth = Auth(self.user) def test_external_login_email_get_with_invalid_session(self): url = web_url_for('external_login_email_get') resp = self.app.get(url, expect_errors=True) assert_equal(resp.status_code, 401) def 
test_external_login_confirm_email_get_with_another_user_logged_in(self): another_user = AuthUserFactory() url = self.user.get_confirmation_url(self.user.username, external_id_provider='service', destination='dashboard') res = self.app.get(url, auth=another_user.auth) assert_equal(res.status_code, 302, 'redirects to cas logout') assert_in('/logout?service=', res.location) assert_in(url, res.location) def test_external_login_confirm_email_get_without_destination(self): url = self.user.get_confirmation_url(self.user.username, external_id_provider='service') res = self.app.get(url, auth=self.auth, expect_errors=True) assert_equal(res.status_code, 400, 'bad request') @mock.patch('website.mails.send_mail') def test_external_login_confirm_email_get_create(self, mock_welcome): assert_false(self.user.is_registered) url = self.user.get_confirmation_url(self.user.username, external_id_provider='service', destination='dashboard') res = self.app.get(url, auth=self.auth) assert_equal(res.status_code, 302, 'redirects to cas login') assert_in('/login?service=', res.location) assert_in('new=true', res.location) assert_equal(mock_welcome.call_count, 1) self.user.reload() assert_equal(self.user.external_identity['service'][self.provider_id], 'VERIFIED') assert_true(self.user.is_registered) assert_true(self.user.has_usable_password()) @mock.patch('website.mails.send_mail') def test_external_login_confirm_email_get_link(self, mock_link_confirm): self.user.external_identity['service'][self.provider_id] = 'LINK' self.user.save() assert_false(self.user.is_registered) url = self.user.get_confirmation_url(self.user.username, external_id_provider='service', destination='dashboard') res = self.app.get(url, auth=self.auth) assert_equal(res.status_code, 302, 'redirects to cas login') assert_in('/login?service=', res.location) assert_not_in('new=true', res.location) assert_equal(mock_link_confirm.call_count, 1) self.user.reload() assert_equal(self.user.external_identity['service'][self.provider_id], 'VERIFIED') assert_true(self.user.is_registered) assert_true(self.user.has_usable_password()) @mock.patch('website.mails.send_mail') def test_external_login_confirm_email_get_duped_id(self, mock_confirm): dupe_user = UserFactory(external_identity={'service': {self.provider_id: 'CREATE'}}) assert_equal(dupe_user.external_identity, self.user.external_identity) url = self.user.get_confirmation_url(self.user.username, external_id_provider='service', destination='dashboard') res = self.app.get(url, auth=self.auth) assert_equal(res.status_code, 302, 'redirects to cas login') assert_in('/login?service=', res.location) assert_equal(mock_confirm.call_count, 1) self.user.reload() dupe_user.reload() assert_equal(self.user.external_identity['service'][self.provider_id], 'VERIFIED') assert_equal(dupe_user.external_identity, {}) @mock.patch('website.mails.send_mail') def test_external_login_confirm_email_get_duping_id(self, mock_confirm): dupe_user = UserFactory(external_identity={'service': {self.provider_id: 'VERIFIED'}}) url = self.user.get_confirmation_url(self.user.username, external_id_provider='service', destination='dashboard') res = self.app.get(url, auth=self.auth, expect_errors=True) assert_equal(res.status_code, 403, 'only allows one user to link an id') assert_equal(mock_confirm.call_count, 0) self.user.reload() dupe_user.reload() assert_equal(dupe_user.external_identity['service'][self.provider_id], 'VERIFIED') assert_equal(self.user.external_identity, {}) def test_ensure_external_identity_uniqueness_unverified(self): 
dupe_user = UserFactory(external_identity={'service': {self.provider_id: 'CREATE'}}) assert_equal(dupe_user.external_identity, self.user.external_identity) ensure_external_identity_uniqueness('service', self.provider_id, self.user) dupe_user.reload() self.user.reload() assert_equal(dupe_user.external_identity, {}) assert_equal(self.user.external_identity, {'service': {self.provider_id: 'CREATE'}}) def test_ensure_external_identity_uniqueness_verified(self): dupe_user = UserFactory(external_identity={'service': {self.provider_id: 'VERIFIED'}}) assert_equal(dupe_user.external_identity, {'service': {self.provider_id: 'VERIFIED'}}) assert_not_equal(dupe_user.external_identity, self.user.external_identity) with assert_raises(ValidationError): ensure_external_identity_uniqueness('service', self.provider_id, self.user) dupe_user.reload() self.user.reload() assert_equal(dupe_user.external_identity, {'service': {self.provider_id: 'VERIFIED'}}) assert_equal(self.user.external_identity, {}) def test_ensure_external_identity_uniqueness_multiple(self): dupe_user = UserFactory(external_identity={'service': {self.provider_id: 'CREATE'}}) assert_equal(dupe_user.external_identity, self.user.external_identity) ensure_external_identity_uniqueness('service', self.provider_id) dupe_user.reload() self.user.reload() assert_equal(dupe_user.external_identity, {}) assert_equal(self.user.external_identity, {}) # TODO: Use mock add-on class TestAddonUserViews(OsfTestCase): def setUp(self): super(TestAddonUserViews, self).setUp() self.user = AuthUserFactory() def test_choose_addons_add(self): """Add add-ons; assert that add-ons are attached to project. """ url = '/api/v1/settings/addons/' self.app.post_json( url, {'github': True}, auth=self.user.auth, ).maybe_follow() self.user.reload() assert_true(self.user.get_addon('github')) def test_choose_addons_remove(self): # Add, then delete, add-ons; assert that add-ons are not attached to # project. 
url = '/api/v1/settings/addons/' self.app.post_json( url, {'github': True}, auth=self.user.auth, ).maybe_follow() self.app.post_json( url, {'github': False}, auth=self.user.auth ).maybe_follow() self.user.reload() assert_false(self.user.get_addon('github')) class TestConfigureMailingListViews(OsfTestCase): @classmethod def setUpClass(cls): super(TestConfigureMailingListViews, cls).setUpClass() cls._original_enable_email_subscriptions = settings.ENABLE_EMAIL_SUBSCRIPTIONS settings.ENABLE_EMAIL_SUBSCRIPTIONS = True def test_user_unsubscribe_and_subscribe_help_mailing_list(self): user = AuthUserFactory() url = api_url_for('user_choose_mailing_lists') payload = {settings.OSF_HELP_LIST: False} res = self.app.post_json(url, payload, auth=user.auth) user.reload() assert_false(user.osf_mailing_lists[settings.OSF_HELP_LIST]) payload = {settings.OSF_HELP_LIST: True} res = self.app.post_json(url, payload, auth=user.auth) user.reload() assert_true(user.osf_mailing_lists[settings.OSF_HELP_LIST]) def test_get_notifications(self): user = AuthUserFactory() mailing_lists = dict(user.osf_mailing_lists.items() + user.mailchimp_mailing_lists.items()) url = api_url_for('user_notifications') res = self.app.get(url, auth=user.auth) assert_equal(mailing_lists, res.json['mailing_lists']) def test_osf_help_mails_subscribe(self): user = UserFactory() user.osf_mailing_lists[settings.OSF_HELP_LIST] = False user.save() update_osf_help_mails_subscription(user, True) assert_true(user.osf_mailing_lists[settings.OSF_HELP_LIST]) def test_osf_help_mails_unsubscribe(self): user = UserFactory() user.osf_mailing_lists[settings.OSF_HELP_LIST] = True user.save() update_osf_help_mails_subscription(user, False) assert_false(user.osf_mailing_lists[settings.OSF_HELP_LIST]) @unittest.skipIf(settings.USE_CELERY, 'Subscription must happen synchronously for this test') @mock.patch('website.mailchimp_utils.get_mailchimp_api') def test_user_choose_mailing_lists_updates_user_dict(self, mock_get_mailchimp_api): user = AuthUserFactory() list_name = 'OSF General' mock_client = mock.MagicMock() mock_get_mailchimp_api.return_value = mock_client mock_client.lists.list.return_value = {'data': [{'id': 1, 'list_name': list_name}]} list_id = mailchimp_utils.get_list_id_from_name(list_name) payload = {settings.MAILCHIMP_GENERAL_LIST: True} url = api_url_for('user_choose_mailing_lists') res = self.app.post_json(url, payload, auth=user.auth) user.reload() # check user.mailing_lists is updated assert_true(user.mailchimp_mailing_lists[settings.MAILCHIMP_GENERAL_LIST]) assert_equal( user.mailchimp_mailing_lists[settings.MAILCHIMP_GENERAL_LIST], payload[settings.MAILCHIMP_GENERAL_LIST] ) # check that user is subscribed mock_client.lists.subscribe.assert_called_with(id=list_id, email={'email': user.username}, merge_vars={ 'fname': user.given_name, 'lname': user.family_name, }, double_optin=False, update_existing=True) def test_get_mailchimp_get_endpoint_returns_200(self): url = api_url_for('mailchimp_get_endpoint') res = self.app.get(url) assert_equal(res.status_code, 200) @mock.patch('website.mailchimp_utils.get_mailchimp_api') def test_mailchimp_webhook_subscribe_action_does_not_change_user(self, mock_get_mailchimp_api): """ Test that 'subscribe' actions sent to the OSF via mailchimp webhooks update the OSF database. 
""" list_id = '12345' list_name = 'OSF General' mock_client = mock.MagicMock() mock_get_mailchimp_api.return_value = mock_client mock_client.lists.list.return_value = {'data': [{'id': list_id, 'name': list_name}]} # user is not subscribed to a list user = AuthUserFactory() user.mailchimp_mailing_lists = {'OSF General': False} user.save() # user subscribes and webhook sends request to OSF data = { 'type': 'subscribe', 'data[list_id]': list_id, 'data[email]': user.username } url = api_url_for('sync_data_from_mailchimp') + '?key=' + settings.MAILCHIMP_WEBHOOK_SECRET_KEY res = self.app.post(url, data, content_type="application/x-www-form-urlencoded", auth=user.auth) # user field is updated on the OSF user.reload() assert_true(user.mailchimp_mailing_lists[list_name]) @mock.patch('website.mailchimp_utils.get_mailchimp_api') def test_mailchimp_webhook_profile_action_does_not_change_user(self, mock_get_mailchimp_api): """ Test that 'profile' actions sent to the OSF via mailchimp webhooks do not cause any database changes. """ list_id = '12345' list_name = 'OSF General' mock_client = mock.MagicMock() mock_get_mailchimp_api.return_value = mock_client mock_client.lists.list.return_value = {'data': [{'id': list_id, 'name': list_name}]} # user is subscribed to a list user = AuthUserFactory() user.mailchimp_mailing_lists = {'OSF General': True} user.save() # user hits subscribe again, which will update the user's existing info on mailchimp # webhook sends request (when configured to update on changes made through the API) data = { 'type': 'profile', 'data[list_id]': list_id, 'data[email]': user.username } url = api_url_for('sync_data_from_mailchimp') + '?key=' + settings.MAILCHIMP_WEBHOOK_SECRET_KEY res = self.app.post(url, data, content_type="application/x-www-form-urlencoded", auth=user.auth) # user field does not change user.reload() assert_true(user.mailchimp_mailing_lists[list_name]) @mock.patch('website.mailchimp_utils.get_mailchimp_api') def test_sync_data_from_mailchimp_unsubscribes_user(self, mock_get_mailchimp_api): list_id = '12345' list_name = 'OSF General' mock_client = mock.MagicMock() mock_get_mailchimp_api.return_value = mock_client mock_client.lists.list.return_value = {'data': [{'id': list_id, 'name': list_name}]} # user is subscribed to a list user = AuthUserFactory() user.mailchimp_mailing_lists = {'OSF General': True} user.save() # user unsubscribes through mailchimp and webhook sends request data = { 'type': 'unsubscribe', 'data[list_id]': list_id, 'data[email]': user.username } url = api_url_for('sync_data_from_mailchimp') + '?key=' + settings.MAILCHIMP_WEBHOOK_SECRET_KEY res = self.app.post(url, data, content_type="application/x-www-form-urlencoded", auth=user.auth) # user field is updated on the OSF user.reload() assert_false(user.mailchimp_mailing_lists[list_name]) def test_sync_data_from_mailchimp_fails_without_secret_key(self): user = AuthUserFactory() payload = {'values': {'type': 'unsubscribe', 'data': {'list_id': '12345', 'email': 'freddie@cos.io'}}} url = api_url_for('sync_data_from_mailchimp') res = self.app.post_json(url, payload, auth=user.auth, expect_errors=True) assert_equal(res.status_code, http.UNAUTHORIZED) @classmethod def tearDownClass(cls): super(TestConfigureMailingListViews, cls).tearDownClass() settings.ENABLE_EMAIL_SUBSCRIPTIONS = cls._original_enable_email_subscriptions # TODO: Move to OSF Storage class TestFileViews(OsfTestCase): def setUp(self): super(TestFileViews, self).setUp() self.user = AuthUserFactory() self.project = 
ProjectFactory(creator=self.user, is_public=True) self.project.add_contributor(self.user) self.project.save() def test_files_get(self): url = self.project.api_url_for('collect_file_trees') res = self.app.get(url, auth=self.user.auth) expected = _view_project(self.project, auth=Auth(user=self.user)) assert_equal(res.status_code, http.OK) assert_equal(res.json['node'], expected['node']) assert_in('tree_js', res.json) assert_in('tree_css', res.json) def test_grid_data(self): url = self.project.api_url_for('grid_data') res = self.app.get(url, auth=self.user.auth).maybe_follow() assert_equal(res.status_code, http.OK) expected = rubeus.to_hgrid(self.project, auth=Auth(self.user)) data = res.json['data'] assert_equal(len(data), len(expected)) class TestTagViews(OsfTestCase): def setUp(self): super(TestTagViews, self).setUp() self.user = AuthUserFactory() self.project = ProjectFactory(creator=self.user) @unittest.skip('Tags endpoint disabled for now.') def test_tag_get_returns_200(self): url = web_url_for('project_tag', tag='foo') res = self.app.get(url) assert_equal(res.status_code, 200) class TestReorderComponents(OsfTestCase): def setUp(self): super(TestReorderComponents, self).setUp() self.creator = AuthUserFactory() self.contrib = AuthUserFactory() # Project is public self.project = ProjectFactory.create(creator=self.creator, is_public=True) self.project.add_contributor(self.contrib, auth=Auth(self.creator)) # subcomponent that only creator can see self.public_component = NodeFactory(creator=self.creator, is_public=True) self.private_component = NodeFactory(creator=self.creator, is_public=False) NodeRelation.objects.create(parent=self.project, child=self.public_component) NodeRelation.objects.create(parent=self.project, child=self.private_component) self.project.save() # https://github.com/CenterForOpenScience/openscienceframework.org/issues/489 def test_reorder_components_with_private_component(self): # contrib tries to reorder components payload = { 'new_list': [ '{0}'.format(self.private_component._id), '{0}'.format(self.public_component._id), ] } url = self.project.api_url_for('project_reorder_components') res = self.app.post_json(url, payload, auth=self.contrib.auth) assert_equal(res.status_code, 200) class TestWikiWidgetViews(OsfTestCase): def setUp(self): super(TestWikiWidgetViews, self).setUp() # project with no home wiki page self.project = ProjectFactory() self.read_only_contrib = AuthUserFactory() self.project.add_contributor(self.read_only_contrib, permissions='read') self.noncontributor = AuthUserFactory() # project with no home wiki content self.project2 = ProjectFactory(creator=self.project.creator) self.project2.add_contributor(self.read_only_contrib, permissions='read') self.project2.update_node_wiki(name='home', content='', auth=Auth(self.project.creator)) def test_show_wiki_for_contributors_when_no_wiki_or_content(self): assert_true(_should_show_wiki_widget(self.project, self.project.creator)) assert_true(_should_show_wiki_widget(self.project2, self.project.creator)) def test_show_wiki_is_false_for_read_contributors_when_no_wiki_or_content(self): assert_false(_should_show_wiki_widget(self.project, self.read_only_contrib)) assert_false(_should_show_wiki_widget(self.project2, self.read_only_contrib)) def test_show_wiki_is_false_for_noncontributors_when_no_wiki_or_content(self): assert_false(_should_show_wiki_widget(self.project, self.noncontributor)) assert_false(_should_show_wiki_widget(self.project2, self.read_only_contrib)) class TestProjectCreation(OsfTestCase): def 
setUp(self): super(TestProjectCreation, self).setUp() self.creator = AuthUserFactory() self.url = api_url_for('project_new_post') self.user1 = AuthUserFactory() self.user2 = AuthUserFactory() self.project = ProjectFactory(creator=self.user1) self.project.add_contributor(self.user2, auth=Auth(self.user1)) self.project.save() def tearDown(self): super(TestProjectCreation, self).tearDown() def test_needs_title(self): res = self.app.post_json(self.url, {}, auth=self.creator.auth, expect_errors=True) assert_equal(res.status_code, 400) def test_create_component_strips_html(self): user = AuthUserFactory() project = ProjectFactory(creator=user) url = web_url_for('project_new_node', pid=project._id) post_data = {'title': '<b>New <blink>Component</blink> Title</b>', 'category': ''} request = self.app.post(url, post_data, auth=user.auth).follow() project.reload() child = project.nodes[0] # HTML has been stripped assert_equal(child.title, 'New Component Title') def test_strip_html_from_title(self): payload = { 'title': 'no html <b>here</b>' } res = self.app.post_json(self.url, payload, auth=self.creator.auth) node = Node.load(res.json['projectUrl'].replace('/', '')) assert_true(node) assert_equal('no html here', node.title) def test_only_needs_title(self): payload = { 'title': 'Im a real title' } res = self.app.post_json(self.url, payload, auth=self.creator.auth) assert_equal(res.status_code, 201) def test_title_must_be_one_long(self): payload = { 'title': '' } res = self.app.post_json( self.url, payload, auth=self.creator.auth, expect_errors=True) assert_equal(res.status_code, 400) def test_title_must_be_less_than_200(self): payload = { 'title': ''.join([str(x) for x in xrange(0, 250)]) } res = self.app.post_json( self.url, payload, auth=self.creator.auth, expect_errors=True) assert_equal(res.status_code, 400) def test_fails_to_create_project_with_whitespace_title(self): payload = { 'title': ' ' } res = self.app.post_json( self.url, payload, auth=self.creator.auth, expect_errors=True) assert_equal(res.status_code, 400) def test_creates_a_project(self): payload = { 'title': 'Im a real title' } res = self.app.post_json(self.url, payload, auth=self.creator.auth) assert_equal(res.status_code, 201) node = Node.load(res.json['projectUrl'].replace('/', '')) assert_true(node) assert_true(node.title, 'Im a real title') def test_create_component_add_contributors_admin(self): url = web_url_for('project_new_node', pid=self.project._id) post_data = {'title': 'New Component With Contributors Title', 'category': '', 'inherit_contributors': True} res = self.app.post(url, post_data, auth=self.user1.auth) self.project.reload() child = self.project.nodes[0] assert_equal(child.title, 'New Component With Contributors Title') assert_in(self.user1, child.contributors) assert_in(self.user2, child.contributors) # check redirect url assert_in('/contributors/', res.location) def test_create_component_with_contributors_read_write(self): url = web_url_for('project_new_node', pid=self.project._id) non_admin = AuthUserFactory() self.project.add_contributor(non_admin, permissions=['read', 'write']) self.project.save() post_data = {'title': 'New Component With Contributors Title', 'category': '', 'inherit_contributors': True} res = self.app.post(url, post_data, auth=non_admin.auth) self.project.reload() child = self.project.nodes[0] assert_equal(child.title, 'New Component With Contributors Title') assert_in(non_admin, child.contributors) assert_in(self.user1, child.contributors) assert_in(self.user2, child.contributors) 
assert_equal(child.get_permissions(non_admin), ['read', 'write', 'admin']) # check redirect url assert_in('/contributors/', res.location) def test_create_component_with_contributors_read(self): url = web_url_for('project_new_node', pid=self.project._id) non_admin = AuthUserFactory() self.project.add_contributor(non_admin, permissions=['read']) self.project.save() post_data = {'title': 'New Component With Contributors Title', 'category': '', 'inherit_contributors': True} res = self.app.post(url, post_data, auth=non_admin.auth, expect_errors=True) assert_equal(res.status_code, 403) def test_create_component_add_no_contributors(self): url = web_url_for('project_new_node', pid=self.project._id) post_data = {'title': 'New Component With Contributors Title', 'category': ''} res = self.app.post(url, post_data, auth=self.user1.auth) self.project.reload() child = self.project.nodes[0] assert_equal(child.title, 'New Component With Contributors Title') assert_in(self.user1, child.contributors) assert_not_in(self.user2, child.contributors) # check redirect url assert_not_in('/contributors/', res.location) def test_new_project_returns_serialized_node_data(self): payload = { 'title': 'Im a real title' } res = self.app.post_json(self.url, payload, auth=self.creator.auth) assert_equal(res.status_code, 201) node = res.json['newNode'] assert_true(node) assert_equal(node['title'], 'Im a real title') def test_description_works(self): payload = { 'title': 'Im a real title', 'description': 'I describe things!' } res = self.app.post_json(self.url, payload, auth=self.creator.auth) assert_equal(res.status_code, 201) node = Node.load(res.json['projectUrl'].replace('/', '')) assert_true(node) assert_true(node.description, 'I describe things!') def test_can_template(self): other_node = ProjectFactory(creator=self.creator) payload = { 'title': 'Im a real title', 'template': other_node._id } res = self.app.post_json(self.url, payload, auth=self.creator.auth) assert_equal(res.status_code, 201) node = Node.load(res.json['projectUrl'].replace('/', '')) assert_true(node) assert_true(node.template_node, other_node) def test_project_before_template_no_addons(self): project = ProjectFactory() res = self.app.get(project.api_url_for('project_before_template'), auth=project.creator.auth) assert_equal(res.json['prompts'], []) def test_project_before_template_with_addons(self): project = ProjectWithAddonFactory(addon='box') res = self.app.get(project.api_url_for('project_before_template'), auth=project.creator.auth) assert_in('Box', res.json['prompts']) def test_project_new_from_template_non_user(self): project = ProjectFactory() url = api_url_for('project_new_from_template', nid=project._id) res = self.app.post(url, auth=None) assert_equal(res.status_code, 302) res2 = res.follow(expect_errors=True) assert_equal(res2.status_code, 301) assert_equal(res2.request.path, '/login') def test_project_new_from_template_public_non_contributor(self): non_contributor = AuthUserFactory() project = ProjectFactory(is_public=True) url = api_url_for('project_new_from_template', nid=project._id) res = self.app.post(url, auth=non_contributor.auth) assert_equal(res.status_code, 201) def test_project_new_from_template_contributor(self): contributor = AuthUserFactory() project = ProjectFactory(is_public=False) project.add_contributor(contributor) project.save() url = api_url_for('project_new_from_template', nid=project._id) res = self.app.post(url, auth=contributor.auth) assert_equal(res.status_code, 201) class TestUnconfirmedUserViews(OsfTestCase): 
def test_can_view_profile(self): user = UnconfirmedUserFactory() url = web_url_for('profile_view_id', uid=user._id) res = self.app.get(url, expect_errors=True) assert_equal(res.status_code, http.BAD_REQUEST) class TestStaticFileViews(OsfTestCase): def test_robots_dot_txt(self): res = self.app.get('/robots.txt') assert_equal(res.status_code, 200) assert_in('User-agent', res) assert_in('text/plain', res.headers['Content-Type']) def test_favicon(self): res = self.app.get('/favicon.ico') assert_equal(res.status_code, 200) assert_in('image/vnd.microsoft.icon', res.headers['Content-Type']) def test_getting_started_page(self): res = self.app.get('/getting-started/') assert_equal(res.status_code, 302) assert_equal(res.location, 'http://help.osf.io/') def test_help_redirect(self): res = self.app.get('/help/') assert_equal(res.status_code,302) class TestUserConfirmSignal(OsfTestCase): def test_confirm_user_signal_called_when_user_claims_account(self): unclaimed_user = UnconfirmedUserFactory() # unclaimed user has been invited to a project. referrer = UserFactory() project = ProjectFactory(creator=referrer) unclaimed_user.add_unclaimed_record(project, referrer, 'foo', email=fake.email()) unclaimed_user.save() token = unclaimed_user.get_unclaimed_record(project._primary_key)['token'] with capture_signals() as mock_signals: url = web_url_for('claim_user_form', pid=project._id, uid=unclaimed_user._id, token=token) payload = {'username': unclaimed_user.username, 'password': 'password', 'password2': 'password'} res = self.app.post(url, payload) assert_equal(res.status_code, 302) assert_equal(mock_signals.signals_sent(), set([auth.signals.user_confirmed])) def test_confirm_user_signal_called_when_user_confirms_email(self): unconfirmed_user = UnconfirmedUserFactory() unconfirmed_user.save() # user goes to email confirmation link token = unconfirmed_user.get_confirmation_token(unconfirmed_user.username) with capture_signals() as mock_signals: url = web_url_for('confirm_email_get', uid=unconfirmed_user._id, token=token) res = self.app.get(url) assert_equal(res.status_code, 302) assert_equal(mock_signals.signals_sent(), set([auth.signals.user_confirmed])) # copied from tests/test_comments.py class TestCommentViews(OsfTestCase): def setUp(self): super(TestCommentViews, self).setUp() self.project = ProjectFactory(is_public=True) self.user = AuthUserFactory() self.project.add_contributor(self.user) self.project.save() self.user.save() def test_view_project_comments_updates_user_comments_view_timestamp(self): url = self.project.api_url_for('update_comments_timestamp') res = self.app.put_json(url, { 'page': 'node', 'rootId': self.project._id }, auth=self.user.auth) self.user.reload() user_timestamp = self.user.comments_viewed_timestamp[self.project._id] view_timestamp = timezone.now() assert_datetime_equal(user_timestamp, view_timestamp) def test_confirm_non_contrib_viewers_dont_have_pid_in_comments_view_timestamp(self): non_contributor = AuthUserFactory() url = self.project.api_url_for('update_comments_timestamp') res = self.app.put_json(url, { 'page': 'node', 'rootId': self.project._id }, auth=self.user.auth) non_contributor.reload() assert_not_in(self.project._id, non_contributor.comments_viewed_timestamp) def test_view_comments_updates_user_comments_view_timestamp_files(self): osfstorage = self.project.get_addon('osfstorage') root_node = osfstorage.get_root() test_file = root_node.append_file('test_file') test_file.create_version(self.user, { 'object': '06d80e', 'service': 'cloud', 
osfstorage_settings.WATERBUTLER_RESOURCE: 'osf', }, { 'size': 1337, 'contentType': 'img/png' }).save() url = self.project.api_url_for('update_comments_timestamp') res = self.app.put_json(url, { 'page': 'files', 'rootId': test_file._id }, auth=self.user.auth) self.user.reload() user_timestamp = self.user.comments_viewed_timestamp[test_file._id] view_timestamp = timezone.now() assert_datetime_equal(user_timestamp, view_timestamp) # Regression test for https://openscience.atlassian.net/browse/OSF-5193 # moved from tests/test_comments.py def test_find_unread_includes_edited_comments(self): project = ProjectFactory() user = AuthUserFactory() project.add_contributor(user, save=True) comment = CommentFactory(node=project, user=project.creator) n_unread = Comment.find_n_unread(user=user, node=project, page='node') assert n_unread == 1 url = project.api_url_for('update_comments_timestamp') payload = {'page': 'node', 'rootId': project._id} self.app.put_json(url, payload, auth=user.auth) user.reload() n_unread = Comment.find_n_unread(user=user, node=project, page='node') assert n_unread == 0 # Edit previously read comment comment.edit( auth=Auth(project.creator), content='edited', save=True ) n_unread = Comment.find_n_unread(user=user, node=project, page='node') assert n_unread == 1 class TestResetPassword(OsfTestCase): def setUp(self): super(TestResetPassword, self).setUp() self.user = AuthUserFactory() self.another_user = AuthUserFactory() self.osf_key_v2 = generate_verification_key(verification_type='password') self.user.verification_key_v2 = self.osf_key_v2 self.user.verification_key = None self.user.save() self.get_url = web_url_for( 'reset_password_get', uid=self.user._id, token=self.osf_key_v2['token'] ) self.get_url_invalid_key = web_url_for( 'reset_password_get', uid=self.user._id, token=generate_verification_key() ) self.get_url_invalid_user = web_url_for( 'reset_password_get', uid=self.another_user._id, token=self.osf_key_v2['token'] ) # successfully load reset password page def test_reset_password_view_returns_200(self): res = self.app.get(self.get_url) assert_equal(res.status_code, 200) # raise http 400 error def test_reset_password_view_raises_400(self): res = self.app.get(self.get_url_invalid_key, expect_errors=True) assert_equal(res.status_code, 400) res = self.app.get(self.get_url_invalid_user, expect_errors=True) assert_equal(res.status_code, 400) self.user.verification_key_v2['expires'] = timezone.now() self.user.save() res = self.app.get(self.get_url, expect_errors=True) assert_equal(res.status_code, 400) # successfully reset password @mock.patch('framework.auth.cas.CasClient.service_validate') def test_can_reset_password_if_form_success(self, mock_service_validate): # load reset password page and submit email res = self.app.get(self.get_url) form = res.forms['resetPasswordForm'] form['password'] = 'newpassword' form['password2'] = 'newpassword' res = form.submit() # check request URL is /resetpassword with username and new verification_key_v2 token request_url_path = res.request.path assert_in('resetpassword', request_url_path) assert_in(self.user._id, request_url_path) assert_not_in(self.user.verification_key_v2['token'], request_url_path) # check verification_key_v2 for OSF is destroyed and verification_key for CAS is in place self.user.reload() assert_equal(self.user.verification_key_v2, {}) assert_not_equal(self.user.verification_key, None) # check redirection to CAS login with username and the new verification_key(CAS) assert_equal(res.status_code, 302) location = 
res.headers.get('Location') assert_true('login?service=' in location) assert_true('username={}'.format(self.user.username) in location) assert_true('verification_key={}'.format(self.user.verification_key) in location) # check if password was updated self.user.reload() assert_true(self.user.check_password('newpassword')) # check if verification_key is destroyed after service validation mock_service_validate.return_value = cas.CasResponse( authenticated=True, user=self.user._id, attributes={'accessToken': fake.md5()} ) ticket = fake.md5() service_url = 'http://accounts.osf.io/?ticket=' + ticket cas.make_response_from_ticket(ticket, service_url) self.user.reload() assert_equal(self.user.verification_key, None) # log users out before they land on reset password page def test_reset_password_logs_out_user(self): # visit reset password link while another user is logged in res = self.app.get(self.get_url, auth=self.another_user.auth) # check redirection to CAS logout assert_equal(res.status_code, 302) location = res.headers.get('Location') assert_not_in('reauth', location) assert_in('logout?service=', location) assert_in('resetpassword', location) @unittest.skip('Unskip when institution hiding code is reimplemented') class TestIndexView(OsfTestCase): def setUp(self): super(TestIndexView, self).setUp() self.inst_one = InstitutionFactory() self.inst_two = InstitutionFactory() self.inst_three = InstitutionFactory() self.inst_four = InstitutionFactory() self.inst_five = InstitutionFactory() self.user = AuthUserFactory() self.user.affiliated_institutions.add(self.inst_one) self.user.affiliated_institutions.add(self.inst_two) self.user.save() # tests 5 affiliated, non-registered, public projects for i in range(settings.INSTITUTION_DISPLAY_NODE_THRESHOLD): node = ProjectFactory(creator=self.user, is_public=True) node.affiliated_institutions.add(self.inst_one) node.save() # tests 4 affiliated, non-registered, public projects for i in range(settings.INSTITUTION_DISPLAY_NODE_THRESHOLD - 1): node = ProjectFactory(creator=self.user, is_public=True) node.affiliated_institutions.add(self.inst_two) node.save() # tests 5 affiliated, registered, public projects for i in range(settings.INSTITUTION_DISPLAY_NODE_THRESHOLD): registration = RegistrationFactory(creator=self.user, is_public=True) registration.affiliated_institutions.add(self.inst_three) registration.save() # tests 5 affiliated, non-registered public components for i in range(settings.INSTITUTION_DISPLAY_NODE_THRESHOLD): node = NodeFactory(creator=self.user, is_public=True) node.affiliated_institutions.add(self.inst_four) node.save() # tests 5 affiliated, non-registered, private projects for i in range(settings.INSTITUTION_DISPLAY_NODE_THRESHOLD): node = ProjectFactory(creator=self.user) node.affiliated_institutions.add(self.inst_five) node.save() def test_dashboard_institutions(self): dashboard_institutions = index()['dashboard_institutions'] assert_equal(len(dashboard_institutions), 1) assert_equal(dashboard_institutions[0]['id'], self.inst_one._id) assert_not_equal(dashboard_institutions[0]['id'], self.inst_two._id) assert_not_equal(dashboard_institutions[0]['id'], self.inst_three._id) assert_not_equal(dashboard_institutions[0]['id'], self.inst_four._id) assert_not_equal(dashboard_institutions[0]['id'], self.inst_five._id) class TestResolveGuid(OsfTestCase): def setUp(self): super(TestResolveGuid, self).setUp() def test_preprint_provider_without_domain(self): provider = PreprintProviderFactory(domain='') preprint = PreprintFactory(provider=provider) 
url = web_url_for('resolve_guid', _guid=True, guid=preprint._id) res = self.app.get(url) assert_equal(res.status_code, 200) assert_equal( res.request.path, '/{}/'.format(preprint._id) ) def test_preprint_provider_with_domain_without_redirect(self): domain = 'https://test.com/' provider = PreprintProviderFactory(_id='test', domain=domain, domain_redirect_enabled=False) preprint = PreprintFactory(provider=provider) url = web_url_for('resolve_guid', _guid=True, guid=preprint._id) res = self.app.get(url) assert_equal(res.status_code, 200) assert_equal( res.request.path, '/{}/'.format(preprint._id) ) def test_preprint_provider_with_domain_with_redirect(self): domain = 'https://test.com/' provider = PreprintProviderFactory(_id='test', domain=domain, domain_redirect_enabled=True) preprint = PreprintFactory(provider=provider) url = web_url_for('resolve_guid', _guid=True, guid=preprint._id) res = self.app.get(url) assert_is_redirect(res) assert_equal(res.status_code, 301) assert_equal( res.headers['location'], '{}{}/'.format(domain, preprint._id) ) assert_equal( res.request.path, '/{}/'.format(preprint._id) ) def test_preprint_provider_with_osf_domain(self): provider = PreprintProviderFactory(_id='osf', domain='https://osf.io/') preprint = PreprintFactory(provider=provider) url = web_url_for('resolve_guid', _guid=True, guid=preprint._id) res = self.app.get(url) assert_equal(res.status_code, 200) assert_equal( res.request.path, '/{}/'.format(preprint._id) ) class TestConfirmationViewBlockBingPreview(OsfTestCase): def setUp(self): super(TestConfirmationViewBlockBingPreview, self).setUp() self.user_agent = 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/534+ (KHTML, like Gecko) BingPreview/1.0b' # reset password link should fail with BingPreview def test_reset_password_get_returns_403(self): user = UserFactory() osf_key_v2 = generate_verification_key(verification_type='password') user.verification_key_v2 = osf_key_v2 user.verification_key = None user.save() reset_password_get_url = web_url_for( 'reset_password_get', uid=user._id, token=osf_key_v2['token'] ) res = self.app.get( reset_password_get_url, expect_errors=True, headers={ 'User-Agent': self.user_agent, } ) assert_equal(res.status_code, 403) # new user confirm account should fail with BingPreview def test_confirm_email_get_new_user_returns_403(self): user = User.create_unconfirmed('unconfirmed@cos.io', 'abCD12#$', 'Unconfirmed User') user.save() confirm_url = user.get_confirmation_url('unconfirmed@cos.io', external=False) res = self.app.get( confirm_url, expect_errors=True, headers={ 'User-Agent': self.user_agent, } ) assert_equal(res.status_code, 403) # confirmation for adding new email should fail with BingPreview def test_confirm_email_add_email_returns_403(self): user = UserFactory() user.add_unconfirmed_email('unconfirmed@cos.io') user.save() confirm_url = user.get_confirmation_url('unconfirmed@cos.io', external=False) + '?logout=1' res = self.app.get( confirm_url, expect_errors=True, headers={ 'User-Agent': self.user_agent, } ) assert_equal(res.status_code, 403) # confirmation for merging accounts should fail with BingPreview def test_confirm_email_merge_account_returns_403(self): user = UserFactory() user_to_be_merged = UserFactory() user.add_unconfirmed_email(user_to_be_merged.username) user.save() confirm_url = user.get_confirmation_url(user_to_be_merged.username, external=False) + '?logout=1' res = self.app.get( confirm_url, expect_errors=True, headers={ 'User-Agent': self.user_agent, } ) assert_equal(res.status_code, 403) # 
confirmation for new user claiming contributor should fail with BingPreview def test_claim_user_form_new_user(self): referrer = AuthUserFactory() project = ProjectFactory(creator=referrer, is_public=True) given_name = fake.name() given_email = fake.email() user = project.add_unregistered_contributor( fullname=given_name, email=given_email, auth=Auth(user=referrer) ) project.save() claim_url = user.get_claim_url(project._primary_key) res = self.app.get( claim_url, expect_errors=True, headers={ 'User-Agent': self.user_agent, } ) assert_equal(res.status_code, 403) # confirmation for existing user claiming contributor should fail with BingPreview def test_claim_user_form_existing_user(self): referrer = AuthUserFactory() project = ProjectFactory(creator=referrer, is_public=True) auth_user = AuthUserFactory() pending_user = project.add_unregistered_contributor( fullname=auth_user.fullname, email=None, auth=Auth(user=referrer) ) project.save() claim_url = pending_user.get_claim_url(project._primary_key) res = self.app.get( claim_url, auth = auth_user.auth, expect_errors=True, headers={ 'User-Agent': self.user_agent, } ) assert_equal(res.status_code, 403) # account creation confirmation for ORCiD login should fail with BingPreview def test_external_login_confirm_email_get_create_user(self): name, email = fake.name(), fake.email() provider_id = fake.ean() external_identity = { 'service': { provider_id: 'CREATE' } } user = User.create_unconfirmed( username=email, password=str(fake.password()), fullname=name, external_identity=external_identity, ) user.save() create_url = user.get_confirmation_url( user.username, external_id_provider='service', destination='dashboard' ) res = self.app.get( create_url, expect_errors=True, headers={ 'User-Agent': self.user_agent, } ) assert_equal(res.status_code, 403) # account linking confirmation for ORCiD login should fail with BingPreview def test_external_login_confirm_email_get_link_user(self): user = UserFactory() provider_id = fake.ean() user.external_identity = { 'service': { provider_id: 'LINK' } } user.add_unconfirmed_email(user.username, external_identity='service') user.save() link_url = user.get_confirmation_url( user.username, external_id_provider='service', destination='dashboard' ) res = self.app.get( link_url, expect_errors=True, headers={ 'User-Agent': self.user_agent, } ) assert_equal(res.status_code, 403) if __name__ == '__main__': unittest.main()
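# Illustrative sketch (not part of the osf.io test module above): those tests repeatedly
# patch website.mails.send_mail and then assert on call_count rather than sending real
# email. The minimal, self-contained version of that mock-patching pattern below uses
# only the standard library; Mailer and confirm_email are hypothetical stand-ins, not
# OSF code.
import unittest
from unittest import mock


class Mailer(object):
    """Hypothetical stand-in for a mail module such as website.mails."""

    @staticmethod
    def send_mail(to, template):
        raise RuntimeError('real mail should never be sent from tests')


def confirm_email(user_email, mailer=Mailer):
    # Pretend view logic: confirming an email triggers exactly one welcome mail
    # and then redirects (302), mirroring the assertions made in the tests above.
    mailer.send_mail(to=user_email, template='welcome')
    return 302


class TestConfirmEmailSketch(unittest.TestCase):

    @mock.patch.object(Mailer, 'send_mail')
    def test_confirm_sends_exactly_one_mail(self, mock_send):
        status = confirm_email('user@example.org')
        self.assertEqual(status, 302)
        self.assertEqual(mock_send.call_count, 1)
        mock_send.assert_called_once_with(to='user@example.org', template='welcome')


if __name__ == '__main__':
    unittest.main()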
hmoco/osf.io
tests/test_views.py
tests/base.py
# -*- coding: utf-8 -*-
#
# Copyright 2014 Thomas Amland <thomas.amland@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from time import time
from watchdog.utils.delayed_queue import DelayedQueue


def test_get():
    q = DelayedQueue(2)
    q.put("")
    inserted = time()
    q.get()
    elapsed = time() - inserted
    assert 2.01 > elapsed > 1.99
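# Illustrative sketch (independent of watchdog, not its implementation): test_get above
# asserts that DelayedQueue(2).get() blocks for roughly two seconds. The made-up
# SimpleDelayedQueue below illustrates that same contract -- items only become
# available once they are `delay` seconds old.
import threading
import time
from collections import deque


class SimpleDelayedQueue(object):
    """Illustrative delay queue: get() releases items only after `delay` seconds."""

    def __init__(self, delay):
        self.delay = delay
        self._items = deque()
        self._cv = threading.Condition()

    def put(self, item):
        with self._cv:
            self._items.append((item, time.monotonic()))
            self._cv.notify()

    def get(self):
        with self._cv:
            while not self._items:
                self._cv.wait()
            item, inserted = self._items.popleft()
        # Sleep out whatever remains of the delay window before handing the item back.
        remaining = self.delay - (time.monotonic() - inserted)
        if remaining > 0:
            time.sleep(remaining)
        return item


if __name__ == '__main__':
    q = SimpleDelayedQueue(2)
    started = time.monotonic()
    q.put('event')
    q.get()
    assert time.monotonic() - started > 1.99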
# -*- coding: utf-8 -*-
#
# Copyright 2014 Thomas Amland <thomas.amland@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import unicode_literals

import os
import time
import pytest
import logging
from tests import Queue
from functools import partial
from .shell import mkdir, touch, mv, rm, mkdtemp
from watchdog.utils import platform
from watchdog.utils.unicode_paths import str_cls
from watchdog.events import *
from watchdog.observers.api import ObservedWatch

pytestmark = pytest.mark.skipif(not platform.is_linux() and not platform.is_darwin(), reason="")

if platform.is_linux():
    from watchdog.observers.inotify import InotifyEmitter as Emitter
elif platform.is_darwin():
    from watchdog.observers.fsevents2 import FSEventsEmitter as Emitter

logging.basicConfig(level=logging.DEBUG)


def setup_function(function):
    global p, event_queue
    tmpdir = os.path.realpath(mkdtemp())
    p = partial(os.path.join, tmpdir)
    event_queue = Queue()


def start_watching(path=None):
    path = p('') if path is None else path
    global emitter
    emitter = Emitter(event_queue, ObservedWatch(path, recursive=True))
    if platform.is_darwin():
        # FSEvents will report old events (like create for mkdtemp in test
        # setup). Waiting for a considerable time seems to 'flush' the events.
        time.sleep(10)
    emitter.start()


def teardown_function(function):
    emitter.stop()
    emitter.join(5)
    rm(p(''), recursive=True)
    assert not emitter.is_alive()


def test_create():
    start_watching()
    open(p('a'), 'a').close()
    event = event_queue.get(timeout=5)[0]
    assert event.src_path == p('a')
    assert isinstance(event, FileCreatedEvent)
    event = event_queue.get(timeout=5)[0]
    assert os.path.normpath(event.src_path) == os.path.normpath(p(''))
    assert isinstance(event, DirModifiedEvent)


def test_delete():
    touch(p('a'))
    start_watching()
    rm(p('a'))
    event = event_queue.get(timeout=5)[0]
    assert event.src_path == p('a')
    assert isinstance(event, FileDeletedEvent)
    event = event_queue.get(timeout=5)[0]
    assert os.path.normpath(event.src_path) == os.path.normpath(p(''))
    assert isinstance(event, DirModifiedEvent)


def test_modify():
    touch(p('a'))
    start_watching()
    touch(p('a'))
    event = event_queue.get(timeout=5)[0]
    assert event.src_path == p('a')
    assert isinstance(event, FileModifiedEvent)


def test_move():
    mkdir(p('dir1'))
    mkdir(p('dir2'))
    touch(p('dir1', 'a'))
    start_watching()
    mv(p('dir1', 'a'), p('dir2', 'b'))
    event = event_queue.get(timeout=5)[0]
    assert event.src_path == p('dir1', 'a')
    assert event.dest_path == p('dir2', 'b')
    assert isinstance(event, FileMovedEvent)
    event = event_queue.get(timeout=5)[0]
    assert event.src_path == p('dir1')
    assert isinstance(event, DirModifiedEvent)
    event = event_queue.get(timeout=5)[0]
    assert event.src_path == p('dir2')
    assert isinstance(event, DirModifiedEvent)


def test_move_to():
    mkdir(p('dir1'))
    mkdir(p('dir2'))
    touch(p('dir1', 'a'))
    start_watching(p('dir2'))
    mv(p('dir1', 'a'), p('dir2', 'b'))
    event = event_queue.get(timeout=5)[0]
    assert isinstance(event, FileCreatedEvent)
    assert event.src_path == p('dir2', 'b')


def test_move_from():
    mkdir(p('dir1'))
    mkdir(p('dir2'))
    touch(p('dir1', 'a'))
    start_watching(p('dir1'))
    mv(p('dir1', 'a'), p('dir2', 'b'))
    event = event_queue.get(timeout=5)[0]
    assert isinstance(event, FileDeletedEvent)
    assert event.src_path == p('dir1', 'a')


def test_separate_consecutive_moves():
    mkdir(p('dir1'))
    touch(p('dir1', 'a'))
    touch(p('b'))
    start_watching(p('dir1'))
    mv(p('dir1', 'a'), p('c'))
    mv(p('b'), p('dir1', 'd'))
    event = event_queue.get(timeout=5)[0]
    assert isinstance(event, FileDeletedEvent)
    assert event.src_path == p('dir1', 'a')
    assert isinstance(event_queue.get(timeout=5)[0], DirModifiedEvent)
    event = event_queue.get(timeout=5)[0]
    assert isinstance(event, FileCreatedEvent)
    assert event.src_path == p('dir1', 'd')
    assert isinstance(event_queue.get(timeout=5)[0], DirModifiedEvent)


@pytest.mark.skipif(platform.is_linux(), reason="bug. inotify will deadlock")
def test_delete_self():
    mkdir(p('dir1'))
    start_watching(p('dir1'))
    rm(p('dir1'), True)
    event_queue.get(timeout=5)[0]


def test_passing_unicode_should_give_unicode():
    start_watching(p(''))
    touch(p('a'))
    event = event_queue.get(timeout=5)[0]
    assert isinstance(event.src_path, str_cls)


def test_passing_bytes_should_give_bytes():
    start_watching(p('').encode())
    touch(p('a'))
    event = event_queue.get(timeout=5)[0]
    assert isinstance(event.src_path, bytes)
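# Illustrative sketch (not part of the emitter test suite above): those tests drive
# platform-specific emitters directly. The self-contained example below shows the same
# collect-events-from-a-queue-and-assert pattern using watchdog's public Observer API;
# QueueingHandler and the temporary-directory setup are illustrative choices.
import os
import tempfile
from queue import Queue

from watchdog.events import FileSystemEventHandler
from watchdog.observers import Observer


class QueueingHandler(FileSystemEventHandler):
    """Pushes every received filesystem event onto a queue for later assertions."""

    def __init__(self, queue):
        super().__init__()
        self.queue = queue

    def on_any_event(self, event):
        self.queue.put(event)


if __name__ == '__main__':
    watched_dir = tempfile.mkdtemp()
    events = Queue()
    observer = Observer()
    observer.schedule(QueueingHandler(events), watched_dir, recursive=True)
    observer.start()
    try:
        # Creating a file should surface at least a creation event for that path.
        open(os.path.join(watched_dir, 'a'), 'w').close()
        event = events.get(timeout=5)
        print(type(event).__name__, event.src_path)
    finally:
        observer.stop()
        observer.join()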
mconstantin/watchdog
tests/test_emitter.py
tests/test_delayed_queue.py
# Licensed under a 3-clause BSD style license - see LICENSE.rst import sys import traceback import warnings import socketserver import xmlrpc.client as xmlrpc from xmlrpc.server import SimpleXMLRPCRequestHandler, SimpleXMLRPCServer from .constants import SAMP_ICON from .errors import SAMPWarning __all__ = [] class SAMPSimpleXMLRPCRequestHandler(SimpleXMLRPCRequestHandler): """ XMLRPC handler of Standard Profile requests. """ def do_GET(self): if self.path == '/samp/icon': self.send_response(200, 'OK') self.send_header('Content-Type', 'image/png') self.end_headers() self.wfile.write(SAMP_ICON) def do_POST(self): """ Handles the HTTP POST request. Attempts to interpret all HTTP POST requests as XML-RPC calls, which are forwarded to the server's ``_dispatch`` method for handling. """ # Check that the path is legal if not self.is_rpc_path_valid(): self.report_404() return try: # Get arguments by reading body of request. # We read this in chunks to avoid straining # socket.read(); around the 10 or 15Mb mark, some platforms # begin to have problems (bug #792570). max_chunk_size = 10 * 1024 * 1024 size_remaining = int(self.headers["content-length"]) L = [] while size_remaining: chunk_size = min(size_remaining, max_chunk_size) L.append(self.rfile.read(chunk_size)) size_remaining -= len(L[-1]) data = b''.join(L) params, method = xmlrpc.loads(data) if method == "samp.webhub.register": params = list(params) params.append(self.client_address) if 'Origin' in self.headers: params.append(self.headers.get('Origin')) else: params.append('unknown') params = tuple(params) data = xmlrpc.dumps(params, methodname=method) elif method in ('samp.hub.notify', 'samp.hub.notifyAll', 'samp.hub.call', 'samp.hub.callAll', 'samp.hub.callAndWait'): user = "unknown" if method == 'samp.hub.callAndWait': params[2]["host"] = self.address_string() params[2]["user"] = user else: params[-1]["host"] = self.address_string() params[-1]["user"] = user data = xmlrpc.dumps(params, methodname=method) data = self.decode_request_content(data) if data is None: return # response has been sent # In previous versions of SimpleXMLRPCServer, _dispatch # could be overridden in this class, instead of in # SimpleXMLRPCDispatcher. To maintain backwards compatibility, # check to see if a subclass implements _dispatch and dispatch # using that method if present. response = self.server._marshaled_dispatch( data, getattr(self, '_dispatch', None), self.path ) except Exception as e: # This should only happen if the module is buggy # internal error, report as HTTP server error self.send_response(500) # Send information about the exception if requested if hasattr(self.server, '_send_traceback_header') and \ self.server._send_traceback_header: self.send_header("X-exception", str(e)) trace = traceback.format_exc() trace = str(trace.encode('ASCII', 'backslashreplace'), 'ASCII') self.send_header("X-traceback", trace) self.send_header("Content-length", "0") self.end_headers() else: # got a valid XML RPC response self.send_response(200) self.send_header("Content-type", "text/xml") if self.encode_threshold is not None: if len(response) > self.encode_threshold: q = self.accept_encodings().get("gzip", 0) if q: try: response = xmlrpc.gzip_encode(response) self.send_header("Content-Encoding", "gzip") except NotImplementedError: pass self.send_header("Content-length", str(len(response))) self.end_headers() self.wfile.write(response) class ThreadingXMLRPCServer(socketserver.ThreadingMixIn, SimpleXMLRPCServer): """ Asynchronous multithreaded XMLRPC server. 
""" def __init__(self, addr, log=None, requestHandler=SAMPSimpleXMLRPCRequestHandler, logRequests=True, allow_none=True, encoding=None): self.log = log SimpleXMLRPCServer.__init__(self, addr, requestHandler, logRequests, allow_none, encoding) def handle_error(self, request, client_address): if self.log is None: socketserver.BaseServer.handle_error(self, request, client_address) else: warnings.warn("Exception happened during processing of request " "from {}: {}".format(client_address, sys.exc_info()[1]), SAMPWarning)
# Licensed under a 3-clause BSD style license - see LICENSE.rst import inspect import pytest import numpy as np from astropy.tests.helper import catch_warnings from astropy.utils.exceptions import AstropyUserWarning from astropy import units as u from astropy.nddata.nddata import NDData from astropy.nddata.decorators import support_nddata class CCDData(NDData): pass @support_nddata def wrapped_function_1(data, wcs=None, unit=None): return data, wcs, unit def test_pass_numpy(): data_in = np.array([1, 2, 3]) data_out, wcs_out, unit_out = wrapped_function_1(data=data_in) assert data_out is data_in assert wcs_out is None assert unit_out is None def test_pass_all_separate(): data_in = np.array([1, 2, 3]) wcs_in = "the wcs" unit_in = u.Jy data_out, wcs_out, unit_out = wrapped_function_1(data=data_in, wcs=wcs_in, unit=unit_in) assert data_out is data_in assert wcs_out is wcs_in assert unit_out is unit_in def test_pass_nddata(): data_in = np.array([1, 2, 3]) wcs_in = "the wcs" unit_in = u.Jy nddata_in = NDData(data_in, wcs=wcs_in, unit=unit_in) data_out, wcs_out, unit_out = wrapped_function_1(nddata_in) assert data_out is data_in assert wcs_out is wcs_in assert unit_out is unit_in def test_pass_nddata_and_explicit(): data_in = np.array([1, 2, 3]) wcs_in = "the wcs" unit_in = u.Jy unit_in_alt = u.mJy nddata_in = NDData(data_in, wcs=wcs_in, unit=unit_in) with catch_warnings() as w: data_out, wcs_out, unit_out = wrapped_function_1(nddata_in, unit=unit_in_alt) assert data_out is data_in assert wcs_out is wcs_in assert unit_out is unit_in_alt assert len(w) == 1 assert str(w[0].message) == ("Property unit has been passed explicitly and as " "an NDData property, using explicitly specified value") def test_pass_nddata_ignored(): data_in = np.array([1, 2, 3]) wcs_in = "the wcs" unit_in = u.Jy nddata_in = NDData(data_in, wcs=wcs_in, unit=unit_in, mask=[0, 1, 0]) with catch_warnings() as w: data_out, wcs_out, unit_out = wrapped_function_1(nddata_in) assert data_out is data_in assert wcs_out is wcs_in assert unit_out is unit_in assert len(w) == 1 assert str(w[0].message) == ("The following attributes were set on the data " "object, but will be ignored by the function: mask") def test_incorrect_first_argument(): with pytest.raises(ValueError) as exc: @support_nddata def wrapped_function_2(something, wcs=None, unit=None): pass assert exc.value.args[0] == "Can only wrap functions whose first positional argument is `data`" with pytest.raises(ValueError) as exc: @support_nddata def wrapped_function_3(something, data, wcs=None, unit=None): pass assert exc.value.args[0] == "Can only wrap functions whose first positional argument is `data`" with pytest.raises(ValueError) as exc: @support_nddata def wrapped_function_4(wcs=None, unit=None): pass assert exc.value.args[0] == "Can only wrap functions whose first positional argument is `data`" def test_wrap_function_no_kwargs(): @support_nddata def wrapped_function_5(data, other_data): return data data_in = np.array([1, 2, 3]) nddata_in = NDData(data_in) assert wrapped_function_5(nddata_in, [1, 2, 3]) is data_in def test_wrap_function_repack_valid(): @support_nddata(repack=True, returns=['data']) def wrapped_function_5(data, other_data): return data data_in = np.array([1, 2, 3]) nddata_in = NDData(data_in) nddata_out = wrapped_function_5(nddata_in, [1, 2, 3]) assert isinstance(nddata_out, NDData) assert nddata_out.data is data_in def test_wrap_function_accepts(): class MyData(NDData): pass @support_nddata(accepts=MyData) def wrapped_function_5(data, other_data): return 
data data_in = np.array([1, 2, 3]) nddata_in = NDData(data_in) mydata_in = MyData(data_in) assert wrapped_function_5(mydata_in, [1, 2, 3]) is data_in with pytest.raises(TypeError) as exc: wrapped_function_5(nddata_in, [1, 2, 3]) assert exc.value.args[0] == "Only NDData sub-classes that inherit from MyData can be used by this function" def test_wrap_preserve_signature_docstring(): @support_nddata def wrapped_function_6(data, wcs=None, unit=None): """ An awesome function """ pass if wrapped_function_6.__doc__ is not None: assert wrapped_function_6.__doc__.strip() == "An awesome function" signature = inspect.signature(wrapped_function_6) assert str(signature) == "(data, wcs=None, unit=None)" def test_setup_failures1(): # repack but no returns with pytest.raises(ValueError): support_nddata(repack=True) def test_setup_failures2(): # returns but no repack with pytest.raises(ValueError): support_nddata(returns=['data']) def test_setup_failures9(): # keeps but no repack with pytest.raises(ValueError): support_nddata(keeps=['unit']) def test_setup_failures3(): # same attribute in keeps and returns with pytest.raises(ValueError): support_nddata(repack=True, keeps=['mask'], returns=['data', 'mask']) def test_setup_failures4(): # function accepts *args with pytest.raises(ValueError): @support_nddata def test(data, *args): pass def test_setup_failures10(): # function accepts **kwargs with pytest.raises(ValueError): @support_nddata def test(data, **kwargs): pass def test_setup_failures5(): # function accepts *args (or **kwargs) with pytest.raises(ValueError): @support_nddata def test(data, *args): pass def test_setup_failures6(): # First argument is not data with pytest.raises(ValueError): @support_nddata def test(img): pass def test_setup_failures7(): # accepts CCDData but was given just an NDData with pytest.raises(TypeError): @support_nddata(accepts=CCDData) def test(data): pass test(NDData(np.ones((3, 3)))) def test_setup_failures8(): # function returns a different amount of arguments than specified. Using # NDData here so we don't get into troubles when creating a CCDData without # unit! with pytest.raises(ValueError): @support_nddata(repack=True, returns=['data', 'mask']) def test(data): return 10 test(NDData(np.ones((3, 3)))) # do NOT use CCDData here. def test_setup_failures11(): # function accepts no arguments with pytest.raises(ValueError): @support_nddata def test(): pass def test_setup_numpyarray_default(): # It should be possible (even if it's not advisable to use mutable # defaults) to have a numpy array as default value. @support_nddata def func(data, wcs=np.array([1, 2, 3])): return wcs def test_still_accepts_other_input(): @support_nddata(repack=True, returns=['data']) def test(data): return data assert isinstance(test(NDData(np.ones((3, 3)))), NDData) assert isinstance(test(10), int) assert isinstance(test([1, 2, 3]), list) def test_accepting_property_normal(): # Accepts a mask attribute and takes it from the input @support_nddata def test(data, mask=None): return mask ndd = NDData(np.ones((3, 3))) assert test(ndd) is None ndd._mask = np.zeros((3, 3)) assert np.all(test(ndd) == 0) # Use the explicitly given one (raises a Warning) with catch_warnings(AstropyUserWarning) as w: assert test(ndd, mask=10) == 10 assert len(w) == 1 def test_parameter_default_identical_to_explicit_passed_argument(): # If the default is identical to the explicitly passed argument this # should still raise a Warning and use the explicit one. 
@support_nddata def func(data, wcs=[1, 2, 3]): return wcs with catch_warnings(AstropyUserWarning) as w: assert func(NDData(1, wcs=[1, 2]), [1, 2, 3]) == [1, 2, 3] assert len(w) == 1 with catch_warnings(AstropyUserWarning) as w: assert func(NDData(1, wcs=[1, 2])) == [1, 2] assert len(w) == 0 def test_accepting_property_notexist(): # Accepts flags attribute but NDData doesn't have one @support_nddata def test(data, flags=10): return flags ndd = NDData(np.ones((3, 3))) test(ndd) def test_accepting_property_translated(): # Accepts a error attribute and we want to pass in uncertainty! @support_nddata(mask='masked') def test(data, masked=None): return masked ndd = NDData(np.ones((3, 3))) assert test(ndd) is None ndd._mask = np.zeros((3, 3)) assert np.all(test(ndd) == 0) # Use the explicitly given one (raises a Warning) with catch_warnings(AstropyUserWarning) as w: assert test(ndd, masked=10) == 10 assert len(w) == 1 def test_accepting_property_meta_empty(): # Meta is always set (OrderedDict) so it has a special case that it's # ignored if it's empty but not None @support_nddata def test(data, meta=None): return meta ndd = NDData(np.ones((3, 3))) assert test(ndd) is None ndd._meta = {'a': 10} assert test(ndd) == {'a': 10}
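The tests above exercise the ``support_nddata`` decorator; the short sketch below is an illustration added here (not part of the dataset row, and the function name ``downsample`` is hypothetical). It shows the pattern the tests rely on: attributes of an ``NDData`` input are unpacked into the wrapped function's keyword arguments, while plain arrays still pass through untouched.

import numpy as np
from astropy import units as u
from astropy.nddata.nddata import NDData
from astropy.nddata.decorators import support_nddata

@support_nddata
def downsample(data, unit=None, mask=None):
    # ``data`` arrives as the plain array; ``unit`` and ``mask`` are filled in
    # from the NDData instance unless the caller passes them explicitly.
    return data[::2], unit, mask

ndd = NDData(np.arange(6), unit=u.Jy, mask=np.zeros(6, dtype=bool))
values, unit, mask = downsample(ndd)                       # unit is u.Jy, mask comes from ndd
values2, unit2, _ = downsample(np.arange(6), unit=u.mJy)   # non-NDData input still works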
bsipocz/astropy
astropy/nddata/tests/test_decorators.py
astropy/samp/standard_profile.py
# Licensed under a 3-clause BSD style license - see LICENSE.rst import numpy as np from astropy import units as u from .wcs import WCS, WCSSUB_LONGITUDE, WCSSUB_LATITUDE, WCSSUB_CELESTIAL __doctest_skip__ = ['wcs_to_celestial_frame', 'celestial_frame_to_wcs'] __all__ = ['add_stokes_axis_to_wcs', 'celestial_frame_to_wcs', 'wcs_to_celestial_frame', 'proj_plane_pixel_scales', 'proj_plane_pixel_area', 'is_proj_plane_distorted', 'non_celestial_pixel_scales', 'skycoord_to_pixel', 'pixel_to_skycoord', 'custom_wcs_to_frame_mappings', 'custom_frame_to_wcs_mappings'] def add_stokes_axis_to_wcs(wcs, add_before_ind): """ Add a new Stokes axis that is uncorrelated with any other axes. Parameters ---------- wcs : `~astropy.wcs.WCS` The WCS to add to add_before_ind : int Index of the WCS to insert the new Stokes axis in front of. To add at the end, do add_before_ind = wcs.wcs.naxis The beginning is at position 0. Returns ------- A new `~astropy.wcs.WCS` instance with an additional axis """ inds = [i + 1 for i in range(wcs.wcs.naxis)] inds.insert(add_before_ind, 0) newwcs = wcs.sub(inds) newwcs.wcs.ctype[add_before_ind] = 'STOKES' newwcs.wcs.cname[add_before_ind] = 'STOKES' return newwcs def _wcs_to_celestial_frame_builtin(wcs): # Import astropy.coordinates here to avoid circular imports from astropy.coordinates import FK4, FK4NoETerms, FK5, ICRS, ITRS, Galactic # Import astropy.time here otherwise setup.py fails before extensions are compiled from astropy.time import Time if wcs.wcs.lng == -1 or wcs.wcs.lat == -1: return None radesys = wcs.wcs.radesys if np.isnan(wcs.wcs.equinox): equinox = None else: equinox = wcs.wcs.equinox xcoord = wcs.wcs.ctype[wcs.wcs.lng][:4] ycoord = wcs.wcs.ctype[wcs.wcs.lat][:4] # Apply logic from FITS standard to determine the default radesys if radesys == '' and xcoord == 'RA--' and ycoord == 'DEC-': if equinox is None: radesys = "ICRS" elif equinox < 1984.: radesys = "FK4" else: radesys = "FK5" if radesys == 'FK4': if equinox is not None: equinox = Time(equinox, format='byear') frame = FK4(equinox=equinox) elif radesys == 'FK4-NO-E': if equinox is not None: equinox = Time(equinox, format='byear') frame = FK4NoETerms(equinox=equinox) elif radesys == 'FK5': if equinox is not None: equinox = Time(equinox, format='jyear') frame = FK5(equinox=equinox) elif radesys == 'ICRS': frame = ICRS() else: if xcoord == 'GLON' and ycoord == 'GLAT': frame = Galactic() elif xcoord == 'TLON' and ycoord == 'TLAT': frame = ITRS(obstime=wcs.wcs.dateobs or None) else: frame = None return frame def _celestial_frame_to_wcs_builtin(frame, projection='TAN'): # Import astropy.coordinates here to avoid circular imports from astropy.coordinates import BaseRADecFrame, FK4, FK4NoETerms, FK5, ICRS, ITRS, Galactic # Create a 2-dimensional WCS wcs = WCS(naxis=2) if isinstance(frame, BaseRADecFrame): xcoord = 'RA--' ycoord = 'DEC-' if isinstance(frame, ICRS): wcs.wcs.radesys = 'ICRS' elif isinstance(frame, FK4NoETerms): wcs.wcs.radesys = 'FK4-NO-E' wcs.wcs.equinox = frame.equinox.byear elif isinstance(frame, FK4): wcs.wcs.radesys = 'FK4' wcs.wcs.equinox = frame.equinox.byear elif isinstance(frame, FK5): wcs.wcs.radesys = 'FK5' wcs.wcs.equinox = frame.equinox.jyear else: return None elif isinstance(frame, Galactic): xcoord = 'GLON' ycoord = 'GLAT' elif isinstance(frame, ITRS): xcoord = 'TLON' ycoord = 'TLAT' wcs.wcs.radesys = 'ITRS' wcs.wcs.dateobs = frame.obstime.utc.isot else: return None wcs.wcs.ctype = [xcoord + '-' + projection, ycoord + '-' + projection] return wcs WCS_FRAME_MAPPINGS = 
[[_wcs_to_celestial_frame_builtin]] FRAME_WCS_MAPPINGS = [[_celestial_frame_to_wcs_builtin]] class custom_wcs_to_frame_mappings: def __init__(self, mappings=[]): if hasattr(mappings, '__call__'): mappings = [mappings] WCS_FRAME_MAPPINGS.append(mappings) def __enter__(self): pass def __exit__(self, type, value, tb): WCS_FRAME_MAPPINGS.pop() # Backward-compatibility custom_frame_mappings = custom_wcs_to_frame_mappings class custom_frame_to_wcs_mappings: def __init__(self, mappings=[]): if hasattr(mappings, '__call__'): mappings = [mappings] FRAME_WCS_MAPPINGS.append(mappings) def __enter__(self): pass def __exit__(self, type, value, tb): FRAME_WCS_MAPPINGS.pop() def wcs_to_celestial_frame(wcs): """ For a given WCS, return the coordinate frame that matches the celestial component of the WCS. Parameters ---------- wcs : :class:`~astropy.wcs.WCS` instance The WCS to find the frame for Returns ------- frame : :class:`~astropy.coordinates.baseframe.BaseCoordinateFrame` subclass instance An instance of a :class:`~astropy.coordinates.baseframe.BaseCoordinateFrame` subclass instance that best matches the specified WCS. Notes ----- To extend this function to frames not defined in astropy.coordinates, you can write your own function which should take a :class:`~astropy.wcs.WCS` instance and should return either an instance of a frame, or `None` if no matching frame was found. You can register this function temporarily with:: >>> from astropy.wcs.utils import wcs_to_celestial_frame, custom_wcs_to_frame_mappings >>> with custom_wcs_to_frame_mappings(my_function): ... wcs_to_celestial_frame(...) """ for mapping_set in WCS_FRAME_MAPPINGS: for func in mapping_set: frame = func(wcs) if frame is not None: return frame raise ValueError("Could not determine celestial frame corresponding to " "the specified WCS object") def celestial_frame_to_wcs(frame, projection='TAN'): """ For a given coordinate frame, return the corresponding WCS object. Note that the returned WCS object has only the elements corresponding to coordinate frames set (e.g. ctype, equinox, radesys). 
Parameters ---------- frame : :class:`~astropy.coordinates.baseframe.BaseCoordinateFrame` subclass instance An instance of a :class:`~astropy.coordinates.baseframe.BaseCoordinateFrame` subclass instance for which to find the WCS projection : str Projection code to use in ctype, if applicable Returns ------- wcs : :class:`~astropy.wcs.WCS` instance The corresponding WCS object Examples -------- :: >>> from astropy.wcs.utils import celestial_frame_to_wcs >>> from astropy.coordinates import FK5 >>> frame = FK5(equinox='J2010') >>> wcs = celestial_frame_to_wcs(frame) >>> wcs.to_header() WCSAXES = 2 / Number of coordinate axes CRPIX1 = 0.0 / Pixel coordinate of reference point CRPIX2 = 0.0 / Pixel coordinate of reference point CDELT1 = 1.0 / [deg] Coordinate increment at reference point CDELT2 = 1.0 / [deg] Coordinate increment at reference point CUNIT1 = 'deg' / Units of coordinate increment and value CUNIT2 = 'deg' / Units of coordinate increment and value CTYPE1 = 'RA---TAN' / Right ascension, gnomonic projection CTYPE2 = 'DEC--TAN' / Declination, gnomonic projection CRVAL1 = 0.0 / [deg] Coordinate value at reference point CRVAL2 = 0.0 / [deg] Coordinate value at reference point LONPOLE = 180.0 / [deg] Native longitude of celestial pole LATPOLE = 0.0 / [deg] Native latitude of celestial pole RADESYS = 'FK5' / Equatorial coordinate system EQUINOX = 2010.0 / [yr] Equinox of equatorial coordinates Notes ----- To extend this function to frames not defined in astropy.coordinates, you can write your own function which should take a :class:`~astropy.coordinates.baseframe.BaseCoordinateFrame` subclass instance and a projection (given as a string) and should return either a WCS instance, or `None` if the WCS could not be determined. You can register this function temporarily with:: >>> from astropy.wcs.utils import celestial_frame_to_wcs, custom_frame_to_wcs_mappings >>> with custom_frame_to_wcs_mappings(my_function): ... celestial_frame_to_wcs(...) """ for mapping_set in FRAME_WCS_MAPPINGS: for func in mapping_set: wcs = func(frame, projection=projection) if wcs is not None: return wcs raise ValueError("Could not determine WCS corresponding to the specified " "coordinate frame.") def proj_plane_pixel_scales(wcs): """ For a WCS returns pixel scales along each axis of the image pixel at the ``CRPIX`` location once it is projected onto the "plane of intermediate world coordinates" as defined in `Greisen & Calabretta 2002, A&A, 395, 1061 <http://adsabs.harvard.edu/abs/2002A%26A...395.1061G>`_. .. note:: This function is concerned **only** about the transformation "image plane"->"projection plane" and **not** about the transformation "celestial sphere"->"projection plane"->"image plane". Therefore, this function ignores distortions arising due to non-linear nature of most projections. .. note:: In order to compute the scales corresponding to celestial axes only, make sure that the input `~astropy.wcs.WCS` object contains celestial axes only, e.g., by passing in the `~astropy.wcs.WCS.celestial` WCS object. Parameters ---------- wcs : `~astropy.wcs.WCS` A world coordinate system object. Returns ------- scale : `~numpy.ndarray` A vector (`~numpy.ndarray`) of projection plane increments corresponding to each pixel side (axis). 
The units of the returned results are the same as the units of `~astropy.wcs.Wcsprm.cdelt`, `~astropy.wcs.Wcsprm.crval`, and `~astropy.wcs.Wcsprm.cd` for the celestial WCS and can be obtained by inquiring the value of `~astropy.wcs.Wcsprm.cunit` property of the input `~astropy.wcs.WCS` WCS object. See Also -------- astropy.wcs.utils.proj_plane_pixel_area """ return np.sqrt((wcs.pixel_scale_matrix**2).sum(axis=0, dtype=float)) def proj_plane_pixel_area(wcs): """ For a **celestial** WCS (see `astropy.wcs.WCS.celestial`) returns pixel area of the image pixel at the ``CRPIX`` location once it is projected onto the "plane of intermediate world coordinates" as defined in `Greisen & Calabretta 2002, A&A, 395, 1061 <http://adsabs.harvard.edu/abs/2002A%26A...395.1061G>`_. .. note:: This function is concerned **only** about the transformation "image plane"->"projection plane" and **not** about the transformation "celestial sphere"->"projection plane"->"image plane". Therefore, this function ignores distortions arising due to non-linear nature of most projections. .. note:: In order to compute the area of pixels corresponding to celestial axes only, this function uses the `~astropy.wcs.WCS.celestial` WCS object of the input ``wcs``. This is different from the `~astropy.wcs.utils.proj_plane_pixel_scales` function that computes the scales for the axes of the input WCS itself. Parameters ---------- wcs : `~astropy.wcs.WCS` A world coordinate system object. Returns ------- area : float Area (in the projection plane) of the pixel at ``CRPIX`` location. The units of the returned result are the same as the units of the `~astropy.wcs.Wcsprm.cdelt`, `~astropy.wcs.Wcsprm.crval`, and `~astropy.wcs.Wcsprm.cd` for the celestial WCS and can be obtained by inquiring the value of `~astropy.wcs.Wcsprm.cunit` property of the `~astropy.wcs.WCS.celestial` WCS object. Raises ------ ValueError Pixel area is defined only for 2D pixels. Most likely the `~astropy.wcs.Wcsprm.cd` matrix of the `~astropy.wcs.WCS.celestial` WCS is not a square matrix of second order. Notes ----- Depending on the application, square root of the pixel area can be used to represent a single pixel scale of an equivalent square pixel whose area is equal to the area of a generally non-square pixel. See Also -------- astropy.wcs.utils.proj_plane_pixel_scales """ psm = wcs.celestial.pixel_scale_matrix if psm.shape != (2, 2): raise ValueError("Pixel area is defined only for 2D pixels.") return np.abs(np.linalg.det(psm)) def is_proj_plane_distorted(wcs, maxerr=1.0e-5): r""" For a WCS returns `False` if square image (detector) pixels stay square when projected onto the "plane of intermediate world coordinates" as defined in `Greisen & Calabretta 2002, A&A, 395, 1061 <http://adsabs.harvard.edu/abs/2002A%26A...395.1061G>`_. It will return `True` if transformation from image (detector) coordinates to the focal plane coordinates is non-orthogonal or if WCS contains non-linear (e.g., SIP) distortions. .. note:: Since this function is concerned **only** about the transformation "image plane"->"focal plane" and **not** about the transformation "celestial sphere"->"focal plane"->"image plane", this function ignores distortions arising due to non-linear nature of most projections. Let's denote by *C* either the original or the reconstructed (from ``PC`` and ``CDELT``) CD matrix. `is_proj_plane_distorted` verifies that the transformation from image (detector) coordinates to the focal plane coordinates is orthogonal using the following check: .. 
math:: \left \| \frac{C \cdot C^{\mathrm{T}}} {| det(C)|} - I \right \|_{\mathrm{max}} < \epsilon . Parameters ---------- wcs : `~astropy.wcs.WCS` World coordinate system object maxerr : float, optional Accuracy to which the CD matrix, **normalized** such that :math:`|det(CD)|=1`, should be close to being an orthogonal matrix as described in the above equation (see :math:`\epsilon`). Returns ------- distorted : bool Returns `True` if focal (projection) plane is distorted and `False` otherwise. """ cwcs = wcs.celestial return (not _is_cd_orthogonal(cwcs.pixel_scale_matrix, maxerr) or _has_distortion(cwcs)) def _is_cd_orthogonal(cd, maxerr): shape = cd.shape if not (len(shape) == 2 and shape[0] == shape[1]): raise ValueError("CD (or PC) matrix must be a 2D square matrix.") pixarea = np.abs(np.linalg.det(cd)) if (pixarea == 0.0): raise ValueError("CD (or PC) matrix is singular.") # NOTE: Technically, below we should use np.dot(cd, np.conjugate(cd.T)) # However, I am not aware of complex CD/PC matrices... I = np.dot(cd, cd.T) / pixarea cd_unitary_err = np.amax(np.abs(I - np.eye(shape[0]))) return (cd_unitary_err < maxerr) def non_celestial_pixel_scales(inwcs): """ Calculate the pixel scale along each axis of a non-celestial WCS, for example one with mixed spectral and spatial axes. Parameters ---------- inwcs : `~astropy.wcs.WCS` The world coordinate system object. Returns ------- scale : `numpy.ndarray` The pixel scale along each axis. """ if inwcs.is_celestial: raise ValueError("WCS is celestial, use celestial_pixel_scales instead") pccd = inwcs.pixel_scale_matrix if np.allclose(np.extract(1-np.eye(*pccd.shape), pccd), 0): return np.abs(np.diagonal(pccd))*u.deg else: raise ValueError("WCS is rotated, cannot determine consistent pixel scales") def _has_distortion(wcs): """ `True` if contains any SIP or image distortion components. """ return any(getattr(wcs, dist_attr) is not None for dist_attr in ['cpdis1', 'cpdis2', 'det2im1', 'det2im2', 'sip']) # TODO: in future, we should think about how the following two functions can be # integrated better into the WCS class. def skycoord_to_pixel(coords, wcs, origin=0, mode='all'): """ Convert a set of SkyCoord coordinates into pixels. Parameters ---------- coords : `~astropy.coordinates.SkyCoord` The coordinates to convert. wcs : `~astropy.wcs.WCS` The WCS transformation to use. origin : int Whether to return 0 or 1-based pixel coordinates. mode : 'all' or 'wcs' Whether to do the transformation including distortions (``'all'``) or only including only the core WCS transformation (``'wcs'``). Returns ------- xp, yp : `numpy.ndarray` The pixel coordinates See Also -------- astropy.coordinates.SkyCoord.from_pixel """ if _has_distortion(wcs) and wcs.naxis != 2: raise ValueError("Can only handle WCS with distortions for 2-dimensional WCS") # Keep only the celestial part of the axes, also re-orders lon/lat wcs = wcs.sub([WCSSUB_LONGITUDE, WCSSUB_LATITUDE]) if wcs.naxis != 2: raise ValueError("WCS should contain celestial component") # Check which frame the WCS uses frame = wcs_to_celestial_frame(wcs) # Check what unit the WCS needs xw_unit = u.Unit(wcs.wcs.cunit[0]) yw_unit = u.Unit(wcs.wcs.cunit[1]) # Convert positions to frame coords = coords.transform_to(frame) # Extract longitude and latitude. We first try and use lon/lat directly, # but if the representation is not spherical or unit spherical this will # fail. We should then force the use of the unit spherical # representation. 
We don't do that directly to make sure that we preserve # custom lon/lat representations if available. try: lon = coords.data.lon.to(xw_unit) lat = coords.data.lat.to(yw_unit) except AttributeError: lon = coords.spherical.lon.to(xw_unit) lat = coords.spherical.lat.to(yw_unit) # Convert to pixel coordinates if mode == 'all': xp, yp = wcs.all_world2pix(lon.value, lat.value, origin) elif mode == 'wcs': xp, yp = wcs.wcs_world2pix(lon.value, lat.value, origin) else: raise ValueError("mode should be either 'all' or 'wcs'") return xp, yp def pixel_to_skycoord(xp, yp, wcs, origin=0, mode='all', cls=None): """ Convert a set of pixel coordinates into a `~astropy.coordinates.SkyCoord` coordinate. Parameters ---------- xp, yp : float or `numpy.ndarray` The coordinates to convert. wcs : `~astropy.wcs.WCS` The WCS transformation to use. origin : int Whether to return 0 or 1-based pixel coordinates. mode : 'all' or 'wcs' Whether to do the transformation including distortions (``'all'``) or only including only the core WCS transformation (``'wcs'``). cls : class or None The class of object to create. Should be a `~astropy.coordinates.SkyCoord` subclass. If None, defaults to `~astropy.coordinates.SkyCoord`. Returns ------- coords : Whatever ``cls`` is (a subclass of `~astropy.coordinates.SkyCoord`) The celestial coordinates See Also -------- astropy.coordinates.SkyCoord.from_pixel """ # Import astropy.coordinates here to avoid circular imports from astropy.coordinates import SkyCoord, UnitSphericalRepresentation # we have to do this instead of actually setting the default to SkyCoord # because importing SkyCoord at the module-level leads to circular # dependencies. if cls is None: cls = SkyCoord if _has_distortion(wcs) and wcs.naxis != 2: raise ValueError("Can only handle WCS with distortions for 2-dimensional WCS") # Keep only the celestial part of the axes, also re-orders lon/lat wcs = wcs.sub([WCSSUB_LONGITUDE, WCSSUB_LATITUDE]) if wcs.naxis != 2: raise ValueError("WCS should contain celestial component") # Check which frame the WCS uses frame = wcs_to_celestial_frame(wcs) # Check what unit the WCS gives lon_unit = u.Unit(wcs.wcs.cunit[0]) lat_unit = u.Unit(wcs.wcs.cunit[1]) # Convert pixel coordinates to celestial coordinates if mode == 'all': lon, lat = wcs.all_pix2world(xp, yp, origin) elif mode == 'wcs': lon, lat = wcs.wcs_pix2world(xp, yp, origin) else: raise ValueError("mode should be either 'all' or 'wcs'") # Add units to longitude/latitude lon = lon * lon_unit lat = lat * lat_unit # Create a SkyCoord-like object data = UnitSphericalRepresentation(lon=lon, lat=lat) coords = cls(frame.realize_frame(data)) return coords
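A brief usage sketch for the helpers defined above, added for illustration; the CRVAL/CRPIX/CDELT values are arbitrary assumptions. It builds a celestial WCS from a coordinate frame and round-trips a position through pixel space with the two convenience functions.

from astropy import units as u
from astropy.coordinates import SkyCoord, FK5
from astropy.wcs.utils import (celestial_frame_to_wcs, skycoord_to_pixel,
                               pixel_to_skycoord)

wcs = celestial_frame_to_wcs(FK5(equinox='J2010'))   # RA---TAN / DEC--TAN, RADESYS = FK5
wcs.wcs.crval = [10.0, 20.0]                         # arbitrary reference point (deg)
wcs.wcs.crpix = [50.0, 50.0]
wcs.wcs.cdelt = [-0.01, 0.01]
wcs.wcs.cunit = ['deg', 'deg']
wcs.wcs.set()                                        # finalize the wcsprm struct

coord = SkyCoord(10.1 * u.deg, 20.05 * u.deg, frame=FK5(equinox='J2010'))
xp, yp = skycoord_to_pixel(coord, wcs, mode='wcs')   # world -> pixel
back = pixel_to_skycoord(xp, yp, wcs, mode='wcs')    # pixel -> world
assert back.separation(coord) < 1e-6 * u.deg         # core transformation round-trips cleanly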
bsipocz/astropy
astropy/nddata/tests/test_decorators.py
astropy/wcs/utils.py
# Licensed under a 3-clause BSD style license - see LICENSE.rst """ This module includes a fast iterator-based XML parser. """ # STDLIB import contextlib import io import sys # ASTROPY from astropy.utils import data __all__ = ['get_xml_iterator', 'get_xml_encoding', 'xml_readlines'] @contextlib.contextmanager def _convert_to_fd_or_read_function(fd): """ Returns a function suitable for streaming input, or a file object. This function is only useful if passing off to C code where: - If it's a real file object, we want to use it as a real C file object to avoid the Python overhead. - If it's not a real file object, it's much handier to just have a Python function to call. This is somewhat quirky behavior, of course, which is why it is private. For a more useful version of similar behavior, see `astropy.utils.misc.get_readable_fileobj`. Parameters ---------- fd : object May be: - a file object. If the file is uncompressed, this raw file object is returned verbatim. Otherwise, the read method is returned. - a function that reads from a stream, in which case it is returned verbatim. - a file path, in which case it is opened. Again, like a file object, if it's uncompressed, a raw file object is returned, otherwise its read method. - an object with a :meth:`read` method, in which case that method is returned. Returns ------- fd : context-dependent See above. """ if callable(fd): yield fd return with data.get_readable_fileobj(fd, encoding='binary') as new_fd: if sys.platform.startswith('win'): yield new_fd.read else: if isinstance(new_fd, io.FileIO): yield new_fd else: yield new_fd.read def _fast_iterparse(fd, buffersize=2 ** 10): from xml.parsers import expat if not callable(fd): read = fd.read else: read = fd queue = [] text = [] def start(name, attr): queue.append((True, name, attr, (parser.CurrentLineNumber, parser.CurrentColumnNumber))) del text[:] def end(name): queue.append((False, name, ''.join(text).strip(), (parser.CurrentLineNumber, parser.CurrentColumnNumber))) parser = expat.ParserCreate() parser.specified_attributes = True parser.StartElementHandler = start parser.EndElementHandler = end parser.CharacterDataHandler = text.append Parse = parser.Parse data = read(buffersize) while data: Parse(data, False) for elem in queue: yield elem del queue[:] data = read(buffersize) Parse('', True) for elem in queue: yield elem # Try to import the C version of the iterparser, otherwise fall back # to the Python implementation above. _slow_iterparse = _fast_iterparse try: from . import _iterparser _fast_iterparse = _iterparser.IterParser except ImportError: pass @contextlib.contextmanager def get_xml_iterator(source, _debug_python_based_parser=False): """ Returns an iterator over the elements of an XML file. The iterator doesn't ever build a tree, so it is much more memory and time efficient than the alternative in ``cElementTree``. Parameters ---------- fd : readable file-like object or read function Returns ------- parts : iterator The iterator returns 4-tuples (*start*, *tag*, *data*, *pos*): - *start*: when `True` is a start element event, otherwise an end element event. - *tag*: The name of the element - *data*: Depends on the value of *event*: - if *start* == `True`, data is a dictionary of attributes - if *start* == `False`, data is a string containing the text content of the element - *pos*: Tuple (*line*, *col*) indicating the source of the event. 
""" with _convert_to_fd_or_read_function(source) as fd: if _debug_python_based_parser: context = _slow_iterparse(fd) else: context = _fast_iterparse(fd) yield iter(context) def get_xml_encoding(source): """ Determine the encoding of an XML file by reading its header. Parameters ---------- source : readable file-like object, read function or str path Returns ------- encoding : str """ with get_xml_iterator(source) as iterator: start, tag, data, pos = next(iterator) if not start or tag != 'xml': raise OSError('Invalid XML file') # The XML spec says that no encoding === utf-8 return data.get('encoding') or 'utf-8' def xml_readlines(source): """ Get the lines from a given XML file. Correctly determines the encoding and always returns unicode. Parameters ---------- source : readable file-like object, read function or str path Returns ------- lines : list of unicode """ encoding = get_xml_encoding(source) with data.get_readable_fileobj(source, encoding=encoding) as input: input.seek(0) xml_lines = input.readlines() return xml_lines
bsipocz/astropy
astropy/nddata/tests/test_decorators.py
astropy/utils/xml/iterparser.py
# Licensed under a 3-clause BSD style license - see LICENSE.rst # -*- coding: utf-8 -*- import numpy as np from asdf import tagged from asdf import yamlutil from asdf.tags.core.ndarray import NDArrayType from astropy import table from astropy.io.misc.asdf.types import AstropyType, AstropyAsdfType class TableType: """ This class defines to_tree and from_tree methods that are used by both the AstropyTableType and the AsdfTableType defined below. The behavior is differentiated by the ``_compat`` class attribute. When ``_compat==True``, the behavior will conform to the table schema defined by the ASDF Standard. Otherwise, the behavior will conform to the custom table schema defined by Astropy. """ _compat = False @classmethod def from_tree(cls, node, ctx): # This is getting meta, guys meta = node.get('meta', {}) # This enables us to support files that use the table definition from # the ASDF Standard, rather than the custom one that Astropy defines. if cls._compat: columns = [ yamlutil.tagged_tree_to_custom_tree(col, ctx) for col in node['columns'] ] return table.Table(columns, meta=meta) if node.get('qtable', False): t = table.QTable(meta=node.get('meta', {})) else: t = table.Table(meta=node.get('meta', {})) for name, col in zip(node['colnames'], node['columns']): t[name] = yamlutil.tagged_tree_to_custom_tree(col, ctx) return t @classmethod def to_tree(cls, data, ctx): columns = [] for name in data.colnames: thiscol = data[name] column = yamlutil.custom_tree_to_tagged_tree(thiscol, ctx) columns.append(column) node = dict(columns=columns) # Files that use the table definition from the ASDF Standard (instead # of the one defined by Astropy) will not contain these fields if not cls._compat: node['colnames'] = data.colnames node['qtable'] = isinstance(data, table.QTable) if data.meta: node['meta'] = data.meta return node @classmethod def assert_equal(cls, old, new): assert old.meta == new.meta try: NDArrayType.assert_equal(np.array(old), np.array(new)) except (AttributeError, TypeError, ValueError): for col0, col1 in zip(old, new): try: NDArrayType.assert_equal(np.array(col0), np.array(col1)) except (AttributeError, TypeError, ValueError): assert col0 == col1 class AstropyTableType(TableType, AstropyType): """ This tag class reads and writes tables that conform to the custom schema that is defined by Astropy (in contrast to the one that is defined by the ASDF Standard). The primary reason for differentiating is to enable the support of Astropy mixin columns, which are not supported by the ASDF Standard. """ name = 'table/table' types = ['astropy.table.Table'] requires = ['astropy'] class AsdfTableType(TableType, AstropyAsdfType): """ This tag class allows Astropy to read (and write) ASDF files that use the table definition that is provided by the ASDF Standard (instead of the custom one defined by Astropy). This is important to maintain for cross-compatibility. 
""" name = 'core/table' types = ['astropy.table.Table'] requires = ['astropy'] _compat = True class ColumnType(AstropyAsdfType): name = 'core/column' types = ['astropy.table.Column', 'astropy.table.MaskedColumn'] requires = ['astropy'] handle_dynamic_subclasses = True @classmethod def from_tree(cls, node, ctx): data = yamlutil.tagged_tree_to_custom_tree( node['data'], ctx) name = node['name'] description = node.get('description') unit = node.get('unit') meta = node.get('meta', None) return table.Column( data=data._make_array(), name=name, description=description, unit=unit, meta=meta) @classmethod def to_tree(cls, data, ctx): node = { 'data': yamlutil.custom_tree_to_tagged_tree( data.data, ctx), 'name': data.name } if data.description: node['description'] = data.description if data.unit: node['unit'] = yamlutil.custom_tree_to_tagged_tree( data.unit, ctx) if data.meta: node['meta'] = data.meta return node @classmethod def assert_equal(cls, old, new): assert old.meta == new.meta assert old.description == new.description assert old.unit == new.unit NDArrayType.assert_equal(np.array(old), np.array(new))
bsipocz/astropy
astropy/nddata/tests/test_decorators.py
astropy/io/misc/asdf/tags/table/table.py
# -*- coding: utf-8 -*- # Licensed under a 3-clause BSD style license - see LICENSE.rst from copy import deepcopy import numpy as np from astropy import units as u from astropy.tests.helper import (catch_warnings, pytest, assert_quantity_allclose as assert_allclose) from astropy.utils import OrderedDescriptorContainer from astropy.utils.compat import NUMPY_LT_1_14 from astropy.utils.exceptions import AstropyWarning from astropy.coordinates import representation as r from astropy.coordinates.representation import REPRESENTATION_CLASSES from astropy.units import allclose from .test_representation import unitphysics # this fixture is used below def setup_function(func): func.REPRESENTATION_CLASSES_ORIG = deepcopy(REPRESENTATION_CLASSES) def teardown_function(func): REPRESENTATION_CLASSES.clear() REPRESENTATION_CLASSES.update(func.REPRESENTATION_CLASSES_ORIG) def test_frame_attribute_descriptor(): """ Unit tests of the Attribute descriptor """ from astropy.coordinates.attributes import Attribute class TestAttributes(metaclass=OrderedDescriptorContainer): attr_none = Attribute() attr_2 = Attribute(default=2) attr_3_attr2 = Attribute(default=3, secondary_attribute='attr_2') attr_none_attr2 = Attribute(default=None, secondary_attribute='attr_2') attr_none_nonexist = Attribute(default=None, secondary_attribute='nonexist') t = TestAttributes() # Defaults assert t.attr_none is None assert t.attr_2 == 2 assert t.attr_3_attr2 == 3 assert t.attr_none_attr2 == t.attr_2 assert t.attr_none_nonexist is None # No default and non-existent secondary attr # Setting values via '_'-prefixed internal vars (as would normally done in __init__) t._attr_none = 10 assert t.attr_none == 10 t._attr_2 = 20 assert t.attr_2 == 20 assert t.attr_3_attr2 == 3 assert t.attr_none_attr2 == t.attr_2 t._attr_none_attr2 = 40 assert t.attr_none_attr2 == 40 # Make sure setting values via public attribute fails with pytest.raises(AttributeError) as err: t.attr_none = 5 assert 'Cannot set frame attribute' in str(err.value) def test_frame_subclass_attribute_descriptor(): from astropy.coordinates.builtin_frames import FK4 from astropy.coordinates.attributes import Attribute, TimeAttribute from astropy.time import Time _EQUINOX_B1980 = Time('B1980', scale='tai') class MyFK4(FK4): # equinox inherited from FK4, obstime overridden, and newattr is new obstime = TimeAttribute(default=_EQUINOX_B1980) newattr = Attribute(default='newattr') mfk4 = MyFK4() assert mfk4.equinox.value == 'B1950.000' assert mfk4.obstime.value == 'B1980.000' assert mfk4.newattr == 'newattr' assert set(mfk4.get_frame_attr_names()) == set(['equinox', 'obstime', 'newattr']) mfk4 = MyFK4(equinox='J1980.0', obstime='J1990.0', newattr='world') assert mfk4.equinox.value == 'J1980.000' assert mfk4.obstime.value == 'J1990.000' assert mfk4.newattr == 'world' def test_create_data_frames(): from astropy.coordinates.builtin_frames import ICRS # from repr i1 = ICRS(r.SphericalRepresentation(1*u.deg, 2*u.deg, 3*u.kpc)) i2 = ICRS(r.UnitSphericalRepresentation(lon=1*u.deg, lat=2*u.deg)) # from preferred name i3 = ICRS(ra=1*u.deg, dec=2*u.deg, distance=3*u.kpc) i4 = ICRS(ra=1*u.deg, dec=2*u.deg) assert i1.data.lat == i3.data.lat assert i1.data.lon == i3.data.lon assert i1.data.distance == i3.data.distance assert i2.data.lat == i4.data.lat assert i2.data.lon == i4.data.lon # now make sure the preferred names work as properties assert_allclose(i1.ra, i3.ra) assert_allclose(i2.ra, i4.ra) assert_allclose(i1.distance, i3.distance) with pytest.raises(AttributeError): i1.ra = [11.]*u.deg def 
test_create_orderered_data(): from astropy.coordinates.builtin_frames import ICRS, Galactic, AltAz TOL = 1e-10*u.deg i = ICRS(1*u.deg, 2*u.deg) assert (i.ra - 1*u.deg) < TOL assert (i.dec - 2*u.deg) < TOL g = Galactic(1*u.deg, 2*u.deg) assert (g.l - 1*u.deg) < TOL assert (g.b - 2*u.deg) < TOL a = AltAz(1*u.deg, 2*u.deg) assert (a.az - 1*u.deg) < TOL assert (a.alt - 2*u.deg) < TOL with pytest.raises(TypeError): ICRS(1*u.deg, 2*u.deg, 1*u.deg, 2*u.deg) with pytest.raises(TypeError): sph = r.SphericalRepresentation(1*u.deg, 2*u.deg, 3*u.kpc) ICRS(sph, 1*u.deg, 2*u.deg) def test_create_nodata_frames(): from astropy.coordinates.builtin_frames import ICRS, FK4, FK5 i = ICRS() assert len(i.get_frame_attr_names()) == 0 f5 = FK5() assert f5.equinox == FK5.get_frame_attr_names()['equinox'] f4 = FK4() assert f4.equinox == FK4.get_frame_attr_names()['equinox'] # obstime is special because it's a property that uses equinox if obstime is not set assert f4.obstime in (FK4.get_frame_attr_names()['obstime'], FK4.get_frame_attr_names()['equinox']) def test_no_data_nonscalar_frames(): from astropy.coordinates.builtin_frames import AltAz from astropy.time import Time a1 = AltAz(obstime=Time('2012-01-01') + np.arange(10.) * u.day, temperature=np.ones((3, 1)) * u.deg_C) assert a1.obstime.shape == (3, 10) assert a1.temperature.shape == (3, 10) assert a1.shape == (3, 10) with pytest.raises(ValueError) as exc: AltAz(obstime=Time('2012-01-01') + np.arange(10.) * u.day, temperature=np.ones((3,)) * u.deg_C) assert 'inconsistent shapes' in str(exc.value) def test_frame_repr(): from astropy.coordinates.builtin_frames import ICRS, FK5 i = ICRS() assert repr(i) == '<ICRS Frame>' f5 = FK5() assert repr(f5).startswith('<FK5 Frame (equinox=') i2 = ICRS(ra=1*u.deg, dec=2*u.deg) i3 = ICRS(ra=1*u.deg, dec=2*u.deg, distance=3*u.kpc) assert repr(i2) == ('<ICRS Coordinate: (ra, dec) in deg\n' ' ({})>').format(' 1., 2.' if NUMPY_LT_1_14 else '1., 2.') assert repr(i3) == ('<ICRS Coordinate: (ra, dec, distance) in (deg, deg, kpc)\n' ' ({})>').format(' 1., 2., 3.' if NUMPY_LT_1_14 else '1., 2., 3.') # try with arrays i2 = ICRS(ra=[1.1, 2.1]*u.deg, dec=[2.1, 3.1]*u.deg) i3 = ICRS(ra=[1.1, 2.1]*u.deg, dec=[-15.6, 17.1]*u.deg, distance=[11., 21.]*u.kpc) assert repr(i2) == ('<ICRS Coordinate: (ra, dec) in deg\n' ' [{}]>').format('( 1.1, 2.1), ( 2.1, 3.1)' if NUMPY_LT_1_14 else '(1.1, 2.1), (2.1, 3.1)') if NUMPY_LT_1_14: assert repr(i3) == ('<ICRS Coordinate: (ra, dec, distance) in (deg, deg, kpc)\n' ' [( 1.1, -15.6, 11.), ( 2.1, 17.1, 21.)]>') else: assert repr(i3) == ('<ICRS Coordinate: (ra, dec, distance) in (deg, deg, kpc)\n' ' [(1.1, -15.6, 11.), (2.1, 17.1, 21.)]>') def test_frame_repr_vels(): from astropy.coordinates.builtin_frames import ICRS i = ICRS(ra=1*u.deg, dec=2*u.deg, pm_ra_cosdec=1*u.marcsec/u.yr, pm_dec=2*u.marcsec/u.yr) # unit comes out as mas/yr because of the preferred units defined in the # frame RepresentationMapping assert repr(i) == ('<ICRS Coordinate: (ra, dec) in deg\n' ' ({0})\n' ' (pm_ra_cosdec, pm_dec) in mas / yr\n' ' ({0})>').format(' 1., 2.' if NUMPY_LT_1_14 else '1., 2.') def test_converting_units(): import re from astropy.coordinates.baseframe import RepresentationMapping from astropy.coordinates.builtin_frames import ICRS, FK5 # this is a regular expression that with split (see below) removes what's # the decimal point to fix rounding problems rexrepr = re.compile(r'(.*?=\d\.).*?( .*?=\d\.).*?( .*)') # Use values that aren't subject to rounding down to X.9999... 
i2 = ICRS(ra=2.*u.deg, dec=2.*u.deg) i2_many = ICRS(ra=[2., 4.]*u.deg, dec=[2., -8.1]*u.deg) # converting from FK5 to ICRS and back changes the *internal* representation, # but it should still come out in the preferred form i4 = i2.transform_to(FK5).transform_to(ICRS) i4_many = i2_many.transform_to(FK5).transform_to(ICRS) ri2 = ''.join(rexrepr.split(repr(i2))) ri4 = ''.join(rexrepr.split(repr(i4))) assert ri2 == ri4 assert i2.data.lon.unit != i4.data.lon.unit # Internal repr changed ri2_many = ''.join(rexrepr.split(repr(i2_many))) ri4_many = ''.join(rexrepr.split(repr(i4_many))) assert ri2_many == ri4_many assert i2_many.data.lon.unit != i4_many.data.lon.unit # Internal repr changed # but that *shouldn't* hold if we turn off units for the representation class FakeICRS(ICRS): frame_specific_representation_info = { 'spherical': [RepresentationMapping('lon', 'ra', u.hourangle), RepresentationMapping('lat', 'dec', None), RepresentationMapping('distance', 'distance')] # should fall back to default of None unit } fi = FakeICRS(i4.data) ri2 = ''.join(rexrepr.split(repr(i2))) rfi = ''.join(rexrepr.split(repr(fi))) rfi = re.sub('FakeICRS', 'ICRS', rfi) # Force frame name to match assert ri2 != rfi # the attributes should also get the right units assert i2.dec.unit == i4.dec.unit # unless no/explicitly given units assert i2.dec.unit != fi.dec.unit assert i2.ra.unit != fi.ra.unit assert fi.ra.unit == u.hourangle def test_representation_info(): from astropy.coordinates.baseframe import RepresentationMapping from astropy.coordinates.builtin_frames import ICRS class NewICRS1(ICRS): frame_specific_representation_info = { r.SphericalRepresentation: [ RepresentationMapping('lon', 'rara', u.hourangle), RepresentationMapping('lat', 'decdec', u.degree), RepresentationMapping('distance', 'distance', u.kpc)] } i1 = NewICRS1(rara=10*u.degree, decdec=-12*u.deg, distance=1000*u.pc, pm_rara_cosdecdec=100*u.mas/u.yr, pm_decdec=17*u.mas/u.yr, radial_velocity=10*u.km/u.s) assert allclose(i1.rara, 10*u.deg) assert i1.rara.unit == u.hourangle assert allclose(i1.decdec, -12*u.deg) assert allclose(i1.distance, 1000*u.pc) assert i1.distance.unit == u.kpc assert allclose(i1.pm_rara_cosdecdec, 100*u.mas/u.yr) assert allclose(i1.pm_decdec, 17*u.mas/u.yr) # this should auto-set the names of UnitSpherical: i1.set_representation_cls(r.UnitSphericalRepresentation, s=r.UnitSphericalCosLatDifferential) assert allclose(i1.rara, 10*u.deg) assert allclose(i1.decdec, -12*u.deg) assert allclose(i1.pm_rara_cosdecdec, 100*u.mas/u.yr) assert allclose(i1.pm_decdec, 17*u.mas/u.yr) # For backwards compatibility, we also support the string name in the # representation info dictionary: class NewICRS2(ICRS): frame_specific_representation_info = { 'spherical': [ RepresentationMapping('lon', 'ang1', u.hourangle), RepresentationMapping('lat', 'ang2', u.degree), RepresentationMapping('distance', 'howfar', u.kpc)] } i2 = NewICRS2(ang1=10*u.degree, ang2=-12*u.deg, howfar=1000*u.pc) assert allclose(i2.ang1, 10*u.deg) assert i2.ang1.unit == u.hourangle assert allclose(i2.ang2, -12*u.deg) assert allclose(i2.howfar, 1000*u.pc) assert i2.howfar.unit == u.kpc # Test that the differential kwargs get overridden class NewICRS3(ICRS): frame_specific_representation_info = { r.SphericalCosLatDifferential: [ RepresentationMapping('d_lon_coslat', 'pm_ang1', u.hourangle/u.year), RepresentationMapping('d_lat', 'pm_ang2'), RepresentationMapping('d_distance', 'vlos', u.kpc/u.Myr)] } i3 = NewICRS3(lon=10*u.degree, lat=-12*u.deg, distance=1000*u.pc, pm_ang1=1*u.mas/u.yr, 
pm_ang2=2*u.mas/u.yr, vlos=100*u.km/u.s) assert allclose(i3.pm_ang1, 1*u.mas/u.yr) assert i3.pm_ang1.unit == u.hourangle/u.year assert allclose(i3.pm_ang2, 2*u.mas/u.yr) assert allclose(i3.vlos, 100*u.km/u.s) assert i3.vlos.unit == u.kpc/u.Myr def test_realizing(): from astropy.coordinates.builtin_frames import ICRS, FK5 from astropy.time import Time rep = r.SphericalRepresentation(1*u.deg, 2*u.deg, 3*u.kpc) i = ICRS() i2 = i.realize_frame(rep) assert not i.has_data assert i2.has_data f = FK5(equinox=Time('J2001')) f2 = f.realize_frame(rep) assert not f.has_data assert f2.has_data assert f2.equinox == f.equinox assert f2.equinox != FK5.get_frame_attr_names()['equinox'] # Check that a nicer error message is returned: with pytest.raises(TypeError) as excinfo: f.realize_frame(f.representation_type) assert ('Class passed as data instead of a representation' in excinfo.value.args[0]) def test_replicating(): from astropy.coordinates.builtin_frames import ICRS, AltAz from astropy.time import Time i = ICRS(ra=[1]*u.deg, dec=[2]*u.deg) icopy = i.replicate(copy=True) irepl = i.replicate(copy=False) i.data._lat[:] = 0*u.deg assert np.all(i.data.lat == irepl.data.lat) assert np.all(i.data.lat != icopy.data.lat) iclone = i.replicate_without_data() assert i.has_data assert not iclone.has_data aa = AltAz(alt=1*u.deg, az=2*u.deg, obstime=Time('J2000')) aaclone = aa.replicate_without_data(obstime=Time('J2001')) assert not aaclone.has_data assert aa.obstime != aaclone.obstime assert aa.pressure == aaclone.pressure assert aa.obswl == aaclone.obswl def test_getitem(): from astropy.coordinates.builtin_frames import ICRS rep = r.SphericalRepresentation( [1, 2, 3]*u.deg, [4, 5, 6]*u.deg, [7, 8, 9]*u.kpc) i = ICRS(rep) assert len(i.ra) == 3 iidx = i[1:] assert len(iidx.ra) == 2 iidx2 = i[0] assert iidx2.ra.isscalar def test_transform(): """ This test just makes sure the transform architecture works, but does *not* actually test all the builtin transforms themselves are accurate """ from astropy.coordinates.builtin_frames import ICRS, FK4, FK5, Galactic from astropy.time import Time i = ICRS(ra=[1, 2]*u.deg, dec=[3, 4]*u.deg) f = i.transform_to(FK5) i2 = f.transform_to(ICRS) assert i2.data.__class__ == r.UnitSphericalRepresentation assert_allclose(i.ra, i2.ra) assert_allclose(i.dec, i2.dec) i = ICRS(ra=[1, 2]*u.deg, dec=[3, 4]*u.deg, distance=[5, 6]*u.kpc) f = i.transform_to(FK5) i2 = f.transform_to(ICRS) assert i2.data.__class__ != r.UnitSphericalRepresentation f = FK5(ra=1*u.deg, dec=2*u.deg, equinox=Time('J2001')) f4 = f.transform_to(FK4) f4_2 = f.transform_to(FK4(equinox=f.equinox)) # make sure attributes are copied over correctly assert f4.equinox == FK4.get_frame_attr_names()['equinox'] assert f4_2.equinox == f.equinox # make sure self-transforms also work i = ICRS(ra=[1, 2]*u.deg, dec=[3, 4]*u.deg) i2 = i.transform_to(ICRS) assert_allclose(i.ra, i2.ra) assert_allclose(i.dec, i2.dec) f = FK5(ra=1*u.deg, dec=2*u.deg, equinox=Time('J2001')) f2 = f.transform_to(FK5) # default equinox, so should be *different* assert f2.equinox == FK5().equinox with pytest.raises(AssertionError): assert_allclose(f.ra, f2.ra) with pytest.raises(AssertionError): assert_allclose(f.dec, f2.dec) # finally, check Galactic round-tripping i1 = ICRS(ra=[1, 2]*u.deg, dec=[3, 4]*u.deg) i2 = i1.transform_to(Galactic).transform_to(ICRS) assert_allclose(i1.ra, i2.ra) assert_allclose(i1.dec, i2.dec) def test_transform_to_nonscalar_nodata_frame(): # https://github.com/astropy/astropy/pull/5254#issuecomment-241592353 from 
astropy.coordinates.builtin_frames import ICRS, FK5 from astropy.time import Time times = Time('2016-08-23') + np.linspace(0, 10, 12)*u.day coo1 = ICRS(ra=[[0.], [10.], [20.]]*u.deg, dec=[[-30.], [30.], [60.]]*u.deg) coo2 = coo1.transform_to(FK5(equinox=times)) assert coo2.shape == (3, 12) def test_sep(): from astropy.coordinates.builtin_frames import ICRS i1 = ICRS(ra=0*u.deg, dec=1*u.deg) i2 = ICRS(ra=0*u.deg, dec=2*u.deg) sep = i1.separation(i2) assert sep.deg == 1 i3 = ICRS(ra=[1, 2]*u.deg, dec=[3, 4]*u.deg, distance=[5, 6]*u.kpc) i4 = ICRS(ra=[1, 2]*u.deg, dec=[3, 4]*u.deg, distance=[4, 5]*u.kpc) sep3d = i3.separation_3d(i4) assert_allclose(sep3d.to(u.kpc), np.array([1, 1])*u.kpc) # check that it works even with velocities i5 = ICRS(ra=[1, 2]*u.deg, dec=[3, 4]*u.deg, distance=[5, 6]*u.kpc, pm_ra_cosdec=[1, 2]*u.mas/u.yr, pm_dec=[3, 4]*u.mas/u.yr, radial_velocity=[5, 6]*u.km/u.s) i6 = ICRS(ra=[1, 2]*u.deg, dec=[3, 4]*u.deg, distance=[7, 8]*u.kpc, pm_ra_cosdec=[1, 2]*u.mas/u.yr, pm_dec=[3, 4]*u.mas/u.yr, radial_velocity=[5, 6]*u.km/u.s) sep3d = i5.separation_3d(i6) assert_allclose(sep3d.to(u.kpc), np.array([2, 2])*u.kpc) # 3d separations of dimensionless distances should still work i7 = ICRS(ra=1*u.deg, dec=2*u.deg, distance=3*u.one) i8 = ICRS(ra=1*u.deg, dec=2*u.deg, distance=4*u.one) sep3d = i7.separation_3d(i8) assert_allclose(sep3d, 1*u.one) # but should fail with non-dimensionless with pytest.raises(ValueError): i7.separation_3d(i3) def test_time_inputs(): """ Test validation and conversion of inputs for equinox and obstime attributes. """ from astropy.time import Time from astropy.coordinates.builtin_frames import FK4 c = FK4(1 * u.deg, 2 * u.deg, equinox='J2001.5', obstime='2000-01-01 12:00:00') assert c.equinox == Time('J2001.5') assert c.obstime == Time('2000-01-01 12:00:00') with pytest.raises(ValueError) as err: c = FK4(1 * u.deg, 2 * u.deg, equinox=1.5) assert 'Invalid time input' in str(err.value) with pytest.raises(ValueError) as err: c = FK4(1 * u.deg, 2 * u.deg, obstime='hello') assert 'Invalid time input' in str(err.value) # A vector time should work if the shapes match, but we don't automatically # broadcast the basic data (just like time). 
FK4([1, 2] * u.deg, [2, 3] * u.deg, obstime=['J2000', 'J2001']) with pytest.raises(ValueError) as err: FK4(1 * u.deg, 2 * u.deg, obstime=['J2000', 'J2001']) assert 'shape' in str(err.value) def test_is_frame_attr_default(): """ Check that the `is_frame_attr_default` machinery works as expected """ from astropy.time import Time from astropy.coordinates.builtin_frames import FK5 c1 = FK5(ra=1*u.deg, dec=1*u.deg) c2 = FK5(ra=1*u.deg, dec=1*u.deg, equinox=FK5.get_frame_attr_names()['equinox']) c3 = FK5(ra=1*u.deg, dec=1*u.deg, equinox=Time('J2001.5')) assert c1.equinox == c2.equinox assert c1.equinox != c3.equinox assert c1.is_frame_attr_default('equinox') assert not c2.is_frame_attr_default('equinox') assert not c3.is_frame_attr_default('equinox') c4 = c1.realize_frame(r.UnitSphericalRepresentation(3*u.deg, 4*u.deg)) c5 = c2.realize_frame(r.UnitSphericalRepresentation(3*u.deg, 4*u.deg)) assert c4.is_frame_attr_default('equinox') assert not c5.is_frame_attr_default('equinox') def test_altaz_attributes(): from astropy.time import Time from astropy.coordinates import EarthLocation, AltAz aa = AltAz(1*u.deg, 2*u.deg) assert aa.obstime is None assert aa.location is None aa2 = AltAz(1*u.deg, 2*u.deg, obstime='J2000') assert aa2.obstime == Time('J2000') aa3 = AltAz(1*u.deg, 2*u.deg, location=EarthLocation(0*u.deg, 0*u.deg, 0*u.m)) assert isinstance(aa3.location, EarthLocation) def test_representation(): """ Test the getter and setter properties for `representation` """ from astropy.coordinates.builtin_frames import ICRS # Create the frame object. icrs = ICRS(ra=1*u.deg, dec=1*u.deg) data = icrs.data # Create some representation objects. icrs_cart = icrs.cartesian icrs_spher = icrs.spherical icrs_cyl = icrs.cylindrical # Testing when `_representation` set to `CartesianRepresentation`. icrs.representation_type = r.CartesianRepresentation assert icrs.representation_type == r.CartesianRepresentation assert icrs_cart.x == icrs.x assert icrs_cart.y == icrs.y assert icrs_cart.z == icrs.z assert icrs.data == data # Testing that an ICRS object in CartesianRepresentation must not have spherical attributes. for attr in ('ra', 'dec', 'distance'): with pytest.raises(AttributeError) as err: getattr(icrs, attr) assert 'object has no attribute' in str(err.value) # Testing when `_representation` set to `CylindricalRepresentation`. icrs.representation_type = r.CylindricalRepresentation assert icrs.representation_type == r.CylindricalRepresentation assert icrs.data == data # Testing setter input using text argument for spherical. icrs.representation_type = 'spherical' assert icrs.representation_type is r.SphericalRepresentation assert icrs_spher.lat == icrs.dec assert icrs_spher.lon == icrs.ra assert icrs_spher.distance == icrs.distance assert icrs.data == data # Testing that an ICRS object in SphericalRepresentation must not have cartesian attributes. for attr in ('x', 'y', 'z'): with pytest.raises(AttributeError) as err: getattr(icrs, attr) assert 'object has no attribute' in str(err.value) # Testing setter input using text argument for cylindrical. icrs.representation_type = 'cylindrical' assert icrs.representation_type is r.CylindricalRepresentation assert icrs_cyl.rho == icrs.rho assert icrs_cyl.phi == icrs.phi assert icrs_cyl.z == icrs.z assert icrs.data == data # Testing that an ICRS object in CylindricalRepresentation must not have spherical attributes. 
for attr in ('ra', 'dec', 'distance'): with pytest.raises(AttributeError) as err: getattr(icrs, attr) assert 'object has no attribute' in str(err.value) with pytest.raises(ValueError) as err: icrs.representation_type = 'WRONG' assert 'but must be a BaseRepresentation class' in str(err.value) with pytest.raises(ValueError) as err: icrs.representation_type = ICRS assert 'but must be a BaseRepresentation class' in str(err.value) def test_represent_as(): from astropy.coordinates.builtin_frames import ICRS icrs = ICRS(ra=1*u.deg, dec=1*u.deg) cart1 = icrs.represent_as('cartesian') cart2 = icrs.represent_as(r.CartesianRepresentation) cart1.x == cart2.x cart1.y == cart2.y cart1.z == cart2.z # now try with velocities icrs = ICRS(ra=0*u.deg, dec=0*u.deg, distance=10*u.kpc, pm_ra_cosdec=0*u.mas/u.yr, pm_dec=0*u.mas/u.yr, radial_velocity=1*u.km/u.s) # single string rep2 = icrs.represent_as('cylindrical') assert isinstance(rep2, r.CylindricalRepresentation) assert isinstance(rep2.differentials['s'], r.CylindricalDifferential) # single class with positional in_frame_units, verify that warning raised with catch_warnings() as w: icrs.represent_as(r.CylindricalRepresentation, False) assert len(w) == 1 assert w[0].category == AstropyWarning assert 'argument position' in str(w[0].message) # TODO: this should probably fail in the future once we figure out a better # workaround for dealing with UnitSphericalRepresentation's with # RadialDifferential's # two classes # rep2 = icrs.represent_as(r.CartesianRepresentation, # r.SphericalCosLatDifferential) # assert isinstance(rep2, r.CartesianRepresentation) # assert isinstance(rep2.differentials['s'], r.SphericalCosLatDifferential) with pytest.raises(ValueError): icrs.represent_as('odaigahara') def test_shorthand_representations(): from astropy.coordinates.builtin_frames import ICRS rep = r.CartesianRepresentation([1, 2, 3]*u.pc) dif = r.CartesianDifferential([1, 2, 3]*u.km/u.s) rep = rep.with_differentials(dif) icrs = ICRS(rep) cyl = icrs.cylindrical assert isinstance(cyl, r.CylindricalRepresentation) assert isinstance(cyl.differentials['s'], r.CylindricalDifferential) sph = icrs.spherical assert isinstance(sph, r.SphericalRepresentation) assert isinstance(sph.differentials['s'], r.SphericalDifferential) sph = icrs.sphericalcoslat assert isinstance(sph, r.SphericalRepresentation) assert isinstance(sph.differentials['s'], r.SphericalCosLatDifferential) def test_dynamic_attrs(): from astropy.coordinates.builtin_frames import ICRS c = ICRS(1*u.deg, 2*u.deg) assert 'ra' in dir(c) assert 'dec' in dir(c) with pytest.raises(AttributeError) as err: c.blahblah assert "object has no attribute 'blahblah'" in str(err.value) with pytest.raises(AttributeError) as err: c.ra = 1 assert "Cannot set any frame attribute" in str(err.value) c.blahblah = 1 assert c.blahblah == 1 def test_nodata_error(): from astropy.coordinates.builtin_frames import ICRS i = ICRS() with pytest.raises(ValueError) as excinfo: i.data assert 'does not have associated data' in str(excinfo.value) def test_len0_data(): from astropy.coordinates.builtin_frames import ICRS i = ICRS([]*u.deg, []*u.deg) assert i.has_data repr(i) def test_quantity_attributes(): from astropy.coordinates.builtin_frames import GCRS # make sure we can create a GCRS frame with valid inputs GCRS(obstime='J2002', obsgeoloc=[1, 2, 3]*u.km, obsgeovel=[4, 5, 6]*u.km/u.s) # make sure it fails for invalid lovs or vels with pytest.raises(TypeError): GCRS(obsgeoloc=[1, 2, 3]) # no unit with pytest.raises(u.UnitsError): GCRS(obsgeoloc=[1, 2, 
3]*u.km/u.s) # incorrect unit with pytest.raises(ValueError): GCRS(obsgeoloc=[1, 3]*u.km) # incorrect shape @pytest.mark.remote_data def test_eloc_attributes(): from astropy.coordinates import AltAz, ITRS, GCRS, EarthLocation el = EarthLocation(lon=12.3*u.deg, lat=45.6*u.deg, height=1*u.km) it = ITRS(r.SphericalRepresentation(lon=12.3*u.deg, lat=45.6*u.deg, distance=1*u.km)) gc = GCRS(ra=12.3*u.deg, dec=45.6*u.deg, distance=6375*u.km) el1 = AltAz(location=el).location assert isinstance(el1, EarthLocation) # these should match *exactly* because the EarthLocation assert el1.lat == el.lat assert el1.lon == el.lon assert el1.height == el.height el2 = AltAz(location=it).location assert isinstance(el2, EarthLocation) # these should *not* match because giving something in Spherical ITRS is # *not* the same as giving it as an EarthLocation: EarthLocation is on an # elliptical geoid. So the longitude should match (because flattening is # only along the z-axis), but latitude should not. Also, height is relative # to the *surface* in EarthLocation, but the ITRS distance is relative to # the center of the Earth assert not allclose(el2.lat, it.spherical.lat) assert allclose(el2.lon, it.spherical.lon) assert el2.height < -6000*u.km el3 = AltAz(location=gc).location # GCRS inputs implicitly get transformed to ITRS and then onto # EarthLocation's elliptical geoid. So both lat and lon shouldn't match assert isinstance(el3, EarthLocation) assert not allclose(el3.lat, gc.dec) assert not allclose(el3.lon, gc.ra) assert np.abs(el3.height) < 500*u.km def test_equivalent_frames(): from astropy.coordinates import SkyCoord from astropy.coordinates.builtin_frames import ICRS, FK4, FK5, AltAz i = ICRS() i2 = ICRS(1*u.deg, 2*u.deg) assert i.is_equivalent_frame(i) assert i.is_equivalent_frame(i2) with pytest.raises(TypeError): assert i.is_equivalent_frame(10) with pytest.raises(TypeError): assert i2.is_equivalent_frame(SkyCoord(i2)) f0 = FK5() # this J2000 is TT f1 = FK5(equinox='J2000') f2 = FK5(1*u.deg, 2*u.deg, equinox='J2000') f3 = FK5(equinox='J2010') f4 = FK4(equinox='J2010') assert f1.is_equivalent_frame(f1) assert not i.is_equivalent_frame(f1) assert f0.is_equivalent_frame(f1) assert f1.is_equivalent_frame(f2) assert not f1.is_equivalent_frame(f3) assert not f3.is_equivalent_frame(f4) aa1 = AltAz() aa2 = AltAz(obstime='J2010') assert aa2.is_equivalent_frame(aa2) assert not aa1.is_equivalent_frame(i) assert not aa1.is_equivalent_frame(aa2) def test_representation_subclass(): # Regression test for #3354 from astropy.coordinates.builtin_frames import FK5 # Normally when instantiating a frame without a distance the frame will try # and use UnitSphericalRepresentation internally instead of # SphericalRepresentation. frame = FK5(representation_type=r.SphericalRepresentation, ra=32 * u.deg, dec=20 * u.deg) assert type(frame._data) == r.UnitSphericalRepresentation assert frame.representation_type == r.SphericalRepresentation # If using a SphericalRepresentation class this used to not work, so we # test here that this is now fixed. class NewSphericalRepresentation(r.SphericalRepresentation): attr_classes = r.SphericalRepresentation.attr_classes frame = FK5(representation_type=NewSphericalRepresentation, lon=32 * u.deg, lat=20 * u.deg) assert type(frame._data) == r.UnitSphericalRepresentation assert frame.representation_type == NewSphericalRepresentation # A similar issue then happened in __repr__ with subclasses of # SphericalRepresentation. 
assert repr(frame) == ("<FK5 Coordinate (equinox=J2000.000): (lon, lat) in deg\n" " ({})>").format(' 32., 20.' if NUMPY_LT_1_14 else '32., 20.') # A more subtle issue is when specifying a custom # UnitSphericalRepresentation subclass for the data and # SphericalRepresentation or a subclass for the representation. class NewUnitSphericalRepresentation(r.UnitSphericalRepresentation): attr_classes = r.UnitSphericalRepresentation.attr_classes def __repr__(self): return "<NewUnitSphericalRepresentation: spam spam spam>" frame = FK5(NewUnitSphericalRepresentation(lon=32 * u.deg, lat=20 * u.deg), representation_type=NewSphericalRepresentation) assert repr(frame) == "<FK5 Coordinate (equinox=J2000.000): spam spam spam>" def test_getitem_representation(): """ Make sure current representation survives __getitem__ even if different from data representation. """ from astropy.coordinates.builtin_frames import ICRS c = ICRS([1, 1] * u.deg, [2, 2] * u.deg) c.representation_type = 'cartesian' assert c[0].representation_type is r.CartesianRepresentation def test_component_error_useful(): """ Check that a data-less frame gives useful error messages about not having data when the attributes asked for are possible coordinate components """ from astropy.coordinates.builtin_frames import ICRS i = ICRS() with pytest.raises(ValueError) as excinfo: i.ra assert 'does not have associated data' in str(excinfo.value) with pytest.raises(AttributeError) as excinfo1: i.foobar with pytest.raises(AttributeError) as excinfo2: i.lon # lon is *not* the component name despite being the underlying representation's name assert "object has no attribute 'foobar'" in str(excinfo1.value) assert "object has no attribute 'lon'" in str(excinfo2.value) def test_cache_clear(): from astropy.coordinates.builtin_frames import ICRS i = ICRS(1*u.deg, 2*u.deg) # Add an in frame units version of the rep to the cache. repr(i) assert len(i.cache['representation']) == 2 i.cache.clear() assert len(i.cache['representation']) == 0 def test_inplace_array(): from astropy.coordinates.builtin_frames import ICRS i = ICRS([[1, 2], [3, 4]]*u.deg, [[10, 20], [30, 40]]*u.deg) # Add an in frame units version of the rep to the cache. repr(i) # Check that repr() has added a rep to the cache assert len(i.cache['representation']) == 2 # Modify the data i.data.lon[:, 0] = [100, 200]*u.deg # Clear the cache i.cache.clear() # This will use a second (potentially cached rep) assert_allclose(i.ra, [[100, 2], [200, 4]]*u.deg) assert_allclose(i.dec, [[10, 20], [30, 40]]*u.deg) def test_inplace_change(): from astropy.coordinates.builtin_frames import ICRS i = ICRS(1*u.deg, 2*u.deg) # Add an in frame units version of the rep to the cache. repr(i) # Check that repr() has added a rep to the cache assert len(i.cache['representation']) == 2 # Modify the data i.data.lon[()] = 10*u.deg # Clear the cache i.cache.clear() # This will use a second (potentially cached rep) assert i.ra == 10 * u.deg assert i.dec == 2 * u.deg def test_representation_with_multiple_differentials(): from astropy.coordinates.builtin_frames import ICRS dif1 = r.CartesianDifferential([1, 2, 3]*u.km/u.s) dif2 = r.CartesianDifferential([1, 2, 3]*u.km/u.s**2) rep = r.CartesianRepresentation([1, 2, 3]*u.pc, differentials={'s': dif1, 's2': dif2}) # check warning is raised for a scalar with pytest.raises(ValueError): ICRS(rep) def test_representation_arg_backwards_compatibility(): # TODO: this test can be removed when the `representation` argument is # removed from the BaseCoordinateFrame initializer. 
from astropy.coordinates.builtin_frames import ICRS c1 = ICRS(x=1*u.pc, y=2*u.pc, z=3*u.pc, representation_type=r.CartesianRepresentation) c2 = ICRS(x=1*u.pc, y=2*u.pc, z=3*u.pc, representation_type=r.CartesianRepresentation) c3 = ICRS(x=1*u.pc, y=2*u.pc, z=3*u.pc, representation_type='cartesian') assert c1.x == c2.x assert c1.y == c2.y assert c1.z == c2.z assert c1.x == c3.x assert c1.y == c3.y assert c1.z == c3.z assert c1.representation_type == c1.representation_type with pytest.raises(ValueError): ICRS(x=1*u.pc, y=2*u.pc, z=3*u.pc, representation_type='cartesian', representation='cartesian') def test_missing_component_error_names(): """ This test checks that the component names are frame component names, not representation or differential names, when referenced in an exception raised when not passing in enough data. For example: ICRS(ra=10*u.deg) should state: TypeError: __init__() missing 1 required positional argument: 'dec' """ from astropy.coordinates.builtin_frames import ICRS with pytest.raises(TypeError) as e: ICRS(ra=150 * u.deg) assert "missing 1 required positional argument: 'dec'" in str(e.value) with pytest.raises(TypeError) as e: ICRS(ra=150*u.deg, dec=-11*u.deg, pm_ra=100*u.mas/u.yr, pm_dec=10*u.mas/u.yr) assert "pm_ra_cosdec" in str(e.value) def test_non_spherical_representation_unit_creation(unitphysics): from astropy.coordinates.builtin_frames import ICRS class PhysicsICRS(ICRS): default_representation = r.PhysicsSphericalRepresentation pic = PhysicsICRS(phi=1*u.deg, theta=25*u.deg, r=1*u.kpc) assert isinstance(pic.data, r.PhysicsSphericalRepresentation) picu = PhysicsICRS(phi=1*u.deg, theta=25*u.deg) assert isinstance(picu.data, unitphysics) def test_attribute_repr(): from astropy.coordinates.attributes import Attribute from astropy.coordinates.baseframe import BaseCoordinateFrame class Spam: def _astropy_repr_in_frame(self): return "TEST REPR" class TestFrame(BaseCoordinateFrame): attrtest = Attribute(default=Spam()) assert "TEST REPR" in repr(TestFrame()) def test_component_names_repr(): from astropy.coordinates.baseframe import BaseCoordinateFrame, RepresentationMapping # Frame class with new component names that includes a name swap class NameChangeFrame(BaseCoordinateFrame): default_representation = r.PhysicsSphericalRepresentation frame_specific_representation_info = { r.PhysicsSphericalRepresentation: [ RepresentationMapping('phi', 'theta', u.deg), RepresentationMapping('theta', 'phi', u.arcsec), RepresentationMapping('r', 'JUSTONCE', u.AU)] } frame = NameChangeFrame(0*u.deg, 0*u.arcsec, 0*u.AU) # Check for the new names in the Frame repr assert "(theta, phi, JUSTONCE)" in repr(frame) # Check that the letter "r" has not been replaced more than once in the Frame repr assert repr(frame).count("JUSTONCE") == 1
# Licensed under a 3-clause BSD style license - see LICENSE.rst import inspect import pytest import numpy as np from astropy.tests.helper import catch_warnings from astropy.utils.exceptions import AstropyUserWarning from astropy import units as u from astropy.nddata.nddata import NDData from astropy.nddata.decorators import support_nddata class CCDData(NDData): pass @support_nddata def wrapped_function_1(data, wcs=None, unit=None): return data, wcs, unit def test_pass_numpy(): data_in = np.array([1, 2, 3]) data_out, wcs_out, unit_out = wrapped_function_1(data=data_in) assert data_out is data_in assert wcs_out is None assert unit_out is None def test_pass_all_separate(): data_in = np.array([1, 2, 3]) wcs_in = "the wcs" unit_in = u.Jy data_out, wcs_out, unit_out = wrapped_function_1(data=data_in, wcs=wcs_in, unit=unit_in) assert data_out is data_in assert wcs_out is wcs_in assert unit_out is unit_in def test_pass_nddata(): data_in = np.array([1, 2, 3]) wcs_in = "the wcs" unit_in = u.Jy nddata_in = NDData(data_in, wcs=wcs_in, unit=unit_in) data_out, wcs_out, unit_out = wrapped_function_1(nddata_in) assert data_out is data_in assert wcs_out is wcs_in assert unit_out is unit_in def test_pass_nddata_and_explicit(): data_in = np.array([1, 2, 3]) wcs_in = "the wcs" unit_in = u.Jy unit_in_alt = u.mJy nddata_in = NDData(data_in, wcs=wcs_in, unit=unit_in) with catch_warnings() as w: data_out, wcs_out, unit_out = wrapped_function_1(nddata_in, unit=unit_in_alt) assert data_out is data_in assert wcs_out is wcs_in assert unit_out is unit_in_alt assert len(w) == 1 assert str(w[0].message) == ("Property unit has been passed explicitly and as " "an NDData property, using explicitly specified value") def test_pass_nddata_ignored(): data_in = np.array([1, 2, 3]) wcs_in = "the wcs" unit_in = u.Jy nddata_in = NDData(data_in, wcs=wcs_in, unit=unit_in, mask=[0, 1, 0]) with catch_warnings() as w: data_out, wcs_out, unit_out = wrapped_function_1(nddata_in) assert data_out is data_in assert wcs_out is wcs_in assert unit_out is unit_in assert len(w) == 1 assert str(w[0].message) == ("The following attributes were set on the data " "object, but will be ignored by the function: mask") def test_incorrect_first_argument(): with pytest.raises(ValueError) as exc: @support_nddata def wrapped_function_2(something, wcs=None, unit=None): pass assert exc.value.args[0] == "Can only wrap functions whose first positional argument is `data`" with pytest.raises(ValueError) as exc: @support_nddata def wrapped_function_3(something, data, wcs=None, unit=None): pass assert exc.value.args[0] == "Can only wrap functions whose first positional argument is `data`" with pytest.raises(ValueError) as exc: @support_nddata def wrapped_function_4(wcs=None, unit=None): pass assert exc.value.args[0] == "Can only wrap functions whose first positional argument is `data`" def test_wrap_function_no_kwargs(): @support_nddata def wrapped_function_5(data, other_data): return data data_in = np.array([1, 2, 3]) nddata_in = NDData(data_in) assert wrapped_function_5(nddata_in, [1, 2, 3]) is data_in def test_wrap_function_repack_valid(): @support_nddata(repack=True, returns=['data']) def wrapped_function_5(data, other_data): return data data_in = np.array([1, 2, 3]) nddata_in = NDData(data_in) nddata_out = wrapped_function_5(nddata_in, [1, 2, 3]) assert isinstance(nddata_out, NDData) assert nddata_out.data is data_in def test_wrap_function_accepts(): class MyData(NDData): pass @support_nddata(accepts=MyData) def wrapped_function_5(data, other_data): return 
data data_in = np.array([1, 2, 3]) nddata_in = NDData(data_in) mydata_in = MyData(data_in) assert wrapped_function_5(mydata_in, [1, 2, 3]) is data_in with pytest.raises(TypeError) as exc: wrapped_function_5(nddata_in, [1, 2, 3]) assert exc.value.args[0] == "Only NDData sub-classes that inherit from MyData can be used by this function" def test_wrap_preserve_signature_docstring(): @support_nddata def wrapped_function_6(data, wcs=None, unit=None): """ An awesome function """ pass if wrapped_function_6.__doc__ is not None: assert wrapped_function_6.__doc__.strip() == "An awesome function" signature = inspect.signature(wrapped_function_6) assert str(signature) == "(data, wcs=None, unit=None)" def test_setup_failures1(): # repack but no returns with pytest.raises(ValueError): support_nddata(repack=True) def test_setup_failures2(): # returns but no repack with pytest.raises(ValueError): support_nddata(returns=['data']) def test_setup_failures9(): # keeps but no repack with pytest.raises(ValueError): support_nddata(keeps=['unit']) def test_setup_failures3(): # same attribute in keeps and returns with pytest.raises(ValueError): support_nddata(repack=True, keeps=['mask'], returns=['data', 'mask']) def test_setup_failures4(): # function accepts *args with pytest.raises(ValueError): @support_nddata def test(data, *args): pass def test_setup_failures10(): # function accepts **kwargs with pytest.raises(ValueError): @support_nddata def test(data, **kwargs): pass def test_setup_failures5(): # function accepts *args (or **kwargs) with pytest.raises(ValueError): @support_nddata def test(data, *args): pass def test_setup_failures6(): # First argument is not data with pytest.raises(ValueError): @support_nddata def test(img): pass def test_setup_failures7(): # accepts CCDData but was given just an NDData with pytest.raises(TypeError): @support_nddata(accepts=CCDData) def test(data): pass test(NDData(np.ones((3, 3)))) def test_setup_failures8(): # function returns a different amount of arguments than specified. Using # NDData here so we don't get into troubles when creating a CCDData without # unit! with pytest.raises(ValueError): @support_nddata(repack=True, returns=['data', 'mask']) def test(data): return 10 test(NDData(np.ones((3, 3)))) # do NOT use CCDData here. def test_setup_failures11(): # function accepts no arguments with pytest.raises(ValueError): @support_nddata def test(): pass def test_setup_numpyarray_default(): # It should be possible (even if it's not advisable to use mutable # defaults) to have a numpy array as default value. @support_nddata def func(data, wcs=np.array([1, 2, 3])): return wcs def test_still_accepts_other_input(): @support_nddata(repack=True, returns=['data']) def test(data): return data assert isinstance(test(NDData(np.ones((3, 3)))), NDData) assert isinstance(test(10), int) assert isinstance(test([1, 2, 3]), list) def test_accepting_property_normal(): # Accepts a mask attribute and takes it from the input @support_nddata def test(data, mask=None): return mask ndd = NDData(np.ones((3, 3))) assert test(ndd) is None ndd._mask = np.zeros((3, 3)) assert np.all(test(ndd) == 0) # Use the explicitly given one (raises a Warning) with catch_warnings(AstropyUserWarning) as w: assert test(ndd, mask=10) == 10 assert len(w) == 1 def test_parameter_default_identical_to_explicit_passed_argument(): # If the default is identical to the explicitly passed argument this # should still raise a Warning and use the explicit one. 
@support_nddata def func(data, wcs=[1, 2, 3]): return wcs with catch_warnings(AstropyUserWarning) as w: assert func(NDData(1, wcs=[1, 2]), [1, 2, 3]) == [1, 2, 3] assert len(w) == 1 with catch_warnings(AstropyUserWarning) as w: assert func(NDData(1, wcs=[1, 2])) == [1, 2] assert len(w) == 0 def test_accepting_property_notexist(): # Accepts flags attribute but NDData doesn't have one @support_nddata def test(data, flags=10): return flags ndd = NDData(np.ones((3, 3))) test(ndd) def test_accepting_property_translated(): # Accepts a error attribute and we want to pass in uncertainty! @support_nddata(mask='masked') def test(data, masked=None): return masked ndd = NDData(np.ones((3, 3))) assert test(ndd) is None ndd._mask = np.zeros((3, 3)) assert np.all(test(ndd) == 0) # Use the explicitly given one (raises a Warning) with catch_warnings(AstropyUserWarning) as w: assert test(ndd, masked=10) == 10 assert len(w) == 1 def test_accepting_property_meta_empty(): # Meta is always set (OrderedDict) so it has a special case that it's # ignored if it's empty but not None @support_nddata def test(data, meta=None): return meta ndd = NDData(np.ones((3, 3))) assert test(ndd) is None ndd._meta = {'a': 10} assert test(ndd) == {'a': 10}
bsipocz/astropy
astropy/nddata/tests/test_decorators.py
astropy/coordinates/tests/test_frames.py
from traitlets import Unicode

from .. import utils
from . import NbGraderPreprocessor


class ClearSolutions(NbGraderPreprocessor):

    code_stub = Unicode(
        "# YOUR CODE HERE\nraise NotImplementedError()",
        config=True,
        help="The code snippet that will replace code solutions")

    text_stub = Unicode(
        "YOUR ANSWER HERE",
        config=True,
        help="The text snippet that will replace written solutions")

    comment_mark = Unicode(
        "#",
        config=True,
        help="The comment mark to prefix solution delimiters")

    begin_solution_delimeter = Unicode(
        "## BEGIN SOLUTION",
        config=True,
        help="The delimiter marking the beginning of a solution (excluding comment mark)")

    end_solution_delimeter = Unicode(
        "## END SOLUTION",
        config=True,
        help="The delimiter marking the end of a solution (excluding comment mark)")

    @property
    def begin_solution(self):
        return "{}{}".format(self.comment_mark, self.begin_solution_delimeter)

    @property
    def end_solution(self):
        return "{}{}".format(self.comment_mark, self.end_solution_delimeter)

    def _replace_solution_region(self, cell):
        """Find a region in the cell that is delimeted by
        `self.begin_solution` and `self.end_solution` (e.g. ### BEGIN SOLUTION
        and ### END SOLUTION). Replace that region either with the code stub or
        text stub, depending the cell type.

        This modifies the cell in place, and then returns True if a
        solution region was replaced, and False otherwise.

        """
        # pull out the cell input/source
        lines = cell.source.split("\n")
        if cell.cell_type == "code":
            stub_lines = self.code_stub.split("\n")
        else:
            stub_lines = self.text_stub.split("\n")

        new_lines = []
        in_solution = False
        replaced_solution = False

        for line in lines:
            # begin the solution area
            if line.strip() == self.begin_solution:

                # check to make sure this isn't a nested BEGIN
                # SOLUTION region
                if in_solution:
                    raise RuntimeError(
                        "encountered nested begin solution statements")

                in_solution = True
                replaced_solution = True

                # replace it with the stub, indented as necessary
                indent = line[:line.find(self.begin_solution)]
                for stub_line in stub_lines:
                    new_lines.append(indent + stub_line)

            # end the solution area
            elif line.strip() == self.end_solution:
                in_solution = False

            # add lines as long as it's not in the solution area
            elif not in_solution:
                new_lines.append(line)

        # we finished going through all the lines, but didn't find a
        # matching END SOLUTION statment
        if in_solution:
            raise RuntimeError("no end solution statement found")

        # replace the cell source
        cell.source = "\n".join(new_lines)

        return replaced_solution

    def preprocess(self, nb, resources):
        nb, resources = super(ClearSolutions, self).preprocess(nb, resources)
        if 'celltoolbar' in nb.metadata:
            del nb.metadata['celltoolbar']
        return nb, resources

    def preprocess_cell(self, cell, resources, cell_index):
        # replace solution regions with the relevant stubs
        replaced_solution = self._replace_solution_region(cell)

        # determine whether the cell is a solution/grade cell
        is_solution = utils.is_solution(cell)

        # check that it is marked as a solution cell if we replaced a solution
        # region -- if it's not, then this is a problem, because the cell needs
        # to be given an id
        if not is_solution and replaced_solution:
            raise RuntimeError(
                "Solution region detected in a non-solution cell; "
                "please make sure all solution regions are within "
                "solution cells")

        # replace solution cells with the code/text stub -- but not if
        # we already replaced a solution region, because that means
        # there are parts of the cells that should be preserved
        if is_solution and not replaced_solution:
            if cell.cell_type == 'code':
                cell.source = self.code_stub
            else:
                cell.source = self.text_stub

        return cell, resources
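For reference, a minimal usage sketch of the solution-region replacement above; the sample cell and the direct call to the private helper are hypothetical, assuming the standard nbformat and nbgrader imports.

# Hypothetical sketch (not part of the nbgrader sources above): a code cell
# with a delimited solution region is rewritten in place to the code stub.
from nbformat.v4 import new_code_cell
from nbgrader.preprocessors import ClearSolutions

pp = ClearSolutions()
cell = new_code_cell(source=(
    "def square(x):\n"
    "    ### BEGIN SOLUTION\n"
    "    return x ** 2\n"
    "    ### END SOLUTION\n"
))
assert pp._replace_solution_region(cell)
# cell.source is now:
#   def square(x):
#       # YOUR CODE HERE
#       raise NotImplementedError()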
import pytest

from textwrap import dedent

from ...preprocessors import LimitOutput
from .base import BaseTestPreprocessor
from .. import create_code_cell, create_text_cell


@pytest.fixture
def preprocessor():
    return LimitOutput()


class TestLimitOutput(BaseTestPreprocessor):

    def test_long_output(self):
        nb = self._read_nb("files/long-output.ipynb")
        cell, = nb.cells
        output, = cell.outputs
        assert len(output.text.split("\n")) > 1000

        pp = LimitOutput()
        nb, resources = pp.preprocess(nb, {})

        cell, = nb.cells
        output, = cell.outputs
        assert len(output.text.split("\n")) == 1000

    def test_infinite_recursion(self):
        nb = self._read_nb("files/infinite-recursion.ipynb")

        pp = LimitOutput()
        nb, resources = pp.preprocess(nb, {})

        cell, = nb.cells
        output, = cell.outputs
        assert len(output.traceback) == 100
minrk/nbgrader
nbgrader/tests/preprocessors/test_limitoutput.py
nbgrader/preprocessors/clearsolutions.py
# -*- coding: utf-8 -*-
import os
import itertools

import furl
import requests

from framework.exceptions import HTTPError


class BaseClient(object):

    @property
    def _auth(self):
        return None

    @property
    def _default_headers(self):
        return {}

    @property
    def _default_params(self):
        return {}

    def _make_request(self, method, url, **kwargs):
        expects = kwargs.pop('expects', None)
        throws = kwargs.pop('throws', None)

        kwargs['headers'] = self._build_defaults(self._default_headers, **kwargs.get('headers', {}))
        kwargs['params'] = self._build_defaults(self._default_params, **kwargs.get('params', {}))

        response = requests.request(method, url, auth=self._auth, **kwargs)

        if expects and response.status_code not in expects:
            raise throws if throws else HTTPError(response.status_code, message=response.content)

        return response

    def _build_defaults(self, defaults, **kwargs):
        defaults.update(kwargs)
        return {
            key: value
            for key, value in defaults.items()
            if value is not None
        }

    def _build_url(self, base, *segments):
        url = furl.furl(base)
        segments = filter(
            lambda segment: segment,
            map(
                lambda segment: str(segment).strip('/'),
                itertools.chain(url.path.segments, segments)
            )
        )
        url.path = os.path.join(*segments)
        return url.url
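For reference, a minimal sketch of how a concrete client might build on BaseClient; the service name, endpoint, and token below are placeholders and not part of the OSF codebase.

# Hypothetical subclass sketch (names are placeholders, not from the OSF code):
# concrete clients supply auth/header defaults and compose the helpers above.
from website.util.client import BaseClient


class ExampleServiceClient(BaseClient):

    def __init__(self, base_url, token):
        self.base_url = base_url
        self.token = token

    @property
    def _default_headers(self):
        # merged into every request; None values are dropped by _build_defaults
        return {'Authorization': 'Bearer {}'.format(self.token)}

    def get_widget(self, widget_id):
        url = self._build_url(self.base_url, 'widgets', widget_id)
        # `expects` makes _make_request raise HTTPError for any other status code
        return self._make_request('GET', url, expects=(200,)).json()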
import mock import pytest from nose.tools import * # flake8: noqa from framework.auth.core import Auth from website.models import Node from website.util import permissions from api.base.settings.defaults import API_BASE from tests.base import ApiTestCase from osf_tests.factories import ( NodeFactory, ProjectFactory, RegistrationFactory, AuthUserFactory, WithdrawnRegistrationFactory, ForkFactory ) class TestRegistrationForksList(ApiTestCase): def setUp(self): super(TestRegistrationForksList, self).setUp() self.user = AuthUserFactory() self.private_project = ProjectFactory(creator=self.user) self.private_project.save() self.component = NodeFactory(parent=self.private_project, creator=self.user) self.pointer = ProjectFactory(creator=self.user) self.private_project.add_pointer(self.pointer, auth=Auth(self.user), save=True) self.private_registration = RegistrationFactory(project=self.private_project, creator=self.user) self.private_fork = ForkFactory(project=self.private_registration, user=self.user) self.private_registration_url = '/{}registrations/{}/forks/'.format(API_BASE, self.private_registration._id) self.public_project = ProjectFactory(is_public=True, creator=self.user) self.public_project.save() self.public_component = NodeFactory(parent=self.public_project, creator=self.user, is_public=True) self.public_registration = RegistrationFactory(project = self.public_project, creator=self.user, is_public=True) self.public_registration_url = '/{}registrations/{}/forks/'.format(API_BASE, self.public_registration._id) self.public_fork = ForkFactory(project=self.public_registration, user=self.user) self.user_two = AuthUserFactory() def test_can_access_public_registration_forks_list_when_unauthenticated(self): res = self.app.get(self.public_registration_url) assert_equal(len(res.json['data']), 0) # Fork defaults to private assert_equal(self.public_fork.is_public, False) self.public_fork.is_public = True self.public_fork.save() res = self.app.get(self.public_registration_url) assert_equal(res.status_code, 200) assert_equal(len(res.json['data']), 1) assert_equal(self.public_fork.is_public, True) data = res.json['data'][0] assert_equal(data['attributes']['title'], 'Fork of ' + self.public_registration.title) assert_equal(data['id'], self.public_fork._id) assert_equal(data['attributes']['registration'], False) assert_equal(data['attributes']['fork'], True) def test_can_access_public_registration_forks_list_authenticated_contributor(self): res = self.app.get(self.public_registration_url, auth=self.user.auth) assert_equal(res.status_code, 200) assert_equal(self.public_fork.is_public, False) assert_equal(len(res.json['data']), 1) data = res.json['data'][0] assert_equal(data['attributes']['title'], 'Fork of ' + self.public_project.title) assert_equal(data['id'], self.public_fork._id) assert_equal(data['attributes']['registration'], False) assert_equal(data['attributes']['fork'], True) def test_can_access_public_registration_forks_list_authenticated_non_contributor(self): res = self.app.get(self.public_registration_url, auth=self.user_two.auth) assert_equal(res.status_code, 200) assert_equal(len(res.json['data']), 0) # Fork defaults to private assert_equal(self.public_fork.is_public, False) self.public_fork.is_public = True self.public_fork.save() res = self.app.get(self.public_registration_url) assert_equal(len(res.json['data']), 1) assert_equal(self.public_fork.is_public, True) data = res.json['data'][0] assert_equal(data['attributes']['title'], 'Fork of ' + self.public_project.title) 
assert_equal(data['id'], self.public_fork._id) assert_equal(data['attributes']['registration'], False) assert_equal(data['attributes']['fork'], True) def test_cannot_access_private_registration_forks_list_unauthenticated(self): res = self.app.get(self.private_registration_url, expect_errors=True) assert_equal(res.status_code, 401) assert_equal(res.json['errors'][0]['detail'], 'Authentication credentials were not provided.') def test_authenticated_contributor_can_access_private_registration_forks_list(self): res = self.app.get(self.private_registration_url + '?embed=children&embed=node_links&embed=logs&embed=contributors&embed=forked_from', auth=self.user.auth) assert_equal(res.status_code, 200) assert_equal(len(res.json['data']), 1) data = res.json['data'][0] assert_equal(data['attributes']['title'], 'Fork of ' + self.private_project.title) assert_equal(data['id'], self.private_fork._id) fork_contributors = data['embeds']['contributors']['data'][0]['embeds']['users']['data'] assert_equal(fork_contributors['attributes']['family_name'], self.user.family_name) assert_equal(fork_contributors['id'], self.user._id) forked_children = data['embeds']['children']['data'][0] assert_equal(forked_children['id'], self.private_registration.forks.first().nodes.first()._id) assert_equal(forked_children['attributes']['title'], self.component.title) forked_node_links = data['embeds']['node_links']['data'][0]['embeds']['target_node']['data'] assert_equal(forked_node_links['id'], self.pointer._id) assert_equal(forked_node_links['attributes']['title'], self.pointer.title) assert_equal(data['attributes']['registration'], False) assert_equal(data['attributes']['fork'], True) expected_logs = list(self.private_registration.logs.values_list('action', flat=True)) expected_logs.append(self.private_registration.nodes.first().logs.latest().action) expected_logs.append('node_forked') expected_logs.append('node_forked') forked_logs = data['embeds']['logs']['data'] assert_equal(set(expected_logs), set(log['attributes']['action'] for log in forked_logs)) assert_equal(len(forked_logs), 6) forked_from = data['embeds']['forked_from']['data'] assert_equal(forked_from['id'], self.private_registration._id) def test_authenticated_non_contributor_cannot_access_private_registration_forks_list(self): res = self.app.get(self.private_registration_url, auth=self.user_two.auth, expect_errors=True) assert_equal(res.status_code, 403) assert_equal(res.json['errors'][0]['detail'], 'You do not have permission to perform this action.') class TestRegistrationForkCreate(ApiTestCase): def setUp(self): super(TestRegistrationForkCreate, self).setUp() self.user = AuthUserFactory() self.user_two = AuthUserFactory() self.user_three = AuthUserFactory() self.private_project = ProjectFactory(creator=self.user) private_pointer = ProjectFactory(creator=self.user_two) actual_pointer = self.private_project.add_pointer(private_pointer, auth=Auth(self.user_two), save=True) self.private_registration = RegistrationFactory(creator=self.user, project=self.private_project) self.fork_data = { 'data': { 'type': 'nodes' } } self.fork_data_with_title = { 'data': { 'type': 'nodes', 'attributes': {'title': 'My Forked Project'} } } self.private_registration_url = '/{}registrations/{}/forks/'.format(API_BASE, self.private_registration._id) self.public_project = ProjectFactory(is_public=True, creator=self.user) self.public_registration = RegistrationFactory(creator=self.user, project=self.public_project, is_public=True) self.public_registration_url = 
'/{}registrations/{}/forks/'.format(API_BASE, self.public_registration._id) def test_create_fork_from_public_registration_with_new_title(self): res = self.app.post_json_api(self.public_registration_url, self.fork_data_with_title, auth=self.user.auth) assert_equal(res.status_code, 201) data = res.json['data'] assert_equal(data['id'], self.public_registration.forks.first()._id) assert_equal(data['attributes']['title'], self.fork_data_with_title['data']['attributes']['title']) assert_equal(data['attributes']['registration'], False) assert_equal(data['attributes']['fork'], True) def test_create_fork_from_private_registration_with_new_title(self): res = self.app.post_json_api(self.private_registration_url, self.fork_data_with_title, auth=self.user.auth) assert_equal(res.status_code, 201) data = res.json['data'] assert_equal(data['id'], self.private_registration.forks.first()._id) assert_equal(data['attributes']['title'], self.fork_data_with_title['data']['attributes']['title']) assert_equal(data['attributes']['registration'], False) assert_equal(data['attributes']['fork'], True) def test_can_fork_public_registration_logged_in(self): res = self.app.post_json_api(self.public_registration_url, self.fork_data, auth=self.user_two.auth) assert_equal(res.status_code, 201) data = res.json['data'] assert_equal(data['id'], self.public_registration.forks.first()._id) assert_equal(data['attributes']['title'], 'Fork of ' + self.public_registration.title) assert_equal(data['attributes']['registration'], False) assert_equal(data['attributes']['fork'], True) def test_cannot_fork_public_registration_logged_out(self): res = self.app.post_json_api(self.public_registration_url, self.fork_data, expect_errors=True) assert_equal(res.status_code, 401) assert_equal(res.json['errors'][0]['detail'], 'Authentication credentials were not provided.') def test_can_fork_public_registration_logged_in_contributor(self): res = self.app.post_json_api(self.public_registration_url, self.fork_data, auth=self.user.auth) assert_equal(res.status_code, 201) data = res.json['data'] assert_equal(data['id'], self.public_registration.forks.first()._id) assert_equal(data['attributes']['title'], 'Fork of ' + self.public_registration.title) assert_equal(data['attributes']['registration'], False) assert_equal(data['attributes']['fork'], True) def test_cannot_fork_private_registration_logged_out(self): res = self.app.post_json_api(self.private_registration_url, self.fork_data, expect_errors=True) assert_equal(res.status_code, 401) assert_equal(res.json['errors'][0]['detail'], 'Authentication credentials were not provided.') def test_cannot_fork_private_registration_logged_in_non_contributor(self): res = self.app.post_json_api(self.private_registration_url, self.fork_data, auth=self.user_two.auth, expect_errors=True) assert_equal(res.status_code, 403) assert_equal(res.json['errors'][0]['detail'], 'You do not have permission to perform this action.') def test_can_fork_private_registration_logged_in_contributor(self): res = self.app.post_json_api(self.private_registration_url + '?embed=children&embed=node_links&embed=logs&embed=contributors&embed=forked_from', self.fork_data, auth=self.user.auth) assert_equal(res.status_code, 201) data = res.json['data'] assert_equal(data['attributes']['title'], 'Fork of ' + self.private_registration.title) assert_equal(data['attributes']['registration'], False) assert_equal(data['attributes']['fork'], True) fork_contributors = data['embeds']['contributors']['data'][0]['embeds']['users']['data'] 
assert_equal(fork_contributors['attributes']['family_name'], self.user.family_name) assert_equal(fork_contributors['id'], self.user._id) forked_from = data['embeds']['forked_from']['data'] assert_equal(forked_from['id'], self.private_registration._id) def test_fork_private_components_no_access(self): url = self.public_registration_url + '?embed=children' private_component = NodeFactory(parent=self.public_registration, creator=self.user_two, is_public=False) res = self.app.post_json_api(url, self.fork_data, auth=self.user_three.auth) assert_equal(res.status_code, 201) # Private components that you do not have access to are not forked assert_equal(res.json['data']['embeds']['children']['links']['meta']['total'], 0) def test_fork_components_you_can_access(self): url = self.private_registration_url + '?embed=children' new_component = NodeFactory(parent=self.private_registration, creator=self.user) res = self.app.post_json_api(url, self.fork_data, auth=self.user.auth) assert_equal(res.status_code, 201) assert_equal(res.json['data']['embeds']['children']['links']['meta']['total'], 1) assert_equal(res.json['data']['embeds']['children']['data'][0]['id'], new_component.forks.first()._id) def test_fork_private_node_links(self): url = self.private_registration_url + '?embed=node_links' # Node link is forked, but shows up as a private node link res = self.app.post_json_api(url, self.fork_data, auth=self.user.auth) assert_equal(res.json['data']['embeds']['node_links']['data'][0]['embeds']['target_node']['errors'][0]['detail'], 'You do not have permission to perform this action.') assert_equal(res.json['data']['embeds']['node_links']['links']['meta']['total'], 1) def test_fork_node_links_you_can_access(self): pointer = ProjectFactory(creator=self.user) self.private_project.add_pointer(pointer, auth=Auth(self.user), save=True) new_registration = RegistrationFactory(project = self.private_project, creator=self.user) url = '/{}registrations/{}/forks/'.format(API_BASE, new_registration._id) + '?embed=node_links' res = self.app.post_json_api(url, self.fork_data, auth=self.user.auth) assert_equal(res.json['data']['embeds']['node_links']['data'][1]['embeds']['target_node']['data']['id'], pointer._id) assert_equal(res.json['data']['embeds']['node_links']['links']['meta']['total'], 2) def test_cannot_fork_retractions(self): with mock.patch('osf.models.AbstractNode.update_search'): retraction = WithdrawnRegistrationFactory(registration=self.private_registration, user=self.user) url = '/{}registrations/{}/forks/'.format(API_BASE, self.private_registration._id) + '?embed=forked_from' res = self.app.post_json_api(url, self.fork_data, auth=self.user.auth, expect_errors=True) assert_equal(res.status_code, 403)
alexschiller/osf.io
api_tests/registrations/views/test_registration_forks.py
website/util/client.py
""" DataFrame --------- An efficient 2D container for potentially mixed-type time series or other labeled data series. Similar to its R counterpart, data.frame, except providing automatic data alignment and a host of useful data manipulation methods having to do with the labeling information """ import collections from collections import OrderedDict, abc import functools from io import StringIO import itertools import sys import warnings from textwrap import dedent from typing import FrozenSet, List, Optional, Set, Type, Union import numpy as np import numpy.ma as ma from pandas._config import get_option from pandas._libs import lib, algos as libalgos from pandas.util._decorators import (Appender, Substitution, rewrite_axis_style_signature, deprecate_kwarg) from pandas.util._validators import (validate_bool_kwarg, validate_axis_style_args) from pandas.compat import PY36, raise_with_traceback from pandas.compat.numpy import function as nv from pandas.core.arrays.sparse import SparseFrameAccessor from pandas.core.dtypes.cast import ( maybe_upcast, cast_scalar_to_array, infer_dtype_from_scalar, maybe_cast_to_datetime, maybe_infer_to_datetimelike, maybe_convert_platform, maybe_downcast_to_dtype, invalidate_string_dtypes, coerce_to_dtypes, maybe_upcast_putmask, find_common_type) from pandas.core.dtypes.common import ( is_dict_like, is_datetime64tz_dtype, is_object_dtype, is_extension_type, is_extension_array_dtype, is_datetime64_any_dtype, is_bool_dtype, is_integer_dtype, is_float_dtype, is_integer, is_scalar, is_dtype_equal, needs_i8_conversion, infer_dtype_from_object, ensure_float64, ensure_int64, ensure_platform_int, is_list_like, is_nested_list_like, is_iterator, is_sequence, is_named_tuple) from pandas.core.dtypes.generic import ( ABCSeries, ABCDataFrame, ABCIndexClass, ABCMultiIndex) from pandas.core.dtypes.missing import isna, notna from pandas.core import algorithms from pandas.core import common as com from pandas.core import nanops from pandas.core import ops from pandas.core.accessor import CachedAccessor from pandas.core.arrays import Categorical, ExtensionArray from pandas.core.arrays.datetimelike import ( DatetimeLikeArrayMixin as DatetimeLikeArray ) from pandas.core.generic import NDFrame, _shared_docs from pandas.core.index import (Index, MultiIndex, ensure_index, ensure_index_from_sequences) from pandas.core.indexes import base as ibase from pandas.core.indexes.datetimes import DatetimeIndex from pandas.core.indexes.period import PeriodIndex from pandas.core.indexing import (maybe_droplevels, convert_to_index_sliceable, check_bool_indexer) from pandas.core.internals import BlockManager from pandas.core.internals.construction import ( masked_rec_array_to_mgr, get_names_from_index, to_arrays, reorder_arrays, init_ndarray, init_dict, arrays_to_mgr, sanitize_index) from pandas.core.series import Series from pandas.io.formats import console from pandas.io.formats import format as fmt from pandas.io.formats.printing import pprint_thing import pandas.plotting # --------------------------------------------------------------------- # Docstring templates _shared_doc_kwargs = dict( axes='index, columns', klass='DataFrame', axes_single_arg="{0 or 'index', 1 or 'columns'}", axis="""axis : {0 or 'index', 1 or 'columns'}, default 0 If 0 or 'index': apply function to each column. If 1 or 'columns': apply function to each row.""", optional_by=""" by : str or list of str Name or list of names to sort by. 
- if `axis` is 0 or `'index'` then `by` may contain index levels and/or column labels - if `axis` is 1 or `'columns'` then `by` may contain column levels and/or index labels .. versionchanged:: 0.23.0 Allow specifying index or column level names.""", versionadded_to_excel='', optional_labels="""labels : array-like, optional New labels / index to conform the axis specified by 'axis' to.""", optional_axis="""axis : int or str, optional Axis to target. Can be either the axis name ('index', 'columns') or number (0, 1).""", ) _numeric_only_doc = """numeric_only : boolean, default None Include only float, int, boolean data. If None, will attempt to use everything, then use only numeric data """ _merge_doc = """ Merge DataFrame or named Series objects with a database-style join. The join is done on columns or indexes. If joining columns on columns, the DataFrame indexes *will be ignored*. Otherwise if joining indexes on indexes or indexes on a column or columns, the index will be passed on. Parameters ----------%s right : DataFrame or named Series Object to merge with. how : {'left', 'right', 'outer', 'inner'}, default 'inner' Type of merge to be performed. * left: use only keys from left frame, similar to a SQL left outer join; preserve key order. * right: use only keys from right frame, similar to a SQL right outer join; preserve key order. * outer: use union of keys from both frames, similar to a SQL full outer join; sort keys lexicographically. * inner: use intersection of keys from both frames, similar to a SQL inner join; preserve the order of the left keys. on : label or list Column or index level names to join on. These must be found in both DataFrames. If `on` is None and not merging on indexes then this defaults to the intersection of the columns in both DataFrames. left_on : label or list, or array-like Column or index level names to join on in the left DataFrame. Can also be an array or list of arrays of the length of the left DataFrame. These arrays are treated as if they are columns. right_on : label or list, or array-like Column or index level names to join on in the right DataFrame. Can also be an array or list of arrays of the length of the right DataFrame. These arrays are treated as if they are columns. left_index : bool, default False Use the index from the left DataFrame as the join key(s). If it is a MultiIndex, the number of keys in the other DataFrame (either the index or a number of columns) must match the number of levels. right_index : bool, default False Use the index from the right DataFrame as the join key. Same caveats as left_index. sort : bool, default False Sort the join keys lexicographically in the result DataFrame. If False, the order of the join keys depends on the join type (how keyword). suffixes : tuple of (str, str), default ('_x', '_y') Suffix to apply to overlapping column names in the left and right side, respectively. To raise an exception on overlapping columns use (False, False). copy : bool, default True If False, avoid copy if possible. indicator : bool or str, default False If True, adds a column to output DataFrame called "_merge" with information on the source of each row. If string, column with information on source of each row will be added to output DataFrame, and column will be named value of string. 
Information column is Categorical-type and takes on a value of "left_only" for observations whose merge key only appears in 'left' DataFrame, "right_only" for observations whose merge key only appears in 'right' DataFrame, and "both" if the observation's merge key is found in both. validate : str, optional If specified, checks if merge is of specified type. * "one_to_one" or "1:1": check if merge keys are unique in both left and right datasets. * "one_to_many" or "1:m": check if merge keys are unique in left dataset. * "many_to_one" or "m:1": check if merge keys are unique in right dataset. * "many_to_many" or "m:m": allowed, but does not result in checks. .. versionadded:: 0.21.0 Returns ------- DataFrame A DataFrame of the two merged objects. See Also -------- merge_ordered : Merge with optional filling/interpolation. merge_asof : Merge on nearest keys. DataFrame.join : Similar method using indices. Notes ----- Support for specifying index levels as the `on`, `left_on`, and `right_on` parameters was added in version 0.23.0 Support for merging named Series objects was added in version 0.24.0 Examples -------- >>> df1 = pd.DataFrame({'lkey': ['foo', 'bar', 'baz', 'foo'], ... 'value': [1, 2, 3, 5]}) >>> df2 = pd.DataFrame({'rkey': ['foo', 'bar', 'baz', 'foo'], ... 'value': [5, 6, 7, 8]}) >>> df1 lkey value 0 foo 1 1 bar 2 2 baz 3 3 foo 5 >>> df2 rkey value 0 foo 5 1 bar 6 2 baz 7 3 foo 8 Merge df1 and df2 on the lkey and rkey columns. The value columns have the default suffixes, _x and _y, appended. >>> df1.merge(df2, left_on='lkey', right_on='rkey') lkey value_x rkey value_y 0 foo 1 foo 5 1 foo 1 foo 8 2 foo 5 foo 5 3 foo 5 foo 8 4 bar 2 bar 6 5 baz 3 baz 7 Merge DataFrames df1 and df2 with specified left and right suffixes appended to any overlapping columns. >>> df1.merge(df2, left_on='lkey', right_on='rkey', ... suffixes=('_left', '_right')) lkey value_left rkey value_right 0 foo 1 foo 5 1 foo 1 foo 8 2 foo 5 foo 5 3 foo 5 foo 8 4 bar 2 bar 6 5 baz 3 baz 7 Merge DataFrames df1 and df2, but raise an exception if the DataFrames have any overlapping columns. >>> df1.merge(df2, left_on='lkey', right_on='rkey', suffixes=(False, False)) Traceback (most recent call last): ... ValueError: columns overlap but no suffix specified: Index(['value'], dtype='object') """ # ----------------------------------------------------------------------- # DataFrame class class DataFrame(NDFrame): """ Two-dimensional size-mutable, potentially heterogeneous tabular data structure with labeled axes (rows and columns). Arithmetic operations align on both row and column labels. Can be thought of as a dict-like container for Series objects. The primary pandas data structure. Parameters ---------- data : ndarray (structured or homogeneous), Iterable, dict, or DataFrame Dict can contain Series, arrays, constants, or list-like objects .. versionchanged :: 0.23.0 If data is a dict, argument order is maintained for Python 3.6 and later. index : Index or array-like Index to use for resulting frame. Will default to RangeIndex if no indexing information part of input data and no index provided columns : Index or array-like Column labels to use for resulting frame. Will default to RangeIndex (0, 1, 2, ..., n) if no column labels are provided dtype : dtype, default None Data type to force. Only a single dtype is allowed. If None, infer copy : boolean, default False Copy data from inputs. Only affects DataFrame / 2d ndarray input See Also -------- DataFrame.from_records : Constructor from tuples, also record arrays. 
DataFrame.from_dict : From dicts of Series, arrays, or dicts. DataFrame.from_items : From sequence of (key, value) pairs read_csv, pandas.read_table, pandas.read_clipboard. Examples -------- Constructing DataFrame from a dictionary. >>> d = {'col1': [1, 2], 'col2': [3, 4]} >>> df = pd.DataFrame(data=d) >>> df col1 col2 0 1 3 1 2 4 Notice that the inferred dtype is int64. >>> df.dtypes col1 int64 col2 int64 dtype: object To enforce a single dtype: >>> df = pd.DataFrame(data=d, dtype=np.int8) >>> df.dtypes col1 int8 col2 int8 dtype: object Constructing DataFrame from numpy ndarray: >>> df2 = pd.DataFrame(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]), ... columns=['a', 'b', 'c']) >>> df2 a b c 0 1 2 3 1 4 5 6 2 7 8 9 """ @property def _constructor(self): return DataFrame _constructor_sliced = Series # type: Type[Series] _deprecations = NDFrame._deprecations | frozenset([ 'get_value', 'set_value', 'from_csv', 'from_items' ]) # type: FrozenSet[str] _accessors = set() # type: Set[str] @property def _constructor_expanddim(self): raise NotImplementedError("Not supported for DataFrames!") # ---------------------------------------------------------------------- # Constructors def __init__(self, data=None, index=None, columns=None, dtype=None, copy=False): if data is None: data = {} if dtype is not None: dtype = self._validate_dtype(dtype) if isinstance(data, DataFrame): data = data._data if isinstance(data, BlockManager): mgr = self._init_mgr(data, axes=dict(index=index, columns=columns), dtype=dtype, copy=copy) elif isinstance(data, dict): mgr = init_dict(data, index, columns, dtype=dtype) elif isinstance(data, ma.MaskedArray): import numpy.ma.mrecords as mrecords # masked recarray if isinstance(data, mrecords.MaskedRecords): mgr = masked_rec_array_to_mgr(data, index, columns, dtype, copy) # a masked array else: mask = ma.getmaskarray(data) if mask.any(): data, fill_value = maybe_upcast(data, copy=True) data.soften_mask() # set hardmask False if it was True data[mask] = fill_value else: data = data.copy() mgr = init_ndarray(data, index, columns, dtype=dtype, copy=copy) elif isinstance(data, (np.ndarray, Series, Index)): if data.dtype.names: data_columns = list(data.dtype.names) data = {k: data[k] for k in data_columns} if columns is None: columns = data_columns mgr = init_dict(data, index, columns, dtype=dtype) elif getattr(data, 'name', None) is not None: mgr = init_dict({data.name: data}, index, columns, dtype=dtype) else: mgr = init_ndarray(data, index, columns, dtype=dtype, copy=copy) # For data is list-like, or Iterable (will consume into list) elif (isinstance(data, abc.Iterable) and not isinstance(data, (str, bytes))): if not isinstance(data, abc.Sequence): data = list(data) if len(data) > 0: if is_list_like(data[0]) and getattr(data[0], 'ndim', 1) == 1: if is_named_tuple(data[0]) and columns is None: columns = data[0]._fields arrays, columns = to_arrays(data, columns, dtype=dtype) columns = ensure_index(columns) # set the index if index is None: if isinstance(data[0], Series): index = get_names_from_index(data) elif isinstance(data[0], Categorical): index = ibase.default_index(len(data[0])) else: index = ibase.default_index(len(data)) mgr = arrays_to_mgr(arrays, columns, index, columns, dtype=dtype) else: mgr = init_ndarray(data, index, columns, dtype=dtype, copy=copy) else: mgr = init_dict({}, index, columns, dtype=dtype) else: try: arr = np.array(data, dtype=dtype, copy=copy) except (ValueError, TypeError) as e: exc = TypeError('DataFrame constructor called with ' 'incompatible data and 
dtype: {e}'.format(e=e)) raise_with_traceback(exc) if arr.ndim == 0 and index is not None and columns is not None: values = cast_scalar_to_array((len(index), len(columns)), data, dtype=dtype) mgr = init_ndarray(values, index, columns, dtype=values.dtype, copy=False) else: raise ValueError('DataFrame constructor not properly called!') NDFrame.__init__(self, mgr, fastpath=True) # ---------------------------------------------------------------------- @property def axes(self): """ Return a list representing the axes of the DataFrame. It has the row axis labels and column axis labels as the only members. They are returned in that order. Examples -------- >>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]}) >>> df.axes [RangeIndex(start=0, stop=2, step=1), Index(['col1', 'col2'], dtype='object')] """ return [self.index, self.columns] @property def shape(self): """ Return a tuple representing the dimensionality of the DataFrame. See Also -------- ndarray.shape Examples -------- >>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]}) >>> df.shape (2, 2) >>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4], ... 'col3': [5, 6]}) >>> df.shape (2, 3) """ return len(self.index), len(self.columns) @property def _is_homogeneous_type(self): """ Whether all the columns in a DataFrame have the same type. Returns ------- bool Examples -------- >>> DataFrame({"A": [1, 2], "B": [3, 4]})._is_homogeneous_type True >>> DataFrame({"A": [1, 2], "B": [3.0, 4.0]})._is_homogeneous_type False Items with the same type but different sizes are considered different types. >>> DataFrame({ ... "A": np.array([1, 2], dtype=np.int32), ... "B": np.array([1, 2], dtype=np.int64)})._is_homogeneous_type False """ if self._data.any_extension_types: return len({block.dtype for block in self._data.blocks}) == 1 else: return not self._data.is_mixed_type # ---------------------------------------------------------------------- # Rendering Methods def _repr_fits_vertical_(self): """ Check length against max_rows. """ max_rows = get_option("display.max_rows") return len(self) <= max_rows def _repr_fits_horizontal_(self, ignore_width=False): """ Check if full repr fits in horizontal boundaries imposed by the display options width and max_columns. In case off non-interactive session, no boundaries apply. `ignore_width` is here so ipnb+HTML output can behave the way users expect. display.max_columns remains in effect. 
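# The __init__ dispatch above routes by input type; a brief sketch of three
# of those paths (the values are illustrative only):
import numpy as np
import pandas as pd

# dict of column -> values goes through init_dict
df_from_dict = pd.DataFrame({'a': [1, 2], 'b': [3.0, 4.0]})

# a 2D ndarray goes through init_ndarray
df_from_ndarray = pd.DataFrame(np.arange(6).reshape(2, 3), columns=list('xyz'))

# a scalar with both index and columns is broadcast via cast_scalar_to_array
df_from_scalar = pd.DataFrame(0, index=['r1', 'r2'], columns=['c1', 'c2'])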
GH3541, GH3573 """ width, height = console.get_console_size() max_columns = get_option("display.max_columns") nb_columns = len(self.columns) # exceed max columns if ((max_columns and nb_columns > max_columns) or ((not ignore_width) and width and nb_columns > (width // 2))): return False # used by repr_html under IPython notebook or scripts ignore terminal # dims if ignore_width or not console.in_interactive_session(): return True if (get_option('display.width') is not None or console.in_ipython_frontend()): # check at least the column row for excessive width max_rows = 1 else: max_rows = get_option("display.max_rows") # when auto-detecting, so width=None and not in ipython front end # check whether repr fits horizontal by actually checking # the width of the rendered repr buf = StringIO() # only care about the stuff we'll actually print out # and to_string on entire frame may be expensive d = self if not (max_rows is None): # unlimited rows # min of two, where one may be None d = d.iloc[:min(max_rows, len(d))] else: return True d.to_string(buf=buf) value = buf.getvalue() repr_width = max(len(l) for l in value.split('\n')) return repr_width < width def _info_repr(self): """ True if the repr should show the info view. """ info_repr_option = (get_option("display.large_repr") == "info") return info_repr_option and not (self._repr_fits_horizontal_() and self._repr_fits_vertical_()) def __repr__(self): """ Return a string representation for a particular DataFrame. """ buf = StringIO("") if self._info_repr(): self.info(buf=buf) return buf.getvalue() max_rows = get_option("display.max_rows") max_cols = get_option("display.max_columns") show_dimensions = get_option("display.show_dimensions") if get_option("display.expand_frame_repr"): width, _ = console.get_console_size() else: width = None self.to_string(buf=buf, max_rows=max_rows, max_cols=max_cols, line_width=width, show_dimensions=show_dimensions) return buf.getvalue() def _repr_html_(self): """ Return a html representation for a particular DataFrame. Mainly for IPython notebook. """ if self._info_repr(): buf = StringIO("") self.info(buf=buf) # need to escape the <class>, should be the first line. val = buf.getvalue().replace('<', r'&lt;', 1) val = val.replace('>', r'&gt;', 1) return '<pre>' + val + '</pre>' if get_option("display.notebook_repr_html"): max_rows = get_option("display.max_rows") max_cols = get_option("display.max_columns") show_dimensions = get_option("display.show_dimensions") return self.to_html(max_rows=max_rows, max_cols=max_cols, show_dimensions=show_dimensions, notebook=True) else: return None @Substitution(header='Write out the column names. If a list of strings ' 'is given, it is assumed to be aliases for the ' 'column names', col_space_type='int', col_space='The minimum width of each column') @Substitution(shared_params=fmt.common_docstring, returns=fmt.return_docstring) def to_string(self, buf=None, columns=None, col_space=None, header=True, index=True, na_rep='NaN', formatters=None, float_format=None, sparsify=None, index_names=True, justify=None, max_rows=None, max_cols=None, show_dimensions=False, decimal='.', line_width=None): """ Render a DataFrame to a console-friendly tabular output. %(shared_params)s line_width : int, optional Width to wrap a line in characters. %(returns)s See Also -------- to_html : Convert DataFrame to HTML. 
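# __repr__ and _repr_html_ above read their limits from the display options;
# a short sketch of steering the console repr explicitly via option_context:
import pandas as pd

df = pd.DataFrame({'a': range(100)})
with pd.option_context('display.max_rows', 10,
                       'display.show_dimensions', True):
    print(repr(df))  # truncated output ending with "[100 rows x 1 columns]"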
Examples -------- >>> d = {'col1': [1, 2, 3], 'col2': [4, 5, 6]} >>> df = pd.DataFrame(d) >>> print(df.to_string()) col1 col2 0 1 4 1 2 5 2 3 6 """ formatter = fmt.DataFrameFormatter(self, buf=buf, columns=columns, col_space=col_space, na_rep=na_rep, formatters=formatters, float_format=float_format, sparsify=sparsify, justify=justify, index_names=index_names, header=header, index=index, max_rows=max_rows, max_cols=max_cols, show_dimensions=show_dimensions, decimal=decimal, line_width=line_width) formatter.to_string() if buf is None: result = formatter.buf.getvalue() return result # ---------------------------------------------------------------------- @property def style(self): """ Property returning a Styler object containing methods for building a styled HTML representation fo the DataFrame. See Also -------- io.formats.style.Styler """ from pandas.io.formats.style import Styler return Styler(self) def iteritems(self): r""" Iterator over (column name, Series) pairs. Iterates over the DataFrame columns, returning a tuple with the column name and the content as a Series. Yields ------ label : object The column names for the DataFrame being iterated over. content : Series The column entries belonging to each label, as a Series. See Also -------- DataFrame.iterrows : Iterate over DataFrame rows as (index, Series) pairs. DataFrame.itertuples : Iterate over DataFrame rows as namedtuples of the values. Examples -------- >>> df = pd.DataFrame({'species': ['bear', 'bear', 'marsupial'], ... 'population': [1864, 22000, 80000]}, ... index=['panda', 'polar', 'koala']) >>> df species population panda bear 1864 polar bear 22000 koala marsupial 80000 >>> for label, content in df.iteritems(): ... print('label:', label) ... print('content:', content, sep='\n') ... label: species content: panda bear polar bear koala marsupial Name: species, dtype: object label: population content: panda 1864 polar 22000 koala 80000 Name: population, dtype: int64 """ if self.columns.is_unique and hasattr(self, '_item_cache'): for k in self.columns: yield k, self._get_item_cache(k) else: for i, k in enumerate(self.columns): yield k, self._ixs(i, axis=1) def iterrows(self): """ Iterate over DataFrame rows as (index, Series) pairs. Yields ------ index : label or tuple of label The index of the row. A tuple for a `MultiIndex`. data : Series The data of the row as a Series. it : generator A generator that iterates over the rows of the frame. See Also -------- itertuples : Iterate over DataFrame rows as namedtuples of the values. iteritems : Iterate over (column name, Series) pairs. Notes ----- 1. Because ``iterrows`` returns a Series for each row, it does **not** preserve dtypes across the rows (dtypes are preserved across columns for DataFrames). For example, >>> df = pd.DataFrame([[1, 1.5]], columns=['int', 'float']) >>> row = next(df.iterrows())[1] >>> row int 1.0 float 1.5 Name: 0, dtype: float64 >>> print(row['int'].dtype) float64 >>> print(df['int'].dtype) int64 To preserve dtypes while iterating over the rows, it is better to use :meth:`itertuples` which returns namedtuples of the values and which is generally faster than ``iterrows``. 2. You should **never modify** something you are iterating over. This is not guaranteed to work in all cases. Depending on the data types, the iterator returns a copy and not a view, and writing to it will have no effect. 
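# Note 2 above in a runnable sketch: the Series yielded by iterrows is
# generally a copy, so assigning into it does not write back to the frame.
import pandas as pd

df = pd.DataFrame({'a': [1, 2], 'b': [3, 4]})
for _, row in df.iterrows():
    row['a'] = 0                      # mutates the copy only
assert df['a'].tolist() == [1, 2]     # original frame is unchanged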
""" columns = self.columns klass = self._constructor_sliced for k, v in zip(self.index, self.values): s = klass(v, index=columns, name=k) yield k, s def itertuples(self, index=True, name="Pandas"): """ Iterate over DataFrame rows as namedtuples. Parameters ---------- index : bool, default True If True, return the index as the first element of the tuple. name : str or None, default "Pandas" The name of the returned namedtuples or None to return regular tuples. Returns ------- iterator An object to iterate over namedtuples for each row in the DataFrame with the first field possibly being the index and following fields being the column values. See Also -------- DataFrame.iterrows : Iterate over DataFrame rows as (index, Series) pairs. DataFrame.iteritems : Iterate over (column name, Series) pairs. Notes ----- The column names will be renamed to positional names if they are invalid Python identifiers, repeated, or start with an underscore. With a large number of columns (>255), regular tuples are returned. Examples -------- >>> df = pd.DataFrame({'num_legs': [4, 2], 'num_wings': [0, 2]}, ... index=['dog', 'hawk']) >>> df num_legs num_wings dog 4 0 hawk 2 2 >>> for row in df.itertuples(): ... print(row) ... Pandas(Index='dog', num_legs=4, num_wings=0) Pandas(Index='hawk', num_legs=2, num_wings=2) By setting the `index` parameter to False we can remove the index as the first element of the tuple: >>> for row in df.itertuples(index=False): ... print(row) ... Pandas(num_legs=4, num_wings=0) Pandas(num_legs=2, num_wings=2) With the `name` parameter set we set a custom name for the yielded namedtuples: >>> for row in df.itertuples(name='Animal'): ... print(row) ... Animal(Index='dog', num_legs=4, num_wings=0) Animal(Index='hawk', num_legs=2, num_wings=2) """ arrays = [] fields = list(self.columns) if index: arrays.append(self.index) fields.insert(0, "Index") # use integer indexing because of possible duplicate column names arrays.extend(self.iloc[:, k] for k in range(len(self.columns))) # Python 3 supports at most 255 arguments to constructor if name is not None and len(self.columns) + index < 256: itertuple = collections.namedtuple(name, fields, rename=True) return map(itertuple._make, zip(*arrays)) # fallback to regular tuples return zip(*arrays) items = iteritems def __len__(self): """ Returns length of info axis, but here we use the index. """ return len(self.index) def dot(self, other): """ Compute the matrix multiplication between the DataFrame and other. This method computes the matrix product between the DataFrame and the values of an other Series, DataFrame or a numpy array. It can also be called using ``self @ other`` in Python >= 3.5. Parameters ---------- other : Series, DataFrame or array-like The other object to compute the matrix product with. Returns ------- Series or DataFrame If other is a Series, return the matrix product between self and other as a Serie. If other is a DataFrame or a numpy.array, return the matrix product of self and other in a DataFrame of a np.array. See Also -------- Series.dot: Similar method for Series. Notes ----- The dimensions of DataFrame and other must be compatible in order to compute the matrix multiplication. In addition, the column names of DataFrame and the index of other must contain the same values, as they will be aligned prior to the multiplication. The dot method for Series computes the inner product, instead of the matrix product here. Examples -------- Here we multiply a DataFrame with a Series. 
>>> df = pd.DataFrame([[0, 1, -2, -1], [1, 1, 1, 1]]) >>> s = pd.Series([1, 1, 2, 1]) >>> df.dot(s) 0 -4 1 5 dtype: int64 Here we multiply a DataFrame with another DataFrame. >>> other = pd.DataFrame([[0, 1], [1, 2], [-1, -1], [2, 0]]) >>> df.dot(other) 0 1 0 1 4 1 2 2 Note that the dot method give the same result as @ >>> df @ other 0 1 0 1 4 1 2 2 The dot method works also if other is an np.array. >>> arr = np.array([[0, 1], [1, 2], [-1, -1], [2, 0]]) >>> df.dot(arr) 0 1 0 1 4 1 2 2 Note how shuffling of the objects does not change the result. >>> s2 = s.reindex([1, 0, 2, 3]) >>> df.dot(s2) 0 -4 1 5 dtype: int64 """ if isinstance(other, (Series, DataFrame)): common = self.columns.union(other.index) if (len(common) > len(self.columns) or len(common) > len(other.index)): raise ValueError('matrices are not aligned') left = self.reindex(columns=common, copy=False) right = other.reindex(index=common, copy=False) lvals = left.values rvals = right.values else: left = self lvals = self.values rvals = np.asarray(other) if lvals.shape[1] != rvals.shape[0]: raise ValueError('Dot product shape mismatch, ' '{s} vs {r}'.format(s=lvals.shape, r=rvals.shape)) if isinstance(other, DataFrame): return self._constructor(np.dot(lvals, rvals), index=left.index, columns=other.columns) elif isinstance(other, Series): return Series(np.dot(lvals, rvals), index=left.index) elif isinstance(rvals, (np.ndarray, Index)): result = np.dot(lvals, rvals) if result.ndim == 2: return self._constructor(result, index=left.index) else: return Series(result, index=left.index) else: # pragma: no cover raise TypeError('unsupported type: {oth}'.format(oth=type(other))) def __matmul__(self, other): """ Matrix multiplication using binary `@` operator in Python>=3.5. """ return self.dot(other) def __rmatmul__(self, other): """ Matrix multiplication using binary `@` operator in Python>=3.5. """ return self.T.dot(np.transpose(other)).T # ---------------------------------------------------------------------- # IO methods (to / from other formats) @classmethod def from_dict(cls, data, orient='columns', dtype=None, columns=None): """ Construct DataFrame from dict of array-like or dicts. Creates DataFrame object from dictionary by columns or by index allowing dtype specification. Parameters ---------- data : dict Of the form {field : array-like} or {field : dict}. orient : {'columns', 'index'}, default 'columns' The "orientation" of the data. If the keys of the passed dict should be the columns of the resulting DataFrame, pass 'columns' (default). Otherwise if the keys should be rows, pass 'index'. dtype : dtype, default None Data type to force, otherwise infer. columns : list, default None Column labels to use when ``orient='index'``. Raises a ValueError if used with ``orient='columns'``. .. versionadded:: 0.23.0 Returns ------- DataFrame See Also -------- DataFrame.from_records : DataFrame from ndarray (structured dtype), list of tuples, dict, or DataFrame. DataFrame : DataFrame object creation using constructor. 
Examples -------- By default the keys of the dict become the DataFrame columns: >>> data = {'col_1': [3, 2, 1, 0], 'col_2': ['a', 'b', 'c', 'd']} >>> pd.DataFrame.from_dict(data) col_1 col_2 0 3 a 1 2 b 2 1 c 3 0 d Specify ``orient='index'`` to create the DataFrame using dictionary keys as rows: >>> data = {'row_1': [3, 2, 1, 0], 'row_2': ['a', 'b', 'c', 'd']} >>> pd.DataFrame.from_dict(data, orient='index') 0 1 2 3 row_1 3 2 1 0 row_2 a b c d When using the 'index' orientation, the column names can be specified manually: >>> pd.DataFrame.from_dict(data, orient='index', ... columns=['A', 'B', 'C', 'D']) A B C D row_1 3 2 1 0 row_2 a b c d """ index = None orient = orient.lower() if orient == 'index': if len(data) > 0: # TODO speed up Series case if isinstance(list(data.values())[0], (Series, dict)): data = _from_nested_dict(data) else: data, index = list(data.values()), list(data.keys()) elif orient == 'columns': if columns is not None: raise ValueError("cannot use columns parameter with " "orient='columns'") else: # pragma: no cover raise ValueError('only recognize index or columns for orient') return cls(data, index=index, columns=columns, dtype=dtype) def to_numpy(self, dtype=None, copy=False): """ Convert the DataFrame to a NumPy array. .. versionadded:: 0.24.0 By default, the dtype of the returned array will be the common NumPy dtype of all types in the DataFrame. For example, if the dtypes are ``float16`` and ``float32``, the results dtype will be ``float32``. This may require copying data and coercing values, which may be expensive. Parameters ---------- dtype : str or numpy.dtype, optional The dtype to pass to :meth:`numpy.asarray` copy : bool, default False Whether to ensure that the returned value is a not a view on another array. Note that ``copy=False`` does not *ensure* that ``to_numpy()`` is no-copy. Rather, ``copy=True`` ensure that a copy is made, even if not strictly necessary. Returns ------- numpy.ndarray See Also -------- Series.to_numpy : Similar method for Series. Examples -------- >>> pd.DataFrame({"A": [1, 2], "B": [3, 4]}).to_numpy() array([[1, 3], [2, 4]]) With heterogenous data, the lowest common type will have to be used. >>> df = pd.DataFrame({"A": [1, 2], "B": [3.0, 4.5]}) >>> df.to_numpy() array([[1. , 3. ], [2. , 4.5]]) For a mix of numeric and non-numeric types, the output array will have object dtype. >>> df['C'] = pd.date_range('2000', periods=2) >>> df.to_numpy() array([[1, 3.0, Timestamp('2000-01-01 00:00:00')], [2, 4.5, Timestamp('2000-01-02 00:00:00')]], dtype=object) """ result = np.array(self.values, dtype=dtype, copy=copy) return result def to_dict(self, orient='dict', into=dict): """ Convert the DataFrame to a dictionary. The type of the key-value pairs can be customized with the parameters (see below). Parameters ---------- orient : str {'dict', 'list', 'series', 'split', 'records', 'index'} Determines the type of the values of the dictionary. - 'dict' (default) : dict like {column -> {index -> value}} - 'list' : dict like {column -> [values]} - 'series' : dict like {column -> Series(values)} - 'split' : dict like {'index' -> [index], 'columns' -> [columns], 'data' -> [values]} - 'records' : list like [{column -> value}, ... , {column -> value}] - 'index' : dict like {index -> {column -> value}} Abbreviations are allowed. `s` indicates `series` and `sp` indicates `split`. into : class, default dict The collections.abc.Mapping subclass used for all Mappings in the return value. 
Can be the actual class or an empty instance of the mapping type you want. If you want a collections.defaultdict, you must pass it initialized. .. versionadded:: 0.21.0 Returns ------- dict, list or collections.abc.Mapping Return a collections.abc.Mapping object representing the DataFrame. The resulting transformation depends on the `orient` parameter. See Also -------- DataFrame.from_dict: Create a DataFrame from a dictionary. DataFrame.to_json: Convert a DataFrame to JSON format. Examples -------- >>> df = pd.DataFrame({'col1': [1, 2], ... 'col2': [0.5, 0.75]}, ... index=['row1', 'row2']) >>> df col1 col2 row1 1 0.50 row2 2 0.75 >>> df.to_dict() {'col1': {'row1': 1, 'row2': 2}, 'col2': {'row1': 0.5, 'row2': 0.75}} You can specify the return orientation. >>> df.to_dict('series') {'col1': row1 1 row2 2 Name: col1, dtype: int64, 'col2': row1 0.50 row2 0.75 Name: col2, dtype: float64} >>> df.to_dict('split') {'index': ['row1', 'row2'], 'columns': ['col1', 'col2'], 'data': [[1, 0.5], [2, 0.75]]} >>> df.to_dict('records') [{'col1': 1, 'col2': 0.5}, {'col1': 2, 'col2': 0.75}] >>> df.to_dict('index') {'row1': {'col1': 1, 'col2': 0.5}, 'row2': {'col1': 2, 'col2': 0.75}} You can also specify the mapping type. >>> from collections import OrderedDict, defaultdict >>> df.to_dict(into=OrderedDict) OrderedDict([('col1', OrderedDict([('row1', 1), ('row2', 2)])), ('col2', OrderedDict([('row1', 0.5), ('row2', 0.75)]))]) If you want a `defaultdict`, you need to initialize it: >>> dd = defaultdict(list) >>> df.to_dict('records', into=dd) [defaultdict(<class 'list'>, {'col1': 1, 'col2': 0.5}), defaultdict(<class 'list'>, {'col1': 2, 'col2': 0.75})] """ if not self.columns.is_unique: warnings.warn("DataFrame columns are not unique, some " "columns will be omitted.", UserWarning, stacklevel=2) # GH16122 into_c = com.standardize_mapping(into) if orient.lower().startswith('d'): return into_c( (k, v.to_dict(into)) for k, v in self.items()) elif orient.lower().startswith('l'): return into_c((k, v.tolist()) for k, v in self.items()) elif orient.lower().startswith('sp'): return into_c((('index', self.index.tolist()), ('columns', self.columns.tolist()), ('data', [ list(map(com.maybe_box_datetimelike, t)) for t in self.itertuples(index=False, name=None) ]))) elif orient.lower().startswith('s'): return into_c((k, com.maybe_box_datetimelike(v)) for k, v in self.items()) elif orient.lower().startswith('r'): columns = self.columns.tolist() rows = (dict(zip(columns, row)) for row in self.itertuples(index=False, name=None)) return [ into_c((k, com.maybe_box_datetimelike(v)) for k, v in row.items()) for row in rows] elif orient.lower().startswith('i'): if not self.index.is_unique: raise ValueError( "DataFrame index must be unique for orient='index'." ) return into_c((t[0], dict(zip(self.columns, t[1:]))) for t in self.itertuples(name=None)) else: raise ValueError("orient '{o}' not understood".format(o=orient)) def to_gbq(self, destination_table, project_id=None, chunksize=None, reauth=False, if_exists='fail', auth_local_webserver=False, table_schema=None, location=None, progress_bar=True, credentials=None, verbose=None, private_key=None): """ Write a DataFrame to a Google BigQuery table. This function requires the `pandas-gbq package <https://pandas-gbq.readthedocs.io>`__. See the `How to authenticate with Google BigQuery <https://pandas-gbq.readthedocs.io/en/latest/howto/authentication.html>`__ guide for authentication instructions. 
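# Stepping back to to_dict above: its orient dispatch matches by prefix
# (str.startswith), so the documented abbreviations are accepted:
import pandas as pd

df = pd.DataFrame({'col1': [1, 2], 'col2': [0.5, 0.75]})
assert df.to_dict('r') == df.to_dict('records')
assert df.to_dict('sp') == df.to_dict('split')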
Parameters ---------- destination_table : str Name of table to be written, in the form ``dataset.tablename``. project_id : str, optional Google BigQuery Account project ID. Optional when available from the environment. chunksize : int, optional Number of rows to be inserted in each chunk from the dataframe. Set to ``None`` to load the whole dataframe at once. reauth : bool, default False Force Google BigQuery to re-authenticate the user. This is useful if multiple accounts are used. if_exists : str, default 'fail' Behavior when the destination table exists. Value can be one of: ``'fail'`` If table exists, do nothing. ``'replace'`` If table exists, drop it, recreate it, and insert data. ``'append'`` If table exists, insert data. Create if does not exist. auth_local_webserver : bool, default False Use the `local webserver flow`_ instead of the `console flow`_ when getting user credentials. .. _local webserver flow: http://google-auth-oauthlib.readthedocs.io/en/latest/reference/google_auth_oauthlib.flow.html#google_auth_oauthlib.flow.InstalledAppFlow.run_local_server .. _console flow: http://google-auth-oauthlib.readthedocs.io/en/latest/reference/google_auth_oauthlib.flow.html#google_auth_oauthlib.flow.InstalledAppFlow.run_console *New in version 0.2.0 of pandas-gbq*. table_schema : list of dicts, optional List of BigQuery table fields to which according DataFrame columns conform to, e.g. ``[{'name': 'col1', 'type': 'STRING'},...]``. If schema is not provided, it will be generated according to dtypes of DataFrame columns. See BigQuery API documentation on available names of a field. *New in version 0.3.1 of pandas-gbq*. location : str, optional Location where the load job should run. See the `BigQuery locations documentation <https://cloud.google.com/bigquery/docs/dataset-locations>`__ for a list of available locations. The location must match that of the target dataset. *New in version 0.5.0 of pandas-gbq*. progress_bar : bool, default True Use the library `tqdm` to show the progress bar for the upload, chunk by chunk. *New in version 0.5.0 of pandas-gbq*. credentials : google.auth.credentials.Credentials, optional Credentials for accessing Google APIs. Use this parameter to override default credentials, such as to use Compute Engine :class:`google.auth.compute_engine.Credentials` or Service Account :class:`google.oauth2.service_account.Credentials` directly. *New in version 0.8.0 of pandas-gbq*. .. versionadded:: 0.24.0 verbose : bool, deprecated Deprecated in pandas-gbq version 0.4.0. Use the `logging module to adjust verbosity instead <https://pandas-gbq.readthedocs.io/en/latest/intro.html#logging>`__. private_key : str, deprecated Deprecated in pandas-gbq version 0.8.0. Use the ``credentials`` parameter and :func:`google.oauth2.service_account.Credentials.from_service_account_info` or :func:`google.oauth2.service_account.Credentials.from_service_account_file` instead. Service account private key in JSON format. Can be file path or string contents. This is useful for remote server authentication (eg. Jupyter/IPython notebook on remote host). See Also -------- pandas_gbq.to_gbq : This function in the pandas-gbq library. read_gbq : Read a DataFrame from Google BigQuery. 
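# A sketch of a typical to_gbq call; the dataset/table/project names are
# placeholders, and the call needs the pandas-gbq package plus valid Google
# credentials, hence the skip marker:
import pandas as pd

df = pd.DataFrame({'col1': [1, 2]})
df.to_gbq('my_dataset.my_table', project_id='my-gcp-project',
          if_exists='append')  # doctest: +SKIP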
""" from pandas.io import gbq gbq.to_gbq(self, destination_table, project_id=project_id, chunksize=chunksize, reauth=reauth, if_exists=if_exists, auth_local_webserver=auth_local_webserver, table_schema=table_schema, location=location, progress_bar=progress_bar, credentials=credentials, verbose=verbose, private_key=private_key) @classmethod def from_records(cls, data, index=None, exclude=None, columns=None, coerce_float=False, nrows=None): """ Convert structured or record ndarray to DataFrame. Parameters ---------- data : ndarray (structured dtype), list of tuples, dict, or DataFrame index : string, list of fields, array-like Field of array to use as the index, alternately a specific set of input labels to use exclude : sequence, default None Columns or fields to exclude columns : sequence, default None Column names to use. If the passed data do not have names associated with them, this argument provides names for the columns. Otherwise this argument indicates the order of the columns in the result (any names not found in the data will become all-NA columns) coerce_float : boolean, default False Attempt to convert values of non-string, non-numeric objects (like decimal.Decimal) to floating point, useful for SQL result sets nrows : int, default None Number of rows to read if data is an iterator Returns ------- DataFrame """ # Make a copy of the input columns so we can modify it if columns is not None: columns = ensure_index(columns) if is_iterator(data): if nrows == 0: return cls() try: first_row = next(data) except StopIteration: return cls(index=index, columns=columns) dtype = None if hasattr(first_row, 'dtype') and first_row.dtype.names: dtype = first_row.dtype values = [first_row] if nrows is None: values += data else: values.extend(itertools.islice(data, nrows - 1)) if dtype is not None: data = np.array(values, dtype=dtype) else: data = values if isinstance(data, dict): if columns is None: columns = arr_columns = ensure_index(sorted(data)) arrays = [data[k] for k in columns] else: arrays = [] arr_columns = [] for k, v in data.items(): if k in columns: arr_columns.append(k) arrays.append(v) arrays, arr_columns = reorder_arrays(arrays, arr_columns, columns) elif isinstance(data, (np.ndarray, DataFrame)): arrays, columns = to_arrays(data, columns) if columns is not None: columns = ensure_index(columns) arr_columns = columns else: arrays, arr_columns = to_arrays(data, columns, coerce_float=coerce_float) arr_columns = ensure_index(arr_columns) if columns is not None: columns = ensure_index(columns) else: columns = arr_columns if exclude is None: exclude = set() else: exclude = set(exclude) result_index = None if index is not None: if (isinstance(index, str) or not hasattr(index, "__iter__")): i = columns.get_loc(index) exclude.add(index) if len(arrays) > 0: result_index = Index(arrays[i], name=index) else: result_index = Index([], name=index) else: try: index_data = [arrays[arr_columns.get_loc(field)] for field in index] result_index = ensure_index_from_sequences(index_data, names=index) exclude.update(index) except Exception: result_index = index if any(exclude): arr_exclude = [x for x in exclude if x in arr_columns] to_remove = [arr_columns.get_loc(col) for col in arr_exclude] arrays = [v for i, v in enumerate(arrays) if i not in to_remove] arr_columns = arr_columns.drop(arr_exclude) columns = columns.drop(exclude) mgr = arrays_to_mgr(arrays, arr_columns, result_index, columns) return cls(mgr) def to_records(self, index=True, convert_datetime64=None, column_dtypes=None, index_dtypes=None): 
""" Convert DataFrame to a NumPy record array. Index will be included as the first field of the record array if requested. Parameters ---------- index : bool, default True Include index in resulting record array, stored in 'index' field or using the index label, if set. convert_datetime64 : bool, default None .. deprecated:: 0.23.0 Whether to convert the index to datetime.datetime if it is a DatetimeIndex. column_dtypes : str, type, dict, default None .. versionadded:: 0.24.0 If a string or type, the data type to store all columns. If a dictionary, a mapping of column names and indices (zero-indexed) to specific data types. index_dtypes : str, type, dict, default None .. versionadded:: 0.24.0 If a string or type, the data type to store all index levels. If a dictionary, a mapping of index level names and indices (zero-indexed) to specific data types. This mapping is applied only if `index=True`. Returns ------- numpy.recarray NumPy ndarray with the DataFrame labels as fields and each row of the DataFrame as entries. See Also -------- DataFrame.from_records: Convert structured or record ndarray to DataFrame. numpy.recarray: An ndarray that allows field access using attributes, analogous to typed columns in a spreadsheet. Examples -------- >>> df = pd.DataFrame({'A': [1, 2], 'B': [0.5, 0.75]}, ... index=['a', 'b']) >>> df A B a 1 0.50 b 2 0.75 >>> df.to_records() rec.array([('a', 1, 0.5 ), ('b', 2, 0.75)], dtype=[('index', 'O'), ('A', '<i8'), ('B', '<f8')]) If the DataFrame index has no label then the recarray field name is set to 'index'. If the index has a label then this is used as the field name: >>> df.index = df.index.rename("I") >>> df.to_records() rec.array([('a', 1, 0.5 ), ('b', 2, 0.75)], dtype=[('I', 'O'), ('A', '<i8'), ('B', '<f8')]) The index can be excluded from the record array: >>> df.to_records(index=False) rec.array([(1, 0.5 ), (2, 0.75)], dtype=[('A', '<i8'), ('B', '<f8')]) Data types can be specified for the columns: >>> df.to_records(column_dtypes={"A": "int32"}) rec.array([('a', 1, 0.5 ), ('b', 2, 0.75)], dtype=[('I', 'O'), ('A', '<i4'), ('B', '<f8')]) As well as for the index: >>> df.to_records(index_dtypes="<S2") rec.array([(b'a', 1, 0.5 ), (b'b', 2, 0.75)], dtype=[('I', 'S2'), ('A', '<i8'), ('B', '<f8')]) >>> index_dtypes = "<S{}".format(df.index.str.len().max()) >>> df.to_records(index_dtypes=index_dtypes) rec.array([(b'a', 1, 0.5 ), (b'b', 2, 0.75)], dtype=[('I', 'S1'), ('A', '<i8'), ('B', '<f8')]) """ if convert_datetime64 is not None: warnings.warn("The 'convert_datetime64' parameter is " "deprecated and will be removed in a future " "version", FutureWarning, stacklevel=2) if index: if is_datetime64_any_dtype(self.index) and convert_datetime64: ix_vals = [self.index.to_pydatetime()] else: if isinstance(self.index, MultiIndex): # array of tuples to numpy cols. 
copy copy copy ix_vals = list(map(np.array, zip(*self.index.values))) else: ix_vals = [self.index.values] arrays = ix_vals + [self[c].get_values() for c in self.columns] count = 0 index_names = list(self.index.names) if isinstance(self.index, MultiIndex): for i, n in enumerate(index_names): if n is None: index_names[i] = 'level_%d' % count count += 1 elif index_names[0] is None: index_names = ['index'] names = [str(name) for name in itertools.chain(index_names, self.columns)] else: arrays = [self[c].get_values() for c in self.columns] names = [str(c) for c in self.columns] index_names = [] index_len = len(index_names) formats = [] for i, v in enumerate(arrays): index = i # When the names and arrays are collected, we # first collect those in the DataFrame's index, # followed by those in its columns. # # Thus, the total length of the array is: # len(index_names) + len(DataFrame.columns). # # This check allows us to see whether we are # handling a name / array in the index or column. if index < index_len: dtype_mapping = index_dtypes name = index_names[index] else: index -= index_len dtype_mapping = column_dtypes name = self.columns[index] # We have a dictionary, so we get the data type # associated with the index or column (which can # be denoted by its name in the DataFrame or its # position in DataFrame's array of indices or # columns, whichever is applicable. if is_dict_like(dtype_mapping): if name in dtype_mapping: dtype_mapping = dtype_mapping[name] elif index in dtype_mapping: dtype_mapping = dtype_mapping[index] else: dtype_mapping = None # If no mapping can be found, use the array's # dtype attribute for formatting. # # A valid dtype must either be a type or # string naming a type. if dtype_mapping is None: formats.append(v.dtype) elif isinstance(dtype_mapping, (type, np.dtype, str)): formats.append(dtype_mapping) else: element = "row" if i < index_len else "column" msg = ("Invalid dtype {dtype} specified for " "{element} {name}").format(dtype=dtype_mapping, element=element, name=name) raise ValueError(msg) return np.rec.fromarrays( arrays, dtype={'names': names, 'formats': formats} ) @classmethod def from_items(cls, items, columns=None, orient='columns'): """ Construct a DataFrame from a list of tuples. .. deprecated:: 0.23.0 `from_items` is deprecated and will be removed in a future version. Use :meth:`DataFrame.from_dict(dict(items)) <DataFrame.from_dict>` instead. :meth:`DataFrame.from_dict(OrderedDict(items)) <DataFrame.from_dict>` may be used to preserve the key order. Convert (key, value) pairs to DataFrame. The keys will be the axis index (usually the columns, but depends on the specified orientation). The values should be arrays or Series. Parameters ---------- items : sequence of (key, value) pairs Values should be arrays or Series. columns : sequence of column labels, optional Must be passed if orient='index'. orient : {'columns', 'index'}, default 'columns' The "orientation" of the data. If the keys of the input correspond to column labels, pass 'columns' (default). Otherwise if the keys correspond to the index, pass 'index'. Returns ------- DataFrame """ warnings.warn("from_items is deprecated. Please use " "DataFrame.from_dict(dict(items), ...) instead. 
" "DataFrame.from_dict(OrderedDict(items)) may be used to " "preserve the key order.", FutureWarning, stacklevel=2) keys, values = zip(*items) if orient == 'columns': if columns is not None: columns = ensure_index(columns) idict = dict(items) if len(idict) < len(items): if not columns.equals(ensure_index(keys)): raise ValueError('With non-unique item names, passed ' 'columns must be identical') arrays = values else: arrays = [idict[k] for k in columns if k in idict] else: columns = ensure_index(keys) arrays = values # GH 17312 # Provide more informative error msg when scalar values passed try: return cls._from_arrays(arrays, columns, None) except ValueError: if not is_nested_list_like(values): raise ValueError('The value in each (key, value) pair ' 'must be an array, Series, or dict') elif orient == 'index': if columns is None: raise TypeError("Must pass columns with orient='index'") keys = ensure_index(keys) # GH 17312 # Provide more informative error msg when scalar values passed try: arr = np.array(values, dtype=object).T data = [lib.maybe_convert_objects(v) for v in arr] return cls._from_arrays(data, columns, keys) except TypeError: if not is_nested_list_like(values): raise ValueError('The value in each (key, value) pair ' 'must be an array, Series, or dict') else: # pragma: no cover raise ValueError("'orient' must be either 'columns' or 'index'") @classmethod def _from_arrays(cls, arrays, columns, index, dtype=None): mgr = arrays_to_mgr(arrays, columns, index, columns, dtype=dtype) return cls(mgr) @classmethod def from_csv(cls, path, header=0, sep=',', index_col=0, parse_dates=True, encoding=None, tupleize_cols=None, infer_datetime_format=False): """ Read CSV file. .. deprecated:: 0.21.0 Use :func:`read_csv` instead. It is preferable to use the more powerful :func:`read_csv` for most general purposes, but ``from_csv`` makes for an easy roundtrip to and from a file (the exact counterpart of ``to_csv``), especially with a DataFrame of time series data. This method only differs from the preferred :func:`read_csv` in some defaults: - `index_col` is ``0`` instead of ``None`` (take first column as index by default) - `parse_dates` is ``True`` instead of ``False`` (try parsing the index as datetime by default) So a ``pd.DataFrame.from_csv(path)`` can be replaced by ``pd.read_csv(path, index_col=0, parse_dates=True)``. Parameters ---------- path : string file path or file handle / StringIO header : int, default 0 Row to use as header (skip prior rows) sep : string, default ',' Field delimiter index_col : int or sequence, default 0 Column to use for index. If a sequence is given, a MultiIndex is used. Different default from read_table parse_dates : boolean, default True Parse dates. Different default from read_table tupleize_cols : boolean, default False write multi_index columns as a list of tuples (if True) or new (expanded format) if False) infer_datetime_format : boolean, default False If True and `parse_dates` is True for a column, try to infer the datetime format based on the first datetime string. If the format can be inferred, there often will be a large parsing speed-up. Returns ------- DataFrame See Also -------- read_csv """ warnings.warn("from_csv is deprecated. Please use read_csv(...) " "instead. 
Note that some of the default arguments are " "different, so please refer to the documentation " "for from_csv when changing your function calls", FutureWarning, stacklevel=2) from pandas.io.parsers import read_csv return read_csv(path, header=header, sep=sep, parse_dates=parse_dates, index_col=index_col, encoding=encoding, tupleize_cols=tupleize_cols, infer_datetime_format=infer_datetime_format) def to_sparse(self, fill_value=None, kind='block'): """ Convert to SparseDataFrame. .. deprecated:: 0.25.0 Implement the sparse version of the DataFrame meaning that any data matching a specific value it's omitted in the representation. The sparse DataFrame allows for a more efficient storage. Parameters ---------- fill_value : float, default None The specific value that should be omitted in the representation. kind : {'block', 'integer'}, default 'block' The kind of the SparseIndex tracking where data is not equal to the fill value: - 'block' tracks only the locations and sizes of blocks of data. - 'integer' keeps an array with all the locations of the data. In most cases 'block' is recommended, since it's more memory efficient. Returns ------- SparseDataFrame The sparse representation of the DataFrame. See Also -------- DataFrame.to_dense : Converts the DataFrame back to the its dense form. Examples -------- >>> df = pd.DataFrame([(np.nan, np.nan), ... (1., np.nan), ... (np.nan, 1.)]) >>> df 0 1 0 NaN NaN 1 1.0 NaN 2 NaN 1.0 >>> type(df) <class 'pandas.core.frame.DataFrame'> >>> sdf = df.to_sparse() # doctest: +SKIP >>> sdf # doctest: +SKIP 0 1 0 NaN NaN 1 1.0 NaN 2 NaN 1.0 >>> type(sdf) # doctest: +SKIP <class 'pandas.core.sparse.frame.SparseDataFrame'> """ warnings.warn("DataFrame.to_sparse is deprecated and will be removed " "in a future version", FutureWarning, stacklevel=2) from pandas.core.sparse.api import SparseDataFrame with warnings.catch_warnings(): warnings.filterwarnings("ignore", message="SparseDataFrame") return SparseDataFrame(self._series, index=self.index, columns=self.columns, default_kind=kind, default_fill_value=fill_value) @deprecate_kwarg(old_arg_name='encoding', new_arg_name=None) def to_stata(self, fname, convert_dates=None, write_index=True, encoding="latin-1", byteorder=None, time_stamp=None, data_label=None, variable_labels=None, version=114, convert_strl=None): """ Export DataFrame object to Stata dta format. Writes the DataFrame to a Stata dataset file. "dta" files contain a Stata dataset. Parameters ---------- fname : str, buffer or path object String, path object (pathlib.Path or py._path.local.LocalPath) or object implementing a binary write() function. If using a buffer then the buffer will not be automatically closed after the file data has been written. convert_dates : dict Dictionary mapping columns containing datetime types to stata internal format to use when writing the dates. Options are 'tc', 'td', 'tm', 'tw', 'th', 'tq', 'ty'. Column can be either an integer or a name. Datetime columns that do not have a conversion type specified will be converted to 'tc'. Raises NotImplementedError if a datetime column has timezone information. write_index : bool Write the index to Stata dataset. encoding : str Default is latin-1. Unicode is not supported. byteorder : str Can be ">", "<", "little", or "big". default is `sys.byteorder`. time_stamp : datetime A datetime to use as file creation date. Default is the current time. data_label : str, optional A label for the data set. Must be 80 characters or smaller. 
variable_labels : dict Dictionary containing columns as keys and variable labels as values. Each label must be 80 characters or smaller. .. versionadded:: 0.19.0 version : {114, 117}, default 114 Version to use in the output dta file. Version 114 can be used read by Stata 10 and later. Version 117 can be read by Stata 13 or later. Version 114 limits string variables to 244 characters or fewer while 117 allows strings with lengths up to 2,000,000 characters. .. versionadded:: 0.23.0 convert_strl : list, optional List of column names to convert to string columns to Stata StrL format. Only available if version is 117. Storing strings in the StrL format can produce smaller dta files if strings have more than 8 characters and values are repeated. .. versionadded:: 0.23.0 Raises ------ NotImplementedError * If datetimes contain timezone information * Column dtype is not representable in Stata ValueError * Columns listed in convert_dates are neither datetime64[ns] or datetime.datetime * Column listed in convert_dates is not in DataFrame * Categorical label contains more than 32,000 characters .. versionadded:: 0.19.0 See Also -------- read_stata : Import Stata data files. io.stata.StataWriter : Low-level writer for Stata data files. io.stata.StataWriter117 : Low-level writer for version 117 files. Examples -------- >>> df = pd.DataFrame({'animal': ['falcon', 'parrot', 'falcon', ... 'parrot'], ... 'speed': [350, 18, 361, 15]}) >>> df.to_stata('animals.dta') # doctest: +SKIP """ kwargs = {} if version not in (114, 117): raise ValueError('Only formats 114 and 117 supported.') if version == 114: if convert_strl is not None: raise ValueError('strl support is only available when using ' 'format 117') from pandas.io.stata import StataWriter as statawriter else: from pandas.io.stata import StataWriter117 as statawriter kwargs['convert_strl'] = convert_strl writer = statawriter(fname, self, convert_dates=convert_dates, byteorder=byteorder, time_stamp=time_stamp, data_label=data_label, write_index=write_index, variable_labels=variable_labels, **kwargs) writer.write_file() def to_feather(self, fname): """ Write out the binary feather-format for DataFrames. .. versionadded:: 0.20.0 Parameters ---------- fname : str string file path """ from pandas.io.feather_format import to_feather to_feather(self, fname) def to_parquet(self, fname, engine='auto', compression='snappy', index=None, partition_cols=None, **kwargs): """ Write a DataFrame to the binary parquet format. .. versionadded:: 0.21.0 This function writes the dataframe as a `parquet file <https://parquet.apache.org/>`_. You can choose different parquet backends, and have the option of compression. See :ref:`the user guide <io.parquet>` for more details. Parameters ---------- fname : str File path or Root Directory path. Will be used as Root Directory path while writing a partitioned dataset. .. versionchanged:: 0.24.0 engine : {'auto', 'pyarrow', 'fastparquet'}, default 'auto' Parquet library to use. If 'auto', then the option ``io.parquet.engine`` is used. The default ``io.parquet.engine`` behavior is to try 'pyarrow', falling back to 'fastparquet' if 'pyarrow' is unavailable. compression : {'snappy', 'gzip', 'brotli', None}, default 'snappy' Name of the compression to use. Use ``None`` for no compression. index : bool, default None If ``True``, include the dataframe's index(es) in the file output. If ``False``, they will not be written to the file. If ``None``, the behavior depends on the chosen engine. .. 
versionadded:: 0.24.0 partition_cols : list, optional, default None Column names by which to partition the dataset Columns are partitioned in the order they are given .. versionadded:: 0.24.0 **kwargs Additional arguments passed to the parquet library. See :ref:`pandas io <io.parquet>` for more details. See Also -------- read_parquet : Read a parquet file. DataFrame.to_csv : Write a csv file. DataFrame.to_sql : Write to a sql table. DataFrame.to_hdf : Write to hdf. Notes ----- This function requires either the `fastparquet <https://pypi.org/project/fastparquet>`_ or `pyarrow <https://arrow.apache.org/docs/python/>`_ library. Examples -------- >>> df = pd.DataFrame(data={'col1': [1, 2], 'col2': [3, 4]}) >>> df.to_parquet('df.parquet.gzip', ... compression='gzip') # doctest: +SKIP >>> pd.read_parquet('df.parquet.gzip') # doctest: +SKIP col1 col2 0 1 3 1 2 4 """ from pandas.io.parquet import to_parquet to_parquet(self, fname, engine, compression=compression, index=index, partition_cols=partition_cols, **kwargs) @Substitution(header='Whether to print column labels, default True', col_space_type='str or int', col_space='The minimum width of each column in CSS length ' 'units. An int is assumed to be px units.\n\n' ' .. versionadded:: 0.25.0\n' ' Ability to use str') @Substitution(shared_params=fmt.common_docstring, returns=fmt.return_docstring) def to_html(self, buf=None, columns=None, col_space=None, header=True, index=True, na_rep='NaN', formatters=None, float_format=None, sparsify=None, index_names=True, justify=None, max_rows=None, max_cols=None, show_dimensions=False, decimal='.', bold_rows=True, classes=None, escape=True, notebook=False, border=None, table_id=None, render_links=False): """ Render a DataFrame as an HTML table. %(shared_params)s bold_rows : bool, default True Make the row labels bold in the output. classes : str or list or tuple, default None CSS class(es) to apply to the resulting html table. escape : bool, default True Convert the characters <, >, and & to HTML-safe sequences. notebook : {True, False}, default False Whether the generated HTML is for IPython Notebook. border : int A ``border=border`` attribute is included in the opening `<table>` tag. Default ``pd.options.display.html.border``. .. versionadded:: 0.19.0 table_id : str, optional A css id is included in the opening `<table>` tag if specified. .. versionadded:: 0.23.0 render_links : bool, default False Convert URLs to HTML links. .. versionadded:: 0.24.0 %(returns)s See Also -------- to_string : Convert DataFrame to a string. """ if (justify is not None and justify not in fmt._VALID_JUSTIFY_PARAMETERS): raise ValueError("Invalid value for justify parameter") formatter = fmt.DataFrameFormatter(self, buf=buf, columns=columns, col_space=col_space, na_rep=na_rep, formatters=formatters, float_format=float_format, sparsify=sparsify, justify=justify, index_names=index_names, header=header, index=index, bold_rows=bold_rows, escape=escape, max_rows=max_rows, max_cols=max_cols, show_dimensions=show_dimensions, decimal=decimal, table_id=table_id, render_links=render_links) # TODO: a generic formatter wld b in DataFrameFormatter formatter.to_html(classes=classes, notebook=notebook, border=border) if buf is None: return formatter.buf.getvalue() # ---------------------------------------------------------------------- def info(self, verbose=None, buf=None, max_cols=None, memory_usage=None, null_counts=None): """ Print a concise summary of a DataFrame. 
This method prints information about a DataFrame including the index dtype and column dtypes, non-null values and memory usage. Parameters ---------- verbose : bool, optional Whether to print the full summary. By default, the setting in ``pandas.options.display.max_info_columns`` is followed. buf : writable buffer, defaults to sys.stdout Where to send the output. By default, the output is printed to sys.stdout. Pass a writable buffer if you need to further process the output. max_cols : int, optional When to switch from the verbose to the truncated output. If the DataFrame has more than `max_cols` columns, the truncated output is used. By default, the setting in ``pandas.options.display.max_info_columns`` is used. memory_usage : bool, str, optional Specifies whether total memory usage of the DataFrame elements (including the index) should be displayed. By default, this follows the ``pandas.options.display.memory_usage`` setting. True always shows memory usage. False never shows memory usage. A value of 'deep' is equivalent to "True with deep introspection". Memory usage is shown in human-readable units (base-2 representation). Without deep introspection a memory estimation is made based on column dtype and number of rows assuming values consume the same memory amount for corresponding dtypes. With deep memory introspection, a real memory usage calculation is performed at the cost of computational resources. null_counts : bool, optional Whether to show the non-null counts. By default, this is shown only if the frame is smaller than ``pandas.options.display.max_info_rows`` and ``pandas.options.display.max_info_columns``. A value of True always shows the counts, and False never shows the counts. Returns ------- None This method prints a summary of a DataFrame and returns None. See Also -------- DataFrame.describe: Generate descriptive statistics of DataFrame columns. DataFrame.memory_usage: Memory usage of DataFrame columns. Examples -------- >>> int_values = [1, 2, 3, 4, 5] >>> text_values = ['alpha', 'beta', 'gamma', 'delta', 'epsilon'] >>> float_values = [0.0, 0.25, 0.5, 0.75, 1.0] >>> df = pd.DataFrame({"int_col": int_values, "text_col": text_values, ... "float_col": float_values}) >>> df int_col text_col float_col 0 1 alpha 0.00 1 2 beta 0.25 2 3 gamma 0.50 3 4 delta 0.75 4 5 epsilon 1.00 Prints information of all columns: >>> df.info(verbose=True) <class 'pandas.core.frame.DataFrame'> RangeIndex: 5 entries, 0 to 4 Data columns (total 3 columns): int_col 5 non-null int64 text_col 5 non-null object float_col 5 non-null float64 dtypes: float64(1), int64(1), object(1) memory usage: 248.0+ bytes Prints a summary of the column count and dtypes but not per-column information: >>> df.info(verbose=False) <class 'pandas.core.frame.DataFrame'> RangeIndex: 5 entries, 0 to 4 Columns: 3 entries, int_col to float_col dtypes: float64(1), int64(1), object(1) memory usage: 248.0+ bytes Pipe the output of DataFrame.info to a buffer instead of sys.stdout, get the buffer content and write it to a text file: >>> import io >>> buffer = io.StringIO() >>> df.info(buf=buffer) >>> s = buffer.getvalue() >>> with open("df_info.txt", "w", ... encoding="utf-8") as f: # doctest: +SKIP ... f.write(s) 260 The `memory_usage` parameter allows deep introspection mode, especially useful for big DataFrames and fine-tuning memory optimization: >>> random_strings_array = np.random.choice(['a', 'b', 'c'], 10 ** 6) >>> df = pd.DataFrame({ ... 'column_1': np.random.choice(['a', 'b', 'c'], 10 ** 6), ...
'column_2': np.random.choice(['a', 'b', 'c'], 10 ** 6), ... 'column_3': np.random.choice(['a', 'b', 'c'], 10 ** 6) ... }) >>> df.info() <class 'pandas.core.frame.DataFrame'> RangeIndex: 1000000 entries, 0 to 999999 Data columns (total 3 columns): column_1 1000000 non-null object column_2 1000000 non-null object column_3 1000000 non-null object dtypes: object(3) memory usage: 22.9+ MB >>> df.info(memory_usage='deep') <class 'pandas.core.frame.DataFrame'> RangeIndex: 1000000 entries, 0 to 999999 Data columns (total 3 columns): column_1 1000000 non-null object column_2 1000000 non-null object column_3 1000000 non-null object dtypes: object(3) memory usage: 188.8 MB """ if buf is None: # pragma: no cover buf = sys.stdout lines = [] lines.append(str(type(self))) lines.append(self.index._summary()) if len(self.columns) == 0: lines.append('Empty {name}'.format(name=type(self).__name__)) fmt.buffer_put_lines(buf, lines) return cols = self.columns # hack if max_cols is None: max_cols = get_option('display.max_info_columns', len(self.columns) + 1) max_rows = get_option('display.max_info_rows', len(self) + 1) if null_counts is None: show_counts = ((len(self.columns) <= max_cols) and (len(self) < max_rows)) else: show_counts = null_counts exceeds_info_cols = len(self.columns) > max_cols def _verbose_repr(): lines.append('Data columns (total %d columns):' % len(self.columns)) space = max(len(pprint_thing(k)) for k in self.columns) + 4 counts = None tmpl = "{count}{dtype}" if show_counts: counts = self.count() if len(cols) != len(counts): # pragma: no cover raise AssertionError( 'Columns must equal counts ' '({cols:d} != {counts:d})'.format( cols=len(cols), counts=len(counts))) tmpl = "{count} non-null {dtype}" dtypes = self.dtypes for i, col in enumerate(self.columns): dtype = dtypes.iloc[i] col = pprint_thing(col) count = "" if show_counts: count = counts.iloc[i] lines.append(_put_str(col, space) + tmpl.format(count=count, dtype=dtype)) def _non_verbose_repr(): lines.append(self.columns._summary(name='Columns')) def _sizeof_fmt(num, size_qualifier): # returns size in human readable format for x in ['bytes', 'KB', 'MB', 'GB', 'TB']: if num < 1024.0: return ("{num:3.1f}{size_q} " "{x}".format(num=num, size_q=size_qualifier, x=x)) num /= 1024.0 return "{num:3.1f}{size_q} {pb}".format(num=num, size_q=size_qualifier, pb='PB') if verbose: _verbose_repr() elif verbose is False: # specifically set to False, not nesc None _non_verbose_repr() else: if exceeds_info_cols: _non_verbose_repr() else: _verbose_repr() counts = self.get_dtype_counts() dtypes = ['{k}({kk:d})'.format(k=k[0], kk=k[1]) for k in sorted(counts.items())] lines.append('dtypes: {types}'.format(types=', '.join(dtypes))) if memory_usage is None: memory_usage = get_option('display.memory_usage') if memory_usage: # append memory usage of df to display size_qualifier = '' if memory_usage == 'deep': deep = True else: # size_qualifier is just a best effort; not guaranteed to catch # all cases (e.g., it misses categorical data even with object # categories) deep = False if ('object' in counts or self.index._is_memory_usage_qualified()): size_qualifier = '+' mem_usage = self.memory_usage(index=True, deep=deep).sum() lines.append("memory usage: {mem}\n".format( mem=_sizeof_fmt(mem_usage, size_qualifier))) fmt.buffer_put_lines(buf, lines) def memory_usage(self, index=True, deep=False): """ Return the memory usage of each column in bytes. The memory usage can optionally include the contribution of the index and elements of `object` dtype. 
This value is displayed in `DataFrame.info` by default. This can be suppressed by setting ``pandas.options.display.memory_usage`` to False. Parameters ---------- index : bool, default True Specifies whether to include the memory usage of the DataFrame's index in returned Series. If ``index=True``, the memory usage of the index is the first item in the output. deep : bool, default False If True, introspect the data deeply by interrogating `object` dtypes for system-level memory consumption, and include it in the returned values. Returns ------- Series A Series whose index is the original column names and whose values is the memory usage of each column in bytes. See Also -------- numpy.ndarray.nbytes : Total bytes consumed by the elements of an ndarray. Series.memory_usage : Bytes consumed by a Series. Categorical : Memory-efficient array for string values with many repeated values. DataFrame.info : Concise summary of a DataFrame. Examples -------- >>> dtypes = ['int64', 'float64', 'complex128', 'object', 'bool'] >>> data = dict([(t, np.ones(shape=5000).astype(t)) ... for t in dtypes]) >>> df = pd.DataFrame(data) >>> df.head() int64 float64 complex128 object bool 0 1 1.0 1.0+0.0j 1 True 1 1 1.0 1.0+0.0j 1 True 2 1 1.0 1.0+0.0j 1 True 3 1 1.0 1.0+0.0j 1 True 4 1 1.0 1.0+0.0j 1 True >>> df.memory_usage() Index 128 int64 40000 float64 40000 complex128 80000 object 40000 bool 5000 dtype: int64 >>> df.memory_usage(index=False) int64 40000 float64 40000 complex128 80000 object 40000 bool 5000 dtype: int64 The memory footprint of `object` dtype columns is ignored by default: >>> df.memory_usage(deep=True) Index 128 int64 40000 float64 40000 complex128 80000 object 160000 bool 5000 dtype: int64 Use a Categorical for efficient storage of an object-dtype column with many repeated values. >>> df['object'].astype('category').memory_usage(deep=True) 5216 """ result = Series([c.memory_usage(index=False, deep=deep) for col, c in self.iteritems()], index=self.columns) if index: result = Series(self.index.memory_usage(deep=deep), index=['Index']).append(result) return result def transpose(self, *args, **kwargs): """ Transpose index and columns. Reflect the DataFrame over its main diagonal by writing rows as columns and vice-versa. The property :attr:`.T` is an accessor to the method :meth:`transpose`. Parameters ---------- copy : bool, default False If True, the underlying data is copied. Otherwise (default), no copy is made if possible. *args, **kwargs Additional keywords have no effect but might be accepted for compatibility with numpy. Returns ------- DataFrame The transposed DataFrame. See Also -------- numpy.transpose : Permute the dimensions of a given array. Notes ----- Transposing a DataFrame with mixed dtypes will result in a homogeneous DataFrame with the `object` dtype. In such a case, a copy of the data is always made. Examples -------- **Square DataFrame with homogeneous dtype** >>> d1 = {'col1': [1, 2], 'col2': [3, 4]} >>> df1 = pd.DataFrame(data=d1) >>> df1 col1 col2 0 1 3 1 2 4 >>> df1_transposed = df1.T # or df1.transpose() >>> df1_transposed 0 1 col1 1 2 col2 3 4 When the dtype is homogeneous in the original DataFrame, we get a transposed DataFrame with the same dtype: >>> df1.dtypes col1 int64 col2 int64 dtype: object >>> df1_transposed.dtypes 0 int64 1 int64 dtype: object **Non-square DataFrame with mixed dtypes** >>> d2 = {'name': ['Alice', 'Bob'], ... 'score': [9.5, 8], ... 'employed': [False, True], ... 
'kids': [0, 0]} >>> df2 = pd.DataFrame(data=d2) >>> df2 name score employed kids 0 Alice 9.5 False 0 1 Bob 8.0 True 0 >>> df2_transposed = df2.T # or df2.transpose() >>> df2_transposed 0 1 name Alice Bob score 9.5 8 employed False True kids 0 0 When the DataFrame has mixed dtypes, we get a transposed DataFrame with the `object` dtype: >>> df2.dtypes name object score float64 employed bool kids int64 dtype: object >>> df2_transposed.dtypes 0 object 1 object dtype: object """ nv.validate_transpose(args, dict()) return super().transpose(1, 0, **kwargs) T = property(transpose) # ---------------------------------------------------------------------- # Picklability # legacy pickle formats def _unpickle_frame_compat(self, state): # pragma: no cover if len(state) == 2: # pragma: no cover series, idx = state columns = sorted(series) else: series, cols, idx = state columns = com._unpickle_array(cols) index = com._unpickle_array(idx) self._data = self._init_dict(series, index, columns, None) def _unpickle_matrix_compat(self, state): # pragma: no cover # old unpickling (vals, idx, cols), object_state = state index = com._unpickle_array(idx) dm = DataFrame(vals, index=index, columns=com._unpickle_array(cols), copy=False) if object_state is not None: ovals, _, ocols = object_state objects = DataFrame(ovals, index=index, columns=com._unpickle_array(ocols), copy=False) dm = dm.join(objects) self._data = dm._data # ---------------------------------------------------------------------- # Getting and setting elements def get_value(self, index, col, takeable=False): """ Quickly retrieve single value at passed column and index. .. deprecated:: 0.21.0 Use .at[] or .iat[] accessors instead. Parameters ---------- index : row label col : column label takeable : interpret the index/col as indexers, default False Returns ------- scalar """ warnings.warn("get_value is deprecated and will be removed " "in a future release. Please use " ".at[] or .iat[] accessors instead", FutureWarning, stacklevel=2) return self._get_value(index, col, takeable=takeable) def _get_value(self, index, col, takeable=False): if takeable: series = self._iget_item_cache(col) return com.maybe_box_datetimelike(series._values[index]) series = self._get_item_cache(col) engine = self.index._engine try: return engine.get_value(series._values, index) except KeyError: # GH 20629 if self.index.nlevels > 1: # partial indexing forbidden raise except (TypeError, ValueError): pass # we cannot handle direct indexing # use positional col = self.columns.get_loc(col) index = self.index.get_loc(index) return self._get_value(index, col, takeable=True) _get_value.__doc__ = get_value.__doc__ def set_value(self, index, col, value, takeable=False): """ Put single value at passed column and index. .. deprecated:: 0.21.0 Use .at[] or .iat[] accessors instead. Parameters ---------- index : row label col : column label value : scalar takeable : interpret the index/col as indexers, default False Returns ------- DataFrame If label pair is contained, will be reference to calling DataFrame, otherwise a new object. """ warnings.warn("set_value is deprecated and will be removed " "in a future release. 
Please use " ".at[] or .iat[] accessors instead", FutureWarning, stacklevel=2) return self._set_value(index, col, value, takeable=takeable) def _set_value(self, index, col, value, takeable=False): try: if takeable is True: series = self._iget_item_cache(col) return series._set_value(index, value, takeable=True) series = self._get_item_cache(col) engine = self.index._engine engine.set_value(series._values, index, value) return self except (KeyError, TypeError): # set using a non-recursive method & reset the cache if takeable: self.iloc[index, col] = value else: self.loc[index, col] = value self._item_cache.pop(col, None) return self _set_value.__doc__ = set_value.__doc__ def _ixs(self, i, axis=0): """ Parameters ---------- i : int, slice, or sequence of integers axis : int Notes ----- If slice passed, the resulting data will be a view. """ # irow if axis == 0: if isinstance(i, slice): return self[i] else: label = self.index[i] if isinstance(label, Index): # a location index by definition result = self.take(i, axis=axis) copy = True else: new_values = self._data.fast_xs(i) if is_scalar(new_values): return new_values # if we are a copy, mark as such copy = (isinstance(new_values, np.ndarray) and new_values.base is None) result = self._constructor_sliced(new_values, index=self.columns, name=self.index[i], dtype=new_values.dtype) result._set_is_copy(self, copy=copy) return result # icol else: label = self.columns[i] if isinstance(i, slice): # need to return view lab_slice = slice(label[0], label[-1]) return self.loc[:, lab_slice] else: if isinstance(label, Index): return self._take(i, axis=1) index_len = len(self.index) # if the values returned are not the same length # as the index (iow a not found value), iget returns # a 0-len ndarray. This is effectively catching # a numpy error (as numpy should really raise) values = self._data.iget(i) if index_len and not len(values): values = np.array([np.nan] * index_len, dtype=object) result = self._box_col_values(values, label) # this is a cached value, mark it so result._set_as_cached(label, self) return result def __getitem__(self, key): key = lib.item_from_zerodim(key) key = com.apply_if_callable(key, self) # shortcut if the key is in columns try: if self.columns.is_unique and key in self.columns: if self.columns.nlevels > 1: return self._getitem_multilevel(key) return self._get_item_cache(key) except (TypeError, ValueError): # The TypeError correctly catches non hashable "key" (e.g. list) # The ValueError can be removed once GH #21729 is fixed pass # Do we have a slicer (on rows)? indexer = convert_to_index_sliceable(self, key) if indexer is not None: return self._slice(indexer, axis=0) # Do we have a (boolean) DataFrame? if isinstance(key, DataFrame): return self._getitem_frame(key) # Do we have a (boolean) 1d indexer? 
if com.is_bool_indexer(key): return self._getitem_bool_array(key) # We are left with two options: a single key, and a collection of keys, # We interpret tuples as collections only for non-MultiIndex is_single_key = isinstance(key, tuple) or not is_list_like(key) if is_single_key: if self.columns.nlevels > 1: return self._getitem_multilevel(key) indexer = self.columns.get_loc(key) if is_integer(indexer): indexer = [indexer] else: if is_iterator(key): key = list(key) indexer = self.loc._convert_to_indexer(key, axis=1, raise_missing=True) # take() does not accept boolean indexers if getattr(indexer, "dtype", None) == bool: indexer = np.where(indexer)[0] data = self._take(indexer, axis=1) if is_single_key: # What does looking for a single key in a non-unique index return? # The behavior is inconsistent. It returns a Series, except when # - the key itself is repeated (test on data.shape, #9519), or # - we have a MultiIndex on columns (test on self.columns, #21309) if data.shape[1] == 1 and not isinstance(self.columns, MultiIndex): data = data[key] return data def _getitem_bool_array(self, key): # also raises Exception if object array with NA values # warning here just in case -- previously __setitem__ was # reindexing but __getitem__ was not; it seems more reasonable to # go with the __setitem__ behavior since that is more consistent # with all other indexing behavior if isinstance(key, Series) and not key.index.equals(self.index): warnings.warn("Boolean Series key will be reindexed to match " "DataFrame index.", UserWarning, stacklevel=3) elif len(key) != len(self.index): raise ValueError('Item wrong length %d instead of %d.' % (len(key), len(self.index))) # check_bool_indexer will throw exception if Series key cannot # be reindexed to match DataFrame rows key = check_bool_indexer(self.index, key) indexer = key.nonzero()[0] return self._take(indexer, axis=0) def _getitem_multilevel(self, key): loc = self.columns.get_loc(key) if isinstance(loc, (slice, Series, np.ndarray, Index)): new_columns = self.columns[loc] result_columns = maybe_droplevels(new_columns, key) if self._is_mixed_type: result = self.reindex(columns=new_columns) result.columns = result_columns else: new_values = self.values[:, loc] result = self._constructor(new_values, index=self.index, columns=result_columns) result = result.__finalize__(self) # If there is only one column being returned, and its name is # either an empty string, or a tuple with an empty string as its # first element, then treat the empty string as a placeholder # and return the column as if the user had provided that empty # string in the key. If the result is a Series, exclude the # implied empty string from its name. if len(result.columns) == 1: top = result.columns[0] if isinstance(top, tuple): top = top[0] if top == '': result = result[''] if isinstance(result, Series): result = self._constructor_sliced(result, index=self.index, name=key) result._set_is_copy(self) return result else: return self._get_item_cache(key) def _getitem_frame(self, key): if key.values.size and not is_bool_dtype(key.values): raise ValueError('Must pass DataFrame with boolean values only') return self.where(key) def query(self, expr, inplace=False, **kwargs): """ Query the columns of a DataFrame with a boolean expression. Parameters ---------- expr : str The query string to evaluate. You can refer to variables in the environment by prefixing them with an '@' character like ``@a + b``. .. 
versionadded:: 0.25.0 You can refer to column names that contain spaces by surrounding them in backticks. For example, if one of your columns is called ``a a`` and you want to sum it with ``b``, your query should be ```a a` + b``. inplace : bool Whether the query should modify the data in place or return a modified copy. **kwargs See the documentation for :func:`eval` for complete details on the keyword arguments accepted by :meth:`DataFrame.query`. .. versionadded:: 0.18.0 Returns ------- DataFrame DataFrame resulting from the provided query expression. See Also -------- eval : Evaluate a string describing operations on DataFrame columns. DataFrame.eval : Evaluate a string describing operations on DataFrame columns. Notes ----- The result of the evaluation of this expression is first passed to :attr:`DataFrame.loc` and if that fails because of a multidimensional key (e.g., a DataFrame) then the result will be passed to :meth:`DataFrame.__getitem__`. This method uses the top-level :func:`eval` function to evaluate the passed query. The :meth:`~pandas.DataFrame.query` method uses a slightly modified Python syntax by default. For example, the ``&`` and ``|`` (bitwise) operators have the precedence of their boolean cousins, :keyword:`and` and :keyword:`or`. This *is* syntactically valid Python, however the semantics are different. You can change the semantics of the expression by passing the keyword argument ``parser='python'``. This enforces the same semantics as evaluation in Python space. Likewise, you can pass ``engine='python'`` to evaluate an expression using Python itself as a backend. This is not recommended as it is inefficient compared to using ``numexpr`` as the engine. The :attr:`DataFrame.index` and :attr:`DataFrame.columns` attributes of the :class:`~pandas.DataFrame` instance are placed in the query namespace by default, which allows you to treat both the index and columns of the frame as a column in the frame. The identifier ``index`` is used for the frame index; you can also use the name of the index to identify it in a query. Please note that Python keywords may not be used as identifiers. For further details and examples see the ``query`` documentation in :ref:`indexing <indexing.query>`. Examples -------- >>> df = pd.DataFrame({'A': range(1, 6), ... 'B': range(10, 0, -2), ... 'C C': range(10, 5, -1)}) >>> df A B C C 0 1 10 10 1 2 8 9 2 3 6 8 3 4 4 7 4 5 2 6 >>> df.query('A > B') A B C C 4 5 2 6 The previous expression is equivalent to >>> df[df.A > df.B] A B C C 4 5 2 6 For columns with spaces in their name, you can use backtick quoting. >>> df.query('B == `C C`') A B C C 0 1 10 10 The previous expression is equivalent to >>> df[df.B == df['C C']] A B C C 0 1 10 10 """ inplace = validate_bool_kwarg(inplace, 'inplace') if not isinstance(expr, str): msg = "expr must be a string to be evaluated, {0} given" raise ValueError(msg.format(type(expr))) kwargs['level'] = kwargs.pop('level', 0) + 1 kwargs['target'] = None res = self.eval(expr, **kwargs) try: new_data = self.loc[res] except ValueError: # when res is multi-dimensional loc raises, but this is sometimes a # valid query new_data = self[res] if inplace: self._update_inplace(new_data) else: return new_data def eval(self, expr, inplace=False, **kwargs): """ Evaluate a string describing operations on DataFrame columns. Operates on columns only, not specific rows or elements. This allows `eval` to run arbitrary code, which can make you vulnerable to code injection if you pass user input to this function. 
Parameters ---------- expr : str The expression string to evaluate. inplace : bool, default False If the expression contains an assignment, whether to perform the operation inplace and mutate the existing DataFrame. Otherwise, a new DataFrame is returned. .. versionadded:: 0.18.0. kwargs : dict See the documentation for :func:`eval` for complete details on the keyword arguments accepted by :meth:`~pandas.DataFrame.query`. Returns ------- ndarray, scalar, or pandas object The result of the evaluation. See Also -------- DataFrame.query : Evaluates a boolean expression to query the columns of a frame. DataFrame.assign : Can evaluate an expression or function to create new values for a column. eval : Evaluate a Python expression as a string using various backends. Notes ----- For more details see the API documentation for :func:`~eval`. For detailed examples see :ref:`enhancing performance with eval <enhancingperf.eval>`. Examples -------- >>> df = pd.DataFrame({'A': range(1, 6), 'B': range(10, 0, -2)}) >>> df A B 0 1 10 1 2 8 2 3 6 3 4 4 4 5 2 >>> df.eval('A + B') 0 11 1 10 2 9 3 8 4 7 dtype: int64 Assignment is allowed though by default the original DataFrame is not modified. >>> df.eval('C = A + B') A B C 0 1 10 11 1 2 8 10 2 3 6 9 3 4 4 8 4 5 2 7 >>> df A B 0 1 10 1 2 8 2 3 6 3 4 4 4 5 2 Use ``inplace=True`` to modify the original DataFrame. >>> df.eval('C = A + B', inplace=True) >>> df A B C 0 1 10 11 1 2 8 10 2 3 6 9 3 4 4 8 4 5 2 7 """ from pandas.core.computation.eval import eval as _eval inplace = validate_bool_kwarg(inplace, 'inplace') resolvers = kwargs.pop('resolvers', None) kwargs['level'] = kwargs.pop('level', 0) + 1 if resolvers is None: index_resolvers = self._get_index_resolvers() column_resolvers = \ self._get_space_character_free_column_resolvers() resolvers = column_resolvers, index_resolvers if 'target' not in kwargs: kwargs['target'] = self kwargs['resolvers'] = kwargs.get('resolvers', ()) + tuple(resolvers) return _eval(expr, inplace=inplace, **kwargs) def select_dtypes(self, include=None, exclude=None): """ Return a subset of the DataFrame's columns based on the column dtypes. Parameters ---------- include, exclude : scalar or list-like A selection of dtypes or strings to be included/excluded. At least one of these parameters must be supplied. Returns ------- DataFrame The subset of the frame including the dtypes in ``include`` and excluding the dtypes in ``exclude``. Raises ------ ValueError * If both of ``include`` and ``exclude`` are empty * If ``include`` and ``exclude`` have overlapping elements * If any kind of string dtype is passed in. Notes ----- * To select all *numeric* types, use ``np.number`` or ``'number'`` * To select strings you must use the ``object`` dtype, but note that this will return *all* object dtype columns * See the `numpy dtype hierarchy <http://docs.scipy.org/doc/numpy/reference/arrays.scalars.html>`__ * To select datetimes, use ``np.datetime64``, ``'datetime'`` or ``'datetime64'`` * To select timedeltas, use ``np.timedelta64``, ``'timedelta'`` or ``'timedelta64'`` * To select Pandas categorical dtypes, use ``'category'`` * To select Pandas datetimetz dtypes, use ``'datetimetz'`` (new in 0.20.0) or ``'datetime64[ns, tz]'`` Examples -------- >>> df = pd.DataFrame({'a': [1, 2] * 3, ... 'b': [True, False] * 3, ... 
'c': [1.0, 2.0] * 3}) >>> df a b c 0 1 True 1.0 1 2 False 2.0 2 1 True 1.0 3 2 False 2.0 4 1 True 1.0 5 2 False 2.0 >>> df.select_dtypes(include='bool') b 0 True 1 False 2 True 3 False 4 True 5 False >>> df.select_dtypes(include=['float64']) c 0 1.0 1 2.0 2 1.0 3 2.0 4 1.0 5 2.0 >>> df.select_dtypes(exclude=['int']) b c 0 True 1.0 1 False 2.0 2 True 1.0 3 False 2.0 4 True 1.0 5 False 2.0 """ def _get_info_slice(obj, indexer): """Slice the info axis of `obj` with `indexer`.""" if not hasattr(obj, '_info_axis_number'): msg = 'object of type {typ!r} has no info axis' raise TypeError(msg.format(typ=type(obj).__name__)) slices = [slice(None)] * obj.ndim slices[obj._info_axis_number] = indexer return tuple(slices) if not is_list_like(include): include = (include,) if include is not None else () if not is_list_like(exclude): exclude = (exclude,) if exclude is not None else () selection = tuple(map(frozenset, (include, exclude))) if not any(selection): raise ValueError('at least one of include or exclude must be ' 'nonempty') # convert the myriad valid dtypes object to a single representation include, exclude = map( lambda x: frozenset(map(infer_dtype_from_object, x)), selection) for dtypes in (include, exclude): invalidate_string_dtypes(dtypes) # can't both include AND exclude! if not include.isdisjoint(exclude): raise ValueError('include and exclude overlap on {inc_ex}'.format( inc_ex=(include & exclude))) # empty include/exclude -> defaults to True # three cases (we've already raised if both are empty) # case 1: empty include, nonempty exclude # we have True, True, ... True for include, same for exclude # in the loop below we get the excluded # and when we call '&' below we get only the excluded # case 2: nonempty include, empty exclude # same as case 1, but with include # case 3: both nonempty # the "union" of the logic of case 1 and case 2: # we get the included and excluded, and return their logical and include_these = Series(not bool(include), index=self.columns) exclude_these = Series(not bool(exclude), index=self.columns) def is_dtype_instance_mapper(idx, dtype): return idx, functools.partial(issubclass, dtype.type) for idx, f in itertools.starmap(is_dtype_instance_mapper, enumerate(self.dtypes)): if include: # checks for the case of empty include or exclude include_these.iloc[idx] = any(map(f, include)) if exclude: exclude_these.iloc[idx] = not any(map(f, exclude)) dtype_indexer = include_these & exclude_these return self.loc[_get_info_slice(self, dtype_indexer)] def _box_item_values(self, key, values): items = self.columns[self.columns.get_loc(key)] if values.ndim == 2: return self._constructor(values.T, columns=items, index=self.index) else: return self._box_col_values(values, items) def _box_col_values(self, values, items): """ Provide boxed values for a column. 
""" klass = self._constructor_sliced return klass(values, index=self.index, name=items, fastpath=True) def __setitem__(self, key, value): key = com.apply_if_callable(key, self) # see if we can slice the rows indexer = convert_to_index_sliceable(self, key) if indexer is not None: return self._setitem_slice(indexer, value) if isinstance(key, DataFrame) or getattr(key, 'ndim', None) == 2: self._setitem_frame(key, value) elif isinstance(key, (Series, np.ndarray, list, Index)): self._setitem_array(key, value) else: # set column self._set_item(key, value) def _setitem_slice(self, key, value): self._check_setitem_copy() self.loc._setitem_with_indexer(key, value) def _setitem_array(self, key, value): # also raises Exception if object array with NA values if com.is_bool_indexer(key): if len(key) != len(self.index): raise ValueError('Item wrong length %d instead of %d!' % (len(key), len(self.index))) key = check_bool_indexer(self.index, key) indexer = key.nonzero()[0] self._check_setitem_copy() self.loc._setitem_with_indexer(indexer, value) else: if isinstance(value, DataFrame): if len(value.columns) != len(key): raise ValueError('Columns must be same length as key') for k1, k2 in zip(key, value.columns): self[k1] = value[k2] else: indexer = self.loc._convert_to_indexer(key, axis=1) self._check_setitem_copy() self.loc._setitem_with_indexer((slice(None), indexer), value) def _setitem_frame(self, key, value): # support boolean setting with DataFrame input, e.g. # df[df > df2] = 0 if isinstance(key, np.ndarray): if key.shape != self.shape: raise ValueError( 'Array conditional must be same shape as self' ) key = self._constructor(key, **self._construct_axes_dict()) if key.values.size and not is_bool_dtype(key.values): raise TypeError( 'Must pass DataFrame or 2-d ndarray with boolean values only' ) self._check_inplace_setting(value) self._check_setitem_copy() self._where(-key, value, inplace=True) def _ensure_valid_index(self, value): """ Ensure that if we don't have an index, that we can create one from the passed value. """ # GH5632, make sure that we are a Series convertible if not len(self.index) and is_list_like(value): try: value = Series(value) except (ValueError, NotImplementedError, TypeError): raise ValueError('Cannot set a frame with no defined index ' 'and a value that cannot be converted to a ' 'Series') self._data = self._data.reindex_axis(value.index.copy(), axis=1, fill_value=np.nan) def _set_item(self, key, value): """ Add series to DataFrame in specified column. If series is a numpy-array (not a Series/TimeSeries), it must be the same length as the DataFrames index or an error will be thrown. Series/TimeSeries will be conformed to the DataFrames index to ensure homogeneity. """ self._ensure_valid_index(value) value = self._sanitize_column(key, value) NDFrame._set_item(self, key, value) # check if we are modifying a copy # try to set first as we want an invalid # value exception to occur first if len(self): self._check_setitem_copy() def insert(self, loc, column, value, allow_duplicates=False): """ Insert column into DataFrame at specified location. Raises a ValueError if `column` is already contained in the DataFrame, unless `allow_duplicates` is set to True. Parameters ---------- loc : int Insertion index. 
Must verify 0 <= loc <= len(columns) column : string, number, or hashable object label of the inserted column value : int, Series, or array-like allow_duplicates : bool, optional """ self._ensure_valid_index(value) value = self._sanitize_column(column, value, broadcast=False) self._data.insert(loc, column, value, allow_duplicates=allow_duplicates) def assign(self, **kwargs): r""" Assign new columns to a DataFrame. Returns a new object with all original columns in addition to new ones. Existing columns that are re-assigned will be overwritten. Parameters ---------- **kwargs : dict of {str: callable or Series} The column names are keywords. If the values are callable, they are computed on the DataFrame and assigned to the new columns. The callable must not change input DataFrame (though pandas doesn't check it). If the values are not callable, (e.g. a Series, scalar, or array), they are simply assigned. Returns ------- DataFrame A new DataFrame with the new columns in addition to all the existing columns. Notes ----- Assigning multiple columns within the same ``assign`` is possible. For Python 3.6 and above, later items in '\*\*kwargs' may refer to newly created or modified columns in 'df'; items are computed and assigned into 'df' in order. For Python 3.5 and below, the order of keyword arguments is not specified, you cannot refer to newly created or modified columns. All items are computed first, and then assigned in alphabetical order. .. versionchanged :: 0.23.0 Keyword argument order is maintained for Python 3.6 and later. Examples -------- >>> df = pd.DataFrame({'temp_c': [17.0, 25.0]}, ... index=['Portland', 'Berkeley']) >>> df temp_c Portland 17.0 Berkeley 25.0 Where the value is a callable, evaluated on `df`: >>> df.assign(temp_f=lambda x: x.temp_c * 9 / 5 + 32) temp_c temp_f Portland 17.0 62.6 Berkeley 25.0 77.0 Alternatively, the same behavior can be achieved by directly referencing an existing Series or sequence: >>> df.assign(temp_f=df['temp_c'] * 9 / 5 + 32) temp_c temp_f Portland 17.0 62.6 Berkeley 25.0 77.0 In Python 3.6+, you can create multiple columns within the same assign where one of the columns depends on another one defined within the same assign: >>> df.assign(temp_f=lambda x: x['temp_c'] * 9 / 5 + 32, ... temp_k=lambda x: (x['temp_f'] + 459.67) * 5 / 9) temp_c temp_f temp_k Portland 17.0 62.6 290.15 Berkeley 25.0 77.0 298.15 """ data = self.copy() # >= 3.6 preserve order of kwargs if PY36: for k, v in kwargs.items(): data[k] = com.apply_if_callable(v, data) else: # <= 3.5: do all calculations first... results = OrderedDict() for k, v in kwargs.items(): results[k] = com.apply_if_callable(v, data) # <= 3.5 and earlier results = sorted(results.items()) # ... and then assign for k, v in results: data[k] = v return data def _sanitize_column(self, key, value, broadcast=True): """ Ensures new columns (which go into the BlockManager as new blocks) are always copied and converted into an array. Parameters ---------- key : object value : scalar, Series, or array-like broadcast : bool, default True If ``key`` matches multiple duplicate column names in the DataFrame, this parameter indicates whether ``value`` should be tiled so that the returned array contains a (duplicated) column for each occurrence of the key. If False, ``value`` will not be tiled. 
Returns ------- numpy.ndarray """ def reindexer(value): # reindex if necessary if value.index.equals(self.index) or not len(self.index): value = value._values.copy() else: # GH 4107 try: value = value.reindex(self.index)._values except Exception as e: # duplicate axis if not value.index.is_unique: raise e # other raise TypeError('incompatible index of inserted column ' 'with frame index') return value if isinstance(value, Series): value = reindexer(value) elif isinstance(value, DataFrame): # align right-hand-side columns if self.columns # is multi-index and self[key] is a sub-frame if isinstance(self.columns, MultiIndex) and key in self.columns: loc = self.columns.get_loc(key) if isinstance(loc, (slice, Series, np.ndarray, Index)): cols = maybe_droplevels(self.columns[loc], key) if len(cols) and not cols.equals(value.columns): value = value.reindex(cols, axis=1) # now align rows value = reindexer(value).T elif isinstance(value, ExtensionArray): # Explicitly copy here, instead of in sanitize_index, # as sanitize_index won't copy an EA, even with copy=True value = value.copy() value = sanitize_index(value, self.index, copy=False) elif isinstance(value, Index) or is_sequence(value): # turn me into an ndarray value = sanitize_index(value, self.index, copy=False) if not isinstance(value, (np.ndarray, Index)): if isinstance(value, list) and len(value) > 0: value = maybe_convert_platform(value) else: value = com.asarray_tuplesafe(value) elif value.ndim == 2: value = value.copy().T elif isinstance(value, Index): value = value.copy(deep=True) else: value = value.copy() # possibly infer to datetimelike if is_object_dtype(value.dtype): value = maybe_infer_to_datetimelike(value) else: # cast ignores pandas dtypes. so save the dtype first infer_dtype, _ = infer_dtype_from_scalar( value, pandas_dtype=True) # upcast value = cast_scalar_to_array(len(self.index), value) value = maybe_cast_to_datetime(value, infer_dtype) # return internal types directly if is_extension_type(value) or is_extension_array_dtype(value): return value # broadcast across multiple columns if necessary if broadcast and key in self.columns and value.ndim == 1: if (not self.columns.is_unique or isinstance(self.columns, MultiIndex)): existing_piece = self[key] if isinstance(existing_piece, DataFrame): value = np.tile(value, (len(existing_piece.columns), 1)) return np.atleast_2d(np.asarray(value)) @property def _series(self): return {item: Series(self._data.iget(idx), index=self.index, name=item) for idx, item in enumerate(self.columns)} def lookup(self, row_labels, col_labels): """ Label-based "fancy indexing" function for DataFrame. Given equal-length arrays of row and column labels, return an array of the values corresponding to each (row, col) pair. 
Parameters ---------- row_labels : sequence The row labels to use for lookup. col_labels : sequence The column labels to use for lookup. Returns ------- numpy.ndarray The found values. Notes ----- Akin to:: result = [df.get_value(row, col) for row, col in zip(row_labels, col_labels)] """ n = len(row_labels) if n != len(col_labels): raise ValueError('Row labels must have same size as column labels') thresh = 1000 if not self._is_mixed_type or n > thresh: values = self.values ridx = self.index.get_indexer(row_labels) cidx = self.columns.get_indexer(col_labels) if (ridx == -1).any(): raise KeyError('One or more row labels was not found') if (cidx == -1).any(): raise KeyError('One or more column labels was not found') flat_index = ridx * len(self.columns) + cidx result = values.flat[flat_index] else: result = np.empty(n, dtype='O') for i, (r, c) in enumerate(zip(row_labels, col_labels)): result[i] = self._get_value(r, c) if is_object_dtype(result): result = lib.maybe_convert_objects(result) return result # ---------------------------------------------------------------------- # Reindexing and alignment def _reindex_axes(self, axes, level, limit, tolerance, method, fill_value, copy): frame = self columns = axes['columns'] if columns is not None: frame = frame._reindex_columns(columns, method, copy, level, fill_value, limit, tolerance) index = axes['index'] if index is not None: frame = frame._reindex_index(index, method, copy, level, fill_value, limit, tolerance) return frame def _reindex_index(self, new_index, method, copy, level, fill_value=np.nan, limit=None, tolerance=None): new_index, indexer = self.index.reindex(new_index, method=method, level=level, limit=limit, tolerance=tolerance) return self._reindex_with_indexers({0: [new_index, indexer]}, copy=copy, fill_value=fill_value, allow_dups=False) def _reindex_columns(self, new_columns, method, copy, level, fill_value=None, limit=None, tolerance=None): new_columns, indexer = self.columns.reindex(new_columns, method=method, level=level, limit=limit, tolerance=tolerance) return self._reindex_with_indexers({1: [new_columns, indexer]}, copy=copy, fill_value=fill_value, allow_dups=False) def _reindex_multi(self, axes, copy, fill_value): """ We are guaranteed non-Nones in the axes.
""" new_index, row_indexer = self.index.reindex(axes['index']) new_columns, col_indexer = self.columns.reindex(axes['columns']) if row_indexer is not None and col_indexer is not None: indexer = row_indexer, col_indexer new_values = algorithms.take_2d_multi(self.values, indexer, fill_value=fill_value) return self._constructor(new_values, index=new_index, columns=new_columns) else: return self._reindex_with_indexers({0: [new_index, row_indexer], 1: [new_columns, col_indexer]}, copy=copy, fill_value=fill_value) @Appender(_shared_docs['align'] % _shared_doc_kwargs) def align(self, other, join='outer', axis=None, level=None, copy=True, fill_value=None, method=None, limit=None, fill_axis=0, broadcast_axis=None): return super().align(other, join=join, axis=axis, level=level, copy=copy, fill_value=fill_value, method=method, limit=limit, fill_axis=fill_axis, broadcast_axis=broadcast_axis) @Substitution(**_shared_doc_kwargs) @Appender(NDFrame.reindex.__doc__) @rewrite_axis_style_signature('labels', [('method', None), ('copy', True), ('level', None), ('fill_value', np.nan), ('limit', None), ('tolerance', None)]) def reindex(self, *args, **kwargs): axes = validate_axis_style_args(self, args, kwargs, 'labels', 'reindex') kwargs.update(axes) # Pop these, since the values are in `kwargs` under different names kwargs.pop('axis', None) kwargs.pop('labels', None) return super().reindex(**kwargs) @Appender(_shared_docs['reindex_axis'] % _shared_doc_kwargs) def reindex_axis(self, labels, axis=0, method=None, level=None, copy=True, limit=None, fill_value=np.nan): return super().reindex_axis(labels=labels, axis=axis, method=method, level=level, copy=copy, limit=limit, fill_value=fill_value) def drop(self, labels=None, axis=0, index=None, columns=None, level=None, inplace=False, errors='raise'): """ Drop specified labels from rows or columns. Remove rows or columns by specifying label names and corresponding axis, or by specifying directly index or column names. When using a multi-index, labels on different levels can be removed by specifying the level. Parameters ---------- labels : single label or list-like Index or column labels to drop. axis : {0 or 'index', 1 or 'columns'}, default 0 Whether to drop labels from the index (0 or 'index') or columns (1 or 'columns'). index : single label or list-like Alternative to specifying axis (``labels, axis=0`` is equivalent to ``index=labels``). .. versionadded:: 0.21.0 columns : single label or list-like Alternative to specifying axis (``labels, axis=1`` is equivalent to ``columns=labels``). .. versionadded:: 0.21.0 level : int or level name, optional For MultiIndex, level from which the labels will be removed. inplace : bool, default False If True, do operation inplace and return None. errors : {'ignore', 'raise'}, default 'raise' If 'ignore', suppress error and only existing labels are dropped. Returns ------- DataFrame DataFrame without the removed index or column labels. Raises ------ KeyError If any of the labels is not found in the selected axis. See Also -------- DataFrame.loc : Label-location based indexer for selection by label. DataFrame.dropna : Return DataFrame with labels on given axis omitted where (all or any) data are missing. DataFrame.drop_duplicates : Return DataFrame with duplicate rows removed, optionally only considering certain columns. Series.drop : Return Series with specified index labels removed. Examples -------- >>> df = pd.DataFrame(np.arange(12).reshape(3, 4), ... 
columns=['A', 'B', 'C', 'D']) >>> df A B C D 0 0 1 2 3 1 4 5 6 7 2 8 9 10 11 Drop columns >>> df.drop(['B', 'C'], axis=1) A D 0 0 3 1 4 7 2 8 11 >>> df.drop(columns=['B', 'C']) A D 0 0 3 1 4 7 2 8 11 Drop a row by index >>> df.drop([0, 1]) A B C D 2 8 9 10 11 Drop columns and/or rows of MultiIndex DataFrame >>> midx = pd.MultiIndex(levels=[['lama', 'cow', 'falcon'], ... ['speed', 'weight', 'length']], ... codes=[[0, 0, 0, 1, 1, 1, 2, 2, 2], ... [0, 1, 2, 0, 1, 2, 0, 1, 2]]) >>> df = pd.DataFrame(index=midx, columns=['big', 'small'], ... data=[[45, 30], [200, 100], [1.5, 1], [30, 20], ... [250, 150], [1.5, 0.8], [320, 250], ... [1, 0.8], [0.3, 0.2]]) >>> df big small lama speed 45.0 30.0 weight 200.0 100.0 length 1.5 1.0 cow speed 30.0 20.0 weight 250.0 150.0 length 1.5 0.8 falcon speed 320.0 250.0 weight 1.0 0.8 length 0.3 0.2 >>> df.drop(index='cow', columns='small') big lama speed 45.0 weight 200.0 length 1.5 falcon speed 320.0 weight 1.0 length 0.3 >>> df.drop(index='length', level=1) big small lama speed 45.0 30.0 weight 200.0 100.0 cow speed 30.0 20.0 weight 250.0 150.0 falcon speed 320.0 250.0 weight 1.0 0.8 """ return super().drop(labels=labels, axis=axis, index=index, columns=columns, level=level, inplace=inplace, errors=errors) @rewrite_axis_style_signature('mapper', [('copy', True), ('inplace', False), ('level', None), ('errors', 'ignore')]) def rename(self, *args, **kwargs): """ Alter axes labels. Function / dict values must be unique (1-to-1). Labels not contained in a dict / Series will be left as-is. Extra labels listed don't throw an error. See the :ref:`user guide <basics.rename>` for more. Parameters ---------- mapper : dict-like or function Dict-like or functions transformations to apply to that axis' values. Use either ``mapper`` and ``axis`` to specify the axis to target with ``mapper``, or ``index`` and ``columns``. index : dict-like or function Alternative to specifying axis (``mapper, axis=0`` is equivalent to ``index=mapper``). columns : dict-like or function Alternative to specifying axis (``mapper, axis=1`` is equivalent to ``columns=mapper``). axis : int or str Axis to target with ``mapper``. Can be either the axis name ('index', 'columns') or number (0, 1). The default is 'index'. copy : bool, default True Also copy underlying data. inplace : bool, default False Whether to return a new DataFrame. If True then value of copy is ignored. level : int or level name, default None In case of a MultiIndex, only rename labels in the specified level. errors : {'ignore', 'raise'}, default 'ignore' If 'raise', raise a `KeyError` when a dict-like `mapper`, `index`, or `columns` contains labels that are not present in the Index being transformed. If 'ignore', existing keys will be renamed and extra keys will be ignored. Returns ------- DataFrame DataFrame with the renamed axis labels. Raises ------ KeyError If any of the labels is not found in the selected axis and "errors='raise'". See Also -------- DataFrame.rename_axis : Set the name of the axis. Examples -------- ``DataFrame.rename`` supports two calling conventions * ``(index=index_mapper, columns=columns_mapper, ...)`` * ``(mapper, axis={'index', 'columns'}, ...)`` We *highly* recommend using keyword arguments to clarify your intent. 
Rename columns using a mapping: >>> df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]}) >>> df.rename(columns={"A": "a", "B": "c"}) a c 0 1 4 1 2 5 2 3 6 Rename index using a mapping: >>> df.rename(index={0: "x", 1: "y", 2: "z"}) A B x 1 4 y 2 5 z 3 6 Cast index labels to a different type: >>> df.index RangeIndex(start=0, stop=3, step=1) >>> df.rename(index=str).index Index(['0', '1', '2'], dtype='object') >>> df.rename(columns={"A": "a", "B": "b", "C": "c"}, errors="raise") Traceback (most recent call last): KeyError: ['C'] not found in axis Using axis-style parameters >>> df.rename(str.lower, axis='columns') a b 0 1 4 1 2 5 2 3 6 >>> df.rename({1: 2, 2: 4}, axis='index') A B 0 1 4 2 2 5 4 3 6 """ axes = validate_axis_style_args(self, args, kwargs, 'mapper', 'rename') kwargs.update(axes) # Pop these, since the values are in `kwargs` under different names kwargs.pop('axis', None) kwargs.pop('mapper', None) return super().rename(**kwargs) @Substitution(**_shared_doc_kwargs) @Appender(NDFrame.fillna.__doc__) def fillna(self, value=None, method=None, axis=None, inplace=False, limit=None, downcast=None, **kwargs): return super().fillna(value=value, method=method, axis=axis, inplace=inplace, limit=limit, downcast=downcast, **kwargs) @Appender(_shared_docs['replace'] % _shared_doc_kwargs) def replace(self, to_replace=None, value=None, inplace=False, limit=None, regex=False, method='pad'): return super().replace(to_replace=to_replace, value=value, inplace=inplace, limit=limit, regex=regex, method=method) @Appender(_shared_docs['shift'] % _shared_doc_kwargs) def shift(self, periods=1, freq=None, axis=0, fill_value=None): return super().shift(periods=periods, freq=freq, axis=axis, fill_value=fill_value) def set_index(self, keys, drop=True, append=False, inplace=False, verify_integrity=False): """ Set the DataFrame index using existing columns. Set the DataFrame index (row labels) using one or more existing columns or arrays (of the correct length). The index can replace the existing index or expand on it. Parameters ---------- keys : label or array-like or list of labels/arrays This parameter can be either a single column key, a single array of the same length as the calling DataFrame, or a list containing an arbitrary combination of column keys and arrays. Here, "array" encompasses :class:`Series`, :class:`Index`, ``np.ndarray``, and instances of :class:`~collections.abc.Iterator`. drop : bool, default True Delete columns to be used as the new index. append : bool, default False Whether to append columns to existing index. inplace : bool, default False Modify the DataFrame in place (do not create a new object). verify_integrity : bool, default False Check the new index for duplicates. Otherwise defer the check until necessary. Setting to False will improve the performance of this method. Returns ------- DataFrame Changed row labels. See Also -------- DataFrame.reset_index : Opposite of set_index. DataFrame.reindex : Change to new indices or expand indices. DataFrame.reindex_like : Change to same indices as other DataFrame. Examples -------- >>> df = pd.DataFrame({'month': [1, 4, 7, 10], ... 'year': [2012, 2014, 2013, 2014], ... 
'sale': [55, 40, 84, 31]}) >>> df month year sale 0 1 2012 55 1 4 2014 40 2 7 2013 84 3 10 2014 31 Set the index to become the 'month' column: >>> df.set_index('month') year sale month 1 2012 55 4 2014 40 7 2013 84 10 2014 31 Create a MultiIndex using columns 'year' and 'month': >>> df.set_index(['year', 'month']) sale year month 2012 1 55 2014 4 40 2013 7 84 2014 10 31 Create a MultiIndex using an Index and a column: >>> df.set_index([pd.Index([1, 2, 3, 4]), 'year']) month sale year 1 2012 1 55 2 2014 4 40 3 2013 7 84 4 2014 10 31 Create a MultiIndex using two Series: >>> s = pd.Series([1, 2, 3, 4]) >>> df.set_index([s, s**2]) month year sale 1 1 1 2012 55 2 4 4 2014 40 3 9 7 2013 84 4 16 10 2014 31 """ inplace = validate_bool_kwarg(inplace, 'inplace') if not isinstance(keys, list): keys = [keys] err_msg = ('The parameter "keys" may be a column key, one-dimensional ' 'array, or a list containing only valid column keys and ' 'one-dimensional arrays.') missing = [] for col in keys: if isinstance(col, (ABCIndexClass, ABCSeries, np.ndarray, list, abc.Iterator)): # arrays are fine as long as they are one-dimensional # iterators get converted to list below if getattr(col, 'ndim', 1) != 1: raise ValueError(err_msg) else: # everything else gets tried as a key; see GH 24969 try: found = col in self.columns except TypeError: raise TypeError(err_msg + ' Received column of ' 'type {}'.format(type(col))) else: if not found: missing.append(col) if missing: raise KeyError('None of {} are in the columns'.format(missing)) if inplace: frame = self else: frame = self.copy() arrays = [] names = [] if append: names = [x for x in self.index.names] if isinstance(self.index, ABCMultiIndex): for i in range(self.index.nlevels): arrays.append(self.index._get_level_values(i)) else: arrays.append(self.index) to_remove = [] for col in keys: if isinstance(col, ABCMultiIndex): for n in range(col.nlevels): arrays.append(col._get_level_values(n)) names.extend(col.names) elif isinstance(col, (ABCIndexClass, ABCSeries)): # if Index then not MultiIndex (treated above) arrays.append(col) names.append(col.name) elif isinstance(col, (list, np.ndarray)): arrays.append(col) names.append(None) elif isinstance(col, abc.Iterator): arrays.append(list(col)) names.append(None) # from here, col can only be a column label else: arrays.append(frame[col]._values) names.append(col) if drop: to_remove.append(col) if len(arrays[-1]) != len(self): # check newest element against length of calling frame, since # ensure_index_from_sequences would not raise for append=False. raise ValueError('Length mismatch: Expected {len_self} rows, ' 'received array of length {len_col}'.format( len_self=len(self), len_col=len(arrays[-1]) )) index = ensure_index_from_sequences(arrays, names) if verify_integrity and not index.is_unique: duplicates = index[index.duplicated()].unique() raise ValueError('Index has duplicate keys: {dup}'.format( dup=duplicates)) # use set to handle duplicate column names gracefully in case of drop for c in set(to_remove): del frame[c] # clear up memory usage index._cleanup() frame.index = index if not inplace: return frame def reset_index(self, level=None, drop=False, inplace=False, col_level=0, col_fill=''): """ Reset the index, or a level of it. Reset the index of the DataFrame, and use the default one instead. If the DataFrame has a MultiIndex, this method can remove one or more levels. Parameters ---------- level : int, str, tuple, or list, default None Only remove the given levels from the index. Removes all levels by default. 
drop : bool, default False Do not try to insert index into dataframe columns. This resets the index to the default integer index. inplace : bool, default False Modify the DataFrame in place (do not create a new object). col_level : int or str, default 0 If the columns have multiple levels, determines which level the labels are inserted into. By default it is inserted into the first level. col_fill : object, default '' If the columns have multiple levels, determines how the other levels are named. If None then the index name is repeated. Returns ------- DataFrame DataFrame with the new index. See Also -------- DataFrame.set_index : Opposite of reset_index. DataFrame.reindex : Change to new indices or expand indices. DataFrame.reindex_like : Change to same indices as other DataFrame. Examples -------- >>> df = pd.DataFrame([('bird', 389.0), ... ('bird', 24.0), ... ('mammal', 80.5), ... ('mammal', np.nan)], ... index=['falcon', 'parrot', 'lion', 'monkey'], ... columns=('class', 'max_speed')) >>> df class max_speed falcon bird 389.0 parrot bird 24.0 lion mammal 80.5 monkey mammal NaN When we reset the index, the old index is added as a column, and a new sequential index is used: >>> df.reset_index() index class max_speed 0 falcon bird 389.0 1 parrot bird 24.0 2 lion mammal 80.5 3 monkey mammal NaN We can use the `drop` parameter to avoid the old index being added as a column: >>> df.reset_index(drop=True) class max_speed 0 bird 389.0 1 bird 24.0 2 mammal 80.5 3 mammal NaN You can also use `reset_index` with `MultiIndex`. >>> index = pd.MultiIndex.from_tuples([('bird', 'falcon'), ... ('bird', 'parrot'), ... ('mammal', 'lion'), ... ('mammal', 'monkey')], ... names=['class', 'name']) >>> columns = pd.MultiIndex.from_tuples([('speed', 'max'), ... ('species', 'type')]) >>> df = pd.DataFrame([(389.0, 'fly'), ... ( 24.0, 'fly'), ... ( 80.5, 'run'), ... (np.nan, 'jump')], ... index=index, ... columns=columns) >>> df speed species max type class name bird falcon 389.0 fly parrot 24.0 fly mammal lion 80.5 run monkey NaN jump If the index has multiple levels, we can reset a subset of them: >>> df.reset_index(level='class') class speed species max type name falcon bird 389.0 fly parrot bird 24.0 fly lion mammal 80.5 run monkey mammal NaN jump If we are not dropping the index, by default, it is placed in the top level. 
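
        As a quick, illustrative check of that default placement (reusing
        ``df`` from above), the inserted ``class`` label lands in the first
        level of the column ``MultiIndex``:

        >>> df.reset_index(level='class').columns.get_level_values(0)
        Index(['class', 'speed', 'species'], dtype='object')
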
We can place it in another level: >>> df.reset_index(level='class', col_level=1) speed species class max type name falcon bird 389.0 fly parrot bird 24.0 fly lion mammal 80.5 run monkey mammal NaN jump When the index is inserted under another level, we can specify under which one with the parameter `col_fill`: >>> df.reset_index(level='class', col_level=1, col_fill='species') species speed species class max type name falcon bird 389.0 fly parrot bird 24.0 fly lion mammal 80.5 run monkey mammal NaN jump If we specify a nonexistent level for `col_fill`, it is created: >>> df.reset_index(level='class', col_level=1, col_fill='genus') genus speed species class max type name falcon bird 389.0 fly parrot bird 24.0 fly lion mammal 80.5 run monkey mammal NaN jump """ inplace = validate_bool_kwarg(inplace, 'inplace') if inplace: new_obj = self else: new_obj = self.copy() def _maybe_casted_values(index, labels=None): values = index._values if not isinstance(index, (PeriodIndex, DatetimeIndex)): if values.dtype == np.object_: values = lib.maybe_convert_objects(values) # if we have the labels, extract the values with a mask if labels is not None: mask = labels == -1 # we can have situations where the whole mask is -1, # meaning there is nothing found in labels, so make all nan's if mask.all(): values = np.empty(len(mask)) values.fill(np.nan) else: values = values.take(labels) # TODO(https://github.com/pandas-dev/pandas/issues/24206) # Push this into maybe_upcast_putmask? # We can't pass EAs there right now. Looks a bit # complicated. # So we unbox the ndarray_values, op, re-box. values_type = type(values) values_dtype = values.dtype if issubclass(values_type, DatetimeLikeArray): values = values._data if mask.any(): values, changed = maybe_upcast_putmask( values, mask, np.nan) if issubclass(values_type, DatetimeLikeArray): values = values_type(values, dtype=values_dtype) return values new_index = ibase.default_index(len(new_obj)) if level is not None: if not isinstance(level, (tuple, list)): level = [level] level = [self.index._get_level_number(lev) for lev in level] if len(level) < self.index.nlevels: new_index = self.index.droplevel(level) if not drop: if isinstance(self.index, MultiIndex): names = [n if n is not None else ('level_%d' % i) for (i, n) in enumerate(self.index.names)] to_insert = zip(self.index.levels, self.index.codes) else: default = 'index' if 'index' not in self else 'level_0' names = ([default] if self.index.name is None else [self.index.name]) to_insert = ((self.index, None),) multi_col = isinstance(self.columns, MultiIndex) for i, (lev, lab) in reversed(list(enumerate(to_insert))): if not (level is None or i in level): continue name = names[i] if multi_col: col_name = (list(name) if isinstance(name, tuple) else [name]) if col_fill is None: if len(col_name) not in (1, self.columns.nlevels): raise ValueError("col_fill=None is incompatible " "with incomplete column name " "{}".format(name)) col_fill = col_name[0] lev_num = self.columns._get_level_number(col_level) name_lst = [col_fill] * lev_num + col_name missing = self.columns.nlevels - len(name_lst) name_lst += [col_fill] * missing name = tuple(name_lst) # to ndarray and maybe infer different dtype level_values = _maybe_casted_values(lev, lab) new_obj.insert(0, name, level_values) new_obj.index = new_index if not inplace: return new_obj # ---------------------------------------------------------------------- # Reindex-based selection methods @Appender(_shared_docs['isna'] % _shared_doc_kwargs) def isna(self): return super().isna() 
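    # NOTE: ``isnull``/``notnull`` below are aliases of ``isna``/``notna``;
    # they reuse the same shared docstrings via ``Appender`` and are kept
    # for backwards compatibility.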
@Appender(_shared_docs['isna'] % _shared_doc_kwargs) def isnull(self): return super().isnull() @Appender(_shared_docs['notna'] % _shared_doc_kwargs) def notna(self): return super().notna() @Appender(_shared_docs['notna'] % _shared_doc_kwargs) def notnull(self): return super().notnull() def dropna(self, axis=0, how='any', thresh=None, subset=None, inplace=False): """ Remove missing values. See the :ref:`User Guide <missing_data>` for more on which values are considered missing, and how to work with missing data. Parameters ---------- axis : {0 or 'index', 1 or 'columns'}, default 0 Determine if rows or columns which contain missing values are removed. * 0, or 'index' : Drop rows which contain missing values. * 1, or 'columns' : Drop columns which contain missing value. .. deprecated:: 0.23.0 Pass tuple or list to drop on multiple axes. Only a single axis is allowed. how : {'any', 'all'}, default 'any' Determine if row or column is removed from DataFrame, when we have at least one NA or all NA. * 'any' : If any NA values are present, drop that row or column. * 'all' : If all values are NA, drop that row or column. thresh : int, optional Require that many non-NA values. subset : array-like, optional Labels along other axis to consider, e.g. if you are dropping rows these would be a list of columns to include. inplace : bool, default False If True, do operation inplace and return None. Returns ------- DataFrame DataFrame with NA entries dropped from it. See Also -------- DataFrame.isna: Indicate missing values. DataFrame.notna : Indicate existing (non-missing) values. DataFrame.fillna : Replace missing values. Series.dropna : Drop missing values. Index.dropna : Drop missing indices. Examples -------- >>> df = pd.DataFrame({"name": ['Alfred', 'Batman', 'Catwoman'], ... "toy": [np.nan, 'Batmobile', 'Bullwhip'], ... "born": [pd.NaT, pd.Timestamp("1940-04-25"), ... pd.NaT]}) >>> df name toy born 0 Alfred NaN NaT 1 Batman Batmobile 1940-04-25 2 Catwoman Bullwhip NaT Drop the rows where at least one element is missing. >>> df.dropna() name toy born 1 Batman Batmobile 1940-04-25 Drop the columns where at least one element is missing. >>> df.dropna(axis='columns') name 0 Alfred 1 Batman 2 Catwoman Drop the rows where all elements are missing. >>> df.dropna(how='all') name toy born 0 Alfred NaN NaT 1 Batman Batmobile 1940-04-25 2 Catwoman Bullwhip NaT Keep only the rows with at least 2 non-NA values. >>> df.dropna(thresh=2) name toy born 1 Batman Batmobile 1940-04-25 2 Catwoman Bullwhip NaT Define in which columns to look for missing values. >>> df.dropna(subset=['name', 'born']) name toy born 1 Batman Batmobile 1940-04-25 Keep the DataFrame with valid entries in the same variable. 
>>> df.dropna(inplace=True) >>> df name toy born 1 Batman Batmobile 1940-04-25 """ inplace = validate_bool_kwarg(inplace, 'inplace') if isinstance(axis, (tuple, list)): # GH20987 msg = ("supplying multiple axes to axis is deprecated and " "will be removed in a future version.") warnings.warn(msg, FutureWarning, stacklevel=2) result = self for ax in axis: result = result.dropna(how=how, thresh=thresh, subset=subset, axis=ax) else: axis = self._get_axis_number(axis) agg_axis = 1 - axis agg_obj = self if subset is not None: ax = self._get_axis(agg_axis) indices = ax.get_indexer_for(subset) check = indices == -1 if check.any(): raise KeyError(list(np.compress(check, subset))) agg_obj = self.take(indices, axis=agg_axis) count = agg_obj.count(axis=agg_axis) if thresh is not None: mask = count >= thresh elif how == 'any': mask = count == len(agg_obj._get_axis(agg_axis)) elif how == 'all': mask = count > 0 else: if how is not None: raise ValueError('invalid how option: {h}'.format(h=how)) else: raise TypeError('must specify how or thresh') result = self.loc(axis=axis)[mask] if inplace: self._update_inplace(result) else: return result def drop_duplicates(self, subset=None, keep='first', inplace=False): """ Return DataFrame with duplicate rows removed, optionally only considering certain columns. Indexes, including time indexes are ignored. Parameters ---------- subset : column label or sequence of labels, optional Only consider certain columns for identifying duplicates, by default use all of the columns keep : {'first', 'last', False}, default 'first' - ``first`` : Drop duplicates except for the first occurrence. - ``last`` : Drop duplicates except for the last occurrence. - False : Drop all duplicates. inplace : boolean, default False Whether to drop duplicates in place or to return a copy Returns ------- DataFrame """ if self.empty: return self.copy() inplace = validate_bool_kwarg(inplace, 'inplace') duplicated = self.duplicated(subset, keep=keep) if inplace: inds, = (-duplicated)._ndarray_values.nonzero() new_data = self._data.take(inds) self._update_inplace(new_data) else: return self[-duplicated] def duplicated(self, subset=None, keep='first'): """ Return boolean Series denoting duplicate rows, optionally only considering certain columns. Parameters ---------- subset : column label or sequence of labels, optional Only consider certain columns for identifying duplicates, by default use all of the columns keep : {'first', 'last', False}, default 'first' - ``first`` : Mark duplicates as ``True`` except for the first occurrence. - ``last`` : Mark duplicates as ``True`` except for the last occurrence. - False : Mark all duplicates as ``True``. Returns ------- Series """ from pandas.core.sorting import get_group_index from pandas._libs.hashtable import duplicated_int64, _SIZE_HINT_LIMIT if self.empty: return Series(dtype=bool) def f(vals): labels, shape = algorithms.factorize( vals, size_hint=min(len(self), _SIZE_HINT_LIMIT)) return labels.astype('i8', copy=False), len(shape) if subset is None: subset = self.columns elif (not np.iterable(subset) or isinstance(subset, str) or isinstance(subset, tuple) and subset in self.columns): subset = subset, # Verify all columns in subset exist in the queried dataframe # Otherwise, raise a KeyError, same as if you try to __getitem__ with a # key that doesn't exist. 
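        # Hypothetical illustration: df.duplicated(subset=['a', 'missing']),
        # where 'missing' is not a column, raises a KeyError listing the
        # unknown label(s) instead of silently ignoring them.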
diff = Index(subset).difference(self.columns) if not diff.empty: raise KeyError(diff) vals = (col.values for name, col in self.iteritems() if name in subset) labels, shape = map(list, zip(*map(f, vals))) ids = get_group_index(labels, shape, sort=False, xnull=False) return Series(duplicated_int64(ids, keep), index=self.index) # ---------------------------------------------------------------------- # Sorting @Substitution(**_shared_doc_kwargs) @Appender(NDFrame.sort_values.__doc__) def sort_values(self, by, axis=0, ascending=True, inplace=False, kind='quicksort', na_position='last'): inplace = validate_bool_kwarg(inplace, 'inplace') axis = self._get_axis_number(axis) if not isinstance(by, list): by = [by] if is_sequence(ascending) and len(by) != len(ascending): raise ValueError('Length of ascending (%d) != length of by (%d)' % (len(ascending), len(by))) if len(by) > 1: from pandas.core.sorting import lexsort_indexer keys = [self._get_label_or_level_values(x, axis=axis) for x in by] indexer = lexsort_indexer(keys, orders=ascending, na_position=na_position) indexer = ensure_platform_int(indexer) else: from pandas.core.sorting import nargsort by = by[0] k = self._get_label_or_level_values(by, axis=axis) if isinstance(ascending, (tuple, list)): ascending = ascending[0] indexer = nargsort(k, kind=kind, ascending=ascending, na_position=na_position) new_data = self._data.take(indexer, axis=self._get_block_manager_axis(axis), verify=False) if inplace: return self._update_inplace(new_data) else: return self._constructor(new_data).__finalize__(self) @Substitution(**_shared_doc_kwargs) @Appender(NDFrame.sort_index.__doc__) def sort_index(self, axis=0, level=None, ascending=True, inplace=False, kind='quicksort', na_position='last', sort_remaining=True, by=None): # TODO: this can be combined with Series.sort_index impl as # almost identical inplace = validate_bool_kwarg(inplace, 'inplace') # 10726 if by is not None: warnings.warn("by argument to sort_index is deprecated, " "please use .sort_values(by=...)", FutureWarning, stacklevel=2) if level is not None: raise ValueError("unable to simultaneously sort by and level") return self.sort_values(by, axis=axis, ascending=ascending, inplace=inplace) axis = self._get_axis_number(axis) labels = self._get_axis(axis) # make sure that the axis is lexsorted to start # if not we need to reconstruct to get the correct indexer labels = labels._sort_levels_monotonic() if level is not None: new_axis, indexer = labels.sortlevel(level, ascending=ascending, sort_remaining=sort_remaining) elif isinstance(labels, MultiIndex): from pandas.core.sorting import lexsort_indexer indexer = lexsort_indexer(labels._get_codes_for_sorting(), orders=ascending, na_position=na_position) else: from pandas.core.sorting import nargsort # Check monotonic-ness before sort an index # GH11080 if ((ascending and labels.is_monotonic_increasing) or (not ascending and labels.is_monotonic_decreasing)): if inplace: return else: return self.copy() indexer = nargsort(labels, kind=kind, ascending=ascending, na_position=na_position) baxis = self._get_block_manager_axis(axis) new_data = self._data.take(indexer, axis=baxis, verify=False) # reconstruct axis if needed new_data.axes[baxis] = new_data.axes[baxis]._sort_levels_monotonic() if inplace: return self._update_inplace(new_data) else: return self._constructor(new_data).__finalize__(self) def nlargest(self, n, columns, keep='first'): """ Return the first `n` rows ordered by `columns` in descending order. 
Return the first `n` rows with the largest values in `columns`, in descending order. The columns that are not specified are returned as well, but not used for ordering. This method is equivalent to ``df.sort_values(columns, ascending=False).head(n)``, but more performant. Parameters ---------- n : int Number of rows to return. columns : label or list of labels Column label(s) to order by. keep : {'first', 'last', 'all'}, default 'first' Where there are duplicate values: - `first` : prioritize the first occurrence(s) - `last` : prioritize the last occurrence(s) - ``all`` : do not drop any duplicates, even it means selecting more than `n` items. .. versionadded:: 0.24.0 Returns ------- DataFrame The first `n` rows ordered by the given columns in descending order. See Also -------- DataFrame.nsmallest : Return the first `n` rows ordered by `columns` in ascending order. DataFrame.sort_values : Sort DataFrame by the values. DataFrame.head : Return the first `n` rows without re-ordering. Notes ----- This function cannot be used with all column types. For example, when specifying columns with `object` or `category` dtypes, ``TypeError`` is raised. Examples -------- >>> df = pd.DataFrame({'population': [59000000, 65000000, 434000, ... 434000, 434000, 337000, 11300, ... 11300, 11300], ... 'GDP': [1937894, 2583560 , 12011, 4520, 12128, ... 17036, 182, 38, 311], ... 'alpha-2': ["IT", "FR", "MT", "MV", "BN", ... "IS", "NR", "TV", "AI"]}, ... index=["Italy", "France", "Malta", ... "Maldives", "Brunei", "Iceland", ... "Nauru", "Tuvalu", "Anguilla"]) >>> df population GDP alpha-2 Italy 59000000 1937894 IT France 65000000 2583560 FR Malta 434000 12011 MT Maldives 434000 4520 MV Brunei 434000 12128 BN Iceland 337000 17036 IS Nauru 11300 182 NR Tuvalu 11300 38 TV Anguilla 11300 311 AI In the following example, we will use ``nlargest`` to select the three rows having the largest values in column "population". >>> df.nlargest(3, 'population') population GDP alpha-2 France 65000000 2583560 FR Italy 59000000 1937894 IT Malta 434000 12011 MT When using ``keep='last'``, ties are resolved in reverse order: >>> df.nlargest(3, 'population', keep='last') population GDP alpha-2 France 65000000 2583560 FR Italy 59000000 1937894 IT Brunei 434000 12128 BN When using ``keep='all'``, all duplicate items are maintained: >>> df.nlargest(3, 'population', keep='all') population GDP alpha-2 France 65000000 2583560 FR Italy 59000000 1937894 IT Malta 434000 12011 MT Maldives 434000 4520 MV Brunei 434000 12128 BN To order by the largest values in column "population" and then "GDP", we can specify multiple columns like in the next example. >>> df.nlargest(3, ['population', 'GDP']) population GDP alpha-2 France 65000000 2583560 FR Italy 59000000 1937894 IT Brunei 434000 12128 BN """ return algorithms.SelectNFrame(self, n=n, keep=keep, columns=columns).nlargest() def nsmallest(self, n, columns, keep='first'): """ Return the first `n` rows ordered by `columns` in ascending order. Return the first `n` rows with the smallest values in `columns`, in ascending order. The columns that are not specified are returned as well, but not used for ordering. This method is equivalent to ``df.sort_values(columns, ascending=True).head(n)``, but more performant. Parameters ---------- n : int Number of items to retrieve. columns : list or str Column name or names to order by. keep : {'first', 'last', 'all'}, default 'first' Where there are duplicate values: - ``first`` : take the first occurrence. - ``last`` : take the last occurrence. 
        - ``all`` : do not drop any duplicates, even if it means
          selecting more than `n` items.

          .. versionadded:: 0.24.0

        Returns
        -------
        DataFrame

        See Also
        --------
        DataFrame.nlargest : Return the first `n` rows ordered by `columns`
            in descending order.
        DataFrame.sort_values : Sort DataFrame by the values.
        DataFrame.head : Return the first `n` rows without re-ordering.

        Examples
        --------
        >>> df = pd.DataFrame({'population': [59000000, 65000000, 434000,
        ...                                   434000, 434000, 337000, 11300,
        ...                                   11300, 11300],
        ...                    'GDP': [1937894, 2583560, 12011, 4520, 12128,
        ...                            17036, 182, 38, 311],
        ...                    'alpha-2': ["IT", "FR", "MT", "MV", "BN",
        ...                                "IS", "NR", "TV", "AI"]},
        ...                   index=["Italy", "France", "Malta",
        ...                          "Maldives", "Brunei", "Iceland",
        ...                          "Nauru", "Tuvalu", "Anguilla"])
        >>> df
                  population      GDP alpha-2
        Italy       59000000  1937894      IT
        France      65000000  2583560      FR
        Malta         434000    12011      MT
        Maldives      434000     4520      MV
        Brunei        434000    12128      BN
        Iceland       337000    17036      IS
        Nauru          11300      182      NR
        Tuvalu         11300       38      TV
        Anguilla       11300      311      AI

        In the following example, we will use ``nsmallest`` to select the
        three rows having the smallest values in column "population".

        >>> df.nsmallest(3, 'population')
                  population  GDP alpha-2
        Nauru          11300  182      NR
        Tuvalu         11300   38      TV
        Anguilla       11300  311      AI

        When using ``keep='last'``, ties are resolved in reverse order:

        >>> df.nsmallest(3, 'population', keep='last')
                  population  GDP alpha-2
        Anguilla       11300  311      AI
        Tuvalu         11300   38      TV
        Nauru          11300  182      NR

        When using ``keep='all'``, all duplicate items are maintained:

        >>> df.nsmallest(3, 'population', keep='all')
                  population  GDP alpha-2
        Nauru          11300  182      NR
        Tuvalu         11300   38      TV
        Anguilla       11300  311      AI

        To order by the smallest values in column "population" and then
        "GDP", we can specify multiple columns like in the next example.

        >>> df.nsmallest(3, ['population', 'GDP'])
                  population  GDP alpha-2
        Tuvalu         11300   38      TV
        Nauru          11300  182      NR
        Anguilla       11300  311      AI
        """
        return algorithms.SelectNFrame(self,
                                       n=n,
                                       keep=keep,
                                       columns=columns).nsmallest()

    def swaplevel(self, i=-2, j=-1, axis=0):
        """
        Swap levels i and j in a MultiIndex on a particular axis.

        Parameters
        ----------
        i, j : int, string (can be mixed)
            Level of index to be swapped. Can pass level name as string.

        Returns
        -------
        DataFrame

        .. versionchanged:: 0.18.1

           The indexes ``i`` and ``j`` are now optional, and default to
           the two innermost levels of the index.
        """
        result = self.copy()

        axis = self._get_axis_number(axis)
        if axis == 0:
            result.index = result.index.swaplevel(i, j)
        else:
            result.columns = result.columns.swaplevel(i, j)
        return result

    def reorder_levels(self, order, axis=0):
        """
        Rearrange index levels using input order. May not drop or
        duplicate levels.

        Parameters
        ----------
        order : list of int or list of str
            List representing new level order. Reference level by number
            (position) or by key (label).
        axis : int
            Where to reorder levels.
Returns ------- type of caller (new object) """ axis = self._get_axis_number(axis) if not isinstance(self._get_axis(axis), MultiIndex): # pragma: no cover raise TypeError('Can only reorder levels on a hierarchical axis.') result = self.copy() if axis == 0: result.index = result.index.reorder_levels(order) else: result.columns = result.columns.reorder_levels(order) return result # ---------------------------------------------------------------------- # Arithmetic / combination related def _combine_frame(self, other, func, fill_value=None, level=None): this, other = self.align(other, join='outer', level=level, copy=False) new_index, new_columns = this.index, this.columns def _arith_op(left, right): # for the mixed_type case where we iterate over columns, # _arith_op(left, right) is equivalent to # left._binop(right, func, fill_value=fill_value) left, right = ops.fill_binop(left, right, fill_value) return func(left, right) if ops.should_series_dispatch(this, other, func): # iterate over columns return ops.dispatch_to_series(this, other, _arith_op) else: result = _arith_op(this.values, other.values) return self._constructor(result, index=new_index, columns=new_columns, copy=False) def _combine_match_index(self, other, func, level=None): left, right = self.align(other, join='outer', axis=0, level=level, copy=False) assert left.index.equals(right.index) if left._is_mixed_type or right._is_mixed_type: # operate column-wise; avoid costly object-casting in `.values` return ops.dispatch_to_series(left, right, func) else: # fastpath --> operate directly on values with np.errstate(all="ignore"): new_data = func(left.values.T, right.values).T return self._constructor(new_data, index=left.index, columns=self.columns, copy=False) def _combine_match_columns(self, other, func, level=None): assert isinstance(other, Series) left, right = self.align(other, join='outer', axis=1, level=level, copy=False) assert left.columns.equals(right.index) return ops.dispatch_to_series(left, right, func, axis="columns") def _combine_const(self, other, func): assert lib.is_scalar(other) or np.ndim(other) == 0 return ops.dispatch_to_series(self, other, func) def combine(self, other, func, fill_value=None, overwrite=True): """ Perform column-wise combine with another DataFrame. Combines a DataFrame with `other` DataFrame using `func` to element-wise combine columns. The row and column indexes of the resulting DataFrame will be the union of the two. Parameters ---------- other : DataFrame The DataFrame to merge column-wise. func : function Function that takes two series as inputs and return a Series or a scalar. Used to merge the two dataframes column by columns. fill_value : scalar value, default None The value to fill NaNs with prior to passing any column to the merge func. overwrite : bool, default True If True, columns in `self` that do not exist in `other` will be overwritten with NaNs. Returns ------- DataFrame Combination of the provided DataFrames. See Also -------- DataFrame.combine_first : Combine two DataFrame objects and default to non-null values in frame calling the method. Examples -------- Combine using a simple function that chooses the smaller column. >>> df1 = pd.DataFrame({'A': [0, 0], 'B': [4, 4]}) >>> df2 = pd.DataFrame({'A': [1, 1], 'B': [3, 3]}) >>> take_smaller = lambda s1, s2: s1 if s1.sum() < s2.sum() else s2 >>> df1.combine(df2, take_smaller) A B 0 0 3 1 0 3 Example using a true element-wise combine function. 
>>> df1 = pd.DataFrame({'A': [5, 0], 'B': [2, 4]}) >>> df2 = pd.DataFrame({'A': [1, 1], 'B': [3, 3]}) >>> df1.combine(df2, np.minimum) A B 0 1 2 1 0 3 Using `fill_value` fills Nones prior to passing the column to the merge function. >>> df1 = pd.DataFrame({'A': [0, 0], 'B': [None, 4]}) >>> df2 = pd.DataFrame({'A': [1, 1], 'B': [3, 3]}) >>> df1.combine(df2, take_smaller, fill_value=-5) A B 0 0 -5.0 1 0 4.0 However, if the same element in both dataframes is None, that None is preserved >>> df1 = pd.DataFrame({'A': [0, 0], 'B': [None, 4]}) >>> df2 = pd.DataFrame({'A': [1, 1], 'B': [None, 3]}) >>> df1.combine(df2, take_smaller, fill_value=-5) A B 0 0 -5.0 1 0 3.0 Example that demonstrates the use of `overwrite` and behavior when the axis differ between the dataframes. >>> df1 = pd.DataFrame({'A': [0, 0], 'B': [4, 4]}) >>> df2 = pd.DataFrame({'B': [3, 3], 'C': [-10, 1], }, index=[1, 2]) >>> df1.combine(df2, take_smaller) A B C 0 NaN NaN NaN 1 NaN 3.0 -10.0 2 NaN 3.0 1.0 >>> df1.combine(df2, take_smaller, overwrite=False) A B C 0 0.0 NaN NaN 1 0.0 3.0 -10.0 2 NaN 3.0 1.0 Demonstrating the preference of the passed in dataframe. >>> df2 = pd.DataFrame({'B': [3, 3], 'C': [1, 1], }, index=[1, 2]) >>> df2.combine(df1, take_smaller) A B C 0 0.0 NaN NaN 1 0.0 3.0 NaN 2 NaN 3.0 NaN >>> df2.combine(df1, take_smaller, overwrite=False) A B C 0 0.0 NaN NaN 1 0.0 3.0 1.0 2 NaN 3.0 1.0 """ other_idxlen = len(other.index) # save for compare this, other = self.align(other, copy=False) new_index = this.index if other.empty and len(new_index) == len(self.index): return self.copy() if self.empty and len(other) == other_idxlen: return other.copy() # sorts if possible new_columns = this.columns.union(other.columns) do_fill = fill_value is not None result = {} for col in new_columns: series = this[col] otherSeries = other[col] this_dtype = series.dtype other_dtype = otherSeries.dtype this_mask = isna(series) other_mask = isna(otherSeries) # don't overwrite columns unnecessarily # DO propagate if this column is not in the intersection if not overwrite and other_mask.all(): result[col] = this[col].copy() continue if do_fill: series = series.copy() otherSeries = otherSeries.copy() series[this_mask] = fill_value otherSeries[other_mask] = fill_value if col not in self.columns: # If self DataFrame does not have col in other DataFrame, # try to promote series, which is all NaN, as other_dtype. new_dtype = other_dtype try: series = series.astype(new_dtype, copy=False) except ValueError: # e.g. new_dtype is integer types pass else: # if we have different dtypes, possibly promote new_dtype = find_common_type([this_dtype, other_dtype]) if not is_dtype_equal(this_dtype, new_dtype): series = series.astype(new_dtype) if not is_dtype_equal(other_dtype, new_dtype): otherSeries = otherSeries.astype(new_dtype) arr = func(series, otherSeries) arr = maybe_downcast_to_dtype(arr, this_dtype) result[col] = arr # convert_objects just in case return self._constructor(result, index=new_index, columns=new_columns) def combine_first(self, other): """ Update null elements with value in the same location in `other`. Combine two DataFrame objects by filling null values in one DataFrame with non-null values from other DataFrame. The row and column indexes of the resulting DataFrame will be the union of the two. Parameters ---------- other : DataFrame Provided DataFrame to use to fill null values. Returns ------- DataFrame See Also -------- DataFrame.combine : Perform series-wise operation on two DataFrames using a given function. 
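
        Notes
        -----
        Internally, ``combine_first`` is a thin wrapper around
        :meth:`DataFrame.combine` with ``overwrite=False`` and a combiner
        that keeps the caller's value wherever it is not NA. Given two
        frames ``df1`` and ``df2`` as in the examples below, a rough,
        illustrative equivalent (not the exact implementation) is::

            df1.combine(df2, lambda s1, s2: s1.where(s1.notna(), s2),
                        overwrite=False)
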
Examples -------- >>> df1 = pd.DataFrame({'A': [None, 0], 'B': [None, 4]}) >>> df2 = pd.DataFrame({'A': [1, 1], 'B': [3, 3]}) >>> df1.combine_first(df2) A B 0 1.0 3.0 1 0.0 4.0 Null values still persist if the location of that null value does not exist in `other` >>> df1 = pd.DataFrame({'A': [None, 0], 'B': [4, None]}) >>> df2 = pd.DataFrame({'B': [3, 3], 'C': [1, 1]}, index=[1, 2]) >>> df1.combine_first(df2) A B C 0 NaN 4.0 NaN 1 0.0 3.0 1.0 2 NaN 3.0 1.0 """ import pandas.core.computation.expressions as expressions def extract_values(arr): # Does two things: # 1. maybe gets the values from the Series / Index # 2. convert datelike to i8 if isinstance(arr, (ABCIndexClass, ABCSeries)): arr = arr._values if needs_i8_conversion(arr): if is_extension_array_dtype(arr.dtype): arr = arr.asi8 else: arr = arr.view('i8') return arr def combiner(x, y): mask = isna(x) if isinstance(mask, (ABCIndexClass, ABCSeries)): mask = mask._values x_values = extract_values(x) y_values = extract_values(y) # If the column y in other DataFrame is not in first DataFrame, # just return y_values. if y.name not in self.columns: return y_values return expressions.where(mask, y_values, x_values) return self.combine(other, combiner, overwrite=False) @deprecate_kwarg(old_arg_name='raise_conflict', new_arg_name='errors', mapping={False: 'ignore', True: 'raise'}) def update(self, other, join='left', overwrite=True, filter_func=None, errors='ignore'): """ Modify in place using non-NA values from another DataFrame. Aligns on indices. There is no return value. Parameters ---------- other : DataFrame, or object coercible into a DataFrame Should have at least one matching index/column label with the original DataFrame. If a Series is passed, its name attribute must be set, and that will be used as the column name to align with the original DataFrame. join : {'left'}, default 'left' Only left join is implemented, keeping the index and columns of the original object. overwrite : bool, default True How to handle non-NA values for overlapping keys: * True: overwrite original DataFrame's values with values from `other`. * False: only update values that are NA in the original DataFrame. filter_func : callable(1d-array) -> bool 1d-array, optional Can choose to replace values other than NA. Return True for values that should be updated. errors : {'raise', 'ignore'}, default 'ignore' If 'raise', will raise a ValueError if the DataFrame and `other` both contain non-NA data in the same place. .. versionchanged :: 0.24.0 Changed from `raise_conflict=False|True` to `errors='ignore'|'raise'`. Returns ------- None : method directly changes calling object Raises ------ ValueError * When `errors='raise'` and there's overlapping non-NA data. * When `errors` is not either `'ignore'` or `'raise'` NotImplementedError * If `join != 'left'` See Also -------- dict.update : Similar method for dictionaries. DataFrame.merge : For column(s)-on-columns(s) operations. Examples -------- >>> df = pd.DataFrame({'A': [1, 2, 3], ... 'B': [400, 500, 600]}) >>> new_df = pd.DataFrame({'B': [4, 5, 6], ... 'C': [7, 8, 9]}) >>> df.update(new_df) >>> df A B 0 1 4 1 2 5 2 3 6 The DataFrame's length does not increase as a result of the update, only values at matching index/column labels are updated. >>> df = pd.DataFrame({'A': ['a', 'b', 'c'], ... 'B': ['x', 'y', 'z']}) >>> new_df = pd.DataFrame({'B': ['d', 'e', 'f', 'g', 'h', 'i']}) >>> df.update(new_df) >>> df A B 0 a d 1 b e 2 c f For Series, it's name attribute must be set. 
>>> df = pd.DataFrame({'A': ['a', 'b', 'c'], ... 'B': ['x', 'y', 'z']}) >>> new_column = pd.Series(['d', 'e'], name='B', index=[0, 2]) >>> df.update(new_column) >>> df A B 0 a d 1 b y 2 c e >>> df = pd.DataFrame({'A': ['a', 'b', 'c'], ... 'B': ['x', 'y', 'z']}) >>> new_df = pd.DataFrame({'B': ['d', 'e']}, index=[1, 2]) >>> df.update(new_df) >>> df A B 0 a x 1 b d 2 c e If `other` contains NaNs the corresponding values are not updated in the original dataframe. >>> df = pd.DataFrame({'A': [1, 2, 3], ... 'B': [400, 500, 600]}) >>> new_df = pd.DataFrame({'B': [4, np.nan, 6]}) >>> df.update(new_df) >>> df A B 0 1 4.0 1 2 500.0 2 3 6.0 """ import pandas.core.computation.expressions as expressions # TODO: Support other joins if join != 'left': # pragma: no cover raise NotImplementedError("Only left join is supported") if errors not in ['ignore', 'raise']: raise ValueError("The parameter errors must be either " "'ignore' or 'raise'") if not isinstance(other, DataFrame): other = DataFrame(other) other = other.reindex_like(self) for col in self.columns: this = self[col]._values that = other[col]._values if filter_func is not None: with np.errstate(all='ignore'): mask = ~filter_func(this) | isna(that) else: if errors == 'raise': mask_this = notna(that) mask_that = notna(this) if any(mask_this & mask_that): raise ValueError("Data overlaps.") if overwrite: mask = isna(that) else: mask = notna(this) # don't overwrite columns unnecessarily if mask.all(): continue self[col] = expressions.where(mask, this, that) # ---------------------------------------------------------------------- # Data reshaping _shared_docs['pivot'] = """ Return reshaped DataFrame organized by given index / column values. Reshape data (produce a "pivot" table) based on column values. Uses unique values from specified `index` / `columns` to form axes of the resulting DataFrame. This function does not support data aggregation, multiple values will result in a MultiIndex in the columns. See the :ref:`User Guide <reshaping>` for more on reshaping. Parameters ----------%s index : string or object, optional Column to use to make new frame's index. If None, uses existing index. columns : string or object Column to use to make new frame's columns. values : string, object or a list of the previous, optional Column(s) to use for populating new frame's values. If not specified, all remaining columns will be used and the result will have hierarchically indexed columns. .. versionchanged :: 0.23.0 Also accept list of column names. Returns ------- DataFrame Returns reshaped DataFrame. Raises ------ ValueError: When there are any `index`, `columns` combinations with multiple values. `DataFrame.pivot_table` when you need to aggregate. See Also -------- DataFrame.pivot_table : Generalization of pivot that can handle duplicate values for one index/column pair. DataFrame.unstack : Pivot based on the index values instead of a column. Notes ----- For finer-tuned control, see hierarchical indexing documentation along with the related stack/unstack methods. Examples -------- >>> df = pd.DataFrame({'foo': ['one', 'one', 'one', 'two', 'two', ... 'two'], ... 'bar': ['A', 'B', 'C', 'A', 'B', 'C'], ... 'baz': [1, 2, 3, 4, 5, 6], ... 
'zoo': ['x', 'y', 'z', 'q', 'w', 't']}) >>> df foo bar baz zoo 0 one A 1 x 1 one B 2 y 2 one C 3 z 3 two A 4 q 4 two B 5 w 5 two C 6 t >>> df.pivot(index='foo', columns='bar', values='baz') bar A B C foo one 1 2 3 two 4 5 6 >>> df.pivot(index='foo', columns='bar')['baz'] bar A B C foo one 1 2 3 two 4 5 6 >>> df.pivot(index='foo', columns='bar', values=['baz', 'zoo']) baz zoo bar A B C A B C foo one 1 2 3 x y z two 4 5 6 q w t A ValueError is raised if there are any duplicates. >>> df = pd.DataFrame({"foo": ['one', 'one', 'two', 'two'], ... "bar": ['A', 'A', 'B', 'C'], ... "baz": [1, 2, 3, 4]}) >>> df foo bar baz 0 one A 1 1 one A 2 2 two B 3 3 two C 4 Notice that the first two rows are the same for our `index` and `columns` arguments. >>> df.pivot(index='foo', columns='bar', values='baz') Traceback (most recent call last): ... ValueError: Index contains duplicate entries, cannot reshape """ @Substitution('') @Appender(_shared_docs['pivot']) def pivot(self, index=None, columns=None, values=None): from pandas.core.reshape.pivot import pivot return pivot(self, index=index, columns=columns, values=values) _shared_docs['pivot_table'] = """ Create a spreadsheet-style pivot table as a DataFrame. The levels in the pivot table will be stored in MultiIndex objects (hierarchical indexes) on the index and columns of the result DataFrame. Parameters ----------%s values : column to aggregate, optional index : column, Grouper, array, or list of the previous If an array is passed, it must be the same length as the data. The list can contain any of the other types (except list). Keys to group by on the pivot table index. If an array is passed, it is being used as the same manner as column values. columns : column, Grouper, array, or list of the previous If an array is passed, it must be the same length as the data. The list can contain any of the other types (except list). Keys to group by on the pivot table column. If an array is passed, it is being used as the same manner as column values. aggfunc : function, list of functions, dict, default numpy.mean If list of functions passed, the resulting pivot table will have hierarchical columns whose top level are the function names (inferred from the function objects themselves) If dict is passed, the key is column to aggregate and value is function or list of functions fill_value : scalar, default None Value to replace missing values with margins : boolean, default False Add all row / columns (e.g. for subtotal / grand totals) dropna : boolean, default True Do not include columns whose entries are all NaN margins_name : string, default 'All' Name of the row / column that will contain the totals when margins is True. observed : boolean, default False This only applies if any of the groupers are Categoricals. If True: only show observed values for categorical groupers. If False: show all values for categorical groupers. .. versionchanged :: 0.25.0 Returns ------- DataFrame See Also -------- DataFrame.pivot : Pivot without aggregation that can handle non-numeric data. Examples -------- >>> df = pd.DataFrame({"A": ["foo", "foo", "foo", "foo", "foo", ... "bar", "bar", "bar", "bar"], ... "B": ["one", "one", "one", "two", "two", ... "one", "one", "two", "two"], ... "C": ["small", "large", "large", "small", ... "small", "large", "small", "small", ... "large"], ... "D": [1, 2, 2, 3, 3, 4, 5, 6, 7], ... 
"E": [2, 4, 5, 5, 6, 6, 8, 9, 9]}) >>> df A B C D E 0 foo one small 1 2 1 foo one large 2 4 2 foo one large 2 5 3 foo two small 3 5 4 foo two small 3 6 5 bar one large 4 6 6 bar one small 5 8 7 bar two small 6 9 8 bar two large 7 9 This first example aggregates values by taking the sum. >>> table = pd.pivot_table(df, values='D', index=['A', 'B'], ... columns=['C'], aggfunc=np.sum) >>> table C large small A B bar one 4.0 5.0 two 7.0 6.0 foo one 4.0 1.0 two NaN 6.0 We can also fill missing values using the `fill_value` parameter. >>> table = pd.pivot_table(df, values='D', index=['A', 'B'], ... columns=['C'], aggfunc=np.sum, fill_value=0) >>> table C large small A B bar one 4 5 two 7 6 foo one 4 1 two 0 6 The next example aggregates by taking the mean across multiple columns. >>> table = pd.pivot_table(df, values=['D', 'E'], index=['A', 'C'], ... aggfunc={'D': np.mean, ... 'E': np.mean}) >>> table D E A C bar large 5.500000 7.500000 small 5.500000 8.500000 foo large 2.000000 4.500000 small 2.333333 4.333333 We can also calculate multiple types of aggregations for any given value column. >>> table = pd.pivot_table(df, values=['D', 'E'], index=['A', 'C'], ... aggfunc={'D': np.mean, ... 'E': [min, max, np.mean]}) >>> table D E mean max mean min A C bar large 5.500000 9.0 7.500000 6.0 small 5.500000 9.0 8.500000 8.0 foo large 2.000000 5.0 4.500000 4.0 small 2.333333 6.0 4.333333 2.0 """ @Substitution('') @Appender(_shared_docs['pivot_table']) def pivot_table(self, values=None, index=None, columns=None, aggfunc='mean', fill_value=None, margins=False, dropna=True, margins_name='All', observed=False): from pandas.core.reshape.pivot import pivot_table return pivot_table(self, values=values, index=index, columns=columns, aggfunc=aggfunc, fill_value=fill_value, margins=margins, dropna=dropna, margins_name=margins_name, observed=observed) def stack(self, level=-1, dropna=True): """ Stack the prescribed level(s) from columns to index. Return a reshaped DataFrame or Series having a multi-level index with one or more new inner-most levels compared to the current DataFrame. The new inner-most levels are created by pivoting the columns of the current dataframe: - if the columns have a single level, the output is a Series; - if the columns have multiple levels, the new index level(s) is (are) taken from the prescribed level(s) and the output is a DataFrame. The new index levels are sorted. Parameters ---------- level : int, str, list, default -1 Level(s) to stack from the column axis onto the index axis, defined as one index or label, or a list of indices or labels. dropna : bool, default True Whether to drop rows in the resulting Frame/Series with missing values. Stacking a column level onto the index axis can create combinations of index and column values that are missing from the original dataframe. See Examples section. Returns ------- DataFrame or Series Stacked dataframe or series. See Also -------- DataFrame.unstack : Unstack prescribed level(s) from index axis onto column axis. DataFrame.pivot : Reshape dataframe from long format to wide format. DataFrame.pivot_table : Create a spreadsheet-style pivot table as a DataFrame. Notes ----- The function is named by analogy with a collection of books being reorganized from being side by side on a horizontal position (the columns of the dataframe) to being stacked vertically on top of each other (in the index of the dataframe). Examples -------- **Single level columns** >>> df_single_level_cols = pd.DataFrame([[0, 1], [2, 3]], ... index=['cat', 'dog'], ... 
columns=['weight', 'height']) Stacking a dataframe with a single level column axis returns a Series: >>> df_single_level_cols weight height cat 0 1 dog 2 3 >>> df_single_level_cols.stack() cat weight 0 height 1 dog weight 2 height 3 dtype: int64 **Multi level columns: simple case** >>> multicol1 = pd.MultiIndex.from_tuples([('weight', 'kg'), ... ('weight', 'pounds')]) >>> df_multi_level_cols1 = pd.DataFrame([[1, 2], [2, 4]], ... index=['cat', 'dog'], ... columns=multicol1) Stacking a dataframe with a multi-level column axis: >>> df_multi_level_cols1 weight kg pounds cat 1 2 dog 2 4 >>> df_multi_level_cols1.stack() weight cat kg 1 pounds 2 dog kg 2 pounds 4 **Missing values** >>> multicol2 = pd.MultiIndex.from_tuples([('weight', 'kg'), ... ('height', 'm')]) >>> df_multi_level_cols2 = pd.DataFrame([[1.0, 2.0], [3.0, 4.0]], ... index=['cat', 'dog'], ... columns=multicol2) It is common to have missing values when stacking a dataframe with multi-level columns, as the stacked dataframe typically has more values than the original dataframe. Missing values are filled with NaNs: >>> df_multi_level_cols2 weight height kg m cat 1.0 2.0 dog 3.0 4.0 >>> df_multi_level_cols2.stack() height weight cat kg NaN 1.0 m 2.0 NaN dog kg NaN 3.0 m 4.0 NaN **Prescribing the level(s) to be stacked** The first parameter controls which level or levels are stacked: >>> df_multi_level_cols2.stack(0) kg m cat height NaN 2.0 weight 1.0 NaN dog height NaN 4.0 weight 3.0 NaN >>> df_multi_level_cols2.stack([0, 1]) cat height m 2.0 weight kg 1.0 dog height m 4.0 weight kg 3.0 dtype: float64 **Dropping missing values** >>> df_multi_level_cols3 = pd.DataFrame([[None, 1.0], [2.0, 3.0]], ... index=['cat', 'dog'], ... columns=multicol2) Note that rows where all values are missing are dropped by default but this behaviour can be controlled via the dropna keyword parameter: >>> df_multi_level_cols3 weight height kg m cat NaN 1.0 dog 2.0 3.0 >>> df_multi_level_cols3.stack(dropna=False) height weight cat kg NaN NaN m 1.0 NaN dog kg NaN 2.0 m 3.0 NaN >>> df_multi_level_cols3.stack(dropna=True) height weight cat m 1.0 NaN dog kg NaN 2.0 m 3.0 NaN """ from pandas.core.reshape.reshape import stack, stack_multiple if isinstance(level, (tuple, list)): return stack_multiple(self, level, dropna=dropna) else: return stack(self, level, dropna=dropna) def unstack(self, level=-1, fill_value=None): """ Pivot a level of the (necessarily hierarchical) index labels, returning a DataFrame having a new level of column labels whose inner-most level consists of the pivoted index labels. If the index is not a MultiIndex, the output will be a Series (the analogue of stack when the columns are not a MultiIndex). The level involved will automatically get sorted. Parameters ---------- level : int, string, or list of these, default -1 (last level) Level(s) of index to unstack, can pass level name fill_value : replace NaN with this value if the unstack produces missing values .. versionadded:: 0.18.0 Returns ------- Series or DataFrame See Also -------- DataFrame.pivot : Pivot a table based on column values. DataFrame.stack : Pivot a level of the column labels (inverse operation from `unstack`). Examples -------- >>> index = pd.MultiIndex.from_tuples([('one', 'a'), ('one', 'b'), ... 
('two', 'a'), ('two', 'b')]) >>> s = pd.Series(np.arange(1.0, 5.0), index=index) >>> s one a 1.0 b 2.0 two a 3.0 b 4.0 dtype: float64 >>> s.unstack(level=-1) a b one 1.0 2.0 two 3.0 4.0 >>> s.unstack(level=0) one two a 1.0 3.0 b 2.0 4.0 >>> df = s.unstack(level=0) >>> df.unstack() one a 1.0 b 2.0 two a 3.0 b 4.0 dtype: float64 """ from pandas.core.reshape.reshape import unstack return unstack(self, level, fill_value) _shared_docs['melt'] = (""" Unpivot a DataFrame from wide format to long format, optionally leaving identifier variables set. This function is useful to massage a DataFrame into a format where one or more columns are identifier variables (`id_vars`), while all other columns, considered measured variables (`value_vars`), are "unpivoted" to the row axis, leaving just two non-identifier columns, 'variable' and 'value'. %(versionadded)s Parameters ---------- frame : DataFrame id_vars : tuple, list, or ndarray, optional Column(s) to use as identifier variables. value_vars : tuple, list, or ndarray, optional Column(s) to unpivot. If not specified, uses all columns that are not set as `id_vars`. var_name : scalar Name to use for the 'variable' column. If None it uses ``frame.columns.name`` or 'variable'. value_name : scalar, default 'value' Name to use for the 'value' column. col_level : int or string, optional If columns are a MultiIndex then use this level to melt. Returns ------- DataFrame Unpivoted DataFrame. See Also -------- %(other)s pivot_table DataFrame.pivot Examples -------- >>> df = pd.DataFrame({'A': {0: 'a', 1: 'b', 2: 'c'}, ... 'B': {0: 1, 1: 3, 2: 5}, ... 'C': {0: 2, 1: 4, 2: 6}}) >>> df A B C 0 a 1 2 1 b 3 4 2 c 5 6 >>> %(caller)sid_vars=['A'], value_vars=['B']) A variable value 0 a B 1 1 b B 3 2 c B 5 >>> %(caller)sid_vars=['A'], value_vars=['B', 'C']) A variable value 0 a B 1 1 b B 3 2 c B 5 3 a C 2 4 b C 4 5 c C 6 The names of 'variable' and 'value' columns can be customized: >>> %(caller)sid_vars=['A'], value_vars=['B'], ... var_name='myVarname', value_name='myValname') A myVarname myValname 0 a B 1 1 b B 3 2 c B 5 If you have multi-index columns: >>> df.columns = [list('ABC'), list('DEF')] >>> df A B C D E F 0 a 1 2 1 b 3 4 2 c 5 6 >>> %(caller)scol_level=0, id_vars=['A'], value_vars=['B']) A variable value 0 a B 1 1 b B 3 2 c B 5 >>> %(caller)sid_vars=[('A', 'D')], value_vars=[('B', 'E')]) (A, D) variable_0 variable_1 value 0 a B E 1 1 b B E 3 2 c B E 5 """) @Appender(_shared_docs['melt'] % dict(caller='df.melt(', versionadded='.. versionadded:: 0.20.0\n', other='melt')) def melt(self, id_vars=None, value_vars=None, var_name=None, value_name='value', col_level=None): from pandas.core.reshape.melt import melt return melt(self, id_vars=id_vars, value_vars=value_vars, var_name=var_name, value_name=value_name, col_level=col_level) # ---------------------------------------------------------------------- # Time series-related def diff(self, periods=1, axis=0): """ First discrete difference of element. Calculates the difference of a DataFrame element compared with another element in the DataFrame (default is the element in the same column of the previous row). Parameters ---------- periods : int, default 1 Periods to shift for calculating difference, accepts negative values. axis : {0 or 'index', 1 or 'columns'}, default 0 Take difference over rows (0) or columns (1). .. versionadded:: 0.16.1. Returns ------- DataFrame See Also -------- Series.diff: First discrete difference for a Series. DataFrame.pct_change: Percent change over given number of periods. 
DataFrame.shift: Shift index by desired number of periods with an optional time freq. Examples -------- Difference with previous row >>> df = pd.DataFrame({'a': [1, 2, 3, 4, 5, 6], ... 'b': [1, 1, 2, 3, 5, 8], ... 'c': [1, 4, 9, 16, 25, 36]}) >>> df a b c 0 1 1 1 1 2 1 4 2 3 2 9 3 4 3 16 4 5 5 25 5 6 8 36 >>> df.diff() a b c 0 NaN NaN NaN 1 1.0 0.0 3.0 2 1.0 1.0 5.0 3 1.0 1.0 7.0 4 1.0 2.0 9.0 5 1.0 3.0 11.0 Difference with previous column >>> df.diff(axis=1) a b c 0 NaN 0.0 0.0 1 NaN -1.0 3.0 2 NaN -1.0 7.0 3 NaN -1.0 13.0 4 NaN 0.0 20.0 5 NaN 2.0 28.0 Difference with 3rd previous row >>> df.diff(periods=3) a b c 0 NaN NaN NaN 1 NaN NaN NaN 2 NaN NaN NaN 3 3.0 2.0 15.0 4 3.0 4.0 21.0 5 3.0 6.0 27.0 Difference with following row >>> df.diff(periods=-1) a b c 0 -1.0 0.0 -3.0 1 -1.0 -1.0 -5.0 2 -1.0 -1.0 -7.0 3 -1.0 -2.0 -9.0 4 -1.0 -3.0 -11.0 5 NaN NaN NaN """ bm_axis = self._get_block_manager_axis(axis) new_data = self._data.diff(n=periods, axis=bm_axis) return self._constructor(new_data) # ---------------------------------------------------------------------- # Function application def _gotitem(self, key: Union[str, List[str]], ndim: int, subset: Optional[Union[Series, ABCDataFrame]] = None, ) -> Union[Series, ABCDataFrame]: """ Sub-classes to define. Return a sliced object. Parameters ---------- key : string / list of selections ndim : 1,2 requested ndim of result subset : object, default None subset to act on """ if subset is None: subset = self elif subset.ndim == 1: # is Series return subset # TODO: _shallow_copy(subset)? return subset[key] _agg_summary_and_see_also_doc = dedent(""" The aggregation operations are always performed over an axis, either the index (default) or the column axis. This behavior is different from `numpy` aggregation functions (`mean`, `median`, `prod`, `sum`, `std`, `var`), where the default is to compute the aggregation of the flattened array, e.g., ``numpy.mean(arr_2d)`` as opposed to ``numpy.mean(arr_2d, axis=0)``. `agg` is an alias for `aggregate`. Use the alias. See Also -------- DataFrame.apply : Perform any type of operations. DataFrame.transform : Perform transformation type operations. core.groupby.GroupBy : Perform operations over groups. core.resample.Resampler : Perform operations over resampled bins. core.window.Rolling : Perform operations over rolling window. core.window.Expanding : Perform operations over expanding window. core.window.EWM : Perform operation over exponential weighted window. """) _agg_examples_doc = dedent(""" Examples -------- >>> df = pd.DataFrame([[1, 2, 3], ... [4, 5, 6], ... [7, 8, 9], ... [np.nan, np.nan, np.nan]], ... columns=['A', 'B', 'C']) Aggregate these functions over the rows. >>> df.agg(['sum', 'min']) A B C sum 12.0 15.0 18.0 min 1.0 2.0 3.0 Different aggregations per column. >>> df.agg({'A' : ['sum', 'min'], 'B' : ['min', 'max']}) A B max NaN 8.0 min 1.0 2.0 sum 12.0 NaN Aggregate over the columns. >>> df.agg("mean", axis="columns") 0 2.0 1 5.0 2 8.0 3 NaN dtype: float64 """) @Substitution(see_also=_agg_summary_and_see_also_doc, examples=_agg_examples_doc, versionadded='\n.. 
versionadded:: 0.20.0\n', **_shared_doc_kwargs) @Appender(_shared_docs['aggregate']) def aggregate(self, func, axis=0, *args, **kwargs): axis = self._get_axis_number(axis) result = None try: result, how = self._aggregate(func, axis=axis, *args, **kwargs) except TypeError: pass if result is None: return self.apply(func, axis=axis, args=args, **kwargs) return result def _aggregate(self, arg, axis=0, *args, **kwargs): if axis == 1: # NDFrame.aggregate returns a tuple, and we need to transpose # only result result, how = self.T._aggregate(arg, *args, **kwargs) result = result.T if result is not None else result return result, how return super()._aggregate(arg, *args, **kwargs) agg = aggregate @Appender(_shared_docs['transform'] % _shared_doc_kwargs) def transform(self, func, axis=0, *args, **kwargs): axis = self._get_axis_number(axis) if axis == 1: return self.T.transform(func, *args, **kwargs).T return super().transform(func, *args, **kwargs) def apply(self, func, axis=0, broadcast=None, raw=False, reduce=None, result_type=None, args=(), **kwds): """ Apply a function along an axis of the DataFrame. Objects passed to the function are Series objects whose index is either the DataFrame's index (``axis=0``) or the DataFrame's columns (``axis=1``). By default (``result_type=None``), the final return type is inferred from the return type of the applied function. Otherwise, it depends on the `result_type` argument. Parameters ---------- func : function Function to apply to each column or row. axis : {0 or 'index', 1 or 'columns'}, default 0 Axis along which the function is applied: * 0 or 'index': apply function to each column. * 1 or 'columns': apply function to each row. broadcast : bool, optional Only relevant for aggregation functions: * ``False`` or ``None`` : returns a Series whose length is the length of the index or the number of columns (based on the `axis` parameter) * ``True`` : results will be broadcast to the original shape of the frame, the original index and columns will be retained. .. deprecated:: 0.23.0 This argument will be removed in a future version, replaced by result_type='broadcast'. raw : bool, default False * ``False`` : passes each row or column as a Series to the function. * ``True`` : the passed function will receive ndarray objects instead. If you are just applying a NumPy reduction function this will achieve much better performance. reduce : bool or None, default None Try to apply reduction procedures. If the DataFrame is empty, `apply` will use `reduce` to determine whether the result should be a Series or a DataFrame. If ``reduce=None`` (the default), `apply`'s return value will be guessed by calling `func` on an empty Series (note: while guessing, exceptions raised by `func` will be ignored). If ``reduce=True`` a Series will always be returned, and if ``reduce=False`` a DataFrame will always be returned. .. deprecated:: 0.23.0 This argument will be removed in a future version, replaced by ``result_type='reduce'``. result_type : {'expand', 'reduce', 'broadcast', None}, default None These only act when ``axis=1`` (columns): * 'expand' : list-like results will be turned into columns. * 'reduce' : returns a Series if possible rather than expanding list-like results. This is the opposite of 'expand'. * 'broadcast' : results will be broadcast to the original shape of the DataFrame, the original index and columns will be retained. The default behaviour (None) depends on the return value of the applied function: list-like results will be returned as a Series of those. 
However if the apply function returns a Series these are expanded to columns. .. versionadded:: 0.23.0 args : tuple Positional arguments to pass to `func` in addition to the array/series. **kwds Additional keyword arguments to pass as keywords arguments to `func`. Returns ------- Series or DataFrame Result of applying ``func`` along the given axis of the DataFrame. See Also -------- DataFrame.applymap: For elementwise operations. DataFrame.aggregate: Only perform aggregating type operations. DataFrame.transform: Only perform transforming type operations. Notes ----- In the current implementation apply calls `func` twice on the first column/row to decide whether it can take a fast or slow code path. This can lead to unexpected behavior if `func` has side-effects, as they will take effect twice for the first column/row. Examples -------- >>> df = pd.DataFrame([[4, 9]] * 3, columns=['A', 'B']) >>> df A B 0 4 9 1 4 9 2 4 9 Using a numpy universal function (in this case the same as ``np.sqrt(df)``): >>> df.apply(np.sqrt) A B 0 2.0 3.0 1 2.0 3.0 2 2.0 3.0 Using a reducing function on either axis >>> df.apply(np.sum, axis=0) A 12 B 27 dtype: int64 >>> df.apply(np.sum, axis=1) 0 13 1 13 2 13 dtype: int64 Returning a list-like will result in a Series >>> df.apply(lambda x: [1, 2], axis=1) 0 [1, 2] 1 [1, 2] 2 [1, 2] dtype: object Passing result_type='expand' will expand list-like results to columns of a Dataframe >>> df.apply(lambda x: [1, 2], axis=1, result_type='expand') 0 1 0 1 2 1 1 2 2 1 2 Returning a Series inside the function is similar to passing ``result_type='expand'``. The resulting column names will be the Series index. >>> df.apply(lambda x: pd.Series([1, 2], index=['foo', 'bar']), axis=1) foo bar 0 1 2 1 1 2 2 1 2 Passing ``result_type='broadcast'`` will ensure the same shape result, whether list-like or scalar is returned by the function, and broadcast it along the axis. The resulting column names will be the originals. >>> df.apply(lambda x: [1, 2], axis=1, result_type='broadcast') A B 0 1 2 1 1 2 2 1 2 """ from pandas.core.apply import frame_apply op = frame_apply(self, func=func, axis=axis, broadcast=broadcast, raw=raw, reduce=reduce, result_type=result_type, args=args, kwds=kwds) return op.get_result() def applymap(self, func): """ Apply a function to a Dataframe elementwise. This method applies a function that accepts and returns a scalar to every element of a DataFrame. Parameters ---------- func : callable Python function, returns a single value from a single value. Returns ------- DataFrame Transformed DataFrame. See Also -------- DataFrame.apply : Apply a function along input axis of DataFrame. Notes ----- In the current implementation applymap calls `func` twice on the first column/row to decide whether it can take a fast or slow code path. This can lead to unexpected behavior if `func` has side-effects, as they will take effect twice for the first column/row. Examples -------- >>> df = pd.DataFrame([[1, 2.12], [3.356, 4.567]]) >>> df 0 1 0 1.000 2.120 1 3.356 4.567 >>> df.applymap(lambda x: len(str(x))) 0 1 0 3 4 1 5 5 Note that a vectorized version of `func` often exists, which will be much faster. You could square each number elementwise. >>> df.applymap(lambda x: x**2) 0 1 0 1.000000 4.494400 1 11.262736 20.857489 But it's better to avoid applymap in that case. 
>>> df ** 2 0 1 0 1.000000 4.494400 1 11.262736 20.857489 """ # if we have a dtype == 'M8[ns]', provide boxed values def infer(x): if x.empty: return lib.map_infer(x, func) return lib.map_infer(x.astype(object).values, func) return self.apply(infer) # ---------------------------------------------------------------------- # Merging / joining methods def append(self, other, ignore_index=False, verify_integrity=False, sort=None): """ Append rows of `other` to the end of caller, returning a new object. Columns in `other` that are not in the caller are added as new columns. Parameters ---------- other : DataFrame or Series/dict-like object, or list of these The data to append. ignore_index : boolean, default False If True, do not use the index labels. verify_integrity : boolean, default False If True, raise ValueError on creating index with duplicates. sort : boolean, default None Sort columns if the columns of `self` and `other` are not aligned. The default sorting is deprecated and will change to not-sorting in a future version of pandas. Explicitly pass ``sort=True`` to silence the warning and sort. Explicitly pass ``sort=False`` to silence the warning and not sort. .. versionadded:: 0.23.0 Returns ------- DataFrame See Also -------- concat : General function to concatenate DataFrame or Series objects. Notes ----- If a list of dict/series is passed and the keys are all contained in the DataFrame's index, the order of the columns in the resulting DataFrame will be unchanged. Iteratively appending rows to a DataFrame can be more computationally intensive than a single concatenate. A better solution is to append those rows to a list and then concatenate the list with the original DataFrame all at once. Examples -------- >>> df = pd.DataFrame([[1, 2], [3, 4]], columns=list('AB')) >>> df A B 0 1 2 1 3 4 >>> df2 = pd.DataFrame([[5, 6], [7, 8]], columns=list('AB')) >>> df.append(df2) A B 0 1 2 1 3 4 0 5 6 1 7 8 With `ignore_index` set to True: >>> df.append(df2, ignore_index=True) A B 0 1 2 1 3 4 2 5 6 3 7 8 The following, while not recommended methods for generating DataFrames, show two ways to generate a DataFrame from multiple data sources. Less efficient: >>> df = pd.DataFrame(columns=['A']) >>> for i in range(5): ... df = df.append({'A': i}, ignore_index=True) >>> df A 0 0 1 1 2 2 3 3 4 4 More efficient: >>> pd.concat([pd.DataFrame([i], columns=['A']) for i in range(5)], ... 
ignore_index=True) A 0 0 1 1 2 2 3 3 4 4 """ if isinstance(other, (Series, dict)): if isinstance(other, dict): other = Series(other) if other.name is None and not ignore_index: raise TypeError('Can only append a Series if ignore_index=True' ' or if the Series has a name') if other.name is None: index = None else: # other must have the same index name as self, otherwise # index name will be reset index = Index([other.name], name=self.index.name) idx_diff = other.index.difference(self.columns) try: combined_columns = self.columns.append(idx_diff) except TypeError: combined_columns = self.columns.astype(object).append(idx_diff) other = other.reindex(combined_columns, copy=False) other = DataFrame(other.values.reshape((1, len(other))), index=index, columns=combined_columns) other = other._convert(datetime=True, timedelta=True) if not self.columns.equals(combined_columns): self = self.reindex(columns=combined_columns) elif isinstance(other, list) and not isinstance(other[0], DataFrame): other = DataFrame(other) if (self.columns.get_indexer(other.columns) >= 0).all(): other = other.reindex(columns=self.columns) from pandas.core.reshape.concat import concat if isinstance(other, (list, tuple)): to_concat = [self] + other else: to_concat = [self, other] return concat(to_concat, ignore_index=ignore_index, verify_integrity=verify_integrity, sort=sort) def join(self, other, on=None, how='left', lsuffix='', rsuffix='', sort=False): """ Join columns of another DataFrame. Join columns with `other` DataFrame either on index or on a key column. Efficiently join multiple DataFrame objects by index at once by passing a list. Parameters ---------- other : DataFrame, Series, or list of DataFrame Index should be similar to one of the columns in this one. If a Series is passed, its name attribute must be set, and that will be used as the column name in the resulting joined DataFrame. on : str, list of str, or array-like, optional Column or index level name(s) in the caller to join on the index in `other`, otherwise joins index-on-index. If multiple values given, the `other` DataFrame must have a MultiIndex. Can pass an array as the join key if it is not already contained in the calling DataFrame. Like an Excel VLOOKUP operation. how : {'left', 'right', 'outer', 'inner'}, default 'left' How to handle the operation of the two objects. * left: use calling frame's index (or column if on is specified) * right: use `other`'s index. * outer: form union of calling frame's index (or column if on is specified) with `other`'s index, and sort it. lexicographically. * inner: form intersection of calling frame's index (or column if on is specified) with `other`'s index, preserving the order of the calling's one. lsuffix : str, default '' Suffix to use from left frame's overlapping columns. rsuffix : str, default '' Suffix to use from right frame's overlapping columns. sort : bool, default False Order result DataFrame lexicographically by the join key. If False, the order of the join key depends on the join type (how keyword). Returns ------- DataFrame A dataframe containing columns from both the caller and `other`. See Also -------- DataFrame.merge : For column(s)-on-columns(s) operations. Notes ----- Parameters `on`, `lsuffix`, and `rsuffix` are not supported when passing a list of `DataFrame` objects. Support for specifying index levels as the `on` parameter was added in version 0.23.0. Examples -------- >>> df = pd.DataFrame({'key': ['K0', 'K1', 'K2', 'K3', 'K4', 'K5'], ... 
'A': ['A0', 'A1', 'A2', 'A3', 'A4', 'A5']}) >>> df key A 0 K0 A0 1 K1 A1 2 K2 A2 3 K3 A3 4 K4 A4 5 K5 A5 >>> other = pd.DataFrame({'key': ['K0', 'K1', 'K2'], ... 'B': ['B0', 'B1', 'B2']}) >>> other key B 0 K0 B0 1 K1 B1 2 K2 B2 Join DataFrames using their indexes. >>> df.join(other, lsuffix='_caller', rsuffix='_other') key_caller A key_other B 0 K0 A0 K0 B0 1 K1 A1 K1 B1 2 K2 A2 K2 B2 3 K3 A3 NaN NaN 4 K4 A4 NaN NaN 5 K5 A5 NaN NaN If we want to join using the key columns, we need to set key to be the index in both `df` and `other`. The joined DataFrame will have key as its index. >>> df.set_index('key').join(other.set_index('key')) A B key K0 A0 B0 K1 A1 B1 K2 A2 B2 K3 A3 NaN K4 A4 NaN K5 A5 NaN Another option to join using the key columns is to use the `on` parameter. DataFrame.join always uses `other`'s index but we can use any column in `df`. This method preserves the original DataFrame's index in the result. >>> df.join(other.set_index('key'), on='key') key A B 0 K0 A0 B0 1 K1 A1 B1 2 K2 A2 B2 3 K3 A3 NaN 4 K4 A4 NaN 5 K5 A5 NaN """ # For SparseDataFrame's benefit return self._join_compat(other, on=on, how=how, lsuffix=lsuffix, rsuffix=rsuffix, sort=sort) def _join_compat(self, other, on=None, how='left', lsuffix='', rsuffix='', sort=False): from pandas.core.reshape.merge import merge from pandas.core.reshape.concat import concat if isinstance(other, Series): if other.name is None: raise ValueError('Other Series must have a name') other = DataFrame({other.name: other}) if isinstance(other, DataFrame): return merge(self, other, left_on=on, how=how, left_index=on is None, right_index=True, suffixes=(lsuffix, rsuffix), sort=sort) else: if on is not None: raise ValueError('Joining multiple DataFrames only supported' ' for joining on index') frames = [self] + list(other) can_concat = all(df.index.is_unique for df in frames) # join indexes only using concat if can_concat: if how == 'left': how = 'outer' join_axes = [self.index] else: join_axes = None return concat(frames, axis=1, join=how, join_axes=join_axes, verify_integrity=True) joined = frames[0] for frame in frames[1:]: joined = merge(joined, frame, how=how, left_index=True, right_index=True) return joined @Substitution('') @Appender(_merge_doc, indents=2) def merge(self, right, how='inner', on=None, left_on=None, right_on=None, left_index=False, right_index=False, sort=False, suffixes=('_x', '_y'), copy=True, indicator=False, validate=None): from pandas.core.reshape.merge import merge return merge(self, right, how=how, on=on, left_on=left_on, right_on=right_on, left_index=left_index, right_index=right_index, sort=sort, suffixes=suffixes, copy=copy, indicator=indicator, validate=validate) def round(self, decimals=0, *args, **kwargs): """ Round a DataFrame to a variable number of decimal places. Parameters ---------- decimals : int, dict, Series Number of decimal places to round each column to. If an int is given, round each column to the same number of places. Otherwise dict and Series round to variable numbers of places. Column names should be in the keys if `decimals` is a dict-like, or in the index if `decimals` is a Series. Any columns not included in `decimals` will be left as is. Elements of `decimals` which are not columns of the input will be ignored. *args Additional keywords have no effect but might be accepted for compatibility with numpy. **kwargs Additional keywords have no effect but might be accepted for compatibility with numpy. 
Returns ------- DataFrame A DataFrame with the affected columns rounded to the specified number of decimal places. See Also -------- numpy.around : Round a numpy array to the given number of decimals. Series.round : Round a Series to the given number of decimals. Examples -------- >>> df = pd.DataFrame([(.21, .32), (.01, .67), (.66, .03), (.21, .18)], ... columns=['dogs', 'cats']) >>> df dogs cats 0 0.21 0.32 1 0.01 0.67 2 0.66 0.03 3 0.21 0.18 By providing an integer each column is rounded to the same number of decimal places >>> df.round(1) dogs cats 0 0.2 0.3 1 0.0 0.7 2 0.7 0.0 3 0.2 0.2 With a dict, the number of places for specific columns can be specified with the column names as key and the number of decimal places as value >>> df.round({'dogs': 1, 'cats': 0}) dogs cats 0 0.2 0.0 1 0.0 1.0 2 0.7 0.0 3 0.2 0.0 Using a Series, the number of places for specific columns can be specified with the column names as index and the number of decimal places as value >>> decimals = pd.Series([0, 1], index=['cats', 'dogs']) >>> df.round(decimals) dogs cats 0 0.2 0.0 1 0.0 1.0 2 0.7 0.0 3 0.2 0.0 """ from pandas.core.reshape.concat import concat def _dict_round(df, decimals): for col, vals in df.iteritems(): try: yield _series_round(vals, decimals[col]) except KeyError: yield vals def _series_round(s, decimals): if is_integer_dtype(s) or is_float_dtype(s): return s.round(decimals) return s nv.validate_round(args, kwargs) if isinstance(decimals, (dict, Series)): if isinstance(decimals, Series): if not decimals.index.is_unique: raise ValueError("Index of decimals must be unique") new_cols = [col for col in _dict_round(self, decimals)] elif is_integer(decimals): # Dispatch to Series.round new_cols = [_series_round(v, decimals) for _, v in self.iteritems()] else: raise TypeError("decimals must be an integer, a dict-like or a " "Series") if len(new_cols) > 0: return self._constructor(concat(new_cols, axis=1), index=self.index, columns=self.columns) else: return self # ---------------------------------------------------------------------- # Statistical methods, etc. def corr(self, method='pearson', min_periods=1): """ Compute pairwise correlation of columns, excluding NA/null values. Parameters ---------- method : {'pearson', 'kendall', 'spearman'} or callable * pearson : standard correlation coefficient * kendall : Kendall Tau correlation coefficient * spearman : Spearman rank correlation * callable: callable with input two 1d ndarrays and returning a float. Note that the returned matrix from corr will have 1 along the diagonals and will be symmetric regardless of the callable's behavior .. versionadded:: 0.24.0 min_periods : int, optional Minimum number of observations required per pair of columns to have a valid result. Currently only available for Pearson and Spearman correlation. Returns ------- DataFrame Correlation matrix. See Also -------- DataFrame.corrwith Series.corr Examples -------- >>> def histogram_intersection(a, b): ... v = np.minimum(a, b).sum().round(decimals=1) ... return v >>> df = pd.DataFrame([(.2, .3), (.0, .6), (.6, .0), (.2, .1)], ... 
columns=['dogs', 'cats']) >>> df.corr(method=histogram_intersection) dogs cats dogs 1.0 0.3 cats 0.3 1.0 """ numeric_df = self._get_numeric_data() cols = numeric_df.columns idx = cols.copy() mat = numeric_df.values if method == 'pearson': correl = libalgos.nancorr(ensure_float64(mat), minp=min_periods) elif method == 'spearman': correl = libalgos.nancorr_spearman(ensure_float64(mat), minp=min_periods) elif method == 'kendall' or callable(method): if min_periods is None: min_periods = 1 mat = ensure_float64(mat).T corrf = nanops.get_corr_func(method) K = len(cols) correl = np.empty((K, K), dtype=float) mask = np.isfinite(mat) for i, ac in enumerate(mat): for j, bc in enumerate(mat): if i > j: continue valid = mask[i] & mask[j] if valid.sum() < min_periods: c = np.nan elif i == j: c = 1. elif not valid.all(): c = corrf(ac[valid], bc[valid]) else: c = corrf(ac, bc) correl[i, j] = c correl[j, i] = c else: raise ValueError("method must be either 'pearson', " "'spearman', 'kendall', or a callable, " "'{method}' was supplied".format(method=method)) return self._constructor(correl, index=idx, columns=cols) def cov(self, min_periods=None): """ Compute pairwise covariance of columns, excluding NA/null values. Compute the pairwise covariance among the series of a DataFrame. The returned data frame is the `covariance matrix <https://en.wikipedia.org/wiki/Covariance_matrix>`__ of the columns of the DataFrame. Both NA and null values are automatically excluded from the calculation. (See the note below about bias from missing values.) A threshold can be set for the minimum number of observations for each value created. Comparisons with observations below this threshold will be returned as ``NaN``. This method is generally used for the analysis of time series data to understand the relationship between different measures across time. Parameters ---------- min_periods : int, optional Minimum number of observations required per pair of columns to have a valid result. Returns ------- DataFrame The covariance matrix of the series of the DataFrame. See Also -------- Series.cov : Compute covariance with another Series. core.window.EWM.cov: Exponential weighted sample covariance. core.window.Expanding.cov : Expanding sample covariance. core.window.Rolling.cov : Rolling sample covariance. Notes ----- Returns the covariance matrix of the DataFrame's time series. The covariance is normalized by N-1. For DataFrames that have Series that are missing data (assuming that data is `missing at random <https://en.wikipedia.org/wiki/Missing_data#Missing_at_random>`__) the returned covariance matrix will be an unbiased estimate of the variance and covariance between the member Series. However, for many applications this estimate may not be acceptable because the estimate covariance matrix is not guaranteed to be positive semi-definite. This could lead to estimate correlations having absolute values which are greater than one, and/or a non-invertible covariance matrix. See `Estimation of covariance matrices <http://en.wikipedia.org/w/index.php?title=Estimation_of_covariance_ matrices>`__ for more details. Examples -------- >>> df = pd.DataFrame([(1, 2), (0, 3), (2, 0), (1, 1)], ... columns=['dogs', 'cats']) >>> df.cov() dogs cats dogs 0.666667 -1.000000 cats -1.000000 1.666667 >>> np.random.seed(42) >>> df = pd.DataFrame(np.random.randn(1000, 5), ... 
columns=['a', 'b', 'c', 'd', 'e']) >>> df.cov() a b c d e a 0.998438 -0.020161 0.059277 -0.008943 0.014144 b -0.020161 1.059352 -0.008543 -0.024738 0.009826 c 0.059277 -0.008543 1.010670 -0.001486 -0.000271 d -0.008943 -0.024738 -0.001486 0.921297 -0.013692 e 0.014144 0.009826 -0.000271 -0.013692 0.977795 **Minimum number of periods** This method also supports an optional ``min_periods`` keyword that specifies the required minimum number of non-NA observations for each column pair in order to have a valid result: >>> np.random.seed(42) >>> df = pd.DataFrame(np.random.randn(20, 3), ... columns=['a', 'b', 'c']) >>> df.loc[df.index[:5], 'a'] = np.nan >>> df.loc[df.index[5:10], 'b'] = np.nan >>> df.cov(min_periods=12) a b c a 0.316741 NaN -0.150812 b NaN 1.248003 0.191417 c -0.150812 0.191417 0.895202 """ numeric_df = self._get_numeric_data() cols = numeric_df.columns idx = cols.copy() mat = numeric_df.values if notna(mat).all(): if min_periods is not None and min_periods > len(mat): baseCov = np.empty((mat.shape[1], mat.shape[1])) baseCov.fill(np.nan) else: baseCov = np.cov(mat.T) baseCov = baseCov.reshape((len(cols), len(cols))) else: baseCov = libalgos.nancorr(ensure_float64(mat), cov=True, minp=min_periods) return self._constructor(baseCov, index=idx, columns=cols) def corrwith(self, other, axis=0, drop=False, method='pearson'): """ Compute pairwise correlation between rows or columns of DataFrame with rows or columns of Series or DataFrame. DataFrames are first aligned along both axes before computing the correlations. Parameters ---------- other : DataFrame, Series Object with which to compute correlations. axis : {0 or 'index', 1 or 'columns'}, default 0 0 or 'index' to compute column-wise, 1 or 'columns' for row-wise. drop : bool, default False Drop missing indices from result. method : {'pearson', 'kendall', 'spearman'} or callable * pearson : standard correlation coefficient * kendall : Kendall Tau correlation coefficient * spearman : Spearman rank correlation * callable: callable with input two 1d ndarrays and returning a float .. versionadded:: 0.24.0 Returns ------- Series Pairwise correlations. See Also -------- DataFrame.corr """ axis = self._get_axis_number(axis) this = self._get_numeric_data() if isinstance(other, Series): return this.apply(lambda x: other.corr(x, method=method), axis=axis) other = other._get_numeric_data() left, right = this.align(other, join='inner', copy=False) if axis == 1: left = left.T right = right.T if method == 'pearson': # mask missing values left = left + right * 0 right = right + left * 0 # demeaned data ldem = left - left.mean() rdem = right - right.mean() num = (ldem * rdem).sum() dom = (left.count() - 1) * left.std() * right.std() correl = num / dom elif method in ['kendall', 'spearman'] or callable(method): def c(x): return nanops.nancorr(x[0], x[1], method=method) correl = Series(map(c, zip(left.values.T, right.values.T)), index=left.columns) else: raise ValueError("Invalid method {method} was passed, " "valid methods are: 'pearson', 'kendall', " "'spearman', or callable". format(method=method)) if not drop: # Find non-matching labels along the given axis # and append missing correlations (GH 22375) raxis = 1 if axis == 0 else 0 result_index = (this._get_axis(raxis). 
union(other._get_axis(raxis))) idx_diff = result_index.difference(correl.index) if len(idx_diff) > 0: correl = correl.append(Series([np.nan] * len(idx_diff), index=idx_diff)) return correl # ---------------------------------------------------------------------- # ndarray-like stats methods def count(self, axis=0, level=None, numeric_only=False): """ Count non-NA cells for each column or row. The values `None`, `NaN`, `NaT`, and optionally `numpy.inf` (depending on `pandas.options.mode.use_inf_as_na`) are considered NA. Parameters ---------- axis : {0 or 'index', 1 or 'columns'}, default 0 If 0 or 'index' counts are generated for each column. If 1 or 'columns' counts are generated for each **row**. level : int or str, optional If the axis is a `MultiIndex` (hierarchical), count along a particular `level`, collapsing into a `DataFrame`. A `str` specifies the level name. numeric_only : bool, default False Include only `float`, `int` or `boolean` data. Returns ------- Series or DataFrame For each column/row the number of non-NA/null entries. If `level` is specified returns a `DataFrame`. See Also -------- Series.count: Number of non-NA elements in a Series. DataFrame.shape: Number of DataFrame rows and columns (including NA elements). DataFrame.isna: Boolean same-sized DataFrame showing places of NA elements. Examples -------- Constructing DataFrame from a dictionary: >>> df = pd.DataFrame({"Person": ... ["John", "Myla", "Lewis", "John", "Myla"], ... "Age": [24., np.nan, 21., 33, 26], ... "Single": [False, True, True, True, False]}) >>> df Person Age Single 0 John 24.0 False 1 Myla NaN True 2 Lewis 21.0 True 3 John 33.0 True 4 Myla 26.0 False Notice the uncounted NA values: >>> df.count() Person 5 Age 4 Single 5 dtype: int64 Counts for each **row**: >>> df.count(axis='columns') 0 3 1 2 2 3 3 3 4 3 dtype: int64 Counts for one level of a `MultiIndex`: >>> df.set_index(["Person", "Single"]).count(level="Person") Age Person John 2 Lewis 1 Myla 1 """ axis = self._get_axis_number(axis) if level is not None: return self._count_level(level, axis=axis, numeric_only=numeric_only) if numeric_only: frame = self._get_numeric_data() else: frame = self # GH #423 if len(frame._get_axis(axis)) == 0: result = Series(0, index=frame._get_agg_axis(axis)) else: if frame._is_mixed_type or frame._data.any_extension_types: # the or any_extension_types is really only hit for single- # column frames with an extension array result = notna(frame).sum(axis=axis) else: # GH13407 series_counts = notna(frame).sum(axis=axis) counts = series_counts.values result = Series(counts, index=frame._get_agg_axis(axis)) return result.astype('int64') def _count_level(self, level, axis=0, numeric_only=False): if numeric_only: frame = self._get_numeric_data() else: frame = self count_axis = frame._get_axis(axis) agg_axis = frame._get_agg_axis(axis) if not isinstance(count_axis, MultiIndex): raise TypeError("Can only count levels on hierarchical " "{ax}.".format(ax=self._get_axis_name(axis))) if frame._is_mixed_type: # Since we have mixed types, calling notna(frame.values) might # upcast everything to object mask = notna(frame).values else: # But use the speedup when we have homogeneous dtypes mask = notna(frame.values) if axis == 1: # We're transposing the mask rather than frame to avoid potential # upcasts to object, which induces a ~20x slowdown mask = mask.T if isinstance(level, str): level = count_axis._get_level_number(level) level_index = count_axis.levels[level] level_codes = ensure_int64(count_axis.codes[level]) counts = 
lib.count_level_2d(mask, level_codes, len(level_index), axis=0) result = DataFrame(counts, index=level_index, columns=agg_axis) if axis == 1: # Undo our earlier transpose return result.T else: return result def _reduce(self, op, name, axis=0, skipna=True, numeric_only=None, filter_type=None, **kwds): if axis is None and filter_type == 'bool': labels = None constructor = None else: # TODO: Make other agg func handle axis=None properly axis = self._get_axis_number(axis) labels = self._get_agg_axis(axis) constructor = self._constructor def f(x): return op(x, axis=axis, skipna=skipna, **kwds) # exclude timedelta/datetime unless we are uniform types if (axis == 1 and self._is_datelike_mixed_type and (not self._is_homogeneous_type and not is_datetime64tz_dtype(self.dtypes[0]))): numeric_only = True if numeric_only is None: try: values = self.values result = f(values) if (filter_type == 'bool' and is_object_dtype(values) and axis is None): # work around https://github.com/numpy/numpy/issues/10489 # TODO: combine with hasattr(result, 'dtype') further down # hard since we don't have `values` down there. result = np.bool_(result) except Exception as e: # try by-column first if filter_type is None and axis == 0: try: # this can end up with a non-reduction # but not always. if the types are mixed # with datelike then need to make sure a series # we only end up here if we have not specified # numeric_only and yet we have tried a # column-by-column reduction, where we have mixed type. # So let's just do what we can from pandas.core.apply import frame_apply opa = frame_apply(self, func=f, result_type='expand', ignore_failures=True) result = opa.get_result() if result.ndim == self.ndim: result = result.iloc[0] return result except Exception: pass if filter_type is None or filter_type == 'numeric': data = self._get_numeric_data() elif filter_type == 'bool': data = self._get_bool_data() else: # pragma: no cover e = NotImplementedError( "Handling exception with filter_type {f} not" "implemented.".format(f=filter_type)) raise_with_traceback(e) with np.errstate(all='ignore'): result = f(data.values) labels = data._get_agg_axis(axis) else: if numeric_only: if filter_type is None or filter_type == 'numeric': data = self._get_numeric_data() elif filter_type == 'bool': # GH 25101, # GH 24434 data = self._get_bool_data() if axis == 0 else self else: # pragma: no cover msg = ("Generating numeric_only data with filter_type {f}" "not supported.".format(f=filter_type)) raise NotImplementedError(msg) values = data.values labels = data._get_agg_axis(axis) else: values = self.values result = f(values) if hasattr(result, 'dtype') and is_object_dtype(result.dtype): try: if filter_type is None or filter_type == 'numeric': result = result.astype(np.float64) elif filter_type == 'bool' and notna(result).all(): result = result.astype(np.bool_) except (ValueError, TypeError): # try to coerce to the original dtypes item by item if we can if axis == 0: result = coerce_to_dtypes(result, self.dtypes) if constructor is not None: result = Series(result, index=labels) return result def nunique(self, axis=0, dropna=True): """ Count distinct observations over requested axis. Return Series with number of distinct observations. Can ignore NaN values. .. versionadded:: 0.20.0 Parameters ---------- axis : {0 or 'index', 1 or 'columns'}, default 0 The axis to use. 0 or 'index' for row-wise, 1 or 'columns' for column-wise. dropna : bool, default True Don't include NaN in the counts. 
Returns ------- Series See Also -------- Series.nunique: Method nunique for Series. DataFrame.count: Count non-NA cells for each column or row. Examples -------- >>> df = pd.DataFrame({'A': [1, 2, 3], 'B': [1, 1, 1]}) >>> df.nunique() A 3 B 1 dtype: int64 >>> df.nunique(axis=1) 0 1 1 2 2 2 dtype: int64 """ return self.apply(Series.nunique, axis=axis, dropna=dropna) def idxmin(self, axis=0, skipna=True): """ Return index of first occurrence of minimum over requested axis. NA/null values are excluded. Parameters ---------- axis : {0 or 'index', 1 or 'columns'}, default 0 0 or 'index' for row-wise, 1 or 'columns' for column-wise skipna : boolean, default True Exclude NA/null values. If an entire row/column is NA, the result will be NA. Returns ------- Series Indexes of minima along the specified axis. Raises ------ ValueError * If the row/column is empty See Also -------- Series.idxmin Notes ----- This method is the DataFrame version of ``ndarray.argmin``. """ axis = self._get_axis_number(axis) indices = nanops.nanargmin(self.values, axis=axis, skipna=skipna) index = self._get_axis(axis) result = [index[i] if i >= 0 else np.nan for i in indices] return Series(result, index=self._get_agg_axis(axis)) def idxmax(self, axis=0, skipna=True): """ Return index of first occurrence of maximum over requested axis. NA/null values are excluded. Parameters ---------- axis : {0 or 'index', 1 or 'columns'}, default 0 0 or 'index' for row-wise, 1 or 'columns' for column-wise skipna : boolean, default True Exclude NA/null values. If an entire row/column is NA, the result will be NA. Returns ------- Series Indexes of maxima along the specified axis. Raises ------ ValueError * If the row/column is empty See Also -------- Series.idxmax Notes ----- This method is the DataFrame version of ``ndarray.argmax``. """ axis = self._get_axis_number(axis) indices = nanops.nanargmax(self.values, axis=axis, skipna=skipna) index = self._get_axis(axis) result = [index[i] if i >= 0 else np.nan for i in indices] return Series(result, index=self._get_agg_axis(axis)) def _get_agg_axis(self, axis_num): """ Let's be explicit about this. """ if axis_num == 0: return self.columns elif axis_num == 1: return self.index else: raise ValueError('Axis must be 0 or 1 (got %r)' % axis_num) def mode(self, axis=0, numeric_only=False, dropna=True): """ Get the mode(s) of each element along the selected axis. The mode of a set of values is the value that appears most often. It can be multiple values. Parameters ---------- axis : {0 or 'index', 1 or 'columns'}, default 0 The axis to iterate over while searching for the mode: * 0 or 'index' : get mode of each column * 1 or 'columns' : get mode of each row numeric_only : bool, default False If True, only apply to numeric columns. dropna : bool, default True Don't consider counts of NaN/NaT. .. versionadded:: 0.24.0 Returns ------- DataFrame The modes of each column or row. See Also -------- Series.mode : Return the highest frequency value in a Series. Series.value_counts : Return the counts of values in a Series. Examples -------- >>> df = pd.DataFrame([('bird', 2, 2), ... ('mammal', 4, np.nan), ... ('arthropod', 8, 0), ... ('bird', 2, np.nan)], ... index=('falcon', 'horse', 'spider', 'ostrich'), ... columns=('species', 'legs', 'wings')) >>> df species legs wings falcon bird 2 2.0 horse mammal 4 NaN spider arthropod 8 0.0 ostrich bird 2 NaN By default, missing values are not considered, and the mode of wings are both 0 and 2. 
The second row of species and legs contains ``NaN``, because they have only one mode, but the DataFrame has two rows. >>> df.mode() species legs wings 0 bird 2.0 0.0 1 NaN NaN 2.0 Setting ``dropna=False`` ``NaN`` values are considered and they can be the mode (like for wings). >>> df.mode(dropna=False) species legs wings 0 bird 2 NaN Setting ``numeric_only=True``, only the mode of numeric columns is computed, and columns of other types are ignored. >>> df.mode(numeric_only=True) legs wings 0 2.0 0.0 1 NaN 2.0 To compute the mode over columns and not rows, use the axis parameter: >>> df.mode(axis='columns', numeric_only=True) 0 1 falcon 2.0 NaN horse 4.0 NaN spider 0.0 8.0 ostrich 2.0 NaN """ data = self if not numeric_only else self._get_numeric_data() def f(s): return s.mode(dropna=dropna) return data.apply(f, axis=axis) def quantile(self, q=0.5, axis=0, numeric_only=True, interpolation='linear'): """ Return values at the given quantile over requested axis. Parameters ---------- q : float or array-like, default 0.5 (50% quantile) Value between 0 <= q <= 1, the quantile(s) to compute. axis : {0, 1, 'index', 'columns'} (default 0) Equals 0 or 'index' for row-wise, 1 or 'columns' for column-wise. numeric_only : bool, default True If False, the quantile of datetime and timedelta data will be computed as well. interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'} This optional parameter specifies the interpolation method to use, when the desired quantile lies between two data points `i` and `j`: * linear: `i + (j - i) * fraction`, where `fraction` is the fractional part of the index surrounded by `i` and `j`. * lower: `i`. * higher: `j`. * nearest: `i` or `j` whichever is nearest. * midpoint: (`i` + `j`) / 2. .. versionadded:: 0.18.0 Returns ------- Series or DataFrame If ``q`` is an array, a DataFrame will be returned where the index is ``q``, the columns are the columns of self, and the values are the quantiles. If ``q`` is a float, a Series will be returned where the index is the columns of self and the values are the quantiles. See Also -------- core.window.Rolling.quantile: Rolling quantile. numpy.percentile: Numpy function to compute the percentile. Examples -------- >>> df = pd.DataFrame(np.array([[1, 1], [2, 10], [3, 100], [4, 100]]), ... columns=['a', 'b']) >>> df.quantile(.1) a 1.3 b 3.7 Name: 0.1, dtype: float64 >>> df.quantile([.1, .5]) a b 0.1 1.3 3.7 0.5 2.5 55.0 Specifying `numeric_only=False` will also compute the quantile of datetime and timedelta data. >>> df = pd.DataFrame({'A': [1, 2], ... 'B': [pd.Timestamp('2010'), ... pd.Timestamp('2011')], ... 'C': [pd.Timedelta('1 days'), ... pd.Timedelta('2 days')]}) >>> df.quantile(0.5, numeric_only=False) A 1.5 B 2010-07-02 12:00:00 C 1 days 12:00:00 Name: 0.5, dtype: object """ self._check_percentile(q) data = self._get_numeric_data() if numeric_only else self axis = self._get_axis_number(axis) is_transposed = axis == 1 if is_transposed: data = data.T result = data._data.quantile(qs=q, axis=1, interpolation=interpolation, transposed=is_transposed) if result.ndim == 2: result = self._constructor(result) else: result = self._constructor_sliced(result, name=q) if is_transposed: result = result.T return result def to_timestamp(self, freq=None, how='start', axis=0, copy=True): """ Cast to DatetimeIndex of timestamps, at *beginning* of period. Parameters ---------- freq : str, default frequency of PeriodIndex Desired frequency. 
how : {'s', 'e', 'start', 'end'} Convention for converting period to timestamp; start of period vs. end. axis : {0 or 'index', 1 or 'columns'}, default 0 The axis to convert (the index by default). copy : bool, default True If False then underlying input data is not copied. Returns ------- DataFrame with DatetimeIndex """ new_data = self._data if copy: new_data = new_data.copy() axis = self._get_axis_number(axis) if axis == 0: new_data.set_axis(1, self.index.to_timestamp(freq=freq, how=how)) elif axis == 1: new_data.set_axis(0, self.columns.to_timestamp(freq=freq, how=how)) else: # pragma: no cover raise AssertionError('Axis must be 0 or 1. Got {ax!s}'.format( ax=axis)) return self._constructor(new_data) def to_period(self, freq=None, axis=0, copy=True): """ Convert DataFrame from DatetimeIndex to PeriodIndex with desired frequency (inferred from index if not passed). Parameters ---------- freq : str, default Frequency of the PeriodIndex. axis : {0 or 'index', 1 or 'columns'}, default 0 The axis to convert (the index by default). copy : bool, default True If False then underlying input data is not copied. Returns ------- TimeSeries with PeriodIndex """ new_data = self._data if copy: new_data = new_data.copy() axis = self._get_axis_number(axis) if axis == 0: new_data.set_axis(1, self.index.to_period(freq=freq)) elif axis == 1: new_data.set_axis(0, self.columns.to_period(freq=freq)) else: # pragma: no cover raise AssertionError('Axis must be 0 or 1. Got {ax!s}'.format( ax=axis)) return self._constructor(new_data) def isin(self, values): """ Whether each element in the DataFrame is contained in values. Parameters ---------- values : iterable, Series, DataFrame or dict The result will only be true at a location if all the labels match. If `values` is a Series, that's the index. If `values` is a dict, the keys must be the column names, which must match. If `values` is a DataFrame, then both the index and column labels must match. Returns ------- DataFrame DataFrame of booleans showing whether each element in the DataFrame is contained in values. See Also -------- DataFrame.eq: Equality test for DataFrame. Series.isin: Equivalent method on Series. Series.str.contains: Test if pattern or regex is contained within a string of a Series or Index. Examples -------- >>> df = pd.DataFrame({'num_legs': [2, 4], 'num_wings': [2, 0]}, ... index=['falcon', 'dog']) >>> df num_legs num_wings falcon 2 2 dog 4 0 When ``values`` is a list check whether every value in the DataFrame is present in the list (which animals have 0 or 2 legs or wings) >>> df.isin([0, 2]) num_legs num_wings falcon True True dog False True When ``values`` is a dict, we can pass values to check for each column separately: >>> df.isin({'num_wings': [0, 3]}) num_legs num_wings falcon False False dog False True When ``values`` is a Series or DataFrame the index and column must match. Note that 'falcon' does not match based on the number of legs in df2. >>> other = pd.DataFrame({'num_legs': [8, 2], 'num_wings': [0, 2]}, ... 
index=['spider', 'falcon']) >>> df.isin(other) num_legs num_wings falcon True True dog False False """ if isinstance(values, dict): from pandas.core.reshape.concat import concat values = collections.defaultdict(list, values) return concat((self.iloc[:, [i]].isin(values[col]) for i, col in enumerate(self.columns)), axis=1) elif isinstance(values, Series): if not values.index.is_unique: raise ValueError("cannot compute isin with " "a duplicate axis.") return self.eq(values.reindex_like(self), axis='index') elif isinstance(values, DataFrame): if not (values.columns.is_unique and values.index.is_unique): raise ValueError("cannot compute isin with " "a duplicate axis.") return self.eq(values.reindex_like(self)) else: if not is_list_like(values): raise TypeError("only list-like or dict-like objects are " "allowed to be passed to DataFrame.isin(), " "you passed a " "{0!r}".format(type(values).__name__)) return DataFrame( algorithms.isin(self.values.ravel(), values).reshape(self.shape), self.index, self.columns) # ---------------------------------------------------------------------- # Add plotting methods to DataFrame plot = CachedAccessor("plot", pandas.plotting.FramePlotMethods) hist = pandas.plotting.hist_frame boxplot = pandas.plotting.boxplot_frame sparse = CachedAccessor("sparse", SparseFrameAccessor) DataFrame._setup_axes(['index', 'columns'], info_axis=1, stat_axis=0, axes_are_reversed=True, aliases={'rows': 0}, docs={ 'index': 'The index (row labels) of the DataFrame.', 'columns': 'The column labels of the DataFrame.'}) DataFrame._add_numeric_operations() DataFrame._add_series_or_dataframe_operations() ops.add_flex_arithmetic_methods(DataFrame) ops.add_special_arithmetic_methods(DataFrame) def _from_nested_dict(data): # TODO: this should be seriously cythonized new_data = OrderedDict() for index, s in data.items(): for col, v in s.items(): new_data[col] = new_data.get(col, OrderedDict()) new_data[col][index] = v return new_data def _put_str(s, space): return '{s}'.format(s=s)[:space].ljust(space)
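The frame.py excerpt ends here. As a quick orientation to the public APIs it documents (apply with result_type, round with a per-column dict, corr with a callable metric, isin with a dict), here is a minimal sketch using illustrative data; it assumes a pandas release in the 0.23 to 0.25 range implied by the versionadded and deprecation notes above.

    # Minimal sketch (illustrative data; assumes pandas roughly 0.23-0.25).
    import numpy as np
    import pandas as pd

    df = pd.DataFrame([[4, 9]] * 3, columns=['A', 'B'])

    # result_type='expand' turns list-like results into columns (axis=1 only).
    expanded = df.apply(lambda x: [1, 2], axis=1, result_type='expand')

    # round accepts a dict mapping column name -> number of decimal places.
    prices = pd.DataFrame({'dogs': [.21, .01], 'cats': [.32, .67]})
    rounded = prices.round({'dogs': 1, 'cats': 0})

    # corr also accepts a callable taking two 1d ndarrays (added in 0.24).
    overlap = prices.corr(method=lambda a, b: np.minimum(a, b).sum().round(1))

    # isin with a dict checks only the listed columns.
    mask = df.isin({'B': [9]})

All four calls mirror the docstring examples embedded in the excerpt above.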
from copy import deepcopy from distutils.version import LooseVersion from operator import methodcaller import numpy as np import pytest import pandas.util._test_decorators as td import pandas as pd from pandas import DataFrame, MultiIndex, Series, date_range import pandas.util.testing as tm from pandas.util.testing import ( assert_almost_equal, assert_frame_equal, assert_series_equal) from .test_generic import Generic try: import xarray _XARRAY_INSTALLED = True except ImportError: _XARRAY_INSTALLED = False class TestDataFrame(Generic): _typ = DataFrame _comparator = lambda self, x, y: assert_frame_equal(x, y) def test_rename_mi(self): df = DataFrame([ 11, 21, 31 ], index=MultiIndex.from_tuples([("A", x) for x in ["a", "B", "c"]])) df.rename(str.lower) def test_set_axis_name(self): df = pd.DataFrame([[1, 2], [3, 4]]) funcs = ['_set_axis_name', 'rename_axis'] for func in funcs: result = methodcaller(func, 'foo')(df) assert df.index.name is None assert result.index.name == 'foo' result = methodcaller(func, 'cols', axis=1)(df) assert df.columns.name is None assert result.columns.name == 'cols' def test_set_axis_name_mi(self): df = DataFrame( np.empty((3, 3)), index=MultiIndex.from_tuples([("A", x) for x in list('aBc')]), columns=MultiIndex.from_tuples([('C', x) for x in list('xyz')]) ) level_names = ['L1', 'L2'] funcs = ['_set_axis_name', 'rename_axis'] for func in funcs: result = methodcaller(func, level_names)(df) assert result.index.names == level_names assert result.columns.names == [None, None] result = methodcaller(func, level_names, axis=1)(df) assert result.columns.names == ["L1", "L2"] assert result.index.names == [None, None] def test_nonzero_single_element(self): # allow single item via bool method df = DataFrame([[True]]) assert df.bool() df = DataFrame([[False]]) assert not df.bool() df = DataFrame([[False, False]]) with pytest.raises(ValueError): df.bool() with pytest.raises(ValueError): bool(df) def test_get_numeric_data_preserve_dtype(self): # get the numeric data o = DataFrame({'A': [1, '2', 3.]}) result = o._get_numeric_data() expected = DataFrame(index=[0, 1, 2], dtype=object) self._compare(result, expected) def test_metadata_propagation_indiv(self): # groupby df = DataFrame( {'A': ['foo', 'bar', 'foo', 'bar', 'foo', 'bar', 'foo', 'foo'], 'B': ['one', 'one', 'two', 'three', 'two', 'two', 'one', 'three'], 'C': np.random.randn(8), 'D': np.random.randn(8)}) result = df.groupby('A').sum() self.check_metadata(df, result) # resample df = DataFrame(np.random.randn(1000, 2), index=date_range('20130101', periods=1000, freq='s')) result = df.resample('1T') self.check_metadata(df, result) # merging with override # GH 6923 _metadata = DataFrame._metadata _finalize = DataFrame.__finalize__ np.random.seed(10) df1 = DataFrame(np.random.randint(0, 4, (3, 2)), columns=['a', 'b']) df2 = DataFrame(np.random.randint(0, 4, (3, 2)), columns=['c', 'd']) DataFrame._metadata = ['filename'] df1.filename = 'fname1.csv' df2.filename = 'fname2.csv' def finalize(self, other, method=None, **kwargs): for name in self._metadata: if method == 'merge': left, right = other.left, other.right value = getattr(left, name, '') + '|' + getattr(right, name, '') object.__setattr__(self, name, value) else: object.__setattr__(self, name, getattr(other, name, '')) return self DataFrame.__finalize__ = finalize result = df1.merge(df2, left_on=['a'], right_on=['c'], how='inner') assert result.filename == 'fname1.csv|fname2.csv' # concat # GH 6927 DataFrame._metadata = ['filename'] df1 = DataFrame(np.random.randint(0, 4, (3, 
2)), columns=list('ab')) df1.filename = 'foo' def finalize(self, other, method=None, **kwargs): for name in self._metadata: if method == 'concat': value = '+'.join([getattr( o, name) for o in other.objs if getattr(o, name, None) ]) object.__setattr__(self, name, value) else: object.__setattr__(self, name, getattr(other, name, None)) return self DataFrame.__finalize__ = finalize result = pd.concat([df1, df1]) assert result.filename == 'foo+foo' # reset DataFrame._metadata = _metadata DataFrame.__finalize__ = _finalize def test_set_attribute(self): # Test for consistent setattr behavior when an attribute and a column # have the same name (Issue #8994) df = DataFrame({'x': [1, 2, 3]}) df.y = 2 df['y'] = [2, 4, 6] df.y = 5 assert df.y == 5 assert_series_equal(df['y'], Series([2, 4, 6], name='y')) @pytest.mark.skipif(not _XARRAY_INSTALLED or _XARRAY_INSTALLED and LooseVersion(xarray.__version__) < LooseVersion('0.10.0'), reason='xarray >= 0.10.0 required') @pytest.mark.parametrize( "index", ['FloatIndex', 'IntIndex', 'StringIndex', 'UnicodeIndex', 'DateIndex', 'PeriodIndex', 'CategoricalIndex', 'TimedeltaIndex']) def test_to_xarray_index_types(self, index): from xarray import Dataset index = getattr(tm, 'make{}'.format(index)) df = DataFrame({'a': list('abc'), 'b': list(range(1, 4)), 'c': np.arange(3, 6).astype('u1'), 'd': np.arange(4.0, 7.0, dtype='float64'), 'e': [True, False, True], 'f': pd.Categorical(list('abc')), 'g': pd.date_range('20130101', periods=3), 'h': pd.date_range('20130101', periods=3, tz='US/Eastern')} ) df.index = index(3) df.index.name = 'foo' df.columns.name = 'bar' result = df.to_xarray() assert result.dims['foo'] == 3 assert len(result.coords) == 1 assert len(result.data_vars) == 8 assert_almost_equal(list(result.coords.keys()), ['foo']) assert isinstance(result, Dataset) # idempotency # categoricals are not preserved # datetimes w/tz are not preserved # column names are lost expected = df.copy() expected['f'] = expected['f'].astype(object) expected['h'] = expected['h'].astype('datetime64[ns]') expected.columns.name = None assert_frame_equal(result.to_dataframe(), expected, check_index_type=False, check_categorical=False) @td.skip_if_no('xarray', min_version='0.7.0') def test_to_xarray(self): from xarray import Dataset df = DataFrame({'a': list('abc'), 'b': list(range(1, 4)), 'c': np.arange(3, 6).astype('u1'), 'd': np.arange(4.0, 7.0, dtype='float64'), 'e': [True, False, True], 'f': pd.Categorical(list('abc')), 'g': pd.date_range('20130101', periods=3), 'h': pd.date_range('20130101', periods=3, tz='US/Eastern')} ) df.index.name = 'foo' result = df[0:0].to_xarray() assert result.dims['foo'] == 0 assert isinstance(result, Dataset) # available in 0.7.1 # MultiIndex df.index = pd.MultiIndex.from_product([['a'], range(3)], names=['one', 'two']) result = df.to_xarray() assert result.dims['one'] == 1 assert result.dims['two'] == 3 assert len(result.coords) == 2 assert len(result.data_vars) == 8 assert_almost_equal(list(result.coords.keys()), ['one', 'two']) assert isinstance(result, Dataset) result = result.to_dataframe() expected = df.copy() expected['f'] = expected['f'].astype(object) expected['h'] = expected['h'].astype('datetime64[ns]') expected.columns.name = None assert_frame_equal(result, expected, check_index_type=False) def test_deepcopy_empty(self): # This test covers empty frame copying with non-empty column sets # as reported in issue GH15370 empty_frame = DataFrame(data=[], index=[], columns=['A']) empty_frame_copy = deepcopy(empty_frame) 
self._compare(empty_frame_copy, empty_frame)
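The test module above exercises DataFrame.rename_axis and the single-element bool() coercion, among other things; a small standalone sketch of those two behaviours, outside any test harness, is:

    import pandas as pd

    df = pd.DataFrame([[1, 2], [3, 4]])

    # rename_axis names the index (or, with axis=1, the columns) and returns
    # a new frame; the original is left untouched.
    assert df.rename_axis('foo').index.name == 'foo'
    assert df.rename_axis('cols', axis=1).columns.name == 'cols'
    assert df.index.name is None

    # bool() is only defined for a single-element DataFrame; larger frames
    # raise ValueError, as the tests above assert.
    assert pd.DataFrame([[True]]).bool()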
cbertinato/pandas
pandas/tests/generic/test_frame.py
pandas/core/frame.py
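The three fields above record the repository, the paired test module, and the source file for this row. Assuming a local checkout of that repository with pytest installed and the repository root as the working directory, the paired tests can be run with a node id such as:

    import pytest

    # Narrow the run to the test class shown in the module above; drop the
    # '::TestDataFrame' suffix to run the whole file.
    pytest.main(['pandas/tests/generic/test_frame.py::TestDataFrame', '-q'])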
""" manage PyTables query interface via Expressions """ import ast from functools import partial import numpy as np from pandas._libs.tslibs import Timedelta, Timestamp from pandas.compat.chainmap import DeepChainMap from pandas.core.dtypes.common import is_list_like import pandas as pd from pandas.core.base import StringMixin import pandas.core.common as com from pandas.core.computation import expr, ops from pandas.core.computation.common import _ensure_decoded from pandas.core.computation.expr import BaseExprVisitor from pandas.core.computation.ops import UndefinedVariableError, is_term from pandas.io.formats.printing import pprint_thing, pprint_thing_encoded class Scope(expr.Scope): __slots__ = 'queryables', def __init__(self, level, global_dict=None, local_dict=None, queryables=None): super().__init__(level + 1, global_dict=global_dict, local_dict=local_dict) self.queryables = queryables or dict() class Term(ops.Term): def __new__(cls, name, env, side=None, encoding=None): klass = Constant if not isinstance(name, str) else cls supr_new = StringMixin.__new__ return supr_new(klass) def __init__(self, name, env, side=None, encoding=None): super().__init__(name, env, side=side, encoding=encoding) def _resolve_name(self): # must be a queryables if self.side == 'left': if self.name not in self.env.queryables: raise NameError('name {name!r} is not defined' .format(name=self.name)) return self.name # resolve the rhs (and allow it to be None) try: return self.env.resolve(self.name, is_local=False) except UndefinedVariableError: return self.name # read-only property overwriting read/write property @property # type: ignore def value(self): return self._value class Constant(Term): def __init__(self, value, env, side=None, encoding=None): super().__init__(value, env, side=side, encoding=encoding) def _resolve_name(self): return self._name class BinOp(ops.BinOp): _max_selectors = 31 def __init__(self, op, lhs, rhs, queryables, encoding): super().__init__(op, lhs, rhs) self.queryables = queryables self.encoding = encoding self.filter = None self.condition = None def _disallow_scalar_only_bool_ops(self): pass def prune(self, klass): def pr(left, right): """ create and return a new specialized BinOp from myself """ if left is None: return right elif right is None: return left k = klass if isinstance(left, ConditionBinOp): if (isinstance(left, ConditionBinOp) and isinstance(right, ConditionBinOp)): k = JointConditionBinOp elif isinstance(left, k): return left elif isinstance(right, k): return right elif isinstance(left, FilterBinOp): if (isinstance(left, FilterBinOp) and isinstance(right, FilterBinOp)): k = JointFilterBinOp elif isinstance(left, k): return left elif isinstance(right, k): return right return k(self.op, left, right, queryables=self.queryables, encoding=self.encoding).evaluate() left, right = self.lhs, self.rhs if is_term(left) and is_term(right): res = pr(left.value, right.value) elif not is_term(left) and is_term(right): res = pr(left.prune(klass), right.value) elif is_term(left) and not is_term(right): res = pr(left.value, right.prune(klass)) elif not (is_term(left) or is_term(right)): res = pr(left.prune(klass), right.prune(klass)) return res def conform(self, rhs): """ inplace conform rhs """ if not is_list_like(rhs): rhs = [rhs] if isinstance(rhs, np.ndarray): rhs = rhs.ravel() return rhs @property def is_valid(self): """ return True if this is a valid field """ return self.lhs in self.queryables @property def is_in_table(self): """ return True if this is a valid column name for 
generation (e.g. an actual column in the table) """ return self.queryables.get(self.lhs) is not None @property def kind(self): """ the kind of my field """ return getattr(self.queryables.get(self.lhs), 'kind', None) @property def meta(self): """ the meta of my field """ return getattr(self.queryables.get(self.lhs), 'meta', None) @property def metadata(self): """ the metadata of my field """ return getattr(self.queryables.get(self.lhs), 'metadata', None) def generate(self, v): """ create and return the op string for this TermValue """ val = v.tostring(self.encoding) return "({lhs} {op} {val})".format(lhs=self.lhs, op=self.op, val=val) def convert_value(self, v): """ convert the expression that is in the term to something that is accepted by pytables """ def stringify(value): if self.encoding is not None: encoder = partial(pprint_thing_encoded, encoding=self.encoding) else: encoder = pprint_thing return encoder(value) kind = _ensure_decoded(self.kind) meta = _ensure_decoded(self.meta) if kind == 'datetime64' or kind == 'datetime': if isinstance(v, (int, float)): v = stringify(v) v = _ensure_decoded(v) v = Timestamp(v) if v.tz is not None: v = v.tz_convert('UTC') return TermValue(v, v.value, kind) elif kind == 'timedelta64' or kind == 'timedelta': v = Timedelta(v, unit='s').value return TermValue(int(v), v, kind) elif meta == 'category': metadata = com.values_from_object(self.metadata) result = metadata.searchsorted(v, side='left') # result returns 0 if v is first element or if v is not in metadata # check that metadata contains v if not result and v not in metadata: result = -1 return TermValue(result, result, 'integer') elif kind == 'integer': v = int(float(v)) return TermValue(v, v, kind) elif kind == 'float': v = float(v) return TermValue(v, v, kind) elif kind == 'bool': if isinstance(v, str): v = not v.strip().lower() in ['false', 'f', 'no', 'n', 'none', '0', '[]', '{}', ''] else: v = bool(v) return TermValue(v, v, kind) elif isinstance(v, str): # string quoting return TermValue(v, stringify(v), 'string') else: raise TypeError("Cannot compare {v} of type {typ} to {kind} column" .format(v=v, typ=type(v), kind=kind)) def convert_values(self): pass class FilterBinOp(BinOp): def __str__(self): return pprint_thing("[Filter : [{lhs}] -> [{op}]" .format(lhs=self.filter[0], op=self.filter[1])) def invert(self): """ invert the filter """ if self.filter is not None: f = list(self.filter) f[1] = self.generate_filter_op(invert=True) self.filter = tuple(f) return self def format(self): """ return the actual filter format """ return [self.filter] def evaluate(self): if not self.is_valid: raise ValueError("query term is not valid [{slf}]" .format(slf=self)) rhs = self.conform(self.rhs) values = [TermValue(v, v, self.kind).value for v in rhs] if self.is_in_table: # if too many values to create the expression, use a filter instead if self.op in ['==', '!='] and len(values) > self._max_selectors: filter_op = self.generate_filter_op() self.filter = ( self.lhs, filter_op, pd.Index(values)) return self return None # equality conditions if self.op in ['==', '!=']: filter_op = self.generate_filter_op() self.filter = ( self.lhs, filter_op, pd.Index(values)) else: raise TypeError("passing a filterable condition to a non-table " "indexer [{slf}]".format(slf=self)) return self def generate_filter_op(self, invert=False): if (self.op == '!=' and not invert) or (self.op == '==' and invert): return lambda axis, vals: ~axis.isin(vals) else: return lambda axis, vals: axis.isin(vals) class JointFilterBinOp(FilterBinOp): 
def format(self): raise NotImplementedError("unable to collapse Joint Filters") def evaluate(self): return self class ConditionBinOp(BinOp): def __str__(self): return pprint_thing("[Condition : [{cond}]]" .format(cond=self.condition)) def invert(self): """ invert the condition """ # if self.condition is not None: # self.condition = "~(%s)" % self.condition # return self raise NotImplementedError("cannot use an invert condition when " "passing to numexpr") def format(self): """ return the actual ne format """ return self.condition def evaluate(self): if not self.is_valid: raise ValueError("query term is not valid [{slf}]" .format(slf=self)) # convert values if we are in the table if not self.is_in_table: return None rhs = self.conform(self.rhs) values = [self.convert_value(v) for v in rhs] # equality conditions if self.op in ['==', '!=']: # too many values to create the expression? if len(values) <= self._max_selectors: vs = [self.generate(v) for v in values] self.condition = "({cond})".format(cond=' | '.join(vs)) # use a filter after reading else: return None else: self.condition = self.generate(values[0]) return self class JointConditionBinOp(ConditionBinOp): def evaluate(self): self.condition = "({lhs} {op} {rhs})".format(lhs=self.lhs.condition, op=self.op, rhs=self.rhs.condition) return self class UnaryOp(ops.UnaryOp): def prune(self, klass): if self.op != '~': raise NotImplementedError("UnaryOp only support invert type ops") operand = self.operand operand = operand.prune(klass) if operand is not None: if issubclass(klass, ConditionBinOp): if operand.condition is not None: return operand.invert() elif issubclass(klass, FilterBinOp): if operand.filter is not None: return operand.invert() return None _op_classes = {'unary': UnaryOp} class ExprVisitor(BaseExprVisitor): const_type = Constant term_type = Term def __init__(self, env, engine, parser, **kwargs): super().__init__(env, engine, parser) for bin_op in self.binary_ops: bin_node = self.binary_op_nodes_map[bin_op] setattr(self, 'visit_{node}'.format(node=bin_node), lambda node, bin_op=bin_op: partial(BinOp, bin_op, **kwargs)) def visit_UnaryOp(self, node, **kwargs): if isinstance(node.op, (ast.Not, ast.Invert)): return UnaryOp('~', self.visit(node.operand)) elif isinstance(node.op, ast.USub): return self.const_type(-self.visit(node.operand).value, self.env) elif isinstance(node.op, ast.UAdd): raise NotImplementedError('Unary addition not supported') def visit_Index(self, node, **kwargs): return self.visit(node.value).value def visit_Assign(self, node, **kwargs): cmpr = ast.Compare(ops=[ast.Eq()], left=node.targets[0], comparators=[node.value]) return self.visit(cmpr) def visit_Subscript(self, node, **kwargs): # only allow simple subscripts value = self.visit(node.value) slobj = self.visit(node.slice) try: value = value.value except AttributeError: pass try: return self.const_type(value[slobj], self.env) except TypeError: raise ValueError("cannot subscript {value!r} with " "{slobj!r}".format(value=value, slobj=slobj)) def visit_Attribute(self, node, **kwargs): attr = node.attr value = node.value ctx = node.ctx.__class__ if ctx == ast.Load: # resolve the value resolved = self.visit(value) # try to get the value to see if we are another expression try: resolved = resolved.value except (AttributeError): pass try: return self.term_type(getattr(resolved, attr), self.env) except AttributeError: # something like datetime.datetime where scope is overridden if isinstance(value, ast.Name) and value.id == attr: return resolved raise 
ValueError("Invalid Attribute context {name}" .format(name=ctx.__name__)) def translate_In(self, op): return ast.Eq() if isinstance(op, ast.In) else op def _rewrite_membership_op(self, node, left, right): return self.visit(node.op), node.op, left, right def _validate_where(w): """ Validate that the where statement is of the right type. The type may either be String, Expr, or list-like of Exprs. Parameters ---------- w : String term expression, Expr, or list-like of Exprs. Returns ------- where : The original where clause if the check was successful. Raises ------ TypeError : An invalid data type was passed in for w (e.g. dict). """ if not (isinstance(w, (Expr, str)) or is_list_like(w)): raise TypeError("where must be passed as a string, Expr, " "or list-like of Exprs") return w class Expr(expr.Expr): """ hold a pytables like expression, comprised of possibly multiple 'terms' Parameters ---------- where : string term expression, Expr, or list-like of Exprs queryables : a "kinds" map (dict of column name -> kind), or None if column is non-indexable encoding : an encoding that will encode the query terms Returns ------- an Expr object Examples -------- 'index>=date' "columns=['A', 'D']" 'columns=A' 'columns==A' "~(columns=['A','B'])" 'index>df.index[3] & string="bar"' '(index>df.index[3] & index<=df.index[6]) | string="bar"' "ts>=Timestamp('2012-02-01')" "major_axis>=20130101" """ def __init__(self, where, queryables=None, encoding=None, scope_level=0): where = _validate_where(where) self.encoding = encoding self.condition = None self.filter = None self.terms = None self._visitor = None # capture the environment if needed local_dict = DeepChainMap() if isinstance(where, Expr): local_dict = where.env.scope where = where.expr elif isinstance(where, (list, tuple)): for idx, w in enumerate(where): if isinstance(w, Expr): local_dict = w.env.scope else: w = _validate_where(w) where[idx] = w where = ' & '.join(map('({})'.format, com.flatten(where))) # noqa self.expr = where self.env = Scope(scope_level + 1, local_dict=local_dict) if queryables is not None and isinstance(self.expr, str): self.env.queryables.update(queryables) self._visitor = ExprVisitor(self.env, queryables=queryables, parser='pytables', engine='pytables', encoding=encoding) self.terms = self.parse() def __str__(self): if self.terms is not None: return pprint_thing(self.terms) return pprint_thing(self.expr) def evaluate(self): """ create and return the numexpr condition and filter """ try: self.condition = self.terms.prune(ConditionBinOp) except AttributeError: raise ValueError("cannot process expression [{expr}], [{slf}] " "is not a valid condition".format(expr=self.expr, slf=self)) try: self.filter = self.terms.prune(FilterBinOp) except AttributeError: raise ValueError("cannot process expression [{expr}], [{slf}] " "is not a valid filter".format(expr=self.expr, slf=self)) return self.condition, self.filter class TermValue: """ hold a term value the we use to construct a condition/filter """ def __init__(self, value, converted, kind): self.value = value self.converted = converted self.kind = kind def tostring(self, encoding): """ quote the string if not encoded else encode and return """ if self.kind == 'string': if encoding is not None: return self.converted return '"{converted}"'.format(converted=self.converted) elif self.kind == 'float': # python 2 str(float) is not always # round-trippable so use repr() return repr(self.converted) return self.converted def maybe_expression(s): """ loose checking if s is a pytables-acceptable 
expression """ if not isinstance(s, str): return False ops = ExprVisitor.binary_ops + ExprVisitor.unary_ops + ('=',) # make sure we have an op at least return any(op in s for op in ops)
from copy import deepcopy from distutils.version import LooseVersion from operator import methodcaller import numpy as np import pytest import pandas.util._test_decorators as td import pandas as pd from pandas import DataFrame, MultiIndex, Series, date_range import pandas.util.testing as tm from pandas.util.testing import ( assert_almost_equal, assert_frame_equal, assert_series_equal) from .test_generic import Generic try: import xarray _XARRAY_INSTALLED = True except ImportError: _XARRAY_INSTALLED = False class TestDataFrame(Generic): _typ = DataFrame _comparator = lambda self, x, y: assert_frame_equal(x, y) def test_rename_mi(self): df = DataFrame([ 11, 21, 31 ], index=MultiIndex.from_tuples([("A", x) for x in ["a", "B", "c"]])) df.rename(str.lower) def test_set_axis_name(self): df = pd.DataFrame([[1, 2], [3, 4]]) funcs = ['_set_axis_name', 'rename_axis'] for func in funcs: result = methodcaller(func, 'foo')(df) assert df.index.name is None assert result.index.name == 'foo' result = methodcaller(func, 'cols', axis=1)(df) assert df.columns.name is None assert result.columns.name == 'cols' def test_set_axis_name_mi(self): df = DataFrame( np.empty((3, 3)), index=MultiIndex.from_tuples([("A", x) for x in list('aBc')]), columns=MultiIndex.from_tuples([('C', x) for x in list('xyz')]) ) level_names = ['L1', 'L2'] funcs = ['_set_axis_name', 'rename_axis'] for func in funcs: result = methodcaller(func, level_names)(df) assert result.index.names == level_names assert result.columns.names == [None, None] result = methodcaller(func, level_names, axis=1)(df) assert result.columns.names == ["L1", "L2"] assert result.index.names == [None, None] def test_nonzero_single_element(self): # allow single item via bool method df = DataFrame([[True]]) assert df.bool() df = DataFrame([[False]]) assert not df.bool() df = DataFrame([[False, False]]) with pytest.raises(ValueError): df.bool() with pytest.raises(ValueError): bool(df) def test_get_numeric_data_preserve_dtype(self): # get the numeric data o = DataFrame({'A': [1, '2', 3.]}) result = o._get_numeric_data() expected = DataFrame(index=[0, 1, 2], dtype=object) self._compare(result, expected) def test_metadata_propagation_indiv(self): # groupby df = DataFrame( {'A': ['foo', 'bar', 'foo', 'bar', 'foo', 'bar', 'foo', 'foo'], 'B': ['one', 'one', 'two', 'three', 'two', 'two', 'one', 'three'], 'C': np.random.randn(8), 'D': np.random.randn(8)}) result = df.groupby('A').sum() self.check_metadata(df, result) # resample df = DataFrame(np.random.randn(1000, 2), index=date_range('20130101', periods=1000, freq='s')) result = df.resample('1T') self.check_metadata(df, result) # merging with override # GH 6923 _metadata = DataFrame._metadata _finalize = DataFrame.__finalize__ np.random.seed(10) df1 = DataFrame(np.random.randint(0, 4, (3, 2)), columns=['a', 'b']) df2 = DataFrame(np.random.randint(0, 4, (3, 2)), columns=['c', 'd']) DataFrame._metadata = ['filename'] df1.filename = 'fname1.csv' df2.filename = 'fname2.csv' def finalize(self, other, method=None, **kwargs): for name in self._metadata: if method == 'merge': left, right = other.left, other.right value = getattr(left, name, '') + '|' + getattr(right, name, '') object.__setattr__(self, name, value) else: object.__setattr__(self, name, getattr(other, name, '')) return self DataFrame.__finalize__ = finalize result = df1.merge(df2, left_on=['a'], right_on=['c'], how='inner') assert result.filename == 'fname1.csv|fname2.csv' # concat # GH 6927 DataFrame._metadata = ['filename'] df1 = DataFrame(np.random.randint(0, 4, (3, 
2)), columns=list('ab')) df1.filename = 'foo' def finalize(self, other, method=None, **kwargs): for name in self._metadata: if method == 'concat': value = '+'.join([getattr( o, name) for o in other.objs if getattr(o, name, None) ]) object.__setattr__(self, name, value) else: object.__setattr__(self, name, getattr(other, name, None)) return self DataFrame.__finalize__ = finalize result = pd.concat([df1, df1]) assert result.filename == 'foo+foo' # reset DataFrame._metadata = _metadata DataFrame.__finalize__ = _finalize def test_set_attribute(self): # Test for consistent setattr behavior when an attribute and a column # have the same name (Issue #8994) df = DataFrame({'x': [1, 2, 3]}) df.y = 2 df['y'] = [2, 4, 6] df.y = 5 assert df.y == 5 assert_series_equal(df['y'], Series([2, 4, 6], name='y')) @pytest.mark.skipif(not _XARRAY_INSTALLED or _XARRAY_INSTALLED and LooseVersion(xarray.__version__) < LooseVersion('0.10.0'), reason='xarray >= 0.10.0 required') @pytest.mark.parametrize( "index", ['FloatIndex', 'IntIndex', 'StringIndex', 'UnicodeIndex', 'DateIndex', 'PeriodIndex', 'CategoricalIndex', 'TimedeltaIndex']) def test_to_xarray_index_types(self, index): from xarray import Dataset index = getattr(tm, 'make{}'.format(index)) df = DataFrame({'a': list('abc'), 'b': list(range(1, 4)), 'c': np.arange(3, 6).astype('u1'), 'd': np.arange(4.0, 7.0, dtype='float64'), 'e': [True, False, True], 'f': pd.Categorical(list('abc')), 'g': pd.date_range('20130101', periods=3), 'h': pd.date_range('20130101', periods=3, tz='US/Eastern')} ) df.index = index(3) df.index.name = 'foo' df.columns.name = 'bar' result = df.to_xarray() assert result.dims['foo'] == 3 assert len(result.coords) == 1 assert len(result.data_vars) == 8 assert_almost_equal(list(result.coords.keys()), ['foo']) assert isinstance(result, Dataset) # idempotency # categoricals are not preserved # datetimes w/tz are not preserved # column names are lost expected = df.copy() expected['f'] = expected['f'].astype(object) expected['h'] = expected['h'].astype('datetime64[ns]') expected.columns.name = None assert_frame_equal(result.to_dataframe(), expected, check_index_type=False, check_categorical=False) @td.skip_if_no('xarray', min_version='0.7.0') def test_to_xarray(self): from xarray import Dataset df = DataFrame({'a': list('abc'), 'b': list(range(1, 4)), 'c': np.arange(3, 6).astype('u1'), 'd': np.arange(4.0, 7.0, dtype='float64'), 'e': [True, False, True], 'f': pd.Categorical(list('abc')), 'g': pd.date_range('20130101', periods=3), 'h': pd.date_range('20130101', periods=3, tz='US/Eastern')} ) df.index.name = 'foo' result = df[0:0].to_xarray() assert result.dims['foo'] == 0 assert isinstance(result, Dataset) # available in 0.7.1 # MultiIndex df.index = pd.MultiIndex.from_product([['a'], range(3)], names=['one', 'two']) result = df.to_xarray() assert result.dims['one'] == 1 assert result.dims['two'] == 3 assert len(result.coords) == 2 assert len(result.data_vars) == 8 assert_almost_equal(list(result.coords.keys()), ['one', 'two']) assert isinstance(result, Dataset) result = result.to_dataframe() expected = df.copy() expected['f'] = expected['f'].astype(object) expected['h'] = expected['h'].astype('datetime64[ns]') expected.columns.name = None assert_frame_equal(result, expected, check_index_type=False) def test_deepcopy_empty(self): # This test covers empty frame copying with non-empty column sets # as reported in issue GH15370 empty_frame = DataFrame(data=[], index=[], columns=['A']) empty_frame_copy = deepcopy(empty_frame) 
self._compare(empty_frame_copy, empty_frame)
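The merge/concat tests above temporarily monkey-patch `DataFrame._metadata` and `DataFrame.__finalize__`; outside the test suite the same propagation is usually achieved by subclassing. A minimal sketch, assuming an illustrative `TaggedFrame` class and `filename` attribute that are not part of the test module:

import pandas as pd

class TaggedFrame(pd.DataFrame):
    # declare the custom attribute so pandas carries it across operations
    _metadata = ['filename']

    @property
    def _constructor(self):
        return TaggedFrame

    def __finalize__(self, other, method=None, **kwargs):
        # copy the attribute from the source object onto the result
        if isinstance(other, TaggedFrame):
            object.__setattr__(self, 'filename',
                               getattr(other, 'filename', None))
        return self

tf = TaggedFrame({'x': [1, 2, 3]})
tf.filename = 'data.csv'
assert tf.copy().filename == 'data.csv'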
cbertinato/pandas
pandas/tests/generic/test_frame.py
pandas/core/computation/pytables.py
import numpy as np import pytest import pandas as pd from pandas import Index, MultiIndex @pytest.fixture def idx(): # a MultiIndex used to test the general functionality of the # general functionality of this object major_axis = Index(['foo', 'bar', 'baz', 'qux']) minor_axis = Index(['one', 'two']) major_codes = np.array([0, 0, 1, 2, 3, 3]) minor_codes = np.array([0, 1, 0, 1, 0, 1]) index_names = ['first', 'second'] mi = MultiIndex(levels=[major_axis, minor_axis], codes=[major_codes, minor_codes], names=index_names, verify_integrity=False) return mi @pytest.fixture def idx_dup(): # compare tests/indexes/multi/conftest.py major_axis = Index(['foo', 'bar', 'baz', 'qux']) minor_axis = Index(['one', 'two']) major_codes = np.array([0, 0, 1, 0, 1, 1]) minor_codes = np.array([0, 1, 0, 1, 0, 1]) index_names = ['first', 'second'] mi = MultiIndex(levels=[major_axis, minor_axis], codes=[major_codes, minor_codes], names=index_names, verify_integrity=False) return mi @pytest.fixture def index_names(): # names that match those in the idx fixture for testing equality of # names assigned to the idx return ['first', 'second'] @pytest.fixture def holder(): # the MultiIndex constructor used to base compatibility with pickle return MultiIndex @pytest.fixture def compat_props(): # a MultiIndex must have these properties associated with it return ['shape', 'ndim', 'size'] @pytest.fixture def narrow_multi_index(): """ Return a MultiIndex that is narrower than the display (<80 characters). """ n = 1000 ci = pd.CategoricalIndex(list('a' * n) + (['abc'] * n)) dti = pd.date_range('2000-01-01', freq='s', periods=n * 2) return pd.MultiIndex.from_arrays([ci, ci.codes + 9, dti], names=['a', 'b', 'dti']) @pytest.fixture def wide_multi_index(): """ Return a MultiIndex that is wider than the display (>80 characters). """ n = 1000 ci = pd.CategoricalIndex(list('a' * n) + (['abc'] * n)) dti = pd.date_range('2000-01-01', freq='s', periods=n * 2) levels = [ci, ci.codes + 9, dti, dti, dti] names = ['a', 'b', 'dti_1', 'dti_2', 'dti_3'] return pd.MultiIndex.from_arrays(levels, names=names)
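For reference, a short sketch (not part of the fixtures above) of what the `idx` fixture expands to: the levels/codes construction is equivalent to listing the label tuples directly.

import numpy as np
from pandas import Index, MultiIndex

mi = MultiIndex(levels=[Index(['foo', 'bar', 'baz', 'qux']),
                        Index(['one', 'two'])],
                codes=[np.array([0, 0, 1, 2, 3, 3]),
                       np.array([0, 1, 0, 1, 0, 1])],
                names=['first', 'second'],
                verify_integrity=False)
# each code pair selects one label from each level
assert list(mi) == [('foo', 'one'), ('foo', 'two'), ('bar', 'one'),
                    ('baz', 'two'), ('qux', 'one'), ('qux', 'two')]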
cbertinato/pandas
pandas/tests/generic/test_frame.py
pandas/tests/indexes/multi/conftest.py
import numpy as np import pytest from pandas._libs.tslib import iNaT from pandas.core.dtypes.dtypes import CategoricalDtype import pandas as pd from pandas import ( CategoricalIndex, DatetimeIndex, Index, Int64Index, IntervalIndex, MultiIndex, PeriodIndex, RangeIndex, Series, TimedeltaIndex, UInt64Index, isna) from pandas.core.indexes.base import InvalidIndexError from pandas.core.indexes.datetimelike import DatetimeIndexOpsMixin import pandas.util.testing as tm class Base: """ base class for index sub-class tests """ _holder = None _compat_props = ['shape', 'ndim', 'size', 'nbytes'] def setup_indices(self): for name, idx in self.indices.items(): setattr(self, name, idx) def test_pickle_compat_construction(self): # need an object to create with msg = (r"Index\(\.\.\.\) must be called with a collection of some" r" kind, None was passed|" r"__new__\(\) missing 1 required positional argument: 'data'|" r"__new__\(\) takes at least 2 arguments \(1 given\)") with pytest.raises(TypeError, match=msg): self._holder() def test_to_series(self): # assert that we are creating a copy of the index idx = self.create_index() s = idx.to_series() assert s.values is not idx.values assert s.index is not idx assert s.name == idx.name def test_to_series_with_arguments(self): # GH18699 # index kwarg idx = self.create_index() s = idx.to_series(index=idx) assert s.values is not idx.values assert s.index is idx assert s.name == idx.name # name kwarg idx = self.create_index() s = idx.to_series(name='__test') assert s.values is not idx.values assert s.index is not idx assert s.name != idx.name @pytest.mark.parametrize("name", [None, "new_name"]) def test_to_frame(self, name): # see GH-15230, GH-22580 idx = self.create_index() if name: idx_name = name else: idx_name = idx.name or 0 df = idx.to_frame(name=idx_name) assert df.index is idx assert len(df.columns) == 1 assert df.columns[0] == idx_name assert df[idx_name].values is not idx.values df = idx.to_frame(index=False, name=idx_name) assert df.index is not idx def test_to_frame_datetime_tz(self): # GH 25809 idx = pd.date_range(start='2019-01-01', end='2019-01-30', freq='D') idx = idx.tz_localize('UTC') result = idx.to_frame() expected = pd.DataFrame(idx, index=idx) tm.assert_frame_equal(result, expected) def test_shift(self): # GH8083 test the base class for shift idx = self.create_index() msg = "Not supported for type {}".format(type(idx).__name__) with pytest.raises(NotImplementedError, match=msg): idx.shift(1) with pytest.raises(NotImplementedError, match=msg): idx.shift(1, 2) def test_create_index_existing_name(self): # GH11193, when an existing index is passed, and a new name is not # specified, the new index should inherit the previous object name expected = self.create_index() if not isinstance(expected, MultiIndex): expected.name = 'foo' result = pd.Index(expected) tm.assert_index_equal(result, expected) result = pd.Index(expected, name='bar') expected.name = 'bar' tm.assert_index_equal(result, expected) else: expected.names = ['foo', 'bar'] result = pd.Index(expected) tm.assert_index_equal( result, Index(Index([('foo', 'one'), ('foo', 'two'), ('bar', 'one'), ('baz', 'two'), ('qux', 'one'), ('qux', 'two')], dtype='object'), names=['foo', 'bar'])) result = pd.Index(expected, names=['A', 'B']) tm.assert_index_equal( result, Index(Index([('foo', 'one'), ('foo', 'two'), ('bar', 'one'), ('baz', 'two'), ('qux', 'one'), ('qux', 'two')], dtype='object'), names=['A', 'B'])) def test_numeric_compat(self): idx = self.create_index() with pytest.raises(TypeError, 
match="cannot perform __mul__"): idx * 1 with pytest.raises(TypeError, match="cannot perform __rmul__"): 1 * idx div_err = "cannot perform __truediv__" with pytest.raises(TypeError, match=div_err): idx / 1 div_err = div_err.replace(' __', ' __r') with pytest.raises(TypeError, match=div_err): 1 / idx with pytest.raises(TypeError, match="cannot perform __floordiv__"): idx // 1 with pytest.raises(TypeError, match="cannot perform __rfloordiv__"): 1 // idx def test_logical_compat(self): idx = self.create_index() with pytest.raises(TypeError, match='cannot perform all'): idx.all() with pytest.raises(TypeError, match='cannot perform any'): idx.any() def test_boolean_context_compat(self): # boolean context compat idx = self.create_index() with pytest.raises(ValueError, match='The truth value of a'): if idx: pass def test_reindex_base(self): idx = self.create_index() expected = np.arange(idx.size, dtype=np.intp) actual = idx.get_indexer(idx) tm.assert_numpy_array_equal(expected, actual) with pytest.raises(ValueError, match='Invalid fill method'): idx.get_indexer(idx, method='invalid') def test_get_indexer_consistency(self): # See GH 16819 for name, index in self.indices.items(): if isinstance(index, IntervalIndex): continue if index.is_unique or isinstance(index, CategoricalIndex): indexer = index.get_indexer(index[0:2]) assert isinstance(indexer, np.ndarray) assert indexer.dtype == np.intp else: e = "Reindexing only valid with uniquely valued Index objects" with pytest.raises(InvalidIndexError, match=e): index.get_indexer(index[0:2]) indexer, _ = index.get_indexer_non_unique(index[0:2]) assert isinstance(indexer, np.ndarray) assert indexer.dtype == np.intp def test_ndarray_compat_properties(self): idx = self.create_index() assert idx.T.equals(idx) assert idx.transpose().equals(idx) values = idx.values for prop in self._compat_props: assert getattr(idx, prop) == getattr(values, prop) # test for validity idx.nbytes idx.values.nbytes def test_repr_roundtrip(self): idx = self.create_index() tm.assert_index_equal(eval(repr(idx)), idx) def test_str(self): # test the string repr idx = self.create_index() idx.name = 'foo' assert "'foo'" in str(idx) assert idx.__class__.__name__ in str(idx) def test_repr_max_seq_item_setting(self): # GH10182 idx = self.create_index() idx = idx.repeat(50) with pd.option_context("display.max_seq_items", None): repr(idx) assert '...' not in str(idx) def test_copy_name(self): # gh-12309: Check that the "name" argument # passed at initialization is honored. for name, index in self.indices.items(): if isinstance(index, MultiIndex): continue first = index.__class__(index, copy=True, name='mario') second = first.__class__(first, copy=False) # Even though "copy=False", we want a new object. assert first is not second # Not using tm.assert_index_equal() since names differ. 
assert index.equals(first) assert first.name == 'mario' assert second.name == 'mario' s1 = Series(2, index=first) s2 = Series(3, index=second[:-1]) if not isinstance(index, CategoricalIndex): # See gh-13365 s3 = s1 * s2 assert s3.index.name == 'mario' def test_ensure_copied_data(self): # Check the "copy" argument of each Index.__new__ is honoured # GH12309 for name, index in self.indices.items(): init_kwargs = {} if isinstance(index, PeriodIndex): # Needs "freq" specification: init_kwargs['freq'] = index.freq elif isinstance(index, (RangeIndex, MultiIndex, CategoricalIndex)): # RangeIndex cannot be initialized from data # MultiIndex and CategoricalIndex are tested separately continue index_type = index.__class__ result = index_type(index.values, copy=True, **init_kwargs) tm.assert_index_equal(index, result) tm.assert_numpy_array_equal(index._ndarray_values, result._ndarray_values, check_same='copy') if isinstance(index, PeriodIndex): # .values an object array of Period, thus copied result = index_type(ordinal=index.asi8, copy=False, **init_kwargs) tm.assert_numpy_array_equal(index._ndarray_values, result._ndarray_values, check_same='same') elif isinstance(index, IntervalIndex): # checked in test_interval.py pass else: result = index_type(index.values, copy=False, **init_kwargs) tm.assert_numpy_array_equal(index.values, result.values, check_same='same') tm.assert_numpy_array_equal(index._ndarray_values, result._ndarray_values, check_same='same') def test_memory_usage(self): for name, index in self.indices.items(): result = index.memory_usage() if len(index): index.get_loc(index[0]) result2 = index.memory_usage() result3 = index.memory_usage(deep=True) # RangeIndex, IntervalIndex # don't have engines if not isinstance(index, (RangeIndex, IntervalIndex)): assert result2 > result if index.inferred_type == 'object': assert result3 > result2 else: # we report 0 for no-length assert result == 0 def test_argsort(self): for k, ind in self.indices.items(): # separately tested if k in ['catIndex']: continue result = ind.argsort() expected = np.array(ind).argsort() tm.assert_numpy_array_equal(result, expected, check_dtype=False) def test_numpy_argsort(self): for k, ind in self.indices.items(): result = np.argsort(ind) expected = ind.argsort() tm.assert_numpy_array_equal(result, expected) # these are the only two types that perform # pandas compatibility input validation - the # rest already perform separate (or no) such # validation via their 'values' attribute as # defined in pandas.core.indexes/base.py - they # cannot be changed at the moment due to # backwards compatibility concerns if isinstance(type(ind), (CategoricalIndex, RangeIndex)): msg = "the 'axis' parameter is not supported" with pytest.raises(ValueError, match=msg): np.argsort(ind, axis=1) msg = "the 'kind' parameter is not supported" with pytest.raises(ValueError, match=msg): np.argsort(ind, kind='mergesort') msg = "the 'order' parameter is not supported" with pytest.raises(ValueError, match=msg): np.argsort(ind, order=('a', 'b')) def test_take(self): indexer = [4, 3, 0, 2] for k, ind in self.indices.items(): # separate if k in ['boolIndex', 'tuples', 'empty']: continue result = ind.take(indexer) expected = ind[indexer] assert result.equals(expected) if not isinstance(ind, (DatetimeIndex, PeriodIndex, TimedeltaIndex)): # GH 10791 with pytest.raises(AttributeError): ind.freq def test_take_invalid_kwargs(self): idx = self.create_index() indices = [1, 2] msg = r"take\(\) got an unexpected keyword argument 'foo'" with 
pytest.raises(TypeError, match=msg): idx.take(indices, foo=2) msg = "the 'out' parameter is not supported" with pytest.raises(ValueError, match=msg): idx.take(indices, out=indices) msg = "the 'mode' parameter is not supported" with pytest.raises(ValueError, match=msg): idx.take(indices, mode='clip') def test_repeat(self): rep = 2 i = self.create_index() expected = pd.Index(i.values.repeat(rep), name=i.name) tm.assert_index_equal(i.repeat(rep), expected) i = self.create_index() rep = np.arange(len(i)) expected = pd.Index(i.values.repeat(rep), name=i.name) tm.assert_index_equal(i.repeat(rep), expected) def test_numpy_repeat(self): rep = 2 i = self.create_index() expected = i.repeat(rep) tm.assert_index_equal(np.repeat(i, rep), expected) msg = "the 'axis' parameter is not supported" with pytest.raises(ValueError, match=msg): np.repeat(i, rep, axis=0) @pytest.mark.parametrize('klass', [list, tuple, np.array, Series]) def test_where(self, klass): i = self.create_index() cond = [True] * len(i) result = i.where(klass(cond)) expected = i tm.assert_index_equal(result, expected) cond = [False] + [True] * len(i[1:]) expected = pd.Index([i._na_value] + i[1:].tolist(), dtype=i.dtype) result = i.where(klass(cond)) tm.assert_index_equal(result, expected) @pytest.mark.parametrize("case", [0.5, "xxx"]) @pytest.mark.parametrize("method", ["intersection", "union", "difference", "symmetric_difference"]) def test_set_ops_error_cases(self, case, method): for name, idx in self.indices.items(): # non-iterable input msg = "Input must be Index or array-like" with pytest.raises(TypeError, match=msg): getattr(idx, method)(case) def test_intersection_base(self): for name, idx in self.indices.items(): first = idx[:5] second = idx[:3] intersect = first.intersection(second) if isinstance(idx, CategoricalIndex): pass else: assert tm.equalContents(intersect, second) # GH 10149 cases = [klass(second.values) for klass in [np.array, Series, list]] for case in cases: if isinstance(idx, CategoricalIndex): pass else: result = first.intersection(case) assert tm.equalContents(result, second) if isinstance(idx, MultiIndex): msg = "other must be a MultiIndex or a list of tuples" with pytest.raises(TypeError, match=msg): first.intersection([1, 2, 3]) def test_union_base(self): for name, idx in self.indices.items(): first = idx[3:] second = idx[:5] everything = idx union = first.union(second) assert tm.equalContents(union, everything) # GH 10149 cases = [klass(second.values) for klass in [np.array, Series, list]] for case in cases: if isinstance(idx, CategoricalIndex): pass else: result = first.union(case) assert tm.equalContents(result, everything) if isinstance(idx, MultiIndex): msg = "other must be a MultiIndex or a list of tuples" with pytest.raises(TypeError, match=msg): first.union([1, 2, 3]) @pytest.mark.parametrize("sort", [None, False]) def test_difference_base(self, sort): for name, idx in self.indices.items(): first = idx[2:] second = idx[:4] answer = idx[4:] result = first.difference(second, sort) if isinstance(idx, CategoricalIndex): pass else: assert tm.equalContents(result, answer) # GH 10149 cases = [klass(second.values) for klass in [np.array, Series, list]] for case in cases: if isinstance(idx, CategoricalIndex): pass elif isinstance(idx, (DatetimeIndex, TimedeltaIndex)): assert result.__class__ == answer.__class__ tm.assert_numpy_array_equal(result.sort_values().asi8, answer.sort_values().asi8) else: result = first.difference(case, sort) assert tm.equalContents(result, answer) if isinstance(idx, MultiIndex): msg = 
"other must be a MultiIndex or a list of tuples" with pytest.raises(TypeError, match=msg): first.difference([1, 2, 3], sort) def test_symmetric_difference(self): for name, idx in self.indices.items(): first = idx[1:] second = idx[:-1] if isinstance(idx, CategoricalIndex): pass else: answer = idx[[0, -1]] result = first.symmetric_difference(second) assert tm.equalContents(result, answer) # GH 10149 cases = [klass(second.values) for klass in [np.array, Series, list]] for case in cases: if isinstance(idx, CategoricalIndex): pass else: result = first.symmetric_difference(case) assert tm.equalContents(result, answer) if isinstance(idx, MultiIndex): msg = "other must be a MultiIndex or a list of tuples" with pytest.raises(TypeError, match=msg): first.symmetric_difference([1, 2, 3]) def test_insert_base(self): for name, idx in self.indices.items(): result = idx[1:4] if not len(idx): continue # test 0th element assert idx[0:4].equals(result.insert(0, idx[0])) def test_delete_base(self): for name, idx in self.indices.items(): if not len(idx): continue if isinstance(idx, RangeIndex): # tested in class continue expected = idx[1:] result = idx.delete(0) assert result.equals(expected) assert result.name == expected.name expected = idx[:-1] result = idx.delete(-1) assert result.equals(expected) assert result.name == expected.name with pytest.raises((IndexError, ValueError)): # either depending on numpy version idx.delete(len(idx)) def test_equals(self): for name, idx in self.indices.items(): assert idx.equals(idx) assert idx.equals(idx.copy()) assert idx.equals(idx.astype(object)) assert not idx.equals(list(idx)) assert not idx.equals(np.array(idx)) # Cannot pass in non-int64 dtype to RangeIndex if not isinstance(idx, RangeIndex): same_values = Index(idx, dtype=object) assert idx.equals(same_values) assert same_values.equals(idx) if idx.nlevels == 1: # do not test MultiIndex assert not idx.equals(pd.Series(idx)) def test_equals_op(self): # GH9947, GH10637 index_a = self.create_index() if isinstance(index_a, PeriodIndex): pytest.skip('Skip check for PeriodIndex') n = len(index_a) index_b = index_a[0:-1] index_c = index_a[0:-1].append(index_a[-2:-1]) index_d = index_a[0:1] msg = "Lengths must match|could not be broadcast" with pytest.raises(ValueError, match=msg): index_a == index_b expected1 = np.array([True] * n) expected2 = np.array([True] * (n - 1) + [False]) tm.assert_numpy_array_equal(index_a == index_a, expected1) tm.assert_numpy_array_equal(index_a == index_c, expected2) # test comparisons with numpy arrays array_a = np.array(index_a) array_b = np.array(index_a[0:-1]) array_c = np.array(index_a[0:-1].append(index_a[-2:-1])) array_d = np.array(index_a[0:1]) with pytest.raises(ValueError, match=msg): index_a == array_b tm.assert_numpy_array_equal(index_a == array_a, expected1) tm.assert_numpy_array_equal(index_a == array_c, expected2) # test comparisons with Series series_a = Series(array_a) series_b = Series(array_b) series_c = Series(array_c) series_d = Series(array_d) with pytest.raises(ValueError, match=msg): index_a == series_b tm.assert_numpy_array_equal(index_a == series_a, expected1) tm.assert_numpy_array_equal(index_a == series_c, expected2) # cases where length is 1 for one of them with pytest.raises(ValueError, match="Lengths must match"): index_a == index_d with pytest.raises(ValueError, match="Lengths must match"): index_a == series_d with pytest.raises(ValueError, match="Lengths must match"): index_a == array_d msg = "Can only compare identically-labeled Series objects" with 
pytest.raises(ValueError, match=msg): series_a == series_d with pytest.raises(ValueError, match="Lengths must match"): series_a == array_d # comparing with a scalar should broadcast; note that we are excluding # MultiIndex because in this case each item in the index is a tuple of # length 2, and therefore is considered an array of length 2 in the # comparison instead of a scalar if not isinstance(index_a, MultiIndex): expected3 = np.array([False] * (len(index_a) - 2) + [True, False]) # assuming the 2nd to last item is unique in the data item = index_a[-2] tm.assert_numpy_array_equal(index_a == item, expected3) tm.assert_series_equal(series_a == item, Series(expected3)) def test_hasnans_isnans(self): # GH 11343, added tests for hasnans / isnans for name, index in self.indices.items(): if isinstance(index, MultiIndex): pass else: idx = index.copy() # cases in indices doesn't include NaN expected = np.array([False] * len(idx), dtype=bool) tm.assert_numpy_array_equal(idx._isnan, expected) assert idx.hasnans is False idx = index.copy() values = np.asarray(idx.values) if len(index) == 0: continue elif isinstance(index, DatetimeIndexOpsMixin): values[1] = iNaT elif isinstance(index, (Int64Index, UInt64Index)): continue else: values[1] = np.nan if isinstance(index, PeriodIndex): idx = index.__class__(values, freq=index.freq) else: idx = index.__class__(values) expected = np.array([False] * len(idx), dtype=bool) expected[1] = True tm.assert_numpy_array_equal(idx._isnan, expected) assert idx.hasnans is True def test_fillna(self): # GH 11343 for name, index in self.indices.items(): if len(index) == 0: pass elif isinstance(index, MultiIndex): idx = index.copy() msg = "isna is not defined for MultiIndex" with pytest.raises(NotImplementedError, match=msg): idx.fillna(idx[0]) else: idx = index.copy() result = idx.fillna(idx[0]) tm.assert_index_equal(result, idx) assert result is not idx msg = "'value' must be a scalar, passed: " with pytest.raises(TypeError, match=msg): idx.fillna([idx[0]]) idx = index.copy() values = np.asarray(idx.values) if isinstance(index, DatetimeIndexOpsMixin): values[1] = iNaT elif isinstance(index, (Int64Index, UInt64Index)): continue else: values[1] = np.nan if isinstance(index, PeriodIndex): idx = index.__class__(values, freq=index.freq) else: idx = index.__class__(values) expected = np.array([False] * len(idx), dtype=bool) expected[1] = True tm.assert_numpy_array_equal(idx._isnan, expected) assert idx.hasnans is True def test_nulls(self): # this is really a smoke test for the methods # as these are adequately tested for function elsewhere for name, index in self.indices.items(): if len(index) == 0: tm.assert_numpy_array_equal( index.isna(), np.array([], dtype=bool)) elif isinstance(index, MultiIndex): idx = index.copy() msg = "isna is not defined for MultiIndex" with pytest.raises(NotImplementedError, match=msg): idx.isna() else: if not index.hasnans: tm.assert_numpy_array_equal( index.isna(), np.zeros(len(index), dtype=bool)) tm.assert_numpy_array_equal( index.notna(), np.ones(len(index), dtype=bool)) else: result = isna(index) tm.assert_numpy_array_equal(index.isna(), result) tm.assert_numpy_array_equal(index.notna(), ~result) def test_empty(self): # GH 15270 index = self.create_index() assert not index.empty assert index[:0].empty def test_join_self_unique(self, join_type): index = self.create_index() if index.is_unique: joined = index.join(index, how=join_type) assert (index == joined).all() def test_map(self): # callable index = self.create_index() # we don't infer 
UInt64 if isinstance(index, pd.UInt64Index): expected = index.astype('int64') else: expected = index result = index.map(lambda x: x) tm.assert_index_equal(result, expected) @pytest.mark.parametrize( "mapper", [ lambda values, index: {i: e for e, i in zip(values, index)}, lambda values, index: pd.Series(values, index)]) def test_map_dictlike(self, mapper): index = self.create_index() if isinstance(index, (pd.CategoricalIndex, pd.IntervalIndex)): pytest.skip("skipping tests for {}".format(type(index))) identity = mapper(index.values, index) # we don't infer to UInt64 for a dict if isinstance(index, pd.UInt64Index) and isinstance(identity, dict): expected = index.astype('int64') else: expected = index result = index.map(identity) tm.assert_index_equal(result, expected) # empty mappable expected = pd.Index([np.nan] * len(index)) result = index.map(mapper(expected, index)) tm.assert_index_equal(result, expected) def test_putmask_with_wrong_mask(self): # GH18368 index = self.create_index() with pytest.raises(ValueError): index.putmask(np.ones(len(index) + 1, np.bool), 1) with pytest.raises(ValueError): index.putmask(np.ones(len(index) - 1, np.bool), 1) with pytest.raises(ValueError): index.putmask('foo', 1) @pytest.mark.parametrize('copy', [True, False]) @pytest.mark.parametrize('name', [None, 'foo']) @pytest.mark.parametrize('ordered', [True, False]) def test_astype_category(self, copy, name, ordered): # GH 18630 index = self.create_index() if name: index = index.rename(name) # standard categories dtype = CategoricalDtype(ordered=ordered) result = index.astype(dtype, copy=copy) expected = CategoricalIndex(index.values, name=name, ordered=ordered) tm.assert_index_equal(result, expected) # non-standard categories dtype = CategoricalDtype(index.unique().tolist()[:-1], ordered) result = index.astype(dtype, copy=copy) expected = CategoricalIndex(index.values, name=name, dtype=dtype) tm.assert_index_equal(result, expected) if ordered is False: # dtype='category' defaults to ordered=False, so only test once result = index.astype('category', copy=copy) expected = CategoricalIndex(index.values, name=name) tm.assert_index_equal(result, expected) def test_is_unique(self): # initialize a unique index index = self.create_index().drop_duplicates() assert index.is_unique is True # empty index should be unique index_empty = index[:0] assert index_empty.is_unique is True # test basic dupes index_dup = index.insert(0, index[0]) assert index_dup.is_unique is False # single NA should be unique index_na = index.insert(0, np.nan) assert index_na.is_unique is True # multiple NA should not be unique index_na_dup = index_na.insert(0, np.nan) assert index_na_dup.is_unique is False
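A hypothetical minimal subclass, sketching how the shared `Base` tests above get reused for a concrete index type; `TestDemoInt64Index` and its `demo` index are illustrative only, while the real test classes define the same two hooks plus additional fixtures.

import pandas as pd

class TestDemoInt64Index(Base):
    _holder = pd.Int64Index

    def setup_method(self, method):
        # `self.indices` feeds the loops in the shared tests;
        # `setup_indices` exposes each entry as an attribute.
        self.indices = dict(index=pd.Int64Index(range(5), name='demo'))
        self.setup_indices()

    def create_index(self):
        return pd.Int64Index(range(5), name='demo')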
cbertinato/pandas
pandas/tests/generic/test_frame.py
pandas/tests/indexes/common.py
import numpy as np from pandas._libs import algos as libalgos, index as libindex import pandas.util.testing as tm class TestNumericEngine: def test_is_monotonic(self, numeric_indexing_engine_type_and_dtype): engine_type, dtype = numeric_indexing_engine_type_and_dtype num = 1000 arr = np.array([1] * num + [2] * num + [3] * num, dtype=dtype) # monotonic increasing engine = engine_type(lambda: arr, len(arr)) assert engine.is_monotonic_increasing is True assert engine.is_monotonic_decreasing is False # monotonic decreasing engine = engine_type(lambda: arr[::-1], len(arr)) assert engine.is_monotonic_increasing is False assert engine.is_monotonic_decreasing is True # neither monotonic increasing or decreasing arr = np.array([1] * num + [2] * num + [1] * num, dtype=dtype) engine = engine_type(lambda: arr[::-1], len(arr)) assert engine.is_monotonic_increasing is False assert engine.is_monotonic_decreasing is False def test_is_unique(self, numeric_indexing_engine_type_and_dtype): engine_type, dtype = numeric_indexing_engine_type_and_dtype # unique arr = np.array([1, 3, 2], dtype=dtype) engine = engine_type(lambda: arr, len(arr)) assert engine.is_unique is True # not unique arr = np.array([1, 2, 1], dtype=dtype) engine = engine_type(lambda: arr, len(arr)) assert engine.is_unique is False def test_get_loc(self, numeric_indexing_engine_type_and_dtype): engine_type, dtype = numeric_indexing_engine_type_and_dtype # unique arr = np.array([1, 2, 3], dtype=dtype) engine = engine_type(lambda: arr, len(arr)) assert engine.get_loc(2) == 1 # monotonic num = 1000 arr = np.array([1] * num + [2] * num + [3] * num, dtype=dtype) engine = engine_type(lambda: arr, len(arr)) assert engine.get_loc(2) == slice(1000, 2000) # not monotonic arr = np.array([1, 2, 3] * num, dtype=dtype) engine = engine_type(lambda: arr, len(arr)) expected = np.array([False, True, False] * num, dtype=bool) result = engine.get_loc(2) assert (result == expected).all() def test_get_backfill_indexer( self, numeric_indexing_engine_type_and_dtype): engine_type, dtype = numeric_indexing_engine_type_and_dtype arr = np.array([1, 5, 10], dtype=dtype) engine = engine_type(lambda: arr, len(arr)) new = np.arange(12, dtype=dtype) result = engine.get_backfill_indexer(new) expected = libalgos.backfill(arr, new) tm.assert_numpy_array_equal(result, expected) def test_get_pad_indexer( self, numeric_indexing_engine_type_and_dtype): engine_type, dtype = numeric_indexing_engine_type_and_dtype arr = np.array([1, 5, 10], dtype=dtype) engine = engine_type(lambda: arr, len(arr)) new = np.arange(12, dtype=dtype) result = engine.get_pad_indexer(new) expected = libalgos.pad(arr, new) tm.assert_numpy_array_equal(result, expected) class TestObjectEngine: engine_type = libindex.ObjectEngine dtype = np.object_ values = list('abc') def test_is_monotonic(self): num = 1000 arr = np.array(['a'] * num + ['a'] * num + ['c'] * num, dtype=self.dtype) # monotonic increasing engine = self.engine_type(lambda: arr, len(arr)) assert engine.is_monotonic_increasing is True assert engine.is_monotonic_decreasing is False # monotonic decreasing engine = self.engine_type(lambda: arr[::-1], len(arr)) assert engine.is_monotonic_increasing is False assert engine.is_monotonic_decreasing is True # neither monotonic increasing or decreasing arr = np.array(['a'] * num + ['b'] * num + ['a'] * num, dtype=self.dtype) engine = self.engine_type(lambda: arr[::-1], len(arr)) assert engine.is_monotonic_increasing is False assert engine.is_monotonic_decreasing is False def test_is_unique(self): # unique arr = 
np.array(self.values, dtype=self.dtype) engine = self.engine_type(lambda: arr, len(arr)) assert engine.is_unique is True # not unique arr = np.array(['a', 'b', 'a'], dtype=self.dtype) engine = self.engine_type(lambda: arr, len(arr)) assert engine.is_unique is False def test_get_loc(self): # unique arr = np.array(self.values, dtype=self.dtype) engine = self.engine_type(lambda: arr, len(arr)) assert engine.get_loc('b') == 1 # monotonic num = 1000 arr = np.array(['a'] * num + ['b'] * num + ['c'] * num, dtype=self.dtype) engine = self.engine_type(lambda: arr, len(arr)) assert engine.get_loc('b') == slice(1000, 2000) # not monotonic arr = np.array(self.values * num, dtype=self.dtype) engine = self.engine_type(lambda: arr, len(arr)) expected = np.array([False, True, False] * num, dtype=bool) result = engine.get_loc('b') assert (result == expected).all() def test_get_backfill_indexer(self): arr = np.array(['a', 'e', 'j'], dtype=self.dtype) engine = self.engine_type(lambda: arr, len(arr)) new = np.array(list('abcdefghij'), dtype=self.dtype) result = engine.get_backfill_indexer(new) expected = libalgos.backfill["object"](arr, new) tm.assert_numpy_array_equal(result, expected) def test_get_pad_indexer(self): arr = np.array(['a', 'e', 'j'], dtype=self.dtype) engine = self.engine_type(lambda: arr, len(arr)) new = np.array(list('abcdefghij'), dtype=self.dtype) result = engine.get_pad_indexer(new) expected = libalgos.pad["object"](arr, new) tm.assert_numpy_array_equal(result, expected)
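The same construction can be sketched outside the fixtures, assuming this pandas version's internal `(values_callable, length)` engine constructor used in the tests above; `Int64Engine` here stands in for whatever concrete engine the numeric fixture supplies.

import numpy as np
from pandas._libs import index as libindex

arr = np.array([1, 5, 10], dtype=np.int64)
# the engine takes a callable returning the values, plus their length
engine = libindex.Int64Engine(lambda: arr, len(arr))
assert engine.is_unique is True
assert engine.is_monotonic_increasing is True
assert engine.get_loc(5) == 1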
cbertinato/pandas
pandas/tests/generic/test_frame.py
pandas/tests/indexing/test_indexing_engines.py
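A minimal, self-contained sketch of the concat branch of the metadata-propagation test above. It assumes, as the test itself does, that DataFrame._metadata and DataFrame.__finalize__ can be patched and later restored at class level; the _orig_* names are only for the sketch.

import pandas as pd
from pandas import DataFrame

_orig_metadata = DataFrame._metadata      # save the class-level defaults
_orig_finalize = DataFrame.__finalize__

DataFrame._metadata = ['filename']

def finalize(self, other, method=None, **kwargs):
    # Join 'filename' across the concatenated objects; otherwise copy it over.
    for name in self._metadata:
        if method == 'concat':
            value = '+'.join(getattr(o, name) for o in other.objs
                             if getattr(o, name, None))
            object.__setattr__(self, name, value)
        else:
            object.__setattr__(self, name, getattr(other, name, None))
    return self

DataFrame.__finalize__ = finalize

df = DataFrame({'a': [1, 2, 3]})
df.filename = 'foo'
assert pd.concat([df, df]).filename == 'foo+foo'

# Restore the originals so other DataFrames are unaffected.
DataFrame._metadata = _orig_metadata
DataFrame.__finalize__ = _orig_finalize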
""" Arithmetic operations for PandasObjects This is not a public API. """ import datetime import operator import textwrap from typing import Dict, Optional import warnings import numpy as np from pandas._libs import algos as libalgos, lib, ops as libops from pandas.errors import NullFrequencyError from pandas.util._decorators import Appender from pandas.core.dtypes.cast import ( construct_1d_object_array_from_listlike, find_common_type, maybe_upcast_putmask) from pandas.core.dtypes.common import ( ensure_object, is_bool_dtype, is_categorical_dtype, is_datetime64_dtype, is_datetime64tz_dtype, is_datetimelike_v_numeric, is_extension_array_dtype, is_integer_dtype, is_list_like, is_object_dtype, is_period_dtype, is_scalar, is_timedelta64_dtype, needs_i8_conversion) from pandas.core.dtypes.generic import ( ABCDataFrame, ABCIndex, ABCIndexClass, ABCSeries, ABCSparseArray, ABCSparseSeries) from pandas.core.dtypes.missing import isna, notna import pandas as pd import pandas.core.common as com import pandas.core.missing as missing # ----------------------------------------------------------------------------- # Ops Wrapping Utilities def get_op_result_name(left, right): """ Find the appropriate name to pin to an operation result. This result should always be either an Index or a Series. Parameters ---------- left : {Series, Index} right : object Returns ------- name : object Usually a string """ # `left` is always a pd.Series when called from within ops if isinstance(right, (ABCSeries, pd.Index)): name = _maybe_match_name(left, right) else: name = left.name return name def _maybe_match_name(a, b): """ Try to find a name to attach to the result of an operation between a and b. If only one of these has a `name` attribute, return that name. Otherwise return a consensus name if they match of None if they have different names. Parameters ---------- a : object b : object Returns ------- name : str or None See Also -------- pandas.core.common.consensus_name_attr """ a_has = hasattr(a, 'name') b_has = hasattr(b, 'name') if a_has and b_has: if a.name == b.name: return a.name else: # TODO: what if they both have np.nan for their names? return None elif a_has: return a.name elif b_has: return b.name return None def maybe_upcast_for_op(obj): """ Cast non-pandas objects to pandas types to unify behavior of arithmetic and comparison operations. Parameters ---------- obj: object Returns ------- out : object Notes ----- Be careful to call this *after* determining the `name` attribute to be attached to the result of the arithmetic operation. """ if type(obj) is datetime.timedelta: # GH#22390 cast up to Timedelta to rely on Timedelta # implementation; otherwise operation against numeric-dtype # raises TypeError return pd.Timedelta(obj) elif isinstance(obj, np.timedelta64) and not isna(obj): # In particular non-nanosecond timedelta64 needs to be cast to # nanoseconds, or else we get undesired behavior like # np.timedelta64(3, 'D') / 2 == np.timedelta64(1, 'D') # The isna check is to avoid casting timedelta64("NaT"), which would # return NaT and incorrectly be treated as a datetime-NaT. 
return pd.Timedelta(obj) elif isinstance(obj, np.ndarray) and is_timedelta64_dtype(obj): # GH#22390 Unfortunately we need to special-case right-hand # timedelta64 dtypes because numpy casts integer dtypes to # timedelta64 when operating with timedelta64 return pd.TimedeltaIndex(obj) return obj # ----------------------------------------------------------------------------- # Reversed Operations not available in the stdlib operator module. # Defining these instead of using lambdas allows us to reference them by name. def radd(left, right): return right + left def rsub(left, right): return right - left def rmul(left, right): return right * left def rdiv(left, right): return right / left def rtruediv(left, right): return right / left def rfloordiv(left, right): return right // left def rmod(left, right): # check if right is a string as % is the string # formatting operation; this is a TypeError # otherwise perform the op if isinstance(right, str): raise TypeError("{typ} cannot perform the operation mod".format( typ=type(left).__name__)) return right % left def rdivmod(left, right): return divmod(right, left) def rpow(left, right): return right ** left def rand_(left, right): return operator.and_(right, left) def ror_(left, right): return operator.or_(right, left) def rxor(left, right): return operator.xor(right, left) # ----------------------------------------------------------------------------- def make_invalid_op(name): """ Return a binary method that always raises a TypeError. Parameters ---------- name : str Returns ------- invalid_op : function """ def invalid_op(self, other=None): raise TypeError("cannot perform {name} with this index type: " "{typ}".format(name=name, typ=type(self).__name__)) invalid_op.__name__ = name return invalid_op def _gen_eval_kwargs(name): """ Find the keyword arguments to pass to numexpr for the given operation. Parameters ---------- name : str Returns ------- eval_kwargs : dict Examples -------- >>> _gen_eval_kwargs("__add__") {} >>> _gen_eval_kwargs("rtruediv") {'reversed': True, 'truediv': True} """ kwargs = {} # Series appear to only pass __add__, __radd__, ... # but DataFrame gets both these dunder names _and_ non-dunder names # add, radd, ... name = name.replace('__', '') if name.startswith('r'): if name not in ['radd', 'rand', 'ror', 'rxor']: # Exclude commutative operations kwargs['reversed'] = True if name in ['truediv', 'rtruediv']: kwargs['truediv'] = True if name in ['ne']: kwargs['masker'] = True return kwargs def _gen_fill_zeros(name): """ Find the appropriate fill value to use when filling in undefined values in the results of the given operation caused by operating on (generally dividing by) zero. Parameters ---------- name : str Returns ------- fill_value : {None, np.nan, np.inf} """ name = name.strip('__') if 'div' in name: # truediv, floordiv, div, and reversed variants fill_value = np.inf elif 'mod' in name: # mod, rmod fill_value = np.nan else: fill_value = None return fill_value def _get_frame_op_default_axis(name): """ Only DataFrame cares about default_axis, specifically: special methods have default_axis=None and flex methods have default_axis='columns'. Parameters ---------- name : str Returns ------- default_axis: str or None """ if name.replace('__r', '__') in ['__and__', '__or__', '__xor__']: # bool methods return 'columns' elif name.startswith('__'): # __add__, __mul__, ... return None else: # add, mul, ... return 'columns' def _get_opstr(op, cls): """ Find the operation string, if any, to pass to numexpr for this operation. 
Parameters ---------- op : binary operator cls : class Returns ------- op_str : string or None """ # numexpr is available for non-sparse classes subtyp = getattr(cls, '_subtyp', '') use_numexpr = 'sparse' not in subtyp if not use_numexpr: # if we're not using numexpr, then don't pass a str_rep return None return {operator.add: '+', radd: '+', operator.mul: '*', rmul: '*', operator.sub: '-', rsub: '-', operator.truediv: '/', rtruediv: '/', operator.floordiv: '//', rfloordiv: '//', operator.mod: None, # TODO: Why None for mod but '%' for rmod? rmod: '%', operator.pow: '**', rpow: '**', operator.eq: '==', operator.ne: '!=', operator.le: '<=', operator.lt: '<', operator.ge: '>=', operator.gt: '>', operator.and_: '&', rand_: '&', operator.or_: '|', ror_: '|', operator.xor: '^', rxor: '^', divmod: None, rdivmod: None}[op] def _get_op_name(op, special): """ Find the name to attach to this method according to conventions for special and non-special methods. Parameters ---------- op : binary operator special : bool Returns ------- op_name : str """ opname = op.__name__.strip('_') if special: opname = '__{opname}__'.format(opname=opname) return opname # ----------------------------------------------------------------------------- # Docstring Generation and Templates _add_example_SERIES = """ Examples -------- >>> a = pd.Series([1, 1, 1, np.nan], index=['a', 'b', 'c', 'd']) >>> a a 1.0 b 1.0 c 1.0 d NaN dtype: float64 >>> b = pd.Series([1, np.nan, 1, np.nan], index=['a', 'b', 'd', 'e']) >>> b a 1.0 b NaN d 1.0 e NaN dtype: float64 >>> a.add(b, fill_value=0) a 2.0 b 1.0 c 1.0 d 1.0 e NaN dtype: float64 """ _sub_example_SERIES = """ Examples -------- >>> a = pd.Series([1, 1, 1, np.nan], index=['a', 'b', 'c', 'd']) >>> a a 1.0 b 1.0 c 1.0 d NaN dtype: float64 >>> b = pd.Series([1, np.nan, 1, np.nan], index=['a', 'b', 'd', 'e']) >>> b a 1.0 b NaN d 1.0 e NaN dtype: float64 >>> a.subtract(b, fill_value=0) a 0.0 b 1.0 c 1.0 d -1.0 e NaN dtype: float64 """ _mul_example_SERIES = """ Examples -------- >>> a = pd.Series([1, 1, 1, np.nan], index=['a', 'b', 'c', 'd']) >>> a a 1.0 b 1.0 c 1.0 d NaN dtype: float64 >>> b = pd.Series([1, np.nan, 1, np.nan], index=['a', 'b', 'd', 'e']) >>> b a 1.0 b NaN d 1.0 e NaN dtype: float64 >>> a.multiply(b, fill_value=0) a 1.0 b 0.0 c 0.0 d 0.0 e NaN dtype: float64 """ _div_example_SERIES = """ Examples -------- >>> a = pd.Series([1, 1, 1, np.nan], index=['a', 'b', 'c', 'd']) >>> a a 1.0 b 1.0 c 1.0 d NaN dtype: float64 >>> b = pd.Series([1, np.nan, 1, np.nan], index=['a', 'b', 'd', 'e']) >>> b a 1.0 b NaN d 1.0 e NaN dtype: float64 >>> a.divide(b, fill_value=0) a 1.0 b inf c inf d 0.0 e NaN dtype: float64 """ _floordiv_example_SERIES = """ Examples -------- >>> a = pd.Series([1, 1, 1, np.nan], index=['a', 'b', 'c', 'd']) >>> a a 1.0 b 1.0 c 1.0 d NaN dtype: float64 >>> b = pd.Series([1, np.nan, 1, np.nan], index=['a', 'b', 'd', 'e']) >>> b a 1.0 b NaN d 1.0 e NaN dtype: float64 >>> a.floordiv(b, fill_value=0) a 1.0 b NaN c NaN d 0.0 e NaN dtype: float64 """ _mod_example_SERIES = """ Examples -------- >>> a = pd.Series([1, 1, 1, np.nan], index=['a', 'b', 'c', 'd']) >>> a a 1.0 b 1.0 c 1.0 d NaN dtype: float64 >>> b = pd.Series([1, np.nan, 1, np.nan], index=['a', 'b', 'd', 'e']) >>> b a 1.0 b NaN d 1.0 e NaN dtype: float64 >>> a.mod(b, fill_value=0) a 0.0 b NaN c NaN d 0.0 e NaN dtype: float64 """ _pow_example_SERIES = """ Examples -------- >>> a = pd.Series([1, 1, 1, np.nan], index=['a', 'b', 'c', 'd']) >>> a a 1.0 b 1.0 c 1.0 d NaN dtype: float64 >>> b = pd.Series([1, np.nan, 
1, np.nan], index=['a', 'b', 'd', 'e']) >>> b a 1.0 b NaN d 1.0 e NaN dtype: float64 >>> a.pow(b, fill_value=0) a 1.0 b 1.0 c 1.0 d 0.0 e NaN dtype: float64 """ _op_descriptions = { # Arithmetic Operators 'add': {'op': '+', 'desc': 'Addition', 'reverse': 'radd', 'series_examples': _add_example_SERIES}, 'sub': {'op': '-', 'desc': 'Subtraction', 'reverse': 'rsub', 'series_examples': _sub_example_SERIES}, 'mul': {'op': '*', 'desc': 'Multiplication', 'reverse': 'rmul', 'series_examples': _mul_example_SERIES, 'df_examples': None}, 'mod': {'op': '%', 'desc': 'Modulo', 'reverse': 'rmod', 'series_examples': _mod_example_SERIES}, 'pow': {'op': '**', 'desc': 'Exponential power', 'reverse': 'rpow', 'series_examples': _pow_example_SERIES, 'df_examples': None}, 'truediv': {'op': '/', 'desc': 'Floating division', 'reverse': 'rtruediv', 'series_examples': _div_example_SERIES, 'df_examples': None}, 'floordiv': {'op': '//', 'desc': 'Integer division', 'reverse': 'rfloordiv', 'series_examples': _floordiv_example_SERIES, 'df_examples': None}, 'divmod': {'op': 'divmod', 'desc': 'Integer division and modulo', 'reverse': 'rdivmod', 'series_examples': None, 'df_examples': None}, # Comparison Operators 'eq': {'op': '==', 'desc': 'Equal to', 'reverse': None, 'series_examples': None}, 'ne': {'op': '!=', 'desc': 'Not equal to', 'reverse': None, 'series_examples': None}, 'lt': {'op': '<', 'desc': 'Less than', 'reverse': None, 'series_examples': None}, 'le': {'op': '<=', 'desc': 'Less than or equal to', 'reverse': None, 'series_examples': None}, 'gt': {'op': '>', 'desc': 'Greater than', 'reverse': None, 'series_examples': None}, 'ge': {'op': '>=', 'desc': 'Greater than or equal to', 'reverse': None, 'series_examples': None} } # type: Dict[str, Dict[str, Optional[str]]] _op_names = list(_op_descriptions.keys()) for key in _op_names: reverse_op = _op_descriptions[key]['reverse'] if reverse_op is not None: _op_descriptions[reverse_op] = _op_descriptions[key].copy() _op_descriptions[reverse_op]['reverse'] = key _flex_doc_SERIES = """ Return {desc} of series and other, element-wise (binary operator `{op_name}`). Equivalent to ``{equiv}``, but with support to substitute a fill_value for missing data in one of the inputs. Parameters ---------- other : Series or scalar value fill_value : None or float value, default None (NaN) Fill existing missing (NaN) values, and any new element needed for successful Series alignment, with this value before computation. If data in both corresponding Series locations is missing the result will be missing. level : int or name Broadcast across a level, matching Index values on the passed MultiIndex level. Returns ------- Series The result of the operation. See Also -------- Series.{reverse} """ _arith_doc_FRAME = """ Binary operator %s with support to substitute a fill_value for missing data in one of the inputs Parameters ---------- other : Series, DataFrame, or constant axis : {0, 1, 'index', 'columns'} For Series input, axis to match Series index on fill_value : None or float value, default None Fill existing missing (NaN) values, and any new element needed for successful DataFrame alignment, with this value before computation. 
If data in both corresponding DataFrame locations is missing the result will be missing level : int or name Broadcast across a level, matching Index values on the passed MultiIndex level Returns ------- result : DataFrame Notes ----- Mismatched indices will be unioned together """ _flex_doc_FRAME = """ Get {desc} of dataframe and other, element-wise (binary operator `{op_name}`). Equivalent to ``{equiv}``, but with support to substitute a fill_value for missing data in one of the inputs. With reverse version, `{reverse}`. Among flexible wrappers (`add`, `sub`, `mul`, `div`, `mod`, `pow`) to arithmetic operators: `+`, `-`, `*`, `/`, `//`, `%`, `**`. Parameters ---------- other : scalar, sequence, Series, or DataFrame Any single or multiple element data structure, or list-like object. axis : {{0 or 'index', 1 or 'columns'}} Whether to compare by the index (0 or 'index') or columns (1 or 'columns'). For Series input, axis to match Series index on. level : int or label Broadcast across a level, matching Index values on the passed MultiIndex level. fill_value : float or None, default None Fill existing missing (NaN) values, and any new element needed for successful DataFrame alignment, with this value before computation. If data in both corresponding DataFrame locations is missing the result will be missing. Returns ------- DataFrame Result of the arithmetic operation. See Also -------- DataFrame.add : Add DataFrames. DataFrame.sub : Subtract DataFrames. DataFrame.mul : Multiply DataFrames. DataFrame.div : Divide DataFrames (float division). DataFrame.truediv : Divide DataFrames (float division). DataFrame.floordiv : Divide DataFrames (integer division). DataFrame.mod : Calculate modulo (remainder after division). DataFrame.pow : Calculate exponential power. Notes ----- Mismatched indices will be unioned together. Examples -------- >>> df = pd.DataFrame({{'angles': [0, 3, 4], ... 'degrees': [360, 180, 360]}}, ... index=['circle', 'triangle', 'rectangle']) >>> df angles degrees circle 0 360 triangle 3 180 rectangle 4 360 Add a scalar with operator version which return the same results. >>> df + 1 angles degrees circle 1 361 triangle 4 181 rectangle 5 361 >>> df.add(1) angles degrees circle 1 361 triangle 4 181 rectangle 5 361 Divide by constant with reverse version. >>> df.div(10) angles degrees circle 0.0 36.0 triangle 0.3 18.0 rectangle 0.4 36.0 >>> df.rdiv(10) angles degrees circle inf 0.027778 triangle 3.333333 0.055556 rectangle 2.500000 0.027778 Subtract a list and Series by axis with operator version. >>> df - [1, 2] angles degrees circle -1 358 triangle 2 178 rectangle 3 358 >>> df.sub([1, 2], axis='columns') angles degrees circle -1 358 triangle 2 178 rectangle 3 358 >>> df.sub(pd.Series([1, 1, 1], index=['circle', 'triangle', 'rectangle']), ... axis='index') angles degrees circle -1 359 triangle 2 179 rectangle 3 359 Multiply a DataFrame of different shape with operator version. >>> other = pd.DataFrame({{'angles': [0, 3, 4]}}, ... index=['circle', 'triangle', 'rectangle']) >>> other angles circle 0 triangle 3 rectangle 4 >>> df * other angles degrees circle 0 NaN triangle 9 NaN rectangle 16 NaN >>> df.mul(other, fill_value=0) angles degrees circle 0 0.0 triangle 9 0.0 rectangle 16 0.0 Divide by a MultiIndex by level. >>> df_multindex = pd.DataFrame({{'angles': [0, 3, 4, 4, 5, 6], ... 'degrees': [360, 180, 360, 360, 540, 720]}}, ... index=[['A', 'A', 'A', 'B', 'B', 'B'], ... ['circle', 'triangle', 'rectangle', ... 
'square', 'pentagon', 'hexagon']]) >>> df_multindex angles degrees A circle 0 360 triangle 3 180 rectangle 4 360 B square 4 360 pentagon 5 540 hexagon 6 720 >>> df.div(df_multindex, level=1, fill_value=0) angles degrees A circle NaN 1.0 triangle 1.0 1.0 rectangle 1.0 1.0 B square 0.0 0.0 pentagon 0.0 0.0 hexagon 0.0 0.0 """ _flex_comp_doc_FRAME = """ Get {desc} of dataframe and other, element-wise (binary operator `{op_name}`). Among flexible wrappers (`eq`, `ne`, `le`, `lt`, `ge`, `gt`) to comparison operators. Equivalent to `==`, `=!`, `<=`, `<`, `>=`, `>` with support to choose axis (rows or columns) and level for comparison. Parameters ---------- other : scalar, sequence, Series, or DataFrame Any single or multiple element data structure, or list-like object. axis : {{0 or 'index', 1 or 'columns'}}, default 'columns' Whether to compare by the index (0 or 'index') or columns (1 or 'columns'). level : int or label Broadcast across a level, matching Index values on the passed MultiIndex level. Returns ------- DataFrame of bool Result of the comparison. See Also -------- DataFrame.eq : Compare DataFrames for equality elementwise. DataFrame.ne : Compare DataFrames for inequality elementwise. DataFrame.le : Compare DataFrames for less than inequality or equality elementwise. DataFrame.lt : Compare DataFrames for strictly less than inequality elementwise. DataFrame.ge : Compare DataFrames for greater than inequality or equality elementwise. DataFrame.gt : Compare DataFrames for strictly greater than inequality elementwise. Notes ----- Mismatched indices will be unioned together. `NaN` values are considered different (i.e. `NaN` != `NaN`). Examples -------- >>> df = pd.DataFrame({{'cost': [250, 150, 100], ... 'revenue': [100, 250, 300]}}, ... index=['A', 'B', 'C']) >>> df cost revenue A 250 100 B 150 250 C 100 300 Comparison with a scalar, using either the operator or method: >>> df == 100 cost revenue A False True B False False C True False >>> df.eq(100) cost revenue A False True B False False C True False When `other` is a :class:`Series`, the columns of a DataFrame are aligned with the index of `other` and broadcast: >>> df != pd.Series([100, 250], index=["cost", "revenue"]) cost revenue A True True B True False C False True Use the method to control the broadcast axis: >>> df.ne(pd.Series([100, 300], index=["A", "D"]), axis='index') cost revenue A True False B True True C True True D True True When comparing to an arbitrary sequence, the number of columns must match the number elements in `other`: >>> df == [250, 100] cost revenue A True True B False False C False False Use the method to control the axis: >>> df.eq([250, 250, 100], axis='index') cost revenue A True False B False True C True False Compare to a DataFrame of different shape. >>> other = pd.DataFrame({{'revenue': [300, 250, 100, 150]}}, ... index=['A', 'B', 'C', 'D']) >>> other revenue A 300 B 250 C 100 D 150 >>> df.gt(other) cost revenue A False False B False False C False True D False False Compare to a MultiIndex by level. >>> df_multindex = pd.DataFrame({{'cost': [250, 150, 100, 150, 300, 220], ... 'revenue': [100, 250, 300, 200, 175, 225]}}, ... index=[['Q1', 'Q1', 'Q1', 'Q2', 'Q2', 'Q2'], ... 
['A', 'B', 'C', 'A', 'B', 'C']]) >>> df_multindex cost revenue Q1 A 250 100 B 150 250 C 100 300 Q2 A 150 200 B 300 175 C 220 225 >>> df.le(df_multindex, level=1) cost revenue Q1 A True True B True True C True True Q2 A False True B True False C True False """ def _make_flex_doc(op_name, typ): """ Make the appropriate substitutions for the given operation and class-typ into either _flex_doc_SERIES or _flex_doc_FRAME to return the docstring to attach to a generated method. Parameters ---------- op_name : str {'__add__', '__sub__', ... '__eq__', '__ne__', ...} typ : str {series, 'dataframe']} Returns ------- doc : str """ op_name = op_name.replace('__', '') op_desc = _op_descriptions[op_name] if op_name.startswith('r'): equiv = 'other ' + op_desc['op'] + ' ' + typ else: equiv = typ + ' ' + op_desc['op'] + ' other' if typ == 'series': base_doc = _flex_doc_SERIES doc_no_examples = base_doc.format( desc=op_desc['desc'], op_name=op_name, equiv=equiv, reverse=op_desc['reverse'] ) if op_desc['series_examples']: doc = doc_no_examples + op_desc['series_examples'] else: doc = doc_no_examples elif typ == 'dataframe': base_doc = _flex_doc_FRAME doc = base_doc.format( desc=op_desc['desc'], op_name=op_name, equiv=equiv, reverse=op_desc['reverse'] ) else: raise AssertionError('Invalid typ argument.') return doc # ----------------------------------------------------------------------------- # Masking NA values and fallbacks for operations numpy does not support def fill_binop(left, right, fill_value): """ If a non-None fill_value is given, replace null entries in left and right with this value, but only in positions where _one_ of left/right is null, not both. Parameters ---------- left : array-like right : array-like fill_value : object Returns ------- left : array-like right : array-like Notes ----- Makes copies if fill_value is not None """ # TODO: can we make a no-copy implementation? if fill_value is not None: left_mask = isna(left) right_mask = isna(right) left = left.copy() right = right.copy() # one but not both mask = left_mask ^ right_mask left[left_mask & mask] = fill_value right[right_mask & mask] = fill_value return left, right def mask_cmp_op(x, y, op): """ Apply the function `op` to only non-null points in x and y. Parameters ---------- x : array-like y : array-like op : binary operation Returns ------- result : ndarray[bool] """ xrav = x.ravel() result = np.empty(x.size, dtype=bool) if isinstance(y, (np.ndarray, ABCSeries)): yrav = y.ravel() mask = notna(xrav) & notna(yrav) result[mask] = op(np.array(list(xrav[mask])), np.array(list(yrav[mask]))) else: mask = notna(xrav) result[mask] = op(np.array(list(xrav[mask])), y) if op == operator.ne: # pragma: no cover np.putmask(result, ~mask, True) else: np.putmask(result, ~mask, False) result = result.reshape(x.shape) return result def masked_arith_op(x, y, op): """ If the given arithmetic operation fails, attempt it again on only the non-null elements of the input array(s). Parameters ---------- x : np.ndarray y : np.ndarray, Series, Index op : binary operator """ # For Series `x` is 1D so ravel() is a no-op; calling it anyway makes # the logic valid for both Series and DataFrame ops. xrav = x.ravel() assert isinstance(x, (np.ndarray, ABCSeries)), type(x) if isinstance(y, (np.ndarray, ABCSeries, ABCIndexClass)): dtype = find_common_type([x.dtype, y.dtype]) result = np.empty(x.size, dtype=dtype) # PeriodIndex.ravel() returns int64 dtype, so we have # to work around that case. 
See GH#19956 yrav = y if is_period_dtype(y) else y.ravel() mask = notna(xrav) & notna(yrav) if yrav.shape != mask.shape: # FIXME: GH#5284, GH#5035, GH#19448 # Without specifically raising here we get mismatched # errors in Py3 (TypeError) vs Py2 (ValueError) # Note: Only = an issue in DataFrame case raise ValueError('Cannot broadcast operands together.') if mask.any(): with np.errstate(all='ignore'): result[mask] = op(xrav[mask], com.values_from_object(yrav[mask])) else: assert is_scalar(y), type(y) assert isinstance(x, np.ndarray), type(x) # mask is only meaningful for x result = np.empty(x.size, dtype=x.dtype) mask = notna(xrav) # 1 ** np.nan is 1. So we have to unmask those. if op == pow: mask = np.where(x == 1, False, mask) elif op == rpow: mask = np.where(y == 1, False, mask) if mask.any(): with np.errstate(all='ignore'): result[mask] = op(xrav[mask], y) result, changed = maybe_upcast_putmask(result, ~mask, np.nan) result = result.reshape(x.shape) # 2D compat return result def invalid_comparison(left, right, op): """ If a comparison has mismatched types and is not necessarily meaningful, follow python3 conventions by: - returning all-False for equality - returning all-True for inequality - raising TypeError otherwise Parameters ---------- left : array-like right : scalar, array-like op : operator.{eq, ne, lt, le, gt} Raises ------ TypeError : on inequality comparisons """ if op is operator.eq: res_values = np.zeros(left.shape, dtype=bool) elif op is operator.ne: res_values = np.ones(left.shape, dtype=bool) else: raise TypeError("Invalid comparison between dtype={dtype} and {typ}" .format(dtype=left.dtype, typ=type(right).__name__)) return res_values # ----------------------------------------------------------------------------- # Dispatch logic def should_series_dispatch(left, right, op): """ Identify cases where a DataFrame operation should dispatch to its Series counterpart. Parameters ---------- left : DataFrame right : DataFrame op : binary operator Returns ------- override : bool """ if left._is_mixed_type or right._is_mixed_type: return True if not len(left.columns) or not len(right.columns): # ensure obj.dtypes[0] exists for each obj return False ldtype = left.dtypes.iloc[0] rdtype = right.dtypes.iloc[0] if ((is_timedelta64_dtype(ldtype) and is_integer_dtype(rdtype)) or (is_timedelta64_dtype(rdtype) and is_integer_dtype(ldtype))): # numpy integer dtypes as timedelta64 dtypes in this scenario return True if is_datetime64_dtype(ldtype) and is_object_dtype(rdtype): # in particular case where right is an array of DateOffsets return True return False def dispatch_to_series(left, right, func, str_rep=None, axis=None): """ Evaluate the frame operation func(left, right) by evaluating column-by-column, dispatching to the Series implementation. Parameters ---------- left : DataFrame right : scalar or DataFrame func : arithmetic or comparison operator str_rep : str or None, default None axis : {None, 0, 1, "index", "columns"} Returns ------- DataFrame """ # Note: we use iloc to access columns for compat with cases # with non-unique columns. 
import pandas.core.computation.expressions as expressions right = lib.item_from_zerodim(right) if lib.is_scalar(right) or np.ndim(right) == 0: def column_op(a, b): return {i: func(a.iloc[:, i], b) for i in range(len(a.columns))} elif isinstance(right, ABCDataFrame): assert right._indexed_same(left) def column_op(a, b): return {i: func(a.iloc[:, i], b.iloc[:, i]) for i in range(len(a.columns))} elif isinstance(right, ABCSeries) and axis == "columns": # We only get here if called via left._combine_match_columns, # in which case we specifically want to operate row-by-row assert right.index.equals(left.columns) def column_op(a, b): return {i: func(a.iloc[:, i], b.iloc[i]) for i in range(len(a.columns))} elif isinstance(right, ABCSeries): assert right.index.equals(left.index) # Handle other cases later def column_op(a, b): return {i: func(a.iloc[:, i], b) for i in range(len(a.columns))} else: # Remaining cases have less-obvious dispatch rules raise NotImplementedError(right) new_data = expressions.evaluate(column_op, str_rep, left, right) result = left._constructor(new_data, index=left.index, copy=False) # Pin columns instead of passing to constructor for compat with # non-unique columns case result.columns = left.columns return result def dispatch_to_index_op(op, left, right, index_class): """ Wrap Series left in the given index_class to delegate the operation op to the index implementation. DatetimeIndex and TimedeltaIndex perform type checking, timezone handling, overflow checks, etc. Parameters ---------- op : binary operator (operator.add, operator.sub, ...) left : Series right : object index_class : DatetimeIndex or TimedeltaIndex Returns ------- result : object, usually DatetimeIndex, TimedeltaIndex, or Series """ left_idx = index_class(left) # avoid accidentally allowing integer add/sub. For datetime64[tz] dtypes, # left_idx may inherit a freq from a cached DatetimeIndex. # See discussion in GH#19147. if getattr(left_idx, 'freq', None) is not None: left_idx = left_idx._shallow_copy(freq=None) try: result = op(left_idx, right) except NullFrequencyError: # DatetimeIndex and TimedeltaIndex with freq == None raise ValueError # on add/sub of integers (or int-like). We re-raise as a TypeError. raise TypeError('incompatible type for a datetime/timedelta ' 'operation [{name}]'.format(name=op.__name__)) return result def dispatch_to_extension_op(op, left, right): """ Assume that left or right is a Series backed by an ExtensionArray, apply the operator defined by op. """ # The op calls will raise TypeError if the op is not defined # on the ExtensionArray # unbox Series and Index to arrays if isinstance(left, (ABCSeries, ABCIndexClass)): new_left = left._values else: new_left = left if isinstance(right, (ABCSeries, ABCIndexClass)): new_right = right._values else: new_right = right res_values = op(new_left, new_right) res_name = get_op_result_name(left, right) if op.__name__ in ['divmod', 'rdivmod']: return _construct_divmod_result( left, res_values, left.index, res_name) return _construct_result(left, res_values, left.index, res_name) # ----------------------------------------------------------------------------- # Functions that add arithmetic methods to objects, given arithmetic factory # methods def _get_method_wrappers(cls): """ Find the appropriate operation-wrappers to use when defining flex/special arithmetic, boolean, and comparison operations with the given class. 
Parameters ---------- cls : class Returns ------- arith_flex : function or None comp_flex : function or None arith_special : function comp_special : function bool_special : function Notes ----- None is only returned for SparseArray """ if issubclass(cls, ABCSparseSeries): # Be sure to catch this before ABCSeries and ABCSparseArray, # as they will both come see SparseSeries as a subclass arith_flex = _flex_method_SERIES comp_flex = _flex_method_SERIES arith_special = _arith_method_SPARSE_SERIES comp_special = _arith_method_SPARSE_SERIES bool_special = _bool_method_SERIES # TODO: I don't think the functions defined by bool_method are tested elif issubclass(cls, ABCSeries): # Just Series; SparseSeries is caught above arith_flex = _flex_method_SERIES comp_flex = _flex_method_SERIES arith_special = _arith_method_SERIES comp_special = _comp_method_SERIES bool_special = _bool_method_SERIES elif issubclass(cls, ABCSparseArray): arith_flex = None comp_flex = None arith_special = _arith_method_SPARSE_ARRAY comp_special = _arith_method_SPARSE_ARRAY bool_special = _arith_method_SPARSE_ARRAY elif issubclass(cls, ABCDataFrame): # Same for DataFrame and SparseDataFrame arith_flex = _arith_method_FRAME comp_flex = _flex_comp_method_FRAME arith_special = _arith_method_FRAME comp_special = _comp_method_FRAME bool_special = _arith_method_FRAME return arith_flex, comp_flex, arith_special, comp_special, bool_special def _create_methods(cls, arith_method, comp_method, bool_method, special): # creates actual methods based upon arithmetic, comp and bool method # constructors. have_divmod = issubclass(cls, ABCSeries) # divmod is available for Series and SparseSeries # yapf: disable new_methods = dict( add=arith_method(cls, operator.add, special), radd=arith_method(cls, radd, special), sub=arith_method(cls, operator.sub, special), mul=arith_method(cls, operator.mul, special), truediv=arith_method(cls, operator.truediv, special), floordiv=arith_method(cls, operator.floordiv, special), # Causes a floating point exception in the tests when numexpr enabled, # so for now no speedup mod=arith_method(cls, operator.mod, special), pow=arith_method(cls, operator.pow, special), # not entirely sure why this is necessary, but previously was included # so it's here to maintain compatibility rmul=arith_method(cls, rmul, special), rsub=arith_method(cls, rsub, special), rtruediv=arith_method(cls, rtruediv, special), rfloordiv=arith_method(cls, rfloordiv, special), rpow=arith_method(cls, rpow, special), rmod=arith_method(cls, rmod, special)) # yapf: enable new_methods['div'] = new_methods['truediv'] new_methods['rdiv'] = new_methods['rtruediv'] if have_divmod: # divmod doesn't have an op that is supported by numexpr new_methods['divmod'] = arith_method(cls, divmod, special) new_methods['rdivmod'] = arith_method(cls, rdivmod, special) new_methods.update(dict( eq=comp_method(cls, operator.eq, special), ne=comp_method(cls, operator.ne, special), lt=comp_method(cls, operator.lt, special), gt=comp_method(cls, operator.gt, special), le=comp_method(cls, operator.le, special), ge=comp_method(cls, operator.ge, special))) if bool_method: new_methods.update( dict(and_=bool_method(cls, operator.and_, special), or_=bool_method(cls, operator.or_, special), # For some reason ``^`` wasn't used in original. 
xor=bool_method(cls, operator.xor, special), rand_=bool_method(cls, rand_, special), ror_=bool_method(cls, ror_, special), rxor=bool_method(cls, rxor, special))) if special: dunderize = lambda x: '__{name}__'.format(name=x.strip('_')) else: dunderize = lambda x: x new_methods = {dunderize(k): v for k, v in new_methods.items()} return new_methods def add_methods(cls, new_methods): for name, method in new_methods.items(): # For most methods, if we find that the class already has a method # of the same name, it is OK to over-write it. The exception is # inplace methods (__iadd__, __isub__, ...) for SparseArray, which # retain the np.ndarray versions. force = not (issubclass(cls, ABCSparseArray) and name.startswith('__i')) if force or name not in cls.__dict__: setattr(cls, name, method) # ---------------------------------------------------------------------- # Arithmetic def add_special_arithmetic_methods(cls): """ Adds the full suite of special arithmetic methods (``__add__``, ``__sub__``, etc.) to the class. Parameters ---------- cls : class special methods will be defined and pinned to this class """ _, _, arith_method, comp_method, bool_method = _get_method_wrappers(cls) new_methods = _create_methods(cls, arith_method, comp_method, bool_method, special=True) # inplace operators (I feel like these should get passed an `inplace=True` # or just be removed def _wrap_inplace_method(method): """ return an inplace wrapper for this method """ def f(self, other): result = method(self, other) # this makes sure that we are aligned like the input # we are updating inplace so we want to ignore is_copy self._update_inplace(result.reindex_like(self, copy=False)._data, verify_is_copy=False) return self f.__name__ = "__i{name}__".format(name=method.__name__.strip("__")) return f new_methods.update( dict(__iadd__=_wrap_inplace_method(new_methods["__add__"]), __isub__=_wrap_inplace_method(new_methods["__sub__"]), __imul__=_wrap_inplace_method(new_methods["__mul__"]), __itruediv__=_wrap_inplace_method(new_methods["__truediv__"]), __ifloordiv__=_wrap_inplace_method(new_methods["__floordiv__"]), __imod__=_wrap_inplace_method(new_methods["__mod__"]), __ipow__=_wrap_inplace_method(new_methods["__pow__"]))) new_methods.update( dict(__iand__=_wrap_inplace_method(new_methods["__and__"]), __ior__=_wrap_inplace_method(new_methods["__or__"]), __ixor__=_wrap_inplace_method(new_methods["__xor__"]))) add_methods(cls, new_methods=new_methods) def add_flex_arithmetic_methods(cls): """ Adds the full suite of flex arithmetic methods (``pow``, ``mul``, ``add``) to the class. 
Parameters ---------- cls : class flex methods will be defined and pinned to this class """ flex_arith_method, flex_comp_method, _, _, _ = _get_method_wrappers(cls) new_methods = _create_methods(cls, flex_arith_method, flex_comp_method, bool_method=None, special=False) new_methods.update(dict(multiply=new_methods['mul'], subtract=new_methods['sub'], divide=new_methods['div'])) # opt out of bool flex methods for now assert not any(kname in new_methods for kname in ('ror_', 'rxor', 'rand_')) add_methods(cls, new_methods=new_methods) # ----------------------------------------------------------------------------- # Series def _align_method_SERIES(left, right, align_asobject=False): """ align lhs and rhs Series """ # ToDo: Different from _align_method_FRAME, list, tuple and ndarray # are not coerced here # because Series has inconsistencies described in #13637 if isinstance(right, ABCSeries): # avoid repeated alignment if not left.index.equals(right.index): if align_asobject: # to keep original value's dtype for bool ops left = left.astype(object) right = right.astype(object) left, right = left.align(right, copy=False) return left, right def _construct_result(left, result, index, name, dtype=None): """ If the raw op result has a non-None name (e.g. it is an Index object) and the name argument is None, then passing name to the constructor will not be enough; we still need to override the name attribute. """ out = left._constructor(result, index=index, dtype=dtype) out = out.__finalize__(left) out.name = name return out def _construct_divmod_result(left, result, index, name, dtype=None): """divmod returns a tuple of like indexed series instead of a single series. """ return ( _construct_result(left, result[0], index=index, name=name, dtype=dtype), _construct_result(left, result[1], index=index, name=name, dtype=dtype), ) def _arith_method_SERIES(cls, op, special): """ Wrapper function for Series arithmetic operations, to avoid code duplication. """ str_rep = _get_opstr(op, cls) op_name = _get_op_name(op, special) eval_kwargs = _gen_eval_kwargs(op_name) fill_zeros = _gen_fill_zeros(op_name) construct_result = (_construct_divmod_result if op in [divmod, rdivmod] else _construct_result) def na_op(x, y): """ Return the result of evaluating op on the passed in values. If native types are not compatible, try coersion to object dtype. Parameters ---------- x : array-like y : array-like or scalar Returns ------- array-like Raises ------ TypeError : invalid operation """ import pandas.core.computation.expressions as expressions try: result = expressions.evaluate(op, str_rep, x, y, **eval_kwargs) except TypeError: result = masked_arith_op(x, y, op) except Exception: # TODO: more specific? if is_object_dtype(x): return libalgos.arrmap_object(x, lambda val: op(val, y)) raise result = missing.fill_zeros(result, x, y, op_name, fill_zeros) return result def wrapper(left, right): if isinstance(right, ABCDataFrame): return NotImplemented left, right = _align_method_SERIES(left, right) res_name = get_op_result_name(left, right) right = maybe_upcast_for_op(right) if is_categorical_dtype(left): raise TypeError("{typ} cannot perform the operation " "{op}".format(typ=type(left).__name__, op=str_rep)) elif is_datetime64_dtype(left) or is_datetime64tz_dtype(left): # Give dispatch_to_index_op a chance for tests like # test_dt64_series_add_intlike, which the index dispatching handles # specifically. 
result = dispatch_to_index_op(op, left, right, pd.DatetimeIndex) return construct_result(left, result, index=left.index, name=res_name, dtype=result.dtype) elif (is_extension_array_dtype(left) or (is_extension_array_dtype(right) and not is_scalar(right))): # GH#22378 disallow scalar to exclude e.g. "category", "Int64" return dispatch_to_extension_op(op, left, right) elif is_timedelta64_dtype(left): result = dispatch_to_index_op(op, left, right, pd.TimedeltaIndex) return construct_result(left, result, index=left.index, name=res_name) elif is_timedelta64_dtype(right): # We should only get here with non-scalar or timedelta64('NaT') # values for right # Note: we cannot use dispatch_to_index_op because # that may incorrectly raise TypeError when we # should get NullFrequencyError result = op(pd.Index(left), right) return construct_result(left, result, index=left.index, name=res_name, dtype=result.dtype) lvalues = left.values rvalues = right if isinstance(rvalues, ABCSeries): rvalues = rvalues.values with np.errstate(all='ignore'): result = na_op(lvalues, rvalues) return construct_result(left, result, index=left.index, name=res_name, dtype=None) wrapper.__name__ = op_name return wrapper def _comp_method_OBJECT_ARRAY(op, x, y): if isinstance(y, list): y = construct_1d_object_array_from_listlike(y) if isinstance(y, (np.ndarray, ABCSeries, ABCIndex)): if not is_object_dtype(y.dtype): y = y.astype(np.object_) if isinstance(y, (ABCSeries, ABCIndex)): y = y.values result = libops.vec_compare(x, y, op) else: result = libops.scalar_compare(x, y, op) return result def _comp_method_SERIES(cls, op, special): """ Wrapper function for Series arithmetic operations, to avoid code duplication. """ op_name = _get_op_name(op, special) masker = _gen_eval_kwargs(op_name).get('masker', False) def na_op(x, y): # TODO: # should have guarantess on what x, y can be type-wise # Extension Dtypes are not called here # Checking that cases that were once handled here are no longer # reachable. assert not (is_categorical_dtype(y) and not is_scalar(y)) if is_object_dtype(x.dtype): result = _comp_method_OBJECT_ARRAY(op, x, y) elif is_datetimelike_v_numeric(x, y): return invalid_comparison(x, y, op) else: # we want to compare like types # we only want to convert to integer like if # we are not NotImplemented, otherwise # we would allow datetime64 (but viewed as i8) against # integer comparisons # we have a datetime/timedelta and may need to convert assert not needs_i8_conversion(x) mask = None if not is_scalar(y) and needs_i8_conversion(y): mask = isna(x) | isna(y) y = y.view('i8') x = x.view('i8') method = getattr(x, op_name, None) if method is not None: with np.errstate(all='ignore'): result = method(y) if result is NotImplemented: return invalid_comparison(x, y, op) else: result = op(x, y) if mask is not None and mask.any(): result[mask] = masker return result def wrapper(self, other, axis=None): # Validate the axis parameter if axis is not None: self._get_axis_number(axis) res_name = get_op_result_name(self, other) if isinstance(other, list): # TODO: same for tuples? 
other = np.asarray(other) if isinstance(other, ABCDataFrame): # pragma: no cover # Defer to DataFrame implementation; fail early return NotImplemented elif isinstance(other, ABCSeries) and not self._indexed_same(other): raise ValueError("Can only compare identically-labeled " "Series objects") elif is_categorical_dtype(self): # Dispatch to Categorical implementation; pd.CategoricalIndex # behavior is non-canonical GH#19513 res_values = dispatch_to_index_op(op, self, other, pd.Categorical) return self._constructor(res_values, index=self.index, name=res_name) elif is_datetime64_dtype(self) or is_datetime64tz_dtype(self): # Dispatch to DatetimeIndex to ensure identical # Series/Index behavior if (isinstance(other, datetime.date) and not isinstance(other, datetime.datetime)): # https://github.com/pandas-dev/pandas/issues/21152 # Compatibility for difference between Series comparison w/ # datetime and date msg = ( "Comparing Series of datetimes with 'datetime.date'. " "Currently, the 'datetime.date' is coerced to a " "datetime. In the future pandas will not coerce, " "and {future}. " "To retain the current behavior, " "convert the 'datetime.date' to a datetime with " "'pd.Timestamp'." ) if op in {operator.lt, operator.le, operator.gt, operator.ge}: future = "a TypeError will be raised" else: future = ( "'the values will not compare equal to the " "'datetime.date'" ) msg = '\n'.join(textwrap.wrap(msg.format(future=future))) warnings.warn(msg, FutureWarning, stacklevel=2) other = pd.Timestamp(other) res_values = dispatch_to_index_op(op, self, other, pd.DatetimeIndex) return self._constructor(res_values, index=self.index, name=res_name) elif is_timedelta64_dtype(self): res_values = dispatch_to_index_op(op, self, other, pd.TimedeltaIndex) return self._constructor(res_values, index=self.index, name=res_name) elif (is_extension_array_dtype(self) or (is_extension_array_dtype(other) and not is_scalar(other))): # Note: the `not is_scalar(other)` condition rules out # e.g. other == "category" return dispatch_to_extension_op(op, self, other) elif isinstance(other, ABCSeries): # By this point we have checked that self._indexed_same(other) res_values = na_op(self.values, other.values) # rename is needed in case res_name is None and res_values.name # is not. return self._constructor(res_values, index=self.index, name=res_name).rename(res_name) elif isinstance(other, (np.ndarray, pd.Index)): # do not check length of zerodim array # as it will broadcast if other.ndim != 0 and len(self) != len(other): raise ValueError('Lengths must match to compare') res_values = na_op(self.values, np.asarray(other)) result = self._constructor(res_values, index=self.index) # rename is needed in case res_name is None and self.name # is not. 
return result.__finalize__(self).rename(res_name) elif is_scalar(other) and isna(other): # numpy does not like comparisons vs None if op is operator.ne: res_values = np.ones(len(self), dtype=bool) else: res_values = np.zeros(len(self), dtype=bool) return self._constructor(res_values, index=self.index, name=res_name, dtype='bool') else: values = self.get_values() with np.errstate(all='ignore'): res = na_op(values, other) if is_scalar(res): raise TypeError('Could not compare {typ} type with Series' .format(typ=type(other))) # always return a full value series here res_values = com.values_from_object(res) return self._constructor(res_values, index=self.index, name=res_name, dtype='bool') wrapper.__name__ = op_name return wrapper def _bool_method_SERIES(cls, op, special): """ Wrapper function for Series arithmetic operations, to avoid code duplication. """ op_name = _get_op_name(op, special) def na_op(x, y): try: result = op(x, y) except TypeError: assert not isinstance(y, (list, ABCSeries, ABCIndexClass)) if isinstance(y, np.ndarray): # bool-bool dtype operations should be OK, should not get here assert not (is_bool_dtype(x) and is_bool_dtype(y)) x = ensure_object(x) y = ensure_object(y) result = libops.vec_binop(x, y, op) else: # let null fall thru assert lib.is_scalar(y) if not isna(y): y = bool(y) try: result = libops.scalar_binop(x, y, op) except (TypeError, ValueError, AttributeError, OverflowError, NotImplementedError): raise TypeError("cannot compare a dtyped [{dtype}] array " "with a scalar of type [{typ}]" .format(dtype=x.dtype, typ=type(y).__name__)) return result fill_int = lambda x: x.fillna(0) fill_bool = lambda x: x.fillna(False).astype(bool) def wrapper(self, other): is_self_int_dtype = is_integer_dtype(self.dtype) self, other = _align_method_SERIES(self, other, align_asobject=True) res_name = get_op_result_name(self, other) if isinstance(other, ABCDataFrame): # Defer to DataFrame implementation; fail early return NotImplemented elif isinstance(other, (ABCSeries, ABCIndexClass)): is_other_int_dtype = is_integer_dtype(other.dtype) other = fill_int(other) if is_other_int_dtype else fill_bool(other) ovalues = other.values finalizer = lambda x: x else: # scalars, list, tuple, np.array is_other_int_dtype = is_integer_dtype(np.asarray(other)) if is_list_like(other) and not isinstance(other, np.ndarray): # TODO: Can we do this before the is_integer_dtype check? # could the is_integer_dtype check be checking the wrong # thing? e.g. other = [[0, 1], [2, 3], [4, 5]]? other = construct_1d_object_array_from_listlike(other) ovalues = other finalizer = lambda x: x.__finalize__(self) # For int vs int `^`, `|`, `&` are bitwise operators and return # integer dtypes. 
Otherwise these are boolean ops filler = (fill_int if is_self_int_dtype and is_other_int_dtype else fill_bool) res_values = na_op(self.values, ovalues) unfilled = self._constructor(res_values, index=self.index, name=res_name) filled = filler(unfilled) return finalizer(filled) wrapper.__name__ = op_name return wrapper def _flex_method_SERIES(cls, op, special): name = _get_op_name(op, special) doc = _make_flex_doc(name, 'series') @Appender(doc) def flex_wrapper(self, other, level=None, fill_value=None, axis=0): # validate axis if axis is not None: self._get_axis_number(axis) if isinstance(other, ABCSeries): return self._binop(other, op, level=level, fill_value=fill_value) elif isinstance(other, (np.ndarray, list, tuple)): if len(other) != len(self): raise ValueError('Lengths must be equal') other = self._constructor(other, self.index) return self._binop(other, op, level=level, fill_value=fill_value) else: if fill_value is not None: self = self.fillna(fill_value) return self._constructor(op(self, other), self.index).__finalize__(self) flex_wrapper.__name__ = name return flex_wrapper # ----------------------------------------------------------------------------- # DataFrame def _combine_series_frame(self, other, func, fill_value=None, axis=None, level=None): """ Apply binary operator `func` to self, other using alignment and fill conventions determined by the fill_value, axis, and level kwargs. Parameters ---------- self : DataFrame other : Series func : binary operator fill_value : object, default None axis : {0, 1, 'columns', 'index', None}, default None level : int or None, default None Returns ------- result : DataFrame """ if fill_value is not None: raise NotImplementedError("fill_value {fill} not supported." .format(fill=fill_value)) if axis is not None: axis = self._get_axis_number(axis) if axis == 0: return self._combine_match_index(other, func, level=level) else: return self._combine_match_columns(other, func, level=level) else: if not len(other): return self * np.nan if not len(self): # Ambiguous case, use _series so works with DataFrame return self._constructor(data=self._series, index=self.index, columns=self.columns) # default axis is columns return self._combine_match_columns(other, func, level=level) def _align_method_FRAME(left, right, axis): """ convert rhs to meet lhs dims if input is list, tuple or np.ndarray """ def to_series(right): msg = ('Unable to coerce to Series, length must be {req_len}: ' 'given {given_len}') if axis is not None and left._get_axis_name(axis) == 'index': if len(left.index) != len(right): raise ValueError(msg.format(req_len=len(left.index), given_len=len(right))) right = left._constructor_sliced(right, index=left.index) else: if len(left.columns) != len(right): raise ValueError(msg.format(req_len=len(left.columns), given_len=len(right))) right = left._constructor_sliced(right, index=left.columns) return right if isinstance(right, np.ndarray): if right.ndim == 1: right = to_series(right) elif right.ndim == 2: if right.shape == left.shape: right = left._constructor(right, index=left.index, columns=left.columns) elif right.shape[0] == left.shape[0] and right.shape[1] == 1: # Broadcast across columns right = np.broadcast_to(right, left.shape) right = left._constructor(right, index=left.index, columns=left.columns) elif right.shape[1] == left.shape[1] and right.shape[0] == 1: # Broadcast along rows right = to_series(right[0, :]) else: raise ValueError("Unable to coerce to DataFrame, shape " "must be {req_shape}: given {given_shape}" 
.format(req_shape=left.shape, given_shape=right.shape)) elif right.ndim > 2: raise ValueError('Unable to coerce to Series/DataFrame, dim ' 'must be <= 2: {dim}'.format(dim=right.shape)) elif (is_list_like(right) and not isinstance(right, (ABCSeries, ABCDataFrame))): # GH17901 right = to_series(right) return right def _arith_method_FRAME(cls, op, special): str_rep = _get_opstr(op, cls) op_name = _get_op_name(op, special) eval_kwargs = _gen_eval_kwargs(op_name) fill_zeros = _gen_fill_zeros(op_name) default_axis = _get_frame_op_default_axis(op_name) def na_op(x, y): import pandas.core.computation.expressions as expressions try: result = expressions.evaluate(op, str_rep, x, y, **eval_kwargs) except TypeError: result = masked_arith_op(x, y, op) result = missing.fill_zeros(result, x, y, op_name, fill_zeros) return result if op_name in _op_descriptions: # i.e. include "add" but not "__add__" doc = _make_flex_doc(op_name, 'dataframe') else: doc = _arith_doc_FRAME % op_name @Appender(doc) def f(self, other, axis=default_axis, level=None, fill_value=None): other = _align_method_FRAME(self, other, axis) if isinstance(other, ABCDataFrame): # Another DataFrame pass_op = op if should_series_dispatch(self, other, op) else na_op return self._combine_frame(other, pass_op, fill_value, level) elif isinstance(other, ABCSeries): # For these values of `axis`, we end up dispatching to Series op, # so do not want the masked op. pass_op = op if axis in [0, "columns", None] else na_op return _combine_series_frame(self, other, pass_op, fill_value=fill_value, axis=axis, level=level) else: if fill_value is not None: self = self.fillna(fill_value) assert np.ndim(other) == 0 return self._combine_const(other, op) f.__name__ = op_name return f def _flex_comp_method_FRAME(cls, op, special): str_rep = _get_opstr(op, cls) op_name = _get_op_name(op, special) default_axis = _get_frame_op_default_axis(op_name) def na_op(x, y): try: with np.errstate(invalid='ignore'): result = op(x, y) except TypeError: result = mask_cmp_op(x, y, op) return result doc = _flex_comp_doc_FRAME.format(op_name=op_name, desc=_op_descriptions[op_name]['desc']) @Appender(doc) def f(self, other, axis=default_axis, level=None): other = _align_method_FRAME(self, other, axis) if isinstance(other, ABCDataFrame): # Another DataFrame if not self._indexed_same(other): self, other = self.align(other, 'outer', level=level, copy=False) return dispatch_to_series(self, other, na_op, str_rep) elif isinstance(other, ABCSeries): return _combine_series_frame(self, other, na_op, fill_value=None, axis=axis, level=level) else: assert np.ndim(other) == 0, other return self._combine_const(other, na_op) f.__name__ = op_name return f def _comp_method_FRAME(cls, func, special): str_rep = _get_opstr(func, cls) op_name = _get_op_name(func, special) @Appender('Wrapper for comparison method {name}'.format(name=op_name)) def f(self, other): other = _align_method_FRAME(self, other, axis=None) if isinstance(other, ABCDataFrame): # Another DataFrame if not self._indexed_same(other): raise ValueError('Can only compare identically-labeled ' 'DataFrame objects') return dispatch_to_series(self, other, func, str_rep) elif isinstance(other, ABCSeries): return _combine_series_frame(self, other, func, fill_value=None, axis=None, level=None) else: # straight boolean comparisons we want to allow all columns # (regardless of dtype to pass thru) See #4537 for discussion. 
res = self._combine_const(other, func) return res.fillna(True).astype(bool) f.__name__ = op_name return f # ----------------------------------------------------------------------------- # Sparse def _cast_sparse_series_op(left, right, opname): """ For SparseSeries operation, coerce to float64 if the result is expected to have NaN or inf values Parameters ---------- left : SparseArray right : SparseArray opname : str Returns ------- left : SparseArray right : SparseArray """ from pandas.core.sparse.api import SparseDtype opname = opname.strip('_') # TODO: This should be moved to the array? if is_integer_dtype(left) and is_integer_dtype(right): # series coerces to float64 if result should have NaN/inf if opname in ('floordiv', 'mod') and (right.to_dense() == 0).any(): left = left.astype(SparseDtype(np.float64, left.fill_value)) right = right.astype(SparseDtype(np.float64, right.fill_value)) elif opname in ('rfloordiv', 'rmod') and (left.to_dense() == 0).any(): left = left.astype(SparseDtype(np.float64, left.fill_value)) right = right.astype(SparseDtype(np.float64, right.fill_value)) return left, right def _arith_method_SPARSE_SERIES(cls, op, special): """ Wrapper function for Series arithmetic operations, to avoid code duplication. """ op_name = _get_op_name(op, special) def wrapper(self, other): if isinstance(other, ABCDataFrame): return NotImplemented elif isinstance(other, ABCSeries): if not isinstance(other, ABCSparseSeries): other = other.to_sparse(fill_value=self.fill_value) return _sparse_series_op(self, other, op, op_name) elif is_scalar(other): with np.errstate(all='ignore'): new_values = op(self.values, other) return self._constructor(new_values, index=self.index, name=self.name) else: # pragma: no cover raise TypeError('operation with {other} not supported' .format(other=type(other))) wrapper.__name__ = op_name return wrapper def _sparse_series_op(left, right, op, name): left, right = left.align(right, join='outer', copy=False) new_index = left.index new_name = get_op_result_name(left, right) from pandas.core.arrays.sparse import _sparse_array_op lvalues, rvalues = _cast_sparse_series_op(left.values, right.values, name) result = _sparse_array_op(lvalues, rvalues, op, name) return left._constructor(result, index=new_index, name=new_name) def _arith_method_SPARSE_ARRAY(cls, op, special): """ Wrapper function for Series arithmetic operations, to avoid code duplication. """ op_name = _get_op_name(op, special) def wrapper(self, other): from pandas.core.arrays.sparse.array import ( SparseArray, _sparse_array_op, _wrap_result, _get_fill) if isinstance(other, np.ndarray): if len(self) != len(other): raise AssertionError("length mismatch: {self} vs. {other}" .format(self=len(self), other=len(other))) if not isinstance(other, SparseArray): dtype = getattr(other, 'dtype', None) other = SparseArray(other, fill_value=self.fill_value, dtype=dtype) return _sparse_array_op(self, other, op, op_name) elif is_scalar(other): with np.errstate(all='ignore'): fill = op(_get_fill(self), np.asarray(other)) result = op(self.sp_values, other) return _wrap_result(op_name, result, self.sp_index, fill) else: # pragma: no cover raise TypeError('operation with {other} not supported' .format(other=type(other))) wrapper.__name__ = op_name return wrapper
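The flex wrappers generated above are what back calls such as Series.add(..., fill_value=...). A short usage sketch, with the expected values taken from the _add_example_SERIES and _sub_example_SERIES docstrings embedded in this module:

import numpy as np
import pandas as pd

a = pd.Series([1, 1, 1, np.nan], index=['a', 'b', 'c', 'd'])
b = pd.Series([1, np.nan, 1, np.nan], index=['a', 'b', 'd', 'e'])

# add() aligns on the union of the two indexes and substitutes fill_value
# where exactly one side is missing; positions missing on both sides ('e')
# stay NaN.
result = a.add(b, fill_value=0)
assert result['a'] == 2.0 and result['c'] == 1.0 and np.isnan(result['e'])

# add_flex_arithmetic_methods() also registers the long-form aliases
# multiply/subtract/divide next to mul/sub/div.
assert a.subtract(b, fill_value=0)['d'] == -1.0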
from copy import deepcopy from distutils.version import LooseVersion from operator import methodcaller import numpy as np import pytest import pandas.util._test_decorators as td import pandas as pd from pandas import DataFrame, MultiIndex, Series, date_range import pandas.util.testing as tm from pandas.util.testing import ( assert_almost_equal, assert_frame_equal, assert_series_equal) from .test_generic import Generic try: import xarray _XARRAY_INSTALLED = True except ImportError: _XARRAY_INSTALLED = False class TestDataFrame(Generic): _typ = DataFrame _comparator = lambda self, x, y: assert_frame_equal(x, y) def test_rename_mi(self): df = DataFrame([ 11, 21, 31 ], index=MultiIndex.from_tuples([("A", x) for x in ["a", "B", "c"]])) df.rename(str.lower) def test_set_axis_name(self): df = pd.DataFrame([[1, 2], [3, 4]]) funcs = ['_set_axis_name', 'rename_axis'] for func in funcs: result = methodcaller(func, 'foo')(df) assert df.index.name is None assert result.index.name == 'foo' result = methodcaller(func, 'cols', axis=1)(df) assert df.columns.name is None assert result.columns.name == 'cols' def test_set_axis_name_mi(self): df = DataFrame( np.empty((3, 3)), index=MultiIndex.from_tuples([("A", x) for x in list('aBc')]), columns=MultiIndex.from_tuples([('C', x) for x in list('xyz')]) ) level_names = ['L1', 'L2'] funcs = ['_set_axis_name', 'rename_axis'] for func in funcs: result = methodcaller(func, level_names)(df) assert result.index.names == level_names assert result.columns.names == [None, None] result = methodcaller(func, level_names, axis=1)(df) assert result.columns.names == ["L1", "L2"] assert result.index.names == [None, None] def test_nonzero_single_element(self): # allow single item via bool method df = DataFrame([[True]]) assert df.bool() df = DataFrame([[False]]) assert not df.bool() df = DataFrame([[False, False]]) with pytest.raises(ValueError): df.bool() with pytest.raises(ValueError): bool(df) def test_get_numeric_data_preserve_dtype(self): # get the numeric data o = DataFrame({'A': [1, '2', 3.]}) result = o._get_numeric_data() expected = DataFrame(index=[0, 1, 2], dtype=object) self._compare(result, expected) def test_metadata_propagation_indiv(self): # groupby df = DataFrame( {'A': ['foo', 'bar', 'foo', 'bar', 'foo', 'bar', 'foo', 'foo'], 'B': ['one', 'one', 'two', 'three', 'two', 'two', 'one', 'three'], 'C': np.random.randn(8), 'D': np.random.randn(8)}) result = df.groupby('A').sum() self.check_metadata(df, result) # resample df = DataFrame(np.random.randn(1000, 2), index=date_range('20130101', periods=1000, freq='s')) result = df.resample('1T') self.check_metadata(df, result) # merging with override # GH 6923 _metadata = DataFrame._metadata _finalize = DataFrame.__finalize__ np.random.seed(10) df1 = DataFrame(np.random.randint(0, 4, (3, 2)), columns=['a', 'b']) df2 = DataFrame(np.random.randint(0, 4, (3, 2)), columns=['c', 'd']) DataFrame._metadata = ['filename'] df1.filename = 'fname1.csv' df2.filename = 'fname2.csv' def finalize(self, other, method=None, **kwargs): for name in self._metadata: if method == 'merge': left, right = other.left, other.right value = getattr(left, name, '') + '|' + getattr(right, name, '') object.__setattr__(self, name, value) else: object.__setattr__(self, name, getattr(other, name, '')) return self DataFrame.__finalize__ = finalize result = df1.merge(df2, left_on=['a'], right_on=['c'], how='inner') assert result.filename == 'fname1.csv|fname2.csv' # concat # GH 6927 DataFrame._metadata = ['filename'] df1 = DataFrame(np.random.randint(0, 4, (3, 
2)), columns=list('ab')) df1.filename = 'foo' def finalize(self, other, method=None, **kwargs): for name in self._metadata: if method == 'concat': value = '+'.join([getattr( o, name) for o in other.objs if getattr(o, name, None) ]) object.__setattr__(self, name, value) else: object.__setattr__(self, name, getattr(other, name, None)) return self DataFrame.__finalize__ = finalize result = pd.concat([df1, df1]) assert result.filename == 'foo+foo' # reset DataFrame._metadata = _metadata DataFrame.__finalize__ = _finalize def test_set_attribute(self): # Test for consistent setattr behavior when an attribute and a column # have the same name (Issue #8994) df = DataFrame({'x': [1, 2, 3]}) df.y = 2 df['y'] = [2, 4, 6] df.y = 5 assert df.y == 5 assert_series_equal(df['y'], Series([2, 4, 6], name='y')) @pytest.mark.skipif(not _XARRAY_INSTALLED or _XARRAY_INSTALLED and LooseVersion(xarray.__version__) < LooseVersion('0.10.0'), reason='xarray >= 0.10.0 required') @pytest.mark.parametrize( "index", ['FloatIndex', 'IntIndex', 'StringIndex', 'UnicodeIndex', 'DateIndex', 'PeriodIndex', 'CategoricalIndex', 'TimedeltaIndex']) def test_to_xarray_index_types(self, index): from xarray import Dataset index = getattr(tm, 'make{}'.format(index)) df = DataFrame({'a': list('abc'), 'b': list(range(1, 4)), 'c': np.arange(3, 6).astype('u1'), 'd': np.arange(4.0, 7.0, dtype='float64'), 'e': [True, False, True], 'f': pd.Categorical(list('abc')), 'g': pd.date_range('20130101', periods=3), 'h': pd.date_range('20130101', periods=3, tz='US/Eastern')} ) df.index = index(3) df.index.name = 'foo' df.columns.name = 'bar' result = df.to_xarray() assert result.dims['foo'] == 3 assert len(result.coords) == 1 assert len(result.data_vars) == 8 assert_almost_equal(list(result.coords.keys()), ['foo']) assert isinstance(result, Dataset) # idempotency # categoricals are not preserved # datetimes w/tz are not preserved # column names are lost expected = df.copy() expected['f'] = expected['f'].astype(object) expected['h'] = expected['h'].astype('datetime64[ns]') expected.columns.name = None assert_frame_equal(result.to_dataframe(), expected, check_index_type=False, check_categorical=False) @td.skip_if_no('xarray', min_version='0.7.0') def test_to_xarray(self): from xarray import Dataset df = DataFrame({'a': list('abc'), 'b': list(range(1, 4)), 'c': np.arange(3, 6).astype('u1'), 'd': np.arange(4.0, 7.0, dtype='float64'), 'e': [True, False, True], 'f': pd.Categorical(list('abc')), 'g': pd.date_range('20130101', periods=3), 'h': pd.date_range('20130101', periods=3, tz='US/Eastern')} ) df.index.name = 'foo' result = df[0:0].to_xarray() assert result.dims['foo'] == 0 assert isinstance(result, Dataset) # available in 0.7.1 # MultiIndex df.index = pd.MultiIndex.from_product([['a'], range(3)], names=['one', 'two']) result = df.to_xarray() assert result.dims['one'] == 1 assert result.dims['two'] == 3 assert len(result.coords) == 2 assert len(result.data_vars) == 8 assert_almost_equal(list(result.coords.keys()), ['one', 'two']) assert isinstance(result, Dataset) result = result.to_dataframe() expected = df.copy() expected['f'] = expected['f'].astype(object) expected['h'] = expected['h'].astype('datetime64[ns]') expected.columns.name = None assert_frame_equal(result, expected, check_index_type=False) def test_deepcopy_empty(self): # This test covers empty frame copying with non-empty column sets # as reported in issue GH15370 empty_frame = DataFrame(data=[], index=[], columns=['A']) empty_frame_copy = deepcopy(empty_frame) 
self._compare(empty_frame_copy, empty_frame)
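# Sketch of the _metadata / __finalize__ pattern exercised in
# test_metadata_propagation_indiv above, shown here with a subclass instead of
# monkey-patching DataFrame; the class and attribute names are illustrative.
import pandas as pd

class TaggedFrame(pd.DataFrame):
    # attributes listed in _metadata are carried over by __finalize__
    _metadata = ['filename']

    @property
    def _constructor(self):
        return TaggedFrame

tagged = TaggedFrame({'a': [1, 2, 3]})
tagged.filename = 'fname1.csv'
# copy() calls __finalize__, so the metadata attribute travels with the result
assert tagged.copy().filename == 'fname1.csv'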
cbertinato/pandas
pandas/tests/generic/test_frame.py
pandas/core/ops.py
import requests import json import httplib as http import celery from celery.utils.log import get_task_logger from framework.celery_tasks import app as celery_app from framework.celery_tasks.utils import logged from framework.exceptions import HTTPError from api.base.utils import waterbutler_api_url_for from website.archiver import ( ARCHIVER_SUCCESS, ARCHIVER_FAILURE, ARCHIVER_SIZE_EXCEEDED, ARCHIVER_NETWORK_ERROR, ARCHIVER_FILE_NOT_FOUND, ARCHIVER_UNCAUGHT_ERROR, NO_ARCHIVE_LIMIT, AggregateStatResult, ) from website.archiver import utils from website.archiver import signals as archiver_signals from website.project import signals as project_signals from website import settings from website.app import init_addons from osf.models import ( ArchiveJob, AbstractNode, DraftRegistration, ) def create_app_context(): try: init_addons(settings) except AssertionError: # ignore AssertionErrors pass logger = get_task_logger(__name__) class ArchiverSizeExceeded(Exception): def __init__(self, result, *args, **kwargs): super(ArchiverSizeExceeded, self).__init__(*args, **kwargs) self.result = result class ArchiverStateError(Exception): def __init__(self, info, *args, **kwargs): super(ArchiverStateError, self).__init__(*args, **kwargs) self.info = info class ArchivedFileNotFound(Exception): def __init__(self, registration, missing_files, *args, **kwargs): super(ArchivedFileNotFound, self).__init__(*args, **kwargs) self.draft_registration = DraftRegistration.objects.get(registered_node=registration) self.missing_files = missing_files class ArchiverTask(celery.Task): abstract = True max_retries = 0 ignore_result = False def on_failure(self, exc, task_id, args, kwargs, einfo): job = ArchiveJob.load(kwargs.get('job_pk')) if not job: raise ArchiverStateError({ 'exception': exc, 'args': args, 'kwargs': kwargs, 'einfo': einfo, }) if job.status == ARCHIVER_FAILURE: # already captured return src, dst, user = job.info() errors = [] if isinstance(exc, ArchiverSizeExceeded): dst.archive_status = ARCHIVER_SIZE_EXCEEDED errors = exc.result elif isinstance(exc, HTTPError): dst.archive_status = ARCHIVER_NETWORK_ERROR errors = [ each for each in dst.archive_job.target_info() if each is not None ] elif isinstance(exc, ArchivedFileNotFound): dst.archive_status = ARCHIVER_FILE_NOT_FOUND errors = { 'missing_files': exc.missing_files, 'draft': exc.draft_registration } else: dst.archive_status = ARCHIVER_UNCAUGHT_ERROR errors = [einfo] if einfo else [] dst.save() archiver_signals.archive_fail.send(dst, errors=errors) @celery_app.task(base=ArchiverTask, ignore_result=False) @logged('stat_addon') def stat_addon(addon_short_name, job_pk): """Collect metadata about the file tree of a given addon :param addon_short_name: AddonConfig.short_name of the addon to be examined :param job_pk: primary key of archive_job :return: AggregateStatResult containing file tree metadata """ # Dataverse reqires special handling for draft and # published content addon_name = addon_short_name version = None if 'dataverse' in addon_short_name: addon_name = 'dataverse' version = 'latest' if addon_short_name.split('-')[-1] == 'draft' else 'latest-published' create_app_context() job = ArchiveJob.load(job_pk) src, dst, user = job.info() src_addon = src.get_addon(addon_name) if hasattr(src_addon, 'configured') and not src_addon.configured: # Addon enabled but not configured - no file trees, nothing to archive. 
return AggregateStatResult(src_addon._id, addon_short_name) try: file_tree = src_addon._get_file_tree(user=user, version=version) except HTTPError as e: dst.archive_job.update_target( addon_short_name, ARCHIVER_NETWORK_ERROR, errors=[e.data['error']], ) raise result = AggregateStatResult( src_addon._id, addon_short_name, targets=[utils.aggregate_file_tree_metadata(addon_short_name, file_tree, user)], ) return result @celery_app.task(base=ArchiverTask, ignore_result=False) @logged('make_copy_request') def make_copy_request(job_pk, url, data): """Make the copy request to the WaterButler API and handle successful and failed responses :param job_pk: primary key of ArchiveJob :param url: URL to send request to :param data: <dict> of settings to send in POST to WaterButler API :return: None """ create_app_context() job = ArchiveJob.load(job_pk) src, dst, user = job.info() logger.info('Sending copy request for addon: {0} on node: {1}'.format(data['provider'], dst._id)) res = requests.post(url, data=json.dumps(data)) if res.status_code not in (http.OK, http.CREATED, http.ACCEPTED): raise HTTPError(res.status_code) def make_waterbutler_payload(dst_id, rename): return { 'action': 'copy', 'path': '/', 'rename': rename.replace('/', '-'), 'resource': dst_id, 'provider': settings.ARCHIVE_PROVIDER, } @celery_app.task(base=ArchiverTask, ignore_result=False) @logged('archive_addon') def archive_addon(addon_short_name, job_pk): """Archive the contents of an addon by making a copy request to the WaterButler API :param addon_short_name: AddonConfig.short_name of the addon to be archived :param job_pk: primary key of ArchiveJob :return: None """ create_app_context() job = ArchiveJob.load(job_pk) src, dst, user = job.info() logger.info('Archiving addon: {0} on node: {1}'.format(addon_short_name, src._id)) cookie = user.get_or_create_cookie() params = {'cookie': cookie} rename_suffix = '' # The dataverse API will not differentiate between published and draft files # unless explicitly asked. We need to create separate folders for published and # draft in the resulting archive. # # Additionally, trying to run the archive without this distinction creates a race # condition that non-deterministically causes archive jobs to fail. 
if 'dataverse' in addon_short_name: params['revision'] = 'latest' if addon_short_name.split('-')[-1] == 'draft' else 'latest-published' rename_suffix = ' (draft)' if addon_short_name.split('-')[-1] == 'draft' else ' (published)' addon_short_name = 'dataverse' src_provider = src.get_addon(addon_short_name) folder_name = src_provider.archive_folder_name rename = '{}{}'.format(folder_name, rename_suffix) url = waterbutler_api_url_for(src._id, addon_short_name, _internal=True, base_url=src.osfstorage_region.waterbutler_url, **params) data = make_waterbutler_payload(dst._id, rename) make_copy_request.delay(job_pk=job_pk, url=url, data=data) @celery_app.task(base=ArchiverTask, ignore_result=False) @logged('archive_node') def archive_node(stat_results, job_pk): """First use the results of #stat_node to check disk usage of the initiated registration, then either fail the registration or create a celery.group group of subtasks to archive addons :param results: results from the #stat_addon subtasks spawned in #stat_node :param job_pk: primary key of ArchiveJob :return: None """ create_app_context() job = ArchiveJob.load(job_pk) src, dst, user = job.info() logger.info('Archiving node: {0}'.format(src._id)) if not isinstance(stat_results, list): stat_results = [stat_results] stat_result = AggregateStatResult( dst._id, dst.title, targets=stat_results ) if (NO_ARCHIVE_LIMIT not in job.initiator.system_tags) and (stat_result.disk_usage > settings.MAX_ARCHIVE_SIZE): raise ArchiverSizeExceeded(result=stat_result) else: if not stat_result.targets: job.status = ARCHIVER_SUCCESS job.save() for result in stat_result.targets: if not result['num_files']: job.update_target(result['target_name'], ARCHIVER_SUCCESS) else: archive_addon.delay( addon_short_name=result['target_name'], job_pk=job_pk ) project_signals.archive_callback.send(dst) def archive(job_pk): """Starts a celery.chord that runs stat_addon for each complete addon attached to the Node, then runs #archive_node with the result :param job_pk: primary key of ArchiveJob :return: None """ create_app_context() job = ArchiveJob.load(job_pk) src, dst, user = job.info() logger = get_task_logger(__name__) logger.info('Received archive task for Node: {0} into Node: {1}'.format(src._id, dst._id)) return celery.chain( [ celery.group([ stat_addon.si( addon_short_name=target.name, job_pk=job_pk, ) for target in job.target_addons.all() ]), archive_node.s( job_pk=job_pk ) ] ) @celery_app.task(base=ArchiverTask, ignore_result=False) @logged('archive_success') def archive_success(dst_pk, job_pk): """Archiver's final callback. For the time being the use case for this task is to rewrite references to files selected in a registration schema (the Prereg Challenge being the first to expose this feature). The created references point to files on the registered_from Node (needed for previewing schema data), and must be re-associated with the corresponding files in the newly created registration. :param str dst_pk: primary key of registration Node note:: At first glance this task makes redundant calls to utils.get_file_map (which returns a generator yielding (<sha256>, <file_metadata>) pairs) on the dst Node. Two notes about utils.get_file_map: 1) this function memoizes previous results to reduce overhead and 2) this function returns a generator that lazily fetches the file metadata of child Nodes (it is possible for a selected file to belong to a child Node) using a non-recursive DFS. Combined this allows for a relatively effient implementation with seemingly redundant calls. 
""" create_app_context() dst = AbstractNode.load(dst_pk) # The filePicker extension addded with the Prereg Challenge registration schema # allows users to select files in OSFStorage as their response to some schema # questions. These files are references to files on the unregistered Node, and # consequently we must migrate those file paths after archiver has run. Using # sha256 hashes is a convenient way to identify files post-archival. for schema in dst.registered_schema.all(): if schema.has_files: utils.migrate_file_metadata(dst, schema) job = ArchiveJob.load(job_pk) if not job.sent: job.sent = True job.save() dst.sanction.ask(dst.get_active_contributors_recursive(unique_users=True))
#!/usr/bin/env python # -*- coding: utf-8 -*- """Functional tests using WebTest.""" import datetime as dt import httplib as http import logging import unittest import markupsafe import mock import pytest from nose.tools import * # flake8: noqa (PEP8 asserts) import re from addons.wiki.utils import to_mongo_key from framework.auth import exceptions as auth_exc from framework.auth.core import Auth from tests.base import OsfTestCase from tests.base import fake from osf_tests.factories import ( fake_email, AuthUserFactory, NodeFactory, PreprintFactory, PreprintProviderFactory, PrivateLinkFactory, ProjectFactory, RegistrationFactory, SubjectFactory, UserFactory, UnconfirmedUserFactory, UnregUserFactory, ) from addons.wiki.models import WikiPage, WikiVersion from addons.wiki.tests.factories import WikiFactory, WikiVersionFactory from website import settings, language from addons.osfstorage.models import OsfStorageFile from website.util import web_url_for, api_url_for from api_tests import utils as test_utils logging.getLogger('website.project.model').setLevel(logging.ERROR) def assert_in_html(member, container, **kwargs): """Looks for the specified member in markupsafe-escaped HTML output""" member = markupsafe.escape(member) return assert_in(member, container, **kwargs) def assert_not_in_html(member, container, **kwargs): """Looks for the specified member in markupsafe-escaped HTML output""" member = markupsafe.escape(member) return assert_not_in(member, container, **kwargs) class TestDisabledUser(OsfTestCase): def setUp(self): super(TestDisabledUser, self).setUp() self.user = UserFactory() self.user.set_password('Korben Dallas') self.user.is_disabled = True self.user.save() def test_profile_disabled_returns_401(self): res = self.app.get(self.user.url, expect_errors=True) assert_equal(res.status_code, 410) class TestAnUnregisteredUser(OsfTestCase): def test_cant_see_profile_if_not_logged_in(self): url = web_url_for('profile_view') res = self.app.get(url) res = res.follow() assert_equal(res.status_code, 301) assert_in('/login/', res.headers['Location']) @pytest.mark.enable_bookmark_creation @pytest.mark.enable_quickfiles_creation class TestAUser(OsfTestCase): def setUp(self): super(TestAUser, self).setUp() self.user = AuthUserFactory() self.auth = self.user.auth def test_can_see_profile_url(self): res = self.app.get(self.user.url).maybe_follow() assert_in(self.user.url, res) # `GET /login/` without parameters is redirected to `/dashboard/` page which has `@must_be_logged_in` decorator # if user is not logged in, she/he is further redirected to CAS login page def test_is_redirected_to_cas_if_not_logged_in_at_login_page(self): res = self.app.get('/login/').follow() assert_equal(res.status_code, 302) location = res.headers.get('Location') assert_in('login?service=', location) def test_is_redirected_to_dashboard_if_already_logged_in_at_login_page(self): res = self.app.get('/login/', auth=self.user.auth) assert_equal(res.status_code, 302) assert 'dashboard' in res.headers.get('Location') def test_register_page(self): res = self.app.get('/register/') assert_equal(res.status_code, 200) def test_is_redirected_to_dashboard_if_already_logged_in_at_register_page(self): res = self.app.get('/register/', auth=self.user.auth) assert_equal(res.status_code, 302) assert 'dashboard' in res.headers.get('Location') def test_sees_projects_in_her_dashboard(self): # the user already has a project project = ProjectFactory(creator=self.user) project.add_contributor(self.user) project.save() res = 
self.app.get('/myprojects/', auth=self.user.auth) assert_in('Projects', res) # Projects heading def test_does_not_see_osffiles_in_user_addon_settings(self): res = self.app.get('/settings/addons/', auth=self.auth, auto_follow=True) assert_not_in('OSF Storage', res) def test_sees_osffiles_in_project_addon_settings(self): project = ProjectFactory(creator=self.user) project.add_contributor( self.user, permissions=['read', 'write', 'admin'], save=True) res = self.app.get('/{0}/addons/'.format(project._primary_key), auth=self.auth, auto_follow=True) assert_in('OSF Storage', res) def test_sees_correct_title_on_dashboard(self): # User goes to dashboard res = self.app.get('/myprojects/', auth=self.auth, auto_follow=True) title = res.html.title.string assert_equal('OSF | My Projects', title) def test_can_see_make_public_button_if_admin(self): # User is a contributor on a project project = ProjectFactory() project.add_contributor( self.user, permissions=['read', 'write', 'admin'], save=True) # User goes to the project page res = self.app.get(project.url, auth=self.auth).maybe_follow() assert_in('Make Public', res) def test_cant_see_make_public_button_if_not_admin(self): # User is a contributor on a project project = ProjectFactory() project.add_contributor( self.user, permissions=['read', 'write'], save=True) # User goes to the project page res = self.app.get(project.url, auth=self.auth).maybe_follow() assert_not_in('Make Public', res) def test_can_see_make_private_button_if_admin(self): # User is a contributor on a project project = ProjectFactory(is_public=True) project.add_contributor( self.user, permissions=['read', 'write', 'admin'], save=True) # User goes to the project page res = self.app.get(project.url, auth=self.auth).maybe_follow() assert_in('Make Private', res) def test_cant_see_make_private_button_if_not_admin(self): # User is a contributor on a project project = ProjectFactory(is_public=True) project.add_contributor( self.user, permissions=['read', 'write'], save=True) # User goes to the project page res = self.app.get(project.url, auth=self.auth).maybe_follow() assert_not_in('Make Private', res) def test_sees_logs_on_a_project(self): project = ProjectFactory(is_public=True) # User goes to the project's page res = self.app.get(project.url, auth=self.auth).maybe_follow() # Can see log event assert_in('created', res) def test_no_wiki_content_message(self): project = ProjectFactory(creator=self.user) # Goes to project's wiki, where there is no content res = self.app.get('/{0}/wiki/home/'.format(project._primary_key), auth=self.auth) # Sees a message indicating no content assert_in('Add important information, links, or images here to describe your project.', res) # Sees that edit panel is open by default when home wiki has no content assert_in('panelsUsed: ["view", "menu", "edit"]', res) def test_wiki_content(self): project = ProjectFactory(creator=self.user) wiki_page_name = 'home' wiki_content = 'Kittens' wiki_page = WikiFactory( user=self.user, node=project, ) wiki = WikiVersionFactory( wiki_page=wiki_page, content=wiki_content ) res = self.app.get('/{0}/wiki/{1}/'.format( project._primary_key, wiki_page_name, ), auth=self.auth) assert_not_in('Add important information, links, or images here to describe your project.', res) assert_in(wiki_content, res) assert_in('panelsUsed: ["view", "menu"]', res) def test_wiki_page_name_non_ascii(self): project = ProjectFactory(creator=self.user) non_ascii = to_mongo_key('WöRlÐé') WikiPage.objects.create_for_node(project, 'WöRlÐé', 'new content', 
Auth(self.user)) wv = WikiVersion.objects.get_for_node(project, non_ascii) assert wv.wiki_page.page_name.upper() == non_ascii.decode('utf-8').upper() def test_noncontributor_cannot_see_wiki_if_no_content(self): user2 = UserFactory() # user2 creates a public project and adds no wiki content project = ProjectFactory(creator=user2, is_public=True) # self navigates to project res = self.app.get(project.url).maybe_follow() # Should not see wiki widget (since non-contributor and no content) assert_not_in('Add important information, links, or images here to describe your project.', res) def test_wiki_does_not_exist(self): project = ProjectFactory(creator=self.user) res = self.app.get('/{0}/wiki/{1}/'.format( project._primary_key, 'not a real page yet', ), auth=self.auth, expect_errors=True) assert_in('Add important information, links, or images here to describe your project.', res) def test_sees_own_profile(self): res = self.app.get('/profile/', auth=self.auth) td1 = res.html.find('td', text=re.compile(r'Public(.*?)Profile')) td2 = td1.find_next_sibling('td') assert_equal(td2.text, self.user.display_absolute_url) def test_sees_another_profile(self): user2 = UserFactory() res = self.app.get(user2.url, auth=self.auth) td1 = res.html.find('td', text=re.compile(r'Public(.*?)Profile')) td2 = td1.find_next_sibling('td') assert_equal(td2.text, user2.display_absolute_url) @pytest.mark.enable_bookmark_creation class TestComponents(OsfTestCase): def setUp(self): super(TestComponents, self).setUp() self.user = AuthUserFactory() self.consolidate_auth = Auth(user=self.user) self.project = ProjectFactory(creator=self.user) self.project.add_contributor(contributor=self.user, auth=self.consolidate_auth) # A non-project componenet self.component = NodeFactory( category='hypothesis', creator=self.user, parent=self.project, ) self.component.save() self.component.set_privacy('public', self.consolidate_auth) self.component.set_privacy('private', self.consolidate_auth) self.project.save() self.project_url = self.project.web_url_for('view_project') def test_sees_parent(self): res = self.app.get(self.component.url, auth=self.user.auth).maybe_follow() parent_title = res.html.find_all('h2', class_='node-parent-title') assert_equal(len(parent_title), 1) assert_in(self.project.title, parent_title[0].text) # Bs4 will handle unescaping HTML here def test_delete_project(self): res = self.app.get( self.component.url + 'settings/', auth=self.user.auth ).maybe_follow() assert_in( 'Delete {0}'.format(self.component.project_or_component), res ) def test_cant_delete_project_if_not_admin(self): non_admin = AuthUserFactory() self.component.add_contributor( non_admin, permissions=['read', 'write'], auth=self.consolidate_auth, save=True, ) res = self.app.get( self.component.url + 'settings/', auth=non_admin.auth ).maybe_follow() assert_not_in( 'Delete {0}'.format(self.component.project_or_component), res ) def test_can_configure_comments_if_admin(self): res = self.app.get( self.component.url + 'settings/', auth=self.user.auth, ).maybe_follow() assert_in('Commenting', res) def test_cant_configure_comments_if_not_admin(self): non_admin = AuthUserFactory() self.component.add_contributor( non_admin, permissions=['read', 'write'], auth=self.consolidate_auth, save=True, ) res = self.app.get( self.component.url + 'settings/', auth=non_admin.auth ).maybe_follow() assert_not_in('Commenting', res) def test_components_should_have_component_list(self): res = self.app.get(self.component.url, auth=self.user.auth) assert_in('Components', res) 
@pytest.mark.enable_bookmark_creation class TestPrivateLinkView(OsfTestCase): def setUp(self): super(TestPrivateLinkView, self).setUp() self.user = AuthUserFactory() # Is NOT a contributor self.project = ProjectFactory(is_public=False) self.link = PrivateLinkFactory(anonymous=True) self.link.nodes.add(self.project) self.link.save() self.project_url = self.project.web_url_for('view_project') def test_anonymous_link_hide_contributor(self): res = self.app.get(self.project_url, {'view_only': self.link.key}) assert_in('Anonymous Contributors', res.body) assert_not_in(self.user.fullname, res) def test_anonymous_link_hides_citations(self): res = self.app.get(self.project_url, {'view_only': self.link.key}) assert_not_in('Citation:', res) def test_no_warning_for_read_only_user_with_valid_link(self): link2 = PrivateLinkFactory(anonymous=False) link2.nodes.add(self.project) link2.save() self.project.add_contributor( self.user, permissions=['read'], save=True, ) res = self.app.get(self.project_url, {'view_only': link2.key}, auth=self.user.auth) assert_not_in( 'is being viewed through a private, view-only link. ' 'Anyone with the link can view this project. Keep ' 'the link safe.', res.body ) def test_no_warning_for_read_only_user_with_invalid_link(self): self.project.add_contributor( self.user, permissions=['read'], save=True, ) res = self.app.get(self.project_url, {'view_only': 'not_valid'}, auth=self.user.auth) assert_not_in( 'is being viewed through a private, view-only link. ' 'Anyone with the link can view this project. Keep ' 'the link safe.', res.body ) @pytest.mark.enable_bookmark_creation @pytest.mark.enable_quickfiles_creation class TestMergingAccounts(OsfTestCase): def setUp(self): super(TestMergingAccounts, self).setUp() self.user = UserFactory.build() self.user.fullname = "tess' test string" self.user.set_password('science') self.user.save() self.dupe = UserFactory.build() self.dupe.set_password('example') self.dupe.save() def test_merged_user_is_not_shown_as_a_contributor(self): project = ProjectFactory(is_public=True) # Both the master and dupe are contributors project.add_contributor(self.dupe, log=False) project.add_contributor(self.user, log=False) project.save() # At the project page, both are listed as contributors res = self.app.get(project.url).maybe_follow() assert_in_html(self.user.fullname, res) assert_in_html(self.dupe.fullname, res) # The accounts are merged self.user.merge_user(self.dupe) self.user.save() # Now only the master user is shown at the project page res = self.app.get(project.url).maybe_follow() assert_in_html(self.user.fullname, res) assert_true(self.dupe.is_merged) assert_not_in(self.dupe.fullname, res) def test_merged_user_has_alert_message_on_profile(self): # Master merges dupe self.user.merge_user(self.dupe) self.user.save() # At the dupe user's profile there is an alert message at the top # indicating that the user is merged res = self.app.get('/profile/{0}/'.format(self.dupe._primary_key)).maybe_follow() assert_in('This account has been merged', res) @pytest.mark.enable_bookmark_creation class TestShortUrls(OsfTestCase): def setUp(self): super(TestShortUrls, self).setUp() self.user = AuthUserFactory() self.auth = self.user.auth self.consolidate_auth = Auth(user=self.user) self.project = ProjectFactory(creator=self.user) # A non-project componenet self.component = NodeFactory(parent=self.project, category='hypothesis', creator=self.user) # Hack: Add some logs to component; should be unnecessary pending # improvements to factories from @rliebz 
self.component.set_privacy('public', auth=self.consolidate_auth) self.component.set_privacy('private', auth=self.consolidate_auth) self.wiki = WikiFactory( user=self.user, node=self.component, ) def _url_to_body(self, url): return self.app.get( url, auth=self.auth ).maybe_follow( auth=self.auth, ).normal_body def test_project_url(self): assert_equal( self._url_to_body(self.project.deep_url), self._url_to_body(self.project.url), ) def test_component_url(self): assert_equal( self._url_to_body(self.component.deep_url), self._url_to_body(self.component.url), ) def test_wiki_url(self): assert_equal( self._url_to_body(self.wiki.deep_url), self._url_to_body(self.wiki.url), ) @pytest.mark.enable_bookmark_creation @pytest.mark.enable_implicit_clean class TestClaiming(OsfTestCase): def setUp(self): super(TestClaiming, self).setUp() self.referrer = AuthUserFactory() self.project = ProjectFactory(creator=self.referrer, is_public=True) def test_correct_name_shows_in_contributor_list(self): name1, email = fake.name(), fake_email() UnregUserFactory(fullname=name1, email=email) name2, email = fake.name(), fake_email() # Added with different name self.project.add_unregistered_contributor(fullname=name2, email=email, auth=Auth(self.referrer)) self.project.save() res = self.app.get(self.project.url, auth=self.referrer.auth) # Correct name is shown assert_in_html(name2, res) assert_not_in(name1, res) def test_user_can_set_password_on_claim_page(self): name, email = fake.name(), fake_email() new_user = self.project.add_unregistered_contributor( email=email, fullname=name, auth=Auth(self.referrer) ) self.project.save() claim_url = new_user.get_claim_url(self.project._primary_key) res = self.app.get(claim_url) self.project.reload() assert_in('Set Password', res) form = res.forms['setPasswordForm'] #form['username'] = new_user.username #Removed as long as E-mail can't be updated. 
form['password'] = 'killerqueen' form['password2'] = 'killerqueen' res = form.submit().follow() new_user.reload() assert_true(new_user.check_password('killerqueen')) def test_sees_is_redirected_if_user_already_logged_in(self): name, email = fake.name(), fake_email() new_user = self.project.add_unregistered_contributor( email=email, fullname=name, auth=Auth(self.referrer) ) self.project.save() existing = AuthUserFactory() claim_url = new_user.get_claim_url(self.project._primary_key) # a user is already logged in res = self.app.get(claim_url, auth=existing.auth, expect_errors=True) assert_equal(res.status_code, 302) def test_unregistered_users_names_are_project_specific(self): name1, name2, email = fake.name(), fake.name(), fake_email() project2 = ProjectFactory(creator=self.referrer) # different projects use different names for the same unreg contributor self.project.add_unregistered_contributor( email=email, fullname=name1, auth=Auth(self.referrer) ) self.project.save() project2.add_unregistered_contributor( email=email, fullname=name2, auth=Auth(self.referrer) ) project2.save() self.app.authenticate(*self.referrer.auth) # Each project displays a different name in the contributor list res = self.app.get(self.project.url) assert_in_html(name1, res) res2 = self.app.get(project2.url) assert_in_html(name2, res2) @unittest.skip('as long as E-mails cannot be changed') def test_cannot_set_email_to_a_user_that_already_exists(self): reg_user = UserFactory() name, email = fake.name(), fake_email() new_user = self.project.add_unregistered_contributor( email=email, fullname=name, auth=Auth(self.referrer) ) self.project.save() # Goes to claim url and successfully claims account claim_url = new_user.get_claim_url(self.project._primary_key) res = self.app.get(claim_url) self.project.reload() assert_in('Set Password', res) form = res.forms['setPasswordForm'] # Fills out an email that is the username of another user form['username'] = reg_user.username form['password'] = 'killerqueen' form['password2'] = 'killerqueen' res = form.submit().maybe_follow(expect_errors=True) assert_in( language.ALREADY_REGISTERED.format(email=reg_user.username), res ) def test_correct_display_name_is_shown_at_claim_page(self): original_name = fake.name() unreg = UnregUserFactory(fullname=original_name) different_name = fake.name() new_user = self.project.add_unregistered_contributor( email=unreg.username, fullname=different_name, auth=Auth(self.referrer), ) self.project.save() claim_url = new_user.get_claim_url(self.project._primary_key) res = self.app.get(claim_url) # Correct name (different_name) should be on page assert_in_html(different_name, res) class TestConfirmingEmail(OsfTestCase): def setUp(self): super(TestConfirmingEmail, self).setUp() self.user = UnconfirmedUserFactory() self.confirmation_url = self.user.get_confirmation_url( self.user.username, external=False, ) self.confirmation_token = self.user.get_confirmation_token( self.user.username ) def test_cannot_remove_another_user_email(self): user1 = AuthUserFactory() user2 = AuthUserFactory() url = api_url_for('update_user') header = {'id': user1.username, 'emails': [{'address': user1.username}]} res = self.app.put_json(url, header, auth=user2.auth, expect_errors=True) assert_equal(res.status_code, 403) def test_cannnot_make_primary_email_for_another_user(self): user1 = AuthUserFactory() user2 = AuthUserFactory() email = 'test@cos.io' user1.emails.create(address=email) user1.save() url = api_url_for('update_user') header = {'id': user1.username, 'emails': 
[{'address': user1.username, 'primary': False, 'confirmed': True}, {'address': email, 'primary': True, 'confirmed': True} ]} res = self.app.put_json(url, header, auth=user2.auth, expect_errors=True) assert_equal(res.status_code, 403) def test_cannnot_add_email_for_another_user(self): user1 = AuthUserFactory() user2 = AuthUserFactory() email = 'test@cos.io' url = api_url_for('update_user') header = {'id': user1.username, 'emails': [{'address': user1.username, 'primary': True, 'confirmed': True}, {'address': email, 'primary': False, 'confirmed': False} ]} res = self.app.put_json(url, header, auth=user2.auth, expect_errors=True) assert_equal(res.status_code, 403) def test_error_page_if_confirm_link_is_used(self): self.user.confirm_email(self.confirmation_token) self.user.save() res = self.app.get(self.confirmation_url, expect_errors=True) assert_in(auth_exc.InvalidTokenError.message_short, res) assert_equal(res.status_code, http.BAD_REQUEST) @pytest.mark.enable_implicit_clean @pytest.mark.enable_bookmark_creation class TestClaimingAsARegisteredUser(OsfTestCase): def setUp(self): super(TestClaimingAsARegisteredUser, self).setUp() self.referrer = AuthUserFactory() self.project = ProjectFactory(creator=self.referrer, is_public=True) name, email = fake.name(), fake_email() self.user = self.project.add_unregistered_contributor( fullname=name, email=email, auth=Auth(user=self.referrer) ) self.project.save() def test_claim_user_registered_with_correct_password(self): reg_user = AuthUserFactory() # NOTE: AuthUserFactory sets password as 'queenfan86' url = self.user.get_claim_url(self.project._primary_key) # Follow to password re-enter page res = self.app.get(url, auth=reg_user.auth).follow(auth=reg_user.auth) # verify that the "Claim Account" form is returned assert_in('Claim Contributor', res.body) form = res.forms['claimContributorForm'] form['password'] = 'queenfan86' res = form.submit(auth=reg_user.auth) res = res.follow(auth=reg_user.auth) self.project.reload() self.user.reload() # user is now a contributor to the project assert_in(reg_user, self.project.contributors) # the unregistered user (self.user) is removed as a contributor, and their assert_not_in(self.user, self.project.contributors) # unclaimed record for the project has been deleted assert_not_in(self.project, self.user.unclaimed_records) @pytest.mark.enable_implicit_clean class TestExplorePublicActivity(OsfTestCase): def setUp(self): super(TestExplorePublicActivity, self).setUp() self.project = ProjectFactory(is_public=True) self.registration = RegistrationFactory(project=self.project) self.private_project = ProjectFactory(title='Test private project') self.popular_project = ProjectFactory(is_public=True) self.popular_registration = RegistrationFactory(project=self.project, is_public=True) # Add project to new and noteworthy projects self.new_and_noteworthy_links_node = ProjectFactory(is_public=True) self.new_and_noteworthy_links_node._id = settings.NEW_AND_NOTEWORTHY_LINKS_NODE self.new_and_noteworthy_links_node.add_pointer(self.project, auth=Auth(self.new_and_noteworthy_links_node.creator), save=True) # Set up popular projects and registrations self.popular_links_node = ProjectFactory(is_public=True) settings.POPULAR_LINKS_NODE = self.popular_links_node._id self.popular_links_node.add_pointer(self.popular_project, auth=Auth(self.popular_links_node.creator), save=True) self.popular_links_registrations = ProjectFactory(is_public=True) settings.POPULAR_LINKS_REGISTRATIONS = self.popular_links_registrations._id 
self.popular_links_registrations.add_pointer(self.popular_registration, auth=Auth(self.popular_links_registrations.creator), save=True) def test_explore_page_loads_when_settings_not_configured(self): old_settings_values = settings.POPULAR_LINKS_NODE, settings.NEW_AND_NOTEWORTHY_LINKS_NODE, settings.POPULAR_LINKS_REGISTRATIONS settings.POPULAR_LINKS_NODE = 'notanode' settings.NEW_AND_NOTEWORTHY_LINKS_NODE = 'alsototallywrong' settings.POPULAR_LINKS_REGISTRATIONS = 'nopenope' url = self.project.web_url_for('activity') res = self.app.get(url) assert_equal(res.status_code, 200) settings.POPULAR_LINKS_NODE, settings.NEW_AND_NOTEWORTHY_LINKS_NODE, settings.POPULAR_LINKS_REGISTRATIONS = old_settings_values def test_new_and_noteworthy_and_popular_nodes_show_in_explore_activity(self): url = self.project.web_url_for('activity') res = self.app.get(url) assert_equal(res.status_code, 200) # New and Noteworthy assert_in(str(self.project.title), res) assert_in(str(self.project.created.date()), res) assert_in(str(self.registration.title), res) assert_in(str(self.registration.registered_date.date()), res) assert_not_in(str(self.private_project.title), res) # Popular Projects and Registrations assert_in(str(self.popular_project.title), res) assert_in(str(self.popular_project.created.date()), res) assert_in(str(self.popular_registration.title), res) assert_in(str(self.popular_registration.registered_date.date()), res) class TestResendConfirmation(OsfTestCase): def setUp(self): super(TestResendConfirmation, self).setUp() self.unconfirmed_user = UnconfirmedUserFactory() self.confirmed_user = UserFactory() self.get_url = web_url_for('resend_confirmation_get') self.post_url = web_url_for('resend_confirmation_post') # test that resend confirmation page is load correctly def test_resend_confirmation_get(self): res = self.app.get(self.get_url) assert_equal(res.status_code, 200) assert_in('Resend Confirmation', res.body) assert_in('resendForm', res.forms) # test that unconfirmed user can receive resend confirmation email @mock.patch('framework.auth.views.mails.send_mail') def test_can_receive_resend_confirmation_email(self, mock_send_mail): # load resend confirmation page and submit email res = self.app.get(self.get_url) form = res.forms['resendForm'] form['email'] = self.unconfirmed_user.unconfirmed_emails[0] res = form.submit() # check email, request and response assert_true(mock_send_mail.called) assert_equal(res.status_code, 200) assert_equal(res.request.path, self.post_url) assert_in_html('If there is an OSF account', res) # test that confirmed user cannot receive resend confirmation email @mock.patch('framework.auth.views.mails.send_mail') def test_cannot_receive_resend_confirmation_email_1(self, mock_send_mail): # load resend confirmation page and submit email res = self.app.get(self.get_url) form = res.forms['resendForm'] form['email'] = self.confirmed_user.emails.first().address res = form.submit() # check email, request and response assert_false(mock_send_mail.called) assert_equal(res.status_code, 200) assert_equal(res.request.path, self.post_url) assert_in_html('has already been confirmed', res) # test that non-existing user cannot receive resend confirmation email @mock.patch('framework.auth.views.mails.send_mail') def test_cannot_receive_resend_confirmation_email_2(self, mock_send_mail): # load resend confirmation page and submit email res = self.app.get(self.get_url) form = res.forms['resendForm'] form['email'] = 'random@random.com' res = form.submit() # check email, request and response 
assert_false(mock_send_mail.called) assert_equal(res.status_code, 200) assert_equal(res.request.path, self.post_url) assert_in_html('If there is an OSF account', res) # test that user cannot submit resend confirmation request too quickly @mock.patch('framework.auth.views.mails.send_mail') def test_cannot_resend_confirmation_twice_quickly(self, mock_send_mail): # load resend confirmation page and submit email res = self.app.get(self.get_url) form = res.forms['resendForm'] form['email'] = self.unconfirmed_user.email res = form.submit() res = form.submit() # check request and response assert_equal(res.status_code, 200) assert_in_html('Please wait', res) class TestForgotPassword(OsfTestCase): def setUp(self): super(TestForgotPassword, self).setUp() self.user = UserFactory() self.auth_user = AuthUserFactory() self.get_url = web_url_for('forgot_password_get') self.post_url = web_url_for('forgot_password_post') self.user.verification_key_v2 = {} self.user.save() # log users out before they land on forgot password page def test_forgot_password_logs_out_user(self): # visit forgot password link while another user is logged in res = self.app.get(self.get_url, auth=self.auth_user.auth) # check redirection to CAS logout assert_equal(res.status_code, 302) location = res.headers.get('Location') assert_not_in('reauth', location) assert_in('logout?service=', location) assert_in('forgotpassword', location) # test that forgot password page is loaded correctly def test_get_forgot_password(self): res = self.app.get(self.get_url) assert_equal(res.status_code, 200) assert_in('Forgot Password', res.body) assert_in('forgotPasswordForm', res.forms) # test that existing user can receive reset password email @mock.patch('framework.auth.views.mails.send_mail') def test_can_receive_reset_password_email(self, mock_send_mail): # load forgot password page and submit email res = self.app.get(self.get_url) form = res.forms['forgotPasswordForm'] form['forgot_password-email'] = self.user.username res = form.submit() # check mail was sent assert_true(mock_send_mail.called) # check http 200 response assert_equal(res.status_code, 200) # check request URL is /forgotpassword assert_equal(res.request.path, self.post_url) # check push notification assert_in_html('If there is an OSF account', res) assert_not_in_html('Please wait', res) # check verification_key_v2 is set self.user.reload() assert_not_equal(self.user.verification_key_v2, {}) # test that non-existing user cannot receive reset password email @mock.patch('framework.auth.views.mails.send_mail') def test_cannot_receive_reset_password_email(self, mock_send_mail): # load forgot password page and submit email res = self.app.get(self.get_url) form = res.forms['forgotPasswordForm'] form['forgot_password-email'] = 'fake' + self.user.username res = form.submit() # check mail was not sent assert_false(mock_send_mail.called) # check http 200 response assert_equal(res.status_code, 200) # check request URL is /forgotpassword assert_equal(res.request.path, self.post_url) # check push notification assert_in_html('If there is an OSF account', res) assert_not_in_html('Please wait', res) # check verification_key_v2 is not set self.user.reload() assert_equal(self.user.verification_key_v2, {}) # test that non-existing user cannot receive reset password email @mock.patch('framework.auth.views.mails.send_mail') def test_not_active_user_no_reset_password_email(self, mock_send_mail): self.user.disable_account() self.user.save() # load forgot password page and submit email res = 
self.app.get(self.get_url) form = res.forms['forgotPasswordForm'] form['forgot_password-email'] = self.user.username res = form.submit() # check mail was not sent assert_false(mock_send_mail.called) # check http 200 response assert_equal(res.status_code, 200) # check request URL is /forgotpassword assert_equal(res.request.path, self.post_url) # check push notification assert_in_html('If there is an OSF account', res) assert_not_in_html('Please wait', res) # check verification_key_v2 is not set self.user.reload() assert_equal(self.user.verification_key_v2, {}) # test that user cannot submit forgot password request too quickly @mock.patch('framework.auth.views.mails.send_mail') def test_cannot_reset_password_twice_quickly(self, mock_send_mail): # load forgot password page and submit email res = self.app.get(self.get_url) form = res.forms['forgotPasswordForm'] form['forgot_password-email'] = self.user.username res = form.submit() res = form.submit() # check http 200 response assert_equal(res.status_code, 200) # check push notification assert_in_html('Please wait', res) assert_not_in_html('If there is an OSF account', res) @unittest.skip('Public projects/components are dynamically loaded now.') class TestAUserProfile(OsfTestCase): def setUp(self): OsfTestCase.setUp(self) self.user = AuthUserFactory() self.me = AuthUserFactory() self.project = ProjectFactory(creator=self.me, is_public=True, title=fake.bs()) self.component = NodeFactory(creator=self.me, parent=self.project, is_public=True, title=fake.bs()) # regression test for https://github.com/CenterForOpenScience/osf.io/issues/2623 def test_has_public_projects_and_components(self): # I go to my own profile url = web_url_for('profile_view_id', uid=self.me._primary_key) # I see the title of both my project and component res = self.app.get(url, auth=self.me.auth) assert_in_html(self.component.title, res) assert_in_html(self.project.title, res) # Another user can also see my public project and component url = web_url_for('profile_view_id', uid=self.me._primary_key) # I see the title of both my project and component res = self.app.get(url, auth=self.user.auth) assert_in_html(self.component.title, res) assert_in_html(self.project.title, res) def test_shows_projects_with_many_contributors(self): # My project has many contributors for _ in range(5): user = UserFactory() self.project.add_contributor(user, auth=Auth(self.project.creator), save=True) # I go to my own profile url = web_url_for('profile_view_id', uid=self.me._primary_key) res = self.app.get(url, auth=self.me.auth) # I see '3 more' as a link assert_in('3 more', res) res = res.click('3 more') assert_equal(res.request.path, self.project.url) def test_has_no_public_projects_or_components_on_own_profile(self): # User goes to their profile url = web_url_for('profile_view_id', uid=self.user._id) res = self.app.get(url, auth=self.user.auth) # user has no public components/projects assert_in('You have no public projects', res) assert_in('You have no public components', res) def test_user_no_public_projects_or_components(self): # I go to other user's profile url = web_url_for('profile_view_id', uid=self.user._id) # User has no public components/projects res = self.app.get(url, auth=self.me.auth) assert_in('This user has no public projects', res) assert_in('This user has no public components', res) # regression test def test_does_not_show_registrations(self): project = ProjectFactory(creator=self.user) component = NodeFactory(parent=project, creator=self.user, is_public=False) # User has a 
registration with public components reg = RegistrationFactory(project=component.parent_node, creator=self.user, is_public=True) for each in reg.nodes: each.is_public = True each.save() # I go to other user's profile url = web_url_for('profile_view_id', uid=self.user._id) # Registration does not appear on profile res = self.app.get(url, auth=self.me.auth) assert_in('This user has no public components', res) assert_not_in(reg.title, res) assert_not_in(reg.nodes[0].title, res) @pytest.mark.enable_bookmark_creation class TestPreprintBannerView(OsfTestCase): def setUp(self): super(TestPreprintBannerView, self).setUp() self.admin = AuthUserFactory() self.provider_one = PreprintProviderFactory() self.provider_two = PreprintProviderFactory() self.project_one = ProjectFactory(creator=self.admin, is_public=True) self.project_two = ProjectFactory(creator=self.admin, is_public=True) self.project_three = ProjectFactory(creator=self.admin, is_public=True) self.subject_one = SubjectFactory() self.subject_two = SubjectFactory() self.file_one = test_utils.create_test_file(self.project_one, self.admin, 'mgla.pdf') self.file_two = test_utils.create_test_file(self.project_two, self.admin, 'saor.pdf') self.published_preprint = PreprintFactory(creator=self.admin, filename='mgla.pdf', provider=self.provider_one, subjects=[[self.subject_one._id]], project=self.project_one, is_published=True) self.unpublished_preprint = PreprintFactory(creator=self.admin, filename='saor.pdf', provider=self.provider_two, subjects=[[self.subject_two._id]], project=self.project_two, is_published=False) def test_public_project_published_preprint(self): url = self.project_one.web_url_for('view_project') res = self.app.get(url, auth=self.admin.auth) assert_not_in('has a preprint, but has been made Private. Make your preprint discoverable by making this', res.body) def test_private_project_published_preprint(self): self.project_one.is_public = False self.project_one.save() url = self.project_one.web_url_for('view_project') res = self.app.get(url, auth=self.admin.auth) assert_in('has a preprint, but has been made Private. Make your preprint discoverable by making this', res.body) def test_public_project_unpublished_preprint(self): url = self.project_two.web_url_for('view_project') res = self.app.get(url, auth=self.admin.auth) assert_not_in('has a preprint, but has been made Private. Make your preprint discoverable by making this', res.body) def test_private_project_unpublished_preprint(self): # Do not show banner on unpublished preprints self.project_two.is_public = False self.project_two.save() url = self.project_two.web_url_for('view_project') res = self.app.get(url, auth=self.admin.auth) assert_not_in('has a preprint, but has been made Private. Make your preprint discoverable by making this', res.body) def test_public_project_no_preprint(self): url = self.project_three.web_url_for('view_project') res = self.app.get(url, auth=self.admin.auth) assert_not_in('has a preprint, but has been made Private. Make your preprint discoverable by making this', res.body) def test_private_project_no_preprint(self): self.project_three.is_public = False self.project_three.save() url = self.project_three.web_url_for('view_project') res = self.app.get(url, auth=self.admin.auth) assert_not_in('has a preprint, but has been made Private. Make your preprint discoverable by making this', res.body) if __name__ == '__main__': unittest.main()
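# Small sketch of why the assert_in_html/assert_not_in_html helpers defined at
# the top of this file escape their argument first: names containing characters
# such as ' or & are HTML-escaped in the rendered page, so the raw string would
# not appear verbatim in the response body. The values below are made up.
import markupsafe

fullname = "tess' test string"
rendered = '<span>{}</span>'.format(markupsafe.escape(fullname))

assert str(markupsafe.escape(fullname)) in rendered
assert fullname not in rendered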
caseyrollins/osf.io
tests/test_webtests.py
website/archiver/tasks.py
import os.path as osp
import numpy as np

# Import sile objects
from .sile import *

from sisl._internal import set_module
from sisl import Geometry, AtomUnknown, SuperCell
from sisl.utils import str_spec


__all__ = ['xsfSile', 'axsfSile']


@set_module("sisl.io")
class xsfSile(Sile):
    """ XSF file for XCrySDen """

    def _setup(self, *args, **kwargs):
        """ Setup the `xsfSile` after initialization """
        self._comment = ['#']
        self._md_steps = kwargs.get('steps', None)
        self._md_index = 0

    def _step_md(self):
        """ Step the MD counter """
        self._md_index += 1

    @sile_fh_open()
    def write_supercell(self, sc, fmt='.8f'):
        """ Writes the supercell to the contained file

        Parameters
        ----------
        sc : SuperCell
           the supercell to be written
        fmt : str, optional
           used format for the precision of the data
        """
        # Implementation notice!
        # The XSF files are compatible with Vesta, but ONLY
        # if there are no empty lines!

        # Check that we can write to the file
        sile_raise_write(self)

        # Write out top-header stuff
        from time import gmtime, strftime
        self._write('# File created by: sisl {}\n#\n'.format(strftime("%Y-%m-%d", gmtime())))

        # Print out the number of ANIMSTEPS (if required)
        if not self._md_steps is None:
            self._write(f'ANIMSTEPS {self._md_steps}\n')
        self._write('CRYSTAL\n#\n')

        if self._md_index == 1:
            self._write('# Primitive lattice vectors:\n#\n')
        if self._md_steps is None:
            self._write('PRIMVEC\n')
        else:
            self._write(f'PRIMVEC {self._md_index}\n')
        # We write the cell coordinates as the cell coordinates
        fmt_str = f'{{:{fmt}}} ' * 3 + '\n'
        for i in [0, 1, 2]:
            self._write(fmt_str.format(*sc.cell[i, :]))

        # Currently not written (we should convert the unit cell
        # to a conventional cell (90-90-90))
        # It seems this simply allows to store both formats in
        # the same file.
        self._write('#\n# Conventional lattice vectors:\n#\n')
        if self._md_steps is None:
            self._write('CONVVEC\n')
        else:
            self._write(f'CONVVEC {self._md_index}\n')
        convcell = sc.toCuboid(True)._v
        for i in [0, 1, 2]:
            self._write(fmt_str.format(*convcell[i, :]))

    @sile_fh_open()
    def write_geometry(self, geometry, fmt='.8f', data=None):
        """ Writes the geometry to the contained file

        Parameters
        ----------
        geometry : Geometry
           the geometry to be written
        fmt : str, optional
           used format for the precision of the data
        data : (geometry.na, 3), optional
           auxiliary data associated with the geometry to be saved
           along side. Internally in XCrySDen this data is named *Forces*
        """
        self._step_md()
        self.write_supercell(geometry.sc, fmt)

        has_data = not data is None
        if has_data:
            data.shape = (-1, 3)

        # The current geometry is currently only a single
        # one, and does not write the convvec
        # Is it a necessity?
        if self._md_index == 1:
            self._write('#\n# Atomic coordinates (in primitive coordinates)\n#\n')
        if self._md_steps is None:
            self._write('PRIMCOORD\n')
        else:
            self._write(f'PRIMCOORD {self._md_index}\n')
        self._write('{} {}\n'.format(len(geometry), 1))

        non_valid_Z = (geometry.atoms.Z <= 0).nonzero()[0]
        if len(non_valid_Z) > 0:
            geometry = geometry.remove(non_valid_Z)

        if has_data:
            fmt_str = '{{0:3d}} {{1:{0}}} {{2:{0}}} {{3:{0}}} {{4:{0}}} {{5:{0}}} {{6:{0}}}\n'.format(fmt)
            for ia in geometry:
                tmp = np.append(geometry.xyz[ia, :], data[ia, :])
                self._write(fmt_str.format(geometry.atoms[ia].Z, *tmp))
        else:
            fmt_str = '{{0:3d}} {{1:{0}}} {{2:{0}}} {{3:{0}}}\n'.format(fmt)
            for ia in geometry:
                self._write(fmt_str.format(geometry.atoms[ia].Z, *geometry.xyz[ia, :]))

    @sile_fh_open()
    def read_geometry(self, data=False):
        """ Returns Geometry object from the XSF file

        Parameters
        ----------
        data : bool, optional
           in case the XSF file has auxiliary data, return that as well.
        """
        # Prepare containers...
        cell = np.zeros([3, 3], np.float64)
        cell_set = False
        atom = []
        xyz = []
        na = 0

        line = ' '
        while line != '':
            # skip comments
            line = self.readline()

            key = line.strip()
            # We prefer the primvec
            if key.startswith('CONVVEC') and not cell_set:
                for i in [0, 1, 2]:
                    line = self.readline()
                    cell[i, :] = [float(x) for x in line.split()]

            elif key.startswith('PRIMVEC'):
                cell_set = True
                for i in [0, 1, 2]:
                    line = self.readline()
                    cell[i, :] = [float(x) for x in line.split()]

            elif key.startswith('PRIMCOORD'):
                # First read # of atoms
                line = self.readline().split()
                na = int(line[0])
                # currently line[1] is unused!
                for _ in range(na):
                    line = self.readline().split()
                    atom.append(int(line[0]))
                    xyz.append([float(x) for x in line[1:]])

        xyz = np.array(xyz, np.float64)
        if data:
            dat = None
        if xyz.shape[1] == 6:
            dat = xyz[:, 3:]
            xyz = xyz[:, :3]

        if len(atom) == 0:
            geom = Geometry(xyz, sc=SuperCell(cell))
        elif len(atom) == 1 and atom[0] == -999:
            geom = None
        else:
            geom = Geometry(xyz, atoms=atom, sc=SuperCell(cell))

        if data:
            return geom, dat
        return geom

    @sile_fh_open()
    def write_grid(self, *args, **kwargs):
        """ Store grid(s) data to an XSF file

        Examples
        --------
        >>> g1 = Grid(0.1, sc=2.)
        >>> g2 = Grid(0.1, sc=2.)
        >>> get_sile('output.xsf', 'w').write_grid(g1, g2)

        Parameters
        ----------
        *args : Grid
           a list of data-grids to be written to the XSF file.
           Each argument gets the field name ``?grid_<>`` where <> starts
           with the integer 0, and *?* is ``real_``/``imag_`` for complex
           valued grids.
        geometry : Geometry, optional
           the geometry stored in the file, defaults to ``args[0].geometry``
        fmt : str, optional
           floating point format for data (.5e)
        buffersize : int, optional
           size of the buffer while writing the data, (6144)
        """
        sile_raise_write(self)

        geom = kwargs.get('geometry', args[0].geometry)
        if geom is None:
            geom = Geometry([0, 0, 0], AtomUnknown(999), sc=args[0].sc)
        self.write_geometry(geom)

        # Buffer size for writing
        buffersize = kwargs.get('buffersize', min(6144, args[0].grid.size))

        # Format for precision
        fmt = kwargs.get('fmt', '.5e')

        self._write('BEGIN_BLOCK_DATAGRID_3D\n')
        name = kwargs.get('name', 'sisl_grid_{}'.format(len(args)))
        # Transfer all spaces to underscores (no spaces allowed)
        self._write(' ' + name.replace(' ', '_') + '\n')
        _v3 = (('{:' + fmt + '} ') * 3).strip() + '\n'

        def write_cell(grid):
            # Now write the grid
            self._write(' {} {} {}\n'.format(*grid.shape))
            self._write(' ' + _v3.format(*grid.origo))
            self._write(' ' + _v3.format(*grid.cell[0, :]))
            self._write(' ' + _v3.format(*grid.cell[1, :]))
            self._write(' ' + _v3.format(*grid.cell[2, :]))

        for i, grid in enumerate(args):
            is_complex = np.iscomplexobj(grid.grid)

            name = kwargs.get('grid' + str(i), str(i))
            if is_complex:
                self._write(f' BEGIN_DATAGRID_3D_real_{name}\n')
            else:
                self._write(f' BEGIN_DATAGRID_3D_{name}\n')
            write_cell(grid)

            # for z
            # for y
            # for x
            # write...
            _fmt = '{:' + fmt + '}\n'
            for x in np.nditer(np.asarray(grid.grid.real.T, order='C').reshape(-1),
                               flags=['external_loop', 'buffered'],
                               op_flags=[['readonly']], order='C', buffersize=buffersize):
                self._write((_fmt * x.shape[0]).format(*x.tolist()))

            self._write(' END_DATAGRID_3D\n')

            # Skip if not complex
            if not is_complex:
                continue

            self._write(f' BEGIN_DATAGRID_3D_imag_{name}\n')
            write_cell(grid)
            for x in np.nditer(np.asarray(grid.grid.imag.T, order='C').reshape(-1),
                               flags=['external_loop', 'buffered'],
                               op_flags=[['readonly']], order='C', buffersize=buffersize):
                self._write((_fmt * x.shape[0]).format(*x.tolist()))

            self._write(' END_DATAGRID_3D\n')

        self._write('END_BLOCK_DATAGRID_3D\n')

    def ArgumentParser(self, p=None, *args, **kwargs):
        """ Returns the arguments that is available for this Sile """
        newkw = Geometry._ArgumentParser_args_single()
        newkw.update(kwargs)
        return self.read_geometry().ArgumentParser(p, *args, **newkw)

    def ArgumentParser_out(self, p, *args, **kwargs):
        """ Adds arguments only if this file is an output file

        Parameters
        ----------
        p : ``argparse.ArgumentParser``
           the parser which gets amended the additional output options.
        """
        import argparse

        ns = kwargs.get("namespace", None)
        if ns is None:
            class _():
                pass
            ns = _()

        # We will add the vector data
        class VectorNoScale(argparse.Action):
            def __call__(self, parser, ns, no_value, option_string=None):
                setattr(ns, "_vector_scale", False)
        p.add_argument("--no-vector-scale", "-nsv", nargs=0,
                       action=VectorNoScale,
                       help='''Do not modify vector components (same as --vector-scale 1.)''')
        # Default to scale the vectors
        setattr(ns, "_vector_scale", True)

        # We will add the vector data
        class VectorScale(argparse.Action):
            def __call__(self, parser, ns, value, option_string=None):
                setattr(ns, '_vector_scale', float(value))
        p.add_argument('--vector-scale', '-sv', metavar='SCALE',
                       action=VectorScale,
                       help='''Scale vector components by this factor.''')

        # We will add the vector data
        class Vectors(argparse.Action):
            def __call__(self, parser, ns, values, option_string=None):
                routine = values.pop(0)

                # Default input file
                input_file = getattr(ns, '_input_file', None)

                # Figure out which of the segments are a file
                for i, val in enumerate(values):
                    if osp.isfile(str_spec(val)[0]):
                        input_file = values.pop(i)
                        break

                # Quick return if there is no input-file...
                if input_file is None:
                    return

                # Try and read the vector
                from sisl.io import get_sile
                input_sile = get_sile(input_file, mode='r')

                vector = None
                if hasattr(input_sile, f'read_{routine}'):
                    vector = getattr(input_sile, f'read_{routine}')(*values)

                if vector is None:
                    # Try the read_data function
                    d = {routine: True}
                    vector = input_sile.read_data(*values, **d)

                if vector is None and len(values) > 1:
                    # try and see if the first argument is a str, if
                    # so use that as a keyword
                    if isinstance(values[0], str):
                        d = {values[0]: True}
                        vector = input_sile.read_data(*values[1:], **d)

                # Clean the sile
                del input_sile

                if vector is None:
                    # Use title to capitalize
                    raise ValueError('{} could not be read from file: {}.'.format(routine.title(), input_file))
                if len(vector) != len(ns._geometry):
                    raise ValueError(f'read_{routine} could read from file: {input_file}, sizes does not conform to geometry.')
                setattr(ns, '_vector', vector)
        p.add_argument('--vector', '-v', metavar=('DATA', '*ARGS[, FILE]'), nargs='+',
                       action=Vectors,
                       help='''Adds vector arrows for each atom, first argument is type (force, moment, ...).
                       If the current input file contains the vectors no second argument is necessary, else
                       the file containing the data is required as the last input.
                       Any arguments inbetween are passed to the `read_data` function (in order).
                       By default the vectors scaled by 1 / max(|V|) such that the longest vector has length 1.
                       ''')


@set_module("sisl.io")
class axsfSile(xsfSile):
    """ AXSF file for XCrySDen

    When creating an AXSF file one should denote how many MD steps to write out:

    >>> axsf = axsfSile('file.axsf', steps=100)
    >>> for i in range(100):
    ...    axsf.write_geometry(geom)
    """

    def _setup(self, *args, **kwargs):
        # Correct number of steps
        super()._setup(*args, **kwargs)
        if not hasattr(self, '_md_steps'):
            self._md_steps = 1

    write_grid = None


add_sile('xsf', xsfSile, case=False, gzip=True)
add_sile('axsf', axsfSile, case=False, gzip=True)
import pytest

pytestmark = pytest.mark.spin

import math as m
import numpy as np

from sisl import Spin


def test_spin1():
    for val in ['unpolarized', '', Spin.UNPOLARIZED,
                'polarized', 'p', Spin.POLARIZED,
                'non-collinear', 'nc', Spin.NONCOLINEAR,
                'spin-orbit', 'so', Spin.SPINORBIT]:
        s = Spin(val)
        str(s)
        s1 = s.copy()
        assert s == s1


def test_spin2():
    s1 = Spin()
    s2 = Spin('p')
    s3 = Spin('nc')
    s4 = Spin('so')

    assert s1.kind == Spin.UNPOLARIZED
    assert s2.kind == Spin.POLARIZED
    assert s3.kind == Spin.NONCOLINEAR
    assert s4.kind == Spin.SPINORBIT

    assert s1 == s1.copy()
    assert s2 == s2.copy()
    assert s3 == s3.copy()
    assert s4 == s4.copy()

    assert s1 < s2
    assert s2 < s3
    assert s3 < s4

    assert s1 <= s2
    assert s2 <= s3
    assert s3 <= s4

    assert s2 > s1
    assert s3 > s2
    assert s4 > s3

    assert s2 >= s1
    assert s3 >= s2
    assert s4 >= s3

    assert s1.is_unpolarized
    assert not s1.is_polarized
    assert not s1.is_noncolinear
    assert not s1.is_spinorbit

    assert not s2.is_unpolarized
    assert s2.is_polarized
    assert not s2.is_noncolinear
    assert not s2.is_spinorbit

    assert not s3.is_unpolarized
    assert not s3.is_polarized
    assert s3.is_noncolinear
    assert not s3.is_spinorbit

    assert not s4.is_unpolarized
    assert not s4.is_polarized
    assert not s4.is_noncolinear
    assert s4.is_spinorbit


def test_spin3():
    with pytest.raises(ValueError):
        s = Spin('satoehus')


def test_spin4():
    s1 = Spin(Spin.UNPOLARIZED)
    S1 = Spin(Spin.UNPOLARIZED, np.complex64)
    s2 = Spin(Spin.POLARIZED)
    S2 = Spin(Spin.POLARIZED, np.complex64)
    s3 = Spin(Spin.NONCOLINEAR)
    S3 = Spin(Spin.NONCOLINEAR, np.complex64)
    s4 = Spin(Spin.SPINORBIT)
    S4 = Spin(Spin.SPINORBIT, np.complex64)

    assert s1 == S1
    assert s2 == S2
    assert s3 == S3
    assert s4 == S4

    # real comparison
    assert s1 < S2
    assert s1 < S3
    assert s1 < S4

    assert s2 > S1
    assert s2 < S3
    assert s2 < S4

    assert s3 > S1
    assert s3 > S2
    assert s3 < S4

    assert s4 > S1
    assert s4 > S2
    assert s4 > S3

    # complex complex
    assert S1 < S2
    assert S1 < S3
    assert S1 < S4

    assert S2 > S1
    assert S2 < S3
    assert S2 < S4

    assert S3 > S1
    assert S3 > S2
    assert S3 < S4

    assert S4 > S1
    assert S4 > S2
    assert S4 > S3

    # real comparison
    assert S1 < s2
    assert S1 < s3
    assert S1 < s4

    assert S2 > s1
    assert S2 < s3
    assert S2 < s4

    assert S3 > s1
    assert S3 > s2
    assert S3 < s4

    assert S4 > s1
    assert S4 > s2
    assert S4 > s3

    # complex complex
    assert S1 < s2
    assert S1 < s3
    assert S1 < s4

    assert S2 > s1
    assert S2 < s3
    assert S2 < s4

    assert S3 > s1
    assert S3 > s2
    assert S3 < s4

    assert S4 > s1
    assert S4 > s2
    assert S4 > s3


def test_pauli():
    # just grab the default spin
    S = Spin()

    # Create a fictituous wave-function
    sq2 = 2 ** .5
    W = np.array([
        [1/sq2, 1/sq2],            # M_x = 1
        [1/sq2, -1/sq2],           # M_x = -1
        [0.5 + 0.5j, 0.5 + 0.5j],  # M_x = 1
        [0.5 - 0.5j, -0.5 + 0.5j], # M_x = -1
        [1/sq2, 1j/sq2],           # M_y = 1
        [1/sq2, -1j/sq2],          # M_y = -1
        [0.5 - 0.5j, 0.5 + 0.5j],  # M_y = 1
        [0.5 + 0.5j, 0.5 - 0.5j],  # M_y = -1
        [1, 0],                    # M_z = 1
        [0, 1],                    # M_z = -1
    ])
    x = np.array([1, -1, 1, -1, 0, 0, 0, 0, 0, 0])
    assert np.allclose(x, (np.conj(W)*S.X.dot(W.T).T).sum(1).real)
    y = np.array([0, 0, 0, 0, 1, -1, 1, -1, 0, 0])
    assert np.allclose(y, (np.conj(W)*np.dot(S.Y, W.T).T).sum(1).real)
    z = np.array([0, 0, 0, 0, 0, 0, 0, 0, 1, -1])
    assert np.allclose(z, (np.conj(W)*np.dot(S.Z, W.T).T).sum(1).real)


def test_pickle():
    import pickle as p
    S = Spin('nc')
    n = p.dumps(S)
    s = p.loads(n)
    assert S == s
zerothi/sids
sisl/physics/tests/test_spin.py
sisl/io/xsf.py
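Because `xsfSile` and `axsfSile` above are registered through `add_sile`, they are normally reached via `get_sile` as well as directly. A minimal usage sketch, assuming only the API shown in the file above plus `sisl.geom.graphene` for a throw-away geometry; the file names are illustrative, and the `'w'` mode mirrors the `write_grid` docstring while `steps` follows the `axsfSile` docstring:

# Sketch only: round-trip a geometry through the xsfSile defined above.
import sisl
from sisl.io import get_sile
from sisl.io.xsf import axsfSile

geom = sisl.geom.graphene()  # any Geometry will do

# single snapshot -> .xsf (same pattern as the write_grid docstring)
get_sile('example.xsf', 'w').write_geometry(geom)
geom_back = get_sile('example.xsf').read_geometry()

# MD trajectory -> .axsf; the number of ANIMSTEPS must be declared up front
axsf = axsfSile('example.axsf', 'w', steps=10)
for _ in range(10):
    axsf.write_geometry(geom)
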
import warnings from datetime import datetime import numpy as np import scipy as sp from os.path import isfile import itertools as itools from ..sile import add_sile, get_sile_class, sile_fh_open, sile_raise_write, SileError from .sile import SileSiesta from .._help import * from sisl._internal import set_module from sisl import constant from sisl.unit.siesta import units import sisl._array as _a from sisl._indices import indices_only from sisl.utils.ranges import list2str from sisl.messages import SislError, info, warn from sisl.utils.mathematics import fnorm from .binaries import tshsSileSiesta, tsdeSileSiesta from .binaries import dmSileSiesta, hsxSileSiesta, onlysSileSiesta from .eig import eigSileSiesta from .fc import fcSileSiesta from .fa import faSileSiesta from .siesta_grid import gridncSileSiesta from .siesta_nc import ncSileSiesta from .basis import ionxmlSileSiesta, ionncSileSiesta from .orb_indx import orbindxSileSiesta from .xv import xvSileSiesta from sisl import Geometry, Orbital, Atom, AtomGhost, Atoms, SuperCell, DynamicalMatrix from sisl.utils.cmd import default_ArgumentParser, default_namespace from sisl.utils.misc import merge_instances from sisl.unit.siesta import unit_convert, unit_default, unit_group __all__ = ['fdfSileSiesta'] _LOGICAL_TRUE = ['.true.', 'true', 'yes', 'y', 't'] _LOGICAL_FALSE = ['.false.', 'false', 'no', 'n', 'f'] _LOGICAL = _LOGICAL_FALSE + _LOGICAL_TRUE Bohr2Ang = unit_convert('Bohr', 'Ang') def _listify_str(arg): if isinstance(arg, str): return [arg] return arg def _track(method, msg): if method.__self__.track: info(f"{method.__self__.__class__.__name__}.{method.__name__}: {msg}") def _track_file(method, f, msg=None): if msg is None: if f.is_file(): msg = f"reading file {f}" else: msg = f"could not find file {f}" if method.__self__.track: info(f"{method.__self__.__class__.__name__}.{method.__name__}: {msg}") @set_module("sisl.io.siesta") class fdfSileSiesta(SileSiesta): """ FDF-input file By supplying base you can reference files in other directories. By default the ``base`` is the directory given in the file name. Parameters ---------- filename: str fdf file mode : str, optional opening mode, default to read-only base : str, optional base-directory to read output files from. Examples -------- >>> fdf = fdfSileSiesta('tmp/RUN.fdf') # reads output files in 'tmp/' folder >>> fdf = fdfSileSiesta('tmp/RUN.fdf', base='.') # reads output files in './' folder """ def _setup(self, *args, **kwargs): """ Setup the `fdfSileSiesta` after initialization """ self._comment = ['#', '!', ';'] # List of parent file-handles used while reading # This is because fdf enables inclusion of other files self._parent_fh = [] # Public key for printing information about where stuff comes from self.track = kwargs.get("track", False) def _pushfile(self, f): if self.dir_file(f).is_file(): self._parent_fh.append(self.fh) self.fh = self.dir_file(f).open(self._mode) else: warn(str(self) + f' is trying to include file: {f} but the file seems not to exist? 
Will disregard file!') def _popfile(self): if len(self._parent_fh) > 0: self.fh.close() self.fh = self._parent_fh.pop() return True return False def _seek(self): """ Closes all files, and starts over from beginning """ try: while self._popfile(): pass self.fh.seek(0) except: pass @sile_fh_open() def includes(self): """ Return a list of all files that are *included* or otherwise necessary for reading the fdf file """ self._seek() # In FDF files, %include marks files that progress # down in a tree structure def add(f): f = self.dir_file(f) if f not in includes: includes.append(f) # List of includes includes = [] l = self.readline() while l != '': ls = l.split() if '%include' == ls[0].lower(): add(ls[1]) self._pushfile(ls[1]) elif '<' in ls: # TODO, in principle the < could contain # include if this line is not a %block. add(ls[ls.index('<')+1]) l = self.readline() while l == '': # last line of file if self._popfile(): l = self.readline() else: break return includes @sile_fh_open() def _read_label(self, label): """ Try and read the first occurence of a key This will take care of blocks, labels and piped in labels Parameters ---------- label : str label to find in the fdf file """ self._seek() def tolabel(label): return label.lower().replace('_', '').replace('-', '').replace('.', '') labell = tolabel(label) def valid_line(line): ls = line.strip() if len(ls) == 0: return False return not (ls[0] in self._comment) def process_line(line): # Split line by spaces ls = line.split() if len(ls) == 0: return None # Make a lower equivalent of ls lsl = list(map(tolabel, ls)) # Check if there is a pipe in the line if '<' in lsl: idx = lsl.index('<') # Now there are two cases # 1. It is a block, in which case # the full block is piped into the label # %block Label < file if lsl[0] == '%block' and lsl[1] == labell: # Correct line found # Read the file content, removing any empty and/or comment lines lines = self.dir_file(ls[3]).open('r').readlines() return [l.strip() for l in lines if valid_line(l)] # 2. There are labels that should be read from a subsequent file # Label1 Label2 < other.fdf if labell in lsl[:idx]: # Valid line, read key from other.fdf return fdfSileSiesta(self.dir_file(ls[idx+1]), base=self._directory)._read_label(label) # It is not in this line, either key is # on the RHS of <, or the key could be "block". Say. return None # The last case is if the label is the first word on the line # In that case we have found what we are looking for if lsl[0] == labell: return (' '.join(ls[1:])).strip() elif lsl[0] == '%block': if lsl[1] == labell: # Read in the block content lines = [] # Now read lines l = self.readline().strip() while not tolabel(l).startswith('%endblock'): if len(l) > 0: lines.append(l) l = self.readline().strip() return lines elif lsl[0] == '%include': # We have to open a new file self._pushfile(ls[1]) return None # Perform actual reading of line l = self.readline().split('#')[0] if len(l) == 0: return None l = process_line(l) while l is None: l = self.readline().split('#')[0] if len(l) == 0: if not self._popfile(): return None l = process_line(l) return l @classmethod def _type(cls, value): """ Determine the type by the value Parameters ---------- value : str or list or numpy.ndarray the value to check for fdf-type """ if value is None: return None if isinstance(value, list): # A block, %block ... return 'B' if isinstance(value, np.ndarray): # A list, Label [...] 
return 'a' # Grab the entire line (beside the key) values = value.split() if len(values) == 1: fdf = values[0].lower() if fdf in _LOGICAL: # logical return 'b' try: float(fdf) if '.' in fdf: # a real number (otherwise an integer) return 'r' return 'i' except: pass # fall-back to name with everything elif len(values) == 2: # possibly a physical value try: float(values[0]) return 'p' except: pass return 'n' @sile_fh_open() def type(self, label): """ Return the type of the fdf-keyword Parameters ---------- label : str the label to look-up """ self._seek() return self._type(self._read_label(label)) @sile_fh_open() def get(self, label, default=None, unit=None, with_unit=False): """ Retrieve fdf-keyword from the file Parameters ---------- label : str the fdf-label to search for default : optional if the label is not found, this will be the returned value (default to ``None``) unit : str, optional unit of the physical quantity to return with_unit : bool, optional whether the physical quantity gets returned with the found unit in the fdf file. Returns ------- value : the value of the fdf-label. If the label is a block, a `list` is returned, for a real value a `float` (or if the default is of `float`), for an integer, an `int` is returned. unit : if `with_unit` is true this will contain the associated unit if it is specified Examples -------- >>> print(open(...).readlines()) LabeleV 1. eV LabelRy 1. Ry Label name FakeInt 1 %block Hello line 1 line2 %endblock >>> fdf.get('LabeleV') == 1. # default unit is eV >>> fdf.get('LabelRy') == unit.siesta.unit_convert('Ry', 'eV') >>> fdf.get('LabelRy', unit='Ry') == 1. >>> fdf.get('LabelRy', with_unit=True) == (1., 'Ry') >>> fdf.get('FakeInt', '0') == '1' >>> fdf.get('LabeleV', with_unit=True) == (1., 'eV') >>> fdf.get('Label', with_unit=True) == 'name' # no unit present on line >>> fdf.get('Hello') == ['line 1', 'line2'] """ # Try and read a line value = self._read_label(label) # Simply return the default value if not found if value is None: return default # Figure out what it is t = self._type(value) # We will only do something if it is a real, int, or physical. # Else we simply return, as-is if t == 'r': if default is None: return float(value) t = type(default) return t(value) elif t == 'i': if default is None: return int(value) t = type(default) return t(value) elif t == 'p': value = value.split() if with_unit: # Simply return, as is. Let the user do whatever. return float(value[0]), value[1] if unit is None: default = unit_default(unit_group(value[1])) else: if unit_group(value[1]) != unit_group(unit): raise ValueError(f"Requested unit for {label} is not the same type. " "Found/Requested {value[1]}/{unit}'") default = unit return float(value[0]) * unit_convert(value[1], default) elif t == 'b': return value.lower() in _LOGICAL_TRUE return value def set(self, key, value, keep=True): """ Add the key and value to the FDF file Parameters ---------- key : str the fdf-key value to be set in the fdf file value : str or list of str the value of the string. If a `str` is passed a regular fdf-key is used, if a `list` it will be a %block. keep : bool, optional whether old flags will be kept in the fdf file. In this case a time-stamp will be written to show when the key was overwritten. """ # To set a key we first need to figure out if it is # already present, if so, we will add the new key, just above # the already present key. top_file = str(self.file) # 1. 
find the old value, and thus the file in which it is found with self: try: self.get(key) # Get the file of the containing data top_file = str(self.fh.name) except: pass # Ensure that all files are closed self._seek() # Now we should re-read and edit the file lines = open(top_file, 'r').readlines() def write(fh, value): if value is None: return if isinstance(value, str): fh.write(self.print(key, value)) if '\n' not in value: fh.write('\n') else: raise NotImplementedError('Currently blocks are not implemented in set!') # Now loop, write and edit do_write = True lkey = key.lower() with open(top_file, 'w') as fh: for line in lines: if self.line_has_key(line, lkey, case=False) and do_write: write(fh, value) if keep: fh.write('# Old value ({})\n'.format(datetime.today().strftime('%Y-%m-%d %H:%M'))) fh.write(f'{line}') do_write = False else: fh.write(line) @staticmethod def print(key, value): """ Return a string which is pretty-printing the key+value """ if isinstance(value, list): s = f'%block {key}' # if the value has any new-values has_nl = False for v in value: if '\n' in v: has_nl = True break if has_nl: # do not skip to next line in next segment value[-1].replace('\n', '') s += '\n{}'.format(''.join(value)) else: s += '\n{} {}'.format(value[0], '\n'.join(value[1:])) s += f'%endblock {key}' else: s = f'{key} {value}' return s @sile_fh_open() def write_supercell(self, sc, fmt='.8f', *args, **kwargs): """ Writes the supercell to the contained file """ # Check that we can write to the file sile_raise_write(self) fmt_str = ' {{0:{0}}} {{1:{0}}} {{2:{0}}}\n'.format(fmt) unit = kwargs.get('unit', 'Ang').capitalize() conv = 1. if unit in ['Ang', 'Bohr']: conv = unit_convert('Ang', unit) else: unit = 'Ang' # Write out the cell self._write(f'LatticeConstant 1.0 {unit}\n') self._write('%block LatticeVectors\n') self._write(fmt_str.format(*sc.cell[0, :] * conv)) self._write(fmt_str.format(*sc.cell[1, :] * conv)) self._write(fmt_str.format(*sc.cell[2, :] * conv)) self._write('%endblock LatticeVectors\n') @sile_fh_open() def write_geometry(self, geometry, fmt='.8f', *args, **kwargs): """ Writes the geometry to the contained file """ # Check that we can write to the file sile_raise_write(self) self.write_supercell(geometry.sc, fmt, *args, **kwargs) self._write('\n') self._write(f'NumberOfAtoms {geometry.na}\n') unit = kwargs.get('unit', 'Ang').capitalize() is_fractional = unit in ['Frac', 'Fractional'] if is_fractional: self._write('AtomicCoordinatesFormat Fractional\n') else: conv = unit_convert('Ang', unit) self._write(f'AtomicCoordinatesFormat {unit}\n') self._write('%block AtomicCoordinatesAndAtomicSpecies\n') n_species = len(geometry.atoms.atom) # Count for the species if is_fractional: xyz = geometry.fxyz else: xyz = geometry.xyz * conv if fmt[0] == '.': # Correct for a "same" length of all coordinates c_max = len(str((f'{{:{fmt}}}').format(xyz.max()))) c_min = len(str((f'{{:{fmt}}}').format(xyz.min()))) fmt = str(max(c_min, c_max)) + fmt fmt_str = ' {{3:{0}}} {{4:{0}}} {{5:{0}}} {{0}} # {{1:{1}d}}: {{2}}\n'.format(fmt, len(str(len(geometry)))) for ia, a, isp in geometry.iter_species(): self._write(fmt_str.format(isp + 1, ia + 1, a.tag, *xyz[ia, :])) self._write('%endblock AtomicCoordinatesAndAtomicSpecies\n\n') # Write out species # First swap key and value self._write(f'NumberOfSpecies {n_species}\n') self._write('%block ChemicalSpeciesLabel\n') for i, a in enumerate(geometry.atoms.atom): if isinstance(a, AtomGhost): self._write(' {} {} {}\n'.format(i + 1, -a.Z, a.tag)) else: self._write(' {} {} 
{}\n'.format(i + 1, a.Z, a.tag)) self._write('%endblock ChemicalSpeciesLabel\n') _write_block = True def write_block(atoms, append, write_block): if write_block: self._write('\n# Constraints\n%block Geometry.Constraints\n') write_block = False self._write(f' atom [{atoms}]{append}\n') return write_block for d in range(4): append = {0: '', 1: ' 1. 0. 0.', 2: ' 0. 1. 0.', 3: ' 0. 0. 1.'}.get(d) n = 'CONSTRAIN' + {0: '', 1: '-x', 2: '-y', 3: '-z'}.get(d) if n in geometry.names: idx = list2str(geometry.names[n] + 1).replace('-', ' -- ') if len(idx) > 200: info(f"{str(self)}.write_geometry will not write the constraints for {n} (too long line).") else: _write_block = write_block(idx, append, _write_block) if not _write_block: self._write('%endblock\n') @staticmethod def _SpGeom_replace_geom(spgeom, geometry): """ Replace all atoms in spgeom with the atom in geometry while retaining the number of orbitals Currently we need some way of figuring out whether the number of atoms and orbitals are consistent. Parameters ---------- spgeom : SparseGeometry the sparse object with attached geometry geometry : Geometry geometry to grab atoms from full_replace : bool, optional whether the full geometry may be replaced in case ``spgeom.na != geometry.na && spgeom.no == geometry.no``. This is required when `spgeom` does not contain information about atoms. """ if spgeom.na != geometry.na and spgeom.no == geometry.no: # In this case we cannot compare individiual atoms # of orbitals. # I.e. we suspect the incoming geometry to be correct. spgeom._geometry = geometry return True elif spgeom.na != geometry.na: warn('cannot replace geometry due to insufficient information regarding number of ' 'atoms and orbitals, ensuring correct geometry failed...') no_no = spgeom.no == geometry.no # Loop and make sure the number of orbitals is consistent for a, idx in geometry.atoms.iter(True): if len(idx) == 0: continue Sa = spgeom.geometry.atoms[idx[0]] if Sa.no != a.no: # Make sure the atom we replace with retains the same information # *except* the number of orbitals. a = a.__class__(a.Z, Sa.orbital, mass=a.mass, tag=a.tag) spgeom.geometry.atoms.replace(idx, a) spgeom.geometry.reduce() return no_no def read_supercell_nsc(self, *args, **kwargs): """ Read supercell size using any method available Raises ------ SislWarning if none of the files can be read """ order = _listify_str(kwargs.pop('order', ['nc', 'ORB_INDX'])) for f in order: v = getattr(self, '_r_supercell_nsc_{}'.format(f.lower()))(*args, **kwargs) if v is not None: _track(self.read_supercell_nsc, f"found file {f}") return v warn('number of supercells could not be read from output files. Assuming molecule cell ' '(no supercell connections)') return _a.onesi(3) def _r_supercell_nsc_nc(self, *args, **kwargs): f = self.dir_file(self.get('SystemLabel', default='siesta') + '.nc') _track_file(self._r_supercell_nsc_nc, f) if f.is_file(): return ncSileSiesta(f).read_supercell_nsc() return None def _r_supercell_nsc_orb_indx(self, *args, **kwargs): f = self.dir_file(self.get('SystemLabel', default='siesta') + '.ORB_INDX') _track_file(self._r_supercell_nsc_orb_indx, f) if f.is_file(): return orbindxSileSiesta(f).read_supercell_nsc() return None def read_supercell(self, output=False, *args, **kwargs): """ Returns SuperCell object by reading fdf or Siesta output related files. One can limit the tried files to only one file by passing only a single file ending. 
Parameters ---------- output: bool, optional whether to read supercell from output files (default to read from the fdf file). order: list of str, optional the order of which to try and read the supercell. By default this is ``['XV', 'nc', 'fdf']`` if `output` is true. If `order` is present `output` is disregarded. Examples -------- >>> fdf = get_sile('RUN.fdf') >>> fdf.read_supercell() # read from fdf >>> fdf.read_supercell(True) # read from [XV, nc, fdf] >>> fdf.read_supercell(order=['nc']) # read from [nc] >>> fdf.read_supercell(True, order=['nc']) # read from [nc] """ if output: order = _listify_str(kwargs.pop('order', ['XV', 'nc', 'fdf'])) else: order = _listify_str(kwargs.pop('order', ['fdf'])) for f in order: v = getattr(self, '_r_supercell_{}'.format(f.lower()))(*args, **kwargs) if v is not None: _track(self.read_supercell, f"found file {f}") return v return None def _r_supercell_fdf(self, *args, **kwargs): """ Returns `SuperCell` object from the FDF file """ s = self.get('LatticeConstant', unit='Ang') if s is None: raise SileError('Could not find LatticeConstant in file') # Read in cell cell = _a.emptyd([3, 3]) lc = self.get('LatticeVectors') if lc: for i in range(3): cell[i, :] = [float(k) for k in lc[i].split()[:3]] else: lc = self.get('LatticeParameters') if lc: tmp = [float(k) for k in lc[0].split()[:6]] cell = SuperCell.tocell(*tmp) if lc is None: # the fdf file contains neither the latticevectors or parameters raise SileError('Could not find LatticeVectors or LatticeParameters block in file') cell *= s # When reading from the fdf, the warning should be suppressed with warnings.catch_warnings(): warnings.simplefilter("ignore") nsc = self.read_supercell_nsc() return SuperCell(cell, nsc=nsc) def _r_supercell_nc(self): # Read supercell from <>.nc file f = self.dir_file(self.get('SystemLabel', default='siesta') + '.nc') _track_file(self._r_supercell_nc, f) if f.is_file(): return ncSileSiesta(f).read_supercell() return None def _r_supercell_xv(self, *args, **kwargs): """ Returns `SuperCell` object from the FDF file """ f = self.dir_file(self.get('SystemLabel', default='siesta') + '.XV') _track_file(self._r_supercell_xv, f) if f.is_file(): nsc = self.read_supercell_nsc() sc = xvSileSiesta(f).read_supercell() sc.set_nsc(nsc) return sc return None def _r_supercell_tshs(self, *args, **kwargs): f = self.dir_file(self.get('SystemLabel', default='siesta') + '.TSHS') _track_file(self._r_supercell_tshs, f) if f.is_file(): return tshsSileSiesta(f).read_supercell() return None def _r_supercell_onlys(self, *args, **kwargs): f = self.dir_file(self.get('SystemLabel', default='siesta') + '.onlyS') _track_file(self._r_supercell_onlys, f) if f.is_file(): return onlysSileSiesta(f).read_supercell() return None def read_force(self, *args, **kwargs): """ Read forces from the output of the calculation (forces are not defined in the input) Parameters ---------- order : list of str, optional the order of the forces we are trying to read, default to ``['FA', 'nc']`` Returns ------- (*, 3) : vector with forces for each of the atoms """ order = _listify_str(kwargs.pop('order', ['FA', 'nc'])) for f in order: v = getattr(self, '_r_force_{}'.format(f.lower()))(*args, **kwargs) if v is not None: if self.track: info(f"{self.file}(read_force) found in file={f}") return v return None def _r_force_fa(self, *args, **kwargs): """ Read forces from the FA file """ f = self.dir_file(self.get('SystemLabel', default='siesta') + '.FA') if f.is_file(): return faSileSiesta(f).read_force() return None def _r_force_fac(self, 
*args, **kwargs): """ Read forces from the FAC file """ f = self.dir_file(self.get('SystemLabel', default='siesta') + '.FAC') if f.is_file(): return faSileSiesta(f).read_force() return None def _r_force_nc(self, *args, **kwargs): """ Read forces from the nc file """ f = self.dir_file(self.get('SystemLabel', default='siesta') + '.nc') if f.is_file(): return ncSileSiesta(f).read_force() return None def read_force_constant(self, *args, **kwargs): """ Read force constant from the output of the calculation Returns ------- force_constant : numpy.ndarray vector [*, 3, 2, *, 3] with force constant element for each of the atomic displacements """ order = _listify_str(kwargs.pop('order', ['nc', 'FC'])) for f in order: v = getattr(self, '_r_force_constant_{}'.format(f.lower()))(*args, **kwargs) if v is not None: if self.track: info(f"{self.file}(read_force_constant) found in file={f}") return v return None def _r_force_constant_nc(self, *args, **kwargs): f = self.dir_file(self.get('SystemLabel', default='siesta') + '.nc') if f.is_file(): if not 'FC' in ncSileSiesta(f).groups: return None fc = ncSileSiesta(f).read_force_constant() return fc return None def _r_force_constant_fc(self, *args, **kwargs): f = self.dir_file(self.get('SystemLabel', default='siesta') + '.FC') if f.is_file(): na = self.get('NumberOfAtoms', default=None) return fcSileSiesta(f).read_force_constant(na=na) return None def read_fermi_level(self, *args, **kwargs): """ Read fermi-level from output of the calculation Parameters ---------- order: list of str, optional the order of which to try and read the fermi-level. By default this is ``['nc', 'TSDE', 'TSHS', 'EIG']``. Returns ------- Ef : float fermi-level """ order = _listify_str(kwargs.pop('order', ['nc', 'TSDE', 'TSHS', 'EIG'])) for f in order: v = getattr(self, '_r_fermi_level_{}'.format(f.lower()))(*args, **kwargs) if v is not None: if self.track: info(f"{self.file}(read_fermi_level) found in file={f}") return v return None def _r_fermi_level_nc(self): f = self.dir_file(self.get('SystemLabel', default='siesta') + '.nc') if isfile(f): return ncSileSiesta(f).read_fermi_level() return None def _r_fermi_level_tsde(self): f = self.dir_file(self.get('SystemLabel', default='siesta') + '.TSDE') if isfile(f): return tsdeSileSiesta(f).read_fermi_level() return None def _r_fermi_level_tshs(self): f = self.dir_file(self.get('SystemLabel', default='siesta') + '.TSHS') if isfile(f): return tshsSileSiesta(f).read_fermi_level() return None def _r_fermi_level_eig(self): f = self.dir_file(self.get('SystemLabel', default='siesta') + '.EIG') if isfile(f): return eigSileSiesta(f).read_fermi_level() return None def read_dynamical_matrix(self, *args, **kwargs): """ Read dynamical matrix from output of the calculation Generally the mass is stored in the basis information output, but for dynamical matrices it makes sense to let the user control this, e.g. through the fdf file. By default the mass will be read from the AtomicMass key in the fdf file and _not_ from the basis set information. Parameters ---------- order: list of str, optional the order of which to try and read the dynamical matrix. By default this is ``['nc', 'FC']``. cutoff_dist : float, optional cutoff value for the distance of the force-constants (everything farther than `cutoff_dist` will be set to 0 Ang). Default, no cutoff. cutoff : float, optional absolute values below the cutoff are considered 0. Defaults to 0. eV/Ang**2. 
trans_inv : bool, optional if true (default), the force-constant matrix will be fixed so that translational invariance will be enforced sum0 : bool, optional if true (default), the sum of forces on atoms for each displacement will be forced to 0. hermitian: bool, optional if true (default), the returned dynamical matrix will be hermitian Returns ------- dynamic_matrix : DynamicalMatrix the dynamical matrix """ order = _listify_str(kwargs.pop('order', ['nc', 'FC'])) for f in order: v = getattr(self, '_r_dynamical_matrix_{}'.format(f.lower()))(*args, **kwargs) if v is not None: if self.track: info(f"{self.file}(read_dynamical_matrix) found in file={f}") return v return None def _r_dynamical_matrix_fc(self, *args, **kwargs): FC = self.read_force_constant(*args, order="FC", **kwargs) if FC is None: return None geom = self.read_geometry() basis_fdf = self.read_basis(order="fdf") for i, atom in enumerate(basis_fdf): geom.atoms.replace(i, atom) # Get list of FC atoms FC_atoms = _a.arangei(self.get('MD.FCFirst', default=1) - 1, self.get('MD.FCLast', default=geom.na)) return self._dynamical_matrix_from_fc(geom, FC, FC_atoms, *args, **kwargs) def _r_dynamical_matrix_nc(self, *args, **kwargs): FC = self.read_force_constant(*args, order=['nc'], **kwargs) if FC is None: return None geom = self.read_geometry(order=['nc']) basis_fdf = self.read_basis(order="fdf") for i, atom in enumerate(basis_fdf): geom.atoms.replace(i, atom) # Get list of FC atoms # TODO change to read in from the NetCDF file FC_atoms = _a.arangei(self.get('MD.FCFirst', default=1) - 1, self.get('MD.FCLast', default=geom.na)) return self._dynamical_matrix_from_fc(geom, FC, FC_atoms, *args, **kwargs) def _dynamical_matrix_from_fc(self, geom, FC, FC_atoms, *args, **kwargs): # We have the force constant matrix. # Now handle it... # FC(OLD) = (n_displ, 3, 2, na, 3) # FC(NEW) = (n_displ, 3, na, 3) # In fact, after averaging this becomes the Hessian FC = FC.sum(axis=2) * 0.5 hermitian = kwargs.get("hermitian", True) # Figure out the "original" periodic directions periodic = geom.nsc > 1 # Create conversion from eV/Ang^2 to correct units # Further down we are multiplying with [1 / amu] scale = constant.hbar / units('Ang', 'm') / units('eV amu', 'J kg') ** 0.5 # Cut-off too small values fc_cut = kwargs.get('cutoff', 0.) FC = np.where(np.abs(FC) > fc_cut, FC, 0.) # Convert the force constant such that a diagonalization returns eV ^ 2 # FC is in [eV / Ang^2] # Convert the geometry to contain 3 orbitals per atom (x, y, z) R = kwargs.get('cutoff_dist', -2.) orbs = [Orbital(R / 2, tag=tag) for tag in 'xyz'] with warnings.catch_warnings(): warnings.simplefilter("ignore") for atom, _ in geom.atoms.iter(True): new_atom = atom.__class__(atom.Z, orbs, mass=atom.mass, tag=atom.tag) geom.atoms.replace(atom, new_atom) # Figure out the supercell indices supercell = kwargs.get('supercell', [1, 1, 1]) if supercell is False: supercell = [1] * 3 elif supercell is True: _, supercell = geom.as_primary(FC.shape[0], ret_super=True) info("{}.read_dynamical_matrix(FC) guessed on a [{}, {}, {}] " "supercell calculation.".format(str(self), *supercell)) # Convert to integer array supercell = _a.asarrayi(supercell) # Reshape to supercell FC.shape = (FC.shape[0], 3, *supercell, -1, 3) na_fc = len(FC_atoms) assert FC.shape[0] == len(FC_atoms) assert FC.shape[5] == len(geom) // np.prod(supercell) # Now we are in a problem since the tiling of the geometry # is not necessarily in x, y, z order. # Say for users who did: # geom.tile(*, 2).tile(*, 1).tile(*, 0).write(...) 
# then we need to pivot the data to be consistent with the # supercell information if np.any(supercell > 1): # Re-arange FC before we use _fc_correct # Now we need to figure out how the atoms are laid out. # It *MUST* either be repeated or tiled (preferentially tiled). # We have an actual supercell. Lets try and fix it. # First lets recreate the smallest geometry sc = geom.sc.cell.copy() sc[0, :] /= supercell[0] sc[1, :] /= supercell[1] sc[2, :] /= supercell[2] # Ensure nsc is at least an odd number, later down we will symmetrize the FC matrix nsc = supercell + (supercell + 1) % 2 if R > 0: # Correct for the optional radius sc_norm = fnorm(sc) # R is already "twice" the "orbital" range nsc_R = 1 + 2 * np.ceil(R / sc_norm).astype(np.int32) for i in range(3): nsc[i] = min(nsc[i], nsc_R[i]) del nsc_R # Construct the minimal unit-cell geometry sc = SuperCell(sc, nsc=nsc) # TODO check that the coordinates are in the cell geom_small = Geometry(geom.xyz[FC_atoms], geom.atoms[FC_atoms], sc) # Convert the big geometry's coordinates to fractional coordinates of the small unit-cell. isc_xyz = (geom.xyz.dot(geom_small.sc.icell.T) - np.tile(geom_small.fxyz, (np.product(supercell), 1))) axis_tiling = [] offset = len(geom_small) for _ in (supercell > 1).nonzero()[0]: first_isc = (np.around(isc_xyz[FC_atoms + offset, :]) == 1.).sum(0) axis_tiling.append(np.argmax(first_isc)) # Fix the offset offset *= supercell[axis_tiling[-1]] while len(axis_tiling) < 3: for i in range(3): if not i in axis_tiling: axis_tiling.append(i) # Now we have the tiling operation, check it sort of matches geom_tile = geom_small.copy() for axis in axis_tiling: geom_tile = geom_tile.tile(supercell[axis], axis) # Proximity check of 0.01 Ang (TODO add this as an argument) if not np.allclose(geom_tile.xyz, geom.xyz, rtol=0, atol=0.01): raise SislError(f"{str(self)}.read_dynamical_matrix(FC) could " "not figure out the tiling method for the supercell") # Convert the FC matrix to a "rollable" matrix # This will make it easier to symmetrize # 0. displaced atoms # 1. x, y, z (displacements) # 2. tile-axis_tiling[0] # 3. tile-axis_tiling[1] # 4. tile-axis_tiling[2] # 5. na # 6. x, y, z (force components) FC.shape = (na_fc, 3, *supercell[axis_tiling], na_fc, 3) # Now swap the [2, 3, 4] dimensions so that we get in order of lattice vectors swap = np.array([2, 3, 4])[axis_tiling] swap = (0, 1, *swap, 5, 6) FC = np.transpose(FC, swap) del axis_tiling # Now FC is sorted according to the supercell tiling # TODO this will probably fail if: FC_atoms.size != FC.shape[5] from ._help import _fc_correct FC = _fc_correct(FC, trans_inv=kwargs.get("trans_inv", True), sum0=kwargs.get("sum0", True), hermitian=hermitian) # Remove ghost-atoms or atoms with 0 mass! 
# TODO check if ghost-atoms should be taken into account in _fc_correct idx = (geom.atoms.mass == 0.).nonzero()[0] if len(idx) > 0: FC = np.delete(FC, idx, axis=5) geom = geom.remove(idx) geom.set_nsc([1] * 3) raise NotImplementedError(f"{self}.read_dynamical_matrix could not reduce geometry " "since there are atoms with 0 mass.") # Now we can build the dynamical matrix (it will always be real) na = len(geom) if np.all(supercell <= 1): # also catches supercell == 0 D = sp.sparse.lil_matrix((geom.no, geom.no), dtype=np.float64) FC = np.squeeze(FC, axis=(2, 3, 4)) # Instead of doing the sqrt in all D = FC (below) we do it here m = scale / geom.atoms.mass ** 0.5 FC *= m[FC_atoms].reshape(-1, 1, 1, 1) * m.reshape(1, 1, -1, 1) j_FC_atoms = FC_atoms idx = _a.arangei(len(FC_atoms)) for ia, fia in enumerate(FC_atoms): if R > 0: # find distances between the other atoms to cut-off the distance idx = geom.close(fia, R=R, atoms=FC_atoms) idx = indices_only(FC_atoms, idx) j_FC_atoms = FC_atoms[idx] for ja, fja in zip(idx, j_FC_atoms): D[ia*3:(ia+1)*3, ja*3:(ja+1)*3] = FC[ia, :, fja, :] else: geom = geom_small if np.any(np.diff(FC_atoms) != 1): raise SislError(f"{self}.read_dynamical_matrix(FC) requires the FC atoms to be consecutive!") # Re-order FC matrix so the FC-atoms are first if FC.shape[0] != FC.shape[5]: ordered = _a.arangei(FC.shape[5]) ordered = np.concatenate(FC_atoms, np.delete(ordered, FC_atoms)) FC = FC[:, :, :, :, :, ordered, :] FC_atoms = _a.arangei(len(FC_atoms)) if FC_atoms[0] != 0: # TODO we could roll the axis such that the displaced atoms moves into the # first elements raise SislError(f"{self}.read_dynamical_matrix(FC) requires the displaced atoms to start from 1!") # After having done this we can easily mass scale all FC components m = scale / geom.atoms.mass ** 0.5 FC *= m.reshape(-1, 1, 1, 1, 1, 1, 1) * m.reshape(1, 1, 1, 1, 1, -1, 1) # Check whether we need to "halve" the equivalent supercell # This will be present in calculations done on an even number of supercells. # I.e. for 4 supercells # [0] [1] [2] [3] # where in the supercell approach: # *[2] [3] [0] [1] *[2] # I.e. since we are double counting [2] we will halve it. # This is not *exactly* true because depending on the range one should do the symmetry operations. # However the FC does not contain such symmetry considerations. 
for i in range(3): if supercell[i] % 2 == 1: # We don't need to do anything continue # Figure out the supercell to halve halve_idx = supercell[i] // 2 if i == 0: FC[:, :, halve_idx, :, :, :, :] *= 0.5 elif i == 1: FC[:, :, :, halve_idx, :, :, :] *= 0.5 else: FC[:, :, :, :, halve_idx, :, :] *= 0.5 # Now create the dynamical matrix # Currently this will be in lil_matrix (changed in the end) D = sp.sparse.lil_matrix((geom.no, geom.no_s), dtype=np.float64) # When x, y, z are negative we simply look-up from the back of the array # which is exactly what is required isc_off = geom.sc.isc_off nxyz, na = geom.no, geom.na dist = geom.rij # Now take all positive supercell connections (including inner cell) nsc = geom.nsc // 2 list_nsc = [range(-x, x + 1) for x in nsc] iter_FC_atoms = _a.arangei(len(FC_atoms)) iter_j_FC_atoms = iter_FC_atoms for x, y, z in itools.product(*list_nsc): isc = isc_off[x, y, z] aoff = isc * na joff = isc * nxyz for ia in iter_FC_atoms: # Reduce second loop based on radius cutoff if R > 0: iter_j_FC_atoms = iter_FC_atoms[dist(ia, aoff + iter_FC_atoms) <= R] for ja in iter_j_FC_atoms: D[ia*3:(ia+1)*3, joff+ja*3:joff+(ja+1)*3] = FC[ia, :, x, y, z, ja, :] D = D.tocsr() # Remove all zeros D.eliminate_zeros() D = DynamicalMatrix.fromsp(geom, D) if hermitian: D.finalize() D = (D + D.transpose()) * 0.5 return D def read_geometry(self, output=False, *args, **kwargs): """ Returns Geometry object by reading fdf or Siesta output related files. One can limit the tried files to only one file by passing only a single file ending. Parameters ---------- output: bool, optional whether to read geometry from output files (default to read from the fdf file). order: list of str, optional the order of which to try and read the geometry. By default this is ``['XV', 'nc', 'fdf', 'TSHS']`` if `output` is true If `order` is present `output` is disregarded. Examples -------- >>> fdf = get_sile('RUN.fdf') >>> fdf.read_geometry() # read from fdf >>> fdf.read_geometry(True) # read from [XV, nc, fdf] >>> fdf.read_geometry(order=['nc']) # read from [nc] >>> fdf.read_geometry(True, order=['nc']) # read from [nc] """ ## # NOTE # When adding more capabilities please check the read_geometry(True, order=...) in this # code to correct. ## if output: order = _listify_str(kwargs.pop('order', ['XV', 'nc', 'fdf', 'TSHS'])) else: order = _listify_str(kwargs.pop('order', ['fdf'])) for f in order: v = getattr(self, '_r_geometry_{}'.format(f.lower()))(*args, **kwargs) if v is not None: if self.track: info(f"{self.file}(read_geometry) found in file={f}") return v return None def _r_geometry_xv(self, *args, **kwargs): """ Returns `SuperCell` object from the FDF file """ f = self.dir_file(self.get('SystemLabel', default='siesta') + '.XV') geom = None if f.is_file(): basis = self.read_basis() if basis is None: geom = xvSileSiesta(f).read_geometry() else: geom = xvSileSiesta(f).read_geometry(species_Z=True) with warnings.catch_warnings(): warnings.simplefilter('ignore') for atom, _ in geom.atoms.iter(True): geom.atoms.replace(atom, basis[atom.Z-1]) geom.reduce() nsc = self.read_supercell_nsc() geom.set_nsc(nsc) return geom def _r_geometry_nc(self): # Read geometry from <>.nc file f = self.dir_file(self.get('SystemLabel', default='siesta') + '.nc') if f.is_file(): return ncSileSiesta(f).read_geometry() return None def _r_geometry_tshs(self): # Read geometry from <>.TSHS file f = self.dir_file(self.get('SystemLabel', default='siesta') + '.TSHS') if f.is_file(): # Default to a geometry with the correct atomic numbers etc. 
return tshsSileSiesta(f).read_geometry(geometry=self.read_geometry(False)) return None def _r_geometry_fdf(self, *args, **kwargs): """ Returns Geometry object from the FDF file NOTE: Interaction range of the Atoms are currently not read. """ sc = self.read_supercell(order='fdf') # No fractional coordinates is_frac = False # Read atom scaling lc = self.get('AtomicCoordinatesFormat', default='Bohr').lower() if 'ang' in lc or 'notscaledcartesianang' in lc: s = 1. elif 'bohr' in lc or 'notscaledcartesianbohr' in lc: s = Bohr2Ang elif 'scaledcartesian' in lc: # the same scaling as the lattice-vectors s = self.get('LatticeConstant', unit='Ang') elif 'fractional' in lc or 'scaledbylatticevectors' in lc: # no scaling of coordinates as that is entirely # done by the latticevectors s = 1. is_frac = True # If the user requests a shifted geometry # we correct for this origo = _a.zerosd([3]) lor = self.get('AtomicCoordinatesOrigin') if lor: if kwargs.get('origin', True): if isinstance(lor, str): origo = lor.lower() else: origo = _a.asarrayd(list(map(float, lor[0].split()[:3]))) * s # Origo cannot be interpreted with fractional coordinates # hence, it is not transformed. # Read atom block atms = self.get('AtomicCoordinatesAndAtomicSpecies') if atms is None: raise SileError('AtomicCoordinatesAndAtomicSpecies block could not be found') # Read number of atoms and block # We default to the number of elements in the # AtomicCoordinatesAndAtomicSpecies block na = self.get('NumberOfAtoms', default=len(atms)) # Reduce space if number of atoms specified if na < len(atms): # align number of atoms and atms array atms = atms[:na] elif na > len(atms): raise SileError('NumberOfAtoms is larger than the atoms defined in the blocks') elif na == 0: raise SileError('NumberOfAtoms has been determined to be zero, no atoms.') # Create array xyz = _a.emptyd([na, 3]) species = _a.emptyi([na]) for ia in range(na): l = atms[ia].split() xyz[ia, :] = [float(k) for k in l[:3]] species[ia] = int(l[3]) - 1 if is_frac: xyz = np.dot(xyz, sc.cell) xyz *= s # Read the block (not strictly needed, if so we simply set all atoms to H) atoms = self.read_basis() if atoms is None: warn(SileWarning('Block ChemicalSpeciesLabel does not exist, cannot determine the basis (all Hydrogen).')) # Default atom (hydrogen) atoms = Atom(1) else: atoms = [atoms[i] for i in species] atoms = Atoms(atoms, na=len(xyz)) if isinstance(origo, str): opt = origo if opt.startswith('cop'): origo = sc.cell.sum(0) * 0.5 - np.average(xyz, 0) elif opt.startswith('com'): # TODO for ghost atoms its mass should not be used w = atom.mass w /= w.sum() origo = sc.cell.sum(0) * 0.5 - np.average(xyz, 0, weights=w) elif opt.startswith('min'): origo = - np.amin(xyz, 0) if len(opt) > 4: opt = opt[4:] if opt == 'x': origo[1:] = 0. elif opt == 'y': origo[[0, 2]] = 0. elif opt == 'z': origo[:2] = 0. elif opt == 'xy' or opt == 'yx': origo[2] = 0. elif opt == 'xz' or opt == 'zx': origo[1] = 0. elif opt == 'yz' or opt == 'zy': origo[0] = 0. xyz += origo # Create and return geometry object return Geometry(xyz, atoms, sc=sc) def read_grid(self, name, *args, **kwargs): """ Read grid related information from any of the output files The order of the readed data is shown below. One can limit the tried files to only one file by passing only a single file ending. Parameters ---------- name : str name of data to read. The list of names correspond to the Siesta output manual (Rho, TotalPotential, etc.), the strings are case insensitive. 
order: list of str, optional the order of which to try and read the geometry. By default this is ``['nc', 'grid.nc', 'bin']`` (bin refers to the binary files) """ order = _listify_str(kwargs.pop('order', ['nc', 'grid.nc', 'bin'])) for f in order: v = getattr(self, '_r_grid_{}'.format(f.lower().replace('.', '_')))(name, *args, **kwargs) if v is not None: if self.track: info(f"{self.file}(read_grid) found in file={f}") return v return None def _r_grid_nc(self, name, *args, **kwargs): # Read grid from the <>.nc file f = self.dir_file(self.get('SystemLabel', default='siesta') + '.nc') if f.is_file(): # Capitalize correctly name = {'rho': 'Rho', 'rhoinit': 'RhoInit', 'vna': 'Vna', 'ioch': 'Chlocal', 'chlocal': 'Chlocal', 'toch': 'RhoTot', 'totalcharge': 'RhoTot', 'rhotot': 'RhoTot', 'drho': 'RhoDelta', 'deltarho': 'RhoDelta', 'rhodelta': 'RhoDelta', 'vh': 'Vh', 'electrostaticpotential': 'Vh', 'rhoxc': 'RhoXC', 'vt': 'Vt', 'totalpotential': 'Vt', 'bader': 'RhoBader', 'baderrho': 'RhoBader', 'rhobader': 'RhoBader' }.get(name.lower()) return ncSileSiesta(f).read_grid(name, **kwargs) return None def _r_grid_grid_nc(self, name, *args, **kwargs): # Read grid from the <>.nc file name = {'rho': 'Rho', 'rhoinit': 'RhoInit', 'vna': 'Vna', 'ioch': 'Chlocal', 'chlocal': 'Chlocal', 'toch': 'TotalCharge', 'totalcharge': 'TotalCharge', 'rhotot': 'TotalCharge', 'drho': 'DeltaRho', 'deltarho': 'DeltaRho', 'rhodelta': 'DeltaRho', 'vh': 'ElectrostaticPotential', 'electrostaticpotential': 'ElectrostaticPotential', 'rhoxc': 'RhoXC', 'vt': 'TotalPotential', 'totalpotential': 'TotalPotential', 'bader': 'BaderCharge', 'baderrho': 'BaderCharge', 'rhobader': 'BaderCharge' }.get(name.lower()) + '.grid.nc' f = self.dir_file(name) if f.is_file(): grid = gridncSileSiesta(f).read_grid(*args, **kwargs) grid.set_geometry(self.read_geometry(True)) return grid return None def _r_grid_bin(self, name, *args, **kwargs): # Read grid from the <>.VT/... file name = {'rho': '.RHO', 'rhoinit': '.RHOINIT', 'vna': '.VNA', 'ioch': '.IOCH', 'chlocal': '.IOCH', 'toch': '.TOCH', 'totalcharge': '.TOCH', 'rhotot': '.TOCH', 'drho': '.DRHO', 'deltarho': '.DRHO', 'rhodelta': '.DRHO', 'vh': '.VH', 'electrostaticpotential': '.VH', 'rhoxc': '.RHOXC', 'vt': '.VT', 'totalpotential': '.VT', 'bader': '.BADER', 'baderrho': '.BADER', 'rhobader': '.BADER' }.get(name.lower()) f = self.dir_file(self.get('SystemLabel', default='siesta') + name) if f.is_file(): grid = get_sile_class(f)(f).read_grid(*args, **kwargs) grid.set_geometry(self.read_geometry(True)) return grid return None def read_basis(self, *args, **kwargs): """ Read the atomic species and figure out the number of atomic orbitals in their basis The order of the read is shown below. One can limit the tried files to only one file by passing only a single file ending. Parameters ---------- order: list of str, optional the order of which to try and read the basis information. 
By default this is ``['nc', 'ion', 'ORB_INDX', 'fdf']`` """ order = _listify_str(kwargs.pop('order', ['nc', 'ion', 'ORB_INDX', 'fdf'])) for f in order: v = getattr(self, '_r_basis_{}'.format(f.lower()))(*args, **kwargs) if v is not None: if self.track: info(f"{self.file}(read_basis) found in file={f}") return v return None def _r_basis_nc(self): # Read basis from <>.nc file f = self.dir_file(self.get('SystemLabel', default='siesta') + '.nc') if f.is_file(): return ncSileSiesta(f).read_basis() return None def _r_basis_ion(self): # Read basis from <>.ion.nc file or <>.ion.xml spcs = self.get('ChemicalSpeciesLabel') if spcs is None: # We haven't found the chemical and species label # so return nothing return None # Now spcs contains the block of the chemicalspecieslabel atoms = [None] * len(spcs) found_one = False found_all = True for spc in spcs: idx, Z, lbl = spc.split()[:3] idx = int(idx) - 1 # F-indexing Z = int(Z) lbl = lbl.strip() f = self.dir_file(lbl + ".ext") # now try and read the basis if f.with_suffix('.ion.nc').is_file(): atoms[idx] = ionncSileSiesta(f.with_suffix('.ion.nc')).read_basis() found_one = True elif f.with_suffix('.ion.xml').is_file(): atoms[idx] = ionxmlSileSiesta(f.with_suffix('.ion.xml')).read_basis() found_one = True else: # default the atom to not have a range, and no associated orbitals atoms[idx] = Atom(Z=Z, tag=lbl) found_all = False if found_one and not found_all: warn("Siesta basis information could not read all ion.nc/ion.xml files. " "Only a subset of the basis information is accessible.") elif not found_one: return None return atoms def _r_basis_orb_indx(self): f = self.dir_file(self.get('SystemLabel', default='siesta') + '.ORB_INDX') if f.is_file(): info(f"Siesta basis information is read from {f}, the radial functions are not accessible.") return orbindxSileSiesta(f).read_basis(atoms=self._r_basis_fdf()) return None def _r_basis_fdf(self): # Read basis from fdf file spcs = self.get('ChemicalSpeciesLabel') if spcs is None: # We haven't found the chemical and species label # so return nothing return None all_mass = self.get('AtomicMass', default=[]) # default mass mass = None # Now spcs contains the block of the chemicalspecieslabel atoms = [None] * len(spcs) for spc in spcs: idx, Z, lbl = spc.split()[:3] idx = int(idx) - 1 # F-indexing Z = int(Z) lbl = lbl.strip() if len(all_mass) > 0: for mass_line in all_mass: s, mass = mass_line.split() if int(s) - 1 == idx: mass = float(mass) break else: mass = None atoms[idx] = Atom(Z=Z, mass=mass, tag=lbl) return atoms def _r_add_overlap(self, parent_call, M): """ Internal routine to ensure that the overlap matrix is read and added to the matrix `M` """ try: S = self.read_overlap() # Check for the same sparsity pattern if np.all(M._csr.col == S._csr.col): M._csr._D[:, -1] = S._csr._D[:, 0] else: raise ValueError except: warn(str(self) + f' could not succesfully read the overlap matrix in {parent_call}.') def read_density_matrix(self, *args, **kwargs): """ Try and read density matrix by reading the <>.nc, <>.TSDE files, <>.DM (in that order) One can limit the tried files to only one file by passing only a single file ending. Parameters ---------- order: list of str, optional the order of which to try and read the density matrix By default this is ``['nc', 'TSDE', 'DM']``. 
""" order = _listify_str(kwargs.pop('order', ['nc', 'TSDE', 'DM'])) for f in order: DM = getattr(self, '_r_density_matrix_{}'.format(f.lower()))(*args, **kwargs) if DM is not None: _track(self.read_density_matrix, f"found file {f}") return DM return None def _r_density_matrix_nc(self, *args, **kwargs): """ Try and read the density matrix by reading the <>.nc """ f = self.dir_file(self.get('SystemLabel', default='siesta') + '.nc') _track_file(self._r_density_matrix_nc, f) DM = None if f.is_file(): # this *should* also contain the overlap matrix DM = ncSileSiesta(f).read_density_matrix(*args, **kwargs) return DM def _r_density_matrix_tsde(self, *args, **kwargs): """ Read density matrix from the TSDE file """ f = self.dir_file(self.get('SystemLabel', default='siesta') + '.TSDE') _track_file(self._r_density_matrix_tsde, f) DM = None if f.is_file(): if 'geometry' not in kwargs: # to ensure we get the correct orbital count kwargs['geometry'] = self.read_geometry(True, order=['nc', 'TSHS', 'fdf']) DM = tsdeSileSiesta(f).read_density_matrix(*args, **kwargs) self._r_add_overlap('_r_density_matrix_tsde', DM) return DM def _r_density_matrix_dm(self, *args, **kwargs): """ Read density matrix from the DM file """ f = self.dir_file(self.get('SystemLabel', default='siesta') + '.DM') _track_file(self._r_density_matrix_dm, f) DM = None if f.is_file(): if 'geometry' not in kwargs: # to ensure we get the correct orbital count kwargs['geometry'] = self.read_geometry(True, order=['nc', 'TSHS', 'fdf']) DM = dmSileSiesta(f).read_density_matrix(*args, **kwargs) self._r_add_overlap('_r_density_matrix_dm', DM) return DM def read_energy_density_matrix(self, *args, **kwargs): """ Try and read energy density matrix by reading the <>.nc or <>.TSDE files (in that order) One can limit the tried files to only one file by passing only a single file ending. Parameters ---------- order: list of str, optional the order of which to try and read the density matrix By default this is ``['nc', 'TSDE']``. """ order = _listify_str(kwargs.pop('order', ['nc', 'TSDE'])) for f in order: EDM = getattr(self, '_r_energy_density_matrix_{}'.format(f.lower()))(*args, **kwargs) if EDM is not None: _track(self.read_energy_density_matrix, f"found file {f}") return EDM return None def _r_energy_density_matrix_nc(self, *args, **kwargs): """ Read energy density matrix by reading the <>.nc """ f = self.dir_file(self.get('SystemLabel', default='siesta') + '.nc') _track_file(self._r_energy_density_matrix_nc, f) if f.is_file(): return ncSileSiesta(f).read_energy_density_matrix(*args, **kwargs) return None def _r_energy_density_matrix_tsde(self, *args, **kwargs): """ Read energy density matrix from the TSDE file """ f = self.dir_file(self.get('SystemLabel', default='siesta') + '.TSDE') _track_file(self._r_energy_density_matrix_tsde, f) EDM = None if f.is_file(): if 'geometry' not in kwargs: # to ensure we get the correct orbital count kwargs['geometry'] = self.read_geometry(True, order=['nc', 'TSHS']) EDM = tsdeSileSiesta(f).read_energy_density_matrix(*args, **kwargs) self._r_add_overlap('_r_energy_density_matrix_tsde', EDM) return EDM def read_overlap(self, *args, **kwargs): """ Try and read the overlap matrix by reading the <>.nc, <>.TSHS files, <>.HSX, <>.onlyS (in that order) One can limit the tried files to only one file by passing only a single file ending. Parameters ---------- order: list of str, optional the order of which to try and read the overlap matrix By default this is ``['nc', 'TSHS', 'HSX', 'onlyS']``. 
""" order = _listify_str(kwargs.pop('order', ['nc', 'TSHS', 'HSX', 'onlyS'])) for f in order: v = getattr(self, '_r_overlap_{}'.format(f.lower()))(*args, **kwargs) if v is not None: _track(self.read_overlap, f"found file {f}") return v return None def _r_overlap_nc(self, *args, **kwargs): """ Read overlap from the nc file """ f = self.dir_file(self.get('SystemLabel', default='siesta') + '.nc') _track_file(self._r_overlap_nc, f) if f.is_file(): return ncSileSiesta(f).read_overlap(*args, **kwargs) return None def _r_overlap_tshs(self, *args, **kwargs): """ Read overlap from the TSHS file """ f = self.dir_file(self.get('SystemLabel', default='siesta') + '.TSHS') _track_file(self._r_overlap_tshs, f) S = None if f.is_file(): if 'geometry' not in kwargs: # to ensure we get the correct orbital count kwargs['geometry'] = self.read_geometry(True, order=['nc', 'TSHS']) S = tshsSileSiesta(f).read_overlap(*args, **kwargs) return S def _r_overlap_hsx(self, *args, **kwargs): """ Read overlap from the HSX file """ f = self.dir_file(self.get('SystemLabel', default='siesta') + '.HSX') _track_file(self._r_overlap_hsx, f) S = None if f.is_file(): if 'geometry' not in kwargs: # to ensure we get the correct orbital count kwargs['geometry'] = self.read_geometry(True, order=['nc', 'TSHS', 'fdf']) S = hsxSileSiesta(f).read_overlap(*args, **kwargs) return S def _r_overlap_onlys(self, *args, **kwargs): """ Read overlap from the onlyS file """ f = self.dir_file(self.get('SystemLabel', default='siesta') + '.onlyS') _track_file(self._r_overlap_onlys, f) S = None if f.is_file(): if 'geometry' not in kwargs: # to ensure we get the correct orbital count kwargs['geometry'] = self.read_geometry(True, order=['nc', 'TSHS', 'fdf']) S = onlysSileSiesta(f).read_overlap(*args, **kwargs) return S def read_hamiltonian(self, *args, **kwargs): """ Try and read the Hamiltonian by reading the <>.nc, <>.TSHS files, <>.HSX (in that order) One can limit the tried files to only one file by passing only a single file ending. Parameters ---------- order: list of str, optional the order of which to try and read the Hamiltonian. By default this is ``['nc', 'TSHS', 'HSX']``. 
""" order = _listify_str(kwargs.pop('order', ['nc', 'TSHS', 'HSX'])) for f in order: H = getattr(self, '_r_hamiltonian_{}'.format(f.lower()))(*args, **kwargs) if H is not None: _track(self.read_hamiltonian, f"found file {f}") return H return None def _r_hamiltonian_nc(self, *args, **kwargs): """ Read Hamiltonian from the nc file """ f = self.dir_file(self.get('SystemLabel', default='siesta') + '.nc') _track_file(self._r_hamiltonian_nc, f) if f.is_file(): return ncSileSiesta(f).read_hamiltonian(*args, **kwargs) return None def _r_hamiltonian_tshs(self, *args, **kwargs): """ Read Hamiltonian from the TSHS file """ f = self.dir_file(self.get('SystemLabel', default='siesta') + '.TSHS') _track_file(self._r_hamiltonian_tshs, f) H = None if f.is_file(): if 'geometry' not in kwargs: # to ensure we get the correct orbital count kwargs['geometry'] = self.read_geometry(True, order=['nc', 'TSHS']) H = tshsSileSiesta(f).read_hamiltonian(*args, **kwargs) return H def _r_hamiltonian_hsx(self, *args, **kwargs): """ Read Hamiltonian from the HSX file """ f = self.dir_file(self.get('SystemLabel', default='siesta') + '.HSX') _track_file(self._r_hamiltonian_hsx, f) H = None if f.is_file(): if 'geometry' not in kwargs: # to ensure we get the correct orbital count kwargs['geometry'] = self.read_geometry(True, order=['nc', 'TSHS', 'fdf']) H = hsxSileSiesta(f).read_hamiltonian(*args, **kwargs) Ef = self.read_fermi_level() if Ef is None: info(f"{str(self)}.read_hamiltonian from HSX file failed shifting to the Fermi-level.") else: H.shift(-Ef) return H @default_ArgumentParser(description="Manipulate a FDF file.") def ArgumentParser(self, p=None, *args, **kwargs): """ Returns the arguments that is available for this Sile """ import argparse # We must by-pass this fdf-file for importing import sisl.io.siesta as sis # The fdf parser is more complicated # It is based on different settings based on the sp = p.add_subparsers(help="Determine which part of the fdf-file that should be processed.") # Get the label which retains all the sub-modules label = self.get('SystemLabel', default='siesta') f_label = label + ".ext" def label_file(suffix): return self.dir_file(f_label).with_suffix(suffix) # The default on all sub-parsers are the retrieval and setting d = { '_fdf': self, '_fdf_first': True, } namespace = default_namespace(**d) ep = sp.add_parser('edit', help='Change or read and print data from the fdf file') # As the fdf may provide additional stuff, we do not add EVERYTHING from # the Geometry class. class FDFAdd(argparse.Action): def __call__(self, parser, ns, values, option_string=None): key = values[0] val = values[1] if ns._fdf_first: # Append to the end of the file with ns._fdf as fd: fd.write('\n\n# SISL added keywords\n') setattr(ns, '_fdf_first', False) ns._fdf.set(key, val) ep.add_argument('--set', '-s', nargs=2, metavar=('KEY', 'VALUE'), action=FDFAdd, help='Add a key to the FDF file. 
If it already exists it will be overwritten') class FDFGet(argparse.Action): def __call__(self, parser, ns, value, option_string=None): # Retrieve the value in standard units # Currently, we write out the unit "as-is" val = ns._fdf.get(value[0], with_unit=True) if val is None: print(f'# {value[0]} is currently not in the FDF file ') return if isinstance(val, tuple): print(ns._fdf.print(value[0], '{} {}'.format(*val))) else: print(ns._fdf.print(value[0], val)) ep.add_argument('--get', '-g', nargs=1, metavar='KEY', action=FDFGet, help='Print (to stdout) the value of the key in the FDF file.') # If the XV file exists, it has precedence # of the contained geometry (we will issue # a warning in that case) f = label_file('.XV') try: geom = self.read_geometry(True) tmp_p = sp.add_parser('geom', help="Edit the contained geometry in the file") tmp_p, tmp_ns = geom.ArgumentParser(tmp_p, *args, **kwargs) namespace = merge_instances(namespace, tmp_ns) except: # Allowed pass due to pythonic reading pass f = label_file('.bands') if f.is_file(): tmp_p = sp.add_parser('band', help="Manipulate bands file from the Siesta simulation") tmp_p, tmp_ns = sis.bandsSileSiesta(f).ArgumentParser(tmp_p, *args, **kwargs) namespace = merge_instances(namespace, tmp_ns) f = label_file('.PDOS.xml') if f.is_file(): tmp_p = sp.add_parser('pdos', help="Manipulate PDOS.xml file from the Siesta simulation") tmp_p, tmp_ns = sis.pdosSileSiesta(f).ArgumentParser(tmp_p, *args, **kwargs) namespace = merge_instances(namespace, tmp_ns) f = label_file('.EIG') if f.is_file(): tmp_p = sp.add_parser('eig', help="Manipulate EIG file from the Siesta simulation") tmp_p, tmp_ns = sis.eigSileSiesta(f).ArgumentParser(tmp_p, *args, **kwargs) namespace = merge_instances(namespace, tmp_ns) #f = label + '.FA' #if isfile(f): # tmp_p = sp.add_parser('force', # help="Manipulate FA file from the Siesta simulation") # tmp_p, tmp_ns = sis.faSileSiesta(f).ArgumentParser(tmp_p, *args, **kwargs) # namespace = merge_instances(namespace, tmp_ns) f = label_file('.TBT.nc') if f.is_file(): tmp_p = sp.add_parser('tbt', help="Manipulate tbtrans output file") tmp_p, tmp_ns = sis.tbtncSileSiesta(f).ArgumentParser(tmp_p, *args, **kwargs) namespace = merge_instances(namespace, tmp_ns) f = label_file('.TBT.Proj.nc') if f.is_file(): tmp_p = sp.add_parser('tbt-proj', help="Manipulate tbtrans projection output file") tmp_p, tmp_ns = sis.tbtprojncSileSiesta(f).ArgumentParser(tmp_p, *args, **kwargs) namespace = merge_instances(namespace, tmp_ns) f = label_file('.PHT.nc') if f.is_file(): tmp_p = sp.add_parser('pht', help="Manipulate the phtrans output file") tmp_p, tmp_ns = sis.phtncSileSiesta(f).ArgumentParser(tmp_p, *args, **kwargs) namespace = merge_instances(namespace, tmp_ns) f = label_file('.PHT.Proj.nc') if f.is_file(): tmp_p = sp.add_parser('pht-proj', help="Manipulate phtrans projection output file") tmp_p, tmp_ns = sis.phtprojncSileSiesta(f).ArgumentParser(tmp_p, *args, **kwargs) namespace = merge_instances(namespace, tmp_ns) f = label_file('.nc') if f.is_file(): tmp_p = sp.add_parser('nc', help="Manipulate Siesta NetCDF output file") tmp_p, tmp_ns = sis.ncSileSiesta(f).ArgumentParser(tmp_p, *args, **kwargs) namespace = merge_instances(namespace, tmp_ns) return p, namespace add_sile('fdf', fdfSileSiesta, case=False, gzip=True)
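A short, hedged usage sketch of the order-based fallback readers documented above. The file name RUN.fdf is hypothetical, and `sisl.get_sile` is assumed to be the generic way of opening the fdf file as an `fdfSileSiesta`; the `order` keywords shown are the ones listed in the docstrings above.

import sisl

# Hypothetical input file; any Siesta fdf sitting next to its output files works.
fdf = sisl.get_sile("RUN.fdf")

# Restrict the Hamiltonian read to the TSHS file only.
H = fdf.read_hamiltonian(order=["TSHS"])

# Density matrix: try the NetCDF output first, then fall back to TSDE and DM.
DM = fdf.read_density_matrix(order=["nc", "TSDE", "DM"])

# Grids accept a grid name plus the same kind of order keyword.
rho = fdf.read_grid("rho", order=["grid.nc", "bin"])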
import pytest pytestmark = pytest.mark.spin import math as m import numpy as np from sisl import Spin def test_spin1(): for val in ['unpolarized', '', Spin.UNPOLARIZED, 'polarized', 'p', Spin.POLARIZED, 'non-collinear', 'nc', Spin.NONCOLINEAR, 'spin-orbit', 'so', Spin.SPINORBIT]: s = Spin(val) str(s) s1 = s.copy() assert s == s1 def test_spin2(): s1 = Spin() s2 = Spin('p') s3 = Spin('nc') s4 = Spin('so') assert s1.kind == Spin.UNPOLARIZED assert s2.kind == Spin.POLARIZED assert s3.kind == Spin.NONCOLINEAR assert s4.kind == Spin.SPINORBIT assert s1 == s1.copy() assert s2 == s2.copy() assert s3 == s3.copy() assert s4 == s4.copy() assert s1 < s2 assert s2 < s3 assert s3 < s4 assert s1 <= s2 assert s2 <= s3 assert s3 <= s4 assert s2 > s1 assert s3 > s2 assert s4 > s3 assert s2 >= s1 assert s3 >= s2 assert s4 >= s3 assert s1.is_unpolarized assert not s1.is_polarized assert not s1.is_noncolinear assert not s1.is_spinorbit assert not s2.is_unpolarized assert s2.is_polarized assert not s2.is_noncolinear assert not s2.is_spinorbit assert not s3.is_unpolarized assert not s3.is_polarized assert s3.is_noncolinear assert not s3.is_spinorbit assert not s4.is_unpolarized assert not s4.is_polarized assert not s4.is_noncolinear assert s4.is_spinorbit def test_spin3(): with pytest.raises(ValueError): s = Spin('satoehus') def test_spin4(): s1 = Spin(Spin.UNPOLARIZED) S1 = Spin(Spin.UNPOLARIZED, np.complex64) s2 = Spin(Spin.POLARIZED) S2 = Spin(Spin.POLARIZED, np.complex64) s3 = Spin(Spin.NONCOLINEAR) S3 = Spin(Spin.NONCOLINEAR, np.complex64) s4 = Spin(Spin.SPINORBIT) S4 = Spin(Spin.SPINORBIT, np.complex64) assert s1 == S1 assert s2 == S2 assert s3 == S3 assert s4 == S4 # real comparison assert s1 < S2 assert s1 < S3 assert s1 < S4 assert s2 > S1 assert s2 < S3 assert s2 < S4 assert s3 > S1 assert s3 > S2 assert s3 < S4 assert s4 > S1 assert s4 > S2 assert s4 > S3 # complex complex assert S1 < S2 assert S1 < S3 assert S1 < S4 assert S2 > S1 assert S2 < S3 assert S2 < S4 assert S3 > S1 assert S3 > S2 assert S3 < S4 assert S4 > S1 assert S4 > S2 assert S4 > S3 # real comparison assert S1 < s2 assert S1 < s3 assert S1 < s4 assert S2 > s1 assert S2 < s3 assert S2 < s4 assert S3 > s1 assert S3 > s2 assert S3 < s4 assert S4 > s1 assert S4 > s2 assert S4 > s3 # complex complex assert S1 < s2 assert S1 < s3 assert S1 < s4 assert S2 > s1 assert S2 < s3 assert S2 < s4 assert S3 > s1 assert S3 > s2 assert S3 < s4 assert S4 > s1 assert S4 > s2 assert S4 > s3 def test_pauli(): # just grab the default spin S = Spin() # Create a fictituous wave-function sq2 = 2 ** .5 W = np.array([ [1/sq2, 1/sq2], # M_x = 1 [1/sq2, -1/sq2], # M_x = -1 [0.5 + 0.5j, 0.5 + 0.5j], # M_x = 1 [0.5 - 0.5j, -0.5 + 0.5j], # M_x = -1 [1/sq2, 1j/sq2], # M_y = 1 [1/sq2, -1j/sq2], # M_y = -1 [0.5 - 0.5j, 0.5 + 0.5j], # M_y = 1 [0.5 + 0.5j, 0.5 - 0.5j], # M_y = -1 [1, 0], # M_z = 1 [0, 1], # M_z = -1 ]) x = np.array([1, -1, 1, -1, 0, 0, 0, 0, 0, 0]) assert np.allclose(x, (np.conj(W)*S.X.dot(W.T).T).sum(1).real) y = np.array([0, 0, 0, 0, 1, -1, 1, -1, 0, 0]) assert np.allclose(y, (np.conj(W)*np.dot(S.Y, W.T).T).sum(1).real) z = np.array([0, 0, 0, 0, 0, 0, 0, 0, 1, -1]) assert np.allclose(z, (np.conj(W)*np.dot(S.Z, W.T).T).sum(1).real) def test_pickle(): import pickle as p S = Spin('nc') n = p.dumps(S) s = p.loads(n) assert S == s
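As a small illustration of what `test_pauli` verifies: the magnetization along a Cartesian axis is the spinor expectation value of the corresponding Pauli matrix. A minimal sketch in plain numpy (no sisl required; the Spin object above exposes the same matrices as S.X, S.Y, S.Z):

import numpy as np

sigma_x = np.array([[0, 1], [1, 0]], dtype=complex)
sigma_z = np.array([[1, 0], [0, -1]], dtype=complex)

up = np.array([1.0, 0.0], dtype=complex)                 # expect M_z = +1
plus_x = np.array([1.0, 1.0], dtype=complex) / 2 ** 0.5  # expect M_x = +1

# <psi| sigma_i |psi> for normalized spinors
assert np.isclose((np.conj(up) @ sigma_z @ up).real, 1.0)
assert np.isclose((np.conj(plus_x) @ sigma_x @ plus_x).real, 1.0)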
zerothi/sids
sisl/physics/tests/test_spin.py
sisl/io/siesta/fdf.py
"""Test deprecation and future warnings. """ import numpy as np from numpy.testing import assert_warns def test_qr_mode_full_future_warning(): """Check mode='full' FutureWarning. In numpy 1.8 the mode options 'full' and 'economic' in linalg.qr were deprecated. The release date will probably be sometime in the summer of 2013. """ a = np.eye(2) assert_warns(DeprecationWarning, np.linalg.qr, a, mode='full') assert_warns(DeprecationWarning, np.linalg.qr, a, mode='f') assert_warns(DeprecationWarning, np.linalg.qr, a, mode='economic') assert_warns(DeprecationWarning, np.linalg.qr, a, mode='e')
import platform import pytest from numpy import array from numpy.testing import assert_, assert_raises from . import util class TestReturnReal(util.F2PyTest): def check_function(self, t, tname): if tname in ['t0', 't4', 's0', 's4']: err = 1e-5 else: err = 0.0 assert_(abs(t(234) - 234.0) <= err) assert_(abs(t(234.6) - 234.6) <= err) assert_(abs(t('234') - 234) <= err) assert_(abs(t('234.6') - 234.6) <= err) assert_(abs(t(-234) + 234) <= err) assert_(abs(t([234]) - 234) <= err) assert_(abs(t((234,)) - 234.) <= err) assert_(abs(t(array(234)) - 234.) <= err) assert_(abs(t(array([234])) - 234.) <= err) assert_(abs(t(array([[234]])) - 234.) <= err) assert_(abs(t(array([234], 'b')) + 22) <= err) assert_(abs(t(array([234], 'h')) - 234.) <= err) assert_(abs(t(array([234], 'i')) - 234.) <= err) assert_(abs(t(array([234], 'l')) - 234.) <= err) assert_(abs(t(array([234], 'B')) - 234.) <= err) assert_(abs(t(array([234], 'f')) - 234.) <= err) assert_(abs(t(array([234], 'd')) - 234.) <= err) if tname in ['t0', 't4', 's0', 's4']: assert_(t(1e200) == t(1e300)) # inf #assert_raises(ValueError, t, array([234], 'S1')) assert_raises(ValueError, t, 'abc') assert_raises(IndexError, t, []) assert_raises(IndexError, t, ()) assert_raises(Exception, t, t) assert_raises(Exception, t, {}) try: r = t(10 ** 400) assert_(repr(r) in ['inf', 'Infinity'], repr(r)) except OverflowError: pass @pytest.mark.skipif( platform.system() == 'Darwin', reason="Prone to error when run with numpy/f2py/tests on mac os, " "but not when run in isolation") class TestCReturnReal(TestReturnReal): suffix = ".pyf" module_name = "c_ext_return_real" code = """ python module c_ext_return_real usercode \'\'\' float t4(float value) { return value; } void s4(float *t4, float value) { *t4 = value; } double t8(double value) { return value; } void s8(double *t8, double value) { *t8 = value; } \'\'\' interface function t4(value) real*4 intent(c) :: t4,value end function t8(value) real*8 intent(c) :: t8,value end subroutine s4(t4,value) intent(c) s4 real*4 intent(out) :: t4 real*4 intent(c) :: value end subroutine s8(t8,value) intent(c) s8 real*8 intent(out) :: t8 real*8 intent(c) :: value end end interface end python module c_ext_return_real """ @pytest.mark.parametrize('name', 't4,t8,s4,s8'.split(',')) def test_all(self, name): self.check_function(getattr(self.module, name), name) class TestF77ReturnReal(TestReturnReal): code = """ function t0(value) real value real t0 t0 = value end function t4(value) real*4 value real*4 t4 t4 = value end function t8(value) real*8 value real*8 t8 t8 = value end function td(value) double precision value double precision td td = value end subroutine s0(t0,value) real value real t0 cf2py intent(out) t0 t0 = value end subroutine s4(t4,value) real*4 value real*4 t4 cf2py intent(out) t4 t4 = value end subroutine s8(t8,value) real*8 value real*8 t8 cf2py intent(out) t8 t8 = value end subroutine sd(td,value) double precision value double precision td cf2py intent(out) td td = value end """ @pytest.mark.parametrize('name', 't0,t4,t8,td,s0,s4,s8,sd'.split(',')) def test_all(self, name): self.check_function(getattr(self.module, name), name) class TestF90ReturnReal(TestReturnReal): suffix = ".f90" code = """ module f90_return_real contains function t0(value) real :: value real :: t0 t0 = value end function t0 function t4(value) real(kind=4) :: value real(kind=4) :: t4 t4 = value end function t4 function t8(value) real(kind=8) :: value real(kind=8) :: t8 t8 = value end function t8 function td(value) double precision :: value double 
precision :: td td = value end function td subroutine s0(t0,value) real :: value real :: t0 !f2py intent(out) t0 t0 = value end subroutine s0 subroutine s4(t4,value) real(kind=4) :: value real(kind=4) :: t4 !f2py intent(out) t4 t4 = value end subroutine s4 subroutine s8(t8,value) real(kind=8) :: value real(kind=8) :: t8 !f2py intent(out) t8 t8 = value end subroutine s8 subroutine sd(td,value) double precision :: value double precision :: td !f2py intent(out) td td = value end subroutine sd end module f90_return_real """ @pytest.mark.parametrize('name', 't0,t4,t8,td,s0,s4,s8,sd'.split(',')) def test_all(self, name): self.check_function(getattr(self.module.f90_return_real, name), name)
charris/numpy
numpy/f2py/tests/test_return_real.py
numpy/linalg/tests/test_deprecations.py
import numpy as np

nd = np.array([[1, 2], [3, 4]])

# reshape
reveal_type(nd.reshape())  # E: numpy.ndarray
reveal_type(nd.reshape(4))  # E: numpy.ndarray
reveal_type(nd.reshape(2, 2))  # E: numpy.ndarray
reveal_type(nd.reshape((2, 2)))  # E: numpy.ndarray
reveal_type(nd.reshape((2, 2), order="C"))  # E: numpy.ndarray
reveal_type(nd.reshape(4, order="C"))  # E: numpy.ndarray

# resize does not return a value

# transpose
reveal_type(nd.transpose())  # E: numpy.ndarray
reveal_type(nd.transpose(1, 0))  # E: numpy.ndarray
reveal_type(nd.transpose((1, 0)))  # E: numpy.ndarray

# swapaxes
reveal_type(nd.swapaxes(0, 1))  # E: numpy.ndarray

# flatten
reveal_type(nd.flatten())  # E: numpy.ndarray
reveal_type(nd.flatten("C"))  # E: numpy.ndarray

# ravel
reveal_type(nd.ravel())  # E: numpy.ndarray
reveal_type(nd.ravel("C"))  # E: numpy.ndarray

# squeeze
reveal_type(nd.squeeze())  # E: numpy.ndarray
reveal_type(nd.squeeze(0))  # E: numpy.ndarray
reveal_type(nd.squeeze((0, 2)))  # E: numpy.ndarray
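A runtime companion to the static reveal-type checks above: a minimal sketch asserting the shapes a few of these calls actually produce (squeeze with an explicit axis, e.g. squeeze(0), would raise ValueError on this 2x2 array, so it is omitted):

import numpy as np

nd = np.array([[1, 2], [3, 4]])

assert nd.reshape(4).shape == (4,)
assert nd.transpose(1, 0).shape == (2, 2)
assert nd.swapaxes(0, 1).shape == (2, 2)
assert nd.flatten().shape == (4,)
assert nd.ravel().shape == (4,)
assert nd.squeeze().shape == (2, 2)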
import platform import pytest from numpy import array from numpy.testing import assert_, assert_raises from . import util class TestReturnReal(util.F2PyTest): def check_function(self, t, tname): if tname in ['t0', 't4', 's0', 's4']: err = 1e-5 else: err = 0.0 assert_(abs(t(234) - 234.0) <= err) assert_(abs(t(234.6) - 234.6) <= err) assert_(abs(t('234') - 234) <= err) assert_(abs(t('234.6') - 234.6) <= err) assert_(abs(t(-234) + 234) <= err) assert_(abs(t([234]) - 234) <= err) assert_(abs(t((234,)) - 234.) <= err) assert_(abs(t(array(234)) - 234.) <= err) assert_(abs(t(array([234])) - 234.) <= err) assert_(abs(t(array([[234]])) - 234.) <= err) assert_(abs(t(array([234], 'b')) + 22) <= err) assert_(abs(t(array([234], 'h')) - 234.) <= err) assert_(abs(t(array([234], 'i')) - 234.) <= err) assert_(abs(t(array([234], 'l')) - 234.) <= err) assert_(abs(t(array([234], 'B')) - 234.) <= err) assert_(abs(t(array([234], 'f')) - 234.) <= err) assert_(abs(t(array([234], 'd')) - 234.) <= err) if tname in ['t0', 't4', 's0', 's4']: assert_(t(1e200) == t(1e300)) # inf #assert_raises(ValueError, t, array([234], 'S1')) assert_raises(ValueError, t, 'abc') assert_raises(IndexError, t, []) assert_raises(IndexError, t, ()) assert_raises(Exception, t, t) assert_raises(Exception, t, {}) try: r = t(10 ** 400) assert_(repr(r) in ['inf', 'Infinity'], repr(r)) except OverflowError: pass @pytest.mark.skipif( platform.system() == 'Darwin', reason="Prone to error when run with numpy/f2py/tests on mac os, " "but not when run in isolation") class TestCReturnReal(TestReturnReal): suffix = ".pyf" module_name = "c_ext_return_real" code = """ python module c_ext_return_real usercode \'\'\' float t4(float value) { return value; } void s4(float *t4, float value) { *t4 = value; } double t8(double value) { return value; } void s8(double *t8, double value) { *t8 = value; } \'\'\' interface function t4(value) real*4 intent(c) :: t4,value end function t8(value) real*8 intent(c) :: t8,value end subroutine s4(t4,value) intent(c) s4 real*4 intent(out) :: t4 real*4 intent(c) :: value end subroutine s8(t8,value) intent(c) s8 real*8 intent(out) :: t8 real*8 intent(c) :: value end end interface end python module c_ext_return_real """ @pytest.mark.parametrize('name', 't4,t8,s4,s8'.split(',')) def test_all(self, name): self.check_function(getattr(self.module, name), name) class TestF77ReturnReal(TestReturnReal): code = """ function t0(value) real value real t0 t0 = value end function t4(value) real*4 value real*4 t4 t4 = value end function t8(value) real*8 value real*8 t8 t8 = value end function td(value) double precision value double precision td td = value end subroutine s0(t0,value) real value real t0 cf2py intent(out) t0 t0 = value end subroutine s4(t4,value) real*4 value real*4 t4 cf2py intent(out) t4 t4 = value end subroutine s8(t8,value) real*8 value real*8 t8 cf2py intent(out) t8 t8 = value end subroutine sd(td,value) double precision value double precision td cf2py intent(out) td td = value end """ @pytest.mark.parametrize('name', 't0,t4,t8,td,s0,s4,s8,sd'.split(',')) def test_all(self, name): self.check_function(getattr(self.module, name), name) class TestF90ReturnReal(TestReturnReal): suffix = ".f90" code = """ module f90_return_real contains function t0(value) real :: value real :: t0 t0 = value end function t0 function t4(value) real(kind=4) :: value real(kind=4) :: t4 t4 = value end function t4 function t8(value) real(kind=8) :: value real(kind=8) :: t8 t8 = value end function t8 function td(value) double precision :: value double 
precision :: td td = value end function td subroutine s0(t0,value) real :: value real :: t0 !f2py intent(out) t0 t0 = value end subroutine s0 subroutine s4(t4,value) real(kind=4) :: value real(kind=4) :: t4 !f2py intent(out) t4 t4 = value end subroutine s4 subroutine s8(t8,value) real(kind=8) :: value real(kind=8) :: t8 !f2py intent(out) t8 t8 = value end subroutine s8 subroutine sd(td,value) double precision :: value double precision :: td !f2py intent(out) td td = value end subroutine sd end module f90_return_real """ @pytest.mark.parametrize('name', 't0,t4,t8,td,s0,s4,s8,sd'.split(',')) def test_all(self, name): self.check_function(getattr(self.module.f90_return_real, name), name)
charris/numpy
numpy/f2py/tests/test_return_real.py
numpy/typing/tests/data/reveal/ndarray_shape_manipulation.py
import numpy as np

reveal_type(np.issctype(np.generic))  # E: bool
reveal_type(np.issctype("foo"))  # E: bool

reveal_type(np.obj2sctype("S8"))  # E: Union[numpy.generic, None]
reveal_type(np.obj2sctype("S8", default=None))  # E: Union[numpy.generic, None]
reveal_type(
    np.obj2sctype("foo", default=int)  # E: Union[numpy.generic, Type[builtins.int*]]
)

reveal_type(np.issubclass_(np.float64, float))  # E: bool
reveal_type(np.issubclass_(np.float64, (int, float)))  # E: bool

reveal_type(np.sctype2char("S8"))  # E: str
reveal_type(np.sctype2char(list))  # E: str

reveal_type(np.find_common_type([np.int64], [np.int64]))  # E: numpy.dtype

reveal_type(np.cast[int])  # E: _CastFunc
reveal_type(np.cast["i8"])  # E: _CastFunc
reveal_type(np.cast[np.int64])  # E: _CastFunc

reveal_type(np.nbytes[int])  # E: int
reveal_type(np.nbytes["i8"])  # E: int
reveal_type(np.nbytes[np.int64])  # E: int

reveal_type(np.ScalarType)  # E: Tuple
reveal_type(np.ScalarType[0])  # E: Type[builtins.int]
reveal_type(np.ScalarType[4])  # E: Type[builtins.bool]
reveal_type(np.ScalarType[9])  # E: Type[{csingle}]
reveal_type(np.ScalarType[11])  # E: Type[{clongdouble}]

reveal_type(np.typecodes["Character"])  # E: Literal['c']
reveal_type(np.typecodes["Complex"])  # E: Literal['FDG']
reveal_type(np.typecodes["All"])  # E: Literal['?bhilqpBHILQPefdgFDGSUVOMm']
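A few of the same scalar-type helpers evaluated at runtime; a sketch that assumes a pre-2.0 numpy, where issubclass_, sctype2char and friends are still available:

import numpy as np

assert np.issubclass_(np.float64, float)
assert np.sctype2char(np.float64) == 'd'   # 'd' is the char code of float64
assert np.nbytes[np.int64] == 8
assert np.typecodes["Complex"] == 'FDG'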
import platform import pytest from numpy import array from numpy.testing import assert_, assert_raises from . import util class TestReturnReal(util.F2PyTest): def check_function(self, t, tname): if tname in ['t0', 't4', 's0', 's4']: err = 1e-5 else: err = 0.0 assert_(abs(t(234) - 234.0) <= err) assert_(abs(t(234.6) - 234.6) <= err) assert_(abs(t('234') - 234) <= err) assert_(abs(t('234.6') - 234.6) <= err) assert_(abs(t(-234) + 234) <= err) assert_(abs(t([234]) - 234) <= err) assert_(abs(t((234,)) - 234.) <= err) assert_(abs(t(array(234)) - 234.) <= err) assert_(abs(t(array([234])) - 234.) <= err) assert_(abs(t(array([[234]])) - 234.) <= err) assert_(abs(t(array([234], 'b')) + 22) <= err) assert_(abs(t(array([234], 'h')) - 234.) <= err) assert_(abs(t(array([234], 'i')) - 234.) <= err) assert_(abs(t(array([234], 'l')) - 234.) <= err) assert_(abs(t(array([234], 'B')) - 234.) <= err) assert_(abs(t(array([234], 'f')) - 234.) <= err) assert_(abs(t(array([234], 'd')) - 234.) <= err) if tname in ['t0', 't4', 's0', 's4']: assert_(t(1e200) == t(1e300)) # inf #assert_raises(ValueError, t, array([234], 'S1')) assert_raises(ValueError, t, 'abc') assert_raises(IndexError, t, []) assert_raises(IndexError, t, ()) assert_raises(Exception, t, t) assert_raises(Exception, t, {}) try: r = t(10 ** 400) assert_(repr(r) in ['inf', 'Infinity'], repr(r)) except OverflowError: pass @pytest.mark.skipif( platform.system() == 'Darwin', reason="Prone to error when run with numpy/f2py/tests on mac os, " "but not when run in isolation") class TestCReturnReal(TestReturnReal): suffix = ".pyf" module_name = "c_ext_return_real" code = """ python module c_ext_return_real usercode \'\'\' float t4(float value) { return value; } void s4(float *t4, float value) { *t4 = value; } double t8(double value) { return value; } void s8(double *t8, double value) { *t8 = value; } \'\'\' interface function t4(value) real*4 intent(c) :: t4,value end function t8(value) real*8 intent(c) :: t8,value end subroutine s4(t4,value) intent(c) s4 real*4 intent(out) :: t4 real*4 intent(c) :: value end subroutine s8(t8,value) intent(c) s8 real*8 intent(out) :: t8 real*8 intent(c) :: value end end interface end python module c_ext_return_real """ @pytest.mark.parametrize('name', 't4,t8,s4,s8'.split(',')) def test_all(self, name): self.check_function(getattr(self.module, name), name) class TestF77ReturnReal(TestReturnReal): code = """ function t0(value) real value real t0 t0 = value end function t4(value) real*4 value real*4 t4 t4 = value end function t8(value) real*8 value real*8 t8 t8 = value end function td(value) double precision value double precision td td = value end subroutine s0(t0,value) real value real t0 cf2py intent(out) t0 t0 = value end subroutine s4(t4,value) real*4 value real*4 t4 cf2py intent(out) t4 t4 = value end subroutine s8(t8,value) real*8 value real*8 t8 cf2py intent(out) t8 t8 = value end subroutine sd(td,value) double precision value double precision td cf2py intent(out) td td = value end """ @pytest.mark.parametrize('name', 't0,t4,t8,td,s0,s4,s8,sd'.split(',')) def test_all(self, name): self.check_function(getattr(self.module, name), name) class TestF90ReturnReal(TestReturnReal): suffix = ".f90" code = """ module f90_return_real contains function t0(value) real :: value real :: t0 t0 = value end function t0 function t4(value) real(kind=4) :: value real(kind=4) :: t4 t4 = value end function t4 function t8(value) real(kind=8) :: value real(kind=8) :: t8 t8 = value end function t8 function td(value) double precision :: value double 
precision :: td td = value end function td subroutine s0(t0,value) real :: value real :: t0 !f2py intent(out) t0 t0 = value end subroutine s0 subroutine s4(t4,value) real(kind=4) :: value real(kind=4) :: t4 !f2py intent(out) t4 t4 = value end subroutine s4 subroutine s8(t8,value) real(kind=8) :: value real(kind=8) :: t8 !f2py intent(out) t8 t8 = value end subroutine s8 subroutine sd(td,value) double precision :: value double precision :: td !f2py intent(out) td td = value end subroutine sd end module f90_return_real """ @pytest.mark.parametrize('name', 't0,t4,t8,td,s0,s4,s8,sd'.split(',')) def test_all(self, name): self.check_function(getattr(self.module.f90_return_real, name), name)
charris/numpy
numpy/f2py/tests/test_return_real.py
numpy/typing/tests/data/reveal/numerictypes.py
""" Implementation of optimized einsum. """ import itertools import operator from numpy.core.multiarray import c_einsum from numpy.core.numeric import asanyarray, tensordot from numpy.core.overrides import array_function_dispatch __all__ = ['einsum', 'einsum_path'] einsum_symbols = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ' einsum_symbols_set = set(einsum_symbols) def _flop_count(idx_contraction, inner, num_terms, size_dictionary): """ Computes the number of FLOPS in the contraction. Parameters ---------- idx_contraction : iterable The indices involved in the contraction inner : bool Does this contraction require an inner product? num_terms : int The number of terms in a contraction size_dictionary : dict The size of each of the indices in idx_contraction Returns ------- flop_count : int The total number of FLOPS required for the contraction. Examples -------- >>> _flop_count('abc', False, 1, {'a': 2, 'b':3, 'c':5}) 30 >>> _flop_count('abc', True, 2, {'a': 2, 'b':3, 'c':5}) 60 """ overall_size = _compute_size_by_dict(idx_contraction, size_dictionary) op_factor = max(1, num_terms - 1) if inner: op_factor += 1 return overall_size * op_factor def _compute_size_by_dict(indices, idx_dict): """ Computes the product of the elements in indices based on the dictionary idx_dict. Parameters ---------- indices : iterable Indices to base the product on. idx_dict : dictionary Dictionary of index sizes Returns ------- ret : int The resulting product. Examples -------- >>> _compute_size_by_dict('abbc', {'a': 2, 'b':3, 'c':5}) 90 """ ret = 1 for i in indices: ret *= idx_dict[i] return ret def _find_contraction(positions, input_sets, output_set): """ Finds the contraction for a given set of input and output sets. Parameters ---------- positions : iterable Integer positions of terms used in the contraction. input_sets : list List of sets that represent the lhs side of the einsum subscript output_set : set Set that represents the rhs side of the overall einsum subscript Returns ------- new_result : set The indices of the resulting contraction remaining : list List of sets that have not been contracted, the new set is appended to the end of this list idx_removed : set Indices removed from the entire contraction idx_contraction : set The indices used in the current contraction Examples -------- # A simple dot product test case >>> pos = (0, 1) >>> isets = [set('ab'), set('bc')] >>> oset = set('ac') >>> _find_contraction(pos, isets, oset) ({'a', 'c'}, [{'a', 'c'}], {'b'}, {'a', 'b', 'c'}) # A more complex case with additional terms in the contraction >>> pos = (0, 2) >>> isets = [set('abd'), set('ac'), set('bdc')] >>> oset = set('ac') >>> _find_contraction(pos, isets, oset) ({'a', 'c'}, [{'a', 'c'}, {'a', 'c'}], {'b', 'd'}, {'a', 'b', 'c', 'd'}) """ idx_contract = set() idx_remain = output_set.copy() remaining = [] for ind, value in enumerate(input_sets): if ind in positions: idx_contract |= value else: remaining.append(value) idx_remain |= value new_result = idx_remain & idx_contract idx_removed = (idx_contract - new_result) remaining.append(new_result) return (new_result, remaining, idx_removed, idx_contract) def _optimal_path(input_sets, output_set, idx_dict, memory_limit): """ Computes all possible pair contractions, sieves the results based on ``memory_limit`` and returns the lowest cost path. This algorithm scales factorial with respect to the elements in the list ``input_sets``. 
Parameters ---------- input_sets : list List of sets that represent the lhs side of the einsum subscript output_set : set Set that represents the rhs side of the overall einsum subscript idx_dict : dictionary Dictionary of index sizes memory_limit : int The maximum number of elements in a temporary array Returns ------- path : list The optimal contraction order within the memory limit constraint. Examples -------- >>> isets = [set('abd'), set('ac'), set('bdc')] >>> oset = set() >>> idx_sizes = {'a': 1, 'b':2, 'c':3, 'd':4} >>> _optimal_path(isets, oset, idx_sizes, 5000) [(0, 2), (0, 1)] """ full_results = [(0, [], input_sets)] for iteration in range(len(input_sets) - 1): iter_results = [] # Compute all unique pairs for curr in full_results: cost, positions, remaining = curr for con in itertools.combinations(range(len(input_sets) - iteration), 2): # Find the contraction cont = _find_contraction(con, remaining, output_set) new_result, new_input_sets, idx_removed, idx_contract = cont # Sieve the results based on memory_limit new_size = _compute_size_by_dict(new_result, idx_dict) if new_size > memory_limit: continue # Build (total_cost, positions, indices_remaining) total_cost = cost + _flop_count(idx_contract, idx_removed, len(con), idx_dict) new_pos = positions + [con] iter_results.append((total_cost, new_pos, new_input_sets)) # Update combinatorial list, if we did not find anything return best # path + remaining contractions if iter_results: full_results = iter_results else: path = min(full_results, key=lambda x: x[0])[1] path += [tuple(range(len(input_sets) - iteration))] return path # If we have not found anything return single einsum contraction if len(full_results) == 0: return [tuple(range(len(input_sets)))] path = min(full_results, key=lambda x: x[0])[1] return path def _parse_possible_contraction(positions, input_sets, output_set, idx_dict, memory_limit, path_cost, naive_cost): """Compute the cost (removed size + flops) and resultant indices for performing the contraction specified by ``positions``. Parameters ---------- positions : tuple of int The locations of the proposed tensors to contract. input_sets : list of sets The indices found on each tensors. output_set : set The output indices of the expression. idx_dict : dict Mapping of each index to its size. memory_limit : int The total allowed size for an intermediary tensor. path_cost : int The contraction cost so far. naive_cost : int The cost of the unoptimized expression. Returns ------- cost : (int, int) A tuple containing the size of any indices removed, and the flop cost. positions : tuple of int The locations of the proposed tensors to contract. new_input_sets : list of sets The resulting new list of indices if this proposed contraction is performed. 
""" # Find the contraction contract = _find_contraction(positions, input_sets, output_set) idx_result, new_input_sets, idx_removed, idx_contract = contract # Sieve the results based on memory_limit new_size = _compute_size_by_dict(idx_result, idx_dict) if new_size > memory_limit: return None # Build sort tuple old_sizes = (_compute_size_by_dict(input_sets[p], idx_dict) for p in positions) removed_size = sum(old_sizes) - new_size # NB: removed_size used to be just the size of any removed indices i.e.: # helpers.compute_size_by_dict(idx_removed, idx_dict) cost = _flop_count(idx_contract, idx_removed, len(positions), idx_dict) sort = (-removed_size, cost) # Sieve based on total cost as well if (path_cost + cost) > naive_cost: return None # Add contraction to possible choices return [sort, positions, new_input_sets] def _update_other_results(results, best): """Update the positions and provisional input_sets of ``results`` based on performing the contraction result ``best``. Remove any involving the tensors contracted. Parameters ---------- results : list List of contraction results produced by ``_parse_possible_contraction``. best : list The best contraction of ``results`` i.e. the one that will be performed. Returns ------- mod_results : list The list of modified results, updated with outcome of ``best`` contraction. """ best_con = best[1] bx, by = best_con mod_results = [] for cost, (x, y), con_sets in results: # Ignore results involving tensors just contracted if x in best_con or y in best_con: continue # Update the input_sets del con_sets[by - int(by > x) - int(by > y)] del con_sets[bx - int(bx > x) - int(bx > y)] con_sets.insert(-1, best[2][-1]) # Update the position indices mod_con = x - int(x > bx) - int(x > by), y - int(y > bx) - int(y > by) mod_results.append((cost, mod_con, con_sets)) return mod_results def _greedy_path(input_sets, output_set, idx_dict, memory_limit): """ Finds the path by contracting the best pair until the input list is exhausted. The best pair is found by minimizing the tuple ``(-prod(indices_removed), cost)``. What this amounts to is prioritizing matrix multiplication or inner product operations, then Hadamard like operations, and finally outer operations. Outer products are limited by ``memory_limit``. This algorithm scales cubically with respect to the number of elements in the list ``input_sets``. Parameters ---------- input_sets : list List of sets that represent the lhs side of the einsum subscript output_set : set Set that represents the rhs side of the overall einsum subscript idx_dict : dictionary Dictionary of index sizes memory_limit : int The maximum number of elements in a temporary array Returns ------- path : list The greedy contraction order within the memory limit constraint. 
Examples -------- >>> isets = [set('abd'), set('ac'), set('bdc')] >>> oset = set() >>> idx_sizes = {'a': 1, 'b':2, 'c':3, 'd':4} >>> _greedy_path(isets, oset, idx_sizes, 5000) [(0, 2), (0, 1)] """ # Handle trivial cases that leaked through if len(input_sets) == 1: return [(0,)] elif len(input_sets) == 2: return [(0, 1)] # Build up a naive cost contract = _find_contraction(range(len(input_sets)), input_sets, output_set) idx_result, new_input_sets, idx_removed, idx_contract = contract naive_cost = _flop_count(idx_contract, idx_removed, len(input_sets), idx_dict) # Initially iterate over all pairs comb_iter = itertools.combinations(range(len(input_sets)), 2) known_contractions = [] path_cost = 0 path = [] for iteration in range(len(input_sets) - 1): # Iterate over all pairs on first step, only previously found pairs on subsequent steps for positions in comb_iter: # Always initially ignore outer products if input_sets[positions[0]].isdisjoint(input_sets[positions[1]]): continue result = _parse_possible_contraction(positions, input_sets, output_set, idx_dict, memory_limit, path_cost, naive_cost) if result is not None: known_contractions.append(result) # If we do not have a inner contraction, rescan pairs including outer products if len(known_contractions) == 0: # Then check the outer products for positions in itertools.combinations(range(len(input_sets)), 2): result = _parse_possible_contraction(positions, input_sets, output_set, idx_dict, memory_limit, path_cost, naive_cost) if result is not None: known_contractions.append(result) # If we still did not find any remaining contractions, default back to einsum like behavior if len(known_contractions) == 0: path.append(tuple(range(len(input_sets)))) break # Sort based on first index best = min(known_contractions, key=lambda x: x[0]) # Now propagate as many unused contractions as possible to next iteration known_contractions = _update_other_results(known_contractions, best) # Next iteration only compute contractions with the new tensor # All other contractions have been accounted for input_sets = best[2] new_tensor_pos = len(input_sets) - 1 comb_iter = ((i, new_tensor_pos) for i in range(new_tensor_pos)) # Update path and total cost path.append(best[1]) path_cost += best[0][1] return path def _can_dot(inputs, result, idx_removed): """ Checks if we can use BLAS (np.tensordot) call and its beneficial to do so. Parameters ---------- inputs : list of str Specifies the subscripts for summation. result : str Resulting summation. idx_removed : set Indices that are removed in the summation Returns ------- type : bool Returns true if BLAS should and can be used, else False Notes ----- If the operations is BLAS level 1 or 2 and is not already aligned we default back to einsum as the memory movement to copy is more costly than the operation itself. 
Examples -------- # Standard GEMM operation >>> _can_dot(['ij', 'jk'], 'ik', set('j')) True # Can use the standard BLAS, but requires odd data movement >>> _can_dot(['ijj', 'jk'], 'ik', set('j')) False # DDOT where the memory is not aligned >>> _can_dot(['ijk', 'ikj'], '', set('ijk')) False """ # All `dot` calls remove indices if len(idx_removed) == 0: return False # BLAS can only handle two operands if len(inputs) != 2: return False input_left, input_right = inputs for c in set(input_left + input_right): # can't deal with repeated indices on same input or more than 2 total nl, nr = input_left.count(c), input_right.count(c) if (nl > 1) or (nr > 1) or (nl + nr > 2): return False # can't do implicit summation or dimension collapse e.g. # "ab,bc->c" (implicitly sum over 'a') # "ab,ca->ca" (take diagonal of 'a') if nl + nr - 1 == int(c in result): return False # Build a few temporaries set_left = set(input_left) set_right = set(input_right) keep_left = set_left - idx_removed keep_right = set_right - idx_removed rs = len(idx_removed) # At this point we are a DOT, GEMV, or GEMM operation # Handle inner products # DDOT with aligned data if input_left == input_right: return True # DDOT without aligned data (better to use einsum) if set_left == set_right: return False # Handle the 4 possible (aligned) GEMV or GEMM cases # GEMM or GEMV no transpose if input_left[-rs:] == input_right[:rs]: return True # GEMM or GEMV transpose both if input_left[:rs] == input_right[-rs:]: return True # GEMM or GEMV transpose right if input_left[-rs:] == input_right[-rs:]: return True # GEMM or GEMV transpose left if input_left[:rs] == input_right[:rs]: return True # Einsum is faster than GEMV if we have to copy data if not keep_left or not keep_right: return False # We are a matrix-matrix product, but we need to copy data return True def _parse_einsum_input(operands): """ A reproduction of einsum c side einsum parsing in python. Returns ------- input_strings : str Parsed input strings output_string : str Parsed output string operands : list of array_like The operands to use in the numpy contraction Examples -------- The operand list is simplified to reduce printing: >>> np.random.seed(123) >>> a = np.random.rand(4, 4) >>> b = np.random.rand(4, 4, 4) >>> _parse_einsum_input(('...a,...a->...', a, b)) ('za,xza', 'xz', [a, b]) # may vary >>> _parse_einsum_input((a, [Ellipsis, 0], b, [Ellipsis, 0])) ('za,xza', 'xz', [a, b]) # may vary """ if len(operands) == 0: raise ValueError("No input operands") if isinstance(operands[0], str): subscripts = operands[0].replace(" ", "") operands = [asanyarray(v) for v in operands[1:]] # Ensure all characters are valid for s in subscripts: if s in '.,->': continue if s not in einsum_symbols: raise ValueError("Character %s is not a valid symbol." % s) else: tmp_operands = list(operands) operand_list = [] subscript_list = [] for p in range(len(operands) // 2): operand_list.append(tmp_operands.pop(0)) subscript_list.append(tmp_operands.pop(0)) output_list = tmp_operands[-1] if len(tmp_operands) else None operands = [asanyarray(v) for v in operand_list] subscripts = "" last = len(subscript_list) - 1 for num, sub in enumerate(subscript_list): for s in sub: if s is Ellipsis: subscripts += "..." 
else: try: s = operator.index(s) except TypeError as e: raise TypeError("For this input type lists must contain " "either int or Ellipsis") from e subscripts += einsum_symbols[s] if num != last: subscripts += "," if output_list is not None: subscripts += "->" for s in output_list: if s is Ellipsis: subscripts += "..." else: try: s = operator.index(s) except TypeError as e: raise TypeError("For this input type lists must contain " "either int or Ellipsis") from e subscripts += einsum_symbols[s] # Check for proper "->" if ("-" in subscripts) or (">" in subscripts): invalid = (subscripts.count("-") > 1) or (subscripts.count(">") > 1) if invalid or (subscripts.count("->") != 1): raise ValueError("Subscripts can only contain one '->'.") # Parse ellipses if "." in subscripts: used = subscripts.replace(".", "").replace(",", "").replace("->", "") unused = list(einsum_symbols_set - set(used)) ellipse_inds = "".join(unused) longest = 0 if "->" in subscripts: input_tmp, output_sub = subscripts.split("->") split_subscripts = input_tmp.split(",") out_sub = True else: split_subscripts = subscripts.split(',') out_sub = False for num, sub in enumerate(split_subscripts): if "." in sub: if (sub.count(".") != 3) or (sub.count("...") != 1): raise ValueError("Invalid Ellipses.") # Take into account numerical values if operands[num].shape == (): ellipse_count = 0 else: ellipse_count = max(operands[num].ndim, 1) ellipse_count -= (len(sub) - 3) if ellipse_count > longest: longest = ellipse_count if ellipse_count < 0: raise ValueError("Ellipses lengths do not match.") elif ellipse_count == 0: split_subscripts[num] = sub.replace('...', '') else: rep_inds = ellipse_inds[-ellipse_count:] split_subscripts[num] = sub.replace('...', rep_inds) subscripts = ",".join(split_subscripts) if longest == 0: out_ellipse = "" else: out_ellipse = ellipse_inds[-longest:] if out_sub: subscripts += "->" + output_sub.replace("...", out_ellipse) else: # Special care for outputless ellipses output_subscript = "" tmp_subscripts = subscripts.replace(",", "") for s in sorted(set(tmp_subscripts)): if s not in (einsum_symbols): raise ValueError("Character %s is not a valid symbol." % s) if tmp_subscripts.count(s) == 1: output_subscript += s normal_inds = ''.join(sorted(set(output_subscript) - set(out_ellipse))) subscripts += "->" + out_ellipse + normal_inds # Build output string if does not exist if "->" in subscripts: input_subscripts, output_subscript = subscripts.split("->") else: input_subscripts = subscripts # Build output subscripts tmp_subscripts = subscripts.replace(",", "") output_subscript = "" for s in sorted(set(tmp_subscripts)): if s not in einsum_symbols: raise ValueError("Character %s is not a valid symbol." % s) if tmp_subscripts.count(s) == 1: output_subscript += s # Make sure output subscripts are in the input for char in output_subscript: if char not in input_subscripts: raise ValueError("Output character %s did not appear in the input" % char) # Make sure number operands is equivalent to the number of terms if len(input_subscripts.split(',')) != len(operands): raise ValueError("Number of einsum subscripts must be equal to the " "number of operands.") return (input_subscripts, output_subscript, operands) def _einsum_path_dispatcher(*operands, optimize=None, einsum_call=None): # NOTE: technically, we should only dispatch on array-like arguments, not # subscripts (given as strings). 
But separating operands into # arrays/subscripts is a little tricky/slow (given einsum's two supported # signatures), so as a practical shortcut we dispatch on everything. # Strings will be ignored for dispatching since they don't define # __array_function__. return operands @array_function_dispatch(_einsum_path_dispatcher, module='numpy') def einsum_path(*operands, optimize='greedy', einsum_call=False): """ einsum_path(subscripts, *operands, optimize='greedy') Evaluates the lowest cost contraction order for an einsum expression by considering the creation of intermediate arrays. Parameters ---------- subscripts : str Specifies the subscripts for summation. *operands : list of array_like These are the arrays for the operation. optimize : {bool, list, tuple, 'greedy', 'optimal'} Choose the type of path. If a tuple is provided, the second argument is assumed to be the maximum intermediate size created. If only a single argument is provided the largest input or output array size is used as a maximum intermediate size. * if a list is given that starts with ``einsum_path``, uses this as the contraction path * if False no optimization is taken * if True defaults to the 'greedy' algorithm * 'optimal' An algorithm that combinatorially explores all possible ways of contracting the listed tensors and choosest the least costly path. Scales exponentially with the number of terms in the contraction. * 'greedy' An algorithm that chooses the best pair contraction at each step. Effectively, this algorithm searches the largest inner, Hadamard, and then outer products at each step. Scales cubically with the number of terms in the contraction. Equivalent to the 'optimal' path for most contractions. Default is 'greedy'. Returns ------- path : list of tuples A list representation of the einsum path. string_repr : str A printable representation of the einsum path. Notes ----- The resulting path indicates which terms of the input contraction should be contracted first, the result of this contraction is then appended to the end of the contraction list. This list can then be iterated over until all intermediate contractions are complete. See Also -------- einsum, linalg.multi_dot Examples -------- We can begin with a chain dot example. In this case, it is optimal to contract the ``b`` and ``c`` tensors first as represented by the first element of the path ``(1, 2)``. The resulting tensor is added to the end of the contraction and the remaining contraction ``(0, 1)`` is then completed. >>> np.random.seed(123) >>> a = np.random.rand(2, 2) >>> b = np.random.rand(2, 5) >>> c = np.random.rand(5, 2) >>> path_info = np.einsum_path('ij,jk,kl->il', a, b, c, optimize='greedy') >>> print(path_info[0]) ['einsum_path', (1, 2), (0, 1)] >>> print(path_info[1]) Complete contraction: ij,jk,kl->il # may vary Naive scaling: 4 Optimized scaling: 3 Naive FLOP count: 1.600e+02 Optimized FLOP count: 5.600e+01 Theoretical speedup: 2.857 Largest intermediate: 4.000e+00 elements ------------------------------------------------------------------------- scaling current remaining ------------------------------------------------------------------------- 3 kl,jk->jl ij,jl->il 3 jl,ij->il il->il A more complex index transformation example. >>> I = np.random.rand(10, 10, 10, 10) >>> C = np.random.rand(10, 10) >>> path_info = np.einsum_path('ea,fb,abcd,gc,hd->efgh', C, C, I, C, C, ... 
optimize='greedy') >>> print(path_info[0]) ['einsum_path', (0, 2), (0, 3), (0, 2), (0, 1)] >>> print(path_info[1]) Complete contraction: ea,fb,abcd,gc,hd->efgh # may vary Naive scaling: 8 Optimized scaling: 5 Naive FLOP count: 8.000e+08 Optimized FLOP count: 8.000e+05 Theoretical speedup: 1000.000 Largest intermediate: 1.000e+04 elements -------------------------------------------------------------------------- scaling current remaining -------------------------------------------------------------------------- 5 abcd,ea->bcde fb,gc,hd,bcde->efgh 5 bcde,fb->cdef gc,hd,cdef->efgh 5 cdef,gc->defg hd,defg->efgh 5 defg,hd->efgh efgh->efgh """ # Figure out what the path really is path_type = optimize if path_type is True: path_type = 'greedy' if path_type is None: path_type = False memory_limit = None # No optimization or a named path algorithm if (path_type is False) or isinstance(path_type, str): pass # Given an explicit path elif len(path_type) and (path_type[0] == 'einsum_path'): pass # Path tuple with memory limit elif ((len(path_type) == 2) and isinstance(path_type[0], str) and isinstance(path_type[1], (int, float))): memory_limit = int(path_type[1]) path_type = path_type[0] else: raise TypeError("Did not understand the path: %s" % str(path_type)) # Hidden option, only einsum should call this einsum_call_arg = einsum_call # Python side parsing input_subscripts, output_subscript, operands = _parse_einsum_input(operands) # Build a few useful list and sets input_list = input_subscripts.split(',') input_sets = [set(x) for x in input_list] output_set = set(output_subscript) indices = set(input_subscripts.replace(',', '')) # Get length of each unique dimension and ensure all dimensions are correct dimension_dict = {} broadcast_indices = [[] for x in range(len(input_list))] for tnum, term in enumerate(input_list): sh = operands[tnum].shape if len(sh) != len(term): raise ValueError("Einstein sum subscript %s does not contain the " "correct number of indices for operand %d." % (input_subscripts[tnum], tnum)) for cnum, char in enumerate(term): dim = sh[cnum] # Build out broadcast indices if dim == 1: broadcast_indices[tnum].append(char) if char in dimension_dict.keys(): # For broadcasting cases we always want the largest dim size if dimension_dict[char] == 1: dimension_dict[char] = dim elif dim not in (1, dimension_dict[char]): raise ValueError("Size of label '%s' for operand %d (%d) " "does not match previous terms (%d)." 
% (char, tnum, dimension_dict[char], dim)) else: dimension_dict[char] = dim # Convert broadcast inds to sets broadcast_indices = [set(x) for x in broadcast_indices] # Compute size of each input array plus the output array size_list = [_compute_size_by_dict(term, dimension_dict) for term in input_list + [output_subscript]] max_size = max(size_list) if memory_limit is None: memory_arg = max_size else: memory_arg = memory_limit # Compute naive cost # This isn't quite right, need to look into exactly how einsum does this inner_product = (sum(len(x) for x in input_sets) - len(indices)) > 0 naive_cost = _flop_count(indices, inner_product, len(input_list), dimension_dict) # Compute the path if (path_type is False) or (len(input_list) in [1, 2]) or (indices == output_set): # Nothing to be optimized, leave it to einsum path = [tuple(range(len(input_list)))] elif path_type == "greedy": path = _greedy_path(input_sets, output_set, dimension_dict, memory_arg) elif path_type == "optimal": path = _optimal_path(input_sets, output_set, dimension_dict, memory_arg) elif path_type[0] == 'einsum_path': path = path_type[1:] else: raise KeyError("Path name %s not found", path_type) cost_list, scale_list, size_list, contraction_list = [], [], [], [] # Build contraction tuple (positions, gemm, einsum_str, remaining) for cnum, contract_inds in enumerate(path): # Make sure we remove inds from right to left contract_inds = tuple(sorted(list(contract_inds), reverse=True)) contract = _find_contraction(contract_inds, input_sets, output_set) out_inds, input_sets, idx_removed, idx_contract = contract cost = _flop_count(idx_contract, idx_removed, len(contract_inds), dimension_dict) cost_list.append(cost) scale_list.append(len(idx_contract)) size_list.append(_compute_size_by_dict(out_inds, dimension_dict)) bcast = set() tmp_inputs = [] for x in contract_inds: tmp_inputs.append(input_list.pop(x)) bcast |= broadcast_indices.pop(x) new_bcast_inds = bcast - idx_removed # If we're broadcasting, nix blas if not len(idx_removed & bcast): do_blas = _can_dot(tmp_inputs, out_inds, idx_removed) else: do_blas = False # Last contraction if (cnum - len(path)) == -1: idx_result = output_subscript else: sort_result = [(dimension_dict[ind], ind) for ind in out_inds] idx_result = "".join([x[1] for x in sorted(sort_result)]) input_list.append(idx_result) broadcast_indices.append(new_bcast_inds) einsum_str = ",".join(tmp_inputs) + "->" + idx_result contraction = (contract_inds, idx_removed, einsum_str, input_list[:], do_blas) contraction_list.append(contraction) opt_cost = sum(cost_list) + 1 if einsum_call_arg: return (operands, contraction_list) # Return the path along with a nice string representation overall_contraction = input_subscripts + "->" + output_subscript header = ("scaling", "current", "remaining") speedup = naive_cost / opt_cost max_i = max(size_list) path_print = " Complete contraction: %s\n" % overall_contraction path_print += " Naive scaling: %d\n" % len(indices) path_print += " Optimized scaling: %d\n" % max(scale_list) path_print += " Naive FLOP count: %.3e\n" % naive_cost path_print += " Optimized FLOP count: %.3e\n" % opt_cost path_print += " Theoretical speedup: %3.3f\n" % speedup path_print += " Largest intermediate: %.3e elements\n" % max_i path_print += "-" * 74 + "\n" path_print += "%6s %24s %40s\n" % header path_print += "-" * 74 for n, contraction in enumerate(contraction_list): inds, idx_rm, einsum_str, remaining, blas = contraction remaining_str = ",".join(remaining) + "->" + output_subscript path_run = 
(scale_list[n], einsum_str, remaining_str) path_print += "\n%4d %24s %40s" % path_run path = ['einsum_path'] + path return (path, path_print) def _einsum_dispatcher(*operands, out=None, optimize=None, **kwargs): # Arguably we dispatch on more arguments that we really should; see note in # _einsum_path_dispatcher for why. yield from operands yield out # Rewrite einsum to handle different cases @array_function_dispatch(_einsum_dispatcher, module='numpy') def einsum(*operands, out=None, optimize=False, **kwargs): """ einsum(subscripts, *operands, out=None, dtype=None, order='K', casting='safe', optimize=False) Evaluates the Einstein summation convention on the operands. Using the Einstein summation convention, many common multi-dimensional, linear algebraic array operations can be represented in a simple fashion. In *implicit* mode `einsum` computes these values. In *explicit* mode, `einsum` provides further flexibility to compute other array operations that might not be considered classical Einstein summation operations, by disabling, or forcing summation over specified subscript labels. See the notes and examples for clarification. Parameters ---------- subscripts : str Specifies the subscripts for summation as comma separated list of subscript labels. An implicit (classical Einstein summation) calculation is performed unless the explicit indicator '->' is included as well as subscript labels of the precise output form. operands : list of array_like These are the arrays for the operation. out : ndarray, optional If provided, the calculation is done into this array. dtype : {data-type, None}, optional If provided, forces the calculation to use the data type specified. Note that you may have to also give a more liberal `casting` parameter to allow the conversions. Default is None. order : {'C', 'F', 'A', 'K'}, optional Controls the memory layout of the output. 'C' means it should be C contiguous. 'F' means it should be Fortran contiguous, 'A' means it should be 'F' if the inputs are all 'F', 'C' otherwise. 'K' means it should be as close to the layout as the inputs as is possible, including arbitrarily permuted axes. Default is 'K'. casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional Controls what kind of data casting may occur. Setting this to 'unsafe' is not recommended, as it can adversely affect accumulations. * 'no' means the data types should not be cast at all. * 'equiv' means only byte-order changes are allowed. * 'safe' means only casts which can preserve values are allowed. * 'same_kind' means only safe casts or casts within a kind, like float64 to float32, are allowed. * 'unsafe' means any data conversions may be done. Default is 'safe'. optimize : {False, True, 'greedy', 'optimal'}, optional Controls if intermediate optimization should occur. No optimization will occur if False and True will default to the 'greedy' algorithm. Also accepts an explicit contraction list from the ``np.einsum_path`` function. See ``np.einsum_path`` for more details. Defaults to False. Returns ------- output : ndarray The calculation based on the Einstein summation convention. See Also -------- einsum_path, dot, inner, outer, tensordot, linalg.multi_dot einops : similar verbose interface is provided by `einops <https://github.com/arogozhnikov/einops>`_ package to cover additional operations: transpose, reshape/flatten, repeat/tile, squeeze/unsqueeze and reductions. 
opt_einsum : `opt_einsum <https://optimized-einsum.readthedocs.io/en/stable/>`_ optimizes contraction order for einsum-like expressions in backend-agnostic manner. Notes ----- .. versionadded:: 1.6.0 The Einstein summation convention can be used to compute many multi-dimensional, linear algebraic array operations. `einsum` provides a succinct way of representing these. A non-exhaustive list of these operations, which can be computed by `einsum`, is shown below along with examples: * Trace of an array, :py:func:`numpy.trace`. * Return a diagonal, :py:func:`numpy.diag`. * Array axis summations, :py:func:`numpy.sum`. * Transpositions and permutations, :py:func:`numpy.transpose`. * Matrix multiplication and dot product, :py:func:`numpy.matmul` :py:func:`numpy.dot`. * Vector inner and outer products, :py:func:`numpy.inner` :py:func:`numpy.outer`. * Broadcasting, element-wise and scalar multiplication, :py:func:`numpy.multiply`. * Tensor contractions, :py:func:`numpy.tensordot`. * Chained array operations, in efficient calculation order, :py:func:`numpy.einsum_path`. The subscripts string is a comma-separated list of subscript labels, where each label refers to a dimension of the corresponding operand. Whenever a label is repeated it is summed, so ``np.einsum('i,i', a, b)`` is equivalent to :py:func:`np.inner(a,b) <numpy.inner>`. If a label appears only once, it is not summed, so ``np.einsum('i', a)`` produces a view of ``a`` with no changes. A further example ``np.einsum('ij,jk', a, b)`` describes traditional matrix multiplication and is equivalent to :py:func:`np.matmul(a,b) <numpy.matmul>`. Repeated subscript labels in one operand take the diagonal. For example, ``np.einsum('ii', a)`` is equivalent to :py:func:`np.trace(a) <numpy.trace>`. In *implicit mode*, the chosen subscripts are important since the axes of the output are reordered alphabetically. This means that ``np.einsum('ij', a)`` doesn't affect a 2D array, while ``np.einsum('ji', a)`` takes its transpose. Additionally, ``np.einsum('ij,jk', a, b)`` returns a matrix multiplication, while, ``np.einsum('ij,jh', a, b)`` returns the transpose of the multiplication since subscript 'h' precedes subscript 'i'. In *explicit mode* the output can be directly controlled by specifying output subscript labels. This requires the identifier '->' as well as the list of output subscript labels. This feature increases the flexibility of the function since summing can be disabled or forced when required. The call ``np.einsum('i->', a)`` is like :py:func:`np.sum(a, axis=-1) <numpy.sum>`, and ``np.einsum('ii->i', a)`` is like :py:func:`np.diag(a) <numpy.diag>`. The difference is that `einsum` does not allow broadcasting by default. Additionally ``np.einsum('ij,jh->ih', a, b)`` directly specifies the order of the output subscript labels and therefore returns matrix multiplication, unlike the example above in implicit mode. To enable and control broadcasting, use an ellipsis. Default NumPy-style broadcasting is done by adding an ellipsis to the left of each term, like ``np.einsum('...ii->...i', a)``. To take the trace along the first and last axes, you can do ``np.einsum('i...i', a)``, or to do a matrix-matrix product with the left-most indices instead of rightmost, one can do ``np.einsum('ij...,jk...->ik...', a, b)``. When there is only one operand, no axes are summed, and no output parameter is provided, a view into the operand is returned instead of a new array. 
Thus, taking the diagonal as ``np.einsum('ii->i', a)`` produces a view (changed in version 1.10.0). `einsum` also provides an alternative way to provide the subscripts and operands as ``einsum(op0, sublist0, op1, sublist1, ..., [sublistout])``. If the output shape is not provided in this format `einsum` will be calculated in implicit mode, otherwise it will be performed explicitly. The examples below have corresponding `einsum` calls with the two parameter methods. .. versionadded:: 1.10.0 Views returned from einsum are now writeable whenever the input array is writeable. For example, ``np.einsum('ijk...->kji...', a)`` will now have the same effect as :py:func:`np.swapaxes(a, 0, 2) <numpy.swapaxes>` and ``np.einsum('ii->i', a)`` will return a writeable view of the diagonal of a 2D array. .. versionadded:: 1.12.0 Added the ``optimize`` argument which will optimize the contraction order of an einsum expression. For a contraction with three or more operands this can greatly increase the computational efficiency at the cost of a larger memory footprint during computation. Typically a 'greedy' algorithm is applied which empirical tests have shown returns the optimal path in the majority of cases. In some cases 'optimal' will return the superlative path through a more expensive, exhaustive search. For iterative calculations it may be advisable to calculate the optimal path once and reuse that path by supplying it as an argument. An example is given below. See :py:func:`numpy.einsum_path` for more details. Examples -------- >>> a = np.arange(25).reshape(5,5) >>> b = np.arange(5) >>> c = np.arange(6).reshape(2,3) Trace of a matrix: >>> np.einsum('ii', a) 60 >>> np.einsum(a, [0,0]) 60 >>> np.trace(a) 60 Extract the diagonal (requires explicit form): >>> np.einsum('ii->i', a) array([ 0, 6, 12, 18, 24]) >>> np.einsum(a, [0,0], [0]) array([ 0, 6, 12, 18, 24]) >>> np.diag(a) array([ 0, 6, 12, 18, 24]) Sum over an axis (requires explicit form): >>> np.einsum('ij->i', a) array([ 10, 35, 60, 85, 110]) >>> np.einsum(a, [0,1], [0]) array([ 10, 35, 60, 85, 110]) >>> np.sum(a, axis=1) array([ 10, 35, 60, 85, 110]) For higher dimensional arrays summing a single axis can be done with ellipsis: >>> np.einsum('...j->...', a) array([ 10, 35, 60, 85, 110]) >>> np.einsum(a, [Ellipsis,1], [Ellipsis]) array([ 10, 35, 60, 85, 110]) Compute a matrix transpose, or reorder any number of axes: >>> np.einsum('ji', c) array([[0, 3], [1, 4], [2, 5]]) >>> np.einsum('ij->ji', c) array([[0, 3], [1, 4], [2, 5]]) >>> np.einsum(c, [1,0]) array([[0, 3], [1, 4], [2, 5]]) >>> np.transpose(c) array([[0, 3], [1, 4], [2, 5]]) Vector inner products: >>> np.einsum('i,i', b, b) 30 >>> np.einsum(b, [0], b, [0]) 30 >>> np.inner(b,b) 30 Matrix vector multiplication: >>> np.einsum('ij,j', a, b) array([ 30, 80, 130, 180, 230]) >>> np.einsum(a, [0,1], b, [1]) array([ 30, 80, 130, 180, 230]) >>> np.dot(a, b) array([ 30, 80, 130, 180, 230]) >>> np.einsum('...j,j', a, b) array([ 30, 80, 130, 180, 230]) Broadcasting and scalar multiplication: >>> np.einsum('..., ...', 3, c) array([[ 0, 3, 6], [ 9, 12, 15]]) >>> np.einsum(',ij', 3, c) array([[ 0, 3, 6], [ 9, 12, 15]]) >>> np.einsum(3, [Ellipsis], c, [Ellipsis]) array([[ 0, 3, 6], [ 9, 12, 15]]) >>> np.multiply(3, c) array([[ 0, 3, 6], [ 9, 12, 15]]) Vector outer product: >>> np.einsum('i,j', np.arange(2)+1, b) array([[0, 1, 2, 3, 4], [0, 2, 4, 6, 8]]) >>> np.einsum(np.arange(2)+1, [0], b, [1]) array([[0, 1, 2, 3, 4], [0, 2, 4, 6, 8]]) >>> np.outer(np.arange(2)+1, b) array([[0, 1, 2, 3, 4], [0, 2, 4, 
6, 8]]) Tensor contraction: >>> a = np.arange(60.).reshape(3,4,5) >>> b = np.arange(24.).reshape(4,3,2) >>> np.einsum('ijk,jil->kl', a, b) array([[4400., 4730.], [4532., 4874.], [4664., 5018.], [4796., 5162.], [4928., 5306.]]) >>> np.einsum(a, [0,1,2], b, [1,0,3], [2,3]) array([[4400., 4730.], [4532., 4874.], [4664., 5018.], [4796., 5162.], [4928., 5306.]]) >>> np.tensordot(a,b, axes=([1,0],[0,1])) array([[4400., 4730.], [4532., 4874.], [4664., 5018.], [4796., 5162.], [4928., 5306.]]) Writeable returned arrays (since version 1.10.0): >>> a = np.zeros((3, 3)) >>> np.einsum('ii->i', a)[:] = 1 >>> a array([[1., 0., 0.], [0., 1., 0.], [0., 0., 1.]]) Example of ellipsis use: >>> a = np.arange(6).reshape((3,2)) >>> b = np.arange(12).reshape((4,3)) >>> np.einsum('ki,jk->ij', a, b) array([[10, 28, 46, 64], [13, 40, 67, 94]]) >>> np.einsum('ki,...k->i...', a, b) array([[10, 28, 46, 64], [13, 40, 67, 94]]) >>> np.einsum('k...,jk', a, b) array([[10, 28, 46, 64], [13, 40, 67, 94]]) Chained array operations. For more complicated contractions, speed ups might be achieved by repeatedly computing a 'greedy' path or pre-computing the 'optimal' path and repeatedly applying it, using an `einsum_path` insertion (since version 1.12.0). Performance improvements can be particularly significant with larger arrays: >>> a = np.ones(64).reshape(2,4,8) Basic `einsum`: ~1520ms (benchmarked on 3.1GHz Intel i5.) >>> for iteration in range(500): ... _ = np.einsum('ijk,ilm,njm,nlk,abc->',a,a,a,a,a) Sub-optimal `einsum` (due to repeated path calculation time): ~330ms >>> for iteration in range(500): ... _ = np.einsum('ijk,ilm,njm,nlk,abc->',a,a,a,a,a, optimize='optimal') Greedy `einsum` (faster optimal path approximation): ~160ms >>> for iteration in range(500): ... _ = np.einsum('ijk,ilm,njm,nlk,abc->',a,a,a,a,a, optimize='greedy') Optimal `einsum` (best usage pattern in some use cases): ~110ms >>> path = np.einsum_path('ijk,ilm,njm,nlk,abc->',a,a,a,a,a, optimize='optimal')[0] >>> for iteration in range(500): ... _ = np.einsum('ijk,ilm,njm,nlk,abc->',a,a,a,a,a, optimize=path) """ # Special handling if out is specified specified_out = out is not None # If no optimization, run pure einsum if optimize is False: if specified_out: kwargs['out'] = out return c_einsum(*operands, **kwargs) # Check the kwargs to avoid a more cryptic error later, without having to # repeat default values here valid_einsum_kwargs = ['dtype', 'order', 'casting'] unknown_kwargs = [k for (k, v) in kwargs.items() if k not in valid_einsum_kwargs] if len(unknown_kwargs): raise TypeError("Did not understand the following kwargs: %s" % unknown_kwargs) # Build the contraction list and operand operands, contraction_list = einsum_path(*operands, optimize=optimize, einsum_call=True) # Handle order kwarg for output array, c_einsum allows mixed case output_order = kwargs.pop('order', 'K') if output_order.upper() == 'A': if all(arr.flags.f_contiguous for arr in operands): output_order = 'F' else: output_order = 'C' # Start contraction loop for num, contraction in enumerate(contraction_list): inds, idx_rm, einsum_str, remaining, blas = contraction tmp_operands = [operands.pop(x) for x in inds] # Do we need to deal with the output? 
handle_out = specified_out and ((num + 1) == len(contraction_list)) # Call tensordot if still possible if blas: # Checks have already been handled input_str, results_index = einsum_str.split('->') input_left, input_right = input_str.split(',') tensor_result = input_left + input_right for s in idx_rm: tensor_result = tensor_result.replace(s, "") # Find indices to contract over left_pos, right_pos = [], [] for s in sorted(idx_rm): left_pos.append(input_left.find(s)) right_pos.append(input_right.find(s)) # Contract! new_view = tensordot(*tmp_operands, axes=(tuple(left_pos), tuple(right_pos))) # Build a new view if needed if (tensor_result != results_index) or handle_out: if handle_out: kwargs["out"] = out new_view = c_einsum(tensor_result + '->' + results_index, new_view, **kwargs) # Call einsum else: # If out was specified if handle_out: kwargs["out"] = out # Do the contraction new_view = c_einsum(einsum_str, *tmp_operands, **kwargs) # Append new items and dereference what we can operands.append(new_view) del tmp_operands, new_view if specified_out: return out else: return asanyarray(operands[0], order=output_order)
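# --- Illustrative sketch (not from the NumPy sources above) ---
# The path-parsing branch at the top of einsum_path accepts several forms for
# ``optimize``; the same forms are forwarded by einsum itself. The arrays and
# shapes below are arbitrary, chosen only to exercise each accepted form.
import numpy as np

a = np.random.rand(4, 5)
b = np.random.rand(5, 6)
c = np.random.rand(6, 7)

# Named algorithms (True is mapped to 'greedy' by the parsing code above).
np.einsum_path('ij,jk,kl->il', a, b, c, optimize='greedy')
np.einsum_path('ij,jk,kl->il', a, b, c, optimize='optimal')

# (algorithm, memory_limit) pair: the second element becomes memory_limit.
np.einsum_path('ij,jk,kl->il', a, b, c, optimize=('optimal', 1e6))

# An explicit ['einsum_path', ...] list returned by einsum_path is passed
# through unchanged and can be reused directly by einsum.
path = np.einsum_path('ij,jk,kl->il', a, b, c, optimize='optimal')[0]
result = np.einsum('ij,jk,kl->il', a, b, c, optimize=path)
assert result.shape == (4, 7)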
import platform import pytest from numpy import array from numpy.testing import assert_, assert_raises from . import util class TestReturnReal(util.F2PyTest): def check_function(self, t, tname): if tname in ['t0', 't4', 's0', 's4']: err = 1e-5 else: err = 0.0 assert_(abs(t(234) - 234.0) <= err) assert_(abs(t(234.6) - 234.6) <= err) assert_(abs(t('234') - 234) <= err) assert_(abs(t('234.6') - 234.6) <= err) assert_(abs(t(-234) + 234) <= err) assert_(abs(t([234]) - 234) <= err) assert_(abs(t((234,)) - 234.) <= err) assert_(abs(t(array(234)) - 234.) <= err) assert_(abs(t(array([234])) - 234.) <= err) assert_(abs(t(array([[234]])) - 234.) <= err) assert_(abs(t(array([234], 'b')) + 22) <= err) assert_(abs(t(array([234], 'h')) - 234.) <= err) assert_(abs(t(array([234], 'i')) - 234.) <= err) assert_(abs(t(array([234], 'l')) - 234.) <= err) assert_(abs(t(array([234], 'B')) - 234.) <= err) assert_(abs(t(array([234], 'f')) - 234.) <= err) assert_(abs(t(array([234], 'd')) - 234.) <= err) if tname in ['t0', 't4', 's0', 's4']: assert_(t(1e200) == t(1e300)) # inf #assert_raises(ValueError, t, array([234], 'S1')) assert_raises(ValueError, t, 'abc') assert_raises(IndexError, t, []) assert_raises(IndexError, t, ()) assert_raises(Exception, t, t) assert_raises(Exception, t, {}) try: r = t(10 ** 400) assert_(repr(r) in ['inf', 'Infinity'], repr(r)) except OverflowError: pass @pytest.mark.skipif( platform.system() == 'Darwin', reason="Prone to error when run with numpy/f2py/tests on mac os, " "but not when run in isolation") class TestCReturnReal(TestReturnReal): suffix = ".pyf" module_name = "c_ext_return_real" code = """ python module c_ext_return_real usercode \'\'\' float t4(float value) { return value; } void s4(float *t4, float value) { *t4 = value; } double t8(double value) { return value; } void s8(double *t8, double value) { *t8 = value; } \'\'\' interface function t4(value) real*4 intent(c) :: t4,value end function t8(value) real*8 intent(c) :: t8,value end subroutine s4(t4,value) intent(c) s4 real*4 intent(out) :: t4 real*4 intent(c) :: value end subroutine s8(t8,value) intent(c) s8 real*8 intent(out) :: t8 real*8 intent(c) :: value end end interface end python module c_ext_return_real """ @pytest.mark.parametrize('name', 't4,t8,s4,s8'.split(',')) def test_all(self, name): self.check_function(getattr(self.module, name), name) class TestF77ReturnReal(TestReturnReal): code = """ function t0(value) real value real t0 t0 = value end function t4(value) real*4 value real*4 t4 t4 = value end function t8(value) real*8 value real*8 t8 t8 = value end function td(value) double precision value double precision td td = value end subroutine s0(t0,value) real value real t0 cf2py intent(out) t0 t0 = value end subroutine s4(t4,value) real*4 value real*4 t4 cf2py intent(out) t4 t4 = value end subroutine s8(t8,value) real*8 value real*8 t8 cf2py intent(out) t8 t8 = value end subroutine sd(td,value) double precision value double precision td cf2py intent(out) td td = value end """ @pytest.mark.parametrize('name', 't0,t4,t8,td,s0,s4,s8,sd'.split(',')) def test_all(self, name): self.check_function(getattr(self.module, name), name) class TestF90ReturnReal(TestReturnReal): suffix = ".f90" code = """ module f90_return_real contains function t0(value) real :: value real :: t0 t0 = value end function t0 function t4(value) real(kind=4) :: value real(kind=4) :: t4 t4 = value end function t4 function t8(value) real(kind=8) :: value real(kind=8) :: t8 t8 = value end function t8 function td(value) double precision :: value double 
precision :: td td = value end function td subroutine s0(t0,value) real :: value real :: t0 !f2py intent(out) t0 t0 = value end subroutine s0 subroutine s4(t4,value) real(kind=4) :: value real(kind=4) :: t4 !f2py intent(out) t4 t4 = value end subroutine s4 subroutine s8(t8,value) real(kind=8) :: value real(kind=8) :: t8 !f2py intent(out) t8 t8 = value end subroutine s8 subroutine sd(td,value) double precision :: value double precision :: td !f2py intent(out) td td = value end subroutine sd end module f90_return_real """ @pytest.mark.parametrize('name', 't0,t4,t8,td,s0,s4,s8,sd'.split(',')) def test_all(self, name): self.check_function(getattr(self.module.f90_return_real, name), name)
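# --- Illustrative sketch (not part of the f2py test suite above) ---
# How a real-returning Fortran function like ``t4`` could be built and called
# outside the test harness, assuming a Fortran compiler is available on PATH.
# The module name 'demo_return_real' is a placeholder, not used by the tests.
import numpy.f2py

fortran_source = """
      function t4(value)
      real*4 value
      real*4 t4
      t4 = value
      end
"""

# Builds demo_return_real*.so in the current working directory via f2py.
numpy.f2py.compile(fortran_source, modulename='demo_return_real',
                   extension='.f', verbose=False)

import demo_return_real
assert abs(demo_return_real.t4(234.6) - 234.6) <= 1e-5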
charris/numpy
numpy/f2py/tests/test_return_real.py
numpy/core/einsumfunc.py
# Licensed under a 3-clause BSD style license - see LICENSE.rst from io import StringIO import numpy as np from astropy.io import ascii from .common import assert_equal def test_types_from_dat(): converters = {'a': [ascii.convert_numpy(float)], 'e': [ascii.convert_numpy(str)]} dat = ascii.read(['a b c d e', '1 1 cat 2.1 4.2'], Reader=ascii.Basic, converters=converters) assert dat['a'].dtype.kind == 'f' assert dat['b'].dtype.kind == 'i' assert dat['c'].dtype.kind in ('S', 'U') assert dat['d'].dtype.kind == 'f' assert dat['e'].dtype.kind in ('S', 'U') def test_rdb_write_types(): dat = ascii.read(['a b c d', '1 1.0 cat 2.1'], Reader=ascii.Basic) out = StringIO() ascii.write(dat, out, Writer=ascii.Rdb) outs = out.getvalue().splitlines() assert_equal(outs[1], 'N\tN\tS\tN') def test_ipac_read_types(): table = r"""\ | ra | dec | sai |-----v2---| sptype | | real | float | l | real | char | | unit | unit | unit | unit | ergs | | null | null | null | null | -999 | 2.09708 2956 73765 2.06000 B8IVpMnHg """ reader = ascii.get_reader(Reader=ascii.Ipac) reader.read(table) types = [ascii.FloatType, ascii.FloatType, ascii.IntType, ascii.FloatType, ascii.StrType] for (col, expected_type) in zip(reader.cols, types): assert_equal(col.type, expected_type) def test_col_dtype_in_custom_class(): """Test code in BaseOutputter._convert_vals to handle Column.dtype attribute. See discussion in #11895.""" dtypes = [np.float32, np.int8, np.int16] class TestDtypeHeader(ascii.BasicHeader): def get_cols(self, lines): super().get_cols(lines) for col, dtype in zip(self.cols, dtypes): col.dtype = dtype class TestDtype(ascii.Basic): """ Basic table Data Reader with data type alternating float32, int8 """ header_class = TestDtypeHeader txt = """ a b c 1 2 3 """ reader = ascii.get_reader(TestDtype) t = reader.read(txt) for col, dtype in zip(t.itercols(), dtypes): assert col.dtype.type is dtype
# -*- coding: utf-8 -*- # Licensed under a 3-clause BSD style license - see LICENSE.rst """ This module tests some of the methods related to the ``HTML`` reader/writer and aims to document its functionality. Requires `BeautifulSoup <http://www.crummy.com/software/BeautifulSoup/>`_ to be installed. """ from io import StringIO from astropy.io.ascii import html from astropy.io.ascii import core from astropy.table import Table import pytest import numpy as np from .common import setup_function, teardown_function # noqa from astropy.io import ascii from astropy.utils.compat.optional_deps import HAS_BLEACH, HAS_BS4 # noqa if HAS_BS4: from bs4 import BeautifulSoup, FeatureNotFound @pytest.mark.skipif('not HAS_BS4') def test_soupstring(): """ Test to make sure the class SoupString behaves properly. """ soup = BeautifulSoup('<html><head></head><body><p>foo</p></body></html>', 'html.parser') soup_str = html.SoupString(soup) assert isinstance(soup_str, str) assert isinstance(soup_str, html.SoupString) assert soup_str == '<html><head></head><body><p>foo</p></body></html>' assert soup_str.soup is soup def test_listwriter(): """ Test to make sure the class ListWriter behaves properly. """ lst = [] writer = html.ListWriter(lst) for i in range(5): writer.write(i) for ch in 'abcde': writer.write(ch) assert lst == [0, 1, 2, 3, 4, 'a', 'b', 'c', 'd', 'e'] @pytest.mark.skipif('not HAS_BS4') def test_identify_table(): """ Test to make sure that identify_table() returns whether the given BeautifulSoup tag is the correct table to process. """ # Should return False on non-<table> tags and None soup = BeautifulSoup('<html><body></body></html>', 'html.parser') assert html.identify_table(soup, {}, 0) is False assert html.identify_table(None, {}, 0) is False soup = BeautifulSoup('<table id="foo"><tr><th>A</th></tr><tr>' '<td>B</td></tr></table>', 'html.parser').table assert html.identify_table(soup, {}, 2) is False assert html.identify_table(soup, {}, 1) is True # Default index of 1 # Same tests, but with explicit parameter assert html.identify_table(soup, {'table_id': 2}, 1) is False assert html.identify_table(soup, {'table_id': 1}, 1) is True # Test identification by string ID assert html.identify_table(soup, {'table_id': 'bar'}, 1) is False assert html.identify_table(soup, {'table_id': 'foo'}, 1) is True @pytest.mark.skipif('not HAS_BS4') def test_missing_data(): """ Test reading a table with missing data """ # First with default where blank => '0' table_in = ['<table>', '<tr><th>A</th></tr>', '<tr><td></td></tr>', '<tr><td>1</td></tr>', '</table>'] dat = Table.read(table_in, format='ascii.html') assert dat.masked is False assert np.all(dat['A'].mask == [True, False]) assert dat['A'].dtype.kind == 'i' # Now with a specific value '...' 
=> missing table_in = ['<table>', '<tr><th>A</th></tr>', '<tr><td>...</td></tr>', '<tr><td>1</td></tr>', '</table>'] dat = Table.read(table_in, format='ascii.html', fill_values=[('...', '0')]) assert dat.masked is False assert np.all(dat['A'].mask == [True, False]) assert dat['A'].dtype.kind == 'i' @pytest.mark.skipif('not HAS_BS4') def test_rename_cols(): """ Test reading a table and renaming cols """ table_in = ['<table>', '<tr><th>A</th> <th>B</th></tr>', '<tr><td>1</td><td>2</td></tr>', '</table>'] # Swap column names dat = Table.read(table_in, format='ascii.html', names=['B', 'A']) assert dat.colnames == ['B', 'A'] assert len(dat) == 1 # Swap column names and only include A (the renamed version) dat = Table.read(table_in, format='ascii.html', names=['B', 'A'], include_names=['A']) assert dat.colnames == ['A'] assert len(dat) == 1 assert np.all(dat['A'] == 2) @pytest.mark.skipif('not HAS_BS4') def test_no_names(): """ Test reading a table witn no column header """ table_in = ['<table>', '<tr><td>1</td></tr>', '<tr><td>2</td></tr>', '</table>'] dat = Table.read(table_in, format='ascii.html') assert dat.colnames == ['col1'] assert len(dat) == 2 dat = Table.read(table_in, format='ascii.html', names=['a']) assert dat.colnames == ['a'] assert len(dat) == 2 @pytest.mark.skipif('not HAS_BS4') def test_identify_table_fail(): """ Raise an exception with an informative error message if table_id is not found. """ table_in = ['<table id="foo"><tr><th>A</th></tr>', '<tr><td>B</td></tr></table>'] with pytest.raises(core.InconsistentTableError) as err: Table.read(table_in, format='ascii.html', htmldict={'table_id': 'bad_id'}, guess=False) assert err.match("ERROR: HTML table id 'bad_id' not found$") with pytest.raises(core.InconsistentTableError) as err: Table.read(table_in, format='ascii.html', htmldict={'table_id': 3}, guess=False) assert err.match("ERROR: HTML table number 3 not found$") @pytest.mark.skipif('not HAS_BS4') def test_backend_parsers(): """ Make sure the user can specify which back-end parser to use and that an error is raised if the parser is invalid. """ for parser in ('lxml', 'xml', 'html.parser', 'html5lib'): try: Table.read('data/html2.html', format='ascii.html', htmldict={'parser': parser}, guess=False) except FeatureNotFound: if parser == 'html.parser': raise # otherwise ignore if the dependency isn't present # reading should fail if the parser is invalid with pytest.raises(FeatureNotFound): Table.read('data/html2.html', format='ascii.html', htmldict={'parser': 'foo'}, guess=False) @pytest.mark.skipif('HAS_BS4') def test_htmlinputter_no_bs4(): """ This should return an OptionalTableImportError if BeautifulSoup is not installed. """ inputter = html.HTMLInputter() with pytest.raises(core.OptionalTableImportError): inputter.process_lines([]) @pytest.mark.skipif('not HAS_BS4') def test_htmlinputter(): """ Test to ensure that HTMLInputter correctly converts input into a list of SoupStrings representing table elements. 
""" f = 'data/html.html' with open(f) as fd: table = fd.read() inputter = html.HTMLInputter() inputter.html = {} # In absence of table_id, defaults to the first table expected = ['<tr><th>Column 1</th><th>Column 2</th><th>Column 3</th></tr>', '<tr><td>1</td><td>a</td><td>1.05</td></tr>', '<tr><td>2</td><td>b</td><td>2.75</td></tr>', '<tr><td>3</td><td>c</td><td>-1.25</td></tr>'] assert [str(x) for x in inputter.get_lines(table)] == expected # Should raise an InconsistentTableError if the table is not found inputter.html = {'table_id': 4} with pytest.raises(core.InconsistentTableError): inputter.get_lines(table) # Identification by string ID inputter.html['table_id'] = 'second' expected = ['<tr><th>Column A</th><th>Column B</th><th>Column C</th></tr>', '<tr><td>4</td><td>d</td><td>10.5</td></tr>', '<tr><td>5</td><td>e</td><td>27.5</td></tr>', '<tr><td>6</td><td>f</td><td>-12.5</td></tr>'] assert [str(x) for x in inputter.get_lines(table)] == expected # Identification by integer index inputter.html['table_id'] = 3 expected = ['<tr><th>C1</th><th>C2</th><th>C3</th></tr>', '<tr><td>7</td><td>g</td><td>105.0</td></tr>', '<tr><td>8</td><td>h</td><td>275.0</td></tr>', '<tr><td>9</td><td>i</td><td>-125.0</td></tr>'] assert [str(x) for x in inputter.get_lines(table)] == expected @pytest.mark.skipif('not HAS_BS4') def test_htmlsplitter(): """ Test to make sure that HTMLSplitter correctly inputs lines of type SoupString to return a generator that gives all header and data elements. """ splitter = html.HTMLSplitter() lines = [html.SoupString(BeautifulSoup('<table><tr><th>Col 1</th><th>Col 2</th></tr></table>', 'html.parser').tr), html.SoupString(BeautifulSoup('<table><tr><td>Data 1</td><td>Data 2</td></tr></table>', 'html.parser').tr)] expected_data = [['Col 1', 'Col 2'], ['Data 1', 'Data 2']] assert list(splitter(lines)) == expected_data # Make sure the presence of a non-SoupString triggers a TypeError lines.append('<tr><td>Data 3</td><td>Data 4</td></tr>') with pytest.raises(TypeError): list(splitter(lines)) # Make sure that passing an empty list triggers an error with pytest.raises(core.InconsistentTableError): list(splitter([])) @pytest.mark.skipif('not HAS_BS4') def test_htmlheader_start(): """ Test to ensure that the start_line method of HTMLHeader returns the first line of header data. Uses t/html.html for sample input. 
""" f = 'data/html.html' with open(f) as fd: table = fd.read() inputter = html.HTMLInputter() inputter.html = {} header = html.HTMLHeader() lines = inputter.get_lines(table) assert str(lines[header.start_line(lines)]) == \ '<tr><th>Column 1</th><th>Column 2</th><th>Column 3</th></tr>' inputter.html['table_id'] = 'second' lines = inputter.get_lines(table) assert str(lines[header.start_line(lines)]) == \ '<tr><th>Column A</th><th>Column B</th><th>Column C</th></tr>' inputter.html['table_id'] = 3 lines = inputter.get_lines(table) assert str(lines[header.start_line(lines)]) == \ '<tr><th>C1</th><th>C2</th><th>C3</th></tr>' # start_line should return None if no valid header is found lines = [html.SoupString(BeautifulSoup('<table><tr><td>Data</td></tr></table>', 'html.parser').tr), html.SoupString(BeautifulSoup('<p>Text</p>', 'html.parser').p)] assert header.start_line(lines) is None # Should raise an error if a non-SoupString is present lines.append('<tr><th>Header</th></tr>') with pytest.raises(TypeError): header.start_line(lines) @pytest.mark.skipif('not HAS_BS4') def test_htmldata(): """ Test to ensure that the start_line and end_lines methods of HTMLData returns the first line of table data. Uses t/html.html for sample input. """ f = 'data/html.html' with open(f) as fd: table = fd.read() inputter = html.HTMLInputter() inputter.html = {} data = html.HTMLData() lines = inputter.get_lines(table) assert str(lines[data.start_line(lines)]) == \ '<tr><td>1</td><td>a</td><td>1.05</td></tr>' # end_line returns the index of the last data element + 1 assert str(lines[data.end_line(lines) - 1]) == \ '<tr><td>3</td><td>c</td><td>-1.25</td></tr>' inputter.html['table_id'] = 'second' lines = inputter.get_lines(table) assert str(lines[data.start_line(lines)]) == \ '<tr><td>4</td><td>d</td><td>10.5</td></tr>' assert str(lines[data.end_line(lines) - 1]) == \ '<tr><td>6</td><td>f</td><td>-12.5</td></tr>' inputter.html['table_id'] = 3 lines = inputter.get_lines(table) assert str(lines[data.start_line(lines)]) == \ '<tr><td>7</td><td>g</td><td>105.0</td></tr>' assert str(lines[data.end_line(lines) - 1]) == \ '<tr><td>9</td><td>i</td><td>-125.0</td></tr>' # start_line should raise an error if no table data exists lines = [html.SoupString(BeautifulSoup('<div></div>', 'html.parser').div), html.SoupString(BeautifulSoup('<p>Text</p>', 'html.parser').p)] with pytest.raises(core.InconsistentTableError): data.start_line(lines) # end_line should return None if no table data exists assert data.end_line(lines) is None # Should raise an error if a non-SoupString is present lines.append('<tr><td>Data</td></tr>') with pytest.raises(TypeError): data.start_line(lines) with pytest.raises(TypeError): data.end_line(lines) def test_multicolumn_write(): """ Test to make sure that the HTML writer writes multidimensional columns (those with iterable elements) using the colspan attribute of <th>. 
""" col1 = [1, 2, 3] col2 = [(1.0, 1.0), (2.0, 2.0), (3.0, 3.0)] col3 = [('a', 'a', 'a'), ('b', 'b', 'b'), ('c', 'c', 'c')] table = Table([col1, col2, col3], names=('C1', 'C2', 'C3')) expected = """\ <html> <head> <meta charset="utf-8"/> <meta content="text/html;charset=UTF-8" http-equiv="Content-type"/> </head> <body> <table> <thead> <tr> <th>C1</th> <th colspan="2">C2</th> <th colspan="3">C3</th> </tr> </thead> <tr> <td>1</td> <td>1.0</td> <td>1.0</td> <td>a</td> <td>a</td> <td>a</td> </tr> <tr> <td>2</td> <td>2.0</td> <td>2.0</td> <td>b</td> <td>b</td> <td>b</td> </tr> <tr> <td>3</td> <td>3.0</td> <td>3.0</td> <td>c</td> <td>c</td> <td>c</td> </tr> </table> </body> </html> """ out = html.HTML().write(table)[0].strip() assert out == expected.strip() @pytest.mark.skipif('not HAS_BLEACH') def test_multicolumn_write_escape(): """ Test to make sure that the HTML writer writes multidimensional columns (those with iterable elements) using the colspan attribute of <th>. """ col1 = [1, 2, 3] col2 = [(1.0, 1.0), (2.0, 2.0), (3.0, 3.0)] col3 = [('<a></a>', '<a></a>', 'a'), ('<b></b>', 'b', 'b'), ('c', 'c', 'c')] table = Table([col1, col2, col3], names=('C1', 'C2', 'C3')) expected = """\ <html> <head> <meta charset="utf-8"/> <meta content="text/html;charset=UTF-8" http-equiv="Content-type"/> </head> <body> <table> <thead> <tr> <th>C1</th> <th colspan="2">C2</th> <th colspan="3">C3</th> </tr> </thead> <tr> <td>1</td> <td>1.0</td> <td>1.0</td> <td><a></a></td> <td><a></a></td> <td>a</td> </tr> <tr> <td>2</td> <td>2.0</td> <td>2.0</td> <td><b></b></td> <td>b</td> <td>b</td> </tr> <tr> <td>3</td> <td>3.0</td> <td>3.0</td> <td>c</td> <td>c</td> <td>c</td> </tr> </table> </body> </html> """ out = html.HTML(htmldict={'raw_html_cols': 'C3'}).write(table)[0].strip() assert out == expected.strip() def test_write_no_multicols(): """ Test to make sure that the HTML writer will not use multi-dimensional columns if the multicol parameter is False. """ col1 = [1, 2, 3] col2 = [(1.0, 1.0), (2.0, 2.0), (3.0, 3.0)] col3 = [('a', 'a', 'a'), ('b', 'b', 'b'), ('c', 'c', 'c')] table = Table([col1, col2, col3], names=('C1', 'C2', 'C3')) expected = """\ <html> <head> <meta charset="utf-8"/> <meta content="text/html;charset=UTF-8" http-equiv="Content-type"/> </head> <body> <table> <thead> <tr> <th>C1</th> <th>C2</th> <th>C3</th> </tr> </thead> <tr> <td>1</td> <td>1.0 .. 1.0</td> <td>a .. a</td> </tr> <tr> <td>2</td> <td>2.0 .. 2.0</td> <td>b .. b</td> </tr> <tr> <td>3</td> <td>3.0 .. 3.0</td> <td>c .. c</td> </tr> </table> </body> </html> """ assert html.HTML({'multicol': False}).write(table)[0].strip() == \ expected.strip() @pytest.mark.skipif('not HAS_BS4') def test_multicolumn_read(): """ Test to make sure that the HTML reader inputs multidimensional columns (those with iterable elements) using the colspan attribute of <th>. Ensure that any string element within a multidimensional column casts all elements to string prior to type conversion operations. """ table = Table.read('data/html2.html', format='ascii.html') str_type = np.dtype((str, 21)) expected = Table(np.array([(['1', '2.5000000000000000001'], 3), (['1a', '1'], 3.5)], dtype=[('A', str_type, (2,)), ('B', '<f8')])) assert np.all(table == expected) @pytest.mark.skipif('not HAS_BLEACH') def test_raw_html_write(): """ Test that columns can contain raw HTML which is not escaped. 
""" t = Table([['<em>x</em>'], ['<em>y</em>']], names=['a', 'b']) # One column contains raw HTML (string input) out = StringIO() t.write(out, format='ascii.html', htmldict={'raw_html_cols': 'a'}) expected = """\ <tr> <td><em>x</em></td> <td>&lt;em&gt;y&lt;/em&gt;</td> </tr>""" assert expected in out.getvalue() # One column contains raw HTML (list input) out = StringIO() t.write(out, format='ascii.html', htmldict={'raw_html_cols': ['a']}) assert expected in out.getvalue() # Two columns contains raw HTML (list input) out = StringIO() t.write(out, format='ascii.html', htmldict={'raw_html_cols': ['a', 'b']}) expected = """\ <tr> <td><em>x</em></td> <td><em>y</em></td> </tr>""" assert expected in out.getvalue() @pytest.mark.skipif('not HAS_BLEACH') def test_raw_html_write_clean(): """ Test that columns can contain raw HTML which is not escaped. """ import bleach # noqa t = Table([['<script>x</script>'], ['<p>y</p>'], ['<em>y</em>']], names=['a', 'b', 'c']) # Confirm that <script> and <p> get escaped but not <em> out = StringIO() t.write(out, format='ascii.html', htmldict={'raw_html_cols': t.colnames}) expected = """\ <tr> <td>&lt;script&gt;x&lt;/script&gt;</td> <td>&lt;p&gt;y&lt;/p&gt;</td> <td><em>y</em></td> </tr>""" assert expected in out.getvalue() # Confirm that we can whitelist <p> out = StringIO() t.write(out, format='ascii.html', htmldict={'raw_html_cols': t.colnames, 'raw_html_clean_kwargs': {'tags': bleach.ALLOWED_TAGS + ['p']}}) expected = """\ <tr> <td>&lt;script&gt;x&lt;/script&gt;</td> <td><p>y</p></td> <td><em>y</em></td> </tr>""" assert expected in out.getvalue() def test_write_table_html_fill_values(): """ Test that passing fill_values should replace any matching row """ buffer_output = StringIO() t = Table([[1], [2]], names=('a', 'b')) ascii.write(t, buffer_output, fill_values=('1', 'Hello world'), format='html') t_expected = Table([['Hello world'], [2]], names=('a', 'b')) buffer_expected = StringIO() ascii.write(t_expected, buffer_expected, format='html') assert buffer_output.getvalue() == buffer_expected.getvalue() def test_write_table_html_fill_values_optional_columns(): """ Test that passing optional column in fill_values should only replace matching columns """ buffer_output = StringIO() t = Table([[1], [1]], names=('a', 'b')) ascii.write(t, buffer_output, fill_values=('1', 'Hello world', 'b'), format='html') t_expected = Table([[1], ['Hello world']], names=('a', 'b')) buffer_expected = StringIO() ascii.write(t_expected, buffer_expected, format='html') assert buffer_output.getvalue() == buffer_expected.getvalue() def test_write_table_html_fill_values_masked(): """ Test that passing masked values in fill_values should only replace masked columns or values """ buffer_output = StringIO() t = Table([[1], [1]], names=('a', 'b'), masked=True, dtype=('i4', 'i8')) t['a'] = np.ma.masked ascii.write(t, buffer_output, fill_values=(ascii.masked, 'TEST'), format='html') t_expected = Table([['TEST'], [1]], names=('a', 'b')) buffer_expected = StringIO() ascii.write(t_expected, buffer_expected, format='html') assert buffer_output.getvalue() == buffer_expected.getvalue() def test_multicolumn_table_html_fill_values(): """ Test to make sure that the HTML writer writes multidimensional columns with correctly replaced fill_values. 
""" col1 = [1, 2, 3] col2 = [(1.0, 1.0), (2.0, 2.0), (3.0, 3.0)] col3 = [('a', 'a', 'a'), ('b', 'b', 'b'), ('c', 'c', 'c')] buffer_output = StringIO() t = Table([col1, col2, col3], names=('C1', 'C2', 'C3')) ascii.write(t, buffer_output, fill_values=('a', 'z'), format='html') col1 = [1, 2, 3] col2 = [(1.0, 1.0), (2.0, 2.0), (3.0, 3.0)] col3 = [('z', 'z', 'z'), ('b', 'b', 'b'), ('c', 'c', 'c')] buffer_expected = StringIO() t_expected = Table([col1, col2, col3], names=('C1', 'C2', 'C3')) ascii.write(t_expected, buffer_expected, format='html') assert buffer_output.getvalue() == buffer_expected.getvalue() def test_multi_column_write_table_html_fill_values_masked(): """ Test that passing masked values in fill_values should only replace masked columns or values for multidimensional tables """ buffer_output = StringIO() t = Table([[1, 2, 3, 4], ['--', 'a', '--', 'b']], names=('a', 'b'), masked=True) t['a'][0:2] = np.ma.masked t['b'][0:2] = np.ma.masked ascii.write(t, buffer_output, fill_values=[(ascii.masked, 'MASKED')], format='html') t_expected = Table([['MASKED', 'MASKED', 3, 4], [ 'MASKED', 'MASKED', '--', 'b']], names=('a', 'b')) buffer_expected = StringIO() ascii.write(t_expected, buffer_expected, format='html') print(buffer_expected.getvalue()) assert buffer_output.getvalue() == buffer_expected.getvalue() @pytest.mark.skipif('not HAS_BS4') def test_read_html_unicode(): """ Test reading an HTML table with unicode values """ table_in = ['<table>', '<tr><td>&#x0394;</td></tr>', '<tr><td>Δ</td></tr>', '</table>'] dat = Table.read(table_in, format='ascii.html') assert np.all(dat['col1'] == ['Δ', 'Δ'])
astropy/astropy
astropy/io/ascii/tests/test_html.py
astropy/io/ascii/tests/test_types.py
# Licensed under a 3-clause BSD style license - see PYFITS.rst

import os
import shutil
import stat
import tempfile
import time

from astropy.io import fits


class FitsTestCase:
    def setup(self):
        self.data_dir = os.path.join(os.path.dirname(__file__), 'data')
        self.temp_dir = tempfile.mkdtemp(prefix='fits-test-')

        # Restore global settings to defaults
        # TODO: Replace this when there's a better way to in the config API to
        # force config values to their defaults
        fits.conf.enable_record_valued_keyword_cards = True
        fits.conf.extension_name_case_sensitive = False
        fits.conf.strip_header_whitespace = True
        fits.conf.use_memmap = True

    def teardown(self):
        if hasattr(self, 'temp_dir') and os.path.exists(self.temp_dir):
            tries = 3
            while tries:
                try:
                    shutil.rmtree(self.temp_dir)
                    break
                except OSError:
                    # Probably couldn't delete the file because for whatever
                    # reason a handle to it is still open/hasn't been
                    # garbage-collected
                    time.sleep(0.5)
                    tries -= 1

        fits.conf.reset('enable_record_valued_keyword_cards')
        fits.conf.reset('extension_name_case_sensitive')
        fits.conf.reset('strip_header_whitespace')
        fits.conf.reset('use_memmap')

    def copy_file(self, filename):
        """Copies a backup of a test data file to the temp dir and sets its
        mode to writeable.
        """
        shutil.copy(self.data(filename), self.temp(filename))
        os.chmod(self.temp(filename), stat.S_IREAD | stat.S_IWRITE)

    def data(self, filename):
        """Returns the path to a test data file."""
        return os.path.join(self.data_dir, filename)

    def temp(self, filename):
        """Returns the full path to a file in the test temp dir."""
        return os.path.join(self.temp_dir, filename)
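# --- Illustrative sketch (not part of the astropy sources above) ---
# How a test module might subclass FitsTestCase and use its data()/temp()/
# copy_file() helpers. The class, test name and 'example.fits' file are
# hypothetical; substitute a file that actually exists in the data directory.
from astropy.io import fits
from astropy.io.fits.tests import FitsTestCase


class TestExampleUsage(FitsTestCase):
    def test_update_copy(self):
        # copy_file() places a writeable copy of a data file in the temp dir
        self.copy_file('example.fits')
        path = self.temp('example.fits')
        with fits.open(path, mode='update') as hdul:
            hdul[0].header['OBSERVER'] = 'me'
        with fits.open(path) as hdul:
            assert hdul[0].header['OBSERVER'] == 'me'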
astropy/astropy
astropy/io/ascii/tests/test_html.py
astropy/io/fits/tests/__init__.py
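The astropy/io/ascii/tests/test_html.py excerpt above exercises the ascii.html writer's htmldict options (multicol, raw_html_cols, raw_html_clean_kwargs) together with fill_values handling. A minimal usage sketch, assuming astropy is installed and bleach is available for the raw-HTML path; the table contents here are illustrative only:

from io import StringIO

from astropy.io import ascii
from astropy.table import Table

# One scalar column, one multidimensional column, one column of raw HTML.
t = Table([[1, 2],
           [(1.0, 1.0), (2.0, 2.0)],
           ['<em>x</em>', '<em>y</em>']],
          names=('C1', 'C2', 'C3'))

# Multidimensional columns get colspan headers by default; raw_html_cols
# (requires bleach) leaves the listed columns unescaped.
buf = StringIO()
t.write(buf, format='ascii.html',
        htmldict={'multicol': True, 'raw_html_cols': ['C3']})

# fill_values replaces matching cells before the HTML is rendered.
buf = StringIO()
ascii.write(t, buf, format='html', fill_values=('1', 'replaced'))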
from pandas.compat import StringIO

from pandas import read_sas
import pandas.util.testing as tm


class TestSas(object):

    def test_sas_buffer_format(self):
        # see gh-14947
        b = StringIO("")

        msg = ("If this is a buffer object rather than a string "
               "name, you must specify a format string")
        with tm.assert_raises_regex(ValueError, msg):
            read_sas(b)
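The test above pins down read_sas's error path for buffers: with no file name there is no extension to infer the format from, so a format string is required. A hedged sketch of the corresponding calls; the file name and format value are illustrative assumptions:

from pandas import read_sas

# With a path, the format ('xport' or 'sas7bdat') is inferred from the
# file extension.
df = read_sas('data/airline.xpt')

# With a buffer or other file-like object, format must be given
# explicitly, otherwise the ValueError exercised above is raised.
with open('data/airline.xpt', 'rb') as fh:
    df = read_sas(fh, format='xport')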
# -*- coding: utf-8 -*- # pylint: disable-msg=E1101,W0612 from operator import methodcaller import pytest import numpy as np import pandas as pd from distutils.version import LooseVersion from pandas import Series, date_range, MultiIndex from pandas.compat import range from pandas.util.testing import (assert_series_equal, assert_almost_equal) import pandas.util.testing as tm import pandas.util._test_decorators as td from .test_generic import Generic try: import xarray _XARRAY_INSTALLED = True except ImportError: _XARRAY_INSTALLED = False class TestSeries(Generic): _typ = Series _comparator = lambda self, x, y: assert_series_equal(x, y) def setup_method(self): self.ts = tm.makeTimeSeries() # Was at top level in test_series self.ts.name = 'ts' self.series = tm.makeStringSeries() self.series.name = 'series' def test_rename_mi(self): s = Series([11, 21, 31], index=MultiIndex.from_tuples( [("A", x) for x in ["a", "B", "c"]])) s.rename(str.lower) def test_set_axis_name(self): s = Series([1, 2, 3], index=['a', 'b', 'c']) funcs = ['rename_axis', '_set_axis_name'] name = 'foo' for func in funcs: result = methodcaller(func, name)(s) assert s.index.name is None assert result.index.name == name def test_set_axis_name_mi(self): s = Series([11, 21, 31], index=MultiIndex.from_tuples( [("A", x) for x in ["a", "B", "c"]], names=['l1', 'l2']) ) funcs = ['rename_axis', '_set_axis_name'] for func in funcs: result = methodcaller(func, ['L1', 'L2'])(s) assert s.index.name is None assert s.index.names == ['l1', 'l2'] assert result.index.name is None assert result.index.names, ['L1', 'L2'] def test_set_axis_name_raises(self): s = pd.Series([1]) with pytest.raises(ValueError): s._set_axis_name(name='a', axis=1) def test_get_numeric_data_preserve_dtype(self): # get the numeric data o = Series([1, 2, 3]) result = o._get_numeric_data() self._compare(result, o) o = Series([1, '2', 3.]) result = o._get_numeric_data() expected = Series([], dtype=object, index=pd.Index([], dtype=object)) self._compare(result, expected) o = Series([True, False, True]) result = o._get_numeric_data() self._compare(result, o) o = Series([True, False, True]) result = o._get_bool_data() self._compare(result, o) o = Series(date_range('20130101', periods=3)) result = o._get_numeric_data() expected = Series([], dtype='M8[ns]', index=pd.Index([], dtype=object)) self._compare(result, expected) def test_nonzero_single_element(self): # allow single item via bool method s = Series([True]) assert s.bool() s = Series([False]) assert not s.bool() # single item nan to raise for s in [Series([np.nan]), Series([pd.NaT]), Series([True]), Series([False])]: pytest.raises(ValueError, lambda: bool(s)) for s in [Series([np.nan]), Series([pd.NaT])]: pytest.raises(ValueError, lambda: s.bool()) # multiple bool are still an error for s in [Series([True, True]), Series([False, False])]: pytest.raises(ValueError, lambda: bool(s)) pytest.raises(ValueError, lambda: s.bool()) # single non-bool are an error for s in [Series([1]), Series([0]), Series(['a']), Series([0.0])]: pytest.raises(ValueError, lambda: bool(s)) pytest.raises(ValueError, lambda: s.bool()) def test_metadata_propagation_indiv(self): # check that the metadata matches up on the resulting ops o = Series(range(3), range(3)) o.name = 'foo' o2 = Series(range(3), range(3)) o2.name = 'bar' result = o.T self.check_metadata(o, result) # resample ts = Series(np.random.rand(1000), index=date_range('20130101', periods=1000, freq='s'), name='foo') result = ts.resample('1T').mean() self.check_metadata(ts, result) result 
= ts.resample('1T').min() self.check_metadata(ts, result) result = ts.resample('1T').apply(lambda x: x.sum()) self.check_metadata(ts, result) _metadata = Series._metadata _finalize = Series.__finalize__ Series._metadata = ['name', 'filename'] o.filename = 'foo' o2.filename = 'bar' def finalize(self, other, method=None, **kwargs): for name in self._metadata: if method == 'concat' and name == 'filename': value = '+'.join([getattr( o, name) for o in other.objs if getattr(o, name, None) ]) object.__setattr__(self, name, value) else: object.__setattr__(self, name, getattr(other, name, None)) return self Series.__finalize__ = finalize result = pd.concat([o, o2]) assert result.filename == 'foo+bar' assert result.name is None # reset Series._metadata = _metadata Series.__finalize__ = _finalize @pytest.mark.skipif(not _XARRAY_INSTALLED or _XARRAY_INSTALLED and LooseVersion(xarray.__version__) < LooseVersion('0.10.0'), reason='xarray >= 0.10.0 required') @pytest.mark.parametrize( "index", ['FloatIndex', 'IntIndex', 'StringIndex', 'UnicodeIndex', 'DateIndex', 'PeriodIndex', 'TimedeltaIndex', 'CategoricalIndex']) def test_to_xarray_index_types(self, index): from xarray import DataArray index = getattr(tm, 'make{}'.format(index)) s = Series(range(6), index=index(6)) s.index.name = 'foo' result = s.to_xarray() repr(result) assert len(result) == 6 assert len(result.coords) == 1 assert_almost_equal(list(result.coords.keys()), ['foo']) assert isinstance(result, DataArray) # idempotency assert_series_equal(result.to_series(), s, check_index_type=False, check_categorical=True) @td.skip_if_no('xarray', min_version='0.7.0') def test_to_xarray(self): from xarray import DataArray s = Series([]) s.index.name = 'foo' result = s.to_xarray() assert len(result) == 0 assert len(result.coords) == 1 assert_almost_equal(list(result.coords.keys()), ['foo']) assert isinstance(result, DataArray) s = Series(range(6)) s.index.name = 'foo' s.index = pd.MultiIndex.from_product([['a', 'b'], range(3)], names=['one', 'two']) result = s.to_xarray() assert len(result) == 2 assert_almost_equal(list(result.coords.keys()), ['one', 'two']) assert isinstance(result, DataArray) assert_series_equal(result.to_series(), s) def test_valid_deprecated(self): # GH18800 with tm.assert_produces_warning(FutureWarning): pd.Series([]).valid() @pytest.mark.parametrize("s", [ Series([np.arange(5)]), pd.date_range('1/1/2011', periods=24, freq='H'), pd.Series(range(5), index=pd.date_range("2017", periods=5)) ]) @pytest.mark.parametrize("shift_size", [0, 1, 2]) def test_shift_always_copy(self, s, shift_size): # GH22397 assert s.shift(shift_size) is not s @pytest.mark.parametrize("move_by_freq", [ pd.Timedelta('1D'), pd.Timedelta('1M'), ]) def test_datetime_shift_always_copy(self, move_by_freq): # GH22397 s = pd.Series(range(5), index=pd.date_range("2017", periods=5)) assert s.shift(freq=move_by_freq) is not s
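A few of the Series behaviours covered by the test_series.py excerpt above, shown as a minimal sketch with illustrative values:

import pandas as pd

s = pd.Series([1, 2, 3], index=['a', 'b', 'c'])

# rename_axis returns a new object; the original index name is untouched.
renamed = s.rename_axis('foo')
assert s.index.name is None and renamed.index.name == 'foo'

# .bool() works only for a single-element boolean Series.
assert pd.Series([True]).bool()

# shift() always returns a copy, even for a zero-period shift (GH22397).
assert s.shift(0) is not s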
amolkahat/pandas
pandas/tests/generic/test_series.py
pandas/tests/io/sas/test_sas.py
#!/usr/bin/env python """Top level ``eval`` module. """ import warnings import tokenize from pandas.io.formats.printing import pprint_thing from pandas.core.computation.scope import _ensure_scope from pandas.compat import string_types from pandas.core.computation.engines import _engines from pandas.util._validators import validate_bool_kwarg def _check_engine(engine): """Make sure a valid engine is passed. Parameters ---------- engine : str Raises ------ KeyError * If an invalid engine is passed ImportError * If numexpr was requested but doesn't exist Returns ------- string engine """ from pandas.core.computation.check import _NUMEXPR_INSTALLED if engine is None: if _NUMEXPR_INSTALLED: engine = 'numexpr' else: engine = 'python' if engine not in _engines: valid = list(_engines.keys()) raise KeyError('Invalid engine {engine!r} passed, valid engines are' ' {valid}'.format(engine=engine, valid=valid)) # TODO: validate this in a more general way (thinking of future engines # that won't necessarily be import-able) # Could potentially be done on engine instantiation if engine == 'numexpr': if not _NUMEXPR_INSTALLED: raise ImportError("'numexpr' is not installed or an " "unsupported version. Cannot use " "engine='numexpr' for query/eval " "if 'numexpr' is not installed") return engine def _check_parser(parser): """Make sure a valid parser is passed. Parameters ---------- parser : str Raises ------ KeyError * If an invalid parser is passed """ from pandas.core.computation.expr import _parsers if parser not in _parsers: raise KeyError('Invalid parser {parser!r} passed, valid parsers are' ' {valid}'.format(parser=parser, valid=_parsers.keys())) def _check_resolvers(resolvers): if resolvers is not None: for resolver in resolvers: if not hasattr(resolver, '__getitem__'): name = type(resolver).__name__ raise TypeError('Resolver of type {name!r} does not implement ' 'the __getitem__ method'.format(name=name)) def _check_expression(expr): """Make sure an expression is not an empty string Parameters ---------- expr : object An object that can be converted to a string Raises ------ ValueError * If expr is an empty string """ if not expr: raise ValueError("expr cannot be an empty string") def _convert_expression(expr): """Convert an object to an expression. Thus function converts an object to an expression (a unicode string) and checks to make sure it isn't empty after conversion. This is used to convert operators to their string representation for recursive calls to :func:`~pandas.eval`. Parameters ---------- expr : object The object to be converted to a string. Returns ------- s : unicode The string representation of an object. Raises ------ ValueError * If the expression is empty. 
""" s = pprint_thing(expr) _check_expression(s) return s def _check_for_locals(expr, stack_level, parser): from pandas.core.computation.expr import tokenize_string at_top_of_stack = stack_level == 0 not_pandas_parser = parser != 'pandas' if not_pandas_parser: msg = "The '@' prefix is only supported by the pandas parser" elif at_top_of_stack: msg = ("The '@' prefix is not allowed in " "top-level eval calls, \nplease refer to " "your variables by name without the '@' " "prefix") if at_top_of_stack or not_pandas_parser: for toknum, tokval in tokenize_string(expr): if toknum == tokenize.OP and tokval == '@': raise SyntaxError(msg) def eval(expr, parser='pandas', engine=None, truediv=True, local_dict=None, global_dict=None, resolvers=(), level=0, target=None, inplace=False): """Evaluate a Python expression as a string using various backends. The following arithmetic operations are supported: ``+``, ``-``, ``*``, ``/``, ``**``, ``%``, ``//`` (python engine only) along with the following boolean operations: ``|`` (or), ``&`` (and), and ``~`` (not). Additionally, the ``'pandas'`` parser allows the use of :keyword:`and`, :keyword:`or`, and :keyword:`not` with the same semantics as the corresponding bitwise operators. :class:`~pandas.Series` and :class:`~pandas.DataFrame` objects are supported and behave as they would with plain ol' Python evaluation. Parameters ---------- expr : str or unicode The expression to evaluate. This string cannot contain any Python `statements <https://docs.python.org/3/reference/simple_stmts.html#simple-statements>`__, only Python `expressions <https://docs.python.org/3/reference/simple_stmts.html#expression-statements>`__. parser : string, default 'pandas', {'pandas', 'python'} The parser to use to construct the syntax tree from the expression. The default of ``'pandas'`` parses code slightly different than standard Python. Alternatively, you can parse an expression using the ``'python'`` parser to retain strict Python semantics. See the :ref:`enhancing performance <enhancingperf.eval>` documentation for more details. engine : string or None, default 'numexpr', {'python', 'numexpr'} The engine used to evaluate the expression. Supported engines are - None : tries to use ``numexpr``, falls back to ``python`` - ``'numexpr'``: This default engine evaluates pandas objects using numexpr for large speed ups in complex expressions with large frames. - ``'python'``: Performs operations as if you had ``eval``'d in top level python. This engine is generally not that useful. More backends may be available in the future. truediv : bool, optional Whether to use true division, like in Python >= 3 local_dict : dict or None, optional A dictionary of local variables, taken from locals() by default. global_dict : dict or None, optional A dictionary of global variables, taken from globals() by default. resolvers : list of dict-like or None, optional A list of objects implementing the ``__getitem__`` special method that you can use to inject an additional collection of namespaces to use for variable lookup. For example, this is used in the :meth:`~pandas.DataFrame.query` method to inject the ``DataFrame.index`` and ``DataFrame.columns`` variables that refer to their respective :class:`~pandas.DataFrame` instance attributes. level : int, optional The number of prior stack frames to traverse and add to the current scope. Most users will **not** need to change this parameter. target : object, optional, default None This is the target object for assignment. 
It is used when there is variable assignment in the expression. If so, then `target` must support item assignment with string keys, and if a copy is being returned, it must also support `.copy()`. inplace : bool, default False If `target` is provided, and the expression mutates `target`, whether to modify `target` inplace. Otherwise, return a copy of `target` with the mutation. Returns ------- ndarray, numeric scalar, DataFrame, Series Raises ------ ValueError There are many instances where such an error can be raised: - `target=None`, but the expression is multiline. - The expression is multiline, but not all them have item assignment. An example of such an arrangement is this: a = b + 1 a + 2 Here, there are expressions on different lines, making it multiline, but the last line has no variable assigned to the output of `a + 2`. - `inplace=True`, but the expression is missing item assignment. - Item assignment is provided, but the `target` does not support string item assignment. - Item assignment is provided and `inplace=False`, but the `target` does not support the `.copy()` method Notes ----- The ``dtype`` of any objects involved in an arithmetic ``%`` operation are recursively cast to ``float64``. See the :ref:`enhancing performance <enhancingperf.eval>` documentation for more details. See Also -------- pandas.DataFrame.query pandas.DataFrame.eval """ from pandas.core.computation.expr import Expr inplace = validate_bool_kwarg(inplace, "inplace") if isinstance(expr, string_types): _check_expression(expr) exprs = [e.strip() for e in expr.splitlines() if e.strip() != ''] else: exprs = [expr] multi_line = len(exprs) > 1 if multi_line and target is None: raise ValueError("multi-line expressions are only valid in the " "context of data, use DataFrame.eval") ret = None first_expr = True target_modified = False for expr in exprs: expr = _convert_expression(expr) engine = _check_engine(engine) _check_parser(parser) _check_resolvers(resolvers) _check_for_locals(expr, level, parser) # get our (possibly passed-in) scope env = _ensure_scope(level + 1, global_dict=global_dict, local_dict=local_dict, resolvers=resolvers, target=target) parsed_expr = Expr(expr, engine=engine, parser=parser, env=env, truediv=truediv) # construct the engine and evaluate the parsed expression eng = _engines[engine] eng_inst = eng(parsed_expr) ret = eng_inst.evaluate() if parsed_expr.assigner is None: if multi_line: raise ValueError("Multi-line expressions are only valid" " if all expressions contain an assignment") elif inplace: raise ValueError("Cannot operate inplace " "if there is no assignment") # assign if needed assigner = parsed_expr.assigner if env.target is not None and assigner is not None: target_modified = True # if returning a copy, copy only on the first assignment if not inplace and first_expr: try: target = env.target.copy() except AttributeError: raise ValueError("Cannot return a copy of the target") else: target = env.target # TypeError is most commonly raised (e.g. int, list), but you # get IndexError if you try to do this assignment on np.ndarray. # we will ignore numpy warnings here; e.g. if trying # to use a non-numeric indexer try: with warnings.catch_warnings(record=True): # TODO: Filter the warnings we actually care about here. 
target[assigner] = ret except (TypeError, IndexError): raise ValueError("Cannot assign expression output to target") if not resolvers: resolvers = ({assigner: ret},) else: # existing resolver needs updated to handle # case of mutating existing column in copy for resolver in resolvers: if assigner in resolver: resolver[assigner] = ret break else: resolvers += ({assigner: ret},) ret = None first_expr = False # We want to exclude `inplace=None` as being False. if inplace is False: return target if target_modified else ret
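The eval() docstring and implementation above spell out the engine/parser checks and the rules around multi-line expressions, target and inplace. A short usage sketch with illustrative column names:

import pandas as pd

df = pd.DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6]})

# pd.eval resolves names from the calling frame by default
# (local_dict=locals()); numexpr is used when installed, else python.
a, b = df['a'], df['b']
total = pd.eval('a + b')

# DataFrame.eval routes through the same machinery with the frame's
# columns injected as resolvers.  A multi-line expression needs a target
# (the frame itself) and every line must assign; with inplace=False a
# modified copy is returned.
out = df.eval('c = a + b\nd = c * 2', inplace=False)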
amolkahat/pandas
pandas/tests/generic/test_series.py
pandas/core/computation/eval.py
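The pandas.core.frame excerpt that follows shows the DataFrame constructor dispatching on the type of `data` (BlockManager, dict, masked array, structured ndarray, iterable of rows). A hedged sketch of a few of those constructor paths:

from collections import namedtuple

import numpy as np
import pandas as pd

# dict of array-likes: keys become column labels.
pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]})

# structured ndarray: field names become column labels.
arr = np.array([(1, 2.0), (3, 4.0)], dtype=[('a', 'i4'), ('b', 'f8')])
pd.DataFrame(arr)

# iterable of namedtuples: columns are taken from the first row's _fields
# when no columns are passed.
Row = namedtuple('Row', ['x', 'y'])
pd.DataFrame([Row(1, 2), Row(3, 4)])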
""" DataFrame --------- An efficient 2D container for potentially mixed-type time series or other labeled data series. Similar to its R counterpart, data.frame, except providing automatic data alignment and a host of useful data manipulation methods having to do with the labeling information """ import collections from collections import OrderedDict, abc import functools from io import StringIO import itertools import sys import warnings from textwrap import dedent from typing import FrozenSet, List, Optional, Set, Type, Union import numpy as np import numpy.ma as ma from pandas._config import get_option from pandas._libs import lib, algos as libalgos from pandas.util._decorators import (Appender, Substitution, rewrite_axis_style_signature, deprecate_kwarg) from pandas.util._validators import (validate_bool_kwarg, validate_axis_style_args) from pandas.compat import PY36, raise_with_traceback from pandas.compat.numpy import function as nv from pandas.core.arrays.sparse import SparseFrameAccessor from pandas.core.dtypes.cast import ( maybe_upcast, cast_scalar_to_array, infer_dtype_from_scalar, maybe_cast_to_datetime, maybe_infer_to_datetimelike, maybe_convert_platform, maybe_downcast_to_dtype, invalidate_string_dtypes, coerce_to_dtypes, maybe_upcast_putmask, find_common_type) from pandas.core.dtypes.common import ( is_dict_like, is_datetime64tz_dtype, is_object_dtype, is_extension_type, is_extension_array_dtype, is_datetime64_any_dtype, is_bool_dtype, is_integer_dtype, is_float_dtype, is_integer, is_scalar, is_dtype_equal, needs_i8_conversion, infer_dtype_from_object, ensure_float64, ensure_int64, ensure_platform_int, is_list_like, is_nested_list_like, is_iterator, is_sequence, is_named_tuple) from pandas.core.dtypes.generic import ( ABCSeries, ABCDataFrame, ABCIndexClass, ABCMultiIndex) from pandas.core.dtypes.missing import isna, notna from pandas.core import algorithms from pandas.core import common as com from pandas.core import nanops from pandas.core import ops from pandas.core.accessor import CachedAccessor from pandas.core.arrays import Categorical, ExtensionArray from pandas.core.arrays.datetimelike import ( DatetimeLikeArrayMixin as DatetimeLikeArray ) from pandas.core.generic import NDFrame, _shared_docs from pandas.core.index import (Index, MultiIndex, ensure_index, ensure_index_from_sequences) from pandas.core.indexes import base as ibase from pandas.core.indexes.datetimes import DatetimeIndex from pandas.core.indexes.period import PeriodIndex from pandas.core.indexing import (maybe_droplevels, convert_to_index_sliceable, check_bool_indexer) from pandas.core.internals import BlockManager from pandas.core.internals.construction import ( masked_rec_array_to_mgr, get_names_from_index, to_arrays, reorder_arrays, init_ndarray, init_dict, arrays_to_mgr, sanitize_index) from pandas.core.series import Series from pandas.io.formats import console from pandas.io.formats import format as fmt from pandas.io.formats.printing import pprint_thing import pandas.plotting # --------------------------------------------------------------------- # Docstring templates _shared_doc_kwargs = dict( axes='index, columns', klass='DataFrame', axes_single_arg="{0 or 'index', 1 or 'columns'}", axis="""axis : {0 or 'index', 1 or 'columns'}, default 0 If 0 or 'index': apply function to each column. If 1 or 'columns': apply function to each row.""", optional_by=""" by : str or list of str Name or list of names to sort by. 
- if `axis` is 0 or `'index'` then `by` may contain index levels and/or column labels - if `axis` is 1 or `'columns'` then `by` may contain column levels and/or index labels .. versionchanged:: 0.23.0 Allow specifying index or column level names.""", versionadded_to_excel='', optional_labels="""labels : array-like, optional New labels / index to conform the axis specified by 'axis' to.""", optional_axis="""axis : int or str, optional Axis to target. Can be either the axis name ('index', 'columns') or number (0, 1).""", ) _numeric_only_doc = """numeric_only : boolean, default None Include only float, int, boolean data. If None, will attempt to use everything, then use only numeric data """ _merge_doc = """ Merge DataFrame or named Series objects with a database-style join. The join is done on columns or indexes. If joining columns on columns, the DataFrame indexes *will be ignored*. Otherwise if joining indexes on indexes or indexes on a column or columns, the index will be passed on. Parameters ----------%s right : DataFrame or named Series Object to merge with. how : {'left', 'right', 'outer', 'inner'}, default 'inner' Type of merge to be performed. * left: use only keys from left frame, similar to a SQL left outer join; preserve key order. * right: use only keys from right frame, similar to a SQL right outer join; preserve key order. * outer: use union of keys from both frames, similar to a SQL full outer join; sort keys lexicographically. * inner: use intersection of keys from both frames, similar to a SQL inner join; preserve the order of the left keys. on : label or list Column or index level names to join on. These must be found in both DataFrames. If `on` is None and not merging on indexes then this defaults to the intersection of the columns in both DataFrames. left_on : label or list, or array-like Column or index level names to join on in the left DataFrame. Can also be an array or list of arrays of the length of the left DataFrame. These arrays are treated as if they are columns. right_on : label or list, or array-like Column or index level names to join on in the right DataFrame. Can also be an array or list of arrays of the length of the right DataFrame. These arrays are treated as if they are columns. left_index : bool, default False Use the index from the left DataFrame as the join key(s). If it is a MultiIndex, the number of keys in the other DataFrame (either the index or a number of columns) must match the number of levels. right_index : bool, default False Use the index from the right DataFrame as the join key. Same caveats as left_index. sort : bool, default False Sort the join keys lexicographically in the result DataFrame. If False, the order of the join keys depends on the join type (how keyword). suffixes : tuple of (str, str), default ('_x', '_y') Suffix to apply to overlapping column names in the left and right side, respectively. To raise an exception on overlapping columns use (False, False). copy : bool, default True If False, avoid copy if possible. indicator : bool or str, default False If True, adds a column to output DataFrame called "_merge" with information on the source of each row. If string, column with information on source of each row will be added to output DataFrame, and column will be named value of string. 
Information column is Categorical-type and takes on a value of "left_only" for observations whose merge key only appears in 'left' DataFrame, "right_only" for observations whose merge key only appears in 'right' DataFrame, and "both" if the observation's merge key is found in both. validate : str, optional If specified, checks if merge is of specified type. * "one_to_one" or "1:1": check if merge keys are unique in both left and right datasets. * "one_to_many" or "1:m": check if merge keys are unique in left dataset. * "many_to_one" or "m:1": check if merge keys are unique in right dataset. * "many_to_many" or "m:m": allowed, but does not result in checks. .. versionadded:: 0.21.0 Returns ------- DataFrame A DataFrame of the two merged objects. See Also -------- merge_ordered : Merge with optional filling/interpolation. merge_asof : Merge on nearest keys. DataFrame.join : Similar method using indices. Notes ----- Support for specifying index levels as the `on`, `left_on`, and `right_on` parameters was added in version 0.23.0 Support for merging named Series objects was added in version 0.24.0 Examples -------- >>> df1 = pd.DataFrame({'lkey': ['foo', 'bar', 'baz', 'foo'], ... 'value': [1, 2, 3, 5]}) >>> df2 = pd.DataFrame({'rkey': ['foo', 'bar', 'baz', 'foo'], ... 'value': [5, 6, 7, 8]}) >>> df1 lkey value 0 foo 1 1 bar 2 2 baz 3 3 foo 5 >>> df2 rkey value 0 foo 5 1 bar 6 2 baz 7 3 foo 8 Merge df1 and df2 on the lkey and rkey columns. The value columns have the default suffixes, _x and _y, appended. >>> df1.merge(df2, left_on='lkey', right_on='rkey') lkey value_x rkey value_y 0 foo 1 foo 5 1 foo 1 foo 8 2 foo 5 foo 5 3 foo 5 foo 8 4 bar 2 bar 6 5 baz 3 baz 7 Merge DataFrames df1 and df2 with specified left and right suffixes appended to any overlapping columns. >>> df1.merge(df2, left_on='lkey', right_on='rkey', ... suffixes=('_left', '_right')) lkey value_left rkey value_right 0 foo 1 foo 5 1 foo 1 foo 8 2 foo 5 foo 5 3 foo 5 foo 8 4 bar 2 bar 6 5 baz 3 baz 7 Merge DataFrames df1 and df2, but raise an exception if the DataFrames have any overlapping columns. >>> df1.merge(df2, left_on='lkey', right_on='rkey', suffixes=(False, False)) Traceback (most recent call last): ... ValueError: columns overlap but no suffix specified: Index(['value'], dtype='object') """ # ----------------------------------------------------------------------- # DataFrame class class DataFrame(NDFrame): """ Two-dimensional size-mutable, potentially heterogeneous tabular data structure with labeled axes (rows and columns). Arithmetic operations align on both row and column labels. Can be thought of as a dict-like container for Series objects. The primary pandas data structure. Parameters ---------- data : ndarray (structured or homogeneous), Iterable, dict, or DataFrame Dict can contain Series, arrays, constants, or list-like objects .. versionchanged :: 0.23.0 If data is a dict, argument order is maintained for Python 3.6 and later. index : Index or array-like Index to use for resulting frame. Will default to RangeIndex if no indexing information part of input data and no index provided columns : Index or array-like Column labels to use for resulting frame. Will default to RangeIndex (0, 1, 2, ..., n) if no column labels are provided dtype : dtype, default None Data type to force. Only a single dtype is allowed. If None, infer copy : boolean, default False Copy data from inputs. Only affects DataFrame / 2d ndarray input See Also -------- DataFrame.from_records : Constructor from tuples, also record arrays. 
DataFrame.from_dict : From dicts of Series, arrays, or dicts. DataFrame.from_items : From sequence of (key, value) pairs read_csv, pandas.read_table, pandas.read_clipboard. Examples -------- Constructing DataFrame from a dictionary. >>> d = {'col1': [1, 2], 'col2': [3, 4]} >>> df = pd.DataFrame(data=d) >>> df col1 col2 0 1 3 1 2 4 Notice that the inferred dtype is int64. >>> df.dtypes col1 int64 col2 int64 dtype: object To enforce a single dtype: >>> df = pd.DataFrame(data=d, dtype=np.int8) >>> df.dtypes col1 int8 col2 int8 dtype: object Constructing DataFrame from numpy ndarray: >>> df2 = pd.DataFrame(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]), ... columns=['a', 'b', 'c']) >>> df2 a b c 0 1 2 3 1 4 5 6 2 7 8 9 """ @property def _constructor(self): return DataFrame _constructor_sliced = Series # type: Type[Series] _deprecations = NDFrame._deprecations | frozenset([ 'get_value', 'set_value', 'from_csv', 'from_items' ]) # type: FrozenSet[str] _accessors = set() # type: Set[str] @property def _constructor_expanddim(self): raise NotImplementedError("Not supported for DataFrames!") # ---------------------------------------------------------------------- # Constructors def __init__(self, data=None, index=None, columns=None, dtype=None, copy=False): if data is None: data = {} if dtype is not None: dtype = self._validate_dtype(dtype) if isinstance(data, DataFrame): data = data._data if isinstance(data, BlockManager): mgr = self._init_mgr(data, axes=dict(index=index, columns=columns), dtype=dtype, copy=copy) elif isinstance(data, dict): mgr = init_dict(data, index, columns, dtype=dtype) elif isinstance(data, ma.MaskedArray): import numpy.ma.mrecords as mrecords # masked recarray if isinstance(data, mrecords.MaskedRecords): mgr = masked_rec_array_to_mgr(data, index, columns, dtype, copy) # a masked array else: mask = ma.getmaskarray(data) if mask.any(): data, fill_value = maybe_upcast(data, copy=True) data.soften_mask() # set hardmask False if it was True data[mask] = fill_value else: data = data.copy() mgr = init_ndarray(data, index, columns, dtype=dtype, copy=copy) elif isinstance(data, (np.ndarray, Series, Index)): if data.dtype.names: data_columns = list(data.dtype.names) data = {k: data[k] for k in data_columns} if columns is None: columns = data_columns mgr = init_dict(data, index, columns, dtype=dtype) elif getattr(data, 'name', None) is not None: mgr = init_dict({data.name: data}, index, columns, dtype=dtype) else: mgr = init_ndarray(data, index, columns, dtype=dtype, copy=copy) # For data is list-like, or Iterable (will consume into list) elif (isinstance(data, abc.Iterable) and not isinstance(data, (str, bytes))): if not isinstance(data, abc.Sequence): data = list(data) if len(data) > 0: if is_list_like(data[0]) and getattr(data[0], 'ndim', 1) == 1: if is_named_tuple(data[0]) and columns is None: columns = data[0]._fields arrays, columns = to_arrays(data, columns, dtype=dtype) columns = ensure_index(columns) # set the index if index is None: if isinstance(data[0], Series): index = get_names_from_index(data) elif isinstance(data[0], Categorical): index = ibase.default_index(len(data[0])) else: index = ibase.default_index(len(data)) mgr = arrays_to_mgr(arrays, columns, index, columns, dtype=dtype) else: mgr = init_ndarray(data, index, columns, dtype=dtype, copy=copy) else: mgr = init_dict({}, index, columns, dtype=dtype) else: try: arr = np.array(data, dtype=dtype, copy=copy) except (ValueError, TypeError) as e: exc = TypeError('DataFrame constructor called with ' 'incompatible data and 
dtype: {e}'.format(e=e)) raise_with_traceback(exc) if arr.ndim == 0 and index is not None and columns is not None: values = cast_scalar_to_array((len(index), len(columns)), data, dtype=dtype) mgr = init_ndarray(values, index, columns, dtype=values.dtype, copy=False) else: raise ValueError('DataFrame constructor not properly called!') NDFrame.__init__(self, mgr, fastpath=True) # ---------------------------------------------------------------------- @property def axes(self): """ Return a list representing the axes of the DataFrame. It has the row axis labels and column axis labels as the only members. They are returned in that order. Examples -------- >>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]}) >>> df.axes [RangeIndex(start=0, stop=2, step=1), Index(['col1', 'col2'], dtype='object')] """ return [self.index, self.columns] @property def shape(self): """ Return a tuple representing the dimensionality of the DataFrame. See Also -------- ndarray.shape Examples -------- >>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]}) >>> df.shape (2, 2) >>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4], ... 'col3': [5, 6]}) >>> df.shape (2, 3) """ return len(self.index), len(self.columns) @property def _is_homogeneous_type(self): """ Whether all the columns in a DataFrame have the same type. Returns ------- bool Examples -------- >>> DataFrame({"A": [1, 2], "B": [3, 4]})._is_homogeneous_type True >>> DataFrame({"A": [1, 2], "B": [3.0, 4.0]})._is_homogeneous_type False Items with the same type but different sizes are considered different types. >>> DataFrame({ ... "A": np.array([1, 2], dtype=np.int32), ... "B": np.array([1, 2], dtype=np.int64)})._is_homogeneous_type False """ if self._data.any_extension_types: return len({block.dtype for block in self._data.blocks}) == 1 else: return not self._data.is_mixed_type # ---------------------------------------------------------------------- # Rendering Methods def _repr_fits_vertical_(self): """ Check length against max_rows. """ max_rows = get_option("display.max_rows") return len(self) <= max_rows def _repr_fits_horizontal_(self, ignore_width=False): """ Check if full repr fits in horizontal boundaries imposed by the display options width and max_columns. In case off non-interactive session, no boundaries apply. `ignore_width` is here so ipnb+HTML output can behave the way users expect. display.max_columns remains in effect. 
GH3541, GH3573 """ width, height = console.get_console_size() max_columns = get_option("display.max_columns") nb_columns = len(self.columns) # exceed max columns if ((max_columns and nb_columns > max_columns) or ((not ignore_width) and width and nb_columns > (width // 2))): return False # used by repr_html under IPython notebook or scripts ignore terminal # dims if ignore_width or not console.in_interactive_session(): return True if (get_option('display.width') is not None or console.in_ipython_frontend()): # check at least the column row for excessive width max_rows = 1 else: max_rows = get_option("display.max_rows") # when auto-detecting, so width=None and not in ipython front end # check whether repr fits horizontal by actually checking # the width of the rendered repr buf = StringIO() # only care about the stuff we'll actually print out # and to_string on entire frame may be expensive d = self if not (max_rows is None): # unlimited rows # min of two, where one may be None d = d.iloc[:min(max_rows, len(d))] else: return True d.to_string(buf=buf) value = buf.getvalue() repr_width = max(len(l) for l in value.split('\n')) return repr_width < width def _info_repr(self): """ True if the repr should show the info view. """ info_repr_option = (get_option("display.large_repr") == "info") return info_repr_option and not (self._repr_fits_horizontal_() and self._repr_fits_vertical_()) def __repr__(self): """ Return a string representation for a particular DataFrame. """ buf = StringIO("") if self._info_repr(): self.info(buf=buf) return buf.getvalue() max_rows = get_option("display.max_rows") max_cols = get_option("display.max_columns") show_dimensions = get_option("display.show_dimensions") if get_option("display.expand_frame_repr"): width, _ = console.get_console_size() else: width = None self.to_string(buf=buf, max_rows=max_rows, max_cols=max_cols, line_width=width, show_dimensions=show_dimensions) return buf.getvalue() def _repr_html_(self): """ Return a html representation for a particular DataFrame. Mainly for IPython notebook. """ if self._info_repr(): buf = StringIO("") self.info(buf=buf) # need to escape the <class>, should be the first line. val = buf.getvalue().replace('<', r'&lt;', 1) val = val.replace('>', r'&gt;', 1) return '<pre>' + val + '</pre>' if get_option("display.notebook_repr_html"): max_rows = get_option("display.max_rows") max_cols = get_option("display.max_columns") show_dimensions = get_option("display.show_dimensions") return self.to_html(max_rows=max_rows, max_cols=max_cols, show_dimensions=show_dimensions, notebook=True) else: return None @Substitution(header='Write out the column names. If a list of strings ' 'is given, it is assumed to be aliases for the ' 'column names', col_space_type='int', col_space='The minimum width of each column') @Substitution(shared_params=fmt.common_docstring, returns=fmt.return_docstring) def to_string(self, buf=None, columns=None, col_space=None, header=True, index=True, na_rep='NaN', formatters=None, float_format=None, sparsify=None, index_names=True, justify=None, max_rows=None, max_cols=None, show_dimensions=False, decimal='.', line_width=None): """ Render a DataFrame to a console-friendly tabular output. %(shared_params)s line_width : int, optional Width to wrap a line in characters. %(returns)s See Also -------- to_html : Convert DataFrame to HTML. 
Examples -------- >>> d = {'col1': [1, 2, 3], 'col2': [4, 5, 6]} >>> df = pd.DataFrame(d) >>> print(df.to_string()) col1 col2 0 1 4 1 2 5 2 3 6 """ formatter = fmt.DataFrameFormatter(self, buf=buf, columns=columns, col_space=col_space, na_rep=na_rep, formatters=formatters, float_format=float_format, sparsify=sparsify, justify=justify, index_names=index_names, header=header, index=index, max_rows=max_rows, max_cols=max_cols, show_dimensions=show_dimensions, decimal=decimal, line_width=line_width) formatter.to_string() if buf is None: result = formatter.buf.getvalue() return result # ---------------------------------------------------------------------- @property def style(self): """ Property returning a Styler object containing methods for building a styled HTML representation fo the DataFrame. See Also -------- io.formats.style.Styler """ from pandas.io.formats.style import Styler return Styler(self) def iteritems(self): r""" Iterator over (column name, Series) pairs. Iterates over the DataFrame columns, returning a tuple with the column name and the content as a Series. Yields ------ label : object The column names for the DataFrame being iterated over. content : Series The column entries belonging to each label, as a Series. See Also -------- DataFrame.iterrows : Iterate over DataFrame rows as (index, Series) pairs. DataFrame.itertuples : Iterate over DataFrame rows as namedtuples of the values. Examples -------- >>> df = pd.DataFrame({'species': ['bear', 'bear', 'marsupial'], ... 'population': [1864, 22000, 80000]}, ... index=['panda', 'polar', 'koala']) >>> df species population panda bear 1864 polar bear 22000 koala marsupial 80000 >>> for label, content in df.iteritems(): ... print('label:', label) ... print('content:', content, sep='\n') ... label: species content: panda bear polar bear koala marsupial Name: species, dtype: object label: population content: panda 1864 polar 22000 koala 80000 Name: population, dtype: int64 """ if self.columns.is_unique and hasattr(self, '_item_cache'): for k in self.columns: yield k, self._get_item_cache(k) else: for i, k in enumerate(self.columns): yield k, self._ixs(i, axis=1) def iterrows(self): """ Iterate over DataFrame rows as (index, Series) pairs. Yields ------ index : label or tuple of label The index of the row. A tuple for a `MultiIndex`. data : Series The data of the row as a Series. it : generator A generator that iterates over the rows of the frame. See Also -------- itertuples : Iterate over DataFrame rows as namedtuples of the values. iteritems : Iterate over (column name, Series) pairs. Notes ----- 1. Because ``iterrows`` returns a Series for each row, it does **not** preserve dtypes across the rows (dtypes are preserved across columns for DataFrames). For example, >>> df = pd.DataFrame([[1, 1.5]], columns=['int', 'float']) >>> row = next(df.iterrows())[1] >>> row int 1.0 float 1.5 Name: 0, dtype: float64 >>> print(row['int'].dtype) float64 >>> print(df['int'].dtype) int64 To preserve dtypes while iterating over the rows, it is better to use :meth:`itertuples` which returns namedtuples of the values and which is generally faster than ``iterrows``. 2. You should **never modify** something you are iterating over. This is not guaranteed to work in all cases. Depending on the data types, the iterator returns a copy and not a view, and writing to it will have no effect. 
""" columns = self.columns klass = self._constructor_sliced for k, v in zip(self.index, self.values): s = klass(v, index=columns, name=k) yield k, s def itertuples(self, index=True, name="Pandas"): """ Iterate over DataFrame rows as namedtuples. Parameters ---------- index : bool, default True If True, return the index as the first element of the tuple. name : str or None, default "Pandas" The name of the returned namedtuples or None to return regular tuples. Returns ------- iterator An object to iterate over namedtuples for each row in the DataFrame with the first field possibly being the index and following fields being the column values. See Also -------- DataFrame.iterrows : Iterate over DataFrame rows as (index, Series) pairs. DataFrame.iteritems : Iterate over (column name, Series) pairs. Notes ----- The column names will be renamed to positional names if they are invalid Python identifiers, repeated, or start with an underscore. With a large number of columns (>255), regular tuples are returned. Examples -------- >>> df = pd.DataFrame({'num_legs': [4, 2], 'num_wings': [0, 2]}, ... index=['dog', 'hawk']) >>> df num_legs num_wings dog 4 0 hawk 2 2 >>> for row in df.itertuples(): ... print(row) ... Pandas(Index='dog', num_legs=4, num_wings=0) Pandas(Index='hawk', num_legs=2, num_wings=2) By setting the `index` parameter to False we can remove the index as the first element of the tuple: >>> for row in df.itertuples(index=False): ... print(row) ... Pandas(num_legs=4, num_wings=0) Pandas(num_legs=2, num_wings=2) With the `name` parameter set we set a custom name for the yielded namedtuples: >>> for row in df.itertuples(name='Animal'): ... print(row) ... Animal(Index='dog', num_legs=4, num_wings=0) Animal(Index='hawk', num_legs=2, num_wings=2) """ arrays = [] fields = list(self.columns) if index: arrays.append(self.index) fields.insert(0, "Index") # use integer indexing because of possible duplicate column names arrays.extend(self.iloc[:, k] for k in range(len(self.columns))) # Python 3 supports at most 255 arguments to constructor if name is not None and len(self.columns) + index < 256: itertuple = collections.namedtuple(name, fields, rename=True) return map(itertuple._make, zip(*arrays)) # fallback to regular tuples return zip(*arrays) items = iteritems def __len__(self): """ Returns length of info axis, but here we use the index. """ return len(self.index) def dot(self, other): """ Compute the matrix multiplication between the DataFrame and other. This method computes the matrix product between the DataFrame and the values of an other Series, DataFrame or a numpy array. It can also be called using ``self @ other`` in Python >= 3.5. Parameters ---------- other : Series, DataFrame or array-like The other object to compute the matrix product with. Returns ------- Series or DataFrame If other is a Series, return the matrix product between self and other as a Serie. If other is a DataFrame or a numpy.array, return the matrix product of self and other in a DataFrame of a np.array. See Also -------- Series.dot: Similar method for Series. Notes ----- The dimensions of DataFrame and other must be compatible in order to compute the matrix multiplication. In addition, the column names of DataFrame and the index of other must contain the same values, as they will be aligned prior to the multiplication. The dot method for Series computes the inner product, instead of the matrix product here. Examples -------- Here we multiply a DataFrame with a Series. 
>>> df = pd.DataFrame([[0, 1, -2, -1], [1, 1, 1, 1]]) >>> s = pd.Series([1, 1, 2, 1]) >>> df.dot(s) 0 -4 1 5 dtype: int64 Here we multiply a DataFrame with another DataFrame. >>> other = pd.DataFrame([[0, 1], [1, 2], [-1, -1], [2, 0]]) >>> df.dot(other) 0 1 0 1 4 1 2 2 Note that the dot method give the same result as @ >>> df @ other 0 1 0 1 4 1 2 2 The dot method works also if other is an np.array. >>> arr = np.array([[0, 1], [1, 2], [-1, -1], [2, 0]]) >>> df.dot(arr) 0 1 0 1 4 1 2 2 Note how shuffling of the objects does not change the result. >>> s2 = s.reindex([1, 0, 2, 3]) >>> df.dot(s2) 0 -4 1 5 dtype: int64 """ if isinstance(other, (Series, DataFrame)): common = self.columns.union(other.index) if (len(common) > len(self.columns) or len(common) > len(other.index)): raise ValueError('matrices are not aligned') left = self.reindex(columns=common, copy=False) right = other.reindex(index=common, copy=False) lvals = left.values rvals = right.values else: left = self lvals = self.values rvals = np.asarray(other) if lvals.shape[1] != rvals.shape[0]: raise ValueError('Dot product shape mismatch, ' '{s} vs {r}'.format(s=lvals.shape, r=rvals.shape)) if isinstance(other, DataFrame): return self._constructor(np.dot(lvals, rvals), index=left.index, columns=other.columns) elif isinstance(other, Series): return Series(np.dot(lvals, rvals), index=left.index) elif isinstance(rvals, (np.ndarray, Index)): result = np.dot(lvals, rvals) if result.ndim == 2: return self._constructor(result, index=left.index) else: return Series(result, index=left.index) else: # pragma: no cover raise TypeError('unsupported type: {oth}'.format(oth=type(other))) def __matmul__(self, other): """ Matrix multiplication using binary `@` operator in Python>=3.5. """ return self.dot(other) def __rmatmul__(self, other): """ Matrix multiplication using binary `@` operator in Python>=3.5. """ return self.T.dot(np.transpose(other)).T # ---------------------------------------------------------------------- # IO methods (to / from other formats) @classmethod def from_dict(cls, data, orient='columns', dtype=None, columns=None): """ Construct DataFrame from dict of array-like or dicts. Creates DataFrame object from dictionary by columns or by index allowing dtype specification. Parameters ---------- data : dict Of the form {field : array-like} or {field : dict}. orient : {'columns', 'index'}, default 'columns' The "orientation" of the data. If the keys of the passed dict should be the columns of the resulting DataFrame, pass 'columns' (default). Otherwise if the keys should be rows, pass 'index'. dtype : dtype, default None Data type to force, otherwise infer. columns : list, default None Column labels to use when ``orient='index'``. Raises a ValueError if used with ``orient='columns'``. .. versionadded:: 0.23.0 Returns ------- DataFrame See Also -------- DataFrame.from_records : DataFrame from ndarray (structured dtype), list of tuples, dict, or DataFrame. DataFrame : DataFrame object creation using constructor. 
Examples -------- By default the keys of the dict become the DataFrame columns: >>> data = {'col_1': [3, 2, 1, 0], 'col_2': ['a', 'b', 'c', 'd']} >>> pd.DataFrame.from_dict(data) col_1 col_2 0 3 a 1 2 b 2 1 c 3 0 d Specify ``orient='index'`` to create the DataFrame using dictionary keys as rows: >>> data = {'row_1': [3, 2, 1, 0], 'row_2': ['a', 'b', 'c', 'd']} >>> pd.DataFrame.from_dict(data, orient='index') 0 1 2 3 row_1 3 2 1 0 row_2 a b c d When using the 'index' orientation, the column names can be specified manually: >>> pd.DataFrame.from_dict(data, orient='index', ... columns=['A', 'B', 'C', 'D']) A B C D row_1 3 2 1 0 row_2 a b c d """ index = None orient = orient.lower() if orient == 'index': if len(data) > 0: # TODO speed up Series case if isinstance(list(data.values())[0], (Series, dict)): data = _from_nested_dict(data) else: data, index = list(data.values()), list(data.keys()) elif orient == 'columns': if columns is not None: raise ValueError("cannot use columns parameter with " "orient='columns'") else: # pragma: no cover raise ValueError('only recognize index or columns for orient') return cls(data, index=index, columns=columns, dtype=dtype) def to_numpy(self, dtype=None, copy=False): """ Convert the DataFrame to a NumPy array. .. versionadded:: 0.24.0 By default, the dtype of the returned array will be the common NumPy dtype of all types in the DataFrame. For example, if the dtypes are ``float16`` and ``float32``, the results dtype will be ``float32``. This may require copying data and coercing values, which may be expensive. Parameters ---------- dtype : str or numpy.dtype, optional The dtype to pass to :meth:`numpy.asarray` copy : bool, default False Whether to ensure that the returned value is a not a view on another array. Note that ``copy=False`` does not *ensure* that ``to_numpy()`` is no-copy. Rather, ``copy=True`` ensure that a copy is made, even if not strictly necessary. Returns ------- numpy.ndarray See Also -------- Series.to_numpy : Similar method for Series. Examples -------- >>> pd.DataFrame({"A": [1, 2], "B": [3, 4]}).to_numpy() array([[1, 3], [2, 4]]) With heterogenous data, the lowest common type will have to be used. >>> df = pd.DataFrame({"A": [1, 2], "B": [3.0, 4.5]}) >>> df.to_numpy() array([[1. , 3. ], [2. , 4.5]]) For a mix of numeric and non-numeric types, the output array will have object dtype. >>> df['C'] = pd.date_range('2000', periods=2) >>> df.to_numpy() array([[1, 3.0, Timestamp('2000-01-01 00:00:00')], [2, 4.5, Timestamp('2000-01-02 00:00:00')]], dtype=object) """ result = np.array(self.values, dtype=dtype, copy=copy) return result def to_dict(self, orient='dict', into=dict): """ Convert the DataFrame to a dictionary. The type of the key-value pairs can be customized with the parameters (see below). Parameters ---------- orient : str {'dict', 'list', 'series', 'split', 'records', 'index'} Determines the type of the values of the dictionary. - 'dict' (default) : dict like {column -> {index -> value}} - 'list' : dict like {column -> [values]} - 'series' : dict like {column -> Series(values)} - 'split' : dict like {'index' -> [index], 'columns' -> [columns], 'data' -> [values]} - 'records' : list like [{column -> value}, ... , {column -> value}] - 'index' : dict like {index -> {column -> value}} Abbreviations are allowed. `s` indicates `series` and `sp` indicates `split`. into : class, default dict The collections.abc.Mapping subclass used for all Mappings in the return value. 
Can be the actual class or an empty instance of the mapping type you want. If you want a collections.defaultdict, you must pass it initialized. .. versionadded:: 0.21.0 Returns ------- dict, list or collections.abc.Mapping Return a collections.abc.Mapping object representing the DataFrame. The resulting transformation depends on the `orient` parameter. See Also -------- DataFrame.from_dict: Create a DataFrame from a dictionary. DataFrame.to_json: Convert a DataFrame to JSON format. Examples -------- >>> df = pd.DataFrame({'col1': [1, 2], ... 'col2': [0.5, 0.75]}, ... index=['row1', 'row2']) >>> df col1 col2 row1 1 0.50 row2 2 0.75 >>> df.to_dict() {'col1': {'row1': 1, 'row2': 2}, 'col2': {'row1': 0.5, 'row2': 0.75}} You can specify the return orientation. >>> df.to_dict('series') {'col1': row1 1 row2 2 Name: col1, dtype: int64, 'col2': row1 0.50 row2 0.75 Name: col2, dtype: float64} >>> df.to_dict('split') {'index': ['row1', 'row2'], 'columns': ['col1', 'col2'], 'data': [[1, 0.5], [2, 0.75]]} >>> df.to_dict('records') [{'col1': 1, 'col2': 0.5}, {'col1': 2, 'col2': 0.75}] >>> df.to_dict('index') {'row1': {'col1': 1, 'col2': 0.5}, 'row2': {'col1': 2, 'col2': 0.75}} You can also specify the mapping type. >>> from collections import OrderedDict, defaultdict >>> df.to_dict(into=OrderedDict) OrderedDict([('col1', OrderedDict([('row1', 1), ('row2', 2)])), ('col2', OrderedDict([('row1', 0.5), ('row2', 0.75)]))]) If you want a `defaultdict`, you need to initialize it: >>> dd = defaultdict(list) >>> df.to_dict('records', into=dd) [defaultdict(<class 'list'>, {'col1': 1, 'col2': 0.5}), defaultdict(<class 'list'>, {'col1': 2, 'col2': 0.75})] """ if not self.columns.is_unique: warnings.warn("DataFrame columns are not unique, some " "columns will be omitted.", UserWarning, stacklevel=2) # GH16122 into_c = com.standardize_mapping(into) if orient.lower().startswith('d'): return into_c( (k, v.to_dict(into)) for k, v in self.items()) elif orient.lower().startswith('l'): return into_c((k, v.tolist()) for k, v in self.items()) elif orient.lower().startswith('sp'): return into_c((('index', self.index.tolist()), ('columns', self.columns.tolist()), ('data', [ list(map(com.maybe_box_datetimelike, t)) for t in self.itertuples(index=False, name=None) ]))) elif orient.lower().startswith('s'): return into_c((k, com.maybe_box_datetimelike(v)) for k, v in self.items()) elif orient.lower().startswith('r'): columns = self.columns.tolist() rows = (dict(zip(columns, row)) for row in self.itertuples(index=False, name=None)) return [ into_c((k, com.maybe_box_datetimelike(v)) for k, v in row.items()) for row in rows] elif orient.lower().startswith('i'): if not self.index.is_unique: raise ValueError( "DataFrame index must be unique for orient='index'." ) return into_c((t[0], dict(zip(self.columns, t[1:]))) for t in self.itertuples(name=None)) else: raise ValueError("orient '{o}' not understood".format(o=orient)) def to_gbq(self, destination_table, project_id=None, chunksize=None, reauth=False, if_exists='fail', auth_local_webserver=False, table_schema=None, location=None, progress_bar=True, credentials=None, verbose=None, private_key=None): """ Write a DataFrame to a Google BigQuery table. This function requires the `pandas-gbq package <https://pandas-gbq.readthedocs.io>`__. See the `How to authenticate with Google BigQuery <https://pandas-gbq.readthedocs.io/en/latest/howto/authentication.html>`__ guide for authentication instructions. 
Parameters ---------- destination_table : str Name of table to be written, in the form ``dataset.tablename``. project_id : str, optional Google BigQuery Account project ID. Optional when available from the environment. chunksize : int, optional Number of rows to be inserted in each chunk from the dataframe. Set to ``None`` to load the whole dataframe at once. reauth : bool, default False Force Google BigQuery to re-authenticate the user. This is useful if multiple accounts are used. if_exists : str, default 'fail' Behavior when the destination table exists. Value can be one of: ``'fail'`` If table exists, do nothing. ``'replace'`` If table exists, drop it, recreate it, and insert data. ``'append'`` If table exists, insert data. Create if does not exist. auth_local_webserver : bool, default False Use the `local webserver flow`_ instead of the `console flow`_ when getting user credentials. .. _local webserver flow: http://google-auth-oauthlib.readthedocs.io/en/latest/reference/google_auth_oauthlib.flow.html#google_auth_oauthlib.flow.InstalledAppFlow.run_local_server .. _console flow: http://google-auth-oauthlib.readthedocs.io/en/latest/reference/google_auth_oauthlib.flow.html#google_auth_oauthlib.flow.InstalledAppFlow.run_console *New in version 0.2.0 of pandas-gbq*. table_schema : list of dicts, optional List of BigQuery table fields to which according DataFrame columns conform to, e.g. ``[{'name': 'col1', 'type': 'STRING'},...]``. If schema is not provided, it will be generated according to dtypes of DataFrame columns. See BigQuery API documentation on available names of a field. *New in version 0.3.1 of pandas-gbq*. location : str, optional Location where the load job should run. See the `BigQuery locations documentation <https://cloud.google.com/bigquery/docs/dataset-locations>`__ for a list of available locations. The location must match that of the target dataset. *New in version 0.5.0 of pandas-gbq*. progress_bar : bool, default True Use the library `tqdm` to show the progress bar for the upload, chunk by chunk. *New in version 0.5.0 of pandas-gbq*. credentials : google.auth.credentials.Credentials, optional Credentials for accessing Google APIs. Use this parameter to override default credentials, such as to use Compute Engine :class:`google.auth.compute_engine.Credentials` or Service Account :class:`google.oauth2.service_account.Credentials` directly. *New in version 0.8.0 of pandas-gbq*. .. versionadded:: 0.24.0 verbose : bool, deprecated Deprecated in pandas-gbq version 0.4.0. Use the `logging module to adjust verbosity instead <https://pandas-gbq.readthedocs.io/en/latest/intro.html#logging>`__. private_key : str, deprecated Deprecated in pandas-gbq version 0.8.0. Use the ``credentials`` parameter and :func:`google.oauth2.service_account.Credentials.from_service_account_info` or :func:`google.oauth2.service_account.Credentials.from_service_account_file` instead. Service account private key in JSON format. Can be file path or string contents. This is useful for remote server authentication (eg. Jupyter/IPython notebook on remote host). See Also -------- pandas_gbq.to_gbq : This function in the pandas-gbq library. read_gbq : Read a DataFrame from Google BigQuery. 
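        Examples
        --------
        A minimal sketch of a typical call; ``'my_dataset.my_table'`` and the
        ``project_id`` value below are placeholders, and the call is skipped
        here because it requires Google Cloud credentials:

        >>> df = pd.DataFrame({'my_col': [1, 2, 3]})
        >>> df.to_gbq('my_dataset.my_table',
        ...           project_id='my-project')  # doctest: +SKIP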
""" from pandas.io import gbq gbq.to_gbq(self, destination_table, project_id=project_id, chunksize=chunksize, reauth=reauth, if_exists=if_exists, auth_local_webserver=auth_local_webserver, table_schema=table_schema, location=location, progress_bar=progress_bar, credentials=credentials, verbose=verbose, private_key=private_key) @classmethod def from_records(cls, data, index=None, exclude=None, columns=None, coerce_float=False, nrows=None): """ Convert structured or record ndarray to DataFrame. Parameters ---------- data : ndarray (structured dtype), list of tuples, dict, or DataFrame index : string, list of fields, array-like Field of array to use as the index, alternately a specific set of input labels to use exclude : sequence, default None Columns or fields to exclude columns : sequence, default None Column names to use. If the passed data do not have names associated with them, this argument provides names for the columns. Otherwise this argument indicates the order of the columns in the result (any names not found in the data will become all-NA columns) coerce_float : boolean, default False Attempt to convert values of non-string, non-numeric objects (like decimal.Decimal) to floating point, useful for SQL result sets nrows : int, default None Number of rows to read if data is an iterator Returns ------- DataFrame """ # Make a copy of the input columns so we can modify it if columns is not None: columns = ensure_index(columns) if is_iterator(data): if nrows == 0: return cls() try: first_row = next(data) except StopIteration: return cls(index=index, columns=columns) dtype = None if hasattr(first_row, 'dtype') and first_row.dtype.names: dtype = first_row.dtype values = [first_row] if nrows is None: values += data else: values.extend(itertools.islice(data, nrows - 1)) if dtype is not None: data = np.array(values, dtype=dtype) else: data = values if isinstance(data, dict): if columns is None: columns = arr_columns = ensure_index(sorted(data)) arrays = [data[k] for k in columns] else: arrays = [] arr_columns = [] for k, v in data.items(): if k in columns: arr_columns.append(k) arrays.append(v) arrays, arr_columns = reorder_arrays(arrays, arr_columns, columns) elif isinstance(data, (np.ndarray, DataFrame)): arrays, columns = to_arrays(data, columns) if columns is not None: columns = ensure_index(columns) arr_columns = columns else: arrays, arr_columns = to_arrays(data, columns, coerce_float=coerce_float) arr_columns = ensure_index(arr_columns) if columns is not None: columns = ensure_index(columns) else: columns = arr_columns if exclude is None: exclude = set() else: exclude = set(exclude) result_index = None if index is not None: if (isinstance(index, str) or not hasattr(index, "__iter__")): i = columns.get_loc(index) exclude.add(index) if len(arrays) > 0: result_index = Index(arrays[i], name=index) else: result_index = Index([], name=index) else: try: index_data = [arrays[arr_columns.get_loc(field)] for field in index] result_index = ensure_index_from_sequences(index_data, names=index) exclude.update(index) except Exception: result_index = index if any(exclude): arr_exclude = [x for x in exclude if x in arr_columns] to_remove = [arr_columns.get_loc(col) for col in arr_exclude] arrays = [v for i, v in enumerate(arrays) if i not in to_remove] arr_columns = arr_columns.drop(arr_exclude) columns = columns.drop(exclude) mgr = arrays_to_mgr(arrays, arr_columns, result_index, columns) return cls(mgr) def to_records(self, index=True, convert_datetime64=None, column_dtypes=None, index_dtypes=None): 
""" Convert DataFrame to a NumPy record array. Index will be included as the first field of the record array if requested. Parameters ---------- index : bool, default True Include index in resulting record array, stored in 'index' field or using the index label, if set. convert_datetime64 : bool, default None .. deprecated:: 0.23.0 Whether to convert the index to datetime.datetime if it is a DatetimeIndex. column_dtypes : str, type, dict, default None .. versionadded:: 0.24.0 If a string or type, the data type to store all columns. If a dictionary, a mapping of column names and indices (zero-indexed) to specific data types. index_dtypes : str, type, dict, default None .. versionadded:: 0.24.0 If a string or type, the data type to store all index levels. If a dictionary, a mapping of index level names and indices (zero-indexed) to specific data types. This mapping is applied only if `index=True`. Returns ------- numpy.recarray NumPy ndarray with the DataFrame labels as fields and each row of the DataFrame as entries. See Also -------- DataFrame.from_records: Convert structured or record ndarray to DataFrame. numpy.recarray: An ndarray that allows field access using attributes, analogous to typed columns in a spreadsheet. Examples -------- >>> df = pd.DataFrame({'A': [1, 2], 'B': [0.5, 0.75]}, ... index=['a', 'b']) >>> df A B a 1 0.50 b 2 0.75 >>> df.to_records() rec.array([('a', 1, 0.5 ), ('b', 2, 0.75)], dtype=[('index', 'O'), ('A', '<i8'), ('B', '<f8')]) If the DataFrame index has no label then the recarray field name is set to 'index'. If the index has a label then this is used as the field name: >>> df.index = df.index.rename("I") >>> df.to_records() rec.array([('a', 1, 0.5 ), ('b', 2, 0.75)], dtype=[('I', 'O'), ('A', '<i8'), ('B', '<f8')]) The index can be excluded from the record array: >>> df.to_records(index=False) rec.array([(1, 0.5 ), (2, 0.75)], dtype=[('A', '<i8'), ('B', '<f8')]) Data types can be specified for the columns: >>> df.to_records(column_dtypes={"A": "int32"}) rec.array([('a', 1, 0.5 ), ('b', 2, 0.75)], dtype=[('I', 'O'), ('A', '<i4'), ('B', '<f8')]) As well as for the index: >>> df.to_records(index_dtypes="<S2") rec.array([(b'a', 1, 0.5 ), (b'b', 2, 0.75)], dtype=[('I', 'S2'), ('A', '<i8'), ('B', '<f8')]) >>> index_dtypes = "<S{}".format(df.index.str.len().max()) >>> df.to_records(index_dtypes=index_dtypes) rec.array([(b'a', 1, 0.5 ), (b'b', 2, 0.75)], dtype=[('I', 'S1'), ('A', '<i8'), ('B', '<f8')]) """ if convert_datetime64 is not None: warnings.warn("The 'convert_datetime64' parameter is " "deprecated and will be removed in a future " "version", FutureWarning, stacklevel=2) if index: if is_datetime64_any_dtype(self.index) and convert_datetime64: ix_vals = [self.index.to_pydatetime()] else: if isinstance(self.index, MultiIndex): # array of tuples to numpy cols. 
copy copy copy ix_vals = list(map(np.array, zip(*self.index.values))) else: ix_vals = [self.index.values] arrays = ix_vals + [self[c].get_values() for c in self.columns] count = 0 index_names = list(self.index.names) if isinstance(self.index, MultiIndex): for i, n in enumerate(index_names): if n is None: index_names[i] = 'level_%d' % count count += 1 elif index_names[0] is None: index_names = ['index'] names = [str(name) for name in itertools.chain(index_names, self.columns)] else: arrays = [self[c].get_values() for c in self.columns] names = [str(c) for c in self.columns] index_names = [] index_len = len(index_names) formats = [] for i, v in enumerate(arrays): index = i # When the names and arrays are collected, we # first collect those in the DataFrame's index, # followed by those in its columns. # # Thus, the total length of the array is: # len(index_names) + len(DataFrame.columns). # # This check allows us to see whether we are # handling a name / array in the index or column. if index < index_len: dtype_mapping = index_dtypes name = index_names[index] else: index -= index_len dtype_mapping = column_dtypes name = self.columns[index] # We have a dictionary, so we get the data type # associated with the index or column (which can # be denoted by its name in the DataFrame or its # position in DataFrame's array of indices or # columns, whichever is applicable. if is_dict_like(dtype_mapping): if name in dtype_mapping: dtype_mapping = dtype_mapping[name] elif index in dtype_mapping: dtype_mapping = dtype_mapping[index] else: dtype_mapping = None # If no mapping can be found, use the array's # dtype attribute for formatting. # # A valid dtype must either be a type or # string naming a type. if dtype_mapping is None: formats.append(v.dtype) elif isinstance(dtype_mapping, (type, np.dtype, str)): formats.append(dtype_mapping) else: element = "row" if i < index_len else "column" msg = ("Invalid dtype {dtype} specified for " "{element} {name}").format(dtype=dtype_mapping, element=element, name=name) raise ValueError(msg) return np.rec.fromarrays( arrays, dtype={'names': names, 'formats': formats} ) @classmethod def from_items(cls, items, columns=None, orient='columns'): """ Construct a DataFrame from a list of tuples. .. deprecated:: 0.23.0 `from_items` is deprecated and will be removed in a future version. Use :meth:`DataFrame.from_dict(dict(items)) <DataFrame.from_dict>` instead. :meth:`DataFrame.from_dict(OrderedDict(items)) <DataFrame.from_dict>` may be used to preserve the key order. Convert (key, value) pairs to DataFrame. The keys will be the axis index (usually the columns, but depends on the specified orientation). The values should be arrays or Series. Parameters ---------- items : sequence of (key, value) pairs Values should be arrays or Series. columns : sequence of column labels, optional Must be passed if orient='index'. orient : {'columns', 'index'}, default 'columns' The "orientation" of the data. If the keys of the input correspond to column labels, pass 'columns' (default). Otherwise if the keys correspond to the index, pass 'index'. Returns ------- DataFrame """ warnings.warn("from_items is deprecated. Please use " "DataFrame.from_dict(dict(items), ...) instead. 
" "DataFrame.from_dict(OrderedDict(items)) may be used to " "preserve the key order.", FutureWarning, stacklevel=2) keys, values = zip(*items) if orient == 'columns': if columns is not None: columns = ensure_index(columns) idict = dict(items) if len(idict) < len(items): if not columns.equals(ensure_index(keys)): raise ValueError('With non-unique item names, passed ' 'columns must be identical') arrays = values else: arrays = [idict[k] for k in columns if k in idict] else: columns = ensure_index(keys) arrays = values # GH 17312 # Provide more informative error msg when scalar values passed try: return cls._from_arrays(arrays, columns, None) except ValueError: if not is_nested_list_like(values): raise ValueError('The value in each (key, value) pair ' 'must be an array, Series, or dict') elif orient == 'index': if columns is None: raise TypeError("Must pass columns with orient='index'") keys = ensure_index(keys) # GH 17312 # Provide more informative error msg when scalar values passed try: arr = np.array(values, dtype=object).T data = [lib.maybe_convert_objects(v) for v in arr] return cls._from_arrays(data, columns, keys) except TypeError: if not is_nested_list_like(values): raise ValueError('The value in each (key, value) pair ' 'must be an array, Series, or dict') else: # pragma: no cover raise ValueError("'orient' must be either 'columns' or 'index'") @classmethod def _from_arrays(cls, arrays, columns, index, dtype=None): mgr = arrays_to_mgr(arrays, columns, index, columns, dtype=dtype) return cls(mgr) @classmethod def from_csv(cls, path, header=0, sep=',', index_col=0, parse_dates=True, encoding=None, tupleize_cols=None, infer_datetime_format=False): """ Read CSV file. .. deprecated:: 0.21.0 Use :func:`read_csv` instead. It is preferable to use the more powerful :func:`read_csv` for most general purposes, but ``from_csv`` makes for an easy roundtrip to and from a file (the exact counterpart of ``to_csv``), especially with a DataFrame of time series data. This method only differs from the preferred :func:`read_csv` in some defaults: - `index_col` is ``0`` instead of ``None`` (take first column as index by default) - `parse_dates` is ``True`` instead of ``False`` (try parsing the index as datetime by default) So a ``pd.DataFrame.from_csv(path)`` can be replaced by ``pd.read_csv(path, index_col=0, parse_dates=True)``. Parameters ---------- path : string file path or file handle / StringIO header : int, default 0 Row to use as header (skip prior rows) sep : string, default ',' Field delimiter index_col : int or sequence, default 0 Column to use for index. If a sequence is given, a MultiIndex is used. Different default from read_table parse_dates : boolean, default True Parse dates. Different default from read_table tupleize_cols : boolean, default False write multi_index columns as a list of tuples (if True) or new (expanded format) if False) infer_datetime_format : boolean, default False If True and `parse_dates` is True for a column, try to infer the datetime format based on the first datetime string. If the format can be inferred, there often will be a large parsing speed-up. Returns ------- DataFrame See Also -------- read_csv """ warnings.warn("from_csv is deprecated. Please use read_csv(...) " "instead. 
Note that some of the default arguments are " "different, so please refer to the documentation " "for from_csv when changing your function calls", FutureWarning, stacklevel=2) from pandas.io.parsers import read_csv return read_csv(path, header=header, sep=sep, parse_dates=parse_dates, index_col=index_col, encoding=encoding, tupleize_cols=tupleize_cols, infer_datetime_format=infer_datetime_format) def to_sparse(self, fill_value=None, kind='block'): """ Convert to SparseDataFrame. .. deprecated:: 0.25.0 Implement the sparse version of the DataFrame meaning that any data matching a specific value it's omitted in the representation. The sparse DataFrame allows for a more efficient storage. Parameters ---------- fill_value : float, default None The specific value that should be omitted in the representation. kind : {'block', 'integer'}, default 'block' The kind of the SparseIndex tracking where data is not equal to the fill value: - 'block' tracks only the locations and sizes of blocks of data. - 'integer' keeps an array with all the locations of the data. In most cases 'block' is recommended, since it's more memory efficient. Returns ------- SparseDataFrame The sparse representation of the DataFrame. See Also -------- DataFrame.to_dense : Converts the DataFrame back to the its dense form. Examples -------- >>> df = pd.DataFrame([(np.nan, np.nan), ... (1., np.nan), ... (np.nan, 1.)]) >>> df 0 1 0 NaN NaN 1 1.0 NaN 2 NaN 1.0 >>> type(df) <class 'pandas.core.frame.DataFrame'> >>> sdf = df.to_sparse() # doctest: +SKIP >>> sdf # doctest: +SKIP 0 1 0 NaN NaN 1 1.0 NaN 2 NaN 1.0 >>> type(sdf) # doctest: +SKIP <class 'pandas.core.sparse.frame.SparseDataFrame'> """ warnings.warn("DataFrame.to_sparse is deprecated and will be removed " "in a future version", FutureWarning, stacklevel=2) from pandas.core.sparse.api import SparseDataFrame with warnings.catch_warnings(): warnings.filterwarnings("ignore", message="SparseDataFrame") return SparseDataFrame(self._series, index=self.index, columns=self.columns, default_kind=kind, default_fill_value=fill_value) @deprecate_kwarg(old_arg_name='encoding', new_arg_name=None) def to_stata(self, fname, convert_dates=None, write_index=True, encoding="latin-1", byteorder=None, time_stamp=None, data_label=None, variable_labels=None, version=114, convert_strl=None): """ Export DataFrame object to Stata dta format. Writes the DataFrame to a Stata dataset file. "dta" files contain a Stata dataset. Parameters ---------- fname : str, buffer or path object String, path object (pathlib.Path or py._path.local.LocalPath) or object implementing a binary write() function. If using a buffer then the buffer will not be automatically closed after the file data has been written. convert_dates : dict Dictionary mapping columns containing datetime types to stata internal format to use when writing the dates. Options are 'tc', 'td', 'tm', 'tw', 'th', 'tq', 'ty'. Column can be either an integer or a name. Datetime columns that do not have a conversion type specified will be converted to 'tc'. Raises NotImplementedError if a datetime column has timezone information. write_index : bool Write the index to Stata dataset. encoding : str Default is latin-1. Unicode is not supported. byteorder : str Can be ">", "<", "little", or "big". default is `sys.byteorder`. time_stamp : datetime A datetime to use as file creation date. Default is the current time. data_label : str, optional A label for the data set. Must be 80 characters or smaller. 
variable_labels : dict Dictionary containing columns as keys and variable labels as values. Each label must be 80 characters or smaller. .. versionadded:: 0.19.0 version : {114, 117}, default 114 Version to use in the output dta file. Version 114 can be used read by Stata 10 and later. Version 117 can be read by Stata 13 or later. Version 114 limits string variables to 244 characters or fewer while 117 allows strings with lengths up to 2,000,000 characters. .. versionadded:: 0.23.0 convert_strl : list, optional List of column names to convert to string columns to Stata StrL format. Only available if version is 117. Storing strings in the StrL format can produce smaller dta files if strings have more than 8 characters and values are repeated. .. versionadded:: 0.23.0 Raises ------ NotImplementedError * If datetimes contain timezone information * Column dtype is not representable in Stata ValueError * Columns listed in convert_dates are neither datetime64[ns] or datetime.datetime * Column listed in convert_dates is not in DataFrame * Categorical label contains more than 32,000 characters .. versionadded:: 0.19.0 See Also -------- read_stata : Import Stata data files. io.stata.StataWriter : Low-level writer for Stata data files. io.stata.StataWriter117 : Low-level writer for version 117 files. Examples -------- >>> df = pd.DataFrame({'animal': ['falcon', 'parrot', 'falcon', ... 'parrot'], ... 'speed': [350, 18, 361, 15]}) >>> df.to_stata('animals.dta') # doctest: +SKIP """ kwargs = {} if version not in (114, 117): raise ValueError('Only formats 114 and 117 supported.') if version == 114: if convert_strl is not None: raise ValueError('strl support is only available when using ' 'format 117') from pandas.io.stata import StataWriter as statawriter else: from pandas.io.stata import StataWriter117 as statawriter kwargs['convert_strl'] = convert_strl writer = statawriter(fname, self, convert_dates=convert_dates, byteorder=byteorder, time_stamp=time_stamp, data_label=data_label, write_index=write_index, variable_labels=variable_labels, **kwargs) writer.write_file() def to_feather(self, fname): """ Write out the binary feather-format for DataFrames. .. versionadded:: 0.20.0 Parameters ---------- fname : str string file path """ from pandas.io.feather_format import to_feather to_feather(self, fname) def to_parquet(self, fname, engine='auto', compression='snappy', index=None, partition_cols=None, **kwargs): """ Write a DataFrame to the binary parquet format. .. versionadded:: 0.21.0 This function writes the dataframe as a `parquet file <https://parquet.apache.org/>`_. You can choose different parquet backends, and have the option of compression. See :ref:`the user guide <io.parquet>` for more details. Parameters ---------- fname : str File path or Root Directory path. Will be used as Root Directory path while writing a partitioned dataset. .. versionchanged:: 0.24.0 engine : {'auto', 'pyarrow', 'fastparquet'}, default 'auto' Parquet library to use. If 'auto', then the option ``io.parquet.engine`` is used. The default ``io.parquet.engine`` behavior is to try 'pyarrow', falling back to 'fastparquet' if 'pyarrow' is unavailable. compression : {'snappy', 'gzip', 'brotli', None}, default 'snappy' Name of the compression to use. Use ``None`` for no compression. index : bool, default None If ``True``, include the dataframe's index(es) in the file output. If ``False``, they will not be written to the file. If ``None``, the behavior depends on the chosen engine. .. 
versionadded:: 0.24.0 partition_cols : list, optional, default None Column names by which to partition the dataset Columns are partitioned in the order they are given .. versionadded:: 0.24.0 **kwargs Additional arguments passed to the parquet library. See :ref:`pandas io <io.parquet>` for more details. See Also -------- read_parquet : Read a parquet file. DataFrame.to_csv : Write a csv file. DataFrame.to_sql : Write to a sql table. DataFrame.to_hdf : Write to hdf. Notes ----- This function requires either the `fastparquet <https://pypi.org/project/fastparquet>`_ or `pyarrow <https://arrow.apache.org/docs/python/>`_ library. Examples -------- >>> df = pd.DataFrame(data={'col1': [1, 2], 'col2': [3, 4]}) >>> df.to_parquet('df.parquet.gzip', ... compression='gzip') # doctest: +SKIP >>> pd.read_parquet('df.parquet.gzip') # doctest: +SKIP col1 col2 0 1 3 1 2 4 """ from pandas.io.parquet import to_parquet to_parquet(self, fname, engine, compression=compression, index=index, partition_cols=partition_cols, **kwargs) @Substitution(header='Whether to print column labels, default True', col_space_type='str or int', col_space='The minimum width of each column in CSS length ' 'units. An int is assumed to be px units.\n\n' ' .. versionadded:: 0.25.0\n' ' Ability to use str') @Substitution(shared_params=fmt.common_docstring, returns=fmt.return_docstring) def to_html(self, buf=None, columns=None, col_space=None, header=True, index=True, na_rep='NaN', formatters=None, float_format=None, sparsify=None, index_names=True, justify=None, max_rows=None, max_cols=None, show_dimensions=False, decimal='.', bold_rows=True, classes=None, escape=True, notebook=False, border=None, table_id=None, render_links=False): """ Render a DataFrame as an HTML table. %(shared_params)s bold_rows : bool, default True Make the row labels bold in the output. classes : str or list or tuple, default None CSS class(es) to apply to the resulting html table. escape : bool, default True Convert the characters <, >, and & to HTML-safe sequences. notebook : {True, False}, default False Whether the generated HTML is for IPython Notebook. border : int A ``border=border`` attribute is included in the opening `<table>` tag. Default ``pd.options.display.html.border``. .. versionadded:: 0.19.0 table_id : str, optional A css id is included in the opening `<table>` tag if specified. .. versionadded:: 0.23.0 render_links : bool, default False Convert URLs to HTML links. .. versionadded:: 0.24.0 %(returns)s See Also -------- to_string : Convert DataFrame to a string. """ if (justify is not None and justify not in fmt._VALID_JUSTIFY_PARAMETERS): raise ValueError("Invalid value for justify parameter") formatter = fmt.DataFrameFormatter(self, buf=buf, columns=columns, col_space=col_space, na_rep=na_rep, formatters=formatters, float_format=float_format, sparsify=sparsify, justify=justify, index_names=index_names, header=header, index=index, bold_rows=bold_rows, escape=escape, max_rows=max_rows, max_cols=max_cols, show_dimensions=show_dimensions, decimal=decimal, table_id=table_id, render_links=render_links) # TODO: a generic formatter wld b in DataFrameFormatter formatter.to_html(classes=classes, notebook=notebook, border=border) if buf is None: return formatter.buf.getvalue() # ---------------------------------------------------------------------- def info(self, verbose=None, buf=None, max_cols=None, memory_usage=None, null_counts=None): """ Print a concise summary of a DataFrame. 
This method prints information about a DataFrame including the index dtype and column dtypes, non-null values and memory usage. Parameters ---------- verbose : bool, optional Whether to print the full summary. By default, the setting in ``pandas.options.display.max_info_columns`` is followed. buf : writable buffer, defaults to sys.stdout Where to send the output. By default, the output is printed to sys.stdout. Pass a writable buffer if you need to further process the output. max_cols : int, optional When to switch from the verbose to the truncated output. If the DataFrame has more than `max_cols` columns, the truncated output is used. By default, the setting in ``pandas.options.display.max_info_columns`` is used. memory_usage : bool, str, optional Specifies whether total memory usage of the DataFrame elements (including the index) should be displayed. By default, this follows the ``pandas.options.display.memory_usage`` setting. True always show memory usage. False never shows memory usage. A value of 'deep' is equivalent to "True with deep introspection". Memory usage is shown in human-readable units (base-2 representation). Without deep introspection a memory estimation is made based in column dtype and number of rows assuming values consume the same memory amount for corresponding dtypes. With deep memory introspection, a real memory usage calculation is performed at the cost of computational resources. null_counts : bool, optional Whether to show the non-null counts. By default, this is shown only if the frame is smaller than ``pandas.options.display.max_info_rows`` and ``pandas.options.display.max_info_columns``. A value of True always shows the counts, and False never shows the counts. Returns ------- None This method prints a summary of a DataFrame and returns None. See Also -------- DataFrame.describe: Generate descriptive statistics of DataFrame columns. DataFrame.memory_usage: Memory usage of DataFrame columns. Examples -------- >>> int_values = [1, 2, 3, 4, 5] >>> text_values = ['alpha', 'beta', 'gamma', 'delta', 'epsilon'] >>> float_values = [0.0, 0.25, 0.5, 0.75, 1.0] >>> df = pd.DataFrame({"int_col": int_values, "text_col": text_values, ... "float_col": float_values}) >>> df int_col text_col float_col 0 1 alpha 0.00 1 2 beta 0.25 2 3 gamma 0.50 3 4 delta 0.75 4 5 epsilon 1.00 Prints information of all columns: >>> df.info(verbose=True) <class 'pandas.core.frame.DataFrame'> RangeIndex: 5 entries, 0 to 4 Data columns (total 3 columns): int_col 5 non-null int64 text_col 5 non-null object float_col 5 non-null float64 dtypes: float64(1), int64(1), object(1) memory usage: 248.0+ bytes Prints a summary of columns count and its dtypes but not per column information: >>> df.info(verbose=False) <class 'pandas.core.frame.DataFrame'> RangeIndex: 5 entries, 0 to 4 Columns: 3 entries, int_col to float_col dtypes: float64(1), int64(1), object(1) memory usage: 248.0+ bytes Pipe output of DataFrame.info to buffer instead of sys.stdout, get buffer content and writes to a text file: >>> import io >>> buffer = io.StringIO() >>> df.info(buf=buffer) >>> s = buffer.getvalue() >>> with open("df_info.txt", "w", ... encoding="utf-8") as f: # doctest: +SKIP ... f.write(s) 260 The `memory_usage` parameter allows deep introspection mode, specially useful for big DataFrames and fine-tune memory optimization: >>> random_strings_array = np.random.choice(['a', 'b', 'c'], 10 ** 6) >>> df = pd.DataFrame({ ... 'column_1': np.random.choice(['a', 'b', 'c'], 10 ** 6), ... 
'column_2': np.random.choice(['a', 'b', 'c'], 10 ** 6), ... 'column_3': np.random.choice(['a', 'b', 'c'], 10 ** 6) ... }) >>> df.info() <class 'pandas.core.frame.DataFrame'> RangeIndex: 1000000 entries, 0 to 999999 Data columns (total 3 columns): column_1 1000000 non-null object column_2 1000000 non-null object column_3 1000000 non-null object dtypes: object(3) memory usage: 22.9+ MB >>> df.info(memory_usage='deep') <class 'pandas.core.frame.DataFrame'> RangeIndex: 1000000 entries, 0 to 999999 Data columns (total 3 columns): column_1 1000000 non-null object column_2 1000000 non-null object column_3 1000000 non-null object dtypes: object(3) memory usage: 188.8 MB """ if buf is None: # pragma: no cover buf = sys.stdout lines = [] lines.append(str(type(self))) lines.append(self.index._summary()) if len(self.columns) == 0: lines.append('Empty {name}'.format(name=type(self).__name__)) fmt.buffer_put_lines(buf, lines) return cols = self.columns # hack if max_cols is None: max_cols = get_option('display.max_info_columns', len(self.columns) + 1) max_rows = get_option('display.max_info_rows', len(self) + 1) if null_counts is None: show_counts = ((len(self.columns) <= max_cols) and (len(self) < max_rows)) else: show_counts = null_counts exceeds_info_cols = len(self.columns) > max_cols def _verbose_repr(): lines.append('Data columns (total %d columns):' % len(self.columns)) space = max(len(pprint_thing(k)) for k in self.columns) + 4 counts = None tmpl = "{count}{dtype}" if show_counts: counts = self.count() if len(cols) != len(counts): # pragma: no cover raise AssertionError( 'Columns must equal counts ' '({cols:d} != {counts:d})'.format( cols=len(cols), counts=len(counts))) tmpl = "{count} non-null {dtype}" dtypes = self.dtypes for i, col in enumerate(self.columns): dtype = dtypes.iloc[i] col = pprint_thing(col) count = "" if show_counts: count = counts.iloc[i] lines.append(_put_str(col, space) + tmpl.format(count=count, dtype=dtype)) def _non_verbose_repr(): lines.append(self.columns._summary(name='Columns')) def _sizeof_fmt(num, size_qualifier): # returns size in human readable format for x in ['bytes', 'KB', 'MB', 'GB', 'TB']: if num < 1024.0: return ("{num:3.1f}{size_q} " "{x}".format(num=num, size_q=size_qualifier, x=x)) num /= 1024.0 return "{num:3.1f}{size_q} {pb}".format(num=num, size_q=size_qualifier, pb='PB') if verbose: _verbose_repr() elif verbose is False: # specifically set to False, not nesc None _non_verbose_repr() else: if exceeds_info_cols: _non_verbose_repr() else: _verbose_repr() counts = self.get_dtype_counts() dtypes = ['{k}({kk:d})'.format(k=k[0], kk=k[1]) for k in sorted(counts.items())] lines.append('dtypes: {types}'.format(types=', '.join(dtypes))) if memory_usage is None: memory_usage = get_option('display.memory_usage') if memory_usage: # append memory usage of df to display size_qualifier = '' if memory_usage == 'deep': deep = True else: # size_qualifier is just a best effort; not guaranteed to catch # all cases (e.g., it misses categorical data even with object # categories) deep = False if ('object' in counts or self.index._is_memory_usage_qualified()): size_qualifier = '+' mem_usage = self.memory_usage(index=True, deep=deep).sum() lines.append("memory usage: {mem}\n".format( mem=_sizeof_fmt(mem_usage, size_qualifier))) fmt.buffer_put_lines(buf, lines) def memory_usage(self, index=True, deep=False): """ Return the memory usage of each column in bytes. The memory usage can optionally include the contribution of the index and elements of `object` dtype. 
This value is displayed in `DataFrame.info` by default. This can be suppressed by setting ``pandas.options.display.memory_usage`` to False. Parameters ---------- index : bool, default True Specifies whether to include the memory usage of the DataFrame's index in returned Series. If ``index=True``, the memory usage of the index is the first item in the output. deep : bool, default False If True, introspect the data deeply by interrogating `object` dtypes for system-level memory consumption, and include it in the returned values. Returns ------- Series A Series whose index is the original column names and whose values is the memory usage of each column in bytes. See Also -------- numpy.ndarray.nbytes : Total bytes consumed by the elements of an ndarray. Series.memory_usage : Bytes consumed by a Series. Categorical : Memory-efficient array for string values with many repeated values. DataFrame.info : Concise summary of a DataFrame. Examples -------- >>> dtypes = ['int64', 'float64', 'complex128', 'object', 'bool'] >>> data = dict([(t, np.ones(shape=5000).astype(t)) ... for t in dtypes]) >>> df = pd.DataFrame(data) >>> df.head() int64 float64 complex128 object bool 0 1 1.0 1.0+0.0j 1 True 1 1 1.0 1.0+0.0j 1 True 2 1 1.0 1.0+0.0j 1 True 3 1 1.0 1.0+0.0j 1 True 4 1 1.0 1.0+0.0j 1 True >>> df.memory_usage() Index 128 int64 40000 float64 40000 complex128 80000 object 40000 bool 5000 dtype: int64 >>> df.memory_usage(index=False) int64 40000 float64 40000 complex128 80000 object 40000 bool 5000 dtype: int64 The memory footprint of `object` dtype columns is ignored by default: >>> df.memory_usage(deep=True) Index 128 int64 40000 float64 40000 complex128 80000 object 160000 bool 5000 dtype: int64 Use a Categorical for efficient storage of an object-dtype column with many repeated values. >>> df['object'].astype('category').memory_usage(deep=True) 5216 """ result = Series([c.memory_usage(index=False, deep=deep) for col, c in self.iteritems()], index=self.columns) if index: result = Series(self.index.memory_usage(deep=deep), index=['Index']).append(result) return result def transpose(self, *args, **kwargs): """ Transpose index and columns. Reflect the DataFrame over its main diagonal by writing rows as columns and vice-versa. The property :attr:`.T` is an accessor to the method :meth:`transpose`. Parameters ---------- copy : bool, default False If True, the underlying data is copied. Otherwise (default), no copy is made if possible. *args, **kwargs Additional keywords have no effect but might be accepted for compatibility with numpy. Returns ------- DataFrame The transposed DataFrame. See Also -------- numpy.transpose : Permute the dimensions of a given array. Notes ----- Transposing a DataFrame with mixed dtypes will result in a homogeneous DataFrame with the `object` dtype. In such a case, a copy of the data is always made. Examples -------- **Square DataFrame with homogeneous dtype** >>> d1 = {'col1': [1, 2], 'col2': [3, 4]} >>> df1 = pd.DataFrame(data=d1) >>> df1 col1 col2 0 1 3 1 2 4 >>> df1_transposed = df1.T # or df1.transpose() >>> df1_transposed 0 1 col1 1 2 col2 3 4 When the dtype is homogeneous in the original DataFrame, we get a transposed DataFrame with the same dtype: >>> df1.dtypes col1 int64 col2 int64 dtype: object >>> df1_transposed.dtypes 0 int64 1 int64 dtype: object **Non-square DataFrame with mixed dtypes** >>> d2 = {'name': ['Alice', 'Bob'], ... 'score': [9.5, 8], ... 'employed': [False, True], ... 
'kids': [0, 0]} >>> df2 = pd.DataFrame(data=d2) >>> df2 name score employed kids 0 Alice 9.5 False 0 1 Bob 8.0 True 0 >>> df2_transposed = df2.T # or df2.transpose() >>> df2_transposed 0 1 name Alice Bob score 9.5 8 employed False True kids 0 0 When the DataFrame has mixed dtypes, we get a transposed DataFrame with the `object` dtype: >>> df2.dtypes name object score float64 employed bool kids int64 dtype: object >>> df2_transposed.dtypes 0 object 1 object dtype: object """ nv.validate_transpose(args, dict()) return super().transpose(1, 0, **kwargs) T = property(transpose) # ---------------------------------------------------------------------- # Picklability # legacy pickle formats def _unpickle_frame_compat(self, state): # pragma: no cover if len(state) == 2: # pragma: no cover series, idx = state columns = sorted(series) else: series, cols, idx = state columns = com._unpickle_array(cols) index = com._unpickle_array(idx) self._data = self._init_dict(series, index, columns, None) def _unpickle_matrix_compat(self, state): # pragma: no cover # old unpickling (vals, idx, cols), object_state = state index = com._unpickle_array(idx) dm = DataFrame(vals, index=index, columns=com._unpickle_array(cols), copy=False) if object_state is not None: ovals, _, ocols = object_state objects = DataFrame(ovals, index=index, columns=com._unpickle_array(ocols), copy=False) dm = dm.join(objects) self._data = dm._data # ---------------------------------------------------------------------- # Getting and setting elements def get_value(self, index, col, takeable=False): """ Quickly retrieve single value at passed column and index. .. deprecated:: 0.21.0 Use .at[] or .iat[] accessors instead. Parameters ---------- index : row label col : column label takeable : interpret the index/col as indexers, default False Returns ------- scalar """ warnings.warn("get_value is deprecated and will be removed " "in a future release. Please use " ".at[] or .iat[] accessors instead", FutureWarning, stacklevel=2) return self._get_value(index, col, takeable=takeable) def _get_value(self, index, col, takeable=False): if takeable: series = self._iget_item_cache(col) return com.maybe_box_datetimelike(series._values[index]) series = self._get_item_cache(col) engine = self.index._engine try: return engine.get_value(series._values, index) except KeyError: # GH 20629 if self.index.nlevels > 1: # partial indexing forbidden raise except (TypeError, ValueError): pass # we cannot handle direct indexing # use positional col = self.columns.get_loc(col) index = self.index.get_loc(index) return self._get_value(index, col, takeable=True) _get_value.__doc__ = get_value.__doc__ def set_value(self, index, col, value, takeable=False): """ Put single value at passed column and index. .. deprecated:: 0.21.0 Use .at[] or .iat[] accessors instead. Parameters ---------- index : row label col : column label value : scalar takeable : interpret the index/col as indexers, default False Returns ------- DataFrame If label pair is contained, will be reference to calling DataFrame, otherwise a new object. """ warnings.warn("set_value is deprecated and will be removed " "in a future release. 
Please use " ".at[] or .iat[] accessors instead", FutureWarning, stacklevel=2) return self._set_value(index, col, value, takeable=takeable) def _set_value(self, index, col, value, takeable=False): try: if takeable is True: series = self._iget_item_cache(col) return series._set_value(index, value, takeable=True) series = self._get_item_cache(col) engine = self.index._engine engine.set_value(series._values, index, value) return self except (KeyError, TypeError): # set using a non-recursive method & reset the cache if takeable: self.iloc[index, col] = value else: self.loc[index, col] = value self._item_cache.pop(col, None) return self _set_value.__doc__ = set_value.__doc__ def _ixs(self, i, axis=0): """ Parameters ---------- i : int, slice, or sequence of integers axis : int Notes ----- If slice passed, the resulting data will be a view. """ # irow if axis == 0: if isinstance(i, slice): return self[i] else: label = self.index[i] if isinstance(label, Index): # a location index by definition result = self.take(i, axis=axis) copy = True else: new_values = self._data.fast_xs(i) if is_scalar(new_values): return new_values # if we are a copy, mark as such copy = (isinstance(new_values, np.ndarray) and new_values.base is None) result = self._constructor_sliced(new_values, index=self.columns, name=self.index[i], dtype=new_values.dtype) result._set_is_copy(self, copy=copy) return result # icol else: label = self.columns[i] if isinstance(i, slice): # need to return view lab_slice = slice(label[0], label[-1]) return self.loc[:, lab_slice] else: if isinstance(label, Index): return self._take(i, axis=1) index_len = len(self.index) # if the values returned are not the same length # as the index (iow a not found value), iget returns # a 0-len ndarray. This is effectively catching # a numpy error (as numpy should really raise) values = self._data.iget(i) if index_len and not len(values): values = np.array([np.nan] * index_len, dtype=object) result = self._box_col_values(values, label) # this is a cached value, mark it so result._set_as_cached(label, self) return result def __getitem__(self, key): key = lib.item_from_zerodim(key) key = com.apply_if_callable(key, self) # shortcut if the key is in columns try: if self.columns.is_unique and key in self.columns: if self.columns.nlevels > 1: return self._getitem_multilevel(key) return self._get_item_cache(key) except (TypeError, ValueError): # The TypeError correctly catches non hashable "key" (e.g. list) # The ValueError can be removed once GH #21729 is fixed pass # Do we have a slicer (on rows)? indexer = convert_to_index_sliceable(self, key) if indexer is not None: return self._slice(indexer, axis=0) # Do we have a (boolean) DataFrame? if isinstance(key, DataFrame): return self._getitem_frame(key) # Do we have a (boolean) 1d indexer? 
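        # Illustrative example (comment only): a boolean mask such as
        # ``df[df['A'] > 0]``, or a list/array of booleans with the same
        # length as the index, takes this path; ``_getitem_bool_array``
        # aligns and validates the mask against the index and then takes
        # only the rows where the mask is True.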
if com.is_bool_indexer(key): return self._getitem_bool_array(key) # We are left with two options: a single key, and a collection of keys, # We interpret tuples as collections only for non-MultiIndex is_single_key = isinstance(key, tuple) or not is_list_like(key) if is_single_key: if self.columns.nlevels > 1: return self._getitem_multilevel(key) indexer = self.columns.get_loc(key) if is_integer(indexer): indexer = [indexer] else: if is_iterator(key): key = list(key) indexer = self.loc._convert_to_indexer(key, axis=1, raise_missing=True) # take() does not accept boolean indexers if getattr(indexer, "dtype", None) == bool: indexer = np.where(indexer)[0] data = self._take(indexer, axis=1) if is_single_key: # What does looking for a single key in a non-unique index return? # The behavior is inconsistent. It returns a Series, except when # - the key itself is repeated (test on data.shape, #9519), or # - we have a MultiIndex on columns (test on self.columns, #21309) if data.shape[1] == 1 and not isinstance(self.columns, MultiIndex): data = data[key] return data def _getitem_bool_array(self, key): # also raises Exception if object array with NA values # warning here just in case -- previously __setitem__ was # reindexing but __getitem__ was not; it seems more reasonable to # go with the __setitem__ behavior since that is more consistent # with all other indexing behavior if isinstance(key, Series) and not key.index.equals(self.index): warnings.warn("Boolean Series key will be reindexed to match " "DataFrame index.", UserWarning, stacklevel=3) elif len(key) != len(self.index): raise ValueError('Item wrong length %d instead of %d.' % (len(key), len(self.index))) # check_bool_indexer will throw exception if Series key cannot # be reindexed to match DataFrame rows key = check_bool_indexer(self.index, key) indexer = key.nonzero()[0] return self._take(indexer, axis=0) def _getitem_multilevel(self, key): loc = self.columns.get_loc(key) if isinstance(loc, (slice, Series, np.ndarray, Index)): new_columns = self.columns[loc] result_columns = maybe_droplevels(new_columns, key) if self._is_mixed_type: result = self.reindex(columns=new_columns) result.columns = result_columns else: new_values = self.values[:, loc] result = self._constructor(new_values, index=self.index, columns=result_columns) result = result.__finalize__(self) # If there is only one column being returned, and its name is # either an empty string, or a tuple with an empty string as its # first element, then treat the empty string as a placeholder # and return the column as if the user had provided that empty # string in the key. If the result is a Series, exclude the # implied empty string from its name. if len(result.columns) == 1: top = result.columns[0] if isinstance(top, tuple): top = top[0] if top == '': result = result[''] if isinstance(result, Series): result = self._constructor_sliced(result, index=self.index, name=key) result._set_is_copy(self) return result else: return self._get_item_cache(key) def _getitem_frame(self, key): if key.values.size and not is_bool_dtype(key.values): raise ValueError('Must pass DataFrame with boolean values only') return self.where(key) def query(self, expr, inplace=False, **kwargs): """ Query the columns of a DataFrame with a boolean expression. Parameters ---------- expr : str The query string to evaluate. You can refer to variables in the environment by prefixing them with an '@' character like ``@a + b``. .. 
versionadded:: 0.25.0 You can refer to column names that contain spaces by surrounding them in backticks. For example, if one of your columns is called ``a a`` and you want to sum it with ``b``, your query should be ```a a` + b``. inplace : bool Whether the query should modify the data in place or return a modified copy. **kwargs See the documentation for :func:`eval` for complete details on the keyword arguments accepted by :meth:`DataFrame.query`. .. versionadded:: 0.18.0 Returns ------- DataFrame DataFrame resulting from the provided query expression. See Also -------- eval : Evaluate a string describing operations on DataFrame columns. DataFrame.eval : Evaluate a string describing operations on DataFrame columns. Notes ----- The result of the evaluation of this expression is first passed to :attr:`DataFrame.loc` and if that fails because of a multidimensional key (e.g., a DataFrame) then the result will be passed to :meth:`DataFrame.__getitem__`. This method uses the top-level :func:`eval` function to evaluate the passed query. The :meth:`~pandas.DataFrame.query` method uses a slightly modified Python syntax by default. For example, the ``&`` and ``|`` (bitwise) operators have the precedence of their boolean cousins, :keyword:`and` and :keyword:`or`. This *is* syntactically valid Python, however the semantics are different. You can change the semantics of the expression by passing the keyword argument ``parser='python'``. This enforces the same semantics as evaluation in Python space. Likewise, you can pass ``engine='python'`` to evaluate an expression using Python itself as a backend. This is not recommended as it is inefficient compared to using ``numexpr`` as the engine. The :attr:`DataFrame.index` and :attr:`DataFrame.columns` attributes of the :class:`~pandas.DataFrame` instance are placed in the query namespace by default, which allows you to treat both the index and columns of the frame as a column in the frame. The identifier ``index`` is used for the frame index; you can also use the name of the index to identify it in a query. Please note that Python keywords may not be used as identifiers. For further details and examples see the ``query`` documentation in :ref:`indexing <indexing.query>`. Examples -------- >>> df = pd.DataFrame({'A': range(1, 6), ... 'B': range(10, 0, -2), ... 'C C': range(10, 5, -1)}) >>> df A B C C 0 1 10 10 1 2 8 9 2 3 6 8 3 4 4 7 4 5 2 6 >>> df.query('A > B') A B C C 4 5 2 6 The previous expression is equivalent to >>> df[df.A > df.B] A B C C 4 5 2 6 For columns with spaces in their name, you can use backtick quoting. >>> df.query('B == `C C`') A B C C 0 1 10 10 The previous expression is equivalent to >>> df[df.B == df['C C']] A B C C 0 1 10 10 """ inplace = validate_bool_kwarg(inplace, 'inplace') if not isinstance(expr, str): msg = "expr must be a string to be evaluated, {0} given" raise ValueError(msg.format(type(expr))) kwargs['level'] = kwargs.pop('level', 0) + 1 kwargs['target'] = None res = self.eval(expr, **kwargs) try: new_data = self.loc[res] except ValueError: # when res is multi-dimensional loc raises, but this is sometimes a # valid query new_data = self[res] if inplace: self._update_inplace(new_data) else: return new_data def eval(self, expr, inplace=False, **kwargs): """ Evaluate a string describing operations on DataFrame columns. Operates on columns only, not specific rows or elements. This allows `eval` to run arbitrary code, which can make you vulnerable to code injection if you pass user input to this function. 
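        As a minimal sketch (assuming a local variable ``x`` defined in the
        calling scope), local variables can be referenced with the ``@``
        prefix, which is one reason untrusted strings should never be passed:

        >>> x = 2  # doctest: +SKIP
        >>> df.eval('A * @x')  # doctest: +SKIP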
Parameters ---------- expr : str The expression string to evaluate. inplace : bool, default False If the expression contains an assignment, whether to perform the operation inplace and mutate the existing DataFrame. Otherwise, a new DataFrame is returned. .. versionadded:: 0.18.0. kwargs : dict See the documentation for :func:`eval` for complete details on the keyword arguments accepted by :meth:`~pandas.DataFrame.query`. Returns ------- ndarray, scalar, or pandas object The result of the evaluation. See Also -------- DataFrame.query : Evaluates a boolean expression to query the columns of a frame. DataFrame.assign : Can evaluate an expression or function to create new values for a column. eval : Evaluate a Python expression as a string using various backends. Notes ----- For more details see the API documentation for :func:`~eval`. For detailed examples see :ref:`enhancing performance with eval <enhancingperf.eval>`. Examples -------- >>> df = pd.DataFrame({'A': range(1, 6), 'B': range(10, 0, -2)}) >>> df A B 0 1 10 1 2 8 2 3 6 3 4 4 4 5 2 >>> df.eval('A + B') 0 11 1 10 2 9 3 8 4 7 dtype: int64 Assignment is allowed though by default the original DataFrame is not modified. >>> df.eval('C = A + B') A B C 0 1 10 11 1 2 8 10 2 3 6 9 3 4 4 8 4 5 2 7 >>> df A B 0 1 10 1 2 8 2 3 6 3 4 4 4 5 2 Use ``inplace=True`` to modify the original DataFrame. >>> df.eval('C = A + B', inplace=True) >>> df A B C 0 1 10 11 1 2 8 10 2 3 6 9 3 4 4 8 4 5 2 7 """ from pandas.core.computation.eval import eval as _eval inplace = validate_bool_kwarg(inplace, 'inplace') resolvers = kwargs.pop('resolvers', None) kwargs['level'] = kwargs.pop('level', 0) + 1 if resolvers is None: index_resolvers = self._get_index_resolvers() column_resolvers = \ self._get_space_character_free_column_resolvers() resolvers = column_resolvers, index_resolvers if 'target' not in kwargs: kwargs['target'] = self kwargs['resolvers'] = kwargs.get('resolvers', ()) + tuple(resolvers) return _eval(expr, inplace=inplace, **kwargs) def select_dtypes(self, include=None, exclude=None): """ Return a subset of the DataFrame's columns based on the column dtypes. Parameters ---------- include, exclude : scalar or list-like A selection of dtypes or strings to be included/excluded. At least one of these parameters must be supplied. Returns ------- DataFrame The subset of the frame including the dtypes in ``include`` and excluding the dtypes in ``exclude``. Raises ------ ValueError * If both of ``include`` and ``exclude`` are empty * If ``include`` and ``exclude`` have overlapping elements * If any kind of string dtype is passed in. Notes ----- * To select all *numeric* types, use ``np.number`` or ``'number'`` * To select strings you must use the ``object`` dtype, but note that this will return *all* object dtype columns * See the `numpy dtype hierarchy <http://docs.scipy.org/doc/numpy/reference/arrays.scalars.html>`__ * To select datetimes, use ``np.datetime64``, ``'datetime'`` or ``'datetime64'`` * To select timedeltas, use ``np.timedelta64``, ``'timedelta'`` or ``'timedelta64'`` * To select Pandas categorical dtypes, use ``'category'`` * To select Pandas datetimetz dtypes, use ``'datetimetz'`` (new in 0.20.0) or ``'datetime64[ns, tz]'`` Examples -------- >>> df = pd.DataFrame({'a': [1, 2] * 3, ... 'b': [True, False] * 3, ... 
'c': [1.0, 2.0] * 3}) >>> df a b c 0 1 True 1.0 1 2 False 2.0 2 1 True 1.0 3 2 False 2.0 4 1 True 1.0 5 2 False 2.0 >>> df.select_dtypes(include='bool') b 0 True 1 False 2 True 3 False 4 True 5 False >>> df.select_dtypes(include=['float64']) c 0 1.0 1 2.0 2 1.0 3 2.0 4 1.0 5 2.0 >>> df.select_dtypes(exclude=['int']) b c 0 True 1.0 1 False 2.0 2 True 1.0 3 False 2.0 4 True 1.0 5 False 2.0 """ def _get_info_slice(obj, indexer): """Slice the info axis of `obj` with `indexer`.""" if not hasattr(obj, '_info_axis_number'): msg = 'object of type {typ!r} has no info axis' raise TypeError(msg.format(typ=type(obj).__name__)) slices = [slice(None)] * obj.ndim slices[obj._info_axis_number] = indexer return tuple(slices) if not is_list_like(include): include = (include,) if include is not None else () if not is_list_like(exclude): exclude = (exclude,) if exclude is not None else () selection = tuple(map(frozenset, (include, exclude))) if not any(selection): raise ValueError('at least one of include or exclude must be ' 'nonempty') # convert the myriad valid dtypes object to a single representation include, exclude = map( lambda x: frozenset(map(infer_dtype_from_object, x)), selection) for dtypes in (include, exclude): invalidate_string_dtypes(dtypes) # can't both include AND exclude! if not include.isdisjoint(exclude): raise ValueError('include and exclude overlap on {inc_ex}'.format( inc_ex=(include & exclude))) # empty include/exclude -> defaults to True # three cases (we've already raised if both are empty) # case 1: empty include, nonempty exclude # we have True, True, ... True for include, same for exclude # in the loop below we get the excluded # and when we call '&' below we get only the excluded # case 2: nonempty include, empty exclude # same as case 1, but with include # case 3: both nonempty # the "union" of the logic of case 1 and case 2: # we get the included and excluded, and return their logical and include_these = Series(not bool(include), index=self.columns) exclude_these = Series(not bool(exclude), index=self.columns) def is_dtype_instance_mapper(idx, dtype): return idx, functools.partial(issubclass, dtype.type) for idx, f in itertools.starmap(is_dtype_instance_mapper, enumerate(self.dtypes)): if include: # checks for the case of empty include or exclude include_these.iloc[idx] = any(map(f, include)) if exclude: exclude_these.iloc[idx] = not any(map(f, exclude)) dtype_indexer = include_these & exclude_these return self.loc[_get_info_slice(self, dtype_indexer)] def _box_item_values(self, key, values): items = self.columns[self.columns.get_loc(key)] if values.ndim == 2: return self._constructor(values.T, columns=items, index=self.index) else: return self._box_col_values(values, items) def _box_col_values(self, values, items): """ Provide boxed values for a column. 
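        A rough sketch of the contract, with hypothetical arguments
        (illustrative only)::

            s = df._box_col_values(df.values[:, 0], df.columns[0])
            # s is a Series whose index is df.index and whose name is
            # the supplied column label
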
""" klass = self._constructor_sliced return klass(values, index=self.index, name=items, fastpath=True) def __setitem__(self, key, value): key = com.apply_if_callable(key, self) # see if we can slice the rows indexer = convert_to_index_sliceable(self, key) if indexer is not None: return self._setitem_slice(indexer, value) if isinstance(key, DataFrame) or getattr(key, 'ndim', None) == 2: self._setitem_frame(key, value) elif isinstance(key, (Series, np.ndarray, list, Index)): self._setitem_array(key, value) else: # set column self._set_item(key, value) def _setitem_slice(self, key, value): self._check_setitem_copy() self.loc._setitem_with_indexer(key, value) def _setitem_array(self, key, value): # also raises Exception if object array with NA values if com.is_bool_indexer(key): if len(key) != len(self.index): raise ValueError('Item wrong length %d instead of %d!' % (len(key), len(self.index))) key = check_bool_indexer(self.index, key) indexer = key.nonzero()[0] self._check_setitem_copy() self.loc._setitem_with_indexer(indexer, value) else: if isinstance(value, DataFrame): if len(value.columns) != len(key): raise ValueError('Columns must be same length as key') for k1, k2 in zip(key, value.columns): self[k1] = value[k2] else: indexer = self.loc._convert_to_indexer(key, axis=1) self._check_setitem_copy() self.loc._setitem_with_indexer((slice(None), indexer), value) def _setitem_frame(self, key, value): # support boolean setting with DataFrame input, e.g. # df[df > df2] = 0 if isinstance(key, np.ndarray): if key.shape != self.shape: raise ValueError( 'Array conditional must be same shape as self' ) key = self._constructor(key, **self._construct_axes_dict()) if key.values.size and not is_bool_dtype(key.values): raise TypeError( 'Must pass DataFrame or 2-d ndarray with boolean values only' ) self._check_inplace_setting(value) self._check_setitem_copy() self._where(-key, value, inplace=True) def _ensure_valid_index(self, value): """ Ensure that if we don't have an index, that we can create one from the passed value. """ # GH5632, make sure that we are a Series convertible if not len(self.index) and is_list_like(value): try: value = Series(value) except (ValueError, NotImplementedError, TypeError): raise ValueError('Cannot set a frame with no defined index ' 'and a value that cannot be converted to a ' 'Series') self._data = self._data.reindex_axis(value.index.copy(), axis=1, fill_value=np.nan) def _set_item(self, key, value): """ Add series to DataFrame in specified column. If series is a numpy-array (not a Series/TimeSeries), it must be the same length as the DataFrames index or an error will be thrown. Series/TimeSeries will be conformed to the DataFrames index to ensure homogeneity. """ self._ensure_valid_index(value) value = self._sanitize_column(key, value) NDFrame._set_item(self, key, value) # check if we are modifying a copy # try to set first as we want an invalid # value exception to occur first if len(self): self._check_setitem_copy() def insert(self, loc, column, value, allow_duplicates=False): """ Insert column into DataFrame at specified location. Raises a ValueError if `column` is already contained in the DataFrame, unless `allow_duplicates` is set to True. Parameters ---------- loc : int Insertion index. 
Must verify 0 <= loc <= len(columns) column : string, number, or hashable object label of the inserted column value : int, Series, or array-like allow_duplicates : bool, optional """ self._ensure_valid_index(value) value = self._sanitize_column(column, value, broadcast=False) self._data.insert(loc, column, value, allow_duplicates=allow_duplicates) def assign(self, **kwargs): r""" Assign new columns to a DataFrame. Returns a new object with all original columns in addition to new ones. Existing columns that are re-assigned will be overwritten. Parameters ---------- **kwargs : dict of {str: callable or Series} The column names are keywords. If the values are callable, they are computed on the DataFrame and assigned to the new columns. The callable must not change input DataFrame (though pandas doesn't check it). If the values are not callable, (e.g. a Series, scalar, or array), they are simply assigned. Returns ------- DataFrame A new DataFrame with the new columns in addition to all the existing columns. Notes ----- Assigning multiple columns within the same ``assign`` is possible. For Python 3.6 and above, later items in '\*\*kwargs' may refer to newly created or modified columns in 'df'; items are computed and assigned into 'df' in order. For Python 3.5 and below, the order of keyword arguments is not specified, you cannot refer to newly created or modified columns. All items are computed first, and then assigned in alphabetical order. .. versionchanged :: 0.23.0 Keyword argument order is maintained for Python 3.6 and later. Examples -------- >>> df = pd.DataFrame({'temp_c': [17.0, 25.0]}, ... index=['Portland', 'Berkeley']) >>> df temp_c Portland 17.0 Berkeley 25.0 Where the value is a callable, evaluated on `df`: >>> df.assign(temp_f=lambda x: x.temp_c * 9 / 5 + 32) temp_c temp_f Portland 17.0 62.6 Berkeley 25.0 77.0 Alternatively, the same behavior can be achieved by directly referencing an existing Series or sequence: >>> df.assign(temp_f=df['temp_c'] * 9 / 5 + 32) temp_c temp_f Portland 17.0 62.6 Berkeley 25.0 77.0 In Python 3.6+, you can create multiple columns within the same assign where one of the columns depends on another one defined within the same assign: >>> df.assign(temp_f=lambda x: x['temp_c'] * 9 / 5 + 32, ... temp_k=lambda x: (x['temp_f'] + 459.67) * 5 / 9) temp_c temp_f temp_k Portland 17.0 62.6 290.15 Berkeley 25.0 77.0 298.15 """ data = self.copy() # >= 3.6 preserve order of kwargs if PY36: for k, v in kwargs.items(): data[k] = com.apply_if_callable(v, data) else: # <= 3.5: do all calculations first... results = OrderedDict() for k, v in kwargs.items(): results[k] = com.apply_if_callable(v, data) # <= 3.5 and earlier results = sorted(results.items()) # ... and then assign for k, v in results: data[k] = v return data def _sanitize_column(self, key, value, broadcast=True): """ Ensures new columns (which go into the BlockManager as new blocks) are always copied and converted into an array. Parameters ---------- key : object value : scalar, Series, or array-like broadcast : bool, default True If ``key`` matches multiple duplicate column names in the DataFrame, this parameter indicates whether ``value`` should be tiled so that the returned array contains a (duplicated) column for each occurrence of the key. If False, ``value`` will not be tiled. 
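            A rough illustration, assuming a frame with a duplicated
            column label (hypothetical)::

                df = pd.DataFrame([[1, 2]], columns=['a', 'a'])
                # with broadcast=True, a 1-D value assigned to 'a' is
                # tiled so each occurrence of the duplicate label gets
                # its own (identical) column
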
Returns ------- numpy.ndarray """ def reindexer(value): # reindex if necessary if value.index.equals(self.index) or not len(self.index): value = value._values.copy() else: # GH 4107 try: value = value.reindex(self.index)._values except Exception as e: # duplicate axis if not value.index.is_unique: raise e # other raise TypeError('incompatible index of inserted column ' 'with frame index') return value if isinstance(value, Series): value = reindexer(value) elif isinstance(value, DataFrame): # align right-hand-side columns if self.columns # is multi-index and self[key] is a sub-frame if isinstance(self.columns, MultiIndex) and key in self.columns: loc = self.columns.get_loc(key) if isinstance(loc, (slice, Series, np.ndarray, Index)): cols = maybe_droplevels(self.columns[loc], key) if len(cols) and not cols.equals(value.columns): value = value.reindex(cols, axis=1) # now align rows value = reindexer(value).T elif isinstance(value, ExtensionArray): # Explicitly copy here, instead of in sanitize_index, # as sanitize_index won't copy an EA, even with copy=True value = value.copy() value = sanitize_index(value, self.index, copy=False) elif isinstance(value, Index) or is_sequence(value): # turn me into an ndarray value = sanitize_index(value, self.index, copy=False) if not isinstance(value, (np.ndarray, Index)): if isinstance(value, list) and len(value) > 0: value = maybe_convert_platform(value) else: value = com.asarray_tuplesafe(value) elif value.ndim == 2: value = value.copy().T elif isinstance(value, Index): value = value.copy(deep=True) else: value = value.copy() # possibly infer to datetimelike if is_object_dtype(value.dtype): value = maybe_infer_to_datetimelike(value) else: # cast ignores pandas dtypes. so save the dtype first infer_dtype, _ = infer_dtype_from_scalar( value, pandas_dtype=True) # upcast value = cast_scalar_to_array(len(self.index), value) value = maybe_cast_to_datetime(value, infer_dtype) # return internal types directly if is_extension_type(value) or is_extension_array_dtype(value): return value # broadcast across multiple columns if necessary if broadcast and key in self.columns and value.ndim == 1: if (not self.columns.is_unique or isinstance(self.columns, MultiIndex)): existing_piece = self[key] if isinstance(existing_piece, DataFrame): value = np.tile(value, (len(existing_piece.columns), 1)) return np.atleast_2d(np.asarray(value)) @property def _series(self): return {item: Series(self._data.iget(idx), index=self.index, name=item) for idx, item in enumerate(self.columns)} def lookup(self, row_labels, col_labels): """ Label-based "fancy indexing" function for DataFrame. Given equal-length arrays of row and column labels, return an array of the values corresponding to each (row, col) pair. 
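        For example, assuming a small frame (illustrative only)::

            df = pd.DataFrame({'A': [1, 2], 'B': [3, 4]}, index=['x', 'y'])
            df.lookup(['x', 'y'], ['A', 'B'])   # -> array([1, 4])
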
        Parameters
        ----------
        row_labels : sequence
            The row labels to use for lookup.
        col_labels : sequence
            The column labels to use for lookup.

        Returns
        -------
        numpy.ndarray
            The found values.

        Notes
        -----
        Akin to::

            result = [df.get_value(row, col)
                      for row, col in zip(row_labels, col_labels)]
        """
        n = len(row_labels)
        if n != len(col_labels):
            raise ValueError('Row labels must have same size as column labels')

        thresh = 1000
        if not self._is_mixed_type or n > thresh:
            values = self.values
            ridx = self.index.get_indexer(row_labels)
            cidx = self.columns.get_indexer(col_labels)
            if (ridx == -1).any():
                raise KeyError('One or more row labels was not found')
            if (cidx == -1).any():
                raise KeyError('One or more column labels was not found')
            flat_index = ridx * len(self.columns) + cidx
            result = values.flat[flat_index]
        else:
            result = np.empty(n, dtype='O')
            for i, (r, c) in enumerate(zip(row_labels, col_labels)):
                result[i] = self._get_value(r, c)

        if is_object_dtype(result):
            result = lib.maybe_convert_objects(result)

        return result

    # ----------------------------------------------------------------------
    # Reindexing and alignment

    def _reindex_axes(self, axes, level, limit, tolerance, method, fill_value,
                      copy):
        frame = self

        columns = axes['columns']
        if columns is not None:
            frame = frame._reindex_columns(columns, method, copy, level,
                                           fill_value, limit, tolerance)

        index = axes['index']
        if index is not None:
            frame = frame._reindex_index(index, method, copy, level,
                                         fill_value, limit, tolerance)

        return frame

    def _reindex_index(self, new_index, method, copy, level,
                       fill_value=np.nan, limit=None, tolerance=None):
        new_index, indexer = self.index.reindex(new_index, method=method,
                                                level=level, limit=limit,
                                                tolerance=tolerance)
        return self._reindex_with_indexers({0: [new_index, indexer]},
                                           copy=copy, fill_value=fill_value,
                                           allow_dups=False)

    def _reindex_columns(self, new_columns, method, copy, level,
                         fill_value=None, limit=None, tolerance=None):
        new_columns, indexer = self.columns.reindex(new_columns,
                                                    method=method,
                                                    level=level, limit=limit,
                                                    tolerance=tolerance)
        return self._reindex_with_indexers({1: [new_columns, indexer]},
                                           copy=copy, fill_value=fill_value,
                                           allow_dups=False)

    def _reindex_multi(self, axes, copy, fill_value):
        """
        We are guaranteed non-Nones in the axes.
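        A minimal sketch of a call, with hypothetical arguments::

            frame._reindex_multi({'index': new_index, 'columns': new_columns},
                                 copy=True, fill_value=np.nan)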
""" new_index, row_indexer = self.index.reindex(axes['index']) new_columns, col_indexer = self.columns.reindex(axes['columns']) if row_indexer is not None and col_indexer is not None: indexer = row_indexer, col_indexer new_values = algorithms.take_2d_multi(self.values, indexer, fill_value=fill_value) return self._constructor(new_values, index=new_index, columns=new_columns) else: return self._reindex_with_indexers({0: [new_index, row_indexer], 1: [new_columns, col_indexer]}, copy=copy, fill_value=fill_value) @Appender(_shared_docs['align'] % _shared_doc_kwargs) def align(self, other, join='outer', axis=None, level=None, copy=True, fill_value=None, method=None, limit=None, fill_axis=0, broadcast_axis=None): return super().align(other, join=join, axis=axis, level=level, copy=copy, fill_value=fill_value, method=method, limit=limit, fill_axis=fill_axis, broadcast_axis=broadcast_axis) @Substitution(**_shared_doc_kwargs) @Appender(NDFrame.reindex.__doc__) @rewrite_axis_style_signature('labels', [('method', None), ('copy', True), ('level', None), ('fill_value', np.nan), ('limit', None), ('tolerance', None)]) def reindex(self, *args, **kwargs): axes = validate_axis_style_args(self, args, kwargs, 'labels', 'reindex') kwargs.update(axes) # Pop these, since the values are in `kwargs` under different names kwargs.pop('axis', None) kwargs.pop('labels', None) return super().reindex(**kwargs) @Appender(_shared_docs['reindex_axis'] % _shared_doc_kwargs) def reindex_axis(self, labels, axis=0, method=None, level=None, copy=True, limit=None, fill_value=np.nan): return super().reindex_axis(labels=labels, axis=axis, method=method, level=level, copy=copy, limit=limit, fill_value=fill_value) def drop(self, labels=None, axis=0, index=None, columns=None, level=None, inplace=False, errors='raise'): """ Drop specified labels from rows or columns. Remove rows or columns by specifying label names and corresponding axis, or by specifying directly index or column names. When using a multi-index, labels on different levels can be removed by specifying the level. Parameters ---------- labels : single label or list-like Index or column labels to drop. axis : {0 or 'index', 1 or 'columns'}, default 0 Whether to drop labels from the index (0 or 'index') or columns (1 or 'columns'). index : single label or list-like Alternative to specifying axis (``labels, axis=0`` is equivalent to ``index=labels``). .. versionadded:: 0.21.0 columns : single label or list-like Alternative to specifying axis (``labels, axis=1`` is equivalent to ``columns=labels``). .. versionadded:: 0.21.0 level : int or level name, optional For MultiIndex, level from which the labels will be removed. inplace : bool, default False If True, do operation inplace and return None. errors : {'ignore', 'raise'}, default 'raise' If 'ignore', suppress error and only existing labels are dropped. Returns ------- DataFrame DataFrame without the removed index or column labels. Raises ------ KeyError If any of the labels is not found in the selected axis. See Also -------- DataFrame.loc : Label-location based indexer for selection by label. DataFrame.dropna : Return DataFrame with labels on given axis omitted where (all or any) data are missing. DataFrame.drop_duplicates : Return DataFrame with duplicate rows removed, optionally only considering certain columns. Series.drop : Return Series with specified index labels removed. Examples -------- >>> df = pd.DataFrame(np.arange(12).reshape(3, 4), ... 
columns=['A', 'B', 'C', 'D']) >>> df A B C D 0 0 1 2 3 1 4 5 6 7 2 8 9 10 11 Drop columns >>> df.drop(['B', 'C'], axis=1) A D 0 0 3 1 4 7 2 8 11 >>> df.drop(columns=['B', 'C']) A D 0 0 3 1 4 7 2 8 11 Drop a row by index >>> df.drop([0, 1]) A B C D 2 8 9 10 11 Drop columns and/or rows of MultiIndex DataFrame >>> midx = pd.MultiIndex(levels=[['lama', 'cow', 'falcon'], ... ['speed', 'weight', 'length']], ... codes=[[0, 0, 0, 1, 1, 1, 2, 2, 2], ... [0, 1, 2, 0, 1, 2, 0, 1, 2]]) >>> df = pd.DataFrame(index=midx, columns=['big', 'small'], ... data=[[45, 30], [200, 100], [1.5, 1], [30, 20], ... [250, 150], [1.5, 0.8], [320, 250], ... [1, 0.8], [0.3, 0.2]]) >>> df big small lama speed 45.0 30.0 weight 200.0 100.0 length 1.5 1.0 cow speed 30.0 20.0 weight 250.0 150.0 length 1.5 0.8 falcon speed 320.0 250.0 weight 1.0 0.8 length 0.3 0.2 >>> df.drop(index='cow', columns='small') big lama speed 45.0 weight 200.0 length 1.5 falcon speed 320.0 weight 1.0 length 0.3 >>> df.drop(index='length', level=1) big small lama speed 45.0 30.0 weight 200.0 100.0 cow speed 30.0 20.0 weight 250.0 150.0 falcon speed 320.0 250.0 weight 1.0 0.8 """ return super().drop(labels=labels, axis=axis, index=index, columns=columns, level=level, inplace=inplace, errors=errors) @rewrite_axis_style_signature('mapper', [('copy', True), ('inplace', False), ('level', None), ('errors', 'ignore')]) def rename(self, *args, **kwargs): """ Alter axes labels. Function / dict values must be unique (1-to-1). Labels not contained in a dict / Series will be left as-is. Extra labels listed don't throw an error. See the :ref:`user guide <basics.rename>` for more. Parameters ---------- mapper : dict-like or function Dict-like or functions transformations to apply to that axis' values. Use either ``mapper`` and ``axis`` to specify the axis to target with ``mapper``, or ``index`` and ``columns``. index : dict-like or function Alternative to specifying axis (``mapper, axis=0`` is equivalent to ``index=mapper``). columns : dict-like or function Alternative to specifying axis (``mapper, axis=1`` is equivalent to ``columns=mapper``). axis : int or str Axis to target with ``mapper``. Can be either the axis name ('index', 'columns') or number (0, 1). The default is 'index'. copy : bool, default True Also copy underlying data. inplace : bool, default False Whether to return a new DataFrame. If True then value of copy is ignored. level : int or level name, default None In case of a MultiIndex, only rename labels in the specified level. errors : {'ignore', 'raise'}, default 'ignore' If 'raise', raise a `KeyError` when a dict-like `mapper`, `index`, or `columns` contains labels that are not present in the Index being transformed. If 'ignore', existing keys will be renamed and extra keys will be ignored. Returns ------- DataFrame DataFrame with the renamed axis labels. Raises ------ KeyError If any of the labels is not found in the selected axis and "errors='raise'". See Also -------- DataFrame.rename_axis : Set the name of the axis. Examples -------- ``DataFrame.rename`` supports two calling conventions * ``(index=index_mapper, columns=columns_mapper, ...)`` * ``(mapper, axis={'index', 'columns'}, ...)`` We *highly* recommend using keyword arguments to clarify your intent. 
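        For example, the two conventions below are equivalent
        (illustrative only)::

            df.rename(columns={'A': 'a'})
            df.rename({'A': 'a'}, axis='columns')
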
Rename columns using a mapping: >>> df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]}) >>> df.rename(columns={"A": "a", "B": "c"}) a c 0 1 4 1 2 5 2 3 6 Rename index using a mapping: >>> df.rename(index={0: "x", 1: "y", 2: "z"}) A B x 1 4 y 2 5 z 3 6 Cast index labels to a different type: >>> df.index RangeIndex(start=0, stop=3, step=1) >>> df.rename(index=str).index Index(['0', '1', '2'], dtype='object') >>> df.rename(columns={"A": "a", "B": "b", "C": "c"}, errors="raise") Traceback (most recent call last): KeyError: ['C'] not found in axis Using axis-style parameters >>> df.rename(str.lower, axis='columns') a b 0 1 4 1 2 5 2 3 6 >>> df.rename({1: 2, 2: 4}, axis='index') A B 0 1 4 2 2 5 4 3 6 """ axes = validate_axis_style_args(self, args, kwargs, 'mapper', 'rename') kwargs.update(axes) # Pop these, since the values are in `kwargs` under different names kwargs.pop('axis', None) kwargs.pop('mapper', None) return super().rename(**kwargs) @Substitution(**_shared_doc_kwargs) @Appender(NDFrame.fillna.__doc__) def fillna(self, value=None, method=None, axis=None, inplace=False, limit=None, downcast=None, **kwargs): return super().fillna(value=value, method=method, axis=axis, inplace=inplace, limit=limit, downcast=downcast, **kwargs) @Appender(_shared_docs['replace'] % _shared_doc_kwargs) def replace(self, to_replace=None, value=None, inplace=False, limit=None, regex=False, method='pad'): return super().replace(to_replace=to_replace, value=value, inplace=inplace, limit=limit, regex=regex, method=method) @Appender(_shared_docs['shift'] % _shared_doc_kwargs) def shift(self, periods=1, freq=None, axis=0, fill_value=None): return super().shift(periods=periods, freq=freq, axis=axis, fill_value=fill_value) def set_index(self, keys, drop=True, append=False, inplace=False, verify_integrity=False): """ Set the DataFrame index using existing columns. Set the DataFrame index (row labels) using one or more existing columns or arrays (of the correct length). The index can replace the existing index or expand on it. Parameters ---------- keys : label or array-like or list of labels/arrays This parameter can be either a single column key, a single array of the same length as the calling DataFrame, or a list containing an arbitrary combination of column keys and arrays. Here, "array" encompasses :class:`Series`, :class:`Index`, ``np.ndarray``, and instances of :class:`~collections.abc.Iterator`. drop : bool, default True Delete columns to be used as the new index. append : bool, default False Whether to append columns to existing index. inplace : bool, default False Modify the DataFrame in place (do not create a new object). verify_integrity : bool, default False Check the new index for duplicates. Otherwise defer the check until necessary. Setting to False will improve the performance of this method. Returns ------- DataFrame Changed row labels. See Also -------- DataFrame.reset_index : Opposite of set_index. DataFrame.reindex : Change to new indices or expand indices. DataFrame.reindex_like : Change to same indices as other DataFrame. Examples -------- >>> df = pd.DataFrame({'month': [1, 4, 7, 10], ... 'year': [2012, 2014, 2013, 2014], ... 
'sale': [55, 40, 84, 31]}) >>> df month year sale 0 1 2012 55 1 4 2014 40 2 7 2013 84 3 10 2014 31 Set the index to become the 'month' column: >>> df.set_index('month') year sale month 1 2012 55 4 2014 40 7 2013 84 10 2014 31 Create a MultiIndex using columns 'year' and 'month': >>> df.set_index(['year', 'month']) sale year month 2012 1 55 2014 4 40 2013 7 84 2014 10 31 Create a MultiIndex using an Index and a column: >>> df.set_index([pd.Index([1, 2, 3, 4]), 'year']) month sale year 1 2012 1 55 2 2014 4 40 3 2013 7 84 4 2014 10 31 Create a MultiIndex using two Series: >>> s = pd.Series([1, 2, 3, 4]) >>> df.set_index([s, s**2]) month year sale 1 1 1 2012 55 2 4 4 2014 40 3 9 7 2013 84 4 16 10 2014 31 """ inplace = validate_bool_kwarg(inplace, 'inplace') if not isinstance(keys, list): keys = [keys] err_msg = ('The parameter "keys" may be a column key, one-dimensional ' 'array, or a list containing only valid column keys and ' 'one-dimensional arrays.') missing = [] for col in keys: if isinstance(col, (ABCIndexClass, ABCSeries, np.ndarray, list, abc.Iterator)): # arrays are fine as long as they are one-dimensional # iterators get converted to list below if getattr(col, 'ndim', 1) != 1: raise ValueError(err_msg) else: # everything else gets tried as a key; see GH 24969 try: found = col in self.columns except TypeError: raise TypeError(err_msg + ' Received column of ' 'type {}'.format(type(col))) else: if not found: missing.append(col) if missing: raise KeyError('None of {} are in the columns'.format(missing)) if inplace: frame = self else: frame = self.copy() arrays = [] names = [] if append: names = [x for x in self.index.names] if isinstance(self.index, ABCMultiIndex): for i in range(self.index.nlevels): arrays.append(self.index._get_level_values(i)) else: arrays.append(self.index) to_remove = [] for col in keys: if isinstance(col, ABCMultiIndex): for n in range(col.nlevels): arrays.append(col._get_level_values(n)) names.extend(col.names) elif isinstance(col, (ABCIndexClass, ABCSeries)): # if Index then not MultiIndex (treated above) arrays.append(col) names.append(col.name) elif isinstance(col, (list, np.ndarray)): arrays.append(col) names.append(None) elif isinstance(col, abc.Iterator): arrays.append(list(col)) names.append(None) # from here, col can only be a column label else: arrays.append(frame[col]._values) names.append(col) if drop: to_remove.append(col) if len(arrays[-1]) != len(self): # check newest element against length of calling frame, since # ensure_index_from_sequences would not raise for append=False. raise ValueError('Length mismatch: Expected {len_self} rows, ' 'received array of length {len_col}'.format( len_self=len(self), len_col=len(arrays[-1]) )) index = ensure_index_from_sequences(arrays, names) if verify_integrity and not index.is_unique: duplicates = index[index.duplicated()].unique() raise ValueError('Index has duplicate keys: {dup}'.format( dup=duplicates)) # use set to handle duplicate column names gracefully in case of drop for c in set(to_remove): del frame[c] # clear up memory usage index._cleanup() frame.index = index if not inplace: return frame def reset_index(self, level=None, drop=False, inplace=False, col_level=0, col_fill=''): """ Reset the index, or a level of it. Reset the index of the DataFrame, and use the default one instead. If the DataFrame has a MultiIndex, this method can remove one or more levels. Parameters ---------- level : int, str, tuple, or list, default None Only remove the given levels from the index. Removes all levels by default. 
drop : bool, default False Do not try to insert index into dataframe columns. This resets the index to the default integer index. inplace : bool, default False Modify the DataFrame in place (do not create a new object). col_level : int or str, default 0 If the columns have multiple levels, determines which level the labels are inserted into. By default it is inserted into the first level. col_fill : object, default '' If the columns have multiple levels, determines how the other levels are named. If None then the index name is repeated. Returns ------- DataFrame DataFrame with the new index. See Also -------- DataFrame.set_index : Opposite of reset_index. DataFrame.reindex : Change to new indices or expand indices. DataFrame.reindex_like : Change to same indices as other DataFrame. Examples -------- >>> df = pd.DataFrame([('bird', 389.0), ... ('bird', 24.0), ... ('mammal', 80.5), ... ('mammal', np.nan)], ... index=['falcon', 'parrot', 'lion', 'monkey'], ... columns=('class', 'max_speed')) >>> df class max_speed falcon bird 389.0 parrot bird 24.0 lion mammal 80.5 monkey mammal NaN When we reset the index, the old index is added as a column, and a new sequential index is used: >>> df.reset_index() index class max_speed 0 falcon bird 389.0 1 parrot bird 24.0 2 lion mammal 80.5 3 monkey mammal NaN We can use the `drop` parameter to avoid the old index being added as a column: >>> df.reset_index(drop=True) class max_speed 0 bird 389.0 1 bird 24.0 2 mammal 80.5 3 mammal NaN You can also use `reset_index` with `MultiIndex`. >>> index = pd.MultiIndex.from_tuples([('bird', 'falcon'), ... ('bird', 'parrot'), ... ('mammal', 'lion'), ... ('mammal', 'monkey')], ... names=['class', 'name']) >>> columns = pd.MultiIndex.from_tuples([('speed', 'max'), ... ('species', 'type')]) >>> df = pd.DataFrame([(389.0, 'fly'), ... ( 24.0, 'fly'), ... ( 80.5, 'run'), ... (np.nan, 'jump')], ... index=index, ... columns=columns) >>> df speed species max type class name bird falcon 389.0 fly parrot 24.0 fly mammal lion 80.5 run monkey NaN jump If the index has multiple levels, we can reset a subset of them: >>> df.reset_index(level='class') class speed species max type name falcon bird 389.0 fly parrot bird 24.0 fly lion mammal 80.5 run monkey mammal NaN jump If we are not dropping the index, by default, it is placed in the top level. 
We can place it in another level: >>> df.reset_index(level='class', col_level=1) speed species class max type name falcon bird 389.0 fly parrot bird 24.0 fly lion mammal 80.5 run monkey mammal NaN jump When the index is inserted under another level, we can specify under which one with the parameter `col_fill`: >>> df.reset_index(level='class', col_level=1, col_fill='species') species speed species class max type name falcon bird 389.0 fly parrot bird 24.0 fly lion mammal 80.5 run monkey mammal NaN jump If we specify a nonexistent level for `col_fill`, it is created: >>> df.reset_index(level='class', col_level=1, col_fill='genus') genus speed species class max type name falcon bird 389.0 fly parrot bird 24.0 fly lion mammal 80.5 run monkey mammal NaN jump """ inplace = validate_bool_kwarg(inplace, 'inplace') if inplace: new_obj = self else: new_obj = self.copy() def _maybe_casted_values(index, labels=None): values = index._values if not isinstance(index, (PeriodIndex, DatetimeIndex)): if values.dtype == np.object_: values = lib.maybe_convert_objects(values) # if we have the labels, extract the values with a mask if labels is not None: mask = labels == -1 # we can have situations where the whole mask is -1, # meaning there is nothing found in labels, so make all nan's if mask.all(): values = np.empty(len(mask)) values.fill(np.nan) else: values = values.take(labels) # TODO(https://github.com/pandas-dev/pandas/issues/24206) # Push this into maybe_upcast_putmask? # We can't pass EAs there right now. Looks a bit # complicated. # So we unbox the ndarray_values, op, re-box. values_type = type(values) values_dtype = values.dtype if issubclass(values_type, DatetimeLikeArray): values = values._data if mask.any(): values, changed = maybe_upcast_putmask( values, mask, np.nan) if issubclass(values_type, DatetimeLikeArray): values = values_type(values, dtype=values_dtype) return values new_index = ibase.default_index(len(new_obj)) if level is not None: if not isinstance(level, (tuple, list)): level = [level] level = [self.index._get_level_number(lev) for lev in level] if len(level) < self.index.nlevels: new_index = self.index.droplevel(level) if not drop: if isinstance(self.index, MultiIndex): names = [n if n is not None else ('level_%d' % i) for (i, n) in enumerate(self.index.names)] to_insert = zip(self.index.levels, self.index.codes) else: default = 'index' if 'index' not in self else 'level_0' names = ([default] if self.index.name is None else [self.index.name]) to_insert = ((self.index, None),) multi_col = isinstance(self.columns, MultiIndex) for i, (lev, lab) in reversed(list(enumerate(to_insert))): if not (level is None or i in level): continue name = names[i] if multi_col: col_name = (list(name) if isinstance(name, tuple) else [name]) if col_fill is None: if len(col_name) not in (1, self.columns.nlevels): raise ValueError("col_fill=None is incompatible " "with incomplete column name " "{}".format(name)) col_fill = col_name[0] lev_num = self.columns._get_level_number(col_level) name_lst = [col_fill] * lev_num + col_name missing = self.columns.nlevels - len(name_lst) name_lst += [col_fill] * missing name = tuple(name_lst) # to ndarray and maybe infer different dtype level_values = _maybe_casted_values(lev, lab) new_obj.insert(0, name, level_values) new_obj.index = new_index if not inplace: return new_obj # ---------------------------------------------------------------------- # Reindex-based selection methods @Appender(_shared_docs['isna'] % _shared_doc_kwargs) def isna(self): return super().isna() 
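    # Illustrative usage of the isna/notna family defined here, with a
    # hypothetical frame:
    #
    #     df = pd.DataFrame({'a': [1, None]})
    #     df.isna()     # -> DataFrame of booleans, True where values are NA
    #     df.notna()    # -> element-wise negation of df.isna()
    #     df.isnull()   # alias of isna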
@Appender(_shared_docs['isna'] % _shared_doc_kwargs) def isnull(self): return super().isnull() @Appender(_shared_docs['notna'] % _shared_doc_kwargs) def notna(self): return super().notna() @Appender(_shared_docs['notna'] % _shared_doc_kwargs) def notnull(self): return super().notnull() def dropna(self, axis=0, how='any', thresh=None, subset=None, inplace=False): """ Remove missing values. See the :ref:`User Guide <missing_data>` for more on which values are considered missing, and how to work with missing data. Parameters ---------- axis : {0 or 'index', 1 or 'columns'}, default 0 Determine if rows or columns which contain missing values are removed. * 0, or 'index' : Drop rows which contain missing values. * 1, or 'columns' : Drop columns which contain missing value. .. deprecated:: 0.23.0 Pass tuple or list to drop on multiple axes. Only a single axis is allowed. how : {'any', 'all'}, default 'any' Determine if row or column is removed from DataFrame, when we have at least one NA or all NA. * 'any' : If any NA values are present, drop that row or column. * 'all' : If all values are NA, drop that row or column. thresh : int, optional Require that many non-NA values. subset : array-like, optional Labels along other axis to consider, e.g. if you are dropping rows these would be a list of columns to include. inplace : bool, default False If True, do operation inplace and return None. Returns ------- DataFrame DataFrame with NA entries dropped from it. See Also -------- DataFrame.isna: Indicate missing values. DataFrame.notna : Indicate existing (non-missing) values. DataFrame.fillna : Replace missing values. Series.dropna : Drop missing values. Index.dropna : Drop missing indices. Examples -------- >>> df = pd.DataFrame({"name": ['Alfred', 'Batman', 'Catwoman'], ... "toy": [np.nan, 'Batmobile', 'Bullwhip'], ... "born": [pd.NaT, pd.Timestamp("1940-04-25"), ... pd.NaT]}) >>> df name toy born 0 Alfred NaN NaT 1 Batman Batmobile 1940-04-25 2 Catwoman Bullwhip NaT Drop the rows where at least one element is missing. >>> df.dropna() name toy born 1 Batman Batmobile 1940-04-25 Drop the columns where at least one element is missing. >>> df.dropna(axis='columns') name 0 Alfred 1 Batman 2 Catwoman Drop the rows where all elements are missing. >>> df.dropna(how='all') name toy born 0 Alfred NaN NaT 1 Batman Batmobile 1940-04-25 2 Catwoman Bullwhip NaT Keep only the rows with at least 2 non-NA values. >>> df.dropna(thresh=2) name toy born 1 Batman Batmobile 1940-04-25 2 Catwoman Bullwhip NaT Define in which columns to look for missing values. >>> df.dropna(subset=['name', 'born']) name toy born 1 Batman Batmobile 1940-04-25 Keep the DataFrame with valid entries in the same variable. 
>>> df.dropna(inplace=True) >>> df name toy born 1 Batman Batmobile 1940-04-25 """ inplace = validate_bool_kwarg(inplace, 'inplace') if isinstance(axis, (tuple, list)): # GH20987 msg = ("supplying multiple axes to axis is deprecated and " "will be removed in a future version.") warnings.warn(msg, FutureWarning, stacklevel=2) result = self for ax in axis: result = result.dropna(how=how, thresh=thresh, subset=subset, axis=ax) else: axis = self._get_axis_number(axis) agg_axis = 1 - axis agg_obj = self if subset is not None: ax = self._get_axis(agg_axis) indices = ax.get_indexer_for(subset) check = indices == -1 if check.any(): raise KeyError(list(np.compress(check, subset))) agg_obj = self.take(indices, axis=agg_axis) count = agg_obj.count(axis=agg_axis) if thresh is not None: mask = count >= thresh elif how == 'any': mask = count == len(agg_obj._get_axis(agg_axis)) elif how == 'all': mask = count > 0 else: if how is not None: raise ValueError('invalid how option: {h}'.format(h=how)) else: raise TypeError('must specify how or thresh') result = self.loc(axis=axis)[mask] if inplace: self._update_inplace(result) else: return result def drop_duplicates(self, subset=None, keep='first', inplace=False): """ Return DataFrame with duplicate rows removed, optionally only considering certain columns. Indexes, including time indexes are ignored. Parameters ---------- subset : column label or sequence of labels, optional Only consider certain columns for identifying duplicates, by default use all of the columns keep : {'first', 'last', False}, default 'first' - ``first`` : Drop duplicates except for the first occurrence. - ``last`` : Drop duplicates except for the last occurrence. - False : Drop all duplicates. inplace : boolean, default False Whether to drop duplicates in place or to return a copy Returns ------- DataFrame """ if self.empty: return self.copy() inplace = validate_bool_kwarg(inplace, 'inplace') duplicated = self.duplicated(subset, keep=keep) if inplace: inds, = (-duplicated)._ndarray_values.nonzero() new_data = self._data.take(inds) self._update_inplace(new_data) else: return self[-duplicated] def duplicated(self, subset=None, keep='first'): """ Return boolean Series denoting duplicate rows, optionally only considering certain columns. Parameters ---------- subset : column label or sequence of labels, optional Only consider certain columns for identifying duplicates, by default use all of the columns keep : {'first', 'last', False}, default 'first' - ``first`` : Mark duplicates as ``True`` except for the first occurrence. - ``last`` : Mark duplicates as ``True`` except for the last occurrence. - False : Mark all duplicates as ``True``. Returns ------- Series """ from pandas.core.sorting import get_group_index from pandas._libs.hashtable import duplicated_int64, _SIZE_HINT_LIMIT if self.empty: return Series(dtype=bool) def f(vals): labels, shape = algorithms.factorize( vals, size_hint=min(len(self), _SIZE_HINT_LIMIT)) return labels.astype('i8', copy=False), len(shape) if subset is None: subset = self.columns elif (not np.iterable(subset) or isinstance(subset, str) or isinstance(subset, tuple) and subset in self.columns): subset = subset, # Verify all columns in subset exist in the queried dataframe # Otherwise, raise a KeyError, same as if you try to __getitem__ with a # key that doesn't exist. 
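        # For illustration (hypothetical subset): with columns ['a', 'b'],
        #     Index(['a', 'zzz']).difference(self.columns) -> Index(['zzz'])
        # and the KeyError below then reports exactly those missing labels.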
diff = Index(subset).difference(self.columns) if not diff.empty: raise KeyError(diff) vals = (col.values for name, col in self.iteritems() if name in subset) labels, shape = map(list, zip(*map(f, vals))) ids = get_group_index(labels, shape, sort=False, xnull=False) return Series(duplicated_int64(ids, keep), index=self.index) # ---------------------------------------------------------------------- # Sorting @Substitution(**_shared_doc_kwargs) @Appender(NDFrame.sort_values.__doc__) def sort_values(self, by, axis=0, ascending=True, inplace=False, kind='quicksort', na_position='last'): inplace = validate_bool_kwarg(inplace, 'inplace') axis = self._get_axis_number(axis) if not isinstance(by, list): by = [by] if is_sequence(ascending) and len(by) != len(ascending): raise ValueError('Length of ascending (%d) != length of by (%d)' % (len(ascending), len(by))) if len(by) > 1: from pandas.core.sorting import lexsort_indexer keys = [self._get_label_or_level_values(x, axis=axis) for x in by] indexer = lexsort_indexer(keys, orders=ascending, na_position=na_position) indexer = ensure_platform_int(indexer) else: from pandas.core.sorting import nargsort by = by[0] k = self._get_label_or_level_values(by, axis=axis) if isinstance(ascending, (tuple, list)): ascending = ascending[0] indexer = nargsort(k, kind=kind, ascending=ascending, na_position=na_position) new_data = self._data.take(indexer, axis=self._get_block_manager_axis(axis), verify=False) if inplace: return self._update_inplace(new_data) else: return self._constructor(new_data).__finalize__(self) @Substitution(**_shared_doc_kwargs) @Appender(NDFrame.sort_index.__doc__) def sort_index(self, axis=0, level=None, ascending=True, inplace=False, kind='quicksort', na_position='last', sort_remaining=True, by=None): # TODO: this can be combined with Series.sort_index impl as # almost identical inplace = validate_bool_kwarg(inplace, 'inplace') # 10726 if by is not None: warnings.warn("by argument to sort_index is deprecated, " "please use .sort_values(by=...)", FutureWarning, stacklevel=2) if level is not None: raise ValueError("unable to simultaneously sort by and level") return self.sort_values(by, axis=axis, ascending=ascending, inplace=inplace) axis = self._get_axis_number(axis) labels = self._get_axis(axis) # make sure that the axis is lexsorted to start # if not we need to reconstruct to get the correct indexer labels = labels._sort_levels_monotonic() if level is not None: new_axis, indexer = labels.sortlevel(level, ascending=ascending, sort_remaining=sort_remaining) elif isinstance(labels, MultiIndex): from pandas.core.sorting import lexsort_indexer indexer = lexsort_indexer(labels._get_codes_for_sorting(), orders=ascending, na_position=na_position) else: from pandas.core.sorting import nargsort # Check monotonic-ness before sort an index # GH11080 if ((ascending and labels.is_monotonic_increasing) or (not ascending and labels.is_monotonic_decreasing)): if inplace: return else: return self.copy() indexer = nargsort(labels, kind=kind, ascending=ascending, na_position=na_position) baxis = self._get_block_manager_axis(axis) new_data = self._data.take(indexer, axis=baxis, verify=False) # reconstruct axis if needed new_data.axes[baxis] = new_data.axes[baxis]._sort_levels_monotonic() if inplace: return self._update_inplace(new_data) else: return self._constructor(new_data).__finalize__(self) def nlargest(self, n, columns, keep='first'): """ Return the first `n` rows ordered by `columns` in descending order. 
Return the first `n` rows with the largest values in `columns`, in descending order. The columns that are not specified are returned as well, but not used for ordering. This method is equivalent to ``df.sort_values(columns, ascending=False).head(n)``, but more performant. Parameters ---------- n : int Number of rows to return. columns : label or list of labels Column label(s) to order by. keep : {'first', 'last', 'all'}, default 'first' Where there are duplicate values: - `first` : prioritize the first occurrence(s) - `last` : prioritize the last occurrence(s) - ``all`` : do not drop any duplicates, even it means selecting more than `n` items. .. versionadded:: 0.24.0 Returns ------- DataFrame The first `n` rows ordered by the given columns in descending order. See Also -------- DataFrame.nsmallest : Return the first `n` rows ordered by `columns` in ascending order. DataFrame.sort_values : Sort DataFrame by the values. DataFrame.head : Return the first `n` rows without re-ordering. Notes ----- This function cannot be used with all column types. For example, when specifying columns with `object` or `category` dtypes, ``TypeError`` is raised. Examples -------- >>> df = pd.DataFrame({'population': [59000000, 65000000, 434000, ... 434000, 434000, 337000, 11300, ... 11300, 11300], ... 'GDP': [1937894, 2583560 , 12011, 4520, 12128, ... 17036, 182, 38, 311], ... 'alpha-2': ["IT", "FR", "MT", "MV", "BN", ... "IS", "NR", "TV", "AI"]}, ... index=["Italy", "France", "Malta", ... "Maldives", "Brunei", "Iceland", ... "Nauru", "Tuvalu", "Anguilla"]) >>> df population GDP alpha-2 Italy 59000000 1937894 IT France 65000000 2583560 FR Malta 434000 12011 MT Maldives 434000 4520 MV Brunei 434000 12128 BN Iceland 337000 17036 IS Nauru 11300 182 NR Tuvalu 11300 38 TV Anguilla 11300 311 AI In the following example, we will use ``nlargest`` to select the three rows having the largest values in column "population". >>> df.nlargest(3, 'population') population GDP alpha-2 France 65000000 2583560 FR Italy 59000000 1937894 IT Malta 434000 12011 MT When using ``keep='last'``, ties are resolved in reverse order: >>> df.nlargest(3, 'population', keep='last') population GDP alpha-2 France 65000000 2583560 FR Italy 59000000 1937894 IT Brunei 434000 12128 BN When using ``keep='all'``, all duplicate items are maintained: >>> df.nlargest(3, 'population', keep='all') population GDP alpha-2 France 65000000 2583560 FR Italy 59000000 1937894 IT Malta 434000 12011 MT Maldives 434000 4520 MV Brunei 434000 12128 BN To order by the largest values in column "population" and then "GDP", we can specify multiple columns like in the next example. >>> df.nlargest(3, ['population', 'GDP']) population GDP alpha-2 France 65000000 2583560 FR Italy 59000000 1937894 IT Brunei 434000 12128 BN """ return algorithms.SelectNFrame(self, n=n, keep=keep, columns=columns).nlargest() def nsmallest(self, n, columns, keep='first'): """ Return the first `n` rows ordered by `columns` in ascending order. Return the first `n` rows with the smallest values in `columns`, in ascending order. The columns that are not specified are returned as well, but not used for ordering. This method is equivalent to ``df.sort_values(columns, ascending=True).head(n)``, but more performant. Parameters ---------- n : int Number of items to retrieve. columns : list or str Column name or names to order by. keep : {'first', 'last', 'all'}, default 'first' Where there are duplicate values: - ``first`` : take the first occurrence. - ``last`` : take the last occurrence. 
            - ``all`` : do not drop any duplicates, even if it means
              selecting more than `n` items.

            .. versionadded:: 0.24.0

        Returns
        -------
        DataFrame

        See Also
        --------
        DataFrame.nlargest : Return the first `n` rows ordered by `columns`
            in descending order.
        DataFrame.sort_values : Sort DataFrame by the values.
        DataFrame.head : Return the first `n` rows without re-ordering.

        Examples
        --------
        >>> df = pd.DataFrame({'population': [59000000, 65000000, 434000,
        ...                                   434000, 434000, 337000, 11300,
        ...                                   11300, 11300],
        ...                    'GDP': [1937894, 2583560, 12011, 4520, 12128,
        ...                            17036, 182, 38, 311],
        ...                    'alpha-2': ["IT", "FR", "MT", "MV", "BN",
        ...                                "IS", "NR", "TV", "AI"]},
        ...                   index=["Italy", "France", "Malta",
        ...                          "Maldives", "Brunei", "Iceland",
        ...                          "Nauru", "Tuvalu", "Anguilla"])
        >>> df
                  population      GDP alpha-2
        Italy       59000000  1937894      IT
        France      65000000  2583560      FR
        Malta         434000    12011      MT
        Maldives      434000     4520      MV
        Brunei        434000    12128      BN
        Iceland       337000    17036      IS
        Nauru          11300      182      NR
        Tuvalu         11300       38      TV
        Anguilla       11300      311      AI

        In the following example, we will use ``nsmallest`` to select the
        three rows having the smallest values in column "population".

        >>> df.nsmallest(3, 'population')
                  population  GDP alpha-2
        Nauru          11300  182      NR
        Tuvalu         11300   38      TV
        Anguilla       11300  311      AI

        When using ``keep='last'``, ties are resolved in reverse order:

        >>> df.nsmallest(3, 'population', keep='last')
                  population  GDP alpha-2
        Anguilla       11300  311      AI
        Tuvalu         11300   38      TV
        Nauru          11300  182      NR

        When using ``keep='all'``, all duplicate items are maintained:

        >>> df.nsmallest(3, 'population', keep='all')
                  population  GDP alpha-2
        Nauru          11300  182      NR
        Tuvalu         11300   38      TV
        Anguilla       11300  311      AI

        To order by the smallest values in column "population" and then
        "GDP", we can specify multiple columns like in the next example.

        >>> df.nsmallest(3, ['population', 'GDP'])
                  population  GDP alpha-2
        Tuvalu         11300   38      TV
        Nauru          11300  182      NR
        Anguilla       11300  311      AI
        """
        return algorithms.SelectNFrame(self,
                                       n=n,
                                       keep=keep,
                                       columns=columns).nsmallest()

    def swaplevel(self, i=-2, j=-1, axis=0):
        """
        Swap levels i and j in a MultiIndex on a particular axis.

        Parameters
        ----------
        i, j : int, string (can be mixed)
            Level of index to be swapped. Can pass level name as string.

        Returns
        -------
        DataFrame

        .. versionchanged:: 0.18.1

           The indexes ``i`` and ``j`` are now optional, and default to
           the two innermost levels of the index.
        """
        result = self.copy()

        axis = self._get_axis_number(axis)
        if axis == 0:
            result.index = result.index.swaplevel(i, j)
        else:
            result.columns = result.columns.swaplevel(i, j)
        return result

    def reorder_levels(self, order, axis=0):
        """
        Rearrange index levels using input order. May not drop or
        duplicate levels.

        Parameters
        ----------
        order : list of int or list of str
            List representing new level order. Reference level by number
            (position) or by key (label).
        axis : int
            Where to reorder levels.
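        A minimal sketch, assuming a two-level index with names
        ``['one', 'two']`` (illustrative only)::

            df.reorder_levels([1, 0])           # reference levels by position
            df.reorder_levels(['two', 'one'])   # or by name; same result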
Returns ------- type of caller (new object) """ axis = self._get_axis_number(axis) if not isinstance(self._get_axis(axis), MultiIndex): # pragma: no cover raise TypeError('Can only reorder levels on a hierarchical axis.') result = self.copy() if axis == 0: result.index = result.index.reorder_levels(order) else: result.columns = result.columns.reorder_levels(order) return result # ---------------------------------------------------------------------- # Arithmetic / combination related def _combine_frame(self, other, func, fill_value=None, level=None): this, other = self.align(other, join='outer', level=level, copy=False) new_index, new_columns = this.index, this.columns def _arith_op(left, right): # for the mixed_type case where we iterate over columns, # _arith_op(left, right) is equivalent to # left._binop(right, func, fill_value=fill_value) left, right = ops.fill_binop(left, right, fill_value) return func(left, right) if ops.should_series_dispatch(this, other, func): # iterate over columns return ops.dispatch_to_series(this, other, _arith_op) else: result = _arith_op(this.values, other.values) return self._constructor(result, index=new_index, columns=new_columns, copy=False) def _combine_match_index(self, other, func, level=None): left, right = self.align(other, join='outer', axis=0, level=level, copy=False) assert left.index.equals(right.index) if left._is_mixed_type or right._is_mixed_type: # operate column-wise; avoid costly object-casting in `.values` return ops.dispatch_to_series(left, right, func) else: # fastpath --> operate directly on values with np.errstate(all="ignore"): new_data = func(left.values.T, right.values).T return self._constructor(new_data, index=left.index, columns=self.columns, copy=False) def _combine_match_columns(self, other, func, level=None): assert isinstance(other, Series) left, right = self.align(other, join='outer', axis=1, level=level, copy=False) assert left.columns.equals(right.index) return ops.dispatch_to_series(left, right, func, axis="columns") def _combine_const(self, other, func): assert lib.is_scalar(other) or np.ndim(other) == 0 return ops.dispatch_to_series(self, other, func) def combine(self, other, func, fill_value=None, overwrite=True): """ Perform column-wise combine with another DataFrame. Combines a DataFrame with `other` DataFrame using `func` to element-wise combine columns. The row and column indexes of the resulting DataFrame will be the union of the two. Parameters ---------- other : DataFrame The DataFrame to merge column-wise. func : function Function that takes two series as inputs and return a Series or a scalar. Used to merge the two dataframes column by columns. fill_value : scalar value, default None The value to fill NaNs with prior to passing any column to the merge func. overwrite : bool, default True If True, columns in `self` that do not exist in `other` will be overwritten with NaNs. Returns ------- DataFrame Combination of the provided DataFrames. See Also -------- DataFrame.combine_first : Combine two DataFrame objects and default to non-null values in frame calling the method. Examples -------- Combine using a simple function that chooses the smaller column. >>> df1 = pd.DataFrame({'A': [0, 0], 'B': [4, 4]}) >>> df2 = pd.DataFrame({'A': [1, 1], 'B': [3, 3]}) >>> take_smaller = lambda s1, s2: s1 if s1.sum() < s2.sum() else s2 >>> df1.combine(df2, take_smaller) A B 0 0 3 1 0 3 Example using a true element-wise combine function. 
>>> df1 = pd.DataFrame({'A': [5, 0], 'B': [2, 4]}) >>> df2 = pd.DataFrame({'A': [1, 1], 'B': [3, 3]}) >>> df1.combine(df2, np.minimum) A B 0 1 2 1 0 3 Using `fill_value` fills Nones prior to passing the column to the merge function. >>> df1 = pd.DataFrame({'A': [0, 0], 'B': [None, 4]}) >>> df2 = pd.DataFrame({'A': [1, 1], 'B': [3, 3]}) >>> df1.combine(df2, take_smaller, fill_value=-5) A B 0 0 -5.0 1 0 4.0 However, if the same element in both dataframes is None, that None is preserved >>> df1 = pd.DataFrame({'A': [0, 0], 'B': [None, 4]}) >>> df2 = pd.DataFrame({'A': [1, 1], 'B': [None, 3]}) >>> df1.combine(df2, take_smaller, fill_value=-5) A B 0 0 -5.0 1 0 3.0 Example that demonstrates the use of `overwrite` and behavior when the axis differ between the dataframes. >>> df1 = pd.DataFrame({'A': [0, 0], 'B': [4, 4]}) >>> df2 = pd.DataFrame({'B': [3, 3], 'C': [-10, 1], }, index=[1, 2]) >>> df1.combine(df2, take_smaller) A B C 0 NaN NaN NaN 1 NaN 3.0 -10.0 2 NaN 3.0 1.0 >>> df1.combine(df2, take_smaller, overwrite=False) A B C 0 0.0 NaN NaN 1 0.0 3.0 -10.0 2 NaN 3.0 1.0 Demonstrating the preference of the passed in dataframe. >>> df2 = pd.DataFrame({'B': [3, 3], 'C': [1, 1], }, index=[1, 2]) >>> df2.combine(df1, take_smaller) A B C 0 0.0 NaN NaN 1 0.0 3.0 NaN 2 NaN 3.0 NaN >>> df2.combine(df1, take_smaller, overwrite=False) A B C 0 0.0 NaN NaN 1 0.0 3.0 1.0 2 NaN 3.0 1.0 """ other_idxlen = len(other.index) # save for compare this, other = self.align(other, copy=False) new_index = this.index if other.empty and len(new_index) == len(self.index): return self.copy() if self.empty and len(other) == other_idxlen: return other.copy() # sorts if possible new_columns = this.columns.union(other.columns) do_fill = fill_value is not None result = {} for col in new_columns: series = this[col] otherSeries = other[col] this_dtype = series.dtype other_dtype = otherSeries.dtype this_mask = isna(series) other_mask = isna(otherSeries) # don't overwrite columns unnecessarily # DO propagate if this column is not in the intersection if not overwrite and other_mask.all(): result[col] = this[col].copy() continue if do_fill: series = series.copy() otherSeries = otherSeries.copy() series[this_mask] = fill_value otherSeries[other_mask] = fill_value if col not in self.columns: # If self DataFrame does not have col in other DataFrame, # try to promote series, which is all NaN, as other_dtype. new_dtype = other_dtype try: series = series.astype(new_dtype, copy=False) except ValueError: # e.g. new_dtype is integer types pass else: # if we have different dtypes, possibly promote new_dtype = find_common_type([this_dtype, other_dtype]) if not is_dtype_equal(this_dtype, new_dtype): series = series.astype(new_dtype) if not is_dtype_equal(other_dtype, new_dtype): otherSeries = otherSeries.astype(new_dtype) arr = func(series, otherSeries) arr = maybe_downcast_to_dtype(arr, this_dtype) result[col] = arr # convert_objects just in case return self._constructor(result, index=new_index, columns=new_columns) def combine_first(self, other): """ Update null elements with value in the same location in `other`. Combine two DataFrame objects by filling null values in one DataFrame with non-null values from other DataFrame. The row and column indexes of the resulting DataFrame will be the union of the two. Parameters ---------- other : DataFrame Provided DataFrame to use to fill null values. Returns ------- DataFrame See Also -------- DataFrame.combine : Perform series-wise operation on two DataFrames using a given function. 
Examples -------- >>> df1 = pd.DataFrame({'A': [None, 0], 'B': [None, 4]}) >>> df2 = pd.DataFrame({'A': [1, 1], 'B': [3, 3]}) >>> df1.combine_first(df2) A B 0 1.0 3.0 1 0.0 4.0 Null values still persist if the location of that null value does not exist in `other` >>> df1 = pd.DataFrame({'A': [None, 0], 'B': [4, None]}) >>> df2 = pd.DataFrame({'B': [3, 3], 'C': [1, 1]}, index=[1, 2]) >>> df1.combine_first(df2) A B C 0 NaN 4.0 NaN 1 0.0 3.0 1.0 2 NaN 3.0 1.0 """ import pandas.core.computation.expressions as expressions def extract_values(arr): # Does two things: # 1. maybe gets the values from the Series / Index # 2. convert datelike to i8 if isinstance(arr, (ABCIndexClass, ABCSeries)): arr = arr._values if needs_i8_conversion(arr): if is_extension_array_dtype(arr.dtype): arr = arr.asi8 else: arr = arr.view('i8') return arr def combiner(x, y): mask = isna(x) if isinstance(mask, (ABCIndexClass, ABCSeries)): mask = mask._values x_values = extract_values(x) y_values = extract_values(y) # If the column y in other DataFrame is not in first DataFrame, # just return y_values. if y.name not in self.columns: return y_values return expressions.where(mask, y_values, x_values) return self.combine(other, combiner, overwrite=False) @deprecate_kwarg(old_arg_name='raise_conflict', new_arg_name='errors', mapping={False: 'ignore', True: 'raise'}) def update(self, other, join='left', overwrite=True, filter_func=None, errors='ignore'): """ Modify in place using non-NA values from another DataFrame. Aligns on indices. There is no return value. Parameters ---------- other : DataFrame, or object coercible into a DataFrame Should have at least one matching index/column label with the original DataFrame. If a Series is passed, its name attribute must be set, and that will be used as the column name to align with the original DataFrame. join : {'left'}, default 'left' Only left join is implemented, keeping the index and columns of the original object. overwrite : bool, default True How to handle non-NA values for overlapping keys: * True: overwrite original DataFrame's values with values from `other`. * False: only update values that are NA in the original DataFrame. filter_func : callable(1d-array) -> bool 1d-array, optional Can choose to replace values other than NA. Return True for values that should be updated. errors : {'raise', 'ignore'}, default 'ignore' If 'raise', will raise a ValueError if the DataFrame and `other` both contain non-NA data in the same place. .. versionchanged :: 0.24.0 Changed from `raise_conflict=False|True` to `errors='ignore'|'raise'`. Returns ------- None : method directly changes calling object Raises ------ ValueError * When `errors='raise'` and there's overlapping non-NA data. * When `errors` is not either `'ignore'` or `'raise'` NotImplementedError * If `join != 'left'` See Also -------- dict.update : Similar method for dictionaries. DataFrame.merge : For column(s)-on-columns(s) operations. Examples -------- >>> df = pd.DataFrame({'A': [1, 2, 3], ... 'B': [400, 500, 600]}) >>> new_df = pd.DataFrame({'B': [4, 5, 6], ... 'C': [7, 8, 9]}) >>> df.update(new_df) >>> df A B 0 1 4 1 2 5 2 3 6 The DataFrame's length does not increase as a result of the update, only values at matching index/column labels are updated. >>> df = pd.DataFrame({'A': ['a', 'b', 'c'], ... 'B': ['x', 'y', 'z']}) >>> new_df = pd.DataFrame({'B': ['d', 'e', 'f', 'g', 'h', 'i']}) >>> df.update(new_df) >>> df A B 0 a d 1 b e 2 c f For Series, it's name attribute must be set. 
>>> df = pd.DataFrame({'A': ['a', 'b', 'c'], ... 'B': ['x', 'y', 'z']}) >>> new_column = pd.Series(['d', 'e'], name='B', index=[0, 2]) >>> df.update(new_column) >>> df A B 0 a d 1 b y 2 c e >>> df = pd.DataFrame({'A': ['a', 'b', 'c'], ... 'B': ['x', 'y', 'z']}) >>> new_df = pd.DataFrame({'B': ['d', 'e']}, index=[1, 2]) >>> df.update(new_df) >>> df A B 0 a x 1 b d 2 c e If `other` contains NaNs the corresponding values are not updated in the original dataframe. >>> df = pd.DataFrame({'A': [1, 2, 3], ... 'B': [400, 500, 600]}) >>> new_df = pd.DataFrame({'B': [4, np.nan, 6]}) >>> df.update(new_df) >>> df A B 0 1 4.0 1 2 500.0 2 3 6.0 """ import pandas.core.computation.expressions as expressions # TODO: Support other joins if join != 'left': # pragma: no cover raise NotImplementedError("Only left join is supported") if errors not in ['ignore', 'raise']: raise ValueError("The parameter errors must be either " "'ignore' or 'raise'") if not isinstance(other, DataFrame): other = DataFrame(other) other = other.reindex_like(self) for col in self.columns: this = self[col]._values that = other[col]._values if filter_func is not None: with np.errstate(all='ignore'): mask = ~filter_func(this) | isna(that) else: if errors == 'raise': mask_this = notna(that) mask_that = notna(this) if any(mask_this & mask_that): raise ValueError("Data overlaps.") if overwrite: mask = isna(that) else: mask = notna(this) # don't overwrite columns unnecessarily if mask.all(): continue self[col] = expressions.where(mask, this, that) # ---------------------------------------------------------------------- # Data reshaping _shared_docs['pivot'] = """ Return reshaped DataFrame organized by given index / column values. Reshape data (produce a "pivot" table) based on column values. Uses unique values from specified `index` / `columns` to form axes of the resulting DataFrame. This function does not support data aggregation, multiple values will result in a MultiIndex in the columns. See the :ref:`User Guide <reshaping>` for more on reshaping. Parameters ----------%s index : string or object, optional Column to use to make new frame's index. If None, uses existing index. columns : string or object Column to use to make new frame's columns. values : string, object or a list of the previous, optional Column(s) to use for populating new frame's values. If not specified, all remaining columns will be used and the result will have hierarchically indexed columns. .. versionchanged :: 0.23.0 Also accept list of column names. Returns ------- DataFrame Returns reshaped DataFrame. Raises ------ ValueError: When there are any `index`, `columns` combinations with multiple values. `DataFrame.pivot_table` when you need to aggregate. See Also -------- DataFrame.pivot_table : Generalization of pivot that can handle duplicate values for one index/column pair. DataFrame.unstack : Pivot based on the index values instead of a column. Notes ----- For finer-tuned control, see hierarchical indexing documentation along with the related stack/unstack methods. Examples -------- >>> df = pd.DataFrame({'foo': ['one', 'one', 'one', 'two', 'two', ... 'two'], ... 'bar': ['A', 'B', 'C', 'A', 'B', 'C'], ... 'baz': [1, 2, 3, 4, 5, 6], ... 
'zoo': ['x', 'y', 'z', 'q', 'w', 't']}) >>> df foo bar baz zoo 0 one A 1 x 1 one B 2 y 2 one C 3 z 3 two A 4 q 4 two B 5 w 5 two C 6 t >>> df.pivot(index='foo', columns='bar', values='baz') bar A B C foo one 1 2 3 two 4 5 6 >>> df.pivot(index='foo', columns='bar')['baz'] bar A B C foo one 1 2 3 two 4 5 6 >>> df.pivot(index='foo', columns='bar', values=['baz', 'zoo']) baz zoo bar A B C A B C foo one 1 2 3 x y z two 4 5 6 q w t A ValueError is raised if there are any duplicates. >>> df = pd.DataFrame({"foo": ['one', 'one', 'two', 'two'], ... "bar": ['A', 'A', 'B', 'C'], ... "baz": [1, 2, 3, 4]}) >>> df foo bar baz 0 one A 1 1 one A 2 2 two B 3 3 two C 4 Notice that the first two rows are the same for our `index` and `columns` arguments. >>> df.pivot(index='foo', columns='bar', values='baz') Traceback (most recent call last): ... ValueError: Index contains duplicate entries, cannot reshape """ @Substitution('') @Appender(_shared_docs['pivot']) def pivot(self, index=None, columns=None, values=None): from pandas.core.reshape.pivot import pivot return pivot(self, index=index, columns=columns, values=values) _shared_docs['pivot_table'] = """ Create a spreadsheet-style pivot table as a DataFrame. The levels in the pivot table will be stored in MultiIndex objects (hierarchical indexes) on the index and columns of the result DataFrame. Parameters ----------%s values : column to aggregate, optional index : column, Grouper, array, or list of the previous If an array is passed, it must be the same length as the data. The list can contain any of the other types (except list). Keys to group by on the pivot table index. If an array is passed, it is being used as the same manner as column values. columns : column, Grouper, array, or list of the previous If an array is passed, it must be the same length as the data. The list can contain any of the other types (except list). Keys to group by on the pivot table column. If an array is passed, it is being used as the same manner as column values. aggfunc : function, list of functions, dict, default numpy.mean If list of functions passed, the resulting pivot table will have hierarchical columns whose top level are the function names (inferred from the function objects themselves) If dict is passed, the key is column to aggregate and value is function or list of functions fill_value : scalar, default None Value to replace missing values with margins : boolean, default False Add all row / columns (e.g. for subtotal / grand totals) dropna : boolean, default True Do not include columns whose entries are all NaN margins_name : string, default 'All' Name of the row / column that will contain the totals when margins is True. observed : boolean, default False This only applies if any of the groupers are Categoricals. If True: only show observed values for categorical groupers. If False: show all values for categorical groupers. .. versionchanged :: 0.25.0 Returns ------- DataFrame See Also -------- DataFrame.pivot : Pivot without aggregation that can handle non-numeric data. Examples -------- >>> df = pd.DataFrame({"A": ["foo", "foo", "foo", "foo", "foo", ... "bar", "bar", "bar", "bar"], ... "B": ["one", "one", "one", "two", "two", ... "one", "one", "two", "two"], ... "C": ["small", "large", "large", "small", ... "small", "large", "small", "small", ... "large"], ... "D": [1, 2, 2, 3, 3, 4, 5, 6, 7], ... 
"E": [2, 4, 5, 5, 6, 6, 8, 9, 9]}) >>> df A B C D E 0 foo one small 1 2 1 foo one large 2 4 2 foo one large 2 5 3 foo two small 3 5 4 foo two small 3 6 5 bar one large 4 6 6 bar one small 5 8 7 bar two small 6 9 8 bar two large 7 9 This first example aggregates values by taking the sum. >>> table = pd.pivot_table(df, values='D', index=['A', 'B'], ... columns=['C'], aggfunc=np.sum) >>> table C large small A B bar one 4.0 5.0 two 7.0 6.0 foo one 4.0 1.0 two NaN 6.0 We can also fill missing values using the `fill_value` parameter. >>> table = pd.pivot_table(df, values='D', index=['A', 'B'], ... columns=['C'], aggfunc=np.sum, fill_value=0) >>> table C large small A B bar one 4 5 two 7 6 foo one 4 1 two 0 6 The next example aggregates by taking the mean across multiple columns. >>> table = pd.pivot_table(df, values=['D', 'E'], index=['A', 'C'], ... aggfunc={'D': np.mean, ... 'E': np.mean}) >>> table D E A C bar large 5.500000 7.500000 small 5.500000 8.500000 foo large 2.000000 4.500000 small 2.333333 4.333333 We can also calculate multiple types of aggregations for any given value column. >>> table = pd.pivot_table(df, values=['D', 'E'], index=['A', 'C'], ... aggfunc={'D': np.mean, ... 'E': [min, max, np.mean]}) >>> table D E mean max mean min A C bar large 5.500000 9.0 7.500000 6.0 small 5.500000 9.0 8.500000 8.0 foo large 2.000000 5.0 4.500000 4.0 small 2.333333 6.0 4.333333 2.0 """ @Substitution('') @Appender(_shared_docs['pivot_table']) def pivot_table(self, values=None, index=None, columns=None, aggfunc='mean', fill_value=None, margins=False, dropna=True, margins_name='All', observed=False): from pandas.core.reshape.pivot import pivot_table return pivot_table(self, values=values, index=index, columns=columns, aggfunc=aggfunc, fill_value=fill_value, margins=margins, dropna=dropna, margins_name=margins_name, observed=observed) def stack(self, level=-1, dropna=True): """ Stack the prescribed level(s) from columns to index. Return a reshaped DataFrame or Series having a multi-level index with one or more new inner-most levels compared to the current DataFrame. The new inner-most levels are created by pivoting the columns of the current dataframe: - if the columns have a single level, the output is a Series; - if the columns have multiple levels, the new index level(s) is (are) taken from the prescribed level(s) and the output is a DataFrame. The new index levels are sorted. Parameters ---------- level : int, str, list, default -1 Level(s) to stack from the column axis onto the index axis, defined as one index or label, or a list of indices or labels. dropna : bool, default True Whether to drop rows in the resulting Frame/Series with missing values. Stacking a column level onto the index axis can create combinations of index and column values that are missing from the original dataframe. See Examples section. Returns ------- DataFrame or Series Stacked dataframe or series. See Also -------- DataFrame.unstack : Unstack prescribed level(s) from index axis onto column axis. DataFrame.pivot : Reshape dataframe from long format to wide format. DataFrame.pivot_table : Create a spreadsheet-style pivot table as a DataFrame. Notes ----- The function is named by analogy with a collection of books being reorganized from being side by side on a horizontal position (the columns of the dataframe) to being stacked vertically on top of each other (in the index of the dataframe). Examples -------- **Single level columns** >>> df_single_level_cols = pd.DataFrame([[0, 1], [2, 3]], ... index=['cat', 'dog'], ... 
columns=['weight', 'height']) Stacking a dataframe with a single level column axis returns a Series: >>> df_single_level_cols weight height cat 0 1 dog 2 3 >>> df_single_level_cols.stack() cat weight 0 height 1 dog weight 2 height 3 dtype: int64 **Multi level columns: simple case** >>> multicol1 = pd.MultiIndex.from_tuples([('weight', 'kg'), ... ('weight', 'pounds')]) >>> df_multi_level_cols1 = pd.DataFrame([[1, 2], [2, 4]], ... index=['cat', 'dog'], ... columns=multicol1) Stacking a dataframe with a multi-level column axis: >>> df_multi_level_cols1 weight kg pounds cat 1 2 dog 2 4 >>> df_multi_level_cols1.stack() weight cat kg 1 pounds 2 dog kg 2 pounds 4 **Missing values** >>> multicol2 = pd.MultiIndex.from_tuples([('weight', 'kg'), ... ('height', 'm')]) >>> df_multi_level_cols2 = pd.DataFrame([[1.0, 2.0], [3.0, 4.0]], ... index=['cat', 'dog'], ... columns=multicol2) It is common to have missing values when stacking a dataframe with multi-level columns, as the stacked dataframe typically has more values than the original dataframe. Missing values are filled with NaNs: >>> df_multi_level_cols2 weight height kg m cat 1.0 2.0 dog 3.0 4.0 >>> df_multi_level_cols2.stack() height weight cat kg NaN 1.0 m 2.0 NaN dog kg NaN 3.0 m 4.0 NaN **Prescribing the level(s) to be stacked** The first parameter controls which level or levels are stacked: >>> df_multi_level_cols2.stack(0) kg m cat height NaN 2.0 weight 1.0 NaN dog height NaN 4.0 weight 3.0 NaN >>> df_multi_level_cols2.stack([0, 1]) cat height m 2.0 weight kg 1.0 dog height m 4.0 weight kg 3.0 dtype: float64 **Dropping missing values** >>> df_multi_level_cols3 = pd.DataFrame([[None, 1.0], [2.0, 3.0]], ... index=['cat', 'dog'], ... columns=multicol2) Note that rows where all values are missing are dropped by default but this behaviour can be controlled via the dropna keyword parameter: >>> df_multi_level_cols3 weight height kg m cat NaN 1.0 dog 2.0 3.0 >>> df_multi_level_cols3.stack(dropna=False) height weight cat kg NaN NaN m 1.0 NaN dog kg NaN 2.0 m 3.0 NaN >>> df_multi_level_cols3.stack(dropna=True) height weight cat m 1.0 NaN dog kg NaN 2.0 m 3.0 NaN """ from pandas.core.reshape.reshape import stack, stack_multiple if isinstance(level, (tuple, list)): return stack_multiple(self, level, dropna=dropna) else: return stack(self, level, dropna=dropna) def unstack(self, level=-1, fill_value=None): """ Pivot a level of the (necessarily hierarchical) index labels, returning a DataFrame having a new level of column labels whose inner-most level consists of the pivoted index labels. If the index is not a MultiIndex, the output will be a Series (the analogue of stack when the columns are not a MultiIndex). The level involved will automatically get sorted. Parameters ---------- level : int, string, or list of these, default -1 (last level) Level(s) of index to unstack, can pass level name fill_value : replace NaN with this value if the unstack produces missing values .. versionadded:: 0.18.0 Returns ------- Series or DataFrame See Also -------- DataFrame.pivot : Pivot a table based on column values. DataFrame.stack : Pivot a level of the column labels (inverse operation from `unstack`). Examples -------- >>> index = pd.MultiIndex.from_tuples([('one', 'a'), ('one', 'b'), ... 
('two', 'a'), ('two', 'b')]) >>> s = pd.Series(np.arange(1.0, 5.0), index=index) >>> s one a 1.0 b 2.0 two a 3.0 b 4.0 dtype: float64 >>> s.unstack(level=-1) a b one 1.0 2.0 two 3.0 4.0 >>> s.unstack(level=0) one two a 1.0 3.0 b 2.0 4.0 >>> df = s.unstack(level=0) >>> df.unstack() one a 1.0 b 2.0 two a 3.0 b 4.0 dtype: float64 """ from pandas.core.reshape.reshape import unstack return unstack(self, level, fill_value) _shared_docs['melt'] = (""" Unpivot a DataFrame from wide format to long format, optionally leaving identifier variables set. This function is useful to massage a DataFrame into a format where one or more columns are identifier variables (`id_vars`), while all other columns, considered measured variables (`value_vars`), are "unpivoted" to the row axis, leaving just two non-identifier columns, 'variable' and 'value'. %(versionadded)s Parameters ---------- frame : DataFrame id_vars : tuple, list, or ndarray, optional Column(s) to use as identifier variables. value_vars : tuple, list, or ndarray, optional Column(s) to unpivot. If not specified, uses all columns that are not set as `id_vars`. var_name : scalar Name to use for the 'variable' column. If None it uses ``frame.columns.name`` or 'variable'. value_name : scalar, default 'value' Name to use for the 'value' column. col_level : int or string, optional If columns are a MultiIndex then use this level to melt. Returns ------- DataFrame Unpivoted DataFrame. See Also -------- %(other)s pivot_table DataFrame.pivot Examples -------- >>> df = pd.DataFrame({'A': {0: 'a', 1: 'b', 2: 'c'}, ... 'B': {0: 1, 1: 3, 2: 5}, ... 'C': {0: 2, 1: 4, 2: 6}}) >>> df A B C 0 a 1 2 1 b 3 4 2 c 5 6 >>> %(caller)sid_vars=['A'], value_vars=['B']) A variable value 0 a B 1 1 b B 3 2 c B 5 >>> %(caller)sid_vars=['A'], value_vars=['B', 'C']) A variable value 0 a B 1 1 b B 3 2 c B 5 3 a C 2 4 b C 4 5 c C 6 The names of 'variable' and 'value' columns can be customized: >>> %(caller)sid_vars=['A'], value_vars=['B'], ... var_name='myVarname', value_name='myValname') A myVarname myValname 0 a B 1 1 b B 3 2 c B 5 If you have multi-index columns: >>> df.columns = [list('ABC'), list('DEF')] >>> df A B C D E F 0 a 1 2 1 b 3 4 2 c 5 6 >>> %(caller)scol_level=0, id_vars=['A'], value_vars=['B']) A variable value 0 a B 1 1 b B 3 2 c B 5 >>> %(caller)sid_vars=[('A', 'D')], value_vars=[('B', 'E')]) (A, D) variable_0 variable_1 value 0 a B E 1 1 b B E 3 2 c B E 5 """) @Appender(_shared_docs['melt'] % dict(caller='df.melt(', versionadded='.. versionadded:: 0.20.0\n', other='melt')) def melt(self, id_vars=None, value_vars=None, var_name=None, value_name='value', col_level=None): from pandas.core.reshape.melt import melt return melt(self, id_vars=id_vars, value_vars=value_vars, var_name=var_name, value_name=value_name, col_level=col_level) # ---------------------------------------------------------------------- # Time series-related def diff(self, periods=1, axis=0): """ First discrete difference of element. Calculates the difference of a DataFrame element compared with another element in the DataFrame (default is the element in the same column of the previous row). Parameters ---------- periods : int, default 1 Periods to shift for calculating difference, accepts negative values. axis : {0 or 'index', 1 or 'columns'}, default 0 Take difference over rows (0) or columns (1). .. versionadded:: 0.16.1. Returns ------- DataFrame See Also -------- Series.diff: First discrete difference for a Series. DataFrame.pct_change: Percent change over given number of periods. 
DataFrame.shift: Shift index by desired number of periods with an optional time freq. Examples -------- Difference with previous row >>> df = pd.DataFrame({'a': [1, 2, 3, 4, 5, 6], ... 'b': [1, 1, 2, 3, 5, 8], ... 'c': [1, 4, 9, 16, 25, 36]}) >>> df a b c 0 1 1 1 1 2 1 4 2 3 2 9 3 4 3 16 4 5 5 25 5 6 8 36 >>> df.diff() a b c 0 NaN NaN NaN 1 1.0 0.0 3.0 2 1.0 1.0 5.0 3 1.0 1.0 7.0 4 1.0 2.0 9.0 5 1.0 3.0 11.0 Difference with previous column >>> df.diff(axis=1) a b c 0 NaN 0.0 0.0 1 NaN -1.0 3.0 2 NaN -1.0 7.0 3 NaN -1.0 13.0 4 NaN 0.0 20.0 5 NaN 2.0 28.0 Difference with 3rd previous row >>> df.diff(periods=3) a b c 0 NaN NaN NaN 1 NaN NaN NaN 2 NaN NaN NaN 3 3.0 2.0 15.0 4 3.0 4.0 21.0 5 3.0 6.0 27.0 Difference with following row >>> df.diff(periods=-1) a b c 0 -1.0 0.0 -3.0 1 -1.0 -1.0 -5.0 2 -1.0 -1.0 -7.0 3 -1.0 -2.0 -9.0 4 -1.0 -3.0 -11.0 5 NaN NaN NaN """ bm_axis = self._get_block_manager_axis(axis) new_data = self._data.diff(n=periods, axis=bm_axis) return self._constructor(new_data) # ---------------------------------------------------------------------- # Function application def _gotitem(self, key: Union[str, List[str]], ndim: int, subset: Optional[Union[Series, ABCDataFrame]] = None, ) -> Union[Series, ABCDataFrame]: """ Sub-classes to define. Return a sliced object. Parameters ---------- key : string / list of selections ndim : 1,2 requested ndim of result subset : object, default None subset to act on """ if subset is None: subset = self elif subset.ndim == 1: # is Series return subset # TODO: _shallow_copy(subset)? return subset[key] _agg_summary_and_see_also_doc = dedent(""" The aggregation operations are always performed over an axis, either the index (default) or the column axis. This behavior is different from `numpy` aggregation functions (`mean`, `median`, `prod`, `sum`, `std`, `var`), where the default is to compute the aggregation of the flattened array, e.g., ``numpy.mean(arr_2d)`` as opposed to ``numpy.mean(arr_2d, axis=0)``. `agg` is an alias for `aggregate`. Use the alias. See Also -------- DataFrame.apply : Perform any type of operations. DataFrame.transform : Perform transformation type operations. core.groupby.GroupBy : Perform operations over groups. core.resample.Resampler : Perform operations over resampled bins. core.window.Rolling : Perform operations over rolling window. core.window.Expanding : Perform operations over expanding window. core.window.EWM : Perform operation over exponential weighted window. """) _agg_examples_doc = dedent(""" Examples -------- >>> df = pd.DataFrame([[1, 2, 3], ... [4, 5, 6], ... [7, 8, 9], ... [np.nan, np.nan, np.nan]], ... columns=['A', 'B', 'C']) Aggregate these functions over the rows. >>> df.agg(['sum', 'min']) A B C sum 12.0 15.0 18.0 min 1.0 2.0 3.0 Different aggregations per column. >>> df.agg({'A' : ['sum', 'min'], 'B' : ['min', 'max']}) A B max NaN 8.0 min 1.0 2.0 sum 12.0 NaN Aggregate over the columns. >>> df.agg("mean", axis="columns") 0 2.0 1 5.0 2 8.0 3 NaN dtype: float64 """) @Substitution(see_also=_agg_summary_and_see_also_doc, examples=_agg_examples_doc, versionadded='\n.. 
versionadded:: 0.20.0\n', **_shared_doc_kwargs) @Appender(_shared_docs['aggregate']) def aggregate(self, func, axis=0, *args, **kwargs): axis = self._get_axis_number(axis) result = None try: result, how = self._aggregate(func, axis=axis, *args, **kwargs) except TypeError: pass if result is None: return self.apply(func, axis=axis, args=args, **kwargs) return result def _aggregate(self, arg, axis=0, *args, **kwargs): if axis == 1: # NDFrame.aggregate returns a tuple, and we need to transpose # only result result, how = self.T._aggregate(arg, *args, **kwargs) result = result.T if result is not None else result return result, how return super()._aggregate(arg, *args, **kwargs) agg = aggregate @Appender(_shared_docs['transform'] % _shared_doc_kwargs) def transform(self, func, axis=0, *args, **kwargs): axis = self._get_axis_number(axis) if axis == 1: return self.T.transform(func, *args, **kwargs).T return super().transform(func, *args, **kwargs) def apply(self, func, axis=0, broadcast=None, raw=False, reduce=None, result_type=None, args=(), **kwds): """ Apply a function along an axis of the DataFrame. Objects passed to the function are Series objects whose index is either the DataFrame's index (``axis=0``) or the DataFrame's columns (``axis=1``). By default (``result_type=None``), the final return type is inferred from the return type of the applied function. Otherwise, it depends on the `result_type` argument. Parameters ---------- func : function Function to apply to each column or row. axis : {0 or 'index', 1 or 'columns'}, default 0 Axis along which the function is applied: * 0 or 'index': apply function to each column. * 1 or 'columns': apply function to each row. broadcast : bool, optional Only relevant for aggregation functions: * ``False`` or ``None`` : returns a Series whose length is the length of the index or the number of columns (based on the `axis` parameter) * ``True`` : results will be broadcast to the original shape of the frame, the original index and columns will be retained. .. deprecated:: 0.23.0 This argument will be removed in a future version, replaced by result_type='broadcast'. raw : bool, default False * ``False`` : passes each row or column as a Series to the function. * ``True`` : the passed function will receive ndarray objects instead. If you are just applying a NumPy reduction function this will achieve much better performance. reduce : bool or None, default None Try to apply reduction procedures. If the DataFrame is empty, `apply` will use `reduce` to determine whether the result should be a Series or a DataFrame. If ``reduce=None`` (the default), `apply`'s return value will be guessed by calling `func` on an empty Series (note: while guessing, exceptions raised by `func` will be ignored). If ``reduce=True`` a Series will always be returned, and if ``reduce=False`` a DataFrame will always be returned. .. deprecated:: 0.23.0 This argument will be removed in a future version, replaced by ``result_type='reduce'``. result_type : {'expand', 'reduce', 'broadcast', None}, default None These only act when ``axis=1`` (columns): * 'expand' : list-like results will be turned into columns. * 'reduce' : returns a Series if possible rather than expanding list-like results. This is the opposite of 'expand'. * 'broadcast' : results will be broadcast to the original shape of the DataFrame, the original index and columns will be retained. The default behaviour (None) depends on the return value of the applied function: list-like results will be returned as a Series of those. 
However if the apply function returns a Series these are expanded to columns. .. versionadded:: 0.23.0 args : tuple Positional arguments to pass to `func` in addition to the array/series. **kwds Additional keyword arguments to pass as keywords arguments to `func`. Returns ------- Series or DataFrame Result of applying ``func`` along the given axis of the DataFrame. See Also -------- DataFrame.applymap: For elementwise operations. DataFrame.aggregate: Only perform aggregating type operations. DataFrame.transform: Only perform transforming type operations. Notes ----- In the current implementation apply calls `func` twice on the first column/row to decide whether it can take a fast or slow code path. This can lead to unexpected behavior if `func` has side-effects, as they will take effect twice for the first column/row. Examples -------- >>> df = pd.DataFrame([[4, 9]] * 3, columns=['A', 'B']) >>> df A B 0 4 9 1 4 9 2 4 9 Using a numpy universal function (in this case the same as ``np.sqrt(df)``): >>> df.apply(np.sqrt) A B 0 2.0 3.0 1 2.0 3.0 2 2.0 3.0 Using a reducing function on either axis >>> df.apply(np.sum, axis=0) A 12 B 27 dtype: int64 >>> df.apply(np.sum, axis=1) 0 13 1 13 2 13 dtype: int64 Returning a list-like will result in a Series >>> df.apply(lambda x: [1, 2], axis=1) 0 [1, 2] 1 [1, 2] 2 [1, 2] dtype: object Passing result_type='expand' will expand list-like results to columns of a Dataframe >>> df.apply(lambda x: [1, 2], axis=1, result_type='expand') 0 1 0 1 2 1 1 2 2 1 2 Returning a Series inside the function is similar to passing ``result_type='expand'``. The resulting column names will be the Series index. >>> df.apply(lambda x: pd.Series([1, 2], index=['foo', 'bar']), axis=1) foo bar 0 1 2 1 1 2 2 1 2 Passing ``result_type='broadcast'`` will ensure the same shape result, whether list-like or scalar is returned by the function, and broadcast it along the axis. The resulting column names will be the originals. >>> df.apply(lambda x: [1, 2], axis=1, result_type='broadcast') A B 0 1 2 1 1 2 2 1 2 """ from pandas.core.apply import frame_apply op = frame_apply(self, func=func, axis=axis, broadcast=broadcast, raw=raw, reduce=reduce, result_type=result_type, args=args, kwds=kwds) return op.get_result() def applymap(self, func): """ Apply a function to a Dataframe elementwise. This method applies a function that accepts and returns a scalar to every element of a DataFrame. Parameters ---------- func : callable Python function, returns a single value from a single value. Returns ------- DataFrame Transformed DataFrame. See Also -------- DataFrame.apply : Apply a function along input axis of DataFrame. Notes ----- In the current implementation applymap calls `func` twice on the first column/row to decide whether it can take a fast or slow code path. This can lead to unexpected behavior if `func` has side-effects, as they will take effect twice for the first column/row. Examples -------- >>> df = pd.DataFrame([[1, 2.12], [3.356, 4.567]]) >>> df 0 1 0 1.000 2.120 1 3.356 4.567 >>> df.applymap(lambda x: len(str(x))) 0 1 0 3 4 1 5 5 Note that a vectorized version of `func` often exists, which will be much faster. You could square each number elementwise. >>> df.applymap(lambda x: x**2) 0 1 0 1.000000 4.494400 1 11.262736 20.857489 But it's better to avoid applymap in that case. 
>>> df ** 2 0 1 0 1.000000 4.494400 1 11.262736 20.857489 """ # if we have a dtype == 'M8[ns]', provide boxed values def infer(x): if x.empty: return lib.map_infer(x, func) return lib.map_infer(x.astype(object).values, func) return self.apply(infer) # ---------------------------------------------------------------------- # Merging / joining methods def append(self, other, ignore_index=False, verify_integrity=False, sort=None): """ Append rows of `other` to the end of caller, returning a new object. Columns in `other` that are not in the caller are added as new columns. Parameters ---------- other : DataFrame or Series/dict-like object, or list of these The data to append. ignore_index : boolean, default False If True, do not use the index labels. verify_integrity : boolean, default False If True, raise ValueError on creating index with duplicates. sort : boolean, default None Sort columns if the columns of `self` and `other` are not aligned. The default sorting is deprecated and will change to not-sorting in a future version of pandas. Explicitly pass ``sort=True`` to silence the warning and sort. Explicitly pass ``sort=False`` to silence the warning and not sort. .. versionadded:: 0.23.0 Returns ------- DataFrame See Also -------- concat : General function to concatenate DataFrame or Series objects. Notes ----- If a list of dict/series is passed and the keys are all contained in the DataFrame's index, the order of the columns in the resulting DataFrame will be unchanged. Iteratively appending rows to a DataFrame can be more computationally intensive than a single concatenate. A better solution is to append those rows to a list and then concatenate the list with the original DataFrame all at once. Examples -------- >>> df = pd.DataFrame([[1, 2], [3, 4]], columns=list('AB')) >>> df A B 0 1 2 1 3 4 >>> df2 = pd.DataFrame([[5, 6], [7, 8]], columns=list('AB')) >>> df.append(df2) A B 0 1 2 1 3 4 0 5 6 1 7 8 With `ignore_index` set to True: >>> df.append(df2, ignore_index=True) A B 0 1 2 1 3 4 2 5 6 3 7 8 The following, while not recommended methods for generating DataFrames, show two ways to generate a DataFrame from multiple data sources. Less efficient: >>> df = pd.DataFrame(columns=['A']) >>> for i in range(5): ... df = df.append({'A': i}, ignore_index=True) >>> df A 0 0 1 1 2 2 3 3 4 4 More efficient: >>> pd.concat([pd.DataFrame([i], columns=['A']) for i in range(5)], ... 
ignore_index=True) A 0 0 1 1 2 2 3 3 4 4 """ if isinstance(other, (Series, dict)): if isinstance(other, dict): other = Series(other) if other.name is None and not ignore_index: raise TypeError('Can only append a Series if ignore_index=True' ' or if the Series has a name') if other.name is None: index = None else: # other must have the same index name as self, otherwise # index name will be reset index = Index([other.name], name=self.index.name) idx_diff = other.index.difference(self.columns) try: combined_columns = self.columns.append(idx_diff) except TypeError: combined_columns = self.columns.astype(object).append(idx_diff) other = other.reindex(combined_columns, copy=False) other = DataFrame(other.values.reshape((1, len(other))), index=index, columns=combined_columns) other = other._convert(datetime=True, timedelta=True) if not self.columns.equals(combined_columns): self = self.reindex(columns=combined_columns) elif isinstance(other, list) and not isinstance(other[0], DataFrame): other = DataFrame(other) if (self.columns.get_indexer(other.columns) >= 0).all(): other = other.reindex(columns=self.columns) from pandas.core.reshape.concat import concat if isinstance(other, (list, tuple)): to_concat = [self] + other else: to_concat = [self, other] return concat(to_concat, ignore_index=ignore_index, verify_integrity=verify_integrity, sort=sort) def join(self, other, on=None, how='left', lsuffix='', rsuffix='', sort=False): """ Join columns of another DataFrame. Join columns with `other` DataFrame either on index or on a key column. Efficiently join multiple DataFrame objects by index at once by passing a list. Parameters ---------- other : DataFrame, Series, or list of DataFrame Index should be similar to one of the columns in this one. If a Series is passed, its name attribute must be set, and that will be used as the column name in the resulting joined DataFrame. on : str, list of str, or array-like, optional Column or index level name(s) in the caller to join on the index in `other`, otherwise joins index-on-index. If multiple values given, the `other` DataFrame must have a MultiIndex. Can pass an array as the join key if it is not already contained in the calling DataFrame. Like an Excel VLOOKUP operation. how : {'left', 'right', 'outer', 'inner'}, default 'left' How to handle the operation of the two objects. * left: use calling frame's index (or column if on is specified) * right: use `other`'s index. * outer: form union of calling frame's index (or column if on is specified) with `other`'s index, and sort it. lexicographically. * inner: form intersection of calling frame's index (or column if on is specified) with `other`'s index, preserving the order of the calling's one. lsuffix : str, default '' Suffix to use from left frame's overlapping columns. rsuffix : str, default '' Suffix to use from right frame's overlapping columns. sort : bool, default False Order result DataFrame lexicographically by the join key. If False, the order of the join key depends on the join type (how keyword). Returns ------- DataFrame A dataframe containing columns from both the caller and `other`. See Also -------- DataFrame.merge : For column(s)-on-columns(s) operations. Notes ----- Parameters `on`, `lsuffix`, and `rsuffix` are not supported when passing a list of `DataFrame` objects. Support for specifying index levels as the `on` parameter was added in version 0.23.0. Examples -------- >>> df = pd.DataFrame({'key': ['K0', 'K1', 'K2', 'K3', 'K4', 'K5'], ... 
'A': ['A0', 'A1', 'A2', 'A3', 'A4', 'A5']}) >>> df key A 0 K0 A0 1 K1 A1 2 K2 A2 3 K3 A3 4 K4 A4 5 K5 A5 >>> other = pd.DataFrame({'key': ['K0', 'K1', 'K2'], ... 'B': ['B0', 'B1', 'B2']}) >>> other key B 0 K0 B0 1 K1 B1 2 K2 B2 Join DataFrames using their indexes. >>> df.join(other, lsuffix='_caller', rsuffix='_other') key_caller A key_other B 0 K0 A0 K0 B0 1 K1 A1 K1 B1 2 K2 A2 K2 B2 3 K3 A3 NaN NaN 4 K4 A4 NaN NaN 5 K5 A5 NaN NaN If we want to join using the key columns, we need to set key to be the index in both `df` and `other`. The joined DataFrame will have key as its index. >>> df.set_index('key').join(other.set_index('key')) A B key K0 A0 B0 K1 A1 B1 K2 A2 B2 K3 A3 NaN K4 A4 NaN K5 A5 NaN Another option to join using the key columns is to use the `on` parameter. DataFrame.join always uses `other`'s index but we can use any column in `df`. This method preserves the original DataFrame's index in the result. >>> df.join(other.set_index('key'), on='key') key A B 0 K0 A0 B0 1 K1 A1 B1 2 K2 A2 B2 3 K3 A3 NaN 4 K4 A4 NaN 5 K5 A5 NaN """ # For SparseDataFrame's benefit return self._join_compat(other, on=on, how=how, lsuffix=lsuffix, rsuffix=rsuffix, sort=sort) def _join_compat(self, other, on=None, how='left', lsuffix='', rsuffix='', sort=False): from pandas.core.reshape.merge import merge from pandas.core.reshape.concat import concat if isinstance(other, Series): if other.name is None: raise ValueError('Other Series must have a name') other = DataFrame({other.name: other}) if isinstance(other, DataFrame): return merge(self, other, left_on=on, how=how, left_index=on is None, right_index=True, suffixes=(lsuffix, rsuffix), sort=sort) else: if on is not None: raise ValueError('Joining multiple DataFrames only supported' ' for joining on index') frames = [self] + list(other) can_concat = all(df.index.is_unique for df in frames) # join indexes only using concat if can_concat: if how == 'left': how = 'outer' join_axes = [self.index] else: join_axes = None return concat(frames, axis=1, join=how, join_axes=join_axes, verify_integrity=True) joined = frames[0] for frame in frames[1:]: joined = merge(joined, frame, how=how, left_index=True, right_index=True) return joined @Substitution('') @Appender(_merge_doc, indents=2) def merge(self, right, how='inner', on=None, left_on=None, right_on=None, left_index=False, right_index=False, sort=False, suffixes=('_x', '_y'), copy=True, indicator=False, validate=None): from pandas.core.reshape.merge import merge return merge(self, right, how=how, on=on, left_on=left_on, right_on=right_on, left_index=left_index, right_index=right_index, sort=sort, suffixes=suffixes, copy=copy, indicator=indicator, validate=validate) def round(self, decimals=0, *args, **kwargs): """ Round a DataFrame to a variable number of decimal places. Parameters ---------- decimals : int, dict, Series Number of decimal places to round each column to. If an int is given, round each column to the same number of places. Otherwise dict and Series round to variable numbers of places. Column names should be in the keys if `decimals` is a dict-like, or in the index if `decimals` is a Series. Any columns not included in `decimals` will be left as is. Elements of `decimals` which are not columns of the input will be ignored. *args Additional keywords have no effect but might be accepted for compatibility with numpy. **kwargs Additional keywords have no effect but might be accepted for compatibility with numpy. 
Returns ------- DataFrame A DataFrame with the affected columns rounded to the specified number of decimal places. See Also -------- numpy.around : Round a numpy array to the given number of decimals. Series.round : Round a Series to the given number of decimals. Examples -------- >>> df = pd.DataFrame([(.21, .32), (.01, .67), (.66, .03), (.21, .18)], ... columns=['dogs', 'cats']) >>> df dogs cats 0 0.21 0.32 1 0.01 0.67 2 0.66 0.03 3 0.21 0.18 By providing an integer each column is rounded to the same number of decimal places >>> df.round(1) dogs cats 0 0.2 0.3 1 0.0 0.7 2 0.7 0.0 3 0.2 0.2 With a dict, the number of places for specific columns can be specified with the column names as key and the number of decimal places as value >>> df.round({'dogs': 1, 'cats': 0}) dogs cats 0 0.2 0.0 1 0.0 1.0 2 0.7 0.0 3 0.2 0.0 Using a Series, the number of places for specific columns can be specified with the column names as index and the number of decimal places as value >>> decimals = pd.Series([0, 1], index=['cats', 'dogs']) >>> df.round(decimals) dogs cats 0 0.2 0.0 1 0.0 1.0 2 0.7 0.0 3 0.2 0.0 """ from pandas.core.reshape.concat import concat def _dict_round(df, decimals): for col, vals in df.iteritems(): try: yield _series_round(vals, decimals[col]) except KeyError: yield vals def _series_round(s, decimals): if is_integer_dtype(s) or is_float_dtype(s): return s.round(decimals) return s nv.validate_round(args, kwargs) if isinstance(decimals, (dict, Series)): if isinstance(decimals, Series): if not decimals.index.is_unique: raise ValueError("Index of decimals must be unique") new_cols = [col for col in _dict_round(self, decimals)] elif is_integer(decimals): # Dispatch to Series.round new_cols = [_series_round(v, decimals) for _, v in self.iteritems()] else: raise TypeError("decimals must be an integer, a dict-like or a " "Series") if len(new_cols) > 0: return self._constructor(concat(new_cols, axis=1), index=self.index, columns=self.columns) else: return self # ---------------------------------------------------------------------- # Statistical methods, etc. def corr(self, method='pearson', min_periods=1): """ Compute pairwise correlation of columns, excluding NA/null values. Parameters ---------- method : {'pearson', 'kendall', 'spearman'} or callable * pearson : standard correlation coefficient * kendall : Kendall Tau correlation coefficient * spearman : Spearman rank correlation * callable: callable with input two 1d ndarrays and returning a float. Note that the returned matrix from corr will have 1 along the diagonals and will be symmetric regardless of the callable's behavior .. versionadded:: 0.24.0 min_periods : int, optional Minimum number of observations required per pair of columns to have a valid result. Currently only available for Pearson and Spearman correlation. Returns ------- DataFrame Correlation matrix. See Also -------- DataFrame.corrwith Series.corr Examples -------- >>> def histogram_intersection(a, b): ... v = np.minimum(a, b).sum().round(decimals=1) ... return v >>> df = pd.DataFrame([(.2, .3), (.0, .6), (.6, .0), (.2, .1)], ... 
columns=['dogs', 'cats']) >>> df.corr(method=histogram_intersection) dogs cats dogs 1.0 0.3 cats 0.3 1.0 """ numeric_df = self._get_numeric_data() cols = numeric_df.columns idx = cols.copy() mat = numeric_df.values if method == 'pearson': correl = libalgos.nancorr(ensure_float64(mat), minp=min_periods) elif method == 'spearman': correl = libalgos.nancorr_spearman(ensure_float64(mat), minp=min_periods) elif method == 'kendall' or callable(method): if min_periods is None: min_periods = 1 mat = ensure_float64(mat).T corrf = nanops.get_corr_func(method) K = len(cols) correl = np.empty((K, K), dtype=float) mask = np.isfinite(mat) for i, ac in enumerate(mat): for j, bc in enumerate(mat): if i > j: continue valid = mask[i] & mask[j] if valid.sum() < min_periods: c = np.nan elif i == j: c = 1. elif not valid.all(): c = corrf(ac[valid], bc[valid]) else: c = corrf(ac, bc) correl[i, j] = c correl[j, i] = c else: raise ValueError("method must be either 'pearson', " "'spearman', 'kendall', or a callable, " "'{method}' was supplied".format(method=method)) return self._constructor(correl, index=idx, columns=cols) def cov(self, min_periods=None): """ Compute pairwise covariance of columns, excluding NA/null values. Compute the pairwise covariance among the series of a DataFrame. The returned data frame is the `covariance matrix <https://en.wikipedia.org/wiki/Covariance_matrix>`__ of the columns of the DataFrame. Both NA and null values are automatically excluded from the calculation. (See the note below about bias from missing values.) A threshold can be set for the minimum number of observations for each value created. Comparisons with observations below this threshold will be returned as ``NaN``. This method is generally used for the analysis of time series data to understand the relationship between different measures across time. Parameters ---------- min_periods : int, optional Minimum number of observations required per pair of columns to have a valid result. Returns ------- DataFrame The covariance matrix of the series of the DataFrame. See Also -------- Series.cov : Compute covariance with another Series. core.window.EWM.cov: Exponential weighted sample covariance. core.window.Expanding.cov : Expanding sample covariance. core.window.Rolling.cov : Rolling sample covariance. Notes ----- Returns the covariance matrix of the DataFrame's time series. The covariance is normalized by N-1. For DataFrames that have Series that are missing data (assuming that data is `missing at random <https://en.wikipedia.org/wiki/Missing_data#Missing_at_random>`__) the returned covariance matrix will be an unbiased estimate of the variance and covariance between the member Series. However, for many applications this estimate may not be acceptable because the estimate covariance matrix is not guaranteed to be positive semi-definite. This could lead to estimate correlations having absolute values which are greater than one, and/or a non-invertible covariance matrix. See `Estimation of covariance matrices <http://en.wikipedia.org/w/index.php?title=Estimation_of_covariance_ matrices>`__ for more details. Examples -------- >>> df = pd.DataFrame([(1, 2), (0, 3), (2, 0), (1, 1)], ... columns=['dogs', 'cats']) >>> df.cov() dogs cats dogs 0.666667 -1.000000 cats -1.000000 1.666667 >>> np.random.seed(42) >>> df = pd.DataFrame(np.random.randn(1000, 5), ... 
columns=['a', 'b', 'c', 'd', 'e']) >>> df.cov() a b c d e a 0.998438 -0.020161 0.059277 -0.008943 0.014144 b -0.020161 1.059352 -0.008543 -0.024738 0.009826 c 0.059277 -0.008543 1.010670 -0.001486 -0.000271 d -0.008943 -0.024738 -0.001486 0.921297 -0.013692 e 0.014144 0.009826 -0.000271 -0.013692 0.977795 **Minimum number of periods** This method also supports an optional ``min_periods`` keyword that specifies the required minimum number of non-NA observations for each column pair in order to have a valid result: >>> np.random.seed(42) >>> df = pd.DataFrame(np.random.randn(20, 3), ... columns=['a', 'b', 'c']) >>> df.loc[df.index[:5], 'a'] = np.nan >>> df.loc[df.index[5:10], 'b'] = np.nan >>> df.cov(min_periods=12) a b c a 0.316741 NaN -0.150812 b NaN 1.248003 0.191417 c -0.150812 0.191417 0.895202 """ numeric_df = self._get_numeric_data() cols = numeric_df.columns idx = cols.copy() mat = numeric_df.values if notna(mat).all(): if min_periods is not None and min_periods > len(mat): baseCov = np.empty((mat.shape[1], mat.shape[1])) baseCov.fill(np.nan) else: baseCov = np.cov(mat.T) baseCov = baseCov.reshape((len(cols), len(cols))) else: baseCov = libalgos.nancorr(ensure_float64(mat), cov=True, minp=min_periods) return self._constructor(baseCov, index=idx, columns=cols) def corrwith(self, other, axis=0, drop=False, method='pearson'): """ Compute pairwise correlation between rows or columns of DataFrame with rows or columns of Series or DataFrame. DataFrames are first aligned along both axes before computing the correlations. Parameters ---------- other : DataFrame, Series Object with which to compute correlations. axis : {0 or 'index', 1 or 'columns'}, default 0 0 or 'index' to compute column-wise, 1 or 'columns' for row-wise. drop : bool, default False Drop missing indices from result. method : {'pearson', 'kendall', 'spearman'} or callable * pearson : standard correlation coefficient * kendall : Kendall Tau correlation coefficient * spearman : Spearman rank correlation * callable: callable with input two 1d ndarrays and returning a float .. versionadded:: 0.24.0 Returns ------- Series Pairwise correlations. See Also -------- DataFrame.corr """ axis = self._get_axis_number(axis) this = self._get_numeric_data() if isinstance(other, Series): return this.apply(lambda x: other.corr(x, method=method), axis=axis) other = other._get_numeric_data() left, right = this.align(other, join='inner', copy=False) if axis == 1: left = left.T right = right.T if method == 'pearson': # mask missing values left = left + right * 0 right = right + left * 0 # demeaned data ldem = left - left.mean() rdem = right - right.mean() num = (ldem * rdem).sum() dom = (left.count() - 1) * left.std() * right.std() correl = num / dom elif method in ['kendall', 'spearman'] or callable(method): def c(x): return nanops.nancorr(x[0], x[1], method=method) correl = Series(map(c, zip(left.values.T, right.values.T)), index=left.columns) else: raise ValueError("Invalid method {method} was passed, " "valid methods are: 'pearson', 'kendall', " "'spearman', or callable". format(method=method)) if not drop: # Find non-matching labels along the given axis # and append missing correlations (GH 22375) raxis = 1 if axis == 0 else 0 result_index = (this._get_axis(raxis). 
union(other._get_axis(raxis))) idx_diff = result_index.difference(correl.index) if len(idx_diff) > 0: correl = correl.append(Series([np.nan] * len(idx_diff), index=idx_diff)) return correl # ---------------------------------------------------------------------- # ndarray-like stats methods def count(self, axis=0, level=None, numeric_only=False): """ Count non-NA cells for each column or row. The values `None`, `NaN`, `NaT`, and optionally `numpy.inf` (depending on `pandas.options.mode.use_inf_as_na`) are considered NA. Parameters ---------- axis : {0 or 'index', 1 or 'columns'}, default 0 If 0 or 'index' counts are generated for each column. If 1 or 'columns' counts are generated for each **row**. level : int or str, optional If the axis is a `MultiIndex` (hierarchical), count along a particular `level`, collapsing into a `DataFrame`. A `str` specifies the level name. numeric_only : bool, default False Include only `float`, `int` or `boolean` data. Returns ------- Series or DataFrame For each column/row the number of non-NA/null entries. If `level` is specified returns a `DataFrame`. See Also -------- Series.count: Number of non-NA elements in a Series. DataFrame.shape: Number of DataFrame rows and columns (including NA elements). DataFrame.isna: Boolean same-sized DataFrame showing places of NA elements. Examples -------- Constructing DataFrame from a dictionary: >>> df = pd.DataFrame({"Person": ... ["John", "Myla", "Lewis", "John", "Myla"], ... "Age": [24., np.nan, 21., 33, 26], ... "Single": [False, True, True, True, False]}) >>> df Person Age Single 0 John 24.0 False 1 Myla NaN True 2 Lewis 21.0 True 3 John 33.0 True 4 Myla 26.0 False Notice the uncounted NA values: >>> df.count() Person 5 Age 4 Single 5 dtype: int64 Counts for each **row**: >>> df.count(axis='columns') 0 3 1 2 2 3 3 3 4 3 dtype: int64 Counts for one level of a `MultiIndex`: >>> df.set_index(["Person", "Single"]).count(level="Person") Age Person John 2 Lewis 1 Myla 1 """ axis = self._get_axis_number(axis) if level is not None: return self._count_level(level, axis=axis, numeric_only=numeric_only) if numeric_only: frame = self._get_numeric_data() else: frame = self # GH #423 if len(frame._get_axis(axis)) == 0: result = Series(0, index=frame._get_agg_axis(axis)) else: if frame._is_mixed_type or frame._data.any_extension_types: # the or any_extension_types is really only hit for single- # column frames with an extension array result = notna(frame).sum(axis=axis) else: # GH13407 series_counts = notna(frame).sum(axis=axis) counts = series_counts.values result = Series(counts, index=frame._get_agg_axis(axis)) return result.astype('int64') def _count_level(self, level, axis=0, numeric_only=False): if numeric_only: frame = self._get_numeric_data() else: frame = self count_axis = frame._get_axis(axis) agg_axis = frame._get_agg_axis(axis) if not isinstance(count_axis, MultiIndex): raise TypeError("Can only count levels on hierarchical " "{ax}.".format(ax=self._get_axis_name(axis))) if frame._is_mixed_type: # Since we have mixed types, calling notna(frame.values) might # upcast everything to object mask = notna(frame).values else: # But use the speedup when we have homogeneous dtypes mask = notna(frame.values) if axis == 1: # We're transposing the mask rather than frame to avoid potential # upcasts to object, which induces a ~20x slowdown mask = mask.T if isinstance(level, str): level = count_axis._get_level_number(level) level_index = count_axis.levels[level] level_codes = ensure_int64(count_axis.codes[level]) counts = 
lib.count_level_2d(mask, level_codes, len(level_index), axis=0) result = DataFrame(counts, index=level_index, columns=agg_axis) if axis == 1: # Undo our earlier transpose return result.T else: return result def _reduce(self, op, name, axis=0, skipna=True, numeric_only=None, filter_type=None, **kwds): if axis is None and filter_type == 'bool': labels = None constructor = None else: # TODO: Make other agg func handle axis=None properly axis = self._get_axis_number(axis) labels = self._get_agg_axis(axis) constructor = self._constructor def f(x): return op(x, axis=axis, skipna=skipna, **kwds) # exclude timedelta/datetime unless we are uniform types if (axis == 1 and self._is_datelike_mixed_type and (not self._is_homogeneous_type and not is_datetime64tz_dtype(self.dtypes[0]))): numeric_only = True if numeric_only is None: try: values = self.values result = f(values) if (filter_type == 'bool' and is_object_dtype(values) and axis is None): # work around https://github.com/numpy/numpy/issues/10489 # TODO: combine with hasattr(result, 'dtype') further down # hard since we don't have `values` down there. result = np.bool_(result) except Exception as e: # try by-column first if filter_type is None and axis == 0: try: # this can end up with a non-reduction # but not always. if the types are mixed # with datelike then need to make sure a series # we only end up here if we have not specified # numeric_only and yet we have tried a # column-by-column reduction, where we have mixed type. # So let's just do what we can from pandas.core.apply import frame_apply opa = frame_apply(self, func=f, result_type='expand', ignore_failures=True) result = opa.get_result() if result.ndim == self.ndim: result = result.iloc[0] return result except Exception: pass if filter_type is None or filter_type == 'numeric': data = self._get_numeric_data() elif filter_type == 'bool': data = self._get_bool_data() else: # pragma: no cover e = NotImplementedError( "Handling exception with filter_type {f} not " "implemented.".format(f=filter_type)) raise_with_traceback(e) with np.errstate(all='ignore'): result = f(data.values) labels = data._get_agg_axis(axis) else: if numeric_only: if filter_type is None or filter_type == 'numeric': data = self._get_numeric_data() elif filter_type == 'bool': # GH 25101, # GH 24434 data = self._get_bool_data() if axis == 0 else self else: # pragma: no cover msg = ("Generating numeric_only data with filter_type {f} " "not supported.".format(f=filter_type)) raise NotImplementedError(msg) values = data.values labels = data._get_agg_axis(axis) else: values = self.values result = f(values) if hasattr(result, 'dtype') and is_object_dtype(result.dtype): try: if filter_type is None or filter_type == 'numeric': result = result.astype(np.float64) elif filter_type == 'bool' and notna(result).all(): result = result.astype(np.bool_) except (ValueError, TypeError): # try to coerce to the original dtypes item by item if we can if axis == 0: result = coerce_to_dtypes(result, self.dtypes) if constructor is not None: result = Series(result, index=labels) return result def nunique(self, axis=0, dropna=True): """ Count distinct observations over requested axis. Return Series with number of distinct observations. Can ignore NaN values. .. versionadded:: 0.20.0 Parameters ---------- axis : {0 or 'index', 1 or 'columns'}, default 0 The axis to use. 0 or 'index' for row-wise, 1 or 'columns' for column-wise. dropna : bool, default True Don't include NaN in the counts.
Returns ------- Series See Also -------- Series.nunique: Method nunique for Series. DataFrame.count: Count non-NA cells for each column or row. Examples -------- >>> df = pd.DataFrame({'A': [1, 2, 3], 'B': [1, 1, 1]}) >>> df.nunique() A 3 B 1 dtype: int64 >>> df.nunique(axis=1) 0 1 1 2 2 2 dtype: int64 """ return self.apply(Series.nunique, axis=axis, dropna=dropna) def idxmin(self, axis=0, skipna=True): """ Return index of first occurrence of minimum over requested axis. NA/null values are excluded. Parameters ---------- axis : {0 or 'index', 1 or 'columns'}, default 0 0 or 'index' for row-wise, 1 or 'columns' for column-wise skipna : boolean, default True Exclude NA/null values. If an entire row/column is NA, the result will be NA. Returns ------- Series Indexes of minima along the specified axis. Raises ------ ValueError * If the row/column is empty See Also -------- Series.idxmin Notes ----- This method is the DataFrame version of ``ndarray.argmin``. """ axis = self._get_axis_number(axis) indices = nanops.nanargmin(self.values, axis=axis, skipna=skipna) index = self._get_axis(axis) result = [index[i] if i >= 0 else np.nan for i in indices] return Series(result, index=self._get_agg_axis(axis)) def idxmax(self, axis=0, skipna=True): """ Return index of first occurrence of maximum over requested axis. NA/null values are excluded. Parameters ---------- axis : {0 or 'index', 1 or 'columns'}, default 0 0 or 'index' for row-wise, 1 or 'columns' for column-wise skipna : boolean, default True Exclude NA/null values. If an entire row/column is NA, the result will be NA. Returns ------- Series Indexes of maxima along the specified axis. Raises ------ ValueError * If the row/column is empty See Also -------- Series.idxmax Notes ----- This method is the DataFrame version of ``ndarray.argmax``. """ axis = self._get_axis_number(axis) indices = nanops.nanargmax(self.values, axis=axis, skipna=skipna) index = self._get_axis(axis) result = [index[i] if i >= 0 else np.nan for i in indices] return Series(result, index=self._get_agg_axis(axis)) def _get_agg_axis(self, axis_num): """ Let's be explicit about this. """ if axis_num == 0: return self.columns elif axis_num == 1: return self.index else: raise ValueError('Axis must be 0 or 1 (got %r)' % axis_num) def mode(self, axis=0, numeric_only=False, dropna=True): """ Get the mode(s) of each element along the selected axis. The mode of a set of values is the value that appears most often. It can be multiple values. Parameters ---------- axis : {0 or 'index', 1 or 'columns'}, default 0 The axis to iterate over while searching for the mode: * 0 or 'index' : get mode of each column * 1 or 'columns' : get mode of each row numeric_only : bool, default False If True, only apply to numeric columns. dropna : bool, default True Don't consider counts of NaN/NaT. .. versionadded:: 0.24.0 Returns ------- DataFrame The modes of each column or row. See Also -------- Series.mode : Return the highest frequency value in a Series. Series.value_counts : Return the counts of values in a Series. Examples -------- >>> df = pd.DataFrame([('bird', 2, 2), ... ('mammal', 4, np.nan), ... ('arthropod', 8, 0), ... ('bird', 2, np.nan)], ... index=('falcon', 'horse', 'spider', 'ostrich'), ... columns=('species', 'legs', 'wings')) >>> df species legs wings falcon bird 2 2.0 horse mammal 4 NaN spider arthropod 8 0.0 ostrich bird 2 NaN By default, missing values are not considered, and the mode of wings are both 0 and 2. 
The second row of species and legs contains ``NaN``, because they have only one mode, but the DataFrame has two rows. >>> df.mode() species legs wings 0 bird 2.0 0.0 1 NaN NaN 2.0 Setting ``dropna=False`` ``NaN`` values are considered and they can be the mode (like for wings). >>> df.mode(dropna=False) species legs wings 0 bird 2 NaN Setting ``numeric_only=True``, only the mode of numeric columns is computed, and columns of other types are ignored. >>> df.mode(numeric_only=True) legs wings 0 2.0 0.0 1 NaN 2.0 To compute the mode over columns and not rows, use the axis parameter: >>> df.mode(axis='columns', numeric_only=True) 0 1 falcon 2.0 NaN horse 4.0 NaN spider 0.0 8.0 ostrich 2.0 NaN """ data = self if not numeric_only else self._get_numeric_data() def f(s): return s.mode(dropna=dropna) return data.apply(f, axis=axis) def quantile(self, q=0.5, axis=0, numeric_only=True, interpolation='linear'): """ Return values at the given quantile over requested axis. Parameters ---------- q : float or array-like, default 0.5 (50% quantile) Value between 0 <= q <= 1, the quantile(s) to compute. axis : {0, 1, 'index', 'columns'} (default 0) Equals 0 or 'index' for row-wise, 1 or 'columns' for column-wise. numeric_only : bool, default True If False, the quantile of datetime and timedelta data will be computed as well. interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'} This optional parameter specifies the interpolation method to use, when the desired quantile lies between two data points `i` and `j`: * linear: `i + (j - i) * fraction`, where `fraction` is the fractional part of the index surrounded by `i` and `j`. * lower: `i`. * higher: `j`. * nearest: `i` or `j` whichever is nearest. * midpoint: (`i` + `j`) / 2. .. versionadded:: 0.18.0 Returns ------- Series or DataFrame If ``q`` is an array, a DataFrame will be returned where the index is ``q``, the columns are the columns of self, and the values are the quantiles. If ``q`` is a float, a Series will be returned where the index is the columns of self and the values are the quantiles. See Also -------- core.window.Rolling.quantile: Rolling quantile. numpy.percentile: Numpy function to compute the percentile. Examples -------- >>> df = pd.DataFrame(np.array([[1, 1], [2, 10], [3, 100], [4, 100]]), ... columns=['a', 'b']) >>> df.quantile(.1) a 1.3 b 3.7 Name: 0.1, dtype: float64 >>> df.quantile([.1, .5]) a b 0.1 1.3 3.7 0.5 2.5 55.0 Specifying `numeric_only=False` will also compute the quantile of datetime and timedelta data. >>> df = pd.DataFrame({'A': [1, 2], ... 'B': [pd.Timestamp('2010'), ... pd.Timestamp('2011')], ... 'C': [pd.Timedelta('1 days'), ... pd.Timedelta('2 days')]}) >>> df.quantile(0.5, numeric_only=False) A 1.5 B 2010-07-02 12:00:00 C 1 days 12:00:00 Name: 0.5, dtype: object """ self._check_percentile(q) data = self._get_numeric_data() if numeric_only else self axis = self._get_axis_number(axis) is_transposed = axis == 1 if is_transposed: data = data.T result = data._data.quantile(qs=q, axis=1, interpolation=interpolation, transposed=is_transposed) if result.ndim == 2: result = self._constructor(result) else: result = self._constructor_sliced(result, name=q) if is_transposed: result = result.T return result def to_timestamp(self, freq=None, how='start', axis=0, copy=True): """ Cast to DatetimeIndex of timestamps, at *beginning* of period. Parameters ---------- freq : str, default frequency of PeriodIndex Desired frequency. 
how : {'s', 'e', 'start', 'end'} Convention for converting period to timestamp; start of period vs. end. axis : {0 or 'index', 1 or 'columns'}, default 0 The axis to convert (the index by default). copy : bool, default True If False then underlying input data is not copied. Returns ------- DataFrame with DatetimeIndex """ new_data = self._data if copy: new_data = new_data.copy() axis = self._get_axis_number(axis) if axis == 0: new_data.set_axis(1, self.index.to_timestamp(freq=freq, how=how)) elif axis == 1: new_data.set_axis(0, self.columns.to_timestamp(freq=freq, how=how)) else: # pragma: no cover raise AssertionError('Axis must be 0 or 1. Got {ax!s}'.format( ax=axis)) return self._constructor(new_data) def to_period(self, freq=None, axis=0, copy=True): """ Convert DataFrame from DatetimeIndex to PeriodIndex with desired frequency (inferred from index if not passed). Parameters ---------- freq : str, default Frequency of the PeriodIndex. axis : {0 or 'index', 1 or 'columns'}, default 0 The axis to convert (the index by default). copy : bool, default True If False then underlying input data is not copied. Returns ------- TimeSeries with PeriodIndex """ new_data = self._data if copy: new_data = new_data.copy() axis = self._get_axis_number(axis) if axis == 0: new_data.set_axis(1, self.index.to_period(freq=freq)) elif axis == 1: new_data.set_axis(0, self.columns.to_period(freq=freq)) else: # pragma: no cover raise AssertionError('Axis must be 0 or 1. Got {ax!s}'.format( ax=axis)) return self._constructor(new_data) def isin(self, values): """ Whether each element in the DataFrame is contained in values. Parameters ---------- values : iterable, Series, DataFrame or dict The result will only be true at a location if all the labels match. If `values` is a Series, that's the index. If `values` is a dict, the keys must be the column names, which must match. If `values` is a DataFrame, then both the index and column labels must match. Returns ------- DataFrame DataFrame of booleans showing whether each element in the DataFrame is contained in values. See Also -------- DataFrame.eq: Equality test for DataFrame. Series.isin: Equivalent method on Series. Series.str.contains: Test if pattern or regex is contained within a string of a Series or Index. Examples -------- >>> df = pd.DataFrame({'num_legs': [2, 4], 'num_wings': [2, 0]}, ... index=['falcon', 'dog']) >>> df num_legs num_wings falcon 2 2 dog 4 0 When ``values`` is a list check whether every value in the DataFrame is present in the list (which animals have 0 or 2 legs or wings) >>> df.isin([0, 2]) num_legs num_wings falcon True True dog False True When ``values`` is a dict, we can pass values to check for each column separately: >>> df.isin({'num_wings': [0, 3]}) num_legs num_wings falcon False False dog False True When ``values`` is a Series or DataFrame the index and column must match. Note that 'falcon' does not match based on the number of legs in df2. >>> other = pd.DataFrame({'num_legs': [8, 2], 'num_wings': [0, 2]}, ... 
index=['spider', 'falcon']) >>> df.isin(other) num_legs num_wings falcon True True dog False False """ if isinstance(values, dict): from pandas.core.reshape.concat import concat values = collections.defaultdict(list, values) return concat((self.iloc[:, [i]].isin(values[col]) for i, col in enumerate(self.columns)), axis=1) elif isinstance(values, Series): if not values.index.is_unique: raise ValueError("cannot compute isin with " "a duplicate axis.") return self.eq(values.reindex_like(self), axis='index') elif isinstance(values, DataFrame): if not (values.columns.is_unique and values.index.is_unique): raise ValueError("cannot compute isin with " "a duplicate axis.") return self.eq(values.reindex_like(self)) else: if not is_list_like(values): raise TypeError("only list-like or dict-like objects are " "allowed to be passed to DataFrame.isin(), " "you passed a " "{0!r}".format(type(values).__name__)) return DataFrame( algorithms.isin(self.values.ravel(), values).reshape(self.shape), self.index, self.columns) # ---------------------------------------------------------------------- # Add plotting methods to DataFrame plot = CachedAccessor("plot", pandas.plotting.FramePlotMethods) hist = pandas.plotting.hist_frame boxplot = pandas.plotting.boxplot_frame sparse = CachedAccessor("sparse", SparseFrameAccessor) DataFrame._setup_axes(['index', 'columns'], info_axis=1, stat_axis=0, axes_are_reversed=True, aliases={'rows': 0}, docs={ 'index': 'The index (row labels) of the DataFrame.', 'columns': 'The column labels of the DataFrame.'}) DataFrame._add_numeric_operations() DataFrame._add_series_or_dataframe_operations() ops.add_flex_arithmetic_methods(DataFrame) ops.add_special_arithmetic_methods(DataFrame) def _from_nested_dict(data): # TODO: this should be seriously cythonized new_data = OrderedDict() for index, s in data.items(): for col, v in s.items(): new_data[col] = new_data.get(col, OrderedDict()) new_data[col][index] = v return new_data def _put_str(s, space): return '{s}'.format(s=s)[:space].ljust(space)
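The frame.py excerpt above documents, among others, DataFrame.idxmin/idxmax and DataFrame.isin. A minimal sketch of those calls on a toy frame (the data and names here are illustrative only and are not taken from the record above):

import pandas as pd

# Small frame used only for illustration.
df = pd.DataFrame({'consumption': [10.51, 103.11, 55.48],
                   'co2_emissions': [37.2, 19.66, 1712.0]},
                  index=['Pork', 'Wheat Products', 'Beef'])

# idxmax/idxmin return the index label of the first max/min along the axis.
print(df.idxmax())        # per column: label of the largest value
print(df.idxmin(axis=1))  # per row: label of the column holding the smaller value

# isin with a dict checks each listed column independently; unlisted columns are all False.
print(df.isin({'co2_emissions': [37.2, 19.66]}))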
from collections import OrderedDict import numpy as np from numpy import nan from numpy.random import randn import pytest import pandas as pd from pandas import DataFrame, Index, MultiIndex, Series from pandas.core.reshape.concat import concat from pandas.core.reshape.merge import merge import pandas.util.testing as tm @pytest.fixture def left(): """left dataframe (not multi-indexed) for multi-index join tests""" # a little relevant example with NAs key1 = ['bar', 'bar', 'bar', 'foo', 'foo', 'baz', 'baz', 'qux', 'qux', 'snap'] key2 = ['two', 'one', 'three', 'one', 'two', 'one', 'two', 'two', 'three', 'one'] data = np.random.randn(len(key1)) return DataFrame({'key1': key1, 'key2': key2, 'data': data}) @pytest.fixture def right(): """right dataframe (multi-indexed) for multi-index join tests""" index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'], ['one', 'two', 'three']], codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]], names=['key1', 'key2']) return DataFrame(np.random.randn(10, 3), index=index, columns=['j_one', 'j_two', 'j_three']) @pytest.fixture def left_multi(): return ( DataFrame( dict(Origin=['A', 'A', 'B', 'B', 'C'], Destination=['A', 'B', 'A', 'C', 'A'], Period=['AM', 'AM', 'IP', 'AM', 'OP'], TripPurp=['hbw', 'nhb', 'hbo', 'nhb', 'hbw'], Trips=[1987, 3647, 2470, 4296, 4444]), columns=['Origin', 'Destination', 'Period', 'TripPurp', 'Trips']) .set_index(['Origin', 'Destination', 'Period', 'TripPurp'])) @pytest.fixture def right_multi(): return ( DataFrame( dict(Origin=['A', 'A', 'B', 'B', 'C', 'C', 'E'], Destination=['A', 'B', 'A', 'B', 'A', 'B', 'F'], Period=['AM', 'AM', 'IP', 'AM', 'OP', 'IP', 'AM'], LinkType=['a', 'b', 'c', 'b', 'a', 'b', 'a'], Distance=[100, 80, 90, 80, 75, 35, 55]), columns=['Origin', 'Destination', 'Period', 'LinkType', 'Distance']) .set_index(['Origin', 'Destination', 'Period', 'LinkType'])) @pytest.fixture def on_cols_multi(): return ['Origin', 'Destination', 'Period'] @pytest.fixture def idx_cols_multi(): return ['Origin', 'Destination', 'Period', 'TripPurp', 'LinkType'] class TestMergeMulti: def setup_method(self): self.index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'], ['one', 'two', 'three']], codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]], names=['first', 'second']) self.to_join = DataFrame(np.random.randn(10, 3), index=self.index, columns=['j_one', 'j_two', 'j_three']) # a little relevant example with NAs key1 = ['bar', 'bar', 'bar', 'foo', 'foo', 'baz', 'baz', 'qux', 'qux', 'snap'] key2 = ['two', 'one', 'three', 'one', 'two', 'one', 'two', 'two', 'three', 'one'] data = np.random.randn(len(key1)) self.data = DataFrame({'key1': key1, 'key2': key2, 'data': data}) def test_merge_on_multikey(self, left, right, join_type): on_cols = ['key1', 'key2'] result = (left.join(right, on=on_cols, how=join_type) .reset_index(drop=True)) expected = pd.merge(left, right.reset_index(), on=on_cols, how=join_type) tm.assert_frame_equal(result, expected) result = (left.join(right, on=on_cols, how=join_type, sort=True) .reset_index(drop=True)) expected = pd.merge(left, right.reset_index(), on=on_cols, how=join_type, sort=True) tm.assert_frame_equal(result, expected) @pytest.mark.parametrize("sort", [False, True]) def test_left_join_multi_index(self, left, right, sort): icols = ['1st', '2nd', '3rd'] def bind_cols(df): iord = lambda a: 0 if a != a else ord(a) f = lambda ts: ts.map(iord) - ord('a') return (f(df['1st']) + f(df['3rd']) * 1e2 + df['2nd'].fillna(0) * 1e4) def run_asserts(left, right, sort): res = 
left.join(right, on=icols, how='left', sort=sort) assert len(left) < len(res) + 1 assert not res['4th'].isna().any() assert not res['5th'].isna().any() tm.assert_series_equal( res['4th'], - res['5th'], check_names=False) result = bind_cols(res.iloc[:, :-2]) tm.assert_series_equal(res['4th'], result, check_names=False) assert result.name is None if sort: tm.assert_frame_equal( res, res.sort_values(icols, kind='mergesort')) out = merge(left, right.reset_index(), on=icols, sort=sort, how='left') res.index = np.arange(len(res)) tm.assert_frame_equal(out, res) lc = list(map(chr, np.arange(ord('a'), ord('z') + 1))) left = DataFrame(np.random.choice(lc, (5000, 2)), columns=['1st', '3rd']) left.insert(1, '2nd', np.random.randint(0, 1000, len(left))) i = np.random.permutation(len(left)) right = left.iloc[i].copy() left['4th'] = bind_cols(left) right['5th'] = - bind_cols(right) right.set_index(icols, inplace=True) run_asserts(left, right, sort) # inject some nulls left.loc[1::23, '1st'] = np.nan left.loc[2::37, '2nd'] = np.nan left.loc[3::43, '3rd'] = np.nan left['4th'] = bind_cols(left) i = np.random.permutation(len(left)) right = left.iloc[i, :-1] right['5th'] = - bind_cols(right) right.set_index(icols, inplace=True) run_asserts(left, right, sort) @pytest.mark.parametrize("sort", [False, True]) def test_merge_right_vs_left(self, left, right, sort): # compare left vs right merge with multikey on_cols = ['key1', 'key2'] merged_left_right = left.merge(right, left_on=on_cols, right_index=True, how='left', sort=sort) merge_right_left = right.merge(left, right_on=on_cols, left_index=True, how='right', sort=sort) # Reorder columns merge_right_left = merge_right_left[merged_left_right.columns] tm.assert_frame_equal(merged_left_right, merge_right_left) def test_compress_group_combinations(self): # ~ 40000000 possible unique groups key1 = tm.rands_array(10, 10000) key1 = np.tile(key1, 2) key2 = key1[::-1] df = DataFrame({'key1': key1, 'key2': key2, 'value1': np.random.randn(20000)}) df2 = DataFrame({'key1': key1[::2], 'key2': key2[::2], 'value2': np.random.randn(10000)}) # just to hit the label compression code path merge(df, df2, how='outer') def test_left_join_index_preserve_order(self): on_cols = ['k1', 'k2'] left = DataFrame({'k1': [0, 1, 2] * 8, 'k2': ['foo', 'bar'] * 12, 'v': np.array(np.arange(24), dtype=np.int64)}) index = MultiIndex.from_tuples([(2, 'bar'), (1, 'foo')]) right = DataFrame({'v2': [5, 7]}, index=index) result = left.join(right, on=on_cols) expected = left.copy() expected['v2'] = np.nan expected.loc[(expected.k1 == 2) & (expected.k2 == 'bar'), 'v2'] = 5 expected.loc[(expected.k1 == 1) & (expected.k2 == 'foo'), 'v2'] = 7 tm.assert_frame_equal(result, expected) result.sort_values(on_cols, kind='mergesort', inplace=True) expected = left.join(right, on=on_cols, sort=True) tm.assert_frame_equal(result, expected) # test join with multi dtypes blocks left = DataFrame({'k1': [0, 1, 2] * 8, 'k2': ['foo', 'bar'] * 12, 'k3': np.array([0, 1, 2] * 8, dtype=np.float32), 'v': np.array(np.arange(24), dtype=np.int32)}) index = MultiIndex.from_tuples([(2, 'bar'), (1, 'foo')]) right = DataFrame({'v2': [5, 7]}, index=index) result = left.join(right, on=on_cols) expected = left.copy() expected['v2'] = np.nan expected.loc[(expected.k1 == 2) & (expected.k2 == 'bar'), 'v2'] = 5 expected.loc[(expected.k1 == 1) & (expected.k2 == 'foo'), 'v2'] = 7 tm.assert_frame_equal(result, expected) result = result.sort_values(on_cols, kind='mergesort') expected = left.join(right, on=on_cols, sort=True) 
tm.assert_frame_equal(result, expected) def test_left_join_index_multi_match_multiindex(self): left = DataFrame([ ['X', 'Y', 'C', 'a'], ['W', 'Y', 'C', 'e'], ['V', 'Q', 'A', 'h'], ['V', 'R', 'D', 'i'], ['X', 'Y', 'D', 'b'], ['X', 'Y', 'A', 'c'], ['W', 'Q', 'B', 'f'], ['W', 'R', 'C', 'g'], ['V', 'Y', 'C', 'j'], ['X', 'Y', 'B', 'd']], columns=['cola', 'colb', 'colc', 'tag'], index=[3, 2, 0, 1, 7, 6, 4, 5, 9, 8]) right = (DataFrame([ ['W', 'R', 'C', 0], ['W', 'Q', 'B', 3], ['W', 'Q', 'B', 8], ['X', 'Y', 'A', 1], ['X', 'Y', 'A', 4], ['X', 'Y', 'B', 5], ['X', 'Y', 'C', 6], ['X', 'Y', 'C', 9], ['X', 'Q', 'C', -6], ['X', 'R', 'C', -9], ['V', 'Y', 'C', 7], ['V', 'R', 'D', 2], ['V', 'R', 'D', -1], ['V', 'Q', 'A', -3]], columns=['col1', 'col2', 'col3', 'val']) .set_index(['col1', 'col2', 'col3'])) result = left.join(right, on=['cola', 'colb', 'colc'], how='left') expected = DataFrame([ ['X', 'Y', 'C', 'a', 6], ['X', 'Y', 'C', 'a', 9], ['W', 'Y', 'C', 'e', nan], ['V', 'Q', 'A', 'h', -3], ['V', 'R', 'D', 'i', 2], ['V', 'R', 'D', 'i', -1], ['X', 'Y', 'D', 'b', nan], ['X', 'Y', 'A', 'c', 1], ['X', 'Y', 'A', 'c', 4], ['W', 'Q', 'B', 'f', 3], ['W', 'Q', 'B', 'f', 8], ['W', 'R', 'C', 'g', 0], ['V', 'Y', 'C', 'j', 7], ['X', 'Y', 'B', 'd', 5]], columns=['cola', 'colb', 'colc', 'tag', 'val'], index=[3, 3, 2, 0, 1, 1, 7, 6, 6, 4, 4, 5, 9, 8]) tm.assert_frame_equal(result, expected) result = left.join(right, on=['cola', 'colb', 'colc'], how='left', sort=True) expected = expected.sort_values(['cola', 'colb', 'colc'], kind='mergesort') tm.assert_frame_equal(result, expected) def test_left_join_index_multi_match(self): left = DataFrame([ ['c', 0], ['b', 1], ['a', 2], ['b', 3]], columns=['tag', 'val'], index=[2, 0, 1, 3]) right = (DataFrame([ ['a', 'v'], ['c', 'w'], ['c', 'x'], ['d', 'y'], ['a', 'z'], ['c', 'r'], ['e', 'q'], ['c', 's']], columns=['tag', 'char']) .set_index('tag')) result = left.join(right, on='tag', how='left') expected = DataFrame([ ['c', 0, 'w'], ['c', 0, 'x'], ['c', 0, 'r'], ['c', 0, 's'], ['b', 1, nan], ['a', 2, 'v'], ['a', 2, 'z'], ['b', 3, nan]], columns=['tag', 'val', 'char'], index=[2, 2, 2, 2, 0, 1, 1, 3]) tm.assert_frame_equal(result, expected) result = left.join(right, on='tag', how='left', sort=True) expected2 = expected.sort_values('tag', kind='mergesort') tm.assert_frame_equal(result, expected2) # GH7331 - maintain left frame order in left merge result = merge(left, right.reset_index(), how='left', on='tag') expected.index = np.arange(len(expected)) tm.assert_frame_equal(result, expected) def test_left_merge_na_buglet(self): left = DataFrame({'id': list('abcde'), 'v1': randn(5), 'v2': randn(5), 'dummy': list('abcde'), 'v3': randn(5)}, columns=['id', 'v1', 'v2', 'dummy', 'v3']) right = DataFrame({'id': ['a', 'b', np.nan, np.nan, np.nan], 'sv3': [1.234, 5.678, np.nan, np.nan, np.nan]}) result = merge(left, right, on='id', how='left') rdf = right.drop(['id'], axis=1) expected = left.join(rdf) tm.assert_frame_equal(result, expected) def test_merge_na_keys(self): data = [[1950, "A", 1.5], [1950, "B", 1.5], [1955, "B", 1.5], [1960, "B", np.nan], [1970, "B", 4.], [1950, "C", 4.], [1960, "C", np.nan], [1965, "C", 3.], [1970, "C", 4.]] frame = DataFrame(data, columns=["year", "panel", "data"]) other_data = [[1960, 'A', np.nan], [1970, 'A', np.nan], [1955, 'A', np.nan], [1965, 'A', np.nan], [1965, 'B', np.nan], [1955, 'C', np.nan]] other = DataFrame(other_data, columns=['year', 'panel', 'data']) result = frame.merge(other, how='outer') expected = frame.fillna(-999).merge(other.fillna(-999), 
how='outer') expected = expected.replace(-999, np.nan) tm.assert_frame_equal(result, expected) @pytest.mark.parametrize("klass", [None, np.asarray, Series, Index]) def test_merge_datetime_index(self, klass): # see gh-19038 df = DataFrame([1, 2, 3], ["2016-01-01", "2017-01-01", "2018-01-01"], columns=["a"]) df.index = pd.to_datetime(df.index) on_vector = df.index.year if klass is not None: on_vector = klass(on_vector) expected = DataFrame( OrderedDict([ ("a", [1, 2, 3]), ("key_1", [2016, 2017, 2018]), ]) ) result = df.merge(df, on=["a", on_vector], how="inner") tm.assert_frame_equal(result, expected) expected = DataFrame( OrderedDict([ ("key_0", [2016, 2017, 2018]), ("a_x", [1, 2, 3]), ("a_y", [1, 2, 3]), ]) ) result = df.merge(df, on=[df.index.year], how="inner") tm.assert_frame_equal(result, expected) def test_join_multi_levels(self): # GH 3662 # merge multi-levels household = ( DataFrame( dict(household_id=[1, 2, 3], male=[0, 1, 0], wealth=[196087.3, 316478.7, 294750]), columns=['household_id', 'male', 'wealth']) .set_index('household_id')) portfolio = ( DataFrame( dict(household_id=[1, 2, 2, 3, 3, 3, 4], asset_id=["nl0000301109", "nl0000289783", "gb00b03mlx29", "gb00b03mlx29", "lu0197800237", "nl0000289965", np.nan], name=["ABN Amro", "Robeco", "Royal Dutch Shell", "Royal Dutch Shell", "AAB Eastern Europe Equity Fund", "Postbank BioTech Fonds", np.nan], share=[1.0, 0.4, 0.6, 0.15, 0.6, 0.25, 1.0]), columns=['household_id', 'asset_id', 'name', 'share']) .set_index(['household_id', 'asset_id'])) result = household.join(portfolio, how='inner') expected = ( DataFrame( dict(male=[0, 1, 1, 0, 0, 0], wealth=[196087.3, 316478.7, 316478.7, 294750.0, 294750.0, 294750.0], name=['ABN Amro', 'Robeco', 'Royal Dutch Shell', 'Royal Dutch Shell', 'AAB Eastern Europe Equity Fund', 'Postbank BioTech Fonds'], share=[1.00, 0.40, 0.60, 0.15, 0.60, 0.25], household_id=[1, 2, 2, 3, 3, 3], asset_id=['nl0000301109', 'nl0000289783', 'gb00b03mlx29', 'gb00b03mlx29', 'lu0197800237', 'nl0000289965'])) .set_index(['household_id', 'asset_id']) .reindex(columns=['male', 'wealth', 'name', 'share'])) tm.assert_frame_equal(result, expected) # equivalency result = (merge(household.reset_index(), portfolio.reset_index(), on=['household_id'], how='inner') .set_index(['household_id', 'asset_id'])) tm.assert_frame_equal(result, expected) result = household.join(portfolio, how='outer') expected = (concat([ expected, (DataFrame( dict(share=[1.00]), index=MultiIndex.from_tuples( [(4, np.nan)], names=['household_id', 'asset_id']))) ], axis=0, sort=True).reindex(columns=expected.columns)) tm.assert_frame_equal(result, expected) # invalid cases household.index.name = 'foo' with pytest.raises(ValueError): household.join(portfolio, how='inner') portfolio2 = portfolio.copy() portfolio2.index.set_names(['household_id', 'foo']) with pytest.raises(ValueError): portfolio2.join(portfolio, how='inner') def test_join_multi_levels2(self): # some more advanced merges # GH6360 household = ( DataFrame( dict(household_id=[1, 2, 2, 3, 3, 3, 4], asset_id=["nl0000301109", "nl0000301109", "gb00b03mlx29", "gb00b03mlx29", "lu0197800237", "nl0000289965", np.nan], share=[1.0, 0.4, 0.6, 0.15, 0.6, 0.25, 1.0]), columns=['household_id', 'asset_id', 'share']) .set_index(['household_id', 'asset_id'])) log_return = DataFrame(dict( asset_id=["gb00b03mlx29", "gb00b03mlx29", "gb00b03mlx29", "lu0197800237", "lu0197800237"], t=[233, 234, 235, 180, 181], log_return=[.09604978, -.06524096, .03532373, .03025441, .036997] )).set_index(["asset_id", "t"]) expected = ( 
DataFrame(dict( household_id=[2, 2, 2, 3, 3, 3, 3, 3], asset_id=["gb00b03mlx29", "gb00b03mlx29", "gb00b03mlx29", "gb00b03mlx29", "gb00b03mlx29", "gb00b03mlx29", "lu0197800237", "lu0197800237"], t=[233, 234, 235, 233, 234, 235, 180, 181], share=[0.6, 0.6, 0.6, 0.15, 0.15, 0.15, 0.6, 0.6], log_return=[.09604978, -.06524096, .03532373, .09604978, -.06524096, .03532373, .03025441, .036997] )) .set_index(["household_id", "asset_id", "t"]) .reindex(columns=['share', 'log_return'])) # this is the equivalency result = (merge(household.reset_index(), log_return.reset_index(), on=['asset_id'], how='inner') .set_index(['household_id', 'asset_id', 't'])) tm.assert_frame_equal(result, expected) expected = ( DataFrame(dict( household_id=[1, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 4], asset_id=["nl0000301109", "nl0000301109", "gb00b03mlx29", "gb00b03mlx29", "gb00b03mlx29", "gb00b03mlx29", "gb00b03mlx29", "gb00b03mlx29", "lu0197800237", "lu0197800237", "nl0000289965", None], t=[None, None, 233, 234, 235, 233, 234, 235, 180, 181, None, None], share=[1.0, 0.4, 0.6, 0.6, 0.6, 0.15, 0.15, 0.15, 0.6, 0.6, 0.25, 1.0], log_return=[None, None, .09604978, -.06524096, .03532373, .09604978, -.06524096, .03532373, .03025441, .036997, None, None] )) .set_index(["household_id", "asset_id", "t"]) .reindex(columns=['share', 'log_return'])) result = (merge(household.reset_index(), log_return.reset_index(), on=['asset_id'], how='outer') .set_index(['household_id', 'asset_id', 't'])) tm.assert_frame_equal(result, expected) class TestJoinMultiMulti: def test_join_multi_multi(self, left_multi, right_multi, join_type, on_cols_multi, idx_cols_multi): # Multi-index join tests expected = (pd.merge(left_multi.reset_index(), right_multi.reset_index(), how=join_type, on=on_cols_multi). set_index(idx_cols_multi).sort_index()) result = left_multi.join(right_multi, how=join_type).sort_index() tm.assert_frame_equal(result, expected) def test_join_multi_empty_frames(self, left_multi, right_multi, join_type, on_cols_multi, idx_cols_multi): left_multi = left_multi.drop(columns=left_multi.columns) right_multi = right_multi.drop(columns=right_multi.columns) expected = (pd.merge(left_multi.reset_index(), right_multi.reset_index(), how=join_type, on=on_cols_multi) .set_index(idx_cols_multi).sort_index()) result = left_multi.join(right_multi, how=join_type).sort_index() tm.assert_frame_equal(result, expected) @pytest.mark.parametrize("box", [None, np.asarray, Series, Index]) def test_merge_datetime_index(self, box): # see gh-19038 df = DataFrame([1, 2, 3], ["2016-01-01", "2017-01-01", "2018-01-01"], columns=["a"]) df.index = pd.to_datetime(df.index) on_vector = df.index.year if box is not None: on_vector = box(on_vector) expected = DataFrame( OrderedDict([ ("a", [1, 2, 3]), ("key_1", [2016, 2017, 2018]), ]) ) result = df.merge(df, on=["a", on_vector], how="inner") tm.assert_frame_equal(result, expected) expected = DataFrame( OrderedDict([ ("key_0", [2016, 2017, 2018]), ("a_x", [1, 2, 3]), ("a_y", [1, 2, 3]), ]) ) result = df.merge(df, on=[df.index.year], how="inner") tm.assert_frame_equal(result, expected) def test_single_common_level(self): index_left = pd.MultiIndex.from_tuples([('K0', 'X0'), ('K0', 'X1'), ('K1', 'X2')], names=['key', 'X']) left = pd.DataFrame({'A': ['A0', 'A1', 'A2'], 'B': ['B0', 'B1', 'B2']}, index=index_left) index_right = pd.MultiIndex.from_tuples([('K0', 'Y0'), ('K1', 'Y1'), ('K2', 'Y2'), ('K2', 'Y3')], names=['key', 'Y']) right = pd.DataFrame({'C': ['C0', 'C1', 'C2', 'C3'], 'D': ['D0', 'D1', 'D2', 'D3']}, index=index_right) 
result = left.join(right) expected = (pd.merge(left.reset_index(), right.reset_index(), on=['key'], how='inner') .set_index(['key', 'X', 'Y'])) tm.assert_frame_equal(result, expected)
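The test module above repeatedly joins a flat frame onto a MultiIndexed frame via the `on=` argument and asserts equivalence with a merge against `right.reset_index()`. A hedged sketch of that pattern with made-up data shaped like the `left`/`right` fixtures:

import pandas as pd

left = pd.DataFrame({'key1': ['foo', 'foo', 'bar'],
                     'key2': ['one', 'two', 'one'],
                     'data': [0.1, 0.2, 0.3]})

idx = pd.MultiIndex.from_tuples([('foo', 'one'), ('foo', 'two'), ('bar', 'one')],
                                names=['key1', 'key2'])
right = pd.DataFrame({'j_one': [1.0, 2.0, 3.0]}, index=idx)

# Join columns of `left` against the MultiIndex of `right`.
joined = left.join(right, on=['key1', 'key2'], how='left')

# The tests assert this matches a column-wise merge against the reset index.
merged = pd.merge(left, right.reset_index(), on=['key1', 'key2'], how='left')
print(joined)
print(merged)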
cbertinato/pandas
pandas/tests/reshape/merge/test_multi.py
pandas/core/frame.py
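The frame.py record that closes here also documents DataFrame.mode and DataFrame.quantile. A short sketch of both, again on toy data rather than anything from the record:

import numpy as np
import pandas as pd

df = pd.DataFrame({'legs': [2, 4, 8, 2],
                   'wings': [2.0, np.nan, 0.0, np.nan]},
                  index=['falcon', 'horse', 'spider', 'ostrich'])

# By default NaN is ignored; with dropna=False NaN can itself be a mode.
print(df.mode())
print(df.mode(dropna=False))

# quantile returns a Series for a scalar q and a DataFrame for a list of q's.
print(df.quantile(0.5))
print(df.quantile([0.25, 0.75]))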
""" manage PyTables query interface via Expressions """ import ast from functools import partial import numpy as np from pandas._libs.tslibs import Timedelta, Timestamp from pandas.compat.chainmap import DeepChainMap from pandas.core.dtypes.common import is_list_like import pandas as pd from pandas.core.base import StringMixin import pandas.core.common as com from pandas.core.computation import expr, ops from pandas.core.computation.common import _ensure_decoded from pandas.core.computation.expr import BaseExprVisitor from pandas.core.computation.ops import UndefinedVariableError, is_term from pandas.io.formats.printing import pprint_thing, pprint_thing_encoded class Scope(expr.Scope): __slots__ = 'queryables', def __init__(self, level, global_dict=None, local_dict=None, queryables=None): super().__init__(level + 1, global_dict=global_dict, local_dict=local_dict) self.queryables = queryables or dict() class Term(ops.Term): def __new__(cls, name, env, side=None, encoding=None): klass = Constant if not isinstance(name, str) else cls supr_new = StringMixin.__new__ return supr_new(klass) def __init__(self, name, env, side=None, encoding=None): super().__init__(name, env, side=side, encoding=encoding) def _resolve_name(self): # must be a queryables if self.side == 'left': if self.name not in self.env.queryables: raise NameError('name {name!r} is not defined' .format(name=self.name)) return self.name # resolve the rhs (and allow it to be None) try: return self.env.resolve(self.name, is_local=False) except UndefinedVariableError: return self.name # read-only property overwriting read/write property @property # type: ignore def value(self): return self._value class Constant(Term): def __init__(self, value, env, side=None, encoding=None): super().__init__(value, env, side=side, encoding=encoding) def _resolve_name(self): return self._name class BinOp(ops.BinOp): _max_selectors = 31 def __init__(self, op, lhs, rhs, queryables, encoding): super().__init__(op, lhs, rhs) self.queryables = queryables self.encoding = encoding self.filter = None self.condition = None def _disallow_scalar_only_bool_ops(self): pass def prune(self, klass): def pr(left, right): """ create and return a new specialized BinOp from myself """ if left is None: return right elif right is None: return left k = klass if isinstance(left, ConditionBinOp): if (isinstance(left, ConditionBinOp) and isinstance(right, ConditionBinOp)): k = JointConditionBinOp elif isinstance(left, k): return left elif isinstance(right, k): return right elif isinstance(left, FilterBinOp): if (isinstance(left, FilterBinOp) and isinstance(right, FilterBinOp)): k = JointFilterBinOp elif isinstance(left, k): return left elif isinstance(right, k): return right return k(self.op, left, right, queryables=self.queryables, encoding=self.encoding).evaluate() left, right = self.lhs, self.rhs if is_term(left) and is_term(right): res = pr(left.value, right.value) elif not is_term(left) and is_term(right): res = pr(left.prune(klass), right.value) elif is_term(left) and not is_term(right): res = pr(left.value, right.prune(klass)) elif not (is_term(left) or is_term(right)): res = pr(left.prune(klass), right.prune(klass)) return res def conform(self, rhs): """ inplace conform rhs """ if not is_list_like(rhs): rhs = [rhs] if isinstance(rhs, np.ndarray): rhs = rhs.ravel() return rhs @property def is_valid(self): """ return True if this is a valid field """ return self.lhs in self.queryables @property def is_in_table(self): """ return True if this is a valid column name for 
generation (e.g. an actual column in the table) """ return self.queryables.get(self.lhs) is not None @property def kind(self): """ the kind of my field """ return getattr(self.queryables.get(self.lhs), 'kind', None) @property def meta(self): """ the meta of my field """ return getattr(self.queryables.get(self.lhs), 'meta', None) @property def metadata(self): """ the metadata of my field """ return getattr(self.queryables.get(self.lhs), 'metadata', None) def generate(self, v): """ create and return the op string for this TermValue """ val = v.tostring(self.encoding) return "({lhs} {op} {val})".format(lhs=self.lhs, op=self.op, val=val) def convert_value(self, v): """ convert the expression that is in the term to something that is accepted by pytables """ def stringify(value): if self.encoding is not None: encoder = partial(pprint_thing_encoded, encoding=self.encoding) else: encoder = pprint_thing return encoder(value) kind = _ensure_decoded(self.kind) meta = _ensure_decoded(self.meta) if kind == 'datetime64' or kind == 'datetime': if isinstance(v, (int, float)): v = stringify(v) v = _ensure_decoded(v) v = Timestamp(v) if v.tz is not None: v = v.tz_convert('UTC') return TermValue(v, v.value, kind) elif kind == 'timedelta64' or kind == 'timedelta': v = Timedelta(v, unit='s').value return TermValue(int(v), v, kind) elif meta == 'category': metadata = com.values_from_object(self.metadata) result = metadata.searchsorted(v, side='left') # result returns 0 if v is first element or if v is not in metadata # check that metadata contains v if not result and v not in metadata: result = -1 return TermValue(result, result, 'integer') elif kind == 'integer': v = int(float(v)) return TermValue(v, v, kind) elif kind == 'float': v = float(v) return TermValue(v, v, kind) elif kind == 'bool': if isinstance(v, str): v = not v.strip().lower() in ['false', 'f', 'no', 'n', 'none', '0', '[]', '{}', ''] else: v = bool(v) return TermValue(v, v, kind) elif isinstance(v, str): # string quoting return TermValue(v, stringify(v), 'string') else: raise TypeError("Cannot compare {v} of type {typ} to {kind} column" .format(v=v, typ=type(v), kind=kind)) def convert_values(self): pass class FilterBinOp(BinOp): def __str__(self): return pprint_thing("[Filter : [{lhs}] -> [{op}]" .format(lhs=self.filter[0], op=self.filter[1])) def invert(self): """ invert the filter """ if self.filter is not None: f = list(self.filter) f[1] = self.generate_filter_op(invert=True) self.filter = tuple(f) return self def format(self): """ return the actual filter format """ return [self.filter] def evaluate(self): if not self.is_valid: raise ValueError("query term is not valid [{slf}]" .format(slf=self)) rhs = self.conform(self.rhs) values = [TermValue(v, v, self.kind).value for v in rhs] if self.is_in_table: # if too many values to create the expression, use a filter instead if self.op in ['==', '!='] and len(values) > self._max_selectors: filter_op = self.generate_filter_op() self.filter = ( self.lhs, filter_op, pd.Index(values)) return self return None # equality conditions if self.op in ['==', '!=']: filter_op = self.generate_filter_op() self.filter = ( self.lhs, filter_op, pd.Index(values)) else: raise TypeError("passing a filterable condition to a non-table " "indexer [{slf}]".format(slf=self)) return self def generate_filter_op(self, invert=False): if (self.op == '!=' and not invert) or (self.op == '==' and invert): return lambda axis, vals: ~axis.isin(vals) else: return lambda axis, vals: axis.isin(vals) class JointFilterBinOp(FilterBinOp): 
def format(self): raise NotImplementedError("unable to collapse Joint Filters") def evaluate(self): return self class ConditionBinOp(BinOp): def __str__(self): return pprint_thing("[Condition : [{cond}]]" .format(cond=self.condition)) def invert(self): """ invert the condition """ # if self.condition is not None: # self.condition = "~(%s)" % self.condition # return self raise NotImplementedError("cannot use an invert condition when " "passing to numexpr") def format(self): """ return the actual ne format """ return self.condition def evaluate(self): if not self.is_valid: raise ValueError("query term is not valid [{slf}]" .format(slf=self)) # convert values if we are in the table if not self.is_in_table: return None rhs = self.conform(self.rhs) values = [self.convert_value(v) for v in rhs] # equality conditions if self.op in ['==', '!=']: # too many values to create the expression? if len(values) <= self._max_selectors: vs = [self.generate(v) for v in values] self.condition = "({cond})".format(cond=' | '.join(vs)) # use a filter after reading else: return None else: self.condition = self.generate(values[0]) return self class JointConditionBinOp(ConditionBinOp): def evaluate(self): self.condition = "({lhs} {op} {rhs})".format(lhs=self.lhs.condition, op=self.op, rhs=self.rhs.condition) return self class UnaryOp(ops.UnaryOp): def prune(self, klass): if self.op != '~': raise NotImplementedError("UnaryOp only support invert type ops") operand = self.operand operand = operand.prune(klass) if operand is not None: if issubclass(klass, ConditionBinOp): if operand.condition is not None: return operand.invert() elif issubclass(klass, FilterBinOp): if operand.filter is not None: return operand.invert() return None _op_classes = {'unary': UnaryOp} class ExprVisitor(BaseExprVisitor): const_type = Constant term_type = Term def __init__(self, env, engine, parser, **kwargs): super().__init__(env, engine, parser) for bin_op in self.binary_ops: bin_node = self.binary_op_nodes_map[bin_op] setattr(self, 'visit_{node}'.format(node=bin_node), lambda node, bin_op=bin_op: partial(BinOp, bin_op, **kwargs)) def visit_UnaryOp(self, node, **kwargs): if isinstance(node.op, (ast.Not, ast.Invert)): return UnaryOp('~', self.visit(node.operand)) elif isinstance(node.op, ast.USub): return self.const_type(-self.visit(node.operand).value, self.env) elif isinstance(node.op, ast.UAdd): raise NotImplementedError('Unary addition not supported') def visit_Index(self, node, **kwargs): return self.visit(node.value).value def visit_Assign(self, node, **kwargs): cmpr = ast.Compare(ops=[ast.Eq()], left=node.targets[0], comparators=[node.value]) return self.visit(cmpr) def visit_Subscript(self, node, **kwargs): # only allow simple subscripts value = self.visit(node.value) slobj = self.visit(node.slice) try: value = value.value except AttributeError: pass try: return self.const_type(value[slobj], self.env) except TypeError: raise ValueError("cannot subscript {value!r} with " "{slobj!r}".format(value=value, slobj=slobj)) def visit_Attribute(self, node, **kwargs): attr = node.attr value = node.value ctx = node.ctx.__class__ if ctx == ast.Load: # resolve the value resolved = self.visit(value) # try to get the value to see if we are another expression try: resolved = resolved.value except (AttributeError): pass try: return self.term_type(getattr(resolved, attr), self.env) except AttributeError: # something like datetime.datetime where scope is overridden if isinstance(value, ast.Name) and value.id == attr: return resolved raise 
ValueError("Invalid Attribute context {name}" .format(name=ctx.__name__)) def translate_In(self, op): return ast.Eq() if isinstance(op, ast.In) else op def _rewrite_membership_op(self, node, left, right): return self.visit(node.op), node.op, left, right def _validate_where(w): """ Validate that the where statement is of the right type. The type may either be String, Expr, or list-like of Exprs. Parameters ---------- w : String term expression, Expr, or list-like of Exprs. Returns ------- where : The original where clause if the check was successful. Raises ------ TypeError : An invalid data type was passed in for w (e.g. dict). """ if not (isinstance(w, (Expr, str)) or is_list_like(w)): raise TypeError("where must be passed as a string, Expr, " "or list-like of Exprs") return w class Expr(expr.Expr): """ hold a pytables like expression, comprised of possibly multiple 'terms' Parameters ---------- where : string term expression, Expr, or list-like of Exprs queryables : a "kinds" map (dict of column name -> kind), or None if column is non-indexable encoding : an encoding that will encode the query terms Returns ------- an Expr object Examples -------- 'index>=date' "columns=['A', 'D']" 'columns=A' 'columns==A' "~(columns=['A','B'])" 'index>df.index[3] & string="bar"' '(index>df.index[3] & index<=df.index[6]) | string="bar"' "ts>=Timestamp('2012-02-01')" "major_axis>=20130101" """ def __init__(self, where, queryables=None, encoding=None, scope_level=0): where = _validate_where(where) self.encoding = encoding self.condition = None self.filter = None self.terms = None self._visitor = None # capture the environment if needed local_dict = DeepChainMap() if isinstance(where, Expr): local_dict = where.env.scope where = where.expr elif isinstance(where, (list, tuple)): for idx, w in enumerate(where): if isinstance(w, Expr): local_dict = w.env.scope else: w = _validate_where(w) where[idx] = w where = ' & '.join(map('({})'.format, com.flatten(where))) # noqa self.expr = where self.env = Scope(scope_level + 1, local_dict=local_dict) if queryables is not None and isinstance(self.expr, str): self.env.queryables.update(queryables) self._visitor = ExprVisitor(self.env, queryables=queryables, parser='pytables', engine='pytables', encoding=encoding) self.terms = self.parse() def __str__(self): if self.terms is not None: return pprint_thing(self.terms) return pprint_thing(self.expr) def evaluate(self): """ create and return the numexpr condition and filter """ try: self.condition = self.terms.prune(ConditionBinOp) except AttributeError: raise ValueError("cannot process expression [{expr}], [{slf}] " "is not a valid condition".format(expr=self.expr, slf=self)) try: self.filter = self.terms.prune(FilterBinOp) except AttributeError: raise ValueError("cannot process expression [{expr}], [{slf}] " "is not a valid filter".format(expr=self.expr, slf=self)) return self.condition, self.filter class TermValue: """ hold a term value the we use to construct a condition/filter """ def __init__(self, value, converted, kind): self.value = value self.converted = converted self.kind = kind def tostring(self, encoding): """ quote the string if not encoded else encode and return """ if self.kind == 'string': if encoding is not None: return self.converted return '"{converted}"'.format(converted=self.converted) elif self.kind == 'float': # python 2 str(float) is not always # round-trippable so use repr() return repr(self.converted) return self.converted def maybe_expression(s): """ loose checking if s is a pytables-acceptable 
expression """ if not isinstance(s, str): return False ops = ExprVisitor.binary_ops + ExprVisitor.unary_ops + ('=',) # make sure we have an op at least return any(op in s for op in ops)
from collections import OrderedDict import numpy as np from numpy import nan from numpy.random import randn import pytest import pandas as pd from pandas import DataFrame, Index, MultiIndex, Series from pandas.core.reshape.concat import concat from pandas.core.reshape.merge import merge import pandas.util.testing as tm @pytest.fixture def left(): """left dataframe (not multi-indexed) for multi-index join tests""" # a little relevant example with NAs key1 = ['bar', 'bar', 'bar', 'foo', 'foo', 'baz', 'baz', 'qux', 'qux', 'snap'] key2 = ['two', 'one', 'three', 'one', 'two', 'one', 'two', 'two', 'three', 'one'] data = np.random.randn(len(key1)) return DataFrame({'key1': key1, 'key2': key2, 'data': data}) @pytest.fixture def right(): """right dataframe (multi-indexed) for multi-index join tests""" index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'], ['one', 'two', 'three']], codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]], names=['key1', 'key2']) return DataFrame(np.random.randn(10, 3), index=index, columns=['j_one', 'j_two', 'j_three']) @pytest.fixture def left_multi(): return ( DataFrame( dict(Origin=['A', 'A', 'B', 'B', 'C'], Destination=['A', 'B', 'A', 'C', 'A'], Period=['AM', 'AM', 'IP', 'AM', 'OP'], TripPurp=['hbw', 'nhb', 'hbo', 'nhb', 'hbw'], Trips=[1987, 3647, 2470, 4296, 4444]), columns=['Origin', 'Destination', 'Period', 'TripPurp', 'Trips']) .set_index(['Origin', 'Destination', 'Period', 'TripPurp'])) @pytest.fixture def right_multi(): return ( DataFrame( dict(Origin=['A', 'A', 'B', 'B', 'C', 'C', 'E'], Destination=['A', 'B', 'A', 'B', 'A', 'B', 'F'], Period=['AM', 'AM', 'IP', 'AM', 'OP', 'IP', 'AM'], LinkType=['a', 'b', 'c', 'b', 'a', 'b', 'a'], Distance=[100, 80, 90, 80, 75, 35, 55]), columns=['Origin', 'Destination', 'Period', 'LinkType', 'Distance']) .set_index(['Origin', 'Destination', 'Period', 'LinkType'])) @pytest.fixture def on_cols_multi(): return ['Origin', 'Destination', 'Period'] @pytest.fixture def idx_cols_multi(): return ['Origin', 'Destination', 'Period', 'TripPurp', 'LinkType'] class TestMergeMulti: def setup_method(self): self.index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'], ['one', 'two', 'three']], codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]], names=['first', 'second']) self.to_join = DataFrame(np.random.randn(10, 3), index=self.index, columns=['j_one', 'j_two', 'j_three']) # a little relevant example with NAs key1 = ['bar', 'bar', 'bar', 'foo', 'foo', 'baz', 'baz', 'qux', 'qux', 'snap'] key2 = ['two', 'one', 'three', 'one', 'two', 'one', 'two', 'two', 'three', 'one'] data = np.random.randn(len(key1)) self.data = DataFrame({'key1': key1, 'key2': key2, 'data': data}) def test_merge_on_multikey(self, left, right, join_type): on_cols = ['key1', 'key2'] result = (left.join(right, on=on_cols, how=join_type) .reset_index(drop=True)) expected = pd.merge(left, right.reset_index(), on=on_cols, how=join_type) tm.assert_frame_equal(result, expected) result = (left.join(right, on=on_cols, how=join_type, sort=True) .reset_index(drop=True)) expected = pd.merge(left, right.reset_index(), on=on_cols, how=join_type, sort=True) tm.assert_frame_equal(result, expected) @pytest.mark.parametrize("sort", [False, True]) def test_left_join_multi_index(self, left, right, sort): icols = ['1st', '2nd', '3rd'] def bind_cols(df): iord = lambda a: 0 if a != a else ord(a) f = lambda ts: ts.map(iord) - ord('a') return (f(df['1st']) + f(df['3rd']) * 1e2 + df['2nd'].fillna(0) * 1e4) def run_asserts(left, right, sort): res = 
left.join(right, on=icols, how='left', sort=sort) assert len(left) < len(res) + 1 assert not res['4th'].isna().any() assert not res['5th'].isna().any() tm.assert_series_equal( res['4th'], - res['5th'], check_names=False) result = bind_cols(res.iloc[:, :-2]) tm.assert_series_equal(res['4th'], result, check_names=False) assert result.name is None if sort: tm.assert_frame_equal( res, res.sort_values(icols, kind='mergesort')) out = merge(left, right.reset_index(), on=icols, sort=sort, how='left') res.index = np.arange(len(res)) tm.assert_frame_equal(out, res) lc = list(map(chr, np.arange(ord('a'), ord('z') + 1))) left = DataFrame(np.random.choice(lc, (5000, 2)), columns=['1st', '3rd']) left.insert(1, '2nd', np.random.randint(0, 1000, len(left))) i = np.random.permutation(len(left)) right = left.iloc[i].copy() left['4th'] = bind_cols(left) right['5th'] = - bind_cols(right) right.set_index(icols, inplace=True) run_asserts(left, right, sort) # inject some nulls left.loc[1::23, '1st'] = np.nan left.loc[2::37, '2nd'] = np.nan left.loc[3::43, '3rd'] = np.nan left['4th'] = bind_cols(left) i = np.random.permutation(len(left)) right = left.iloc[i, :-1] right['5th'] = - bind_cols(right) right.set_index(icols, inplace=True) run_asserts(left, right, sort) @pytest.mark.parametrize("sort", [False, True]) def test_merge_right_vs_left(self, left, right, sort): # compare left vs right merge with multikey on_cols = ['key1', 'key2'] merged_left_right = left.merge(right, left_on=on_cols, right_index=True, how='left', sort=sort) merge_right_left = right.merge(left, right_on=on_cols, left_index=True, how='right', sort=sort) # Reorder columns merge_right_left = merge_right_left[merged_left_right.columns] tm.assert_frame_equal(merged_left_right, merge_right_left) def test_compress_group_combinations(self): # ~ 40000000 possible unique groups key1 = tm.rands_array(10, 10000) key1 = np.tile(key1, 2) key2 = key1[::-1] df = DataFrame({'key1': key1, 'key2': key2, 'value1': np.random.randn(20000)}) df2 = DataFrame({'key1': key1[::2], 'key2': key2[::2], 'value2': np.random.randn(10000)}) # just to hit the label compression code path merge(df, df2, how='outer') def test_left_join_index_preserve_order(self): on_cols = ['k1', 'k2'] left = DataFrame({'k1': [0, 1, 2] * 8, 'k2': ['foo', 'bar'] * 12, 'v': np.array(np.arange(24), dtype=np.int64)}) index = MultiIndex.from_tuples([(2, 'bar'), (1, 'foo')]) right = DataFrame({'v2': [5, 7]}, index=index) result = left.join(right, on=on_cols) expected = left.copy() expected['v2'] = np.nan expected.loc[(expected.k1 == 2) & (expected.k2 == 'bar'), 'v2'] = 5 expected.loc[(expected.k1 == 1) & (expected.k2 == 'foo'), 'v2'] = 7 tm.assert_frame_equal(result, expected) result.sort_values(on_cols, kind='mergesort', inplace=True) expected = left.join(right, on=on_cols, sort=True) tm.assert_frame_equal(result, expected) # test join with multi dtypes blocks left = DataFrame({'k1': [0, 1, 2] * 8, 'k2': ['foo', 'bar'] * 12, 'k3': np.array([0, 1, 2] * 8, dtype=np.float32), 'v': np.array(np.arange(24), dtype=np.int32)}) index = MultiIndex.from_tuples([(2, 'bar'), (1, 'foo')]) right = DataFrame({'v2': [5, 7]}, index=index) result = left.join(right, on=on_cols) expected = left.copy() expected['v2'] = np.nan expected.loc[(expected.k1 == 2) & (expected.k2 == 'bar'), 'v2'] = 5 expected.loc[(expected.k1 == 1) & (expected.k2 == 'foo'), 'v2'] = 7 tm.assert_frame_equal(result, expected) result = result.sort_values(on_cols, kind='mergesort') expected = left.join(right, on=on_cols, sort=True) 
tm.assert_frame_equal(result, expected) def test_left_join_index_multi_match_multiindex(self): left = DataFrame([ ['X', 'Y', 'C', 'a'], ['W', 'Y', 'C', 'e'], ['V', 'Q', 'A', 'h'], ['V', 'R', 'D', 'i'], ['X', 'Y', 'D', 'b'], ['X', 'Y', 'A', 'c'], ['W', 'Q', 'B', 'f'], ['W', 'R', 'C', 'g'], ['V', 'Y', 'C', 'j'], ['X', 'Y', 'B', 'd']], columns=['cola', 'colb', 'colc', 'tag'], index=[3, 2, 0, 1, 7, 6, 4, 5, 9, 8]) right = (DataFrame([ ['W', 'R', 'C', 0], ['W', 'Q', 'B', 3], ['W', 'Q', 'B', 8], ['X', 'Y', 'A', 1], ['X', 'Y', 'A', 4], ['X', 'Y', 'B', 5], ['X', 'Y', 'C', 6], ['X', 'Y', 'C', 9], ['X', 'Q', 'C', -6], ['X', 'R', 'C', -9], ['V', 'Y', 'C', 7], ['V', 'R', 'D', 2], ['V', 'R', 'D', -1], ['V', 'Q', 'A', -3]], columns=['col1', 'col2', 'col3', 'val']) .set_index(['col1', 'col2', 'col3'])) result = left.join(right, on=['cola', 'colb', 'colc'], how='left') expected = DataFrame([ ['X', 'Y', 'C', 'a', 6], ['X', 'Y', 'C', 'a', 9], ['W', 'Y', 'C', 'e', nan], ['V', 'Q', 'A', 'h', -3], ['V', 'R', 'D', 'i', 2], ['V', 'R', 'D', 'i', -1], ['X', 'Y', 'D', 'b', nan], ['X', 'Y', 'A', 'c', 1], ['X', 'Y', 'A', 'c', 4], ['W', 'Q', 'B', 'f', 3], ['W', 'Q', 'B', 'f', 8], ['W', 'R', 'C', 'g', 0], ['V', 'Y', 'C', 'j', 7], ['X', 'Y', 'B', 'd', 5]], columns=['cola', 'colb', 'colc', 'tag', 'val'], index=[3, 3, 2, 0, 1, 1, 7, 6, 6, 4, 4, 5, 9, 8]) tm.assert_frame_equal(result, expected) result = left.join(right, on=['cola', 'colb', 'colc'], how='left', sort=True) expected = expected.sort_values(['cola', 'colb', 'colc'], kind='mergesort') tm.assert_frame_equal(result, expected) def test_left_join_index_multi_match(self): left = DataFrame([ ['c', 0], ['b', 1], ['a', 2], ['b', 3]], columns=['tag', 'val'], index=[2, 0, 1, 3]) right = (DataFrame([ ['a', 'v'], ['c', 'w'], ['c', 'x'], ['d', 'y'], ['a', 'z'], ['c', 'r'], ['e', 'q'], ['c', 's']], columns=['tag', 'char']) .set_index('tag')) result = left.join(right, on='tag', how='left') expected = DataFrame([ ['c', 0, 'w'], ['c', 0, 'x'], ['c', 0, 'r'], ['c', 0, 's'], ['b', 1, nan], ['a', 2, 'v'], ['a', 2, 'z'], ['b', 3, nan]], columns=['tag', 'val', 'char'], index=[2, 2, 2, 2, 0, 1, 1, 3]) tm.assert_frame_equal(result, expected) result = left.join(right, on='tag', how='left', sort=True) expected2 = expected.sort_values('tag', kind='mergesort') tm.assert_frame_equal(result, expected2) # GH7331 - maintain left frame order in left merge result = merge(left, right.reset_index(), how='left', on='tag') expected.index = np.arange(len(expected)) tm.assert_frame_equal(result, expected) def test_left_merge_na_buglet(self): left = DataFrame({'id': list('abcde'), 'v1': randn(5), 'v2': randn(5), 'dummy': list('abcde'), 'v3': randn(5)}, columns=['id', 'v1', 'v2', 'dummy', 'v3']) right = DataFrame({'id': ['a', 'b', np.nan, np.nan, np.nan], 'sv3': [1.234, 5.678, np.nan, np.nan, np.nan]}) result = merge(left, right, on='id', how='left') rdf = right.drop(['id'], axis=1) expected = left.join(rdf) tm.assert_frame_equal(result, expected) def test_merge_na_keys(self): data = [[1950, "A", 1.5], [1950, "B", 1.5], [1955, "B", 1.5], [1960, "B", np.nan], [1970, "B", 4.], [1950, "C", 4.], [1960, "C", np.nan], [1965, "C", 3.], [1970, "C", 4.]] frame = DataFrame(data, columns=["year", "panel", "data"]) other_data = [[1960, 'A', np.nan], [1970, 'A', np.nan], [1955, 'A', np.nan], [1965, 'A', np.nan], [1965, 'B', np.nan], [1955, 'C', np.nan]] other = DataFrame(other_data, columns=['year', 'panel', 'data']) result = frame.merge(other, how='outer') expected = frame.fillna(-999).merge(other.fillna(-999), 
how='outer') expected = expected.replace(-999, np.nan) tm.assert_frame_equal(result, expected) @pytest.mark.parametrize("klass", [None, np.asarray, Series, Index]) def test_merge_datetime_index(self, klass): # see gh-19038 df = DataFrame([1, 2, 3], ["2016-01-01", "2017-01-01", "2018-01-01"], columns=["a"]) df.index = pd.to_datetime(df.index) on_vector = df.index.year if klass is not None: on_vector = klass(on_vector) expected = DataFrame( OrderedDict([ ("a", [1, 2, 3]), ("key_1", [2016, 2017, 2018]), ]) ) result = df.merge(df, on=["a", on_vector], how="inner") tm.assert_frame_equal(result, expected) expected = DataFrame( OrderedDict([ ("key_0", [2016, 2017, 2018]), ("a_x", [1, 2, 3]), ("a_y", [1, 2, 3]), ]) ) result = df.merge(df, on=[df.index.year], how="inner") tm.assert_frame_equal(result, expected) def test_join_multi_levels(self): # GH 3662 # merge multi-levels household = ( DataFrame( dict(household_id=[1, 2, 3], male=[0, 1, 0], wealth=[196087.3, 316478.7, 294750]), columns=['household_id', 'male', 'wealth']) .set_index('household_id')) portfolio = ( DataFrame( dict(household_id=[1, 2, 2, 3, 3, 3, 4], asset_id=["nl0000301109", "nl0000289783", "gb00b03mlx29", "gb00b03mlx29", "lu0197800237", "nl0000289965", np.nan], name=["ABN Amro", "Robeco", "Royal Dutch Shell", "Royal Dutch Shell", "AAB Eastern Europe Equity Fund", "Postbank BioTech Fonds", np.nan], share=[1.0, 0.4, 0.6, 0.15, 0.6, 0.25, 1.0]), columns=['household_id', 'asset_id', 'name', 'share']) .set_index(['household_id', 'asset_id'])) result = household.join(portfolio, how='inner') expected = ( DataFrame( dict(male=[0, 1, 1, 0, 0, 0], wealth=[196087.3, 316478.7, 316478.7, 294750.0, 294750.0, 294750.0], name=['ABN Amro', 'Robeco', 'Royal Dutch Shell', 'Royal Dutch Shell', 'AAB Eastern Europe Equity Fund', 'Postbank BioTech Fonds'], share=[1.00, 0.40, 0.60, 0.15, 0.60, 0.25], household_id=[1, 2, 2, 3, 3, 3], asset_id=['nl0000301109', 'nl0000289783', 'gb00b03mlx29', 'gb00b03mlx29', 'lu0197800237', 'nl0000289965'])) .set_index(['household_id', 'asset_id']) .reindex(columns=['male', 'wealth', 'name', 'share'])) tm.assert_frame_equal(result, expected) # equivalency result = (merge(household.reset_index(), portfolio.reset_index(), on=['household_id'], how='inner') .set_index(['household_id', 'asset_id'])) tm.assert_frame_equal(result, expected) result = household.join(portfolio, how='outer') expected = (concat([ expected, (DataFrame( dict(share=[1.00]), index=MultiIndex.from_tuples( [(4, np.nan)], names=['household_id', 'asset_id']))) ], axis=0, sort=True).reindex(columns=expected.columns)) tm.assert_frame_equal(result, expected) # invalid cases household.index.name = 'foo' with pytest.raises(ValueError): household.join(portfolio, how='inner') portfolio2 = portfolio.copy() portfolio2.index.set_names(['household_id', 'foo']) with pytest.raises(ValueError): portfolio2.join(portfolio, how='inner') def test_join_multi_levels2(self): # some more advanced merges # GH6360 household = ( DataFrame( dict(household_id=[1, 2, 2, 3, 3, 3, 4], asset_id=["nl0000301109", "nl0000301109", "gb00b03mlx29", "gb00b03mlx29", "lu0197800237", "nl0000289965", np.nan], share=[1.0, 0.4, 0.6, 0.15, 0.6, 0.25, 1.0]), columns=['household_id', 'asset_id', 'share']) .set_index(['household_id', 'asset_id'])) log_return = DataFrame(dict( asset_id=["gb00b03mlx29", "gb00b03mlx29", "gb00b03mlx29", "lu0197800237", "lu0197800237"], t=[233, 234, 235, 180, 181], log_return=[.09604978, -.06524096, .03532373, .03025441, .036997] )).set_index(["asset_id", "t"]) expected = ( 
DataFrame(dict( household_id=[2, 2, 2, 3, 3, 3, 3, 3], asset_id=["gb00b03mlx29", "gb00b03mlx29", "gb00b03mlx29", "gb00b03mlx29", "gb00b03mlx29", "gb00b03mlx29", "lu0197800237", "lu0197800237"], t=[233, 234, 235, 233, 234, 235, 180, 181], share=[0.6, 0.6, 0.6, 0.15, 0.15, 0.15, 0.6, 0.6], log_return=[.09604978, -.06524096, .03532373, .09604978, -.06524096, .03532373, .03025441, .036997] )) .set_index(["household_id", "asset_id", "t"]) .reindex(columns=['share', 'log_return'])) # this is the equivalency result = (merge(household.reset_index(), log_return.reset_index(), on=['asset_id'], how='inner') .set_index(['household_id', 'asset_id', 't'])) tm.assert_frame_equal(result, expected) expected = ( DataFrame(dict( household_id=[1, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 4], asset_id=["nl0000301109", "nl0000301109", "gb00b03mlx29", "gb00b03mlx29", "gb00b03mlx29", "gb00b03mlx29", "gb00b03mlx29", "gb00b03mlx29", "lu0197800237", "lu0197800237", "nl0000289965", None], t=[None, None, 233, 234, 235, 233, 234, 235, 180, 181, None, None], share=[1.0, 0.4, 0.6, 0.6, 0.6, 0.15, 0.15, 0.15, 0.6, 0.6, 0.25, 1.0], log_return=[None, None, .09604978, -.06524096, .03532373, .09604978, -.06524096, .03532373, .03025441, .036997, None, None] )) .set_index(["household_id", "asset_id", "t"]) .reindex(columns=['share', 'log_return'])) result = (merge(household.reset_index(), log_return.reset_index(), on=['asset_id'], how='outer') .set_index(['household_id', 'asset_id', 't'])) tm.assert_frame_equal(result, expected) class TestJoinMultiMulti: def test_join_multi_multi(self, left_multi, right_multi, join_type, on_cols_multi, idx_cols_multi): # Multi-index join tests expected = (pd.merge(left_multi.reset_index(), right_multi.reset_index(), how=join_type, on=on_cols_multi). set_index(idx_cols_multi).sort_index()) result = left_multi.join(right_multi, how=join_type).sort_index() tm.assert_frame_equal(result, expected) def test_join_multi_empty_frames(self, left_multi, right_multi, join_type, on_cols_multi, idx_cols_multi): left_multi = left_multi.drop(columns=left_multi.columns) right_multi = right_multi.drop(columns=right_multi.columns) expected = (pd.merge(left_multi.reset_index(), right_multi.reset_index(), how=join_type, on=on_cols_multi) .set_index(idx_cols_multi).sort_index()) result = left_multi.join(right_multi, how=join_type).sort_index() tm.assert_frame_equal(result, expected) @pytest.mark.parametrize("box", [None, np.asarray, Series, Index]) def test_merge_datetime_index(self, box): # see gh-19038 df = DataFrame([1, 2, 3], ["2016-01-01", "2017-01-01", "2018-01-01"], columns=["a"]) df.index = pd.to_datetime(df.index) on_vector = df.index.year if box is not None: on_vector = box(on_vector) expected = DataFrame( OrderedDict([ ("a", [1, 2, 3]), ("key_1", [2016, 2017, 2018]), ]) ) result = df.merge(df, on=["a", on_vector], how="inner") tm.assert_frame_equal(result, expected) expected = DataFrame( OrderedDict([ ("key_0", [2016, 2017, 2018]), ("a_x", [1, 2, 3]), ("a_y", [1, 2, 3]), ]) ) result = df.merge(df, on=[df.index.year], how="inner") tm.assert_frame_equal(result, expected) def test_single_common_level(self): index_left = pd.MultiIndex.from_tuples([('K0', 'X0'), ('K0', 'X1'), ('K1', 'X2')], names=['key', 'X']) left = pd.DataFrame({'A': ['A0', 'A1', 'A2'], 'B': ['B0', 'B1', 'B2']}, index=index_left) index_right = pd.MultiIndex.from_tuples([('K0', 'Y0'), ('K1', 'Y1'), ('K2', 'Y2'), ('K2', 'Y3')], names=['key', 'Y']) right = pd.DataFrame({'C': ['C0', 'C1', 'C2', 'C3'], 'D': ['D0', 'D1', 'D2', 'D3']}, index=index_right) 
result = left.join(right) expected = (pd.merge(left.reset_index(), right.reset_index(), on=['key'], how='inner') .set_index(['key', 'X', 'Y'])) tm.assert_frame_equal(result, expected)
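The same test module appears again for the next record; rather than repeat the join-on-columns sketch, here is the complementary pattern from test_merge_right_vs_left above, merging frame columns against a MultiIndex with left_on/right_index (toy frames, illustrative only):

import pandas as pd

left = pd.DataFrame({'key1': ['foo', 'bar'], 'key2': ['one', 'two'], 'v': [1, 2]})
right = pd.DataFrame({'w': [10, 20]},
                     index=pd.MultiIndex.from_tuples([('foo', 'one'), ('bar', 'two')],
                                                     names=['key1', 'key2']))

# Left-merge columns of `left` against the MultiIndex of `right` ...
a = left.merge(right, left_on=['key1', 'key2'], right_index=True, how='left')

# ... which mirrors the right-merge done from the other side, up to column order.
b = right.merge(left, right_on=['key1', 'key2'], left_index=True, how='right')
print(a)
print(b[a.columns])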
cbertinato/pandas
pandas/tests/reshape/merge/test_multi.py
pandas/core/computation/pytables.py
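One small note on maybe_expression, the loose checker at the end of the pytables module in this record: it only tests that the argument is a string containing some operator token, it does not parse anything. It is internal API and may change between pandas versions; a minimal sketch:

from pandas.core.computation.pytables import maybe_expression

print(maybe_expression('A > 5'))         # True  -- contains a comparison operator
print(maybe_expression('just a label'))  # False -- no operator token present
print(maybe_expression(5))               # False -- not a string at all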
import numpy as np import pytest import pandas as pd from pandas import Index, MultiIndex @pytest.fixture def idx(): # a MultiIndex used to test the general functionality of the # general functionality of this object major_axis = Index(['foo', 'bar', 'baz', 'qux']) minor_axis = Index(['one', 'two']) major_codes = np.array([0, 0, 1, 2, 3, 3]) minor_codes = np.array([0, 1, 0, 1, 0, 1]) index_names = ['first', 'second'] mi = MultiIndex(levels=[major_axis, minor_axis], codes=[major_codes, minor_codes], names=index_names, verify_integrity=False) return mi @pytest.fixture def idx_dup(): # compare tests/indexes/multi/conftest.py major_axis = Index(['foo', 'bar', 'baz', 'qux']) minor_axis = Index(['one', 'two']) major_codes = np.array([0, 0, 1, 0, 1, 1]) minor_codes = np.array([0, 1, 0, 1, 0, 1]) index_names = ['first', 'second'] mi = MultiIndex(levels=[major_axis, minor_axis], codes=[major_codes, minor_codes], names=index_names, verify_integrity=False) return mi @pytest.fixture def index_names(): # names that match those in the idx fixture for testing equality of # names assigned to the idx return ['first', 'second'] @pytest.fixture def holder(): # the MultiIndex constructor used to base compatibility with pickle return MultiIndex @pytest.fixture def compat_props(): # a MultiIndex must have these properties associated with it return ['shape', 'ndim', 'size'] @pytest.fixture def narrow_multi_index(): """ Return a MultiIndex that is narrower than the display (<80 characters). """ n = 1000 ci = pd.CategoricalIndex(list('a' * n) + (['abc'] * n)) dti = pd.date_range('2000-01-01', freq='s', periods=n * 2) return pd.MultiIndex.from_arrays([ci, ci.codes + 9, dti], names=['a', 'b', 'dti']) @pytest.fixture def wide_multi_index(): """ Return a MultiIndex that is wider than the display (>80 characters). """ n = 1000 ci = pd.CategoricalIndex(list('a' * n) + (['abc'] * n)) dti = pd.date_range('2000-01-01', freq='s', periods=n * 2) levels = [ci, ci.codes + 9, dti, dti, dti] names = ['a', 'b', 'dti_1', 'dti_2', 'dti_3'] return pd.MultiIndex.from_arrays(levels, names=names)
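The fixtures above build MultiIndex objects both from explicit levels/codes and from aligned arrays. A hedged sketch of the two equivalent constructions with toy values:

import numpy as np
from pandas import Index, MultiIndex

# Explicit levels/codes, as in the `idx` fixture (codes index into the levels).
mi = MultiIndex(levels=[Index(['foo', 'bar']), Index(['one', 'two'])],
                codes=[np.array([0, 0, 1, 1]), np.array([0, 1, 0, 1])],
                names=['first', 'second'])

# The same index built from aligned arrays, as in the wide/narrow fixtures.
mi2 = MultiIndex.from_arrays([['foo', 'foo', 'bar', 'bar'],
                              ['one', 'two', 'one', 'two']],
                             names=['first', 'second'])

print(mi.equals(mi2))  # True -- both describe the same four tuples in the same order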
from collections import OrderedDict import numpy as np from numpy import nan from numpy.random import randn import pytest import pandas as pd from pandas import DataFrame, Index, MultiIndex, Series from pandas.core.reshape.concat import concat from pandas.core.reshape.merge import merge import pandas.util.testing as tm @pytest.fixture def left(): """left dataframe (not multi-indexed) for multi-index join tests""" # a little relevant example with NAs key1 = ['bar', 'bar', 'bar', 'foo', 'foo', 'baz', 'baz', 'qux', 'qux', 'snap'] key2 = ['two', 'one', 'three', 'one', 'two', 'one', 'two', 'two', 'three', 'one'] data = np.random.randn(len(key1)) return DataFrame({'key1': key1, 'key2': key2, 'data': data}) @pytest.fixture def right(): """right dataframe (multi-indexed) for multi-index join tests""" index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'], ['one', 'two', 'three']], codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]], names=['key1', 'key2']) return DataFrame(np.random.randn(10, 3), index=index, columns=['j_one', 'j_two', 'j_three']) @pytest.fixture def left_multi(): return ( DataFrame( dict(Origin=['A', 'A', 'B', 'B', 'C'], Destination=['A', 'B', 'A', 'C', 'A'], Period=['AM', 'AM', 'IP', 'AM', 'OP'], TripPurp=['hbw', 'nhb', 'hbo', 'nhb', 'hbw'], Trips=[1987, 3647, 2470, 4296, 4444]), columns=['Origin', 'Destination', 'Period', 'TripPurp', 'Trips']) .set_index(['Origin', 'Destination', 'Period', 'TripPurp'])) @pytest.fixture def right_multi(): return ( DataFrame( dict(Origin=['A', 'A', 'B', 'B', 'C', 'C', 'E'], Destination=['A', 'B', 'A', 'B', 'A', 'B', 'F'], Period=['AM', 'AM', 'IP', 'AM', 'OP', 'IP', 'AM'], LinkType=['a', 'b', 'c', 'b', 'a', 'b', 'a'], Distance=[100, 80, 90, 80, 75, 35, 55]), columns=['Origin', 'Destination', 'Period', 'LinkType', 'Distance']) .set_index(['Origin', 'Destination', 'Period', 'LinkType'])) @pytest.fixture def on_cols_multi(): return ['Origin', 'Destination', 'Period'] @pytest.fixture def idx_cols_multi(): return ['Origin', 'Destination', 'Period', 'TripPurp', 'LinkType'] class TestMergeMulti: def setup_method(self): self.index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'], ['one', 'two', 'three']], codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]], names=['first', 'second']) self.to_join = DataFrame(np.random.randn(10, 3), index=self.index, columns=['j_one', 'j_two', 'j_three']) # a little relevant example with NAs key1 = ['bar', 'bar', 'bar', 'foo', 'foo', 'baz', 'baz', 'qux', 'qux', 'snap'] key2 = ['two', 'one', 'three', 'one', 'two', 'one', 'two', 'two', 'three', 'one'] data = np.random.randn(len(key1)) self.data = DataFrame({'key1': key1, 'key2': key2, 'data': data}) def test_merge_on_multikey(self, left, right, join_type): on_cols = ['key1', 'key2'] result = (left.join(right, on=on_cols, how=join_type) .reset_index(drop=True)) expected = pd.merge(left, right.reset_index(), on=on_cols, how=join_type) tm.assert_frame_equal(result, expected) result = (left.join(right, on=on_cols, how=join_type, sort=True) .reset_index(drop=True)) expected = pd.merge(left, right.reset_index(), on=on_cols, how=join_type, sort=True) tm.assert_frame_equal(result, expected) @pytest.mark.parametrize("sort", [False, True]) def test_left_join_multi_index(self, left, right, sort): icols = ['1st', '2nd', '3rd'] def bind_cols(df): iord = lambda a: 0 if a != a else ord(a) f = lambda ts: ts.map(iord) - ord('a') return (f(df['1st']) + f(df['3rd']) * 1e2 + df['2nd'].fillna(0) * 1e4) def run_asserts(left, right, sort): res = 
left.join(right, on=icols, how='left', sort=sort) assert len(left) < len(res) + 1 assert not res['4th'].isna().any() assert not res['5th'].isna().any() tm.assert_series_equal( res['4th'], - res['5th'], check_names=False) result = bind_cols(res.iloc[:, :-2]) tm.assert_series_equal(res['4th'], result, check_names=False) assert result.name is None if sort: tm.assert_frame_equal( res, res.sort_values(icols, kind='mergesort')) out = merge(left, right.reset_index(), on=icols, sort=sort, how='left') res.index = np.arange(len(res)) tm.assert_frame_equal(out, res) lc = list(map(chr, np.arange(ord('a'), ord('z') + 1))) left = DataFrame(np.random.choice(lc, (5000, 2)), columns=['1st', '3rd']) left.insert(1, '2nd', np.random.randint(0, 1000, len(left))) i = np.random.permutation(len(left)) right = left.iloc[i].copy() left['4th'] = bind_cols(left) right['5th'] = - bind_cols(right) right.set_index(icols, inplace=True) run_asserts(left, right, sort) # inject some nulls left.loc[1::23, '1st'] = np.nan left.loc[2::37, '2nd'] = np.nan left.loc[3::43, '3rd'] = np.nan left['4th'] = bind_cols(left) i = np.random.permutation(len(left)) right = left.iloc[i, :-1] right['5th'] = - bind_cols(right) right.set_index(icols, inplace=True) run_asserts(left, right, sort) @pytest.mark.parametrize("sort", [False, True]) def test_merge_right_vs_left(self, left, right, sort): # compare left vs right merge with multikey on_cols = ['key1', 'key2'] merged_left_right = left.merge(right, left_on=on_cols, right_index=True, how='left', sort=sort) merge_right_left = right.merge(left, right_on=on_cols, left_index=True, how='right', sort=sort) # Reorder columns merge_right_left = merge_right_left[merged_left_right.columns] tm.assert_frame_equal(merged_left_right, merge_right_left) def test_compress_group_combinations(self): # ~ 40000000 possible unique groups key1 = tm.rands_array(10, 10000) key1 = np.tile(key1, 2) key2 = key1[::-1] df = DataFrame({'key1': key1, 'key2': key2, 'value1': np.random.randn(20000)}) df2 = DataFrame({'key1': key1[::2], 'key2': key2[::2], 'value2': np.random.randn(10000)}) # just to hit the label compression code path merge(df, df2, how='outer') def test_left_join_index_preserve_order(self): on_cols = ['k1', 'k2'] left = DataFrame({'k1': [0, 1, 2] * 8, 'k2': ['foo', 'bar'] * 12, 'v': np.array(np.arange(24), dtype=np.int64)}) index = MultiIndex.from_tuples([(2, 'bar'), (1, 'foo')]) right = DataFrame({'v2': [5, 7]}, index=index) result = left.join(right, on=on_cols) expected = left.copy() expected['v2'] = np.nan expected.loc[(expected.k1 == 2) & (expected.k2 == 'bar'), 'v2'] = 5 expected.loc[(expected.k1 == 1) & (expected.k2 == 'foo'), 'v2'] = 7 tm.assert_frame_equal(result, expected) result.sort_values(on_cols, kind='mergesort', inplace=True) expected = left.join(right, on=on_cols, sort=True) tm.assert_frame_equal(result, expected) # test join with multi dtypes blocks left = DataFrame({'k1': [0, 1, 2] * 8, 'k2': ['foo', 'bar'] * 12, 'k3': np.array([0, 1, 2] * 8, dtype=np.float32), 'v': np.array(np.arange(24), dtype=np.int32)}) index = MultiIndex.from_tuples([(2, 'bar'), (1, 'foo')]) right = DataFrame({'v2': [5, 7]}, index=index) result = left.join(right, on=on_cols) expected = left.copy() expected['v2'] = np.nan expected.loc[(expected.k1 == 2) & (expected.k2 == 'bar'), 'v2'] = 5 expected.loc[(expected.k1 == 1) & (expected.k2 == 'foo'), 'v2'] = 7 tm.assert_frame_equal(result, expected) result = result.sort_values(on_cols, kind='mergesort') expected = left.join(right, on=on_cols, sort=True) 
tm.assert_frame_equal(result, expected) def test_left_join_index_multi_match_multiindex(self): left = DataFrame([ ['X', 'Y', 'C', 'a'], ['W', 'Y', 'C', 'e'], ['V', 'Q', 'A', 'h'], ['V', 'R', 'D', 'i'], ['X', 'Y', 'D', 'b'], ['X', 'Y', 'A', 'c'], ['W', 'Q', 'B', 'f'], ['W', 'R', 'C', 'g'], ['V', 'Y', 'C', 'j'], ['X', 'Y', 'B', 'd']], columns=['cola', 'colb', 'colc', 'tag'], index=[3, 2, 0, 1, 7, 6, 4, 5, 9, 8]) right = (DataFrame([ ['W', 'R', 'C', 0], ['W', 'Q', 'B', 3], ['W', 'Q', 'B', 8], ['X', 'Y', 'A', 1], ['X', 'Y', 'A', 4], ['X', 'Y', 'B', 5], ['X', 'Y', 'C', 6], ['X', 'Y', 'C', 9], ['X', 'Q', 'C', -6], ['X', 'R', 'C', -9], ['V', 'Y', 'C', 7], ['V', 'R', 'D', 2], ['V', 'R', 'D', -1], ['V', 'Q', 'A', -3]], columns=['col1', 'col2', 'col3', 'val']) .set_index(['col1', 'col2', 'col3'])) result = left.join(right, on=['cola', 'colb', 'colc'], how='left') expected = DataFrame([ ['X', 'Y', 'C', 'a', 6], ['X', 'Y', 'C', 'a', 9], ['W', 'Y', 'C', 'e', nan], ['V', 'Q', 'A', 'h', -3], ['V', 'R', 'D', 'i', 2], ['V', 'R', 'D', 'i', -1], ['X', 'Y', 'D', 'b', nan], ['X', 'Y', 'A', 'c', 1], ['X', 'Y', 'A', 'c', 4], ['W', 'Q', 'B', 'f', 3], ['W', 'Q', 'B', 'f', 8], ['W', 'R', 'C', 'g', 0], ['V', 'Y', 'C', 'j', 7], ['X', 'Y', 'B', 'd', 5]], columns=['cola', 'colb', 'colc', 'tag', 'val'], index=[3, 3, 2, 0, 1, 1, 7, 6, 6, 4, 4, 5, 9, 8]) tm.assert_frame_equal(result, expected) result = left.join(right, on=['cola', 'colb', 'colc'], how='left', sort=True) expected = expected.sort_values(['cola', 'colb', 'colc'], kind='mergesort') tm.assert_frame_equal(result, expected) def test_left_join_index_multi_match(self): left = DataFrame([ ['c', 0], ['b', 1], ['a', 2], ['b', 3]], columns=['tag', 'val'], index=[2, 0, 1, 3]) right = (DataFrame([ ['a', 'v'], ['c', 'w'], ['c', 'x'], ['d', 'y'], ['a', 'z'], ['c', 'r'], ['e', 'q'], ['c', 's']], columns=['tag', 'char']) .set_index('tag')) result = left.join(right, on='tag', how='left') expected = DataFrame([ ['c', 0, 'w'], ['c', 0, 'x'], ['c', 0, 'r'], ['c', 0, 's'], ['b', 1, nan], ['a', 2, 'v'], ['a', 2, 'z'], ['b', 3, nan]], columns=['tag', 'val', 'char'], index=[2, 2, 2, 2, 0, 1, 1, 3]) tm.assert_frame_equal(result, expected) result = left.join(right, on='tag', how='left', sort=True) expected2 = expected.sort_values('tag', kind='mergesort') tm.assert_frame_equal(result, expected2) # GH7331 - maintain left frame order in left merge result = merge(left, right.reset_index(), how='left', on='tag') expected.index = np.arange(len(expected)) tm.assert_frame_equal(result, expected) def test_left_merge_na_buglet(self): left = DataFrame({'id': list('abcde'), 'v1': randn(5), 'v2': randn(5), 'dummy': list('abcde'), 'v3': randn(5)}, columns=['id', 'v1', 'v2', 'dummy', 'v3']) right = DataFrame({'id': ['a', 'b', np.nan, np.nan, np.nan], 'sv3': [1.234, 5.678, np.nan, np.nan, np.nan]}) result = merge(left, right, on='id', how='left') rdf = right.drop(['id'], axis=1) expected = left.join(rdf) tm.assert_frame_equal(result, expected) def test_merge_na_keys(self): data = [[1950, "A", 1.5], [1950, "B", 1.5], [1955, "B", 1.5], [1960, "B", np.nan], [1970, "B", 4.], [1950, "C", 4.], [1960, "C", np.nan], [1965, "C", 3.], [1970, "C", 4.]] frame = DataFrame(data, columns=["year", "panel", "data"]) other_data = [[1960, 'A', np.nan], [1970, 'A', np.nan], [1955, 'A', np.nan], [1965, 'A', np.nan], [1965, 'B', np.nan], [1955, 'C', np.nan]] other = DataFrame(other_data, columns=['year', 'panel', 'data']) result = frame.merge(other, how='outer') expected = frame.fillna(-999).merge(other.fillna(-999), 
how='outer') expected = expected.replace(-999, np.nan) tm.assert_frame_equal(result, expected) @pytest.mark.parametrize("klass", [None, np.asarray, Series, Index]) def test_merge_datetime_index(self, klass): # see gh-19038 df = DataFrame([1, 2, 3], ["2016-01-01", "2017-01-01", "2018-01-01"], columns=["a"]) df.index = pd.to_datetime(df.index) on_vector = df.index.year if klass is not None: on_vector = klass(on_vector) expected = DataFrame( OrderedDict([ ("a", [1, 2, 3]), ("key_1", [2016, 2017, 2018]), ]) ) result = df.merge(df, on=["a", on_vector], how="inner") tm.assert_frame_equal(result, expected) expected = DataFrame( OrderedDict([ ("key_0", [2016, 2017, 2018]), ("a_x", [1, 2, 3]), ("a_y", [1, 2, 3]), ]) ) result = df.merge(df, on=[df.index.year], how="inner") tm.assert_frame_equal(result, expected) def test_join_multi_levels(self): # GH 3662 # merge multi-levels household = ( DataFrame( dict(household_id=[1, 2, 3], male=[0, 1, 0], wealth=[196087.3, 316478.7, 294750]), columns=['household_id', 'male', 'wealth']) .set_index('household_id')) portfolio = ( DataFrame( dict(household_id=[1, 2, 2, 3, 3, 3, 4], asset_id=["nl0000301109", "nl0000289783", "gb00b03mlx29", "gb00b03mlx29", "lu0197800237", "nl0000289965", np.nan], name=["ABN Amro", "Robeco", "Royal Dutch Shell", "Royal Dutch Shell", "AAB Eastern Europe Equity Fund", "Postbank BioTech Fonds", np.nan], share=[1.0, 0.4, 0.6, 0.15, 0.6, 0.25, 1.0]), columns=['household_id', 'asset_id', 'name', 'share']) .set_index(['household_id', 'asset_id'])) result = household.join(portfolio, how='inner') expected = ( DataFrame( dict(male=[0, 1, 1, 0, 0, 0], wealth=[196087.3, 316478.7, 316478.7, 294750.0, 294750.0, 294750.0], name=['ABN Amro', 'Robeco', 'Royal Dutch Shell', 'Royal Dutch Shell', 'AAB Eastern Europe Equity Fund', 'Postbank BioTech Fonds'], share=[1.00, 0.40, 0.60, 0.15, 0.60, 0.25], household_id=[1, 2, 2, 3, 3, 3], asset_id=['nl0000301109', 'nl0000289783', 'gb00b03mlx29', 'gb00b03mlx29', 'lu0197800237', 'nl0000289965'])) .set_index(['household_id', 'asset_id']) .reindex(columns=['male', 'wealth', 'name', 'share'])) tm.assert_frame_equal(result, expected) # equivalency result = (merge(household.reset_index(), portfolio.reset_index(), on=['household_id'], how='inner') .set_index(['household_id', 'asset_id'])) tm.assert_frame_equal(result, expected) result = household.join(portfolio, how='outer') expected = (concat([ expected, (DataFrame( dict(share=[1.00]), index=MultiIndex.from_tuples( [(4, np.nan)], names=['household_id', 'asset_id']))) ], axis=0, sort=True).reindex(columns=expected.columns)) tm.assert_frame_equal(result, expected) # invalid cases household.index.name = 'foo' with pytest.raises(ValueError): household.join(portfolio, how='inner') portfolio2 = portfolio.copy() portfolio2.index.set_names(['household_id', 'foo']) with pytest.raises(ValueError): portfolio2.join(portfolio, how='inner') def test_join_multi_levels2(self): # some more advanced merges # GH6360 household = ( DataFrame( dict(household_id=[1, 2, 2, 3, 3, 3, 4], asset_id=["nl0000301109", "nl0000301109", "gb00b03mlx29", "gb00b03mlx29", "lu0197800237", "nl0000289965", np.nan], share=[1.0, 0.4, 0.6, 0.15, 0.6, 0.25, 1.0]), columns=['household_id', 'asset_id', 'share']) .set_index(['household_id', 'asset_id'])) log_return = DataFrame(dict( asset_id=["gb00b03mlx29", "gb00b03mlx29", "gb00b03mlx29", "lu0197800237", "lu0197800237"], t=[233, 234, 235, 180, 181], log_return=[.09604978, -.06524096, .03532373, .03025441, .036997] )).set_index(["asset_id", "t"]) expected = ( 
DataFrame(dict( household_id=[2, 2, 2, 3, 3, 3, 3, 3], asset_id=["gb00b03mlx29", "gb00b03mlx29", "gb00b03mlx29", "gb00b03mlx29", "gb00b03mlx29", "gb00b03mlx29", "lu0197800237", "lu0197800237"], t=[233, 234, 235, 233, 234, 235, 180, 181], share=[0.6, 0.6, 0.6, 0.15, 0.15, 0.15, 0.6, 0.6], log_return=[.09604978, -.06524096, .03532373, .09604978, -.06524096, .03532373, .03025441, .036997] )) .set_index(["household_id", "asset_id", "t"]) .reindex(columns=['share', 'log_return'])) # this is the equivalency result = (merge(household.reset_index(), log_return.reset_index(), on=['asset_id'], how='inner') .set_index(['household_id', 'asset_id', 't'])) tm.assert_frame_equal(result, expected) expected = ( DataFrame(dict( household_id=[1, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 4], asset_id=["nl0000301109", "nl0000301109", "gb00b03mlx29", "gb00b03mlx29", "gb00b03mlx29", "gb00b03mlx29", "gb00b03mlx29", "gb00b03mlx29", "lu0197800237", "lu0197800237", "nl0000289965", None], t=[None, None, 233, 234, 235, 233, 234, 235, 180, 181, None, None], share=[1.0, 0.4, 0.6, 0.6, 0.6, 0.15, 0.15, 0.15, 0.6, 0.6, 0.25, 1.0], log_return=[None, None, .09604978, -.06524096, .03532373, .09604978, -.06524096, .03532373, .03025441, .036997, None, None] )) .set_index(["household_id", "asset_id", "t"]) .reindex(columns=['share', 'log_return'])) result = (merge(household.reset_index(), log_return.reset_index(), on=['asset_id'], how='outer') .set_index(['household_id', 'asset_id', 't'])) tm.assert_frame_equal(result, expected) class TestJoinMultiMulti: def test_join_multi_multi(self, left_multi, right_multi, join_type, on_cols_multi, idx_cols_multi): # Multi-index join tests expected = (pd.merge(left_multi.reset_index(), right_multi.reset_index(), how=join_type, on=on_cols_multi). set_index(idx_cols_multi).sort_index()) result = left_multi.join(right_multi, how=join_type).sort_index() tm.assert_frame_equal(result, expected) def test_join_multi_empty_frames(self, left_multi, right_multi, join_type, on_cols_multi, idx_cols_multi): left_multi = left_multi.drop(columns=left_multi.columns) right_multi = right_multi.drop(columns=right_multi.columns) expected = (pd.merge(left_multi.reset_index(), right_multi.reset_index(), how=join_type, on=on_cols_multi) .set_index(idx_cols_multi).sort_index()) result = left_multi.join(right_multi, how=join_type).sort_index() tm.assert_frame_equal(result, expected) @pytest.mark.parametrize("box", [None, np.asarray, Series, Index]) def test_merge_datetime_index(self, box): # see gh-19038 df = DataFrame([1, 2, 3], ["2016-01-01", "2017-01-01", "2018-01-01"], columns=["a"]) df.index = pd.to_datetime(df.index) on_vector = df.index.year if box is not None: on_vector = box(on_vector) expected = DataFrame( OrderedDict([ ("a", [1, 2, 3]), ("key_1", [2016, 2017, 2018]), ]) ) result = df.merge(df, on=["a", on_vector], how="inner") tm.assert_frame_equal(result, expected) expected = DataFrame( OrderedDict([ ("key_0", [2016, 2017, 2018]), ("a_x", [1, 2, 3]), ("a_y", [1, 2, 3]), ]) ) result = df.merge(df, on=[df.index.year], how="inner") tm.assert_frame_equal(result, expected) def test_single_common_level(self): index_left = pd.MultiIndex.from_tuples([('K0', 'X0'), ('K0', 'X1'), ('K1', 'X2')], names=['key', 'X']) left = pd.DataFrame({'A': ['A0', 'A1', 'A2'], 'B': ['B0', 'B1', 'B2']}, index=index_left) index_right = pd.MultiIndex.from_tuples([('K0', 'Y0'), ('K1', 'Y1'), ('K2', 'Y2'), ('K2', 'Y3')], names=['key', 'Y']) right = pd.DataFrame({'C': ['C0', 'C1', 'C2', 'C3'], 'D': ['D0', 'D1', 'D2', 'D3']}, index=index_right) 
        result = left.join(right)
        expected = (pd.merge(left.reset_index(), right.reset_index(),
                             on=['key'], how='inner')
                    .set_index(['key', 'X', 'Y']))
        tm.assert_frame_equal(result, expected)
cbertinato/pandas
pandas/tests/reshape/merge/test_multi.py
pandas/tests/indexes/multi/conftest.py
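The conftest fixtures above build MultiIndex objects directly from levels and integer codes. A short illustration of that construction, assuming nothing beyond plain pandas; the printed tuples are what the idx fixture's first and last positions resolve to:

import numpy as np
from pandas import Index, MultiIndex

major_axis = Index(['foo', 'bar', 'baz', 'qux'])
minor_axis = Index(['one', 'two'])
major_codes = np.array([0, 0, 1, 2, 3, 3])
minor_codes = np.array([0, 1, 0, 1, 0, 1])

mi = MultiIndex(levels=[major_axis, minor_axis],
                codes=[major_codes, minor_codes],
                names=['first', 'second'],
                verify_integrity=False)

# Position i corresponds to (major_axis[major_codes[i]], minor_axis[minor_codes[i]]).
print(mi[0])   # ('foo', 'one')
print(mi[-1])  # ('qux', 'two')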
import numpy as np import pytest from pandas._libs.tslib import iNaT from pandas.core.dtypes.dtypes import CategoricalDtype import pandas as pd from pandas import ( CategoricalIndex, DatetimeIndex, Index, Int64Index, IntervalIndex, MultiIndex, PeriodIndex, RangeIndex, Series, TimedeltaIndex, UInt64Index, isna) from pandas.core.indexes.base import InvalidIndexError from pandas.core.indexes.datetimelike import DatetimeIndexOpsMixin import pandas.util.testing as tm class Base: """ base class for index sub-class tests """ _holder = None _compat_props = ['shape', 'ndim', 'size', 'nbytes'] def setup_indices(self): for name, idx in self.indices.items(): setattr(self, name, idx) def test_pickle_compat_construction(self): # need an object to create with msg = (r"Index\(\.\.\.\) must be called with a collection of some" r" kind, None was passed|" r"__new__\(\) missing 1 required positional argument: 'data'|" r"__new__\(\) takes at least 2 arguments \(1 given\)") with pytest.raises(TypeError, match=msg): self._holder() def test_to_series(self): # assert that we are creating a copy of the index idx = self.create_index() s = idx.to_series() assert s.values is not idx.values assert s.index is not idx assert s.name == idx.name def test_to_series_with_arguments(self): # GH18699 # index kwarg idx = self.create_index() s = idx.to_series(index=idx) assert s.values is not idx.values assert s.index is idx assert s.name == idx.name # name kwarg idx = self.create_index() s = idx.to_series(name='__test') assert s.values is not idx.values assert s.index is not idx assert s.name != idx.name @pytest.mark.parametrize("name", [None, "new_name"]) def test_to_frame(self, name): # see GH-15230, GH-22580 idx = self.create_index() if name: idx_name = name else: idx_name = idx.name or 0 df = idx.to_frame(name=idx_name) assert df.index is idx assert len(df.columns) == 1 assert df.columns[0] == idx_name assert df[idx_name].values is not idx.values df = idx.to_frame(index=False, name=idx_name) assert df.index is not idx def test_to_frame_datetime_tz(self): # GH 25809 idx = pd.date_range(start='2019-01-01', end='2019-01-30', freq='D') idx = idx.tz_localize('UTC') result = idx.to_frame() expected = pd.DataFrame(idx, index=idx) tm.assert_frame_equal(result, expected) def test_shift(self): # GH8083 test the base class for shift idx = self.create_index() msg = "Not supported for type {}".format(type(idx).__name__) with pytest.raises(NotImplementedError, match=msg): idx.shift(1) with pytest.raises(NotImplementedError, match=msg): idx.shift(1, 2) def test_create_index_existing_name(self): # GH11193, when an existing index is passed, and a new name is not # specified, the new index should inherit the previous object name expected = self.create_index() if not isinstance(expected, MultiIndex): expected.name = 'foo' result = pd.Index(expected) tm.assert_index_equal(result, expected) result = pd.Index(expected, name='bar') expected.name = 'bar' tm.assert_index_equal(result, expected) else: expected.names = ['foo', 'bar'] result = pd.Index(expected) tm.assert_index_equal( result, Index(Index([('foo', 'one'), ('foo', 'two'), ('bar', 'one'), ('baz', 'two'), ('qux', 'one'), ('qux', 'two')], dtype='object'), names=['foo', 'bar'])) result = pd.Index(expected, names=['A', 'B']) tm.assert_index_equal( result, Index(Index([('foo', 'one'), ('foo', 'two'), ('bar', 'one'), ('baz', 'two'), ('qux', 'one'), ('qux', 'two')], dtype='object'), names=['A', 'B'])) def test_numeric_compat(self): idx = self.create_index() with pytest.raises(TypeError, 
match="cannot perform __mul__"): idx * 1 with pytest.raises(TypeError, match="cannot perform __rmul__"): 1 * idx div_err = "cannot perform __truediv__" with pytest.raises(TypeError, match=div_err): idx / 1 div_err = div_err.replace(' __', ' __r') with pytest.raises(TypeError, match=div_err): 1 / idx with pytest.raises(TypeError, match="cannot perform __floordiv__"): idx // 1 with pytest.raises(TypeError, match="cannot perform __rfloordiv__"): 1 // idx def test_logical_compat(self): idx = self.create_index() with pytest.raises(TypeError, match='cannot perform all'): idx.all() with pytest.raises(TypeError, match='cannot perform any'): idx.any() def test_boolean_context_compat(self): # boolean context compat idx = self.create_index() with pytest.raises(ValueError, match='The truth value of a'): if idx: pass def test_reindex_base(self): idx = self.create_index() expected = np.arange(idx.size, dtype=np.intp) actual = idx.get_indexer(idx) tm.assert_numpy_array_equal(expected, actual) with pytest.raises(ValueError, match='Invalid fill method'): idx.get_indexer(idx, method='invalid') def test_get_indexer_consistency(self): # See GH 16819 for name, index in self.indices.items(): if isinstance(index, IntervalIndex): continue if index.is_unique or isinstance(index, CategoricalIndex): indexer = index.get_indexer(index[0:2]) assert isinstance(indexer, np.ndarray) assert indexer.dtype == np.intp else: e = "Reindexing only valid with uniquely valued Index objects" with pytest.raises(InvalidIndexError, match=e): index.get_indexer(index[0:2]) indexer, _ = index.get_indexer_non_unique(index[0:2]) assert isinstance(indexer, np.ndarray) assert indexer.dtype == np.intp def test_ndarray_compat_properties(self): idx = self.create_index() assert idx.T.equals(idx) assert idx.transpose().equals(idx) values = idx.values for prop in self._compat_props: assert getattr(idx, prop) == getattr(values, prop) # test for validity idx.nbytes idx.values.nbytes def test_repr_roundtrip(self): idx = self.create_index() tm.assert_index_equal(eval(repr(idx)), idx) def test_str(self): # test the string repr idx = self.create_index() idx.name = 'foo' assert "'foo'" in str(idx) assert idx.__class__.__name__ in str(idx) def test_repr_max_seq_item_setting(self): # GH10182 idx = self.create_index() idx = idx.repeat(50) with pd.option_context("display.max_seq_items", None): repr(idx) assert '...' not in str(idx) def test_copy_name(self): # gh-12309: Check that the "name" argument # passed at initialization is honored. for name, index in self.indices.items(): if isinstance(index, MultiIndex): continue first = index.__class__(index, copy=True, name='mario') second = first.__class__(first, copy=False) # Even though "copy=False", we want a new object. assert first is not second # Not using tm.assert_index_equal() since names differ. 
assert index.equals(first) assert first.name == 'mario' assert second.name == 'mario' s1 = Series(2, index=first) s2 = Series(3, index=second[:-1]) if not isinstance(index, CategoricalIndex): # See gh-13365 s3 = s1 * s2 assert s3.index.name == 'mario' def test_ensure_copied_data(self): # Check the "copy" argument of each Index.__new__ is honoured # GH12309 for name, index in self.indices.items(): init_kwargs = {} if isinstance(index, PeriodIndex): # Needs "freq" specification: init_kwargs['freq'] = index.freq elif isinstance(index, (RangeIndex, MultiIndex, CategoricalIndex)): # RangeIndex cannot be initialized from data # MultiIndex and CategoricalIndex are tested separately continue index_type = index.__class__ result = index_type(index.values, copy=True, **init_kwargs) tm.assert_index_equal(index, result) tm.assert_numpy_array_equal(index._ndarray_values, result._ndarray_values, check_same='copy') if isinstance(index, PeriodIndex): # .values an object array of Period, thus copied result = index_type(ordinal=index.asi8, copy=False, **init_kwargs) tm.assert_numpy_array_equal(index._ndarray_values, result._ndarray_values, check_same='same') elif isinstance(index, IntervalIndex): # checked in test_interval.py pass else: result = index_type(index.values, copy=False, **init_kwargs) tm.assert_numpy_array_equal(index.values, result.values, check_same='same') tm.assert_numpy_array_equal(index._ndarray_values, result._ndarray_values, check_same='same') def test_memory_usage(self): for name, index in self.indices.items(): result = index.memory_usage() if len(index): index.get_loc(index[0]) result2 = index.memory_usage() result3 = index.memory_usage(deep=True) # RangeIndex, IntervalIndex # don't have engines if not isinstance(index, (RangeIndex, IntervalIndex)): assert result2 > result if index.inferred_type == 'object': assert result3 > result2 else: # we report 0 for no-length assert result == 0 def test_argsort(self): for k, ind in self.indices.items(): # separately tested if k in ['catIndex']: continue result = ind.argsort() expected = np.array(ind).argsort() tm.assert_numpy_array_equal(result, expected, check_dtype=False) def test_numpy_argsort(self): for k, ind in self.indices.items(): result = np.argsort(ind) expected = ind.argsort() tm.assert_numpy_array_equal(result, expected) # these are the only two types that perform # pandas compatibility input validation - the # rest already perform separate (or no) such # validation via their 'values' attribute as # defined in pandas.core.indexes/base.py - they # cannot be changed at the moment due to # backwards compatibility concerns if isinstance(type(ind), (CategoricalIndex, RangeIndex)): msg = "the 'axis' parameter is not supported" with pytest.raises(ValueError, match=msg): np.argsort(ind, axis=1) msg = "the 'kind' parameter is not supported" with pytest.raises(ValueError, match=msg): np.argsort(ind, kind='mergesort') msg = "the 'order' parameter is not supported" with pytest.raises(ValueError, match=msg): np.argsort(ind, order=('a', 'b')) def test_take(self): indexer = [4, 3, 0, 2] for k, ind in self.indices.items(): # separate if k in ['boolIndex', 'tuples', 'empty']: continue result = ind.take(indexer) expected = ind[indexer] assert result.equals(expected) if not isinstance(ind, (DatetimeIndex, PeriodIndex, TimedeltaIndex)): # GH 10791 with pytest.raises(AttributeError): ind.freq def test_take_invalid_kwargs(self): idx = self.create_index() indices = [1, 2] msg = r"take\(\) got an unexpected keyword argument 'foo'" with 
pytest.raises(TypeError, match=msg): idx.take(indices, foo=2) msg = "the 'out' parameter is not supported" with pytest.raises(ValueError, match=msg): idx.take(indices, out=indices) msg = "the 'mode' parameter is not supported" with pytest.raises(ValueError, match=msg): idx.take(indices, mode='clip') def test_repeat(self): rep = 2 i = self.create_index() expected = pd.Index(i.values.repeat(rep), name=i.name) tm.assert_index_equal(i.repeat(rep), expected) i = self.create_index() rep = np.arange(len(i)) expected = pd.Index(i.values.repeat(rep), name=i.name) tm.assert_index_equal(i.repeat(rep), expected) def test_numpy_repeat(self): rep = 2 i = self.create_index() expected = i.repeat(rep) tm.assert_index_equal(np.repeat(i, rep), expected) msg = "the 'axis' parameter is not supported" with pytest.raises(ValueError, match=msg): np.repeat(i, rep, axis=0) @pytest.mark.parametrize('klass', [list, tuple, np.array, Series]) def test_where(self, klass): i = self.create_index() cond = [True] * len(i) result = i.where(klass(cond)) expected = i tm.assert_index_equal(result, expected) cond = [False] + [True] * len(i[1:]) expected = pd.Index([i._na_value] + i[1:].tolist(), dtype=i.dtype) result = i.where(klass(cond)) tm.assert_index_equal(result, expected) @pytest.mark.parametrize("case", [0.5, "xxx"]) @pytest.mark.parametrize("method", ["intersection", "union", "difference", "symmetric_difference"]) def test_set_ops_error_cases(self, case, method): for name, idx in self.indices.items(): # non-iterable input msg = "Input must be Index or array-like" with pytest.raises(TypeError, match=msg): getattr(idx, method)(case) def test_intersection_base(self): for name, idx in self.indices.items(): first = idx[:5] second = idx[:3] intersect = first.intersection(second) if isinstance(idx, CategoricalIndex): pass else: assert tm.equalContents(intersect, second) # GH 10149 cases = [klass(second.values) for klass in [np.array, Series, list]] for case in cases: if isinstance(idx, CategoricalIndex): pass else: result = first.intersection(case) assert tm.equalContents(result, second) if isinstance(idx, MultiIndex): msg = "other must be a MultiIndex or a list of tuples" with pytest.raises(TypeError, match=msg): first.intersection([1, 2, 3]) def test_union_base(self): for name, idx in self.indices.items(): first = idx[3:] second = idx[:5] everything = idx union = first.union(second) assert tm.equalContents(union, everything) # GH 10149 cases = [klass(second.values) for klass in [np.array, Series, list]] for case in cases: if isinstance(idx, CategoricalIndex): pass else: result = first.union(case) assert tm.equalContents(result, everything) if isinstance(idx, MultiIndex): msg = "other must be a MultiIndex or a list of tuples" with pytest.raises(TypeError, match=msg): first.union([1, 2, 3]) @pytest.mark.parametrize("sort", [None, False]) def test_difference_base(self, sort): for name, idx in self.indices.items(): first = idx[2:] second = idx[:4] answer = idx[4:] result = first.difference(second, sort) if isinstance(idx, CategoricalIndex): pass else: assert tm.equalContents(result, answer) # GH 10149 cases = [klass(second.values) for klass in [np.array, Series, list]] for case in cases: if isinstance(idx, CategoricalIndex): pass elif isinstance(idx, (DatetimeIndex, TimedeltaIndex)): assert result.__class__ == answer.__class__ tm.assert_numpy_array_equal(result.sort_values().asi8, answer.sort_values().asi8) else: result = first.difference(case, sort) assert tm.equalContents(result, answer) if isinstance(idx, MultiIndex): msg = 
"other must be a MultiIndex or a list of tuples" with pytest.raises(TypeError, match=msg): first.difference([1, 2, 3], sort) def test_symmetric_difference(self): for name, idx in self.indices.items(): first = idx[1:] second = idx[:-1] if isinstance(idx, CategoricalIndex): pass else: answer = idx[[0, -1]] result = first.symmetric_difference(second) assert tm.equalContents(result, answer) # GH 10149 cases = [klass(second.values) for klass in [np.array, Series, list]] for case in cases: if isinstance(idx, CategoricalIndex): pass else: result = first.symmetric_difference(case) assert tm.equalContents(result, answer) if isinstance(idx, MultiIndex): msg = "other must be a MultiIndex or a list of tuples" with pytest.raises(TypeError, match=msg): first.symmetric_difference([1, 2, 3]) def test_insert_base(self): for name, idx in self.indices.items(): result = idx[1:4] if not len(idx): continue # test 0th element assert idx[0:4].equals(result.insert(0, idx[0])) def test_delete_base(self): for name, idx in self.indices.items(): if not len(idx): continue if isinstance(idx, RangeIndex): # tested in class continue expected = idx[1:] result = idx.delete(0) assert result.equals(expected) assert result.name == expected.name expected = idx[:-1] result = idx.delete(-1) assert result.equals(expected) assert result.name == expected.name with pytest.raises((IndexError, ValueError)): # either depending on numpy version idx.delete(len(idx)) def test_equals(self): for name, idx in self.indices.items(): assert idx.equals(idx) assert idx.equals(idx.copy()) assert idx.equals(idx.astype(object)) assert not idx.equals(list(idx)) assert not idx.equals(np.array(idx)) # Cannot pass in non-int64 dtype to RangeIndex if not isinstance(idx, RangeIndex): same_values = Index(idx, dtype=object) assert idx.equals(same_values) assert same_values.equals(idx) if idx.nlevels == 1: # do not test MultiIndex assert not idx.equals(pd.Series(idx)) def test_equals_op(self): # GH9947, GH10637 index_a = self.create_index() if isinstance(index_a, PeriodIndex): pytest.skip('Skip check for PeriodIndex') n = len(index_a) index_b = index_a[0:-1] index_c = index_a[0:-1].append(index_a[-2:-1]) index_d = index_a[0:1] msg = "Lengths must match|could not be broadcast" with pytest.raises(ValueError, match=msg): index_a == index_b expected1 = np.array([True] * n) expected2 = np.array([True] * (n - 1) + [False]) tm.assert_numpy_array_equal(index_a == index_a, expected1) tm.assert_numpy_array_equal(index_a == index_c, expected2) # test comparisons with numpy arrays array_a = np.array(index_a) array_b = np.array(index_a[0:-1]) array_c = np.array(index_a[0:-1].append(index_a[-2:-1])) array_d = np.array(index_a[0:1]) with pytest.raises(ValueError, match=msg): index_a == array_b tm.assert_numpy_array_equal(index_a == array_a, expected1) tm.assert_numpy_array_equal(index_a == array_c, expected2) # test comparisons with Series series_a = Series(array_a) series_b = Series(array_b) series_c = Series(array_c) series_d = Series(array_d) with pytest.raises(ValueError, match=msg): index_a == series_b tm.assert_numpy_array_equal(index_a == series_a, expected1) tm.assert_numpy_array_equal(index_a == series_c, expected2) # cases where length is 1 for one of them with pytest.raises(ValueError, match="Lengths must match"): index_a == index_d with pytest.raises(ValueError, match="Lengths must match"): index_a == series_d with pytest.raises(ValueError, match="Lengths must match"): index_a == array_d msg = "Can only compare identically-labeled Series objects" with 
pytest.raises(ValueError, match=msg): series_a == series_d with pytest.raises(ValueError, match="Lengths must match"): series_a == array_d # comparing with a scalar should broadcast; note that we are excluding # MultiIndex because in this case each item in the index is a tuple of # length 2, and therefore is considered an array of length 2 in the # comparison instead of a scalar if not isinstance(index_a, MultiIndex): expected3 = np.array([False] * (len(index_a) - 2) + [True, False]) # assuming the 2nd to last item is unique in the data item = index_a[-2] tm.assert_numpy_array_equal(index_a == item, expected3) tm.assert_series_equal(series_a == item, Series(expected3)) def test_hasnans_isnans(self): # GH 11343, added tests for hasnans / isnans for name, index in self.indices.items(): if isinstance(index, MultiIndex): pass else: idx = index.copy() # cases in indices doesn't include NaN expected = np.array([False] * len(idx), dtype=bool) tm.assert_numpy_array_equal(idx._isnan, expected) assert idx.hasnans is False idx = index.copy() values = np.asarray(idx.values) if len(index) == 0: continue elif isinstance(index, DatetimeIndexOpsMixin): values[1] = iNaT elif isinstance(index, (Int64Index, UInt64Index)): continue else: values[1] = np.nan if isinstance(index, PeriodIndex): idx = index.__class__(values, freq=index.freq) else: idx = index.__class__(values) expected = np.array([False] * len(idx), dtype=bool) expected[1] = True tm.assert_numpy_array_equal(idx._isnan, expected) assert idx.hasnans is True def test_fillna(self): # GH 11343 for name, index in self.indices.items(): if len(index) == 0: pass elif isinstance(index, MultiIndex): idx = index.copy() msg = "isna is not defined for MultiIndex" with pytest.raises(NotImplementedError, match=msg): idx.fillna(idx[0]) else: idx = index.copy() result = idx.fillna(idx[0]) tm.assert_index_equal(result, idx) assert result is not idx msg = "'value' must be a scalar, passed: " with pytest.raises(TypeError, match=msg): idx.fillna([idx[0]]) idx = index.copy() values = np.asarray(idx.values) if isinstance(index, DatetimeIndexOpsMixin): values[1] = iNaT elif isinstance(index, (Int64Index, UInt64Index)): continue else: values[1] = np.nan if isinstance(index, PeriodIndex): idx = index.__class__(values, freq=index.freq) else: idx = index.__class__(values) expected = np.array([False] * len(idx), dtype=bool) expected[1] = True tm.assert_numpy_array_equal(idx._isnan, expected) assert idx.hasnans is True def test_nulls(self): # this is really a smoke test for the methods # as these are adequately tested for function elsewhere for name, index in self.indices.items(): if len(index) == 0: tm.assert_numpy_array_equal( index.isna(), np.array([], dtype=bool)) elif isinstance(index, MultiIndex): idx = index.copy() msg = "isna is not defined for MultiIndex" with pytest.raises(NotImplementedError, match=msg): idx.isna() else: if not index.hasnans: tm.assert_numpy_array_equal( index.isna(), np.zeros(len(index), dtype=bool)) tm.assert_numpy_array_equal( index.notna(), np.ones(len(index), dtype=bool)) else: result = isna(index) tm.assert_numpy_array_equal(index.isna(), result) tm.assert_numpy_array_equal(index.notna(), ~result) def test_empty(self): # GH 15270 index = self.create_index() assert not index.empty assert index[:0].empty def test_join_self_unique(self, join_type): index = self.create_index() if index.is_unique: joined = index.join(index, how=join_type) assert (index == joined).all() def test_map(self): # callable index = self.create_index() # we don't infer 
UInt64 if isinstance(index, pd.UInt64Index): expected = index.astype('int64') else: expected = index result = index.map(lambda x: x) tm.assert_index_equal(result, expected) @pytest.mark.parametrize( "mapper", [ lambda values, index: {i: e for e, i in zip(values, index)}, lambda values, index: pd.Series(values, index)]) def test_map_dictlike(self, mapper): index = self.create_index() if isinstance(index, (pd.CategoricalIndex, pd.IntervalIndex)): pytest.skip("skipping tests for {}".format(type(index))) identity = mapper(index.values, index) # we don't infer to UInt64 for a dict if isinstance(index, pd.UInt64Index) and isinstance(identity, dict): expected = index.astype('int64') else: expected = index result = index.map(identity) tm.assert_index_equal(result, expected) # empty mappable expected = pd.Index([np.nan] * len(index)) result = index.map(mapper(expected, index)) tm.assert_index_equal(result, expected) def test_putmask_with_wrong_mask(self): # GH18368 index = self.create_index() with pytest.raises(ValueError): index.putmask(np.ones(len(index) + 1, np.bool), 1) with pytest.raises(ValueError): index.putmask(np.ones(len(index) - 1, np.bool), 1) with pytest.raises(ValueError): index.putmask('foo', 1) @pytest.mark.parametrize('copy', [True, False]) @pytest.mark.parametrize('name', [None, 'foo']) @pytest.mark.parametrize('ordered', [True, False]) def test_astype_category(self, copy, name, ordered): # GH 18630 index = self.create_index() if name: index = index.rename(name) # standard categories dtype = CategoricalDtype(ordered=ordered) result = index.astype(dtype, copy=copy) expected = CategoricalIndex(index.values, name=name, ordered=ordered) tm.assert_index_equal(result, expected) # non-standard categories dtype = CategoricalDtype(index.unique().tolist()[:-1], ordered) result = index.astype(dtype, copy=copy) expected = CategoricalIndex(index.values, name=name, dtype=dtype) tm.assert_index_equal(result, expected) if ordered is False: # dtype='category' defaults to ordered=False, so only test once result = index.astype('category', copy=copy) expected = CategoricalIndex(index.values, name=name) tm.assert_index_equal(result, expected) def test_is_unique(self): # initialize a unique index index = self.create_index().drop_duplicates() assert index.is_unique is True # empty index should be unique index_empty = index[:0] assert index_empty.is_unique is True # test basic dupes index_dup = index.insert(0, index[0]) assert index_dup.is_unique is False # single NA should be unique index_na = index.insert(0, np.nan) assert index_na.is_unique is True # multiple NA should not be unique index_na_dup = index_na.insert(0, np.nan) assert index_na_dup.is_unique is False
repo_name: cbertinato/pandas
test_path: pandas/tests/reshape/merge/test_multi.py
code_path: pandas/tests/indexes/common.py
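The household/portfolio tests above hinge on one identity: joining a frame on overlapping index level names gives the same result as reset_index + merge + set_index. A minimal runnable sketch of that identity follows; the frames and values here are made up for illustration and are not taken from the test suite.

import pandas as pd

household = pd.DataFrame(
    {'household_id': [1, 2], 'wealth': [196087.3, 316478.7]}
).set_index('household_id')
portfolio = pd.DataFrame(
    {'household_id': [1, 2, 2],
     'asset_id': ['a1', 'a2', 'a3'],
     'share': [1.0, 0.4, 0.6]}
).set_index(['household_id', 'asset_id'])

# Join on the shared 'household_id' level of the two indexes ...
joined = household.join(portfolio, how='inner')

# ... and the same operation spelled out with merge, as in the tests above.
merged = (pd.merge(household.reset_index(), portfolio.reset_index(),
                   on=['household_id'], how='inner')
          .set_index(['household_id', 'asset_id']))

pd.testing.assert_frame_equal(joined, merged)  # should not raise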
import numpy as np from pandas._libs import algos as libalgos, index as libindex import pandas.util.testing as tm class TestNumericEngine: def test_is_monotonic(self, numeric_indexing_engine_type_and_dtype): engine_type, dtype = numeric_indexing_engine_type_and_dtype num = 1000 arr = np.array([1] * num + [2] * num + [3] * num, dtype=dtype) # monotonic increasing engine = engine_type(lambda: arr, len(arr)) assert engine.is_monotonic_increasing is True assert engine.is_monotonic_decreasing is False # monotonic decreasing engine = engine_type(lambda: arr[::-1], len(arr)) assert engine.is_monotonic_increasing is False assert engine.is_monotonic_decreasing is True # neither monotonic increasing or decreasing arr = np.array([1] * num + [2] * num + [1] * num, dtype=dtype) engine = engine_type(lambda: arr[::-1], len(arr)) assert engine.is_monotonic_increasing is False assert engine.is_monotonic_decreasing is False def test_is_unique(self, numeric_indexing_engine_type_and_dtype): engine_type, dtype = numeric_indexing_engine_type_and_dtype # unique arr = np.array([1, 3, 2], dtype=dtype) engine = engine_type(lambda: arr, len(arr)) assert engine.is_unique is True # not unique arr = np.array([1, 2, 1], dtype=dtype) engine = engine_type(lambda: arr, len(arr)) assert engine.is_unique is False def test_get_loc(self, numeric_indexing_engine_type_and_dtype): engine_type, dtype = numeric_indexing_engine_type_and_dtype # unique arr = np.array([1, 2, 3], dtype=dtype) engine = engine_type(lambda: arr, len(arr)) assert engine.get_loc(2) == 1 # monotonic num = 1000 arr = np.array([1] * num + [2] * num + [3] * num, dtype=dtype) engine = engine_type(lambda: arr, len(arr)) assert engine.get_loc(2) == slice(1000, 2000) # not monotonic arr = np.array([1, 2, 3] * num, dtype=dtype) engine = engine_type(lambda: arr, len(arr)) expected = np.array([False, True, False] * num, dtype=bool) result = engine.get_loc(2) assert (result == expected).all() def test_get_backfill_indexer( self, numeric_indexing_engine_type_and_dtype): engine_type, dtype = numeric_indexing_engine_type_and_dtype arr = np.array([1, 5, 10], dtype=dtype) engine = engine_type(lambda: arr, len(arr)) new = np.arange(12, dtype=dtype) result = engine.get_backfill_indexer(new) expected = libalgos.backfill(arr, new) tm.assert_numpy_array_equal(result, expected) def test_get_pad_indexer( self, numeric_indexing_engine_type_and_dtype): engine_type, dtype = numeric_indexing_engine_type_and_dtype arr = np.array([1, 5, 10], dtype=dtype) engine = engine_type(lambda: arr, len(arr)) new = np.arange(12, dtype=dtype) result = engine.get_pad_indexer(new) expected = libalgos.pad(arr, new) tm.assert_numpy_array_equal(result, expected) class TestObjectEngine: engine_type = libindex.ObjectEngine dtype = np.object_ values = list('abc') def test_is_monotonic(self): num = 1000 arr = np.array(['a'] * num + ['a'] * num + ['c'] * num, dtype=self.dtype) # monotonic increasing engine = self.engine_type(lambda: arr, len(arr)) assert engine.is_monotonic_increasing is True assert engine.is_monotonic_decreasing is False # monotonic decreasing engine = self.engine_type(lambda: arr[::-1], len(arr)) assert engine.is_monotonic_increasing is False assert engine.is_monotonic_decreasing is True # neither monotonic increasing or decreasing arr = np.array(['a'] * num + ['b'] * num + ['a'] * num, dtype=self.dtype) engine = self.engine_type(lambda: arr[::-1], len(arr)) assert engine.is_monotonic_increasing is False assert engine.is_monotonic_decreasing is False def test_is_unique(self): # unique arr = 
np.array(self.values, dtype=self.dtype) engine = self.engine_type(lambda: arr, len(arr)) assert engine.is_unique is True # not unique arr = np.array(['a', 'b', 'a'], dtype=self.dtype) engine = self.engine_type(lambda: arr, len(arr)) assert engine.is_unique is False def test_get_loc(self): # unique arr = np.array(self.values, dtype=self.dtype) engine = self.engine_type(lambda: arr, len(arr)) assert engine.get_loc('b') == 1 # monotonic num = 1000 arr = np.array(['a'] * num + ['b'] * num + ['c'] * num, dtype=self.dtype) engine = self.engine_type(lambda: arr, len(arr)) assert engine.get_loc('b') == slice(1000, 2000) # not monotonic arr = np.array(self.values * num, dtype=self.dtype) engine = self.engine_type(lambda: arr, len(arr)) expected = np.array([False, True, False] * num, dtype=bool) result = engine.get_loc('b') assert (result == expected).all() def test_get_backfill_indexer(self): arr = np.array(['a', 'e', 'j'], dtype=self.dtype) engine = self.engine_type(lambda: arr, len(arr)) new = np.array(list('abcdefghij'), dtype=self.dtype) result = engine.get_backfill_indexer(new) expected = libalgos.backfill["object"](arr, new) tm.assert_numpy_array_equal(result, expected) def test_get_pad_indexer(self): arr = np.array(['a', 'e', 'j'], dtype=self.dtype) engine = self.engine_type(lambda: arr, len(arr)) new = np.array(list('abcdefghij'), dtype=self.dtype) result = engine.get_pad_indexer(new) expected = libalgos.pad["object"](arr, new) tm.assert_numpy_array_equal(result, expected)
code_path: pandas/tests/indexing/test_indexing_engines.py
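The engine tests above exercise three return shapes for label lookup; the same contract is visible through the public Index.get_loc API. A small sketch with made-up values:

import numpy as np
import pandas as pd

# Unique labels: an integer position is returned.
assert pd.Index([1, 2, 3]).get_loc(2) == 1

# Monotonic with duplicates: a slice covering the run is returned.
assert pd.Index([1, 1, 2, 2, 3]).get_loc(2) == slice(2, 4)

# Non-monotonic duplicates: a boolean mask is returned.
mask = pd.Index([1, 2, 3, 1, 2]).get_loc(2)
assert (mask == np.array([False, True, False, False, True])).all()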
""" Arithmetic operations for PandasObjects This is not a public API. """ import datetime import operator import textwrap from typing import Dict, Optional import warnings import numpy as np from pandas._libs import algos as libalgos, lib, ops as libops from pandas.errors import NullFrequencyError from pandas.util._decorators import Appender from pandas.core.dtypes.cast import ( construct_1d_object_array_from_listlike, find_common_type, maybe_upcast_putmask) from pandas.core.dtypes.common import ( ensure_object, is_bool_dtype, is_categorical_dtype, is_datetime64_dtype, is_datetime64tz_dtype, is_datetimelike_v_numeric, is_extension_array_dtype, is_integer_dtype, is_list_like, is_object_dtype, is_period_dtype, is_scalar, is_timedelta64_dtype, needs_i8_conversion) from pandas.core.dtypes.generic import ( ABCDataFrame, ABCIndex, ABCIndexClass, ABCSeries, ABCSparseArray, ABCSparseSeries) from pandas.core.dtypes.missing import isna, notna import pandas as pd import pandas.core.common as com import pandas.core.missing as missing # ----------------------------------------------------------------------------- # Ops Wrapping Utilities def get_op_result_name(left, right): """ Find the appropriate name to pin to an operation result. This result should always be either an Index or a Series. Parameters ---------- left : {Series, Index} right : object Returns ------- name : object Usually a string """ # `left` is always a pd.Series when called from within ops if isinstance(right, (ABCSeries, pd.Index)): name = _maybe_match_name(left, right) else: name = left.name return name def _maybe_match_name(a, b): """ Try to find a name to attach to the result of an operation between a and b. If only one of these has a `name` attribute, return that name. Otherwise return a consensus name if they match of None if they have different names. Parameters ---------- a : object b : object Returns ------- name : str or None See Also -------- pandas.core.common.consensus_name_attr """ a_has = hasattr(a, 'name') b_has = hasattr(b, 'name') if a_has and b_has: if a.name == b.name: return a.name else: # TODO: what if they both have np.nan for their names? return None elif a_has: return a.name elif b_has: return b.name return None def maybe_upcast_for_op(obj): """ Cast non-pandas objects to pandas types to unify behavior of arithmetic and comparison operations. Parameters ---------- obj: object Returns ------- out : object Notes ----- Be careful to call this *after* determining the `name` attribute to be attached to the result of the arithmetic operation. """ if type(obj) is datetime.timedelta: # GH#22390 cast up to Timedelta to rely on Timedelta # implementation; otherwise operation against numeric-dtype # raises TypeError return pd.Timedelta(obj) elif isinstance(obj, np.timedelta64) and not isna(obj): # In particular non-nanosecond timedelta64 needs to be cast to # nanoseconds, or else we get undesired behavior like # np.timedelta64(3, 'D') / 2 == np.timedelta64(1, 'D') # The isna check is to avoid casting timedelta64("NaT"), which would # return NaT and incorrectly be treated as a datetime-NaT. 
return pd.Timedelta(obj) elif isinstance(obj, np.ndarray) and is_timedelta64_dtype(obj): # GH#22390 Unfortunately we need to special-case right-hand # timedelta64 dtypes because numpy casts integer dtypes to # timedelta64 when operating with timedelta64 return pd.TimedeltaIndex(obj) return obj # ----------------------------------------------------------------------------- # Reversed Operations not available in the stdlib operator module. # Defining these instead of using lambdas allows us to reference them by name. def radd(left, right): return right + left def rsub(left, right): return right - left def rmul(left, right): return right * left def rdiv(left, right): return right / left def rtruediv(left, right): return right / left def rfloordiv(left, right): return right // left def rmod(left, right): # check if right is a string as % is the string # formatting operation; this is a TypeError # otherwise perform the op if isinstance(right, str): raise TypeError("{typ} cannot perform the operation mod".format( typ=type(left).__name__)) return right % left def rdivmod(left, right): return divmod(right, left) def rpow(left, right): return right ** left def rand_(left, right): return operator.and_(right, left) def ror_(left, right): return operator.or_(right, left) def rxor(left, right): return operator.xor(right, left) # ----------------------------------------------------------------------------- def make_invalid_op(name): """ Return a binary method that always raises a TypeError. Parameters ---------- name : str Returns ------- invalid_op : function """ def invalid_op(self, other=None): raise TypeError("cannot perform {name} with this index type: " "{typ}".format(name=name, typ=type(self).__name__)) invalid_op.__name__ = name return invalid_op def _gen_eval_kwargs(name): """ Find the keyword arguments to pass to numexpr for the given operation. Parameters ---------- name : str Returns ------- eval_kwargs : dict Examples -------- >>> _gen_eval_kwargs("__add__") {} >>> _gen_eval_kwargs("rtruediv") {'reversed': True, 'truediv': True} """ kwargs = {} # Series appear to only pass __add__, __radd__, ... # but DataFrame gets both these dunder names _and_ non-dunder names # add, radd, ... name = name.replace('__', '') if name.startswith('r'): if name not in ['radd', 'rand', 'ror', 'rxor']: # Exclude commutative operations kwargs['reversed'] = True if name in ['truediv', 'rtruediv']: kwargs['truediv'] = True if name in ['ne']: kwargs['masker'] = True return kwargs def _gen_fill_zeros(name): """ Find the appropriate fill value to use when filling in undefined values in the results of the given operation caused by operating on (generally dividing by) zero. Parameters ---------- name : str Returns ------- fill_value : {None, np.nan, np.inf} """ name = name.strip('__') if 'div' in name: # truediv, floordiv, div, and reversed variants fill_value = np.inf elif 'mod' in name: # mod, rmod fill_value = np.nan else: fill_value = None return fill_value def _get_frame_op_default_axis(name): """ Only DataFrame cares about default_axis, specifically: special methods have default_axis=None and flex methods have default_axis='columns'. Parameters ---------- name : str Returns ------- default_axis: str or None """ if name.replace('__r', '__') in ['__and__', '__or__', '__xor__']: # bool methods return 'columns' elif name.startswith('__'): # __add__, __mul__, ... return None else: # add, mul, ... return 'columns' def _get_opstr(op, cls): """ Find the operation string, if any, to pass to numexpr for this operation. 
Parameters ---------- op : binary operator cls : class Returns ------- op_str : string or None """ # numexpr is available for non-sparse classes subtyp = getattr(cls, '_subtyp', '') use_numexpr = 'sparse' not in subtyp if not use_numexpr: # if we're not using numexpr, then don't pass a str_rep return None return {operator.add: '+', radd: '+', operator.mul: '*', rmul: '*', operator.sub: '-', rsub: '-', operator.truediv: '/', rtruediv: '/', operator.floordiv: '//', rfloordiv: '//', operator.mod: None, # TODO: Why None for mod but '%' for rmod? rmod: '%', operator.pow: '**', rpow: '**', operator.eq: '==', operator.ne: '!=', operator.le: '<=', operator.lt: '<', operator.ge: '>=', operator.gt: '>', operator.and_: '&', rand_: '&', operator.or_: '|', ror_: '|', operator.xor: '^', rxor: '^', divmod: None, rdivmod: None}[op] def _get_op_name(op, special): """ Find the name to attach to this method according to conventions for special and non-special methods. Parameters ---------- op : binary operator special : bool Returns ------- op_name : str """ opname = op.__name__.strip('_') if special: opname = '__{opname}__'.format(opname=opname) return opname # ----------------------------------------------------------------------------- # Docstring Generation and Templates _add_example_SERIES = """ Examples -------- >>> a = pd.Series([1, 1, 1, np.nan], index=['a', 'b', 'c', 'd']) >>> a a 1.0 b 1.0 c 1.0 d NaN dtype: float64 >>> b = pd.Series([1, np.nan, 1, np.nan], index=['a', 'b', 'd', 'e']) >>> b a 1.0 b NaN d 1.0 e NaN dtype: float64 >>> a.add(b, fill_value=0) a 2.0 b 1.0 c 1.0 d 1.0 e NaN dtype: float64 """ _sub_example_SERIES = """ Examples -------- >>> a = pd.Series([1, 1, 1, np.nan], index=['a', 'b', 'c', 'd']) >>> a a 1.0 b 1.0 c 1.0 d NaN dtype: float64 >>> b = pd.Series([1, np.nan, 1, np.nan], index=['a', 'b', 'd', 'e']) >>> b a 1.0 b NaN d 1.0 e NaN dtype: float64 >>> a.subtract(b, fill_value=0) a 0.0 b 1.0 c 1.0 d -1.0 e NaN dtype: float64 """ _mul_example_SERIES = """ Examples -------- >>> a = pd.Series([1, 1, 1, np.nan], index=['a', 'b', 'c', 'd']) >>> a a 1.0 b 1.0 c 1.0 d NaN dtype: float64 >>> b = pd.Series([1, np.nan, 1, np.nan], index=['a', 'b', 'd', 'e']) >>> b a 1.0 b NaN d 1.0 e NaN dtype: float64 >>> a.multiply(b, fill_value=0) a 1.0 b 0.0 c 0.0 d 0.0 e NaN dtype: float64 """ _div_example_SERIES = """ Examples -------- >>> a = pd.Series([1, 1, 1, np.nan], index=['a', 'b', 'c', 'd']) >>> a a 1.0 b 1.0 c 1.0 d NaN dtype: float64 >>> b = pd.Series([1, np.nan, 1, np.nan], index=['a', 'b', 'd', 'e']) >>> b a 1.0 b NaN d 1.0 e NaN dtype: float64 >>> a.divide(b, fill_value=0) a 1.0 b inf c inf d 0.0 e NaN dtype: float64 """ _floordiv_example_SERIES = """ Examples -------- >>> a = pd.Series([1, 1, 1, np.nan], index=['a', 'b', 'c', 'd']) >>> a a 1.0 b 1.0 c 1.0 d NaN dtype: float64 >>> b = pd.Series([1, np.nan, 1, np.nan], index=['a', 'b', 'd', 'e']) >>> b a 1.0 b NaN d 1.0 e NaN dtype: float64 >>> a.floordiv(b, fill_value=0) a 1.0 b NaN c NaN d 0.0 e NaN dtype: float64 """ _mod_example_SERIES = """ Examples -------- >>> a = pd.Series([1, 1, 1, np.nan], index=['a', 'b', 'c', 'd']) >>> a a 1.0 b 1.0 c 1.0 d NaN dtype: float64 >>> b = pd.Series([1, np.nan, 1, np.nan], index=['a', 'b', 'd', 'e']) >>> b a 1.0 b NaN d 1.0 e NaN dtype: float64 >>> a.mod(b, fill_value=0) a 0.0 b NaN c NaN d 0.0 e NaN dtype: float64 """ _pow_example_SERIES = """ Examples -------- >>> a = pd.Series([1, 1, 1, np.nan], index=['a', 'b', 'c', 'd']) >>> a a 1.0 b 1.0 c 1.0 d NaN dtype: float64 >>> b = pd.Series([1, np.nan, 
1, np.nan], index=['a', 'b', 'd', 'e']) >>> b a 1.0 b NaN d 1.0 e NaN dtype: float64 >>> a.pow(b, fill_value=0) a 1.0 b 1.0 c 1.0 d 0.0 e NaN dtype: float64 """ _op_descriptions = { # Arithmetic Operators 'add': {'op': '+', 'desc': 'Addition', 'reverse': 'radd', 'series_examples': _add_example_SERIES}, 'sub': {'op': '-', 'desc': 'Subtraction', 'reverse': 'rsub', 'series_examples': _sub_example_SERIES}, 'mul': {'op': '*', 'desc': 'Multiplication', 'reverse': 'rmul', 'series_examples': _mul_example_SERIES, 'df_examples': None}, 'mod': {'op': '%', 'desc': 'Modulo', 'reverse': 'rmod', 'series_examples': _mod_example_SERIES}, 'pow': {'op': '**', 'desc': 'Exponential power', 'reverse': 'rpow', 'series_examples': _pow_example_SERIES, 'df_examples': None}, 'truediv': {'op': '/', 'desc': 'Floating division', 'reverse': 'rtruediv', 'series_examples': _div_example_SERIES, 'df_examples': None}, 'floordiv': {'op': '//', 'desc': 'Integer division', 'reverse': 'rfloordiv', 'series_examples': _floordiv_example_SERIES, 'df_examples': None}, 'divmod': {'op': 'divmod', 'desc': 'Integer division and modulo', 'reverse': 'rdivmod', 'series_examples': None, 'df_examples': None}, # Comparison Operators 'eq': {'op': '==', 'desc': 'Equal to', 'reverse': None, 'series_examples': None}, 'ne': {'op': '!=', 'desc': 'Not equal to', 'reverse': None, 'series_examples': None}, 'lt': {'op': '<', 'desc': 'Less than', 'reverse': None, 'series_examples': None}, 'le': {'op': '<=', 'desc': 'Less than or equal to', 'reverse': None, 'series_examples': None}, 'gt': {'op': '>', 'desc': 'Greater than', 'reverse': None, 'series_examples': None}, 'ge': {'op': '>=', 'desc': 'Greater than or equal to', 'reverse': None, 'series_examples': None} } # type: Dict[str, Dict[str, Optional[str]]] _op_names = list(_op_descriptions.keys()) for key in _op_names: reverse_op = _op_descriptions[key]['reverse'] if reverse_op is not None: _op_descriptions[reverse_op] = _op_descriptions[key].copy() _op_descriptions[reverse_op]['reverse'] = key _flex_doc_SERIES = """ Return {desc} of series and other, element-wise (binary operator `{op_name}`). Equivalent to ``{equiv}``, but with support to substitute a fill_value for missing data in one of the inputs. Parameters ---------- other : Series or scalar value fill_value : None or float value, default None (NaN) Fill existing missing (NaN) values, and any new element needed for successful Series alignment, with this value before computation. If data in both corresponding Series locations is missing the result will be missing. level : int or name Broadcast across a level, matching Index values on the passed MultiIndex level. Returns ------- Series The result of the operation. See Also -------- Series.{reverse} """ _arith_doc_FRAME = """ Binary operator %s with support to substitute a fill_value for missing data in one of the inputs Parameters ---------- other : Series, DataFrame, or constant axis : {0, 1, 'index', 'columns'} For Series input, axis to match Series index on fill_value : None or float value, default None Fill existing missing (NaN) values, and any new element needed for successful DataFrame alignment, with this value before computation. 
If data in both corresponding DataFrame locations is missing the result will be missing level : int or name Broadcast across a level, matching Index values on the passed MultiIndex level Returns ------- result : DataFrame Notes ----- Mismatched indices will be unioned together """ _flex_doc_FRAME = """ Get {desc} of dataframe and other, element-wise (binary operator `{op_name}`). Equivalent to ``{equiv}``, but with support to substitute a fill_value for missing data in one of the inputs. With reverse version, `{reverse}`. Among flexible wrappers (`add`, `sub`, `mul`, `div`, `mod`, `pow`) to arithmetic operators: `+`, `-`, `*`, `/`, `//`, `%`, `**`. Parameters ---------- other : scalar, sequence, Series, or DataFrame Any single or multiple element data structure, or list-like object. axis : {{0 or 'index', 1 or 'columns'}} Whether to compare by the index (0 or 'index') or columns (1 or 'columns'). For Series input, axis to match Series index on. level : int or label Broadcast across a level, matching Index values on the passed MultiIndex level. fill_value : float or None, default None Fill existing missing (NaN) values, and any new element needed for successful DataFrame alignment, with this value before computation. If data in both corresponding DataFrame locations is missing the result will be missing. Returns ------- DataFrame Result of the arithmetic operation. See Also -------- DataFrame.add : Add DataFrames. DataFrame.sub : Subtract DataFrames. DataFrame.mul : Multiply DataFrames. DataFrame.div : Divide DataFrames (float division). DataFrame.truediv : Divide DataFrames (float division). DataFrame.floordiv : Divide DataFrames (integer division). DataFrame.mod : Calculate modulo (remainder after division). DataFrame.pow : Calculate exponential power. Notes ----- Mismatched indices will be unioned together. Examples -------- >>> df = pd.DataFrame({{'angles': [0, 3, 4], ... 'degrees': [360, 180, 360]}}, ... index=['circle', 'triangle', 'rectangle']) >>> df angles degrees circle 0 360 triangle 3 180 rectangle 4 360 Add a scalar with operator version which return the same results. >>> df + 1 angles degrees circle 1 361 triangle 4 181 rectangle 5 361 >>> df.add(1) angles degrees circle 1 361 triangle 4 181 rectangle 5 361 Divide by constant with reverse version. >>> df.div(10) angles degrees circle 0.0 36.0 triangle 0.3 18.0 rectangle 0.4 36.0 >>> df.rdiv(10) angles degrees circle inf 0.027778 triangle 3.333333 0.055556 rectangle 2.500000 0.027778 Subtract a list and Series by axis with operator version. >>> df - [1, 2] angles degrees circle -1 358 triangle 2 178 rectangle 3 358 >>> df.sub([1, 2], axis='columns') angles degrees circle -1 358 triangle 2 178 rectangle 3 358 >>> df.sub(pd.Series([1, 1, 1], index=['circle', 'triangle', 'rectangle']), ... axis='index') angles degrees circle -1 359 triangle 2 179 rectangle 3 359 Multiply a DataFrame of different shape with operator version. >>> other = pd.DataFrame({{'angles': [0, 3, 4]}}, ... index=['circle', 'triangle', 'rectangle']) >>> other angles circle 0 triangle 3 rectangle 4 >>> df * other angles degrees circle 0 NaN triangle 9 NaN rectangle 16 NaN >>> df.mul(other, fill_value=0) angles degrees circle 0 0.0 triangle 9 0.0 rectangle 16 0.0 Divide by a MultiIndex by level. >>> df_multindex = pd.DataFrame({{'angles': [0, 3, 4, 4, 5, 6], ... 'degrees': [360, 180, 360, 360, 540, 720]}}, ... index=[['A', 'A', 'A', 'B', 'B', 'B'], ... ['circle', 'triangle', 'rectangle', ... 
'square', 'pentagon', 'hexagon']]) >>> df_multindex angles degrees A circle 0 360 triangle 3 180 rectangle 4 360 B square 4 360 pentagon 5 540 hexagon 6 720 >>> df.div(df_multindex, level=1, fill_value=0) angles degrees A circle NaN 1.0 triangle 1.0 1.0 rectangle 1.0 1.0 B square 0.0 0.0 pentagon 0.0 0.0 hexagon 0.0 0.0 """ _flex_comp_doc_FRAME = """ Get {desc} of dataframe and other, element-wise (binary operator `{op_name}`). Among flexible wrappers (`eq`, `ne`, `le`, `lt`, `ge`, `gt`) to comparison operators. Equivalent to `==`, `=!`, `<=`, `<`, `>=`, `>` with support to choose axis (rows or columns) and level for comparison. Parameters ---------- other : scalar, sequence, Series, or DataFrame Any single or multiple element data structure, or list-like object. axis : {{0 or 'index', 1 or 'columns'}}, default 'columns' Whether to compare by the index (0 or 'index') or columns (1 or 'columns'). level : int or label Broadcast across a level, matching Index values on the passed MultiIndex level. Returns ------- DataFrame of bool Result of the comparison. See Also -------- DataFrame.eq : Compare DataFrames for equality elementwise. DataFrame.ne : Compare DataFrames for inequality elementwise. DataFrame.le : Compare DataFrames for less than inequality or equality elementwise. DataFrame.lt : Compare DataFrames for strictly less than inequality elementwise. DataFrame.ge : Compare DataFrames for greater than inequality or equality elementwise. DataFrame.gt : Compare DataFrames for strictly greater than inequality elementwise. Notes ----- Mismatched indices will be unioned together. `NaN` values are considered different (i.e. `NaN` != `NaN`). Examples -------- >>> df = pd.DataFrame({{'cost': [250, 150, 100], ... 'revenue': [100, 250, 300]}}, ... index=['A', 'B', 'C']) >>> df cost revenue A 250 100 B 150 250 C 100 300 Comparison with a scalar, using either the operator or method: >>> df == 100 cost revenue A False True B False False C True False >>> df.eq(100) cost revenue A False True B False False C True False When `other` is a :class:`Series`, the columns of a DataFrame are aligned with the index of `other` and broadcast: >>> df != pd.Series([100, 250], index=["cost", "revenue"]) cost revenue A True True B True False C False True Use the method to control the broadcast axis: >>> df.ne(pd.Series([100, 300], index=["A", "D"]), axis='index') cost revenue A True False B True True C True True D True True When comparing to an arbitrary sequence, the number of columns must match the number elements in `other`: >>> df == [250, 100] cost revenue A True True B False False C False False Use the method to control the axis: >>> df.eq([250, 250, 100], axis='index') cost revenue A True False B False True C True False Compare to a DataFrame of different shape. >>> other = pd.DataFrame({{'revenue': [300, 250, 100, 150]}}, ... index=['A', 'B', 'C', 'D']) >>> other revenue A 300 B 250 C 100 D 150 >>> df.gt(other) cost revenue A False False B False False C False True D False False Compare to a MultiIndex by level. >>> df_multindex = pd.DataFrame({{'cost': [250, 150, 100, 150, 300, 220], ... 'revenue': [100, 250, 300, 200, 175, 225]}}, ... index=[['Q1', 'Q1', 'Q1', 'Q2', 'Q2', 'Q2'], ... 
['A', 'B', 'C', 'A', 'B', 'C']]) >>> df_multindex cost revenue Q1 A 250 100 B 150 250 C 100 300 Q2 A 150 200 B 300 175 C 220 225 >>> df.le(df_multindex, level=1) cost revenue Q1 A True True B True True C True True Q2 A False True B True False C True False """ def _make_flex_doc(op_name, typ): """ Make the appropriate substitutions for the given operation and class-typ into either _flex_doc_SERIES or _flex_doc_FRAME to return the docstring to attach to a generated method. Parameters ---------- op_name : str {'__add__', '__sub__', ... '__eq__', '__ne__', ...} typ : str {series, 'dataframe']} Returns ------- doc : str """ op_name = op_name.replace('__', '') op_desc = _op_descriptions[op_name] if op_name.startswith('r'): equiv = 'other ' + op_desc['op'] + ' ' + typ else: equiv = typ + ' ' + op_desc['op'] + ' other' if typ == 'series': base_doc = _flex_doc_SERIES doc_no_examples = base_doc.format( desc=op_desc['desc'], op_name=op_name, equiv=equiv, reverse=op_desc['reverse'] ) if op_desc['series_examples']: doc = doc_no_examples + op_desc['series_examples'] else: doc = doc_no_examples elif typ == 'dataframe': base_doc = _flex_doc_FRAME doc = base_doc.format( desc=op_desc['desc'], op_name=op_name, equiv=equiv, reverse=op_desc['reverse'] ) else: raise AssertionError('Invalid typ argument.') return doc # ----------------------------------------------------------------------------- # Masking NA values and fallbacks for operations numpy does not support def fill_binop(left, right, fill_value): """ If a non-None fill_value is given, replace null entries in left and right with this value, but only in positions where _one_ of left/right is null, not both. Parameters ---------- left : array-like right : array-like fill_value : object Returns ------- left : array-like right : array-like Notes ----- Makes copies if fill_value is not None """ # TODO: can we make a no-copy implementation? if fill_value is not None: left_mask = isna(left) right_mask = isna(right) left = left.copy() right = right.copy() # one but not both mask = left_mask ^ right_mask left[left_mask & mask] = fill_value right[right_mask & mask] = fill_value return left, right def mask_cmp_op(x, y, op): """ Apply the function `op` to only non-null points in x and y. Parameters ---------- x : array-like y : array-like op : binary operation Returns ------- result : ndarray[bool] """ xrav = x.ravel() result = np.empty(x.size, dtype=bool) if isinstance(y, (np.ndarray, ABCSeries)): yrav = y.ravel() mask = notna(xrav) & notna(yrav) result[mask] = op(np.array(list(xrav[mask])), np.array(list(yrav[mask]))) else: mask = notna(xrav) result[mask] = op(np.array(list(xrav[mask])), y) if op == operator.ne: # pragma: no cover np.putmask(result, ~mask, True) else: np.putmask(result, ~mask, False) result = result.reshape(x.shape) return result def masked_arith_op(x, y, op): """ If the given arithmetic operation fails, attempt it again on only the non-null elements of the input array(s). Parameters ---------- x : np.ndarray y : np.ndarray, Series, Index op : binary operator """ # For Series `x` is 1D so ravel() is a no-op; calling it anyway makes # the logic valid for both Series and DataFrame ops. xrav = x.ravel() assert isinstance(x, (np.ndarray, ABCSeries)), type(x) if isinstance(y, (np.ndarray, ABCSeries, ABCIndexClass)): dtype = find_common_type([x.dtype, y.dtype]) result = np.empty(x.size, dtype=dtype) # PeriodIndex.ravel() returns int64 dtype, so we have # to work around that case. 
See GH#19956 yrav = y if is_period_dtype(y) else y.ravel() mask = notna(xrav) & notna(yrav) if yrav.shape != mask.shape: # FIXME: GH#5284, GH#5035, GH#19448 # Without specifically raising here we get mismatched # errors in Py3 (TypeError) vs Py2 (ValueError) # Note: Only = an issue in DataFrame case raise ValueError('Cannot broadcast operands together.') if mask.any(): with np.errstate(all='ignore'): result[mask] = op(xrav[mask], com.values_from_object(yrav[mask])) else: assert is_scalar(y), type(y) assert isinstance(x, np.ndarray), type(x) # mask is only meaningful for x result = np.empty(x.size, dtype=x.dtype) mask = notna(xrav) # 1 ** np.nan is 1. So we have to unmask those. if op == pow: mask = np.where(x == 1, False, mask) elif op == rpow: mask = np.where(y == 1, False, mask) if mask.any(): with np.errstate(all='ignore'): result[mask] = op(xrav[mask], y) result, changed = maybe_upcast_putmask(result, ~mask, np.nan) result = result.reshape(x.shape) # 2D compat return result def invalid_comparison(left, right, op): """ If a comparison has mismatched types and is not necessarily meaningful, follow python3 conventions by: - returning all-False for equality - returning all-True for inequality - raising TypeError otherwise Parameters ---------- left : array-like right : scalar, array-like op : operator.{eq, ne, lt, le, gt} Raises ------ TypeError : on inequality comparisons """ if op is operator.eq: res_values = np.zeros(left.shape, dtype=bool) elif op is operator.ne: res_values = np.ones(left.shape, dtype=bool) else: raise TypeError("Invalid comparison between dtype={dtype} and {typ}" .format(dtype=left.dtype, typ=type(right).__name__)) return res_values # ----------------------------------------------------------------------------- # Dispatch logic def should_series_dispatch(left, right, op): """ Identify cases where a DataFrame operation should dispatch to its Series counterpart. Parameters ---------- left : DataFrame right : DataFrame op : binary operator Returns ------- override : bool """ if left._is_mixed_type or right._is_mixed_type: return True if not len(left.columns) or not len(right.columns): # ensure obj.dtypes[0] exists for each obj return False ldtype = left.dtypes.iloc[0] rdtype = right.dtypes.iloc[0] if ((is_timedelta64_dtype(ldtype) and is_integer_dtype(rdtype)) or (is_timedelta64_dtype(rdtype) and is_integer_dtype(ldtype))): # numpy integer dtypes as timedelta64 dtypes in this scenario return True if is_datetime64_dtype(ldtype) and is_object_dtype(rdtype): # in particular case where right is an array of DateOffsets return True return False def dispatch_to_series(left, right, func, str_rep=None, axis=None): """ Evaluate the frame operation func(left, right) by evaluating column-by-column, dispatching to the Series implementation. Parameters ---------- left : DataFrame right : scalar or DataFrame func : arithmetic or comparison operator str_rep : str or None, default None axis : {None, 0, 1, "index", "columns"} Returns ------- DataFrame """ # Note: we use iloc to access columns for compat with cases # with non-unique columns. 
import pandas.core.computation.expressions as expressions right = lib.item_from_zerodim(right) if lib.is_scalar(right) or np.ndim(right) == 0: def column_op(a, b): return {i: func(a.iloc[:, i], b) for i in range(len(a.columns))} elif isinstance(right, ABCDataFrame): assert right._indexed_same(left) def column_op(a, b): return {i: func(a.iloc[:, i], b.iloc[:, i]) for i in range(len(a.columns))} elif isinstance(right, ABCSeries) and axis == "columns": # We only get here if called via left._combine_match_columns, # in which case we specifically want to operate row-by-row assert right.index.equals(left.columns) def column_op(a, b): return {i: func(a.iloc[:, i], b.iloc[i]) for i in range(len(a.columns))} elif isinstance(right, ABCSeries): assert right.index.equals(left.index) # Handle other cases later def column_op(a, b): return {i: func(a.iloc[:, i], b) for i in range(len(a.columns))} else: # Remaining cases have less-obvious dispatch rules raise NotImplementedError(right) new_data = expressions.evaluate(column_op, str_rep, left, right) result = left._constructor(new_data, index=left.index, copy=False) # Pin columns instead of passing to constructor for compat with # non-unique columns case result.columns = left.columns return result def dispatch_to_index_op(op, left, right, index_class): """ Wrap Series left in the given index_class to delegate the operation op to the index implementation. DatetimeIndex and TimedeltaIndex perform type checking, timezone handling, overflow checks, etc. Parameters ---------- op : binary operator (operator.add, operator.sub, ...) left : Series right : object index_class : DatetimeIndex or TimedeltaIndex Returns ------- result : object, usually DatetimeIndex, TimedeltaIndex, or Series """ left_idx = index_class(left) # avoid accidentally allowing integer add/sub. For datetime64[tz] dtypes, # left_idx may inherit a freq from a cached DatetimeIndex. # See discussion in GH#19147. if getattr(left_idx, 'freq', None) is not None: left_idx = left_idx._shallow_copy(freq=None) try: result = op(left_idx, right) except NullFrequencyError: # DatetimeIndex and TimedeltaIndex with freq == None raise ValueError # on add/sub of integers (or int-like). We re-raise as a TypeError. raise TypeError('incompatible type for a datetime/timedelta ' 'operation [{name}]'.format(name=op.__name__)) return result def dispatch_to_extension_op(op, left, right): """ Assume that left or right is a Series backed by an ExtensionArray, apply the operator defined by op. """ # The op calls will raise TypeError if the op is not defined # on the ExtensionArray # unbox Series and Index to arrays if isinstance(left, (ABCSeries, ABCIndexClass)): new_left = left._values else: new_left = left if isinstance(right, (ABCSeries, ABCIndexClass)): new_right = right._values else: new_right = right res_values = op(new_left, new_right) res_name = get_op_result_name(left, right) if op.__name__ in ['divmod', 'rdivmod']: return _construct_divmod_result( left, res_values, left.index, res_name) return _construct_result(left, res_values, left.index, res_name) # ----------------------------------------------------------------------------- # Functions that add arithmetic methods to objects, given arithmetic factory # methods def _get_method_wrappers(cls): """ Find the appropriate operation-wrappers to use when defining flex/special arithmetic, boolean, and comparison operations with the given class. 
Parameters ---------- cls : class Returns ------- arith_flex : function or None comp_flex : function or None arith_special : function comp_special : function bool_special : function Notes ----- None is only returned for SparseArray """ if issubclass(cls, ABCSparseSeries): # Be sure to catch this before ABCSeries and ABCSparseArray, # as they will both come see SparseSeries as a subclass arith_flex = _flex_method_SERIES comp_flex = _flex_method_SERIES arith_special = _arith_method_SPARSE_SERIES comp_special = _arith_method_SPARSE_SERIES bool_special = _bool_method_SERIES # TODO: I don't think the functions defined by bool_method are tested elif issubclass(cls, ABCSeries): # Just Series; SparseSeries is caught above arith_flex = _flex_method_SERIES comp_flex = _flex_method_SERIES arith_special = _arith_method_SERIES comp_special = _comp_method_SERIES bool_special = _bool_method_SERIES elif issubclass(cls, ABCSparseArray): arith_flex = None comp_flex = None arith_special = _arith_method_SPARSE_ARRAY comp_special = _arith_method_SPARSE_ARRAY bool_special = _arith_method_SPARSE_ARRAY elif issubclass(cls, ABCDataFrame): # Same for DataFrame and SparseDataFrame arith_flex = _arith_method_FRAME comp_flex = _flex_comp_method_FRAME arith_special = _arith_method_FRAME comp_special = _comp_method_FRAME bool_special = _arith_method_FRAME return arith_flex, comp_flex, arith_special, comp_special, bool_special def _create_methods(cls, arith_method, comp_method, bool_method, special): # creates actual methods based upon arithmetic, comp and bool method # constructors. have_divmod = issubclass(cls, ABCSeries) # divmod is available for Series and SparseSeries # yapf: disable new_methods = dict( add=arith_method(cls, operator.add, special), radd=arith_method(cls, radd, special), sub=arith_method(cls, operator.sub, special), mul=arith_method(cls, operator.mul, special), truediv=arith_method(cls, operator.truediv, special), floordiv=arith_method(cls, operator.floordiv, special), # Causes a floating point exception in the tests when numexpr enabled, # so for now no speedup mod=arith_method(cls, operator.mod, special), pow=arith_method(cls, operator.pow, special), # not entirely sure why this is necessary, but previously was included # so it's here to maintain compatibility rmul=arith_method(cls, rmul, special), rsub=arith_method(cls, rsub, special), rtruediv=arith_method(cls, rtruediv, special), rfloordiv=arith_method(cls, rfloordiv, special), rpow=arith_method(cls, rpow, special), rmod=arith_method(cls, rmod, special)) # yapf: enable new_methods['div'] = new_methods['truediv'] new_methods['rdiv'] = new_methods['rtruediv'] if have_divmod: # divmod doesn't have an op that is supported by numexpr new_methods['divmod'] = arith_method(cls, divmod, special) new_methods['rdivmod'] = arith_method(cls, rdivmod, special) new_methods.update(dict( eq=comp_method(cls, operator.eq, special), ne=comp_method(cls, operator.ne, special), lt=comp_method(cls, operator.lt, special), gt=comp_method(cls, operator.gt, special), le=comp_method(cls, operator.le, special), ge=comp_method(cls, operator.ge, special))) if bool_method: new_methods.update( dict(and_=bool_method(cls, operator.and_, special), or_=bool_method(cls, operator.or_, special), # For some reason ``^`` wasn't used in original. 
xor=bool_method(cls, operator.xor, special), rand_=bool_method(cls, rand_, special), ror_=bool_method(cls, ror_, special), rxor=bool_method(cls, rxor, special))) if special: dunderize = lambda x: '__{name}__'.format(name=x.strip('_')) else: dunderize = lambda x: x new_methods = {dunderize(k): v for k, v in new_methods.items()} return new_methods def add_methods(cls, new_methods): for name, method in new_methods.items(): # For most methods, if we find that the class already has a method # of the same name, it is OK to over-write it. The exception is # inplace methods (__iadd__, __isub__, ...) for SparseArray, which # retain the np.ndarray versions. force = not (issubclass(cls, ABCSparseArray) and name.startswith('__i')) if force or name not in cls.__dict__: setattr(cls, name, method) # ---------------------------------------------------------------------- # Arithmetic def add_special_arithmetic_methods(cls): """ Adds the full suite of special arithmetic methods (``__add__``, ``__sub__``, etc.) to the class. Parameters ---------- cls : class special methods will be defined and pinned to this class """ _, _, arith_method, comp_method, bool_method = _get_method_wrappers(cls) new_methods = _create_methods(cls, arith_method, comp_method, bool_method, special=True) # inplace operators (I feel like these should get passed an `inplace=True` # or just be removed def _wrap_inplace_method(method): """ return an inplace wrapper for this method """ def f(self, other): result = method(self, other) # this makes sure that we are aligned like the input # we are updating inplace so we want to ignore is_copy self._update_inplace(result.reindex_like(self, copy=False)._data, verify_is_copy=False) return self f.__name__ = "__i{name}__".format(name=method.__name__.strip("__")) return f new_methods.update( dict(__iadd__=_wrap_inplace_method(new_methods["__add__"]), __isub__=_wrap_inplace_method(new_methods["__sub__"]), __imul__=_wrap_inplace_method(new_methods["__mul__"]), __itruediv__=_wrap_inplace_method(new_methods["__truediv__"]), __ifloordiv__=_wrap_inplace_method(new_methods["__floordiv__"]), __imod__=_wrap_inplace_method(new_methods["__mod__"]), __ipow__=_wrap_inplace_method(new_methods["__pow__"]))) new_methods.update( dict(__iand__=_wrap_inplace_method(new_methods["__and__"]), __ior__=_wrap_inplace_method(new_methods["__or__"]), __ixor__=_wrap_inplace_method(new_methods["__xor__"]))) add_methods(cls, new_methods=new_methods) def add_flex_arithmetic_methods(cls): """ Adds the full suite of flex arithmetic methods (``pow``, ``mul``, ``add``) to the class. 
Parameters ---------- cls : class flex methods will be defined and pinned to this class """ flex_arith_method, flex_comp_method, _, _, _ = _get_method_wrappers(cls) new_methods = _create_methods(cls, flex_arith_method, flex_comp_method, bool_method=None, special=False) new_methods.update(dict(multiply=new_methods['mul'], subtract=new_methods['sub'], divide=new_methods['div'])) # opt out of bool flex methods for now assert not any(kname in new_methods for kname in ('ror_', 'rxor', 'rand_')) add_methods(cls, new_methods=new_methods) # ----------------------------------------------------------------------------- # Series def _align_method_SERIES(left, right, align_asobject=False): """ align lhs and rhs Series """ # ToDo: Different from _align_method_FRAME, list, tuple and ndarray # are not coerced here # because Series has inconsistencies described in #13637 if isinstance(right, ABCSeries): # avoid repeated alignment if not left.index.equals(right.index): if align_asobject: # to keep original value's dtype for bool ops left = left.astype(object) right = right.astype(object) left, right = left.align(right, copy=False) return left, right def _construct_result(left, result, index, name, dtype=None): """ If the raw op result has a non-None name (e.g. it is an Index object) and the name argument is None, then passing name to the constructor will not be enough; we still need to override the name attribute. """ out = left._constructor(result, index=index, dtype=dtype) out = out.__finalize__(left) out.name = name return out def _construct_divmod_result(left, result, index, name, dtype=None): """divmod returns a tuple of like indexed series instead of a single series. """ return ( _construct_result(left, result[0], index=index, name=name, dtype=dtype), _construct_result(left, result[1], index=index, name=name, dtype=dtype), ) def _arith_method_SERIES(cls, op, special): """ Wrapper function for Series arithmetic operations, to avoid code duplication. """ str_rep = _get_opstr(op, cls) op_name = _get_op_name(op, special) eval_kwargs = _gen_eval_kwargs(op_name) fill_zeros = _gen_fill_zeros(op_name) construct_result = (_construct_divmod_result if op in [divmod, rdivmod] else _construct_result) def na_op(x, y): """ Return the result of evaluating op on the passed in values. If native types are not compatible, try coersion to object dtype. Parameters ---------- x : array-like y : array-like or scalar Returns ------- array-like Raises ------ TypeError : invalid operation """ import pandas.core.computation.expressions as expressions try: result = expressions.evaluate(op, str_rep, x, y, **eval_kwargs) except TypeError: result = masked_arith_op(x, y, op) except Exception: # TODO: more specific? if is_object_dtype(x): return libalgos.arrmap_object(x, lambda val: op(val, y)) raise result = missing.fill_zeros(result, x, y, op_name, fill_zeros) return result def wrapper(left, right): if isinstance(right, ABCDataFrame): return NotImplemented left, right = _align_method_SERIES(left, right) res_name = get_op_result_name(left, right) right = maybe_upcast_for_op(right) if is_categorical_dtype(left): raise TypeError("{typ} cannot perform the operation " "{op}".format(typ=type(left).__name__, op=str_rep)) elif is_datetime64_dtype(left) or is_datetime64tz_dtype(left): # Give dispatch_to_index_op a chance for tests like # test_dt64_series_add_intlike, which the index dispatching handles # specifically. 
result = dispatch_to_index_op(op, left, right, pd.DatetimeIndex) return construct_result(left, result, index=left.index, name=res_name, dtype=result.dtype) elif (is_extension_array_dtype(left) or (is_extension_array_dtype(right) and not is_scalar(right))): # GH#22378 disallow scalar to exclude e.g. "category", "Int64" return dispatch_to_extension_op(op, left, right) elif is_timedelta64_dtype(left): result = dispatch_to_index_op(op, left, right, pd.TimedeltaIndex) return construct_result(left, result, index=left.index, name=res_name) elif is_timedelta64_dtype(right): # We should only get here with non-scalar or timedelta64('NaT') # values for right # Note: we cannot use dispatch_to_index_op because # that may incorrectly raise TypeError when we # should get NullFrequencyError result = op(pd.Index(left), right) return construct_result(left, result, index=left.index, name=res_name, dtype=result.dtype) lvalues = left.values rvalues = right if isinstance(rvalues, ABCSeries): rvalues = rvalues.values with np.errstate(all='ignore'): result = na_op(lvalues, rvalues) return construct_result(left, result, index=left.index, name=res_name, dtype=None) wrapper.__name__ = op_name return wrapper def _comp_method_OBJECT_ARRAY(op, x, y): if isinstance(y, list): y = construct_1d_object_array_from_listlike(y) if isinstance(y, (np.ndarray, ABCSeries, ABCIndex)): if not is_object_dtype(y.dtype): y = y.astype(np.object_) if isinstance(y, (ABCSeries, ABCIndex)): y = y.values result = libops.vec_compare(x, y, op) else: result = libops.scalar_compare(x, y, op) return result def _comp_method_SERIES(cls, op, special): """ Wrapper function for Series arithmetic operations, to avoid code duplication. """ op_name = _get_op_name(op, special) masker = _gen_eval_kwargs(op_name).get('masker', False) def na_op(x, y): # TODO: # should have guarantess on what x, y can be type-wise # Extension Dtypes are not called here # Checking that cases that were once handled here are no longer # reachable. assert not (is_categorical_dtype(y) and not is_scalar(y)) if is_object_dtype(x.dtype): result = _comp_method_OBJECT_ARRAY(op, x, y) elif is_datetimelike_v_numeric(x, y): return invalid_comparison(x, y, op) else: # we want to compare like types # we only want to convert to integer like if # we are not NotImplemented, otherwise # we would allow datetime64 (but viewed as i8) against # integer comparisons # we have a datetime/timedelta and may need to convert assert not needs_i8_conversion(x) mask = None if not is_scalar(y) and needs_i8_conversion(y): mask = isna(x) | isna(y) y = y.view('i8') x = x.view('i8') method = getattr(x, op_name, None) if method is not None: with np.errstate(all='ignore'): result = method(y) if result is NotImplemented: return invalid_comparison(x, y, op) else: result = op(x, y) if mask is not None and mask.any(): result[mask] = masker return result def wrapper(self, other, axis=None): # Validate the axis parameter if axis is not None: self._get_axis_number(axis) res_name = get_op_result_name(self, other) if isinstance(other, list): # TODO: same for tuples? 
other = np.asarray(other) if isinstance(other, ABCDataFrame): # pragma: no cover # Defer to DataFrame implementation; fail early return NotImplemented elif isinstance(other, ABCSeries) and not self._indexed_same(other): raise ValueError("Can only compare identically-labeled " "Series objects") elif is_categorical_dtype(self): # Dispatch to Categorical implementation; pd.CategoricalIndex # behavior is non-canonical GH#19513 res_values = dispatch_to_index_op(op, self, other, pd.Categorical) return self._constructor(res_values, index=self.index, name=res_name) elif is_datetime64_dtype(self) or is_datetime64tz_dtype(self): # Dispatch to DatetimeIndex to ensure identical # Series/Index behavior if (isinstance(other, datetime.date) and not isinstance(other, datetime.datetime)): # https://github.com/pandas-dev/pandas/issues/21152 # Compatibility for difference between Series comparison w/ # datetime and date msg = ( "Comparing Series of datetimes with 'datetime.date'. " "Currently, the 'datetime.date' is coerced to a " "datetime. In the future pandas will not coerce, " "and {future}. " "To retain the current behavior, " "convert the 'datetime.date' to a datetime with " "'pd.Timestamp'." ) if op in {operator.lt, operator.le, operator.gt, operator.ge}: future = "a TypeError will be raised" else: future = ( "'the values will not compare equal to the " "'datetime.date'" ) msg = '\n'.join(textwrap.wrap(msg.format(future=future))) warnings.warn(msg, FutureWarning, stacklevel=2) other = pd.Timestamp(other) res_values = dispatch_to_index_op(op, self, other, pd.DatetimeIndex) return self._constructor(res_values, index=self.index, name=res_name) elif is_timedelta64_dtype(self): res_values = dispatch_to_index_op(op, self, other, pd.TimedeltaIndex) return self._constructor(res_values, index=self.index, name=res_name) elif (is_extension_array_dtype(self) or (is_extension_array_dtype(other) and not is_scalar(other))): # Note: the `not is_scalar(other)` condition rules out # e.g. other == "category" return dispatch_to_extension_op(op, self, other) elif isinstance(other, ABCSeries): # By this point we have checked that self._indexed_same(other) res_values = na_op(self.values, other.values) # rename is needed in case res_name is None and res_values.name # is not. return self._constructor(res_values, index=self.index, name=res_name).rename(res_name) elif isinstance(other, (np.ndarray, pd.Index)): # do not check length of zerodim array # as it will broadcast if other.ndim != 0 and len(self) != len(other): raise ValueError('Lengths must match to compare') res_values = na_op(self.values, np.asarray(other)) result = self._constructor(res_values, index=self.index) # rename is needed in case res_name is None and self.name # is not. 
return result.__finalize__(self).rename(res_name) elif is_scalar(other) and isna(other): # numpy does not like comparisons vs None if op is operator.ne: res_values = np.ones(len(self), dtype=bool) else: res_values = np.zeros(len(self), dtype=bool) return self._constructor(res_values, index=self.index, name=res_name, dtype='bool') else: values = self.get_values() with np.errstate(all='ignore'): res = na_op(values, other) if is_scalar(res): raise TypeError('Could not compare {typ} type with Series' .format(typ=type(other))) # always return a full value series here res_values = com.values_from_object(res) return self._constructor(res_values, index=self.index, name=res_name, dtype='bool') wrapper.__name__ = op_name return wrapper def _bool_method_SERIES(cls, op, special): """ Wrapper function for Series arithmetic operations, to avoid code duplication. """ op_name = _get_op_name(op, special) def na_op(x, y): try: result = op(x, y) except TypeError: assert not isinstance(y, (list, ABCSeries, ABCIndexClass)) if isinstance(y, np.ndarray): # bool-bool dtype operations should be OK, should not get here assert not (is_bool_dtype(x) and is_bool_dtype(y)) x = ensure_object(x) y = ensure_object(y) result = libops.vec_binop(x, y, op) else: # let null fall thru assert lib.is_scalar(y) if not isna(y): y = bool(y) try: result = libops.scalar_binop(x, y, op) except (TypeError, ValueError, AttributeError, OverflowError, NotImplementedError): raise TypeError("cannot compare a dtyped [{dtype}] array " "with a scalar of type [{typ}]" .format(dtype=x.dtype, typ=type(y).__name__)) return result fill_int = lambda x: x.fillna(0) fill_bool = lambda x: x.fillna(False).astype(bool) def wrapper(self, other): is_self_int_dtype = is_integer_dtype(self.dtype) self, other = _align_method_SERIES(self, other, align_asobject=True) res_name = get_op_result_name(self, other) if isinstance(other, ABCDataFrame): # Defer to DataFrame implementation; fail early return NotImplemented elif isinstance(other, (ABCSeries, ABCIndexClass)): is_other_int_dtype = is_integer_dtype(other.dtype) other = fill_int(other) if is_other_int_dtype else fill_bool(other) ovalues = other.values finalizer = lambda x: x else: # scalars, list, tuple, np.array is_other_int_dtype = is_integer_dtype(np.asarray(other)) if is_list_like(other) and not isinstance(other, np.ndarray): # TODO: Can we do this before the is_integer_dtype check? # could the is_integer_dtype check be checking the wrong # thing? e.g. other = [[0, 1], [2, 3], [4, 5]]? other = construct_1d_object_array_from_listlike(other) ovalues = other finalizer = lambda x: x.__finalize__(self) # For int vs int `^`, `|`, `&` are bitwise operators and return # integer dtypes. 
Otherwise these are boolean ops filler = (fill_int if is_self_int_dtype and is_other_int_dtype else fill_bool) res_values = na_op(self.values, ovalues) unfilled = self._constructor(res_values, index=self.index, name=res_name) filled = filler(unfilled) return finalizer(filled) wrapper.__name__ = op_name return wrapper def _flex_method_SERIES(cls, op, special): name = _get_op_name(op, special) doc = _make_flex_doc(name, 'series') @Appender(doc) def flex_wrapper(self, other, level=None, fill_value=None, axis=0): # validate axis if axis is not None: self._get_axis_number(axis) if isinstance(other, ABCSeries): return self._binop(other, op, level=level, fill_value=fill_value) elif isinstance(other, (np.ndarray, list, tuple)): if len(other) != len(self): raise ValueError('Lengths must be equal') other = self._constructor(other, self.index) return self._binop(other, op, level=level, fill_value=fill_value) else: if fill_value is not None: self = self.fillna(fill_value) return self._constructor(op(self, other), self.index).__finalize__(self) flex_wrapper.__name__ = name return flex_wrapper # ----------------------------------------------------------------------------- # DataFrame def _combine_series_frame(self, other, func, fill_value=None, axis=None, level=None): """ Apply binary operator `func` to self, other using alignment and fill conventions determined by the fill_value, axis, and level kwargs. Parameters ---------- self : DataFrame other : Series func : binary operator fill_value : object, default None axis : {0, 1, 'columns', 'index', None}, default None level : int or None, default None Returns ------- result : DataFrame """ if fill_value is not None: raise NotImplementedError("fill_value {fill} not supported." .format(fill=fill_value)) if axis is not None: axis = self._get_axis_number(axis) if axis == 0: return self._combine_match_index(other, func, level=level) else: return self._combine_match_columns(other, func, level=level) else: if not len(other): return self * np.nan if not len(self): # Ambiguous case, use _series so works with DataFrame return self._constructor(data=self._series, index=self.index, columns=self.columns) # default axis is columns return self._combine_match_columns(other, func, level=level) def _align_method_FRAME(left, right, axis): """ convert rhs to meet lhs dims if input is list, tuple or np.ndarray """ def to_series(right): msg = ('Unable to coerce to Series, length must be {req_len}: ' 'given {given_len}') if axis is not None and left._get_axis_name(axis) == 'index': if len(left.index) != len(right): raise ValueError(msg.format(req_len=len(left.index), given_len=len(right))) right = left._constructor_sliced(right, index=left.index) else: if len(left.columns) != len(right): raise ValueError(msg.format(req_len=len(left.columns), given_len=len(right))) right = left._constructor_sliced(right, index=left.columns) return right if isinstance(right, np.ndarray): if right.ndim == 1: right = to_series(right) elif right.ndim == 2: if right.shape == left.shape: right = left._constructor(right, index=left.index, columns=left.columns) elif right.shape[0] == left.shape[0] and right.shape[1] == 1: # Broadcast across columns right = np.broadcast_to(right, left.shape) right = left._constructor(right, index=left.index, columns=left.columns) elif right.shape[1] == left.shape[1] and right.shape[0] == 1: # Broadcast along rows right = to_series(right[0, :]) else: raise ValueError("Unable to coerce to DataFrame, shape " "must be {req_shape}: given {given_shape}" 
.format(req_shape=left.shape, given_shape=right.shape)) elif right.ndim > 2: raise ValueError('Unable to coerce to Series/DataFrame, dim ' 'must be <= 2: {dim}'.format(dim=right.shape)) elif (is_list_like(right) and not isinstance(right, (ABCSeries, ABCDataFrame))): # GH17901 right = to_series(right) return right def _arith_method_FRAME(cls, op, special): str_rep = _get_opstr(op, cls) op_name = _get_op_name(op, special) eval_kwargs = _gen_eval_kwargs(op_name) fill_zeros = _gen_fill_zeros(op_name) default_axis = _get_frame_op_default_axis(op_name) def na_op(x, y): import pandas.core.computation.expressions as expressions try: result = expressions.evaluate(op, str_rep, x, y, **eval_kwargs) except TypeError: result = masked_arith_op(x, y, op) result = missing.fill_zeros(result, x, y, op_name, fill_zeros) return result if op_name in _op_descriptions: # i.e. include "add" but not "__add__" doc = _make_flex_doc(op_name, 'dataframe') else: doc = _arith_doc_FRAME % op_name @Appender(doc) def f(self, other, axis=default_axis, level=None, fill_value=None): other = _align_method_FRAME(self, other, axis) if isinstance(other, ABCDataFrame): # Another DataFrame pass_op = op if should_series_dispatch(self, other, op) else na_op return self._combine_frame(other, pass_op, fill_value, level) elif isinstance(other, ABCSeries): # For these values of `axis`, we end up dispatching to Series op, # so do not want the masked op. pass_op = op if axis in [0, "columns", None] else na_op return _combine_series_frame(self, other, pass_op, fill_value=fill_value, axis=axis, level=level) else: if fill_value is not None: self = self.fillna(fill_value) assert np.ndim(other) == 0 return self._combine_const(other, op) f.__name__ = op_name return f def _flex_comp_method_FRAME(cls, op, special): str_rep = _get_opstr(op, cls) op_name = _get_op_name(op, special) default_axis = _get_frame_op_default_axis(op_name) def na_op(x, y): try: with np.errstate(invalid='ignore'): result = op(x, y) except TypeError: result = mask_cmp_op(x, y, op) return result doc = _flex_comp_doc_FRAME.format(op_name=op_name, desc=_op_descriptions[op_name]['desc']) @Appender(doc) def f(self, other, axis=default_axis, level=None): other = _align_method_FRAME(self, other, axis) if isinstance(other, ABCDataFrame): # Another DataFrame if not self._indexed_same(other): self, other = self.align(other, 'outer', level=level, copy=False) return dispatch_to_series(self, other, na_op, str_rep) elif isinstance(other, ABCSeries): return _combine_series_frame(self, other, na_op, fill_value=None, axis=axis, level=level) else: assert np.ndim(other) == 0, other return self._combine_const(other, na_op) f.__name__ = op_name return f def _comp_method_FRAME(cls, func, special): str_rep = _get_opstr(func, cls) op_name = _get_op_name(func, special) @Appender('Wrapper for comparison method {name}'.format(name=op_name)) def f(self, other): other = _align_method_FRAME(self, other, axis=None) if isinstance(other, ABCDataFrame): # Another DataFrame if not self._indexed_same(other): raise ValueError('Can only compare identically-labeled ' 'DataFrame objects') return dispatch_to_series(self, other, func, str_rep) elif isinstance(other, ABCSeries): return _combine_series_frame(self, other, func, fill_value=None, axis=None, level=None) else: # straight boolean comparisons we want to allow all columns # (regardless of dtype to pass thru) See #4537 for discussion. 
res = self._combine_const(other, func) return res.fillna(True).astype(bool) f.__name__ = op_name return f # ----------------------------------------------------------------------------- # Sparse def _cast_sparse_series_op(left, right, opname): """ For SparseSeries operation, coerce to float64 if the result is expected to have NaN or inf values Parameters ---------- left : SparseArray right : SparseArray opname : str Returns ------- left : SparseArray right : SparseArray """ from pandas.core.sparse.api import SparseDtype opname = opname.strip('_') # TODO: This should be moved to the array? if is_integer_dtype(left) and is_integer_dtype(right): # series coerces to float64 if result should have NaN/inf if opname in ('floordiv', 'mod') and (right.to_dense() == 0).any(): left = left.astype(SparseDtype(np.float64, left.fill_value)) right = right.astype(SparseDtype(np.float64, right.fill_value)) elif opname in ('rfloordiv', 'rmod') and (left.to_dense() == 0).any(): left = left.astype(SparseDtype(np.float64, left.fill_value)) right = right.astype(SparseDtype(np.float64, right.fill_value)) return left, right def _arith_method_SPARSE_SERIES(cls, op, special): """ Wrapper function for Series arithmetic operations, to avoid code duplication. """ op_name = _get_op_name(op, special) def wrapper(self, other): if isinstance(other, ABCDataFrame): return NotImplemented elif isinstance(other, ABCSeries): if not isinstance(other, ABCSparseSeries): other = other.to_sparse(fill_value=self.fill_value) return _sparse_series_op(self, other, op, op_name) elif is_scalar(other): with np.errstate(all='ignore'): new_values = op(self.values, other) return self._constructor(new_values, index=self.index, name=self.name) else: # pragma: no cover raise TypeError('operation with {other} not supported' .format(other=type(other))) wrapper.__name__ = op_name return wrapper def _sparse_series_op(left, right, op, name): left, right = left.align(right, join='outer', copy=False) new_index = left.index new_name = get_op_result_name(left, right) from pandas.core.arrays.sparse import _sparse_array_op lvalues, rvalues = _cast_sparse_series_op(left.values, right.values, name) result = _sparse_array_op(lvalues, rvalues, op, name) return left._constructor(result, index=new_index, name=new_name) def _arith_method_SPARSE_ARRAY(cls, op, special): """ Wrapper function for Series arithmetic operations, to avoid code duplication. """ op_name = _get_op_name(op, special) def wrapper(self, other): from pandas.core.arrays.sparse.array import ( SparseArray, _sparse_array_op, _wrap_result, _get_fill) if isinstance(other, np.ndarray): if len(self) != len(other): raise AssertionError("length mismatch: {self} vs. {other}" .format(self=len(self), other=len(other))) if not isinstance(other, SparseArray): dtype = getattr(other, 'dtype', None) other = SparseArray(other, fill_value=self.fill_value, dtype=dtype) return _sparse_array_op(self, other, op, op_name) elif is_scalar(other): with np.errstate(all='ignore'): fill = op(_get_fill(self), np.asarray(other)) result = op(self.sp_values, other) return _wrap_result(op_name, result, self.sp_index, fill) else: # pragma: no cover raise TypeError('operation with {other} not supported' .format(other=type(other))) wrapper.__name__ = op_name return wrapper
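
# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the pandas module above): ops.py attaches
# its operators dynamically -- _create_methods() builds a dict of generated
# wrappers, dunderize() maps "add" to "__add__", and add_methods() pins them
# onto the class with setattr().  The same pattern in miniature, using a
# hypothetical MiniSeries container in place of Series:

import operator


class MiniSeries:
    def __init__(self, values):
        self.values = list(values)

    def __repr__(self):
        return 'MiniSeries({})'.format(self.values)


def _make_arith_method(op):
    # Stand-in for the arith_method factories above: apply a binary operator
    # elementwise against a scalar right-hand side.
    def method(self, other):
        return MiniSeries([op(v, other) for v in self.values])
    return method


def add_special_methods(cls):
    # Build the method dict, dunderize the names, then pin them on the class,
    # mirroring _create_methods() + add_methods().
    dunderize = lambda name: '__{}__'.format(name.strip('_'))
    new_methods = {name: _make_arith_method(op)
                   for name, op in [('add', operator.add),
                                    ('sub', operator.sub),
                                    ('mul', operator.mul)]}
    for name, method in new_methods.items():
        setattr(cls, dunderize(name), method)
    return cls


add_special_methods(MiniSeries)
# MiniSeries([1, 2, 3]) + 10  ->  MiniSeries([11, 12, 13])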
from collections import OrderedDict import numpy as np from numpy import nan from numpy.random import randn import pytest import pandas as pd from pandas import DataFrame, Index, MultiIndex, Series from pandas.core.reshape.concat import concat from pandas.core.reshape.merge import merge import pandas.util.testing as tm @pytest.fixture def left(): """left dataframe (not multi-indexed) for multi-index join tests""" # a little relevant example with NAs key1 = ['bar', 'bar', 'bar', 'foo', 'foo', 'baz', 'baz', 'qux', 'qux', 'snap'] key2 = ['two', 'one', 'three', 'one', 'two', 'one', 'two', 'two', 'three', 'one'] data = np.random.randn(len(key1)) return DataFrame({'key1': key1, 'key2': key2, 'data': data}) @pytest.fixture def right(): """right dataframe (multi-indexed) for multi-index join tests""" index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'], ['one', 'two', 'three']], codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]], names=['key1', 'key2']) return DataFrame(np.random.randn(10, 3), index=index, columns=['j_one', 'j_two', 'j_three']) @pytest.fixture def left_multi(): return ( DataFrame( dict(Origin=['A', 'A', 'B', 'B', 'C'], Destination=['A', 'B', 'A', 'C', 'A'], Period=['AM', 'AM', 'IP', 'AM', 'OP'], TripPurp=['hbw', 'nhb', 'hbo', 'nhb', 'hbw'], Trips=[1987, 3647, 2470, 4296, 4444]), columns=['Origin', 'Destination', 'Period', 'TripPurp', 'Trips']) .set_index(['Origin', 'Destination', 'Period', 'TripPurp'])) @pytest.fixture def right_multi(): return ( DataFrame( dict(Origin=['A', 'A', 'B', 'B', 'C', 'C', 'E'], Destination=['A', 'B', 'A', 'B', 'A', 'B', 'F'], Period=['AM', 'AM', 'IP', 'AM', 'OP', 'IP', 'AM'], LinkType=['a', 'b', 'c', 'b', 'a', 'b', 'a'], Distance=[100, 80, 90, 80, 75, 35, 55]), columns=['Origin', 'Destination', 'Period', 'LinkType', 'Distance']) .set_index(['Origin', 'Destination', 'Period', 'LinkType'])) @pytest.fixture def on_cols_multi(): return ['Origin', 'Destination', 'Period'] @pytest.fixture def idx_cols_multi(): return ['Origin', 'Destination', 'Period', 'TripPurp', 'LinkType'] class TestMergeMulti: def setup_method(self): self.index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'], ['one', 'two', 'three']], codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]], names=['first', 'second']) self.to_join = DataFrame(np.random.randn(10, 3), index=self.index, columns=['j_one', 'j_two', 'j_three']) # a little relevant example with NAs key1 = ['bar', 'bar', 'bar', 'foo', 'foo', 'baz', 'baz', 'qux', 'qux', 'snap'] key2 = ['two', 'one', 'three', 'one', 'two', 'one', 'two', 'two', 'three', 'one'] data = np.random.randn(len(key1)) self.data = DataFrame({'key1': key1, 'key2': key2, 'data': data}) def test_merge_on_multikey(self, left, right, join_type): on_cols = ['key1', 'key2'] result = (left.join(right, on=on_cols, how=join_type) .reset_index(drop=True)) expected = pd.merge(left, right.reset_index(), on=on_cols, how=join_type) tm.assert_frame_equal(result, expected) result = (left.join(right, on=on_cols, how=join_type, sort=True) .reset_index(drop=True)) expected = pd.merge(left, right.reset_index(), on=on_cols, how=join_type, sort=True) tm.assert_frame_equal(result, expected) @pytest.mark.parametrize("sort", [False, True]) def test_left_join_multi_index(self, left, right, sort): icols = ['1st', '2nd', '3rd'] def bind_cols(df): iord = lambda a: 0 if a != a else ord(a) f = lambda ts: ts.map(iord) - ord('a') return (f(df['1st']) + f(df['3rd']) * 1e2 + df['2nd'].fillna(0) * 1e4) def run_asserts(left, right, sort): res = 
left.join(right, on=icols, how='left', sort=sort) assert len(left) < len(res) + 1 assert not res['4th'].isna().any() assert not res['5th'].isna().any() tm.assert_series_equal( res['4th'], - res['5th'], check_names=False) result = bind_cols(res.iloc[:, :-2]) tm.assert_series_equal(res['4th'], result, check_names=False) assert result.name is None if sort: tm.assert_frame_equal( res, res.sort_values(icols, kind='mergesort')) out = merge(left, right.reset_index(), on=icols, sort=sort, how='left') res.index = np.arange(len(res)) tm.assert_frame_equal(out, res) lc = list(map(chr, np.arange(ord('a'), ord('z') + 1))) left = DataFrame(np.random.choice(lc, (5000, 2)), columns=['1st', '3rd']) left.insert(1, '2nd', np.random.randint(0, 1000, len(left))) i = np.random.permutation(len(left)) right = left.iloc[i].copy() left['4th'] = bind_cols(left) right['5th'] = - bind_cols(right) right.set_index(icols, inplace=True) run_asserts(left, right, sort) # inject some nulls left.loc[1::23, '1st'] = np.nan left.loc[2::37, '2nd'] = np.nan left.loc[3::43, '3rd'] = np.nan left['4th'] = bind_cols(left) i = np.random.permutation(len(left)) right = left.iloc[i, :-1] right['5th'] = - bind_cols(right) right.set_index(icols, inplace=True) run_asserts(left, right, sort) @pytest.mark.parametrize("sort", [False, True]) def test_merge_right_vs_left(self, left, right, sort): # compare left vs right merge with multikey on_cols = ['key1', 'key2'] merged_left_right = left.merge(right, left_on=on_cols, right_index=True, how='left', sort=sort) merge_right_left = right.merge(left, right_on=on_cols, left_index=True, how='right', sort=sort) # Reorder columns merge_right_left = merge_right_left[merged_left_right.columns] tm.assert_frame_equal(merged_left_right, merge_right_left) def test_compress_group_combinations(self): # ~ 40000000 possible unique groups key1 = tm.rands_array(10, 10000) key1 = np.tile(key1, 2) key2 = key1[::-1] df = DataFrame({'key1': key1, 'key2': key2, 'value1': np.random.randn(20000)}) df2 = DataFrame({'key1': key1[::2], 'key2': key2[::2], 'value2': np.random.randn(10000)}) # just to hit the label compression code path merge(df, df2, how='outer') def test_left_join_index_preserve_order(self): on_cols = ['k1', 'k2'] left = DataFrame({'k1': [0, 1, 2] * 8, 'k2': ['foo', 'bar'] * 12, 'v': np.array(np.arange(24), dtype=np.int64)}) index = MultiIndex.from_tuples([(2, 'bar'), (1, 'foo')]) right = DataFrame({'v2': [5, 7]}, index=index) result = left.join(right, on=on_cols) expected = left.copy() expected['v2'] = np.nan expected.loc[(expected.k1 == 2) & (expected.k2 == 'bar'), 'v2'] = 5 expected.loc[(expected.k1 == 1) & (expected.k2 == 'foo'), 'v2'] = 7 tm.assert_frame_equal(result, expected) result.sort_values(on_cols, kind='mergesort', inplace=True) expected = left.join(right, on=on_cols, sort=True) tm.assert_frame_equal(result, expected) # test join with multi dtypes blocks left = DataFrame({'k1': [0, 1, 2] * 8, 'k2': ['foo', 'bar'] * 12, 'k3': np.array([0, 1, 2] * 8, dtype=np.float32), 'v': np.array(np.arange(24), dtype=np.int32)}) index = MultiIndex.from_tuples([(2, 'bar'), (1, 'foo')]) right = DataFrame({'v2': [5, 7]}, index=index) result = left.join(right, on=on_cols) expected = left.copy() expected['v2'] = np.nan expected.loc[(expected.k1 == 2) & (expected.k2 == 'bar'), 'v2'] = 5 expected.loc[(expected.k1 == 1) & (expected.k2 == 'foo'), 'v2'] = 7 tm.assert_frame_equal(result, expected) result = result.sort_values(on_cols, kind='mergesort') expected = left.join(right, on=on_cols, sort=True) 
tm.assert_frame_equal(result, expected) def test_left_join_index_multi_match_multiindex(self): left = DataFrame([ ['X', 'Y', 'C', 'a'], ['W', 'Y', 'C', 'e'], ['V', 'Q', 'A', 'h'], ['V', 'R', 'D', 'i'], ['X', 'Y', 'D', 'b'], ['X', 'Y', 'A', 'c'], ['W', 'Q', 'B', 'f'], ['W', 'R', 'C', 'g'], ['V', 'Y', 'C', 'j'], ['X', 'Y', 'B', 'd']], columns=['cola', 'colb', 'colc', 'tag'], index=[3, 2, 0, 1, 7, 6, 4, 5, 9, 8]) right = (DataFrame([ ['W', 'R', 'C', 0], ['W', 'Q', 'B', 3], ['W', 'Q', 'B', 8], ['X', 'Y', 'A', 1], ['X', 'Y', 'A', 4], ['X', 'Y', 'B', 5], ['X', 'Y', 'C', 6], ['X', 'Y', 'C', 9], ['X', 'Q', 'C', -6], ['X', 'R', 'C', -9], ['V', 'Y', 'C', 7], ['V', 'R', 'D', 2], ['V', 'R', 'D', -1], ['V', 'Q', 'A', -3]], columns=['col1', 'col2', 'col3', 'val']) .set_index(['col1', 'col2', 'col3'])) result = left.join(right, on=['cola', 'colb', 'colc'], how='left') expected = DataFrame([ ['X', 'Y', 'C', 'a', 6], ['X', 'Y', 'C', 'a', 9], ['W', 'Y', 'C', 'e', nan], ['V', 'Q', 'A', 'h', -3], ['V', 'R', 'D', 'i', 2], ['V', 'R', 'D', 'i', -1], ['X', 'Y', 'D', 'b', nan], ['X', 'Y', 'A', 'c', 1], ['X', 'Y', 'A', 'c', 4], ['W', 'Q', 'B', 'f', 3], ['W', 'Q', 'B', 'f', 8], ['W', 'R', 'C', 'g', 0], ['V', 'Y', 'C', 'j', 7], ['X', 'Y', 'B', 'd', 5]], columns=['cola', 'colb', 'colc', 'tag', 'val'], index=[3, 3, 2, 0, 1, 1, 7, 6, 6, 4, 4, 5, 9, 8]) tm.assert_frame_equal(result, expected) result = left.join(right, on=['cola', 'colb', 'colc'], how='left', sort=True) expected = expected.sort_values(['cola', 'colb', 'colc'], kind='mergesort') tm.assert_frame_equal(result, expected) def test_left_join_index_multi_match(self): left = DataFrame([ ['c', 0], ['b', 1], ['a', 2], ['b', 3]], columns=['tag', 'val'], index=[2, 0, 1, 3]) right = (DataFrame([ ['a', 'v'], ['c', 'w'], ['c', 'x'], ['d', 'y'], ['a', 'z'], ['c', 'r'], ['e', 'q'], ['c', 's']], columns=['tag', 'char']) .set_index('tag')) result = left.join(right, on='tag', how='left') expected = DataFrame([ ['c', 0, 'w'], ['c', 0, 'x'], ['c', 0, 'r'], ['c', 0, 's'], ['b', 1, nan], ['a', 2, 'v'], ['a', 2, 'z'], ['b', 3, nan]], columns=['tag', 'val', 'char'], index=[2, 2, 2, 2, 0, 1, 1, 3]) tm.assert_frame_equal(result, expected) result = left.join(right, on='tag', how='left', sort=True) expected2 = expected.sort_values('tag', kind='mergesort') tm.assert_frame_equal(result, expected2) # GH7331 - maintain left frame order in left merge result = merge(left, right.reset_index(), how='left', on='tag') expected.index = np.arange(len(expected)) tm.assert_frame_equal(result, expected) def test_left_merge_na_buglet(self): left = DataFrame({'id': list('abcde'), 'v1': randn(5), 'v2': randn(5), 'dummy': list('abcde'), 'v3': randn(5)}, columns=['id', 'v1', 'v2', 'dummy', 'v3']) right = DataFrame({'id': ['a', 'b', np.nan, np.nan, np.nan], 'sv3': [1.234, 5.678, np.nan, np.nan, np.nan]}) result = merge(left, right, on='id', how='left') rdf = right.drop(['id'], axis=1) expected = left.join(rdf) tm.assert_frame_equal(result, expected) def test_merge_na_keys(self): data = [[1950, "A", 1.5], [1950, "B", 1.5], [1955, "B", 1.5], [1960, "B", np.nan], [1970, "B", 4.], [1950, "C", 4.], [1960, "C", np.nan], [1965, "C", 3.], [1970, "C", 4.]] frame = DataFrame(data, columns=["year", "panel", "data"]) other_data = [[1960, 'A', np.nan], [1970, 'A', np.nan], [1955, 'A', np.nan], [1965, 'A', np.nan], [1965, 'B', np.nan], [1955, 'C', np.nan]] other = DataFrame(other_data, columns=['year', 'panel', 'data']) result = frame.merge(other, how='outer') expected = frame.fillna(-999).merge(other.fillna(-999), 
how='outer') expected = expected.replace(-999, np.nan) tm.assert_frame_equal(result, expected) @pytest.mark.parametrize("klass", [None, np.asarray, Series, Index]) def test_merge_datetime_index(self, klass): # see gh-19038 df = DataFrame([1, 2, 3], ["2016-01-01", "2017-01-01", "2018-01-01"], columns=["a"]) df.index = pd.to_datetime(df.index) on_vector = df.index.year if klass is not None: on_vector = klass(on_vector) expected = DataFrame( OrderedDict([ ("a", [1, 2, 3]), ("key_1", [2016, 2017, 2018]), ]) ) result = df.merge(df, on=["a", on_vector], how="inner") tm.assert_frame_equal(result, expected) expected = DataFrame( OrderedDict([ ("key_0", [2016, 2017, 2018]), ("a_x", [1, 2, 3]), ("a_y", [1, 2, 3]), ]) ) result = df.merge(df, on=[df.index.year], how="inner") tm.assert_frame_equal(result, expected) def test_join_multi_levels(self): # GH 3662 # merge multi-levels household = ( DataFrame( dict(household_id=[1, 2, 3], male=[0, 1, 0], wealth=[196087.3, 316478.7, 294750]), columns=['household_id', 'male', 'wealth']) .set_index('household_id')) portfolio = ( DataFrame( dict(household_id=[1, 2, 2, 3, 3, 3, 4], asset_id=["nl0000301109", "nl0000289783", "gb00b03mlx29", "gb00b03mlx29", "lu0197800237", "nl0000289965", np.nan], name=["ABN Amro", "Robeco", "Royal Dutch Shell", "Royal Dutch Shell", "AAB Eastern Europe Equity Fund", "Postbank BioTech Fonds", np.nan], share=[1.0, 0.4, 0.6, 0.15, 0.6, 0.25, 1.0]), columns=['household_id', 'asset_id', 'name', 'share']) .set_index(['household_id', 'asset_id'])) result = household.join(portfolio, how='inner') expected = ( DataFrame( dict(male=[0, 1, 1, 0, 0, 0], wealth=[196087.3, 316478.7, 316478.7, 294750.0, 294750.0, 294750.0], name=['ABN Amro', 'Robeco', 'Royal Dutch Shell', 'Royal Dutch Shell', 'AAB Eastern Europe Equity Fund', 'Postbank BioTech Fonds'], share=[1.00, 0.40, 0.60, 0.15, 0.60, 0.25], household_id=[1, 2, 2, 3, 3, 3], asset_id=['nl0000301109', 'nl0000289783', 'gb00b03mlx29', 'gb00b03mlx29', 'lu0197800237', 'nl0000289965'])) .set_index(['household_id', 'asset_id']) .reindex(columns=['male', 'wealth', 'name', 'share'])) tm.assert_frame_equal(result, expected) # equivalency result = (merge(household.reset_index(), portfolio.reset_index(), on=['household_id'], how='inner') .set_index(['household_id', 'asset_id'])) tm.assert_frame_equal(result, expected) result = household.join(portfolio, how='outer') expected = (concat([ expected, (DataFrame( dict(share=[1.00]), index=MultiIndex.from_tuples( [(4, np.nan)], names=['household_id', 'asset_id']))) ], axis=0, sort=True).reindex(columns=expected.columns)) tm.assert_frame_equal(result, expected) # invalid cases household.index.name = 'foo' with pytest.raises(ValueError): household.join(portfolio, how='inner') portfolio2 = portfolio.copy() portfolio2.index.set_names(['household_id', 'foo']) with pytest.raises(ValueError): portfolio2.join(portfolio, how='inner') def test_join_multi_levels2(self): # some more advanced merges # GH6360 household = ( DataFrame( dict(household_id=[1, 2, 2, 3, 3, 3, 4], asset_id=["nl0000301109", "nl0000301109", "gb00b03mlx29", "gb00b03mlx29", "lu0197800237", "nl0000289965", np.nan], share=[1.0, 0.4, 0.6, 0.15, 0.6, 0.25, 1.0]), columns=['household_id', 'asset_id', 'share']) .set_index(['household_id', 'asset_id'])) log_return = DataFrame(dict( asset_id=["gb00b03mlx29", "gb00b03mlx29", "gb00b03mlx29", "lu0197800237", "lu0197800237"], t=[233, 234, 235, 180, 181], log_return=[.09604978, -.06524096, .03532373, .03025441, .036997] )).set_index(["asset_id", "t"]) expected = ( 
DataFrame(dict( household_id=[2, 2, 2, 3, 3, 3, 3, 3], asset_id=["gb00b03mlx29", "gb00b03mlx29", "gb00b03mlx29", "gb00b03mlx29", "gb00b03mlx29", "gb00b03mlx29", "lu0197800237", "lu0197800237"], t=[233, 234, 235, 233, 234, 235, 180, 181], share=[0.6, 0.6, 0.6, 0.15, 0.15, 0.15, 0.6, 0.6], log_return=[.09604978, -.06524096, .03532373, .09604978, -.06524096, .03532373, .03025441, .036997] )) .set_index(["household_id", "asset_id", "t"]) .reindex(columns=['share', 'log_return'])) # this is the equivalency result = (merge(household.reset_index(), log_return.reset_index(), on=['asset_id'], how='inner') .set_index(['household_id', 'asset_id', 't'])) tm.assert_frame_equal(result, expected) expected = ( DataFrame(dict( household_id=[1, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 4], asset_id=["nl0000301109", "nl0000301109", "gb00b03mlx29", "gb00b03mlx29", "gb00b03mlx29", "gb00b03mlx29", "gb00b03mlx29", "gb00b03mlx29", "lu0197800237", "lu0197800237", "nl0000289965", None], t=[None, None, 233, 234, 235, 233, 234, 235, 180, 181, None, None], share=[1.0, 0.4, 0.6, 0.6, 0.6, 0.15, 0.15, 0.15, 0.6, 0.6, 0.25, 1.0], log_return=[None, None, .09604978, -.06524096, .03532373, .09604978, -.06524096, .03532373, .03025441, .036997, None, None] )) .set_index(["household_id", "asset_id", "t"]) .reindex(columns=['share', 'log_return'])) result = (merge(household.reset_index(), log_return.reset_index(), on=['asset_id'], how='outer') .set_index(['household_id', 'asset_id', 't'])) tm.assert_frame_equal(result, expected) class TestJoinMultiMulti: def test_join_multi_multi(self, left_multi, right_multi, join_type, on_cols_multi, idx_cols_multi): # Multi-index join tests expected = (pd.merge(left_multi.reset_index(), right_multi.reset_index(), how=join_type, on=on_cols_multi). set_index(idx_cols_multi).sort_index()) result = left_multi.join(right_multi, how=join_type).sort_index() tm.assert_frame_equal(result, expected) def test_join_multi_empty_frames(self, left_multi, right_multi, join_type, on_cols_multi, idx_cols_multi): left_multi = left_multi.drop(columns=left_multi.columns) right_multi = right_multi.drop(columns=right_multi.columns) expected = (pd.merge(left_multi.reset_index(), right_multi.reset_index(), how=join_type, on=on_cols_multi) .set_index(idx_cols_multi).sort_index()) result = left_multi.join(right_multi, how=join_type).sort_index() tm.assert_frame_equal(result, expected) @pytest.mark.parametrize("box", [None, np.asarray, Series, Index]) def test_merge_datetime_index(self, box): # see gh-19038 df = DataFrame([1, 2, 3], ["2016-01-01", "2017-01-01", "2018-01-01"], columns=["a"]) df.index = pd.to_datetime(df.index) on_vector = df.index.year if box is not None: on_vector = box(on_vector) expected = DataFrame( OrderedDict([ ("a", [1, 2, 3]), ("key_1", [2016, 2017, 2018]), ]) ) result = df.merge(df, on=["a", on_vector], how="inner") tm.assert_frame_equal(result, expected) expected = DataFrame( OrderedDict([ ("key_0", [2016, 2017, 2018]), ("a_x", [1, 2, 3]), ("a_y", [1, 2, 3]), ]) ) result = df.merge(df, on=[df.index.year], how="inner") tm.assert_frame_equal(result, expected) def test_single_common_level(self): index_left = pd.MultiIndex.from_tuples([('K0', 'X0'), ('K0', 'X1'), ('K1', 'X2')], names=['key', 'X']) left = pd.DataFrame({'A': ['A0', 'A1', 'A2'], 'B': ['B0', 'B1', 'B2']}, index=index_left) index_right = pd.MultiIndex.from_tuples([('K0', 'Y0'), ('K1', 'Y1'), ('K2', 'Y2'), ('K2', 'Y3')], names=['key', 'Y']) right = pd.DataFrame({'C': ['C0', 'C1', 'C2', 'C3'], 'D': ['D0', 'D1', 'D2', 'D3']}, index=index_right) 
result = left.join(right) expected = (pd.merge(left.reset_index(), right.reset_index(), on=['key'], how='inner') .set_index(['key', 'X', 'Y'])) tm.assert_frame_equal(result, expected)
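
# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the test module above): the smallest
# version of the equivalence these tests exercise -- joining key columns
# against a MultiIndexed right frame matches an explicit merge on the reset
# index.  The frame contents here are made up purely for illustration.

import pandas as pd

left = pd.DataFrame({'key1': ['foo', 'bar', 'baz'],
                     'key2': ['one', 'two', 'one'],
                     'v': [1, 2, 3]})
right = pd.DataFrame(
    {'j': [10.0, 20.0]},
    index=pd.MultiIndex.from_tuples([('foo', 'one'), ('bar', 'two')],
                                    names=['key1', 'key2']))

joined = left.join(right, on=['key1', 'key2'])
merged = pd.merge(left, right.reset_index(), on=['key1', 'key2'], how='left')
pd.testing.assert_frame_equal(joined, merged)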
repo_name: cbertinato/pandas
test_path: pandas/tests/reshape/merge/test_multi.py
code_path: pandas/core/ops.py
import py import sys, inspect from compiler import parse, ast, pycodegen from _pytest.assertion.util import format_explanation, BuiltinAssertionError passthroughex = py.builtin._sysex class Failure: def __init__(self, node): self.exc, self.value, self.tb = sys.exc_info() self.node = node class View(object): """View base class. If C is a subclass of View, then C(x) creates a proxy object around the object x. The actual class of the proxy is not C in general, but a *subclass* of C determined by the rules below. To avoid confusion we call view class the class of the proxy (a subclass of C, so of View) and object class the class of x. Attributes and methods not found in the proxy are automatically read on x. Other operations like setting attributes are performed on the proxy, as determined by its view class. The object x is available from the proxy as its __obj__ attribute. The view class selection is determined by the __view__ tuples and the optional __viewkey__ method. By default, the selected view class is the most specific subclass of C whose __view__ mentions the class of x. If no such subclass is found, the search proceeds with the parent object classes. For example, C(True) will first look for a subclass of C with __view__ = (..., bool, ...) and only if it doesn't find any look for one with __view__ = (..., int, ...), and then ..., object,... If everything fails the class C itself is considered to be the default. Alternatively, the view class selection can be driven by another aspect of the object x, instead of the class of x, by overriding __viewkey__. See last example at the end of this module. """ _viewcache = {} __view__ = () def __new__(rootclass, obj, *args, **kwds): self = object.__new__(rootclass) self.__obj__ = obj self.__rootclass__ = rootclass key = self.__viewkey__() try: self.__class__ = self._viewcache[key] except KeyError: self.__class__ = self._selectsubclass(key) return self def __getattr__(self, attr): # attributes not found in the normal hierarchy rooted on View # are looked up in the object's real class return getattr(self.__obj__, attr) def __viewkey__(self): return self.__obj__.__class__ def __matchkey__(self, key, subclasses): if inspect.isclass(key): keys = inspect.getmro(key) else: keys = [key] for key in keys: result = [C for C in subclasses if key in C.__view__] if result: return result return [] def _selectsubclass(self, key): subclasses = list(enumsubclasses(self.__rootclass__)) for C in subclasses: if not isinstance(C.__view__, tuple): C.__view__ = (C.__view__,) choices = self.__matchkey__(key, subclasses) if not choices: return self.__rootclass__ elif len(choices) == 1: return choices[0] else: # combine the multiple choices return type('?', tuple(choices), {}) def __repr__(self): return '%s(%r)' % (self.__rootclass__.__name__, self.__obj__) def enumsubclasses(cls): for subcls in cls.__subclasses__(): for subsubclass in enumsubclasses(subcls): yield subsubclass yield cls class Interpretable(View): """A parse tree node with a few extra methods.""" explanation = None def is_builtin(self, frame): return False def eval(self, frame): # fall-back for unknown expression nodes try: expr = ast.Expression(self.__obj__) expr.filename = '<eval>' self.__obj__.filename = '<eval>' co = pycodegen.ExpressionCodeGenerator(expr).getCode() result = frame.eval(co) except passthroughex: raise except: raise Failure(self) self.result = result self.explanation = self.explanation or frame.repr(self.result) def run(self, frame): # fall-back for unknown statement nodes try: expr = 
ast.Module(None, ast.Stmt([self.__obj__])) expr.filename = '<run>' co = pycodegen.ModuleCodeGenerator(expr).getCode() frame.exec_(co) except passthroughex: raise except: raise Failure(self) def nice_explanation(self): return format_explanation(self.explanation) class Name(Interpretable): __view__ = ast.Name def is_local(self, frame): source = '%r in locals() is not globals()' % self.name try: return frame.is_true(frame.eval(source)) except passthroughex: raise except: return False def is_global(self, frame): source = '%r in globals()' % self.name try: return frame.is_true(frame.eval(source)) except passthroughex: raise except: return False def is_builtin(self, frame): source = '%r not in locals() and %r not in globals()' % ( self.name, self.name) try: return frame.is_true(frame.eval(source)) except passthroughex: raise except: return False def eval(self, frame): super(Name, self).eval(frame) if not self.is_local(frame): self.explanation = self.name class Compare(Interpretable): __view__ = ast.Compare def eval(self, frame): expr = Interpretable(self.expr) expr.eval(frame) for operation, expr2 in self.ops: if hasattr(self, 'result'): # shortcutting in chained expressions if not frame.is_true(self.result): break expr2 = Interpretable(expr2) expr2.eval(frame) self.explanation = "%s %s %s" % ( expr.explanation, operation, expr2.explanation) source = "__exprinfo_left %s __exprinfo_right" % operation try: self.result = frame.eval(source, __exprinfo_left=expr.result, __exprinfo_right=expr2.result) except passthroughex: raise except: raise Failure(self) expr = expr2 class And(Interpretable): __view__ = ast.And def eval(self, frame): explanations = [] for expr in self.nodes: expr = Interpretable(expr) expr.eval(frame) explanations.append(expr.explanation) self.result = expr.result if not frame.is_true(expr.result): break self.explanation = '(' + ' and '.join(explanations) + ')' class Or(Interpretable): __view__ = ast.Or def eval(self, frame): explanations = [] for expr in self.nodes: expr = Interpretable(expr) expr.eval(frame) explanations.append(expr.explanation) self.result = expr.result if frame.is_true(expr.result): break self.explanation = '(' + ' or '.join(explanations) + ')' # == Unary operations == keepalive = [] for astclass, astpattern in { ast.Not : 'not __exprinfo_expr', ast.Invert : '(~__exprinfo_expr)', }.items(): class UnaryArith(Interpretable): __view__ = astclass def eval(self, frame, astpattern=astpattern): expr = Interpretable(self.expr) expr.eval(frame) self.explanation = astpattern.replace('__exprinfo_expr', expr.explanation) try: self.result = frame.eval(astpattern, __exprinfo_expr=expr.result) except passthroughex: raise except: raise Failure(self) keepalive.append(UnaryArith) # == Binary operations == for astclass, astpattern in { ast.Add : '(__exprinfo_left + __exprinfo_right)', ast.Sub : '(__exprinfo_left - __exprinfo_right)', ast.Mul : '(__exprinfo_left * __exprinfo_right)', ast.Div : '(__exprinfo_left / __exprinfo_right)', ast.Mod : '(__exprinfo_left % __exprinfo_right)', ast.Power : '(__exprinfo_left ** __exprinfo_right)', }.items(): class BinaryArith(Interpretable): __view__ = astclass def eval(self, frame, astpattern=astpattern): left = Interpretable(self.left) left.eval(frame) right = Interpretable(self.right) right.eval(frame) self.explanation = (astpattern .replace('__exprinfo_left', left .explanation) .replace('__exprinfo_right', right.explanation)) try: self.result = frame.eval(astpattern, __exprinfo_left=left.result, __exprinfo_right=right.result) except 
passthroughex: raise except: raise Failure(self) keepalive.append(BinaryArith) class CallFunc(Interpretable): __view__ = ast.CallFunc def is_bool(self, frame): source = 'isinstance(__exprinfo_value, bool)' try: return frame.is_true(frame.eval(source, __exprinfo_value=self.result)) except passthroughex: raise except: return False def eval(self, frame): node = Interpretable(self.node) node.eval(frame) explanations = [] vars = {'__exprinfo_fn': node.result} source = '__exprinfo_fn(' for a in self.args: if isinstance(a, ast.Keyword): keyword = a.name a = a.expr else: keyword = None a = Interpretable(a) a.eval(frame) argname = '__exprinfo_%d' % len(vars) vars[argname] = a.result if keyword is None: source += argname + ',' explanations.append(a.explanation) else: source += '%s=%s,' % (keyword, argname) explanations.append('%s=%s' % (keyword, a.explanation)) if self.star_args: star_args = Interpretable(self.star_args) star_args.eval(frame) argname = '__exprinfo_star' vars[argname] = star_args.result source += '*' + argname + ',' explanations.append('*' + star_args.explanation) if self.dstar_args: dstar_args = Interpretable(self.dstar_args) dstar_args.eval(frame) argname = '__exprinfo_kwds' vars[argname] = dstar_args.result source += '**' + argname + ',' explanations.append('**' + dstar_args.explanation) self.explanation = "%s(%s)" % ( node.explanation, ', '.join(explanations)) if source.endswith(','): source = source[:-1] source += ')' try: self.result = frame.eval(source, **vars) except passthroughex: raise except: raise Failure(self) if not node.is_builtin(frame) or not self.is_bool(frame): r = frame.repr(self.result) self.explanation = '%s\n{%s = %s\n}' % (r, r, self.explanation) class Getattr(Interpretable): __view__ = ast.Getattr def eval(self, frame): expr = Interpretable(self.expr) expr.eval(frame) source = '__exprinfo_expr.%s' % self.attrname try: self.result = frame.eval(source, __exprinfo_expr=expr.result) except passthroughex: raise except: raise Failure(self) self.explanation = '%s.%s' % (expr.explanation, self.attrname) # if the attribute comes from the instance, its value is interesting source = ('hasattr(__exprinfo_expr, "__dict__") and ' '%r in __exprinfo_expr.__dict__' % self.attrname) try: from_instance = frame.is_true( frame.eval(source, __exprinfo_expr=expr.result)) except passthroughex: raise except: from_instance = True if from_instance: r = frame.repr(self.result) self.explanation = '%s\n{%s = %s\n}' % (r, r, self.explanation) # == Re-interpretation of full statements == class Assert(Interpretable): __view__ = ast.Assert def run(self, frame): test = Interpretable(self.test) test.eval(frame) # print the result as 'assert <explanation>' self.result = test.result self.explanation = 'assert ' + test.explanation if not frame.is_true(test.result): try: raise BuiltinAssertionError except passthroughex: raise except: raise Failure(self) class Assign(Interpretable): __view__ = ast.Assign def run(self, frame): expr = Interpretable(self.expr) expr.eval(frame) self.result = expr.result self.explanation = '... 
= ' + expr.explanation # fall-back-run the rest of the assignment ass = ast.Assign(self.nodes, ast.Name('__exprinfo_expr')) mod = ast.Module(None, ast.Stmt([ass])) mod.filename = '<run>' co = pycodegen.ModuleCodeGenerator(mod).getCode() try: frame.exec_(co, __exprinfo_expr=expr.result) except passthroughex: raise except: raise Failure(self) class Discard(Interpretable): __view__ = ast.Discard def run(self, frame): expr = Interpretable(self.expr) expr.eval(frame) self.result = expr.result self.explanation = expr.explanation class Stmt(Interpretable): __view__ = ast.Stmt def run(self, frame): for stmt in self.nodes: stmt = Interpretable(stmt) stmt.run(frame) def report_failure(e): explanation = e.node.nice_explanation() if explanation: explanation = ", in: " + explanation else: explanation = "" sys.stdout.write("%s: %s%s\n" % (e.exc.__name__, e.value, explanation)) def check(s, frame=None): if frame is None: frame = sys._getframe(1) frame = py.code.Frame(frame) expr = parse(s, 'eval') assert isinstance(expr, ast.Expression) node = Interpretable(expr.node) try: node.eval(frame) except passthroughex: raise except Failure: e = sys.exc_info()[1] report_failure(e) else: if not frame.is_true(node.result): sys.stderr.write("assertion failed: %s\n" % node.nice_explanation()) ########################################################### # API / Entry points # ######################################################### def interpret(source, frame, should_fail=False): module = Interpretable(parse(source, 'exec').node) #print "got module", module if isinstance(frame, py.std.types.FrameType): frame = py.code.Frame(frame) try: module.run(frame) except Failure: e = sys.exc_info()[1] return getfailure(e) except passthroughex: raise except: import traceback traceback.print_exc() if should_fail: return ("(assertion failed, but when it was re-run for " "printing intermediate values, it did not fail. Suggestions: " "compute assert expression before the assert or use --assert=plain)") else: return None def getmsg(excinfo): if isinstance(excinfo, tuple): excinfo = py.code.ExceptionInfo(excinfo) #frame, line = gettbline(tb) #frame = py.code.Frame(frame) #return interpret(line, frame) tb = excinfo.traceback[-1] source = str(tb.statement).strip() x = interpret(source, tb.frame, should_fail=True) if not isinstance(x, str): raise TypeError("interpret returned non-string %r" % (x,)) return x def getfailure(e): explanation = e.node.nice_explanation() if str(e.value): lines = explanation.split('\n') lines[0] += " << %s" % (e.value,) explanation = '\n'.join(lines) text = "%s: %s" % (e.exc.__name__, explanation) if text.startswith('AssertionError: assert '): text = text[16:] return text def run(s, frame=None): if frame is None: frame = sys._getframe(1) frame = py.code.Frame(frame) module = Interpretable(parse(s, 'exec').node) try: module.run(frame) except Failure: e = sys.exc_info()[1] report_failure(e) if __name__ == '__main__': # example: def f(): return 5 def g(): return 3 def h(x): return 'never' check("f() * g() == 5") check("not f()") check("not (f() and g() or 0)") check("f() == g()") i = 4 check("i == f()") check("len(f()) == 0") check("isinstance(2+3+4, float)") run("x = i") check("x == 5") run("assert not f(), 'oops'") run("a, b, c = 1, 2") run("a, b, c = f()") check("max([f(),g()]) == 4") check("'hello'[g()] == 'h'") run("'guk%d' % h(f())")
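
# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the module above): the interpreter here
# rebuilds an explanation for a failing assert by evaluating its
# sub-expressions one at a time, but it depends on the Python 2-only
# `compiler` package.  The same idea for a single comparison, written against
# the modern stdlib `ast` module:

import ast


def explain_compare(source, namespace):
    # Parse e.g. "f() == 3", evaluate each side separately, and build an
    # explanation string such as "assert 2 == 3".
    node = ast.parse(source, mode='eval').body
    assert isinstance(node, ast.Compare) and len(node.ops) == 1
    sides = []
    for sub in (node.left, node.comparators[0]):
        expr = ast.fix_missing_locations(ast.Expression(sub))
        sides.append(eval(compile(expr, '<explain>', 'eval'), namespace))
    symbols = {ast.Eq: '==', ast.NotEq: '!=', ast.Lt: '<', ast.Gt: '>'}
    return 'assert {!r} {} {!r}'.format(
        sides[0], symbols[type(node.ops[0])], sides[1])


# explain_compare('f() == 3', {'f': lambda: 2})  ->  "assert 2 == 3"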
"PYTEST_DONT_REWRITE" import pytest, py from _pytest.assertion import util def exvalue(): return py.std.sys.exc_info()[1] def f(): return 2 def test_not_being_rewritten(): assert "@py_builtins" not in globals() def test_assert(): try: assert f() == 3 except AssertionError: e = exvalue() s = str(e) assert s.startswith('assert 2 == 3\n') def test_assert_with_explicit_message(): try: assert f() == 3, "hello" except AssertionError: e = exvalue() assert e.msg == 'hello' def test_assert_within_finally(): excinfo = py.test.raises(ZeroDivisionError, """ try: 1/0 finally: i = 42 """) s = excinfo.exconly() assert py.std.re.search("division.+by zero", s) is not None #def g(): # A.f() #excinfo = getexcinfo(TypeError, g) #msg = getmsg(excinfo) #assert msg.find("must be called with A") != -1 def test_assert_multiline_1(): try: assert (f() == 3) except AssertionError: e = exvalue() s = str(e) assert s.startswith('assert 2 == 3\n') def test_assert_multiline_2(): try: assert (f() == (4, 3)[-1]) except AssertionError: e = exvalue() s = str(e) assert s.startswith('assert 2 ==') def test_in(): try: assert "hi" in [1, 2] except AssertionError: e = exvalue() s = str(e) assert s.startswith("assert 'hi' in") def test_is(): try: assert 1 is 2 except AssertionError: e = exvalue() s = str(e) assert s.startswith("assert 1 is 2") @py.test.mark.skipif("sys.version_info < (2,6)") def test_attrib(): class Foo(object): b = 1 i = Foo() try: assert i.b == 2 except AssertionError: e = exvalue() s = str(e) assert s.startswith("assert 1 == 2") @py.test.mark.skipif("sys.version_info < (2,6)") def test_attrib_inst(): class Foo(object): b = 1 try: assert Foo().b == 2 except AssertionError: e = exvalue() s = str(e) assert s.startswith("assert 1 == 2") def test_len(): l = list(range(42)) try: assert len(l) == 100 except AssertionError: e = exvalue() s = str(e) assert s.startswith("assert 42 == 100") assert "where 42 = len([" in s def test_assert_non_string_message(): class A: def __str__(self): return "hello" try: assert 0 == 1, A() except AssertionError: e = exvalue() assert e.msg == "hello" def test_assert_keyword_arg(): def f(x=3): return False try: assert f(x=5) except AssertionError: e = exvalue() assert "x=5" in e.msg # These tests should both fail, but should fail nicely... 
class WeirdRepr: def __repr__(self): return '<WeirdRepr\nsecond line>' def bug_test_assert_repr(): v = WeirdRepr() try: assert v == 1 except AssertionError: e = exvalue() assert e.msg.find('WeirdRepr') != -1 assert e.msg.find('second line') != -1 assert 0 def test_assert_non_string(): try: assert 0, ['list'] except AssertionError: e = exvalue() assert e.msg.find("list") != -1 def test_assert_implicit_multiline(): try: x = [1,2,3] assert x != [1, 2, 3] except AssertionError: e = exvalue() assert e.msg.find('assert [1, 2, 3] !=') != -1 def test_assert_with_brokenrepr_arg(): class BrokenRepr: def __repr__(self): 0 / 0 e = AssertionError(BrokenRepr()) if e.msg.find("broken __repr__") == -1: py.test.fail("broken __repr__ not handle correctly") def test_multiple_statements_per_line(): try: a = 1; assert a == 2 except AssertionError: e = exvalue() assert "assert 1 == 2" in e.msg def test_power(): try: assert 2**3 == 7 except AssertionError: e = exvalue() assert "assert (2 ** 3) == 7" in e.msg class TestView: def setup_class(cls): cls.View = pytest.importorskip("_pytest.assertion.oldinterpret").View def test_class_dispatch(self): ### Use a custom class hierarchy with existing instances class Picklable(self.View): pass class Simple(Picklable): __view__ = object def pickle(self): return repr(self.__obj__) class Seq(Picklable): __view__ = list, tuple, dict def pickle(self): return ';'.join( [Picklable(item).pickle() for item in self.__obj__]) class Dict(Seq): __view__ = dict def pickle(self): return Seq.pickle(self) + '!' + Seq(self.values()).pickle() assert Picklable(123).pickle() == '123' assert Picklable([1,[2,3],4]).pickle() == '1;2;3;4' assert Picklable({1:2}).pickle() == '1!2' def test_viewtype_class_hierarchy(self): # Use a custom class hierarchy based on attributes of existing instances class Operation: "Existing class that I don't want to change." 
def __init__(self, opname, *args): self.opname = opname self.args = args existing = [Operation('+', 4, 5), Operation('getitem', '', 'join'), Operation('setattr', 'x', 'y', 3), Operation('-', 12, 1)] class PyOp(self.View): def __viewkey__(self): return self.opname def generate(self): return '%s(%s)' % (self.opname, ', '.join(map(repr, self.args))) class PyBinaryOp(PyOp): __view__ = ('+', '-', '*', '/') def generate(self): return '%s %s %s' % (self.args[0], self.opname, self.args[1]) codelines = [PyOp(op).generate() for op in existing] assert codelines == ["4 + 5", "getitem('', 'join')", "setattr('x', 'y', 3)", "12 - 1"] @py.test.mark.skipif("sys.version_info < (2,6)") def test_assert_customizable_reprcompare(monkeypatch): monkeypatch.setattr(util, '_reprcompare', lambda *args: 'hello') try: assert 3 == 4 except AssertionError: e = exvalue() s = str(e) assert "hello" in s def test_assert_long_source_1(): try: assert len == [ (None, ['somet text', 'more text']), ] except AssertionError: e = exvalue() s = str(e) assert 're-run' not in s assert 'somet text' in s def test_assert_long_source_2(): try: assert(len == [ (None, ['somet text', 'more text']), ]) except AssertionError: e = exvalue() s = str(e) assert 're-run' not in s assert 'somet text' in s def test_assert_raise_alias(testdir): testdir.makepyfile(""" "PYTEST_DONT_REWRITE" import sys EX = AssertionError def test_hello(): raise EX("hello" "multi" "line") """) result = testdir.runpytest() result.stdout.fnmatch_lines([ "*def test_hello*", "*raise EX*", "*1 failed*", ]) @pytest.mark.skipif("sys.version_info < (2,5)") def test_assert_raise_subclass(): class SomeEx(AssertionError): def __init__(self, *args): super(SomeEx, self).__init__() try: raise SomeEx("hello") except AssertionError: s = str(exvalue()) assert 're-run' not in s assert 'could not determine' in s def test_assert_raises_in_nonzero_of_object_pytest_issue10(): class A(object): def __nonzero__(self): raise ValueError(42) def __lt__(self, other): return A() def __repr__(self): return "<MY42 object>" def myany(x): return True try: assert not(myany(A() < 0)) except AssertionError: e = exvalue() s = str(e) assert "<MY42 object> < 0" in s @py.test.mark.skipif("sys.version_info >= (2,6)") def test_oldinterpret_importation(): # we had a cyclic import there # requires pytest on sys.path res = py.std.subprocess.call([ py.std.sys.executable, '-c', str(py.code.Source(""" try: from _pytest.assertion.newinterpret import interpret except ImportError: from _pytest.assertion.oldinterpret import interpret """)) ]) assert res == 0
geraldoandradee/pytest
testing/test_assertinterpret.py
_pytest/assertion/oldinterpret.py
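# Illustrative sketch, not part of the repository row above: the test module above begins
# with a "PYTEST_DONT_REWRITE" docstring so that pytest's assertion rewriting is disabled
# for it and failing asserts are instead handled at failure time by the (re)interpretation
# machinery the module is testing. A minimal module opting out of rewriting in the same way
# could look like the following; the file name test_no_rewrite.py and the helper double()
# are assumptions made only for this example.

"""PYTEST_DONT_REWRITE"""


def double(x):
    return 2 * x


def test_double_fails():
    # With rewriting disabled for this module, the failure report for this assert is
    # produced when the assertion fails rather than by code injected at import time
    # (in the pytest versions these tests target, via assertion reinterpretation).
    assert double(2) == 5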
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Astronomical and physics constants in SI units.  See :mod:`astropy.constants`
for a complete listing of constants defined in Astropy.
"""
import math

from .constant import Constant, EMConstant

# PHYSICAL CONSTANTS
# https://en.wikipedia.org/wiki/2019_redefinition_of_SI_base_units


class CODATA2018(Constant):
    default_reference = 'CODATA 2018'
    _registry = {}
    _has_incompatible_units = set()


class EMCODATA2018(CODATA2018, EMConstant):
    _registry = CODATA2018._registry


h = CODATA2018('h', "Planck constant", 6.62607015e-34,
               'J s', 0.0, system='si')

hbar = CODATA2018('hbar', "Reduced Planck constant",
                  h.value / (2 * math.pi), 'J s', 0.0, system='si')

k_B = CODATA2018('k_B', "Boltzmann constant", 1.380649e-23,
                 'J / (K)', 0.0, system='si')

c = CODATA2018('c', "Speed of light in vacuum", 299792458.,
               'm / (s)', 0.0, system='si')

G = CODATA2018('G', "Gravitational constant", 6.67430e-11,
               'm3 / (kg s2)', 0.00015e-11, system='si')

g0 = CODATA2018('g0', "Standard acceleration of gravity", 9.80665,
                'm / s2', 0.0, system='si')

m_p = CODATA2018('m_p', "Proton mass", 1.67262192369e-27,
                 'kg', 0.00000000051e-27, system='si')

m_n = CODATA2018('m_n', "Neutron mass", 1.67492749804e-27,
                 'kg', 0.00000000095e-27, system='si')

m_e = CODATA2018('m_e', "Electron mass", 9.1093837015e-31,
                 'kg', 0.0000000028e-31, system='si')

u = CODATA2018('u', "Atomic mass", 1.66053906660e-27,
               'kg', 0.00000000050e-27, system='si')

sigma_sb = CODATA2018(
    'sigma_sb', "Stefan-Boltzmann constant",
    2 * math.pi ** 5 * k_B.value ** 4 / (15 * h.value ** 3 * c.value ** 2),
    'W / (K4 m2)', 0.0, system='si')

e = EMCODATA2018('e', 'Electron charge', 1.602176634e-19,
                 'C', 0.0, system='si')

eps0 = EMCODATA2018('eps0', 'Vacuum electric permittivity', 8.8541878128e-12,
                    'F/m', 0.0000000013e-12, system='si')

N_A = CODATA2018('N_A', "Avogadro's number", 6.02214076e23,
                 '1 / (mol)', 0.0, system='si')

R = CODATA2018('R', "Gas constant", k_B.value * N_A.value,
               'J / (K mol)', 0.0, system='si')

Ryd = CODATA2018('Ryd', 'Rydberg constant', 10973731.568160,
                 '1 / (m)', 0.000021, system='si')

a0 = CODATA2018('a0', "Bohr radius", 5.29177210903e-11,
                'm', 0.00000000080e-11, system='si')

muB = CODATA2018('muB', "Bohr magneton", 9.2740100783e-24,
                 'J/T', 0.0000000028e-24, system='si')

alpha = CODATA2018('alpha', "Fine-structure constant", 7.2973525693e-3,
                   '', 0.0000000011e-3, system='si')

atm = CODATA2018('atm', "Standard atmosphere", 101325,
                 'Pa', 0.0, system='si')

mu0 = CODATA2018('mu0', "Vacuum magnetic permeability", 1.25663706212e-6,
                 'N/A2', 0.00000000019e-6, system='si')

sigma_T = CODATA2018('sigma_T', "Thomson scattering cross-section",
                     6.6524587321e-29, 'm2', 0.0000000060e-29, system='si')

# Formula taken from NIST wall chart.
# The numerical factor is from a numerical solution to the equation for the
# maximum. See https://en.wikipedia.org/wiki/Wien%27s_displacement_law
b_wien = CODATA2018('b_wien', 'Wien wavelength displacement law constant',
                    h.value * c.value / (k_B.value * 4.965114231744276), 'm K',
                    0.0, system='si')

# CGS constants.
# Only constants that cannot be converted directly from S.I. are defined here.
# Because both e and c are exact, these are also exact by definition.

e_esu = EMCODATA2018(e.abbrev, e.name, e.value * c.value * 10.0,
                     'statC', 0.0, system='esu')

e_emu = EMCODATA2018(e.abbrev, e.name, e.value / 10, 'abC',
                     0.0, system='emu')

e_gauss = EMCODATA2018(e.abbrev, e.name, e.value * c.value * 10.0,
                       'Fr', 0.0, system='gauss')
# Licensed under a 3-clause BSD style license - see LICENSE.rst

import pytest

from numpy.testing import assert_equal, assert_allclose

from astropy import units as u
from astropy.time import Time, TimeDelta
from astropy.utils.data import get_pkg_data_filename
from astropy.timeseries.periodograms import BoxLeastSquares, LombScargle
from astropy.timeseries.binned import BinnedTimeSeries
from astropy.tests.helper import assert_quantity_allclose


CSV_FILE = get_pkg_data_filename('data/binned.csv')


def test_empty_initialization():
    ts = BinnedTimeSeries()
    ts['time_bin_start'] = Time([1, 2, 3], format='mjd')


def test_empty_initialization_invalid():
    # Make sure things crash when the first column added is not a time column
    ts = BinnedTimeSeries()
    with pytest.raises(ValueError) as exc:
        ts['flux'] = [1, 2, 3]
    assert exc.value.args[0] == ("BinnedTimeSeries object is invalid - expected "
                                 "'time_bin_start' as the first column but found 'flux'")


def test_initialization_time_bin_invalid():
    # Make sure things crash when time_bin_* is passed incorrectly.
    with pytest.raises(TypeError) as exc:
        BinnedTimeSeries(data=[[1, 4, 3]])
    assert exc.value.args[0] == ("'time_bin_start' has not been specified")

    with pytest.raises(TypeError) as exc:
        BinnedTimeSeries(time_bin_start='2016-03-22T12:30:31', data=[[1, 4, 3]])
    assert exc.value.args[0] == ("Either 'time_bin_size' or 'time_bin_end' should be specified")


def test_initialization_time_bin_both():
    # Make sure things crash when time_bin_* is passed twice.
    with pytest.raises(TypeError) as exc:
        BinnedTimeSeries(data={"time_bin_start": ["2016-03-22T12:30:31"]},
                         time_bin_start="2016-03-22T12:30:31")
    assert exc.value.args[0] == ("'time_bin_start' has been given both in the table "
                                 "and as a keyword argument")

    with pytest.raises(TypeError) as exc:
        BinnedTimeSeries(data={"time_bin_size": ["2016-03-22T12:30:31"]},
                         time_bin_size=[1]*u.s)
    assert exc.value.args[0] == ("'time_bin_size' has been given both in the table "
                                 "and as a keyword argument")


def test_initialization_time_bin_size():
    # Make sure things crash when time_bin_size has no units
    with pytest.raises(TypeError) as exc:
        BinnedTimeSeries(data={"time": ["2016-03-22T12:30:31"]},
                         time_bin_start="2016-03-22T12:30:31",
                         time_bin_size=1)
    assert exc.value.args[0] == ("'time_bin_size' should be a Quantity or a TimeDelta")

    # TimeDelta for time_bin_size
    ts = BinnedTimeSeries(data={"time": ["2016-03-22T12:30:31"]},
                          time_bin_start="2016-03-22T12:30:31",
                          time_bin_size=TimeDelta(1))
    assert isinstance(ts.time_bin_size, u.quantity.Quantity)


def test_initialization_time_bin_start_scalar():
    # Make sure things crash when time_bin_start is a scalar with no time_bin_size
    with pytest.raises(TypeError) as exc:
        BinnedTimeSeries(data={"time": ["2016-03-22T12:30:31"]},
                         time_bin_start=Time(1, format='mjd'),
                         time_bin_end=Time(1, format='mjd'))
    assert exc.value.args[0] == ("'time_bin_start' is scalar, so 'time_bin_size' is required")


def test_initialization_n_bins():
    # Make sure things crash with incorrect n_bins
    with pytest.raises(TypeError) as exc:
        BinnedTimeSeries(data={"time": ["2016-03-22T12:30:31"]},
                         time_bin_start=Time(1, format='mjd'),
                         time_bin_size=1*u.s,
                         time_bin_end=Time(1, format='mjd'),
                         n_bins=10)
    assert exc.value.args[0] == ("'n_bins' has been given and it is not the "
                                 "same length as the input data.")


def test_initialization_non_scalar_time():
    # Make sure things crash with incorrect size of time_bin_start
    with pytest.raises(ValueError) as exc:
        BinnedTimeSeries(data={"time": ["2016-03-22T12:30:31"]},
                         time_bin_start=["2016-03-22T12:30:31", "2016-03-22T12:30:32"],
                         time_bin_size=1*u.s,
                         time_bin_end=Time(1, format='mjd'))
    assert exc.value.args[0] == ("Length of 'time_bin_start' (2) should match table length (1)")

    with pytest.raises(TypeError) as exc:
        BinnedTimeSeries(data={"time": ["2016-03-22T12:30:31"]},
                         time_bin_start=["2016-03-22T12:30:31"],
                         time_bin_size=None,
                         time_bin_end=None)
    assert exc.value.args[0] == ("Either 'time_bin_size' or 'time_bin_end' should be specified")


def test_even_contiguous():
    # Initialize a ``BinnedTimeSeries`` with even contiguous bins by specifying
    # the bin width:
    ts = BinnedTimeSeries(time_bin_start='2016-03-22T12:30:31',
                          time_bin_size=3 * u.s, data=[[1, 4, 3]])
    assert_equal(ts.time_bin_start.isot, ['2016-03-22T12:30:31.000',
                                          '2016-03-22T12:30:34.000',
                                          '2016-03-22T12:30:37.000'])
    assert_equal(ts.time_bin_center.isot, ['2016-03-22T12:30:32.500',
                                           '2016-03-22T12:30:35.500',
                                           '2016-03-22T12:30:38.500'])
    assert_equal(ts.time_bin_end.isot, ['2016-03-22T12:30:34.000',
                                        '2016-03-22T12:30:37.000',
                                        '2016-03-22T12:30:40.000'])


def test_uneven_contiguous():
    # Initialize a ``BinnedTimeSeries`` with uneven contiguous bins by giving an
    # end time:
    ts = BinnedTimeSeries(time_bin_start=['2016-03-22T12:30:31',
                                          '2016-03-22T12:30:32',
                                          '2016-03-22T12:30:40'],
                          time_bin_end='2016-03-22T12:30:55',
                          data=[[1, 4, 3]])
    assert_equal(ts.time_bin_start.isot, ['2016-03-22T12:30:31.000',
                                          '2016-03-22T12:30:32.000',
                                          '2016-03-22T12:30:40.000'])
    assert_equal(ts.time_bin_center.isot, ['2016-03-22T12:30:31.500',
                                           '2016-03-22T12:30:36.000',
                                           '2016-03-22T12:30:47.500'])
    assert_equal(ts.time_bin_end.isot, ['2016-03-22T12:30:32.000',
                                        '2016-03-22T12:30:40.000',
                                        '2016-03-22T12:30:55.000'])


def test_uneven_non_contiguous():
    # Initialize a ``BinnedTimeSeries`` with uneven non-contiguous bins with
    # lists of start times, bin sizes and data:
    ts = BinnedTimeSeries(time_bin_start=['2016-03-22T12:30:31',
                                          '2016-03-22T12:30:38',
                                          '2016-03-22T12:34:40'],
                          time_bin_size=[5, 100, 2]*u.s,
                          data=[[1, 4, 3]])
    assert_equal(ts.time_bin_start.isot, ['2016-03-22T12:30:31.000',
                                          '2016-03-22T12:30:38.000',
                                          '2016-03-22T12:34:40.000'])
    assert_equal(ts.time_bin_center.isot, ['2016-03-22T12:30:33.500',
                                           '2016-03-22T12:31:28.000',
                                           '2016-03-22T12:34:41.000'])
    assert_equal(ts.time_bin_end.isot, ['2016-03-22T12:30:36.000',
                                        '2016-03-22T12:32:18.000',
                                        '2016-03-22T12:34:42.000'])


def test_uneven_non_contiguous_full():
    # Initialize a ``BinnedTimeSeries`` with uneven non-contiguous bins by
    # specifying the start and end times for the bins:
    ts = BinnedTimeSeries(time_bin_start=['2016-03-22T12:30:31',
                                          '2016-03-22T12:30:33',
                                          '2016-03-22T12:30:40'],
                          time_bin_end=['2016-03-22T12:30:32',
                                        '2016-03-22T12:30:35',
                                        '2016-03-22T12:30:41'],
                          data=[[1, 4, 3]])
    assert_equal(ts.time_bin_start.isot, ['2016-03-22T12:30:31.000',
                                          '2016-03-22T12:30:33.000',
                                          '2016-03-22T12:30:40.000'])
    assert_equal(ts.time_bin_center.isot, ['2016-03-22T12:30:31.500',
                                           '2016-03-22T12:30:34.000',
                                           '2016-03-22T12:30:40.500'])
    assert_equal(ts.time_bin_end.isot, ['2016-03-22T12:30:32.000',
                                        '2016-03-22T12:30:35.000',
                                        '2016-03-22T12:30:41.000'])


def test_read_empty():
    with pytest.raises(ValueError) as exc:
        BinnedTimeSeries.read(CSV_FILE, format='csv')
    assert exc.value.args[0] == '``time_bin_start_column`` should be provided since the default Table readers are being used.'


def test_read_no_size_end():
    with pytest.raises(ValueError) as exc:
        BinnedTimeSeries.read(CSV_FILE, time_bin_start_column='time_start', format='csv')
    assert exc.value.args[0] == 'Either `time_bin_end_column` or `time_bin_size_column` should be provided.'


def test_read_both_extra_bins():
    with pytest.raises(ValueError) as exc:
        BinnedTimeSeries.read(CSV_FILE, time_bin_start_column='time_start',
                              time_bin_end_column='END',
                              time_bin_size_column='bin_size', format='csv')
    assert exc.value.args[0] == "Cannot specify both `time_bin_end_column` and `time_bin_size_column`."


def test_read_size_no_unit():
    with pytest.raises(ValueError) as exc:
        BinnedTimeSeries.read(CSV_FILE, time_bin_start_column='time_start',
                              time_bin_size_column='bin_size', format='csv')
    assert exc.value.args[0] == "The bin size unit should be specified as an astropy Unit using ``time_bin_size_unit``."


def test_read_start_time_missing():
    with pytest.raises(ValueError) as exc:
        BinnedTimeSeries.read(CSV_FILE, time_bin_start_column='abc',
                              time_bin_size_column='bin_size',
                              time_bin_size_unit=u.second, format='csv')
    assert exc.value.args[0] == "Bin start time column 'abc' not found in the input data."


def test_read_end_time_missing():
    with pytest.raises(ValueError) as exc:
        BinnedTimeSeries.read(CSV_FILE, time_bin_start_column='time_start',
                              time_bin_end_column="missing", format='csv')
    assert exc.value.args[0] == "Bin end time column 'missing' not found in the input data."


def test_read_size_missing():
    with pytest.raises(ValueError) as exc:
        BinnedTimeSeries.read(CSV_FILE, time_bin_start_column='time_start',
                              time_bin_size_column="missing",
                              time_bin_size_unit=u.second, format='csv')
    assert exc.value.args[0] == "Bin size column 'missing' not found in the input data."


def test_read_time_unit_missing():
    with pytest.raises(ValueError) as exc:
        BinnedTimeSeries.read(CSV_FILE, time_bin_start_column='time_start',
                              time_bin_size_column="bin_size", format='csv')
    assert exc.value.args[0] == "The bin size unit should be specified as an astropy Unit using ``time_bin_size_unit``."


def test_read():
    timeseries = BinnedTimeSeries.read(CSV_FILE, time_bin_start_column='time_start',
                                       time_bin_end_column='time_end', format='csv')
    assert timeseries.colnames == ['time_bin_start', 'time_bin_size', 'bin_size',
                                   'A', 'B', 'C', 'D', 'E', 'F']
    assert len(timeseries) == 10
    assert timeseries['B'].sum() == 1151.54

    timeseries = BinnedTimeSeries.read(CSV_FILE, time_bin_start_column='time_start',
                                       time_bin_size_column='bin_size',
                                       time_bin_size_unit=u.second, format='csv')
    assert timeseries.colnames == ['time_bin_start', 'time_bin_size', 'time_end',
                                   'A', 'B', 'C', 'D', 'E', 'F']
    assert len(timeseries) == 10
    assert timeseries['B'].sum() == 1151.54


@pytest.mark.parametrize('cls', [BoxLeastSquares, LombScargle])
def test_periodogram(cls):

    # Note that we don't need to check the actual results from the periodogram
    # classes here since these are tested extensively in
    # astropy.timeseries.periodograms.

    ts = BinnedTimeSeries(time_bin_start='2016-03-22T12:30:31',
                          time_bin_size=3 * u.s, data=[[1, 4, 3], [3, 4, 3]],
                          names=['a', 'b'])

    p1 = cls.from_timeseries(ts, 'a')
    assert isinstance(p1, cls)
    assert_allclose(p1.t.jd, ts.time_bin_center.jd)
    assert_equal(p1.y, ts['a'])
    assert p1.dy is None

    p2 = cls.from_timeseries(ts, 'a', uncertainty='b')
    assert_quantity_allclose(p2.dy, ts['b'])

    p3 = cls.from_timeseries(ts, 'a', uncertainty=0.1)
    assert_allclose(p3.dy, 0.1)
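# Illustrative sketch mirroring the constructor calls exercised in the tests above: an
# evenly binned ``BinnedTimeSeries`` built from a start time and a bin size. The column
# name 'flux' is an arbitrary choice for this example.

from astropy import units as u
from astropy.timeseries.binned import BinnedTimeSeries

ts = BinnedTimeSeries(time_bin_start='2016-03-22T12:30:31',
                      time_bin_size=3 * u.s,
                      data={'flux': [1, 4, 3]})

# Bin centers and ends are derived from the starts and sizes, which is exactly
# what test_even_contiguous() above asserts against.
print(ts.time_bin_center.isot)
print(ts.time_bin_end.isot)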
MSeifert04/astropy
astropy/timeseries/tests/test_binned.py
astropy/constants/codata2018.py
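# Illustrative sketch: in astropy versions that default to CODATA 2018, the constants
# module above sits behind the public astropy.constants interface, where each constant
# carries a value, unit, uncertainty and reference.

from astropy import constants as const

print(const.c)                            # full summary, including uncertainty and reference
print(const.R.value, const.R.unit)        # gas constant
print(const.k_B.value * const.N_A.value)  # equals R.value, matching the definition above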
# Licensed under a 3-clause BSD style license - see LICENSE.rst """ This package contains functions for reading and writing HDF5 tables that are not meant to be used directly, but instead are available as readers/writers in `astropy.table`. See :ref:`table_io` for more details. """ import os import warnings import numpy as np # NOTE: Do not import anything from astropy.table here. # https://github.com/astropy/astropy/issues/6604 from astropy.utils.exceptions import AstropyUserWarning, AstropyDeprecationWarning HDF5_SIGNATURE = b'\x89HDF\r\n\x1a\n' META_KEY = '__table_column_meta__' __all__ = ['read_table_hdf5', 'write_table_hdf5'] def meta_path(path): return path + '.' + META_KEY def _find_all_structured_arrays(handle): """ Find all structured arrays in an HDF5 file """ import h5py structured_arrays = [] def append_structured_arrays(name, obj): if isinstance(obj, h5py.Dataset) and obj.dtype.kind == 'V': structured_arrays.append(name) handle.visititems(append_structured_arrays) return structured_arrays def is_hdf5(origin, filepath, fileobj, *args, **kwargs): if fileobj is not None: loc = fileobj.tell() try: signature = fileobj.read(8) finally: fileobj.seek(loc) return signature == HDF5_SIGNATURE elif filepath is not None: return filepath.endswith(('.hdf5', '.h5')) try: import h5py except ImportError: return False else: return isinstance(args[0], (h5py.File, h5py.Group, h5py.Dataset)) def read_table_hdf5(input, path=None, character_as_bytes=True): """ Read a Table object from an HDF5 file This requires `h5py <http://www.h5py.org/>`_ to be installed. If more than one table is present in the HDF5 file or group, the first table is read in and a warning is displayed. Parameters ---------- input : str or :class:`h5py:File` or :class:`h5py:Group` or :class:`h5py:Dataset` If a string, the filename to read the table from. If an h5py object, either the file or the group object to read the table from. path : str The path from which to read the table inside the HDF5 file. This should be relative to the input file or group. character_as_bytes: boolean If `True` then Table columns are left as bytes. If `False` then Table columns are converted to unicode. """ try: import h5py except ImportError: raise Exception("h5py is required to read and write HDF5 files") # This function is iterative, and only gets to writing the file when # the input is an hdf5 Group. Moreover, the input variable is changed in # place. # Here, we save its value to be used at the end when the conditions are # right. input_save = input if isinstance(input, (h5py.File, h5py.Group)): # If a path was specified, follow the path if path is not None: try: input = input[path] except (KeyError, ValueError): raise OSError(f"Path {path} does not exist") # `input` is now either a group or a dataset. If it is a group, we # will search for all structured arrays inside the group, and if there # is one we can proceed otherwise an error is raised. If it is a # dataset, we just proceed with the reading. if isinstance(input, h5py.Group): # Find all structured arrays in group arrays = _find_all_structured_arrays(input) if len(arrays) == 0: raise ValueError("no table found in HDF5 group {}". 
format(path)) elif len(arrays) > 0: path = arrays[0] if path is None else path + '/' + arrays[0] if len(arrays) > 1: warnings.warn("path= was not specified but multiple tables" " are present, reading in first available" " table (path={})".format(path), AstropyUserWarning) return read_table_hdf5(input, path=path) elif not isinstance(input, h5py.Dataset): # If a file object was passed, then we need to extract the filename # because h5py cannot properly read in file objects. if hasattr(input, 'read'): try: input = input.name except AttributeError: raise TypeError("h5py can only open regular files") # Open the file for reading, and recursively call read_table_hdf5 with # the file object and the path. f = h5py.File(input, 'r') try: return read_table_hdf5(f, path=path, character_as_bytes=character_as_bytes) finally: f.close() # If we are here, `input` should be a Dataset object, which we can now # convert to a Table. # Create a Table object from astropy.table import Table, meta, serialize table = Table(np.array(input)) # Read the meta-data from the file. For back-compatibility, we can read # the old file format where the serialized metadata were saved in the # attributes of the HDF5 dataset. # In the new format, instead, metadata are stored in a new dataset in the # same file. This is introduced in Astropy 3.0 old_version_meta = META_KEY in input.attrs new_version_meta = path is not None and meta_path(path) in input_save if old_version_meta or new_version_meta: if new_version_meta: header = meta.get_header_from_yaml( h.decode('utf-8') for h in input_save[meta_path(path)]) elif old_version_meta: header = meta.get_header_from_yaml( h.decode('utf-8') for h in input.attrs[META_KEY]) if 'meta' in list(header.keys()): table.meta = header['meta'] header_cols = dict((x['name'], x) for x in header['datatype']) for col in table.columns.values(): for attr in ('description', 'format', 'unit', 'meta'): if attr in header_cols[col.name]: setattr(col, attr, header_cols[col.name][attr]) # Construct new table with mixins, using tbl.meta['__serialized_columns__'] # as guidance. table = serialize._construct_mixins_from_columns(table) else: # Read the meta-data from the file table.meta.update(input.attrs) if not character_as_bytes: table.convert_bytestring_to_unicode() return table def _encode_mixins(tbl): """Encode a Table ``tbl`` that may have mixin columns to a Table with only astropy Columns + appropriate meta-data to allow subsequent decoding. """ from astropy.table import serialize from astropy.table.table import has_info_class from astropy import units as u from astropy.utils.data_info import MixinInfo, serialize_context_as # If PyYAML is not available then check to see if there are any mixin cols # that *require* YAML serialization. HDF5 already has support for # Quantity, so if those are the only mixins the proceed without doing the # YAML bit, for backward compatibility (i.e. not requiring YAML to write # Quantity). try: import yaml except ImportError: for col in tbl.itercols(): if (has_info_class(col, MixinInfo) and col.__class__ is not u.Quantity): raise TypeError("cannot write type {} column '{}' " "to HDF5 without PyYAML installed." .format(col.__class__.__name__, col.info.name)) # Convert the table to one with no mixins, only Column objects. This adds # meta data which is extracted with meta.get_yaml_from_table. 
with serialize_context_as('hdf5'): encode_tbl = serialize.represent_mixins_as_columns(tbl) return encode_tbl def write_table_hdf5(table, output, path=None, compression=False, append=False, overwrite=False, serialize_meta=False): """ Write a Table object to an HDF5 file This requires `h5py <http://www.h5py.org/>`_ to be installed. Parameters ---------- table : `~astropy.table.Table` Data table that is to be written to file. output : str or :class:`h5py:File` or :class:`h5py:Group` If a string, the filename to write the table to. If an h5py object, either the file or the group object to write the table to. path : str The path to which to write the table inside the HDF5 file. This should be relative to the input file or group. If not specified, defaults to ``__astropy_table__``. compression : bool or str or int Whether to compress the table inside the HDF5 file. If set to `True`, ``'gzip'`` compression is used. If a string is specified, it should be one of ``'gzip'``, ``'szip'``, or ``'lzf'``. If an integer is specified (in the range 0-9), ``'gzip'`` compression is used, and the integer denotes the compression level. append : bool Whether to append the table to an existing HDF5 file. overwrite : bool Whether to overwrite any existing file without warning. If ``append=True`` and ``overwrite=True`` then only the dataset will be replaced; the file/group will not be overwritten. """ from astropy.table import meta try: import h5py except ImportError: raise Exception("h5py is required to read and write HDF5 files") if path is None: # table is just an arbitrary, hardcoded string here. path = '__astropy_table__' elif path.endswith('/'): raise ValueError("table path should end with table name, not /") if '/' in path: group, name = path.rsplit('/', 1) else: group, name = None, path if isinstance(output, (h5py.File, h5py.Group)): if len(list(output.keys())) > 0 and name == '__astropy_table__': raise ValueError("table path should always be set via the " "path= argument when writing to existing " "files") elif name == '__astropy_table__': warnings.warn("table path was not set via the path= argument; " "using default path {}".format(path)) if group: try: output_group = output[group] except (KeyError, ValueError): output_group = output.create_group(group) else: output_group = output elif isinstance(output, str): if os.path.exists(output) and not append: if overwrite and not append: os.remove(output) else: raise OSError(f"File exists: {output}") # Open the file for appending or writing f = h5py.File(output, 'a' if append else 'w') # Recursively call the write function try: return write_table_hdf5(table, f, path=path, compression=compression, append=append, overwrite=overwrite, serialize_meta=serialize_meta) finally: f.close() else: raise TypeError('output should be a string or an h5py File or ' 'Group object') # Check whether table already exists if name in output_group: if append and overwrite: # Delete only the dataset itself del output_group[name] else: raise OSError(f"Table {path} already exists") # Encode any mixin columns as plain columns + appropriate metadata table = _encode_mixins(table) # Table with numpy unicode strings can't be written in HDF5 so # to write such a table a copy of table is made containing columns as # bytestrings. Now this copy of the table can be written in HDF5. if any(col.info.dtype.kind == 'U' for col in table.itercols()): table = table.copy(copy_data=False) table.convert_unicode_to_bytestring() # Warn if information will be lost when serialize_meta=False. 
This is # hardcoded to the set difference between column info attributes and what # HDF5 can store natively (name, dtype) with no meta. if serialize_meta is False: for col in table.itercols(): for attr in ('unit', 'format', 'description', 'meta'): if getattr(col.info, attr, None) not in (None, {}): warnings.warn("table contains column(s) with defined 'unit', 'format'," " 'description', or 'meta' info attributes. These will" " be dropped since serialize_meta=False.", AstropyUserWarning) # Write the table to the file if compression: if compression is True: compression = 'gzip' dset = output_group.create_dataset(name, data=table.as_array(), compression=compression) else: dset = output_group.create_dataset(name, data=table.as_array()) if serialize_meta: header_yaml = meta.get_yaml_from_table(table) header_encoded = [h.encode('utf-8') for h in header_yaml] output_group.create_dataset(meta_path(name), data=header_encoded) else: # Write the Table meta dict key:value pairs to the file as HDF5 # attributes. This works only for a limited set of scalar data types # like numbers, strings, etc., but not any complex types. This path # also ignores column meta like unit or format. for key in table.meta: val = table.meta[key] try: dset.attrs[key] = val except TypeError: warnings.warn("Attribute `{}` of type {} cannot be written to " "HDF5 files - skipping. (Consider specifying " "serialize_meta=True to write all meta data)".format(key, type(val)), AstropyUserWarning) def register_hdf5(): """ Register HDF5 with Unified I/O. """ from astropy.io import registry as io_registry from astropy.table import Table io_registry.register_reader('hdf5', Table, read_table_hdf5) io_registry.register_writer('hdf5', Table, write_table_hdf5) io_registry.register_identifier('hdf5', Table, is_hdf5)
MSeifert04/astropy
astropy/timeseries/tests/test_binned.py
astropy/io/misc/hdf5.py
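# Illustrative sketch (requires h5py; the file name example.h5 and path 'data' are
# arbitrary): the reader and writer above are normally reached through the unified
# Table I/O layer registered by register_hdf5(), not called directly.

from astropy.table import Table

t = Table({'a': [1, 2, 3], 'b': [4.0, 5.0, 6.0]})
t['b'].unit = 'm'

# serialize_meta=True stores column metadata (unit, description, ...) in a side dataset
# named '<path>.__table_column_meta__', as implemented in write_table_hdf5() above.
t.write('example.h5', path='data', format='hdf5', serialize_meta=True, overwrite=True)

t2 = Table.read('example.h5', path='data', format='hdf5')
print(t2['b'].unit)   # restored from the serialized metadata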
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This subpackage provides a framework for representing models and performing
model evaluation and fitting.  It supports 1D and 2D models and fitting with
parameter constraints.  It has some predefined models and fitting routines.
"""

from . import fitting
from . import models
from .core import *
from .parameters import *
from .separable import *
MSeifert04/astropy
astropy/timeseries/tests/test_binned.py
astropy/modeling/__init__.py
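# Illustrative sketch: the subpackage re-exported above is typically used by pairing a
# model from astropy.modeling.models with a fitter from astropy.modeling.fitting. The
# synthetic data and initial guesses below are arbitrary example values.

import numpy as np
from astropy.modeling import models, fitting

x = np.linspace(-5., 5., 200)
y = 3. * np.exp(-0.5 * (x - 1.2) ** 2 / 0.9 ** 2) + np.random.normal(0., 0.05, x.size)

g_init = models.Gaussian1D(amplitude=1., mean=0., stddev=1.)
fitter = fitting.LevMarLSQFitter()
g_fit = fitter(g_init, x, y)

print(g_fit.amplitude.value, g_fit.mean.value, g_fit.stddev.value)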
# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst

from astropy.utils.decorators import format_doc
from astropy.coordinates.baseframe import frame_transform_graph, base_doc
from astropy.coordinates.attributes import TimeAttribute
from astropy.coordinates.transformations import DynamicMatrixTransform
from astropy.coordinates import earth_orientation as earth

from .baseradec import BaseRADecFrame, doc_components
from .utils import EQUINOX_J2000

__all__ = ['FK5']


doc_footer = """
    Other parameters
    ----------------
    equinox : `~astropy.time.Time`
        The equinox of this frame.
"""


@format_doc(base_doc, components=doc_components, footer=doc_footer)
class FK5(BaseRADecFrame):
    """
    A coordinate or frame in the FK5 system.

    Note that this is a barycentric version of FK5 - that is, the origin for
    this frame is the Solar System Barycenter, *not* the Earth geocenter.

    The frame attributes are listed under **Other Parameters**.
    """

    equinox = TimeAttribute(default=EQUINOX_J2000)

    @staticmethod
    def _precession_matrix(oldequinox, newequinox):
        """
        Compute and return the precession matrix for FK5 based on Capitaine et
        al. 2003/IAU2006.  Used inside some of the transformation functions.

        Parameters
        ----------
        oldequinox : `~astropy.time.Time`
            The equinox to precess from.
        newequinox : `~astropy.time.Time`
            The equinox to precess to.

        Returns
        -------
        newcoord : array
            The precession matrix to transform to the new equinox
        """
        return earth.precession_matrix_Capitaine(oldequinox, newequinox)


# This is the "self-transform".  Defined at module level because the decorator
# needs a reference to the FK5 class
@frame_transform_graph.transform(DynamicMatrixTransform, FK5, FK5)
def fk5_to_fk5(fk5coord1, fk5frame2):
    return fk5coord1._precession_matrix(fk5coord1.equinox, fk5frame2.equinox)
MSeifert04/astropy
astropy/timeseries/tests/test_binned.py
astropy/coordinates/builtin_frames/fk5.py
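A minimal, hedged sketch of the BinnedTimeSeries constructor pattern that the tests above exercise, assuming astropy is installed (the 'flux' column name and its values are illustrative only, not taken from the dataset row):

# Minimal sketch of the BinnedTimeSeries API exercised by the tests above.
# Assumes astropy is installed; the 'flux' column and its values are illustrative.
import astropy.units as u
from astropy.timeseries import BinnedTimeSeries

# Even, contiguous bins: a scalar start time plus a fixed bin size,
# mirroring test_even_contiguous() above.
ts = BinnedTimeSeries(time_bin_start='2016-03-22T12:30:31',
                      time_bin_size=3 * u.s,
                      data={'flux': [1, 4, 3]})

print(ts.time_bin_start.isot)   # start of each bin
print(ts.time_bin_center.isot)  # centre of each bin
print(ts.time_bin_end.isot)     # end of each bin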
# Licensed under a 3-clause BSD style license - see PYFITS.rst import collections import copy import itertools import re import warnings from .card import Card, _pad, KEYWORD_LENGTH, UNDEFINED from .file import _File from .util import (encode_ascii, decode_ascii, fileobj_closed, fileobj_is_binary, path_like) from ._utils import parse_header from astropy.utils import isiterable from astropy.utils.exceptions import AstropyUserWarning from astropy.utils.decorators import deprecated_renamed_argument BLOCK_SIZE = 2880 # the FITS block size # This regular expression can match a *valid* END card which just consists of # the string 'END' followed by all spaces, or an *invalid* end card which # consists of END, followed by any character that is *not* a valid character # for a valid FITS keyword (that is, this is not a keyword like 'ENDER' which # starts with 'END' but is not 'END'), followed by any arbitrary bytes. An # invalid end card may also consist of just 'END' with no trailing bytes. HEADER_END_RE = re.compile(encode_ascii( r'(?:(?P<valid>END {77}) *)|(?P<invalid>END$|END {0,76}[^A-Z0-9_-])')) # According to the FITS standard the only characters that may appear in a # header record are the restricted ASCII chars from 0x20 through 0x7E. VALID_HEADER_CHARS = set(map(chr, range(0x20, 0x7F))) END_CARD = 'END' + ' ' * 77 __doctest_skip__ = ['Header', 'Header.comments', 'Header.fromtextfile', 'Header.totextfile', 'Header.set', 'Header.update'] class Header: """ FITS header class. This class exposes both a dict-like interface and a list-like interface to FITS headers. The header may be indexed by keyword and, like a dict, the associated value will be returned. When the header contains cards with duplicate keywords, only the value of the first card with the given keyword will be returned. It is also possible to use a 2-tuple as the index in the form (keyword, n)--this returns the n-th value with that keyword, in the case where there are duplicate keywords. For example:: >>> header['NAXIS'] 0 >>> header[('FOO', 1)] # Return the value of the second FOO keyword 'foo' The header may also be indexed by card number:: >>> header[0] # Return the value of the first card in the header 'T' Commentary keywords such as HISTORY and COMMENT are special cases: When indexing the Header object with either 'HISTORY' or 'COMMENT' a list of all the HISTORY/COMMENT values is returned:: >>> header['HISTORY'] This is the first history entry in this header. This is the second history entry in this header. ... See the Astropy documentation for more details on working with headers. """ def __init__(self, cards=[], copy=False): """ Construct a `Header` from an iterable and/or text file. Parameters ---------- cards : A list of `Card` objects, optional The cards to initialize the header with. Also allowed are other `Header` (or `dict`-like) objects. .. versionchanged:: 1.2 Allowed ``cards`` to be a `dict`-like object. copy : bool, optional If ``True`` copies the ``cards`` if they were another `Header` instance. Default is ``False``. .. 
versionadded:: 1.3 """ self.clear() if isinstance(cards, Header): if copy: cards = cards.copy() cards = cards.cards elif isinstance(cards, dict): cards = cards.items() for card in cards: self.append(card, end=True) self._modified = False def __len__(self): return len(self._cards) def __iter__(self): for card in self._cards: yield card.keyword def __contains__(self, keyword): if keyword in self._keyword_indices or keyword in self._rvkc_indices: # For the most common case (single, standard form keyword lookup) # this will work and is an O(1) check. If it fails that doesn't # guarantee absence, just that we have to perform the full set of # checks in self._cardindex return True try: self._cardindex(keyword) except (KeyError, IndexError): return False return True def __getitem__(self, key): if isinstance(key, slice): return self.__class__([copy.copy(c) for c in self._cards[key]]) elif self._haswildcard(key): return self.__class__([copy.copy(self._cards[idx]) for idx in self._wildcardmatch(key)]) elif (isinstance(key, str) and key.upper() in Card._commentary_keywords): key = key.upper() # Special case for commentary cards return _HeaderCommentaryCards(self, key) if isinstance(key, tuple): keyword = key[0] else: keyword = key card = self._cards[self._cardindex(key)] if card.field_specifier is not None and keyword == card.rawkeyword: # This is RVKC; if only the top-level keyword was specified return # the raw value, not the parsed out float value return card.rawvalue value = card.value if value == UNDEFINED: return None return value def __setitem__(self, key, value): if self._set_slice(key, value, self): return if isinstance(value, tuple): if not (0 < len(value) <= 2): raise ValueError( 'A Header item may be set with either a scalar value, ' 'a 1-tuple containing a scalar value, or a 2-tuple ' 'containing a scalar value and comment string.') if len(value) == 1: value, comment = value[0], None if value is None: value = UNDEFINED elif len(value) == 2: value, comment = value if value is None: value = UNDEFINED if comment is None: comment = '' else: comment = None card = None if isinstance(key, int): card = self._cards[key] elif isinstance(key, tuple): card = self._cards[self._cardindex(key)] if value is None: value = UNDEFINED if card: card.value = value if comment is not None: card.comment = comment if card._modified: self._modified = True else: # If we get an IndexError that should be raised; we don't allow # assignment to non-existing indices self._update((key, value, comment)) def __delitem__(self, key): if isinstance(key, slice) or self._haswildcard(key): # This is very inefficient but it's not a commonly used feature. # If someone out there complains that they make heavy use of slice # deletions and it's too slow, well, we can worry about it then # [the solution is not too complicated--it would be wait 'til all # the cards are deleted before updating _keyword_indices rather # than updating it once for each card that gets deleted] if isinstance(key, slice): indices = range(*key.indices(len(self))) # If the slice step is backwards we want to reverse it, because # it will be reversed in a few lines... 
if key.step and key.step < 0: indices = reversed(indices) else: indices = self._wildcardmatch(key) for idx in reversed(indices): del self[idx] return elif isinstance(key, str): # delete ALL cards with the same keyword name key = Card.normalize_keyword(key) indices = self._keyword_indices if key not in self._keyword_indices: indices = self._rvkc_indices if key not in indices: # if keyword is not present raise KeyError. # To delete keyword without caring if they were present, # Header.remove(Keyword) can be used with optional argument ignore_missing as True raise KeyError(f"Keyword '{key}' not found.") for idx in reversed(indices[key]): # Have to copy the indices list since it will be modified below del self[idx] return idx = self._cardindex(key) card = self._cards[idx] keyword = card.keyword del self._cards[idx] keyword = Card.normalize_keyword(keyword) indices = self._keyword_indices[keyword] indices.remove(idx) if not indices: del self._keyword_indices[keyword] # Also update RVKC indices if necessary :/ if card.field_specifier is not None: indices = self._rvkc_indices[card.rawkeyword] indices.remove(idx) if not indices: del self._rvkc_indices[card.rawkeyword] # We also need to update all other indices self._updateindices(idx, increment=False) self._modified = True def __repr__(self): return self.tostring(sep='\n', endcard=False, padding=False) def __str__(self): return self.tostring() def __eq__(self, other): """ Two Headers are equal only if they have the exact same string representation. """ return str(self) == str(other) def __add__(self, other): temp = self.copy(strip=False) temp.extend(other) return temp def __iadd__(self, other): self.extend(other) return self def _ipython_key_completions_(self): return self.__iter__() @property def cards(self): """ The underlying physical cards that make up this Header; it can be looked at, but it should not be modified directly. """ return _CardAccessor(self) @property def comments(self): """ View the comments associated with each keyword, if any. For example, to see the comment on the NAXIS keyword: >>> header.comments['NAXIS'] number of data axes Comments can also be updated through this interface: >>> header.comments['NAXIS'] = 'Number of data axes' """ return _HeaderComments(self) @property def _modified(self): """ Whether or not the header has been modified; this is a property so that it can also check each card for modifications--cards may have been modified directly without the header containing it otherwise knowing. """ modified_cards = any(c._modified for c in self._cards) if modified_cards: # If any cards were modified then by definition the header was # modified self.__dict__['_modified'] = True return self.__dict__['_modified'] @_modified.setter def _modified(self, val): self.__dict__['_modified'] = val @classmethod def fromstring(cls, data, sep=''): """ Creates an HDU header from a byte string containing the entire header data. Parameters ---------- data : str or bytes String or bytes containing the entire header. In the case of bytes they will be decoded using latin-1 (only plain ASCII characters are allowed in FITS headers but latin-1 allows us to retain any invalid bytes that might appear in malformatted FITS files). sep : str, optional The string separating cards from each other, such as a newline. By default there is no card separator (as is the case in a raw FITS file). In general this is only used in cases where a header was printed as text (e.g. 
with newlines after each card) and you want to create a new `Header` from it by copy/pasting. Examples -------- >>> from astropy.io.fits import Header >>> hdr = Header({'SIMPLE': True}) >>> Header.fromstring(hdr.tostring()) == hdr True If you want to create a `Header` from printed text it's not necessary to have the exact binary structure as it would appear in a FITS file, with the full 80 byte card length. Rather, each "card" can end in a newline and does not have to be padded out to a full card length as long as it "looks like" a FITS header: >>> hdr = Header.fromstring(\"\"\"\\ ... SIMPLE = T / conforms to FITS standard ... BITPIX = 8 / array data type ... NAXIS = 0 / number of array dimensions ... EXTEND = T ... \"\"\", sep='\\n') >>> hdr['SIMPLE'] True >>> hdr['BITPIX'] 8 >>> len(hdr) 4 Returns ------- header A new `Header` instance. """ cards = [] # If the card separator contains characters that may validly appear in # a card, the only way to unambiguously distinguish between cards is to # require that they be Card.length long. However, if the separator # contains non-valid characters (namely \n) the cards may be split # immediately at the separator require_full_cardlength = set(sep).issubset(VALID_HEADER_CHARS) if isinstance(data, bytes): # FITS supports only ASCII, but decode as latin1 and just take all # bytes for now; if it results in mojibake due to e.g. UTF-8 # encoded data in a FITS header that's OK because it shouldn't be # there in the first place--accepting it here still gives us the # opportunity to display warnings later during validation CONTINUE = b'CONTINUE' END = b'END' end_card = END_CARD.encode('ascii') sep = sep.encode('latin1') empty = b'' else: CONTINUE = 'CONTINUE' END = 'END' end_card = END_CARD empty = '' # Split the header into individual cards idx = 0 image = [] while idx < len(data): if require_full_cardlength: end_idx = idx + Card.length else: try: end_idx = data.index(sep, idx) except ValueError: end_idx = len(data) next_image = data[idx:end_idx] idx = end_idx + len(sep) if image: if next_image[:8] == CONTINUE: image.append(next_image) continue cards.append(Card.fromstring(empty.join(image))) if require_full_cardlength: if next_image == end_card: image = [] break else: if next_image.split(sep)[0].rstrip() == END: image = [] break image = [next_image] # Add the last image that was found before the end, if any if image: cards.append(Card.fromstring(empty.join(image))) return cls._fromcards(cards) @classmethod def fromfile(cls, fileobj, sep='', endcard=True, padding=True): """ Similar to :meth:`Header.fromstring`, but reads the header string from a given file-like object or filename. Parameters ---------- fileobj : str, file-like A filename or an open file-like object from which a FITS header is to be read. For open file handles the file pointer must be at the beginning of the header. sep : str, optional The string separating cards from each other, such as a newline. By default there is no card separator (as is the case in a raw FITS file). endcard : bool, optional If True (the default) the header must end with an END card in order to be considered valid. If an END card is not found an `OSError` is raised. padding : bool, optional If True (the default) the header will be required to be padded out to a multiple of 2880, the FITS header block size. Otherwise any padding, or lack thereof, is ignored. Returns ------- header A new `Header` instance. 
""" close_file = False if isinstance(fileobj, path_like): # If sep is non-empty we are trying to read a header printed to a # text file, so open in text mode by default to support newline # handling; if a binary-mode file object is passed in, the user is # then on their own w.r.t. newline handling. # # Otherwise assume we are reading from an actual FITS file and open # in binary mode. if sep: fileobj = open(fileobj, 'r', encoding='latin1') else: fileobj = open(fileobj, 'rb') close_file = True try: is_binary = fileobj_is_binary(fileobj) def block_iter(nbytes): while True: data = fileobj.read(nbytes) if data: yield data else: break return cls._from_blocks(block_iter, is_binary, sep, endcard, padding)[1] finally: if close_file: fileobj.close() @classmethod def _fromcards(cls, cards): header = cls() for idx, card in enumerate(cards): header._cards.append(card) keyword = Card.normalize_keyword(card.keyword) header._keyword_indices[keyword].append(idx) if card.field_specifier is not None: header._rvkc_indices[card.rawkeyword].append(idx) header._modified = False return header @classmethod def _from_blocks(cls, block_iter, is_binary, sep, endcard, padding): """ The meat of `Header.fromfile`; in a separate method so that `Header.fromfile` itself is just responsible for wrapping file handling. Also used by `_BaseHDU.fromstring`. ``block_iter`` should be a callable which, given a block size n (typically 2880 bytes as used by the FITS standard) returns an iterator of byte strings of that block size. ``is_binary`` specifies whether the returned blocks are bytes or text Returns both the entire header *string*, and the `Header` object returned by Header.fromstring on that string. """ actual_block_size = _block_size(sep) clen = Card.length + len(sep) blocks = block_iter(actual_block_size) # Read the first header block. try: block = next(blocks) except StopIteration: raise EOFError() if not is_binary: # TODO: There needs to be error handling at *this* level for # non-ASCII characters; maybe at this stage decoding latin-1 might # be safer block = encode_ascii(block) read_blocks = [] is_eof = False end_found = False # continue reading header blocks until END card or EOF is reached while True: # find the END card end_found, block = cls._find_end_card(block, clen) read_blocks.append(decode_ascii(block)) if end_found: break try: block = next(blocks) except StopIteration: is_eof = True break if not block: is_eof = True break if not is_binary: block = encode_ascii(block) if not end_found and is_eof and endcard: # TODO: Pass this error to validation framework as an ERROR, # rather than raising an exception raise OSError('Header missing END card.') header_str = ''.join(read_blocks) _check_padding(header_str, actual_block_size, is_eof, check_block_size=padding) return header_str, cls.fromstring(header_str, sep=sep) @classmethod def _find_end_card(cls, block, card_len): """ Utility method to search a header block for the END card and handle invalid END cards. This method can also returned a modified copy of the input header block in case an invalid end card needs to be sanitized. 
""" for mo in HEADER_END_RE.finditer(block): # Ensure the END card was found, and it started on the # boundary of a new card (see ticket #142) if mo.start() % card_len != 0: continue # This must be the last header block, otherwise the # file is malformatted if mo.group('invalid'): offset = mo.start() trailing = block[offset + 3:offset + card_len - 3].rstrip() if trailing: trailing = repr(trailing).lstrip('ub') # TODO: Pass this warning up to the validation framework warnings.warn( 'Unexpected bytes trailing END keyword: {}; these ' 'bytes will be replaced with spaces on write.'.format( trailing), AstropyUserWarning) else: # TODO: Pass this warning up to the validation framework warnings.warn( 'Missing padding to end of the FITS block after the ' 'END keyword; additional spaces will be appended to ' 'the file upon writing to pad out to {} ' 'bytes.'.format(BLOCK_SIZE), AstropyUserWarning) # Sanitize out invalid END card now that the appropriate # warnings have been issued block = (block[:offset] + encode_ascii(END_CARD) + block[offset + len(END_CARD):]) return True, block return False, block def tostring(self, sep='', endcard=True, padding=True): r""" Returns a string representation of the header. By default this uses no separator between cards, adds the END card, and pads the string with spaces to the next multiple of 2880 bytes. That is, it returns the header exactly as it would appear in a FITS file. Parameters ---------- sep : str, optional The character or string with which to separate cards. By default there is no separator, but one could use ``'\\n'``, for example, to separate each card with a new line endcard : bool, optional If True (default) adds the END card to the end of the header string padding : bool, optional If True (default) pads the string with spaces out to the next multiple of 2880 characters Returns ------- s : str A string representing a FITS header. """ lines = [] for card in self._cards: s = str(card) # Cards with CONTINUE cards may be longer than 80 chars; so break # them into multiple lines while s: lines.append(s[:Card.length]) s = s[Card.length:] s = sep.join(lines) if endcard: s += sep + _pad('END') if padding: s += ' ' * _pad_length(len(s)) return s @deprecated_renamed_argument('clobber', 'overwrite', '2.0') def tofile(self, fileobj, sep='', endcard=True, padding=True, overwrite=False): r""" Writes the header to file or file-like object. By default this writes the header exactly as it would be written to a FITS file, with the END card included and padding to the next multiple of 2880 bytes. However, aspects of this may be controlled. Parameters ---------- fileobj : str, file, optional Either the pathname of a file, or an open file handle or file-like object sep : str, optional The character or string with which to separate cards. By default there is no separator, but one could use ``'\\n'``, for example, to separate each card with a new line endcard : bool, optional If `True` (default) adds the END card to the end of the header string padding : bool, optional If `True` (default) pads the string with spaces out to the next multiple of 2880 characters overwrite : bool, optional If ``True``, overwrite the output file if it exists. Raises an ``OSError`` if ``False`` and the output file exists. Default is ``False``. .. versionchanged:: 1.3 ``overwrite`` replaces the deprecated ``clobber`` argument. 
""" close_file = fileobj_closed(fileobj) if not isinstance(fileobj, _File): fileobj = _File(fileobj, mode='ostream', overwrite=overwrite) try: blocks = self.tostring(sep=sep, endcard=endcard, padding=padding) actual_block_size = _block_size(sep) if padding and len(blocks) % actual_block_size != 0: raise OSError( 'Header size ({}) is not a multiple of block ' 'size ({}).'.format( len(blocks) - actual_block_size + BLOCK_SIZE, BLOCK_SIZE)) if not fileobj.simulateonly: fileobj.flush() fileobj.write(blocks.encode('ascii')) fileobj.flush() finally: if close_file: fileobj.close() @classmethod def fromtextfile(cls, fileobj, endcard=False): """ Read a header from a simple text file or file-like object. Equivalent to:: >>> Header.fromfile(fileobj, sep='\\n', endcard=False, ... padding=False) See Also -------- fromfile """ return cls.fromfile(fileobj, sep='\n', endcard=endcard, padding=False) @deprecated_renamed_argument('clobber', 'overwrite', '2.0') def totextfile(self, fileobj, endcard=False, overwrite=False): """ Write the header as text to a file or a file-like object. Equivalent to:: >>> Header.tofile(fileobj, sep='\\n', endcard=False, ... padding=False, overwrite=overwrite) .. versionchanged:: 1.3 ``overwrite`` replaces the deprecated ``clobber`` argument. See Also -------- tofile """ self.tofile(fileobj, sep='\n', endcard=endcard, padding=False, overwrite=overwrite) def clear(self): """ Remove all cards from the header. """ self._cards = [] self._keyword_indices = collections.defaultdict(list) self._rvkc_indices = collections.defaultdict(list) def copy(self, strip=False): """ Make a copy of the :class:`Header`. .. versionchanged:: 1.3 `copy.copy` and `copy.deepcopy` on a `Header` will call this method. Parameters ---------- strip : bool, optional If `True`, strip any headers that are specific to one of the standard HDU types, so that this header can be used in a different HDU. Returns ------- header A new :class:`Header` instance. """ tmp = self.__class__((copy.copy(card) for card in self._cards)) if strip: tmp._strip() return tmp def __copy__(self): return self.copy() def __deepcopy__(self, *args, **kwargs): return self.copy() @classmethod def fromkeys(cls, iterable, value=None): """ Similar to :meth:`dict.fromkeys`--creates a new `Header` from an iterable of keywords and an optional default value. This method is not likely to be particularly useful for creating real world FITS headers, but it is useful for testing. Parameters ---------- iterable Any iterable that returns strings representing FITS keywords. value : optional A default value to assign to each keyword; must be a valid type for FITS keywords. Returns ------- header A new `Header` instance. """ d = cls() if not isinstance(value, tuple): value = (value,) for key in iterable: d.append((key,) + value) return d def get(self, key, default=None): """ Similar to :meth:`dict.get`--returns the value associated with keyword in the header, or a default value if the keyword is not found. Parameters ---------- key : str A keyword that may or may not be in the header. default : optional A default value to return if the keyword is not found in the header. Returns ------- value The value associated with the given keyword, or the default value if the keyword is not in the header. """ try: return self[key] except (KeyError, IndexError): return default def set(self, keyword, value=None, comment=None, before=None, after=None): """ Set the value and/or comment and/or position of a specified keyword. 
If the keyword does not already exist in the header, a new keyword is created in the specified position, or appended to the end of the header if no position is specified. This method is similar to :meth:`Header.update` prior to Astropy v0.1. .. note:: It should be noted that ``header.set(keyword, value)`` and ``header.set(keyword, value, comment)`` are equivalent to ``header[keyword] = value`` and ``header[keyword] = (value, comment)`` respectively. New keywords can also be inserted relative to existing keywords using, for example:: >>> header.insert('NAXIS1', ('NAXIS', 2, 'Number of axes')) to insert before an existing keyword, or:: >>> header.insert('NAXIS', ('NAXIS1', 4096), after=True) to insert after an existing keyword. The only advantage of using :meth:`Header.set` is that it easily replaces the old usage of :meth:`Header.update` both conceptually and in terms of function signature. Parameters ---------- keyword : str A header keyword value : str, optional The value to set for the given keyword; if None the existing value is kept, but '' may be used to set a blank value comment : str, optional The comment to set for the given keyword; if None the existing comment is kept, but ``''`` may be used to set a blank comment before : str, int, optional Name of the keyword, or index of the `Card` before which this card should be located in the header. The argument ``before`` takes precedence over ``after`` if both specified. after : str, int, optional Name of the keyword, or index of the `Card` after which this card should be located in the header. """ # Create a temporary card that looks like the one being set; if the # temporary card turns out to be a RVKC this will make it easier to # deal with the idiosyncrasies thereof # Don't try to make a temporary card though if they keyword looks like # it might be a HIERARCH card or is otherwise invalid--this step is # only for validating RVKCs. if (len(keyword) <= KEYWORD_LENGTH and Card._keywd_FSC_RE.match(keyword) and keyword not in self._keyword_indices): new_card = Card(keyword, value, comment) new_keyword = new_card.keyword else: new_keyword = keyword if (new_keyword not in Card._commentary_keywords and new_keyword in self): if comment is None: comment = self.comments[keyword] if value is None: value = self[keyword] self[keyword] = (value, comment) if before is not None or after is not None: card = self._cards[self._cardindex(keyword)] self._relativeinsert(card, before=before, after=after, replace=True) elif before is not None or after is not None: self._relativeinsert((keyword, value, comment), before=before, after=after) else: self[keyword] = (value, comment) def items(self): """Like :meth:`dict.items`.""" for card in self._cards: yield (card.keyword, card.value) def keys(self): """ Like :meth:`dict.keys`--iterating directly over the `Header` instance has the same behavior. """ for card in self._cards: yield card.keyword def values(self): """Like :meth:`dict.values`.""" for card in self._cards: yield card.value def pop(self, *args): """ Works like :meth:`list.pop` if no arguments or an index argument are supplied; otherwise works like :meth:`dict.pop`. 
""" if len(args) > 2: raise TypeError('Header.pop expected at most 2 arguments, got ' '{}'.format(len(args))) if len(args) == 0: key = -1 else: key = args[0] try: value = self[key] except (KeyError, IndexError): if len(args) == 2: return args[1] raise del self[key] return value def popitem(self): """Similar to :meth:`dict.popitem`.""" try: k, v = next(self.items()) except StopIteration: raise KeyError('Header is empty') del self[k] return k, v def setdefault(self, key, default=None): """Similar to :meth:`dict.setdefault`.""" try: return self[key] except (KeyError, IndexError): self[key] = default return default def update(self, *args, **kwargs): """ Update the Header with new keyword values, updating the values of existing keywords and appending new keywords otherwise; similar to `dict.update`. `update` accepts either a dict-like object or an iterable. In the former case the keys must be header keywords and the values may be either scalar values or (value, comment) tuples. In the case of an iterable the items must be (keyword, value) tuples or (keyword, value, comment) tuples. Arbitrary arguments are also accepted, in which case the update() is called again with the kwargs dict as its only argument. That is, :: >>> header.update(NAXIS1=100, NAXIS2=100) is equivalent to:: header.update({'NAXIS1': 100, 'NAXIS2': 100}) .. warning:: As this method works similarly to `dict.update` it is very different from the ``Header.update()`` method in Astropy v0.1. Use of the old API was **deprecated** for a long time and is now removed. Most uses of the old API can be replaced as follows: * Replace :: header.update(keyword, value) with :: header[keyword] = value * Replace :: header.update(keyword, value, comment=comment) with :: header[keyword] = (value, comment) * Replace :: header.update(keyword, value, before=before_keyword) with :: header.insert(before_keyword, (keyword, value)) * Replace :: header.update(keyword, value, after=after_keyword) with :: header.insert(after_keyword, (keyword, value), after=True) See also :meth:`Header.set` which is a new method that provides an interface similar to the old ``Header.update()`` and may help make transition a little easier. """ if args: other = args[0] else: other = None def update_from_dict(k, v): if not isinstance(v, tuple): card = Card(k, v) elif 0 < len(v) <= 2: card = Card(*((k,) + v)) else: raise ValueError( 'Header update value for key %r is invalid; the ' 'value must be either a scalar, a 1-tuple ' 'containing the scalar value, or a 2-tuple ' 'containing the value and a comment string.' % k) self._update(card) if other is None: pass elif isinstance(other, Header): for card in other.cards: self._update(card) elif hasattr(other, 'items'): for k, v in other.items(): update_from_dict(k, v) elif hasattr(other, 'keys'): for k in other.keys(): update_from_dict(k, other[k]) else: for idx, card in enumerate(other): if isinstance(card, Card): self._update(card) elif isinstance(card, tuple) and (1 < len(card) <= 3): self._update(Card(*card)) else: raise ValueError( 'Header update sequence item #{} is invalid; ' 'the item must either be a 2-tuple containing ' 'a keyword and value, or a 3-tuple containing ' 'a keyword, value, and comment string.'.format(idx)) if kwargs: self.update(kwargs) def append(self, card=None, useblanks=True, bottom=False, end=False): """ Appends a new keyword+value card to the end of the Header, similar to `list.append`. 
By default if the last cards in the Header have commentary keywords, this will append the new keyword before the commentary (unless the new keyword is also commentary). Also differs from `list.append` in that it can be called with no arguments: In this case a blank card is appended to the end of the Header. In the case all the keyword arguments are ignored. Parameters ---------- card : str, tuple A keyword or a (keyword, value, [comment]) tuple representing a single header card; the comment is optional in which case a 2-tuple may be used useblanks : bool, optional If there are blank cards at the end of the Header, replace the first blank card so that the total number of cards in the Header does not increase. Otherwise preserve the number of blank cards. bottom : bool, optional If True, instead of appending after the last non-commentary card, append after the last non-blank card. end : bool, optional If True, ignore the useblanks and bottom options, and append at the very end of the Header. """ if isinstance(card, str): card = Card(card) elif isinstance(card, tuple): card = Card(*card) elif card is None: card = Card() elif not isinstance(card, Card): raise ValueError( 'The value appended to a Header must be either a keyword or ' '(keyword, value, [comment]) tuple; got: {!r}'.format(card)) if not end and card.is_blank: # Blank cards should always just be appended to the end end = True if end: self._cards.append(card) idx = len(self._cards) - 1 else: idx = len(self._cards) - 1 while idx >= 0 and self._cards[idx].is_blank: idx -= 1 if not bottom and card.keyword not in Card._commentary_keywords: while (idx >= 0 and self._cards[idx].keyword in Card._commentary_keywords): idx -= 1 idx += 1 self._cards.insert(idx, card) self._updateindices(idx) keyword = Card.normalize_keyword(card.keyword) self._keyword_indices[keyword].append(idx) if card.field_specifier is not None: self._rvkc_indices[card.rawkeyword].append(idx) if not end: # If the appended card was a commentary card, and it was appended # before existing cards with the same keyword, the indices for # cards with that keyword may have changed if not bottom and card.keyword in Card._commentary_keywords: self._keyword_indices[keyword].sort() # Finally, if useblanks, delete a blank cards from the end if useblanks and self._countblanks(): # Don't do this unless there is at least one blanks at the end # of the header; we need to convert the card to its string # image to see how long it is. In the vast majority of cases # this will just be 80 (Card.length) but it may be longer for # CONTINUE cards self._useblanks(len(str(card)) // Card.length) self._modified = True def extend(self, cards, strip=True, unique=False, update=False, update_first=False, useblanks=True, bottom=False, end=False): """ Appends multiple keyword+value cards to the end of the header, similar to `list.extend`. Parameters ---------- cards : iterable An iterable of (keyword, value, [comment]) tuples; see `Header.append`. strip : bool, optional Remove any keywords that have meaning only to specific types of HDUs, so that only more general keywords are added from extension Header or Card list (default: `True`). unique : bool, optional If `True`, ensures that no duplicate keywords are appended; keywords already in this header are simply discarded. The exception is commentary keywords (COMMENT, HISTORY, etc.): they are only treated as duplicates if their values match. 
update : bool, optional If `True`, update the current header with the values and comments from duplicate keywords in the input header. This supersedes the ``unique`` argument. Commentary keywords are treated the same as if ``unique=True``. update_first : bool, optional If the first keyword in the header is 'SIMPLE', and the first keyword in the input header is 'XTENSION', the 'SIMPLE' keyword is replaced by the 'XTENSION' keyword. Likewise if the first keyword in the header is 'XTENSION' and the first keyword in the input header is 'SIMPLE', the 'XTENSION' keyword is replaced by the 'SIMPLE' keyword. This behavior is otherwise dumb as to whether or not the resulting header is a valid primary or extension header. This is mostly provided to support backwards compatibility with the old ``Header.fromTxtFile`` method, and only applies if ``update=True``. useblanks, bottom, end : bool, optional These arguments are passed to :meth:`Header.append` while appending new cards to the header. """ temp = self.__class__(cards) if strip: temp._strip() if len(self): first = self._cards[0].keyword else: first = None # We don't immediately modify the header, because first we need to sift # out any duplicates in the new header prior to adding them to the # existing header, but while *allowing* duplicates from the header # being extended from (see ticket #156) extend_cards = [] for idx, card in enumerate(temp.cards): keyword = card.keyword if keyword not in Card._commentary_keywords: if unique and not update and keyword in self: continue elif update: if idx == 0 and update_first: # Dumbly update the first keyword to either SIMPLE or # XTENSION as the case may be, as was in the case in # Header.fromTxtFile if ((keyword == 'SIMPLE' and first == 'XTENSION') or (keyword == 'XTENSION' and first == 'SIMPLE')): del self[0] self.insert(0, card) else: self[keyword] = (card.value, card.comment) elif keyword in self: self[keyword] = (card.value, card.comment) else: extend_cards.append(card) else: extend_cards.append(card) else: if (unique or update) and keyword in self: if card.is_blank: extend_cards.append(card) continue for value in self[keyword]: if value == card.value: break else: extend_cards.append(card) else: extend_cards.append(card) for card in extend_cards: self.append(card, useblanks=useblanks, bottom=bottom, end=end) def count(self, keyword): """ Returns the count of the given keyword in the header, similar to `list.count` if the Header object is treated as a list of keywords. Parameters ---------- keyword : str The keyword to count instances of in the header """ keyword = Card.normalize_keyword(keyword) # We have to look before we leap, since otherwise _keyword_indices, # being a defaultdict, will create an entry for the nonexistent keyword if keyword not in self._keyword_indices: raise KeyError(f"Keyword {keyword!r} not found.") return len(self._keyword_indices[keyword]) def index(self, keyword, start=None, stop=None): """ Returns the index if the first instance of the given keyword in the header, similar to `list.index` if the Header object is treated as a list of keywords. 
Parameters ---------- keyword : str The keyword to look up in the list of all keywords in the header start : int, optional The lower bound for the index stop : int, optional The upper bound for the index """ if start is None: start = 0 if stop is None: stop = len(self._cards) if stop < start: step = -1 else: step = 1 norm_keyword = Card.normalize_keyword(keyword) for idx in range(start, stop, step): if self._cards[idx].keyword.upper() == norm_keyword: return idx else: raise ValueError('The keyword {!r} is not in the ' ' header.'.format(keyword)) def insert(self, key, card, useblanks=True, after=False): """ Inserts a new keyword+value card into the Header at a given location, similar to `list.insert`. Parameters ---------- key : int, str, or tuple The index into the list of header keywords before which the new keyword should be inserted, or the name of a keyword before which the new keyword should be inserted. Can also accept a (keyword, index) tuple for inserting around duplicate keywords. card : str, tuple A keyword or a (keyword, value, [comment]) tuple; see `Header.append` useblanks : bool, optional If there are blank cards at the end of the Header, replace the first blank card so that the total number of cards in the Header does not increase. Otherwise preserve the number of blank cards. after : bool, optional If set to `True`, insert *after* the specified index or keyword, rather than before it. Defaults to `False`. """ if not isinstance(key, int): # Don't pass through ints to _cardindex because it will not take # kindly to indices outside the existing number of cards in the # header, which insert needs to be able to support (for example # when inserting into empty headers) idx = self._cardindex(key) else: idx = key if after: if idx == -1: idx = len(self._cards) else: idx += 1 if idx >= len(self._cards): # This is just an append (Though it must be an append absolutely to # the bottom, ignoring blanks, etc.--the point of the insert method # is that you get exactly what you asked for with no surprises) self.append(card, end=True) return if isinstance(card, str): card = Card(card) elif isinstance(card, tuple): card = Card(*card) elif not isinstance(card, Card): raise ValueError( 'The value inserted into a Header must be either a keyword or ' '(keyword, value, [comment]) tuple; got: {!r}'.format(card)) self._cards.insert(idx, card) keyword = card.keyword # If idx was < 0, determine the actual index according to the rules # used by list.insert() if idx < 0: idx += len(self._cards) - 1 if idx < 0: idx = 0 # All the keyword indices above the insertion point must be updated self._updateindices(idx) keyword = Card.normalize_keyword(keyword) self._keyword_indices[keyword].append(idx) count = len(self._keyword_indices[keyword]) if count > 1: # There were already keywords with this same name if keyword not in Card._commentary_keywords: warnings.warn( 'A {!r} keyword already exists in this header. Inserting ' 'duplicate keyword.'.format(keyword), AstropyUserWarning) self._keyword_indices[keyword].sort() if card.field_specifier is not None: # Update the index of RVKC as well rvkc_indices = self._rvkc_indices[card.rawkeyword] rvkc_indices.append(idx) rvkc_indices.sort() if useblanks: self._useblanks(len(str(card)) // Card.length) self._modified = True def remove(self, keyword, ignore_missing=False, remove_all=False): """ Removes the first instance of the given keyword from the header similar to `list.remove` if the Header object is treated as a list of keywords. 
Parameters ---------- keyword : str The keyword of which to remove the first instance in the header. ignore_missing : bool, optional When True, ignores missing keywords. Otherwise, if the keyword is not present in the header a KeyError is raised. remove_all : bool, optional When True, all instances of keyword will be removed. Otherwise only the first instance of the given keyword is removed. """ keyword = Card.normalize_keyword(keyword) if keyword in self._keyword_indices: del self[self._keyword_indices[keyword][0]] if remove_all: while keyword in self._keyword_indices: del self[self._keyword_indices[keyword][0]] elif not ignore_missing: raise KeyError(f"Keyword '{keyword}' not found.") def rename_keyword(self, oldkeyword, newkeyword, force=False): """ Rename a card's keyword in the header. Parameters ---------- oldkeyword : str or int Old keyword or card index newkeyword : str New keyword force : bool, optional When `True`, if the new keyword already exists in the header, force the creation of a duplicate keyword. Otherwise a `ValueError` is raised. """ oldkeyword = Card.normalize_keyword(oldkeyword) newkeyword = Card.normalize_keyword(newkeyword) if newkeyword == 'CONTINUE': raise ValueError('Can not rename to CONTINUE') if (newkeyword in Card._commentary_keywords or oldkeyword in Card._commentary_keywords): if not (newkeyword in Card._commentary_keywords and oldkeyword in Card._commentary_keywords): raise ValueError('Regular and commentary keys can not be ' 'renamed to each other.') elif not force and newkeyword in self: raise ValueError('Intended keyword {} already exists in header.' .format(newkeyword)) idx = self.index(oldkeyword) card = self._cards[idx] del self[idx] self.insert(idx, (newkeyword, card.value, card.comment)) def add_history(self, value, before=None, after=None): """ Add a ``HISTORY`` card. Parameters ---------- value : str History text to be added. before : str or int, optional Same as in `Header.update` after : str or int, optional Same as in `Header.update` """ self._add_commentary('HISTORY', value, before=before, after=after) def add_comment(self, value, before=None, after=None): """ Add a ``COMMENT`` card. Parameters ---------- value : str Text to be added. before : str or int, optional Same as in `Header.update` after : str or int, optional Same as in `Header.update` """ self._add_commentary('COMMENT', value, before=before, after=after) def add_blank(self, value='', before=None, after=None): """ Add a blank card. Parameters ---------- value : str, optional Text to be added. before : str or int, optional Same as in `Header.update` after : str or int, optional Same as in `Header.update` """ self._add_commentary('', value, before=before, after=after) def _update(self, card): """ The real update code. If keyword already exists, its value and/or comment will be updated. Otherwise a new card will be appended. This will not create a duplicate keyword except in the case of commentary cards. The only other way to force creation of a duplicate is to use the insert(), append(), or extend() methods. 
""" keyword, value, comment = card # Lookups for existing/known keywords are case-insensitive keyword = keyword.upper() if keyword.startswith('HIERARCH '): keyword = keyword[9:] if (keyword not in Card._commentary_keywords and keyword in self._keyword_indices): # Easy; just update the value/comment idx = self._keyword_indices[keyword][0] existing_card = self._cards[idx] existing_card.value = value if comment is not None: # '' should be used to explicitly blank a comment existing_card.comment = comment if existing_card._modified: self._modified = True elif keyword in Card._commentary_keywords: cards = self._splitcommentary(keyword, value) if keyword in self._keyword_indices: # Append after the last keyword of the same type idx = self.index(keyword, start=len(self) - 1, stop=-1) isblank = not (keyword or value or comment) for c in reversed(cards): self.insert(idx + 1, c, useblanks=(not isblank)) else: for c in cards: self.append(c, bottom=True) else: # A new keyword! self.append() will handle updating _modified self.append(card) def _cardindex(self, key): """Returns an index into the ._cards list given a valid lookup key.""" # This used to just set key = (key, 0) and then go on to act as if the # user passed in a tuple, but it's much more common to just be given a # string as the key, so optimize more for that case if isinstance(key, str): keyword = key n = 0 elif isinstance(key, int): # If < 0, determine the actual index if key < 0: key += len(self._cards) if key < 0 or key >= len(self._cards): raise IndexError('Header index out of range.') return key elif isinstance(key, slice): return key elif isinstance(key, tuple): if (len(key) != 2 or not isinstance(key[0], str) or not isinstance(key[1], int)): raise ValueError( 'Tuple indices must be 2-tuples consisting of a ' 'keyword string and an integer index.') keyword, n = key else: raise ValueError( 'Header indices must be either a string, a 2-tuple, or ' 'an integer.') keyword = Card.normalize_keyword(keyword) # Returns the index into _cards for the n-th card with the given # keyword (where n is 0-based) indices = self._keyword_indices.get(keyword, None) if keyword and not indices: if len(keyword) > KEYWORD_LENGTH or '.' in keyword: raise KeyError(f"Keyword {keyword!r} not found.") else: # Maybe it's a RVKC? indices = self._rvkc_indices.get(keyword, None) if not indices: raise KeyError(f"Keyword {keyword!r} not found.") try: return indices[n] except IndexError: raise IndexError('There are only {} {!r} cards in the ' 'header.'.format(len(indices), keyword)) def _keyword_from_index(self, idx): """ Given an integer index, return the (keyword, repeat) tuple that index refers to. For most keywords the repeat will always be zero, but it may be greater than zero for keywords that are duplicated (especially commentary keywords). In a sense this is the inverse of self.index, except that it also supports duplicates. """ if idx < 0: idx += len(self._cards) keyword = self._cards[idx].keyword keyword = Card.normalize_keyword(keyword) repeat = self._keyword_indices[keyword].index(idx) return keyword, repeat def _relativeinsert(self, card, before=None, after=None, replace=False): """ Inserts a new card before or after an existing card; used to implement support for the legacy before/after keyword arguments to Header.update(). If replace=True, move an existing card with the same keyword. 
""" if before is None: insertionkey = after else: insertionkey = before def get_insertion_idx(): if not (isinstance(insertionkey, int) and insertionkey >= len(self._cards)): idx = self._cardindex(insertionkey) else: idx = insertionkey if before is None: idx += 1 return idx if replace: # The card presumably already exists somewhere in the header. # Check whether or not we actually have to move it; if it does need # to be moved we just delete it and then it will be reinserted # below old_idx = self._cardindex(card.keyword) insertion_idx = get_insertion_idx() if (insertion_idx >= len(self._cards) and old_idx == len(self._cards) - 1): # The card would be appended to the end, but it's already at # the end return if before is not None: if old_idx == insertion_idx - 1: return elif after is not None and old_idx == insertion_idx: return del self[old_idx] # Even if replace=True, the insertion idx may have changed since the # old card was deleted idx = get_insertion_idx() if card[0] in Card._commentary_keywords: cards = reversed(self._splitcommentary(card[0], card[1])) else: cards = [card] for c in cards: self.insert(idx, c) def _updateindices(self, idx, increment=True): """ For all cards with index above idx, increment or decrement its index value in the keyword_indices dict. """ if idx > len(self._cards): # Save us some effort return increment = 1 if increment else -1 for index_sets in (self._keyword_indices, self._rvkc_indices): for indices in index_sets.values(): for jdx, keyword_index in enumerate(indices): if keyword_index >= idx: indices[jdx] += increment def _countblanks(self): """Returns the number of blank cards at the end of the Header.""" for idx in range(1, len(self._cards)): if not self._cards[-idx].is_blank: return idx - 1 return 0 def _useblanks(self, count): for _ in range(count): if self._cards[-1].is_blank: del self[-1] else: break def _haswildcard(self, keyword): """Return `True` if the input keyword contains a wildcard pattern.""" return (isinstance(keyword, str) and (keyword.endswith('...') or '*' in keyword or '?' in keyword)) def _wildcardmatch(self, pattern): """ Returns a list of indices of the cards matching the given wildcard pattern. * '*' matches 0 or more characters * '?' matches a single character * '...' matches 0 or more of any non-whitespace character """ pattern = pattern.replace('*', r'.*').replace('?', r'.') pattern = pattern.replace('...', r'\S*') + '$' pattern_re = re.compile(pattern, re.I) return [idx for idx, card in enumerate(self._cards) if pattern_re.match(card.keyword)] def _set_slice(self, key, value, target): """ Used to implement Header.__setitem__ and CardAccessor.__setitem__. """ if isinstance(key, slice) or self._haswildcard(key): if isinstance(key, slice): indices = range(*key.indices(len(target))) else: indices = self._wildcardmatch(key) if isinstance(value, str) or not isiterable(value): value = itertools.repeat(value, len(indices)) for idx, val in zip(indices, value): target[idx] = val return True return False def _splitcommentary(self, keyword, value): """ Given a commentary keyword and value, returns a list of the one or more cards needed to represent the full value. This is primarily used to create the multiple commentary cards needed to represent a long value that won't fit into a single commentary card. 
""" # The maximum value in each card can be the maximum card length minus # the maximum key length (which can include spaces if they key length # less than 8 maxlen = Card.length - KEYWORD_LENGTH valuestr = str(value) if len(valuestr) <= maxlen: # The value can fit in a single card cards = [Card(keyword, value)] else: # The value must be split across multiple consecutive commentary # cards idx = 0 cards = [] while idx < len(valuestr): cards.append(Card(keyword, valuestr[idx:idx + maxlen])) idx += maxlen return cards def _strip(self): """ Strip cards specific to a certain kind of header. Strip cards like ``SIMPLE``, ``BITPIX``, etc. so the rest of the header can be used to reconstruct another kind of header. """ # TODO: Previously this only deleted some cards specific to an HDU if # _hdutype matched that type. But it seemed simple enough to just # delete all desired cards anyways, and just ignore the KeyErrors if # they don't exist. # However, it might be desirable to make this extendable somehow--have # a way for HDU classes to specify some headers that are specific only # to that type, and should be removed otherwise. if 'NAXIS' in self: naxis = self['NAXIS'] else: naxis = 0 if 'TFIELDS' in self: tfields = self['TFIELDS'] else: tfields = 0 for idx in range(naxis): try: del self['NAXIS' + str(idx + 1)] except KeyError: pass for name in ('TFORM', 'TSCAL', 'TZERO', 'TNULL', 'TTYPE', 'TUNIT', 'TDISP', 'TDIM', 'THEAP', 'TBCOL'): for idx in range(tfields): try: del self[name + str(idx + 1)] except KeyError: pass for name in ('SIMPLE', 'XTENSION', 'BITPIX', 'NAXIS', 'EXTEND', 'PCOUNT', 'GCOUNT', 'GROUPS', 'BSCALE', 'BZERO', 'TFIELDS'): try: del self[name] except KeyError: pass def _add_commentary(self, key, value, before=None, after=None): """ Add a commentary card. If ``before`` and ``after`` are `None`, add to the last occurrence of cards of the same name (except blank card). If there is no card (or blank card), append at the end. """ if before is not None or after is not None: self._relativeinsert((key, value), before=before, after=after) else: self[key] = value collections.abc.MutableSequence.register(Header) collections.abc.MutableMapping.register(Header) class _DelayedHeader: """ Descriptor used to create the Header object from the header string that was stored in HDU._header_str when parsing the file. """ def __get__(self, obj, owner=None): try: return obj.__dict__['_header'] except KeyError: if obj._header_str is not None: hdr = Header.fromstring(obj._header_str) obj._header_str = None else: raise AttributeError("'{}' object has no attribute '_header'" .format(obj.__class__.__name__)) obj.__dict__['_header'] = hdr return hdr def __set__(self, obj, val): obj.__dict__['_header'] = val def __delete__(self, obj): del obj.__dict__['_header'] class _BasicHeaderCards: """ This class allows to access cards with the _BasicHeader.cards attribute. This is needed because during the HDU class detection, some HDUs uses the .cards interface. Cards cannot be modified here as the _BasicHeader object will be deleted once the HDU object is created. """ def __init__(self, header): self.header = header def __getitem__(self, key): # .cards is a list of cards, so key here is an integer. # get the keyword name from its index. key = self.header._keys[key] # then we get the card from the _BasicHeader._cards list, or parse it # if needed. 
try: return self.header._cards[key] except KeyError: cardstr = self.header._raw_cards[key] card = Card.fromstring(cardstr) self.header._cards[key] = card return card class _BasicHeader(collections.abc.Mapping): """This class provides a fast header parsing, without all the additional features of the Header class. Here only standard keywords are parsed, no support for CONTINUE, HIERARCH, COMMENT, HISTORY, or rvkc. The raw card images are stored and parsed only if needed. The idea is that to create the HDU objects, only a small subset of standard cards is needed. Once a card is parsed, which is deferred to the Card class, the Card object is kept in a cache. This is useful because a small subset of cards is used a lot in the HDU creation process (NAXIS, XTENSION, ...). """ def __init__(self, cards): # dict of (keywords, card images) self._raw_cards = cards self._keys = list(cards.keys()) # dict of (keyword, Card object) storing the parsed cards self._cards = {} # the _BasicHeaderCards object allows to access Card objects from # keyword indices self.cards = _BasicHeaderCards(self) self._modified = False def __getitem__(self, key): if isinstance(key, int): key = self._keys[key] try: return self._cards[key].value except KeyError: # parse the Card and store it cardstr = self._raw_cards[key] self._cards[key] = card = Card.fromstring(cardstr) return card.value def __len__(self): return len(self._raw_cards) def __iter__(self): return iter(self._raw_cards) def index(self, keyword): return self._keys.index(keyword) @classmethod def fromfile(cls, fileobj): """The main method to parse a FITS header from a file. The parsing is done with the parse_header function implemented in Cython.""" close_file = False if isinstance(fileobj, str): fileobj = open(fileobj, 'rb') close_file = True try: header_str, cards = parse_header(fileobj) _check_padding(header_str, BLOCK_SIZE, False) return header_str, cls(cards) finally: if close_file: fileobj.close() class _CardAccessor: """ This is a generic class for wrapping a Header in such a way that you can use the header's slice/filtering capabilities to return a subset of cards and do something with them. This is sort of the opposite notion of the old CardList class--whereas Header used to use CardList to get lists of cards, this uses Header to get lists of cards. """ # TODO: Consider giving this dict/list methods like Header itself def __init__(self, header): self._header = header def __repr__(self): return '\n'.join(repr(c) for c in self._header._cards) def __len__(self): return len(self._header._cards) def __iter__(self): return iter(self._header._cards) def __eq__(self, other): # If the `other` item is a scalar we will still treat it as equal if # this _CardAccessor only contains one item if not isiterable(other) or isinstance(other, str): if len(self) == 1: other = [other] else: return False for a, b in itertools.zip_longest(self, other): if a != b: return False else: return True def __ne__(self, other): return not (self == other) def __getitem__(self, item): if isinstance(item, slice) or self._header._haswildcard(item): return self.__class__(self._header[item]) idx = self._header._cardindex(item) return self._header._cards[idx] def _setslice(self, item, value): """ Helper for implementing __setitem__ on _CardAccessor subclasses; slices should always be handled in this same way. 
""" if isinstance(item, slice) or self._header._haswildcard(item): if isinstance(item, slice): indices = range(*item.indices(len(self))) else: indices = self._header._wildcardmatch(item) if isinstance(value, str) or not isiterable(value): value = itertools.repeat(value, len(indices)) for idx, val in zip(indices, value): self[idx] = val return True return False collections.abc.Mapping.register(_CardAccessor) collections.abc.Sequence.register(_CardAccessor) class _HeaderComments(_CardAccessor): """ A class used internally by the Header class for the Header.comments attribute access. This object can be used to display all the keyword comments in the Header, or look up the comments on specific keywords. It allows all the same forms of keyword lookup as the Header class itself, but returns comments instead of values. """ def __iter__(self): for card in self._header._cards: yield card.comment def __repr__(self): """Returns a simple list of all keywords and their comments.""" keyword_length = KEYWORD_LENGTH for card in self._header._cards: keyword_length = max(keyword_length, len(card.keyword)) return '\n'.join('{:>{len}} {}'.format(c.keyword, c.comment, len=keyword_length) for c in self._header._cards) def __getitem__(self, item): """ Slices and filter strings return a new _HeaderComments containing the returned cards. Otherwise the comment of a single card is returned. """ item = super().__getitem__(item) if isinstance(item, _HeaderComments): # The item key was a slice return item return item.comment def __setitem__(self, item, comment): """ Set/update the comment on specified card or cards. Slice/filter updates work similarly to how Header.__setitem__ works. """ if self._header._set_slice(item, comment, self): return # In this case, key/index errors should be raised; don't update # comments of nonexistent cards idx = self._header._cardindex(item) value = self._header[idx] self._header[idx] = (value, comment) class _HeaderCommentaryCards(_CardAccessor): """ This is used to return a list-like sequence over all the values in the header for a given commentary keyword, such as HISTORY. """ def __init__(self, header, keyword=''): super().__init__(header) self._keyword = keyword self._count = self._header.count(self._keyword) self._indices = slice(self._count).indices(self._count) # __len__ and __iter__ need to be overridden from the base class due to the # different approach this class has to take for slicing def __len__(self): return len(range(*self._indices)) def __iter__(self): for idx in range(*self._indices): yield self._header[(self._keyword, idx)] def __repr__(self): return '\n'.join(self) def __getitem__(self, idx): if isinstance(idx, slice): n = self.__class__(self._header, self._keyword) n._indices = idx.indices(self._count) return n elif not isinstance(idx, int): raise ValueError(f'{self._keyword} index must be an integer') idx = list(range(*self._indices))[idx] return self._header[(self._keyword, idx)] def __setitem__(self, item, value): """ Set the value of a specified commentary card or cards. Slice/filter updates work similarly to how Header.__setitem__ works. """ if self._header._set_slice(item, value, self): return # In this case, key/index errors should be raised; don't update # comments of nonexistent cards self._header[(self._keyword, item)] = value def _block_size(sep): """ Determine the size of a FITS header block if a non-blank separator is used between cards. 
""" return BLOCK_SIZE + (len(sep) * (BLOCK_SIZE // Card.length - 1)) def _pad_length(stringlen): """Bytes needed to pad the input stringlen to the next FITS block.""" return (BLOCK_SIZE - (stringlen % BLOCK_SIZE)) % BLOCK_SIZE def _check_padding(header_str, block_size, is_eof, check_block_size=True): # Strip any zero-padding (see ticket #106) if header_str and header_str[-1] == '\0': if is_eof and header_str.strip('\0') == '': # TODO: Pass this warning to validation framework warnings.warn( 'Unexpected extra padding at the end of the file. This ' 'padding may not be preserved when saving changes.', AstropyUserWarning) raise EOFError() else: # Replace the illegal null bytes with spaces as required by # the FITS standard, and issue a nasty warning # TODO: Pass this warning to validation framework warnings.warn( 'Header block contains null bytes instead of spaces for ' 'padding, and is not FITS-compliant. Nulls may be ' 'replaced with spaces upon writing.', AstropyUserWarning) header_str.replace('\0', ' ') if check_block_size and (len(header_str) % block_size) != 0: # This error message ignores the length of the separator for # now, but maybe it shouldn't? actual_len = len(header_str) - block_size + BLOCK_SIZE # TODO: Pass this error to validation framework raise ValueError('Header size is not multiple of {}: {}' .format(BLOCK_SIZE, actual_len))
# Licensed under a 3-clause BSD style license - see LICENSE.rst import pytest from numpy.testing import assert_equal, assert_allclose from astropy import units as u from astropy.time import Time, TimeDelta from astropy.utils.data import get_pkg_data_filename from astropy.timeseries.periodograms import BoxLeastSquares, LombScargle from astropy.timeseries.binned import BinnedTimeSeries from astropy.tests.helper import assert_quantity_allclose CSV_FILE = get_pkg_data_filename('data/binned.csv') def test_empty_initialization(): ts = BinnedTimeSeries() ts['time_bin_start'] = Time([1, 2, 3], format='mjd') def test_empty_initialization_invalid(): # Make sure things crash when the first column added is not a time column ts = BinnedTimeSeries() with pytest.raises(ValueError) as exc: ts['flux'] = [1, 2, 3] assert exc.value.args[0] == ("BinnedTimeSeries object is invalid - expected " "'time_bin_start' as the first column but found 'flux'") def test_initialization_time_bin_invalid(): # Make sure things crash when time_bin_* is passed incorrectly. with pytest.raises(TypeError) as exc: BinnedTimeSeries(data=[[1, 4, 3]]) assert exc.value.args[0] == ("'time_bin_start' has not been specified") with pytest.raises(TypeError) as exc: BinnedTimeSeries(time_bin_start='2016-03-22T12:30:31', data=[[1, 4, 3]]) assert exc.value.args[0] == ("Either 'time_bin_size' or 'time_bin_end' should be specified") def test_initialization_time_bin_both(): # Make sure things crash when time_bin_* is passed twice. with pytest.raises(TypeError) as exc: BinnedTimeSeries(data={"time_bin_start": ["2016-03-22T12:30:31"]}, time_bin_start="2016-03-22T12:30:31") assert exc.value.args[0] == ("'time_bin_start' has been given both in the table " "and as a keyword argument") with pytest.raises(TypeError) as exc: BinnedTimeSeries(data={"time_bin_size": ["2016-03-22T12:30:31"]}, time_bin_size=[1]*u.s) assert exc.value.args[0] == ("'time_bin_size' has been given both in the table " "and as a keyword argument") def test_initialization_time_bin_size(): # Make sure things crash when time_bin_size has no units with pytest.raises(TypeError) as exc: BinnedTimeSeries(data={"time": ["2016-03-22T12:30:31"]}, time_bin_start="2016-03-22T12:30:31", time_bin_size=1) assert exc.value.args[0] == ("'time_bin_size' should be a Quantity or a TimeDelta") # TimeDelta for time_bin_size ts = BinnedTimeSeries(data={"time": ["2016-03-22T12:30:31"]}, time_bin_start="2016-03-22T12:30:31", time_bin_size=TimeDelta(1)) assert isinstance(ts.time_bin_size, u.quantity.Quantity) def test_initialization_time_bin_start_scalar(): # Make sure things crash when time_bin_start is a scalar with no time_bin_size with pytest.raises(TypeError) as exc: BinnedTimeSeries(data={"time": ["2016-03-22T12:30:31"]}, time_bin_start=Time(1, format='mjd'), time_bin_end=Time(1, format='mjd')) assert exc.value.args[0] == ("'time_bin_start' is scalar, so 'time_bin_size' is required") def test_initialization_n_bins(): # Make sure things crash with incorrect n_bins with pytest.raises(TypeError) as exc: BinnedTimeSeries(data={"time": ["2016-03-22T12:30:31"]}, time_bin_start=Time(1, format='mjd'), time_bin_size=1*u.s, time_bin_end=Time(1, format='mjd'), n_bins=10) assert exc.value.args[0] == ("'n_bins' has been given and it is not the " "same length as the input data.") def test_initialization_non_scalar_time(): # Make sure things crash with incorrect size of time_bin_start with pytest.raises(ValueError) as exc: BinnedTimeSeries(data={"time": ["2016-03-22T12:30:31"]}, 
time_bin_start=["2016-03-22T12:30:31", "2016-03-22T12:30:32"], time_bin_size=1*u.s, time_bin_end=Time(1, format='mjd')) assert exc.value.args[0] == ("Length of 'time_bin_start' (2) should match table length (1)") with pytest.raises(TypeError) as exc: BinnedTimeSeries(data={"time": ["2016-03-22T12:30:31"]}, time_bin_start=["2016-03-22T12:30:31"], time_bin_size=None, time_bin_end=None) assert exc.value.args[0] == ("Either 'time_bin_size' or 'time_bin_end' should be specified") def test_even_contiguous(): # Initialize a ``BinnedTimeSeries`` with even contiguous bins by specifying # the bin width: ts = BinnedTimeSeries(time_bin_start='2016-03-22T12:30:31', time_bin_size=3 * u.s, data=[[1, 4, 3]]) assert_equal(ts.time_bin_start.isot, ['2016-03-22T12:30:31.000', '2016-03-22T12:30:34.000', '2016-03-22T12:30:37.000']) assert_equal(ts.time_bin_center.isot, ['2016-03-22T12:30:32.500', '2016-03-22T12:30:35.500', '2016-03-22T12:30:38.500']) assert_equal(ts.time_bin_end.isot, ['2016-03-22T12:30:34.000', '2016-03-22T12:30:37.000', '2016-03-22T12:30:40.000']) def test_uneven_contiguous(): # Initialize a ``BinnedTimeSeries`` with uneven contiguous bins by giving an # end time: ts = BinnedTimeSeries(time_bin_start=['2016-03-22T12:30:31', '2016-03-22T12:30:32', '2016-03-22T12:30:40'], time_bin_end='2016-03-22T12:30:55', data=[[1, 4, 3]]) assert_equal(ts.time_bin_start.isot, ['2016-03-22T12:30:31.000', '2016-03-22T12:30:32.000', '2016-03-22T12:30:40.000']) assert_equal(ts.time_bin_center.isot, ['2016-03-22T12:30:31.500', '2016-03-22T12:30:36.000', '2016-03-22T12:30:47.500']) assert_equal(ts.time_bin_end.isot, ['2016-03-22T12:30:32.000', '2016-03-22T12:30:40.000', '2016-03-22T12:30:55.000']) def test_uneven_non_contiguous(): # Initialize a ``BinnedTimeSeries`` with uneven non-contiguous bins with # lists of start times, bin sizes and data: ts = BinnedTimeSeries(time_bin_start=['2016-03-22T12:30:31', '2016-03-22T12:30:38', '2016-03-22T12:34:40'], time_bin_size=[5, 100, 2]*u.s, data=[[1, 4, 3]]) assert_equal(ts.time_bin_start.isot, ['2016-03-22T12:30:31.000', '2016-03-22T12:30:38.000', '2016-03-22T12:34:40.000']) assert_equal(ts.time_bin_center.isot, ['2016-03-22T12:30:33.500', '2016-03-22T12:31:28.000', '2016-03-22T12:34:41.000']) assert_equal(ts.time_bin_end.isot, ['2016-03-22T12:30:36.000', '2016-03-22T12:32:18.000', '2016-03-22T12:34:42.000']) def test_uneven_non_contiguous_full(): # Initialize a ``BinnedTimeSeries`` with uneven non-contiguous bins by # specifying the start and end times for the bins: ts = BinnedTimeSeries(time_bin_start=['2016-03-22T12:30:31', '2016-03-22T12:30:33', '2016-03-22T12:30:40'], time_bin_end=['2016-03-22T12:30:32', '2016-03-22T12:30:35', '2016-03-22T12:30:41'], data=[[1, 4, 3]]) assert_equal(ts.time_bin_start.isot, ['2016-03-22T12:30:31.000', '2016-03-22T12:30:33.000', '2016-03-22T12:30:40.000']) assert_equal(ts.time_bin_center.isot, ['2016-03-22T12:30:31.500', '2016-03-22T12:30:34.000', '2016-03-22T12:30:40.500']) assert_equal(ts.time_bin_end.isot, ['2016-03-22T12:30:32.000', '2016-03-22T12:30:35.000', '2016-03-22T12:30:41.000']) def test_read_empty(): with pytest.raises(ValueError) as exc: BinnedTimeSeries.read(CSV_FILE, format='csv') assert exc.value.args[0] == '``time_bin_start_column`` should be provided since the default Table readers are being used.' 
def test_read_no_size_end(): with pytest.raises(ValueError) as exc: BinnedTimeSeries.read(CSV_FILE, time_bin_start_column='time_start', format='csv') assert exc.value.args[0] == 'Either `time_bin_end_column` or `time_bin_size_column` should be provided.' def test_read_both_extra_bins(): with pytest.raises(ValueError) as exc: BinnedTimeSeries.read(CSV_FILE, time_bin_start_column='time_start', time_bin_end_column='END', time_bin_size_column='bin_size', format='csv') assert exc.value.args[0] == "Cannot specify both `time_bin_end_column` and `time_bin_size_column`." def test_read_size_no_unit(): with pytest.raises(ValueError) as exc: BinnedTimeSeries.read(CSV_FILE, time_bin_start_column='time_start', time_bin_size_column='bin_size', format='csv') assert exc.value.args[0] == "The bin size unit should be specified as an astropy Unit using ``time_bin_size_unit``." def test_read_start_time_missing(): with pytest.raises(ValueError) as exc: BinnedTimeSeries.read(CSV_FILE, time_bin_start_column='abc', time_bin_size_column='bin_size', time_bin_size_unit=u.second, format='csv') assert exc.value.args[0] == "Bin start time column 'abc' not found in the input data." def test_read_end_time_missing(): with pytest.raises(ValueError) as exc: BinnedTimeSeries.read(CSV_FILE, time_bin_start_column='time_start', time_bin_end_column="missing", format='csv') assert exc.value.args[0] == "Bin end time column 'missing' not found in the input data." def test_read_size_missing(): with pytest.raises(ValueError) as exc: BinnedTimeSeries.read(CSV_FILE, time_bin_start_column='time_start', time_bin_size_column="missing", time_bin_size_unit=u.second, format='csv') assert exc.value.args[0] == "Bin size column 'missing' not found in the input data." def test_read_time_unit_missing(): with pytest.raises(ValueError) as exc: BinnedTimeSeries.read(CSV_FILE, time_bin_start_column='time_start', time_bin_size_column="bin_size", format='csv') assert exc.value.args[0] == "The bin size unit should be specified as an astropy Unit using ``time_bin_size_unit``." def test_read(): timeseries = BinnedTimeSeries.read(CSV_FILE, time_bin_start_column='time_start', time_bin_end_column='time_end', format='csv') assert timeseries.colnames == ['time_bin_start', 'time_bin_size', 'bin_size', 'A', 'B', 'C', 'D', 'E', 'F'] assert len(timeseries) == 10 assert timeseries['B'].sum() == 1151.54 timeseries = BinnedTimeSeries.read(CSV_FILE, time_bin_start_column='time_start', time_bin_size_column='bin_size', time_bin_size_unit=u.second, format='csv') assert timeseries.colnames == ['time_bin_start', 'time_bin_size', 'time_end', 'A', 'B', 'C', 'D', 'E', 'F'] assert len(timeseries) == 10 assert timeseries['B'].sum() == 1151.54 @pytest.mark.parametrize('cls', [BoxLeastSquares, LombScargle]) def test_periodogram(cls): # Note that we don't need to check the actual results from the periodogram # classes here since these are tested extensively in # astropy.timeseries.periodograms. ts = BinnedTimeSeries(time_bin_start='2016-03-22T12:30:31', time_bin_size=3 * u.s, data=[[1, 4, 3], [3, 4, 3]], names=['a', 'b']) p1 = cls.from_timeseries(ts, 'a') assert isinstance(p1, cls) assert_allclose(p1.t.jd, ts.time_bin_center.jd) assert_equal(p1.y, ts['a']) assert p1.dy is None p2 = cls.from_timeseries(ts, 'a', uncertainty='b') assert_quantity_allclose(p2.dy, ts['b']) p3 = cls.from_timeseries(ts, 'a', uncertainty=0.1) assert_allclose(p3.dy, 0.1)
repo_name: MSeifert04/astropy
test_path: astropy/timeseries/tests/test_binned.py
code_path: astropy/io/fits/header.py
# Licensed under a 3-clause BSD style license - see LICENSE.rst import os from os.path import join from distutils.core import Extension from distutils import log from astropy_helpers import setup_helpers, utils from astropy_helpers.version_helpers import get_pkg_version_module wcs_setup_package = utils.import_file(join('astropy', 'wcs', 'setup_package.py')) MODELING_ROOT = os.path.relpath(os.path.dirname(__file__)) MODELING_SRC = join(MODELING_ROOT, 'src') SRC_FILES = [join(MODELING_SRC, 'projections.c.templ'), __file__] GEN_FILES = [join(MODELING_SRC, 'projections.c')] # This defines the set of projection functions that we want to wrap. # The key is the projection name, and the value is the number of # parameters. # (These are in the order that the appear in the WCS coordinate # systems paper). projections = { 'azp': 2, 'szp': 3, 'tan': 0, 'stg': 0, 'sin': 2, 'arc': 0, 'zea': 0, 'air': 1, 'cyp': 2, 'cea': 1, 'mer': 0, 'sfl': 0, 'par': 0, 'mol': 0, 'ait': 0, 'cop': 2, 'coe': 2, 'cod': 2, 'coo': 2, 'bon': 1, 'pco': 0, 'tsc': 0, 'csc': 0, 'qsc': 0, 'hpx': 2, 'xph': 0, } def pre_build_py_hook(cmd_obj): preprocess_source() def pre_build_ext_hook(cmd_obj): preprocess_source() def pre_sdist_hook(cmd_obj): preprocess_source() def preprocess_source(): # TODO: Move this to setup_helpers # Generating the wcslib wrappers should only be done if needed. This also # ensures that it is not done for any release tarball since those will # include core.py and core.c. if all(os.path.exists(filename) for filename in GEN_FILES): # Determine modification times src_mtime = max(os.path.getmtime(filename) for filename in SRC_FILES) gen_mtime = min(os.path.getmtime(filename) for filename in GEN_FILES) version = get_pkg_version_module('astropy') if gen_mtime > src_mtime: # If generated source is recent enough, don't update return elif version.release: # or, if we're on a release, issue a warning, but go ahead and use # the wrappers anyway log.warn('WARNING: The autogenerated wrappers in ' 'astropy.modeling._projections seem to be older ' 'than the source templates used to create ' 'them. Because this is a release version we will ' 'use them anyway, but this might be a sign of ' 'some sort of version mismatch or other ' 'tampering. 
Or it might just mean you moved ' 'some files around or otherwise accidentally ' 'changed timestamps.') return # otherwise rebuild the autogenerated files # If jinja2 isn't present, then print a warning and use existing files try: import jinja2 # pylint: disable=W0611 except ImportError: log.warn("WARNING: jinja2 could not be imported, so the existing " "modeling _projections.c file will be used") return from jinja2 import Environment, FileSystemLoader # Prepare the jinja2 templating environment env = Environment(loader=FileSystemLoader(MODELING_SRC)) c_in = env.get_template('projections.c.templ') c_out = c_in.render(projections=projections) with open(join(MODELING_SRC, 'projections.c'), 'w') as fd: fd.write(c_out) def get_extensions(): wcslib_files = [ # List of wcslib files to compile 'prj.c', 'wcserr.c', 'wcsprintf.c', 'wcsutil.c' ] wcslib_config_paths = [ join(MODELING_SRC, 'wcsconfig.h') ] cfg = setup_helpers.DistutilsExtensionArgs() wcs_setup_package.get_wcslib_cfg(cfg, wcslib_files, wcslib_config_paths) cfg['include_dirs'].append(MODELING_SRC) astropy_files = [ # List of astropy.modeling files to compile 'projections.c' ] cfg['sources'].extend(join(MODELING_SRC, x) for x in astropy_files) cfg['sources'] = [str(x) for x in cfg['sources']] cfg = dict((str(key), val) for key, val in cfg.items()) return [Extension('astropy.modeling._projections', **cfg)]
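A small sketch of the jinja2 rendering step that preprocess_source() relies on above; the inline template and the two sample projections are illustrative placeholders, not the real projections.c.templ, and the only assumption is that jinja2 is installed.

# Hypothetical inline template standing in for projections.c.templ; it only
# demonstrates how the ``projections`` mapping is expanded during rendering.
from jinja2 import Template

template = Template(
    "{% for name, nparams in projections.items() %}"
    "/* {{ name }} projection takes {{ nparams }} parameter(s) */\n"
    "{% endfor %}"
)
print(template.render(projections={'tan': 0, 'azp': 2}))
# /* tan projection takes 0 parameter(s) */
# /* azp projection takes 2 parameter(s) */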
repo_name: MSeifert04/astropy
test_path: astropy/timeseries/tests/test_binned.py (identical to the test file shown above)
code_path: astropy/modeling/setup_package.py
"""Base async HTTP client implementation.""" import sys from vine import Thenable, promise, maybe_promise from kombu.exceptions import HttpError from kombu.utils.compat import coro from kombu.utils.encoding import bytes_to_str from kombu.utils.functional import maybe_list, memoize try: # pragma: no cover from http.client import responses except ImportError: from httplib import responses # noqa __all__ = ('Headers', 'Response', 'Request') PYPY = hasattr(sys, 'pypy_version_info') @memoize(maxsize=1000) def normalize_header(key): return '-'.join(p.capitalize() for p in key.split('-')) class Headers(dict): """Represents a mapping of HTTP headers.""" # TODO: This is just a regular dict and will not perform normalization # when looking up keys etc. #: Set when all of the headers have been read. complete = False #: Internal attribute used to keep track of continuation lines. _prev_key = None @Thenable.register class Request: """A HTTP Request. Arguments: url (str): The URL to request. method (str): The HTTP method to use (defaults to ``GET``). Keyword Arguments: headers (Dict, ~kombu.asynchronous.http.Headers): Optional headers for this request body (str): Optional body for this request. connect_timeout (float): Connection timeout in float seconds Default is 30.0. timeout (float): Time in float seconds before the request times out Default is 30.0. follow_redirects (bool): Specify if the client should follow redirects Enabled by default. max_redirects (int): Maximum number of redirects (default 6). use_gzip (bool): Allow the server to use gzip compression. Enabled by default. validate_cert (bool): Set to true if the server certificate should be verified when performing ``https://`` requests. Enabled by default. auth_username (str): Username for HTTP authentication. auth_password (str): Password for HTTP authentication. auth_mode (str): Type of HTTP authentication (``basic`` or ``digest``). user_agent (str): Custom user agent for this request. network_interace (str): Network interface to use for this request. on_ready (Callable): Callback to be called when the response has been received. Must accept single ``response`` argument. on_stream (Callable): Optional callback to be called every time body content has been read from the socket. If specified then the response body and buffer attributes will not be available. on_timeout (callable): Optional callback to be called if the request times out. on_header (Callable): Optional callback to be called for every header line received from the server. The signature is ``(headers, line)`` and note that if you want ``response.headers`` to be populated then your callback needs to also call ``client.on_header(headers, line)``. on_prepare (Callable): Optional callback that is implementation specific (e.g. curl client will pass the ``curl`` instance to this callback). proxy_host (str): Optional proxy host. Note that a ``proxy_port`` must also be provided or a :exc:`ValueError` will be raised. proxy_username (str): Optional username to use when logging in to the proxy. proxy_password (str): Optional password to use when authenticating with the proxy server. ca_certs (str): Custom CA certificates file to use. client_key (str): Optional filename for client SSL key. client_cert (str): Optional filename for client SSL certificate. 
""" body = user_agent = network_interface = \ auth_username = auth_password = auth_mode = \ proxy_host = proxy_port = proxy_username = proxy_password = \ ca_certs = client_key = client_cert = None connect_timeout = 30.0 request_timeout = 30.0 follow_redirects = True max_redirects = 6 use_gzip = True validate_cert = True if not PYPY: # pragma: no cover __slots__ = ('url', 'method', 'on_ready', 'on_timeout', 'on_stream', 'on_prepare', 'on_header', 'headers', '__weakref__', '__dict__') def __init__(self, url, method='GET', on_ready=None, on_timeout=None, on_stream=None, on_prepare=None, on_header=None, headers=None, **kwargs): self.url = url self.method = method or self.method self.on_ready = maybe_promise(on_ready) or promise() self.on_timeout = maybe_promise(on_timeout) self.on_stream = maybe_promise(on_stream) self.on_prepare = maybe_promise(on_prepare) self.on_header = maybe_promise(on_header) if kwargs: for k, v in kwargs.items(): setattr(self, k, v) if not isinstance(headers, Headers): headers = Headers(headers or {}) self.headers = headers def then(self, callback, errback=None): self.on_ready.then(callback, errback) def __repr__(self): return '<Request: {0.method} {0.url} {0.body}>'.format(self) class Response: """HTTP Response. Arguments: request (~kombu.asynchronous.http.Request): See :attr:`request`. code (int): See :attr:`code`. headers (~kombu.asynchronous.http.Headers): See :attr:`headers`. buffer (bytes): See :attr:`buffer` effective_url (str): See :attr:`effective_url`. status (str): See :attr:`status`. Attributes: request (~kombu.asynchronous.http.Request): object used to get this response. code (int): HTTP response code (e.g. 200, 404, or 500). headers (~kombu.asynchronous.http.Headers): HTTP headers for this response. buffer (bytes): Socket read buffer. effective_url (str): The destination url for this request after following redirects. error (Exception): Error instance if the request resulted in a HTTP error code. status (str): Human equivalent of :attr:`code`, e.g. ``OK``, `Not found`, or 'Internal Server Error'. """ if not PYPY: # pragma: no cover __slots__ = ('request', 'code', 'headers', 'buffer', 'effective_url', 'error', 'status', '_body', '__weakref__') def __init__(self, request, code, headers=None, buffer=None, effective_url=None, error=None, status=None): self.request = request self.code = code self.headers = headers if headers is not None else Headers() self.buffer = buffer self.effective_url = effective_url or request.url self._body = None self.status = status or responses.get(self.code, 'Unknown') self.error = error if self.error is None and (self.code < 200 or self.code > 299): self.error = HttpError(self.code, self.status, self) def raise_for_error(self): """Raise if the request resulted in an HTTP error code. Raises: :class:`~kombu.exceptions.HttpError` """ if self.error: raise self.error @property def body(self): """The full contents of the response body. Note: Accessing this propery will evaluate the buffer and subsequent accesses will be cached. 
""" if self._body is None: if self.buffer is not None: self._body = self.buffer.getvalue() return self._body # these are for compatibility with Requests @property def status_code(self): return self.code @property def content(self): return self.body @coro def header_parser(keyt=normalize_header): while 1: (line, headers) = yield if line.startswith('HTTP/'): continue elif not line: headers.complete = True continue elif line[0].isspace(): pkey = headers._prev_key headers[pkey] = ' '.join([headers.get(pkey) or '', line.lstrip()]) else: key, value = line.split(':', 1) key = headers._prev_key = keyt(key) headers[key] = value.strip() class BaseClient: Headers = Headers Request = Request Response = Response def __init__(self, hub, **kwargs): self.hub = hub self._header_parser = header_parser() def perform(self, request, **kwargs): for req in maybe_list(request) or []: if not isinstance(req, self.Request): req = self.Request(req, **kwargs) self.add_request(req) def add_request(self, request): raise NotImplementedError('must implement add_request') def close(self): pass def on_header(self, headers, line): try: self._header_parser.send((bytes_to_str(line), headers)) except StopIteration: self._header_parser = header_parser() def __enter__(self): return self def __exit__(self, *exc_info): self.close()
import tempfile import pytest import t.skip from kombu import Connection, Exchange, Queue, Consumer, Producer @t.skip.if_win32 class test_FilesystemTransport: def setup(self): self.channels = set() try: data_folder_in = tempfile.mkdtemp() data_folder_out = tempfile.mkdtemp() except Exception: pytest.skip('filesystem transport: cannot create tempfiles') self.c = Connection(transport='filesystem', transport_options={ 'data_folder_in': data_folder_in, 'data_folder_out': data_folder_out, }) self.channels.add(self.c.default_channel) self.p = Connection(transport='filesystem', transport_options={ 'data_folder_in': data_folder_out, 'data_folder_out': data_folder_in, }) self.channels.add(self.p.default_channel) self.e = Exchange('test_transport_filesystem') self.q = Queue('test_transport_filesystem', exchange=self.e, routing_key='test_transport_filesystem') self.q2 = Queue('test_transport_filesystem2', exchange=self.e, routing_key='test_transport_filesystem2') def teardown(self): # make sure we don't attempt to restore messages at shutdown. for channel in self.channels: try: channel._qos._dirty.clear() except AttributeError: pass try: channel._qos._delivered.clear() except AttributeError: pass def _add_channel(self, channel): self.channels.add(channel) return channel def test_produce_consume_noack(self): producer = Producer(self._add_channel(self.p.channel()), self.e) consumer = Consumer(self._add_channel(self.c.channel()), self.q, no_ack=True) for i in range(10): producer.publish({'foo': i}, routing_key='test_transport_filesystem') _received = [] def callback(message_data, message): _received.append(message) consumer.register_callback(callback) consumer.consume() while 1: if len(_received) == 10: break self.c.drain_events() assert len(_received) == 10 def test_produce_consume(self): producer_channel = self._add_channel(self.p.channel()) consumer_channel = self._add_channel(self.c.channel()) producer = Producer(producer_channel, self.e) consumer1 = Consumer(consumer_channel, self.q) consumer2 = Consumer(consumer_channel, self.q2) self.q2(consumer_channel).declare() for i in range(10): producer.publish({'foo': i}, routing_key='test_transport_filesystem') for i in range(10): producer.publish({'foo': i}, routing_key='test_transport_filesystem2') _received1 = [] _received2 = [] def callback1(message_data, message): _received1.append(message) message.ack() def callback2(message_data, message): _received2.append(message) message.ack() consumer1.register_callback(callback1) consumer2.register_callback(callback2) consumer1.consume() consumer2.consume() while 1: if len(_received1) + len(_received2) == 20: break self.c.drain_events() assert len(_received1) + len(_received2) == 20 # compression producer.publish({'compressed': True}, routing_key='test_transport_filesystem', compression='zlib') m = self.q(consumer_channel).get() assert m.payload == {'compressed': True} # queue.delete for i in range(10): producer.publish({'foo': i}, routing_key='test_transport_filesystem') assert self.q(consumer_channel).get() self.q(consumer_channel).delete() self.q(consumer_channel).declare() assert self.q(consumer_channel).get() is None # queue.purge for i in range(10): producer.publish({'foo': i}, routing_key='test_transport_filesystem2') assert self.q2(consumer_channel).get() self.q2(consumer_channel).purge() assert self.q2(consumer_channel).get() is None
repo_name: ZoranPavlovic/kombu
test_path: t/unit/transport/test_filesystem.py
code_path: kombu/asynchronous/http/base.py
from warnings import catch_warnings import numpy as np from pandas.core.dtypes import generic as gt import pandas as pd import pandas._testing as tm class TestABCClasses: tuples = [[1, 2, 2], ["red", "blue", "red"]] multi_index = pd.MultiIndex.from_arrays(tuples, names=("number", "color")) datetime_index = pd.to_datetime(["2000/1/1", "2010/1/1"]) timedelta_index = pd.to_timedelta(np.arange(5), unit="s") period_index = pd.period_range("2000/1/1", "2010/1/1/", freq="M") categorical = pd.Categorical([1, 2, 3], categories=[2, 3, 1]) categorical_df = pd.DataFrame({"values": [1, 2, 3]}, index=categorical) df = pd.DataFrame({"names": ["a", "b", "c"]}, index=multi_index) sparse_array = pd.arrays.SparseArray(np.random.randn(10)) datetime_array = pd.core.arrays.DatetimeArray(datetime_index) timedelta_array = pd.core.arrays.TimedeltaArray(timedelta_index) def test_abc_types(self): assert isinstance(pd.Index(["a", "b", "c"]), gt.ABCIndex) assert isinstance(pd.Int64Index([1, 2, 3]), gt.ABCInt64Index) assert isinstance(pd.UInt64Index([1, 2, 3]), gt.ABCUInt64Index) assert isinstance(pd.Float64Index([1, 2, 3]), gt.ABCFloat64Index) assert isinstance(self.multi_index, gt.ABCMultiIndex) assert isinstance(self.datetime_index, gt.ABCDatetimeIndex) assert isinstance(self.timedelta_index, gt.ABCTimedeltaIndex) assert isinstance(self.period_index, gt.ABCPeriodIndex) assert isinstance(self.categorical_df.index, gt.ABCCategoricalIndex) assert isinstance(pd.Index(["a", "b", "c"]), gt.ABCIndexClass) assert isinstance(pd.Int64Index([1, 2, 3]), gt.ABCIndexClass) assert isinstance(pd.Series([1, 2, 3]), gt.ABCSeries) assert isinstance(self.df, gt.ABCDataFrame) assert isinstance(self.sparse_array, gt.ABCExtensionArray) assert isinstance(self.categorical, gt.ABCCategorical) assert isinstance(self.datetime_array, gt.ABCDatetimeArray) assert not isinstance(self.datetime_index, gt.ABCDatetimeArray) assert isinstance(self.timedelta_array, gt.ABCTimedeltaArray) assert not isinstance(self.timedelta_index, gt.ABCTimedeltaArray) def test_setattr_warnings(): # GH7175 - GOTCHA: You can't use dot notation to add a column... d = { "one": pd.Series([1.0, 2.0, 3.0], index=["a", "b", "c"]), "two": pd.Series([1.0, 2.0, 3.0, 4.0], index=["a", "b", "c", "d"]), } df = pd.DataFrame(d) with catch_warnings(record=True) as w: # successfully add new column # this should not raise a warning df["three"] = df.two + 1 assert len(w) == 0 assert df.three.sum() > df.two.sum() with catch_warnings(record=True) as w: # successfully modify column in place # this should not raise a warning df.one += 1 assert len(w) == 0 assert df.one.iloc[0] == 2 with catch_warnings(record=True) as w: # successfully add an attribute to a series # this should not raise a warning df.two.not_an_index = [1, 2] assert len(w) == 0 with tm.assert_produces_warning(UserWarning): # warn when setting column to nonexistent name df.four = df.two + 2 assert df.four.sum() > df.two.sum()
""" Tests that work on both the Python and C engines but do not have a specific classification into the other test modules. """ import codecs import csv from datetime import datetime from io import StringIO import os import platform from urllib.error import URLError import numpy as np import pytest from pandas._libs.tslib import Timestamp from pandas.errors import DtypeWarning, EmptyDataError, ParserError import pandas.util._test_decorators as td from pandas import DataFrame, Index, MultiIndex, Series, compat, concat import pandas._testing as tm from pandas.io.parsers import CParserWrapper, TextFileReader, TextParser def test_override_set_noconvert_columns(): # see gh-17351 # # Usecols needs to be sorted in _set_noconvert_columns based # on the test_usecols_with_parse_dates test from test_usecols.py class MyTextFileReader(TextFileReader): def __init__(self): self._currow = 0 self.squeeze = False class MyCParserWrapper(CParserWrapper): def _set_noconvert_columns(self): if self.usecols_dtype == "integer": # self.usecols is a set, which is documented as unordered # but in practice, a CPython set of integers is sorted. # In other implementations this assumption does not hold. # The following code simulates a different order, which # before GH 17351 would cause the wrong columns to be # converted via the parse_dates parameter self.usecols = list(self.usecols) self.usecols.reverse() return CParserWrapper._set_noconvert_columns(self) data = """a,b,c,d,e 0,1,20140101,0900,4 0,1,20140102,1000,4""" parse_dates = [[1, 2]] cols = { "a": [0, 0], "c_d": [Timestamp("2014-01-01 09:00:00"), Timestamp("2014-01-02 10:00:00")], } expected = DataFrame(cols, columns=["c_d", "a"]) parser = MyTextFileReader() parser.options = { "usecols": [0, 2, 3], "parse_dates": parse_dates, "delimiter": ",", } parser._engine = MyCParserWrapper(StringIO(data), **parser.options) result = parser.read() tm.assert_frame_equal(result, expected) def test_empty_decimal_marker(all_parsers): data = """A|B|C 1|2,334|5 10|13|10. """ # Parsers support only length-1 decimals msg = "Only length-1 decimal markers supported" parser = all_parsers with pytest.raises(ValueError, match=msg): parser.read_csv(StringIO(data), decimal="") def test_bad_stream_exception(all_parsers, csv_dir_path): # see gh-13652 # # This test validates that both the Python engine and C engine will # raise UnicodeDecodeError instead of C engine raising ParserError # and swallowing the exception that caused read to fail. path = os.path.join(csv_dir_path, "sauron.SHIFT_JIS.csv") codec = codecs.lookup("utf-8") utf8 = codecs.lookup("utf-8") parser = all_parsers msg = "'utf-8' codec can't decode byte" # Stream must be binary UTF8. 
with open(path, "rb") as handle, codecs.StreamRecoder( handle, utf8.encode, utf8.decode, codec.streamreader, codec.streamwriter ) as stream: with pytest.raises(UnicodeDecodeError, match=msg): parser.read_csv(stream) def test_read_csv_local(all_parsers, csv1): prefix = "file:///" if compat.is_platform_windows() else "file://" parser = all_parsers fname = prefix + str(os.path.abspath(csv1)) result = parser.read_csv(fname, index_col=0, parse_dates=True) expected = DataFrame( [ [0.980269, 3.685731, -0.364216805298, -1.159738], [1.047916, -0.041232, -0.16181208307, 0.212549], [0.498581, 0.731168, -0.537677223318, 1.346270], [1.120202, 1.567621, 0.00364077397681, 0.675253], [-0.487094, 0.571455, -1.6116394093, 0.103469], [0.836649, 0.246462, 0.588542635376, 1.062782], [-0.157161, 1.340307, 1.1957779562, -1.097007], ], columns=["A", "B", "C", "D"], index=Index( [ datetime(2000, 1, 3), datetime(2000, 1, 4), datetime(2000, 1, 5), datetime(2000, 1, 6), datetime(2000, 1, 7), datetime(2000, 1, 10), datetime(2000, 1, 11), ], name="index", ), ) tm.assert_frame_equal(result, expected) def test_1000_sep(all_parsers): parser = all_parsers data = """A|B|C 1|2,334|5 10|13|10. """ expected = DataFrame({"A": [1, 10], "B": [2334, 13], "C": [5, 10.0]}) result = parser.read_csv(StringIO(data), sep="|", thousands=",") tm.assert_frame_equal(result, expected) def test_squeeze(all_parsers): data = """\ a,1 b,2 c,3 """ parser = all_parsers index = Index(["a", "b", "c"], name=0) expected = Series([1, 2, 3], name=1, index=index) result = parser.read_csv(StringIO(data), index_col=0, header=None, squeeze=True) tm.assert_series_equal(result, expected) # see gh-8217 # # Series should not be a view. assert not result._is_view def test_malformed(all_parsers): # see gh-6607 parser = all_parsers data = """ignore A,B,C 1,2,3 # comment 1,2,3,4,5 2,3,4 """ msg = "Expected 3 fields in line 4, saw 5" with pytest.raises(ParserError, match=msg): parser.read_csv(StringIO(data), header=1, comment="#") @pytest.mark.parametrize("nrows", [5, 3, None]) def test_malformed_chunks(all_parsers, nrows): data = """ignore A,B,C skip 1,2,3 3,5,10 # comment 1,2,3,4,5 2,3,4 """ parser = all_parsers msg = "Expected 3 fields in line 6, saw 5" reader = parser.read_csv( StringIO(data), header=1, comment="#", iterator=True, chunksize=1, skiprows=[2] ) with pytest.raises(ParserError, match=msg): reader.read(nrows) def test_unnamed_columns(all_parsers): data = """A,B,C,, 1,2,3,4,5 6,7,8,9,10 11,12,13,14,15 """ parser = all_parsers expected = DataFrame( [[1, 2, 3, 4, 5], [6, 7, 8, 9, 10], [11, 12, 13, 14, 15]], dtype=np.int64, columns=["A", "B", "C", "Unnamed: 3", "Unnamed: 4"], ) result = parser.read_csv(StringIO(data)) tm.assert_frame_equal(result, expected) def test_csv_mixed_type(all_parsers): data = """A,B,C a,1,2 b,3,4 c,4,5 """ parser = all_parsers expected = DataFrame({"A": ["a", "b", "c"], "B": [1, 3, 4], "C": [2, 4, 5]}) result = parser.read_csv(StringIO(data)) tm.assert_frame_equal(result, expected) def test_read_csv_low_memory_no_rows_with_index(all_parsers): # see gh-21141 parser = all_parsers if not parser.low_memory: pytest.skip("This is a low-memory specific test") data = """A,B,C 1,1,1,2 2,2,3,4 3,3,4,5 """ result = parser.read_csv(StringIO(data), low_memory=True, index_col=0, nrows=0) expected = DataFrame(columns=["A", "B", "C"]) tm.assert_frame_equal(result, expected) def test_read_csv_dataframe(all_parsers, csv1): parser = all_parsers result = parser.read_csv(csv1, index_col=0, parse_dates=True) expected = DataFrame( [ [0.980269, 
3.685731, -0.364216805298, -1.159738], [1.047916, -0.041232, -0.16181208307, 0.212549], [0.498581, 0.731168, -0.537677223318, 1.346270], [1.120202, 1.567621, 0.00364077397681, 0.675253], [-0.487094, 0.571455, -1.6116394093, 0.103469], [0.836649, 0.246462, 0.588542635376, 1.062782], [-0.157161, 1.340307, 1.1957779562, -1.097007], ], columns=["A", "B", "C", "D"], index=Index( [ datetime(2000, 1, 3), datetime(2000, 1, 4), datetime(2000, 1, 5), datetime(2000, 1, 6), datetime(2000, 1, 7), datetime(2000, 1, 10), datetime(2000, 1, 11), ], name="index", ), ) tm.assert_frame_equal(result, expected) def test_read_csv_no_index_name(all_parsers, csv_dir_path): parser = all_parsers csv2 = os.path.join(csv_dir_path, "test2.csv") result = parser.read_csv(csv2, index_col=0, parse_dates=True) expected = DataFrame( [ [0.980269, 3.685731, -0.364216805298, -1.159738, "foo"], [1.047916, -0.041232, -0.16181208307, 0.212549, "bar"], [0.498581, 0.731168, -0.537677223318, 1.346270, "baz"], [1.120202, 1.567621, 0.00364077397681, 0.675253, "qux"], [-0.487094, 0.571455, -1.6116394093, 0.103469, "foo2"], ], columns=["A", "B", "C", "D", "E"], index=Index( [ datetime(2000, 1, 3), datetime(2000, 1, 4), datetime(2000, 1, 5), datetime(2000, 1, 6), datetime(2000, 1, 7), ] ), ) tm.assert_frame_equal(result, expected) def test_read_csv_wrong_num_columns(all_parsers): # Too few columns. data = """A,B,C,D,E,F 1,2,3,4,5,6 6,7,8,9,10,11,12 11,12,13,14,15,16 """ parser = all_parsers msg = "Expected 6 fields in line 3, saw 7" with pytest.raises(ParserError, match=msg): parser.read_csv(StringIO(data)) def test_read_duplicate_index_explicit(all_parsers): data = """index,A,B,C,D foo,2,3,4,5 bar,7,8,9,10 baz,12,13,14,15 qux,12,13,14,15 foo,12,13,14,15 bar,12,13,14,15 """ parser = all_parsers result = parser.read_csv(StringIO(data), index_col=0) expected = DataFrame( [ [2, 3, 4, 5], [7, 8, 9, 10], [12, 13, 14, 15], [12, 13, 14, 15], [12, 13, 14, 15], [12, 13, 14, 15], ], columns=["A", "B", "C", "D"], index=Index(["foo", "bar", "baz", "qux", "foo", "bar"], name="index"), ) tm.assert_frame_equal(result, expected) def test_read_duplicate_index_implicit(all_parsers): data = """A,B,C,D foo,2,3,4,5 bar,7,8,9,10 baz,12,13,14,15 qux,12,13,14,15 foo,12,13,14,15 bar,12,13,14,15 """ parser = all_parsers result = parser.read_csv(StringIO(data)) expected = DataFrame( [ [2, 3, 4, 5], [7, 8, 9, 10], [12, 13, 14, 15], [12, 13, 14, 15], [12, 13, 14, 15], [12, 13, 14, 15], ], columns=["A", "B", "C", "D"], index=Index(["foo", "bar", "baz", "qux", "foo", "bar"]), ) tm.assert_frame_equal(result, expected) @pytest.mark.parametrize( "data,kwargs,expected", [ ( "A,B\nTrue,1\nFalse,2\nTrue,3", dict(), DataFrame([[True, 1], [False, 2], [True, 3]], columns=["A", "B"]), ), ( "A,B\nYES,1\nno,2\nyes,3\nNo,3\nYes,3", dict(true_values=["yes", "Yes", "YES"], false_values=["no", "NO", "No"]), DataFrame( [[True, 1], [False, 2], [True, 3], [False, 3], [True, 3]], columns=["A", "B"], ), ), ( "A,B\nTRUE,1\nFALSE,2\nTRUE,3", dict(), DataFrame([[True, 1], [False, 2], [True, 3]], columns=["A", "B"]), ), ( "A,B\nfoo,bar\nbar,foo", dict(true_values=["foo"], false_values=["bar"]), DataFrame([[True, False], [False, True]], columns=["A", "B"]), ), ], ) def test_parse_bool(all_parsers, data, kwargs, expected): parser = all_parsers result = parser.read_csv(StringIO(data), **kwargs) tm.assert_frame_equal(result, expected) def test_int_conversion(all_parsers): data = """A,B 1.0,1 2.0,2 3.0,3 """ parser = all_parsers result = parser.read_csv(StringIO(data)) expected = DataFrame([[1.0, 
1], [2.0, 2], [3.0, 3]], columns=["A", "B"]) tm.assert_frame_equal(result, expected) @pytest.mark.parametrize("nrows", [3, 3.0]) def test_read_nrows(all_parsers, nrows): # see gh-10476 data = """index,A,B,C,D foo,2,3,4,5 bar,7,8,9,10 baz,12,13,14,15 qux,12,13,14,15 foo2,12,13,14,15 bar2,12,13,14,15 """ expected = DataFrame( [["foo", 2, 3, 4, 5], ["bar", 7, 8, 9, 10], ["baz", 12, 13, 14, 15]], columns=["index", "A", "B", "C", "D"], ) parser = all_parsers result = parser.read_csv(StringIO(data), nrows=nrows) tm.assert_frame_equal(result, expected) @pytest.mark.parametrize("nrows", [1.2, "foo", -1]) def test_read_nrows_bad(all_parsers, nrows): data = """index,A,B,C,D foo,2,3,4,5 bar,7,8,9,10 baz,12,13,14,15 qux,12,13,14,15 foo2,12,13,14,15 bar2,12,13,14,15 """ msg = r"'nrows' must be an integer >=0" parser = all_parsers with pytest.raises(ValueError, match=msg): parser.read_csv(StringIO(data), nrows=nrows) @pytest.mark.parametrize("index_col", [0, "index"]) def test_read_chunksize_with_index(all_parsers, index_col): parser = all_parsers data = """index,A,B,C,D foo,2,3,4,5 bar,7,8,9,10 baz,12,13,14,15 qux,12,13,14,15 foo2,12,13,14,15 bar2,12,13,14,15 """ reader = parser.read_csv(StringIO(data), index_col=0, chunksize=2) expected = DataFrame( [ ["foo", 2, 3, 4, 5], ["bar", 7, 8, 9, 10], ["baz", 12, 13, 14, 15], ["qux", 12, 13, 14, 15], ["foo2", 12, 13, 14, 15], ["bar2", 12, 13, 14, 15], ], columns=["index", "A", "B", "C", "D"], ) expected = expected.set_index("index") chunks = list(reader) tm.assert_frame_equal(chunks[0], expected[:2]) tm.assert_frame_equal(chunks[1], expected[2:4]) tm.assert_frame_equal(chunks[2], expected[4:]) @pytest.mark.parametrize("chunksize", [1.3, "foo", 0]) def test_read_chunksize_bad(all_parsers, chunksize): data = """index,A,B,C,D foo,2,3,4,5 bar,7,8,9,10 baz,12,13,14,15 qux,12,13,14,15 foo2,12,13,14,15 bar2,12,13,14,15 """ parser = all_parsers msg = r"'chunksize' must be an integer >=1" with pytest.raises(ValueError, match=msg): parser.read_csv(StringIO(data), chunksize=chunksize) @pytest.mark.parametrize("chunksize", [2, 8]) def test_read_chunksize_and_nrows(all_parsers, chunksize): # see gh-15755 data = """index,A,B,C,D foo,2,3,4,5 bar,7,8,9,10 baz,12,13,14,15 qux,12,13,14,15 foo2,12,13,14,15 bar2,12,13,14,15 """ parser = all_parsers kwargs = dict(index_col=0, nrows=5) reader = parser.read_csv(StringIO(data), chunksize=chunksize, **kwargs) expected = parser.read_csv(StringIO(data), **kwargs) tm.assert_frame_equal(concat(reader), expected) def test_read_chunksize_and_nrows_changing_size(all_parsers): data = """index,A,B,C,D foo,2,3,4,5 bar,7,8,9,10 baz,12,13,14,15 qux,12,13,14,15 foo2,12,13,14,15 bar2,12,13,14,15 """ parser = all_parsers kwargs = dict(index_col=0, nrows=5) reader = parser.read_csv(StringIO(data), chunksize=8, **kwargs) expected = parser.read_csv(StringIO(data), **kwargs) tm.assert_frame_equal(reader.get_chunk(size=2), expected.iloc[:2]) tm.assert_frame_equal(reader.get_chunk(size=4), expected.iloc[2:5]) with pytest.raises(StopIteration, match=""): reader.get_chunk(size=3) def test_get_chunk_passed_chunksize(all_parsers): parser = all_parsers data = """A,B,C 1,2,3 4,5,6 7,8,9 1,2,3""" reader = parser.read_csv(StringIO(data), chunksize=2) result = reader.get_chunk() expected = DataFrame([[1, 2, 3], [4, 5, 6]], columns=["A", "B", "C"]) tm.assert_frame_equal(result, expected) @pytest.mark.parametrize("kwargs", [dict(), dict(index_col=0)]) def test_read_chunksize_compat(all_parsers, kwargs): # see gh-12185 data = """index,A,B,C,D foo,2,3,4,5 
bar,7,8,9,10 baz,12,13,14,15 qux,12,13,14,15 foo2,12,13,14,15 bar2,12,13,14,15 """ parser = all_parsers reader = parser.read_csv(StringIO(data), chunksize=2, **kwargs) result = parser.read_csv(StringIO(data), **kwargs) tm.assert_frame_equal(concat(reader), result) def test_read_chunksize_jagged_names(all_parsers): # see gh-23509 parser = all_parsers data = "\n".join(["0"] * 7 + [",".join(["0"] * 10)]) expected = DataFrame([[0] + [np.nan] * 9] * 7 + [[0] * 10]) reader = parser.read_csv(StringIO(data), names=range(10), chunksize=4) result = concat(reader) tm.assert_frame_equal(result, expected) def test_read_data_list(all_parsers): parser = all_parsers kwargs = dict(index_col=0) data = "A,B,C\nfoo,1,2,3\nbar,4,5,6" data_list = [["A", "B", "C"], ["foo", "1", "2", "3"], ["bar", "4", "5", "6"]] expected = parser.read_csv(StringIO(data), **kwargs) parser = TextParser(data_list, chunksize=2, **kwargs) result = parser.read() tm.assert_frame_equal(result, expected) def test_iterator(all_parsers): # see gh-6607 data = """index,A,B,C,D foo,2,3,4,5 bar,7,8,9,10 baz,12,13,14,15 qux,12,13,14,15 foo2,12,13,14,15 bar2,12,13,14,15 """ parser = all_parsers kwargs = dict(index_col=0) expected = parser.read_csv(StringIO(data), **kwargs) reader = parser.read_csv(StringIO(data), iterator=True, **kwargs) first_chunk = reader.read(3) tm.assert_frame_equal(first_chunk, expected[:3]) last_chunk = reader.read(5) tm.assert_frame_equal(last_chunk, expected[3:]) def test_iterator2(all_parsers): parser = all_parsers data = """A,B,C foo,1,2,3 bar,4,5,6 baz,7,8,9 """ reader = parser.read_csv(StringIO(data), iterator=True) result = list(reader) expected = DataFrame( [[1, 2, 3], [4, 5, 6], [7, 8, 9]], index=["foo", "bar", "baz"], columns=["A", "B", "C"], ) tm.assert_frame_equal(result[0], expected) def test_reader_list(all_parsers): data = """index,A,B,C,D foo,2,3,4,5 bar,7,8,9,10 baz,12,13,14,15 qux,12,13,14,15 foo2,12,13,14,15 bar2,12,13,14,15 """ parser = all_parsers kwargs = dict(index_col=0) lines = list(csv.reader(StringIO(data))) reader = TextParser(lines, chunksize=2, **kwargs) expected = parser.read_csv(StringIO(data), **kwargs) chunks = list(reader) tm.assert_frame_equal(chunks[0], expected[:2]) tm.assert_frame_equal(chunks[1], expected[2:4]) tm.assert_frame_equal(chunks[2], expected[4:]) def test_reader_list_skiprows(all_parsers): data = """index,A,B,C,D foo,2,3,4,5 bar,7,8,9,10 baz,12,13,14,15 qux,12,13,14,15 foo2,12,13,14,15 bar2,12,13,14,15 """ parser = all_parsers kwargs = dict(index_col=0) lines = list(csv.reader(StringIO(data))) reader = TextParser(lines, chunksize=2, skiprows=[1], **kwargs) expected = parser.read_csv(StringIO(data), **kwargs) chunks = list(reader) tm.assert_frame_equal(chunks[0], expected[1:3]) def test_iterator_stop_on_chunksize(all_parsers): # gh-3967: stopping iteration when chunksize is specified parser = all_parsers data = """A,B,C foo,1,2,3 bar,4,5,6 baz,7,8,9 """ reader = parser.read_csv(StringIO(data), chunksize=1) result = list(reader) assert len(result) == 3 expected = DataFrame( [[1, 2, 3], [4, 5, 6], [7, 8, 9]], index=["foo", "bar", "baz"], columns=["A", "B", "C"], ) tm.assert_frame_equal(concat(result), expected) @pytest.mark.parametrize( "kwargs", [dict(iterator=True, chunksize=1), dict(iterator=True), dict(chunksize=1)] ) def test_iterator_skipfooter_errors(all_parsers, kwargs): msg = "'skipfooter' not supported for 'iteration'" parser = all_parsers data = "a\n1\n2" with pytest.raises(ValueError, match=msg): parser.read_csv(StringIO(data), skipfooter=1, **kwargs) def 
test_nrows_skipfooter_errors(all_parsers): msg = "'skipfooter' not supported with 'nrows'" data = "a\n1\n2\n3\n4\n5\n6" parser = all_parsers with pytest.raises(ValueError, match=msg): parser.read_csv(StringIO(data), skipfooter=1, nrows=5) @pytest.mark.parametrize( "data,kwargs,expected", [ ( """foo,2,3,4,5 bar,7,8,9,10 baz,12,13,14,15 qux,12,13,14,15 foo2,12,13,14,15 bar2,12,13,14,15 """, dict(index_col=0, names=["index", "A", "B", "C", "D"]), DataFrame( [ [2, 3, 4, 5], [7, 8, 9, 10], [12, 13, 14, 15], [12, 13, 14, 15], [12, 13, 14, 15], [12, 13, 14, 15], ], index=Index(["foo", "bar", "baz", "qux", "foo2", "bar2"], name="index"), columns=["A", "B", "C", "D"], ), ), ( """foo,one,2,3,4,5 foo,two,7,8,9,10 foo,three,12,13,14,15 bar,one,12,13,14,15 bar,two,12,13,14,15 """, dict(index_col=[0, 1], names=["index1", "index2", "A", "B", "C", "D"]), DataFrame( [ [2, 3, 4, 5], [7, 8, 9, 10], [12, 13, 14, 15], [12, 13, 14, 15], [12, 13, 14, 15], ], index=MultiIndex.from_tuples( [ ("foo", "one"), ("foo", "two"), ("foo", "three"), ("bar", "one"), ("bar", "two"), ], names=["index1", "index2"], ), columns=["A", "B", "C", "D"], ), ), ], ) def test_pass_names_with_index(all_parsers, data, kwargs, expected): parser = all_parsers result = parser.read_csv(StringIO(data), **kwargs) tm.assert_frame_equal(result, expected) @pytest.mark.parametrize("index_col", [[0, 1], [1, 0]]) def test_multi_index_no_level_names(all_parsers, index_col): data = """index1,index2,A,B,C,D foo,one,2,3,4,5 foo,two,7,8,9,10 foo,three,12,13,14,15 bar,one,12,13,14,15 bar,two,12,13,14,15 """ headless_data = "\n".join(data.split("\n")[1:]) names = ["A", "B", "C", "D"] parser = all_parsers result = parser.read_csv( StringIO(headless_data), index_col=index_col, header=None, names=names ) expected = parser.read_csv(StringIO(data), index_col=index_col) # No index names in headless data. 
expected.index.names = [None] * 2 tm.assert_frame_equal(result, expected) def test_multi_index_no_level_names_implicit(all_parsers): parser = all_parsers data = """A,B,C,D foo,one,2,3,4,5 foo,two,7,8,9,10 foo,three,12,13,14,15 bar,one,12,13,14,15 bar,two,12,13,14,15 """ result = parser.read_csv(StringIO(data)) expected = DataFrame( [ [2, 3, 4, 5], [7, 8, 9, 10], [12, 13, 14, 15], [12, 13, 14, 15], [12, 13, 14, 15], ], columns=["A", "B", "C", "D"], index=MultiIndex.from_tuples( [ ("foo", "one"), ("foo", "two"), ("foo", "three"), ("bar", "one"), ("bar", "two"), ] ), ) tm.assert_frame_equal(result, expected) @pytest.mark.parametrize( "data,expected,header", [ ("a,b", DataFrame(columns=["a", "b"]), [0]), ( "a,b\nc,d", DataFrame(columns=MultiIndex.from_tuples([("a", "c"), ("b", "d")])), [0, 1], ), ], ) @pytest.mark.parametrize("round_trip", [True, False]) def test_multi_index_blank_df(all_parsers, data, expected, header, round_trip): # see gh-14545 parser = all_parsers data = expected.to_csv(index=False) if round_trip else data result = parser.read_csv(StringIO(data), header=header) tm.assert_frame_equal(result, expected) def test_no_unnamed_index(all_parsers): parser = all_parsers data = """ id c0 c1 c2 0 1 0 a b 1 2 0 c d 2 2 2 e f """ result = parser.read_csv(StringIO(data), sep=" ") expected = DataFrame( [[0, 1, 0, "a", "b"], [1, 2, 0, "c", "d"], [2, 2, 2, "e", "f"]], columns=["Unnamed: 0", "id", "c0", "c1", "c2"], ) tm.assert_frame_equal(result, expected) def test_read_csv_parse_simple_list(all_parsers): parser = all_parsers data = """foo bar baz qux foo foo bar""" result = parser.read_csv(StringIO(data), header=None) expected = DataFrame(["foo", "bar baz", "qux foo", "foo", "bar"]) tm.assert_frame_equal(result, expected) @tm.network def test_url(all_parsers, csv_dir_path): # TODO: FTP testing parser = all_parsers kwargs = dict(sep="\t") url = ( "https://raw.github.com/pandas-dev/pandas/master/" "pandas/tests/io/parser/data/salaries.csv" ) url_result = parser.read_csv(url, **kwargs) local_path = os.path.join(csv_dir_path, "salaries.csv") local_result = parser.read_csv(local_path, **kwargs) tm.assert_frame_equal(url_result, local_result) @pytest.mark.slow def test_local_file(all_parsers, csv_dir_path): parser = all_parsers kwargs = dict(sep="\t") local_path = os.path.join(csv_dir_path, "salaries.csv") local_result = parser.read_csv(local_path, **kwargs) url = "file://localhost/" + local_path try: url_result = parser.read_csv(url, **kwargs) tm.assert_frame_equal(url_result, local_result) except URLError: # Fails on some systems. 
pytest.skip("Failing on: " + " ".join(platform.uname())) def test_path_path_lib(all_parsers): parser = all_parsers df = tm.makeDataFrame() result = tm.round_trip_pathlib(df.to_csv, lambda p: parser.read_csv(p, index_col=0)) tm.assert_frame_equal(df, result) def test_path_local_path(all_parsers): parser = all_parsers df = tm.makeDataFrame() result = tm.round_trip_localpath( df.to_csv, lambda p: parser.read_csv(p, index_col=0) ) tm.assert_frame_equal(df, result) def test_nonexistent_path(all_parsers): # gh-2428: pls no segfault # gh-14086: raise more helpful FileNotFoundError # GH#29233 "File foo" instead of "File b'foo'" parser = all_parsers path = f"{tm.rands(10)}.csv" msg = r"\[Errno 2\]" with pytest.raises(FileNotFoundError, match=msg) as e: parser.read_csv(path) assert path == e.value.filename @td.skip_if_windows # os.chmod does not work in windows def test_no_permission(all_parsers): # GH 23784 parser = all_parsers msg = r"\[Errno 13\]" with tm.ensure_clean() as path: os.chmod(path, 0) # make file unreadable # verify that this process cannot open the file (not running as sudo) try: with open(path): pass pytest.skip("Running as sudo.") except PermissionError: pass with pytest.raises(PermissionError, match=msg) as e: parser.read_csv(path) assert path == e.value.filename def test_missing_trailing_delimiters(all_parsers): parser = all_parsers data = """A,B,C,D 1,2,3,4 1,3,3, 1,4,5""" result = parser.read_csv(StringIO(data)) expected = DataFrame( [[1, 2, 3, 4], [1, 3, 3, np.nan], [1, 4, 5, np.nan]], columns=["A", "B", "C", "D"], ) tm.assert_frame_equal(result, expected) def test_skip_initial_space(all_parsers): data = ( '"09-Apr-2012", "01:10:18.300", 2456026.548822908, 12849, ' "1.00361, 1.12551, 330.65659, 0355626618.16711, 73.48821, " "314.11625, 1917.09447, 179.71425, 80.000, 240.000, -350, " "70.06056, 344.98370, 1, 1, -0.689265, -0.692787, " "0.212036, 14.7674, 41.605, -9999.0, -9999.0, " "-9999.0, -9999.0, -9999.0, -9999.0, 000, 012, 128" ) parser = all_parsers result = parser.read_csv( StringIO(data), names=list(range(33)), header=None, na_values=["-9999.0"], skipinitialspace=True, ) expected = DataFrame( [ [ "09-Apr-2012", "01:10:18.300", 2456026.548822908, 12849, 1.00361, 1.12551, 330.65659, 355626618.16711, 73.48821, 314.11625, 1917.09447, 179.71425, 80.0, 240.0, -350, 70.06056, 344.9837, 1, 1, -0.689265, -0.692787, 0.212036, 14.7674, 41.605, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, 0, 12, 128, ] ] ) tm.assert_frame_equal(result, expected) def test_trailing_delimiters(all_parsers): # see gh-2442 data = """A,B,C 1,2,3, 4,5,6, 7,8,9,""" parser = all_parsers result = parser.read_csv(StringIO(data), index_col=False) expected = DataFrame({"A": [1, 4, 7], "B": [2, 5, 8], "C": [3, 6, 9]}) tm.assert_frame_equal(result, expected) def test_escapechar(all_parsers): # https://stackoverflow.com/questions/13824840/feature-request-for- # pandas-read-csv data = '''SEARCH_TERM,ACTUAL_URL "bra tv bord","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord" "tv p\xc3\xa5 hjul","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord" "SLAGBORD, \\"Bergslagen\\", IKEA:s 1700-tals series","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"''' # noqa parser = all_parsers result = parser.read_csv( StringIO(data), escapechar="\\", quotechar='"', encoding="utf-8" ) assert 
result["SEARCH_TERM"][2] == 'SLAGBORD, "Bergslagen", IKEA:s 1700-tals series' tm.assert_index_equal(result.columns, Index(["SEARCH_TERM", "ACTUAL_URL"])) def test_int64_min_issues(all_parsers): # see gh-2599 parser = all_parsers data = "A,B\n0,0\n0," result = parser.read_csv(StringIO(data)) expected = DataFrame({"A": [0, 0], "B": [0, np.nan]}) tm.assert_frame_equal(result, expected) def test_parse_integers_above_fp_precision(all_parsers): data = """Numbers 17007000002000191 17007000002000191 17007000002000191 17007000002000191 17007000002000192 17007000002000192 17007000002000192 17007000002000192 17007000002000192 17007000002000194""" parser = all_parsers result = parser.read_csv(StringIO(data)) expected = DataFrame( { "Numbers": [ 17007000002000191, 17007000002000191, 17007000002000191, 17007000002000191, 17007000002000192, 17007000002000192, 17007000002000192, 17007000002000192, 17007000002000192, 17007000002000194, ] } ) tm.assert_frame_equal(result, expected) def test_chunks_have_consistent_numerical_type(all_parsers): parser = all_parsers integers = [str(i) for i in range(499999)] data = "a\n" + "\n".join(integers + ["1.0", "2.0"] + integers) # Coercions should work without warnings. with tm.assert_produces_warning(None): result = parser.read_csv(StringIO(data)) assert type(result.a[0]) is np.float64 assert result.a.dtype == np.float def test_warn_if_chunks_have_mismatched_type(all_parsers): warning_type = None parser = all_parsers integers = [str(i) for i in range(499999)] data = "a\n" + "\n".join(integers + ["a", "b"] + integers) # see gh-3866: if chunks are different types and can't # be coerced using numerical types, then issue warning. if parser.engine == "c" and parser.low_memory: warning_type = DtypeWarning with tm.assert_produces_warning(warning_type): df = parser.read_csv(StringIO(data)) assert df.a.dtype == np.object @pytest.mark.parametrize("sep", [" ", r"\s+"]) def test_integer_overflow_bug(all_parsers, sep): # see gh-2601 data = "65248E10 11\n55555E55 22\n" parser = all_parsers result = parser.read_csv(StringIO(data), header=None, sep=sep) expected = DataFrame([[6.5248e14, 11], [5.5555e59, 22]]) tm.assert_frame_equal(result, expected) def test_catch_too_many_names(all_parsers): # see gh-5156 data = """\ 1,2,3 4,,6 7,8,9 10,11,12\n""" parser = all_parsers msg = ( "Too many columns specified: expected 4 and found 3" if parser.engine == "c" else "Number of passed names did not match " "number of header fields in the file" ) with pytest.raises(ValueError, match=msg): parser.read_csv(StringIO(data), header=0, names=["a", "b", "c", "d"]) def test_ignore_leading_whitespace(all_parsers): # see gh-3374, gh-6607 parser = all_parsers data = " a b c\n 1 2 3\n 4 5 6\n 7 8 9" result = parser.read_csv(StringIO(data), sep=r"\s+") expected = DataFrame({"a": [1, 4, 7], "b": [2, 5, 8], "c": [3, 6, 9]}) tm.assert_frame_equal(result, expected) def test_chunk_begins_with_newline_whitespace(all_parsers): # see gh-10022 parser = all_parsers data = "\n hello\nworld\n" result = parser.read_csv(StringIO(data), header=None) expected = DataFrame([" hello", "world"]) tm.assert_frame_equal(result, expected) def test_empty_with_index(all_parsers): # see gh-10184 data = "x,y" parser = all_parsers result = parser.read_csv(StringIO(data), index_col=0) expected = DataFrame(columns=["y"], index=Index([], name="x")) tm.assert_frame_equal(result, expected) def test_empty_with_multi_index(all_parsers): # see gh-10467 data = "x,y,z" parser = all_parsers result = parser.read_csv(StringIO(data), index_col=["x", 
"y"]) expected = DataFrame( columns=["z"], index=MultiIndex.from_arrays([[]] * 2, names=["x", "y"]) ) tm.assert_frame_equal(result, expected) def test_empty_with_reversed_multi_index(all_parsers): data = "x,y,z" parser = all_parsers result = parser.read_csv(StringIO(data), index_col=[1, 0]) expected = DataFrame( columns=["z"], index=MultiIndex.from_arrays([[]] * 2, names=["y", "x"]) ) tm.assert_frame_equal(result, expected) def test_float_parser(all_parsers): # see gh-9565 parser = all_parsers data = "45e-1,4.5,45.,inf,-inf" result = parser.read_csv(StringIO(data), header=None) expected = DataFrame([[float(s) for s in data.split(",")]]) tm.assert_frame_equal(result, expected) def test_scientific_no_exponent(all_parsers): # see gh-12215 df = DataFrame.from_dict({"w": ["2e"], "x": ["3E"], "y": ["42e"], "z": ["632E"]}) data = df.to_csv(index=False) parser = all_parsers for precision in parser.float_precision_choices: df_roundtrip = parser.read_csv(StringIO(data), float_precision=precision) tm.assert_frame_equal(df_roundtrip, df) @pytest.mark.parametrize("conv", [None, np.int64, np.uint64]) def test_int64_overflow(all_parsers, conv): data = """ID 00013007854817840016671868 00013007854817840016749251 00013007854817840016754630 00013007854817840016781876 00013007854817840017028824 00013007854817840017963235 00013007854817840018860166""" parser = all_parsers if conv is None: # 13007854817840016671868 > UINT64_MAX, so this # will overflow and return object as the dtype. result = parser.read_csv(StringIO(data)) expected = DataFrame( [ "00013007854817840016671868", "00013007854817840016749251", "00013007854817840016754630", "00013007854817840016781876", "00013007854817840017028824", "00013007854817840017963235", "00013007854817840018860166", ], columns=["ID"], ) tm.assert_frame_equal(result, expected) else: # 13007854817840016671868 > UINT64_MAX, so attempts # to cast to either int64 or uint64 will result in # an OverflowError being raised. msg = ( "(Python int too large to convert to C long)|" "(long too big to convert)|" "(int too big to convert)" ) with pytest.raises(OverflowError, match=msg): parser.read_csv(StringIO(data), converters={"ID": conv}) @pytest.mark.parametrize( "val", [np.iinfo(np.uint64).max, np.iinfo(np.int64).max, np.iinfo(np.int64).min] ) def test_int64_uint64_range(all_parsers, val): # These numbers fall right inside the int64-uint64 # range, so they should be parsed as string. parser = all_parsers result = parser.read_csv(StringIO(str(val)), header=None) expected = DataFrame([val]) tm.assert_frame_equal(result, expected) @pytest.mark.parametrize( "val", [np.iinfo(np.uint64).max + 1, np.iinfo(np.int64).min - 1] ) def test_outside_int64_uint64_range(all_parsers, val): # These numbers fall just outside the int64-uint64 # range, so they should be parsed as string. parser = all_parsers result = parser.read_csv(StringIO(str(val)), header=None) expected = DataFrame([str(val)]) tm.assert_frame_equal(result, expected) @pytest.mark.parametrize("exp_data", [[str(-1), str(2 ** 63)], [str(2 ** 63), str(-1)]]) def test_numeric_range_too_wide(all_parsers, exp_data): # No numerical dtype can hold both negative and uint64 # values, so they should be cast as string. 
parser = all_parsers data = "\n".join(exp_data) expected = DataFrame(exp_data) result = parser.read_csv(StringIO(data), header=None) tm.assert_frame_equal(result, expected) @pytest.mark.parametrize("iterator", [True, False]) def test_empty_with_nrows_chunksize(all_parsers, iterator): # see gh-9535 parser = all_parsers expected = DataFrame(columns=["foo", "bar"]) nrows = 10 data = StringIO("foo,bar\n") if iterator: result = next(iter(parser.read_csv(data, chunksize=nrows))) else: result = parser.read_csv(data, nrows=nrows) tm.assert_frame_equal(result, expected) @pytest.mark.parametrize( "data,kwargs,expected,msg", [ # gh-10728: WHITESPACE_LINE ( "a,b,c\n4,5,6\n ", dict(), DataFrame([[4, 5, 6]], columns=["a", "b", "c"]), None, ), # gh-10548: EAT_LINE_COMMENT ( "a,b,c\n4,5,6\n#comment", dict(comment="#"), DataFrame([[4, 5, 6]], columns=["a", "b", "c"]), None, ), # EAT_CRNL_NOP ( "a,b,c\n4,5,6\n\r", dict(), DataFrame([[4, 5, 6]], columns=["a", "b", "c"]), None, ), # EAT_COMMENT ( "a,b,c\n4,5,6#comment", dict(comment="#"), DataFrame([[4, 5, 6]], columns=["a", "b", "c"]), None, ), # SKIP_LINE ( "a,b,c\n4,5,6\nskipme", dict(skiprows=[2]), DataFrame([[4, 5, 6]], columns=["a", "b", "c"]), None, ), # EAT_LINE_COMMENT ( "a,b,c\n4,5,6\n#comment", dict(comment="#", skip_blank_lines=False), DataFrame([[4, 5, 6]], columns=["a", "b", "c"]), None, ), # IN_FIELD ( "a,b,c\n4,5,6\n ", dict(skip_blank_lines=False), DataFrame([["4", 5, 6], [" ", None, None]], columns=["a", "b", "c"]), None, ), # EAT_CRNL ( "a,b,c\n4,5,6\n\r", dict(skip_blank_lines=False), DataFrame([[4, 5, 6], [None, None, None]], columns=["a", "b", "c"]), None, ), # ESCAPED_CHAR ( "a,b,c\n4,5,6\n\\", dict(escapechar="\\"), None, "(EOF following escape character)|(unexpected end of data)", ), # ESCAPE_IN_QUOTED_FIELD ( 'a,b,c\n4,5,6\n"\\', dict(escapechar="\\"), None, "(EOF inside string starting at row 2)|(unexpected end of data)", ), # IN_QUOTED_FIELD ( 'a,b,c\n4,5,6\n"', dict(escapechar="\\"), None, "(EOF inside string starting at row 2)|(unexpected end of data)", ), ], ids=[ "whitespace-line", "eat-line-comment", "eat-crnl-nop", "eat-comment", "skip-line", "eat-line-comment", "in-field", "eat-crnl", "escaped-char", "escape-in-quoted-field", "in-quoted-field", ], ) def test_eof_states(all_parsers, data, kwargs, expected, msg): # see gh-10728, gh-10548 parser = all_parsers if expected is None: with pytest.raises(ParserError, match=msg): parser.read_csv(StringIO(data), **kwargs) else: result = parser.read_csv(StringIO(data), **kwargs) tm.assert_frame_equal(result, expected) @pytest.mark.parametrize("usecols", [None, [0, 1], ["a", "b"]]) def test_uneven_lines_with_usecols(all_parsers, usecols): # see gh-12203 parser = all_parsers data = r"""a,b,c 0,1,2 3,4,5,6,7 8,9,10""" if usecols is None: # Make sure that an error is still raised # when the "usecols" parameter is not provided. msg = r"Expected \d+ fields in line \d+, saw \d+" with pytest.raises(ParserError, match=msg): parser.read_csv(StringIO(data)) else: expected = DataFrame({"a": [0, 3, 8], "b": [1, 4, 9]}) result = parser.read_csv(StringIO(data), usecols=usecols) tm.assert_frame_equal(result, expected) @pytest.mark.parametrize( "data,kwargs,expected", [ # First, check to see that the response of parser when faced with no # provided columns raises the correct error, with or without usecols. 
("", dict(), None), ("", dict(usecols=["X"]), None), ( ",,", dict(names=["Dummy", "X", "Dummy_2"], usecols=["X"]), DataFrame(columns=["X"], index=[0], dtype=np.float64), ), ( "", dict(names=["Dummy", "X", "Dummy_2"], usecols=["X"]), DataFrame(columns=["X"]), ), ], ) def test_read_empty_with_usecols(all_parsers, data, kwargs, expected): # see gh-12493 parser = all_parsers if expected is None: msg = "No columns to parse from file" with pytest.raises(EmptyDataError, match=msg): parser.read_csv(StringIO(data), **kwargs) else: result = parser.read_csv(StringIO(data), **kwargs) tm.assert_frame_equal(result, expected) @pytest.mark.parametrize( "kwargs,expected", [ # gh-8661, gh-8679: this should ignore six lines, including # lines with trailing whitespace and blank lines. ( dict( header=None, delim_whitespace=True, skiprows=[0, 1, 2, 3, 5, 6], skip_blank_lines=True, ), DataFrame([[1.0, 2.0, 4.0], [5.1, np.nan, 10.0]]), ), # gh-8983: test skipping set of rows after a row with trailing spaces. ( dict( delim_whitespace=True, skiprows=[1, 2, 3, 5, 6], skip_blank_lines=True ), DataFrame({"A": [1.0, 5.1], "B": [2.0, np.nan], "C": [4.0, 10]}), ), ], ) def test_trailing_spaces(all_parsers, kwargs, expected): data = "A B C \nrandom line with trailing spaces \nskip\n1,2,3\n1,2.,4.\nrandom line with trailing tabs\t\t\t\n \n5.1,NaN,10.0\n" # noqa parser = all_parsers result = parser.read_csv(StringIO(data.replace(",", " ")), **kwargs) tm.assert_frame_equal(result, expected) def test_raise_on_sep_with_delim_whitespace(all_parsers): # see gh-6607 data = "a b c\n1 2 3" parser = all_parsers with pytest.raises(ValueError, match="you can only specify one"): parser.read_csv(StringIO(data), sep=r"\s", delim_whitespace=True) @pytest.mark.parametrize("delim_whitespace", [True, False]) def test_single_char_leading_whitespace(all_parsers, delim_whitespace): # see gh-9710 parser = all_parsers data = """\ MyColumn a b a b\n""" expected = DataFrame({"MyColumn": list("abab")}) result = parser.read_csv( StringIO(data), skipinitialspace=True, delim_whitespace=delim_whitespace ) tm.assert_frame_equal(result, expected) @pytest.mark.parametrize( "sep,skip_blank_lines,exp_data", [ (",", True, [[1.0, 2.0, 4.0], [5.0, np.nan, 10.0], [-70.0, 0.4, 1.0]]), (r"\s+", True, [[1.0, 2.0, 4.0], [5.0, np.nan, 10.0], [-70.0, 0.4, 1.0]]), ( ",", False, [ [1.0, 2.0, 4.0], [np.nan, np.nan, np.nan], [np.nan, np.nan, np.nan], [5.0, np.nan, 10.0], [np.nan, np.nan, np.nan], [-70.0, 0.4, 1.0], ], ), ], ) def test_empty_lines(all_parsers, sep, skip_blank_lines, exp_data): parser = all_parsers data = """\ A,B,C 1,2.,4. 5.,NaN,10.0 -70,.4,1 """ if sep == r"\s+": data = data.replace(",", " ") result = parser.read_csv(StringIO(data), sep=sep, skip_blank_lines=skip_blank_lines) expected = DataFrame(exp_data, columns=["A", "B", "C"]) tm.assert_frame_equal(result, expected) def test_whitespace_lines(all_parsers): parser = all_parsers data = """ \t \t\t \t A,B,C \t 1,2.,4. 
5.,NaN,10.0 """ expected = DataFrame([[1, 2.0, 4.0], [5.0, np.nan, 10.0]], columns=["A", "B", "C"]) result = parser.read_csv(StringIO(data)) tm.assert_frame_equal(result, expected) @pytest.mark.parametrize( "data,expected", [ ( """ A B C D a 1 2 3 4 b 1 2 3 4 c 1 2 3 4 """, DataFrame( [[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4]], columns=["A", "B", "C", "D"], index=["a", "b", "c"], ), ), ( " a b c\n1 2 3 \n4 5 6\n 7 8 9", DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]], columns=["a", "b", "c"]), ), ], ) def test_whitespace_regex_separator(all_parsers, data, expected): # see gh-6607 parser = all_parsers result = parser.read_csv(StringIO(data), sep=r"\s+") tm.assert_frame_equal(result, expected) def test_verbose_read(all_parsers, capsys): parser = all_parsers data = """a,b,c,d one,1,2,3 one,1,2,3 ,1,2,3 one,1,2,3 ,1,2,3 ,1,2,3 one,1,2,3 two,1,2,3""" # Engines are verbose in different ways. parser.read_csv(StringIO(data), verbose=True) captured = capsys.readouterr() if parser.engine == "c": assert "Tokenization took:" in captured.out assert "Parser memory cleanup took:" in captured.out else: # Python engine assert captured.out == "Filled 3 NA values in column a\n" def test_verbose_read2(all_parsers, capsys): parser = all_parsers data = """a,b,c,d one,1,2,3 two,1,2,3 three,1,2,3 four,1,2,3 five,1,2,3 ,1,2,3 seven,1,2,3 eight,1,2,3""" parser.read_csv(StringIO(data), verbose=True, index_col=0) captured = capsys.readouterr() # Engines are verbose in different ways. if parser.engine == "c": assert "Tokenization took:" in captured.out assert "Parser memory cleanup took:" in captured.out else: # Python engine assert captured.out == "Filled 1 NA values in column a\n" def test_iteration_open_handle(all_parsers): parser = all_parsers kwargs = dict(squeeze=True, header=None) with tm.ensure_clean() as path: with open(path, "w") as f: f.write("AAA\nBBB\nCCC\nDDD\nEEE\nFFF\nGGG") with open(path, "r") as f: for line in f: if "CCC" in line: break result = parser.read_csv(f, **kwargs) expected = Series(["DDD", "EEE", "FFF", "GGG"], name=0) tm.assert_series_equal(result, expected) @pytest.mark.parametrize( "data,thousands,decimal", [ ( """A|B|C 1|2,334.01|5 10|13|10. 
""", ",", ".", ), ( """A|B|C 1|2.334,01|5 10|13|10, """, ".", ",", ), ], ) def test_1000_sep_with_decimal(all_parsers, data, thousands, decimal): parser = all_parsers expected = DataFrame({"A": [1, 10], "B": [2334.01, 13], "C": [5, 10.0]}) result = parser.read_csv( StringIO(data), sep="|", thousands=thousands, decimal=decimal ) tm.assert_frame_equal(result, expected) def test_euro_decimal_format(all_parsers): parser = all_parsers data = """Id;Number1;Number2;Text1;Text2;Number3 1;1521,1541;187101,9543;ABC;poi;4,738797819 2;121,12;14897,76;DEF;uyt;0,377320872 3;878,158;108013,434;GHI;rez;2,735694704""" result = parser.read_csv(StringIO(data), sep=";", decimal=",") expected = DataFrame( [ [1, 1521.1541, 187101.9543, "ABC", "poi", 4.738797819], [2, 121.12, 14897.76, "DEF", "uyt", 0.377320872], [3, 878.158, 108013.434, "GHI", "rez", 2.735694704], ], columns=["Id", "Number1", "Number2", "Text1", "Text2", "Number3"], ) tm.assert_frame_equal(result, expected) @pytest.mark.parametrize("na_filter", [True, False]) def test_inf_parsing(all_parsers, na_filter): parser = all_parsers data = """\ ,A a,inf b,-inf c,+Inf d,-Inf e,INF f,-INF g,+INf h,-INf i,inF j,-inF""" expected = DataFrame( {"A": [float("inf"), float("-inf")] * 5}, index=["a", "b", "c", "d", "e", "f", "g", "h", "i", "j"], ) result = parser.read_csv(StringIO(data), index_col=0, na_filter=na_filter) tm.assert_frame_equal(result, expected) @pytest.mark.parametrize("na_filter", [True, False]) def test_infinity_parsing(all_parsers, na_filter): parser = all_parsers data = """\ ,A a,Infinity b,-Infinity c,+Infinity """ expected = DataFrame( {"A": [float("infinity"), float("-infinity"), float("+infinity")]}, index=["a", "b", "c"], ) result = parser.read_csv(StringIO(data), index_col=0, na_filter=na_filter) tm.assert_frame_equal(result, expected) @pytest.mark.parametrize("nrows", [0, 1, 2, 3, 4, 5]) def test_raise_on_no_columns(all_parsers, nrows): parser = all_parsers data = "\n" * nrows msg = "No columns to parse from file" with pytest.raises(EmptyDataError, match=msg): parser.read_csv(StringIO(data)) def test_memory_map(all_parsers, csv_dir_path): mmap_file = os.path.join(csv_dir_path, "test_mmap.csv") parser = all_parsers expected = DataFrame( {"a": [1, 2, 3], "b": ["one", "two", "three"], "c": ["I", "II", "III"]} ) result = parser.read_csv(mmap_file, memory_map=True) tm.assert_frame_equal(result, expected) def test_null_byte_char(all_parsers): # see gh-2741 data = "\x00,foo" names = ["a", "b"] parser = all_parsers if parser.engine == "c": expected = DataFrame([[np.nan, "foo"]], columns=names) out = parser.read_csv(StringIO(data), names=names) tm.assert_frame_equal(out, expected) else: msg = "NULL byte detected" with pytest.raises(ParserError, match=msg): parser.read_csv(StringIO(data), names=names) def test_temporary_file(all_parsers): # see gh-13398 parser = all_parsers data = "0 0" with tm.ensure_clean(mode="w+", return_filelike=True) as new_file: new_file.write(data) new_file.flush() new_file.seek(0) result = parser.read_csv(new_file, sep=r"\s+", header=None) expected = DataFrame([[0, 0]]) tm.assert_frame_equal(result, expected) def test_internal_eof_byte(all_parsers): # see gh-5500 parser = all_parsers data = "a,b\n1\x1a,2" expected = DataFrame([["1\x1a", 2]], columns=["a", "b"]) result = parser.read_csv(StringIO(data)) tm.assert_frame_equal(result, expected) def test_internal_eof_byte_to_file(all_parsers): # see gh-16559 parser = all_parsers data = b'c1,c2\r\n"test \x1a test", test\r\n' expected = DataFrame([["test \x1a test", " test"]], 
columns=["c1", "c2"]) path = f"__{tm.rands(10)}__.csv" with tm.ensure_clean(path) as path: with open(path, "wb") as f: f.write(data) result = parser.read_csv(path) tm.assert_frame_equal(result, expected) def test_sub_character(all_parsers, csv_dir_path): # see gh-16893 filename = os.path.join(csv_dir_path, "sub_char.csv") expected = DataFrame([[1, 2, 3]], columns=["a", "\x1ab", "c"]) parser = all_parsers result = parser.read_csv(filename) tm.assert_frame_equal(result, expected) def test_file_handle_string_io(all_parsers): # gh-14418 # # Don't close user provided file handles. parser = all_parsers data = "a,b\n1,2" fh = StringIO(data) parser.read_csv(fh) assert not fh.closed def test_file_handles_with_open(all_parsers, csv1): # gh-14418 # # Don't close user provided file handles. parser = all_parsers for mode in ["r", "rb"]: with open(csv1, mode) as f: parser.read_csv(f) assert not f.closed def test_invalid_file_buffer_class(all_parsers): # see gh-15337 class InvalidBuffer: pass parser = all_parsers msg = "Invalid file path or buffer object type" with pytest.raises(ValueError, match=msg): parser.read_csv(InvalidBuffer()) def test_invalid_file_buffer_mock(all_parsers): # see gh-15337 parser = all_parsers msg = "Invalid file path or buffer object type" class Foo: pass with pytest.raises(ValueError, match=msg): parser.read_csv(Foo()) def test_valid_file_buffer_seems_invalid(all_parsers): # gh-16135: we want to ensure that "tell" and "seek" # aren't actually being used when we call `read_csv` # # Thus, while the object may look "invalid" (these # methods are attributes of the `StringIO` class), # it is still a valid file-object for our purposes. class NoSeekTellBuffer(StringIO): def tell(self): raise AttributeError("No tell method") def seek(self, pos, whence=0): raise AttributeError("No seek method") data = "a\n1" parser = all_parsers expected = DataFrame({"a": [1]}) result = parser.read_csv(NoSeekTellBuffer(data)) tm.assert_frame_equal(result, expected) @pytest.mark.parametrize( "kwargs", [dict(), dict(error_bad_lines=True)], # Default is True. # Explicitly pass in. ) @pytest.mark.parametrize( "warn_kwargs", [dict(), dict(warn_bad_lines=True), dict(warn_bad_lines=False)] ) def test_error_bad_lines(all_parsers, kwargs, warn_kwargs): # see gh-15925 parser = all_parsers kwargs.update(**warn_kwargs) data = "a\n1\n1,2,3\n4\n5,6,7" msg = "Expected 1 fields in line 3, saw 3" with pytest.raises(ParserError, match=msg): parser.read_csv(StringIO(data), **kwargs) def test_warn_bad_lines(all_parsers, capsys): # see gh-15925 parser = all_parsers data = "a\n1\n1,2,3\n4\n5,6,7" expected = DataFrame({"a": [1, 4]}) result = parser.read_csv(StringIO(data), error_bad_lines=False, warn_bad_lines=True) tm.assert_frame_equal(result, expected) captured = capsys.readouterr() assert "Skipping line 3" in captured.err assert "Skipping line 5" in captured.err def test_suppress_error_output(all_parsers, capsys): # see gh-15925 parser = all_parsers data = "a\n1\n1,2,3\n4\n5,6,7" expected = DataFrame({"a": [1, 4]}) result = parser.read_csv( StringIO(data), error_bad_lines=False, warn_bad_lines=False ) tm.assert_frame_equal(result, expected) captured = capsys.readouterr() assert captured.err == "" @pytest.mark.parametrize("filename", ["sé-es-vé.csv", "ru-sй.csv", "中文文件名.csv"]) def test_filename_with_special_chars(all_parsers, filename): # see gh-15086. 
parser = all_parsers df = DataFrame({"a": [1, 2, 3]}) with tm.ensure_clean(filename) as path: df.to_csv(path, index=False) result = parser.read_csv(path) tm.assert_frame_equal(result, df) def test_read_csv_memory_growth_chunksize(all_parsers): # see gh-24805 # # Let's just make sure that we don't crash # as we iteratively process all chunks. parser = all_parsers with tm.ensure_clean() as path: with open(path, "w") as f: for i in range(1000): f.write(str(i) + "\n") result = parser.read_csv(path, chunksize=20) for _ in result: pass def test_read_csv_raises_on_header_prefix(all_parsers): # gh-27394 parser = all_parsers msg = "Argument prefix must be None if argument header is not None" s = StringIO("0,1\n2,3") with pytest.raises(ValueError, match=msg): parser.read_csv(s, header=0, prefix="_X") def test_read_table_equivalency_to_read_csv(all_parsers): # see gh-21948 # As of 0.25.0, read_table is undeprecated parser = all_parsers data = "a\tb\n1\t2\n3\t4" expected = parser.read_csv(StringIO(data), sep="\t") result = parser.read_table(StringIO(data)) tm.assert_frame_equal(result, expected) def test_first_row_bom(all_parsers): # see gh-26545 parser = all_parsers data = '''\ufeff"Head1" "Head2" "Head3"''' result = parser.read_csv(StringIO(data), delimiter="\t") expected = DataFrame(columns=["Head1", "Head2", "Head3"]) tm.assert_frame_equal(result, expected) def test_integer_precision(all_parsers): # Gh 7072 s = """1,1;0;0;0;1;1;3844;3844;3844;1;1;1;1;1;1;0;0;1;1;0;0,,,4321583677327450765 5,1;0;0;0;1;1;843;843;843;1;1;1;1;1;1;0;0;1;1;0;0,64.0,;,4321113141090630389""" parser = all_parsers result = parser.read_csv(StringIO(s), header=None)[4] expected = Series([4321583677327450765, 4321113141090630389], name=4) tm.assert_series_equal(result, expected) def test_file_descriptor_leak(all_parsers): # GH 31488 parser = all_parsers with tm.ensure_clean() as path: def test(): with pytest.raises(EmptyDataError, match="No columns to parse from file"): parser.read_csv(path) td.check_file_leaks(test)() @pytest.mark.parametrize("nrows", range(1, 6)) def test_blank_lines_between_header_and_data_rows(all_parsers, nrows): # GH 28071 ref = DataFrame( [[np.nan, np.nan], [np.nan, np.nan], [1, 2], [np.nan, np.nan], [3, 4]], columns=list("ab"), ) csv = "\nheader\n\na,b\n\n\n1,2\n\n3,4" parser = all_parsers df = parser.read_csv(StringIO(csv), header=3, nrows=nrows, skip_blank_lines=False) tm.assert_frame_equal(df, ref[:nrows]) def test_no_header_two_extra_columns(all_parsers): # GH 26218 column_names = ["one", "two", "three"] ref = DataFrame([["foo", "bar", "baz"]], columns=column_names) stream = StringIO("foo,bar,baz,bam,blah") parser = all_parsers df = parser.read_csv(stream, header=None, names=column_names, index_col=False) tm.assert_frame_equal(df, ref)
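The tests above all consume the `all_parsers` pytest fixture, which lives in the parser test package's conftest.py and is not part of this file. For orientation, a minimal sketch of how such a fixture can parametrize both the C and Python engines is shown below; the wrapper class and fixture body are illustrative assumptions, not pandas' actual conftest code.

import pytest

import pandas as pd


class _ParserSketch:
    # Thin wrapper so tests can call parser.read_csv(...) with a pinned engine.
    def __init__(self, engine):
        self.engine = engine

    def read_csv(self, *args, **kwargs):
        kwargs.setdefault("engine", self.engine)
        return pd.read_csv(*args, **kwargs)


@pytest.fixture(params=["c", "python"])
def all_parsers(request):
    # Each test that requests this fixture runs once per engine.
    return _ParserSketch(request.param)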
repo_name: TomAugspurger/pandas
test_path: pandas/tests/io/parser/test_common.py
code_path: pandas/tests/dtypes/test_generic.py
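The three fields above close out this dataset row (repository, test file, and associated code path). For orientation only, a minimal sketch of iterating rows of such a dump follows; the file name rows.jsonl and the JSON-lines layout are assumptions made for illustration, not a description of the actual storage format.

import json

with open("rows.jsonl") as fh:
    for line in fh:
        row = json.loads(line)
        # Each row pairs a repository and test file with a code path.
        print(row["repo_name"], row["test_path"], "->", row["code_path"])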
import pandas as pd
import pandas._testing as tm


class TestUnaryOps:
    def test_invert(self):
        a = pd.array([True, False, None], dtype="boolean")
        expected = pd.array([False, True, None], dtype="boolean")
        tm.assert_extension_array_equal(~a, expected)

        expected = pd.Series(expected, index=["a", "b", "c"], name="name")
        result = ~pd.Series(a, index=["a", "b", "c"], name="name")
        tm.assert_series_equal(result, expected)

        df = pd.DataFrame({"A": a, "B": [True, False, False]}, index=["a", "b", "c"])
        result = ~df
        expected = pd.DataFrame(
            {"A": expected, "B": [False, True, True]}, index=["a", "b", "c"]
        )
        tm.assert_frame_equal(result, expected)
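The snippet above exercises the invert operator on the nullable "boolean" dtype across array, Series, and DataFrame. For reference, a small standalone sketch of the behaviour it asserts (not additional test code): inversion flips True/False element-wise and leaves missing values as <NA>.

import pandas as pd

a = pd.array([True, False, None], dtype="boolean")
print(~a)             # [False, True, <NA>], dtype: boolean
print(~pd.Series(a))  # same values, as a Series of dtype "boolean"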
""" Tests that work on both the Python and C engines but do not have a specific classification into the other test modules. """ import codecs import csv from datetime import datetime from io import StringIO import os import platform from urllib.error import URLError import numpy as np import pytest from pandas._libs.tslib import Timestamp from pandas.errors import DtypeWarning, EmptyDataError, ParserError import pandas.util._test_decorators as td from pandas import DataFrame, Index, MultiIndex, Series, compat, concat import pandas._testing as tm from pandas.io.parsers import CParserWrapper, TextFileReader, TextParser def test_override_set_noconvert_columns(): # see gh-17351 # # Usecols needs to be sorted in _set_noconvert_columns based # on the test_usecols_with_parse_dates test from test_usecols.py class MyTextFileReader(TextFileReader): def __init__(self): self._currow = 0 self.squeeze = False class MyCParserWrapper(CParserWrapper): def _set_noconvert_columns(self): if self.usecols_dtype == "integer": # self.usecols is a set, which is documented as unordered # but in practice, a CPython set of integers is sorted. # In other implementations this assumption does not hold. # The following code simulates a different order, which # before GH 17351 would cause the wrong columns to be # converted via the parse_dates parameter self.usecols = list(self.usecols) self.usecols.reverse() return CParserWrapper._set_noconvert_columns(self) data = """a,b,c,d,e 0,1,20140101,0900,4 0,1,20140102,1000,4""" parse_dates = [[1, 2]] cols = { "a": [0, 0], "c_d": [Timestamp("2014-01-01 09:00:00"), Timestamp("2014-01-02 10:00:00")], } expected = DataFrame(cols, columns=["c_d", "a"]) parser = MyTextFileReader() parser.options = { "usecols": [0, 2, 3], "parse_dates": parse_dates, "delimiter": ",", } parser._engine = MyCParserWrapper(StringIO(data), **parser.options) result = parser.read() tm.assert_frame_equal(result, expected) def test_empty_decimal_marker(all_parsers): data = """A|B|C 1|2,334|5 10|13|10. """ # Parsers support only length-1 decimals msg = "Only length-1 decimal markers supported" parser = all_parsers with pytest.raises(ValueError, match=msg): parser.read_csv(StringIO(data), decimal="") def test_bad_stream_exception(all_parsers, csv_dir_path): # see gh-13652 # # This test validates that both the Python engine and C engine will # raise UnicodeDecodeError instead of C engine raising ParserError # and swallowing the exception that caused read to fail. path = os.path.join(csv_dir_path, "sauron.SHIFT_JIS.csv") codec = codecs.lookup("utf-8") utf8 = codecs.lookup("utf-8") parser = all_parsers msg = "'utf-8' codec can't decode byte" # Stream must be binary UTF8. 
with open(path, "rb") as handle, codecs.StreamRecoder( handle, utf8.encode, utf8.decode, codec.streamreader, codec.streamwriter ) as stream: with pytest.raises(UnicodeDecodeError, match=msg): parser.read_csv(stream) def test_read_csv_local(all_parsers, csv1): prefix = "file:///" if compat.is_platform_windows() else "file://" parser = all_parsers fname = prefix + str(os.path.abspath(csv1)) result = parser.read_csv(fname, index_col=0, parse_dates=True) expected = DataFrame( [ [0.980269, 3.685731, -0.364216805298, -1.159738], [1.047916, -0.041232, -0.16181208307, 0.212549], [0.498581, 0.731168, -0.537677223318, 1.346270], [1.120202, 1.567621, 0.00364077397681, 0.675253], [-0.487094, 0.571455, -1.6116394093, 0.103469], [0.836649, 0.246462, 0.588542635376, 1.062782], [-0.157161, 1.340307, 1.1957779562, -1.097007], ], columns=["A", "B", "C", "D"], index=Index( [ datetime(2000, 1, 3), datetime(2000, 1, 4), datetime(2000, 1, 5), datetime(2000, 1, 6), datetime(2000, 1, 7), datetime(2000, 1, 10), datetime(2000, 1, 11), ], name="index", ), ) tm.assert_frame_equal(result, expected) def test_1000_sep(all_parsers): parser = all_parsers data = """A|B|C 1|2,334|5 10|13|10. """ expected = DataFrame({"A": [1, 10], "B": [2334, 13], "C": [5, 10.0]}) result = parser.read_csv(StringIO(data), sep="|", thousands=",") tm.assert_frame_equal(result, expected) def test_squeeze(all_parsers): data = """\ a,1 b,2 c,3 """ parser = all_parsers index = Index(["a", "b", "c"], name=0) expected = Series([1, 2, 3], name=1, index=index) result = parser.read_csv(StringIO(data), index_col=0, header=None, squeeze=True) tm.assert_series_equal(result, expected) # see gh-8217 # # Series should not be a view. assert not result._is_view def test_malformed(all_parsers): # see gh-6607 parser = all_parsers data = """ignore A,B,C 1,2,3 # comment 1,2,3,4,5 2,3,4 """ msg = "Expected 3 fields in line 4, saw 5" with pytest.raises(ParserError, match=msg): parser.read_csv(StringIO(data), header=1, comment="#") @pytest.mark.parametrize("nrows", [5, 3, None]) def test_malformed_chunks(all_parsers, nrows): data = """ignore A,B,C skip 1,2,3 3,5,10 # comment 1,2,3,4,5 2,3,4 """ parser = all_parsers msg = "Expected 3 fields in line 6, saw 5" reader = parser.read_csv( StringIO(data), header=1, comment="#", iterator=True, chunksize=1, skiprows=[2] ) with pytest.raises(ParserError, match=msg): reader.read(nrows) def test_unnamed_columns(all_parsers): data = """A,B,C,, 1,2,3,4,5 6,7,8,9,10 11,12,13,14,15 """ parser = all_parsers expected = DataFrame( [[1, 2, 3, 4, 5], [6, 7, 8, 9, 10], [11, 12, 13, 14, 15]], dtype=np.int64, columns=["A", "B", "C", "Unnamed: 3", "Unnamed: 4"], ) result = parser.read_csv(StringIO(data)) tm.assert_frame_equal(result, expected) def test_csv_mixed_type(all_parsers): data = """A,B,C a,1,2 b,3,4 c,4,5 """ parser = all_parsers expected = DataFrame({"A": ["a", "b", "c"], "B": [1, 3, 4], "C": [2, 4, 5]}) result = parser.read_csv(StringIO(data)) tm.assert_frame_equal(result, expected) def test_read_csv_low_memory_no_rows_with_index(all_parsers): # see gh-21141 parser = all_parsers if not parser.low_memory: pytest.skip("This is a low-memory specific test") data = """A,B,C 1,1,1,2 2,2,3,4 3,3,4,5 """ result = parser.read_csv(StringIO(data), low_memory=True, index_col=0, nrows=0) expected = DataFrame(columns=["A", "B", "C"]) tm.assert_frame_equal(result, expected) def test_read_csv_dataframe(all_parsers, csv1): parser = all_parsers result = parser.read_csv(csv1, index_col=0, parse_dates=True) expected = DataFrame( [ [0.980269, 
3.685731, -0.364216805298, -1.159738], [1.047916, -0.041232, -0.16181208307, 0.212549], [0.498581, 0.731168, -0.537677223318, 1.346270], [1.120202, 1.567621, 0.00364077397681, 0.675253], [-0.487094, 0.571455, -1.6116394093, 0.103469], [0.836649, 0.246462, 0.588542635376, 1.062782], [-0.157161, 1.340307, 1.1957779562, -1.097007], ], columns=["A", "B", "C", "D"], index=Index( [ datetime(2000, 1, 3), datetime(2000, 1, 4), datetime(2000, 1, 5), datetime(2000, 1, 6), datetime(2000, 1, 7), datetime(2000, 1, 10), datetime(2000, 1, 11), ], name="index", ), ) tm.assert_frame_equal(result, expected) def test_read_csv_no_index_name(all_parsers, csv_dir_path): parser = all_parsers csv2 = os.path.join(csv_dir_path, "test2.csv") result = parser.read_csv(csv2, index_col=0, parse_dates=True) expected = DataFrame( [ [0.980269, 3.685731, -0.364216805298, -1.159738, "foo"], [1.047916, -0.041232, -0.16181208307, 0.212549, "bar"], [0.498581, 0.731168, -0.537677223318, 1.346270, "baz"], [1.120202, 1.567621, 0.00364077397681, 0.675253, "qux"], [-0.487094, 0.571455, -1.6116394093, 0.103469, "foo2"], ], columns=["A", "B", "C", "D", "E"], index=Index( [ datetime(2000, 1, 3), datetime(2000, 1, 4), datetime(2000, 1, 5), datetime(2000, 1, 6), datetime(2000, 1, 7), ] ), ) tm.assert_frame_equal(result, expected) def test_read_csv_wrong_num_columns(all_parsers): # Too few columns. data = """A,B,C,D,E,F 1,2,3,4,5,6 6,7,8,9,10,11,12 11,12,13,14,15,16 """ parser = all_parsers msg = "Expected 6 fields in line 3, saw 7" with pytest.raises(ParserError, match=msg): parser.read_csv(StringIO(data)) def test_read_duplicate_index_explicit(all_parsers): data = """index,A,B,C,D foo,2,3,4,5 bar,7,8,9,10 baz,12,13,14,15 qux,12,13,14,15 foo,12,13,14,15 bar,12,13,14,15 """ parser = all_parsers result = parser.read_csv(StringIO(data), index_col=0) expected = DataFrame( [ [2, 3, 4, 5], [7, 8, 9, 10], [12, 13, 14, 15], [12, 13, 14, 15], [12, 13, 14, 15], [12, 13, 14, 15], ], columns=["A", "B", "C", "D"], index=Index(["foo", "bar", "baz", "qux", "foo", "bar"], name="index"), ) tm.assert_frame_equal(result, expected) def test_read_duplicate_index_implicit(all_parsers): data = """A,B,C,D foo,2,3,4,5 bar,7,8,9,10 baz,12,13,14,15 qux,12,13,14,15 foo,12,13,14,15 bar,12,13,14,15 """ parser = all_parsers result = parser.read_csv(StringIO(data)) expected = DataFrame( [ [2, 3, 4, 5], [7, 8, 9, 10], [12, 13, 14, 15], [12, 13, 14, 15], [12, 13, 14, 15], [12, 13, 14, 15], ], columns=["A", "B", "C", "D"], index=Index(["foo", "bar", "baz", "qux", "foo", "bar"]), ) tm.assert_frame_equal(result, expected) @pytest.mark.parametrize( "data,kwargs,expected", [ ( "A,B\nTrue,1\nFalse,2\nTrue,3", dict(), DataFrame([[True, 1], [False, 2], [True, 3]], columns=["A", "B"]), ), ( "A,B\nYES,1\nno,2\nyes,3\nNo,3\nYes,3", dict(true_values=["yes", "Yes", "YES"], false_values=["no", "NO", "No"]), DataFrame( [[True, 1], [False, 2], [True, 3], [False, 3], [True, 3]], columns=["A", "B"], ), ), ( "A,B\nTRUE,1\nFALSE,2\nTRUE,3", dict(), DataFrame([[True, 1], [False, 2], [True, 3]], columns=["A", "B"]), ), ( "A,B\nfoo,bar\nbar,foo", dict(true_values=["foo"], false_values=["bar"]), DataFrame([[True, False], [False, True]], columns=["A", "B"]), ), ], ) def test_parse_bool(all_parsers, data, kwargs, expected): parser = all_parsers result = parser.read_csv(StringIO(data), **kwargs) tm.assert_frame_equal(result, expected) def test_int_conversion(all_parsers): data = """A,B 1.0,1 2.0,2 3.0,3 """ parser = all_parsers result = parser.read_csv(StringIO(data)) expected = DataFrame([[1.0, 
1], [2.0, 2], [3.0, 3]], columns=["A", "B"]) tm.assert_frame_equal(result, expected) @pytest.mark.parametrize("nrows", [3, 3.0]) def test_read_nrows(all_parsers, nrows): # see gh-10476 data = """index,A,B,C,D foo,2,3,4,5 bar,7,8,9,10 baz,12,13,14,15 qux,12,13,14,15 foo2,12,13,14,15 bar2,12,13,14,15 """ expected = DataFrame( [["foo", 2, 3, 4, 5], ["bar", 7, 8, 9, 10], ["baz", 12, 13, 14, 15]], columns=["index", "A", "B", "C", "D"], ) parser = all_parsers result = parser.read_csv(StringIO(data), nrows=nrows) tm.assert_frame_equal(result, expected) @pytest.mark.parametrize("nrows", [1.2, "foo", -1]) def test_read_nrows_bad(all_parsers, nrows): data = """index,A,B,C,D foo,2,3,4,5 bar,7,8,9,10 baz,12,13,14,15 qux,12,13,14,15 foo2,12,13,14,15 bar2,12,13,14,15 """ msg = r"'nrows' must be an integer >=0" parser = all_parsers with pytest.raises(ValueError, match=msg): parser.read_csv(StringIO(data), nrows=nrows) @pytest.mark.parametrize("index_col", [0, "index"]) def test_read_chunksize_with_index(all_parsers, index_col): parser = all_parsers data = """index,A,B,C,D foo,2,3,4,5 bar,7,8,9,10 baz,12,13,14,15 qux,12,13,14,15 foo2,12,13,14,15 bar2,12,13,14,15 """ reader = parser.read_csv(StringIO(data), index_col=0, chunksize=2) expected = DataFrame( [ ["foo", 2, 3, 4, 5], ["bar", 7, 8, 9, 10], ["baz", 12, 13, 14, 15], ["qux", 12, 13, 14, 15], ["foo2", 12, 13, 14, 15], ["bar2", 12, 13, 14, 15], ], columns=["index", "A", "B", "C", "D"], ) expected = expected.set_index("index") chunks = list(reader) tm.assert_frame_equal(chunks[0], expected[:2]) tm.assert_frame_equal(chunks[1], expected[2:4]) tm.assert_frame_equal(chunks[2], expected[4:]) @pytest.mark.parametrize("chunksize", [1.3, "foo", 0]) def test_read_chunksize_bad(all_parsers, chunksize): data = """index,A,B,C,D foo,2,3,4,5 bar,7,8,9,10 baz,12,13,14,15 qux,12,13,14,15 foo2,12,13,14,15 bar2,12,13,14,15 """ parser = all_parsers msg = r"'chunksize' must be an integer >=1" with pytest.raises(ValueError, match=msg): parser.read_csv(StringIO(data), chunksize=chunksize) @pytest.mark.parametrize("chunksize", [2, 8]) def test_read_chunksize_and_nrows(all_parsers, chunksize): # see gh-15755 data = """index,A,B,C,D foo,2,3,4,5 bar,7,8,9,10 baz,12,13,14,15 qux,12,13,14,15 foo2,12,13,14,15 bar2,12,13,14,15 """ parser = all_parsers kwargs = dict(index_col=0, nrows=5) reader = parser.read_csv(StringIO(data), chunksize=chunksize, **kwargs) expected = parser.read_csv(StringIO(data), **kwargs) tm.assert_frame_equal(concat(reader), expected) def test_read_chunksize_and_nrows_changing_size(all_parsers): data = """index,A,B,C,D foo,2,3,4,5 bar,7,8,9,10 baz,12,13,14,15 qux,12,13,14,15 foo2,12,13,14,15 bar2,12,13,14,15 """ parser = all_parsers kwargs = dict(index_col=0, nrows=5) reader = parser.read_csv(StringIO(data), chunksize=8, **kwargs) expected = parser.read_csv(StringIO(data), **kwargs) tm.assert_frame_equal(reader.get_chunk(size=2), expected.iloc[:2]) tm.assert_frame_equal(reader.get_chunk(size=4), expected.iloc[2:5]) with pytest.raises(StopIteration, match=""): reader.get_chunk(size=3) def test_get_chunk_passed_chunksize(all_parsers): parser = all_parsers data = """A,B,C 1,2,3 4,5,6 7,8,9 1,2,3""" reader = parser.read_csv(StringIO(data), chunksize=2) result = reader.get_chunk() expected = DataFrame([[1, 2, 3], [4, 5, 6]], columns=["A", "B", "C"]) tm.assert_frame_equal(result, expected) @pytest.mark.parametrize("kwargs", [dict(), dict(index_col=0)]) def test_read_chunksize_compat(all_parsers, kwargs): # see gh-12185 data = """index,A,B,C,D foo,2,3,4,5 
bar,7,8,9,10 baz,12,13,14,15 qux,12,13,14,15 foo2,12,13,14,15 bar2,12,13,14,15 """ parser = all_parsers reader = parser.read_csv(StringIO(data), chunksize=2, **kwargs) result = parser.read_csv(StringIO(data), **kwargs) tm.assert_frame_equal(concat(reader), result) def test_read_chunksize_jagged_names(all_parsers): # see gh-23509 parser = all_parsers data = "\n".join(["0"] * 7 + [",".join(["0"] * 10)]) expected = DataFrame([[0] + [np.nan] * 9] * 7 + [[0] * 10]) reader = parser.read_csv(StringIO(data), names=range(10), chunksize=4) result = concat(reader) tm.assert_frame_equal(result, expected) def test_read_data_list(all_parsers): parser = all_parsers kwargs = dict(index_col=0) data = "A,B,C\nfoo,1,2,3\nbar,4,5,6" data_list = [["A", "B", "C"], ["foo", "1", "2", "3"], ["bar", "4", "5", "6"]] expected = parser.read_csv(StringIO(data), **kwargs) parser = TextParser(data_list, chunksize=2, **kwargs) result = parser.read() tm.assert_frame_equal(result, expected) def test_iterator(all_parsers): # see gh-6607 data = """index,A,B,C,D foo,2,3,4,5 bar,7,8,9,10 baz,12,13,14,15 qux,12,13,14,15 foo2,12,13,14,15 bar2,12,13,14,15 """ parser = all_parsers kwargs = dict(index_col=0) expected = parser.read_csv(StringIO(data), **kwargs) reader = parser.read_csv(StringIO(data), iterator=True, **kwargs) first_chunk = reader.read(3) tm.assert_frame_equal(first_chunk, expected[:3]) last_chunk = reader.read(5) tm.assert_frame_equal(last_chunk, expected[3:]) def test_iterator2(all_parsers): parser = all_parsers data = """A,B,C foo,1,2,3 bar,4,5,6 baz,7,8,9 """ reader = parser.read_csv(StringIO(data), iterator=True) result = list(reader) expected = DataFrame( [[1, 2, 3], [4, 5, 6], [7, 8, 9]], index=["foo", "bar", "baz"], columns=["A", "B", "C"], ) tm.assert_frame_equal(result[0], expected) def test_reader_list(all_parsers): data = """index,A,B,C,D foo,2,3,4,5 bar,7,8,9,10 baz,12,13,14,15 qux,12,13,14,15 foo2,12,13,14,15 bar2,12,13,14,15 """ parser = all_parsers kwargs = dict(index_col=0) lines = list(csv.reader(StringIO(data))) reader = TextParser(lines, chunksize=2, **kwargs) expected = parser.read_csv(StringIO(data), **kwargs) chunks = list(reader) tm.assert_frame_equal(chunks[0], expected[:2]) tm.assert_frame_equal(chunks[1], expected[2:4]) tm.assert_frame_equal(chunks[2], expected[4:]) def test_reader_list_skiprows(all_parsers): data = """index,A,B,C,D foo,2,3,4,5 bar,7,8,9,10 baz,12,13,14,15 qux,12,13,14,15 foo2,12,13,14,15 bar2,12,13,14,15 """ parser = all_parsers kwargs = dict(index_col=0) lines = list(csv.reader(StringIO(data))) reader = TextParser(lines, chunksize=2, skiprows=[1], **kwargs) expected = parser.read_csv(StringIO(data), **kwargs) chunks = list(reader) tm.assert_frame_equal(chunks[0], expected[1:3]) def test_iterator_stop_on_chunksize(all_parsers): # gh-3967: stopping iteration when chunksize is specified parser = all_parsers data = """A,B,C foo,1,2,3 bar,4,5,6 baz,7,8,9 """ reader = parser.read_csv(StringIO(data), chunksize=1) result = list(reader) assert len(result) == 3 expected = DataFrame( [[1, 2, 3], [4, 5, 6], [7, 8, 9]], index=["foo", "bar", "baz"], columns=["A", "B", "C"], ) tm.assert_frame_equal(concat(result), expected) @pytest.mark.parametrize( "kwargs", [dict(iterator=True, chunksize=1), dict(iterator=True), dict(chunksize=1)] ) def test_iterator_skipfooter_errors(all_parsers, kwargs): msg = "'skipfooter' not supported for 'iteration'" parser = all_parsers data = "a\n1\n2" with pytest.raises(ValueError, match=msg): parser.read_csv(StringIO(data), skipfooter=1, **kwargs) def 
test_nrows_skipfooter_errors(all_parsers): msg = "'skipfooter' not supported with 'nrows'" data = "a\n1\n2\n3\n4\n5\n6" parser = all_parsers with pytest.raises(ValueError, match=msg): parser.read_csv(StringIO(data), skipfooter=1, nrows=5) @pytest.mark.parametrize( "data,kwargs,expected", [ ( """foo,2,3,4,5 bar,7,8,9,10 baz,12,13,14,15 qux,12,13,14,15 foo2,12,13,14,15 bar2,12,13,14,15 """, dict(index_col=0, names=["index", "A", "B", "C", "D"]), DataFrame( [ [2, 3, 4, 5], [7, 8, 9, 10], [12, 13, 14, 15], [12, 13, 14, 15], [12, 13, 14, 15], [12, 13, 14, 15], ], index=Index(["foo", "bar", "baz", "qux", "foo2", "bar2"], name="index"), columns=["A", "B", "C", "D"], ), ), ( """foo,one,2,3,4,5 foo,two,7,8,9,10 foo,three,12,13,14,15 bar,one,12,13,14,15 bar,two,12,13,14,15 """, dict(index_col=[0, 1], names=["index1", "index2", "A", "B", "C", "D"]), DataFrame( [ [2, 3, 4, 5], [7, 8, 9, 10], [12, 13, 14, 15], [12, 13, 14, 15], [12, 13, 14, 15], ], index=MultiIndex.from_tuples( [ ("foo", "one"), ("foo", "two"), ("foo", "three"), ("bar", "one"), ("bar", "two"), ], names=["index1", "index2"], ), columns=["A", "B", "C", "D"], ), ), ], ) def test_pass_names_with_index(all_parsers, data, kwargs, expected): parser = all_parsers result = parser.read_csv(StringIO(data), **kwargs) tm.assert_frame_equal(result, expected) @pytest.mark.parametrize("index_col", [[0, 1], [1, 0]]) def test_multi_index_no_level_names(all_parsers, index_col): data = """index1,index2,A,B,C,D foo,one,2,3,4,5 foo,two,7,8,9,10 foo,three,12,13,14,15 bar,one,12,13,14,15 bar,two,12,13,14,15 """ headless_data = "\n".join(data.split("\n")[1:]) names = ["A", "B", "C", "D"] parser = all_parsers result = parser.read_csv( StringIO(headless_data), index_col=index_col, header=None, names=names ) expected = parser.read_csv(StringIO(data), index_col=index_col) # No index names in headless data. 
expected.index.names = [None] * 2 tm.assert_frame_equal(result, expected) def test_multi_index_no_level_names_implicit(all_parsers): parser = all_parsers data = """A,B,C,D foo,one,2,3,4,5 foo,two,7,8,9,10 foo,three,12,13,14,15 bar,one,12,13,14,15 bar,two,12,13,14,15 """ result = parser.read_csv(StringIO(data)) expected = DataFrame( [ [2, 3, 4, 5], [7, 8, 9, 10], [12, 13, 14, 15], [12, 13, 14, 15], [12, 13, 14, 15], ], columns=["A", "B", "C", "D"], index=MultiIndex.from_tuples( [ ("foo", "one"), ("foo", "two"), ("foo", "three"), ("bar", "one"), ("bar", "two"), ] ), ) tm.assert_frame_equal(result, expected) @pytest.mark.parametrize( "data,expected,header", [ ("a,b", DataFrame(columns=["a", "b"]), [0]), ( "a,b\nc,d", DataFrame(columns=MultiIndex.from_tuples([("a", "c"), ("b", "d")])), [0, 1], ), ], ) @pytest.mark.parametrize("round_trip", [True, False]) def test_multi_index_blank_df(all_parsers, data, expected, header, round_trip): # see gh-14545 parser = all_parsers data = expected.to_csv(index=False) if round_trip else data result = parser.read_csv(StringIO(data), header=header) tm.assert_frame_equal(result, expected) def test_no_unnamed_index(all_parsers): parser = all_parsers data = """ id c0 c1 c2 0 1 0 a b 1 2 0 c d 2 2 2 e f """ result = parser.read_csv(StringIO(data), sep=" ") expected = DataFrame( [[0, 1, 0, "a", "b"], [1, 2, 0, "c", "d"], [2, 2, 2, "e", "f"]], columns=["Unnamed: 0", "id", "c0", "c1", "c2"], ) tm.assert_frame_equal(result, expected) def test_read_csv_parse_simple_list(all_parsers): parser = all_parsers data = """foo bar baz qux foo foo bar""" result = parser.read_csv(StringIO(data), header=None) expected = DataFrame(["foo", "bar baz", "qux foo", "foo", "bar"]) tm.assert_frame_equal(result, expected) @tm.network def test_url(all_parsers, csv_dir_path): # TODO: FTP testing parser = all_parsers kwargs = dict(sep="\t") url = ( "https://raw.github.com/pandas-dev/pandas/master/" "pandas/tests/io/parser/data/salaries.csv" ) url_result = parser.read_csv(url, **kwargs) local_path = os.path.join(csv_dir_path, "salaries.csv") local_result = parser.read_csv(local_path, **kwargs) tm.assert_frame_equal(url_result, local_result) @pytest.mark.slow def test_local_file(all_parsers, csv_dir_path): parser = all_parsers kwargs = dict(sep="\t") local_path = os.path.join(csv_dir_path, "salaries.csv") local_result = parser.read_csv(local_path, **kwargs) url = "file://localhost/" + local_path try: url_result = parser.read_csv(url, **kwargs) tm.assert_frame_equal(url_result, local_result) except URLError: # Fails on some systems. 
pytest.skip("Failing on: " + " ".join(platform.uname())) def test_path_path_lib(all_parsers): parser = all_parsers df = tm.makeDataFrame() result = tm.round_trip_pathlib(df.to_csv, lambda p: parser.read_csv(p, index_col=0)) tm.assert_frame_equal(df, result) def test_path_local_path(all_parsers): parser = all_parsers df = tm.makeDataFrame() result = tm.round_trip_localpath( df.to_csv, lambda p: parser.read_csv(p, index_col=0) ) tm.assert_frame_equal(df, result) def test_nonexistent_path(all_parsers): # gh-2428: pls no segfault # gh-14086: raise more helpful FileNotFoundError # GH#29233 "File foo" instead of "File b'foo'" parser = all_parsers path = f"{tm.rands(10)}.csv" msg = r"\[Errno 2\]" with pytest.raises(FileNotFoundError, match=msg) as e: parser.read_csv(path) assert path == e.value.filename @td.skip_if_windows # os.chmod does not work in windows def test_no_permission(all_parsers): # GH 23784 parser = all_parsers msg = r"\[Errno 13\]" with tm.ensure_clean() as path: os.chmod(path, 0) # make file unreadable # verify that this process cannot open the file (not running as sudo) try: with open(path): pass pytest.skip("Running as sudo.") except PermissionError: pass with pytest.raises(PermissionError, match=msg) as e: parser.read_csv(path) assert path == e.value.filename def test_missing_trailing_delimiters(all_parsers): parser = all_parsers data = """A,B,C,D 1,2,3,4 1,3,3, 1,4,5""" result = parser.read_csv(StringIO(data)) expected = DataFrame( [[1, 2, 3, 4], [1, 3, 3, np.nan], [1, 4, 5, np.nan]], columns=["A", "B", "C", "D"], ) tm.assert_frame_equal(result, expected) def test_skip_initial_space(all_parsers): data = ( '"09-Apr-2012", "01:10:18.300", 2456026.548822908, 12849, ' "1.00361, 1.12551, 330.65659, 0355626618.16711, 73.48821, " "314.11625, 1917.09447, 179.71425, 80.000, 240.000, -350, " "70.06056, 344.98370, 1, 1, -0.689265, -0.692787, " "0.212036, 14.7674, 41.605, -9999.0, -9999.0, " "-9999.0, -9999.0, -9999.0, -9999.0, 000, 012, 128" ) parser = all_parsers result = parser.read_csv( StringIO(data), names=list(range(33)), header=None, na_values=["-9999.0"], skipinitialspace=True, ) expected = DataFrame( [ [ "09-Apr-2012", "01:10:18.300", 2456026.548822908, 12849, 1.00361, 1.12551, 330.65659, 355626618.16711, 73.48821, 314.11625, 1917.09447, 179.71425, 80.0, 240.0, -350, 70.06056, 344.9837, 1, 1, -0.689265, -0.692787, 0.212036, 14.7674, 41.605, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, 0, 12, 128, ] ] ) tm.assert_frame_equal(result, expected) def test_trailing_delimiters(all_parsers): # see gh-2442 data = """A,B,C 1,2,3, 4,5,6, 7,8,9,""" parser = all_parsers result = parser.read_csv(StringIO(data), index_col=False) expected = DataFrame({"A": [1, 4, 7], "B": [2, 5, 8], "C": [3, 6, 9]}) tm.assert_frame_equal(result, expected) def test_escapechar(all_parsers): # https://stackoverflow.com/questions/13824840/feature-request-for- # pandas-read-csv data = '''SEARCH_TERM,ACTUAL_URL "bra tv bord","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord" "tv p\xc3\xa5 hjul","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord" "SLAGBORD, \\"Bergslagen\\", IKEA:s 1700-tals series","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"''' # noqa parser = all_parsers result = parser.read_csv( StringIO(data), escapechar="\\", quotechar='"', encoding="utf-8" ) assert 
result["SEARCH_TERM"][2] == 'SLAGBORD, "Bergslagen", IKEA:s 1700-tals series' tm.assert_index_equal(result.columns, Index(["SEARCH_TERM", "ACTUAL_URL"])) def test_int64_min_issues(all_parsers): # see gh-2599 parser = all_parsers data = "A,B\n0,0\n0," result = parser.read_csv(StringIO(data)) expected = DataFrame({"A": [0, 0], "B": [0, np.nan]}) tm.assert_frame_equal(result, expected) def test_parse_integers_above_fp_precision(all_parsers): data = """Numbers 17007000002000191 17007000002000191 17007000002000191 17007000002000191 17007000002000192 17007000002000192 17007000002000192 17007000002000192 17007000002000192 17007000002000194""" parser = all_parsers result = parser.read_csv(StringIO(data)) expected = DataFrame( { "Numbers": [ 17007000002000191, 17007000002000191, 17007000002000191, 17007000002000191, 17007000002000192, 17007000002000192, 17007000002000192, 17007000002000192, 17007000002000192, 17007000002000194, ] } ) tm.assert_frame_equal(result, expected) def test_chunks_have_consistent_numerical_type(all_parsers): parser = all_parsers integers = [str(i) for i in range(499999)] data = "a\n" + "\n".join(integers + ["1.0", "2.0"] + integers) # Coercions should work without warnings. with tm.assert_produces_warning(None): result = parser.read_csv(StringIO(data)) assert type(result.a[0]) is np.float64 assert result.a.dtype == np.float def test_warn_if_chunks_have_mismatched_type(all_parsers): warning_type = None parser = all_parsers integers = [str(i) for i in range(499999)] data = "a\n" + "\n".join(integers + ["a", "b"] + integers) # see gh-3866: if chunks are different types and can't # be coerced using numerical types, then issue warning. if parser.engine == "c" and parser.low_memory: warning_type = DtypeWarning with tm.assert_produces_warning(warning_type): df = parser.read_csv(StringIO(data)) assert df.a.dtype == np.object @pytest.mark.parametrize("sep", [" ", r"\s+"]) def test_integer_overflow_bug(all_parsers, sep): # see gh-2601 data = "65248E10 11\n55555E55 22\n" parser = all_parsers result = parser.read_csv(StringIO(data), header=None, sep=sep) expected = DataFrame([[6.5248e14, 11], [5.5555e59, 22]]) tm.assert_frame_equal(result, expected) def test_catch_too_many_names(all_parsers): # see gh-5156 data = """\ 1,2,3 4,,6 7,8,9 10,11,12\n""" parser = all_parsers msg = ( "Too many columns specified: expected 4 and found 3" if parser.engine == "c" else "Number of passed names did not match " "number of header fields in the file" ) with pytest.raises(ValueError, match=msg): parser.read_csv(StringIO(data), header=0, names=["a", "b", "c", "d"]) def test_ignore_leading_whitespace(all_parsers): # see gh-3374, gh-6607 parser = all_parsers data = " a b c\n 1 2 3\n 4 5 6\n 7 8 9" result = parser.read_csv(StringIO(data), sep=r"\s+") expected = DataFrame({"a": [1, 4, 7], "b": [2, 5, 8], "c": [3, 6, 9]}) tm.assert_frame_equal(result, expected) def test_chunk_begins_with_newline_whitespace(all_parsers): # see gh-10022 parser = all_parsers data = "\n hello\nworld\n" result = parser.read_csv(StringIO(data), header=None) expected = DataFrame([" hello", "world"]) tm.assert_frame_equal(result, expected) def test_empty_with_index(all_parsers): # see gh-10184 data = "x,y" parser = all_parsers result = parser.read_csv(StringIO(data), index_col=0) expected = DataFrame(columns=["y"], index=Index([], name="x")) tm.assert_frame_equal(result, expected) def test_empty_with_multi_index(all_parsers): # see gh-10467 data = "x,y,z" parser = all_parsers result = parser.read_csv(StringIO(data), index_col=["x", 
"y"]) expected = DataFrame( columns=["z"], index=MultiIndex.from_arrays([[]] * 2, names=["x", "y"]) ) tm.assert_frame_equal(result, expected) def test_empty_with_reversed_multi_index(all_parsers): data = "x,y,z" parser = all_parsers result = parser.read_csv(StringIO(data), index_col=[1, 0]) expected = DataFrame( columns=["z"], index=MultiIndex.from_arrays([[]] * 2, names=["y", "x"]) ) tm.assert_frame_equal(result, expected) def test_float_parser(all_parsers): # see gh-9565 parser = all_parsers data = "45e-1,4.5,45.,inf,-inf" result = parser.read_csv(StringIO(data), header=None) expected = DataFrame([[float(s) for s in data.split(",")]]) tm.assert_frame_equal(result, expected) def test_scientific_no_exponent(all_parsers): # see gh-12215 df = DataFrame.from_dict({"w": ["2e"], "x": ["3E"], "y": ["42e"], "z": ["632E"]}) data = df.to_csv(index=False) parser = all_parsers for precision in parser.float_precision_choices: df_roundtrip = parser.read_csv(StringIO(data), float_precision=precision) tm.assert_frame_equal(df_roundtrip, df) @pytest.mark.parametrize("conv", [None, np.int64, np.uint64]) def test_int64_overflow(all_parsers, conv): data = """ID 00013007854817840016671868 00013007854817840016749251 00013007854817840016754630 00013007854817840016781876 00013007854817840017028824 00013007854817840017963235 00013007854817840018860166""" parser = all_parsers if conv is None: # 13007854817840016671868 > UINT64_MAX, so this # will overflow and return object as the dtype. result = parser.read_csv(StringIO(data)) expected = DataFrame( [ "00013007854817840016671868", "00013007854817840016749251", "00013007854817840016754630", "00013007854817840016781876", "00013007854817840017028824", "00013007854817840017963235", "00013007854817840018860166", ], columns=["ID"], ) tm.assert_frame_equal(result, expected) else: # 13007854817840016671868 > UINT64_MAX, so attempts # to cast to either int64 or uint64 will result in # an OverflowError being raised. msg = ( "(Python int too large to convert to C long)|" "(long too big to convert)|" "(int too big to convert)" ) with pytest.raises(OverflowError, match=msg): parser.read_csv(StringIO(data), converters={"ID": conv}) @pytest.mark.parametrize( "val", [np.iinfo(np.uint64).max, np.iinfo(np.int64).max, np.iinfo(np.int64).min] ) def test_int64_uint64_range(all_parsers, val): # These numbers fall right inside the int64-uint64 # range, so they should be parsed as string. parser = all_parsers result = parser.read_csv(StringIO(str(val)), header=None) expected = DataFrame([val]) tm.assert_frame_equal(result, expected) @pytest.mark.parametrize( "val", [np.iinfo(np.uint64).max + 1, np.iinfo(np.int64).min - 1] ) def test_outside_int64_uint64_range(all_parsers, val): # These numbers fall just outside the int64-uint64 # range, so they should be parsed as string. parser = all_parsers result = parser.read_csv(StringIO(str(val)), header=None) expected = DataFrame([str(val)]) tm.assert_frame_equal(result, expected) @pytest.mark.parametrize("exp_data", [[str(-1), str(2 ** 63)], [str(2 ** 63), str(-1)]]) def test_numeric_range_too_wide(all_parsers, exp_data): # No numerical dtype can hold both negative and uint64 # values, so they should be cast as string. 
parser = all_parsers data = "\n".join(exp_data) expected = DataFrame(exp_data) result = parser.read_csv(StringIO(data), header=None) tm.assert_frame_equal(result, expected) @pytest.mark.parametrize("iterator", [True, False]) def test_empty_with_nrows_chunksize(all_parsers, iterator): # see gh-9535 parser = all_parsers expected = DataFrame(columns=["foo", "bar"]) nrows = 10 data = StringIO("foo,bar\n") if iterator: result = next(iter(parser.read_csv(data, chunksize=nrows))) else: result = parser.read_csv(data, nrows=nrows) tm.assert_frame_equal(result, expected) @pytest.mark.parametrize( "data,kwargs,expected,msg", [ # gh-10728: WHITESPACE_LINE ( "a,b,c\n4,5,6\n ", dict(), DataFrame([[4, 5, 6]], columns=["a", "b", "c"]), None, ), # gh-10548: EAT_LINE_COMMENT ( "a,b,c\n4,5,6\n#comment", dict(comment="#"), DataFrame([[4, 5, 6]], columns=["a", "b", "c"]), None, ), # EAT_CRNL_NOP ( "a,b,c\n4,5,6\n\r", dict(), DataFrame([[4, 5, 6]], columns=["a", "b", "c"]), None, ), # EAT_COMMENT ( "a,b,c\n4,5,6#comment", dict(comment="#"), DataFrame([[4, 5, 6]], columns=["a", "b", "c"]), None, ), # SKIP_LINE ( "a,b,c\n4,5,6\nskipme", dict(skiprows=[2]), DataFrame([[4, 5, 6]], columns=["a", "b", "c"]), None, ), # EAT_LINE_COMMENT ( "a,b,c\n4,5,6\n#comment", dict(comment="#", skip_blank_lines=False), DataFrame([[4, 5, 6]], columns=["a", "b", "c"]), None, ), # IN_FIELD ( "a,b,c\n4,5,6\n ", dict(skip_blank_lines=False), DataFrame([["4", 5, 6], [" ", None, None]], columns=["a", "b", "c"]), None, ), # EAT_CRNL ( "a,b,c\n4,5,6\n\r", dict(skip_blank_lines=False), DataFrame([[4, 5, 6], [None, None, None]], columns=["a", "b", "c"]), None, ), # ESCAPED_CHAR ( "a,b,c\n4,5,6\n\\", dict(escapechar="\\"), None, "(EOF following escape character)|(unexpected end of data)", ), # ESCAPE_IN_QUOTED_FIELD ( 'a,b,c\n4,5,6\n"\\', dict(escapechar="\\"), None, "(EOF inside string starting at row 2)|(unexpected end of data)", ), # IN_QUOTED_FIELD ( 'a,b,c\n4,5,6\n"', dict(escapechar="\\"), None, "(EOF inside string starting at row 2)|(unexpected end of data)", ), ], ids=[ "whitespace-line", "eat-line-comment", "eat-crnl-nop", "eat-comment", "skip-line", "eat-line-comment", "in-field", "eat-crnl", "escaped-char", "escape-in-quoted-field", "in-quoted-field", ], ) def test_eof_states(all_parsers, data, kwargs, expected, msg): # see gh-10728, gh-10548 parser = all_parsers if expected is None: with pytest.raises(ParserError, match=msg): parser.read_csv(StringIO(data), **kwargs) else: result = parser.read_csv(StringIO(data), **kwargs) tm.assert_frame_equal(result, expected) @pytest.mark.parametrize("usecols", [None, [0, 1], ["a", "b"]]) def test_uneven_lines_with_usecols(all_parsers, usecols): # see gh-12203 parser = all_parsers data = r"""a,b,c 0,1,2 3,4,5,6,7 8,9,10""" if usecols is None: # Make sure that an error is still raised # when the "usecols" parameter is not provided. msg = r"Expected \d+ fields in line \d+, saw \d+" with pytest.raises(ParserError, match=msg): parser.read_csv(StringIO(data)) else: expected = DataFrame({"a": [0, 3, 8], "b": [1, 4, 9]}) result = parser.read_csv(StringIO(data), usecols=usecols) tm.assert_frame_equal(result, expected) @pytest.mark.parametrize( "data,kwargs,expected", [ # First, check to see that the response of parser when faced with no # provided columns raises the correct error, with or without usecols. 
("", dict(), None), ("", dict(usecols=["X"]), None), ( ",,", dict(names=["Dummy", "X", "Dummy_2"], usecols=["X"]), DataFrame(columns=["X"], index=[0], dtype=np.float64), ), ( "", dict(names=["Dummy", "X", "Dummy_2"], usecols=["X"]), DataFrame(columns=["X"]), ), ], ) def test_read_empty_with_usecols(all_parsers, data, kwargs, expected): # see gh-12493 parser = all_parsers if expected is None: msg = "No columns to parse from file" with pytest.raises(EmptyDataError, match=msg): parser.read_csv(StringIO(data), **kwargs) else: result = parser.read_csv(StringIO(data), **kwargs) tm.assert_frame_equal(result, expected) @pytest.mark.parametrize( "kwargs,expected", [ # gh-8661, gh-8679: this should ignore six lines, including # lines with trailing whitespace and blank lines. ( dict( header=None, delim_whitespace=True, skiprows=[0, 1, 2, 3, 5, 6], skip_blank_lines=True, ), DataFrame([[1.0, 2.0, 4.0], [5.1, np.nan, 10.0]]), ), # gh-8983: test skipping set of rows after a row with trailing spaces. ( dict( delim_whitespace=True, skiprows=[1, 2, 3, 5, 6], skip_blank_lines=True ), DataFrame({"A": [1.0, 5.1], "B": [2.0, np.nan], "C": [4.0, 10]}), ), ], ) def test_trailing_spaces(all_parsers, kwargs, expected): data = "A B C \nrandom line with trailing spaces \nskip\n1,2,3\n1,2.,4.\nrandom line with trailing tabs\t\t\t\n \n5.1,NaN,10.0\n" # noqa parser = all_parsers result = parser.read_csv(StringIO(data.replace(",", " ")), **kwargs) tm.assert_frame_equal(result, expected) def test_raise_on_sep_with_delim_whitespace(all_parsers): # see gh-6607 data = "a b c\n1 2 3" parser = all_parsers with pytest.raises(ValueError, match="you can only specify one"): parser.read_csv(StringIO(data), sep=r"\s", delim_whitespace=True) @pytest.mark.parametrize("delim_whitespace", [True, False]) def test_single_char_leading_whitespace(all_parsers, delim_whitespace): # see gh-9710 parser = all_parsers data = """\ MyColumn a b a b\n""" expected = DataFrame({"MyColumn": list("abab")}) result = parser.read_csv( StringIO(data), skipinitialspace=True, delim_whitespace=delim_whitespace ) tm.assert_frame_equal(result, expected) @pytest.mark.parametrize( "sep,skip_blank_lines,exp_data", [ (",", True, [[1.0, 2.0, 4.0], [5.0, np.nan, 10.0], [-70.0, 0.4, 1.0]]), (r"\s+", True, [[1.0, 2.0, 4.0], [5.0, np.nan, 10.0], [-70.0, 0.4, 1.0]]), ( ",", False, [ [1.0, 2.0, 4.0], [np.nan, np.nan, np.nan], [np.nan, np.nan, np.nan], [5.0, np.nan, 10.0], [np.nan, np.nan, np.nan], [-70.0, 0.4, 1.0], ], ), ], ) def test_empty_lines(all_parsers, sep, skip_blank_lines, exp_data): parser = all_parsers data = """\ A,B,C 1,2.,4. 5.,NaN,10.0 -70,.4,1 """ if sep == r"\s+": data = data.replace(",", " ") result = parser.read_csv(StringIO(data), sep=sep, skip_blank_lines=skip_blank_lines) expected = DataFrame(exp_data, columns=["A", "B", "C"]) tm.assert_frame_equal(result, expected) def test_whitespace_lines(all_parsers): parser = all_parsers data = """ \t \t\t \t A,B,C \t 1,2.,4. 
5.,NaN,10.0 """ expected = DataFrame([[1, 2.0, 4.0], [5.0, np.nan, 10.0]], columns=["A", "B", "C"]) result = parser.read_csv(StringIO(data)) tm.assert_frame_equal(result, expected) @pytest.mark.parametrize( "data,expected", [ ( """ A B C D a 1 2 3 4 b 1 2 3 4 c 1 2 3 4 """, DataFrame( [[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4]], columns=["A", "B", "C", "D"], index=["a", "b", "c"], ), ), ( " a b c\n1 2 3 \n4 5 6\n 7 8 9", DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]], columns=["a", "b", "c"]), ), ], ) def test_whitespace_regex_separator(all_parsers, data, expected): # see gh-6607 parser = all_parsers result = parser.read_csv(StringIO(data), sep=r"\s+") tm.assert_frame_equal(result, expected) def test_verbose_read(all_parsers, capsys): parser = all_parsers data = """a,b,c,d one,1,2,3 one,1,2,3 ,1,2,3 one,1,2,3 ,1,2,3 ,1,2,3 one,1,2,3 two,1,2,3""" # Engines are verbose in different ways. parser.read_csv(StringIO(data), verbose=True) captured = capsys.readouterr() if parser.engine == "c": assert "Tokenization took:" in captured.out assert "Parser memory cleanup took:" in captured.out else: # Python engine assert captured.out == "Filled 3 NA values in column a\n" def test_verbose_read2(all_parsers, capsys): parser = all_parsers data = """a,b,c,d one,1,2,3 two,1,2,3 three,1,2,3 four,1,2,3 five,1,2,3 ,1,2,3 seven,1,2,3 eight,1,2,3""" parser.read_csv(StringIO(data), verbose=True, index_col=0) captured = capsys.readouterr() # Engines are verbose in different ways. if parser.engine == "c": assert "Tokenization took:" in captured.out assert "Parser memory cleanup took:" in captured.out else: # Python engine assert captured.out == "Filled 1 NA values in column a\n" def test_iteration_open_handle(all_parsers): parser = all_parsers kwargs = dict(squeeze=True, header=None) with tm.ensure_clean() as path: with open(path, "w") as f: f.write("AAA\nBBB\nCCC\nDDD\nEEE\nFFF\nGGG") with open(path, "r") as f: for line in f: if "CCC" in line: break result = parser.read_csv(f, **kwargs) expected = Series(["DDD", "EEE", "FFF", "GGG"], name=0) tm.assert_series_equal(result, expected) @pytest.mark.parametrize( "data,thousands,decimal", [ ( """A|B|C 1|2,334.01|5 10|13|10. 
""", ",", ".", ), ( """A|B|C 1|2.334,01|5 10|13|10, """, ".", ",", ), ], ) def test_1000_sep_with_decimal(all_parsers, data, thousands, decimal): parser = all_parsers expected = DataFrame({"A": [1, 10], "B": [2334.01, 13], "C": [5, 10.0]}) result = parser.read_csv( StringIO(data), sep="|", thousands=thousands, decimal=decimal ) tm.assert_frame_equal(result, expected) def test_euro_decimal_format(all_parsers): parser = all_parsers data = """Id;Number1;Number2;Text1;Text2;Number3 1;1521,1541;187101,9543;ABC;poi;4,738797819 2;121,12;14897,76;DEF;uyt;0,377320872 3;878,158;108013,434;GHI;rez;2,735694704""" result = parser.read_csv(StringIO(data), sep=";", decimal=",") expected = DataFrame( [ [1, 1521.1541, 187101.9543, "ABC", "poi", 4.738797819], [2, 121.12, 14897.76, "DEF", "uyt", 0.377320872], [3, 878.158, 108013.434, "GHI", "rez", 2.735694704], ], columns=["Id", "Number1", "Number2", "Text1", "Text2", "Number3"], ) tm.assert_frame_equal(result, expected) @pytest.mark.parametrize("na_filter", [True, False]) def test_inf_parsing(all_parsers, na_filter): parser = all_parsers data = """\ ,A a,inf b,-inf c,+Inf d,-Inf e,INF f,-INF g,+INf h,-INf i,inF j,-inF""" expected = DataFrame( {"A": [float("inf"), float("-inf")] * 5}, index=["a", "b", "c", "d", "e", "f", "g", "h", "i", "j"], ) result = parser.read_csv(StringIO(data), index_col=0, na_filter=na_filter) tm.assert_frame_equal(result, expected) @pytest.mark.parametrize("na_filter", [True, False]) def test_infinity_parsing(all_parsers, na_filter): parser = all_parsers data = """\ ,A a,Infinity b,-Infinity c,+Infinity """ expected = DataFrame( {"A": [float("infinity"), float("-infinity"), float("+infinity")]}, index=["a", "b", "c"], ) result = parser.read_csv(StringIO(data), index_col=0, na_filter=na_filter) tm.assert_frame_equal(result, expected) @pytest.mark.parametrize("nrows", [0, 1, 2, 3, 4, 5]) def test_raise_on_no_columns(all_parsers, nrows): parser = all_parsers data = "\n" * nrows msg = "No columns to parse from file" with pytest.raises(EmptyDataError, match=msg): parser.read_csv(StringIO(data)) def test_memory_map(all_parsers, csv_dir_path): mmap_file = os.path.join(csv_dir_path, "test_mmap.csv") parser = all_parsers expected = DataFrame( {"a": [1, 2, 3], "b": ["one", "two", "three"], "c": ["I", "II", "III"]} ) result = parser.read_csv(mmap_file, memory_map=True) tm.assert_frame_equal(result, expected) def test_null_byte_char(all_parsers): # see gh-2741 data = "\x00,foo" names = ["a", "b"] parser = all_parsers if parser.engine == "c": expected = DataFrame([[np.nan, "foo"]], columns=names) out = parser.read_csv(StringIO(data), names=names) tm.assert_frame_equal(out, expected) else: msg = "NULL byte detected" with pytest.raises(ParserError, match=msg): parser.read_csv(StringIO(data), names=names) def test_temporary_file(all_parsers): # see gh-13398 parser = all_parsers data = "0 0" with tm.ensure_clean(mode="w+", return_filelike=True) as new_file: new_file.write(data) new_file.flush() new_file.seek(0) result = parser.read_csv(new_file, sep=r"\s+", header=None) expected = DataFrame([[0, 0]]) tm.assert_frame_equal(result, expected) def test_internal_eof_byte(all_parsers): # see gh-5500 parser = all_parsers data = "a,b\n1\x1a,2" expected = DataFrame([["1\x1a", 2]], columns=["a", "b"]) result = parser.read_csv(StringIO(data)) tm.assert_frame_equal(result, expected) def test_internal_eof_byte_to_file(all_parsers): # see gh-16559 parser = all_parsers data = b'c1,c2\r\n"test \x1a test", test\r\n' expected = DataFrame([["test \x1a test", " test"]], 
columns=["c1", "c2"]) path = f"__{tm.rands(10)}__.csv" with tm.ensure_clean(path) as path: with open(path, "wb") as f: f.write(data) result = parser.read_csv(path) tm.assert_frame_equal(result, expected) def test_sub_character(all_parsers, csv_dir_path): # see gh-16893 filename = os.path.join(csv_dir_path, "sub_char.csv") expected = DataFrame([[1, 2, 3]], columns=["a", "\x1ab", "c"]) parser = all_parsers result = parser.read_csv(filename) tm.assert_frame_equal(result, expected) def test_file_handle_string_io(all_parsers): # gh-14418 # # Don't close user provided file handles. parser = all_parsers data = "a,b\n1,2" fh = StringIO(data) parser.read_csv(fh) assert not fh.closed def test_file_handles_with_open(all_parsers, csv1): # gh-14418 # # Don't close user provided file handles. parser = all_parsers for mode in ["r", "rb"]: with open(csv1, mode) as f: parser.read_csv(f) assert not f.closed def test_invalid_file_buffer_class(all_parsers): # see gh-15337 class InvalidBuffer: pass parser = all_parsers msg = "Invalid file path or buffer object type" with pytest.raises(ValueError, match=msg): parser.read_csv(InvalidBuffer()) def test_invalid_file_buffer_mock(all_parsers): # see gh-15337 parser = all_parsers msg = "Invalid file path or buffer object type" class Foo: pass with pytest.raises(ValueError, match=msg): parser.read_csv(Foo()) def test_valid_file_buffer_seems_invalid(all_parsers): # gh-16135: we want to ensure that "tell" and "seek" # aren't actually being used when we call `read_csv` # # Thus, while the object may look "invalid" (these # methods are attributes of the `StringIO` class), # it is still a valid file-object for our purposes. class NoSeekTellBuffer(StringIO): def tell(self): raise AttributeError("No tell method") def seek(self, pos, whence=0): raise AttributeError("No seek method") data = "a\n1" parser = all_parsers expected = DataFrame({"a": [1]}) result = parser.read_csv(NoSeekTellBuffer(data)) tm.assert_frame_equal(result, expected) @pytest.mark.parametrize( "kwargs", [dict(), dict(error_bad_lines=True)], # Default is True. # Explicitly pass in. ) @pytest.mark.parametrize( "warn_kwargs", [dict(), dict(warn_bad_lines=True), dict(warn_bad_lines=False)] ) def test_error_bad_lines(all_parsers, kwargs, warn_kwargs): # see gh-15925 parser = all_parsers kwargs.update(**warn_kwargs) data = "a\n1\n1,2,3\n4\n5,6,7" msg = "Expected 1 fields in line 3, saw 3" with pytest.raises(ParserError, match=msg): parser.read_csv(StringIO(data), **kwargs) def test_warn_bad_lines(all_parsers, capsys): # see gh-15925 parser = all_parsers data = "a\n1\n1,2,3\n4\n5,6,7" expected = DataFrame({"a": [1, 4]}) result = parser.read_csv(StringIO(data), error_bad_lines=False, warn_bad_lines=True) tm.assert_frame_equal(result, expected) captured = capsys.readouterr() assert "Skipping line 3" in captured.err assert "Skipping line 5" in captured.err def test_suppress_error_output(all_parsers, capsys): # see gh-15925 parser = all_parsers data = "a\n1\n1,2,3\n4\n5,6,7" expected = DataFrame({"a": [1, 4]}) result = parser.read_csv( StringIO(data), error_bad_lines=False, warn_bad_lines=False ) tm.assert_frame_equal(result, expected) captured = capsys.readouterr() assert captured.err == "" @pytest.mark.parametrize("filename", ["sé-es-vé.csv", "ru-sй.csv", "中文文件名.csv"]) def test_filename_with_special_chars(all_parsers, filename): # see gh-15086. 
parser = all_parsers df = DataFrame({"a": [1, 2, 3]}) with tm.ensure_clean(filename) as path: df.to_csv(path, index=False) result = parser.read_csv(path) tm.assert_frame_equal(result, df) def test_read_csv_memory_growth_chunksize(all_parsers): # see gh-24805 # # Let's just make sure that we don't crash # as we iteratively process all chunks. parser = all_parsers with tm.ensure_clean() as path: with open(path, "w") as f: for i in range(1000): f.write(str(i) + "\n") result = parser.read_csv(path, chunksize=20) for _ in result: pass def test_read_csv_raises_on_header_prefix(all_parsers): # gh-27394 parser = all_parsers msg = "Argument prefix must be None if argument header is not None" s = StringIO("0,1\n2,3") with pytest.raises(ValueError, match=msg): parser.read_csv(s, header=0, prefix="_X") def test_read_table_equivalency_to_read_csv(all_parsers): # see gh-21948 # As of 0.25.0, read_table is undeprecated parser = all_parsers data = "a\tb\n1\t2\n3\t4" expected = parser.read_csv(StringIO(data), sep="\t") result = parser.read_table(StringIO(data)) tm.assert_frame_equal(result, expected) def test_first_row_bom(all_parsers): # see gh-26545 parser = all_parsers data = '''\ufeff"Head1" "Head2" "Head3"''' result = parser.read_csv(StringIO(data), delimiter="\t") expected = DataFrame(columns=["Head1", "Head2", "Head3"]) tm.assert_frame_equal(result, expected) def test_integer_precision(all_parsers): # Gh 7072 s = """1,1;0;0;0;1;1;3844;3844;3844;1;1;1;1;1;1;0;0;1;1;0;0,,,4321583677327450765 5,1;0;0;0;1;1;843;843;843;1;1;1;1;1;1;0;0;1;1;0;0,64.0,;,4321113141090630389""" parser = all_parsers result = parser.read_csv(StringIO(s), header=None)[4] expected = Series([4321583677327450765, 4321113141090630389], name=4) tm.assert_series_equal(result, expected) def test_file_descriptor_leak(all_parsers): # GH 31488 parser = all_parsers with tm.ensure_clean() as path: def test(): with pytest.raises(EmptyDataError, match="No columns to parse from file"): parser.read_csv(path) td.check_file_leaks(test)() @pytest.mark.parametrize("nrows", range(1, 6)) def test_blank_lines_between_header_and_data_rows(all_parsers, nrows): # GH 28071 ref = DataFrame( [[np.nan, np.nan], [np.nan, np.nan], [1, 2], [np.nan, np.nan], [3, 4]], columns=list("ab"), ) csv = "\nheader\n\na,b\n\n\n1,2\n\n3,4" parser = all_parsers df = parser.read_csv(StringIO(csv), header=3, nrows=nrows, skip_blank_lines=False) tm.assert_frame_equal(df, ref[:nrows]) def test_no_header_two_extra_columns(all_parsers): # GH 26218 column_names = ["one", "two", "three"] ref = DataFrame([["foo", "bar", "baz"]], columns=column_names) stream = StringIO("foo,bar,baz,bam,blah") parser = all_parsers df = parser.read_csv(stream, header=None, names=column_names, index_col=False) tm.assert_frame_equal(df, ref)
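# --- Illustrative sketch only, not part of the file above. ---
# Every test in the module above receives an ``all_parsers`` fixture (plus helpers such as
# ``csv_dir_path`` and ``csv1``) that is defined outside this file, in the parser suite's
# conftest, which is not included in this dump. The stand-in below is a minimal sketch of
# such a fixture under that assumption; the class name ``_ParserStandIn`` and its exact
# attributes are invented for illustration and are not pandas' real conftest code.
import pandas as pd
import pytest


class _ParserStandIn:
    """Pin read_csv/read_table to one engine and expose the attributes the tests touch
    (``engine``, ``low_memory``, ``float_precision_choices``)."""

    def __init__(self, engine, low_memory=True):
        self.engine = engine
        self.low_memory = low_memory
        # The C engine accepts extra float_precision values; the Python engine does not.
        self.float_precision_choices = (
            [None, "high", "round_trip"] if engine == "c" else [None]
        )

    def read_csv(self, *args, **kwargs):
        kwargs.setdefault("engine", self.engine)
        return pd.read_csv(*args, **kwargs)

    def read_table(self, *args, **kwargs):
        kwargs.setdefault("engine", self.engine)
        return pd.read_table(*args, **kwargs)


@pytest.fixture(params=["c", "python"])
def all_parsers(request):
    # Each test that takes ``all_parsers`` is collected once per engine.
    return _ParserStandIn(request.param)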
repo_name: TomAugspurger/pandas
test_path: pandas/tests/io/parser/test_common.py
code_path: pandas/tests/arrays/boolean/test_ops.py
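# --- Illustrative sketch only. ---
# The three fields above tie the test file to a repository and a source file. Assuming a
# local clone of the named repository (the clone location below is invented for the
# example), a row like this can be resolved and its test module run with pytest:
from pathlib import Path
import subprocess

row = {
    "repo_name": "TomAugspurger/pandas",
    "test_path": "pandas/tests/io/parser/test_common.py",
    "code_path": "pandas/tests/arrays/boolean/test_ops.py",
}

checkout = Path.home() / "src" / row["repo_name"].split("/")[-1]  # assumed clone location
test_file = checkout / row["test_path"]
code_file = checkout / row["code_path"]

if test_file.is_file() and code_file.is_file():
    # Run only the referenced test module (requires pytest in the environment).
    subprocess.run(["python", "-m", "pytest", str(test_file), "-q"], cwd=checkout, check=False)
else:
    print("clone the repository first, e.g. git clone https://github.com/" + row["repo_name"])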
from contextlib import contextmanager from pandas.plotting._core import _get_plot_backend def table(ax, data, rowLabels=None, colLabels=None, **kwargs): """ Helper function to convert DataFrame and Series to matplotlib.table. Parameters ---------- ax : Matplotlib axes object data : DataFrame or Series Data for table contents. **kwargs Keyword arguments to be passed to matplotlib.table.table. If `rowLabels` or `colLabels` is not specified, data index or column name will be used. Returns ------- matplotlib table object """ plot_backend = _get_plot_backend("matplotlib") return plot_backend.table( ax=ax, data=data, rowLabels=None, colLabels=None, **kwargs ) def register(): """ Register pandas formatters and converters with matplotlib. This function modifies the global ``matplotlib.units.registry`` dictionary. pandas adds custom converters for * pd.Timestamp * pd.Period * np.datetime64 * datetime.datetime * datetime.date * datetime.time See Also -------- deregister_matplotlib_converters : Remove pandas formatters and converters. """ plot_backend = _get_plot_backend("matplotlib") plot_backend.register() def deregister(): """ Remove pandas formatters and converters. Removes the custom converters added by :func:`register`. This attempts to set the state of the registry back to the state before pandas registered its own units. Converters for pandas' own types like Timestamp and Period are removed completely. Converters for types pandas overwrites, like ``datetime.datetime``, are restored to their original value. See Also -------- register_matplotlib_converters : Register pandas formatters and converters with matplotlib. """ plot_backend = _get_plot_backend("matplotlib") plot_backend.deregister() def scatter_matrix( frame, alpha=0.5, figsize=None, ax=None, grid=False, diagonal="hist", marker=".", density_kwds=None, hist_kwds=None, range_padding=0.05, **kwargs, ): """ Draw a matrix of scatter plots. Parameters ---------- frame : DataFrame alpha : float, optional Amount of transparency applied. figsize : (float,float), optional A tuple (width, height) in inches. ax : Matplotlib axis object, optional grid : bool, optional Setting this to True will show the grid. diagonal : {'hist', 'kde'} Pick between 'kde' and 'hist' for either Kernel Density Estimation or Histogram plot in the diagonal. marker : str, optional Matplotlib marker type, default '.'. density_kwds : keywords Keyword arguments to be passed to kernel density estimate plot. hist_kwds : keywords Keyword arguments to be passed to hist function. range_padding : float, default 0.05 Relative extension of axis range in x and y with respect to (x_max - x_min) or (y_max - y_min). **kwargs Keyword arguments to be passed to scatter function. Returns ------- numpy.ndarray A matrix of scatter plots. Examples -------- .. plot:: :context: close-figs >>> df = pd.DataFrame(np.random.randn(1000, 4), columns=['A','B','C','D']) >>> pd.plotting.scatter_matrix(df, alpha=0.2) """ plot_backend = _get_plot_backend("matplotlib") return plot_backend.scatter_matrix( frame=frame, alpha=alpha, figsize=figsize, ax=ax, grid=grid, diagonal=diagonal, marker=marker, density_kwds=density_kwds, hist_kwds=hist_kwds, range_padding=range_padding, **kwargs, ) def radviz(frame, class_column, ax=None, color=None, colormap=None, **kwds): """ Plot a multidimensional dataset in 2D. Each Series in the DataFrame is represented as a evenly distributed slice on a circle. Each data point is rendered in the circle according to the value on each Series. 
Highly correlated `Series` in the `DataFrame` are placed closer on the unit circle.
RadViz allows projecting an N-dimensional data set into a 2D space where the influence of each dimension can be interpreted as a balance between the influence of all dimensions.
More info available at the `original article <https://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.135.889>`_ describing RadViz.
Parameters ---------- frame : `DataFrame` pandas object holding the data. class_column : str Column name containing the name of the data point category. ax : :class:`matplotlib.axes.Axes`, optional A plot instance to which to add the information. color : list[str] or tuple[str], optional Assign a color to each category. Example: ['blue', 'green']. colormap : str or :class:`matplotlib.colors.Colormap`, default None Colormap to select colors from. If string, load colormap with that name from matplotlib. **kwds Options to pass to matplotlib scatter plotting method.
Returns ------- class:`matplotlib.axes.Axes` See Also -------- plotting.andrews_curves : Plot clustering visualization.
Examples -------- .. plot:: :context: close-figs >>> df = pd.DataFrame( ... { ... 'SepalLength': [6.5, 7.7, 5.1, 5.8, 7.6, 5.0, 5.4, 4.6, 6.7, 4.6], ... 'SepalWidth': [3.0, 3.8, 3.8, 2.7, 3.0, 2.3, 3.0, 3.2, 3.3, 3.6], ... 'PetalLength': [5.5, 6.7, 1.9, 5.1, 6.6, 3.3, 4.5, 1.4, 5.7, 1.0], ... 'PetalWidth': [1.8, 2.2, 0.4, 1.9, 2.1, 1.0, 1.5, 0.2, 2.1, 0.2], ... 'Category': [ ... 'virginica', ... 'virginica', ... 'setosa', ... 'virginica', ... 'virginica', ... 'versicolor', ... 'versicolor', ... 'setosa', ... 'virginica', ... 'setosa' ... ] ... } ... ) >>> pd.plotting.radviz(df, 'Category') """ plot_backend = _get_plot_backend("matplotlib") return plot_backend.radviz( frame=frame, class_column=class_column, ax=ax, color=color, colormap=colormap, **kwds, )
def andrews_curves( frame, class_column, ax=None, samples=200, color=None, colormap=None, **kwargs ): """ Generate a matplotlib plot of Andrews curves, for visualising clusters of multivariate data. Andrews curves have the functional form: f(t) = x_1/sqrt(2) + x_2 sin(t) + x_3 cos(t) + x_4 sin(2t) + x_5 cos(2t) + ... Where x coefficients correspond to the values of each dimension and t is linearly spaced between -pi and +pi. Each row of frame then corresponds to a single curve.
Parameters ---------- frame : DataFrame Data to be plotted, preferably normalized to (0.0, 1.0). class_column : Name of the column containing class names ax : matplotlib axes object, default None samples : Number of points to plot in each curve color : list or tuple, optional Colors to use for the different classes. colormap : str or matplotlib colormap object, default None Colormap to select colors from. If string, load colormap with that name from matplotlib. **kwargs Options to pass to matplotlib plotting method.
Returns ------- class:`matplotlib.axis.Axes` Examples -------- .. plot:: :context: close-figs >>> df = pd.read_csv( ... 'https://raw.github.com/pandas-dev/' ... 'pandas/master/pandas/tests/io/data/csv/iris.csv' ... ) >>> pd.plotting.andrews_curves(df, 'Name') """ plot_backend = _get_plot_backend("matplotlib") return plot_backend.andrews_curves( frame=frame, class_column=class_column, ax=ax, samples=samples, color=color, colormap=colormap, **kwargs, )
def bootstrap_plot(series, fig=None, size=50, samples=500, **kwds): """ Bootstrap plot on mean, median and mid-range statistics.
The bootstrap plot is used to estimate the uncertainty of a statistic by relying on random sampling with replacement [1]_.
This function will generate bootstrapping plots for mean, median and mid-range statistics for the given number of samples of the given size.
.. [1] "Bootstrapping (statistics)" in \ https://en.wikipedia.org/wiki/Bootstrapping_%28statistics%29
Parameters ---------- series : pandas.Series pandas Series from which to get the samplings for the bootstrapping. fig : matplotlib.figure.Figure, default None If given, it will use the `fig` reference for plotting instead of creating a new one with default parameters. size : int, default 50 Number of data points to consider during each sampling. It must be greater than or equal to the length of the `series`. samples : int, default 500 Number of times the bootstrap procedure is performed. **kwds Options to pass to matplotlib plotting method.
Returns ------- matplotlib.figure.Figure Matplotlib figure. See Also -------- DataFrame.plot : Basic plotting for DataFrame objects. Series.plot : Basic plotting for Series objects.
Examples -------- This example draws a basic bootstrap plot for a Series. .. plot:: :context: close-figs >>> s = pd.Series(np.random.uniform(size=100)) >>> pd.plotting.bootstrap_plot(s) """ plot_backend = _get_plot_backend("matplotlib") return plot_backend.bootstrap_plot( series=series, fig=fig, size=size, samples=samples, **kwds )
def parallel_coordinates( frame, class_column, cols=None, ax=None, color=None, use_columns=False, xticks=None, colormap=None, axvlines=True, axvlines_kwds=None, sort_labels=False, **kwargs, ): """ Parallel coordinates plotting.
Parameters ---------- frame : DataFrame class_column : str Column name containing class names. cols : list, optional A list of column names to use. ax : matplotlib.axis, optional Matplotlib axis object. color : list or tuple, optional Colors to use for the different classes. use_columns : bool, optional If true, columns will be used as xticks. xticks : list or tuple, optional A list of values to use for xticks. colormap : str or matplotlib colormap, default None Colormap to use for line colors. axvlines : bool, optional If true, vertical lines will be added at each xtick. axvlines_kwds : keywords, optional Options to be passed to axvline method for vertical lines. sort_labels : bool, default False Sort class_column labels, useful when assigning colors. **kwargs Options to pass to matplotlib plotting method.
Returns ------- class:`matplotlib.axis.Axes` Examples -------- .. plot:: :context: close-figs >>> df = pd.read_csv( ... 'https://raw.github.com/pandas-dev/' ... 'pandas/master/pandas/tests/io/data/csv/iris.csv' ... ) >>> pd.plotting.parallel_coordinates( ... df, 'Name', color=('#556270', '#4ECDC4', '#C7F464') ... ) """ plot_backend = _get_plot_backend("matplotlib") return plot_backend.parallel_coordinates( frame=frame, class_column=class_column, cols=cols, ax=ax, color=color, use_columns=use_columns, xticks=xticks, colormap=colormap, axvlines=axvlines, axvlines_kwds=axvlines_kwds, sort_labels=sort_labels, **kwargs, )
def lag_plot(series, lag=1, ax=None, **kwds): """ Lag plot for time series. Parameters ---------- series : Time series lag : lag of the scatter plot, default 1 ax : Matplotlib axis object, optional **kwds Matplotlib scatter method keyword arguments. Returns ------- class:`matplotlib.axis.Axes` Examples -------- Lag plots are most commonly used to look for patterns in time series data. Given the following time series ..
plot:: :context: close-figs >>> np.random.seed(5) >>> x = np.cumsum(np.random.normal(loc=1, scale=5, size=50)) >>> s = pd.Series(x) >>> s.plot() A lag plot with ``lag=1`` returns .. plot:: :context: close-figs >>> pd.plotting.lag_plot(s, lag=1) """ plot_backend = _get_plot_backend("matplotlib") return plot_backend.lag_plot(series=series, lag=lag, ax=ax, **kwds) def autocorrelation_plot(series, ax=None, **kwargs): """ Autocorrelation plot for time series. Parameters ---------- series : Time series ax : Matplotlib axis object, optional **kwargs Options to pass to matplotlib plotting method. Returns ------- class:`matplotlib.axis.Axes` Examples -------- The horizontal lines in the plot correspond to 95% and 99% confidence bands. The dashed line is 99% confidence band. .. plot:: :context: close-figs >>> spacing = np.linspace(-9 * np.pi, 9 * np.pi, num=1000) >>> s = pd.Series(0.7 * np.random.rand(1000) + 0.3 * np.sin(spacing)) >>> pd.plotting.autocorrelation_plot(s) """ plot_backend = _get_plot_backend("matplotlib") return plot_backend.autocorrelation_plot(series=series, ax=ax, **kwargs) class _Options(dict): """ Stores pandas plotting options. Allows for parameter aliasing so you can just use parameter names that are the same as the plot function parameters, but is stored in a canonical format that makes it easy to breakdown into groups later. """ # alias so the names are same as plotting method parameter names _ALIASES = {"x_compat": "xaxis.compat"} _DEFAULT_KEYS = ["xaxis.compat"] def __init__(self, deprecated=False): self._deprecated = deprecated super().__setitem__("xaxis.compat", False) def __getitem__(self, key): key = self._get_canonical_key(key) if key not in self: raise ValueError(f"{key} is not a valid pandas plotting option") return super().__getitem__(key) def __setitem__(self, key, value): key = self._get_canonical_key(key) return super().__setitem__(key, value) def __delitem__(self, key): key = self._get_canonical_key(key) if key in self._DEFAULT_KEYS: raise ValueError(f"Cannot remove default parameter {key}") return super().__delitem__(key) def __contains__(self, key) -> bool: key = self._get_canonical_key(key) return super().__contains__(key) def reset(self): """ Reset the option store to its initial state Returns ------- None """ self.__init__() def _get_canonical_key(self, key): return self._ALIASES.get(key, key) @contextmanager def use(self, key, value): """ Temporarily set a parameter value using the with statement. Aliasing allowed. """ old_value = self[key] try: self[key] = value yield self finally: self[key] = old_value plot_params = _Options()
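# --- Usage sketch for the wrappers above; not part of the module. ---
# The file only wires the public pandas.plotting entry points to the matplotlib backend and
# defines the ``plot_params`` option store, so nothing in it shows the pieces used together.
# The sketch assumes matplotlib is installed; the random data is invented for illustration.
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd

pd.plotting.register_matplotlib_converters()  # exposed publicly for register() above

df = pd.DataFrame(np.random.randn(200, 4), columns=list("ABCD"))
pd.plotting.scatter_matrix(df, alpha=0.3, diagonal="kde")  # pairwise scatter grid, KDE diagonal

s = pd.Series(np.sin(np.linspace(0, 20, 200)) + np.random.normal(scale=0.1, size=200))
pd.plotting.lag_plot(s, lag=1)        # y(t) against y(t + 1)
pd.plotting.autocorrelation_plot(s)   # correlation against increasing lags

# _Options in action: 'x_compat' is aliased to 'xaxis.compat' via _ALIASES, and use()
# restores the previous value when the with-block exits.
ts = pd.Series(np.random.randn(100), index=pd.date_range("2000-01-01", periods=100))
with pd.plotting.plot_params.use("x_compat", True):
    ts.plot()

plt.show()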
""" Tests that work on both the Python and C engines but do not have a specific classification into the other test modules. """ import codecs import csv from datetime import datetime from io import StringIO import os import platform from urllib.error import URLError import numpy as np import pytest from pandas._libs.tslib import Timestamp from pandas.errors import DtypeWarning, EmptyDataError, ParserError import pandas.util._test_decorators as td from pandas import DataFrame, Index, MultiIndex, Series, compat, concat import pandas._testing as tm from pandas.io.parsers import CParserWrapper, TextFileReader, TextParser def test_override_set_noconvert_columns(): # see gh-17351 # # Usecols needs to be sorted in _set_noconvert_columns based # on the test_usecols_with_parse_dates test from test_usecols.py class MyTextFileReader(TextFileReader): def __init__(self): self._currow = 0 self.squeeze = False class MyCParserWrapper(CParserWrapper): def _set_noconvert_columns(self): if self.usecols_dtype == "integer": # self.usecols is a set, which is documented as unordered # but in practice, a CPython set of integers is sorted. # In other implementations this assumption does not hold. # The following code simulates a different order, which # before GH 17351 would cause the wrong columns to be # converted via the parse_dates parameter self.usecols = list(self.usecols) self.usecols.reverse() return CParserWrapper._set_noconvert_columns(self) data = """a,b,c,d,e 0,1,20140101,0900,4 0,1,20140102,1000,4""" parse_dates = [[1, 2]] cols = { "a": [0, 0], "c_d": [Timestamp("2014-01-01 09:00:00"), Timestamp("2014-01-02 10:00:00")], } expected = DataFrame(cols, columns=["c_d", "a"]) parser = MyTextFileReader() parser.options = { "usecols": [0, 2, 3], "parse_dates": parse_dates, "delimiter": ",", } parser._engine = MyCParserWrapper(StringIO(data), **parser.options) result = parser.read() tm.assert_frame_equal(result, expected) def test_empty_decimal_marker(all_parsers): data = """A|B|C 1|2,334|5 10|13|10. """ # Parsers support only length-1 decimals msg = "Only length-1 decimal markers supported" parser = all_parsers with pytest.raises(ValueError, match=msg): parser.read_csv(StringIO(data), decimal="") def test_bad_stream_exception(all_parsers, csv_dir_path): # see gh-13652 # # This test validates that both the Python engine and C engine will # raise UnicodeDecodeError instead of C engine raising ParserError # and swallowing the exception that caused read to fail. path = os.path.join(csv_dir_path, "sauron.SHIFT_JIS.csv") codec = codecs.lookup("utf-8") utf8 = codecs.lookup("utf-8") parser = all_parsers msg = "'utf-8' codec can't decode byte" # Stream must be binary UTF8. 
with open(path, "rb") as handle, codecs.StreamRecoder( handle, utf8.encode, utf8.decode, codec.streamreader, codec.streamwriter ) as stream: with pytest.raises(UnicodeDecodeError, match=msg): parser.read_csv(stream) def test_read_csv_local(all_parsers, csv1): prefix = "file:///" if compat.is_platform_windows() else "file://" parser = all_parsers fname = prefix + str(os.path.abspath(csv1)) result = parser.read_csv(fname, index_col=0, parse_dates=True) expected = DataFrame( [ [0.980269, 3.685731, -0.364216805298, -1.159738], [1.047916, -0.041232, -0.16181208307, 0.212549], [0.498581, 0.731168, -0.537677223318, 1.346270], [1.120202, 1.567621, 0.00364077397681, 0.675253], [-0.487094, 0.571455, -1.6116394093, 0.103469], [0.836649, 0.246462, 0.588542635376, 1.062782], [-0.157161, 1.340307, 1.1957779562, -1.097007], ], columns=["A", "B", "C", "D"], index=Index( [ datetime(2000, 1, 3), datetime(2000, 1, 4), datetime(2000, 1, 5), datetime(2000, 1, 6), datetime(2000, 1, 7), datetime(2000, 1, 10), datetime(2000, 1, 11), ], name="index", ), ) tm.assert_frame_equal(result, expected) def test_1000_sep(all_parsers): parser = all_parsers data = """A|B|C 1|2,334|5 10|13|10. """ expected = DataFrame({"A": [1, 10], "B": [2334, 13], "C": [5, 10.0]}) result = parser.read_csv(StringIO(data), sep="|", thousands=",") tm.assert_frame_equal(result, expected) def test_squeeze(all_parsers): data = """\ a,1 b,2 c,3 """ parser = all_parsers index = Index(["a", "b", "c"], name=0) expected = Series([1, 2, 3], name=1, index=index) result = parser.read_csv(StringIO(data), index_col=0, header=None, squeeze=True) tm.assert_series_equal(result, expected) # see gh-8217 # # Series should not be a view. assert not result._is_view def test_malformed(all_parsers): # see gh-6607 parser = all_parsers data = """ignore A,B,C 1,2,3 # comment 1,2,3,4,5 2,3,4 """ msg = "Expected 3 fields in line 4, saw 5" with pytest.raises(ParserError, match=msg): parser.read_csv(StringIO(data), header=1, comment="#") @pytest.mark.parametrize("nrows", [5, 3, None]) def test_malformed_chunks(all_parsers, nrows): data = """ignore A,B,C skip 1,2,3 3,5,10 # comment 1,2,3,4,5 2,3,4 """ parser = all_parsers msg = "Expected 3 fields in line 6, saw 5" reader = parser.read_csv( StringIO(data), header=1, comment="#", iterator=True, chunksize=1, skiprows=[2] ) with pytest.raises(ParserError, match=msg): reader.read(nrows) def test_unnamed_columns(all_parsers): data = """A,B,C,, 1,2,3,4,5 6,7,8,9,10 11,12,13,14,15 """ parser = all_parsers expected = DataFrame( [[1, 2, 3, 4, 5], [6, 7, 8, 9, 10], [11, 12, 13, 14, 15]], dtype=np.int64, columns=["A", "B", "C", "Unnamed: 3", "Unnamed: 4"], ) result = parser.read_csv(StringIO(data)) tm.assert_frame_equal(result, expected) def test_csv_mixed_type(all_parsers): data = """A,B,C a,1,2 b,3,4 c,4,5 """ parser = all_parsers expected = DataFrame({"A": ["a", "b", "c"], "B": [1, 3, 4], "C": [2, 4, 5]}) result = parser.read_csv(StringIO(data)) tm.assert_frame_equal(result, expected) def test_read_csv_low_memory_no_rows_with_index(all_parsers): # see gh-21141 parser = all_parsers if not parser.low_memory: pytest.skip("This is a low-memory specific test") data = """A,B,C 1,1,1,2 2,2,3,4 3,3,4,5 """ result = parser.read_csv(StringIO(data), low_memory=True, index_col=0, nrows=0) expected = DataFrame(columns=["A", "B", "C"]) tm.assert_frame_equal(result, expected) def test_read_csv_dataframe(all_parsers, csv1): parser = all_parsers result = parser.read_csv(csv1, index_col=0, parse_dates=True) expected = DataFrame( [ [0.980269, 
3.685731, -0.364216805298, -1.159738], [1.047916, -0.041232, -0.16181208307, 0.212549], [0.498581, 0.731168, -0.537677223318, 1.346270], [1.120202, 1.567621, 0.00364077397681, 0.675253], [-0.487094, 0.571455, -1.6116394093, 0.103469], [0.836649, 0.246462, 0.588542635376, 1.062782], [-0.157161, 1.340307, 1.1957779562, -1.097007], ], columns=["A", "B", "C", "D"], index=Index( [ datetime(2000, 1, 3), datetime(2000, 1, 4), datetime(2000, 1, 5), datetime(2000, 1, 6), datetime(2000, 1, 7), datetime(2000, 1, 10), datetime(2000, 1, 11), ], name="index", ), ) tm.assert_frame_equal(result, expected) def test_read_csv_no_index_name(all_parsers, csv_dir_path): parser = all_parsers csv2 = os.path.join(csv_dir_path, "test2.csv") result = parser.read_csv(csv2, index_col=0, parse_dates=True) expected = DataFrame( [ [0.980269, 3.685731, -0.364216805298, -1.159738, "foo"], [1.047916, -0.041232, -0.16181208307, 0.212549, "bar"], [0.498581, 0.731168, -0.537677223318, 1.346270, "baz"], [1.120202, 1.567621, 0.00364077397681, 0.675253, "qux"], [-0.487094, 0.571455, -1.6116394093, 0.103469, "foo2"], ], columns=["A", "B", "C", "D", "E"], index=Index( [ datetime(2000, 1, 3), datetime(2000, 1, 4), datetime(2000, 1, 5), datetime(2000, 1, 6), datetime(2000, 1, 7), ] ), ) tm.assert_frame_equal(result, expected) def test_read_csv_wrong_num_columns(all_parsers): # Too few columns. data = """A,B,C,D,E,F 1,2,3,4,5,6 6,7,8,9,10,11,12 11,12,13,14,15,16 """ parser = all_parsers msg = "Expected 6 fields in line 3, saw 7" with pytest.raises(ParserError, match=msg): parser.read_csv(StringIO(data)) def test_read_duplicate_index_explicit(all_parsers): data = """index,A,B,C,D foo,2,3,4,5 bar,7,8,9,10 baz,12,13,14,15 qux,12,13,14,15 foo,12,13,14,15 bar,12,13,14,15 """ parser = all_parsers result = parser.read_csv(StringIO(data), index_col=0) expected = DataFrame( [ [2, 3, 4, 5], [7, 8, 9, 10], [12, 13, 14, 15], [12, 13, 14, 15], [12, 13, 14, 15], [12, 13, 14, 15], ], columns=["A", "B", "C", "D"], index=Index(["foo", "bar", "baz", "qux", "foo", "bar"], name="index"), ) tm.assert_frame_equal(result, expected) def test_read_duplicate_index_implicit(all_parsers): data = """A,B,C,D foo,2,3,4,5 bar,7,8,9,10 baz,12,13,14,15 qux,12,13,14,15 foo,12,13,14,15 bar,12,13,14,15 """ parser = all_parsers result = parser.read_csv(StringIO(data)) expected = DataFrame( [ [2, 3, 4, 5], [7, 8, 9, 10], [12, 13, 14, 15], [12, 13, 14, 15], [12, 13, 14, 15], [12, 13, 14, 15], ], columns=["A", "B", "C", "D"], index=Index(["foo", "bar", "baz", "qux", "foo", "bar"]), ) tm.assert_frame_equal(result, expected) @pytest.mark.parametrize( "data,kwargs,expected", [ ( "A,B\nTrue,1\nFalse,2\nTrue,3", dict(), DataFrame([[True, 1], [False, 2], [True, 3]], columns=["A", "B"]), ), ( "A,B\nYES,1\nno,2\nyes,3\nNo,3\nYes,3", dict(true_values=["yes", "Yes", "YES"], false_values=["no", "NO", "No"]), DataFrame( [[True, 1], [False, 2], [True, 3], [False, 3], [True, 3]], columns=["A", "B"], ), ), ( "A,B\nTRUE,1\nFALSE,2\nTRUE,3", dict(), DataFrame([[True, 1], [False, 2], [True, 3]], columns=["A", "B"]), ), ( "A,B\nfoo,bar\nbar,foo", dict(true_values=["foo"], false_values=["bar"]), DataFrame([[True, False], [False, True]], columns=["A", "B"]), ), ], ) def test_parse_bool(all_parsers, data, kwargs, expected): parser = all_parsers result = parser.read_csv(StringIO(data), **kwargs) tm.assert_frame_equal(result, expected) def test_int_conversion(all_parsers): data = """A,B 1.0,1 2.0,2 3.0,3 """ parser = all_parsers result = parser.read_csv(StringIO(data)) expected = DataFrame([[1.0, 
1], [2.0, 2], [3.0, 3]], columns=["A", "B"]) tm.assert_frame_equal(result, expected) @pytest.mark.parametrize("nrows", [3, 3.0]) def test_read_nrows(all_parsers, nrows): # see gh-10476 data = """index,A,B,C,D foo,2,3,4,5 bar,7,8,9,10 baz,12,13,14,15 qux,12,13,14,15 foo2,12,13,14,15 bar2,12,13,14,15 """ expected = DataFrame( [["foo", 2, 3, 4, 5], ["bar", 7, 8, 9, 10], ["baz", 12, 13, 14, 15]], columns=["index", "A", "B", "C", "D"], ) parser = all_parsers result = parser.read_csv(StringIO(data), nrows=nrows) tm.assert_frame_equal(result, expected) @pytest.mark.parametrize("nrows", [1.2, "foo", -1]) def test_read_nrows_bad(all_parsers, nrows): data = """index,A,B,C,D foo,2,3,4,5 bar,7,8,9,10 baz,12,13,14,15 qux,12,13,14,15 foo2,12,13,14,15 bar2,12,13,14,15 """ msg = r"'nrows' must be an integer >=0" parser = all_parsers with pytest.raises(ValueError, match=msg): parser.read_csv(StringIO(data), nrows=nrows) @pytest.mark.parametrize("index_col", [0, "index"]) def test_read_chunksize_with_index(all_parsers, index_col): parser = all_parsers data = """index,A,B,C,D foo,2,3,4,5 bar,7,8,9,10 baz,12,13,14,15 qux,12,13,14,15 foo2,12,13,14,15 bar2,12,13,14,15 """ reader = parser.read_csv(StringIO(data), index_col=0, chunksize=2) expected = DataFrame( [ ["foo", 2, 3, 4, 5], ["bar", 7, 8, 9, 10], ["baz", 12, 13, 14, 15], ["qux", 12, 13, 14, 15], ["foo2", 12, 13, 14, 15], ["bar2", 12, 13, 14, 15], ], columns=["index", "A", "B", "C", "D"], ) expected = expected.set_index("index") chunks = list(reader) tm.assert_frame_equal(chunks[0], expected[:2]) tm.assert_frame_equal(chunks[1], expected[2:4]) tm.assert_frame_equal(chunks[2], expected[4:]) @pytest.mark.parametrize("chunksize", [1.3, "foo", 0]) def test_read_chunksize_bad(all_parsers, chunksize): data = """index,A,B,C,D foo,2,3,4,5 bar,7,8,9,10 baz,12,13,14,15 qux,12,13,14,15 foo2,12,13,14,15 bar2,12,13,14,15 """ parser = all_parsers msg = r"'chunksize' must be an integer >=1" with pytest.raises(ValueError, match=msg): parser.read_csv(StringIO(data), chunksize=chunksize) @pytest.mark.parametrize("chunksize", [2, 8]) def test_read_chunksize_and_nrows(all_parsers, chunksize): # see gh-15755 data = """index,A,B,C,D foo,2,3,4,5 bar,7,8,9,10 baz,12,13,14,15 qux,12,13,14,15 foo2,12,13,14,15 bar2,12,13,14,15 """ parser = all_parsers kwargs = dict(index_col=0, nrows=5) reader = parser.read_csv(StringIO(data), chunksize=chunksize, **kwargs) expected = parser.read_csv(StringIO(data), **kwargs) tm.assert_frame_equal(concat(reader), expected) def test_read_chunksize_and_nrows_changing_size(all_parsers): data = """index,A,B,C,D foo,2,3,4,5 bar,7,8,9,10 baz,12,13,14,15 qux,12,13,14,15 foo2,12,13,14,15 bar2,12,13,14,15 """ parser = all_parsers kwargs = dict(index_col=0, nrows=5) reader = parser.read_csv(StringIO(data), chunksize=8, **kwargs) expected = parser.read_csv(StringIO(data), **kwargs) tm.assert_frame_equal(reader.get_chunk(size=2), expected.iloc[:2]) tm.assert_frame_equal(reader.get_chunk(size=4), expected.iloc[2:5]) with pytest.raises(StopIteration, match=""): reader.get_chunk(size=3) def test_get_chunk_passed_chunksize(all_parsers): parser = all_parsers data = """A,B,C 1,2,3 4,5,6 7,8,9 1,2,3""" reader = parser.read_csv(StringIO(data), chunksize=2) result = reader.get_chunk() expected = DataFrame([[1, 2, 3], [4, 5, 6]], columns=["A", "B", "C"]) tm.assert_frame_equal(result, expected) @pytest.mark.parametrize("kwargs", [dict(), dict(index_col=0)]) def test_read_chunksize_compat(all_parsers, kwargs): # see gh-12185 data = """index,A,B,C,D foo,2,3,4,5 
bar,7,8,9,10 baz,12,13,14,15 qux,12,13,14,15 foo2,12,13,14,15 bar2,12,13,14,15 """ parser = all_parsers reader = parser.read_csv(StringIO(data), chunksize=2, **kwargs) result = parser.read_csv(StringIO(data), **kwargs) tm.assert_frame_equal(concat(reader), result) def test_read_chunksize_jagged_names(all_parsers): # see gh-23509 parser = all_parsers data = "\n".join(["0"] * 7 + [",".join(["0"] * 10)]) expected = DataFrame([[0] + [np.nan] * 9] * 7 + [[0] * 10]) reader = parser.read_csv(StringIO(data), names=range(10), chunksize=4) result = concat(reader) tm.assert_frame_equal(result, expected) def test_read_data_list(all_parsers): parser = all_parsers kwargs = dict(index_col=0) data = "A,B,C\nfoo,1,2,3\nbar,4,5,6" data_list = [["A", "B", "C"], ["foo", "1", "2", "3"], ["bar", "4", "5", "6"]] expected = parser.read_csv(StringIO(data), **kwargs) parser = TextParser(data_list, chunksize=2, **kwargs) result = parser.read() tm.assert_frame_equal(result, expected) def test_iterator(all_parsers): # see gh-6607 data = """index,A,B,C,D foo,2,3,4,5 bar,7,8,9,10 baz,12,13,14,15 qux,12,13,14,15 foo2,12,13,14,15 bar2,12,13,14,15 """ parser = all_parsers kwargs = dict(index_col=0) expected = parser.read_csv(StringIO(data), **kwargs) reader = parser.read_csv(StringIO(data), iterator=True, **kwargs) first_chunk = reader.read(3) tm.assert_frame_equal(first_chunk, expected[:3]) last_chunk = reader.read(5) tm.assert_frame_equal(last_chunk, expected[3:]) def test_iterator2(all_parsers): parser = all_parsers data = """A,B,C foo,1,2,3 bar,4,5,6 baz,7,8,9 """ reader = parser.read_csv(StringIO(data), iterator=True) result = list(reader) expected = DataFrame( [[1, 2, 3], [4, 5, 6], [7, 8, 9]], index=["foo", "bar", "baz"], columns=["A", "B", "C"], ) tm.assert_frame_equal(result[0], expected) def test_reader_list(all_parsers): data = """index,A,B,C,D foo,2,3,4,5 bar,7,8,9,10 baz,12,13,14,15 qux,12,13,14,15 foo2,12,13,14,15 bar2,12,13,14,15 """ parser = all_parsers kwargs = dict(index_col=0) lines = list(csv.reader(StringIO(data))) reader = TextParser(lines, chunksize=2, **kwargs) expected = parser.read_csv(StringIO(data), **kwargs) chunks = list(reader) tm.assert_frame_equal(chunks[0], expected[:2]) tm.assert_frame_equal(chunks[1], expected[2:4]) tm.assert_frame_equal(chunks[2], expected[4:]) def test_reader_list_skiprows(all_parsers): data = """index,A,B,C,D foo,2,3,4,5 bar,7,8,9,10 baz,12,13,14,15 qux,12,13,14,15 foo2,12,13,14,15 bar2,12,13,14,15 """ parser = all_parsers kwargs = dict(index_col=0) lines = list(csv.reader(StringIO(data))) reader = TextParser(lines, chunksize=2, skiprows=[1], **kwargs) expected = parser.read_csv(StringIO(data), **kwargs) chunks = list(reader) tm.assert_frame_equal(chunks[0], expected[1:3]) def test_iterator_stop_on_chunksize(all_parsers): # gh-3967: stopping iteration when chunksize is specified parser = all_parsers data = """A,B,C foo,1,2,3 bar,4,5,6 baz,7,8,9 """ reader = parser.read_csv(StringIO(data), chunksize=1) result = list(reader) assert len(result) == 3 expected = DataFrame( [[1, 2, 3], [4, 5, 6], [7, 8, 9]], index=["foo", "bar", "baz"], columns=["A", "B", "C"], ) tm.assert_frame_equal(concat(result), expected) @pytest.mark.parametrize( "kwargs", [dict(iterator=True, chunksize=1), dict(iterator=True), dict(chunksize=1)] ) def test_iterator_skipfooter_errors(all_parsers, kwargs): msg = "'skipfooter' not supported for 'iteration'" parser = all_parsers data = "a\n1\n2" with pytest.raises(ValueError, match=msg): parser.read_csv(StringIO(data), skipfooter=1, **kwargs) def 
test_nrows_skipfooter_errors(all_parsers): msg = "'skipfooter' not supported with 'nrows'" data = "a\n1\n2\n3\n4\n5\n6" parser = all_parsers with pytest.raises(ValueError, match=msg): parser.read_csv(StringIO(data), skipfooter=1, nrows=5) @pytest.mark.parametrize( "data,kwargs,expected", [ ( """foo,2,3,4,5 bar,7,8,9,10 baz,12,13,14,15 qux,12,13,14,15 foo2,12,13,14,15 bar2,12,13,14,15 """, dict(index_col=0, names=["index", "A", "B", "C", "D"]), DataFrame( [ [2, 3, 4, 5], [7, 8, 9, 10], [12, 13, 14, 15], [12, 13, 14, 15], [12, 13, 14, 15], [12, 13, 14, 15], ], index=Index(["foo", "bar", "baz", "qux", "foo2", "bar2"], name="index"), columns=["A", "B", "C", "D"], ), ), ( """foo,one,2,3,4,5 foo,two,7,8,9,10 foo,three,12,13,14,15 bar,one,12,13,14,15 bar,two,12,13,14,15 """, dict(index_col=[0, 1], names=["index1", "index2", "A", "B", "C", "D"]), DataFrame( [ [2, 3, 4, 5], [7, 8, 9, 10], [12, 13, 14, 15], [12, 13, 14, 15], [12, 13, 14, 15], ], index=MultiIndex.from_tuples( [ ("foo", "one"), ("foo", "two"), ("foo", "three"), ("bar", "one"), ("bar", "two"), ], names=["index1", "index2"], ), columns=["A", "B", "C", "D"], ), ), ], ) def test_pass_names_with_index(all_parsers, data, kwargs, expected): parser = all_parsers result = parser.read_csv(StringIO(data), **kwargs) tm.assert_frame_equal(result, expected) @pytest.mark.parametrize("index_col", [[0, 1], [1, 0]]) def test_multi_index_no_level_names(all_parsers, index_col): data = """index1,index2,A,B,C,D foo,one,2,3,4,5 foo,two,7,8,9,10 foo,three,12,13,14,15 bar,one,12,13,14,15 bar,two,12,13,14,15 """ headless_data = "\n".join(data.split("\n")[1:]) names = ["A", "B", "C", "D"] parser = all_parsers result = parser.read_csv( StringIO(headless_data), index_col=index_col, header=None, names=names ) expected = parser.read_csv(StringIO(data), index_col=index_col) # No index names in headless data. 
expected.index.names = [None] * 2 tm.assert_frame_equal(result, expected) def test_multi_index_no_level_names_implicit(all_parsers): parser = all_parsers data = """A,B,C,D foo,one,2,3,4,5 foo,two,7,8,9,10 foo,three,12,13,14,15 bar,one,12,13,14,15 bar,two,12,13,14,15 """ result = parser.read_csv(StringIO(data)) expected = DataFrame( [ [2, 3, 4, 5], [7, 8, 9, 10], [12, 13, 14, 15], [12, 13, 14, 15], [12, 13, 14, 15], ], columns=["A", "B", "C", "D"], index=MultiIndex.from_tuples( [ ("foo", "one"), ("foo", "two"), ("foo", "three"), ("bar", "one"), ("bar", "two"), ] ), ) tm.assert_frame_equal(result, expected) @pytest.mark.parametrize( "data,expected,header", [ ("a,b", DataFrame(columns=["a", "b"]), [0]), ( "a,b\nc,d", DataFrame(columns=MultiIndex.from_tuples([("a", "c"), ("b", "d")])), [0, 1], ), ], ) @pytest.mark.parametrize("round_trip", [True, False]) def test_multi_index_blank_df(all_parsers, data, expected, header, round_trip): # see gh-14545 parser = all_parsers data = expected.to_csv(index=False) if round_trip else data result = parser.read_csv(StringIO(data), header=header) tm.assert_frame_equal(result, expected) def test_no_unnamed_index(all_parsers): parser = all_parsers data = """ id c0 c1 c2 0 1 0 a b 1 2 0 c d 2 2 2 e f """ result = parser.read_csv(StringIO(data), sep=" ") expected = DataFrame( [[0, 1, 0, "a", "b"], [1, 2, 0, "c", "d"], [2, 2, 2, "e", "f"]], columns=["Unnamed: 0", "id", "c0", "c1", "c2"], ) tm.assert_frame_equal(result, expected) def test_read_csv_parse_simple_list(all_parsers): parser = all_parsers data = """foo bar baz qux foo foo bar""" result = parser.read_csv(StringIO(data), header=None) expected = DataFrame(["foo", "bar baz", "qux foo", "foo", "bar"]) tm.assert_frame_equal(result, expected) @tm.network def test_url(all_parsers, csv_dir_path): # TODO: FTP testing parser = all_parsers kwargs = dict(sep="\t") url = ( "https://raw.github.com/pandas-dev/pandas/master/" "pandas/tests/io/parser/data/salaries.csv" ) url_result = parser.read_csv(url, **kwargs) local_path = os.path.join(csv_dir_path, "salaries.csv") local_result = parser.read_csv(local_path, **kwargs) tm.assert_frame_equal(url_result, local_result) @pytest.mark.slow def test_local_file(all_parsers, csv_dir_path): parser = all_parsers kwargs = dict(sep="\t") local_path = os.path.join(csv_dir_path, "salaries.csv") local_result = parser.read_csv(local_path, **kwargs) url = "file://localhost/" + local_path try: url_result = parser.read_csv(url, **kwargs) tm.assert_frame_equal(url_result, local_result) except URLError: # Fails on some systems. 
pytest.skip("Failing on: " + " ".join(platform.uname())) def test_path_path_lib(all_parsers): parser = all_parsers df = tm.makeDataFrame() result = tm.round_trip_pathlib(df.to_csv, lambda p: parser.read_csv(p, index_col=0)) tm.assert_frame_equal(df, result) def test_path_local_path(all_parsers): parser = all_parsers df = tm.makeDataFrame() result = tm.round_trip_localpath( df.to_csv, lambda p: parser.read_csv(p, index_col=0) ) tm.assert_frame_equal(df, result) def test_nonexistent_path(all_parsers): # gh-2428: pls no segfault # gh-14086: raise more helpful FileNotFoundError # GH#29233 "File foo" instead of "File b'foo'" parser = all_parsers path = f"{tm.rands(10)}.csv" msg = r"\[Errno 2\]" with pytest.raises(FileNotFoundError, match=msg) as e: parser.read_csv(path) assert path == e.value.filename @td.skip_if_windows # os.chmod does not work in windows def test_no_permission(all_parsers): # GH 23784 parser = all_parsers msg = r"\[Errno 13\]" with tm.ensure_clean() as path: os.chmod(path, 0) # make file unreadable # verify that this process cannot open the file (not running as sudo) try: with open(path): pass pytest.skip("Running as sudo.") except PermissionError: pass with pytest.raises(PermissionError, match=msg) as e: parser.read_csv(path) assert path == e.value.filename def test_missing_trailing_delimiters(all_parsers): parser = all_parsers data = """A,B,C,D 1,2,3,4 1,3,3, 1,4,5""" result = parser.read_csv(StringIO(data)) expected = DataFrame( [[1, 2, 3, 4], [1, 3, 3, np.nan], [1, 4, 5, np.nan]], columns=["A", "B", "C", "D"], ) tm.assert_frame_equal(result, expected) def test_skip_initial_space(all_parsers): data = ( '"09-Apr-2012", "01:10:18.300", 2456026.548822908, 12849, ' "1.00361, 1.12551, 330.65659, 0355626618.16711, 73.48821, " "314.11625, 1917.09447, 179.71425, 80.000, 240.000, -350, " "70.06056, 344.98370, 1, 1, -0.689265, -0.692787, " "0.212036, 14.7674, 41.605, -9999.0, -9999.0, " "-9999.0, -9999.0, -9999.0, -9999.0, 000, 012, 128" ) parser = all_parsers result = parser.read_csv( StringIO(data), names=list(range(33)), header=None, na_values=["-9999.0"], skipinitialspace=True, ) expected = DataFrame( [ [ "09-Apr-2012", "01:10:18.300", 2456026.548822908, 12849, 1.00361, 1.12551, 330.65659, 355626618.16711, 73.48821, 314.11625, 1917.09447, 179.71425, 80.0, 240.0, -350, 70.06056, 344.9837, 1, 1, -0.689265, -0.692787, 0.212036, 14.7674, 41.605, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, 0, 12, 128, ] ] ) tm.assert_frame_equal(result, expected) def test_trailing_delimiters(all_parsers): # see gh-2442 data = """A,B,C 1,2,3, 4,5,6, 7,8,9,""" parser = all_parsers result = parser.read_csv(StringIO(data), index_col=False) expected = DataFrame({"A": [1, 4, 7], "B": [2, 5, 8], "C": [3, 6, 9]}) tm.assert_frame_equal(result, expected) def test_escapechar(all_parsers): # https://stackoverflow.com/questions/13824840/feature-request-for- # pandas-read-csv data = '''SEARCH_TERM,ACTUAL_URL "bra tv bord","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord" "tv p\xc3\xa5 hjul","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord" "SLAGBORD, \\"Bergslagen\\", IKEA:s 1700-tals series","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"''' # noqa parser = all_parsers result = parser.read_csv( StringIO(data), escapechar="\\", quotechar='"', encoding="utf-8" ) assert 
result["SEARCH_TERM"][2] == 'SLAGBORD, "Bergslagen", IKEA:s 1700-tals series' tm.assert_index_equal(result.columns, Index(["SEARCH_TERM", "ACTUAL_URL"])) def test_int64_min_issues(all_parsers): # see gh-2599 parser = all_parsers data = "A,B\n0,0\n0," result = parser.read_csv(StringIO(data)) expected = DataFrame({"A": [0, 0], "B": [0, np.nan]}) tm.assert_frame_equal(result, expected) def test_parse_integers_above_fp_precision(all_parsers): data = """Numbers 17007000002000191 17007000002000191 17007000002000191 17007000002000191 17007000002000192 17007000002000192 17007000002000192 17007000002000192 17007000002000192 17007000002000194""" parser = all_parsers result = parser.read_csv(StringIO(data)) expected = DataFrame( { "Numbers": [ 17007000002000191, 17007000002000191, 17007000002000191, 17007000002000191, 17007000002000192, 17007000002000192, 17007000002000192, 17007000002000192, 17007000002000192, 17007000002000194, ] } ) tm.assert_frame_equal(result, expected) def test_chunks_have_consistent_numerical_type(all_parsers): parser = all_parsers integers = [str(i) for i in range(499999)] data = "a\n" + "\n".join(integers + ["1.0", "2.0"] + integers) # Coercions should work without warnings. with tm.assert_produces_warning(None): result = parser.read_csv(StringIO(data)) assert type(result.a[0]) is np.float64 assert result.a.dtype == np.float def test_warn_if_chunks_have_mismatched_type(all_parsers): warning_type = None parser = all_parsers integers = [str(i) for i in range(499999)] data = "a\n" + "\n".join(integers + ["a", "b"] + integers) # see gh-3866: if chunks are different types and can't # be coerced using numerical types, then issue warning. if parser.engine == "c" and parser.low_memory: warning_type = DtypeWarning with tm.assert_produces_warning(warning_type): df = parser.read_csv(StringIO(data)) assert df.a.dtype == np.object @pytest.mark.parametrize("sep", [" ", r"\s+"]) def test_integer_overflow_bug(all_parsers, sep): # see gh-2601 data = "65248E10 11\n55555E55 22\n" parser = all_parsers result = parser.read_csv(StringIO(data), header=None, sep=sep) expected = DataFrame([[6.5248e14, 11], [5.5555e59, 22]]) tm.assert_frame_equal(result, expected) def test_catch_too_many_names(all_parsers): # see gh-5156 data = """\ 1,2,3 4,,6 7,8,9 10,11,12\n""" parser = all_parsers msg = ( "Too many columns specified: expected 4 and found 3" if parser.engine == "c" else "Number of passed names did not match " "number of header fields in the file" ) with pytest.raises(ValueError, match=msg): parser.read_csv(StringIO(data), header=0, names=["a", "b", "c", "d"]) def test_ignore_leading_whitespace(all_parsers): # see gh-3374, gh-6607 parser = all_parsers data = " a b c\n 1 2 3\n 4 5 6\n 7 8 9" result = parser.read_csv(StringIO(data), sep=r"\s+") expected = DataFrame({"a": [1, 4, 7], "b": [2, 5, 8], "c": [3, 6, 9]}) tm.assert_frame_equal(result, expected) def test_chunk_begins_with_newline_whitespace(all_parsers): # see gh-10022 parser = all_parsers data = "\n hello\nworld\n" result = parser.read_csv(StringIO(data), header=None) expected = DataFrame([" hello", "world"]) tm.assert_frame_equal(result, expected) def test_empty_with_index(all_parsers): # see gh-10184 data = "x,y" parser = all_parsers result = parser.read_csv(StringIO(data), index_col=0) expected = DataFrame(columns=["y"], index=Index([], name="x")) tm.assert_frame_equal(result, expected) def test_empty_with_multi_index(all_parsers): # see gh-10467 data = "x,y,z" parser = all_parsers result = parser.read_csv(StringIO(data), index_col=["x", 
"y"]) expected = DataFrame( columns=["z"], index=MultiIndex.from_arrays([[]] * 2, names=["x", "y"]) ) tm.assert_frame_equal(result, expected) def test_empty_with_reversed_multi_index(all_parsers): data = "x,y,z" parser = all_parsers result = parser.read_csv(StringIO(data), index_col=[1, 0]) expected = DataFrame( columns=["z"], index=MultiIndex.from_arrays([[]] * 2, names=["y", "x"]) ) tm.assert_frame_equal(result, expected) def test_float_parser(all_parsers): # see gh-9565 parser = all_parsers data = "45e-1,4.5,45.,inf,-inf" result = parser.read_csv(StringIO(data), header=None) expected = DataFrame([[float(s) for s in data.split(",")]]) tm.assert_frame_equal(result, expected) def test_scientific_no_exponent(all_parsers): # see gh-12215 df = DataFrame.from_dict({"w": ["2e"], "x": ["3E"], "y": ["42e"], "z": ["632E"]}) data = df.to_csv(index=False) parser = all_parsers for precision in parser.float_precision_choices: df_roundtrip = parser.read_csv(StringIO(data), float_precision=precision) tm.assert_frame_equal(df_roundtrip, df) @pytest.mark.parametrize("conv", [None, np.int64, np.uint64]) def test_int64_overflow(all_parsers, conv): data = """ID 00013007854817840016671868 00013007854817840016749251 00013007854817840016754630 00013007854817840016781876 00013007854817840017028824 00013007854817840017963235 00013007854817840018860166""" parser = all_parsers if conv is None: # 13007854817840016671868 > UINT64_MAX, so this # will overflow and return object as the dtype. result = parser.read_csv(StringIO(data)) expected = DataFrame( [ "00013007854817840016671868", "00013007854817840016749251", "00013007854817840016754630", "00013007854817840016781876", "00013007854817840017028824", "00013007854817840017963235", "00013007854817840018860166", ], columns=["ID"], ) tm.assert_frame_equal(result, expected) else: # 13007854817840016671868 > UINT64_MAX, so attempts # to cast to either int64 or uint64 will result in # an OverflowError being raised. msg = ( "(Python int too large to convert to C long)|" "(long too big to convert)|" "(int too big to convert)" ) with pytest.raises(OverflowError, match=msg): parser.read_csv(StringIO(data), converters={"ID": conv}) @pytest.mark.parametrize( "val", [np.iinfo(np.uint64).max, np.iinfo(np.int64).max, np.iinfo(np.int64).min] ) def test_int64_uint64_range(all_parsers, val): # These numbers fall right inside the int64-uint64 # range, so they should be parsed as string. parser = all_parsers result = parser.read_csv(StringIO(str(val)), header=None) expected = DataFrame([val]) tm.assert_frame_equal(result, expected) @pytest.mark.parametrize( "val", [np.iinfo(np.uint64).max + 1, np.iinfo(np.int64).min - 1] ) def test_outside_int64_uint64_range(all_parsers, val): # These numbers fall just outside the int64-uint64 # range, so they should be parsed as string. parser = all_parsers result = parser.read_csv(StringIO(str(val)), header=None) expected = DataFrame([str(val)]) tm.assert_frame_equal(result, expected) @pytest.mark.parametrize("exp_data", [[str(-1), str(2 ** 63)], [str(2 ** 63), str(-1)]]) def test_numeric_range_too_wide(all_parsers, exp_data): # No numerical dtype can hold both negative and uint64 # values, so they should be cast as string. 
parser = all_parsers data = "\n".join(exp_data) expected = DataFrame(exp_data) result = parser.read_csv(StringIO(data), header=None) tm.assert_frame_equal(result, expected) @pytest.mark.parametrize("iterator", [True, False]) def test_empty_with_nrows_chunksize(all_parsers, iterator): # see gh-9535 parser = all_parsers expected = DataFrame(columns=["foo", "bar"]) nrows = 10 data = StringIO("foo,bar\n") if iterator: result = next(iter(parser.read_csv(data, chunksize=nrows))) else: result = parser.read_csv(data, nrows=nrows) tm.assert_frame_equal(result, expected) @pytest.mark.parametrize( "data,kwargs,expected,msg", [ # gh-10728: WHITESPACE_LINE ( "a,b,c\n4,5,6\n ", dict(), DataFrame([[4, 5, 6]], columns=["a", "b", "c"]), None, ), # gh-10548: EAT_LINE_COMMENT ( "a,b,c\n4,5,6\n#comment", dict(comment="#"), DataFrame([[4, 5, 6]], columns=["a", "b", "c"]), None, ), # EAT_CRNL_NOP ( "a,b,c\n4,5,6\n\r", dict(), DataFrame([[4, 5, 6]], columns=["a", "b", "c"]), None, ), # EAT_COMMENT ( "a,b,c\n4,5,6#comment", dict(comment="#"), DataFrame([[4, 5, 6]], columns=["a", "b", "c"]), None, ), # SKIP_LINE ( "a,b,c\n4,5,6\nskipme", dict(skiprows=[2]), DataFrame([[4, 5, 6]], columns=["a", "b", "c"]), None, ), # EAT_LINE_COMMENT ( "a,b,c\n4,5,6\n#comment", dict(comment="#", skip_blank_lines=False), DataFrame([[4, 5, 6]], columns=["a", "b", "c"]), None, ), # IN_FIELD ( "a,b,c\n4,5,6\n ", dict(skip_blank_lines=False), DataFrame([["4", 5, 6], [" ", None, None]], columns=["a", "b", "c"]), None, ), # EAT_CRNL ( "a,b,c\n4,5,6\n\r", dict(skip_blank_lines=False), DataFrame([[4, 5, 6], [None, None, None]], columns=["a", "b", "c"]), None, ), # ESCAPED_CHAR ( "a,b,c\n4,5,6\n\\", dict(escapechar="\\"), None, "(EOF following escape character)|(unexpected end of data)", ), # ESCAPE_IN_QUOTED_FIELD ( 'a,b,c\n4,5,6\n"\\', dict(escapechar="\\"), None, "(EOF inside string starting at row 2)|(unexpected end of data)", ), # IN_QUOTED_FIELD ( 'a,b,c\n4,5,6\n"', dict(escapechar="\\"), None, "(EOF inside string starting at row 2)|(unexpected end of data)", ), ], ids=[ "whitespace-line", "eat-line-comment", "eat-crnl-nop", "eat-comment", "skip-line", "eat-line-comment", "in-field", "eat-crnl", "escaped-char", "escape-in-quoted-field", "in-quoted-field", ], ) def test_eof_states(all_parsers, data, kwargs, expected, msg): # see gh-10728, gh-10548 parser = all_parsers if expected is None: with pytest.raises(ParserError, match=msg): parser.read_csv(StringIO(data), **kwargs) else: result = parser.read_csv(StringIO(data), **kwargs) tm.assert_frame_equal(result, expected) @pytest.mark.parametrize("usecols", [None, [0, 1], ["a", "b"]]) def test_uneven_lines_with_usecols(all_parsers, usecols): # see gh-12203 parser = all_parsers data = r"""a,b,c 0,1,2 3,4,5,6,7 8,9,10""" if usecols is None: # Make sure that an error is still raised # when the "usecols" parameter is not provided. msg = r"Expected \d+ fields in line \d+, saw \d+" with pytest.raises(ParserError, match=msg): parser.read_csv(StringIO(data)) else: expected = DataFrame({"a": [0, 3, 8], "b": [1, 4, 9]}) result = parser.read_csv(StringIO(data), usecols=usecols) tm.assert_frame_equal(result, expected) @pytest.mark.parametrize( "data,kwargs,expected", [ # First, check to see that the response of parser when faced with no # provided columns raises the correct error, with or without usecols. 
("", dict(), None), ("", dict(usecols=["X"]), None), ( ",,", dict(names=["Dummy", "X", "Dummy_2"], usecols=["X"]), DataFrame(columns=["X"], index=[0], dtype=np.float64), ), ( "", dict(names=["Dummy", "X", "Dummy_2"], usecols=["X"]), DataFrame(columns=["X"]), ), ], ) def test_read_empty_with_usecols(all_parsers, data, kwargs, expected): # see gh-12493 parser = all_parsers if expected is None: msg = "No columns to parse from file" with pytest.raises(EmptyDataError, match=msg): parser.read_csv(StringIO(data), **kwargs) else: result = parser.read_csv(StringIO(data), **kwargs) tm.assert_frame_equal(result, expected) @pytest.mark.parametrize( "kwargs,expected", [ # gh-8661, gh-8679: this should ignore six lines, including # lines with trailing whitespace and blank lines. ( dict( header=None, delim_whitespace=True, skiprows=[0, 1, 2, 3, 5, 6], skip_blank_lines=True, ), DataFrame([[1.0, 2.0, 4.0], [5.1, np.nan, 10.0]]), ), # gh-8983: test skipping set of rows after a row with trailing spaces. ( dict( delim_whitespace=True, skiprows=[1, 2, 3, 5, 6], skip_blank_lines=True ), DataFrame({"A": [1.0, 5.1], "B": [2.0, np.nan], "C": [4.0, 10]}), ), ], ) def test_trailing_spaces(all_parsers, kwargs, expected): data = "A B C \nrandom line with trailing spaces \nskip\n1,2,3\n1,2.,4.\nrandom line with trailing tabs\t\t\t\n \n5.1,NaN,10.0\n" # noqa parser = all_parsers result = parser.read_csv(StringIO(data.replace(",", " ")), **kwargs) tm.assert_frame_equal(result, expected) def test_raise_on_sep_with_delim_whitespace(all_parsers): # see gh-6607 data = "a b c\n1 2 3" parser = all_parsers with pytest.raises(ValueError, match="you can only specify one"): parser.read_csv(StringIO(data), sep=r"\s", delim_whitespace=True) @pytest.mark.parametrize("delim_whitespace", [True, False]) def test_single_char_leading_whitespace(all_parsers, delim_whitespace): # see gh-9710 parser = all_parsers data = """\ MyColumn a b a b\n""" expected = DataFrame({"MyColumn": list("abab")}) result = parser.read_csv( StringIO(data), skipinitialspace=True, delim_whitespace=delim_whitespace ) tm.assert_frame_equal(result, expected) @pytest.mark.parametrize( "sep,skip_blank_lines,exp_data", [ (",", True, [[1.0, 2.0, 4.0], [5.0, np.nan, 10.0], [-70.0, 0.4, 1.0]]), (r"\s+", True, [[1.0, 2.0, 4.0], [5.0, np.nan, 10.0], [-70.0, 0.4, 1.0]]), ( ",", False, [ [1.0, 2.0, 4.0], [np.nan, np.nan, np.nan], [np.nan, np.nan, np.nan], [5.0, np.nan, 10.0], [np.nan, np.nan, np.nan], [-70.0, 0.4, 1.0], ], ), ], ) def test_empty_lines(all_parsers, sep, skip_blank_lines, exp_data): parser = all_parsers data = """\ A,B,C 1,2.,4. 5.,NaN,10.0 -70,.4,1 """ if sep == r"\s+": data = data.replace(",", " ") result = parser.read_csv(StringIO(data), sep=sep, skip_blank_lines=skip_blank_lines) expected = DataFrame(exp_data, columns=["A", "B", "C"]) tm.assert_frame_equal(result, expected) def test_whitespace_lines(all_parsers): parser = all_parsers data = """ \t \t\t \t A,B,C \t 1,2.,4. 
5.,NaN,10.0 """ expected = DataFrame([[1, 2.0, 4.0], [5.0, np.nan, 10.0]], columns=["A", "B", "C"]) result = parser.read_csv(StringIO(data)) tm.assert_frame_equal(result, expected) @pytest.mark.parametrize( "data,expected", [ ( """ A B C D a 1 2 3 4 b 1 2 3 4 c 1 2 3 4 """, DataFrame( [[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4]], columns=["A", "B", "C", "D"], index=["a", "b", "c"], ), ), ( " a b c\n1 2 3 \n4 5 6\n 7 8 9", DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]], columns=["a", "b", "c"]), ), ], ) def test_whitespace_regex_separator(all_parsers, data, expected): # see gh-6607 parser = all_parsers result = parser.read_csv(StringIO(data), sep=r"\s+") tm.assert_frame_equal(result, expected) def test_verbose_read(all_parsers, capsys): parser = all_parsers data = """a,b,c,d one,1,2,3 one,1,2,3 ,1,2,3 one,1,2,3 ,1,2,3 ,1,2,3 one,1,2,3 two,1,2,3""" # Engines are verbose in different ways. parser.read_csv(StringIO(data), verbose=True) captured = capsys.readouterr() if parser.engine == "c": assert "Tokenization took:" in captured.out assert "Parser memory cleanup took:" in captured.out else: # Python engine assert captured.out == "Filled 3 NA values in column a\n" def test_verbose_read2(all_parsers, capsys): parser = all_parsers data = """a,b,c,d one,1,2,3 two,1,2,3 three,1,2,3 four,1,2,3 five,1,2,3 ,1,2,3 seven,1,2,3 eight,1,2,3""" parser.read_csv(StringIO(data), verbose=True, index_col=0) captured = capsys.readouterr() # Engines are verbose in different ways. if parser.engine == "c": assert "Tokenization took:" in captured.out assert "Parser memory cleanup took:" in captured.out else: # Python engine assert captured.out == "Filled 1 NA values in column a\n" def test_iteration_open_handle(all_parsers): parser = all_parsers kwargs = dict(squeeze=True, header=None) with tm.ensure_clean() as path: with open(path, "w") as f: f.write("AAA\nBBB\nCCC\nDDD\nEEE\nFFF\nGGG") with open(path, "r") as f: for line in f: if "CCC" in line: break result = parser.read_csv(f, **kwargs) expected = Series(["DDD", "EEE", "FFF", "GGG"], name=0) tm.assert_series_equal(result, expected) @pytest.mark.parametrize( "data,thousands,decimal", [ ( """A|B|C 1|2,334.01|5 10|13|10. 
""", ",", ".", ), ( """A|B|C 1|2.334,01|5 10|13|10, """, ".", ",", ), ], ) def test_1000_sep_with_decimal(all_parsers, data, thousands, decimal): parser = all_parsers expected = DataFrame({"A": [1, 10], "B": [2334.01, 13], "C": [5, 10.0]}) result = parser.read_csv( StringIO(data), sep="|", thousands=thousands, decimal=decimal ) tm.assert_frame_equal(result, expected) def test_euro_decimal_format(all_parsers): parser = all_parsers data = """Id;Number1;Number2;Text1;Text2;Number3 1;1521,1541;187101,9543;ABC;poi;4,738797819 2;121,12;14897,76;DEF;uyt;0,377320872 3;878,158;108013,434;GHI;rez;2,735694704""" result = parser.read_csv(StringIO(data), sep=";", decimal=",") expected = DataFrame( [ [1, 1521.1541, 187101.9543, "ABC", "poi", 4.738797819], [2, 121.12, 14897.76, "DEF", "uyt", 0.377320872], [3, 878.158, 108013.434, "GHI", "rez", 2.735694704], ], columns=["Id", "Number1", "Number2", "Text1", "Text2", "Number3"], ) tm.assert_frame_equal(result, expected) @pytest.mark.parametrize("na_filter", [True, False]) def test_inf_parsing(all_parsers, na_filter): parser = all_parsers data = """\ ,A a,inf b,-inf c,+Inf d,-Inf e,INF f,-INF g,+INf h,-INf i,inF j,-inF""" expected = DataFrame( {"A": [float("inf"), float("-inf")] * 5}, index=["a", "b", "c", "d", "e", "f", "g", "h", "i", "j"], ) result = parser.read_csv(StringIO(data), index_col=0, na_filter=na_filter) tm.assert_frame_equal(result, expected) @pytest.mark.parametrize("na_filter", [True, False]) def test_infinity_parsing(all_parsers, na_filter): parser = all_parsers data = """\ ,A a,Infinity b,-Infinity c,+Infinity """ expected = DataFrame( {"A": [float("infinity"), float("-infinity"), float("+infinity")]}, index=["a", "b", "c"], ) result = parser.read_csv(StringIO(data), index_col=0, na_filter=na_filter) tm.assert_frame_equal(result, expected) @pytest.mark.parametrize("nrows", [0, 1, 2, 3, 4, 5]) def test_raise_on_no_columns(all_parsers, nrows): parser = all_parsers data = "\n" * nrows msg = "No columns to parse from file" with pytest.raises(EmptyDataError, match=msg): parser.read_csv(StringIO(data)) def test_memory_map(all_parsers, csv_dir_path): mmap_file = os.path.join(csv_dir_path, "test_mmap.csv") parser = all_parsers expected = DataFrame( {"a": [1, 2, 3], "b": ["one", "two", "three"], "c": ["I", "II", "III"]} ) result = parser.read_csv(mmap_file, memory_map=True) tm.assert_frame_equal(result, expected) def test_null_byte_char(all_parsers): # see gh-2741 data = "\x00,foo" names = ["a", "b"] parser = all_parsers if parser.engine == "c": expected = DataFrame([[np.nan, "foo"]], columns=names) out = parser.read_csv(StringIO(data), names=names) tm.assert_frame_equal(out, expected) else: msg = "NULL byte detected" with pytest.raises(ParserError, match=msg): parser.read_csv(StringIO(data), names=names) def test_temporary_file(all_parsers): # see gh-13398 parser = all_parsers data = "0 0" with tm.ensure_clean(mode="w+", return_filelike=True) as new_file: new_file.write(data) new_file.flush() new_file.seek(0) result = parser.read_csv(new_file, sep=r"\s+", header=None) expected = DataFrame([[0, 0]]) tm.assert_frame_equal(result, expected) def test_internal_eof_byte(all_parsers): # see gh-5500 parser = all_parsers data = "a,b\n1\x1a,2" expected = DataFrame([["1\x1a", 2]], columns=["a", "b"]) result = parser.read_csv(StringIO(data)) tm.assert_frame_equal(result, expected) def test_internal_eof_byte_to_file(all_parsers): # see gh-16559 parser = all_parsers data = b'c1,c2\r\n"test \x1a test", test\r\n' expected = DataFrame([["test \x1a test", " test"]], 
columns=["c1", "c2"]) path = f"__{tm.rands(10)}__.csv" with tm.ensure_clean(path) as path: with open(path, "wb") as f: f.write(data) result = parser.read_csv(path) tm.assert_frame_equal(result, expected) def test_sub_character(all_parsers, csv_dir_path): # see gh-16893 filename = os.path.join(csv_dir_path, "sub_char.csv") expected = DataFrame([[1, 2, 3]], columns=["a", "\x1ab", "c"]) parser = all_parsers result = parser.read_csv(filename) tm.assert_frame_equal(result, expected) def test_file_handle_string_io(all_parsers): # gh-14418 # # Don't close user provided file handles. parser = all_parsers data = "a,b\n1,2" fh = StringIO(data) parser.read_csv(fh) assert not fh.closed def test_file_handles_with_open(all_parsers, csv1): # gh-14418 # # Don't close user provided file handles. parser = all_parsers for mode in ["r", "rb"]: with open(csv1, mode) as f: parser.read_csv(f) assert not f.closed def test_invalid_file_buffer_class(all_parsers): # see gh-15337 class InvalidBuffer: pass parser = all_parsers msg = "Invalid file path or buffer object type" with pytest.raises(ValueError, match=msg): parser.read_csv(InvalidBuffer()) def test_invalid_file_buffer_mock(all_parsers): # see gh-15337 parser = all_parsers msg = "Invalid file path or buffer object type" class Foo: pass with pytest.raises(ValueError, match=msg): parser.read_csv(Foo()) def test_valid_file_buffer_seems_invalid(all_parsers): # gh-16135: we want to ensure that "tell" and "seek" # aren't actually being used when we call `read_csv` # # Thus, while the object may look "invalid" (these # methods are attributes of the `StringIO` class), # it is still a valid file-object for our purposes. class NoSeekTellBuffer(StringIO): def tell(self): raise AttributeError("No tell method") def seek(self, pos, whence=0): raise AttributeError("No seek method") data = "a\n1" parser = all_parsers expected = DataFrame({"a": [1]}) result = parser.read_csv(NoSeekTellBuffer(data)) tm.assert_frame_equal(result, expected) @pytest.mark.parametrize( "kwargs", [dict(), dict(error_bad_lines=True)], # Default is True. # Explicitly pass in. ) @pytest.mark.parametrize( "warn_kwargs", [dict(), dict(warn_bad_lines=True), dict(warn_bad_lines=False)] ) def test_error_bad_lines(all_parsers, kwargs, warn_kwargs): # see gh-15925 parser = all_parsers kwargs.update(**warn_kwargs) data = "a\n1\n1,2,3\n4\n5,6,7" msg = "Expected 1 fields in line 3, saw 3" with pytest.raises(ParserError, match=msg): parser.read_csv(StringIO(data), **kwargs) def test_warn_bad_lines(all_parsers, capsys): # see gh-15925 parser = all_parsers data = "a\n1\n1,2,3\n4\n5,6,7" expected = DataFrame({"a": [1, 4]}) result = parser.read_csv(StringIO(data), error_bad_lines=False, warn_bad_lines=True) tm.assert_frame_equal(result, expected) captured = capsys.readouterr() assert "Skipping line 3" in captured.err assert "Skipping line 5" in captured.err def test_suppress_error_output(all_parsers, capsys): # see gh-15925 parser = all_parsers data = "a\n1\n1,2,3\n4\n5,6,7" expected = DataFrame({"a": [1, 4]}) result = parser.read_csv( StringIO(data), error_bad_lines=False, warn_bad_lines=False ) tm.assert_frame_equal(result, expected) captured = capsys.readouterr() assert captured.err == "" @pytest.mark.parametrize("filename", ["sé-es-vé.csv", "ru-sй.csv", "中文文件名.csv"]) def test_filename_with_special_chars(all_parsers, filename): # see gh-15086. 
parser = all_parsers df = DataFrame({"a": [1, 2, 3]}) with tm.ensure_clean(filename) as path: df.to_csv(path, index=False) result = parser.read_csv(path) tm.assert_frame_equal(result, df) def test_read_csv_memory_growth_chunksize(all_parsers): # see gh-24805 # # Let's just make sure that we don't crash # as we iteratively process all chunks. parser = all_parsers with tm.ensure_clean() as path: with open(path, "w") as f: for i in range(1000): f.write(str(i) + "\n") result = parser.read_csv(path, chunksize=20) for _ in result: pass def test_read_csv_raises_on_header_prefix(all_parsers): # gh-27394 parser = all_parsers msg = "Argument prefix must be None if argument header is not None" s = StringIO("0,1\n2,3") with pytest.raises(ValueError, match=msg): parser.read_csv(s, header=0, prefix="_X") def test_read_table_equivalency_to_read_csv(all_parsers): # see gh-21948 # As of 0.25.0, read_table is undeprecated parser = all_parsers data = "a\tb\n1\t2\n3\t4" expected = parser.read_csv(StringIO(data), sep="\t") result = parser.read_table(StringIO(data)) tm.assert_frame_equal(result, expected) def test_first_row_bom(all_parsers): # see gh-26545 parser = all_parsers data = '''\ufeff"Head1" "Head2" "Head3"''' result = parser.read_csv(StringIO(data), delimiter="\t") expected = DataFrame(columns=["Head1", "Head2", "Head3"]) tm.assert_frame_equal(result, expected) def test_integer_precision(all_parsers): # Gh 7072 s = """1,1;0;0;0;1;1;3844;3844;3844;1;1;1;1;1;1;0;0;1;1;0;0,,,4321583677327450765 5,1;0;0;0;1;1;843;843;843;1;1;1;1;1;1;0;0;1;1;0;0,64.0,;,4321113141090630389""" parser = all_parsers result = parser.read_csv(StringIO(s), header=None)[4] expected = Series([4321583677327450765, 4321113141090630389], name=4) tm.assert_series_equal(result, expected) def test_file_descriptor_leak(all_parsers): # GH 31488 parser = all_parsers with tm.ensure_clean() as path: def test(): with pytest.raises(EmptyDataError, match="No columns to parse from file"): parser.read_csv(path) td.check_file_leaks(test)() @pytest.mark.parametrize("nrows", range(1, 6)) def test_blank_lines_between_header_and_data_rows(all_parsers, nrows): # GH 28071 ref = DataFrame( [[np.nan, np.nan], [np.nan, np.nan], [1, 2], [np.nan, np.nan], [3, 4]], columns=list("ab"), ) csv = "\nheader\n\na,b\n\n\n1,2\n\n3,4" parser = all_parsers df = parser.read_csv(StringIO(csv), header=3, nrows=nrows, skip_blank_lines=False) tm.assert_frame_equal(df, ref[:nrows]) def test_no_header_two_extra_columns(all_parsers): # GH 26218 column_names = ["one", "two", "three"] ref = DataFrame([["foo", "bar", "baz"]], columns=column_names) stream = StringIO("foo,bar,baz,bam,blah") parser = all_parsers df = parser.read_csv(stream, header=None, names=column_names, index_col=False) tm.assert_frame_equal(df, ref)
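The chunked-reading tests above (for example test_iterator_stop_on_chunksize and test_read_csv_memory_growth_chunksize) rely on read_csv returning an iterator of DataFrames when chunksize is passed. As a rough, stand-alone sketch of that pattern — not part of the test suite; the tiny in-memory CSV and the chunk size of 4 are made up for the example — it looks like this:

from io import StringIO

import pandas as pd

# Ten small rows are enough to show the mechanics; real inputs would be files.
csv_data = StringIO("a,b\n" + "\n".join(f"{i},{i * 2}" for i in range(10)))

# Passing chunksize makes read_csv return an iterator of DataFrames rather
# than a single frame, which keeps peak memory bounded for large inputs.
reader = pd.read_csv(csv_data, chunksize=4)
chunks = list(reader)

assert [len(c) for c in chunks] == [4, 4, 2]
result = pd.concat(chunks, ignore_index=True)
assert len(result) == 10

Concatenating the chunks reproduces what a single read_csv call would return, which is exactly the equivalence the tests assert with tm.assert_frame_equal(concat(reader), result).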
TomAugspurger/pandas
pandas/tests/io/parser/test_common.py
pandas/plotting/_misc.py
""" Helper functions to generate range-like data for DatetimeArray (and possibly TimedeltaArray/PeriodArray) """ from typing import Union import numpy as np from pandas._libs.tslibs import OutOfBoundsDatetime, Timedelta, Timestamp from pandas.tseries.offsets import DateOffset def generate_regular_range( start: Union[Timestamp, Timedelta], end: Union[Timestamp, Timedelta], periods: int, freq: DateOffset, ): """ Generate a range of dates or timestamps with the spans between dates described by the given `freq` DateOffset. Parameters ---------- start : Timedelta, Timestamp or None First point of produced date range. end : Timedelta, Timestamp or None Last point of produced date range. periods : int Number of periods in produced date range. freq : Tick Describes space between dates in produced date range. Returns ------- ndarray[np.int64] Representing nanoseconds. """ start = start.value if start is not None else None end = end.value if end is not None else None stride = freq.nanos if periods is None: b = start # cannot just use e = Timestamp(end) + 1 because arange breaks when # stride is too large, see GH10887 e = b + (end - b) // stride * stride + stride // 2 + 1 elif start is not None: b = start e = _generate_range_overflow_safe(b, periods, stride, side="start") elif end is not None: e = end + stride b = _generate_range_overflow_safe(e, periods, stride, side="end") else: raise ValueError( "at least 'start' or 'end' should be specified if a 'period' is given." ) with np.errstate(over="raise"): # If the range is sufficiently large, np.arange may overflow # and incorrectly return an empty array if not caught. try: values = np.arange(b, e, stride, dtype=np.int64) except FloatingPointError: xdr = [b] while xdr[-1] != e: xdr.append(xdr[-1] + stride) values = np.array(xdr[:-1], dtype=np.int64) return values def _generate_range_overflow_safe( endpoint: int, periods: int, stride: int, side: str = "start" ) -> int: """ Calculate the second endpoint for passing to np.arange, checking to avoid an integer overflow. Catch OverflowError and re-raise as OutOfBoundsDatetime. Parameters ---------- endpoint : int nanosecond timestamp of the known endpoint of the desired range periods : int number of periods in the desired range stride : int nanoseconds between periods in the desired range side : {'start', 'end'} which end of the range `endpoint` refers to Returns ------- other_end : int Raises ------ OutOfBoundsDatetime """ # GH#14187 raise instead of incorrectly wrapping around assert side in ["start", "end"] i64max = np.uint64(np.iinfo(np.int64).max) msg = f"Cannot generate range with {side}={endpoint} and periods={periods}" with np.errstate(over="raise"): # if periods * strides cannot be multiplied within the *uint64* bounds, # we cannot salvage the operation by recursing, so raise try: addend = np.uint64(periods) * np.uint64(np.abs(stride)) except FloatingPointError as err: raise OutOfBoundsDatetime(msg) from err if np.abs(addend) <= i64max: # relatively easy case without casting concerns return _generate_range_overflow_safe_signed(endpoint, periods, stride, side) elif (endpoint > 0 and side == "start" and stride > 0) or ( endpoint < 0 and side == "end" and stride > 0 ): # no chance of not-overflowing raise OutOfBoundsDatetime(msg) elif side == "end" and endpoint > i64max and endpoint - stride <= i64max: # in _generate_regular_range we added `stride` thereby overflowing # the bounds. Adjust to fix this. 
return _generate_range_overflow_safe( endpoint - stride, periods - 1, stride, side ) # split into smaller pieces mid_periods = periods // 2 remaining = periods - mid_periods assert 0 < remaining < periods, (remaining, periods, endpoint, stride) midpoint = _generate_range_overflow_safe(endpoint, mid_periods, stride, side) return _generate_range_overflow_safe(midpoint, remaining, stride, side) def _generate_range_overflow_safe_signed( endpoint: int, periods: int, stride: int, side: str ) -> int: """ A special case for _generate_range_overflow_safe where `periods * stride` can be calculated without overflowing int64 bounds. """ assert side in ["start", "end"] if side == "end": stride *= -1 with np.errstate(over="raise"): addend = np.int64(periods) * np.int64(stride) try: # easy case with no overflows return np.int64(endpoint) + addend except (FloatingPointError, OverflowError): # with endpoint negative and addend positive we risk # FloatingPointError; with reversed signed we risk OverflowError pass # if stride and endpoint had opposite signs, then endpoint + addend # should never overflow. so they must have the same signs assert (stride > 0 and endpoint >= 0) or (stride < 0 and endpoint <= 0) if stride > 0: # watch out for very special case in which we just slightly # exceed implementation bounds, but when passing the result to # np.arange will get a result slightly within the bounds result = np.uint64(endpoint) + np.uint64(addend) i64max = np.uint64(np.iinfo(np.int64).max) assert result > i64max if result <= i64max + np.uint64(stride): return result raise OutOfBoundsDatetime( f"Cannot generate range with {side}={endpoint} and periods={periods}" )
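The overflow handling in _generate_range_overflow_safe leans on a NumPy detail: scalar integer arithmetic reports overflow through the floating-point error state, so np.errstate(over="raise") turns a silent wrap-around into a catchable FloatingPointError. The toy guard below sketches that one trick only, not the pandas helper itself; the function name checked_span and the DAY_NS constant are invented for the example.

import numpy as np


def checked_span(periods: int, stride_ns: int) -> int:
    # Multiply as uint64 scalars; under errstate(over="raise"), an overflow
    # surfaces as FloatingPointError instead of silently wrapping around.
    with np.errstate(over="raise"):
        try:
            return int(np.uint64(periods) * np.uint64(abs(stride_ns)))
        except FloatingPointError as err:
            raise OverflowError(
                f"periods={periods} with stride={stride_ns}ns exceeds uint64"
            ) from err


DAY_NS = 24 * 3600 * 10 ** 9  # one day expressed in nanoseconds

print(checked_span(10, DAY_NS))  # 864000000000000, fits comfortably
try:
    checked_span(2 ** 40, DAY_NS)  # roughly 9.5e25, far outside uint64
except OverflowError as exc:
    print("caught:", exc)

When the product does fit, the real helper still has to keep the endpoint arithmetic inside int64, which is why it recursively splits periods into mid_periods and remaining halves as shown above.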
""" Tests that work on both the Python and C engines but do not have a specific classification into the other test modules. """ import codecs import csv from datetime import datetime from io import StringIO import os import platform from urllib.error import URLError import numpy as np import pytest from pandas._libs.tslib import Timestamp from pandas.errors import DtypeWarning, EmptyDataError, ParserError import pandas.util._test_decorators as td from pandas import DataFrame, Index, MultiIndex, Series, compat, concat import pandas._testing as tm from pandas.io.parsers import CParserWrapper, TextFileReader, TextParser def test_override_set_noconvert_columns(): # see gh-17351 # # Usecols needs to be sorted in _set_noconvert_columns based # on the test_usecols_with_parse_dates test from test_usecols.py class MyTextFileReader(TextFileReader): def __init__(self): self._currow = 0 self.squeeze = False class MyCParserWrapper(CParserWrapper): def _set_noconvert_columns(self): if self.usecols_dtype == "integer": # self.usecols is a set, which is documented as unordered # but in practice, a CPython set of integers is sorted. # In other implementations this assumption does not hold. # The following code simulates a different order, which # before GH 17351 would cause the wrong columns to be # converted via the parse_dates parameter self.usecols = list(self.usecols) self.usecols.reverse() return CParserWrapper._set_noconvert_columns(self) data = """a,b,c,d,e 0,1,20140101,0900,4 0,1,20140102,1000,4""" parse_dates = [[1, 2]] cols = { "a": [0, 0], "c_d": [Timestamp("2014-01-01 09:00:00"), Timestamp("2014-01-02 10:00:00")], } expected = DataFrame(cols, columns=["c_d", "a"]) parser = MyTextFileReader() parser.options = { "usecols": [0, 2, 3], "parse_dates": parse_dates, "delimiter": ",", } parser._engine = MyCParserWrapper(StringIO(data), **parser.options) result = parser.read() tm.assert_frame_equal(result, expected) def test_empty_decimal_marker(all_parsers): data = """A|B|C 1|2,334|5 10|13|10. """ # Parsers support only length-1 decimals msg = "Only length-1 decimal markers supported" parser = all_parsers with pytest.raises(ValueError, match=msg): parser.read_csv(StringIO(data), decimal="") def test_bad_stream_exception(all_parsers, csv_dir_path): # see gh-13652 # # This test validates that both the Python engine and C engine will # raise UnicodeDecodeError instead of C engine raising ParserError # and swallowing the exception that caused read to fail. path = os.path.join(csv_dir_path, "sauron.SHIFT_JIS.csv") codec = codecs.lookup("utf-8") utf8 = codecs.lookup("utf-8") parser = all_parsers msg = "'utf-8' codec can't decode byte" # Stream must be binary UTF8. 
with open(path, "rb") as handle, codecs.StreamRecoder( handle, utf8.encode, utf8.decode, codec.streamreader, codec.streamwriter ) as stream: with pytest.raises(UnicodeDecodeError, match=msg): parser.read_csv(stream) def test_read_csv_local(all_parsers, csv1): prefix = "file:///" if compat.is_platform_windows() else "file://" parser = all_parsers fname = prefix + str(os.path.abspath(csv1)) result = parser.read_csv(fname, index_col=0, parse_dates=True) expected = DataFrame( [ [0.980269, 3.685731, -0.364216805298, -1.159738], [1.047916, -0.041232, -0.16181208307, 0.212549], [0.498581, 0.731168, -0.537677223318, 1.346270], [1.120202, 1.567621, 0.00364077397681, 0.675253], [-0.487094, 0.571455, -1.6116394093, 0.103469], [0.836649, 0.246462, 0.588542635376, 1.062782], [-0.157161, 1.340307, 1.1957779562, -1.097007], ], columns=["A", "B", "C", "D"], index=Index( [ datetime(2000, 1, 3), datetime(2000, 1, 4), datetime(2000, 1, 5), datetime(2000, 1, 6), datetime(2000, 1, 7), datetime(2000, 1, 10), datetime(2000, 1, 11), ], name="index", ), ) tm.assert_frame_equal(result, expected) def test_1000_sep(all_parsers): parser = all_parsers data = """A|B|C 1|2,334|5 10|13|10. """ expected = DataFrame({"A": [1, 10], "B": [2334, 13], "C": [5, 10.0]}) result = parser.read_csv(StringIO(data), sep="|", thousands=",") tm.assert_frame_equal(result, expected) def test_squeeze(all_parsers): data = """\ a,1 b,2 c,3 """ parser = all_parsers index = Index(["a", "b", "c"], name=0) expected = Series([1, 2, 3], name=1, index=index) result = parser.read_csv(StringIO(data), index_col=0, header=None, squeeze=True) tm.assert_series_equal(result, expected) # see gh-8217 # # Series should not be a view. assert not result._is_view def test_malformed(all_parsers): # see gh-6607 parser = all_parsers data = """ignore A,B,C 1,2,3 # comment 1,2,3,4,5 2,3,4 """ msg = "Expected 3 fields in line 4, saw 5" with pytest.raises(ParserError, match=msg): parser.read_csv(StringIO(data), header=1, comment="#") @pytest.mark.parametrize("nrows", [5, 3, None]) def test_malformed_chunks(all_parsers, nrows): data = """ignore A,B,C skip 1,2,3 3,5,10 # comment 1,2,3,4,5 2,3,4 """ parser = all_parsers msg = "Expected 3 fields in line 6, saw 5" reader = parser.read_csv( StringIO(data), header=1, comment="#", iterator=True, chunksize=1, skiprows=[2] ) with pytest.raises(ParserError, match=msg): reader.read(nrows) def test_unnamed_columns(all_parsers): data = """A,B,C,, 1,2,3,4,5 6,7,8,9,10 11,12,13,14,15 """ parser = all_parsers expected = DataFrame( [[1, 2, 3, 4, 5], [6, 7, 8, 9, 10], [11, 12, 13, 14, 15]], dtype=np.int64, columns=["A", "B", "C", "Unnamed: 3", "Unnamed: 4"], ) result = parser.read_csv(StringIO(data)) tm.assert_frame_equal(result, expected) def test_csv_mixed_type(all_parsers): data = """A,B,C a,1,2 b,3,4 c,4,5 """ parser = all_parsers expected = DataFrame({"A": ["a", "b", "c"], "B": [1, 3, 4], "C": [2, 4, 5]}) result = parser.read_csv(StringIO(data)) tm.assert_frame_equal(result, expected) def test_read_csv_low_memory_no_rows_with_index(all_parsers): # see gh-21141 parser = all_parsers if not parser.low_memory: pytest.skip("This is a low-memory specific test") data = """A,B,C 1,1,1,2 2,2,3,4 3,3,4,5 """ result = parser.read_csv(StringIO(data), low_memory=True, index_col=0, nrows=0) expected = DataFrame(columns=["A", "B", "C"]) tm.assert_frame_equal(result, expected) def test_read_csv_dataframe(all_parsers, csv1): parser = all_parsers result = parser.read_csv(csv1, index_col=0, parse_dates=True) expected = DataFrame( [ [0.980269, 
3.685731, -0.364216805298, -1.159738], [1.047916, -0.041232, -0.16181208307, 0.212549], [0.498581, 0.731168, -0.537677223318, 1.346270], [1.120202, 1.567621, 0.00364077397681, 0.675253], [-0.487094, 0.571455, -1.6116394093, 0.103469], [0.836649, 0.246462, 0.588542635376, 1.062782], [-0.157161, 1.340307, 1.1957779562, -1.097007], ], columns=["A", "B", "C", "D"], index=Index( [ datetime(2000, 1, 3), datetime(2000, 1, 4), datetime(2000, 1, 5), datetime(2000, 1, 6), datetime(2000, 1, 7), datetime(2000, 1, 10), datetime(2000, 1, 11), ], name="index", ), ) tm.assert_frame_equal(result, expected) def test_read_csv_no_index_name(all_parsers, csv_dir_path): parser = all_parsers csv2 = os.path.join(csv_dir_path, "test2.csv") result = parser.read_csv(csv2, index_col=0, parse_dates=True) expected = DataFrame( [ [0.980269, 3.685731, -0.364216805298, -1.159738, "foo"], [1.047916, -0.041232, -0.16181208307, 0.212549, "bar"], [0.498581, 0.731168, -0.537677223318, 1.346270, "baz"], [1.120202, 1.567621, 0.00364077397681, 0.675253, "qux"], [-0.487094, 0.571455, -1.6116394093, 0.103469, "foo2"], ], columns=["A", "B", "C", "D", "E"], index=Index( [ datetime(2000, 1, 3), datetime(2000, 1, 4), datetime(2000, 1, 5), datetime(2000, 1, 6), datetime(2000, 1, 7), ] ), ) tm.assert_frame_equal(result, expected) def test_read_csv_wrong_num_columns(all_parsers): # Too few columns. data = """A,B,C,D,E,F 1,2,3,4,5,6 6,7,8,9,10,11,12 11,12,13,14,15,16 """ parser = all_parsers msg = "Expected 6 fields in line 3, saw 7" with pytest.raises(ParserError, match=msg): parser.read_csv(StringIO(data)) def test_read_duplicate_index_explicit(all_parsers): data = """index,A,B,C,D foo,2,3,4,5 bar,7,8,9,10 baz,12,13,14,15 qux,12,13,14,15 foo,12,13,14,15 bar,12,13,14,15 """ parser = all_parsers result = parser.read_csv(StringIO(data), index_col=0) expected = DataFrame( [ [2, 3, 4, 5], [7, 8, 9, 10], [12, 13, 14, 15], [12, 13, 14, 15], [12, 13, 14, 15], [12, 13, 14, 15], ], columns=["A", "B", "C", "D"], index=Index(["foo", "bar", "baz", "qux", "foo", "bar"], name="index"), ) tm.assert_frame_equal(result, expected) def test_read_duplicate_index_implicit(all_parsers): data = """A,B,C,D foo,2,3,4,5 bar,7,8,9,10 baz,12,13,14,15 qux,12,13,14,15 foo,12,13,14,15 bar,12,13,14,15 """ parser = all_parsers result = parser.read_csv(StringIO(data)) expected = DataFrame( [ [2, 3, 4, 5], [7, 8, 9, 10], [12, 13, 14, 15], [12, 13, 14, 15], [12, 13, 14, 15], [12, 13, 14, 15], ], columns=["A", "B", "C", "D"], index=Index(["foo", "bar", "baz", "qux", "foo", "bar"]), ) tm.assert_frame_equal(result, expected) @pytest.mark.parametrize( "data,kwargs,expected", [ ( "A,B\nTrue,1\nFalse,2\nTrue,3", dict(), DataFrame([[True, 1], [False, 2], [True, 3]], columns=["A", "B"]), ), ( "A,B\nYES,1\nno,2\nyes,3\nNo,3\nYes,3", dict(true_values=["yes", "Yes", "YES"], false_values=["no", "NO", "No"]), DataFrame( [[True, 1], [False, 2], [True, 3], [False, 3], [True, 3]], columns=["A", "B"], ), ), ( "A,B\nTRUE,1\nFALSE,2\nTRUE,3", dict(), DataFrame([[True, 1], [False, 2], [True, 3]], columns=["A", "B"]), ), ( "A,B\nfoo,bar\nbar,foo", dict(true_values=["foo"], false_values=["bar"]), DataFrame([[True, False], [False, True]], columns=["A", "B"]), ), ], ) def test_parse_bool(all_parsers, data, kwargs, expected): parser = all_parsers result = parser.read_csv(StringIO(data), **kwargs) tm.assert_frame_equal(result, expected) def test_int_conversion(all_parsers): data = """A,B 1.0,1 2.0,2 3.0,3 """ parser = all_parsers result = parser.read_csv(StringIO(data)) expected = DataFrame([[1.0, 
1], [2.0, 2], [3.0, 3]], columns=["A", "B"]) tm.assert_frame_equal(result, expected) @pytest.mark.parametrize("nrows", [3, 3.0]) def test_read_nrows(all_parsers, nrows): # see gh-10476 data = """index,A,B,C,D foo,2,3,4,5 bar,7,8,9,10 baz,12,13,14,15 qux,12,13,14,15 foo2,12,13,14,15 bar2,12,13,14,15 """ expected = DataFrame( [["foo", 2, 3, 4, 5], ["bar", 7, 8, 9, 10], ["baz", 12, 13, 14, 15]], columns=["index", "A", "B", "C", "D"], ) parser = all_parsers result = parser.read_csv(StringIO(data), nrows=nrows) tm.assert_frame_equal(result, expected) @pytest.mark.parametrize("nrows", [1.2, "foo", -1]) def test_read_nrows_bad(all_parsers, nrows): data = """index,A,B,C,D foo,2,3,4,5 bar,7,8,9,10 baz,12,13,14,15 qux,12,13,14,15 foo2,12,13,14,15 bar2,12,13,14,15 """ msg = r"'nrows' must be an integer >=0" parser = all_parsers with pytest.raises(ValueError, match=msg): parser.read_csv(StringIO(data), nrows=nrows) @pytest.mark.parametrize("index_col", [0, "index"]) def test_read_chunksize_with_index(all_parsers, index_col): parser = all_parsers data = """index,A,B,C,D foo,2,3,4,5 bar,7,8,9,10 baz,12,13,14,15 qux,12,13,14,15 foo2,12,13,14,15 bar2,12,13,14,15 """ reader = parser.read_csv(StringIO(data), index_col=0, chunksize=2) expected = DataFrame( [ ["foo", 2, 3, 4, 5], ["bar", 7, 8, 9, 10], ["baz", 12, 13, 14, 15], ["qux", 12, 13, 14, 15], ["foo2", 12, 13, 14, 15], ["bar2", 12, 13, 14, 15], ], columns=["index", "A", "B", "C", "D"], ) expected = expected.set_index("index") chunks = list(reader) tm.assert_frame_equal(chunks[0], expected[:2]) tm.assert_frame_equal(chunks[1], expected[2:4]) tm.assert_frame_equal(chunks[2], expected[4:]) @pytest.mark.parametrize("chunksize", [1.3, "foo", 0]) def test_read_chunksize_bad(all_parsers, chunksize): data = """index,A,B,C,D foo,2,3,4,5 bar,7,8,9,10 baz,12,13,14,15 qux,12,13,14,15 foo2,12,13,14,15 bar2,12,13,14,15 """ parser = all_parsers msg = r"'chunksize' must be an integer >=1" with pytest.raises(ValueError, match=msg): parser.read_csv(StringIO(data), chunksize=chunksize) @pytest.mark.parametrize("chunksize", [2, 8]) def test_read_chunksize_and_nrows(all_parsers, chunksize): # see gh-15755 data = """index,A,B,C,D foo,2,3,4,5 bar,7,8,9,10 baz,12,13,14,15 qux,12,13,14,15 foo2,12,13,14,15 bar2,12,13,14,15 """ parser = all_parsers kwargs = dict(index_col=0, nrows=5) reader = parser.read_csv(StringIO(data), chunksize=chunksize, **kwargs) expected = parser.read_csv(StringIO(data), **kwargs) tm.assert_frame_equal(concat(reader), expected) def test_read_chunksize_and_nrows_changing_size(all_parsers): data = """index,A,B,C,D foo,2,3,4,5 bar,7,8,9,10 baz,12,13,14,15 qux,12,13,14,15 foo2,12,13,14,15 bar2,12,13,14,15 """ parser = all_parsers kwargs = dict(index_col=0, nrows=5) reader = parser.read_csv(StringIO(data), chunksize=8, **kwargs) expected = parser.read_csv(StringIO(data), **kwargs) tm.assert_frame_equal(reader.get_chunk(size=2), expected.iloc[:2]) tm.assert_frame_equal(reader.get_chunk(size=4), expected.iloc[2:5]) with pytest.raises(StopIteration, match=""): reader.get_chunk(size=3) def test_get_chunk_passed_chunksize(all_parsers): parser = all_parsers data = """A,B,C 1,2,3 4,5,6 7,8,9 1,2,3""" reader = parser.read_csv(StringIO(data), chunksize=2) result = reader.get_chunk() expected = DataFrame([[1, 2, 3], [4, 5, 6]], columns=["A", "B", "C"]) tm.assert_frame_equal(result, expected) @pytest.mark.parametrize("kwargs", [dict(), dict(index_col=0)]) def test_read_chunksize_compat(all_parsers, kwargs): # see gh-12185 data = """index,A,B,C,D foo,2,3,4,5 
"y"]) expected = DataFrame( columns=["z"], index=MultiIndex.from_arrays([[]] * 2, names=["x", "y"]) ) tm.assert_frame_equal(result, expected) def test_empty_with_reversed_multi_index(all_parsers): data = "x,y,z" parser = all_parsers result = parser.read_csv(StringIO(data), index_col=[1, 0]) expected = DataFrame( columns=["z"], index=MultiIndex.from_arrays([[]] * 2, names=["y", "x"]) ) tm.assert_frame_equal(result, expected) def test_float_parser(all_parsers): # see gh-9565 parser = all_parsers data = "45e-1,4.5,45.,inf,-inf" result = parser.read_csv(StringIO(data), header=None) expected = DataFrame([[float(s) for s in data.split(",")]]) tm.assert_frame_equal(result, expected) def test_scientific_no_exponent(all_parsers): # see gh-12215 df = DataFrame.from_dict({"w": ["2e"], "x": ["3E"], "y": ["42e"], "z": ["632E"]}) data = df.to_csv(index=False) parser = all_parsers for precision in parser.float_precision_choices: df_roundtrip = parser.read_csv(StringIO(data), float_precision=precision) tm.assert_frame_equal(df_roundtrip, df) @pytest.mark.parametrize("conv", [None, np.int64, np.uint64]) def test_int64_overflow(all_parsers, conv): data = """ID 00013007854817840016671868 00013007854817840016749251 00013007854817840016754630 00013007854817840016781876 00013007854817840017028824 00013007854817840017963235 00013007854817840018860166""" parser = all_parsers if conv is None: # 13007854817840016671868 > UINT64_MAX, so this # will overflow and return object as the dtype. result = parser.read_csv(StringIO(data)) expected = DataFrame( [ "00013007854817840016671868", "00013007854817840016749251", "00013007854817840016754630", "00013007854817840016781876", "00013007854817840017028824", "00013007854817840017963235", "00013007854817840018860166", ], columns=["ID"], ) tm.assert_frame_equal(result, expected) else: # 13007854817840016671868 > UINT64_MAX, so attempts # to cast to either int64 or uint64 will result in # an OverflowError being raised. msg = ( "(Python int too large to convert to C long)|" "(long too big to convert)|" "(int too big to convert)" ) with pytest.raises(OverflowError, match=msg): parser.read_csv(StringIO(data), converters={"ID": conv}) @pytest.mark.parametrize( "val", [np.iinfo(np.uint64).max, np.iinfo(np.int64).max, np.iinfo(np.int64).min] ) def test_int64_uint64_range(all_parsers, val): # These numbers fall right inside the int64-uint64 # range, so they should be parsed as string. parser = all_parsers result = parser.read_csv(StringIO(str(val)), header=None) expected = DataFrame([val]) tm.assert_frame_equal(result, expected) @pytest.mark.parametrize( "val", [np.iinfo(np.uint64).max + 1, np.iinfo(np.int64).min - 1] ) def test_outside_int64_uint64_range(all_parsers, val): # These numbers fall just outside the int64-uint64 # range, so they should be parsed as string. parser = all_parsers result = parser.read_csv(StringIO(str(val)), header=None) expected = DataFrame([str(val)]) tm.assert_frame_equal(result, expected) @pytest.mark.parametrize("exp_data", [[str(-1), str(2 ** 63)], [str(2 ** 63), str(-1)]]) def test_numeric_range_too_wide(all_parsers, exp_data): # No numerical dtype can hold both negative and uint64 # values, so they should be cast as string. 
parser = all_parsers data = "\n".join(exp_data) expected = DataFrame(exp_data) result = parser.read_csv(StringIO(data), header=None) tm.assert_frame_equal(result, expected) @pytest.mark.parametrize("iterator", [True, False]) def test_empty_with_nrows_chunksize(all_parsers, iterator): # see gh-9535 parser = all_parsers expected = DataFrame(columns=["foo", "bar"]) nrows = 10 data = StringIO("foo,bar\n") if iterator: result = next(iter(parser.read_csv(data, chunksize=nrows))) else: result = parser.read_csv(data, nrows=nrows) tm.assert_frame_equal(result, expected) @pytest.mark.parametrize( "data,kwargs,expected,msg", [ # gh-10728: WHITESPACE_LINE ( "a,b,c\n4,5,6\n ", dict(), DataFrame([[4, 5, 6]], columns=["a", "b", "c"]), None, ), # gh-10548: EAT_LINE_COMMENT ( "a,b,c\n4,5,6\n#comment", dict(comment="#"), DataFrame([[4, 5, 6]], columns=["a", "b", "c"]), None, ), # EAT_CRNL_NOP ( "a,b,c\n4,5,6\n\r", dict(), DataFrame([[4, 5, 6]], columns=["a", "b", "c"]), None, ), # EAT_COMMENT ( "a,b,c\n4,5,6#comment", dict(comment="#"), DataFrame([[4, 5, 6]], columns=["a", "b", "c"]), None, ), # SKIP_LINE ( "a,b,c\n4,5,6\nskipme", dict(skiprows=[2]), DataFrame([[4, 5, 6]], columns=["a", "b", "c"]), None, ), # EAT_LINE_COMMENT ( "a,b,c\n4,5,6\n#comment", dict(comment="#", skip_blank_lines=False), DataFrame([[4, 5, 6]], columns=["a", "b", "c"]), None, ), # IN_FIELD ( "a,b,c\n4,5,6\n ", dict(skip_blank_lines=False), DataFrame([["4", 5, 6], [" ", None, None]], columns=["a", "b", "c"]), None, ), # EAT_CRNL ( "a,b,c\n4,5,6\n\r", dict(skip_blank_lines=False), DataFrame([[4, 5, 6], [None, None, None]], columns=["a", "b", "c"]), None, ), # ESCAPED_CHAR ( "a,b,c\n4,5,6\n\\", dict(escapechar="\\"), None, "(EOF following escape character)|(unexpected end of data)", ), # ESCAPE_IN_QUOTED_FIELD ( 'a,b,c\n4,5,6\n"\\', dict(escapechar="\\"), None, "(EOF inside string starting at row 2)|(unexpected end of data)", ), # IN_QUOTED_FIELD ( 'a,b,c\n4,5,6\n"', dict(escapechar="\\"), None, "(EOF inside string starting at row 2)|(unexpected end of data)", ), ], ids=[ "whitespace-line", "eat-line-comment", "eat-crnl-nop", "eat-comment", "skip-line", "eat-line-comment", "in-field", "eat-crnl", "escaped-char", "escape-in-quoted-field", "in-quoted-field", ], ) def test_eof_states(all_parsers, data, kwargs, expected, msg): # see gh-10728, gh-10548 parser = all_parsers if expected is None: with pytest.raises(ParserError, match=msg): parser.read_csv(StringIO(data), **kwargs) else: result = parser.read_csv(StringIO(data), **kwargs) tm.assert_frame_equal(result, expected) @pytest.mark.parametrize("usecols", [None, [0, 1], ["a", "b"]]) def test_uneven_lines_with_usecols(all_parsers, usecols): # see gh-12203 parser = all_parsers data = r"""a,b,c 0,1,2 3,4,5,6,7 8,9,10""" if usecols is None: # Make sure that an error is still raised # when the "usecols" parameter is not provided. msg = r"Expected \d+ fields in line \d+, saw \d+" with pytest.raises(ParserError, match=msg): parser.read_csv(StringIO(data)) else: expected = DataFrame({"a": [0, 3, 8], "b": [1, 4, 9]}) result = parser.read_csv(StringIO(data), usecols=usecols) tm.assert_frame_equal(result, expected) @pytest.mark.parametrize( "data,kwargs,expected", [ # First, check to see that the response of parser when faced with no # provided columns raises the correct error, with or without usecols. 
("", dict(), None), ("", dict(usecols=["X"]), None), ( ",,", dict(names=["Dummy", "X", "Dummy_2"], usecols=["X"]), DataFrame(columns=["X"], index=[0], dtype=np.float64), ), ( "", dict(names=["Dummy", "X", "Dummy_2"], usecols=["X"]), DataFrame(columns=["X"]), ), ], ) def test_read_empty_with_usecols(all_parsers, data, kwargs, expected): # see gh-12493 parser = all_parsers if expected is None: msg = "No columns to parse from file" with pytest.raises(EmptyDataError, match=msg): parser.read_csv(StringIO(data), **kwargs) else: result = parser.read_csv(StringIO(data), **kwargs) tm.assert_frame_equal(result, expected) @pytest.mark.parametrize( "kwargs,expected", [ # gh-8661, gh-8679: this should ignore six lines, including # lines with trailing whitespace and blank lines. ( dict( header=None, delim_whitespace=True, skiprows=[0, 1, 2, 3, 5, 6], skip_blank_lines=True, ), DataFrame([[1.0, 2.0, 4.0], [5.1, np.nan, 10.0]]), ), # gh-8983: test skipping set of rows after a row with trailing spaces. ( dict( delim_whitespace=True, skiprows=[1, 2, 3, 5, 6], skip_blank_lines=True ), DataFrame({"A": [1.0, 5.1], "B": [2.0, np.nan], "C": [4.0, 10]}), ), ], ) def test_trailing_spaces(all_parsers, kwargs, expected): data = "A B C \nrandom line with trailing spaces \nskip\n1,2,3\n1,2.,4.\nrandom line with trailing tabs\t\t\t\n \n5.1,NaN,10.0\n" # noqa parser = all_parsers result = parser.read_csv(StringIO(data.replace(",", " ")), **kwargs) tm.assert_frame_equal(result, expected) def test_raise_on_sep_with_delim_whitespace(all_parsers): # see gh-6607 data = "a b c\n1 2 3" parser = all_parsers with pytest.raises(ValueError, match="you can only specify one"): parser.read_csv(StringIO(data), sep=r"\s", delim_whitespace=True) @pytest.mark.parametrize("delim_whitespace", [True, False]) def test_single_char_leading_whitespace(all_parsers, delim_whitespace): # see gh-9710 parser = all_parsers data = """\ MyColumn a b a b\n""" expected = DataFrame({"MyColumn": list("abab")}) result = parser.read_csv( StringIO(data), skipinitialspace=True, delim_whitespace=delim_whitespace ) tm.assert_frame_equal(result, expected) @pytest.mark.parametrize( "sep,skip_blank_lines,exp_data", [ (",", True, [[1.0, 2.0, 4.0], [5.0, np.nan, 10.0], [-70.0, 0.4, 1.0]]), (r"\s+", True, [[1.0, 2.0, 4.0], [5.0, np.nan, 10.0], [-70.0, 0.4, 1.0]]), ( ",", False, [ [1.0, 2.0, 4.0], [np.nan, np.nan, np.nan], [np.nan, np.nan, np.nan], [5.0, np.nan, 10.0], [np.nan, np.nan, np.nan], [-70.0, 0.4, 1.0], ], ), ], ) def test_empty_lines(all_parsers, sep, skip_blank_lines, exp_data): parser = all_parsers data = """\ A,B,C 1,2.,4. 5.,NaN,10.0 -70,.4,1 """ if sep == r"\s+": data = data.replace(",", " ") result = parser.read_csv(StringIO(data), sep=sep, skip_blank_lines=skip_blank_lines) expected = DataFrame(exp_data, columns=["A", "B", "C"]) tm.assert_frame_equal(result, expected) def test_whitespace_lines(all_parsers): parser = all_parsers data = """ \t \t\t \t A,B,C \t 1,2.,4. 
5.,NaN,10.0 """ expected = DataFrame([[1, 2.0, 4.0], [5.0, np.nan, 10.0]], columns=["A", "B", "C"]) result = parser.read_csv(StringIO(data)) tm.assert_frame_equal(result, expected) @pytest.mark.parametrize( "data,expected", [ ( """ A B C D a 1 2 3 4 b 1 2 3 4 c 1 2 3 4 """, DataFrame( [[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4]], columns=["A", "B", "C", "D"], index=["a", "b", "c"], ), ), ( " a b c\n1 2 3 \n4 5 6\n 7 8 9", DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]], columns=["a", "b", "c"]), ), ], ) def test_whitespace_regex_separator(all_parsers, data, expected): # see gh-6607 parser = all_parsers result = parser.read_csv(StringIO(data), sep=r"\s+") tm.assert_frame_equal(result, expected) def test_verbose_read(all_parsers, capsys): parser = all_parsers data = """a,b,c,d one,1,2,3 one,1,2,3 ,1,2,3 one,1,2,3 ,1,2,3 ,1,2,3 one,1,2,3 two,1,2,3""" # Engines are verbose in different ways. parser.read_csv(StringIO(data), verbose=True) captured = capsys.readouterr() if parser.engine == "c": assert "Tokenization took:" in captured.out assert "Parser memory cleanup took:" in captured.out else: # Python engine assert captured.out == "Filled 3 NA values in column a\n" def test_verbose_read2(all_parsers, capsys): parser = all_parsers data = """a,b,c,d one,1,2,3 two,1,2,3 three,1,2,3 four,1,2,3 five,1,2,3 ,1,2,3 seven,1,2,3 eight,1,2,3""" parser.read_csv(StringIO(data), verbose=True, index_col=0) captured = capsys.readouterr() # Engines are verbose in different ways. if parser.engine == "c": assert "Tokenization took:" in captured.out assert "Parser memory cleanup took:" in captured.out else: # Python engine assert captured.out == "Filled 1 NA values in column a\n" def test_iteration_open_handle(all_parsers): parser = all_parsers kwargs = dict(squeeze=True, header=None) with tm.ensure_clean() as path: with open(path, "w") as f: f.write("AAA\nBBB\nCCC\nDDD\nEEE\nFFF\nGGG") with open(path, "r") as f: for line in f: if "CCC" in line: break result = parser.read_csv(f, **kwargs) expected = Series(["DDD", "EEE", "FFF", "GGG"], name=0) tm.assert_series_equal(result, expected) @pytest.mark.parametrize( "data,thousands,decimal", [ ( """A|B|C 1|2,334.01|5 10|13|10. 
""", ",", ".", ), ( """A|B|C 1|2.334,01|5 10|13|10, """, ".", ",", ), ], ) def test_1000_sep_with_decimal(all_parsers, data, thousands, decimal): parser = all_parsers expected = DataFrame({"A": [1, 10], "B": [2334.01, 13], "C": [5, 10.0]}) result = parser.read_csv( StringIO(data), sep="|", thousands=thousands, decimal=decimal ) tm.assert_frame_equal(result, expected) def test_euro_decimal_format(all_parsers): parser = all_parsers data = """Id;Number1;Number2;Text1;Text2;Number3 1;1521,1541;187101,9543;ABC;poi;4,738797819 2;121,12;14897,76;DEF;uyt;0,377320872 3;878,158;108013,434;GHI;rez;2,735694704""" result = parser.read_csv(StringIO(data), sep=";", decimal=",") expected = DataFrame( [ [1, 1521.1541, 187101.9543, "ABC", "poi", 4.738797819], [2, 121.12, 14897.76, "DEF", "uyt", 0.377320872], [3, 878.158, 108013.434, "GHI", "rez", 2.735694704], ], columns=["Id", "Number1", "Number2", "Text1", "Text2", "Number3"], ) tm.assert_frame_equal(result, expected) @pytest.mark.parametrize("na_filter", [True, False]) def test_inf_parsing(all_parsers, na_filter): parser = all_parsers data = """\ ,A a,inf b,-inf c,+Inf d,-Inf e,INF f,-INF g,+INf h,-INf i,inF j,-inF""" expected = DataFrame( {"A": [float("inf"), float("-inf")] * 5}, index=["a", "b", "c", "d", "e", "f", "g", "h", "i", "j"], ) result = parser.read_csv(StringIO(data), index_col=0, na_filter=na_filter) tm.assert_frame_equal(result, expected) @pytest.mark.parametrize("na_filter", [True, False]) def test_infinity_parsing(all_parsers, na_filter): parser = all_parsers data = """\ ,A a,Infinity b,-Infinity c,+Infinity """ expected = DataFrame( {"A": [float("infinity"), float("-infinity"), float("+infinity")]}, index=["a", "b", "c"], ) result = parser.read_csv(StringIO(data), index_col=0, na_filter=na_filter) tm.assert_frame_equal(result, expected) @pytest.mark.parametrize("nrows", [0, 1, 2, 3, 4, 5]) def test_raise_on_no_columns(all_parsers, nrows): parser = all_parsers data = "\n" * nrows msg = "No columns to parse from file" with pytest.raises(EmptyDataError, match=msg): parser.read_csv(StringIO(data)) def test_memory_map(all_parsers, csv_dir_path): mmap_file = os.path.join(csv_dir_path, "test_mmap.csv") parser = all_parsers expected = DataFrame( {"a": [1, 2, 3], "b": ["one", "two", "three"], "c": ["I", "II", "III"]} ) result = parser.read_csv(mmap_file, memory_map=True) tm.assert_frame_equal(result, expected) def test_null_byte_char(all_parsers): # see gh-2741 data = "\x00,foo" names = ["a", "b"] parser = all_parsers if parser.engine == "c": expected = DataFrame([[np.nan, "foo"]], columns=names) out = parser.read_csv(StringIO(data), names=names) tm.assert_frame_equal(out, expected) else: msg = "NULL byte detected" with pytest.raises(ParserError, match=msg): parser.read_csv(StringIO(data), names=names) def test_temporary_file(all_parsers): # see gh-13398 parser = all_parsers data = "0 0" with tm.ensure_clean(mode="w+", return_filelike=True) as new_file: new_file.write(data) new_file.flush() new_file.seek(0) result = parser.read_csv(new_file, sep=r"\s+", header=None) expected = DataFrame([[0, 0]]) tm.assert_frame_equal(result, expected) def test_internal_eof_byte(all_parsers): # see gh-5500 parser = all_parsers data = "a,b\n1\x1a,2" expected = DataFrame([["1\x1a", 2]], columns=["a", "b"]) result = parser.read_csv(StringIO(data)) tm.assert_frame_equal(result, expected) def test_internal_eof_byte_to_file(all_parsers): # see gh-16559 parser = all_parsers data = b'c1,c2\r\n"test \x1a test", test\r\n' expected = DataFrame([["test \x1a test", " test"]], 
columns=["c1", "c2"]) path = f"__{tm.rands(10)}__.csv" with tm.ensure_clean(path) as path: with open(path, "wb") as f: f.write(data) result = parser.read_csv(path) tm.assert_frame_equal(result, expected) def test_sub_character(all_parsers, csv_dir_path): # see gh-16893 filename = os.path.join(csv_dir_path, "sub_char.csv") expected = DataFrame([[1, 2, 3]], columns=["a", "\x1ab", "c"]) parser = all_parsers result = parser.read_csv(filename) tm.assert_frame_equal(result, expected) def test_file_handle_string_io(all_parsers): # gh-14418 # # Don't close user provided file handles. parser = all_parsers data = "a,b\n1,2" fh = StringIO(data) parser.read_csv(fh) assert not fh.closed def test_file_handles_with_open(all_parsers, csv1): # gh-14418 # # Don't close user provided file handles. parser = all_parsers for mode in ["r", "rb"]: with open(csv1, mode) as f: parser.read_csv(f) assert not f.closed def test_invalid_file_buffer_class(all_parsers): # see gh-15337 class InvalidBuffer: pass parser = all_parsers msg = "Invalid file path or buffer object type" with pytest.raises(ValueError, match=msg): parser.read_csv(InvalidBuffer()) def test_invalid_file_buffer_mock(all_parsers): # see gh-15337 parser = all_parsers msg = "Invalid file path or buffer object type" class Foo: pass with pytest.raises(ValueError, match=msg): parser.read_csv(Foo()) def test_valid_file_buffer_seems_invalid(all_parsers): # gh-16135: we want to ensure that "tell" and "seek" # aren't actually being used when we call `read_csv` # # Thus, while the object may look "invalid" (these # methods are attributes of the `StringIO` class), # it is still a valid file-object for our purposes. class NoSeekTellBuffer(StringIO): def tell(self): raise AttributeError("No tell method") def seek(self, pos, whence=0): raise AttributeError("No seek method") data = "a\n1" parser = all_parsers expected = DataFrame({"a": [1]}) result = parser.read_csv(NoSeekTellBuffer(data)) tm.assert_frame_equal(result, expected) @pytest.mark.parametrize( "kwargs", [dict(), dict(error_bad_lines=True)], # Default is True. # Explicitly pass in. ) @pytest.mark.parametrize( "warn_kwargs", [dict(), dict(warn_bad_lines=True), dict(warn_bad_lines=False)] ) def test_error_bad_lines(all_parsers, kwargs, warn_kwargs): # see gh-15925 parser = all_parsers kwargs.update(**warn_kwargs) data = "a\n1\n1,2,3\n4\n5,6,7" msg = "Expected 1 fields in line 3, saw 3" with pytest.raises(ParserError, match=msg): parser.read_csv(StringIO(data), **kwargs) def test_warn_bad_lines(all_parsers, capsys): # see gh-15925 parser = all_parsers data = "a\n1\n1,2,3\n4\n5,6,7" expected = DataFrame({"a": [1, 4]}) result = parser.read_csv(StringIO(data), error_bad_lines=False, warn_bad_lines=True) tm.assert_frame_equal(result, expected) captured = capsys.readouterr() assert "Skipping line 3" in captured.err assert "Skipping line 5" in captured.err def test_suppress_error_output(all_parsers, capsys): # see gh-15925 parser = all_parsers data = "a\n1\n1,2,3\n4\n5,6,7" expected = DataFrame({"a": [1, 4]}) result = parser.read_csv( StringIO(data), error_bad_lines=False, warn_bad_lines=False ) tm.assert_frame_equal(result, expected) captured = capsys.readouterr() assert captured.err == "" @pytest.mark.parametrize("filename", ["sé-es-vé.csv", "ru-sй.csv", "中文文件名.csv"]) def test_filename_with_special_chars(all_parsers, filename): # see gh-15086. 
parser = all_parsers df = DataFrame({"a": [1, 2, 3]}) with tm.ensure_clean(filename) as path: df.to_csv(path, index=False) result = parser.read_csv(path) tm.assert_frame_equal(result, df) def test_read_csv_memory_growth_chunksize(all_parsers): # see gh-24805 # # Let's just make sure that we don't crash # as we iteratively process all chunks. parser = all_parsers with tm.ensure_clean() as path: with open(path, "w") as f: for i in range(1000): f.write(str(i) + "\n") result = parser.read_csv(path, chunksize=20) for _ in result: pass def test_read_csv_raises_on_header_prefix(all_parsers): # gh-27394 parser = all_parsers msg = "Argument prefix must be None if argument header is not None" s = StringIO("0,1\n2,3") with pytest.raises(ValueError, match=msg): parser.read_csv(s, header=0, prefix="_X") def test_read_table_equivalency_to_read_csv(all_parsers): # see gh-21948 # As of 0.25.0, read_table is undeprecated parser = all_parsers data = "a\tb\n1\t2\n3\t4" expected = parser.read_csv(StringIO(data), sep="\t") result = parser.read_table(StringIO(data)) tm.assert_frame_equal(result, expected) def test_first_row_bom(all_parsers): # see gh-26545 parser = all_parsers data = '''\ufeff"Head1" "Head2" "Head3"''' result = parser.read_csv(StringIO(data), delimiter="\t") expected = DataFrame(columns=["Head1", "Head2", "Head3"]) tm.assert_frame_equal(result, expected) def test_integer_precision(all_parsers): # Gh 7072 s = """1,1;0;0;0;1;1;3844;3844;3844;1;1;1;1;1;1;0;0;1;1;0;0,,,4321583677327450765 5,1;0;0;0;1;1;843;843;843;1;1;1;1;1;1;0;0;1;1;0;0,64.0,;,4321113141090630389""" parser = all_parsers result = parser.read_csv(StringIO(s), header=None)[4] expected = Series([4321583677327450765, 4321113141090630389], name=4) tm.assert_series_equal(result, expected) def test_file_descriptor_leak(all_parsers): # GH 31488 parser = all_parsers with tm.ensure_clean() as path: def test(): with pytest.raises(EmptyDataError, match="No columns to parse from file"): parser.read_csv(path) td.check_file_leaks(test)() @pytest.mark.parametrize("nrows", range(1, 6)) def test_blank_lines_between_header_and_data_rows(all_parsers, nrows): # GH 28071 ref = DataFrame( [[np.nan, np.nan], [np.nan, np.nan], [1, 2], [np.nan, np.nan], [3, 4]], columns=list("ab"), ) csv = "\nheader\n\na,b\n\n\n1,2\n\n3,4" parser = all_parsers df = parser.read_csv(StringIO(csv), header=3, nrows=nrows, skip_blank_lines=False) tm.assert_frame_equal(df, ref[:nrows]) def test_no_header_two_extra_columns(all_parsers): # GH 26218 column_names = ["one", "two", "three"] ref = DataFrame([["foo", "bar", "baz"]], columns=column_names) stream = StringIO("foo,bar,baz,bam,blah") parser = all_parsers df = parser.read_csv(stream, header=None, names=column_names, index_col=False) tm.assert_frame_equal(df, ref)
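# Illustration only (not part of the test suite above): the behaviour exercised by
# test_no_header_two_extra_columns. With header=None, an explicit `names` list and
# index_col=False, trailing extra fields beyond the named columns are discarded.
from io import StringIO

import pandas as pd

df = pd.read_csv(
    StringIO("foo,bar,baz,bam,blah"),
    header=None,
    names=["one", "two", "three"],
    index_col=False,
)
print(df)  # a single row keeping only the first three fields: foo, bar, baz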
TomAugspurger/pandas
pandas/tests/io/parser/test_common.py
pandas/core/arrays/_ranges.py
from __future__ import print_function from argparse import ArgumentParser import os from ruamel import yaml from binstar_client.utils import get_server_api from binstar_client.errors import NotFound from conda.version import VersionOrder __all__ = ['PackageCopier'] class PackageCopier(object): def __init__(self, source, destination, input_packages, token=''): """ Parameters ---------- source : ``str`` Name of source conda channel. destination : ``str`` Name of destination conda channel. input_package : ``dict`` Dictionary in which keys are package names and values are either a string version number (e.g. ``'1.0.1'``) or ``None``, which indicates the latest version on the source channel should be copied. This dictionary should contain the packages that potentially need to be copied. token : ``str``, optional Token for conda API. Needed for the actual copy operation. """ self.source = source self.destination = destination self.input_packages = input_packages self.api = get_server_api(token) self.to_copy = self._package_versions_to_copy() def _package_versions_to_copy(self): """ Determine which version of each package in packages should be copied from conda channel source to channel destination. Returns ------- ``dict`` Dictionary whose keys are the packages that actually need to be copied and whose values are the version to be copied. """ packages = self.input_packages copy_versions = {} for p, version in packages.items(): copy_builds = [] need_to_copy = False # This will end up True if the version exists on both src and dest # and triggers a comparison of file names. Technically, it could # be omitted, but seems more likely to be clear to future me. check_builds = False cf = self.api.package(self.source, p) cf_version = VersionOrder(cf['latest_version']) if version is not None: pinned_version = VersionOrder(version) else: pinned_version = None if pinned_version is not None: if str(pinned_version) not in cf['versions']: error_message = ('Version {} of package {} not ' 'found on source channel {}.') err = error_message.format(pinned_version, p, self.source) raise RuntimeError(err) try: ap = self.api.package(self.destination, p) except NotFound: need_to_copy = True ap_version = None else: ap_version = VersionOrder(ap['latest_version']) if pinned_version is None: if cf_version > ap_version: need_to_copy = True elif cf_version == ap_version: check_builds = True else: if str(pinned_version) not in ap['versions']: need_to_copy = True else: check_builds = True if check_builds: # If we get here it means that the same version is on both # source and destination so we need to check the individual # builds. check_version = pinned_version or cf_version copy_builds = \ self._check_for_missing_builds(cf, ap, check_version) need_to_copy = len(copy_builds) > 0 if need_to_copy: copy_versions[p] = (str(cf_version), copy_builds) return copy_versions def _check_for_missing_builds(self, source, dest, version): """ For two packages that have the same version, see if there are any files on the source that are not on the destination. source and dest are both conda channels, and version should be a string. 
""" def files_for_version(channel, version): files = [f['basename'] for f in channel['files'] if VersionOrder(version) == VersionOrder(f['version'])] return files source_files = files_for_version(source, version) destination_files = files_for_version(dest, version) need_to_copy = [src for src in source_files if src not in destination_files] return need_to_copy def copy_packages(self): """ Actually do the copying of the packages. """ for p, v in self.to_copy.items(): version, buildnames = v if not buildnames: # Copy all of the builds for this version self.api.copy(self.source, p, version, to_owner=self.destination) else: for build in buildnames: self.api.copy(self.source, p, version, basename=build, to_owner=self.destination) def main(arguments=None): parser = ArgumentParser('Simple script for copying packages ' 'from one conda owner to another') parser.add_argument('packages_yaml', help=('Packages to copy, as a yaml dictionary. ' 'Keys are package names, values are version, ' 'or None for the latest version from ' 'the source.')) parser.add_argument('--source', default='conda-forge', help='Source conda channel owner.') parser.add_argument('--token', default='', help=('anaconda.org API token. May set ' 'environmental variable BINSTAR_TOKEN ' 'instead.')) parser.add_argument('destination_channel', help=('Destination conda channel owner.')) if arguments is None: args = parser.parse_args() else: args = parser.parse_args(arguments) source = args.source dest = args.destination_channel package_file = args.packages_yaml token = args.token with open(package_file) as f: packages = yaml.load(f) # No token on command line, try the environment... if not token: token = os.getenv('BINSTAR_TOKEN') # Still no token, so raise an error if not token: raise RuntimeError('Set an anaconda.org API token before running') pc = PackageCopier(source, dest, packages, token=token) pc.copy_packages() if __name__ == '__main__': main()
import pytest from os import getenv from binstar_client.utils import get_server_api from binstar_client.errors import NotFound from ..copy_packages import PackageCopier SOURCE = 'conda-forge' DEST = 'astropy-channel-copy-test' # Destination channel contains only the packages: # wcsaxes # only versions 0.7 and 0.8, but not the latest on conda-forge, # which is 0.9. # sep # only version 0.5.2, copied from channel mwcraig, # which contains only that version. def test_package_not_on_source(): # Package does not exist on source channel # Expected outcome: NotFound packages = {'asudifjqeiroufnver': None} with pytest.raises(NotFound): PackageCopier(SOURCE, DEST, packages) # Whether or not version exists on destination channel: def test_version_not_in_source(): # Package version is pinned and... # ...pinned version is not in source channel # Expected outcome: RuntimeError and specific message packages = {'wcsaxes': '0.0.0'} with pytest.raises(RuntimeError): PackageCopier(SOURCE, DEST, packages) # Package version is pinned and... def test_version_pinned_not_in_destination(): # ...pinned version is not in destination channel # Expected outcome: copy packages = {'wcsaxes': '0.9'} pc = PackageCopier(SOURCE, DEST, packages) assert 'wcsaxes' in pc.to_copy def test_version_pinned_in_destination(): # ...pinned version is in destination channel # Expected outcome: No copy packages = {'wcsaxes': '0.8'} pc = PackageCopier(SOURCE, DEST, packages) assert 'wcsaxes' not in pc.to_copy # Package version is not pinned and... def test_version_not_pinned_not_in_destination(): # ...destination channel is not up to date # Expected outcome: copy packages = {'wcsaxes': None} pc = PackageCopier(SOURCE, DEST, packages) assert 'wcsaxes' in pc.to_copy def test_version_not_pinned_no_update_needed(): # ...destination is up to date # Expected outcome: no copy packages = {'sep': None} pc = PackageCopier('mwcraig', DEST, packages) assert 'sep' not in pc.to_copy token = getenv('COPY_TEST_BINSTAR_TOKEN') @pytest.mark.skipif(token is None, reason='binstar token not set') def test_package_copying(): api = get_server_api(token) packages = {'wcsaxes': None} pc = PackageCopier(SOURCE, DEST, packages, token=token) # Make sure v0.9 has not accidentally ended up in the channel. dest_wcs = api.package(DEST, 'wcsaxes') assert "0.9" not in dest_wcs['versions'] # Copy 0.9 to the channel. pc.copy_packages() # Make sure it is really there. dest_wcs = api.package(DEST, 'wcsaxes') assert "0.9" in dest_wcs['versions'] # Remove it... api.remove_release(DEST, 'wcsaxes', "0.9") # ...and make sure it is really gone. dest_wcs = api.package(DEST, 'wcsaxes') assert "0.9" not in dest_wcs['versions']
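# Usage sketch for the PackageCopier class defined above, assuming the module is
# importable as extruder.copy_packages (the code path listed below) and that an
# anaconda.org API token is exported as BINSTAR_TOKEN. The destination channel
# name here is a placeholder; 'wcsaxes' and 'sep' mirror the test fixtures above.
import os

from extruder.copy_packages import PackageCopier

packages = {
    'wcsaxes': None,     # None -> copy the latest version found on the source
    'sep': '0.5.2',      # pinned -> copy exactly this version if it is missing
}
pc = PackageCopier('conda-forge', 'my-destination-channel', packages,
                   token=os.getenv('BINSTAR_TOKEN', ''))
print(pc.to_copy)        # {package: (version, [build filenames to copy])}
pc.copy_packages()       # perform the copies via the anaconda.org API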
astropy/conda-build-tools
extruder/tests/test_channel_copy.py
extruder/copy_packages.py
from pandas.util._decorators import cache_readonly
import pandas.util.testing as tm
import pandas as pd

_ts = tm.makeTimeSeries()


class TestData(object):

    @cache_readonly
    def ts(self):
        ts = _ts.copy()
        ts.name = 'ts'
        return ts

    @cache_readonly
    def series(self):
        series = tm.makeStringSeries()
        series.name = 'series'
        return series

    @cache_readonly
    def objSeries(self):
        objSeries = tm.makeObjectSeries()
        objSeries.name = 'objects'
        return objSeries

    @cache_readonly
    def empty(self):
        return pd.Series([], index=[])
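# Hypothetical example (not from the pandas test suite) of how TestData is meant
# to be used: test classes inherit from it and read the cached fixtures as plain
# attributes. Assumes the TestData class defined above is in scope.
class TestSeriesExample(TestData):

    def test_fixture_names(self):
        assert self.ts.name == 'ts'
        assert self.series.name == 'series'
        assert self.objSeries.name == 'objects'
        assert len(self.empty) == 0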
from datetime import timedelta import pytest import numpy as np import pandas as pd import pandas.util.testing as tm from pandas import TimedeltaIndex, timedelta_range, compat, Index, Timedelta class TestTimedeltaIndex(object): _multiprocess_can_split_ = True def test_insert(self): idx = TimedeltaIndex(['4day', '1day', '2day'], name='idx') result = idx.insert(2, timedelta(days=5)) exp = TimedeltaIndex(['4day', '1day', '5day', '2day'], name='idx') tm.assert_index_equal(result, exp) # insertion of non-datetime should coerce to object index result = idx.insert(1, 'inserted') expected = Index([Timedelta('4day'), 'inserted', Timedelta('1day'), Timedelta('2day')], name='idx') assert not isinstance(result, TimedeltaIndex) tm.assert_index_equal(result, expected) assert result.name == expected.name idx = timedelta_range('1day 00:00:01', periods=3, freq='s', name='idx') # preserve freq expected_0 = TimedeltaIndex(['1day', '1day 00:00:01', '1day 00:00:02', '1day 00:00:03'], name='idx', freq='s') expected_3 = TimedeltaIndex(['1day 00:00:01', '1day 00:00:02', '1day 00:00:03', '1day 00:00:04'], name='idx', freq='s') # reset freq to None expected_1_nofreq = TimedeltaIndex(['1day 00:00:01', '1day 00:00:01', '1day 00:00:02', '1day 00:00:03'], name='idx', freq=None) expected_3_nofreq = TimedeltaIndex(['1day 00:00:01', '1day 00:00:02', '1day 00:00:03', '1day 00:00:05'], name='idx', freq=None) cases = [(0, Timedelta('1day'), expected_0), (-3, Timedelta('1day'), expected_0), (3, Timedelta('1day 00:00:04'), expected_3), (1, Timedelta('1day 00:00:01'), expected_1_nofreq), (3, Timedelta('1day 00:00:05'), expected_3_nofreq)] for n, d, expected in cases: result = idx.insert(n, d) tm.assert_index_equal(result, expected) assert result.name == expected.name assert result.freq == expected.freq # GH 18295 (test missing) expected = TimedeltaIndex(['1day', pd.NaT, '2day', '3day']) for na in (np.nan, pd.NaT, None): result = timedelta_range('1day', '3day').insert(1, na) tm.assert_index_equal(result, expected) def test_delete(self): idx = timedelta_range(start='1 Days', periods=5, freq='D', name='idx') # prserve freq expected_0 = timedelta_range(start='2 Days', periods=4, freq='D', name='idx') expected_4 = timedelta_range(start='1 Days', periods=4, freq='D', name='idx') # reset freq to None expected_1 = TimedeltaIndex( ['1 day', '3 day', '4 day', '5 day'], freq=None, name='idx') cases = {0: expected_0, -5: expected_0, -1: expected_4, 4: expected_4, 1: expected_1} for n, expected in compat.iteritems(cases): result = idx.delete(n) tm.assert_index_equal(result, expected) assert result.name == expected.name assert result.freq == expected.freq with pytest.raises((IndexError, ValueError)): # either depeidnig on numpy version result = idx.delete(5) def test_delete_slice(self): idx = timedelta_range(start='1 days', periods=10, freq='D', name='idx') # prserve freq expected_0_2 = timedelta_range(start='4 days', periods=7, freq='D', name='idx') expected_7_9 = timedelta_range(start='1 days', periods=7, freq='D', name='idx') # reset freq to None expected_3_5 = TimedeltaIndex(['1 d', '2 d', '3 d', '7 d', '8 d', '9 d', '10d'], freq=None, name='idx') cases = {(0, 1, 2): expected_0_2, (7, 8, 9): expected_7_9, (3, 4, 5): expected_3_5} for n, expected in compat.iteritems(cases): result = idx.delete(n) tm.assert_index_equal(result, expected) assert result.name == expected.name assert result.freq == expected.freq result = idx.delete(slice(n[0], n[-1] + 1)) tm.assert_index_equal(result, expected) assert result.name == expected.name assert 
result.freq == expected.freq def test_getitem(self): idx1 = timedelta_range('1 day', '31 day', freq='D', name='idx') for idx in [idx1]: result = idx[0] assert result == Timedelta('1 day') result = idx[0:5] expected = timedelta_range('1 day', '5 day', freq='D', name='idx') tm.assert_index_equal(result, expected) assert result.freq == expected.freq result = idx[0:10:2] expected = timedelta_range('1 day', '9 day', freq='2D', name='idx') tm.assert_index_equal(result, expected) assert result.freq == expected.freq result = idx[-20:-5:3] expected = timedelta_range('12 day', '24 day', freq='3D', name='idx') tm.assert_index_equal(result, expected) assert result.freq == expected.freq result = idx[4::-1] expected = TimedeltaIndex(['5 day', '4 day', '3 day', '2 day', '1 day'], freq='-1D', name='idx') tm.assert_index_equal(result, expected) assert result.freq == expected.freq def test_take(self): # GH 10295 idx1 = timedelta_range('1 day', '31 day', freq='D', name='idx') for idx in [idx1]: result = idx.take([0]) assert result == Timedelta('1 day') result = idx.take([-1]) assert result == Timedelta('31 day') result = idx.take([0, 1, 2]) expected = timedelta_range('1 day', '3 day', freq='D', name='idx') tm.assert_index_equal(result, expected) assert result.freq == expected.freq result = idx.take([0, 2, 4]) expected = timedelta_range('1 day', '5 day', freq='2D', name='idx') tm.assert_index_equal(result, expected) assert result.freq == expected.freq result = idx.take([7, 4, 1]) expected = timedelta_range('8 day', '2 day', freq='-3D', name='idx') tm.assert_index_equal(result, expected) assert result.freq == expected.freq result = idx.take([3, 2, 5]) expected = TimedeltaIndex(['4 day', '3 day', '6 day'], name='idx') tm.assert_index_equal(result, expected) assert result.freq is None result = idx.take([-3, 2, 5]) expected = TimedeltaIndex(['29 day', '3 day', '6 day'], name='idx') tm.assert_index_equal(result, expected) assert result.freq is None def test_take_invalid_kwargs(self): idx = timedelta_range('1 day', '31 day', freq='D', name='idx') indices = [1, 6, 5, 9, 10, 13, 15, 3] msg = r"take\(\) got an unexpected keyword argument 'foo'" tm.assert_raises_regex(TypeError, msg, idx.take, indices, foo=2) msg = "the 'out' parameter is not supported" tm.assert_raises_regex(ValueError, msg, idx.take, indices, out=indices) msg = "the 'mode' parameter is not supported" tm.assert_raises_regex(ValueError, msg, idx.take, indices, mode='clip') # TODO: This method came from test_timedelta; de-dup with version above def test_take2(self): tds = ['1day 02:00:00', '1 day 04:00:00', '1 day 10:00:00'] idx = TimedeltaIndex(start='1d', end='2d', freq='H', name='idx') expected = TimedeltaIndex(tds, freq=None, name='idx') taken1 = idx.take([2, 4, 10]) taken2 = idx[[2, 4, 10]] for taken in [taken1, taken2]: tm.assert_index_equal(taken, expected) assert isinstance(taken, TimedeltaIndex) assert taken.freq is None assert taken.name == expected.name def test_take_fill_value(self): # GH 12631 idx = TimedeltaIndex(['1 days', '2 days', '3 days'], name='xxx') result = idx.take(np.array([1, 0, -1])) expected = TimedeltaIndex(['2 days', '1 days', '3 days'], name='xxx') tm.assert_index_equal(result, expected) # fill_value result = idx.take(np.array([1, 0, -1]), fill_value=True) expected = TimedeltaIndex(['2 days', '1 days', 'NaT'], name='xxx') tm.assert_index_equal(result, expected) # allow_fill=False result = idx.take(np.array([1, 0, -1]), allow_fill=False, fill_value=True) expected = TimedeltaIndex(['2 days', '1 days', '3 days'], name='xxx') 
tm.assert_index_equal(result, expected) msg = ('When allow_fill=True and fill_value is not None, ' 'all indices must be >= -1') with tm.assert_raises_regex(ValueError, msg): idx.take(np.array([1, 0, -2]), fill_value=True) with tm.assert_raises_regex(ValueError, msg): idx.take(np.array([1, 0, -5]), fill_value=True) with pytest.raises(IndexError): idx.take(np.array([1, -5])) def test_get_loc(self): idx = pd.to_timedelta(['0 days', '1 days', '2 days']) for method in [None, 'pad', 'backfill', 'nearest']: assert idx.get_loc(idx[1], method) == 1 assert idx.get_loc(idx[1].to_pytimedelta(), method) == 1 assert idx.get_loc(str(idx[1]), method) == 1 assert idx.get_loc(idx[1], 'pad', tolerance=Timedelta(0)) == 1 assert idx.get_loc(idx[1], 'pad', tolerance=np.timedelta64(0, 's')) == 1 assert idx.get_loc(idx[1], 'pad', tolerance=timedelta(0)) == 1 with tm.assert_raises_regex(ValueError, 'unit abbreviation w/o a number'): idx.get_loc(idx[1], method='nearest', tolerance='foo') with pytest.raises( ValueError, match='tolerance size must match'): idx.get_loc(idx[1], method='nearest', tolerance=[Timedelta(0).to_timedelta64(), Timedelta(0).to_timedelta64()]) for method, loc in [('pad', 1), ('backfill', 2), ('nearest', 1)]: assert idx.get_loc('1 day 1 hour', method) == loc # GH 16909 assert idx.get_loc(idx[1].to_timedelta64()) == 1 # GH 16896 assert idx.get_loc('0 days') == 0 def test_get_loc_nat(self): tidx = TimedeltaIndex(['1 days 01:00:00', 'NaT', '2 days 01:00:00']) assert tidx.get_loc(pd.NaT) == 1 assert tidx.get_loc(None) == 1 assert tidx.get_loc(float('nan')) == 1 assert tidx.get_loc(np.nan) == 1 def test_get_indexer(self): idx = pd.to_timedelta(['0 days', '1 days', '2 days']) tm.assert_numpy_array_equal(idx.get_indexer(idx), np.array([0, 1, 2], dtype=np.intp)) target = pd.to_timedelta(['-1 hour', '12 hours', '1 day 1 hour']) tm.assert_numpy_array_equal(idx.get_indexer(target, 'pad'), np.array([-1, 0, 1], dtype=np.intp)) tm.assert_numpy_array_equal(idx.get_indexer(target, 'backfill'), np.array([0, 1, 2], dtype=np.intp)) tm.assert_numpy_array_equal(idx.get_indexer(target, 'nearest'), np.array([0, 1, 1], dtype=np.intp)) res = idx.get_indexer(target, 'nearest', tolerance=Timedelta('1 hour')) tm.assert_numpy_array_equal(res, np.array([0, -1, 1], dtype=np.intp))
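# Small standalone illustration (separate from the tests above) of the
# take/fill_value behaviour covered by test_take_fill_value: an index of -1 only
# means "missing" when a fill_value is supplied; otherwise it counts from the end.
import numpy as np
import pandas as pd

idx = pd.TimedeltaIndex(['1 days', '2 days', '3 days'])
print(idx.take(np.array([1, 0, -1])))                   # '2 days', '1 days', '3 days'
print(idx.take(np.array([1, 0, -1]), fill_value=True))  # '2 days', '1 days', NaT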
winklerand/pandas
pandas/tests/indexes/timedeltas/test_indexing.py
pandas/tests/series/common.py
""" Support for Yamaha Receivers. For more details about this platform, please refer to the documentation at https://home-assistant.io/components/media_player.yamaha/ """ import logging import voluptuous as vol from homeassistant.components.media_player import ( SUPPORT_TURN_OFF, SUPPORT_TURN_ON, SUPPORT_VOLUME_MUTE, SUPPORT_VOLUME_SET, SUPPORT_SELECT_SOURCE, SUPPORT_PLAY_MEDIA, SUPPORT_PAUSE, SUPPORT_STOP, SUPPORT_NEXT_TRACK, SUPPORT_PREVIOUS_TRACK, SUPPORT_PLAY, MEDIA_TYPE_MUSIC, MediaPlayerDevice, PLATFORM_SCHEMA) from homeassistant.const import (CONF_NAME, CONF_HOST, STATE_OFF, STATE_ON, STATE_PLAYING, STATE_IDLE) import homeassistant.helpers.config_validation as cv REQUIREMENTS = ['rxv==0.4.0'] _LOGGER = logging.getLogger(__name__) SUPPORT_YAMAHA = SUPPORT_VOLUME_SET | SUPPORT_VOLUME_MUTE | \ SUPPORT_TURN_ON | SUPPORT_TURN_OFF | SUPPORT_SELECT_SOURCE | SUPPORT_PLAY CONF_SOURCE_NAMES = 'source_names' CONF_SOURCE_IGNORE = 'source_ignore' CONF_ZONE_IGNORE = 'zone_ignore' DEFAULT_NAME = 'Yamaha Receiver' KNOWN = 'yamaha_known_receivers' PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({ vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string, vol.Optional(CONF_HOST): cv.string, vol.Optional(CONF_SOURCE_IGNORE, default=[]): vol.All(cv.ensure_list, [cv.string]), vol.Optional(CONF_ZONE_IGNORE, default=[]): vol.All(cv.ensure_list, [cv.string]), vol.Optional(CONF_SOURCE_NAMES, default={}): {cv.string: cv.string}, }) def setup_platform(hass, config, add_devices, discovery_info=None): """Set up the Yamaha platform.""" import rxv # keep track of configured receivers so that we don't end up # discovering a receiver dynamically that we have static config # for. if hass.data.get(KNOWN, None) is None: hass.data[KNOWN] = set() name = config.get(CONF_NAME) host = config.get(CONF_HOST) source_ignore = config.get(CONF_SOURCE_IGNORE) source_names = config.get(CONF_SOURCE_NAMES) zone_ignore = config.get(CONF_ZONE_IGNORE) if discovery_info is not None: name = discovery_info.get('name') model = discovery_info.get('model_name') ctrl_url = discovery_info.get('control_url') desc_url = discovery_info.get('description_url') if ctrl_url in hass.data[KNOWN]: _LOGGER.info("%s already manually configured", ctrl_url) return receivers = rxv.RXV( ctrl_url, model_name=model, friendly_name=name, unit_desc_url=desc_url).zone_controllers() _LOGGER.info("Receivers: %s", receivers) # when we are dynamically discovered config is empty zone_ignore = [] elif host is None: receivers = [] for recv in rxv.find(): receivers.extend(recv.zone_controllers()) else: ctrl_url = "http://{}:80/YamahaRemoteControl/ctrl".format(host) receivers = rxv.RXV(ctrl_url, name).zone_controllers() for receiver in receivers: if receiver.zone not in zone_ignore: hass.data[KNOWN].add(receiver.ctrl_url) add_devices([ YamahaDevice(name, receiver, source_ignore, source_names)]) class YamahaDevice(MediaPlayerDevice): """Representation of a Yamaha device.""" def __init__(self, name, receiver, source_ignore, source_names): """Initialize the Yamaha Receiver.""" self._receiver = receiver self._muted = False self._volume = 0 self._pwstate = STATE_OFF self._current_source = None self._source_list = None self._source_ignore = source_ignore or [] self._source_names = source_names or {} self._reverse_mapping = None self._playback_support = None self._is_playback_supported = False self._play_status = None self.update() self._name = name self._zone = receiver.zone def update(self): """Get the latest details from the device.""" self._play_status = self._receiver.play_status() 
if self._receiver.on: if self._play_status is None: self._pwstate = STATE_ON elif self._play_status.playing: self._pwstate = STATE_PLAYING else: self._pwstate = STATE_IDLE else: self._pwstate = STATE_OFF self._muted = self._receiver.mute self._volume = (self._receiver.volume / 100) + 1 if self.source_list is None: self.build_source_list() current_source = self._receiver.input self._current_source = self._source_names.get( current_source, current_source) self._playback_support = self._receiver.get_playback_support() self._is_playback_supported = self._receiver.is_playback_supported( self._current_source) def build_source_list(self): """Build the source list.""" self._reverse_mapping = {alias: source for source, alias in self._source_names.items()} self._source_list = sorted( self._source_names.get(source, source) for source in self._receiver.inputs() if source not in self._source_ignore) @property def name(self): """Return the name of the device.""" name = self._name if self._zone != "Main_Zone": # Zone will be one of Main_Zone, Zone_2, Zone_3 name += " " + self._zone.replace('_', ' ') return name @property def state(self): """Return the state of the device.""" return self._pwstate @property def volume_level(self): """Volume level of the media player (0..1).""" return self._volume @property def is_volume_muted(self): """Boolean if volume is currently muted.""" return self._muted @property def source(self): """Return the current input source.""" return self._current_source @property def source_list(self): """List of available input sources.""" return self._source_list @property def supported_features(self): """Flag media player features that are supported.""" supported_features = SUPPORT_YAMAHA supports = self._playback_support mapping = {'play': (SUPPORT_PLAY | SUPPORT_PLAY_MEDIA), 'pause': SUPPORT_PAUSE, 'stop': SUPPORT_STOP, 'skip_f': SUPPORT_NEXT_TRACK, 'skip_r': SUPPORT_PREVIOUS_TRACK} for attr, feature in mapping.items(): if getattr(supports, attr, False): supported_features |= feature return supported_features def turn_off(self): """Turn off media player.""" self._receiver.on = False def set_volume_level(self, volume): """Set volume level, range 0..1.""" receiver_vol = 100 - (volume * 100) negative_receiver_vol = -receiver_vol self._receiver.volume = negative_receiver_vol def mute_volume(self, mute): """Mute (true) or unmute (false) media player.""" self._receiver.mute = mute def turn_on(self): """Turn the media player on.""" self._receiver.on = True self._volume = (self._receiver.volume / 100) + 1 def media_play(self): """Send play commmand.""" self._call_playback_function(self._receiver.play, "play") def media_pause(self): """Send pause command.""" self._call_playback_function(self._receiver.pause, "pause") def media_stop(self): """Send stop command.""" self._call_playback_function(self._receiver.stop, "stop") def media_previous_track(self): """Send previous track command.""" self._call_playback_function(self._receiver.previous, "previous track") def media_next_track(self): """Send next track command.""" self._call_playback_function(self._receiver.next, "next track") def _call_playback_function(self, function, function_text): import rxv try: function() except rxv.exceptions.ResponseException: _LOGGER.warning( "Failed to execute %s on %s", function_text, self._name) def select_source(self, source): """Select input source.""" self._receiver.input = self._reverse_mapping.get(source, source) def play_media(self, media_type, media_id, **kwargs): """Play media from an ID. 
This exposes a pass through for various input sources in the Yamaha to direct play certain kinds of media. media_type is treated as the input type that we are setting, and media id is specific to it. """ if media_type == "NET RADIO": self._receiver.net_radio(media_id) @property def media_artist(self): """Artist of current playing media.""" if self._play_status is not None: return self._play_status.artist @property def media_album_name(self): """Album of current playing media.""" if self._play_status is not None: return self._play_status.album @property def media_content_type(self): """Content type of current playing media.""" # Loose assumption that if playback is supported, we are playing music if self._is_playback_supported: return MEDIA_TYPE_MUSIC return None @property def media_title(self): """Artist of current playing media.""" if self._play_status is not None: song = self._play_status.song station = self._play_status.station # If both song and station is available, print both, otherwise # just the one we have. if song and station: return '{}: {}'.format(station, song) else: return song or station
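# Standalone sketch (not part of the component above) of the volume-scale
# conversion YamahaDevice performs: the receiver's (negative) volume reading is
# mapped onto Home Assistant's 0..1 scale by update(), and set_volume_level()
# applies the inverse mapping before writing it back.
def receiver_to_ha(receiver_volume):
    # e.g. -30 -> 0.7, mirroring `(self._receiver.volume / 100) + 1`
    return (receiver_volume / 100) + 1


def ha_to_receiver(volume_level):
    # e.g. 0.7 -> -30.0, mirroring `-(100 - (volume * 100))`
    return -(100 - (volume_level * 100))


print(receiver_to_ha(-30))   # 0.7
print(ha_to_receiver(0.7))   # -30.0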
"""The tests for the TTS component.""" import ctypes import os import shutil from unittest.mock import patch, PropertyMock import pytest import requests import homeassistant.components.http as http import homeassistant.components.tts as tts from homeassistant.components.tts.demo import DemoProvider from homeassistant.components.media_player import ( SERVICE_PLAY_MEDIA, MEDIA_TYPE_MUSIC, ATTR_MEDIA_CONTENT_ID, ATTR_MEDIA_CONTENT_TYPE, DOMAIN as DOMAIN_MP) from homeassistant.setup import setup_component from tests.common import ( get_test_home_assistant, get_test_instance_port, assert_setup_component, mock_service) @pytest.fixture(autouse=True) def mutagen_mock(): """Mock writing tags.""" with patch('homeassistant.components.tts.SpeechManager.write_tags', side_effect=lambda *args: args[1]): yield class TestTTS(object): """Test the Google speech component.""" def setup_method(self): """Setup things to be run when tests are started.""" self.hass = get_test_home_assistant() self.demo_provider = DemoProvider('en') self.default_tts_cache = self.hass.config.path(tts.DEFAULT_CACHE_DIR) setup_component( self.hass, http.DOMAIN, {http.DOMAIN: {http.CONF_SERVER_PORT: get_test_instance_port()}}) def teardown_method(self): """Stop everything that was started.""" if os.path.isdir(self.default_tts_cache): shutil.rmtree(self.default_tts_cache) self.hass.stop() def test_setup_component_demo(self): """Setup the demo platform with defaults.""" config = { tts.DOMAIN: { 'platform': 'demo', } } with assert_setup_component(1, tts.DOMAIN): setup_component(self.hass, tts.DOMAIN, config) assert self.hass.services.has_service(tts.DOMAIN, 'demo_say') assert self.hass.services.has_service(tts.DOMAIN, 'clear_cache') @patch('os.mkdir', side_effect=OSError(2, "No access")) def test_setup_component_demo_no_access_cache_folder(self, mock_mkdir): """Setup the demo platform with defaults.""" config = { tts.DOMAIN: { 'platform': 'demo', } } assert not setup_component(self.hass, tts.DOMAIN, config) assert not self.hass.services.has_service(tts.DOMAIN, 'demo_say') assert not self.hass.services.has_service(tts.DOMAIN, 'clear_cache') def test_setup_component_and_test_service(self): """Setup the demo platform and call service.""" calls = mock_service(self.hass, DOMAIN_MP, SERVICE_PLAY_MEDIA) config = { tts.DOMAIN: { 'platform': 'demo', } } with assert_setup_component(1, tts.DOMAIN): setup_component(self.hass, tts.DOMAIN, config) self.hass.services.call(tts.DOMAIN, 'demo_say', { tts.ATTR_MESSAGE: "I person is on front of your door.", }) self.hass.block_till_done() assert len(calls) == 1 assert calls[0].data[ATTR_MEDIA_CONTENT_TYPE] == MEDIA_TYPE_MUSIC assert calls[0].data[ATTR_MEDIA_CONTENT_ID].find( "/api/tts_proxy/265944c108cbb00b2a621be5930513e03a0bb2cd" "_en_-_demo.mp3") \ != -1 assert os.path.isfile(os.path.join( self.default_tts_cache, "265944c108cbb00b2a621be5930513e03a0bb2cd_en_-_demo.mp3")) def test_setup_component_and_test_service_with_config_language(self): """Setup the demo platform and call service.""" calls = mock_service(self.hass, DOMAIN_MP, SERVICE_PLAY_MEDIA) config = { tts.DOMAIN: { 'platform': 'demo', 'language': 'de' } } with assert_setup_component(1, tts.DOMAIN): setup_component(self.hass, tts.DOMAIN, config) self.hass.services.call(tts.DOMAIN, 'demo_say', { tts.ATTR_MESSAGE: "I person is on front of your door.", }) self.hass.block_till_done() assert len(calls) == 1 assert calls[0].data[ATTR_MEDIA_CONTENT_TYPE] == MEDIA_TYPE_MUSIC assert calls[0].data[ATTR_MEDIA_CONTENT_ID].find( 
"/api/tts_proxy/265944c108cbb00b2a621be5930513e03a0bb2cd" "_de_-_demo.mp3") \ != -1 assert os.path.isfile(os.path.join( self.default_tts_cache, "265944c108cbb00b2a621be5930513e03a0bb2cd_de_-_demo.mp3")) def test_setup_component_and_test_service_with_wrong_conf_language(self): """Setup the demo platform and call service with wrong config.""" config = { tts.DOMAIN: { 'platform': 'demo', 'language': 'ru' } } with assert_setup_component(0, tts.DOMAIN): setup_component(self.hass, tts.DOMAIN, config) def test_setup_component_and_test_service_with_service_language(self): """Setup the demo platform and call service.""" calls = mock_service(self.hass, DOMAIN_MP, SERVICE_PLAY_MEDIA) config = { tts.DOMAIN: { 'platform': 'demo', } } with assert_setup_component(1, tts.DOMAIN): setup_component(self.hass, tts.DOMAIN, config) self.hass.services.call(tts.DOMAIN, 'demo_say', { tts.ATTR_MESSAGE: "I person is on front of your door.", tts.ATTR_LANGUAGE: "de", }) self.hass.block_till_done() assert len(calls) == 1 assert calls[0].data[ATTR_MEDIA_CONTENT_TYPE] == MEDIA_TYPE_MUSIC assert calls[0].data[ATTR_MEDIA_CONTENT_ID].find( "/api/tts_proxy/265944c108cbb00b2a621be5930513e03a0bb2cd" "_de_-_demo.mp3") \ != -1 assert os.path.isfile(os.path.join( self.default_tts_cache, "265944c108cbb00b2a621be5930513e03a0bb2cd_de_-_demo.mp3")) def test_setup_component_test_service_with_wrong_service_language(self): """Setup the demo platform and call service.""" calls = mock_service(self.hass, DOMAIN_MP, SERVICE_PLAY_MEDIA) config = { tts.DOMAIN: { 'platform': 'demo', } } with assert_setup_component(1, tts.DOMAIN): setup_component(self.hass, tts.DOMAIN, config) self.hass.services.call(tts.DOMAIN, 'demo_say', { tts.ATTR_MESSAGE: "I person is on front of your door.", tts.ATTR_LANGUAGE: "lang", }) self.hass.block_till_done() assert len(calls) == 0 assert not os.path.isfile(os.path.join( self.default_tts_cache, "265944c108cbb00b2a621be5930513e03a0bb2cd_lang_-_demo.mp3")) def test_setup_component_and_test_service_with_service_options(self): """Setup the demo platform and call service with options.""" calls = mock_service(self.hass, DOMAIN_MP, SERVICE_PLAY_MEDIA) config = { tts.DOMAIN: { 'platform': 'demo', } } with assert_setup_component(1, tts.DOMAIN): setup_component(self.hass, tts.DOMAIN, config) self.hass.services.call(tts.DOMAIN, 'demo_say', { tts.ATTR_MESSAGE: "I person is on front of your door.", tts.ATTR_LANGUAGE: "de", tts.ATTR_OPTIONS: { 'voice': 'alex' } }) self.hass.block_till_done() opt_hash = ctypes.c_size_t(hash(frozenset({'voice': 'alex'}))).value assert len(calls) == 1 assert calls[0].data[ATTR_MEDIA_CONTENT_TYPE] == MEDIA_TYPE_MUSIC assert calls[0].data[ATTR_MEDIA_CONTENT_ID].find( "/api/tts_proxy/265944c108cbb00b2a621be5930513e03a0bb2cd" "_de_{0}_demo.mp3".format(opt_hash)) \ != -1 assert os.path.isfile(os.path.join( self.default_tts_cache, "265944c108cbb00b2a621be5930513e03a0bb2cd_de_{0}_demo.mp3".format( opt_hash))) @patch('homeassistant.components.tts.demo.DemoProvider.default_options', new_callable=PropertyMock(return_value={'voice': 'alex'})) def test_setup_component_and_test_with_service_options_def(self, def_mock): """Setup the demo platform and call service with default options.""" calls = mock_service(self.hass, DOMAIN_MP, SERVICE_PLAY_MEDIA) config = { tts.DOMAIN: { 'platform': 'demo', } } with assert_setup_component(1, tts.DOMAIN): setup_component(self.hass, tts.DOMAIN, config) self.hass.services.call(tts.DOMAIN, 'demo_say', { tts.ATTR_MESSAGE: "I person is on front of your door.", tts.ATTR_LANGUAGE: 
"de", }) self.hass.block_till_done() opt_hash = ctypes.c_size_t(hash(frozenset({'voice': 'alex'}))).value assert len(calls) == 1 assert calls[0].data[ATTR_MEDIA_CONTENT_TYPE] == MEDIA_TYPE_MUSIC assert calls[0].data[ATTR_MEDIA_CONTENT_ID].find( "/api/tts_proxy/265944c108cbb00b2a621be5930513e03a0bb2cd" "_de_{0}_demo.mp3".format(opt_hash)) \ != -1 assert os.path.isfile(os.path.join( self.default_tts_cache, "265944c108cbb00b2a621be5930513e03a0bb2cd_de_{0}_demo.mp3".format( opt_hash))) def test_setup_component_and_test_service_with_service_options_wrong(self): """Setup the demo platform and call service with wrong options.""" calls = mock_service(self.hass, DOMAIN_MP, SERVICE_PLAY_MEDIA) config = { tts.DOMAIN: { 'platform': 'demo', } } with assert_setup_component(1, tts.DOMAIN): setup_component(self.hass, tts.DOMAIN, config) self.hass.services.call(tts.DOMAIN, 'demo_say', { tts.ATTR_MESSAGE: "I person is on front of your door.", tts.ATTR_LANGUAGE: "de", tts.ATTR_OPTIONS: { 'speed': 1 } }) self.hass.block_till_done() opt_hash = ctypes.c_size_t(hash(frozenset({'speed': 1}))).value assert len(calls) == 0 assert not os.path.isfile(os.path.join( self.default_tts_cache, "265944c108cbb00b2a621be5930513e03a0bb2cd_de_{0}_demo.mp3".format( opt_hash))) def test_setup_component_and_test_service_clear_cache(self): """Setup the demo platform and call service clear cache.""" calls = mock_service(self.hass, DOMAIN_MP, SERVICE_PLAY_MEDIA) config = { tts.DOMAIN: { 'platform': 'demo', } } with assert_setup_component(1, tts.DOMAIN): setup_component(self.hass, tts.DOMAIN, config) self.hass.services.call(tts.DOMAIN, 'demo_say', { tts.ATTR_MESSAGE: "I person is on front of your door.", }) self.hass.block_till_done() assert len(calls) == 1 assert os.path.isfile(os.path.join( self.default_tts_cache, "265944c108cbb00b2a621be5930513e03a0bb2cd_en_-_demo.mp3")) self.hass.services.call(tts.DOMAIN, tts.SERVICE_CLEAR_CACHE, {}) self.hass.block_till_done() assert not os.path.isfile(os.path.join( self.default_tts_cache, "265944c108cbb00b2a621be5930513e03a0bb2cd_en_-_demo.mp3")) def test_setup_component_and_test_service_with_receive_voice(self): """Setup the demo platform and call service and receive voice.""" calls = mock_service(self.hass, DOMAIN_MP, SERVICE_PLAY_MEDIA) config = { tts.DOMAIN: { 'platform': 'demo', } } with assert_setup_component(1, tts.DOMAIN): setup_component(self.hass, tts.DOMAIN, config) self.hass.start() self.hass.services.call(tts.DOMAIN, 'demo_say', { tts.ATTR_MESSAGE: "I person is on front of your door.", }) self.hass.block_till_done() assert len(calls) == 1 req = requests.get(calls[0].data[ATTR_MEDIA_CONTENT_ID]) _, demo_data = self.demo_provider.get_tts_audio("bla", 'en') demo_data = tts.SpeechManager.write_tags( "265944c108cbb00b2a621be5930513e03a0bb2cd_en_-_demo.mp3", demo_data, self.demo_provider, "I person is on front of your door.", 'en', None) assert req.status_code == 200 assert req.content == demo_data def test_setup_component_and_test_service_with_receive_voice_german(self): """Setup the demo platform and call service and receive voice.""" calls = mock_service(self.hass, DOMAIN_MP, SERVICE_PLAY_MEDIA) config = { tts.DOMAIN: { 'platform': 'demo', 'language': 'de', } } with assert_setup_component(1, tts.DOMAIN): setup_component(self.hass, tts.DOMAIN, config) self.hass.start() self.hass.services.call(tts.DOMAIN, 'demo_say', { tts.ATTR_MESSAGE: "I person is on front of your door.", }) self.hass.block_till_done() assert len(calls) == 1 req = requests.get(calls[0].data[ATTR_MEDIA_CONTENT_ID]) _, 
demo_data = self.demo_provider.get_tts_audio("bla", "de") demo_data = tts.SpeechManager.write_tags( "265944c108cbb00b2a621be5930513e03a0bb2cd_de_-_demo.mp3", demo_data, self.demo_provider, "I person is on front of your door.", 'de', None) assert req.status_code == 200 assert req.content == demo_data def test_setup_component_and_web_view_wrong_file(self): """Setup the demo platform and receive wrong file from web.""" config = { tts.DOMAIN: { 'platform': 'demo', } } with assert_setup_component(1, tts.DOMAIN): setup_component(self.hass, tts.DOMAIN, config) self.hass.start() url = ("{}/api/tts_proxy/265944c108cbb00b2a621be5930513e03a0bb2cd" "_en_-_demo.mp3").format(self.hass.config.api.base_url) req = requests.get(url) assert req.status_code == 404 def test_setup_component_and_web_view_wrong_filename(self): """Setup the demo platform and receive wrong filename from web.""" config = { tts.DOMAIN: { 'platform': 'demo', } } with assert_setup_component(1, tts.DOMAIN): setup_component(self.hass, tts.DOMAIN, config) self.hass.start() url = ("{}/api/tts_proxy/265944dsk32c1b2a621be5930510bb2cd" "_en_-_demo.mp3").format(self.hass.config.api.base_url) req = requests.get(url) assert req.status_code == 404 def test_setup_component_test_without_cache(self): """Setup demo platform without cache.""" calls = mock_service(self.hass, DOMAIN_MP, SERVICE_PLAY_MEDIA) config = { tts.DOMAIN: { 'platform': 'demo', 'cache': False, } } with assert_setup_component(1, tts.DOMAIN): setup_component(self.hass, tts.DOMAIN, config) self.hass.services.call(tts.DOMAIN, 'demo_say', { tts.ATTR_MESSAGE: "I person is on front of your door.", }) self.hass.block_till_done() assert len(calls) == 1 assert not os.path.isfile(os.path.join( self.default_tts_cache, "265944c108cbb00b2a621be5930513e03a0bb2cd_en_-_demo.mp3")) def test_setup_component_test_with_cache_call_service_without_cache(self): """Setup demo platform with cache and call service without cache.""" calls = mock_service(self.hass, DOMAIN_MP, SERVICE_PLAY_MEDIA) config = { tts.DOMAIN: { 'platform': 'demo', 'cache': True, } } with assert_setup_component(1, tts.DOMAIN): setup_component(self.hass, tts.DOMAIN, config) self.hass.services.call(tts.DOMAIN, 'demo_say', { tts.ATTR_MESSAGE: "I person is on front of your door.", tts.ATTR_CACHE: False, }) self.hass.block_till_done() assert len(calls) == 1 assert not os.path.isfile(os.path.join( self.default_tts_cache, "265944c108cbb00b2a621be5930513e03a0bb2cd_en_-_demo.mp3")) def test_setup_component_test_with_cache_dir(self): """Setup demo platform with cache and call service without cache.""" calls = mock_service(self.hass, DOMAIN_MP, SERVICE_PLAY_MEDIA) _, demo_data = self.demo_provider.get_tts_audio("bla", 'en') cache_file = os.path.join( self.default_tts_cache, "265944c108cbb00b2a621be5930513e03a0bb2cd_en_-_demo.mp3") os.mkdir(self.default_tts_cache) with open(cache_file, "wb") as voice_file: voice_file.write(demo_data) config = { tts.DOMAIN: { 'platform': 'demo', 'cache': True, } } with assert_setup_component(1, tts.DOMAIN): setup_component(self.hass, tts.DOMAIN, config) with patch('homeassistant.components.tts.demo.DemoProvider.' 
'get_tts_audio', return_value=(None, None)): self.hass.services.call(tts.DOMAIN, 'demo_say', { tts.ATTR_MESSAGE: "I person is on front of your door.", }) self.hass.block_till_done() assert len(calls) == 1 assert calls[0].data[ATTR_MEDIA_CONTENT_ID].find( "/api/tts_proxy/265944c108cbb00b2a621be5930513e03a0bb2cd" "_en_-_demo.mp3") \ != -1 @patch('homeassistant.components.tts.demo.DemoProvider.get_tts_audio', return_value=(None, None)) def test_setup_component_test_with_error_on_get_tts(self, tts_mock): """Setup demo platform with wrong get_tts_audio.""" calls = mock_service(self.hass, DOMAIN_MP, SERVICE_PLAY_MEDIA) config = { tts.DOMAIN: { 'platform': 'demo' } } with assert_setup_component(1, tts.DOMAIN): setup_component(self.hass, tts.DOMAIN, config) self.hass.services.call(tts.DOMAIN, 'demo_say', { tts.ATTR_MESSAGE: "I person is on front of your door.", }) self.hass.block_till_done() assert len(calls) == 0 def test_setup_component_load_cache_retrieve_without_mem_cache(self): """Setup component and load cache and get without mem cache.""" _, demo_data = self.demo_provider.get_tts_audio("bla", 'en') cache_file = os.path.join( self.default_tts_cache, "265944c108cbb00b2a621be5930513e03a0bb2cd_en_-_demo.mp3") os.mkdir(self.default_tts_cache) with open(cache_file, "wb") as voice_file: voice_file.write(demo_data) config = { tts.DOMAIN: { 'platform': 'demo', 'cache': True, } } with assert_setup_component(1, tts.DOMAIN): setup_component(self.hass, tts.DOMAIN, config) self.hass.start() url = ("{}/api/tts_proxy/265944c108cbb00b2a621be5930513e03a0bb2cd" "_en_-_demo.mp3").format(self.hass.config.api.base_url) req = requests.get(url) assert req.status_code == 200 assert req.content == demo_data
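The assertions in these tests all follow the same cache-filename pattern, <message-key>_<language>_<options-hash>_<platform>.mp3, where the 40-character message key is hard-coded in the tests and the options hash is derived with ctypes. A small sketch that mirrors just that naming scheme, reusing the literal key rather than recomputing it:

import ctypes

MESSAGE_KEY = "265944c108cbb00b2a621be5930513e03a0bb2cd"  # literal taken from the tests above


def options_hash(options):
    """Mirror the opt_hash computation used in the tests above."""
    return ctypes.c_size_t(hash(frozenset(options))).value


def expected_filename(language, options=None, platform="demo"):
    """Build the cache filename the tests assert against."""
    opt = options_hash(options) if options else "-"
    return "{}_{}_{}_{}.mp3".format(MESSAGE_KEY, language, opt, platform)


print(expected_filename("en"))                     # ..._en_-_demo.mp3
print(expected_filename("de", {"voice": "alex"}))  # ..._de_<opt_hash>_demo.mp3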
happyleavesaoc/home-assistant
tests/components/tts/test_init.py
homeassistant/components/media_player/yamaha.py
import hashlib import json import os import sqlalchemy as sa from sqlalchemy.ext.declarative import declarative_base from sacred.dependencies import get_digest from sacred.serializer import restore Base = declarative_base() class Source(Base): __tablename__ = "source" @classmethod def get_or_create(cls, filename, md5sum, session, basedir): instance = ( session.query(cls).filter_by(filename=filename, md5sum=md5sum).first() ) if instance: return instance full_path = os.path.join(basedir, filename) md5sum_ = get_digest(full_path) assert md5sum_ == md5sum, "found md5 mismatch for {}: {} != {}".format( filename, md5sum, md5sum_ ) with open(full_path, "r") as f: return cls(filename=filename, md5sum=md5sum, content=f.read()) source_id = sa.Column(sa.Integer, primary_key=True) filename = sa.Column(sa.String(256)) md5sum = sa.Column(sa.String(32)) content = sa.Column(sa.Text) def to_json(self): return {"filename": self.filename, "md5sum": self.md5sum} class Repository(Base): __tablename__ = "repository" @classmethod def get_or_create(cls, url, commit, dirty, session): instance = ( session.query(cls).filter_by(url=url, commit=commit, dirty=dirty).first() ) if instance: return instance return cls(url=url, commit=commit, dirty=dirty) repository_id = sa.Column(sa.Integer, primary_key=True) url = sa.Column(sa.String(2048)) commit = sa.Column(sa.String(40)) dirty = sa.Column(sa.Boolean) def to_json(self): return {"url": self.url, "commit": self.commit, "dirty": self.dirty} class Dependency(Base): __tablename__ = "dependency" @classmethod def get_or_create(cls, dep, session): name, _, version = dep.partition("==") instance = session.query(cls).filter_by(name=name, version=version).first() if instance: return instance return cls(name=name, version=version) dependency_id = sa.Column(sa.Integer, primary_key=True) name = sa.Column(sa.String(32)) version = sa.Column(sa.String(16)) def to_json(self): return "{}=={}".format(self.name, self.version) class Artifact(Base): __tablename__ = "artifact" @classmethod def create(cls, name, filename): with open(filename, "rb") as f: return cls(filename=name, content=f.read()) artifact_id = sa.Column(sa.Integer, primary_key=True) filename = sa.Column(sa.String(64)) content = sa.Column(sa.LargeBinary) run_id = sa.Column(sa.String(24), sa.ForeignKey("run.run_id")) run = sa.orm.relationship("Run", backref=sa.orm.backref("artifacts")) def to_json(self): return {"_id": self.artifact_id, "filename": self.filename} class Resource(Base): __tablename__ = "resource" @classmethod def get_or_create(cls, filename, session): md5sum = get_digest(filename) instance = ( session.query(cls).filter_by(filename=filename, md5sum=md5sum).first() ) if instance: return instance with open(filename, "rb") as f: return cls(filename=filename, md5sum=md5sum, content=f.read()) resource_id = sa.Column(sa.Integer, primary_key=True) filename = sa.Column(sa.String(256)) md5sum = sa.Column(sa.String(32)) content = sa.Column(sa.LargeBinary) def to_json(self): return {"filename": self.filename, "md5sum": self.md5sum} class Host(Base): __tablename__ = "host" @classmethod def get_or_create(cls, host_info, session): h = dict( hostname=host_info["hostname"], cpu=host_info["cpu"], os=host_info["os"][0], os_info=host_info["os"][1], python_version=host_info["python_version"], ) return session.query(cls).filter_by(**h).first() or cls(**h) host_id = sa.Column(sa.Integer, primary_key=True) cpu = sa.Column(sa.String(64)) hostname = sa.Column(sa.String(64)) os = sa.Column(sa.String(16)) os_info = sa.Column(sa.String(64)) 
python_version = sa.Column(sa.String(16)) def to_json(self): return { "cpu": self.cpu, "hostname": self.hostname, "os": [self.os, self.os_info], "python_version": self.python_version, } experiment_source_association = sa.Table( "experiments_sources", Base.metadata, sa.Column("experiment_id", sa.Integer, sa.ForeignKey("experiment.experiment_id")), sa.Column("source_id", sa.Integer, sa.ForeignKey("source.source_id")), ) experiment_repository_association = sa.Table( "experiments_repositories", Base.metadata, sa.Column("experiment_id", sa.Integer, sa.ForeignKey("experiment.experiment_id")), sa.Column("repository_id", sa.Integer, sa.ForeignKey("repository.repository_id")), ) experiment_dependency_association = sa.Table( "experiments_dependencies", Base.metadata, sa.Column("experiment_id", sa.Integer, sa.ForeignKey("experiment.experiment_id")), sa.Column("dependency_id", sa.Integer, sa.ForeignKey("dependency.dependency_id")), ) class Experiment(Base): __tablename__ = "experiment" @classmethod def get_or_create(cls, ex_info, session): name = ex_info["name"] # Compute a MD5sum of the ex_info to determine its uniqueness h = hashlib.md5() h.update(json.dumps(ex_info).encode()) md5 = h.hexdigest() instance = session.query(cls).filter_by(name=name, md5sum=md5).first() if instance: return instance dependencies = [ Dependency.get_or_create(d, session) for d in ex_info["dependencies"] ] sources = [ Source.get_or_create(s, md5sum, session, ex_info["base_dir"]) for s, md5sum in ex_info["sources"] ] repositories = set() for r in ex_info["repositories"]: repository = Repository.get_or_create( r["url"], r["commit"], r["dirty"], session ) session.add(repository) repositories.add(repository) repositories = list(repositories) return cls( name=name, dependencies=dependencies, sources=sources, repositories=repositories, md5sum=md5, base_dir=ex_info["base_dir"], ) experiment_id = sa.Column(sa.Integer, primary_key=True) name = sa.Column(sa.String(32)) md5sum = sa.Column(sa.String(32)) base_dir = sa.Column(sa.String(64)) sources = sa.orm.relationship( "Source", secondary=experiment_source_association, backref="experiments" ) repositories = sa.orm.relationship( "Repository", secondary=experiment_repository_association, backref="experiments" ) dependencies = sa.orm.relationship( "Dependency", secondary=experiment_dependency_association, backref="experiments" ) def to_json(self): return { "name": self.name, "base_dir": self.base_dir, "sources": [s.to_json() for s in self.sources], "repositories": [r.to_json() for r in self.repositories], "dependencies": [d.to_json() for d in self.dependencies], } run_resource_association = sa.Table( "runs_resources", Base.metadata, sa.Column("run_id", sa.String(24), sa.ForeignKey("run.run_id")), sa.Column("resource_id", sa.Integer, sa.ForeignKey("resource.resource_id")), ) class Run(Base): __tablename__ = "run" id = sa.Column(sa.Integer, primary_key=True) run_id = sa.Column(sa.String(24), unique=True) command = sa.Column(sa.String(64)) # times start_time = sa.Column(sa.DateTime) heartbeat = sa.Column(sa.DateTime) stop_time = sa.Column(sa.DateTime) queue_time = sa.Column(sa.DateTime) # meta info priority = sa.Column(sa.Float) comment = sa.Column(sa.Text) fail_trace = sa.Column(sa.Text) # Captured out # TODO: move to separate table? 
captured_out = sa.Column(sa.Text) # Configuration & info # TODO: switch type to json if possible config = sa.Column(sa.Text) info = sa.Column(sa.Text) status = sa.Column( sa.Enum( "RUNNING", "COMPLETED", "INTERRUPTED", "TIMEOUT", "FAILED", name="status_enum", ) ) host_id = sa.Column(sa.Integer, sa.ForeignKey("host.host_id")) host = sa.orm.relationship("Host", backref=sa.orm.backref("runs")) experiment_id = sa.Column(sa.Integer, sa.ForeignKey("experiment.experiment_id")) experiment = sa.orm.relationship("Experiment", backref=sa.orm.backref("runs")) # artifacts = backref resources = sa.orm.relationship( "Resource", secondary=run_resource_association, backref="runs" ) result = sa.Column(sa.Float) def to_json(self): return { "_id": self.run_id, "command": self.command, "start_time": self.start_time, "heartbeat": self.heartbeat, "stop_time": self.stop_time, "queue_time": self.queue_time, "status": self.status, "result": self.result, "meta": {"comment": self.comment, "priority": self.priority}, "resources": [r.to_json() for r in self.resources], "artifacts": [a.to_json() for a in self.artifacts], "host": self.host.to_json(), "experiment": self.experiment.to_json(), "config": restore(json.loads(self.config)), "captured_out": self.captured_out, "fail_trace": self.fail_trace, }
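As a usage sketch for the models above (assuming SQLAlchemy is installed, the module is importable as sacred.observers.sql_bases, and an in-memory SQLite database is acceptable), this creates the schema and inserts one Dependency row via its get_or_create helper:

import sqlalchemy as sa
from sqlalchemy.orm import sessionmaker

from sacred.observers.sql_bases import Base, Dependency

engine = sa.create_engine("sqlite://")     # in-memory database for the sketch
Base.metadata.create_all(engine)           # emits CREATE TABLE for all models above
session = sessionmaker(bind=engine)()

dep = Dependency.get_or_create("numpy==1.16.0", session)
session.add(dep)
session.commit()

print(dep.to_json())                       # "numpy==1.16.0"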
#!/usr/bin/env python # coding=utf-8 from sacred import Ingredient """Global Docstring""" from mock import patch import pytest import sys from sacred import cli_option from sacred import host_info_gatherer from sacred.experiment import Experiment from sacred.utils import apply_backspaces_and_linefeeds, ConfigAddedError, SacredError @pytest.fixture def ex(): return Experiment("ator3000") def test_main(ex): @ex.main def foo(): pass assert "foo" in ex.commands assert ex.commands["foo"] == foo assert ex.default_command == "foo" def test_automain_imported(ex): main_called = [False] with patch.object(sys, "argv", ["test.py"]): @ex.automain def foo(): main_called[0] = True assert "foo" in ex.commands assert ex.commands["foo"] == foo assert ex.default_command == "foo" assert main_called[0] is False def test_automain_script_runs_main(ex): global __name__ oldname = __name__ main_called = [False] try: __name__ = "__main__" with patch.object(sys, "argv", ["test.py"]): @ex.automain def foo(): main_called[0] = True assert "foo" in ex.commands assert ex.commands["foo"] == foo assert ex.default_command == "foo" assert main_called[0] is True finally: __name__ = oldname def test_fails_on_unused_config_updates(ex): @ex.config def cfg(): a = 1 c = 3 @ex.main def foo(a, b=2): return a + b # normal config updates work assert ex.run(config_updates={"a": 3}).result == 5 # not in config but used works assert ex.run(config_updates={"b": 8}).result == 9 # unused but in config updates work assert ex.run(config_updates={"c": 9}).result == 3 # unused config updates raise with pytest.raises(ConfigAddedError): ex.run(config_updates={"d": 3}) def test_fails_on_nested_unused_config_updates(ex): @ex.config def cfg(): a = {"b": 1} d = {"e": 3} @ex.main def foo(a): return a["b"] # normal config updates work assert ex.run(config_updates={"a": {"b": 2}}).result == 2 # not in config but parent is works assert ex.run(config_updates={"a": {"c": 5}}).result == 1 # unused but in config works assert ex.run(config_updates={"d": {"e": 7}}).result == 1 # unused nested config updates raise with pytest.raises(ConfigAddedError): ex.run(config_updates={"d": {"f": 3}}) def test_considers_captured_functions_for_fail_on_unused_config(ex): @ex.config def cfg(): a = 1 @ex.capture def transmogrify(a, b=0): return a + b @ex.main def foo(): return transmogrify() assert ex.run(config_updates={"a": 7}).result == 7 assert ex.run(config_updates={"b": 3}).result == 4 with pytest.raises(ConfigAddedError): ex.run(config_updates={"c": 3}) def test_considers_prefix_for_fail_on_unused_config(ex): @ex.config def cfg(): a = {"b": 1} @ex.capture(prefix="a") def transmogrify(b): return b @ex.main def foo(): return transmogrify() assert ex.run(config_updates={"a": {"b": 3}}).result == 3 with pytest.raises(ConfigAddedError): ex.run(config_updates={"b": 5}) with pytest.raises(ConfigAddedError): ex.run(config_updates={"a": {"c": 5}}) def test_non_existing_prefix_is_treated_as_empty_dict(ex): @ex.capture(prefix="nonexisting") def transmogrify(b=10): return b @ex.main def foo(): return transmogrify() assert ex.run().result == 10 def test_using_a_named_config(ex): @ex.config def cfg(): a = 1 @ex.named_config def ncfg_first(): a = 10 @ex.named_config def ncfg_second(a): a = a * 2 @ex.main def run(a): return a assert ex.run().result == 1 assert ex.run(named_configs=["ncfg_first"]).result == 10 assert ex.run(named_configs=["ncfg_first", "ncfg_second"]).result == 20 with pytest.raises(KeyError, match=r".*not in preset for ConfigScope"): ex.run(named_configs=["ncfg_second", 
"ncfg_first"]) def test_empty_dict_named_config(ex): @ex.named_config def ncfg(): empty_dict = {} nested_empty_dict = {"k1": {"k2": {}}} @ex.automain def main(empty_dict=1, nested_empty_dict=2): return empty_dict, nested_empty_dict assert ex.run().result == (1, 2) assert ex.run(named_configs=["ncfg"]).result == ({}, {"k1": {"k2": {}}}) def test_empty_dict_config_updates(ex): @ex.config def cfg(): a = 1 @ex.config def default(): a = {"b": 1} @ex.main def main(): pass r = ex.run() assert r.config["a"]["b"] == 1 def test_named_config_and_ingredient(): ing = Ingredient("foo") @ing.config def cfg(): a = 10 ex = Experiment(ingredients=[ing]) @ex.config def default(): b = 20 @ex.named_config def named(): b = 30 @ex.main def main(): pass r = ex.run(named_configs=["named"]) assert r.config["b"] == 30 assert r.config["foo"] == {"a": 10} def test_captured_out_filter(ex, capsys): @ex.main def run_print_mock_progress(): sys.stdout.write("progress 0") sys.stdout.flush() for i in range(10): sys.stdout.write("\b") sys.stdout.write("{}".format(i)) sys.stdout.flush() ex.captured_out_filter = apply_backspaces_and_linefeeds # disable logging and set capture mode to python options = {"--loglevel": "CRITICAL", "--capture": "sys"} with capsys.disabled(): assert ex.run(options=options).captured_out == "progress 9" def test_adding_option_hooks(ex): @ex.option_hook def hook(options): pass @ex.option_hook def hook2(options): pass assert hook in ex.option_hooks assert hook2 in ex.option_hooks def test_option_hooks_without_options_arg_raises(ex): with pytest.raises(KeyError): @ex.option_hook def invalid_hook(wrong_arg_name): pass def test_config_hook_updates_config(ex): @ex.config def cfg(): a = "hello" @ex.config_hook def hook(config, command_name, logger): config.update({"a": "me"}) return config @ex.main def foo(): pass r = ex.run() assert r.config["a"] == "me" def test_info_kwarg_updates_info(ex): """Tests that the info kwarg of Experiment.create_run is used to update Run.info""" @ex.automain def foo(): pass run = ex.run(info={"bar": "baz"}) assert "bar" in run.info def test_info_kwargs_default_behavior(ex): """Tests the default behavior of Experiment.create_run when the info kwarg is not specified.""" @ex.automain def foo(_run): _run.info["bar"] = "baz" run = ex.run() assert "bar" in run.info def test_fails_on_config_write(ex): @ex.config def cfg(): a = "hello" nested_dict = {"dict": {"dict": 1234, "list": [1, 2, 3, 4]}} nested_list = [{"a": 42}, (1, 2, 3, 4), [1, 2, 3, 4]] nested_tuple = ({"a": 42}, (1, 2, 3, 4), [1, 2, 3, 4]) @ex.main def main(_config, nested_dict, nested_list, nested_tuple): raises_list = pytest.raises( SacredError, match="The configuration is read-only in a captured function!" ) raises_dict = pytest.raises( SacredError, match="The configuration is read-only in a captured function!" ) print("in main") # Test for ReadOnlyDict with raises_dict: _config["a"] = "world!" with raises_dict: nested_dict["dict"] = "world!" with raises_dict: nested_dict["list"] = "world!" with raises_dict: nested_dict.clear() with raises_dict: nested_dict.update({"a": "world"}) # Test ReadOnlyList with raises_list: nested_dict["dict"]["list"][0] = 1 with raises_list: nested_list[0] = "world!" with raises_list: nested_dict.clear() # Test nested tuple with raises_dict: nested_tuple[0]["a"] = "world!" 
with raises_list: nested_tuple[2][0] = 123 ex.run() def test_add_config_dict_chain(ex): @ex.config def config1(): """This is my demo configuration""" dictnest_cap = {"key_1": "value_1", "key_2": "value_2"} @ex.config def config2(): """This is my demo configuration""" dictnest_cap = {"key_2": "update_value_2", "key_3": "value3", "key_4": "value4"} adict = {"dictnest_dict": {"key_1": "value_1", "key_2": "value_2"}} ex.add_config(adict) bdict = { "dictnest_dict": { "key_2": "update_value_2", "key_3": "value3", "key_4": "value4", } } ex.add_config(bdict) @ex.automain def run(): pass final_config = ex.run().config assert final_config["dictnest_cap"] == { "key_1": "value_1", "key_2": "update_value_2", "key_3": "value3", "key_4": "value4", } assert final_config["dictnest_cap"] == final_config["dictnest_dict"] def test_additional_gatherers(): @host_info_gatherer("hello") def get_hello(): return "hello world" experiment = Experiment("ator3000", additional_host_info=[get_hello]) @experiment.main def foo(): pass experiment.run() assert experiment.current_run.host_info["hello"] == "hello world" @pytest.mark.parametrize("command_line_option", ["-w", "--warning"]) def test_additional_cli_options_flag(command_line_option): executed = [False] @cli_option("-w", "--warning", is_flag=True) def dummy_option(args, run): executed[0] = True experiment = Experiment("ator3000", additional_cli_options=[dummy_option]) @experiment.main def foo(): pass experiment.run_commandline([__file__, command_line_option]) assert executed[0] @pytest.mark.parametrize("command_line_option", ["-w", "--warning"]) def test_additional_cli_options(command_line_option): executed = [False] @cli_option("-w", "--warning") def dummy_option(args, run): executed[0] = args experiment = Experiment("ator3000", additional_cli_options=[dummy_option]) @experiment.main def foo(): pass experiment.run_commandline([__file__, command_line_option, "10"]) assert executed[0] == "10"
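For readers skimming the fixtures, here is a condensed, standalone restatement of the named-config behaviour that test_using_a_named_config exercises above; the experiment and config names are made up for the example.

from sacred import Experiment

ex = Experiment("named_config_demo")


@ex.config
def cfg():
    a = 1


@ex.named_config
def ten():
    a = 10


@ex.main
def run(a):
    return a


if __name__ == "__main__":
    assert ex.run().result == 1                          # default config
    assert ex.run(named_configs=["ten"]).result == 10    # named config overrides it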
IDSIA/sacred
tests/test_experiment.py
sacred/observers/sql_bases.py
# This file helps to compute a version number in source trees obtained from # git-archive tarball (such as those provided by githubs download-from-tag # feature). Distribution tarballs (built by setup.py sdist) and build # directories (produced by setup.py build) will contain a much shorter file # that just contains the computed version number. # This file is released into the public domain. Generated by # versioneer-0.15 (https://github.com/warner/python-versioneer) import errno import os import re import subprocess import sys from pandas.compat import PY3 def get_keywords(): # these strings will be replaced by git during git-archive. # setup.py/versioneer.py will grep for the variable names, so they must # each be defined on a line of their own. _version.py will just call # get_keywords(). git_refnames = "$Format:%d$" git_full = "$Format:%H$" keywords = {"refnames": git_refnames, "full": git_full} return keywords class VersioneerConfig(object): pass def get_config(): # these strings are filled in when 'setup.py versioneer' creates # _version.py cfg = VersioneerConfig() cfg.VCS = "git" cfg.style = "pep440" cfg.tag_prefix = "v" cfg.parentdir_prefix = "pandas-" cfg.versionfile_source = "pandas/_version.py" cfg.verbose = False return cfg class NotThisMethod(Exception): pass LONG_VERSION_PY = {} HANDLERS = {} def register_vcs_handler(vcs, method): # decorator def decorate(f): if vcs not in HANDLERS: HANDLERS[vcs] = {} HANDLERS[vcs][method] = f return f return decorate def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False): assert isinstance(commands, list) p = None for c in commands: try: dispcmd = str([c] + args) # remember shell=False, so use git.cmd on windows, not just git p = subprocess.Popen([c] + args, cwd=cwd, stdout=subprocess.PIPE, stderr=(subprocess.PIPE if hide_stderr else None)) break except EnvironmentError: e = sys.exc_info()[1] if e.errno == errno.ENOENT: continue if verbose: print("unable to run {dispcmd}".format(dispcmd=dispcmd)) print(e) return None else: if verbose: print("unable to find command, tried %s" % (commands,)) return None stdout = p.communicate()[0].strip() if PY3: stdout = stdout.decode() if p.returncode != 0: if verbose: print("unable to run {dispcmd} (error)".format(dispcmd=dispcmd)) return None return stdout def versions_from_parentdir(parentdir_prefix, root, verbose): # Source tarballs conventionally unpack into a directory that includes # both the project name and a version string. dirname = os.path.basename(root) if not dirname.startswith(parentdir_prefix): if verbose: print("guessing rootdir is '{root}', but '{dirname}' " "doesn't start with prefix '{parentdir_prefix}'".format( root=root, dirname=dirname, parentdir_prefix=parentdir_prefix)) raise NotThisMethod("rootdir doesn't start with parentdir_prefix") return {"version": dirname[len(parentdir_prefix):], "full-revisionid": None, "dirty": False, "error": None} @register_vcs_handler("git", "get_keywords") def git_get_keywords(versionfile_abs): # the code embedded in _version.py can just fetch the value of these # keywords. When used from setup.py, we don't want to import _version.py, # so we do it with a regexp instead. This function is not used from # _version.py. 
keywords = {} try: f = open(versionfile_abs, "r") for line in f.readlines(): if line.strip().startswith("git_refnames ="): mo = re.search(r'=\s*"(.*)"', line) if mo: keywords["refnames"] = mo.group(1) if line.strip().startswith("git_full ="): mo = re.search(r'=\s*"(.*)"', line) if mo: keywords["full"] = mo.group(1) f.close() except EnvironmentError: pass return keywords @register_vcs_handler("git", "keywords") def git_versions_from_keywords(keywords, tag_prefix, verbose): if not keywords: raise NotThisMethod("no keywords at all, weird") refnames = keywords["refnames"].strip() if refnames.startswith("$Format"): if verbose: print("keywords are unexpanded, not using") raise NotThisMethod("unexpanded keywords, not a git-archive tarball") refs = {r.strip() for r in refnames.strip("()").split(",")} # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of # just "foo-1.0". If we see a "tag: " prefix, prefer those. TAG = "tag: " tags = {r[len(TAG):] for r in refs if r.startswith(TAG)} if not tags: # Either we're using git < 1.8.3, or there really are no tags. We use # a heuristic: assume all version tags have a digit. The old git %d # expansion behaves like git log --decorate=short and strips out the # refs/heads/ and refs/tags/ prefixes that would let us distinguish # between branches and tags. By ignoring refnames without digits, we # filter out many common branch names like "release" and # "stabilization", as well as "HEAD" and "master". tags = {r for r in refs if re.search(r'\d', r)} if verbose: print("discarding '{}', no digits".format(",".join(refs - tags))) if verbose: print("likely tags: {}".format(",".join(sorted(tags)))) for ref in sorted(tags): # sorting will prefer e.g. "2.0" over "2.0rc1" if ref.startswith(tag_prefix): r = ref[len(tag_prefix):] if verbose: print("picking {r}".format(r=r)) return {"version": r, "full-revisionid": keywords["full"].strip(), "dirty": False, "error": None } # no suitable tags, so version is "0+unknown", but full hex is still there if verbose: print("no suitable tags, using unknown + full revision id") return {"version": "0+unknown", "full-revisionid": keywords["full"].strip(), "dirty": False, "error": "no suitable tags"} @register_vcs_handler("git", "pieces_from_vcs") def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command): # this runs 'git' from the root of the source tree. This only gets called # if the git-archive 'subst' keywords were *not* expanded, and # _version.py hasn't already been rewritten with a short version string, # meaning we're inside a checked out source tree. if not os.path.exists(os.path.join(root, ".git")): if verbose: print("no .git in {root}".format(root=root)) raise NotThisMethod("no .git directory") GITS = ["git"] if sys.platform == "win32": GITS = ["git.cmd", "git.exe"] # if there is a tag, this yields TAG-NUM-gHEX[-dirty] # if there are no tags, this yields HEX[-dirty] (no NUM) describe_out = run_command(GITS, ["describe", "--tags", "--dirty", "--always", "--long"], cwd=root) # --long was added in git-1.5.5 if describe_out is None: raise NotThisMethod("'git describe' failed") describe_out = describe_out.strip() full_out = run_command(GITS, ["rev-parse", "HEAD"], cwd=root) if full_out is None: raise NotThisMethod("'git rev-parse' failed") full_out = full_out.strip() pieces = {} pieces["long"] = full_out pieces["short"] = full_out[:7] # maybe improved later pieces["error"] = None # parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty] # TAG might have hyphens. 
git_describe = describe_out # look for -dirty suffix dirty = git_describe.endswith("-dirty") pieces["dirty"] = dirty if dirty: git_describe = git_describe[:git_describe.rindex("-dirty")] # now we have TAG-NUM-gHEX or HEX if "-" in git_describe: # TAG-NUM-gHEX mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe) if not mo: # unparseable. Maybe git-describe is misbehaving? pieces["error"] = ("unable to parse git-describe output: " "'{describe_out}'".format( describe_out=describe_out)) return pieces # tag full_tag = mo.group(1) if not full_tag.startswith(tag_prefix): if verbose: fmt = "tag '{full_tag}' doesn't start with prefix " \ "'{tag_prefix}'" print(fmt.format(full_tag=full_tag, tag_prefix=tag_prefix)) pieces["error"] = ("tag '{full_tag}' doesn't start with " "prefix '{tag_prefix}'".format( full_tag, tag_prefix)) return pieces pieces["closest-tag"] = full_tag[len(tag_prefix):] # distance: number of commits since tag pieces["distance"] = int(mo.group(2)) # commit: short hex revision ID pieces["short"] = mo.group(3) else: # HEX: no tags pieces["closest-tag"] = None count_out = run_command(GITS, ["rev-list", "HEAD", "--count"], cwd=root) pieces["distance"] = int(count_out) # total number of commits return pieces def plus_or_dot(pieces): if "+" in pieces.get("closest-tag", ""): return "." return "+" def render_pep440(pieces): # now build up version string, with post-release "local version # identifier". Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you # get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty # exceptions: # 1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty] if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"] or pieces["dirty"]: rendered += plus_or_dot(pieces) rendered += "{:d}.g{}".format(pieces["distance"], pieces["short"]) if pieces["dirty"]: rendered += ".dirty" else: # exception #1 rendered = "0+untagged.{:d}.g{}".format(pieces["distance"], pieces["short"]) if pieces["dirty"]: rendered += ".dirty" return rendered def render_pep440_pre(pieces): # TAG[.post.devDISTANCE] . No -dirty # exceptions: # 1: no tags. 0.post.devDISTANCE if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"]: rendered += ".post.dev%d" % pieces["distance"] else: # exception #1 rendered = "0.post.dev%d" % pieces["distance"] return rendered def render_pep440_post(pieces): # TAG[.postDISTANCE[.dev0]+gHEX] . The ".dev0" means dirty. Note that # .dev0 sorts backwards (a dirty tree will appear "older" than the # corresponding clean one), but you shouldn't be releasing software with # -dirty anyways. # exceptions: # 1: no tags. 0.postDISTANCE[.dev0] if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"] or pieces["dirty"]: rendered += ".post{:d}".format(pieces["distance"]) if pieces["dirty"]: rendered += ".dev0" rendered += plus_or_dot(pieces) rendered += "g{}".format(pieces["short"]) else: # exception #1 rendered = "0.post%d" % pieces["distance"] if pieces["dirty"]: rendered += ".dev0" rendered += "+g{}".format(pieces["short"]) return rendered def render_pep440_old(pieces): # TAG[.postDISTANCE[.dev0]] . The ".dev0" means dirty. # exceptions: # 1: no tags. 
0.postDISTANCE[.dev0] if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"] or pieces["dirty"]: rendered += ".post%d" % pieces["distance"] if pieces["dirty"]: rendered += ".dev0" else: # exception #1 rendered = "0.post%d" % pieces["distance"] if pieces["dirty"]: rendered += ".dev0" return rendered def render_git_describe(pieces): # TAG[-DISTANCE-gHEX][-dirty], like 'git describe --tags --dirty # --always' # exceptions: # 1: no tags. HEX[-dirty] (note: no 'g' prefix) if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"]: rendered += "-{:d}-g{}".format(pieces["distance"], pieces["short"]) else: # exception #1 rendered = pieces["short"] if pieces["dirty"]: rendered += "-dirty" return rendered def render_git_describe_long(pieces): # TAG-DISTANCE-gHEX[-dirty], like 'git describe --tags --dirty # --always -long'. The distance/hash is unconditional. # exceptions: # 1: no tags. HEX[-dirty] (note: no 'g' prefix) if pieces["closest-tag"]: rendered = pieces["closest-tag"] rendered += "-{:d}-g{}".format(pieces["distance"], pieces["short"]) else: # exception #1 rendered = pieces["short"] if pieces["dirty"]: rendered += "-dirty" return rendered def render(pieces, style): if pieces["error"]: return {"version": "unknown", "full-revisionid": pieces.get("long"), "dirty": None, "error": pieces["error"]} if not style or style == "default": style = "pep440" # the default if style == "pep440": rendered = render_pep440(pieces) elif style == "pep440-pre": rendered = render_pep440_pre(pieces) elif style == "pep440-post": rendered = render_pep440_post(pieces) elif style == "pep440-old": rendered = render_pep440_old(pieces) elif style == "git-describe": rendered = render_git_describe(pieces) elif style == "git-describe-long": rendered = render_git_describe_long(pieces) else: raise ValueError("unknown style '{style}'".format(style=style)) return {"version": rendered, "full-revisionid": pieces["long"], "dirty": pieces["dirty"], "error": None} def get_versions(): # I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have # __file__, we can work backwards from there to the root. Some # py2exe/bbfreeze/non-CPython implementations don't do __file__, in which # case we can only use expanded keywords. cfg = get_config() verbose = cfg.verbose try: return git_versions_from_keywords(get_keywords(), cfg.tag_prefix, verbose) except NotThisMethod: pass try: root = os.path.realpath(__file__) # versionfile_source is the relative path from the top of the source # tree (where the .git directory might live) to this file. Invert # this to find the root from __file__. for i in cfg.versionfile_source.split('/'): root = os.path.dirname(root) except NameError: return {"version": "0+unknown", "full-revisionid": None, "dirty": None, "error": "unable to find root of source tree"} try: pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose) return render(pieces, cfg.style) except NotThisMethod: pass try: if cfg.parentdir_prefix: return versions_from_parentdir(cfg.parentdir_prefix, root, verbose) except NotThisMethod: pass return {"version": "0+unknown", "full-revisionid": None, "dirty": None, "error": "unable to compute version"}
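To see what the render helpers above produce, here is a small illustration with a hand-written pieces dict of the kind git_pieces_from_vcs returns; importing the module as pandas._version is an assumption about the package layout.

from pandas._version import render

pieces = {
    "closest-tag": "0.24.0",
    "distance": 5,
    "short": "abc1234",
    "long": "abc1234" * 6,   # placeholder full revision id
    "dirty": True,
    "error": None,
}

print(render(pieces, "pep440")["version"])        # 0.24.0+5.gabc1234.dirty
print(render(pieces, "git-describe")["version"])  # 0.24.0-5-gabc1234-dirty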
# -*- coding: utf-8 -*- """ test .agg behavior / note that .apply is tested generally in test_groupby.py """ import pytest import numpy as np import pandas as pd from pandas import concat, DataFrame, Index, MultiIndex, Series from pandas.core.groupby.grouper import Grouping from pandas.core.base import SpecificationError from pandas.compat import OrderedDict import pandas.util.testing as tm def test_agg_regression1(tsframe): grouped = tsframe.groupby([lambda x: x.year, lambda x: x.month]) result = grouped.agg(np.mean) expected = grouped.mean() tm.assert_frame_equal(result, expected) def test_agg_must_agg(df): grouped = df.groupby('A')['C'] msg = "Must produce aggregated value" with pytest.raises(Exception, match=msg): grouped.agg(lambda x: x.describe()) with pytest.raises(Exception, match=msg): grouped.agg(lambda x: x.index[:2]) def test_agg_ser_multi_key(df): # TODO(wesm): unused ser = df.C # noqa f = lambda x: x.sum() results = df.C.groupby([df.A, df.B]).aggregate(f) expected = df.groupby(['A', 'B']).sum()['C'] tm.assert_series_equal(results, expected) def test_groupby_aggregation_mixed_dtype(): # GH 6212 expected = DataFrame({ 'v1': [5, 5, 7, np.nan, 3, 3, 4, 1], 'v2': [55, 55, 77, np.nan, 33, 33, 44, 11]}, index=MultiIndex.from_tuples([(1, 95), (1, 99), (2, 95), (2, 99), ('big', 'damp'), ('blue', 'dry'), ('red', 'red'), ('red', 'wet')], names=['by1', 'by2'])) df = DataFrame({ 'v1': [1, 3, 5, 7, 8, 3, 5, np.nan, 4, 5, 7, 9], 'v2': [11, 33, 55, 77, 88, 33, 55, np.nan, 44, 55, 77, 99], 'by1': ["red", "blue", 1, 2, np.nan, "big", 1, 2, "red", 1, np.nan, 12], 'by2': ["wet", "dry", 99, 95, np.nan, "damp", 95, 99, "red", 99, np.nan, np.nan] }) g = df.groupby(['by1', 'by2']) result = g[['v1', 'v2']].mean() tm.assert_frame_equal(result, expected) def test_agg_apply_corner(ts, tsframe): # nothing to group, all NA grouped = ts.groupby(ts * np.nan) assert ts.dtype == np.float64 # groupby float64 values results in Float64Index exp = Series([], dtype=np.float64, index=pd.Index([], dtype=np.float64)) tm.assert_series_equal(grouped.sum(), exp) tm.assert_series_equal(grouped.agg(np.sum), exp) tm.assert_series_equal(grouped.apply(np.sum), exp, check_index_type=False) # DataFrame grouped = tsframe.groupby(tsframe['A'] * np.nan) exp_df = DataFrame(columns=tsframe.columns, dtype=float, index=pd.Index([], dtype=np.float64)) tm.assert_frame_equal(grouped.sum(), exp_df, check_names=False) tm.assert_frame_equal(grouped.agg(np.sum), exp_df, check_names=False) tm.assert_frame_equal(grouped.apply(np.sum), exp_df.iloc[:, :0], check_names=False) def test_agg_grouping_is_list_tuple(ts): df = tm.makeTimeDataFrame() grouped = df.groupby(lambda x: x.year) grouper = grouped.grouper.groupings[0].grouper grouped.grouper.groupings[0] = Grouping(ts.index, list(grouper)) result = grouped.agg(np.mean) expected = grouped.mean() tm.assert_frame_equal(result, expected) grouped.grouper.groupings[0] = Grouping(ts.index, tuple(grouper)) result = grouped.agg(np.mean) expected = grouped.mean() tm.assert_frame_equal(result, expected) def test_agg_python_multiindex(mframe): grouped = mframe.groupby(['A', 'B']) result = grouped.agg(np.mean) expected = grouped.mean() tm.assert_frame_equal(result, expected) @pytest.mark.parametrize('groupbyfunc', [ lambda x: x.weekday(), [lambda x: x.month, lambda x: x.weekday()], ]) def test_aggregate_str_func(tsframe, groupbyfunc): grouped = tsframe.groupby(groupbyfunc) # single series result = grouped['A'].agg('std') expected = grouped['A'].std() tm.assert_series_equal(result, expected) # group frame 
by function name result = grouped.aggregate('var') expected = grouped.var() tm.assert_frame_equal(result, expected) # group frame by function dict result = grouped.agg(OrderedDict([['A', 'var'], ['B', 'std'], ['C', 'mean'], ['D', 'sem']])) expected = DataFrame(OrderedDict([['A', grouped['A'].var()], ['B', grouped['B'].std()], ['C', grouped['C'].mean()], ['D', grouped['D'].sem()]])) tm.assert_frame_equal(result, expected) def test_aggregate_item_by_item(df): grouped = df.groupby('A') aggfun = lambda ser: ser.size result = grouped.agg(aggfun) foo = (df.A == 'foo').sum() bar = (df.A == 'bar').sum() K = len(result.columns) # GH5782 # odd comparisons can result here, so cast to make easy exp = pd.Series(np.array([foo] * K), index=list('BCD'), dtype=np.float64, name='foo') tm.assert_series_equal(result.xs('foo'), exp) exp = pd.Series(np.array([bar] * K), index=list('BCD'), dtype=np.float64, name='bar') tm.assert_almost_equal(result.xs('bar'), exp) def aggfun(ser): return ser.size result = DataFrame().groupby(df.A).agg(aggfun) assert isinstance(result, DataFrame) assert len(result) == 0 def test_wrap_agg_out(three_group): grouped = three_group.groupby(['A', 'B']) def func(ser): if ser.dtype == np.object: raise TypeError else: return ser.sum() result = grouped.aggregate(func) exp_grouped = three_group.loc[:, three_group.columns != 'C'] expected = exp_grouped.groupby(['A', 'B']).aggregate(func) tm.assert_frame_equal(result, expected) def test_agg_multiple_functions_maintain_order(df): # GH #610 funcs = [('mean', np.mean), ('max', np.max), ('min', np.min)] result = df.groupby('A')['C'].agg(funcs) exp_cols = Index(['mean', 'max', 'min']) tm.assert_index_equal(result.columns, exp_cols) def test_multiple_functions_tuples_and_non_tuples(df): # #1359 funcs = [('foo', 'mean'), 'std'] ex_funcs = [('foo', 'mean'), ('std', 'std')] result = df.groupby('A')['C'].agg(funcs) expected = df.groupby('A')['C'].agg(ex_funcs) tm.assert_frame_equal(result, expected) result = df.groupby('A').agg(funcs) expected = df.groupby('A').agg(ex_funcs) tm.assert_frame_equal(result, expected) def test_agg_multiple_functions_too_many_lambdas(df): grouped = df.groupby('A') funcs = ['mean', lambda x: x.mean(), lambda x: x.std()] msg = 'Function names must be unique, found multiple named <lambda>' with pytest.raises(SpecificationError, match=msg): grouped.agg(funcs) def test_more_flexible_frame_multi_function(df): grouped = df.groupby('A') exmean = grouped.agg(OrderedDict([['C', np.mean], ['D', np.mean]])) exstd = grouped.agg(OrderedDict([['C', np.std], ['D', np.std]])) expected = concat([exmean, exstd], keys=['mean', 'std'], axis=1) expected = expected.swaplevel(0, 1, axis=1).sort_index(level=0, axis=1) d = OrderedDict([['C', [np.mean, np.std]], ['D', [np.mean, np.std]]]) result = grouped.aggregate(d) tm.assert_frame_equal(result, expected) # be careful result = grouped.aggregate(OrderedDict([['C', np.mean], ['D', [np.mean, np.std]]])) expected = grouped.aggregate(OrderedDict([['C', np.mean], ['D', [np.mean, np.std]]])) tm.assert_frame_equal(result, expected) def foo(x): return np.mean(x) def bar(x): return np.std(x, ddof=1) # this uses column selection & renaming with tm.assert_produces_warning(FutureWarning, check_stacklevel=False): d = OrderedDict([['C', np.mean], ['D', OrderedDict([['foo', np.mean], ['bar', np.std]])]]) result = grouped.aggregate(d) d = OrderedDict([['C', [np.mean]], ['D', [foo, bar]]]) expected = grouped.aggregate(d) tm.assert_frame_equal(result, expected) def test_multi_function_flexible_mix(df): # GH #1268 
grouped = df.groupby('A') # Expected d = OrderedDict([['C', OrderedDict([['foo', 'mean'], ['bar', 'std']])], ['D', {'sum': 'sum'}]]) # this uses column selection & renaming with tm.assert_produces_warning(FutureWarning, check_stacklevel=False): expected = grouped.aggregate(d) # Test 1 d = OrderedDict([['C', OrderedDict([['foo', 'mean'], ['bar', 'std']])], ['D', 'sum']]) # this uses column selection & renaming with tm.assert_produces_warning(FutureWarning, check_stacklevel=False): result = grouped.aggregate(d) tm.assert_frame_equal(result, expected) # Test 2 d = OrderedDict([['C', OrderedDict([['foo', 'mean'], ['bar', 'std']])], ['D', ['sum']]]) # this uses column selection & renaming with tm.assert_produces_warning(FutureWarning, check_stacklevel=False): result = grouped.aggregate(d) tm.assert_frame_equal(result, expected)
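The multi-function aggregations above can be hard to parse amid the fixtures; this compact, self-contained example shows the same per-column func/list-of-funcs pattern (collections.OrderedDict stands in for the pandas.compat import used by the test module):

from collections import OrderedDict

import numpy as np
import pandas as pd

df = pd.DataFrame({'A': ['foo', 'foo', 'bar', 'bar'],
                   'C': [1.0, 2.0, 3.0, 4.0],
                   'D': [10.0, 20.0, 30.0, 40.0]})

result = df.groupby('A').agg(OrderedDict([('C', np.mean),
                                          ('D', [np.mean, np.std])]))
print(result)   # columns become a MultiIndex: (C, mean), (D, mean), (D, std)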
dsm054/pandas
pandas/tests/groupby/aggregate/test_aggregate.py
pandas/_version.py
""" Read SAS7BDAT files Based on code written by Jared Hobbs: https://bitbucket.org/jaredhobbs/sas7bdat See also: https://github.com/BioStatMatt/sas7bdat Partial documentation of the file format: https://cran.r-project.org/web/packages/sas7bdat/vignettes/sas7bdat.pdf Reference for binary data compression: http://collaboration.cmc.ec.gc.ca/science/rpn/biblio/ddj/Website/articles/CUJ/1992/9210/ross/ross.htm """ from datetime import datetime import struct import numpy as np from pandas.errors import EmptyDataError import pandas as pd from pandas import compat from pandas.io.common import BaseIterator, get_filepath_or_buffer from pandas.io.sas._sas import Parser import pandas.io.sas.sas_constants as const class _subheader_pointer(object): pass class _column(object): pass # SAS7BDAT represents a SAS data file in SAS7BDAT format. class SAS7BDATReader(BaseIterator): """ Read SAS files in SAS7BDAT format. Parameters ---------- path_or_buf : path name or buffer Name of SAS file or file-like object pointing to SAS file contents. index : column identifier, defaults to None Column to use as index. convert_dates : boolean, defaults to True Attempt to convert dates to Pandas datetime values. Note that some rarely used SAS date formats may be unsupported. blank_missing : boolean, defaults to True Convert empty strings to missing values (SAS uses blanks to indicate missing character variables). chunksize : int, defaults to None Return SAS7BDATReader object for iterations, returns chunks with given number of lines. encoding : string, defaults to None String encoding. convert_text : bool, defaults to True If False, text variables are left as raw bytes. convert_header_text : bool, defaults to True If False, header text, including column names, are left as raw bytes. """ def __init__(self, path_or_buf, index=None, convert_dates=True, blank_missing=True, chunksize=None, encoding=None, convert_text=True, convert_header_text=True): self.index = index self.convert_dates = convert_dates self.blank_missing = blank_missing self.chunksize = chunksize self.encoding = encoding self.convert_text = convert_text self.convert_header_text = convert_header_text self.default_encoding = "latin-1" self.compression = "" self.column_names_strings = [] self.column_names = [] self.column_formats = [] self.columns = [] self._current_page_data_subheader_pointers = [] self._cached_page = None self._column_data_lengths = [] self._column_data_offsets = [] self._column_types = [] self._current_row_in_file_index = 0 self._current_row_on_page_index = 0 self._current_row_in_file_index = 0 self._path_or_buf, _, _, _ = get_filepath_or_buffer(path_or_buf) if isinstance(self._path_or_buf, compat.string_types): self._path_or_buf = open(self._path_or_buf, 'rb') self.handle = self._path_or_buf self._get_properties() self._parse_metadata() def column_data_lengths(self): """Return a numpy int64 array of the column data lengths""" return np.asarray(self._column_data_lengths, dtype=np.int64) def column_data_offsets(self): """Return a numpy int64 array of the column offsets""" return np.asarray(self._column_data_offsets, dtype=np.int64) def column_types(self): """Returns a numpy character array of the column types: s (string) or d (double)""" return np.asarray(self._column_types, dtype=np.dtype('S1')) def close(self): try: self.handle.close() except AttributeError: pass def _get_properties(self): # Check magic number self._path_or_buf.seek(0) self._cached_page = self._path_or_buf.read(288) if self._cached_page[0:len(const.magic)] != const.magic: 
self.close() raise ValueError("magic number mismatch (not a SAS file?)") # Get alignment information align1, align2 = 0, 0 buf = self._read_bytes(const.align_1_offset, const.align_1_length) if buf == const.u64_byte_checker_value: align2 = const.align_2_value self.U64 = True self._int_length = 8 self._page_bit_offset = const.page_bit_offset_x64 self._subheader_pointer_length = const.subheader_pointer_length_x64 else: self.U64 = False self._page_bit_offset = const.page_bit_offset_x86 self._subheader_pointer_length = const.subheader_pointer_length_x86 self._int_length = 4 buf = self._read_bytes(const.align_2_offset, const.align_2_length) if buf == const.align_1_checker_value: align1 = const.align_2_value total_align = align1 + align2 # Get endianness information buf = self._read_bytes(const.endianness_offset, const.endianness_length) if buf == b'\x01': self.byte_order = "<" else: self.byte_order = ">" # Get encoding information buf = self._read_bytes(const.encoding_offset, const.encoding_length)[0] if buf in const.encoding_names: self.file_encoding = const.encoding_names[buf] else: self.file_encoding = "unknown (code=%s)" % str(buf) # Get platform information buf = self._read_bytes(const.platform_offset, const.platform_length) if buf == b'1': self.platform = "unix" elif buf == b'2': self.platform = "windows" else: self.platform = "unknown" buf = self._read_bytes(const.dataset_offset, const.dataset_length) self.name = buf.rstrip(b'\x00 ') if self.convert_header_text: self.name = self.name.decode( self.encoding or self.default_encoding) buf = self._read_bytes(const.file_type_offset, const.file_type_length) self.file_type = buf.rstrip(b'\x00 ') if self.convert_header_text: self.file_type = self.file_type.decode( self.encoding or self.default_encoding) # Timestamp is epoch 01/01/1960 epoch = datetime(1960, 1, 1) x = self._read_float(const.date_created_offset + align1, const.date_created_length) self.date_created = epoch + pd.to_timedelta(x, unit='s') x = self._read_float(const.date_modified_offset + align1, const.date_modified_length) self.date_modified = epoch + pd.to_timedelta(x, unit='s') self.header_length = self._read_int(const.header_size_offset + align1, const.header_size_length) # Read the rest of the header into cached_page. 
buf = self._path_or_buf.read(self.header_length - 288) self._cached_page += buf if len(self._cached_page) != self.header_length: self.close() raise ValueError("The SAS7BDAT file appears to be truncated.") self._page_length = self._read_int(const.page_size_offset + align1, const.page_size_length) self._page_count = self._read_int(const.page_count_offset + align1, const.page_count_length) buf = self._read_bytes(const.sas_release_offset + total_align, const.sas_release_length) self.sas_release = buf.rstrip(b'\x00 ') if self.convert_header_text: self.sas_release = self.sas_release.decode( self.encoding or self.default_encoding) buf = self._read_bytes(const.sas_server_type_offset + total_align, const.sas_server_type_length) self.server_type = buf.rstrip(b'\x00 ') if self.convert_header_text: self.server_type = self.server_type.decode( self.encoding or self.default_encoding) buf = self._read_bytes(const.os_version_number_offset + total_align, const.os_version_number_length) self.os_version = buf.rstrip(b'\x00 ') if self.convert_header_text: self.os_version = self.os_version.decode( self.encoding or self.default_encoding) buf = self._read_bytes(const.os_name_offset + total_align, const.os_name_length) buf = buf.rstrip(b'\x00 ') if len(buf) > 0: self.os_name = buf.decode(self.encoding or self.default_encoding) else: buf = self._read_bytes(const.os_maker_offset + total_align, const.os_maker_length) self.os_name = buf.rstrip(b'\x00 ') if self.convert_header_text: self.os_name = self.os_name.decode( self.encoding or self.default_encoding) def __next__(self): da = self.read(nrows=self.chunksize or 1) if da is None: raise StopIteration return da # Read a single float of the given width (4 or 8). def _read_float(self, offset, width): if width not in (4, 8): self.close() raise ValueError("invalid float width") buf = self._read_bytes(offset, width) fd = "f" if width == 4 else "d" return struct.unpack(self.byte_order + fd, buf)[0] # Read a single signed integer of the given width (1, 2, 4 or 8). def _read_int(self, offset, width): if width not in (1, 2, 4, 8): self.close() raise ValueError("invalid int width") buf = self._read_bytes(offset, width) it = {1: "b", 2: "h", 4: "l", 8: "q"}[width] iv = struct.unpack(self.byte_order + it, buf)[0] return iv def _read_bytes(self, offset, length): if self._cached_page is None: self._path_or_buf.seek(offset) buf = self._path_or_buf.read(length) if len(buf) < length: self.close() msg = "Unable to read {:d} bytes from file position {:d}." 
raise ValueError(msg.format(length, offset)) return buf else: if offset + length > len(self._cached_page): self.close() raise ValueError("The cached page is too small.") return self._cached_page[offset:offset + length] def _parse_metadata(self): done = False while not done: self._cached_page = self._path_or_buf.read(self._page_length) if len(self._cached_page) <= 0: break if len(self._cached_page) != self._page_length: self.close() raise ValueError( "Failed to read a meta data page from the SAS file.") done = self._process_page_meta() def _process_page_meta(self): self._read_page_header() pt = [const.page_meta_type, const.page_amd_type] + const.page_mix_types if self._current_page_type in pt: self._process_page_metadata() is_data_page = self._current_page_type & const.page_data_type is_mix_page = self._current_page_type in const.page_mix_types return (is_data_page or is_mix_page or self._current_page_data_subheader_pointers != []) def _read_page_header(self): bit_offset = self._page_bit_offset tx = const.page_type_offset + bit_offset self._current_page_type = self._read_int(tx, const.page_type_length) tx = const.block_count_offset + bit_offset self._current_page_block_count = self._read_int( tx, const.block_count_length) tx = const.subheader_count_offset + bit_offset self._current_page_subheaders_count = ( self._read_int(tx, const.subheader_count_length)) def _process_page_metadata(self): bit_offset = self._page_bit_offset for i in range(self._current_page_subheaders_count): pointer = self._process_subheader_pointers( const.subheader_pointers_offset + bit_offset, i) if pointer.length == 0: continue if pointer.compression == const.truncated_subheader_id: continue subheader_signature = self._read_subheader_signature( pointer.offset) subheader_index = ( self._get_subheader_index(subheader_signature, pointer.compression, pointer.ptype)) self._process_subheader(subheader_index, pointer) def _get_subheader_index(self, signature, compression, ptype): index = const.subheader_signature_to_index.get(signature) if index is None: f1 = ((compression == const.compressed_subheader_id) or (compression == 0)) f2 = (ptype == const.compressed_subheader_type) if (self.compression != "") and f1 and f2: index = const.SASIndex.data_subheader_index else: self.close() raise ValueError("Unknown subheader signature") return index def _process_subheader_pointers(self, offset, subheader_pointer_index): subheader_pointer_length = self._subheader_pointer_length total_offset = (offset + subheader_pointer_length * subheader_pointer_index) subheader_offset = self._read_int(total_offset, self._int_length) total_offset += self._int_length subheader_length = self._read_int(total_offset, self._int_length) total_offset += self._int_length subheader_compression = self._read_int(total_offset, 1) total_offset += 1 subheader_type = self._read_int(total_offset, 1) x = _subheader_pointer() x.offset = subheader_offset x.length = subheader_length x.compression = subheader_compression x.ptype = subheader_type return x def _read_subheader_signature(self, offset): subheader_signature = self._read_bytes(offset, self._int_length) return subheader_signature def _process_subheader(self, subheader_index, pointer): offset = pointer.offset length = pointer.length if subheader_index == const.SASIndex.row_size_index: processor = self._process_rowsize_subheader elif subheader_index == const.SASIndex.column_size_index: processor = self._process_columnsize_subheader elif subheader_index == const.SASIndex.column_text_index: processor = 
self._process_columntext_subheader elif subheader_index == const.SASIndex.column_name_index: processor = self._process_columnname_subheader elif subheader_index == const.SASIndex.column_attributes_index: processor = self._process_columnattributes_subheader elif subheader_index == const.SASIndex.format_and_label_index: processor = self._process_format_subheader elif subheader_index == const.SASIndex.column_list_index: processor = self._process_columnlist_subheader elif subheader_index == const.SASIndex.subheader_counts_index: processor = self._process_subheader_counts elif subheader_index == const.SASIndex.data_subheader_index: self._current_page_data_subheader_pointers.append(pointer) return else: raise ValueError("unknown subheader index") processor(offset, length) def _process_rowsize_subheader(self, offset, length): int_len = self._int_length lcs_offset = offset lcp_offset = offset if self.U64: lcs_offset += 682 lcp_offset += 706 else: lcs_offset += 354 lcp_offset += 378 self.row_length = self._read_int( offset + const.row_length_offset_multiplier * int_len, int_len) self.row_count = self._read_int( offset + const.row_count_offset_multiplier * int_len, int_len) self.col_count_p1 = self._read_int( offset + const.col_count_p1_multiplier * int_len, int_len) self.col_count_p2 = self._read_int( offset + const.col_count_p2_multiplier * int_len, int_len) mx = const.row_count_on_mix_page_offset_multiplier * int_len self._mix_page_row_count = self._read_int(offset + mx, int_len) self._lcs = self._read_int(lcs_offset, 2) self._lcp = self._read_int(lcp_offset, 2) def _process_columnsize_subheader(self, offset, length): int_len = self._int_length offset += int_len self.column_count = self._read_int(offset, int_len) if (self.col_count_p1 + self.col_count_p2 != self.column_count): print("Warning: column count mismatch (%d + %d != %d)\n", self.col_count_p1, self.col_count_p2, self.column_count) # Unknown purpose def _process_subheader_counts(self, offset, length): pass def _process_columntext_subheader(self, offset, length): offset += self._int_length text_block_size = self._read_int(offset, const.text_block_size_length) buf = self._read_bytes(offset, text_block_size) cname_raw = buf[0:text_block_size].rstrip(b"\x00 ") cname = cname_raw if self.convert_header_text: cname = cname.decode(self.encoding or self.default_encoding) self.column_names_strings.append(cname) if len(self.column_names_strings) == 1: compression_literal = "" for cl in const.compression_literals: if cl in cname_raw: compression_literal = cl self.compression = compression_literal offset -= self._int_length offset1 = offset + 16 if self.U64: offset1 += 4 buf = self._read_bytes(offset1, self._lcp) compression_literal = buf.rstrip(b"\x00") if compression_literal == "": self._lcs = 0 offset1 = offset + 32 if self.U64: offset1 += 4 buf = self._read_bytes(offset1, self._lcp) self.creator_proc = buf[0:self._lcp] elif compression_literal == const.rle_compression: offset1 = offset + 40 if self.U64: offset1 += 4 buf = self._read_bytes(offset1, self._lcp) self.creator_proc = buf[0:self._lcp] elif self._lcs > 0: self._lcp = 0 offset1 = offset + 16 if self.U64: offset1 += 4 buf = self._read_bytes(offset1, self._lcs) self.creator_proc = buf[0:self._lcp] if self.convert_header_text: if hasattr(self, "creator_proc"): self.creator_proc = self.creator_proc.decode( self.encoding or self.default_encoding) def _process_columnname_subheader(self, offset, length): int_len = self._int_length offset += int_len column_name_pointers_count = (length - 2 * 
int_len - 12) // 8 for i in range(column_name_pointers_count): text_subheader = offset + const.column_name_pointer_length * \ (i + 1) + const.column_name_text_subheader_offset col_name_offset = offset + const.column_name_pointer_length * \ (i + 1) + const.column_name_offset_offset col_name_length = offset + const.column_name_pointer_length * \ (i + 1) + const.column_name_length_offset idx = self._read_int( text_subheader, const.column_name_text_subheader_length) col_offset = self._read_int( col_name_offset, const.column_name_offset_length) col_len = self._read_int( col_name_length, const.column_name_length_length) name_str = self.column_names_strings[idx] self.column_names.append(name_str[col_offset:col_offset + col_len]) def _process_columnattributes_subheader(self, offset, length): int_len = self._int_length column_attributes_vectors_count = ( length - 2 * int_len - 12) // (int_len + 8) for i in range(column_attributes_vectors_count): col_data_offset = (offset + int_len + const.column_data_offset_offset + i * (int_len + 8)) col_data_len = (offset + 2 * int_len + const.column_data_length_offset + i * (int_len + 8)) col_types = (offset + 2 * int_len + const.column_type_offset + i * (int_len + 8)) x = self._read_int(col_data_offset, int_len) self._column_data_offsets.append(x) x = self._read_int(col_data_len, const.column_data_length_length) self._column_data_lengths.append(x) x = self._read_int(col_types, const.column_type_length) self._column_types.append(b'd' if x == 1 else b's') def _process_columnlist_subheader(self, offset, length): # unknown purpose pass def _process_format_subheader(self, offset, length): int_len = self._int_length text_subheader_format = ( offset + const.column_format_text_subheader_index_offset + 3 * int_len) col_format_offset = (offset + const.column_format_offset_offset + 3 * int_len) col_format_len = (offset + const.column_format_length_offset + 3 * int_len) text_subheader_label = ( offset + const.column_label_text_subheader_index_offset + 3 * int_len) col_label_offset = (offset + const.column_label_offset_offset + 3 * int_len) col_label_len = offset + const.column_label_length_offset + 3 * int_len x = self._read_int(text_subheader_format, const.column_format_text_subheader_index_length) format_idx = min(x, len(self.column_names_strings) - 1) format_start = self._read_int( col_format_offset, const.column_format_offset_length) format_len = self._read_int( col_format_len, const.column_format_length_length) label_idx = self._read_int( text_subheader_label, const.column_label_text_subheader_index_length) label_idx = min(label_idx, len(self.column_names_strings) - 1) label_start = self._read_int( col_label_offset, const.column_label_offset_length) label_len = self._read_int(col_label_len, const.column_label_length_length) label_names = self.column_names_strings[label_idx] column_label = label_names[label_start: label_start + label_len] format_names = self.column_names_strings[format_idx] column_format = format_names[format_start: format_start + format_len] current_column_number = len(self.columns) col = _column() col.col_id = current_column_number col.name = self.column_names[current_column_number] col.label = column_label col.format = column_format col.ctype = self._column_types[current_column_number] col.length = self._column_data_lengths[current_column_number] self.column_formats.append(column_format) self.columns.append(col) def read(self, nrows=None): if (nrows is None) and (self.chunksize is not None): nrows = self.chunksize elif nrows is None: nrows = 
self.row_count if len(self._column_types) == 0: self.close() raise EmptyDataError("No columns to parse from file") if self._current_row_in_file_index >= self.row_count: return None m = self.row_count - self._current_row_in_file_index if nrows > m: nrows = m nd = self._column_types.count(b'd') ns = self._column_types.count(b's') self._string_chunk = np.empty((ns, nrows), dtype=np.object) self._byte_chunk = np.zeros((nd, 8 * nrows), dtype=np.uint8) self._current_row_in_chunk_index = 0 p = Parser(self) p.read(nrows) rslt = self._chunk_to_dataframe() if self.index is not None: rslt = rslt.set_index(self.index) return rslt def _read_next_page(self): self._current_page_data_subheader_pointers = [] self._cached_page = self._path_or_buf.read(self._page_length) if len(self._cached_page) <= 0: return True elif len(self._cached_page) != self._page_length: self.close() msg = ("failed to read complete page from file " "(read {:d} of {:d} bytes)") raise ValueError(msg.format(len(self._cached_page), self._page_length)) self._read_page_header() page_type = self._current_page_type if page_type == const.page_meta_type: self._process_page_metadata() is_data_page = page_type & const.page_data_type pt = [const.page_meta_type] + const.page_mix_types if not is_data_page and self._current_page_type not in pt: return self._read_next_page() return False def _chunk_to_dataframe(self): n = self._current_row_in_chunk_index m = self._current_row_in_file_index ix = range(m - n, m) rslt = pd.DataFrame(index=ix) js, jb = 0, 0 for j in range(self.column_count): name = self.column_names[j] if self._column_types[j] == b'd': rslt[name] = self._byte_chunk[jb, :].view( dtype=self.byte_order + 'd') rslt[name] = np.asarray(rslt[name], dtype=np.float64) if self.convert_dates: unit = None if self.column_formats[j] in const.sas_date_formats: unit = 'd' elif self.column_formats[j] in const.sas_datetime_formats: unit = 's' if unit: rslt[name] = pd.to_datetime(rslt[name], unit=unit, origin="1960-01-01") jb += 1 elif self._column_types[j] == b's': rslt[name] = self._string_chunk[js, :] if self.convert_text and (self.encoding is not None): rslt[name] = rslt[name].str.decode( self.encoding or self.default_encoding) if self.blank_missing: ii = rslt[name].str.len() == 0 rslt.loc[ii, name] = np.nan js += 1 else: self.close() raise ValueError("unknown column type %s" % self._column_types[j]) return rslt
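The reader above is normally reached through pandas' public pd.read_sas entry point rather than instantiated directly. A minimal usage sketch, assuming a local file named "example.sas7bdat" and a latin-1 encoding (both placeholders, not values taken from this module):

import pandas as pd

# Read the whole file into a DataFrame in one call.
df = pd.read_sas("example.sas7bdat", encoding="latin-1")
print(df.shape)

# Or stream it in chunks, which exercises the read(nrows)/__next__ path above.
reader = pd.read_sas("example.sas7bdat", chunksize=10_000)
for chunk in reader:
    print(len(chunk))
reader.close()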
# -*- coding: utf-8 -*- """ test .agg behavior / note that .apply is tested generally in test_groupby.py """ import pytest import numpy as np import pandas as pd from pandas import concat, DataFrame, Index, MultiIndex, Series from pandas.core.groupby.grouper import Grouping from pandas.core.base import SpecificationError from pandas.compat import OrderedDict import pandas.util.testing as tm def test_agg_regression1(tsframe): grouped = tsframe.groupby([lambda x: x.year, lambda x: x.month]) result = grouped.agg(np.mean) expected = grouped.mean() tm.assert_frame_equal(result, expected) def test_agg_must_agg(df): grouped = df.groupby('A')['C'] msg = "Must produce aggregated value" with pytest.raises(Exception, match=msg): grouped.agg(lambda x: x.describe()) with pytest.raises(Exception, match=msg): grouped.agg(lambda x: x.index[:2]) def test_agg_ser_multi_key(df): # TODO(wesm): unused ser = df.C # noqa f = lambda x: x.sum() results = df.C.groupby([df.A, df.B]).aggregate(f) expected = df.groupby(['A', 'B']).sum()['C'] tm.assert_series_equal(results, expected) def test_groupby_aggregation_mixed_dtype(): # GH 6212 expected = DataFrame({ 'v1': [5, 5, 7, np.nan, 3, 3, 4, 1], 'v2': [55, 55, 77, np.nan, 33, 33, 44, 11]}, index=MultiIndex.from_tuples([(1, 95), (1, 99), (2, 95), (2, 99), ('big', 'damp'), ('blue', 'dry'), ('red', 'red'), ('red', 'wet')], names=['by1', 'by2'])) df = DataFrame({ 'v1': [1, 3, 5, 7, 8, 3, 5, np.nan, 4, 5, 7, 9], 'v2': [11, 33, 55, 77, 88, 33, 55, np.nan, 44, 55, 77, 99], 'by1': ["red", "blue", 1, 2, np.nan, "big", 1, 2, "red", 1, np.nan, 12], 'by2': ["wet", "dry", 99, 95, np.nan, "damp", 95, 99, "red", 99, np.nan, np.nan] }) g = df.groupby(['by1', 'by2']) result = g[['v1', 'v2']].mean() tm.assert_frame_equal(result, expected) def test_agg_apply_corner(ts, tsframe): # nothing to group, all NA grouped = ts.groupby(ts * np.nan) assert ts.dtype == np.float64 # groupby float64 values results in Float64Index exp = Series([], dtype=np.float64, index=pd.Index([], dtype=np.float64)) tm.assert_series_equal(grouped.sum(), exp) tm.assert_series_equal(grouped.agg(np.sum), exp) tm.assert_series_equal(grouped.apply(np.sum), exp, check_index_type=False) # DataFrame grouped = tsframe.groupby(tsframe['A'] * np.nan) exp_df = DataFrame(columns=tsframe.columns, dtype=float, index=pd.Index([], dtype=np.float64)) tm.assert_frame_equal(grouped.sum(), exp_df, check_names=False) tm.assert_frame_equal(grouped.agg(np.sum), exp_df, check_names=False) tm.assert_frame_equal(grouped.apply(np.sum), exp_df.iloc[:, :0], check_names=False) def test_agg_grouping_is_list_tuple(ts): df = tm.makeTimeDataFrame() grouped = df.groupby(lambda x: x.year) grouper = grouped.grouper.groupings[0].grouper grouped.grouper.groupings[0] = Grouping(ts.index, list(grouper)) result = grouped.agg(np.mean) expected = grouped.mean() tm.assert_frame_equal(result, expected) grouped.grouper.groupings[0] = Grouping(ts.index, tuple(grouper)) result = grouped.agg(np.mean) expected = grouped.mean() tm.assert_frame_equal(result, expected) def test_agg_python_multiindex(mframe): grouped = mframe.groupby(['A', 'B']) result = grouped.agg(np.mean) expected = grouped.mean() tm.assert_frame_equal(result, expected) @pytest.mark.parametrize('groupbyfunc', [ lambda x: x.weekday(), [lambda x: x.month, lambda x: x.weekday()], ]) def test_aggregate_str_func(tsframe, groupbyfunc): grouped = tsframe.groupby(groupbyfunc) # single series result = grouped['A'].agg('std') expected = grouped['A'].std() tm.assert_series_equal(result, expected) # group frame 
by function name result = grouped.aggregate('var') expected = grouped.var() tm.assert_frame_equal(result, expected) # group frame by function dict result = grouped.agg(OrderedDict([['A', 'var'], ['B', 'std'], ['C', 'mean'], ['D', 'sem']])) expected = DataFrame(OrderedDict([['A', grouped['A'].var()], ['B', grouped['B'].std()], ['C', grouped['C'].mean()], ['D', grouped['D'].sem()]])) tm.assert_frame_equal(result, expected) def test_aggregate_item_by_item(df): grouped = df.groupby('A') aggfun = lambda ser: ser.size result = grouped.agg(aggfun) foo = (df.A == 'foo').sum() bar = (df.A == 'bar').sum() K = len(result.columns) # GH5782 # odd comparisons can result here, so cast to make easy exp = pd.Series(np.array([foo] * K), index=list('BCD'), dtype=np.float64, name='foo') tm.assert_series_equal(result.xs('foo'), exp) exp = pd.Series(np.array([bar] * K), index=list('BCD'), dtype=np.float64, name='bar') tm.assert_almost_equal(result.xs('bar'), exp) def aggfun(ser): return ser.size result = DataFrame().groupby(df.A).agg(aggfun) assert isinstance(result, DataFrame) assert len(result) == 0 def test_wrap_agg_out(three_group): grouped = three_group.groupby(['A', 'B']) def func(ser): if ser.dtype == np.object: raise TypeError else: return ser.sum() result = grouped.aggregate(func) exp_grouped = three_group.loc[:, three_group.columns != 'C'] expected = exp_grouped.groupby(['A', 'B']).aggregate(func) tm.assert_frame_equal(result, expected) def test_agg_multiple_functions_maintain_order(df): # GH #610 funcs = [('mean', np.mean), ('max', np.max), ('min', np.min)] result = df.groupby('A')['C'].agg(funcs) exp_cols = Index(['mean', 'max', 'min']) tm.assert_index_equal(result.columns, exp_cols) def test_multiple_functions_tuples_and_non_tuples(df): # #1359 funcs = [('foo', 'mean'), 'std'] ex_funcs = [('foo', 'mean'), ('std', 'std')] result = df.groupby('A')['C'].agg(funcs) expected = df.groupby('A')['C'].agg(ex_funcs) tm.assert_frame_equal(result, expected) result = df.groupby('A').agg(funcs) expected = df.groupby('A').agg(ex_funcs) tm.assert_frame_equal(result, expected) def test_agg_multiple_functions_too_many_lambdas(df): grouped = df.groupby('A') funcs = ['mean', lambda x: x.mean(), lambda x: x.std()] msg = 'Function names must be unique, found multiple named <lambda>' with pytest.raises(SpecificationError, match=msg): grouped.agg(funcs) def test_more_flexible_frame_multi_function(df): grouped = df.groupby('A') exmean = grouped.agg(OrderedDict([['C', np.mean], ['D', np.mean]])) exstd = grouped.agg(OrderedDict([['C', np.std], ['D', np.std]])) expected = concat([exmean, exstd], keys=['mean', 'std'], axis=1) expected = expected.swaplevel(0, 1, axis=1).sort_index(level=0, axis=1) d = OrderedDict([['C', [np.mean, np.std]], ['D', [np.mean, np.std]]]) result = grouped.aggregate(d) tm.assert_frame_equal(result, expected) # be careful result = grouped.aggregate(OrderedDict([['C', np.mean], ['D', [np.mean, np.std]]])) expected = grouped.aggregate(OrderedDict([['C', np.mean], ['D', [np.mean, np.std]]])) tm.assert_frame_equal(result, expected) def foo(x): return np.mean(x) def bar(x): return np.std(x, ddof=1) # this uses column selection & renaming with tm.assert_produces_warning(FutureWarning, check_stacklevel=False): d = OrderedDict([['C', np.mean], ['D', OrderedDict([['foo', np.mean], ['bar', np.std]])]]) result = grouped.aggregate(d) d = OrderedDict([['C', [np.mean]], ['D', [foo, bar]]]) expected = grouped.aggregate(d) tm.assert_frame_equal(result, expected) def test_multi_function_flexible_mix(df): # GH #1268 
grouped = df.groupby('A') # Expected d = OrderedDict([['C', OrderedDict([['foo', 'mean'], ['bar', 'std']])], ['D', {'sum': 'sum'}]]) # this uses column selection & renaming with tm.assert_produces_warning(FutureWarning, check_stacklevel=False): expected = grouped.aggregate(d) # Test 1 d = OrderedDict([['C', OrderedDict([['foo', 'mean'], ['bar', 'std']])], ['D', 'sum']]) # this uses column selection & renaming with tm.assert_produces_warning(FutureWarning, check_stacklevel=False): result = grouped.aggregate(d) tm.assert_frame_equal(result, expected) # Test 2 d = OrderedDict([['C', OrderedDict([['foo', 'mean'], ['bar', 'std']])], ['D', ['sum']]]) # this uses column selection & renaming with tm.assert_produces_warning(FutureWarning, check_stacklevel=False): result = grouped.aggregate(d) tm.assert_frame_equal(result, expected)
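A small, self-contained illustration of the aggregation forms exercised by the tests above (single function, per-column dict, and (name, function) tuples on a selected column); the toy frame is made up for the example:

import numpy as np
import pandas as pd

df = pd.DataFrame({
    "A": ["foo", "bar", "foo", "bar"],
    "C": [1.0, 2.0, 3.0, 4.0],
    "D": [10.0, 20.0, 30.0, 40.0],
})

# One function applied to every non-grouping column.
print(df.groupby("A").agg(np.mean))

# Different functions per column, including a list of functions for one column.
print(df.groupby("A").agg({"C": "mean", "D": ["mean", "std"]}))

# (name, function) tuples on a selected column keep the given order.
print(df.groupby("A")["C"].agg([("low", "min"), ("high", "max")]))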
dsm054/pandas
pandas/tests/groupby/aggregate/test_aggregate.py
pandas/io/sas/sas7bdat.py
from .dual_structured_quad import (
    DualRectilinearGraph,
    DualStructuredQuadGraph,
    DualUniformRectilinearGraph,
)
from .structured_quad import (
    RectilinearGraph,
    StructuredQuadGraph,
    UniformRectilinearGraph,
)

__all__ = [
    "StructuredQuadGraph",
    "RectilinearGraph",
    "UniformRectilinearGraph",
    "DualUniformRectilinearGraph",
    "DualRectilinearGraph",
    "DualStructuredQuadGraph",
]
import numpy as np
import pytest

from landlab import HexModelGrid


def test_perimeter_nodes():
    """Test perimeter nodes of a hex grid."""
    grid = HexModelGrid((3, 4), node_layout="rect")
    assert np.all(grid.perimeter_nodes == [3, 7, 11, 10, 9, 8, 4, 0, 1, 2])


def test_right_edge_nodes():
    """Test right edge nodes of a hex grid."""
    grid = HexModelGrid((3, 4), node_layout="rect")
    assert np.all(grid.nodes_at_right_edge == [3, 7, 11])


def test_top_edge_nodes():
    """Test top edge nodes of a hex grid."""
    grid = HexModelGrid((3, 4), node_layout="rect")
    assert np.all(grid.nodes_at_top_edge == [8, 9, 10, 11])


def test_left_edge_nodes():
    """Test left edge nodes of a hex grid."""
    grid = HexModelGrid((3, 4), node_layout="rect")
    assert np.all(grid.nodes_at_left_edge == [0, 4, 8])


def test_bottom_edge_nodes():
    """Test bottom edge nodes of a hex grid."""
    grid = HexModelGrid((3, 4), node_layout="rect")
    assert np.all(grid.nodes_at_bottom_edge == [0, 1, 2, 3])


def test_edges_are_readonly(edge_name):
    grid = HexModelGrid((3, 4), node_layout="rect")
    assert not grid.perimeter_nodes.flags["WRITEABLE"]
    with pytest.raises(ValueError):
        getattr(grid, "nodes_at_" + edge_name)[0] = 999


def test_edges_are_cached(edge_name):
    grid = HexModelGrid((3, 4), node_layout="rect")
    x = grid.perimeter_nodes
    assert grid.perimeter_nodes is x

    x = getattr(grid, "nodes_at_" + edge_name)
    assert getattr(grid, "nodes_at_" + edge_name) is x
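The same properties can be checked interactively; this sketch only uses calls that already appear in the tests above and assumes landlab is installed:

from landlab import HexModelGrid

grid = HexModelGrid((3, 4), node_layout="rect")
print(grid.perimeter_nodes)        # [ 3  7 11 10  9  8  4  0  1  2]
print(grid.nodes_at_bottom_edge)   # [0 1 2 3]
print(grid.nodes_at_left_edge)     # [0 4 8]

# As the tests assert, the arrays are cached and read-only.
assert grid.perimeter_nodes is grid.perimeter_nodes
assert not grid.perimeter_nodes.flags["WRITEABLE"]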
cmshobe/landlab
tests/grid/test_hex_grid/test_edges.py
landlab/graph/structured_quad/__init__.py
#!/usr/bin/env python
"""
.. codeauthor:: D Litwin, G Tucker

.. sectionauthor:: D Litwin, G Tucker
"""
from .dupuit_percolator import GroundwaterDupuitPercolator

__all__ = ["GroundwaterDupuitPercolator"]
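A rough usage sketch for the exported component, following the usual landlab pattern of a grid plus elevation fields; the grid size, the input field names, and the "water_table__elevation" output name are recalled from the landlab documentation and may differ slightly from the actual component, so treat this as an assumption-laden sketch:

from landlab import RasterModelGrid
from landlab.components import GroundwaterDupuitPercolator

# Small raster grid with flat topography above a flat aquifer base
# (field names assumed from the landlab docs).
grid = RasterModelGrid((5, 5), xy_spacing=10.0)
elev = grid.add_zeros("topographic__elevation", at="node")
elev[:] = 1.0
grid.add_zeros("aquifer_base__elevation", at="node")

gdp = GroundwaterDupuitPercolator(grid)
for _ in range(100):
    gdp.run_one_step(1e3)  # advance the groundwater state by dt seconds

# Output field name assumed from the landlab docs.
print(grid.at_node["water_table__elevation"].reshape(grid.shape))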
import numpy as np
import pytest

from landlab import HexModelGrid


def test_perimeter_nodes():
    """Test perimeter nodes of a hex grid."""
    grid = HexModelGrid((3, 4), node_layout="rect")
    assert np.all(grid.perimeter_nodes == [3, 7, 11, 10, 9, 8, 4, 0, 1, 2])


def test_right_edge_nodes():
    """Test right edge nodes of a hex grid."""
    grid = HexModelGrid((3, 4), node_layout="rect")
    assert np.all(grid.nodes_at_right_edge == [3, 7, 11])


def test_top_edge_nodes():
    """Test top edge nodes of a hex grid."""
    grid = HexModelGrid((3, 4), node_layout="rect")
    assert np.all(grid.nodes_at_top_edge == [8, 9, 10, 11])


def test_left_edge_nodes():
    """Test left edge nodes of a hex grid."""
    grid = HexModelGrid((3, 4), node_layout="rect")
    assert np.all(grid.nodes_at_left_edge == [0, 4, 8])


def test_bottom_edge_nodes():
    """Test bottom edge nodes of a hex grid."""
    grid = HexModelGrid((3, 4), node_layout="rect")
    assert np.all(grid.nodes_at_bottom_edge == [0, 1, 2, 3])


def test_edges_are_readonly(edge_name):
    grid = HexModelGrid((3, 4), node_layout="rect")
    assert not grid.perimeter_nodes.flags["WRITEABLE"]
    with pytest.raises(ValueError):
        getattr(grid, "nodes_at_" + edge_name)[0] = 999


def test_edges_are_cached(edge_name):
    grid = HexModelGrid((3, 4), node_layout="rect")
    x = grid.perimeter_nodes
    assert grid.perimeter_nodes is x

    x = getattr(grid, "nodes_at_" + edge_name)
    assert getattr(grid, "nodes_at_" + edge_name) is x
cmshobe/landlab
tests/grid/test_hex_grid/test_edges.py
landlab/components/groundwater/__init__.py
from .drainage_density import DrainageDensity

__all__ = ["DrainageDensity"]
import numpy as np
import pytest

from landlab import HexModelGrid


def test_perimeter_nodes():
    """Test perimeter nodes of a hex grid."""
    grid = HexModelGrid((3, 4), node_layout="rect")
    assert np.all(grid.perimeter_nodes == [3, 7, 11, 10, 9, 8, 4, 0, 1, 2])


def test_right_edge_nodes():
    """Test right edge nodes of a hex grid."""
    grid = HexModelGrid((3, 4), node_layout="rect")
    assert np.all(grid.nodes_at_right_edge == [3, 7, 11])


def test_top_edge_nodes():
    """Test top edge nodes of a hex grid."""
    grid = HexModelGrid((3, 4), node_layout="rect")
    assert np.all(grid.nodes_at_top_edge == [8, 9, 10, 11])


def test_left_edge_nodes():
    """Test left edge nodes of a hex grid."""
    grid = HexModelGrid((3, 4), node_layout="rect")
    assert np.all(grid.nodes_at_left_edge == [0, 4, 8])


def test_bottom_edge_nodes():
    """Test bottom edge nodes of a hex grid."""
    grid = HexModelGrid((3, 4), node_layout="rect")
    assert np.all(grid.nodes_at_bottom_edge == [0, 1, 2, 3])


def test_edges_are_readonly(edge_name):
    grid = HexModelGrid((3, 4), node_layout="rect")
    assert not grid.perimeter_nodes.flags["WRITEABLE"]
    with pytest.raises(ValueError):
        getattr(grid, "nodes_at_" + edge_name)[0] = 999


def test_edges_are_cached(edge_name):
    grid = HexModelGrid((3, 4), node_layout="rect")
    x = grid.perimeter_nodes
    assert grid.perimeter_nodes is x

    x = getattr(grid, "nodes_at_" + edge_name)
    assert getattr(grid, "nodes_at_" + edge_name) is x
cmshobe/landlab
tests/grid/test_hex_grid/test_edges.py
landlab/components/drainage_density/__init__.py
# -*- coding: utf-8 -*- # added new list_tbl definition from functools import partial from navmazing import NavigateToAttribute, NavigateToSibling from cfme.common import SummaryMixin, Taggable from cfme.fixtures import pytest_selenium as sel from cfme.web_ui import CheckboxTable, toolbar as tb, paginator, InfoBlock, match_location from utils.appliance import Navigatable from utils.appliance.implementations.ui import CFMENavigateStep, navigator, navigate_to from . import pol_btn, mon_btn list_tbl = CheckboxTable(table_locator="//div[@id='list_grid']//table") match_page = partial(match_location, controller='container_node', title='Nodes') class Node(Taggable, SummaryMixin, Navigatable): def __init__(self, name, provider, appliance=None): self.name = name self.provider = provider Navigatable.__init__(self, appliance=appliance) def load_details(self, refresh=False): navigate_to(self, 'Details') if refresh: tb.refresh() def get_detail(self, *ident): """ Gets details from the details infoblock Args: *ident: Table name and Key name, e.g. "Relationships", "Images" Returns: A string representing the contents of the summary's value. """ self.load_details(refresh=False) return InfoBlock.text(*ident) @navigator.register(Node, 'All') class All(CFMENavigateStep): prerequisite = NavigateToAttribute('appliance.server', 'LoggedIn') def am_i_here(self): return match_page(summary='Nodes') def step(self): from cfme.web_ui.menu import nav nav._nav_to_fn('Compute', 'Containers', 'Container Nodes')(None) def resetter(self): # Reset view and selection tb.select("List View") sel.check(paginator.check_all()) sel.uncheck(paginator.check_all()) @navigator.register(Node, 'Details') class Details(CFMENavigateStep): prerequisite = NavigateToSibling('All') def am_i_here(self): return match_page(summary='{} (Summary)'.format(self.obj.name)) def step(self): # Assuming default list view from prerequisite list_tbl.click_row_by_cells({'Name': self.obj.name, 'Provider': self.obj.provider.name}) @navigator.register(Node, 'EditTags') class EditTags(CFMENavigateStep): prerequisite = NavigateToSibling('Details') def am_i_here(self): match_page(summary='Tag Assignment') def step(self): pol_btn('Edit Tags') @navigator.register(Node, 'ManagePolicies') class ManagePolicies(CFMENavigateStep): prerequisite = NavigateToSibling('Details') def am_i_here(self): match_page(summary='Select Policy Profiles') def step(self): pol_btn('Manage Policies') @navigator.register(Node, 'Utilization') class Utilization(CFMENavigateStep): prerequisite = NavigateToSibling('Details') def am_i_here(self): match_page(summary='{} Capacity & Utilization'.format(self.obj.name)) def step(self): mon_btn('Utilization')
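A short sketch of how this class is typically driven from a test, using only methods defined above; the node name is a placeholder and the provider object is assumed to be an already-configured containers provider, so nothing here runs without a live cfme_tests environment:

from cfme.containers.node import Node
from utils.appliance.implementations.ui import navigate_to

provider = ...  # an already-configured containers provider object (assumed)
node = Node("ocp-node-01", provider)

navigate_to(node, 'Details')                        # open the node's summary screen
print(node.get_detail('Relationships', 'Images'))   # table/key names from the docstring above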
# -*- coding: utf-8 -*- import datetime import fauxfactory import pytest from cfme.rest import dialog as _dialog from cfme.rest import services as _services from cfme.rest import service_catalogs as _service_catalogs from cfme.rest import service_templates as _service_templates from cfme import test_requirements from utils.providers import setup_a_provider as _setup_a_provider from utils.wait import wait_for from utils import error, version pytestmark = [test_requirements.service, pytest.mark.tier(2)] @pytest.fixture(scope="module") def a_provider(): return _setup_a_provider("infra") @pytest.fixture(scope="function") def dialog(): return _dialog() @pytest.fixture(scope="function") def service_catalogs(request, rest_api): return _service_catalogs(request, rest_api) @pytest.fixture(scope="function") def services(request, rest_api, a_provider, dialog, service_catalogs): return _services(request, rest_api, a_provider, dialog, service_catalogs) @pytest.fixture(scope='function') def service_templates(request, rest_api, dialog): return _service_templates(request, rest_api, dialog) class TestServiceRESTAPI(object): def test_edit_service(self, rest_api, services): """Tests editing a service. Prerequisities: * An appliance with ``/api`` available. Steps: * POST /api/services (method ``edit``) with the ``name`` * Check if the service with ``new_name`` exists Metadata: test_flag: rest """ ser = services[0] new_name = fauxfactory.gen_alphanumeric() ser.action.edit(name=new_name) wait_for( lambda: rest_api.collections.services.find_by(name=new_name), num_sec=180, delay=10, ) def test_edit_multiple_services(self, rest_api, services): """Tests editing multiple service catalogs at time. Prerequisities: * An appliance with ``/api`` available. Steps: * POST /api/services (method ``edit``) with the list of dictionaries used to edit * Check if the services with ``new_name`` each exists Metadata: test_flag: rest """ new_names = [] services_data_edited = [] for ser in services: new_name = fauxfactory.gen_alphanumeric() new_names.append(new_name) services_data_edited.append({ "href": ser.href, "name": new_name, }) rest_api.collections.services.action.edit(*services_data_edited) for new_name in new_names: wait_for( lambda: rest_api.collections.service_templates.find_by(name=new_name), num_sec=180, delay=10, ) def test_delete_service(self, rest_api, services): service = rest_api.collections.services[0] service.action.delete() with error.expected("ActiveRecord::RecordNotFound"): service.action.delete() def test_delete_services(self, rest_api, services): rest_api.collections.services.action.delete(*services) with error.expected("ActiveRecord::RecordNotFound"): rest_api.collections.services.action.delete(*services) def test_retire_service_now(self, rest_api, services): """Test retiring a service Prerequisities: * An appliance with ``/api`` available. Steps: * Retrieve list of entities using GET /api/services , pick the first one * POST /api/service/<id> (method ``retire``) Metadata: test_flag: rest """ assert "retire" in rest_api.collections.services.action.all retire_service = services[0] retire_service.action.retire() wait_for( lambda: not rest_api.collections.services.find_by(name=retire_service.name), num_sec=600, delay=10, ) def test_retire_service_future(self, rest_api, services): """Test retiring a service Prerequisities: * An appliance with ``/api`` available. 
Steps: * Retrieve list of entities using GET /api/services , pick the first one * POST /api/service/<id> (method ``retire``) with the ``retire_date`` Metadata: test_flag: rest """ assert "retire" in rest_api.collections.services.action.all retire_service = services[0] date = (datetime.datetime.now() + datetime.timedelta(days=5)).strftime('%m/%d/%y') future = { "date": date, "warn": "4", } date_before = retire_service.updated_at retire_service.action.retire(future) def _finished(): retire_service.reload() if retire_service.updated_at > date_before: return True return False wait_for(_finished, num_sec=600, delay=5, message="REST automation_request finishes") @pytest.mark.uncollectif(lambda: version.current_version() < '5.5') def test_set_service_owner(self, rest_api, services): if "set_ownership" not in rest_api.collections.services.action.all: pytest.skip("Set owner action for service is not implemented in this version") service = services[0] user = rest_api.collections.users.get(userid='admin') data = { "owner": {"href": user.href} } service.action.set_ownership(data) service.reload() assert hasattr(service, "evm_owner") assert service.evm_owner.userid == user.userid @pytest.mark.uncollectif(lambda: version.current_version() < '5.5') def test_set_services_owner(self, rest_api, services): if "set_ownership" not in rest_api.collections.services.action.all: pytest.skip("Set owner action for service is not implemented in this version") data = [] user = rest_api.collections.users.get(userid='admin') for service in services: tmp_data = { "href": service.href, "owner": {"href": user.href} } data.append(tmp_data) rest_api.collections.services.action.set_ownership(*data) for service in services: service.reload() assert hasattr(service, "evm_owner") assert service.evm_owner.userid == user.userid class TestServiceDialogsRESTAPI(object): @pytest.mark.uncollectif(lambda: version.current_version() < '5.7') @pytest.mark.parametrize("method", ["post", "delete"]) def test_delete_service_dialog(self, rest_api, dialog, method): service_dialog = rest_api.collections.service_dialogs.find_by(label=dialog.label)[0] service_dialog.action.delete(force_method=method) with error.expected("ActiveRecord::RecordNotFound"): service_dialog.action.delete() @pytest.mark.uncollectif(lambda: version.current_version() < '5.7') def test_delete_service_dialogs(self, rest_api, dialog): service_dialog = rest_api.collections.service_dialogs.find_by(label=dialog.label)[0] rest_api.collections.service_dialogs.action.delete(service_dialog) with error.expected("ActiveRecord::RecordNotFound"): rest_api.collections.service_dialogs.action.delete(service_dialog) class TestServiceTemplateRESTAPI(object): def test_edit_service_template(self, rest_api, service_templates): """Tests editing a service template. Prerequisities: * An appliance with ``/api`` available. 
Steps: * POST /api/service_templates (method ``edit``) with the ``name`` * Check if the service_template with ``new_name`` exists Metadata: test_flag: rest """ scl = rest_api.collections.service_templates[0] new_name = fauxfactory.gen_alphanumeric() scl.action.edit(name=new_name) wait_for( lambda: rest_api.collections.service_catalogs.find_by(name=new_name), num_sec=180, delay=10, ) def test_delete_service_templates(self, rest_api, service_templates): rest_api.collections.service_templates.action.delete(*service_templates) with error.expected("ActiveRecord::RecordNotFound"): rest_api.collections.service_templates.action.delete(*service_templates) def test_delete_service_template(self, rest_api, service_templates): s_tpl = rest_api.collections.service_templates[0] s_tpl.action.delete() with error.expected("ActiveRecord::RecordNotFound"): s_tpl.action.delete() @pytest.mark.uncollectif(lambda: version.current_version() < '5.5') def test_assign_unassign_service_template_to_service_catalog(self, rest_api, service_catalogs, service_templates): """Tests assigning and unassigning the service templates to service catalog. Prerequisities: * An appliance with ``/api`` available. Steps: * POST /api/service_catalogs/<id>/service_templates (method ``assign``) with the list of dictionaries service templates list * Check if the service_templates were assigned to the service catalog * POST /api/service_catalogs/<id>/service_templates (method ``unassign``) with the list of dictionaries service templates list * Check if the service_templates were unassigned to the service catalog Metadata: test_flag: rest """ scl = service_catalogs[0] stpl = service_templates[0] scl.service_templates.action.assign(stpl) scl.reload() assert stpl.id in [st.id for st in scl.service_templates.all] scl.service_templates.action.unassign(stpl) scl.reload() assert stpl.id not in [st.id for st in scl.service_templates.all] def test_edit_multiple_service_templates(self, rest_api, service_templates): """Tests editing multiple service catalogs at time. Prerequisities: * An appliance with ``/api`` available. Steps: * POST /api/service_templates (method ``edit``) with the list of dictionaries used to edit * Check if the service_templates with ``new_name`` each exists Metadata: test_flag: rest """ new_names = [] service_tpls_data_edited = [] for tpl in service_templates: new_name = fauxfactory.gen_alphanumeric() new_names.append(new_name) service_tpls_data_edited.append({ "href": tpl.href, "name": new_name, }) rest_api.collections.service_templates.action.edit(*service_tpls_data_edited) for new_name in new_names: wait_for( lambda: rest_api.collections.service_templates.find_by(name=new_name), num_sec=180, delay=10, )
kzvyahin/cfme_tests
cfme/tests/services/test_rest_services.py
cfme/containers/node.py
# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et: # Copyright 2015-2020 Florian Bruhin (The Compiler) <mail@qutebrowser.org> # # This file is part of qutebrowser. # # qutebrowser is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # qutebrowser is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with qutebrowser. If not, see <http://www.gnu.org/licenses/>. """Tools related to error printing/displaying.""" from PyQt5.QtWidgets import QMessageBox from qutebrowser.utils import log, utils def _get_name(exc: BaseException) -> str: """Get a suitable exception name as a string.""" prefixes = ['qutebrowser', 'builtins'] name = utils.qualname(exc.__class__) for prefix in prefixes: if name.startswith(prefix): name = name[len(prefix) + 1:] break return name def handle_fatal_exc(exc: BaseException, title: str, *, no_err_windows: bool, pre_text: str = '', post_text: str = '') -> None: """Handle a fatal "expected" exception by displaying an error box. If --no-err-windows is given as argument, the text is logged to the error logger instead. Args: exc: The Exception object being handled. no_err_windows: Show text in log instead of error window. title: The title to be used for the error message. pre_text: The text to be displayed before the exception text. post_text: The text to be displayed after the exception text. """ if no_err_windows: lines = [ "Handling fatal {} with --no-err-windows!".format(_get_name(exc)), "", "title: {}".format(title), "pre_text: {}".format(pre_text), "post_text: {}".format(post_text), "exception text: {}".format(str(exc) or 'none'), ] log.misc.exception('\n'.join(lines)) else: if pre_text: msg_text = '{}: {}'.format(pre_text, exc) else: msg_text = str(exc) if post_text: msg_text += '\n\n{}'.format(post_text) msgbox = QMessageBox(QMessageBox.Critical, title, msg_text) msgbox.exec_()
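A small sketch of calling the helper above; the failing operation and the message strings are invented for illustration, and no_err_windows=True is used so the example only needs the logger rather than a running Qt application:

from qutebrowser.utils import error


def load_profile(path):
    # Stand-in operation that fails; purely illustrative.
    raise OSError("profile not found: {}".format(path))


try:
    load_profile("/tmp/does-not-exist")
except OSError as e:
    error.handle_fatal_exc(
        e,
        "Error while starting",
        no_err_windows=True,   # log the details instead of showing a QMessageBox
        pre_text="Could not load the profile",
    )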
# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et: # Copyright 2015-2020 Florian Bruhin (The Compiler) <mail@qutebrowser.org> # # This file is part of qutebrowser. # # qutebrowser is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # qutebrowser is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with qutebrowser. If not, see <http://www.gnu.org/licenses/>. import pytest_bdd as bdd from qutebrowser.utils import qtutils bdd.scenarios('qutescheme.feature') @bdd.then(bdd.parsers.parse("the {kind} request should be blocked")) def request_blocked(request, quteproc, kind): blocking_set_msg = ( "Blocking malicious request from qute://settings/set?* to " "qute://settings/set?*") blocking_csrf_msg = ( "Blocking malicious request from " "http://localhost:*/data/misc/qutescheme_csrf.html to " "qute://settings/set?*") blocking_js_msg = ( "[http://localhost:*/data/misc/qutescheme_csrf.html:0] Not allowed to " "load local resource: qute://settings/set?*" ) unsafe_redirect_msg = "Load error: ERR_UNSAFE_REDIRECT" blocked_request_msg = "Load error: ERR_BLOCKED_BY_CLIENT" webkit_error_invalid = ( "Error while loading qute://settings/set?*: Invalid qute://settings " "request") webkit_error_unsupported = ( "Error while loading qute://settings/set?*: Unsupported request type") if request.config.webengine and qtutils.version_check('5.12'): # On Qt 5.12, we mark qute:// as a local scheme, causing most requests # being blocked by Chromium internally (logging to the JS console). expected_messages = { 'img': [blocking_js_msg], 'link': [blocking_js_msg], 'redirect': [blocking_set_msg, blocked_request_msg], 'form': [blocking_js_msg], } if qtutils.version_check('5.15', compiled=False): # On Qt 5.15, Chromium blocks the redirect as ERR_UNSAFE_REDIRECT # instead. expected_messages['redirect'] = [unsafe_redirect_msg] elif request.config.webengine: expected_messages = { 'img': [blocking_csrf_msg], 'link': [blocking_set_msg, blocked_request_msg], 'redirect': [blocking_set_msg, blocked_request_msg], 'form': [blocking_set_msg, blocked_request_msg], } else: # QtWebKit expected_messages = { 'img': [blocking_csrf_msg], 'link': [blocking_csrf_msg, webkit_error_invalid], 'redirect': [blocking_csrf_msg, webkit_error_invalid], 'form': [webkit_error_unsupported], } for pattern in expected_messages[kind]: msg = quteproc.wait_for(message=pattern) msg.expected = True
t-wissmann/qutebrowser
tests/end2end/features/test_qutescheme_bdd.py
qutebrowser/utils/error.py
# Copyright 2019, Google LLC All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from google.api_core import gapic_v1 from google.cloud.pubsub_v1.publisher._sequencer import base from google.pubsub_v1 import types as gapic_types class UnorderedSequencer(base.Sequencer): """ Sequences messages into batches for one topic without any ordering. Public methods are NOT thread-safe. """ def __init__(self, client, topic): self._client = client self._topic = topic self._current_batch = None self._stopped = False def is_finished(self): """ Whether the sequencer is finished and should be cleaned up. Returns: bool: Whether the sequencer is finished and should be cleaned up. """ # TODO: Implement. Not implementing yet because of possible performance # impact due to extra locking required. This does mean that # UnorderedSequencers don't get cleaned up, but this is the same as # previously existing behavior. return False def stop(self): """ Stop the sequencer. Subsequent publishes will fail. Raises: RuntimeError: If called after stop() has already been called. """ if self._stopped: raise RuntimeError("Unordered sequencer already stopped.") self.commit() self._stopped = True def commit(self): """ Commit the batch. Raises: RuntimeError: If called after stop() has already been called. """ if self._stopped: raise RuntimeError("Unordered sequencer already stopped.") if self._current_batch: self._current_batch.commit() # At this point, we lose track of the old batch, but we don't # care since we just committed it. # Setting this to None guarantees the next publish() creates a new # batch. self._current_batch = None def unpause(self): """ Not relevant for this class. """ raise NotImplementedError def _create_batch( self, commit_retry=gapic_v1.method.DEFAULT, commit_timeout: gapic_types.TimeoutType = gapic_v1.method.DEFAULT, ): """ Create a new batch using the client's batch class and other stored settings. Args: commit_retry (Optional[google.api_core.retry.Retry]): The retry settings to apply when publishing the batch. commit_timeout (:class:`~.pubsub_v1.types.TimeoutType`): The timeout to apply when publishing the batch. """ return self._client._batch_class( client=self._client, topic=self._topic, settings=self._client.batch_settings, batch_done_callback=None, commit_when_full=True, commit_retry=commit_retry, commit_timeout=commit_timeout, ) def publish( self, message, retry=gapic_v1.method.DEFAULT, timeout: gapic_types.TimeoutType = gapic_v1.method.DEFAULT, ): """ Batch message into existing or new batch. Args: message (~.pubsub_v1.types.PubsubMessage): The Pub/Sub message. retry (Optional[google.api_core.retry.Retry]): The retry settings to apply when publishing the message. timeout (:class:`~.pubsub_v1.types.TimeoutType`): The timeout to apply when publishing the message. Returns: ~google.api_core.future.Future: An object conforming to the :class:`~concurrent.futures.Future` interface. The future tracks the publishing status of the message. 
Raises: RuntimeError: If called after stop() has already been called. pubsub_v1.publisher.exceptions.MessageTooLargeError: If publishing the ``message`` would exceed the max size limit on the backend. """ if self._stopped: raise RuntimeError("Unordered sequencer already stopped.") if not self._current_batch: newbatch = self._create_batch(commit_retry=retry, commit_timeout=timeout) self._current_batch = newbatch batch = self._current_batch future = None while future is None: # Might throw MessageTooLargeError future = batch.publish(message) # batch is full, triggering commit_when_full if future is None: batch = self._create_batch(commit_retry=retry, commit_timeout=timeout) # At this point, we lose track of the old batch, but we don't # care since it's already committed (because it was full.) self._current_batch = batch return future # Used only for testing. def _set_batch(self, batch): self._current_batch = batch
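A hedged sketch of driving this (internal) sequencer directly rather than through the public publisher client; the module path google.cloud.pubsub_v1.publisher._sequencer.unordered, the project/topic names, and the availability of application-default credentials are all assumptions:

from google.cloud import pubsub_v1
from google.cloud.pubsub_v1.publisher._sequencer import unordered  # assumed module name
from google.pubsub_v1 import types as gapic_types

client = pubsub_v1.PublisherClient()                  # needs default credentials
topic = client.topic_path("my-project", "my-topic")   # placeholder names

sequencer = unordered.UnorderedSequencer(client, topic)
future = sequencer.publish(gapic_types.PubsubMessage(data=b"hello"))

sequencer.commit()        # flush the current batch to the API
print(future.result())    # blocks until the commit assigns a message ID
sequencer.stop()          # further publish() calls now raise RuntimeError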
# Copyright 2017, Google LLC All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import datetime import threading import time import mock import pytest import google.api_core.exceptions from google.api_core import gapic_v1 from google.auth import credentials from google.cloud.pubsub_v1 import publisher from google.cloud.pubsub_v1 import types from google.cloud.pubsub_v1.publisher import exceptions from google.cloud.pubsub_v1.publisher._batch.base import BatchStatus from google.cloud.pubsub_v1.publisher._batch.base import BatchCancellationReason from google.cloud.pubsub_v1.publisher._batch import thread from google.cloud.pubsub_v1.publisher._batch.thread import Batch from google.pubsub_v1 import types as gapic_types def create_client(): creds = mock.Mock(spec=credentials.Credentials) return publisher.Client(credentials=creds) def create_batch( topic="topic_name", batch_done_callback=None, commit_when_full=True, commit_retry=gapic_v1.method.DEFAULT, commit_timeout: gapic_types.TimeoutType = gapic_v1.method.DEFAULT, **batch_settings ): """Return a batch object suitable for testing. Args: topic (str): Topic name. batch_done_callback (Callable[bool]): A callable that is called when the batch is done, either with a success or a failure flag. commit_when_full (bool): Whether to commit the batch when the batch has reached byte-size or number-of-messages limits. commit_retry (Optional[google.api_core.retry.Retry]): The retry settings for the batch commit call. commit_timeout (:class:`~.pubsub_v1.types.TimeoutType`): The timeout to apply to the batch commit call. batch_settings (Mapping[str, str]): Arguments passed on to the :class:``~.pubsub_v1.types.BatchSettings`` constructor. Returns: ~.pubsub_v1.publisher.batch.thread.Batch: A batch object. """ client = create_client() settings = types.BatchSettings(**batch_settings) return Batch( client, topic, settings, batch_done_callback=batch_done_callback, commit_when_full=commit_when_full, commit_retry=commit_retry, commit_timeout=commit_timeout, ) @mock.patch.object(threading, "Lock") def test_make_lock(Lock): lock = Batch.make_lock() assert lock is Lock.return_value Lock.assert_called_once_with() def test_client(): client = create_client() settings = types.BatchSettings() batch = Batch(client, "topic_name", settings) assert batch.client is client def test_commit(): batch = create_batch() with mock.patch.object( Batch, "_start_commit_thread", autospec=True ) as _start_commit_thread: batch.commit() _start_commit_thread.assert_called_once() # The batch's status needs to be something other than "accepting messages", # since the commit started. assert batch.status != BatchStatus.ACCEPTING_MESSAGES assert batch.status == BatchStatus.STARTING def test_commit_no_op(): batch = create_batch() batch._status = BatchStatus.IN_PROGRESS with mock.patch.object(threading, "Thread", autospec=True) as Thread: batch.commit() # Make sure a thread was not created. Thread.assert_not_called() # Check that batch status is unchanged. 
assert batch.status == BatchStatus.IN_PROGRESS def test_blocking__commit(): batch = create_batch() futures = ( batch.publish({"data": b"This is my message."}), batch.publish({"data": b"This is another message."}), ) # Set up the underlying API publish method to return a PublishResponse. publish_response = gapic_types.PublishResponse(message_ids=["a", "b"]) patch = mock.patch.object( type(batch.client.api), "publish", return_value=publish_response ) with patch as publish: batch._commit() # Establish that the underlying API call was made with expected # arguments. publish.assert_called_once_with( topic="topic_name", messages=[ gapic_types.PubsubMessage(data=b"This is my message."), gapic_types.PubsubMessage(data=b"This is another message."), ], retry=gapic_v1.method.DEFAULT, timeout=gapic_v1.method.DEFAULT, ) # Establish that all of the futures are done, and that they have the # expected values. assert futures[0].done() assert futures[0].result() == "a" assert futures[1].done() assert futures[1].result() == "b" def test_blocking__commit_custom_retry(): batch = create_batch(commit_retry=mock.sentinel.custom_retry) batch.publish({"data": b"This is my message."}) # Set up the underlying API publish method to return a PublishResponse. publish_response = gapic_types.PublishResponse(message_ids=["a"]) patch = mock.patch.object( type(batch.client.api), "publish", return_value=publish_response ) with patch as publish: batch._commit() # Establish that the underlying API call was made with expected # arguments. publish.assert_called_once_with( topic="topic_name", messages=[gapic_types.PubsubMessage(data=b"This is my message.")], retry=mock.sentinel.custom_retry, timeout=gapic_v1.method.DEFAULT, ) def test_blocking__commit_custom_timeout(): batch = create_batch(commit_timeout=mock.sentinel.custom_timeout) batch.publish({"data": b"This is my message."}) # Set up the underlying API publish method to return a PublishResponse. publish_response = gapic_types.PublishResponse(message_ids=["a"]) patch = mock.patch.object( type(batch.client.api), "publish", return_value=publish_response ) with patch as publish: batch._commit() # Establish that the underlying API call was made with expected # arguments. publish.assert_called_once_with( topic="topic_name", messages=[gapic_types.PubsubMessage(data=b"This is my message.")], retry=gapic_v1.method.DEFAULT, timeout=mock.sentinel.custom_timeout, ) def test_client_api_publish_not_blocking_additional_publish_calls(): batch = create_batch(max_messages=1) api_publish_called = threading.Event() def api_publish_delay(topic="", messages=(), retry=None, timeout=None): api_publish_called.set() time.sleep(1.0) message_ids = [str(i) for i in range(len(messages))] return gapic_types.PublishResponse(message_ids=message_ids) api_publish_patch = mock.patch.object( type(batch.client.api), "publish", side_effect=api_publish_delay ) with api_publish_patch: batch.publish({"data": b"first message"}) start = datetime.datetime.now() event_set = api_publish_called.wait(timeout=1.0) if not event_set: # pragma: NO COVER pytest.fail("API publish was not called in time") batch.publish({"data": b"second message"}) end = datetime.datetime.now() # While a batch commit in progress, waiting for the API publish call to # complete should not unnecessariliy delay other calls to batch.publish(). 
assert (end - start).total_seconds() < 1.0 @mock.patch.object(thread, "_LOGGER") def test_blocking__commit_starting(_LOGGER): batch = create_batch() batch._status = BatchStatus.STARTING batch._commit() assert batch._status == BatchStatus.SUCCESS _LOGGER.debug.assert_called_once_with("No messages to publish, exiting commit") @mock.patch.object(thread, "_LOGGER") def test_blocking__commit_already_started(_LOGGER): batch = create_batch() batch._status = BatchStatus.IN_PROGRESS batch._commit() assert batch._status == BatchStatus.IN_PROGRESS _LOGGER.debug.assert_called_once_with( "Batch is already in progress or has been cancelled, exiting commit" ) def test_blocking__commit_no_messages(): batch = create_batch() with mock.patch.object(type(batch.client.api), "publish") as publish: batch._commit() assert publish.call_count == 0 def test_blocking__commit_wrong_messageid_length(): batch = create_batch() futures = ( batch.publish({"data": b"blah blah blah"}), batch.publish({"data": b"blah blah blah blah"}), ) # Set up a PublishResponse that only returns one message ID. publish_response = gapic_types.PublishResponse(message_ids=["a"]) patch = mock.patch.object( type(batch.client.api), "publish", return_value=publish_response ) with patch: batch._commit() for future in futures: assert future.done() assert isinstance(future.exception(), exceptions.PublishError) def test_block__commmit_api_error(): batch = create_batch() futures = ( batch.publish({"data": b"blah blah blah"}), batch.publish({"data": b"blah blah blah blah"}), ) # Make the API throw an error when publishing. error = google.api_core.exceptions.InternalServerError("uh oh") patch = mock.patch.object(type(batch.client.api), "publish", side_effect=error) with patch: batch._commit() for future in futures: assert future.done() assert future.exception() == error def test_block__commmit_retry_error(): batch = create_batch() futures = ( batch.publish({"data": b"blah blah blah"}), batch.publish({"data": b"blah blah blah blah"}), ) # Make the API throw an error when publishing. error = google.api_core.exceptions.RetryError("uh oh", None) patch = mock.patch.object(type(batch.client.api), "publish", side_effect=error) with patch: batch._commit() for future in futures: assert future.done() assert future.exception() == error def test_publish_updating_batch_size(): batch = create_batch(topic="topic_foo") messages = ( gapic_types.PubsubMessage(data=b"foobarbaz"), gapic_types.PubsubMessage(data=b"spameggs"), gapic_types.PubsubMessage(data=b"1335020400"), ) # Publish each of the messages, which should save them to the batch. futures = [batch.publish(message) for message in messages] # There should be three messages on the batch, and three futures. assert len(batch.messages) == 3 assert batch._futures == futures # The size should have been incremented by the sum of the size # contributions of each message to the PublishRequest. base_request_size = gapic_types.PublishRequest(topic="topic_foo")._pb.ByteSize() expected_request_size = base_request_size + sum( gapic_types.PublishRequest(messages=[msg])._pb.ByteSize() for msg in messages ) assert batch.size == expected_request_size assert batch.size > 0 # I do not always trust protobuf. 
def test_publish(): batch = create_batch() message = gapic_types.PubsubMessage() future = batch.publish(message) assert len(batch.messages) == 1 assert batch._futures == [future] def test_publish_max_messages_zero(): batch = create_batch(topic="topic_foo", max_messages=0) message = gapic_types.PubsubMessage(data=b"foobarbaz") with mock.patch.object(batch, "commit") as commit: future = batch.publish(message) assert future is not None assert len(batch.messages) == 1 assert batch._futures == [future] commit.assert_called_once() def test_publish_max_messages_enforced(): batch = create_batch(topic="topic_foo", max_messages=1) message = gapic_types.PubsubMessage(data=b"foobarbaz") message2 = gapic_types.PubsubMessage(data=b"foobarbaz2") future = batch.publish(message) future2 = batch.publish(message2) assert future is not None assert future2 is None assert len(batch.messages) == 1 assert len(batch._futures) == 1 def test_publish_max_bytes_enforced(): batch = create_batch(topic="topic_foo", max_bytes=15) message = gapic_types.PubsubMessage(data=b"foobarbaz") message2 = gapic_types.PubsubMessage(data=b"foobarbaz2") future = batch.publish(message) future2 = batch.publish(message2) assert future is not None assert future2 is None assert len(batch.messages) == 1 assert len(batch._futures) == 1 def test_publish_exceed_max_messages(): max_messages = 4 batch = create_batch(max_messages=max_messages) messages = ( gapic_types.PubsubMessage(data=b"foobarbaz"), gapic_types.PubsubMessage(data=b"spameggs"), gapic_types.PubsubMessage(data=b"1335020400"), ) # Publish each of the messages, which should save them to the batch. with mock.patch.object(batch, "commit") as commit: futures = [batch.publish(message) for message in messages] assert batch._futures == futures assert len(futures) == max_messages - 1 # Commit should not yet have been called. assert commit.call_count == 0 # When a fourth message is published, commit should be called. # No future will be returned in this case. future = batch.publish(gapic_types.PubsubMessage(data=b"last one")) commit.assert_called_once_with() assert future is None assert batch._futures == futures @mock.patch.object(thread, "_SERVER_PUBLISH_MAX_BYTES", 1000) def test_publish_single_message_size_exceeds_server_size_limit(): batch = create_batch( topic="topic_foo", max_messages=1000, max_bytes=1000 * 1000, # way larger than (mocked) server side limit ) big_message = gapic_types.PubsubMessage(data=b"x" * 984) request_size = gapic_types.PublishRequest( topic="topic_foo", messages=[big_message] )._pb.ByteSize() assert request_size == 1001 # sanity check, just above the (mocked) server limit with pytest.raises(exceptions.MessageTooLargeError): batch.publish(big_message) @mock.patch.object(thread, "_SERVER_PUBLISH_MAX_BYTES", 1000) def test_publish_total_messages_size_exceeds_server_size_limit(): batch = create_batch(topic="topic_foo", max_messages=10, max_bytes=1500) messages = ( gapic_types.PubsubMessage(data=b"x" * 500), gapic_types.PubsubMessage(data=b"x" * 600), ) # Sanity check - request size is still below BatchSettings.max_bytes, # but it exceeds the server-side size limit. request_size = gapic_types.PublishRequest( topic="topic_foo", messages=messages )._pb.ByteSize() assert 1000 < request_size < 1500 with mock.patch.object(batch, "commit") as fake_commit: batch.publish(messages[0]) batch.publish(messages[1]) # The server side limit should kick in and cause a commit. 
fake_commit.assert_called_once() def test_publish_dict(): batch = create_batch() future = batch.publish({"data": b"foobarbaz", "attributes": {"spam": "eggs"}}) # There should be one message on the batch. expected_message = gapic_types.PubsubMessage( data=b"foobarbaz", attributes={"spam": "eggs"} ) assert batch.messages == [expected_message] assert batch._futures == [future] def test_cancel(): batch = create_batch() futures = ( batch.publish({"data": b"This is my message."}), batch.publish({"data": b"This is another message."}), ) batch.cancel(BatchCancellationReason.PRIOR_ORDERED_MESSAGE_FAILED) # Assert all futures are cancelled with an error. for future in futures: exc = future.exception() assert type(exc) is RuntimeError assert exc.args[0] == BatchCancellationReason.PRIOR_ORDERED_MESSAGE_FAILED.value def test_do_not_commit_when_full_when_flag_is_off(): max_messages = 4 # Set commit_when_full flag to False batch = create_batch(max_messages=max_messages, commit_when_full=False) messages = ( gapic_types.PubsubMessage(data=b"foobarbaz"), gapic_types.PubsubMessage(data=b"spameggs"), gapic_types.PubsubMessage(data=b"1335020400"), ) with mock.patch.object(batch, "commit") as commit: # Publish 3 messages. futures = [batch.publish(message) for message in messages] assert len(futures) == 3 # When a fourth message is published, commit should not be called. future = batch.publish(gapic_types.PubsubMessage(data=b"last one")) assert commit.call_count == 0 assert future is None class BatchDoneCallbackTracker(object): def __init__(self): self.called = False self.success = None def __call__(self, success): self.called = True self.success = success def test_batch_done_callback_called_on_success(): batch_done_callback_tracker = BatchDoneCallbackTracker() batch = create_batch(batch_done_callback=batch_done_callback_tracker) # Ensure messages exist. message = gapic_types.PubsubMessage(data=b"foobarbaz") batch.publish(message) # One response for one published message. publish_response = gapic_types.PublishResponse(message_ids=["a"]) with mock.patch.object( type(batch.client.api), "publish", return_value=publish_response ): batch._commit() assert batch_done_callback_tracker.called assert batch_done_callback_tracker.success def test_batch_done_callback_called_on_publish_failure(): batch_done_callback_tracker = BatchDoneCallbackTracker() batch = create_batch(batch_done_callback=batch_done_callback_tracker) # Ensure messages exist. message = gapic_types.PubsubMessage(data=b"foobarbaz") batch.publish(message) # One response for one published message. publish_response = gapic_types.PublishResponse(message_ids=["a"]) # Induce publish error. error = google.api_core.exceptions.InternalServerError("uh oh") with mock.patch.object( type(batch.client.api), "publish", return_value=publish_response, side_effect=error, ): batch._commit() assert batch_done_callback_tracker.called assert not batch_done_callback_tracker.success def test_batch_done_callback_called_on_publish_response_invalid(): batch_done_callback_tracker = BatchDoneCallbackTracker() batch = create_batch(batch_done_callback=batch_done_callback_tracker) # Ensure messages exist. message = gapic_types.PubsubMessage(data=b"foobarbaz") batch.publish(message) # No message ids returned in successful publish response -> invalid. 
publish_response = gapic_types.PublishResponse(message_ids=[]) with mock.patch.object( type(batch.client.api), "publish", return_value=publish_response ): batch._commit() assert batch_done_callback_tracker.called assert not batch_done_callback_tracker.success
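The batching behaviour these tests poke at directly on the Batch class (commits triggered by message count, request size, or latency, and futures that resolve to server-assigned message IDs) is reached through the public publisher client. A minimal sketch, assuming only that a project and topic already exist; the project and topic names below are placeholders:

from google.cloud import pubsub_v1

# The same knobs the tests exercise via create_batch(max_messages=..., max_bytes=...).
batch_settings = pubsub_v1.types.BatchSettings(
    max_messages=100,       # commit once 100 messages are queued
    max_bytes=1024 * 1024,  # ...or once the request reaches about 1 MB
    max_latency=0.05,       # ...or after 50 ms, whichever comes first
)
publisher = pubsub_v1.PublisherClient(batch_settings=batch_settings)
topic_path = publisher.topic_path("my-project", "my-topic")  # placeholder names

# publish() returns a future that resolves to the server-assigned message ID,
# mirroring futures[0].result() == "a" in the tests above.
future = publisher.publish(topic_path, b"This is my message.", origin="example")
print(future.result())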
googleapis/python-pubsub
tests/unit/pubsub_v1/publisher/batch/test_thread.py
google/cloud/pubsub_v1/publisher/_sequencer/unordered_sequencer.py
# -*- coding: utf-8 -*- from branca.element import MacroElement from folium.elements import JSCSSMixin from folium.utilities import parse_options from jinja2 import Template class MousePosition(JSCSSMixin, MacroElement): """Add a field that shows the coordinates of the mouse position. Uses the Leaflet plugin by Ardhi Lukianto under MIT license. https://github.com/ardhi/Leaflet.MousePosition Parameters ---------- position : str, default 'bottomright' The standard Control position parameter for the widget. separator : str, default ' : ' Character used to separate latitude and longitude values. empty_string : str, default 'Unavailable' Initial text to display. lng_first : bool, default False Whether to put the longitude first or not. Set as True to display longitude before latitude. num_digits : int, default '5' Number of decimal places included in the displayed longitude and latitude decimal degree values. prefix : str, default '' A string to be prepended to the coordinates. lat_formatter : str, default None Custom Javascript function to format the latitude value. lng_formatter : str, default None Custom Javascript function to format the longitude value. Examples -------- >>> fmtr = "function(num) {return L.Util.formatNum(num, 3) + ' º ';};" >>> MousePosition(position='topright', separator=' | ', prefix="Mouse:", ... lat_formatter=fmtr, lng_formatter=fmtr) """ _template = Template(""" {% macro script(this, kwargs) %} var {{ this.get_name() }} = new L.Control.MousePosition( {{ this.options|tojson }} ); {{ this.get_name() }}.options["latFormatter"] = {{ this.lat_formatter }}; {{ this.get_name() }}.options["lngFormatter"] = {{ this.lng_formatter }}; {{ this._parent.get_name() }}.addControl({{ this.get_name() }}); {% endmacro %} """) default_js = [ ('Control_MousePosition_js', 'https://cdn.jsdelivr.net/gh/ardhi/Leaflet.MousePosition/src/L.Control.MousePosition.min.js') ] default_css = [ ('Control_MousePosition_css', 'https://cdn.jsdelivr.net/gh/ardhi/Leaflet.MousePosition/src/L.Control.MousePosition.min.css') ] def __init__(self, position='bottomright', separator=' : ', empty_string='Unavailable', lng_first=False, num_digits=5, prefix='', lat_formatter=None, lng_formatter=None, **kwargs): super(MousePosition, self).__init__() self._name = 'MousePosition' self.options = parse_options( position=position, separator=separator, empty_string=empty_string, lng_first=lng_first, num_digits=num_digits, prefix=prefix, **kwargs ) self.lat_formatter = lat_formatter or 'undefined' self.lng_formatter = lng_formatter or 'undefined'
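For orientation, a MousePosition control like the one defined above is normally attached to a folium Map; a small usage sketch, where the map location, formatter, and output file name are arbitrary:

import folium
from folium.plugins import MousePosition

m = folium.Map(location=[45.5236, -122.6750], zoom_start=10)

# Custom JavaScript formatter, as in the class docstring example above.
fmtr = "function(num) {return L.Util.formatNum(num, 3) + ' º ';};"
MousePosition(
    position='topright',
    separator=' | ',
    prefix='Mouse:',
    lat_formatter=fmtr,
    lng_formatter=fmtr,
).add_to(m)

m.save('map_with_mouse_position.html')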
# -*- coding: utf-8 -*- """ Test HeatMap ------------ """ import folium from folium.plugins import HeatMap from folium.utilities import normalize from jinja2 import Template import numpy as np import pytest def test_heat_map(): np.random.seed(3141592) data = (np.random.normal(size=(100, 2)) * np.array([[1, 1]]) + np.array([[48, 5]])) m = folium.Map([48., 5.], tiles='stamentoner', zoom_start=6) hm = HeatMap(data) m.add_child(hm) m._repr_html_() out = normalize(m._parent.render()) # We verify that the script import is present. script = '<script src="https://cdn.jsdelivr.net/gh/python-visualization/folium@master/folium/templates/leaflet_heat.min.js"></script>' # noqa assert script in out # We verify that the script part is correct. tmpl = Template(""" var {{this.get_name()}} = L.heatLayer( {{this.data}}, { minOpacity: {{this.min_opacity}}, maxZoom: {{this.max_zoom}}, radius: {{this.radius}}, blur: {{this.blur}}, gradient: {{this.gradient}} }) .addTo({{this._parent.get_name()}}); """) assert tmpl.render(this=hm) bounds = m.get_bounds() np.testing.assert_allclose( bounds, [[46.218566840847025, 3.0302801394447734], [50.75345011431167, 7.132453997672826]]) def test_heatmap_data(): data = HeatMap(np.array([[3, 4, 1], [5, 6, 1], [7, 8, 0.5]])).data assert isinstance(data, list) assert len(data) == 3 for i in range(len(data)): assert isinstance(data[i], list) assert len(data[i]) == 3 def test_heat_map_exception(): with pytest.raises(ValueError): HeatMap(np.array([[4, 5, 1], [3, 6, np.nan]])) with pytest.raises(Exception): HeatMap(np.array([3, 4, 5]))
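The test above constructs HeatMap only with defaults; a short sketch of the tunable parameters its rendered template refers to (min opacity, radius, blur, gradient), with made-up sample data:

import folium
from folium.plugins import HeatMap

m = folium.Map([48.0, 5.0], zoom_start=6)
data = [[48.1, 5.0, 0.8], [48.3, 5.2, 0.4], [47.9, 4.8, 1.0]]  # lat, lon, weight
HeatMap(
    data,
    min_opacity=0.3,
    radius=20,
    blur=15,
    gradient={0.4: 'blue', 0.65: 'lime', 1.0: 'red'},
).add_to(m)
m.save('heatmap.html')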
ocefpaf/folium
tests/plugins/test_heat_map.py
folium/plugins/mouse_position.py
"""Support for N26 switches.""" import logging from homeassistant.components.switch import SwitchDevice from . import DEFAULT_SCAN_INTERVAL, DOMAIN from .const import CARD_STATE_ACTIVE, CARD_STATE_BLOCKED, DATA _LOGGER = logging.getLogger(__name__) SCAN_INTERVAL = DEFAULT_SCAN_INTERVAL def setup_platform(hass, config, add_entities, discovery_info=None): """Set up the N26 switch platform.""" if discovery_info is None: return api_list = hass.data[DOMAIN][DATA] switch_entities = [] for api_data in api_list: for card in api_data.cards: switch_entities.append(N26CardSwitch(api_data, card)) add_entities(switch_entities) class N26CardSwitch(SwitchDevice): """Representation of a N26 card block/unblock switch.""" def __init__(self, api_data, card: dict): """Initialize the N26 card block/unblock switch.""" self._data = api_data self._card = card @property def unique_id(self): """Return the unique ID of the entity.""" return self._card["id"] @property def name(self) -> str: """Friendly name of the sensor.""" return "card_{}".format(self._card["id"]) @property def is_on(self): """Return true if switch is on.""" return self._card["status"] == CARD_STATE_ACTIVE def turn_on(self, **kwargs): """Block the card.""" self._data.api.unblock_card(self._card["id"]) self._card["status"] = CARD_STATE_ACTIVE def turn_off(self, **kwargs): """Unblock the card.""" self._data.api.block_card(self._card["id"]) self._card["status"] = CARD_STATE_BLOCKED def update(self): """Update the switch state.""" self._data.update_cards() self._card = self._data.card(self._card["id"], self._card)
"""The tests for the demo water_heater component.""" import unittest import pytest import voluptuous as vol from homeassistant.components import water_heater from homeassistant.setup import setup_component from homeassistant.util.unit_system import IMPERIAL_SYSTEM from tests.common import get_test_home_assistant from tests.components.water_heater import common ENTITY_WATER_HEATER = "water_heater.demo_water_heater" ENTITY_WATER_HEATER_CELSIUS = "water_heater.demo_water_heater_celsius" class TestDemowater_heater(unittest.TestCase): """Test the demo water_heater.""" def setUp(self): # pylint: disable=invalid-name """Set up things to be run when tests are started.""" self.hass = get_test_home_assistant() self.hass.config.units = IMPERIAL_SYSTEM assert setup_component( self.hass, water_heater.DOMAIN, {"water_heater": {"platform": "demo"}} ) def tearDown(self): # pylint: disable=invalid-name """Stop down everything that was started.""" self.hass.stop() def test_setup_params(self): """Test the initial parameters.""" state = self.hass.states.get(ENTITY_WATER_HEATER) assert 119 == state.attributes.get("temperature") assert "off" == state.attributes.get("away_mode") assert "eco" == state.attributes.get("operation_mode") def test_default_setup_params(self): """Test the setup with default parameters.""" state = self.hass.states.get(ENTITY_WATER_HEATER) assert 110 == state.attributes.get("min_temp") assert 140 == state.attributes.get("max_temp") def test_set_only_target_temp_bad_attr(self): """Test setting the target temperature without required attribute.""" state = self.hass.states.get(ENTITY_WATER_HEATER) assert 119 == state.attributes.get("temperature") with pytest.raises(vol.Invalid): common.set_temperature(self.hass, None, ENTITY_WATER_HEATER) self.hass.block_till_done() assert 119 == state.attributes.get("temperature") def test_set_only_target_temp(self): """Test the setting of the target temperature.""" state = self.hass.states.get(ENTITY_WATER_HEATER) assert 119 == state.attributes.get("temperature") common.set_temperature(self.hass, 110, ENTITY_WATER_HEATER) self.hass.block_till_done() state = self.hass.states.get(ENTITY_WATER_HEATER) assert 110 == state.attributes.get("temperature") def test_set_operation_bad_attr_and_state(self): """Test setting operation mode without required attribute. Also check the state. 
""" state = self.hass.states.get(ENTITY_WATER_HEATER) assert "eco" == state.attributes.get("operation_mode") assert "eco" == state.state with pytest.raises(vol.Invalid): common.set_operation_mode(self.hass, None, ENTITY_WATER_HEATER) self.hass.block_till_done() state = self.hass.states.get(ENTITY_WATER_HEATER) assert "eco" == state.attributes.get("operation_mode") assert "eco" == state.state def test_set_operation(self): """Test setting of new operation mode.""" state = self.hass.states.get(ENTITY_WATER_HEATER) assert "eco" == state.attributes.get("operation_mode") assert "eco" == state.state common.set_operation_mode(self.hass, "electric", ENTITY_WATER_HEATER) self.hass.block_till_done() state = self.hass.states.get(ENTITY_WATER_HEATER) assert "electric" == state.attributes.get("operation_mode") assert "electric" == state.state def test_set_away_mode_bad_attr(self): """Test setting the away mode without required attribute.""" state = self.hass.states.get(ENTITY_WATER_HEATER) assert "off" == state.attributes.get("away_mode") with pytest.raises(vol.Invalid): common.set_away_mode(self.hass, None, ENTITY_WATER_HEATER) self.hass.block_till_done() assert "off" == state.attributes.get("away_mode") def test_set_away_mode_on(self): """Test setting the away mode on/true.""" common.set_away_mode(self.hass, True, ENTITY_WATER_HEATER) self.hass.block_till_done() state = self.hass.states.get(ENTITY_WATER_HEATER) assert "on" == state.attributes.get("away_mode") def test_set_away_mode_off(self): """Test setting the away mode off/false.""" common.set_away_mode(self.hass, False, ENTITY_WATER_HEATER_CELSIUS) self.hass.block_till_done() state = self.hass.states.get(ENTITY_WATER_HEATER_CELSIUS) assert "off" == state.attributes.get("away_mode") def test_set_only_target_temp_with_convert(self): """Test the setting of the target temperature.""" state = self.hass.states.get(ENTITY_WATER_HEATER_CELSIUS) assert 113 == state.attributes.get("temperature") common.set_temperature(self.hass, 114, ENTITY_WATER_HEATER_CELSIUS) self.hass.block_till_done() state = self.hass.states.get(ENTITY_WATER_HEATER_CELSIUS) assert 114 == state.attributes.get("temperature")
leppa/home-assistant
tests/components/demo/test_water_heater.py
homeassistant/components/n26/switch.py
"""Support for the Dyson 360 eye vacuum cleaner robot.""" import logging from libpurecool.const import Dyson360EyeMode, PowerMode from libpurecool.dyson_360_eye import Dyson360Eye from homeassistant.components.vacuum import ( SUPPORT_BATTERY, SUPPORT_FAN_SPEED, SUPPORT_PAUSE, SUPPORT_RETURN_HOME, SUPPORT_STATUS, SUPPORT_STOP, SUPPORT_TURN_OFF, SUPPORT_TURN_ON, VacuumDevice, ) from homeassistant.helpers.icon import icon_for_battery_level from . import DYSON_DEVICES _LOGGER = logging.getLogger(__name__) ATTR_CLEAN_ID = "clean_id" ATTR_FULL_CLEAN_TYPE = "full_clean_type" ATTR_POSITION = "position" DYSON_360_EYE_DEVICES = "dyson_360_eye_devices" SUPPORT_DYSON = ( SUPPORT_TURN_ON | SUPPORT_TURN_OFF | SUPPORT_PAUSE | SUPPORT_RETURN_HOME | SUPPORT_FAN_SPEED | SUPPORT_STATUS | SUPPORT_BATTERY | SUPPORT_STOP ) def setup_platform(hass, config, add_entities, discovery_info=None): """Set up the Dyson 360 Eye robot vacuum platform.""" _LOGGER.debug("Creating new Dyson 360 Eye robot vacuum") if DYSON_360_EYE_DEVICES not in hass.data: hass.data[DYSON_360_EYE_DEVICES] = [] # Get Dyson Devices from parent component for device in [d for d in hass.data[DYSON_DEVICES] if isinstance(d, Dyson360Eye)]: dyson_entity = Dyson360EyeDevice(device) hass.data[DYSON_360_EYE_DEVICES].append(dyson_entity) add_entities(hass.data[DYSON_360_EYE_DEVICES]) return True class Dyson360EyeDevice(VacuumDevice): """Dyson 360 Eye robot vacuum device.""" def __init__(self, device): """Dyson 360 Eye robot vacuum device.""" _LOGGER.debug("Creating device %s", device.name) self._device = device async def async_added_to_hass(self): """Call when entity is added to hass.""" self.hass.async_add_job(self._device.add_message_listener, self.on_message) def on_message(self, message): """Handle a new messages that was received from the vacuum.""" _LOGGER.debug("Message received for %s device: %s", self.name, message) self.schedule_update_ha_state() @property def should_poll(self) -> bool: """Return True if entity has to be polled for state. False if entity pushes its state to HA. 
""" return False @property def name(self): """Return the name of the device.""" return self._device.name @property def status(self): """Return the status of the vacuum cleaner.""" dyson_labels = { Dyson360EyeMode.INACTIVE_CHARGING: "Stopped - Charging", Dyson360EyeMode.INACTIVE_CHARGED: "Stopped - Charged", Dyson360EyeMode.FULL_CLEAN_PAUSED: "Paused", Dyson360EyeMode.FULL_CLEAN_RUNNING: "Cleaning", Dyson360EyeMode.FULL_CLEAN_ABORTED: "Returning home", Dyson360EyeMode.FULL_CLEAN_INITIATED: "Start cleaning", Dyson360EyeMode.FAULT_USER_RECOVERABLE: "Error - device blocked", Dyson360EyeMode.FAULT_REPLACE_ON_DOCK: "Error - Replace device on dock", Dyson360EyeMode.FULL_CLEAN_FINISHED: "Finished", Dyson360EyeMode.FULL_CLEAN_NEEDS_CHARGE: "Need charging", } return dyson_labels.get(self._device.state.state, self._device.state.state) @property def battery_level(self): """Return the battery level of the vacuum cleaner.""" return self._device.state.battery_level @property def fan_speed(self): """Return the fan speed of the vacuum cleaner.""" speed_labels = {PowerMode.MAX: "Max", PowerMode.QUIET: "Quiet"} return speed_labels[self._device.state.power_mode] @property def fan_speed_list(self): """Get the list of available fan speed steps of the vacuum cleaner.""" return ["Quiet", "Max"] @property def device_state_attributes(self): """Return the specific state attributes of this vacuum cleaner.""" return {ATTR_POSITION: str(self._device.state.position)} @property def is_on(self) -> bool: """Return True if entity is on.""" return self._device.state.state in [ Dyson360EyeMode.FULL_CLEAN_INITIATED, Dyson360EyeMode.FULL_CLEAN_ABORTED, Dyson360EyeMode.FULL_CLEAN_RUNNING, ] @property def available(self) -> bool: """Return True if entity is available.""" return True @property def supported_features(self): """Flag vacuum cleaner robot features that are supported.""" return SUPPORT_DYSON @property def battery_icon(self): """Return the battery icon for the vacuum cleaner.""" charging = self._device.state.state in [Dyson360EyeMode.INACTIVE_CHARGING] return icon_for_battery_level( battery_level=self.battery_level, charging=charging ) def turn_on(self, **kwargs): """Turn the vacuum on.""" _LOGGER.debug("Turn on device %s", self.name) if self._device.state.state in [Dyson360EyeMode.FULL_CLEAN_PAUSED]: self._device.resume() else: self._device.start() def turn_off(self, **kwargs): """Turn the vacuum off and return to home.""" _LOGGER.debug("Turn off device %s", self.name) self._device.pause() def stop(self, **kwargs): """Stop the vacuum cleaner.""" _LOGGER.debug("Stop device %s", self.name) self._device.pause() def set_fan_speed(self, fan_speed, **kwargs): """Set fan speed.""" _LOGGER.debug("Set fan speed %s on device %s", fan_speed, self.name) power_modes = {"Quiet": PowerMode.QUIET, "Max": PowerMode.MAX} self._device.set_power_mode(power_modes[fan_speed]) def start_pause(self, **kwargs): """Start, pause or resume the cleaning task.""" if self._device.state.state in [Dyson360EyeMode.FULL_CLEAN_PAUSED]: _LOGGER.debug("Resume device %s", self.name) self._device.resume() elif self._device.state.state in [ Dyson360EyeMode.INACTIVE_CHARGED, Dyson360EyeMode.INACTIVE_CHARGING, ]: _LOGGER.debug("Start device %s", self.name) self._device.start() else: _LOGGER.debug("Pause device %s", self.name) self._device.pause() def return_to_base(self, **kwargs): """Set the vacuum cleaner to return to the dock.""" _LOGGER.debug("Return to base device %s", self.name) self._device.abort()
"""The tests for the demo water_heater component.""" import unittest import pytest import voluptuous as vol from homeassistant.components import water_heater from homeassistant.setup import setup_component from homeassistant.util.unit_system import IMPERIAL_SYSTEM from tests.common import get_test_home_assistant from tests.components.water_heater import common ENTITY_WATER_HEATER = "water_heater.demo_water_heater" ENTITY_WATER_HEATER_CELSIUS = "water_heater.demo_water_heater_celsius" class TestDemowater_heater(unittest.TestCase): """Test the demo water_heater.""" def setUp(self): # pylint: disable=invalid-name """Set up things to be run when tests are started.""" self.hass = get_test_home_assistant() self.hass.config.units = IMPERIAL_SYSTEM assert setup_component( self.hass, water_heater.DOMAIN, {"water_heater": {"platform": "demo"}} ) def tearDown(self): # pylint: disable=invalid-name """Stop down everything that was started.""" self.hass.stop() def test_setup_params(self): """Test the initial parameters.""" state = self.hass.states.get(ENTITY_WATER_HEATER) assert 119 == state.attributes.get("temperature") assert "off" == state.attributes.get("away_mode") assert "eco" == state.attributes.get("operation_mode") def test_default_setup_params(self): """Test the setup with default parameters.""" state = self.hass.states.get(ENTITY_WATER_HEATER) assert 110 == state.attributes.get("min_temp") assert 140 == state.attributes.get("max_temp") def test_set_only_target_temp_bad_attr(self): """Test setting the target temperature without required attribute.""" state = self.hass.states.get(ENTITY_WATER_HEATER) assert 119 == state.attributes.get("temperature") with pytest.raises(vol.Invalid): common.set_temperature(self.hass, None, ENTITY_WATER_HEATER) self.hass.block_till_done() assert 119 == state.attributes.get("temperature") def test_set_only_target_temp(self): """Test the setting of the target temperature.""" state = self.hass.states.get(ENTITY_WATER_HEATER) assert 119 == state.attributes.get("temperature") common.set_temperature(self.hass, 110, ENTITY_WATER_HEATER) self.hass.block_till_done() state = self.hass.states.get(ENTITY_WATER_HEATER) assert 110 == state.attributes.get("temperature") def test_set_operation_bad_attr_and_state(self): """Test setting operation mode without required attribute. Also check the state. 
""" state = self.hass.states.get(ENTITY_WATER_HEATER) assert "eco" == state.attributes.get("operation_mode") assert "eco" == state.state with pytest.raises(vol.Invalid): common.set_operation_mode(self.hass, None, ENTITY_WATER_HEATER) self.hass.block_till_done() state = self.hass.states.get(ENTITY_WATER_HEATER) assert "eco" == state.attributes.get("operation_mode") assert "eco" == state.state def test_set_operation(self): """Test setting of new operation mode.""" state = self.hass.states.get(ENTITY_WATER_HEATER) assert "eco" == state.attributes.get("operation_mode") assert "eco" == state.state common.set_operation_mode(self.hass, "electric", ENTITY_WATER_HEATER) self.hass.block_till_done() state = self.hass.states.get(ENTITY_WATER_HEATER) assert "electric" == state.attributes.get("operation_mode") assert "electric" == state.state def test_set_away_mode_bad_attr(self): """Test setting the away mode without required attribute.""" state = self.hass.states.get(ENTITY_WATER_HEATER) assert "off" == state.attributes.get("away_mode") with pytest.raises(vol.Invalid): common.set_away_mode(self.hass, None, ENTITY_WATER_HEATER) self.hass.block_till_done() assert "off" == state.attributes.get("away_mode") def test_set_away_mode_on(self): """Test setting the away mode on/true.""" common.set_away_mode(self.hass, True, ENTITY_WATER_HEATER) self.hass.block_till_done() state = self.hass.states.get(ENTITY_WATER_HEATER) assert "on" == state.attributes.get("away_mode") def test_set_away_mode_off(self): """Test setting the away mode off/false.""" common.set_away_mode(self.hass, False, ENTITY_WATER_HEATER_CELSIUS) self.hass.block_till_done() state = self.hass.states.get(ENTITY_WATER_HEATER_CELSIUS) assert "off" == state.attributes.get("away_mode") def test_set_only_target_temp_with_convert(self): """Test the setting of the target temperature.""" state = self.hass.states.get(ENTITY_WATER_HEATER_CELSIUS) assert 113 == state.attributes.get("temperature") common.set_temperature(self.hass, 114, ENTITY_WATER_HEATER_CELSIUS) self.hass.block_till_done() state = self.hass.states.get(ENTITY_WATER_HEATER_CELSIUS) assert 114 == state.attributes.get("temperature")
leppa/home-assistant
tests/components/demo/test_water_heater.py
homeassistant/components/dyson/vacuum.py
"""Support for monitoring an SABnzbd NZB client.""" from datetime import timedelta import logging from pysabnzbd import SabnzbdApi, SabnzbdApiException import voluptuous as vol from homeassistant.components.discovery import SERVICE_SABNZBD from homeassistant.const import ( CONF_API_KEY, CONF_HOST, CONF_NAME, CONF_PATH, CONF_PORT, CONF_SENSORS, CONF_SSL, ) from homeassistant.core import callback from homeassistant.helpers import discovery from homeassistant.helpers.aiohttp_client import async_get_clientsession import homeassistant.helpers.config_validation as cv from homeassistant.helpers.dispatcher import async_dispatcher_send from homeassistant.helpers.event import async_track_time_interval from homeassistant.util.json import load_json, save_json _LOGGER = logging.getLogger(__name__) DOMAIN = "sabnzbd" DATA_SABNZBD = "sabznbd" _CONFIGURING = {} ATTR_SPEED = "speed" BASE_URL_FORMAT = "{}://{}:{}/" CONFIG_FILE = "sabnzbd.conf" DEFAULT_HOST = "localhost" DEFAULT_NAME = "SABnzbd" DEFAULT_PORT = 8080 DEFAULT_SPEED_LIMIT = "100" DEFAULT_SSL = False UPDATE_INTERVAL = timedelta(seconds=30) SERVICE_PAUSE = "pause" SERVICE_RESUME = "resume" SERVICE_SET_SPEED = "set_speed" SIGNAL_SABNZBD_UPDATED = "sabnzbd_updated" SENSOR_TYPES = { "current_status": ["Status", None, "status"], "speed": ["Speed", "MB/s", "kbpersec"], "queue_size": ["Queue", "MB", "mb"], "queue_remaining": ["Left", "MB", "mbleft"], "disk_size": ["Disk", "GB", "diskspacetotal1"], "disk_free": ["Disk Free", "GB", "diskspace1"], "queue_count": ["Queue Count", None, "noofslots_total"], "day_size": ["Daily Total", "GB", "day_size"], "week_size": ["Weekly Total", "GB", "week_size"], "month_size": ["Monthly Total", "GB", "month_size"], "total_size": ["Total", "GB", "total_size"], } SPEED_LIMIT_SCHEMA = vol.Schema( {vol.Optional(ATTR_SPEED, default=DEFAULT_SPEED_LIMIT): cv.string} ) CONFIG_SCHEMA = vol.Schema( { DOMAIN: vol.Schema( { vol.Required(CONF_API_KEY): cv.string, vol.Optional(CONF_HOST, default=DEFAULT_HOST): cv.string, vol.Optional(CONF_PATH): cv.string, vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string, vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port, vol.Optional(CONF_SENSORS): vol.All( cv.ensure_list, [vol.In(SENSOR_TYPES)] ), vol.Optional(CONF_SSL, default=DEFAULT_SSL): cv.boolean, } ) }, extra=vol.ALLOW_EXTRA, ) async def async_check_sabnzbd(sab_api): """Check if we can reach SABnzbd.""" try: await sab_api.check_available() return True except SabnzbdApiException: _LOGGER.error("Connection to SABnzbd API failed") return False async def async_configure_sabnzbd( hass, config, use_ssl, name=DEFAULT_NAME, api_key=None ): """Try to configure Sabnzbd and request api key if configuration fails.""" host = config[CONF_HOST] port = config[CONF_PORT] web_root = config.get(CONF_PATH) uri_scheme = "https" if use_ssl else "http" base_url = BASE_URL_FORMAT.format(uri_scheme, host, port) if api_key is None: conf = await hass.async_add_job(load_json, hass.config.path(CONFIG_FILE)) api_key = conf.get(base_url, {}).get(CONF_API_KEY, "") sab_api = SabnzbdApi( base_url, api_key, web_root=web_root, session=async_get_clientsession(hass) ) if await async_check_sabnzbd(sab_api): async_setup_sabnzbd(hass, sab_api, config, name) else: async_request_configuration(hass, config, base_url, web_root) async def async_setup(hass, config): """Set up the SABnzbd component.""" async def sabnzbd_discovered(service, info): """Handle service discovery.""" ssl = info.get("properties", {}).get("https", "0") == "1" await async_configure_sabnzbd(hass, info, 
ssl) discovery.async_listen(hass, SERVICE_SABNZBD, sabnzbd_discovered) conf = config.get(DOMAIN) if conf is not None: use_ssl = conf.get(CONF_SSL) name = conf.get(CONF_NAME) api_key = conf.get(CONF_API_KEY) await async_configure_sabnzbd(hass, conf, use_ssl, name, api_key) return True @callback def async_setup_sabnzbd(hass, sab_api, config, name): """Set up SABnzbd sensors and services.""" sab_api_data = SabnzbdApiData(sab_api, name, config.get(CONF_SENSORS, {})) if config.get(CONF_SENSORS): hass.data[DATA_SABNZBD] = sab_api_data hass.async_create_task( discovery.async_load_platform(hass, "sensor", DOMAIN, {}, config) ) async def async_service_handler(service): """Handle service calls.""" if service.service == SERVICE_PAUSE: await sab_api_data.async_pause_queue() elif service.service == SERVICE_RESUME: await sab_api_data.async_resume_queue() elif service.service == SERVICE_SET_SPEED: speed = service.data.get(ATTR_SPEED) await sab_api_data.async_set_queue_speed(speed) hass.services.async_register( DOMAIN, SERVICE_PAUSE, async_service_handler, schema=vol.Schema({}) ) hass.services.async_register( DOMAIN, SERVICE_RESUME, async_service_handler, schema=vol.Schema({}) ) hass.services.async_register( DOMAIN, SERVICE_SET_SPEED, async_service_handler, schema=SPEED_LIMIT_SCHEMA ) async def async_update_sabnzbd(now): """Refresh SABnzbd queue data.""" try: await sab_api.refresh_data() async_dispatcher_send(hass, SIGNAL_SABNZBD_UPDATED, None) except SabnzbdApiException as err: _LOGGER.error(err) async_track_time_interval(hass, async_update_sabnzbd, UPDATE_INTERVAL) @callback def async_request_configuration(hass, config, host, web_root): """Request configuration steps from the user.""" configurator = hass.components.configurator # We got an error if this method is called while we are configuring if host in _CONFIGURING: configurator.async_notify_errors( _CONFIGURING[host], "Failed to register, please try again." 
) return async def async_configuration_callback(data): """Handle configuration changes.""" api_key = data.get(CONF_API_KEY) sab_api = SabnzbdApi( host, api_key, web_root=web_root, session=async_get_clientsession(hass) ) if not await async_check_sabnzbd(sab_api): return def success(): """Signal successful setup.""" conf = load_json(hass.config.path(CONFIG_FILE)) conf[host] = {CONF_API_KEY: api_key} save_json(hass.config.path(CONFIG_FILE), conf) req_config = _CONFIGURING.pop(host) configurator.request_done(req_config) hass.async_add_job(success) async_setup_sabnzbd(hass, sab_api, config, config.get(CONF_NAME, DEFAULT_NAME)) _CONFIGURING[host] = configurator.async_request_config( DEFAULT_NAME, async_configuration_callback, description="Enter the API Key", submit_caption="Confirm", fields=[{"id": CONF_API_KEY, "name": "API Key", "type": ""}], ) class SabnzbdApiData: """Class for storing/refreshing sabnzbd api queue data.""" def __init__(self, sab_api, name, sensors): """Initialize component.""" self.sab_api = sab_api self.name = name self.sensors = sensors async def async_pause_queue(self): """Pause Sabnzbd queue.""" try: return await self.sab_api.pause_queue() except SabnzbdApiException as err: _LOGGER.error(err) return False async def async_resume_queue(self): """Resume Sabnzbd queue.""" try: return await self.sab_api.resume_queue() except SabnzbdApiException as err: _LOGGER.error(err) return False async def async_set_queue_speed(self, limit): """Set speed limit for the Sabnzbd queue.""" try: return await self.sab_api.set_speed_limit(limit) except SabnzbdApiException as err: _LOGGER.error(err) return False def get_queue_field(self, field): """Return the value for the given field from the Sabnzbd queue.""" return self.sab_api.queue.get(field)
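Once the component above has registered its services, they can be driven from an async test or script; a minimal sketch, with an arbitrary speed value:

async def exercise_sabnzbd_services(hass):
    """Call the pause/set_speed/resume services registered above."""
    await hass.services.async_call("sabnzbd", "pause", {}, blocking=True)
    await hass.services.async_call("sabnzbd", "set_speed", {"speed": "50"}, blocking=True)
    await hass.services.async_call("sabnzbd", "resume", {}, blocking=True)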
"""The tests for the demo water_heater component.""" import unittest import pytest import voluptuous as vol from homeassistant.components import water_heater from homeassistant.setup import setup_component from homeassistant.util.unit_system import IMPERIAL_SYSTEM from tests.common import get_test_home_assistant from tests.components.water_heater import common ENTITY_WATER_HEATER = "water_heater.demo_water_heater" ENTITY_WATER_HEATER_CELSIUS = "water_heater.demo_water_heater_celsius" class TestDemowater_heater(unittest.TestCase): """Test the demo water_heater.""" def setUp(self): # pylint: disable=invalid-name """Set up things to be run when tests are started.""" self.hass = get_test_home_assistant() self.hass.config.units = IMPERIAL_SYSTEM assert setup_component( self.hass, water_heater.DOMAIN, {"water_heater": {"platform": "demo"}} ) def tearDown(self): # pylint: disable=invalid-name """Stop down everything that was started.""" self.hass.stop() def test_setup_params(self): """Test the initial parameters.""" state = self.hass.states.get(ENTITY_WATER_HEATER) assert 119 == state.attributes.get("temperature") assert "off" == state.attributes.get("away_mode") assert "eco" == state.attributes.get("operation_mode") def test_default_setup_params(self): """Test the setup with default parameters.""" state = self.hass.states.get(ENTITY_WATER_HEATER) assert 110 == state.attributes.get("min_temp") assert 140 == state.attributes.get("max_temp") def test_set_only_target_temp_bad_attr(self): """Test setting the target temperature without required attribute.""" state = self.hass.states.get(ENTITY_WATER_HEATER) assert 119 == state.attributes.get("temperature") with pytest.raises(vol.Invalid): common.set_temperature(self.hass, None, ENTITY_WATER_HEATER) self.hass.block_till_done() assert 119 == state.attributes.get("temperature") def test_set_only_target_temp(self): """Test the setting of the target temperature.""" state = self.hass.states.get(ENTITY_WATER_HEATER) assert 119 == state.attributes.get("temperature") common.set_temperature(self.hass, 110, ENTITY_WATER_HEATER) self.hass.block_till_done() state = self.hass.states.get(ENTITY_WATER_HEATER) assert 110 == state.attributes.get("temperature") def test_set_operation_bad_attr_and_state(self): """Test setting operation mode without required attribute. Also check the state. 
""" state = self.hass.states.get(ENTITY_WATER_HEATER) assert "eco" == state.attributes.get("operation_mode") assert "eco" == state.state with pytest.raises(vol.Invalid): common.set_operation_mode(self.hass, None, ENTITY_WATER_HEATER) self.hass.block_till_done() state = self.hass.states.get(ENTITY_WATER_HEATER) assert "eco" == state.attributes.get("operation_mode") assert "eco" == state.state def test_set_operation(self): """Test setting of new operation mode.""" state = self.hass.states.get(ENTITY_WATER_HEATER) assert "eco" == state.attributes.get("operation_mode") assert "eco" == state.state common.set_operation_mode(self.hass, "electric", ENTITY_WATER_HEATER) self.hass.block_till_done() state = self.hass.states.get(ENTITY_WATER_HEATER) assert "electric" == state.attributes.get("operation_mode") assert "electric" == state.state def test_set_away_mode_bad_attr(self): """Test setting the away mode without required attribute.""" state = self.hass.states.get(ENTITY_WATER_HEATER) assert "off" == state.attributes.get("away_mode") with pytest.raises(vol.Invalid): common.set_away_mode(self.hass, None, ENTITY_WATER_HEATER) self.hass.block_till_done() assert "off" == state.attributes.get("away_mode") def test_set_away_mode_on(self): """Test setting the away mode on/true.""" common.set_away_mode(self.hass, True, ENTITY_WATER_HEATER) self.hass.block_till_done() state = self.hass.states.get(ENTITY_WATER_HEATER) assert "on" == state.attributes.get("away_mode") def test_set_away_mode_off(self): """Test setting the away mode off/false.""" common.set_away_mode(self.hass, False, ENTITY_WATER_HEATER_CELSIUS) self.hass.block_till_done() state = self.hass.states.get(ENTITY_WATER_HEATER_CELSIUS) assert "off" == state.attributes.get("away_mode") def test_set_only_target_temp_with_convert(self): """Test the setting of the target temperature.""" state = self.hass.states.get(ENTITY_WATER_HEATER_CELSIUS) assert 113 == state.attributes.get("temperature") common.set_temperature(self.hass, 114, ENTITY_WATER_HEATER_CELSIUS) self.hass.block_till_done() state = self.hass.states.get(ENTITY_WATER_HEATER_CELSIUS) assert 114 == state.attributes.get("temperature")
leppa/home-assistant
tests/components/demo/test_water_heater.py
homeassistant/components/sabnzbd/__init__.py
"""Support for Satel Integra devices.""" import collections import logging from satel_integra.satel_integra import AsyncSatel import voluptuous as vol from homeassistant.const import CONF_HOST, CONF_PORT, EVENT_HOMEASSISTANT_STOP from homeassistant.core import callback from homeassistant.helpers import config_validation as cv from homeassistant.helpers.discovery import async_load_platform from homeassistant.helpers.dispatcher import async_dispatcher_send DEFAULT_ALARM_NAME = "satel_integra" DEFAULT_PORT = 7094 DEFAULT_CONF_ARM_HOME_MODE = 1 DEFAULT_DEVICE_PARTITION = 1 DEFAULT_ZONE_TYPE = "motion" _LOGGER = logging.getLogger(__name__) DOMAIN = "satel_integra" DATA_SATEL = "satel_integra" CONF_DEVICE_CODE = "code" CONF_DEVICE_PARTITIONS = "partitions" CONF_ARM_HOME_MODE = "arm_home_mode" CONF_ZONE_NAME = "name" CONF_ZONE_TYPE = "type" CONF_ZONES = "zones" CONF_OUTPUTS = "outputs" CONF_SWITCHABLE_OUTPUTS = "switchable_outputs" ZONES = "zones" SIGNAL_PANEL_MESSAGE = "satel_integra.panel_message" SIGNAL_PANEL_ARM_AWAY = "satel_integra.panel_arm_away" SIGNAL_PANEL_ARM_HOME = "satel_integra.panel_arm_home" SIGNAL_PANEL_DISARM = "satel_integra.panel_disarm" SIGNAL_ZONES_UPDATED = "satel_integra.zones_updated" SIGNAL_OUTPUTS_UPDATED = "satel_integra.outputs_updated" ZONE_SCHEMA = vol.Schema( { vol.Required(CONF_ZONE_NAME): cv.string, vol.Optional(CONF_ZONE_TYPE, default=DEFAULT_ZONE_TYPE): cv.string, } ) EDITABLE_OUTPUT_SCHEMA = vol.Schema({vol.Required(CONF_ZONE_NAME): cv.string}) PARTITION_SCHEMA = vol.Schema( { vol.Required(CONF_ZONE_NAME): cv.string, vol.Optional(CONF_ARM_HOME_MODE, default=DEFAULT_CONF_ARM_HOME_MODE): vol.In( [1, 2, 3] ), } ) def is_alarm_code_necessary(value): """Check if alarm code must be configured.""" if value.get(CONF_SWITCHABLE_OUTPUTS) and CONF_DEVICE_CODE not in value: raise vol.Invalid( "You need to specify alarm " " code to use switchable_outputs" ) return value CONFIG_SCHEMA = vol.Schema( { DOMAIN: vol.All( { vol.Required(CONF_HOST): cv.string, vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port, vol.Optional(CONF_DEVICE_CODE): cv.string, vol.Optional(CONF_DEVICE_PARTITIONS, default={}): { vol.Coerce(int): PARTITION_SCHEMA }, vol.Optional(CONF_ZONES, default={}): {vol.Coerce(int): ZONE_SCHEMA}, vol.Optional(CONF_OUTPUTS, default={}): {vol.Coerce(int): ZONE_SCHEMA}, vol.Optional(CONF_SWITCHABLE_OUTPUTS, default={}): { vol.Coerce(int): EDITABLE_OUTPUT_SCHEMA }, }, is_alarm_code_necessary, ) }, extra=vol.ALLOW_EXTRA, ) async def async_setup(hass, config): """Set up the Satel Integra component.""" conf = config.get(DOMAIN) zones = conf.get(CONF_ZONES) outputs = conf.get(CONF_OUTPUTS) switchable_outputs = conf.get(CONF_SWITCHABLE_OUTPUTS) host = conf.get(CONF_HOST) port = conf.get(CONF_PORT) partitions = conf.get(CONF_DEVICE_PARTITIONS) monitored_outputs = collections.OrderedDict( list(outputs.items()) + list(switchable_outputs.items()) ) controller = AsyncSatel(host, port, hass.loop, zones, monitored_outputs, partitions) hass.data[DATA_SATEL] = controller result = await controller.connect() if not result: return False async def _close(): controller.close() hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, _close()) _LOGGER.debug("Arm home config: %s, mode: %s ", conf, conf.get(CONF_ARM_HOME_MODE)) hass.async_create_task( async_load_platform(hass, "alarm_control_panel", DOMAIN, conf, config) ) hass.async_create_task( async_load_platform( hass, "binary_sensor", DOMAIN, {CONF_ZONES: zones, CONF_OUTPUTS: outputs}, config, ) ) hass.async_create_task( 
async_load_platform( hass, "switch", DOMAIN, { CONF_SWITCHABLE_OUTPUTS: switchable_outputs, CONF_DEVICE_CODE: conf.get(CONF_DEVICE_CODE), }, config, ) ) @callback def alarm_status_update_callback(): """Send status update received from alarm to home assistant.""" _LOGGER.debug("Sending request to update panel state") async_dispatcher_send(hass, SIGNAL_PANEL_MESSAGE) @callback def zones_update_callback(status): """Update zone objects as per notification from the alarm.""" _LOGGER.debug("Zones callback, status: %s", status) async_dispatcher_send(hass, SIGNAL_ZONES_UPDATED, status[ZONES]) @callback def outputs_update_callback(status): """Update zone objects as per notification from the alarm.""" _LOGGER.debug("Outputs updated callback , status: %s", status) async_dispatcher_send(hass, SIGNAL_OUTPUTS_UPDATED, status["outputs"]) # Create a task instead of adding a tracking job, since this task will # run until the connection to satel_integra is closed. hass.loop.create_task(controller.keep_alive()) hass.loop.create_task( controller.monitor_status( alarm_status_update_callback, zones_update_callback, outputs_update_callback ) ) return True
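A sketch of how the CONFIG_SCHEMA above validates a configuration dict; the host, zone, and output entries are made up, and the code key is included because switchable_outputs is non-empty:

from homeassistant.components.satel_integra import CONFIG_SCHEMA, DOMAIN

config = CONFIG_SCHEMA(
    {
        DOMAIN: {
            "host": "192.168.1.100",
            "port": 7094,
            "code": "1234",  # required once switchable_outputs is set
            "partitions": {1: {"name": "House"}},
            "zones": {1: {"name": "Front door", "type": "opening"}},
            "switchable_outputs": {3: {"name": "Gate"}},
        }
    }
)
assert config[DOMAIN]["zones"][1]["type"] == "opening"
assert config[DOMAIN]["partitions"][1]["arm_home_mode"] == 1  # default applied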
"""The tests for the demo water_heater component.""" import unittest import pytest import voluptuous as vol from homeassistant.components import water_heater from homeassistant.setup import setup_component from homeassistant.util.unit_system import IMPERIAL_SYSTEM from tests.common import get_test_home_assistant from tests.components.water_heater import common ENTITY_WATER_HEATER = "water_heater.demo_water_heater" ENTITY_WATER_HEATER_CELSIUS = "water_heater.demo_water_heater_celsius" class TestDemowater_heater(unittest.TestCase): """Test the demo water_heater.""" def setUp(self): # pylint: disable=invalid-name """Set up things to be run when tests are started.""" self.hass = get_test_home_assistant() self.hass.config.units = IMPERIAL_SYSTEM assert setup_component( self.hass, water_heater.DOMAIN, {"water_heater": {"platform": "demo"}} ) def tearDown(self): # pylint: disable=invalid-name """Stop down everything that was started.""" self.hass.stop() def test_setup_params(self): """Test the initial parameters.""" state = self.hass.states.get(ENTITY_WATER_HEATER) assert 119 == state.attributes.get("temperature") assert "off" == state.attributes.get("away_mode") assert "eco" == state.attributes.get("operation_mode") def test_default_setup_params(self): """Test the setup with default parameters.""" state = self.hass.states.get(ENTITY_WATER_HEATER) assert 110 == state.attributes.get("min_temp") assert 140 == state.attributes.get("max_temp") def test_set_only_target_temp_bad_attr(self): """Test setting the target temperature without required attribute.""" state = self.hass.states.get(ENTITY_WATER_HEATER) assert 119 == state.attributes.get("temperature") with pytest.raises(vol.Invalid): common.set_temperature(self.hass, None, ENTITY_WATER_HEATER) self.hass.block_till_done() assert 119 == state.attributes.get("temperature") def test_set_only_target_temp(self): """Test the setting of the target temperature.""" state = self.hass.states.get(ENTITY_WATER_HEATER) assert 119 == state.attributes.get("temperature") common.set_temperature(self.hass, 110, ENTITY_WATER_HEATER) self.hass.block_till_done() state = self.hass.states.get(ENTITY_WATER_HEATER) assert 110 == state.attributes.get("temperature") def test_set_operation_bad_attr_and_state(self): """Test setting operation mode without required attribute. Also check the state. 
""" state = self.hass.states.get(ENTITY_WATER_HEATER) assert "eco" == state.attributes.get("operation_mode") assert "eco" == state.state with pytest.raises(vol.Invalid): common.set_operation_mode(self.hass, None, ENTITY_WATER_HEATER) self.hass.block_till_done() state = self.hass.states.get(ENTITY_WATER_HEATER) assert "eco" == state.attributes.get("operation_mode") assert "eco" == state.state def test_set_operation(self): """Test setting of new operation mode.""" state = self.hass.states.get(ENTITY_WATER_HEATER) assert "eco" == state.attributes.get("operation_mode") assert "eco" == state.state common.set_operation_mode(self.hass, "electric", ENTITY_WATER_HEATER) self.hass.block_till_done() state = self.hass.states.get(ENTITY_WATER_HEATER) assert "electric" == state.attributes.get("operation_mode") assert "electric" == state.state def test_set_away_mode_bad_attr(self): """Test setting the away mode without required attribute.""" state = self.hass.states.get(ENTITY_WATER_HEATER) assert "off" == state.attributes.get("away_mode") with pytest.raises(vol.Invalid): common.set_away_mode(self.hass, None, ENTITY_WATER_HEATER) self.hass.block_till_done() assert "off" == state.attributes.get("away_mode") def test_set_away_mode_on(self): """Test setting the away mode on/true.""" common.set_away_mode(self.hass, True, ENTITY_WATER_HEATER) self.hass.block_till_done() state = self.hass.states.get(ENTITY_WATER_HEATER) assert "on" == state.attributes.get("away_mode") def test_set_away_mode_off(self): """Test setting the away mode off/false.""" common.set_away_mode(self.hass, False, ENTITY_WATER_HEATER_CELSIUS) self.hass.block_till_done() state = self.hass.states.get(ENTITY_WATER_HEATER_CELSIUS) assert "off" == state.attributes.get("away_mode") def test_set_only_target_temp_with_convert(self): """Test the setting of the target temperature.""" state = self.hass.states.get(ENTITY_WATER_HEATER_CELSIUS) assert 113 == state.attributes.get("temperature") common.set_temperature(self.hass, 114, ENTITY_WATER_HEATER_CELSIUS) self.hass.block_till_done() state = self.hass.states.get(ENTITY_WATER_HEATER_CELSIUS) assert 114 == state.attributes.get("temperature")
leppa/home-assistant
tests/components/demo/test_water_heater.py
homeassistant/components/satel_integra/__init__.py
"""Mail (SMTP) notification service.""" from email.mime.application import MIMEApplication from email.mime.image import MIMEImage from email.mime.multipart import MIMEMultipart from email.mime.text import MIMEText import email.utils import logging import os import smtplib import voluptuous as vol from homeassistant.components.notify import ( ATTR_DATA, ATTR_TITLE, ATTR_TITLE_DEFAULT, PLATFORM_SCHEMA, BaseNotificationService, ) from homeassistant.const import ( CONF_PASSWORD, CONF_PORT, CONF_RECIPIENT, CONF_SENDER, CONF_TIMEOUT, CONF_USERNAME, ) import homeassistant.helpers.config_validation as cv import homeassistant.util.dt as dt_util _LOGGER = logging.getLogger(__name__) ATTR_IMAGES = "images" # optional embedded image file attachments ATTR_HTML = "html" CONF_ENCRYPTION = "encryption" CONF_DEBUG = "debug" CONF_SERVER = "server" CONF_SENDER_NAME = "sender_name" DEFAULT_HOST = "localhost" DEFAULT_PORT = 587 DEFAULT_TIMEOUT = 5 DEFAULT_DEBUG = False DEFAULT_ENCRYPTION = "starttls" ENCRYPTION_OPTIONS = ["tls", "starttls", "none"] # pylint: disable=no-value-for-parameter PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend( { vol.Required(CONF_RECIPIENT): vol.All(cv.ensure_list, [vol.Email()]), vol.Required(CONF_SENDER): vol.Email(), vol.Optional(CONF_SERVER, default=DEFAULT_HOST): cv.string, vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port, vol.Optional(CONF_TIMEOUT, default=DEFAULT_TIMEOUT): cv.positive_int, vol.Optional(CONF_ENCRYPTION, default=DEFAULT_ENCRYPTION): vol.In( ENCRYPTION_OPTIONS ), vol.Optional(CONF_USERNAME): cv.string, vol.Optional(CONF_PASSWORD): cv.string, vol.Optional(CONF_SENDER_NAME): cv.string, vol.Optional(CONF_DEBUG, default=DEFAULT_DEBUG): cv.boolean, } ) def get_service(hass, config, discovery_info=None): """Get the mail notification service.""" mail_service = MailNotificationService( config.get(CONF_SERVER), config.get(CONF_PORT), config.get(CONF_TIMEOUT), config.get(CONF_SENDER), config.get(CONF_ENCRYPTION), config.get(CONF_USERNAME), config.get(CONF_PASSWORD), config.get(CONF_RECIPIENT), config.get(CONF_SENDER_NAME), config.get(CONF_DEBUG), ) if mail_service.connection_is_valid(): return mail_service return None class MailNotificationService(BaseNotificationService): """Implement the notification service for E-mail messages.""" def __init__( self, server, port, timeout, sender, encryption, username, password, recipients, sender_name, debug, ): """Initialize the SMTP service.""" self._server = server self._port = port self._timeout = timeout self._sender = sender self.encryption = encryption self.username = username self.password = password self.recipients = recipients self._sender_name = sender_name self.debug = debug self.tries = 2 def connect(self): """Connect/authenticate to SMTP Server.""" if self.encryption == "tls": mail = smtplib.SMTP_SSL(self._server, self._port, timeout=self._timeout) else: mail = smtplib.SMTP(self._server, self._port, timeout=self._timeout) mail.set_debuglevel(self.debug) mail.ehlo_or_helo_if_needed() if self.encryption == "starttls": mail.starttls() mail.ehlo() if self.username and self.password: mail.login(self.username, self.password) return mail def connection_is_valid(self): """Check for valid config, verify connectivity.""" server = None try: server = self.connect() except (smtplib.socket.gaierror, ConnectionRefusedError): _LOGGER.exception( "SMTP server not found or refused connection (%s:%s). 
" "Please check the IP address, hostname, and availability of your SMTP server.", self._server, self._port, ) except smtplib.SMTPAuthenticationError: _LOGGER.exception( "Login not possible. " "Please check your setting and/or your credentials" ) return False finally: if server: server.quit() return True def send_message(self, message="", **kwargs): """ Build and send a message to a user. Will send plain text normally, or will build a multipart HTML message with inline image attachments if images config is defined, or will build a multipart HTML if html config is defined. """ subject = kwargs.get(ATTR_TITLE, ATTR_TITLE_DEFAULT) data = kwargs.get(ATTR_DATA) if data: if ATTR_HTML in data: msg = _build_html_msg( message, data[ATTR_HTML], images=data.get(ATTR_IMAGES, []) ) else: msg = _build_multipart_msg(message, images=data.get(ATTR_IMAGES, [])) else: msg = _build_text_msg(message) msg["Subject"] = subject msg["To"] = ",".join(self.recipients) if self._sender_name: msg["From"] = f"{self._sender_name} <{self._sender}>" else: msg["From"] = self._sender msg["X-Mailer"] = "HomeAssistant" msg["Date"] = email.utils.format_datetime(dt_util.now()) msg["Message-Id"] = email.utils.make_msgid() return self._send_email(msg) def _send_email(self, msg): """Send the message.""" mail = self.connect() for _ in range(self.tries): try: mail.sendmail(self._sender, self.recipients, msg.as_string()) break except smtplib.SMTPServerDisconnected: _LOGGER.warning( "SMTPServerDisconnected sending mail: retrying connection" ) mail.quit() mail = self.connect() except smtplib.SMTPException: _LOGGER.warning("SMTPException sending mail: retrying connection") mail.quit() mail = self.connect() mail.quit() def _build_text_msg(message): """Build plaintext email.""" _LOGGER.debug("Building plain text email") return MIMEText(message) def _build_multipart_msg(message, images): """Build Multipart message with in-line images.""" _LOGGER.debug("Building multipart email with embedded attachment(s)") msg = MIMEMultipart("related") msg_alt = MIMEMultipart("alternative") msg.attach(msg_alt) body_txt = MIMEText(message) msg_alt.attach(body_txt) body_text = [f"<p>{message}</p><br>"] for atch_num, atch_name in enumerate(images): cid = f"image{atch_num}" body_text.append(f'<img src="cid:{cid}"><br>') try: with open(atch_name, "rb") as attachment_file: file_bytes = attachment_file.read() try: attachment = MIMEImage(file_bytes) msg.attach(attachment) attachment.add_header("Content-ID", f"<{cid}>") except TypeError: _LOGGER.warning( "Attachment %s has an unknown MIME type. " "Falling back to file", atch_name, ) attachment = MIMEApplication(file_bytes, Name=atch_name) attachment["Content-Disposition"] = ( "attachment; " 'filename="%s"' % atch_name ) msg.attach(attachment) except FileNotFoundError: _LOGGER.warning("Attachment %s not found. 
Skipping", atch_name) body_html = MIMEText("".join(body_text), "html") msg_alt.attach(body_html) return msg def _build_html_msg(text, html, images): """Build Multipart message with in-line images and rich HTML (UTF-8).""" _LOGGER.debug("Building HTML rich email") msg = MIMEMultipart("related") alternative = MIMEMultipart("alternative") alternative.attach(MIMEText(text, _charset="utf-8")) alternative.attach(MIMEText(html, ATTR_HTML, _charset="utf-8")) msg.attach(alternative) for atch_num, atch_name in enumerate(images): name = os.path.basename(atch_name) try: with open(atch_name, "rb") as attachment_file: attachment = MIMEImage(attachment_file.read(), filename=name) msg.attach(attachment) attachment.add_header("Content-ID", f"<{name}>") except FileNotFoundError: _LOGGER.warning( "Attachment %s [#%s] not found. Skipping", atch_name, atch_num ) return msg
"""The tests for the demo water_heater component.""" import unittest import pytest import voluptuous as vol from homeassistant.components import water_heater from homeassistant.setup import setup_component from homeassistant.util.unit_system import IMPERIAL_SYSTEM from tests.common import get_test_home_assistant from tests.components.water_heater import common ENTITY_WATER_HEATER = "water_heater.demo_water_heater" ENTITY_WATER_HEATER_CELSIUS = "water_heater.demo_water_heater_celsius" class TestDemowater_heater(unittest.TestCase): """Test the demo water_heater.""" def setUp(self): # pylint: disable=invalid-name """Set up things to be run when tests are started.""" self.hass = get_test_home_assistant() self.hass.config.units = IMPERIAL_SYSTEM assert setup_component( self.hass, water_heater.DOMAIN, {"water_heater": {"platform": "demo"}} ) def tearDown(self): # pylint: disable=invalid-name """Stop down everything that was started.""" self.hass.stop() def test_setup_params(self): """Test the initial parameters.""" state = self.hass.states.get(ENTITY_WATER_HEATER) assert 119 == state.attributes.get("temperature") assert "off" == state.attributes.get("away_mode") assert "eco" == state.attributes.get("operation_mode") def test_default_setup_params(self): """Test the setup with default parameters.""" state = self.hass.states.get(ENTITY_WATER_HEATER) assert 110 == state.attributes.get("min_temp") assert 140 == state.attributes.get("max_temp") def test_set_only_target_temp_bad_attr(self): """Test setting the target temperature without required attribute.""" state = self.hass.states.get(ENTITY_WATER_HEATER) assert 119 == state.attributes.get("temperature") with pytest.raises(vol.Invalid): common.set_temperature(self.hass, None, ENTITY_WATER_HEATER) self.hass.block_till_done() assert 119 == state.attributes.get("temperature") def test_set_only_target_temp(self): """Test the setting of the target temperature.""" state = self.hass.states.get(ENTITY_WATER_HEATER) assert 119 == state.attributes.get("temperature") common.set_temperature(self.hass, 110, ENTITY_WATER_HEATER) self.hass.block_till_done() state = self.hass.states.get(ENTITY_WATER_HEATER) assert 110 == state.attributes.get("temperature") def test_set_operation_bad_attr_and_state(self): """Test setting operation mode without required attribute. Also check the state. 
""" state = self.hass.states.get(ENTITY_WATER_HEATER) assert "eco" == state.attributes.get("operation_mode") assert "eco" == state.state with pytest.raises(vol.Invalid): common.set_operation_mode(self.hass, None, ENTITY_WATER_HEATER) self.hass.block_till_done() state = self.hass.states.get(ENTITY_WATER_HEATER) assert "eco" == state.attributes.get("operation_mode") assert "eco" == state.state def test_set_operation(self): """Test setting of new operation mode.""" state = self.hass.states.get(ENTITY_WATER_HEATER) assert "eco" == state.attributes.get("operation_mode") assert "eco" == state.state common.set_operation_mode(self.hass, "electric", ENTITY_WATER_HEATER) self.hass.block_till_done() state = self.hass.states.get(ENTITY_WATER_HEATER) assert "electric" == state.attributes.get("operation_mode") assert "electric" == state.state def test_set_away_mode_bad_attr(self): """Test setting the away mode without required attribute.""" state = self.hass.states.get(ENTITY_WATER_HEATER) assert "off" == state.attributes.get("away_mode") with pytest.raises(vol.Invalid): common.set_away_mode(self.hass, None, ENTITY_WATER_HEATER) self.hass.block_till_done() assert "off" == state.attributes.get("away_mode") def test_set_away_mode_on(self): """Test setting the away mode on/true.""" common.set_away_mode(self.hass, True, ENTITY_WATER_HEATER) self.hass.block_till_done() state = self.hass.states.get(ENTITY_WATER_HEATER) assert "on" == state.attributes.get("away_mode") def test_set_away_mode_off(self): """Test setting the away mode off/false.""" common.set_away_mode(self.hass, False, ENTITY_WATER_HEATER_CELSIUS) self.hass.block_till_done() state = self.hass.states.get(ENTITY_WATER_HEATER_CELSIUS) assert "off" == state.attributes.get("away_mode") def test_set_only_target_temp_with_convert(self): """Test the setting of the target temperature.""" state = self.hass.states.get(ENTITY_WATER_HEATER_CELSIUS) assert 113 == state.attributes.get("temperature") common.set_temperature(self.hass, 114, ENTITY_WATER_HEATER_CELSIUS) self.hass.block_till_done() state = self.hass.states.get(ENTITY_WATER_HEATER_CELSIUS) assert 114 == state.attributes.get("temperature")
leppa/home-assistant
tests/components/demo/test_water_heater.py
homeassistant/components/smtp/notify.py