Dataset schema (one record per pull request):

| column | type | length (min–max) |
|---|---|---|
| title | string | 1–185 |
| diff | string | 0–32.2M |
| body | string | 0–123k |
| url | string | 57–58 |
| created_at | string | 20–20 |
| closed_at | string | 20–20 |
| merged_at | string | 20–20 |
| updated_at | string | 20–20 |
BUG: to_json - prevent various segfault conditions (GH14256)
diff --git a/doc/source/whatsnew/v0.21.0.txt b/doc/source/whatsnew/v0.21.0.txt index d6bdf153e0368..f4fbbd3596b57 100644 --- a/doc/source/whatsnew/v0.21.0.txt +++ b/doc/source/whatsnew/v0.21.0.txt @@ -956,6 +956,7 @@ I/O - Bug in :meth:`DataFrame.to_html` with ``notebook=True`` where DataFrames with named indices or non-MultiIndex indices had undesired horizontal or vertical alignment for column or row labels, respectively (:issue:`16792`) - Bug in :meth:`DataFrame.to_html` in which there was no validation of the ``justify`` parameter (:issue:`17527`) - Bug in :func:`HDFStore.select` when reading a contiguous mixed-data table featuring VLArray (:issue:`17021`) +- Bug in :func:`to_json` where several conditions (including objects with unprintable symbols, objects with deep recursion, overlong labels) caused segfaults instead of raising the appropriate exception (:issue:`14256`) Plotting ^^^^^^^^ @@ -1033,3 +1034,4 @@ Other ^^^^^ - Bug where some inplace operators were not being wrapped and produced a copy when invoked (:issue:`12962`) - Bug in :func:`eval` where the ``inplace`` parameter was being incorrectly handled (:issue:`16732`) + diff --git a/pandas/_libs/src/ujson/lib/ultrajson.h b/pandas/_libs/src/ujson/lib/ultrajson.h index 4f51fa8b3eb38..159645b4007e1 100644 --- a/pandas/_libs/src/ujson/lib/ultrajson.h +++ b/pandas/_libs/src/ujson/lib/ultrajson.h @@ -307,4 +307,11 @@ EXPORTFUNCTION JSOBJ JSON_DecodeObject(JSONObjectDecoder *dec, const char *buffer, size_t cbBuffer); EXPORTFUNCTION void encode(JSOBJ, JSONObjectEncoder *, const char *, size_t); +#define Buffer_Reserve(__enc, __len) \ + if ((size_t)((__enc)->end - (__enc)->offset) < (size_t)(__len)) { \ + Buffer_Realloc((__enc), (__len)); \ + } + +void Buffer_Realloc(JSONObjectEncoder *enc, size_t cbNeeded); + #endif // PANDAS__LIBS_SRC_UJSON_LIB_ULTRAJSON_H_ diff --git a/pandas/_libs/src/ujson/lib/ultrajsonenc.c b/pandas/_libs/src/ujson/lib/ultrajsonenc.c index 6bf2297749006..2d6c823a45515 100644 --- a/pandas/_libs/src/ujson/lib/ultrajsonenc.c +++ b/pandas/_libs/src/ujson/lib/ultrajsonenc.c @@ -714,11 +714,6 @@ int Buffer_EscapeStringValidated(JSOBJ obj, JSONObjectEncoder *enc, } } -#define Buffer_Reserve(__enc, __len) \ - if ((size_t)((__enc)->end - (__enc)->offset) < (size_t)(__len)) { \ - Buffer_Realloc((__enc), (__len)); \ - } - #define Buffer_AppendCharUnchecked(__enc, __chr) *((__enc)->offset++) = __chr; FASTCALL_ATTR INLINE_PREFIX void FASTCALL_MSVC strreverse(char *begin, @@ -976,6 +971,7 @@ void encode(JSOBJ obj, JSONObjectEncoder *enc, const char *name, } enc->iterEnd(obj, &tc); + Buffer_Reserve(enc, 2); Buffer_AppendCharUnchecked(enc, ']'); break; } @@ -1003,6 +999,7 @@ void encode(JSOBJ obj, JSONObjectEncoder *enc, const char *name, } enc->iterEnd(obj, &tc); + Buffer_Reserve(enc, 2); Buffer_AppendCharUnchecked(enc, '}'); break; } diff --git a/pandas/_libs/src/ujson/python/objToJSON.c b/pandas/_libs/src/ujson/python/objToJSON.c index 1ee862b54cf0b..ae7854dfc1427 100644 --- a/pandas/_libs/src/ujson/python/objToJSON.c +++ b/pandas/_libs/src/ujson/python/objToJSON.c @@ -783,6 +783,7 @@ static void NpyArr_getLabel(JSOBJ obj, JSONTypeContext *tc, size_t *outLen, JSONObjectEncoder *enc = (JSONObjectEncoder *)tc->encoder; PRINTMARK(); *outLen = strlen(labels[idx]); + Buffer_Reserve(enc, *outLen); memcpy(enc->offset, labels[idx], sizeof(char) * (*outLen)); enc->offset += *outLen; *outLen = 0; @@ -879,7 +880,7 @@ int PdBlock_iterNext(JSOBJ obj, JSONTypeContext *tc) { NpyArrContext *npyarr; PRINTMARK(); - if (PyErr_Occurred()) { + 
if (PyErr_Occurred() || ((JSONObjectEncoder *)tc->encoder)->errorMsg) { return 0; } @@ -1224,6 +1225,10 @@ int Dir_iterNext(JSOBJ _obj, JSONTypeContext *tc) { PyObject *attrName; char *attrStr; + if (PyErr_Occurred() || ((JSONObjectEncoder *)tc->encoder)->errorMsg) { + return 0; + } + if (itemValue) { Py_DECREF(GET_TC(tc)->itemValue); GET_TC(tc)->itemValue = itemValue = NULL; diff --git a/pandas/tests/io/json/test_pandas.py b/pandas/tests/io/json/test_pandas.py index de4afec883efd..6625446bea469 100644 --- a/pandas/tests/io/json/test_pandas.py +++ b/pandas/tests/io/json/test_pandas.py @@ -511,6 +511,51 @@ def test_blocks_compat_GH9037(self): by_blocks=True, check_exact=True) + def test_frame_nonprintable_bytes(self): + # GH14256: failing column caused segfaults, if it is not the last one + + class BinaryThing(object): + + def __init__(self, hexed): + self.hexed = hexed + if compat.PY2: + self.binary = hexed.decode('hex') + else: + self.binary = bytes.fromhex(hexed) + + def __str__(self): + return self.hexed + + hexed = '574b4454ba8c5eb4f98a8f45' + binthing = BinaryThing(hexed) + + # verify the proper conversion of printable content + df_printable = DataFrame({'A': [binthing.hexed]}) + assert df_printable.to_json() == '{"A":{"0":"%s"}}' % hexed + + # check if non-printable content throws appropriate Exception + df_nonprintable = DataFrame({'A': [binthing]}) + with pytest.raises(OverflowError): + df_nonprintable.to_json() + + # the same with multiple columns threw segfaults + df_mixed = DataFrame({'A': [binthing], 'B': [1]}, + columns=['A', 'B']) + with pytest.raises(OverflowError): + df_mixed.to_json() + + # default_handler should resolve exceptions for non-string types + assert df_nonprintable.to_json(default_handler=str) == \ + '{"A":{"0":"%s"}}' % hexed + assert df_mixed.to_json(default_handler=str) == \ + '{"A":{"0":"%s"},"B":{"0":1}}' % hexed + + def test_label_overflow(self): + # GH14256: buffer length not checked when writing label + df = pd.DataFrame({'foo': [1337], 'bar' * 100000: [1]}) + assert df.to_json() == \ + '{"%s":{"0":1},"foo":{"0":1337}}' % ('bar' * 100000) + def test_series_non_unique_index(self): s = Series(['a', 'b'], index=[1, 1])
- [x] closes #14256
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry

There were several ways for the JSON string buffer at `enc->start` to exceed the reserved space:

- Loops over DataFrame columns could get stuck on the same column when `enc->errorMsg` was set. This is fixed by adding a check for the error message to the respective `iterNext` function.
- Loops over objects (`Dir_iterNext`) failed to break for similar reasons, which allowed infinite recursion. Also added a check for `errorMsg`.
- Column labels were written in the `iterName` methods inside `objToJSON.c`, where the check for remaining buffer space was not accessible. Moved `Buffer_Reserve` to the ujson header file.
- Closing brackets could be appended without sufficient buffer. Added `Buffer_Reserve` calls.
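A minimal repro of the failure mode, adapted from the tests this PR adds (`BinaryThing` comes from the test; Python 3 is shown, so the `compat.PY2` branch is dropped):

```python
import pandas as pd

class BinaryThing(object):
    # an arbitrary object the ujson encoder cannot serialize directly
    def __init__(self, hexed):
        self.hexed = hexed
        self.binary = bytes.fromhex(hexed)  # Python 3 branch of the test

    def __str__(self):
        return self.hexed

binthing = BinaryThing('574b4454ba8c5eb4f98a8f45')

# Multiple columns with a failing, non-last column used to segfault;
# after this fix an OverflowError is raised instead.
df_mixed = pd.DataFrame({'A': [binthing], 'B': [1]}, columns=['A', 'B'])
try:
    df_mixed.to_json()
except OverflowError:
    pass

# default_handler resolves the exception for non-string types
assert df_mixed.to_json(default_handler=str) == \
    '{"A":{"0":"574b4454ba8c5eb4f98a8f45"},"B":{"0":1}}'
```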
https://api.github.com/repos/pandas-dev/pandas/pulls/17857
2017-10-12T15:59:09Z
2017-10-14T14:36:39Z
2017-10-14T14:36:39Z
2017-10-14T14:36:53Z
BUG: adds validation for boolean keywords in DataFrame.set_index
diff --git a/doc/source/whatsnew/v0.22.0.txt b/doc/source/whatsnew/v0.22.0.txt index ccaa408603333..d177574ad3fdc 100644 --- a/doc/source/whatsnew/v0.22.0.txt +++ b/doc/source/whatsnew/v0.22.0.txt @@ -162,5 +162,5 @@ Other ^^^^^ - Improved error message when attempting to use a Python keyword as an identifier in a numexpr query (:issue:`18221`) -- +- Added checking of boolean kwargs in DataFrame methods (:issue:`16714`) - diff --git a/pandas/core/frame.py b/pandas/core/frame.py index f3137c1edf2af..26e16ece3fca9 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -84,7 +84,7 @@ from pandas.compat.numpy import function as nv from pandas.util._decorators import (Appender, Substitution, rewrite_axis_style_signature) -from pandas.util._validators import (validate_bool_kwarg, +from pandas.util._validators import (validate_keywords_as_bool, validate_axis_style_args) from pandas.core.indexes.period import PeriodIndex @@ -746,6 +746,7 @@ def iterrows(self): s = klass(v, index=columns, name=k) yield k, s + @validate_keywords_as_bool('index') def itertuples(self, index=True, name="Pandas"): """ Iterate over DataFrame rows as namedtuples, with index value as first @@ -1000,6 +1001,7 @@ def to_dict(self, orient='dict', into=dict): else: raise ValueError("orient '%s' not understood" % orient) + @validate_keywords_as_bool('verbose', 'reauth') def to_gbq(self, destination_table, project_id, chunksize=10000, verbose=True, reauth=False, if_exists='fail', private_key=None): """Write a DataFrame to a Google BigQuery table. @@ -1181,6 +1183,7 @@ def from_records(cls, data, index=None, exclude=None, columns=None, return cls(mgr) + @validate_keywords_as_bool('convert_datetime64') def to_records(self, index=True, convert_datetime64=True): """ Convert DataFrame to record array. Index will be put in the @@ -1426,6 +1429,7 @@ def to_panel(self): return self._constructor_expanddim(new_mgr) + @validate_keywords_as_bool('index') def to_csv(self, path_or_buf=None, sep=",", na_rep='', float_format=None, columns=None, header=True, index=True, index_label=None, mode='w', encoding=None, compression=None, quoting=None, @@ -1865,6 +1869,7 @@ def _sizeof_fmt(num, size_qualifier): _sizeof_fmt(mem_usage, size_qualifier)) _put_lines(buf, lines) + @validate_keywords_as_bool('index', 'deep') def memory_usage(self, index=True, deep=False): """Memory usage of DataFrame columns. @@ -2215,6 +2220,7 @@ def _getitem_frame(self, key): raise ValueError('Must pass DataFrame with boolean values only') return self.where(key) + @validate_keywords_as_bool('inplace') def query(self, expr, inplace=False, **kwargs): """Query the columns of a frame with a boolean expression. @@ -2286,7 +2292,6 @@ def query(self, expr, inplace=False, **kwargs): >>> df.query('a > b') >>> df[df.a > df.b] # same result as the previous expression """ - inplace = validate_bool_kwarg(inplace, 'inplace') if not isinstance(expr, compat.string_types): msg = "expr must be a string to be evaluated, {0} given" raise ValueError(msg.format(type(expr))) @@ -2306,6 +2311,7 @@ def query(self, expr, inplace=False, **kwargs): else: return new_data + @validate_keywords_as_bool('inplace') def eval(self, expr, inplace=False, **kwargs): """Evaluate an expression in the context of the calling DataFrame instance. 
@@ -2352,7 +2358,6 @@ def eval(self, expr, inplace=False, **kwargs): """ from pandas.core.computation.eval import eval as _eval - inplace = validate_bool_kwarg(inplace, 'inplace') resolvers = kwargs.pop('resolvers', None) kwargs['level'] = kwargs.pop('level', 0) + 1 if resolvers is None: @@ -2589,6 +2594,7 @@ def _set_item(self, key, value): if len(self): self._check_setitem_copy() + @validate_keywords_as_bool('allow_duplicates') def insert(self, loc, column, value, allow_duplicates=False): """ Insert column into DataFrame at specified location. @@ -2905,6 +2911,7 @@ def _reindex_multi(self, axes, copy, fill_value): copy=copy, fill_value=fill_value) + @validate_keywords_as_bool('copy') @Appender(_shared_docs['align'] % _shared_doc_kwargs) def align(self, other, join='outer', axis=None, level=None, copy=True, fill_value=None, method=None, limit=None, fill_axis=0, @@ -3037,6 +3044,7 @@ def shift(self, periods=1, freq=None, axis=0): return super(DataFrame, self).shift(periods=periods, freq=freq, axis=axis) + @validate_keywords_as_bool('drop', 'append', 'inplace', 'verify_integrity') def set_index(self, keys, drop=True, append=False, inplace=False, verify_integrity=False): """ @@ -3102,7 +3110,6 @@ def set_index(self, keys, drop=True, append=False, inplace=False, ------- dataframe : DataFrame """ - inplace = validate_bool_kwarg(inplace, 'inplace') if not isinstance(keys, list): keys = [keys] @@ -3164,6 +3171,7 @@ def set_index(self, keys, drop=True, append=False, inplace=False, if not inplace: return frame + @validate_keywords_as_bool('drop', 'inplace') def reset_index(self, level=None, drop=False, inplace=False, col_level=0, col_fill=''): """ @@ -3300,7 +3308,6 @@ class max type lion mammal 80.5 run monkey mammal NaN jump """ - inplace = validate_bool_kwarg(inplace, 'inplace') if inplace: new_obj = self else: @@ -3399,6 +3406,7 @@ def notna(self): def notnull(self): return super(DataFrame, self).notnull() + @validate_keywords_as_bool('inplace') def dropna(self, axis=0, how='any', thresh=None, subset=None, inplace=False): """ @@ -3468,7 +3476,6 @@ def dropna(self, axis=0, how='any', thresh=None, subset=None, 1 3.0 4.0 NaN 1 """ - inplace = validate_bool_kwarg(inplace, 'inplace') if isinstance(axis, (tuple, list)): result = self for ax in axis: @@ -3508,6 +3515,7 @@ def dropna(self, axis=0, how='any', thresh=None, subset=None, else: return result + @validate_keywords_as_bool('inplace') def drop_duplicates(self, subset=None, keep='first', inplace=False): """ Return DataFrame with duplicate rows removed, optionally only @@ -3529,7 +3537,6 @@ def drop_duplicates(self, subset=None, keep='first', inplace=False): ------- deduplicated : DataFrame """ - inplace = validate_bool_kwarg(inplace, 'inplace') duplicated = self.duplicated(subset, keep=keep) if inplace: @@ -3585,10 +3592,10 @@ def f(vals): # ---------------------------------------------------------------------- # Sorting + @validate_keywords_as_bool('ascending', 'inplace') @Appender(_shared_docs['sort_values'] % _shared_doc_kwargs) def sort_values(self, by, axis=0, ascending=True, inplace=False, kind='quicksort', na_position='last'): - inplace = validate_bool_kwarg(inplace, 'inplace') axis = self._get_axis_number(axis) other_axis = 0 if axis == 1 else 1 @@ -3640,15 +3647,14 @@ def sort_values(self, by, axis=0, ascending=True, inplace=False, else: return self._constructor(new_data).__finalize__(self) + @validate_keywords_as_bool('ascending', 'inplace', 'sort_remaining') @Appender(_shared_docs['sort_index'] % _shared_doc_kwargs) def 
sort_index(self, axis=0, level=None, ascending=True, inplace=False, kind='quicksort', na_position='last', sort_remaining=True, by=None): - # TODO: this can be combined with Series.sort_index impl as # almost identical - inplace = validate_bool_kwarg(inplace, 'inplace') # 10726 if by is not None: warnings.warn("by argument to sort_index is deprecated, " @@ -4019,6 +4025,7 @@ def _flex_compare_frame(self, other, func, str_rep, level, try_cast=True): return self._compare_frame_evaluate(other, func, str_rep, try_cast=try_cast) + @validate_keywords_as_bool('overwrite') def combine(self, other, func, fill_value=None, overwrite=True): """ Add two DataFrame objects and do not propagate NaN values, so if for a @@ -4152,6 +4159,7 @@ def combiner(x, y, needs_i8_conversion=False): return self.combine(other, combiner, overwrite=False) + @validate_keywords_as_bool('overwrite', 'raise_conflict') def update(self, other, join='left', overwrite=True, filter_func=None, raise_conflict=False): """ @@ -4742,6 +4750,7 @@ def aggregate(self, func, axis=0, *args, **kwargs): agg = aggregate + @validate_keywords_as_bool('broadcast', 'raw') def apply(self, func, axis=0, broadcast=False, raw=False, reduce=None, args=(), **kwds): """ @@ -5041,6 +5050,7 @@ def infer(x): # ---------------------------------------------------------------------- # Merging / joining methods + @validate_keywords_as_bool('ignore_index', 'verify_integrity') def append(self, other, ignore_index=False, verify_integrity=False): """ Append rows of `other` to the end of this frame, returning a new @@ -5164,6 +5174,7 @@ def append(self, other, ignore_index=False, verify_integrity=False): return concat(to_concat, ignore_index=ignore_index, verify_integrity=verify_integrity) + @validate_keywords_as_bool('sort') def join(self, other, on=None, how='left', lsuffix='', rsuffix='', sort=False): """ @@ -5524,6 +5535,7 @@ def cov(self, min_periods=None): return self._constructor(baseCov, index=idx, columns=cols) + @validate_keywords_as_bool('drop') def corrwith(self, other, axis=0, drop=False): """ Compute pairwise correlation between rows or columns of two DataFrame @@ -5577,6 +5589,7 @@ def corrwith(self, other, axis=0, drop=False): # ---------------------------------------------------------------------- # ndarray-like stats methods + @validate_keywords_as_bool('numeric_only') def count(self, axis=0, level=None, numeric_only=False): """ Return Series with number of non-NA/null observations over requested @@ -5740,6 +5753,7 @@ def f(x): return Series(result, index=labels) + @validate_keywords_as_bool('dropna') def nunique(self, axis=0, dropna=True): """ Return Series with number of distinct observations over requested @@ -5771,6 +5785,7 @@ def nunique(self, axis=0, dropna=True): """ return self.apply(Series.nunique, axis=axis, dropna=dropna) + @validate_keywords_as_bool('dropna') def idxmin(self, axis=0, skipna=True): """ Return index of first occurrence of minimum over requested axis. @@ -5802,6 +5817,7 @@ def idxmin(self, axis=0, skipna=True): result = [index[i] if i >= 0 else np.nan for i in indices] return Series(result, index=self._get_agg_axis(axis)) + @validate_keywords_as_bool('dropna') def idxmax(self, axis=0, skipna=True): """ Return index of first occurrence of maximum over requested axis. 
@@ -5842,6 +5858,7 @@ def _get_agg_axis(self, axis_num): else: raise ValueError('Axis must be 0 or 1 (got %r)' % axis_num) + @validate_keywords_as_bool('numeric_only') def mode(self, axis=0, numeric_only=False): """ Gets the mode(s) of each element along the axis selected. Adds a row @@ -5880,6 +5897,7 @@ def f(s): return data.apply(f, axis=axis) + @validate_keywords_as_bool('numeric_only') def quantile(self, q=0.5, axis=0, numeric_only=True, interpolation='linear'): """ @@ -5953,6 +5971,7 @@ def quantile(self, q=0.5, axis=0, numeric_only=True, return result + @validate_keywords_as_bool('copy') def to_timestamp(self, freq=None, how='start', axis=0, copy=True): """ Cast to DatetimeIndex of timestamps, at *beginning* of period @@ -5987,6 +6006,7 @@ def to_timestamp(self, freq=None, how='start', axis=0, copy=True): return self._constructor(new_data) + @validate_keywords_as_bool('copy') def to_period(self, freq=None, axis=0, copy=True): """ Convert DataFrame from DatetimeIndex to PeriodIndex with desired diff --git a/pandas/tests/frame/test_validate.py b/pandas/tests/frame/test_validate.py index 2de0e866f6e70..fbe4505b0dc67 100644 --- a/pandas/tests/frame/test_validate.py +++ b/pandas/tests/frame/test_validate.py @@ -31,3 +31,11 @@ def test_validate_bool_args(self, dataframe, func, inplace): with tm.assert_raises_regex(ValueError, msg): getattr(dataframe, func)(**kwargs) + + @pytest.mark.parametrize('keyword', ('drop', 'append', 'inplace', + 'verify_integrity')) + def test_set_index_validation(self, dataframe, keyword): + msg = 'For argument "{}" expected type bool'.format(keyword) + kw = {keyword: 'yes please'} + with tm.assert_raises_regex(ValueError, msg): + dataframe.set_index('b', **kw) diff --git a/pandas/util/_validators.py b/pandas/util/_validators.py index 728db6af5558b..e18c32494570e 100644 --- a/pandas/util/_validators.py +++ b/pandas/util/_validators.py @@ -2,6 +2,7 @@ Module that contains many useful utilities for validating data or function arguments """ +import functools import warnings from pandas.core.dtypes.common import is_bool @@ -320,3 +321,33 @@ def validate_axis_style_args(data, args, kwargs, arg_name, method_name): msg = "Cannot specify all of '{}', 'index', 'columns'." raise TypeError(msg.format(arg_name)) return out + + +def validate_keywords_as_bool(*keywords): + """For a list of keywords, ensure all are bool + + Usage + ----- + Designed to be used as decorator around methods to check many + keywords at once: + + @validate_keywords_as_bool('inplace', 'append') + def set_index(self, keys, inplace=False, append=False): + etc. + + See Also + -------- + validate_bool_kwargs + + """ + keywords = set(keywords) + + def validate_kwargs(func): + @functools.wraps(func) + def validator(*args, **kwargs): + # only validate present keywords + for kw in keywords.intersection(kwargs.keys()): + validate_bool_kwarg(kwargs[kw], kw) + return func(*args, **kwargs) + return validator + return validate_kwargs
ENH: Adds util._validators.validate_keywords_as_bool decorator

- [x] closes #16714
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry

I've used a decorator so that we can check all the boolean kwargs at once rather than have a lot of lines of `validate_bool_kwarg` at the top of every method.
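A usage sketch: the decorator body below mirrors the implementation in this diff, while the toy `demo` function is purely illustrative (a stand-in for methods like `set_index`).

```python
import functools
from pandas.util._validators import validate_bool_kwarg

def validate_keywords_as_bool(*keywords):
    """For a list of keywords, ensure all are bool (logic from this PR)."""
    keywords = set(keywords)

    def validate_kwargs(func):
        @functools.wraps(func)
        def validator(*args, **kwargs):
            # only validate keywords the caller actually passed
            for kw in keywords.intersection(kwargs.keys()):
                validate_bool_kwarg(kwargs[kw], kw)
            return func(*args, **kwargs)
        return validator
    return validate_kwargs

@validate_keywords_as_bool('inplace')
def demo(data, inplace=False):  # hypothetical method for illustration
    return None if inplace else data

demo([1, 2, 3], inplace=True)  # passes validation
try:
    demo([1, 2, 3], inplace='yes please')
except ValueError as err:
    print(err)  # 'For argument "inplace" expected type bool...'
```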
https://api.github.com/repos/pandas-dev/pandas/pulls/17853
2017-10-12T12:18:26Z
2017-12-28T12:36:32Z
null
2017-12-28T12:36:32Z
BUG: duplicate indexing with embedded non-orderables (#17610)
diff --git a/doc/source/whatsnew/v0.21.0.txt b/doc/source/whatsnew/v0.21.0.txt index 2b2dd4915b560..ca2ab43022608 100644 --- a/doc/source/whatsnew/v0.21.0.txt +++ b/doc/source/whatsnew/v0.21.0.txt @@ -978,6 +978,7 @@ Indexing - Bug in :meth:`DataFrame.first_valid_index` and :meth:`DataFrame.last_valid_index` when no valid entry (:issue:`17400`) - Bug in :func:`Series.rename` when called with a callable, incorrectly alters the name of the ``Series``, rather than the name of the ``Index``. (:issue:`17407`) - Bug in :func:`String.str_get` raises ``IndexError`` instead of inserting NaNs when using a negative index. (:issue:`17704`) +- Bug in ``Series`` containing duplicate indexing when gets embedded non-orderables or orderables, raises error or returns unexpected result. (:issue:`17610`) I/O ^^^ diff --git a/pandas/core/series.py b/pandas/core/series.py index 1c92c4b8850ee..507c867ab73a5 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -622,12 +622,11 @@ def __getitem__(self, key): try: result = self.index.get_value(self, key) - if not is_scalar(result): + if (not is_scalar(result)) and (key in self.index): if is_list_like(result) and not isinstance(result, Series): - # we need to box if we have a non-unique index here # otherwise have inline ndarray/lists - if not self.index.is_unique: + if not is_scalar(self.index.get_loc(key)): result = self._constructor( result, index=[key] * len(result), dtype=self.dtype).__finalize__(self) diff --git a/pandas/tests/series/test_indexing.py b/pandas/tests/series/test_indexing.py index d141b378fe214..9685df271b354 100644 --- a/pandas/tests/series/test_indexing.py +++ b/pandas/tests/series/test_indexing.py @@ -546,6 +546,22 @@ def test_getitem_setitem_periodindex(self): result[4:8] = ts[4:8] assert_series_equal(result, ts) + def test_getitem_with_duplicates_indices(self): + # GH 17610 + s = pd.Series({1: 12, 2: [1, 2, 2, 3]}) + s = s.append(pd.Series({1: 313})) + s_1 = pd.Series({1: 12, },) + s_1 = s_1.append(pd.Series({1: 313})) + assert_series_equal(s[1], s_1, check_dtype=False) + assert s[2] == [1, 2, 2, 3] + + s = pd.Series({1: [1, 2, 3], 2: [1, 2, 2, 3]}) + s = s.append(pd.Series({1: [1, 2, 3]})) + s_1 = pd.Series({1: [1, 2, 3], }) + s_1 = s_1.append(pd.Series({1: [1, 2, 3]})) + assert_series_equal(s[1], s_1, check_dtype=False) + assert s[2] == [1, 2, 2, 3] + def test_getitem_median_slice_bug(self): index = date_range('20090415', '20090519', freq='2B') s = Series(np.random.randn(13), index=index)
BUG: duplicate indexing with embedded non-orderables (#17610)

- [x] closes #xxxx
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/17851
2017-10-12T08:54:41Z
2017-12-03T12:59:06Z
null
2017-12-03T12:59:06Z
BUG: duplicate indexing with embedded non-orderables (#17610)
BUG: duplicate indexing with embedded non-orderables (#17610)

- [x] closes #xxxx
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/17849
2017-10-12T06:21:12Z
2017-10-12T06:56:16Z
null
2017-10-12T07:24:52Z
BUG: duplicate indexing with embedded non-orderables (#17610)
diff --git a/doc/source/whatsnew/v0.21.0.txt b/doc/source/whatsnew/v0.21.0.txt index d7a08b1985076..8be404fd1cefc 100644 --- a/doc/source/whatsnew/v0.21.0.txt +++ b/doc/source/whatsnew/v0.21.0.txt @@ -936,6 +936,7 @@ Indexing - Bug in :meth:`DataFrame.first_valid_index` and :meth:`DataFrame.last_valid_index` when no valid entry (:issue:`17400`) - Bug in :func:`Series.rename` when called with a `callable`, incorrectly alters the name of the `Series`, rather than the name of the `Index`. (:issue:`17407`) - Bug in :func:`String.str_get` raises `index out of range` error instead of inserting NaNs when using a negative index. (:issue:`17704`) +- Bug in ``Series`` containing duplicate indexing when gets embedded non-orderables or orderables, raises error or returns unexpected result. (:issue:`17610`) I/O ^^^ diff --git a/pandas/core/series.py b/pandas/core/series.py index 8499f8b55d2d0..13b3b66d5ff6f 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -626,9 +626,10 @@ def __getitem__(self, key): # we need to box if we have a non-unique index here # otherwise have inline ndarray/lists if not self.index.is_unique: - result = self._constructor( - result, index=[key] * len(result), - dtype=self.dtype).__finalize__(self) + if key in self.index.get_duplicates(): + result = self._constructor( + result, index=[key] * len(result), + dtype=self.dtype).__finalize__(self) return result except InvalidIndexError: diff --git a/pandas/tests/series/test_indexing.py b/pandas/tests/series/test_indexing.py index 75ae47ed2fdc1..6d55ac6b94079 100644 --- a/pandas/tests/series/test_indexing.py +++ b/pandas/tests/series/test_indexing.py @@ -546,6 +546,23 @@ def test_getitem_setitem_periodindex(self): result[4:8] = ts[4:8] assert_series_equal(result, ts) + def test_getitem_with_duplicates_indices(self): + s = pd.Series({1: 12, 2: [1, 2, 2, 3]}) + s = s.append(pd.Series({1: 313})) + s_1 = pd.Series({1: 12, },) + s_1 = s_1.append(pd.Series({1: 313})) + + assert_series_equal(s[1], s_1, check_dtype=False) + assert s[2] == [1, 2, 2, 3] + + s = pd.Series({1: [1, 2, 3], 2: [1, 2, 2, 3]}) + s = s.append(pd.Series({1: [1, 2, 3]})) + s_1 = pd.Series({1: [1, 2, 3], }) + s_1 = s_1.append(pd.Series({1: [1, 2, 3]})) + + assert_series_equal(s[1], s_1, check_dtype=False) + assert s[2] == [1, 2, 2, 3] + def test_getitem_median_slice_bug(self): index = date_range('20090415', '20090519', freq='2B') s = Series(np.random.randn(13), index=index)
Bug in ``Series`` with a duplicated index: indexing a label whose values are embedded non-orderables (or orderables) raised an error or returned an unexpected result. (:issue:`17610`)

- [x] closes #17610
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
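A condensed repro taken from the tests in this diff (``Series.append`` was the current API in this era of pandas):

```python
import pandas as pd

# object-dtype Series: label 1 is duplicated, label 2 holds a list
s = pd.Series({1: 12, 2: [1, 2, 2, 3]})
s = s.append(pd.Series({1: 313}))

# duplicated label -> boxed into a Series of both values
print(s[1])  # index [1, 1], values [12, 313]

# unique label with an embedded non-orderable -> the list itself,
# which previously raised or came back wrapped unexpectedly
assert s[2] == [1, 2, 2, 3]
```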
https://api.github.com/repos/pandas-dev/pandas/pulls/17848
2017-10-12T05:38:04Z
2017-10-12T06:08:47Z
null
2017-10-12T06:08:47Z
BUG: Ignore division by 0 when merging empty dataframes (#17776)
diff --git a/doc/source/whatsnew/v0.21.1.txt b/doc/source/whatsnew/v0.21.1.txt index 4c6cdb9846305..87e4dba0342a0 100644 --- a/doc/source/whatsnew/v0.21.1.txt +++ b/doc/source/whatsnew/v0.21.1.txt @@ -120,7 +120,7 @@ Reshaping - Error message in ``pd.merge_asof()`` for key datatype mismatch now includes datatype of left and right key (:issue:`18068`) - Bug in ``pd.concat`` when empty and non-empty DataFrames or Series are concatenated (:issue:`18178` :issue:`18187`) - Bug in ``DataFrame.filter(...)`` when :class:`unicode` is passed as a condition in Python 2 (:issue:`13101`) -- +- Bug when merging empty DataFrames when ``np.seterr(divide='raise')`` is set (:issue:`17776`) Numeric ^^^^^^^ diff --git a/pandas/core/reshape/merge.py b/pandas/core/reshape/merge.py index e4b31939250a7..56ca913dbcddb 100644 --- a/pandas/core/reshape/merge.py +++ b/pandas/core/reshape/merge.py @@ -1529,7 +1529,8 @@ def _get_join_keys(llab, rlab, shape, sort): rkey = stride * rlab[0].astype('i8', subok=False, copy=False) for i in range(1, nlev): - stride //= shape[i] + with np.errstate(divide='ignore'): + stride //= shape[i] lkey += llab[i] * stride rkey += rlab[i] * stride diff --git a/pandas/tests/reshape/test_merge.py b/pandas/tests/reshape/test_merge.py index ee7c4e5c90bb8..b76951e8c2ac2 100644 --- a/pandas/tests/reshape/test_merge.py +++ b/pandas/tests/reshape/test_merge.py @@ -864,6 +864,12 @@ def test_validation(self): result = merge(left, right, on=['a', 'b'], validate='1:1') assert_frame_equal(result, expected_multi) + def test_merge_two_empty_df_no_division_error(self): + # GH17776, PR #17846 + a = pd.DataFrame({'a': [], 'b': [], 'c': []}) + with np.errstate(divide='raise'): + merge(a, a, on=('a', 'b')) + def _check_merge(x, y): for how in ['inner', 'left', 'outer']:
- [x] closes #17776
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
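The added test as a standalone sketch: with NumPy configured to raise on division by zero, the integer ``stride //= shape[i]`` step in ``_get_join_keys`` used to blow up when merging empty frames on multiple keys.

```python
import numpy as np
import pandas as pd

a = pd.DataFrame({'a': [], 'b': [], 'c': []})

# np.seterr(divide='raise') is the global equivalent of this context
with np.errstate(divide='raise'):
    pd.merge(a, a, on=('a', 'b'))  # previously raised FloatingPointError
```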
https://api.github.com/repos/pandas-dev/pandas/pulls/17846
2017-10-11T18:57:43Z
2017-11-27T11:36:21Z
2017-11-27T11:36:21Z
2017-12-18T20:06:47Z
BUG: set tz on DTI from fixed format HDFStore
diff --git a/doc/source/whatsnew/v0.21.0.txt b/doc/source/whatsnew/v0.21.0.txt index f04410ef63531..3581565e82fe6 100644 --- a/doc/source/whatsnew/v0.21.0.txt +++ b/doc/source/whatsnew/v0.21.0.txt @@ -938,6 +938,7 @@ Indexing I/O ^^^ +- Bug in :func:`read_hdf` when reading a timezone aware index from ``fixed`` format HDFStore (:issue:`17618`) - Bug in :func:`read_csv` in which columns were not being thoroughly de-duplicated (:issue:`17060`) - Bug in :func:`read_csv` in which specified column names were not being thoroughly de-duplicated (:issue:`17095`) - Bug in :func:`read_csv` in which non integer values for the header argument generated an unhelpful / unrelated error message (:issue:`16338`) diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py index ca1b4d031d3ce..6a2ddaec73823 100644 --- a/pandas/io/pytables.py +++ b/pandas/io/pytables.py @@ -2391,8 +2391,11 @@ def _alias_to_class(self, alias): def _get_index_factory(self, klass): if klass == DatetimeIndex: def f(values, freq=None, tz=None): - return DatetimeIndex._simple_new(values, None, freq=freq, - tz=tz) + # data are already in UTC, localize and convert if tz present + result = DatetimeIndex._simple_new(values, None, freq=freq) + if tz is not None: + result = result.tz_localize('UTC').tz_convert(tz) + return result return f elif klass == PeriodIndex: def f(values, freq=None, tz=None): diff --git a/pandas/tests/io/test_pytables.py b/pandas/tests/io/test_pytables.py index 2fe3cf1f34d44..6e3e338ce3de3 100644 --- a/pandas/tests/io/test_pytables.py +++ b/pandas/tests/io/test_pytables.py @@ -2272,6 +2272,17 @@ def test_calendar_roundtrip_issue(self): result = store.select('table') assert_series_equal(result, s) + def test_roundtrip_tz_aware_index(self): + # GH 17618 + time = pd.Timestamp('2000-01-01 01:00:00', tz='US/Eastern') + df = pd.DataFrame(data=[0], index=[time]) + + with ensure_clean_store(self.path) as store: + store.put('frame', df, format='fixed') + recons = store['frame'] + tm.assert_frame_equal(recons, df) + assert recons.index[0].value == 946706400000000000 + def test_append_with_timedelta(self): # GH 3577 # append timedelta
Set the tz after creating the DatetimeIndex instance when reading from a fixed-format HDFStore. Setting the tz during instance creation offset the data.

closes #17618

- [x] closes #xxxx
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
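The roundtrip from the added test, as a sketch (the ``store.h5`` path is illustrative, and fixed-format HDF I/O requires PyTables to be installed):

```python
import pandas as pd
import pandas.util.testing as tm

time = pd.Timestamp('2000-01-01 01:00:00', tz='US/Eastern')
df = pd.DataFrame(data=[0], index=[time])

with pd.HDFStore('store.h5') as store:  # illustrative path
    store.put('frame', df, format='fixed')
    recons = store['frame']

tm.assert_frame_equal(recons, df)
# data are stored in UTC: 2000-01-01 01:00 US/Eastern == 06:00 UTC
assert recons.index[0].value == 946706400000000000
```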
https://api.github.com/repos/pandas-dev/pandas/pulls/17844
2017-10-11T06:39:28Z
2017-10-13T10:32:47Z
2017-10-13T10:32:46Z
2017-10-13T10:32:51Z
Refactor index-as-string groupby tests and fix spurious warning (Bug 17383)
diff --git a/doc/source/whatsnew/v0.21.0.txt b/doc/source/whatsnew/v0.21.0.txt index f04410ef63531..5727888044229 100644 --- a/doc/source/whatsnew/v0.21.0.txt +++ b/doc/source/whatsnew/v0.21.0.txt @@ -974,6 +974,7 @@ Groupby/Resample/Rolling - Bug in ``DataFrame.groupby`` where index and column keys were not recognized correctly when the number of keys equaled the number of elements on the groupby axis (:issue:`16859`) - Bug in ``groupby.nunique()`` with ``TimeGrouper`` which cannot handle ``NaT`` correctly (:issue:`17575`) - Bug in ``DataFrame.groupby`` where a single level selection from a ``MultiIndex`` unexpectedly sorts (:issue:`17537`) +- Bug in ``DataFrame.groupby`` where spurious warning is raised when ``Grouper`` object is used to override ambiguous column name (:issue:`17383`) - Bug in ``TimeGrouper`` differs when passes as a list and as a scalar (:issue:`17530`) Sparse diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py index 9518f17e5f4f1..54ced824b7353 100644 --- a/pandas/core/groupby.py +++ b/pandas/core/groupby.py @@ -2704,7 +2704,7 @@ def _get_grouper(obj, key=None, axis=0, level=None, sort=True, # a passed-in Grouper, directly convert if isinstance(key, Grouper): - binner, grouper, obj = key._get_grouper(obj) + binner, grouper, obj = key._get_grouper(obj, validate=False) if key.key is None: return grouper, [], obj else: diff --git a/pandas/tests/groupby/test_groupby.py b/pandas/tests/groupby/test_groupby.py index 657de9b589dc9..740526e262d16 100644 --- a/pandas/tests/groupby/test_groupby.py +++ b/pandas/tests/groupby/test_groupby.py @@ -253,158 +253,6 @@ def test_grouper_column_and_index(self): expected = df_single.reset_index().groupby(['inner', 'B']).mean() assert_frame_equal(result, expected) - def test_grouper_index_level_as_string(self): - # GH 5677, allow strings passed as the `by` parameter to reference - # columns or index levels - - idx = pd.MultiIndex.from_tuples([('a', 1), ('a', 2), ('a', 3), - ('b', 1), ('b', 2), ('b', 3)]) - idx.names = ['outer', 'inner'] - df_multi = pd.DataFrame({"A": np.arange(6), - 'B': ['one', 'one', 'two', - 'two', 'one', 'one']}, - index=idx) - - df_single = df_multi.reset_index('outer') - - # Column and Index on MultiIndex - result = df_multi.groupby(['B', 'inner']).mean() - expected = df_multi.groupby(['B', pd.Grouper(level='inner')]).mean() - assert_frame_equal(result, expected) - - # Index and Column on MultiIndex - result = df_multi.groupby(['inner', 'B']).mean() - expected = df_multi.groupby([pd.Grouper(level='inner'), 'B']).mean() - assert_frame_equal(result, expected) - - # Column and Index on single Index - result = df_single.groupby(['B', 'inner']).mean() - expected = df_single.groupby(['B', pd.Grouper(level='inner')]).mean() - assert_frame_equal(result, expected) - - # Index and Column on single Index - result = df_single.groupby(['inner', 'B']).mean() - expected = df_single.groupby([pd.Grouper(level='inner'), 'B']).mean() - assert_frame_equal(result, expected) - - # Single element list of Index on MultiIndex - result = df_multi.groupby(['inner']).mean() - expected = df_multi.groupby(pd.Grouper(level='inner')).mean() - assert_frame_equal(result, expected) - - # Single element list of Index on single Index - result = df_single.groupby(['inner']).mean() - expected = df_single.groupby(pd.Grouper(level='inner')).mean() - assert_frame_equal(result, expected) - - # Index on MultiIndex - result = df_multi.groupby('inner').mean() - expected = df_multi.groupby(pd.Grouper(level='inner')).mean() - assert_frame_equal(result, 
expected) - - # Index on single Index - result = df_single.groupby('inner').mean() - expected = df_single.groupby(pd.Grouper(level='inner')).mean() - assert_frame_equal(result, expected) - - def test_grouper_column_index_level_precedence(self): - # GH 5677, when a string passed as the `by` parameter - # matches a column and an index level the column takes - # precedence - - idx = pd.MultiIndex.from_tuples([('a', 1), ('a', 2), ('a', 3), - ('b', 1), ('b', 2), ('b', 3)]) - idx.names = ['outer', 'inner'] - df_multi_both = pd.DataFrame({"A": np.arange(6), - 'B': ['one', 'one', 'two', - 'two', 'one', 'one'], - 'inner': [1, 1, 1, 1, 1, 1]}, - index=idx) - - df_single_both = df_multi_both.reset_index('outer') - - # Group MultiIndex by single key - with tm.assert_produces_warning(FutureWarning, check_stacklevel=False): - result = df_multi_both.groupby('inner').mean() - - expected = df_multi_both.groupby([pd.Grouper(key='inner')]).mean() - assert_frame_equal(result, expected) - not_expected = df_multi_both.groupby(pd.Grouper(level='inner')).mean() - assert not result.index.equals(not_expected.index) - - # Group single Index by single key - with tm.assert_produces_warning(FutureWarning, check_stacklevel=False): - result = df_single_both.groupby('inner').mean() - - expected = df_single_both.groupby([pd.Grouper(key='inner')]).mean() - assert_frame_equal(result, expected) - not_expected = df_single_both.groupby(pd.Grouper(level='inner')).mean() - assert not result.index.equals(not_expected.index) - - # Group MultiIndex by single key list - with tm.assert_produces_warning(FutureWarning, check_stacklevel=False): - result = df_multi_both.groupby(['inner']).mean() - - expected = df_multi_both.groupby([pd.Grouper(key='inner')]).mean() - assert_frame_equal(result, expected) - not_expected = df_multi_both.groupby(pd.Grouper(level='inner')).mean() - assert not result.index.equals(not_expected.index) - - # Group single Index by single key list - with tm.assert_produces_warning(FutureWarning, check_stacklevel=False): - result = df_single_both.groupby(['inner']).mean() - - expected = df_single_both.groupby([pd.Grouper(key='inner')]).mean() - assert_frame_equal(result, expected) - not_expected = df_single_both.groupby(pd.Grouper(level='inner')).mean() - assert not result.index.equals(not_expected.index) - - # Group MultiIndex by two keys (1) - with tm.assert_produces_warning(FutureWarning, check_stacklevel=False): - result = df_multi_both.groupby(['B', 'inner']).mean() - - expected = df_multi_both.groupby(['B', - pd.Grouper(key='inner')]).mean() - assert_frame_equal(result, expected) - not_expected = df_multi_both.groupby(['B', - pd.Grouper(level='inner') - ]).mean() - assert not result.index.equals(not_expected.index) - - # Group MultiIndex by two keys (2) - with tm.assert_produces_warning(FutureWarning, check_stacklevel=False): - result = df_multi_both.groupby(['inner', 'B']).mean() - - expected = df_multi_both.groupby([pd.Grouper(key='inner'), - 'B']).mean() - assert_frame_equal(result, expected) - not_expected = df_multi_both.groupby([pd.Grouper(level='inner'), - 'B']).mean() - assert not result.index.equals(not_expected.index) - - # Group single Index by two keys (1) - with tm.assert_produces_warning(FutureWarning, check_stacklevel=False): - result = df_single_both.groupby(['B', 'inner']).mean() - - expected = df_single_both.groupby(['B', - pd.Grouper(key='inner')]).mean() - assert_frame_equal(result, expected) - not_expected = df_single_both.groupby(['B', - pd.Grouper(level='inner') - ]).mean() - assert not 
result.index.equals(not_expected.index) - - # Group single Index by two keys (2) - with tm.assert_produces_warning(FutureWarning, check_stacklevel=False): - result = df_single_both.groupby(['inner', 'B']).mean() - - expected = df_single_both.groupby([pd.Grouper(key='inner'), - 'B']).mean() - assert_frame_equal(result, expected) - not_expected = df_single_both.groupby([pd.Grouper(level='inner'), - 'B']).mean() - assert not result.index.equals(not_expected.index) - def test_grouper_getting_correct_binner(self): # GH 10063 diff --git a/pandas/tests/groupby/test_index_as_string.py b/pandas/tests/groupby/test_index_as_string.py new file mode 100644 index 0000000000000..3b6e15036cfe2 --- /dev/null +++ b/pandas/tests/groupby/test_index_as_string.py @@ -0,0 +1,116 @@ +import pytest +import pandas as pd +import numpy as np + +from pandas.util.testing import assert_frame_equal, assert_series_equal +import pandas.util.testing as tm + + +@pytest.fixture(params=[['inner'], ['inner', 'outer']]) +def frame(request): + levels = request.param + df = pd.DataFrame({'outer': ['a', 'a', 'a', 'b', 'b', 'b'], + 'inner': [1, 2, 3, 1, 2, 3], + 'A': np.arange(6), + 'B': ['one', 'one', 'two', 'two', 'one', 'one']}) + if levels: + df = df.set_index(levels) + + return df + + +@pytest.fixture() +def series(): + df = pd.DataFrame({'outer': ['a', 'a', 'a', 'b', 'b', 'b'], + 'inner': [1, 2, 3, 1, 2, 3], + 'A': np.arange(6), + 'B': ['one', 'one', 'two', 'two', 'one', 'one']}) + s = df.set_index(['outer', 'inner', 'B'])['A'] + + return s + + +@pytest.mark.parametrize('key_strs,groupers', [ + ('inner', # Index name + pd.Grouper(level='inner') + ), + (['inner'], # List of index name + [pd.Grouper(level='inner')] + ), + (['B', 'inner'], # Column and index + ['B', pd.Grouper(level='inner')] + ), + (['inner', 'B'], # Index and column + [pd.Grouper(level='inner'), 'B'])]) +def test_grouper_index_level_as_string(frame, key_strs, groupers): + result = frame.groupby(key_strs).mean() + expected = frame.groupby(groupers).mean() + assert_frame_equal(result, expected) + + +@pytest.mark.parametrize('levels', [ + 'inner', 'outer', 'B', + ['inner'], ['outer'], ['B'], + ['inner', 'outer'], ['outer', 'inner'], + ['inner', 'outer', 'B'], ['B', 'outer', 'inner'] +]) +def test_grouper_index_level_as_string_series(series, levels): + + # Compute expected result + if isinstance(levels, list): + groupers = [pd.Grouper(level=lv) for lv in levels] + else: + groupers = pd.Grouper(level=levels) + + expected = series.groupby(groupers).mean() + + # Compute and check result + result = series.groupby(levels).mean() + assert_series_equal(result, expected) + + +@pytest.mark.parametrize('key_strs,key_groupers,level_groupers', [ + ('inner', # Index name + pd.Grouper(key='inner'), + pd.Grouper(level='inner'), + ), + (['inner'], # List of index name + [pd.Grouper(key='inner')], + [pd.Grouper(level='inner')] + ), + (['B', 'inner'], # Column and index + ['B', pd.Grouper(key='inner')], + ['B', pd.Grouper(level='inner')] + ), + (['inner', 'B'], # Index and column + [pd.Grouper(key='inner'), 'B'], + [pd.Grouper(level='inner'), 'B'])]) +def test_grouper_column_index_level_precedence(frame, + key_strs, + key_groupers, + level_groupers): + + # GH 5677, when a string passed as the `by` parameter + # matches a column and an index level the column takes + # precedence and a FutureWarning is raised + + # Add 'inner' column to frame + # (frame already has an 'inner' index) + frame['inner'] = [1, 1, 1, 1, 1, 1] + + # Performing a groupby with strings should produce warning + 
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False): + result = frame.groupby(key_strs).mean() + + # Grouping with key Grouper should produce the same result and no warning + with tm.assert_produces_warning(False): + expected = frame.groupby(key_groupers).mean() + + assert_frame_equal(result, expected) + + # Grouping with level Grouper should produce a difference result but + # still no warning + with tm.assert_produces_warning(False): + not_expected = frame.groupby(level_groupers).mean() + + assert not result.index.equals(not_expected.index)
- [x] closes #17383
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry

Test case refactoring:

- Moved the existing index-as-string test cases out of `test_groupby.py` and into a new `test_index_as_string.py` file
- Extracted test data generation functions and parameterized existing test cases to clean them up and shorten them
- Added a new parameterized test case on a `Series`
- Updated `test_grouper_column_index_level_precedence` to reproduce the false-warning problem described in #17383
- Updated `test_grouper_column_index_level_precedence` to verify when warnings should **NOT** be raised (this results in a test failure due to #17383 without the fix)
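A sketch of the behavior under test, using a small frame condensed from the fixtures in ``test_index_as_string.py``:

```python
import numpy as np
import pandas as pd
import pandas.util.testing as tm

df = pd.DataFrame({'outer': ['a', 'a', 'b', 'b'],
                   'inner': [1, 2, 1, 2],
                   'A': np.arange(4)}).set_index(['outer', 'inner'])

# a string naming an index level matches an explicit level Grouper
tm.assert_frame_equal(df.groupby('inner').mean(),
                      df.groupby(pd.Grouper(level='inner')).mean())

# GH 17383: with 'inner' both a column and an index level, an explicit
# Grouper disambiguates and must not emit a spurious FutureWarning
df['inner'] = [1, 1, 1, 1]
df.groupby(pd.Grouper(key='inner')).mean()    # no warning after the fix
df.groupby(pd.Grouper(level='inner')).mean()  # no warning after the fix
```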
https://api.github.com/repos/pandas-dev/pandas/pulls/17843
2017-10-10T21:06:15Z
2017-10-14T14:54:14Z
2017-10-14T14:54:13Z
2017-10-14T14:54:50Z
API: Deprecate rename_axis and reindex_axis
diff --git a/doc/source/basics.rst b/doc/source/basics.rst index be9d1a5d83b85..3044a8886b9ae 100644 --- a/doc/source/basics.rst +++ b/doc/source/basics.rst @@ -1201,8 +1201,11 @@ With a DataFrame, you can simultaneously reindex the index and columns: df df.reindex(index=['c', 'f', 'b'], columns=['three', 'two', 'one']) -For convenience, you may utilize the :meth:`~Series.reindex_axis` method, which -takes the labels and a keyword ``axis`` parameter. +You may also use ``reindex`` with an ``axis`` keyword: + +.. ipython:: python + + df.reindex(index=['c', 'f', 'b'], axis='index') Note that the ``Index`` objects containing the actual axis labels can be **shared** between objects. So if we have a Series and a DataFrame, the diff --git a/doc/source/whatsnew/v0.21.0.txt b/doc/source/whatsnew/v0.21.0.txt index f04410ef63531..2bee7cf5cdddc 100644 --- a/doc/source/whatsnew/v0.21.0.txt +++ b/doc/source/whatsnew/v0.21.0.txt @@ -810,6 +810,8 @@ Deprecations - ``.get_value`` and ``.set_value`` on ``Series``, ``DataFrame``, ``Panel``, ``SparseSeries``, and ``SparseDataFrame`` are deprecated in favor of using ``.iat[]`` or ``.at[]`` accessors (:issue:`15269`) - Passing a non-existent column in ``.to_excel(..., columns=)`` is deprecated and will raise a ``KeyError`` in the future (:issue:`17295`) - ``raise_on_error`` parameter to :func:`Series.where`, :func:`Series.mask`, :func:`DataFrame.where`, :func:`DataFrame.mask` is deprecated, in favor of ``errors=`` (:issue:`14968`) +- Using :meth:`DataFrame.rename_axis` and :meth:`Series.rename_axis` to alter index or column *labels* is now deprecated in favor of using ``.rename``. ``rename_axis`` may still be used to alter the name of the index or columns (:issue:`17833`). +- :meth:`~DataFrame.reindex_axis` has been deprecated in favor of :meth:`~DataFrame.reindex`. See :ref`here` <whatsnew_0210.enhancements.rename_reindex_axis> for more (:issue:`17833`). .. 
_whatsnew_0210.deprecations.select: @@ -998,6 +1000,7 @@ Reshaping - Bug in :func:`unique` where checking a tuple of strings raised a ``TypeError`` (:issue:`17108`) - Bug in :func:`concat` where order of result index was unpredictable if it contained non-comparable elements (:issue:`17344`) - Fixes regression when sorting by multiple columns on a ``datetime64`` dtype ``Series`` with ``NaT`` values (:issue:`16836`) +- Bug in :fun:`pivot_table` where the result's columns did not preserve the categorical dtype of ``columns`` when ``dropna`` was ``False`` (:issue:`17842`) Numeric ^^^^^^^ diff --git a/pandas/core/computation/align.py b/pandas/core/computation/align.py index 691eaebfd5fc1..0e7ae0cbe7c87 100644 --- a/pandas/core/computation/align.py +++ b/pandas/core/computation/align.py @@ -89,7 +89,7 @@ def _align_core(terms): for axis, items in zip(range(ndim), axes): ti = terms[i].value - if hasattr(ti, 'reindex_axis'): + if hasattr(ti, 'reindex'): transpose = isinstance(ti, pd.Series) and naxes > 1 reindexer = axes[naxes - 1] if transpose else items @@ -104,11 +104,7 @@ def _align_core(terms): ).format(axis=axis, term=terms[i].name, ordm=ordm) warnings.warn(w, category=PerformanceWarning, stacklevel=6) - if transpose: - f = partial(ti.reindex, index=reindexer, copy=False) - else: - f = partial(ti.reindex_axis, reindexer, axis=axis, - copy=False) + f = partial(ti.reindex, reindexer, axis=axis, copy=False) terms[i].update(f()) diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 94ff70f287fbe..c7e8c0da75e2c 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -65,7 +65,6 @@ _values_from_object, _maybe_box_datetimelike, _dict_compat, - _all_not_none, standardize_mapping) from pandas.core.generic import NDFrame, _shared_docs from pandas.core.index import (Index, MultiIndex, _ensure_index, @@ -2736,7 +2735,7 @@ def reindexer(value): if isinstance(loc, (slice, Series, np.ndarray, Index)): cols = maybe_droplevels(self.columns[loc], key) if len(cols) and not cols.equals(value.columns): - value = value.reindex_axis(cols, axis=1) + value = value.reindex(cols, axis=1) # now align rows value = reindexer(value).T @@ -2783,47 +2782,6 @@ def reindexer(value): return np.atleast_2d(np.asarray(value)) - def _validate_axis_style_args(self, arg, arg_name, index, columns, - axis, method_name): - if axis is not None: - # Using "axis" style, along with a positional arg - # Both index and columns should be None then - axis = self._get_axis_name(axis) - if index is not None or columns is not None: - msg = ( - "Can't specify both 'axis' and 'index' or 'columns'. " - "Specify either\n" - "\t.{method_name}.rename({arg_name}, axis=axis), or\n" - "\t.{method_name}.rename(index=index, columns=columns)" - ).format(arg_name=arg_name, method_name=method_name) - raise TypeError(msg) - if axis == 'index': - index = arg - elif axis == 'columns': - columns = arg - - elif _all_not_none(arg, index, columns): - msg = ( - "Cannot specify all of '{arg_name}', 'index', and 'columns'. " - "Specify either {arg_name} and 'axis', or 'index' and " - "'columns'." - ).format(arg_name=arg_name) - raise TypeError(msg) - - elif _all_not_none(arg, index): - # This is the "ambiguous" case, so emit a warning - msg = ( - "Interpreting call to '.{method_name}(a, b)' as " - "'.{method_name}(index=a, columns=b)'. " - "Use keyword arguments to remove any ambiguity." 
- ).format(method_name=method_name) - warnings.warn(msg, stacklevel=3) - index, columns = arg, index - elif index is None: - # This is for the default axis, like reindex([0, 1]) - index = arg - return index, columns - @property def _series(self): result = {} @@ -2952,11 +2910,11 @@ def align(self, other, join='outer', axis=None, level=None, copy=True, @Appender(_shared_docs['reindex'] % _shared_doc_kwargs) def reindex(self, labels=None, index=None, columns=None, axis=None, **kwargs): - index, columns = self._validate_axis_style_args(labels, 'labels', - index, columns, - axis, 'reindex') - return super(DataFrame, self).reindex(index=index, columns=columns, - **kwargs) + axes = self._validate_axis_style_args(labels, 'labels', + axes=[index, columns], + axis=axis, method_name='reindex') + kwargs.update(axes) + return super(DataFrame, self).reindex(**kwargs) @Appender(_shared_docs['reindex_axis'] % _shared_doc_kwargs) def reindex_axis(self, labels, axis=0, method=None, level=None, copy=True, @@ -3041,11 +2999,11 @@ def rename(self, mapper=None, index=None, columns=None, axis=None, 2 2 5 4 3 6 """ - index, columns = self._validate_axis_style_args(mapper, 'mapper', - index, columns, - axis, 'rename') - return super(DataFrame, self).rename(index=index, columns=columns, - **kwargs) + axes = self._validate_axis_style_args(mapper, 'mapper', + axes=[index, columns], + axis=axis, method_name='rename') + kwargs.update(axes) + return super(DataFrame, self).rename(**kwargs) @Appender(_shared_docs['fillna'] % _shared_doc_kwargs) def fillna(self, value=None, method=None, axis=None, inplace=False, diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 9d9d8334fcaf4..5fe5718d46bcb 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -29,7 +29,8 @@ from pandas.core.dtypes.missing import isna, notna from pandas.core.dtypes.generic import ABCSeries, ABCPanel, ABCDataFrame -from pandas.core.common import (_values_from_object, +from pandas.core.common import (_all_not_none, + _values_from_object, _maybe_box_datetimelike, SettingWithCopyError, SettingWithCopyWarning, AbstractMethodError) @@ -729,6 +730,51 @@ def swaplevel(self, i=-2, j=-1, axis=0): result._data.set_axis(axis, labels.swaplevel(i, j)) return result + def _validate_axis_style_args(self, arg, arg_name, axes, + axis, method_name): + out = {} + for i, value in enumerate(axes): + if value is not None: + out[self._AXIS_NAMES[i]] = value + + aliases = ', '.join(self._AXIS_NAMES.values()) + if axis is not None: + # Using "axis" style, along with a positional arg + # Both index and columns should be None then + axis = self._get_axis_name(axis) + if any(x is not None for x in axes): + msg = ( + "Can't specify both 'axis' and {aliases}. " + "Specify either\n" + "\t.{method_name}({arg_name}, axis=axis), or\n" + "\t.{method_name}(index=index, columns=columns)" + ).format(arg_name=arg_name, method_name=method_name, + aliases=aliases) + raise TypeError(msg) + out[axis] = arg + + elif _all_not_none(arg, *axes): + msg = ( + "Cannot specify all of '{arg_name}', {aliases}. " + "Specify either {arg_name} and 'axis', or {aliases}." + ).format(arg_name=arg_name, aliases=aliases) + raise TypeError(msg) + + elif _all_not_none(arg, axes[0]): + # This is the "ambiguous" case, so emit a warning + msg = ( + "Interpreting call to '.{method_name}(a, b)' as " + "'.{method_name}(index=a, columns=b)'. " # TODO + "Use keyword arguments to remove any ambiguity." 
+ ).format(method_name=method_name) + warnings.warn(msg, stacklevel=3) + out[self._AXIS_ORDERS[0]] = arg + out[self._AXIS_ORDERS[1]] = axes[0] + elif axes[0] is None: + # This is for the default axis, like reindex([0, 1]) + out[self._AXIS_ORDERS[0]] = arg + return out + # ---------------------------------------------------------------------- # Rename @@ -893,17 +939,12 @@ def f(x): rename.__doc__ = _shared_docs['rename'] def rename_axis(self, mapper, axis=0, copy=True, inplace=False): - """ - Alter index and / or columns using input function or functions. - A scalar or list-like for ``mapper`` will alter the ``Index.name`` - or ``MultiIndex.names`` attribute. - A function or dict for ``mapper`` will alter the labels. - Function / dict values must be unique (1-to-1). Labels not contained in - a dict / Series will be left as-is. + """Alter the name of the index or columns. Parameters ---------- - mapper : scalar, list-like, dict-like or function, optional + mapper : scalar, list-like, optional + Value to set the axis name attribute. axis : int or string, default 0 copy : boolean, default True Also copy underlying data @@ -913,31 +954,35 @@ def rename_axis(self, mapper, axis=0, copy=True, inplace=False): ------- renamed : type of caller or None if inplace=True + Notes + ----- + Prior to version 0.21.0, ``rename_axis`` could also be used to change + the axis *labels* by passing a mapping or scalar. This behavior is + deprecated and will be removed in a future version. Use ``rename`` + instead. + See Also -------- - pandas.NDFrame.rename + pandas.Series.rename, pandas.DataFrame.rename pandas.Index.rename Examples -------- >>> df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]}) - >>> df.rename_axis("foo") # scalar, alters df.index.name + >>> df.rename_axis("foo") A B foo 0 1 4 1 2 5 2 3 6 - >>> df.rename_axis(lambda x: 2 * x) # function: alters labels - A B - 0 1 4 - 2 2 5 - 4 3 6 - >>> df.rename_axis({"A": "ehh", "C": "see"}, axis="columns") # mapping - ehh B + + >>> df.rename_axis("bar", axis="columns") + bar A B 0 1 4 1 2 5 2 3 6 + """ inplace = validate_bool_kwarg(inplace, 'inplace') non_mapper = is_scalar(mapper) or (is_list_like(mapper) and not @@ -945,6 +990,9 @@ def rename_axis(self, mapper, axis=0, copy=True, inplace=False): if non_mapper: return self._set_axis_name(mapper, axis=axis, inplace=inplace) else: + msg = ("Using 'rename_axis' to alter labels is deprecated. " + "Use '.rename' instead") + warnings.warn(msg, FutureWarning, stacklevel=2) axis = self._get_axis_name(axis) d = {'copy': copy, 'inplace': inplace} d[axis] = mapper @@ -2981,6 +3029,11 @@ def reindex(self, *args, **kwargs): tolerance = kwargs.pop('tolerance', None) fill_value = kwargs.pop('fill_value', np.nan) + # Series.reindex doesn't use / need the axis kwarg + # We pop and ignore it here, to make writing Series/Frame generic code + # easier + kwargs.pop("axis", None) + if kwargs: raise TypeError('reindex() got an unexpected keyword ' 'argument "{0}"'.format(list(kwargs.keys())[0])) @@ -3085,11 +3138,14 @@ def _reindex_multi(self, axes, copy, fill_value): @Appender(_shared_docs['reindex_axis'] % _shared_doc_kwargs) def reindex_axis(self, labels, axis=0, method=None, level=None, copy=True, limit=None, fill_value=np.nan): + msg = ("'.reindex_axis' is deprecated and will be removed in a future " + "version. 
Use '.reindex' instead.") self._consolidate_inplace() axis_name = self._get_axis_name(axis) axis_values = self._get_axis(axis_name) method = missing.clean_reindex_fill_method(method) + warnings.warn(msg, FutureWarning, stacklevel=3) new_index, indexer = axis_values.reindex(labels, method, level, limit=limit) return self._reindex_with_indexers({axis: [new_index, indexer]}, diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py index 9518f17e5f4f1..ccaf90b4482a7 100644 --- a/pandas/core/groupby.py +++ b/pandas/core/groupby.py @@ -901,7 +901,7 @@ def reset_identity(values): result.index.get_indexer_for(ax.values)) result = result.take(indexer, axis=self.axis) else: - result = result.reindex_axis(ax, axis=self.axis) + result = result.reindex(ax, axis=self.axis) elif self.group_keys: diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py index f1a3fe81a4540..654c3510b7cf7 100755 --- a/pandas/core/indexing.py +++ b/pandas/core/indexing.py @@ -368,7 +368,7 @@ def _setitem_with_indexer(self, indexer, value): # so the object is the same index = self.obj._get_axis(i) labels = index.insert(len(index), key) - self.obj._data = self.obj.reindex_axis(labels, i)._data + self.obj._data = self.obj.reindex(labels, axis=i)._data self.obj._maybe_update_cacher(clear=True) self.obj.is_copy = None @@ -1132,7 +1132,7 @@ def _getitem_iterable(self, key, axis=None): if labels.is_unique and Index(keyarr).is_unique: try: - return self.obj.reindex_axis(keyarr, axis=axis) + return self.obj.reindex(keyarr, axis=axis) except AttributeError: # Series diff --git a/pandas/core/internals.py b/pandas/core/internals.py index 689f5521e1ccb..879859309c4f9 100644 --- a/pandas/core/internals.py +++ b/pandas/core/internals.py @@ -3283,8 +3283,8 @@ def apply(self, f, axes=None, filter=None, do_integrity_check=False, for k, obj in aligned_args.items(): axis = getattr(obj, '_info_axis_number', 0) - kwargs[k] = obj.reindex_axis(b_items, axis=axis, - copy=align_copy) + kwargs[k] = obj.reindex(b_items, axis=axis, + copy=align_copy) kwargs['mgr'] = self applied = getattr(b, f)(**kwargs) diff --git a/pandas/core/panel.py b/pandas/core/panel.py index b2f50eaf733d8..1f22cb49d0196 100644 --- a/pandas/core/panel.py +++ b/pandas/core/panel.py @@ -1197,13 +1197,21 @@ def _wrap_result(self, result, axis): return self._construct_return_type(result, axes) @Appender(_shared_docs['reindex'] % _shared_doc_kwargs) - def reindex(self, items=None, major_axis=None, minor_axis=None, **kwargs): + def reindex(self, labels=None, + items=None, major_axis=None, minor_axis=None, + axis=None, **kwargs): major_axis = (major_axis if major_axis is not None else kwargs.pop('major', None)) minor_axis = (minor_axis if minor_axis is not None else kwargs.pop('minor', None)) - return super(Panel, self).reindex(items=items, major_axis=major_axis, - minor_axis=minor_axis, **kwargs) + axes = self._validate_axis_style_args( + labels, 'labels', axes=[items, major_axis, minor_axis], + axis=axis, method_name='reindex') + if self.ndim >= 4: + # Hack for PanelND + axes = {} + kwargs.update(axes) + return super(Panel, self).reindex(**kwargs) @Appender(_shared_docs['rename'] % _shared_doc_kwargs) def rename(self, items=None, major_axis=None, minor_axis=None, **kwargs): diff --git a/pandas/core/panel4d.py b/pandas/core/panel4d.py index 16e7d0dfcc336..e6914fb268359 100644 --- a/pandas/core/panel4d.py +++ b/pandas/core/panel4d.py @@ -57,4 +57,18 @@ def panel4d_init(self, data=None, labels=None, items=None, major_axis=None, dtype=dtype) +def panel4d_reindex(self, labs=None, 
labels=None, items=None, major_axis=None, + minor_axis=None, axis=None, **kwargs): + # Hack for reindex_axis deprecation + # Ha, we used labels for two different things + # I think this will work still. + axes = self._validate_axis_style_args( + labs, 'labels', + axes=[labels, items, major_axis, minor_axis], + axis=axis, method_name='reindex') + kwargs.update(axes) + return super(Panel, self).reindex(**kwargs) + + Panel4D.__init__ = panel4d_init +Panel4D.reindex = panel4d_reindex diff --git a/pandas/core/reshape/pivot.py b/pandas/core/reshape/pivot.py index 38c28af4d6ecb..7ee021e5c6246 100644 --- a/pandas/core/reshape/pivot.py +++ b/pandas/core/reshape/pivot.py @@ -101,14 +101,14 @@ def pivot_table(data, values=None, index=None, columns=None, aggfunc='mean', try: m = MultiIndex.from_arrays(cartesian_product(table.index.levels), names=table.index.names) - table = table.reindex_axis(m, axis=0) + table = table.reindex(m, axis=0) except AttributeError: pass # it's a single level try: m = MultiIndex.from_arrays(cartesian_product(table.columns.levels), names=table.columns.names) - table = table.reindex_axis(m, axis=1) + table = table.reindex(m, axis=1) except AttributeError: pass # it's a single level or a series diff --git a/pandas/core/series.py b/pandas/core/series.py index 93afdc5151b35..8499f8b55d2d0 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -2615,6 +2615,10 @@ def reindex_axis(self, labels, axis=0, **kwargs): """ for compatibility with higher dims """ if axis != 0: raise ValueError("cannot reindex series on non-zero axis!") + msg = ("'.reindex_axis' is deprecated and will be removed in a future " + "version. Use '.reindex' instead.") + warnings.warn(msg, FutureWarning, stacklevel=2) + return self.reindex(index=labels, **kwargs) def memory_usage(self, index=True, deep=False): diff --git a/pandas/core/sparse/scipy_sparse.py b/pandas/core/sparse/scipy_sparse.py index d2b9583d8efe5..748a52f484893 100644 --- a/pandas/core/sparse/scipy_sparse.py +++ b/pandas/core/sparse/scipy_sparse.py @@ -134,5 +134,5 @@ def _coo_to_sparse_series(A, dense_index=False): i = range(A.shape[0]) j = range(A.shape[1]) ind = MultiIndex.from_product([i, j]) - s = s.reindex_axis(ind) + s = s.reindex(ind) return s diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py index ca1b4d031d3ce..39d088e00b219 100644 --- a/pandas/io/pytables.py +++ b/pandas/io/pytables.py @@ -1040,7 +1040,7 @@ def append_to_multiple(self, d, value, selector, data_columns=None, dc = data_columns if k == selector else None # compute the val - val = value.reindex_axis(v, axis=axis) + val = value.reindex(v, axis=axis) self.append(k, val, data_columns=dc, **kwargs) @@ -3493,7 +3493,7 @@ def get_blk_items(mgr, blocks): data_columns = self.validate_data_columns( data_columns, min_itemsize) if len(data_columns): - mgr = block_obj.reindex_axis( + mgr = block_obj.reindex( Index(axis_labels).difference(Index(data_columns)), axis=axis )._data @@ -3501,7 +3501,7 @@ def get_blk_items(mgr, blocks): blocks = list(mgr.blocks) blk_items = get_blk_items(mgr, blocks) for c in data_columns: - mgr = block_obj.reindex_axis([c], axis=axis)._data + mgr = block_obj.reindex([c], axis=axis)._data blocks.extend(mgr.blocks) blk_items.extend(get_blk_items(mgr, mgr.blocks)) diff --git a/pandas/plotting/_core.py b/pandas/plotting/_core.py index c4cd562df7eb3..0d77b5f41a08e 100644 --- a/pandas/plotting/_core.py +++ b/pandas/plotting/_core.py @@ -697,7 +697,7 @@ def _parse_errorbars(self, label, err): from pandas import DataFrame, Series def 
match_labels(data, e): - e = e.reindex_axis(data.index) + e = e.reindex(data.index) return e # key-matched DataFrame diff --git a/pandas/tests/frame/test_alter_axes.py b/pandas/tests/frame/test_alter_axes.py index feb32324ff1b1..84f7dd108f2cb 100644 --- a/pandas/tests/frame/test_alter_axes.py +++ b/pandas/tests/frame/test_alter_axes.py @@ -436,6 +436,25 @@ def test_rename_axis_inplace(self): assert no_return is None assert_frame_equal(result, expected) + def test_rename_axis_warns(self): + # https://github.com/pandas-dev/pandas/issues/17833 + df = pd.DataFrame({"A": [1, 2], "B": [1, 2]}) + with tm.assert_produces_warning(FutureWarning) as w: + df.rename_axis(id, axis=0) + assert 'rename' in str(w[0].message) + + with tm.assert_produces_warning(FutureWarning) as w: + df.rename_axis({0: 10, 1: 20}, axis=0) + assert 'rename' in str(w[0].message) + + with tm.assert_produces_warning(FutureWarning) as w: + df.rename_axis(id, axis=1) + assert 'rename' in str(w[0].message) + + with tm.assert_produces_warning(FutureWarning) as w: + df['A'].rename_axis(id) + assert 'rename' in str(w[0].message) + def test_rename_multiindex(self): tuples_index = [('foo1', 'bar1'), ('foo2', 'bar2')] diff --git a/pandas/tests/frame/test_axis_select_reindex.py b/pandas/tests/frame/test_axis_select_reindex.py index 38ed8ee20bc50..fee0c8b213bd9 100644 --- a/pandas/tests/frame/test_axis_select_reindex.py +++ b/pandas/tests/frame/test_axis_select_reindex.py @@ -418,11 +418,13 @@ def test_reindex_fill_value(self): assert_frame_equal(result, expected) # reindex_axis - result = df.reindex_axis(lrange(15), fill_value=0., axis=0) + with tm.assert_produces_warning(FutureWarning): + result = df.reindex_axis(lrange(15), fill_value=0., axis=0) expected = df.reindex(lrange(15)).fillna(0) assert_frame_equal(result, expected) - result = df.reindex_axis(lrange(5), fill_value=0., axis=1) + with tm.assert_produces_warning(FutureWarning): + result = df.reindex_axis(lrange(5), fill_value=0., axis=1) expected = df.reindex(columns=lrange(5)).fillna(0) assert_frame_equal(result, expected) @@ -1030,12 +1032,16 @@ def test_reindex_corner(self): def test_reindex_axis(self): cols = ['A', 'B', 'E'] - reindexed1 = self.intframe.reindex_axis(cols, axis=1) + with tm.assert_produces_warning(FutureWarning) as m: + reindexed1 = self.intframe.reindex_axis(cols, axis=1) + assert 'reindex' in str(m[0].message) reindexed2 = self.intframe.reindex(columns=cols) assert_frame_equal(reindexed1, reindexed2) rows = self.intframe.index[0:5] - reindexed1 = self.intframe.reindex_axis(rows, axis=0) + with tm.assert_produces_warning(FutureWarning) as m: + reindexed1 = self.intframe.reindex_axis(rows, axis=0) + assert 'reindex' in str(m[0].message) reindexed2 = self.intframe.reindex(index=rows) assert_frame_equal(reindexed1, reindexed2) @@ -1043,7 +1049,9 @@ def test_reindex_axis(self): # no-op case cols = self.frame.columns.copy() - newFrame = self.frame.reindex_axis(cols, axis=1) + with tm.assert_produces_warning(FutureWarning) as m: + newFrame = self.frame.reindex_axis(cols, axis=1) + assert 'reindex' in str(m[0].message) assert_frame_equal(newFrame, self.frame) def test_reindex_with_nans(self): diff --git a/pandas/tests/reshape/test_pivot.py b/pandas/tests/reshape/test_pivot.py index 4126bb1de84d7..90afd2e216045 100644 --- a/pandas/tests/reshape/test_pivot.py +++ b/pandas/tests/reshape/test_pivot.py @@ -116,7 +116,9 @@ def test_pivot_table_dropna_categoricals(self): result_false = df.pivot_table(index='B', columns='A', values='C', dropna=False) - expected_columns = 
Series(['a', 'b', 'c', 'd'], name='A') + expected_columns = ( + Series(['a', 'b', 'c', 'd'], name='A').astype('category') + ) expected_false = DataFrame([[0.0, 3.0, 6.0, np.NaN], [1.0, 4.0, 7.0, np.NaN], [2.0, 5.0, 8.0, np.NaN]], diff --git a/pandas/tests/reshape/test_reshape.py b/pandas/tests/reshape/test_reshape.py index 632d3b4ad2e7a..fc9f89934b4ea 100644 --- a/pandas/tests/reshape/test_reshape.py +++ b/pandas/tests/reshape/test_reshape.py @@ -311,7 +311,7 @@ def test_include_na(self): 'a': {0: 1, 1: 0, 2: 0}, 'b': {0: 0, 1: 1, 2: 0}}, dtype=np.uint8) - exp_na = exp_na.reindex_axis(['a', 'b', nan], 1) + exp_na = exp_na.reindex(['a', 'b', nan], axis=1) # hack (NaN handling in assert_index_equal) exp_na.columns = res_na.columns assert_frame_equal(res_na, exp_na) @@ -542,8 +542,8 @@ def test_basic_drop_first_NA(self): 2: 0}, nan: {0: 0, 1: 0, - 2: 1}}, dtype=np.uint8).reindex_axis( - ['b', nan], 1) + 2: 1}}, dtype=np.uint8).reindex( + ['b', nan], axis=1) assert_frame_equal(res_na, exp_na) res_just_na = get_dummies([nan], dummy_na=True, sparse=self.sparse, diff --git a/pandas/tests/sparse/test_series.py b/pandas/tests/sparse/test_series.py index 7c7399317809f..c218eee921bb1 100644 --- a/pandas/tests/sparse/test_series.py +++ b/pandas/tests/sparse/test_series.py @@ -1414,6 +1414,12 @@ def test_deprecated_numpy_func_call(self): check_stacklevel=False): getattr(getattr(self, series), func)() + def test_deprecated_reindex_axis(self): + # https://github.com/pandas-dev/pandas/issues/17833 + with tm.assert_produces_warning(FutureWarning) as m: + self.bseries.reindex_axis([0, 1, 2]) + assert 'reindex' in str(m[0].message) + @pytest.mark.parametrize( 'datetime_type', (np.datetime64, diff --git a/pandas/tests/test_multilevel.py b/pandas/tests/test_multilevel.py index 94577db15f01a..785be71e236d7 100644 --- a/pandas/tests/test_multilevel.py +++ b/pandas/tests/test_multilevel.py @@ -1805,7 +1805,7 @@ def test_reindex_level_partial_selection(self): expected = self.frame.iloc[[0, 1, 2, 7, 8, 9]] tm.assert_frame_equal(result, expected) - result = self.frame.T.reindex_axis(['foo', 'qux'], axis=1, level=0) + result = self.frame.T.reindex(['foo', 'qux'], axis=1, level=0) tm.assert_frame_equal(result, expected.T) result = self.frame.loc[['foo', 'qux']] diff --git a/pandas/tests/test_panel.py b/pandas/tests/test_panel.py index 2769ec0d2dbed..da30c8c403d41 100644 --- a/pandas/tests/test_panel.py +++ b/pandas/tests/test_panel.py @@ -1444,6 +1444,22 @@ def test_reindex(self): assert_panel_equal(result, self.panel) assert result is self.panel + def test_reindex_axis_style(self): + with catch_warnings(record=True): + panel = Panel(np.random.rand(5, 5, 5)) + expected0 = Panel(panel.values).iloc[[0, 1]] + expected1 = Panel(panel.values).iloc[:, [0, 1]] + expected2 = Panel(panel.values).iloc[:, :, [0, 1]] + + result = panel.reindex([0, 1], axis=0) + assert_panel_equal(result, expected0) + + result = panel.reindex([0, 1], axis=1) + assert_panel_equal(result, expected1) + + result = panel.reindex([0, 1], axis=2) + assert_panel_equal(result, expected2) + def test_reindex_multi(self): with catch_warnings(record=True): diff --git a/pandas/tests/test_resample.py b/pandas/tests/test_resample.py index cd15203eccd82..4e26689badb3c 100644 --- a/pandas/tests/test_resample.py +++ b/pandas/tests/test_resample.py @@ -1422,7 +1422,7 @@ def test_resample_ohlc_dataframe(self): Timestamp('2011-01-06 10:59:05', tz=None): 1500000000, Timestamp('2011-01-06 12:43:33', tz=None): 5000000000, Timestamp('2011-01-06 12:54:09', tz=None): 
100000000}}) - ).reindex_axis(['VOLUME', 'PRICE'], axis=1) + ).reindex(['VOLUME', 'PRICE'], axis=1) res = df.resample('H').ohlc() exp = pd.concat([df['VOLUME'].resample('H').ohlc(), df['PRICE'].resample('H').ohlc()], @@ -1652,7 +1652,7 @@ def test_resample_categorical_data_with_timedeltaindex(self): expected = DataFrame({'Group_obj': ['A', 'A'], 'Group': ['A', 'A']}, index=pd.to_timedelta([0, 10], unit='s')) - expected = expected.reindex_axis(['Group_obj', 'Group'], 1) + expected = expected.reindex(['Group_obj', 'Group'], axis=1) tm.assert_frame_equal(result, expected) def test_resample_daily_anchored(self):
Closes https://github.com/pandas-dev/pandas/issues/17833. Some notes: - the positional argument for `rename_axis` is `mapper`, which isn't really descriptive of what it does now - added a TODO in https://github.com/pandas-dev/pandas/compare/master...TomAugspurger:depr-rename_axis?expand=1#diff-492dc2a862db88ec1f1d80589ce8a7d6R92; that spot will (presumably) break once `reindex_axis` is actually removed. I haven't dug into what's going on there yet; it may turn out to be easy.
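For reference, here is a minimal sketch of the migration this deprecation asks for, assuming a 0.21-era pandas where `reindex_axis` still exists but now warns:

```python
# Hypothetical usage sketch (not part of the PR): the deprecated call
# and its axis-style replacement produce the same result.
import warnings
import pandas as pd

df = pd.DataFrame({'A': [1, 2, 3], 'B': [4, 5, 6]})

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter('always')
    old = df.reindex_axis(['B', 'A'], axis=1)   # now emits FutureWarning
assert any(issubclass(w.category, FutureWarning) for w in caught)

new = df.reindex(['B', 'A'], axis=1)            # preferred spelling
pd.testing.assert_frame_equal(old, new)
```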
https://api.github.com/repos/pandas-dev/pandas/pulls/17842
2017-10-10T19:49:31Z
2017-10-11T15:18:06Z
2017-10-11T15:18:06Z
2017-10-14T15:53:47Z
BUG: merging with a boolean/int categorical column
diff --git a/doc/source/whatsnew/v0.21.0.txt b/doc/source/whatsnew/v0.21.0.txt index f04410ef63531..1ca2a81967949 100644 --- a/doc/source/whatsnew/v0.21.0.txt +++ b/doc/source/whatsnew/v0.21.0.txt @@ -1010,6 +1010,7 @@ Categorical - Bug in :func:`Series.isin` when called with a categorical (:issue:`16639`) - Bug in the categorical constructor with empty values and categories causing the ``.categories`` to be an empty ``Float64Index`` rather than an empty ``Index`` with object dtype (:issue:`17248`) - Bug in categorical operations with :ref:`Series.cat <categorical.cat>` not preserving the original Series' name (:issue:`17509`) +- Bug in :func:`DataFrame.merge` failing for categorical columns with boolean/int data types (:issue:`17187`) PyPy ^^^^ diff --git a/pandas/core/internals.py b/pandas/core/internals.py index 689f5521e1ccb..f6773db8074b2 100644 --- a/pandas/core/internals.py +++ b/pandas/core/internals.py @@ -5494,7 +5494,7 @@ def get_reindexed_values(self, empty_dtype, upcasted_na): # preserve these for validation in _concat_compat return self.block.values - if self.block.is_bool: + if self.block.is_bool and not self.block.is_categorical: # External code requested filling/upcasting, bool values must # be upcasted to object to avoid being upcasted to numeric. values = self.block.astype(np.object_).values diff --git a/pandas/tests/reshape/test_merge.py b/pandas/tests/reshape/test_merge.py index ed99814afd20a..81956c0bd5b28 100644 --- a/pandas/tests/reshape/test_merge.py +++ b/pandas/tests/reshape/test_merge.py @@ -1546,6 +1546,30 @@ def test_dtype_on_categorical_dates(self): result_inner = pd.merge(df, df2, how='inner', on=['date']) assert_frame_equal(result_inner, expected_inner) + @pytest.mark.parametrize('category_column,categories,expected_categories', + [([False, True, True, False], [True, False], + [True, False]), + ([2, 1, 1, 2], [1, 2], [1, 2]), + (['False', 'True', 'True', 'False'], + ['True', 'False'], ['True', 'False'])]) + def test_merging_with_bool_or_int_categorical_column(self, category_column, + categories, + expected_categories): + # GH 17187 + # merging with a boolean/int categorical column + df1 = pd.DataFrame({'id': [1, 2, 3, 4], + 'cat': category_column}) + df1['cat'] = df1['cat'].astype('category', + categories=categories, ordered=True) + df2 = pd.DataFrame({'id': [2, 4], 'num': [1, 9]}) + result = df1.merge(df2) + expected = pd.DataFrame({'id': [2, 4], 'cat': expected_categories, + 'num': [1, 9]}) + expected['cat'] = expected['cat'].astype('category', + categories=categories, + ordered=True) + assert_frame_equal(expected, result) + @pytest.fixture def left_df():
Additional check prevents trying to change types of categorical blocks. - [x] closes #17187 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
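A small reproduction in the spirit of the new test, assuming a post-fix 0.21 build; before this change, merging a frame whose categorical column holds boolean data failed because the block was treated as a plain bool block and upcast:

```python
import pandas as pd

df1 = pd.DataFrame({'id': [1, 2, 3, 4],
                    'cat': [False, True, True, False]})
df1['cat'] = df1['cat'].astype('category')
df2 = pd.DataFrame({'id': [2, 4], 'num': [1, 9]})

result = df1.merge(df2)    # failed for bool/int categories before the fix
assert str(result['cat'].dtype) == 'category'   # dtype is preserved
```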
https://api.github.com/repos/pandas-dev/pandas/pulls/17841
2017-10-10T15:35:23Z
2017-10-14T14:48:51Z
2017-10-14T14:48:51Z
2017-10-14T14:49:03Z
BUG: Fix range dtype in Series/DataFrame constructor on Windows
diff --git a/doc/source/whatsnew/v0.21.0.txt b/doc/source/whatsnew/v0.21.0.txt index a889292cacc99..6b768fddb7037 100644 --- a/doc/source/whatsnew/v0.21.0.txt +++ b/doc/source/whatsnew/v0.21.0.txt @@ -955,6 +955,7 @@ Conversion - Bug in ``Timedelta`` construction and arithmetic that would not propagate the ``Overflow`` exception (:issue:`17367`) - Bug in :meth:`~DataFrame.astype` converting to object dtype when passed extension type classes (`DatetimeTZDtype``, ``CategoricalDtype``) rather than instances. Now a ``TypeError`` is raised when a class is passed (:issue:`17780`). - Bug in :meth:`to_numeric` in which elements were not always being coerced to numeric when ``errors='coerce'`` (:issue:`17007`, :issue:`17125`) +- Bug in ``DataFrame`` and ``Series`` constructors where ``range`` objects are converted to ``int32`` dtype on Windows instead of ``int64`` (:issue:`16804`) Indexing ^^^^^^^^ diff --git a/pandas/compat/__init__.py b/pandas/compat/__init__.py index b367fda002b74..3853ac017044c 100644 --- a/pandas/compat/__init__.py +++ b/pandas/compat/__init__.py @@ -100,6 +100,10 @@ def signature(f): 'varargs', 'keywords']) return argspec(args, defaults, varargs, keywords) + def get_range_parameters(data): + """Gets the start, stop, and step parameters from a range object""" + return data.start, data.stop, data.step + # have to explicitly put builtins into the namespace range = range map = map @@ -146,6 +150,24 @@ def bytes_to_str(b, encoding='ascii'): def signature(f): return inspect.getargspec(f) + def get_range_parameters(data): + """Gets the start, stop, and step parameters from a range object""" + # seems we only have indexing ops to infer + # rather than direct accessors + if len(data) > 1: + step = data[1] - data[0] + stop = data[-1] + step + start = data[0] + elif len(data): + start = data[0] + stop = data[0] + 1 + step = 1 + else: + start = stop = 0 + step = 1 + + return start, stop, step + # import iterator versions of these functions range = xrange intern = intern diff --git a/pandas/core/indexes/range.py b/pandas/core/indexes/range.py index b2e55d4826670..9cb01896424f7 100644 --- a/pandas/core/indexes/range.py +++ b/pandas/core/indexes/range.py @@ -10,7 +10,7 @@ is_int64_dtype) from pandas import compat -from pandas.compat import lrange, range +from pandas.compat import lrange, range, get_range_parameters from pandas.compat.numpy import function as nv from pandas.core.common import _all_none from pandas.core.indexes.base import Index, _index_shared_docs @@ -113,24 +113,7 @@ def from_range(cls, data, name=None, dtype=None, **kwargs): '{0}(...) 
must be called with object coercible to a ' 'range, {1} was passed'.format(cls.__name__, repr(data))) - if compat.PY3: - step = data.step - stop = data.stop - start = data.start - else: - # seems we only have indexing ops to infer - # rather than direct accessors - if len(data) > 1: - step = data[1] - data[0] - stop = data[-1] + step - start = data[0] - elif len(data): - start = data[0] - stop = data[0] + 1 - step = 1 - else: - start = stop = 0 - step = 1 + start, stop, step = get_range_parameters(data) return RangeIndex(start, stop, step, dtype=dtype, name=name, **kwargs) @classmethod diff --git a/pandas/core/series.py b/pandas/core/series.py index dbd91309ed185..1c92c4b8850ee 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -60,7 +60,8 @@ from pandas.core.indexes.period import PeriodIndex from pandas import compat from pandas.io.formats.terminal import get_terminal_size -from pandas.compat import zip, u, OrderedDict, StringIO +from pandas.compat import ( + zip, u, OrderedDict, StringIO, range, get_range_parameters) from pandas.compat.numpy import function as nv from pandas.core import accessor @@ -3177,6 +3178,11 @@ def _try_cast(arr, take_fast_path): subarr = maybe_cast_to_datetime(subarr, dtype) + elif isinstance(data, range): + # GH 16804 + start, stop, step = get_range_parameters(data) + arr = np.arange(start, stop, step, dtype='int64') + subarr = _try_cast(arr, False) else: subarr = _try_cast(data, False) diff --git a/pandas/tests/frame/test_constructors.py b/pandas/tests/frame/test_constructors.py index 7f1cc12ec4277..c55c79ef18602 100644 --- a/pandas/tests/frame/test_constructors.py +++ b/pandas/tests/frame/test_constructors.py @@ -829,7 +829,7 @@ def test_constructor_list_of_lists(self): # GH 4851 # list of 0-dim ndarrays - expected = DataFrame({0: range(10)}) + expected = DataFrame({0: np.arange(10)}) data = [np.array(x) for x in range(10)] result = DataFrame(data) tm.assert_frame_equal(result, expected) @@ -1927,6 +1927,13 @@ def test_to_frame_with_falsey_names(self): result = DataFrame(Series(name=0)).dtypes tm.assert_series_equal(result, expected) + @pytest.mark.parametrize('dtype', [None, 'uint8', 'category']) + def test_constructor_range_dtype(self, dtype): + # GH 16804 + expected = DataFrame({'A': [0, 1, 2, 3, 4]}, dtype=dtype or 'int64') + result = DataFrame({'A': range(5)}, dtype=dtype) + tm.assert_frame_equal(result, expected) + class TestDataFrameConstructorWithDatetimeTZ(TestData): diff --git a/pandas/tests/indexing/test_partial.py b/pandas/tests/indexing/test_partial.py index 41ddfe934a131..0e4957da5478c 100644 --- a/pandas/tests/indexing/test_partial.py +++ b/pandas/tests/indexing/test_partial.py @@ -532,7 +532,7 @@ def f(): def f(): df = DataFrame() - df['foo'] = Series(range(len(df))) + df['foo'] = Series(np.arange(len(df)), dtype='float64') return df tm.assert_frame_equal(f(), expected) diff --git a/pandas/tests/series/test_alter_axes.py b/pandas/tests/series/test_alter_axes.py index f33e19c7f6223..f3be7bb9905f4 100644 --- a/pandas/tests/series/test_alter_axes.py +++ b/pandas/tests/series/test_alter_axes.py @@ -219,7 +219,7 @@ def test_reorder_levels(self): labels=[[0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0]], names=['L0', 'L0', 'L0']) - expected = Series(range(6), index=e_idx) + expected = Series(np.arange(6), index=e_idx) assert_series_equal(result, expected) result = s.reorder_levels(['L0', 'L0', 'L0']) diff --git a/pandas/tests/series/test_constructors.py b/pandas/tests/series/test_constructors.py index 
df7d7a946e881..d296086021349 100644 --- a/pandas/tests/series/test_constructors.py +++ b/pandas/tests/series/test_constructors.py @@ -846,3 +846,10 @@ def test_constructor_generic_timestamp_deprecated(self): msg = "cannot convert datetimelike" with tm.assert_raises_regex(TypeError, msg): Series([], dtype='M8[ps]') + + @pytest.mark.parametrize('dtype', [None, 'uint8', 'category']) + def test_constructor_range_dtype(self, dtype): + # GH 16804 + expected = Series([0, 1, 2, 3, 4], dtype=dtype or 'int64') + result = Series(range(5), dtype=dtype) + tm.assert_series_equal(result, expected) diff --git a/pandas/tests/test_compat.py b/pandas/tests/test_compat.py index ff9d09c033164..ead9ba1e26e2d 100644 --- a/pandas/tests/test_compat.py +++ b/pandas/tests/test_compat.py @@ -3,9 +3,10 @@ Testing that functions from compat work as expected """ +import pytest from pandas.compat import (range, zip, map, filter, lrange, lzip, lmap, lfilter, builtins, iterkeys, itervalues, iteritems, - next) + next, get_range_parameters, PY2) class TestBuiltinIterators(object): @@ -69,3 +70,22 @@ def test_dict_iterators(self): assert next(itervalues({1: 2})) == 2 assert next(iterkeys({1: 2})) == 1 assert next(iteritems({1: 2})) == (1, 2) + + +class TestCompatFunctions(object): + + @pytest.mark.parametrize( + 'start,stop,step', [(0, 10, 2), (11, -2, -1), (0, -5, 1), (2, 4, 8)]) + def test_get_range_parameters(self, start, stop, step): + rng = range(start, stop, step) + if PY2 and len(rng) == 0: + start_expected, stop_expected, step_expected = 0, 0, 1 + elif PY2 and len(rng) == 1: + start_expected, stop_expected, step_expected = start, start + 1, 1 + else: + start_expected, stop_expected, step_expected = start, stop, step + + start_result, stop_result, step_result = get_range_parameters(rng) + assert start_result == start_expected + assert stop_result == stop_expected + assert step_result == step_expected diff --git a/pandas/tests/test_resample.py b/pandas/tests/test_resample.py index 4e26689badb3c..ac8297a53de37 100644 --- a/pandas/tests/test_resample.py +++ b/pandas/tests/test_resample.py @@ -3033,7 +3033,7 @@ def test_nearest(self): result = pd.Series(range(3), index=index).resample('20s').nearest() expected = pd.Series( - np.array([0, 0, 1, 1, 1, 2, 2]), + [0, 0, 1, 1, 1, 2, 2], index=pd.DatetimeIndex( ['2000-01-01 00:00:00', '2000-01-01 00:00:20', '2000-01-01 00:00:40', '2000-01-01 00:01:00', diff --git a/pandas/tests/test_window.py b/pandas/tests/test_window.py index 432350b4849d8..c567613acebd1 100644 --- a/pandas/tests/test_window.py +++ b/pandas/tests/test_window.py @@ -698,8 +698,8 @@ def get_expects(self): return expects def _create_dtype_data(self, dtype): - sr1 = Series(range(5), dtype=dtype) - sr2 = Series(range(10, 0, -2), dtype=dtype) + sr1 = Series(np.arange(5), dtype=dtype) + sr2 = Series(np.arange(10, 0, -2), dtype=dtype) df = DataFrame(np.arange(10).reshape((5, 2)), dtype=dtype) data = {
- [X] closes #16804 - [X] tests added / passed - [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [X] whatsnew entry Had to modify a few existing tests that use `range`, as this caused them to fail due to dtypes mismatch; mostly just involved switching to `np.arange`.
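A quick sketch of the fixed behavior and the new compat helper (the helper name is taken from this diff, so it assumes a build that includes the change):

```python
import numpy as np
import pandas as pd
from pandas.compat import get_range_parameters  # added in this PR

# range input now maps to int64 on all platforms, including Windows,
# where np.arange previously defaulted to int32
s = pd.Series(range(5))
assert s.dtype == np.dtype('int64')

# the helper extracts (start, stop, step) uniformly across Python 2/3
assert get_range_parameters(range(0, 10, 2)) == (0, 10, 2)
```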
https://api.github.com/repos/pandas-dev/pandas/pulls/17840
2017-10-10T14:58:54Z
2017-10-19T10:16:41Z
2017-10-19T10:16:41Z
2017-10-19T15:34:31Z
Ignore floating-point divide errors in _get_join_keys()
diff --git a/doc/source/basics.rst b/doc/source/basics.rst index 0990d2bd15ee6..be9d1a5d83b85 100644 --- a/doc/source/basics.rst +++ b/doc/source/basics.rst @@ -1217,6 +1217,15 @@ following can be done: This means that the reindexed Series's index is the same Python object as the DataFrame's index. +.. versionadded:: 0.21.0 + +:meth:`DataFrame.reindex` also supports an "axis-style" calling convention, +where you specify a single ``labels`` argument and the ``axis`` it applies to. + +.. ipython:: python + + df.reindex(['c', 'f', 'b'], axis='index') + df.reindex(['three', 'two', 'one'], axis='columns') .. seealso:: @@ -1413,12 +1422,23 @@ Series can also be used: .. ipython:: python - df.rename(columns={'one' : 'foo', 'two' : 'bar'}, - index={'a' : 'apple', 'b' : 'banana', 'd' : 'durian'}) + df.rename(columns={'one': 'foo', 'two': 'bar'}, + index={'a': 'apple', 'b': 'banana', 'd': 'durian'}) If the mapping doesn't include a column/index label, it isn't renamed. Also extra labels in the mapping don't throw an error. +.. versionadded:: 0.21.0 + +:meth:`DataFrame.rename` also supports an "axis-style" calling convention, where +you specify a single ``mapper`` and the ``axis`` to apply that mapping to. + +.. ipython:: python + + df.rename({'one': 'foo', 'two': 'bar'}, axis='columns') + df.rename({'a': 'apple', 'b': 'banana', 'd': 'durian'}, axis='index') + + The :meth:`~DataFrame.rename` method also provides an ``inplace`` named parameter that is by default ``False`` and copies the underlying data. Pass ``inplace=True`` to rename the data in place. diff --git a/doc/source/whatsnew/v0.21.0.txt b/doc/source/whatsnew/v0.21.0.txt index 1c4af579d16dc..547d8bdf9317f 100644 --- a/doc/source/whatsnew/v0.21.0.txt +++ b/doc/source/whatsnew/v0.21.0.txt @@ -111,6 +111,40 @@ For example: # the following is now equivalent df.drop(columns=['B', 'C']) +.. _whatsnew_0210.enhancements.rename_reindex_axis: + +``rename``, ``reindex`` now also accept axis keyword +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +The :meth:`DataFrame.rename` and :meth:`DataFrame.reindex` methods have gained +the ``axis`` keyword to specify the axis to target with the operation +(:issue:`12392`). + +Here's ``rename``: + +.. ipython:: python + + df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]}) + df.rename(str.lower, axis='columns') + df.rename(id, axis='index') + +And ``reindex``: + +.. ipython:: python + + df.reindex(['A', 'B', 'C'], axis='columns') + df.reindex([0, 1, 3], axis='index') + +The "index, columns" style continues to work as before. + +.. ipython:: python + + df.rename(index=id, columns=str.lower) + df.reindex(index=[0, 1, 3], columns=['A', 'B', 'C']) + +We *highly* encourage using named arguments to avoid confusion when using either +style. + ..
_whatsnew_0210.enhancements.categorical_dtype: ``CategoricalDtype`` for specifying categoricals @@ -964,6 +998,7 @@ Reshaping - Bug in :func:`unique` where checking a tuple of strings raised a ``TypeError`` (:issue:`17108`) - Bug in :func:`concat` where order of result index was unpredictable if it contained non-comparable elements (:issue:`17344`) - Fixes regression when sorting by multiple columns on a ``datetime64`` dtype ``Series`` with ``NaT`` values (:issue:`16836`) +- Merging two empty dataframes raised a division by zero error (:issue:`17776`) Numeric ^^^^^^^ diff --git a/pandas/core/frame.py b/pandas/core/frame.py index c536cc9f2b82c..94ff70f287fbe 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -65,6 +65,7 @@ _values_from_object, _maybe_box_datetimelike, _dict_compat, + _all_not_none, standardize_mapping) from pandas.core.generic import NDFrame, _shared_docs from pandas.core.index import (Index, MultiIndex, _ensure_index, @@ -111,7 +112,13 @@ optional_by=""" by : str or list of str Name or list of names which refer to the axis items.""", - versionadded_to_excel='') + versionadded_to_excel='', + optional_labels="""labels : array-like, optional + New labels / index to conform the axis specified by 'axis' to.""", + optional_axis="""axis : int or str, optional + Axis to target. Can be either the axis name ('index', 'columns') + or number (0, 1).""", +) _numeric_only_doc = """numeric_only : boolean, default None Include only float, int, boolean data. If None, will attempt to use @@ -2776,6 +2783,47 @@ def reindexer(value): return np.atleast_2d(np.asarray(value)) + def _validate_axis_style_args(self, arg, arg_name, index, columns, + axis, method_name): + if axis is not None: + # Using "axis" style, along with a positional arg + # Both index and columns should be None then + axis = self._get_axis_name(axis) + if index is not None or columns is not None: + msg = ( + "Can't specify both 'axis' and 'index' or 'columns'. " + "Specify either\n" + "\t.{method_name}.rename({arg_name}, axis=axis), or\n" + "\t.{method_name}.rename(index=index, columns=columns)" + ).format(arg_name=arg_name, method_name=method_name) + raise TypeError(msg) + if axis == 'index': + index = arg + elif axis == 'columns': + columns = arg + + elif _all_not_none(arg, index, columns): + msg = ( + "Cannot specify all of '{arg_name}', 'index', and 'columns'. " + "Specify either {arg_name} and 'axis', or 'index' and " + "'columns'." + ).format(arg_name=arg_name) + raise TypeError(msg) + + elif _all_not_none(arg, index): + # This is the "ambiguous" case, so emit a warning + msg = ( + "Interpreting call to '.{method_name}(a, b)' as " + "'.{method_name}(index=a, columns=b)'. " + "Use keyword arguments to remove any ambiguity." 
+ ).format(method_name=method_name) + warnings.warn(msg, stacklevel=3) + index, columns = arg, index + elif index is None: + # This is for the default axis, like reindex([0, 1]) + index = arg + return index, columns + @property def _series(self): result = {} @@ -2902,7 +2950,11 @@ def align(self, other, join='outer', axis=None, level=None, copy=True, broadcast_axis=broadcast_axis) @Appender(_shared_docs['reindex'] % _shared_doc_kwargs) - def reindex(self, index=None, columns=None, **kwargs): + def reindex(self, labels=None, index=None, columns=None, axis=None, + **kwargs): + index, columns = self._validate_axis_style_args(labels, 'labels', + index, columns, + axis, 'reindex') return super(DataFrame, self).reindex(index=index, columns=columns, **kwargs) @@ -2914,8 +2966,84 @@ def reindex_axis(self, labels, axis=0, method=None, level=None, copy=True, method=method, level=level, copy=copy, limit=limit, fill_value=fill_value) - @Appender(_shared_docs['rename'] % _shared_doc_kwargs) - def rename(self, index=None, columns=None, **kwargs): + def rename(self, mapper=None, index=None, columns=None, axis=None, + **kwargs): + """Alter axes labels. + + Function / dict values must be unique (1-to-1). Labels not contained in + a dict / Series will be left as-is. Extra labels listed don't throw an + error. + + See the :ref:`user guide <basics.rename>` for more. + + Parameters + ---------- + mapper, index, columns : dict-like or function, optional + dict-like or functions transformations to apply to + that axis' values. Use either ``mapper`` and ``axis`` to + specify the axis to target with ``mapper``, or ``index`` and + ``columns``. + axis : int or str, optional + Axis to target with ``mapper``. Can be either the axis name + ('index', 'columns') or number (0, 1). The default is 'index'. + copy : boolean, default True + Also copy underlying data + inplace : boolean, default False + Whether to return a new %(klass)s. If True then value of copy is + ignored. + level : int or level name, default None + In case of a MultiIndex, only rename labels in the specified + level. + + Returns + ------- + renamed : DataFrame + + See Also + -------- + pandas.DataFrame.rename_axis + + Examples + -------- + + ``DataFrame.rename`` supports two calling conventions + + * ``(index=index_mapper, columns=columns_mapper, ...) + * ``(mapper, axis={'index', 'columns'}, ...) + + We *highly* recommend using keyword arguments to clarify your + intent. + + >>> df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]}) + >>> df.rename(index=str, columns={"A": "a", "B": "c"}) + a c + 0 1 4 + 1 2 5 + 2 3 6 + + >>> df.rename(index=str, columns={"A": "a", "C": "c"}) + a B + 0 1 4 + 1 2 5 + 2 3 6 + + Using axis-style parameters + + >>> df.rename(str.lower, axis='columns') + a b + 0 1 4 + 1 2 5 + 2 3 6 + + >>> df.rename({1: 2, 2: 4}, axis='index') + A B + 0 1 4 + 2 2 5 + 4 3 6 + """ + index, columns = self._validate_axis_style_args(mapper, 'mapper', + index, columns, + axis, 'rename') return super(DataFrame, self).rename(index=index, columns=columns, **kwargs) diff --git a/pandas/core/generic.py b/pandas/core/generic.py index bc0f10a3f79ab..9d9d8334fcaf4 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -742,11 +742,13 @@ def swaplevel(self, i=-2, j=-1, axis=0): Parameters ---------- + %(optional_mapper)s %(axes)s : scalar, list-like, dict-like or function, optional Scalar or list-like will alter the ``Series.name`` attribute, and raise on DataFrame or Panel. 
dict-like or functions are transformations to apply to that axis' values + %(optional_axis)s copy : boolean, default True Also copy underlying data inplace : boolean, default False @@ -766,6 +768,7 @@ def swaplevel(self, i=-2, j=-1, axis=0): Examples -------- + >>> s = pd.Series([1, 2, 3]) >>> s 0 1 @@ -787,27 +790,58 @@ def swaplevel(self, i=-2, j=-1, axis=0): 3 2 5 3 dtype: int64 + + Since ``DataFrame`` doesn't have a ``.name`` attribute, + only mapping-type arguments are allowed. + >>> df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]}) >>> df.rename(2) Traceback (most recent call last): ... TypeError: 'int' object is not callable + + ``DataFrame.rename`` supports two calling conventions + + * ``(index=index_mapper, columns=columns_mapper, ...) + * ``(mapper, axis={'index', 'columns'}, ...) + + We *highly* recommend using keyword arguments to clarify your + intent. + >>> df.rename(index=str, columns={"A": "a", "B": "c"}) a c 0 1 4 1 2 5 2 3 6 + >>> df.rename(index=str, columns={"A": "a", "C": "c"}) a B 0 1 4 1 2 5 2 3 6 + + Using axis-style parameters + + >>> df.rename(str.lower, axis='columns') + a b + 0 1 4 + 1 2 5 + 2 3 6 + + >>> df.rename({1: 2, 2: 4}, axis='index') + A B + 0 1 4 + 2 2 5 + 4 3 6 + + See the :ref:`user guide <basics.rename>` for more. """ @Appender(_shared_docs['rename'] % dict(axes='axes keywords for this' - ' object', klass='NDFrame')) + ' object', klass='NDFrame', + optional_mapper='', + optional_axis='')) def rename(self, *args, **kwargs): - axes, kwargs = self._construct_axes_from_arguments(args, kwargs) copy = kwargs.pop('copy', True) inplace = kwargs.pop('inplace', False) @@ -886,6 +920,7 @@ def rename_axis(self, mapper, axis=0, copy=True, inplace=False): Examples -------- + >>> df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]}) >>> df.rename_axis("foo") # scalar, alters df.index.name A B @@ -2746,10 +2781,11 @@ def sort_index(self, axis=0, level=None, ascending=True, inplace=False, Parameters ---------- - %(axes)s : array-like, optional (can be specified in order, or as - keywords) + %(optional_labels)s + %(axes)s : array-like, optional (should be specified using keywords) New labels / index to conform to. Preferably an Index object to avoid duplicating data + %(optional_axis)s method : {None, 'backfill'/'bfill', 'pad'/'ffill', 'nearest'}, optional method to use for filling holes in reindexed DataFrame. Please note: this is only applicable to DataFrames/Series with a @@ -2781,6 +2817,14 @@ def sort_index(self, axis=0, level=None, ascending=True, inplace=False, Examples -------- + ``DataFrame.reindex`` supports two calling conventions + + * ``(index=index_labels, columns=column_labels, ...) + * ``(labels, axis={'index', 'columns'}, ...) + + We *highly* recommend using keyword arguments to clarify your + intent. + Create a dataframe with some fictional data. >>> index = ['Firefox', 'Chrome', 'Safari', 'IE10', 'Konqueror'] @@ -2831,6 +2875,26 @@ def sort_index(self, axis=0, level=None, ascending=True, inplace=False, IE10 404 0.08 Chrome 200 0.02 + We can also reindex the columns. 
+ + >>> df.reindex(columns=['http_status', 'user_agent']) + http_status user_agent + Firefox 200 NaN + Chrome 200 NaN + Safari 404 NaN + IE10 404 NaN + Konqueror 301 NaN + + Or we can use "axis-style" keyword arguments + + >>> df.reindex(['http_status', 'user_agent'], axis="columns") + http_status user_agent + Firefox 200 NaN + Chrome 200 NaN + Safari 404 NaN + IE10 404 NaN + Konqueror 301 NaN + To further illustrate the filling functionality in ``reindex``, we will create a dataframe with a monotonically increasing index (for example, a sequence @@ -2893,6 +2957,8 @@ def sort_index(self, axis=0, level=None, ascending=True, inplace=False, desired indexes. If you do want to fill in the ``NaN`` values present in the original dataframe, use the ``fillna()`` method. + See the :ref:`user guide <basics.reindexing>` for more. + Returns ------- reindexed : %(klass)s @@ -2901,7 +2967,9 @@ def sort_index(self, axis=0, level=None, ascending=True, inplace=False, # TODO: Decide if we care about having different examples for different # kinds - @Appender(_shared_docs['reindex'] % dict(axes="axes", klass="NDFrame")) + @Appender(_shared_docs['reindex'] % dict(axes="axes", klass="NDFrame", + optional_labels="", + optional_axis="")) def reindex(self, *args, **kwargs): # construct the args diff --git a/pandas/core/panel.py b/pandas/core/panel.py index 14fba9560cae2..b2f50eaf733d8 100644 --- a/pandas/core/panel.py +++ b/pandas/core/panel.py @@ -39,7 +39,8 @@ _shared_doc_kwargs = dict( axes='items, major_axis, minor_axis', klass="Panel", - axes_single_arg="{0, 1, 2, 'items', 'major_axis', 'minor_axis'}") + axes_single_arg="{0, 1, 2, 'items', 'major_axis', 'minor_axis'}", + optional_mapper='', optional_axis='', optional_labels='') _shared_doc_kwargs['args_transpose'] = ("three positional arguments: each one" "of\n%s" % _shared_doc_kwargs['axes_single_arg']) diff --git a/pandas/core/reshape/merge.py b/pandas/core/reshape/merge.py index 6bb6988a7442a..19bb329f94a30 100644 --- a/pandas/core/reshape/merge.py +++ b/pandas/core/reshape/merge.py @@ -1514,6 +1514,7 @@ def _sort_labels(uniques, left, right): def _get_join_keys(llab, rlab, shape, sort): + np.seterr(divide='ignore') # how many levels can be done without overflow pred = lambda i: not is_int64_overflow_possible(shape[:i]) @@ -1528,7 +1529,7 @@ def _get_join_keys(llab, rlab, shape, sort): stride //= shape[i] lkey += llab[i] * stride rkey += rlab[i] * stride - + np.seterr(divide='warn') if nlev == len(shape): # all done! return lkey, rkey diff --git a/pandas/core/series.py b/pandas/core/series.py index be4066f0c39b9..93afdc5151b35 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -85,7 +85,7 @@ inplace="""inplace : boolean, default False If True, performs operation inplace and returns None.""", unique='np.ndarray', duplicated='Series', - optional_by='', + optional_by='', optional_mapper='', optional_labels='', optional_axis='', versionadded_to_excel='\n .. versionadded:: 0.20.0\n') @@ -2525,8 +2525,67 @@ def align(self, other, join='outer', axis=None, level=None, copy=True, limit=limit, fill_axis=fill_axis, broadcast_axis=broadcast_axis) - @Appender(generic._shared_docs['rename'] % _shared_doc_kwargs) def rename(self, index=None, **kwargs): + """Alter Series index labels or name + + Function / dict values must be unique (1-to-1). Labels not contained in + a dict / Series will be left as-is. Extra labels listed don't throw an + error. + + Alternatively, change ``Series.name`` with a scalar value. 
+ + See the :ref:`user guide <basics.rename>` for more. + + Parameters + ---------- + index : scalar, hashable sequence, dict-like or function, optional + dict-like or functions are transformations to apply to + the index. + Scalar or hashable sequence-like will alter the ``Series.name`` + attribute. + copy : boolean, default True + Also copy underlying data + inplace : boolean, default False + Whether to return a new %(klass)s. If True then value of copy is + ignored. + level : int or level name, default None + In case of a MultiIndex, only rename labels in the specified + level. + + Returns + ------- + renamed : Series (new object) + + See Also + -------- + pandas.Series.rename_axis + + Examples + -------- + + >>> s = pd.Series([1, 2, 3]) + >>> s + 0 1 + 1 2 + 2 3 + dtype: int64 + >>> s.rename("my_name") # scalar, changes Series.name + 0 1 + 1 2 + 2 3 + Name: my_name, dtype: int64 + >>> s.rename(lambda x: x ** 2) # function, changes labels + 0 1 + 1 2 + 4 3 + dtype: int64 + >>> s.rename({1: 3, 2: 5}) # mapping, changes labels + 0 1 + 3 2 + 5 3 + dtype: int64 + + """ kwargs['inplace'] = validate_bool_kwarg(kwargs.get('inplace', False), 'inplace') diff --git a/pandas/core/sparse/series.py b/pandas/core/sparse/series.py index 3255bd6bd17e8..5c76cca08f609 100644 --- a/pandas/core/sparse/series.py +++ b/pandas/core/sparse/series.py @@ -35,7 +35,8 @@ _shared_doc_kwargs = dict(axes='index', klass='SparseSeries', - axes_single_arg="{0, 'index'}") + axes_single_arg="{0, 'index'}", + optional_labels='', optional_axis='') # ----------------------------------------------------------------------------- # Wrapper function for Series arithmetic methods diff --git a/pandas/tests/frame/test_alter_axes.py b/pandas/tests/frame/test_alter_axes.py index 27906838abb2d..feb32324ff1b1 100644 --- a/pandas/tests/frame/test_alter_axes.py +++ b/pandas/tests/frame/test_alter_axes.py @@ -837,6 +837,106 @@ def test_rename_objects(self): assert 'FOO' in renamed assert 'foo' not in renamed + def test_rename_axis_style(self): + # https://github.com/pandas-dev/pandas/issues/12392 + df = pd.DataFrame({"A": [1, 2], "B": [1, 2]}, index=['X', 'Y']) + expected = pd.DataFrame({"a": [1, 2], "b": [1, 2]}, index=['X', 'Y']) + + result = df.rename(str.lower, axis=1) + assert_frame_equal(result, expected) + + result = df.rename(str.lower, axis='columns') + assert_frame_equal(result, expected) + + result = df.rename({"A": 'a', 'B': 'b'}, axis=1) + assert_frame_equal(result, expected) + + result = df.rename({"A": 'a', 'B': 'b'}, axis='columns') + assert_frame_equal(result, expected) + + # Index + expected = pd.DataFrame({"A": [1, 2], "B": [1, 2]}, index=['x', 'y']) + result = df.rename(str.lower, axis=0) + assert_frame_equal(result, expected) + + result = df.rename(str.lower, axis='index') + assert_frame_equal(result, expected) + + result = df.rename({'X': 'x', 'Y': 'y'}, axis=0) + assert_frame_equal(result, expected) + + result = df.rename({'X': 'x', 'Y': 'y'}, axis='index') + assert_frame_equal(result, expected) + + def test_rename_mapper_multi(self): + df = pd.DataFrame({"A": ['a', 'b'], "B": ['c', 'd'], + 'C': [1, 2]}).set_index(["A", "B"]) + result = df.rename(str.upper) + expected = df.rename(index=str.upper) + assert_frame_equal(result, expected) + + def test_rename_positional_named(self): + # https://github.com/pandas-dev/pandas/issues/12392 + df = pd.DataFrame({"a": [1, 2], "b": [1, 2]}, index=['X', 'Y']) + result = df.rename(str.lower, columns=str.upper) + expected = pd.DataFrame({"A": [1, 2], "B": [1, 2]}, index=['x', 'y']) 
+ assert_frame_equal(result, expected) + + def test_rename_axis_style_raises(self): + # https://github.com/pandas-dev/pandas/issues/12392 + df = pd.DataFrame({"A": [1, 2], "B": [1, 2]}, index=['0', '1']) + + # Named target and axis + with tm.assert_raises_regex(TypeError, None): + df.rename(index=str.lower, axis=1) + + with tm.assert_raises_regex(TypeError, None): + df.rename(index=str.lower, axis='columns') + + with tm.assert_raises_regex(TypeError, None): + df.rename(index=str.lower, axis='columns') + + with tm.assert_raises_regex(TypeError, None): + df.rename(columns=str.lower, axis='columns') + + with tm.assert_raises_regex(TypeError, None): + df.rename(index=str.lower, axis=0) + + # Multiple targets and axis + with tm.assert_raises_regex(TypeError, None): + df.rename(str.lower, str.lower, axis='columns') + + # Too many targets + with tm.assert_raises_regex(TypeError, None): + df.rename(str.lower, str.lower, str.lower) + + def test_reindex_api_equivalence(self): + # equivalence of the labels/axis and index/columns API's + df = DataFrame([[1, 2, 3], [3, 4, 5], [5, 6, 7]], + index=['a', 'b', 'c'], + columns=['d', 'e', 'f']) + + res1 = df.reindex(['b', 'a']) + res2 = df.reindex(index=['b', 'a']) + res3 = df.reindex(labels=['b', 'a']) + res4 = df.reindex(labels=['b', 'a'], axis=0) + res5 = df.reindex(['b', 'a'], axis=0) + for res in [res2, res3, res4, res5]: + tm.assert_frame_equal(res1, res) + + res1 = df.reindex(columns=['e', 'd']) + res2 = df.reindex(['e', 'd'], axis=1) + res3 = df.reindex(labels=['e', 'd'], axis=1) + for res in [res2, res3]: + tm.assert_frame_equal(res1, res) + + res1 = df.reindex(index=['b', 'a'], columns=['e', 'd']) + res2 = df.reindex(columns=['e', 'd'], index=['b', 'a']) + res3 = df.reindex(labels=['b', 'a'], axis=0).reindex(labels=['e', 'd'], + axis=1) + for res in [res2, res3]: + tm.assert_frame_equal(res1, res) + def test_assign_columns(self): self.frame['hi'] = 'there' @@ -860,6 +960,14 @@ def test_set_index_preserve_categorical_dtype(self): result = result.reindex(columns=df.columns) tm.assert_frame_equal(result, df) + def test_ambiguous_warns(self): + df = pd.DataFrame({"A": [1, 2]}) + with tm.assert_produces_warning(UserWarning): + df.rename(id, id) + + with tm.assert_produces_warning(UserWarning): + df.rename({0: 10}, {"A": "B"}) + class TestIntervalIndex(object): diff --git a/pandas/tests/frame/test_axis_select_reindex.py b/pandas/tests/frame/test_axis_select_reindex.py index f9a4275d14f55..38ed8ee20bc50 100644 --- a/pandas/tests/frame/test_axis_select_reindex.py +++ b/pandas/tests/frame/test_axis_select_reindex.py @@ -447,6 +447,98 @@ def test_reindex_dups(self): # reindex fails pytest.raises(ValueError, df.reindex, index=list(range(len(df)))) + def test_reindex_axis_style(self): + # https://github.com/pandas-dev/pandas/issues/12392 + df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]}) + expected = pd.DataFrame({"A": [1, 2, np.nan], "B": [4, 5, np.nan]}, + index=[0, 1, 3]) + result = df.reindex([0, 1, 3]) + assert_frame_equal(result, expected) + + result = df.reindex([0, 1, 3], axis=0) + assert_frame_equal(result, expected) + + result = df.reindex([0, 1, 3], axis='index') + assert_frame_equal(result, expected) + + def test_reindex_positional_warns(self): + # https://github.com/pandas-dev/pandas/issues/12392 + df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]}) + expected = pd.DataFrame({"A": [1., 2], 'B': [4., 5], + "C": [np.nan, np.nan]}) + with tm.assert_produces_warning(UserWarning): + result = df.reindex([0, 1], ['A', 'B', 'C']) + + 
assert_frame_equal(result, expected) + + def test_reindex_axis_style_raises(self): + # https://github.com/pandas-dev/pandas/issues/12392 + df = pd.DataFrame({"A": [1, 2, 3], 'B': [4, 5, 6]}) + with tm.assert_raises_regex(TypeError, 'reindex'): + df.reindex([0, 1], ['A'], axis=1) + + with tm.assert_raises_regex(TypeError, 'reindex'): + df.reindex([0, 1], ['A'], axis='index') + + with tm.assert_raises_regex(TypeError, 'reindex'): + df.reindex(index=[0, 1], axis='index') + + with tm.assert_raises_regex(TypeError, 'reindex'): + df.reindex(index=[0, 1], axis='columns') + + with tm.assert_raises_regex(TypeError, 'reindex'): + df.reindex(columns=[0, 1], axis='columns') + + with tm.assert_raises_regex(TypeError, 'reindex'): + df.reindex(index=[0, 1], columns=[0, 1], axis='columns') + + with tm.assert_raises_regex(TypeError, 'Cannot specify all'): + df.reindex([0, 1], [0], ['A']) + + # Mixing styles + with tm.assert_raises_regex(TypeError, 'reindex'): + df.reindex(index=[0, 1], axis='index') + + with tm.assert_raises_regex(TypeError, 'reindex'): + df.reindex(index=[0, 1], axis='columns') + + def test_reindex_single_named_indexer(self): + # https://github.com/pandas-dev/pandas/issues/12392 + df = pd.DataFrame({"A": [1, 2, 3], "B": [1, 2, 3]}) + result = df.reindex([0, 1], columns=['A']) + expected = pd.DataFrame({"A": [1, 2]}) + assert_frame_equal(result, expected) + + def test_reindex_api_equivalence(self): + # https://github.com/pandas-dev/pandas/issues/12392 + # equivalence of the labels/axis and index/columns API's + df = DataFrame([[1, 2, 3], [3, 4, 5], [5, 6, 7]], + index=['a', 'b', 'c'], + columns=['d', 'e', 'f']) + + res1 = df.reindex(['b', 'a']) + res2 = df.reindex(index=['b', 'a']) + res3 = df.reindex(labels=['b', 'a']) + res4 = df.reindex(labels=['b', 'a'], axis=0) + res5 = df.reindex(['b', 'a'], axis=0) + for res in [res2, res3, res4, res5]: + tm.assert_frame_equal(res1, res) + + res1 = df.reindex(columns=['e', 'd']) + res2 = df.reindex(['e', 'd'], axis=1) + res3 = df.reindex(labels=['e', 'd'], axis=1) + for res in [res2, res3]: + tm.assert_frame_equal(res1, res) + + with tm.assert_produces_warning(UserWarning) as m: + res1 = df.reindex(['b', 'a'], ['e', 'd']) + assert 'reindex' in str(m[0].message) + res2 = df.reindex(columns=['e', 'd'], index=['b', 'a']) + res3 = df.reindex(labels=['b', 'a'], axis=0).reindex(labels=['e', 'd'], + axis=1) + for res in [res2, res3]: + tm.assert_frame_equal(res1, res) + def test_align(self): af, bf = self.frame.align(self.frame) assert af._data is not self.frame._data @@ -974,21 +1066,21 @@ def test_reindex_with_nans(self): def test_reindex_multi(self): df = DataFrame(np.random.randn(3, 3)) - result = df.reindex(lrange(4), lrange(4)) + result = df.reindex(index=lrange(4), columns=lrange(4)) expected = df.reindex(lrange(4)).reindex(columns=lrange(4)) assert_frame_equal(result, expected) df = DataFrame(np.random.randint(0, 10, (3, 3))) - result = df.reindex(lrange(4), lrange(4)) + result = df.reindex(index=lrange(4), columns=lrange(4)) expected = df.reindex(lrange(4)).reindex(columns=lrange(4)) assert_frame_equal(result, expected) df = DataFrame(np.random.randint(0, 10, (3, 3))) - result = df.reindex(lrange(2), lrange(2)) + result = df.reindex(index=lrange(2), columns=lrange(2)) expected = df.reindex(lrange(2)).reindex(columns=lrange(2)) assert_frame_equal(result, expected)
- [x] closes #17776 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
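A minimal sketch of the two calling conventions this PR documents, assuming pandas 0.21; the keyword style and the new axis style are interchangeable:

```python
import pandas as pd

df = pd.DataFrame({'A': [1, 2, 3], 'B': [4, 5, 6]})

# keyword style and axis style are equivalent for rename ...
r1 = df.rename(columns=str.lower)
r2 = df.rename(str.lower, axis='columns')
pd.testing.assert_frame_equal(r1, r2)

# ... and for reindex
r3 = df.reindex(columns=['B', 'A'])
r4 = df.reindex(['B', 'A'], axis='columns')
pd.testing.assert_frame_equal(r3, r4)
```

One design note on the `_get_join_keys` hunk: `np.seterr(divide='ignore')` mutates global state and the later call unconditionally restores `'warn'` rather than whatever the caller had set; a `with np.errstate(divide='ignore'):` context manager would scope the suppression to the stride computation and restore the previous setting automatically.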
https://api.github.com/repos/pandas-dev/pandas/pulls/17838
2017-10-10T14:39:06Z
2017-10-11T17:12:22Z
null
2017-10-11T17:12:22Z
TST: cleanup warnings on mpl 2.1
diff --git a/pandas/plotting/_compat.py b/pandas/plotting/_compat.py index 7b04b9e1171ec..d527bc08e2f08 100644 --- a/pandas/plotting/_compat.py +++ b/pandas/plotting/_compat.py @@ -65,3 +65,11 @@ def _mpl_ge_2_0_1(): return matplotlib.__version__ >= LooseVersion('2.0.1') except ImportError: return False + + +def _mpl_ge_2_1_0(): + try: + import matplotlib + return matplotlib.__version__ >= LooseVersion('2.1') + except ImportError: + return False diff --git a/pandas/plotting/_core.py b/pandas/plotting/_core.py index 211d9777e7515..c4cd562df7eb3 100644 --- a/pandas/plotting/_core.py +++ b/pandas/plotting/_core.py @@ -2000,7 +2000,7 @@ def maybe_color_bp(bp): def plot_group(keys, values, ax): keys = [pprint_thing(x) for x in keys] - values = [remove_na_arraylike(v) for v in values] + values = [np.asarray(remove_na_arraylike(v)) for v in values] bp = ax.boxplot(values, **kwds) if fontsize is not None: ax.tick_params(axis='both', labelsize=fontsize) diff --git a/pandas/tests/plotting/test_boxplot_method.py b/pandas/tests/plotting/test_boxplot_method.py index 8fe119d28644c..4b1cb2ccbd3dd 100644 --- a/pandas/tests/plotting/test_boxplot_method.py +++ b/pandas/tests/plotting/test_boxplot_method.py @@ -11,7 +11,6 @@ import numpy as np from numpy import random -from numpy.random import randn import pandas.plotting as plotting @@ -35,8 +34,8 @@ def _skip_if_mpl_14_or_dev_boxplot(): class TestDataFramePlots(TestPlotBase): @pytest.mark.slow - def test_boxplot_legacy(self): - df = DataFrame(randn(6, 4), + def test_boxplot_legacy1(self): + df = DataFrame(np.random.randn(6, 4), index=list(string.ascii_letters[:6]), columns=['one', 'two', 'three', 'four']) df['indic'] = ['foo', 'bar'] * 3 @@ -60,6 +59,8 @@ def test_boxplot_legacy(self): with tm.assert_produces_warning(UserWarning): _check_plot_works(df.boxplot, by='indic', notch=1) + @pytest.mark.slow + def test_boxplot_legacy2(self): df = DataFrame(np.random.rand(10, 2), columns=['Col1', 'Col2']) df['X'] = Series(['A', 'A', 'A', 'A', 'A', 'B', 'B', 'B', 'B', 'B']) df['Y'] = Series(['A'] * 10) @@ -103,7 +104,7 @@ def test_boxplot_return_type_legacy(self): # API change in https://github.com/pandas-dev/pandas/pull/7096 import matplotlib as mpl # noqa - df = DataFrame(randn(6, 4), + df = DataFrame(np.random.randn(6, 4), index=list(string.ascii_letters[:6]), columns=['one', 'two', 'three', 'four']) with pytest.raises(ValueError): @@ -176,7 +177,7 @@ def test_fontsize(self): class TestDataFrameGroupByPlots(TestPlotBase): @pytest.mark.slow - def test_boxplot_legacy(self): + def test_boxplot_legacy1(self): grouped = self.hist_df.groupby(by='gender') with tm.assert_produces_warning(UserWarning): axes = _check_plot_works(grouped.boxplot, return_type='axes') @@ -184,10 +185,12 @@ def test_boxplot_legacy(self): axes = _check_plot_works(grouped.boxplot, subplots=False, return_type='axes') self._check_axes_shape(axes, axes_num=1, layout=(1, 1)) + + @pytest.mark.slow + def test_boxplot_legacy2(self): tuples = lzip(string.ascii_letters[:10], range(10)) df = DataFrame(np.random.rand(10, 3), index=MultiIndex.from_tuples(tuples)) - grouped = df.groupby(level=1) with tm.assert_produces_warning(UserWarning): axes = _check_plot_works(grouped.boxplot, return_type='axes') @@ -197,6 +200,11 @@ def test_boxplot_legacy(self): return_type='axes') self._check_axes_shape(axes, axes_num=1, layout=(1, 1)) + @pytest.mark.slow + def test_boxplot_legacy3(self): + tuples = lzip(string.ascii_letters[:10], range(10)) + df = DataFrame(np.random.rand(10, 3), + 
index=MultiIndex.from_tuples(tuples)) grouped = df.unstack(level=1).groupby(level=0, axis=1) with tm.assert_produces_warning(UserWarning): axes = _check_plot_works(grouped.boxplot, return_type='axes')
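This test-only PR carries no description; the one functional change is the `np.asarray` wrapping in `plot_group`, which avoids matplotlib 2.1 deprecation warnings when a list of Series is handed to `boxplot`. A hedged sketch of the pattern, using `dropna` as a stand-in for pandas' internal `remove_na_arraylike`:

```python
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

df = pd.DataFrame({'a': [1.0, 2.0, np.nan], 'b': [3.0, 4.0, 5.0]})

fig, ax = plt.subplots()
# coerce each column to a plain ndarray, mirroring the patch
values = [np.asarray(df[c].dropna()) for c in df.columns]
ax.boxplot(values)
plt.close(fig)
```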
https://api.github.com/repos/pandas-dev/pandas/pulls/17835
2017-10-10T12:08:42Z
2017-10-10T13:14:54Z
2017-10-10T13:14:54Z
2017-10-11T04:50:39Z
DEPR: Deprecate the convert parameter completely
diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 9d9d8334fcaf4..c7a27ebc65c05 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -2172,6 +2172,7 @@ def _take(self, indices, axis=0, convert=True, is_copy=True): selecting rows, "1" means that we are selecting columns, etc. convert : bool, default True .. deprecated:: 0.21.0 + In the future, negative indices will always be converted. Whether to convert negative indices into positive ones. For example, ``-1`` would map to the ``len(axis) - 1``. @@ -2234,14 +2235,15 @@ class max_speed """ @Appender(_shared_docs['take']) - def take(self, indices, axis=0, convert=True, is_copy=True, **kwargs): - nv.validate_take(tuple(), kwargs) - - if not convert: + def take(self, indices, axis=0, convert=None, is_copy=True, **kwargs): + if convert is not None: msg = ("The 'convert' parameter is deprecated " "and will be removed in a future version.") warnings.warn(msg, FutureWarning, stacklevel=2) + else: + convert = True + convert = nv.validate_take(tuple(), kwargs) return self._take(indices, axis=axis, convert=convert, is_copy=is_copy) def xs(self, key, axis=0, level=None, drop_level=True): diff --git a/pandas/core/sparse/series.py b/pandas/core/sparse/series.py index 5c76cca08f609..17d0737ba7c63 100644 --- a/pandas/core/sparse/series.py +++ b/pandas/core/sparse/series.py @@ -387,7 +387,7 @@ def _ixs(self, i, axis=0): """ label = self.index[i] if isinstance(label, Index): - return self.take(i, axis=axis, convert=True) + return self.take(i, axis=axis) else: return self._get_val_at(i) @@ -629,14 +629,15 @@ def sparse_reindex(self, new_index): fill_value=self.fill_value).__finalize__(self) @Appender(generic._shared_docs['take']) - def take(self, indices, axis=0, convert=True, *args, **kwargs): - convert = nv.validate_take_with_convert(convert, args, kwargs) - - if not convert: + def take(self, indices, axis=0, convert=None, *args, **kwargs): + if convert is not None: msg = ("The 'convert' parameter is deprecated " "and will be removed in a future version.") warnings.warn(msg, FutureWarning, stacklevel=2) + else: + convert = True + nv.validate_take_with_convert(convert, args, kwargs) new_values = SparseArray.take(self.values, indices) new_index = self.index.take(indices) return self._constructor(new_values, diff --git a/pandas/tests/frame/test_axis_select_reindex.py b/pandas/tests/frame/test_axis_select_reindex.py index 38ed8ee20bc50..cb9d22f42d452 100644 --- a/pandas/tests/frame/test_axis_select_reindex.py +++ b/pandas/tests/frame/test_axis_select_reindex.py @@ -944,6 +944,10 @@ def test_take(self): expected = df.reindex(df.index.take(order)) assert_frame_equal(result, expected) + with tm.assert_produces_warning(FutureWarning): + result = df.take(order, convert=True, axis=0) + assert_frame_equal(result, expected) + with tm.assert_produces_warning(FutureWarning): result = df.take(order, convert=False, axis=0) assert_frame_equal(result, expected) diff --git a/pandas/tests/sparse/test_series.py b/pandas/tests/sparse/test_series.py index 7c7399317809f..f64b7e1381158 100644 --- a/pandas/tests/sparse/test_series.py +++ b/pandas/tests/sparse/test_series.py @@ -528,6 +528,9 @@ def _compare(idx): exp = pd.Series(np.repeat(nan, 5)) tm.assert_series_equal(sp.take([0, 1, 2, 3, 4]), exp) + with tm.assert_produces_warning(FutureWarning): + sp.take([1, 5], convert=True) + with tm.assert_produces_warning(FutureWarning): sp.take([1, 5], convert=False) @@ -535,21 +538,17 @@ def test_numpy_take(self): sp = SparseSeries([1.0, 2.0, 3.0]) indices = 
[1, 2] - # gh-17352: older versions of numpy don't properly - # pass in arguments to downstream .take() implementations. - warning = FutureWarning if _np_version_under1p12 else None - - with tm.assert_produces_warning(warning, check_stacklevel=False): + if not _np_version_under1p12: tm.assert_series_equal(np.take(sp, indices, axis=0).to_dense(), np.take(sp.to_dense(), indices, axis=0)) - msg = "the 'out' parameter is not supported" - tm.assert_raises_regex(ValueError, msg, np.take, - sp, indices, out=np.empty(sp.shape)) + msg = "the 'out' parameter is not supported" + tm.assert_raises_regex(ValueError, msg, np.take, + sp, indices, out=np.empty(sp.shape)) - msg = "the 'mode' parameter is not supported" - tm.assert_raises_regex(ValueError, msg, np.take, - sp, indices, mode='clip') + msg = "the 'mode' parameter is not supported" + tm.assert_raises_regex(ValueError, msg, np.take, + sp, indices, out=None, mode='clip') def test_setitem(self): self.bseries[5] = 7.
Previously, we weren't issuing a deprecation warning if the user happened to pass in the original default of `True`, which left downstream code poised to break without notice once the parameter is removed. Closes #17828
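A minimal sketch of the warning behavior after this change (assumes a pandas version in which the `convert` deprecation is active; the parameter was slated for removal in a later release):

```python
import warnings
import pandas as pd

df = pd.DataFrame({"a": [1, 2, 3]})

with warnings.catch_warnings(record=True) as w:
    warnings.simplefilter("always")
    # Passing convert at all, even the old default of True, now warns.
    df.take([2, 0], convert=True)
    assert any(issubclass(x.category, FutureWarning) for x in w)

# Omitting the parameter stays silent.
result = df.take([2, 0])
```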
https://api.github.com/repos/pandas-dev/pandas/pulls/17831
2017-10-10T05:11:05Z
2017-10-12T21:04:35Z
2017-10-12T21:04:34Z
2017-10-13T01:29:14Z
Start porting offsets to cython
diff --git a/doc/source/whatsnew/v0.22.0.txt b/doc/source/whatsnew/v0.22.0.txt index e85ba505887b4..cbd094ec4ef49 100644 --- a/doc/source/whatsnew/v0.22.0.txt +++ b/doc/source/whatsnew/v0.22.0.txt @@ -42,7 +42,7 @@ Other API Changes - ``NaT`` division with :class:`datetime.timedelta` will now return ``NaN`` instead of raising (:issue:`17876`) - :class:`Timestamp` will no longer silently ignore unused or invalid `tz` or `tzinfo` arguments (:issue:`17690`) -- +- :class:`CacheableOffset` and :class:`WeekDay` are no longer available in the `tseries.offsets` module (:issue:`17830`) - .. _whatsnew_0220.deprecations: diff --git a/pandas/_libs/tslibs/offsets.pyx b/pandas/_libs/tslibs/offsets.pyx new file mode 100644 index 0000000000000..9959b053707c7 --- /dev/null +++ b/pandas/_libs/tslibs/offsets.pyx @@ -0,0 +1,208 @@ +# -*- coding: utf-8 -*- +# cython: profile=False + +cimport cython + +import time +from cpython.datetime cimport time as dt_time + +import numpy as np +cimport numpy as np +np.import_array() + + +from util cimport is_string_object + + +from pandas._libs.tslib import pydt_to_i8, tz_convert_single + +# --------------------------------------------------------------------- +# Constants + +# Duplicated in tslib +_MONTHS = ['JAN', 'FEB', 'MAR', 'APR', 'MAY', 'JUN', 'JUL', + 'AUG', 'SEP', 'OCT', 'NOV', 'DEC'] +_int_to_month = {(k + 1): v for k, v in enumerate(_MONTHS)} +_month_to_int = dict((v, k) for k, v in _int_to_month.items()) + + +class WeekDay(object): + MON = 0 + TUE = 1 + WED = 2 + THU = 3 + FRI = 4 + SAT = 5 + SUN = 6 + + +_int_to_weekday = { + WeekDay.MON: 'MON', + WeekDay.TUE: 'TUE', + WeekDay.WED: 'WED', + WeekDay.THU: 'THU', + WeekDay.FRI: 'FRI', + WeekDay.SAT: 'SAT', + WeekDay.SUN: 'SUN'} + +_weekday_to_int = {_int_to_weekday[key]: key for key in _int_to_weekday} + + +_offset_to_period_map = { + 'WEEKDAY': 'D', + 'EOM': 'M', + 'BM': 'M', + 'BQS': 'Q', + 'QS': 'Q', + 'BQ': 'Q', + 'BA': 'A', + 'AS': 'A', + 'BAS': 'A', + 'MS': 'M', + 'D': 'D', + 'C': 'C', + 'B': 'B', + 'T': 'T', + 'S': 'S', + 'L': 'L', + 'U': 'U', + 'N': 'N', + 'H': 'H', + 'Q': 'Q', + 'A': 'A', + 'W': 'W', + 'M': 'M', + 'Y': 'A', + 'BY': 'A', + 'YS': 'A', + 'BYS': 'A'} + +need_suffix = ['QS', 'BQ', 'BQS', 'YS', 'AS', 'BY', 'BA', 'BYS', 'BAS'] + + +for __prefix in need_suffix: + for _m in _MONTHS: + key = '%s-%s' % (__prefix, _m) + _offset_to_period_map[key] = _offset_to_period_map[__prefix] + +for __prefix in ['A', 'Q']: + for _m in _MONTHS: + _alias = '%s-%s' % (__prefix, _m) + _offset_to_period_map[_alias] = _alias + +_days = ['MON', 'TUE', 'WED', 'THU', 'FRI', 'SAT', 'SUN'] +for _d in _days: + _offset_to_period_map['W-%s' % _d] = 'W-%s' % _d + + +# --------------------------------------------------------------------- +# Misc Helpers + +def as_datetime(obj): + f = getattr(obj, 'to_pydatetime', None) + if f is not None: + obj = f() + return obj + + +def _is_normalized(dt): + if (dt.hour != 0 or dt.minute != 0 or dt.second != 0 or + dt.microsecond != 0 or getattr(dt, 'nanosecond', 0) != 0): + return False + return True + + +# --------------------------------------------------------------------- +# Business Helpers + +def _get_firstbday(wkday): + """ + wkday is the result of monthrange(year, month) + + If it's a saturday or sunday, increment first business day to reflect this + """ + first = 1 + if wkday == 5: # on Saturday + first = 3 + elif wkday == 6: # on Sunday + first = 2 + return first + + +def _get_calendar(weekmask, holidays, calendar): + """Generate busdaycalendar""" + if isinstance(calendar, 
np.busdaycalendar): + if not holidays: + holidays = tuple(calendar.holidays) + elif not isinstance(holidays, tuple): + holidays = tuple(holidays) + else: + # trust that calendar.holidays and holidays are + # consistent + pass + return calendar, holidays + + if holidays is None: + holidays = [] + try: + holidays = holidays + calendar.holidays().tolist() + except AttributeError: + pass + holidays = [_to_dt64(dt, dtype='datetime64[D]') for dt in holidays] + holidays = tuple(sorted(holidays)) + + kwargs = {'weekmask': weekmask} + if holidays: + kwargs['holidays'] = holidays + + busdaycalendar = np.busdaycalendar(**kwargs) + return busdaycalendar, holidays + + +def _to_dt64(dt, dtype='datetime64'): + # Currently + # > np.datetime64(dt.datetime(2013,5,1),dtype='datetime64[D]') + # numpy.datetime64('2013-05-01T02:00:00.000000+0200') + # Thus astype is needed to cast datetime to datetime64[D] + if getattr(dt, 'tzinfo', None) is not None: + i8 = pydt_to_i8(dt) + dt = tz_convert_single(i8, 'UTC', dt.tzinfo) + dt = np.int64(dt).astype('datetime64[ns]') + else: + dt = np.datetime64(dt) + if dt.dtype.name != dtype: + dt = dt.astype(dtype) + return dt + + +# --------------------------------------------------------------------- +# Validation + + +def _validate_business_time(t_input): + if is_string_object(t_input): + try: + t = time.strptime(t_input, '%H:%M') + return dt_time(hour=t.tm_hour, minute=t.tm_min) + except ValueError: + raise ValueError("time data must match '%H:%M' format") + elif isinstance(t_input, dt_time): + if t_input.second != 0 or t_input.microsecond != 0: + raise ValueError( + "time data must be specified only with hour and minute") + return t_input + else: + raise ValueError("time data must be string or datetime.time") + +# --------------------------------------------------------------------- +# Mixins & Singletons + + +class ApplyTypeError(TypeError): + # sentinel class for catching the apply error to return NotImplemented + pass + + +# TODO: unused. remove? 
+class CacheableOffset(object): + _cacheable = True diff --git a/pandas/tests/tseries/test_offsets.py b/pandas/tests/tseries/test_offsets.py index c0e682c978610..4fd3bba01602f 100644 --- a/pandas/tests/tseries/test_offsets.py +++ b/pandas/tests/tseries/test_offsets.py @@ -17,9 +17,10 @@ get_offset, get_standard_freq) from pandas.core.indexes.datetimes import ( _to_m8, DatetimeIndex, _daterange_cache) +from pandas._libs.tslibs.offsets import WeekDay, CacheableOffset from pandas.tseries.offsets import (BDay, CDay, BQuarterEnd, BMonthEnd, BusinessHour, WeekOfMonth, CBMonthEnd, - CustomBusinessHour, WeekDay, + CustomBusinessHour, CBMonthBegin, BYearEnd, MonthEnd, MonthBegin, SemiMonthBegin, SemiMonthEnd, BYearBegin, QuarterBegin, BQuarterBegin, @@ -27,7 +28,7 @@ YearEnd, Hour, Minute, Second, Day, Micro, QuarterEnd, BusinessMonthEnd, FY5253, Milli, Nano, Easter, FY5253Quarter, - LastWeekOfMonth, CacheableOffset) + LastWeekOfMonth) from pandas.core.tools.datetimes import ( format, ole2datetime, parse_time_string, to_datetime, DateParseError) diff --git a/pandas/tseries/frequencies.py b/pandas/tseries/frequencies.py index b055c4b4cb27f..763e6547ea2cb 100644 --- a/pandas/tseries/frequencies.py +++ b/pandas/tseries/frequencies.py @@ -312,7 +312,7 @@ def _get_freq_str(base, mult=1): # --------------------------------------------------------------------- # Offset names ("time rules") and related functions - +from pandas._libs.tslibs.offsets import _offset_to_period_map from pandas.tseries.offsets import (Nano, Micro, Milli, Second, # noqa Minute, Hour, Day, BDay, CDay, Week, MonthBegin, @@ -328,51 +328,6 @@ def _get_freq_str(base, mult=1): #: cache of previously seen offsets _offset_map = {} -_offset_to_period_map = { - 'WEEKDAY': 'D', - 'EOM': 'M', - 'BM': 'M', - 'BQS': 'Q', - 'QS': 'Q', - 'BQ': 'Q', - 'BA': 'A', - 'AS': 'A', - 'BAS': 'A', - 'MS': 'M', - 'D': 'D', - 'C': 'C', - 'B': 'B', - 'T': 'T', - 'S': 'S', - 'L': 'L', - 'U': 'U', - 'N': 'N', - 'H': 'H', - 'Q': 'Q', - 'A': 'A', - 'W': 'W', - 'M': 'M', - 'Y': 'A', - 'BY': 'A', - 'YS': 'A', - 'BYS': 'A', -} - -need_suffix = ['QS', 'BQ', 'BQS', 'YS', 'AS', 'BY', 'BA', 'BYS', 'BAS'] -for __prefix in need_suffix: - for _m in tslib._MONTHS: - _alias = '{prefix}-{month}'.format(prefix=__prefix, month=_m) - _offset_to_period_map[_alias] = _offset_to_period_map[__prefix] -for __prefix in ['A', 'Q']: - for _m in tslib._MONTHS: - _alias = '{prefix}-{month}'.format(prefix=__prefix, month=_m) - _offset_to_period_map[_alias] = _alias - -_days = ['MON', 'TUE', 'WED', 'THU', 'FRI', 'SAT', 'SUN'] -for _d in _days: - _alias = 'W-{day}'.format(day=_d) - _offset_to_period_map[_alias] = _alias - def get_period_alias(offset_str): """ alias to closest period strings BQ->Q etc""" diff --git a/pandas/tseries/offsets.py b/pandas/tseries/offsets.py index c65691618e654..88c878c9cb9a6 100644 --- a/pandas/tseries/offsets.py +++ b/pandas/tseries/offsets.py @@ -14,6 +14,13 @@ from pandas._libs import tslib, Timestamp, OutOfBoundsDatetime, Timedelta from pandas.util._decorators import cache_readonly +from pandas._libs.tslib import _delta_to_nanoseconds +from pandas._libs.tslibs.offsets import ( + ApplyTypeError, + as_datetime, _is_normalized, + _get_firstbday, _get_calendar, _to_dt64, _validate_business_time, + _int_to_weekday, _weekday_to_int) + import functools import operator @@ -43,13 +50,6 @@ def as_timestamp(obj): return obj -def as_datetime(obj): - f = getattr(obj, 'to_pydatetime', None) - if f is not None: - obj = f() - return obj - - def apply_wraps(func): 
@functools.wraps(func) def wrapper(self, other): @@ -115,25 +115,10 @@ def wrapper(self, other): return wrapper -def _is_normalized(dt): - if (dt.hour != 0 or dt.minute != 0 or dt.second != 0 or - dt.microsecond != 0 or getattr(dt, 'nanosecond', 0) != 0): - return False - return True - # --------------------------------------------------------------------- # DateOffset -class ApplyTypeError(TypeError): - # sentinel class for catching the apply error to return NotImplemented - pass - - -class CacheableOffset(object): - _cacheable = True - - class DateOffset(object): """ Standard kind of date increment used for a date range. @@ -697,28 +682,11 @@ class BusinessHourMixin(BusinessMixin): def __init__(self, start='09:00', end='17:00', offset=timedelta(0)): # must be validated here to equality check kwds = {'offset': offset} - self.start = kwds['start'] = self._validate_time(start) - self.end = kwds['end'] = self._validate_time(end) + self.start = kwds['start'] = _validate_business_time(start) + self.end = kwds['end'] = _validate_business_time(end) self.kwds = kwds self._offset = offset - def _validate_time(self, t_input): - from datetime import time as dt_time - import time - if isinstance(t_input, compat.string_types): - try: - t = time.strptime(t_input, '%H:%M') - return dt_time(hour=t.tm_hour, minute=t.tm_min) - except ValueError: - raise ValueError("time data must match '%H:%M' format") - elif isinstance(t_input, dt_time): - if t_input.second != 0 or t_input.microsecond != 0: - raise ValueError( - "time data must be specified only with hour and minute") - return t_input - else: - raise ValueError("time data must be string or datetime.time") - def _get_daytime_flag(self): if self.start == self.end: raise ValueError('start and end must not be the same') @@ -1617,29 +1585,6 @@ def _from_name(cls, suffix=None): return cls(weekday=weekday) -class WeekDay(object): - MON = 0 - TUE = 1 - WED = 2 - THU = 3 - FRI = 4 - SAT = 5 - SUN = 6 - - -_int_to_weekday = { - WeekDay.MON: 'MON', - WeekDay.TUE: 'TUE', - WeekDay.WED: 'WED', - WeekDay.THU: 'THU', - WeekDay.FRI: 'FRI', - WeekDay.SAT: 'SAT', - WeekDay.SUN: 'SUN' -} - -_weekday_to_int = dict((v, k) for k, v in _int_to_weekday.items()) - - class WeekOfMonth(DateOffset): """ Describes monthly dates like "the Tuesday of the 2nd week of each month" @@ -2802,9 +2747,6 @@ def _delta_to_tick(delta): return Nano(nanos) -_delta_to_nanoseconds = tslib._delta_to_nanoseconds - - class Day(Tick): _inc = Timedelta(days=1) _prefix = 'D' @@ -2848,66 +2790,6 @@ class Nano(Tick): CDay = CustomBusinessDay # --------------------------------------------------------------------- -# Business Calendar helpers - - -def _get_calendar(weekmask, holidays, calendar): - """Generate busdaycalendar""" - if isinstance(calendar, np.busdaycalendar): - if not holidays: - holidays = tuple(calendar.holidays) - elif not isinstance(holidays, tuple): - holidays = tuple(holidays) - else: - # trust that calendar.holidays and holidays are - # consistent - pass - return calendar, holidays - - if holidays is None: - holidays = [] - try: - holidays = holidays + calendar.holidays().tolist() - except AttributeError: - pass - holidays = [_to_dt64(dt, dtype='datetime64[D]') for dt in holidays] - holidays = tuple(sorted(holidays)) - - kwargs = {'weekmask': weekmask} - if holidays: - kwargs['holidays'] = holidays - - busdaycalendar = np.busdaycalendar(**kwargs) - return busdaycalendar, holidays - - -def _to_dt64(dt, dtype='datetime64'): - # Currently - # > 
np.datetime64(dt.datetime(2013,5,1),dtype='datetime64[D]') - # numpy.datetime64('2013-05-01T02:00:00.000000+0200') - # Thus astype is needed to cast datetime to datetime64[D] - if getattr(dt, 'tzinfo', None) is not None: - i8 = tslib.pydt_to_i8(dt) - dt = tslib.tz_convert_single(i8, 'UTC', dt.tzinfo) - dt = Timestamp(dt) - dt = np.datetime64(dt) - if dt.dtype.name != dtype: - dt = dt.astype(dtype) - return dt - - -def _get_firstbday(wkday): - """ - wkday is the result of monthrange(year, month) - - If it's a saturday or sunday, increment first business day to reflect this - """ - first = 1 - if wkday == 5: # on Saturday - first = 3 - elif wkday == 6: # on Sunday - first = 2 - return first def generate_range(start=None, end=None, periods=None, diff --git a/setup.py b/setup.py index 5b9b13ee97acf..8b3ae40f01a10 100755 --- a/setup.py +++ b/setup.py @@ -347,6 +347,7 @@ class CheckSDist(sdist_class): 'pandas/_libs/tslibs/timedeltas.pyx', 'pandas/_libs/tslibs/timezones.pyx', 'pandas/_libs/tslibs/fields.pyx', + 'pandas/_libs/tslibs/offsets.pyx', 'pandas/_libs/tslibs/frequencies.pyx', 'pandas/_libs/tslibs/parsing.pyx', 'pandas/io/sas/sas.pyx'] @@ -489,6 +490,7 @@ def pxd(name): '_libs.tslibs.strptime': {'pyxfile': '_libs/tslibs/strptime', 'depends': tseries_depends, 'sources': npdt_srces}, + '_libs.tslibs.offsets': {'pyxfile': '_libs/tslibs/offsets'}, '_libs.tslib': {'pyxfile': '_libs/tslib', 'pxdfiles': ['_libs/src/util', '_libs/lib'], 'depends': tseries_depends,
This is _mostly_ cut/paste, avoiding moving any of the classes for the time being. There is a small change in `_to_dt64` that will be described in a comment below. Also defined `dtstrings` in `setup.py` as a flake8 cleanup.
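As the updated test imports above show, `WeekDay` and `CacheableOffset` now live in the private `pandas._libs.tslibs.offsets` module rather than the public `pandas.tseries.offsets` namespace. A rough sketch of the import change (private location, so subject to further moves):

```python
# Old import, no longer available after this PR:
# from pandas.tseries.offsets import WeekDay, CacheableOffset

# New (private) location:
from pandas._libs.tslibs.offsets import WeekDay, CacheableOffset

assert WeekDay.MON == 0 and WeekDay.SUN == 6
```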
https://api.github.com/repos/pandas-dev/pandas/pulls/17830
2017-10-10T01:33:29Z
2017-10-28T18:53:13Z
2017-10-28T18:53:13Z
2017-11-10T16:06:31Z
DOC: slice_indexer correction + examples
diff --git a/doc/source/advanced.rst b/doc/source/advanced.rst index 44358593793bc..db1780e88baef 100644 --- a/doc/source/advanced.rst +++ b/doc/source/advanced.rst @@ -833,6 +833,8 @@ Of course if you need integer based selection, then use ``iloc`` IntervalIndex ~~~~~~~~~~~~~ +.. versionadded:: 0.20.0 + :class:`IntervalIndex` together with its own dtype, ``interval`` as well as the :class:`Interval` scalar type, allow first-class support in pandas for interval notation. @@ -840,8 +842,6 @@ notation. The ``IntervalIndex`` allows some unique indexing and is also used as a return type for the categories in :func:`cut` and :func:`qcut`. -.. versionadded:: 0.20.0 - .. warning:: These indexing behaviors are provisional and may change in a future version of pandas. @@ -862,7 +862,7 @@ selecting that particular interval. df.loc[2] df.loc[[2, 3]] -If you select a lable *contained* within an interval, this will also select the interval. +If you select a label *contained* within an interval, this will also select the interval. .. ipython:: python diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index a995fc10a6674..7b59a9fd33f6d 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -3430,8 +3430,8 @@ def _get_string_slice(self, key, use_lhs=True, use_rhs=True): def slice_indexer(self, start=None, end=None, step=None, kind=None): """ - For an ordered Index, compute the slice indexer for input labels and - step + For an ordered or unique index, compute the slice indexer for input + labels and step. Parameters ---------- @@ -3444,11 +3444,28 @@ def slice_indexer(self, start=None, end=None, step=None, kind=None): Returns ------- - indexer : ndarray or slice + indexer : slice + + Raises + ------ + KeyError : If key does not exist, or key is not unique and index is + not ordered. Notes ----- This function assumes that the data is sorted, so use at your own peril + + Examples + --------- + This is a method on all index types. For example you can do: + + >>> idx = pd.Index(list('abcd')) + >>> idx.slice_indexer(start='b', end='c') + slice(1, 3) + + >>> idx = pd.MultiIndex.from_arrays([list('abcd'), list('efgh')]) + >>> idx.slice_indexer(start='b', end=('c', 'g')) + slice(1, 3) """ start_slice, end_slice = self.slice_locs(start, end, step=step, kind=kind)
The stated return value of ``Index.slice_indexer`` in the docstring is wrong; the method can only return slices. Also added some examples. In addition, some very minor corrections to the user guide text on ``IntervalIndex`` (moved the versionadded directive to the top of the section + a spelling correction). EDIT: these are not related to the ``.slice_indexer`` changes, but are attached to this PR because they are very small.
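The examples added to the docstring can be run as-is and show that the return type is always a slice:

```python
import pandas as pd

idx = pd.Index(list('abcd'))
# Always a slice (or a KeyError), never an ndarray.
assert idx.slice_indexer(start='b', end='c') == slice(1, 3)

midx = pd.MultiIndex.from_arrays([list('abcd'), list('efgh')])
assert midx.slice_indexer(start='b', end=('c', 'g')) == slice(1, 3)
```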
https://api.github.com/repos/pandas-dev/pandas/pulls/17829
2017-10-09T22:13:15Z
2017-10-30T08:31:05Z
2017-10-30T08:31:05Z
2017-11-06T21:46:42Z
Move timedelta-specific functions to tslibs.timedeltas
diff --git a/pandas/_libs/tslib.pyx b/pandas/_libs/tslib.pyx index 62224d75db37f..a0aae6a5de707 100644 --- a/pandas/_libs/tslib.pyx +++ b/pandas/_libs/tslib.pyx @@ -83,6 +83,7 @@ PyDateTime_IMPORT cdef int64_t NPY_NAT = util.get_nat() iNaT = NPY_NAT +from tslibs.timedeltas cimport parse_timedelta_string, cast_from_unit from tslibs.timezones cimport ( is_utc, is_tzlocal, is_fixed_offset, treat_tz_as_dateutil, treat_tz_as_pytz, @@ -3083,239 +3084,6 @@ cpdef array_to_timedelta64(ndarray[object] values, unit='ns', errors='raise'): return iresult -cdef dict timedelta_abbrevs = { 'D': 'd', - 'd': 'd', - 'days': 'd', - 'day': 'd', - 'hours': 'h', - 'hour': 'h', - 'hr': 'h', - 'h': 'h', - 'm': 'm', - 'minute': 'm', - 'min': 'm', - 'minutes': 'm', - 's': 's', - 'seconds': 's', - 'sec': 's', - 'second': 's', - 'ms': 'ms', - 'milliseconds': 'ms', - 'millisecond': 'ms', - 'milli': 'ms', - 'millis': 'ms', - 'us': 'us', - 'microseconds': 'us', - 'microsecond': 'us', - 'micro': 'us', - 'micros': 'us', - 'ns': 'ns', - 'nanoseconds': 'ns', - 'nano': 'ns', - 'nanos': 'ns', - 'nanosecond': 'ns', - } -timedelta_abbrevs_map = timedelta_abbrevs - -cdef inline int64_t timedelta_as_neg(int64_t value, bint neg): - """ - - Parameters - ---------- - value : int64_t of the timedelta value - neg : boolean if the a negative value - """ - if neg: - return -value - return value - -cdef inline timedelta_from_spec(object number, object frac, object unit): - """ - - Parameters - ---------- - number : a list of number digits - frac : a list of frac digits - unit : a list of unit characters - """ - cdef object n - - try: - unit = ''.join(unit) - unit = timedelta_abbrevs[unit.lower()] - except KeyError: - raise ValueError("invalid abbreviation: {0}".format(unit)) - - n = ''.join(number) + '.' + ''.join(frac) - return cast_from_unit(float(n), unit) - -cdef inline parse_timedelta_string(object ts): - """ - Parse a regular format timedelta string. Return an int64_t (in ns) - or raise a ValueError on an invalid parse. - """ - - cdef: - unicode c - bint neg=0, have_dot=0, have_value=0, have_hhmmss=0 - object current_unit=None - int64_t result=0, m=0, r - list number=[], frac=[], unit=[] - - # neg : tracks if we have a leading negative for the value - # have_dot : tracks if we are processing a dot (either post hhmmss or - # inside an expression) - # have_value : track if we have at least 1 leading unit - # have_hhmmss : tracks if we have a regular format hh:mm:ss - - if len(ts) == 0 or ts in _nat_strings: - return NPY_NAT - - # decode ts if necessary - if not PyUnicode_Check(ts) and not PY3: - ts = str(ts).decode('utf-8') - - for c in ts: - - # skip whitespace / commas - if c == ' ' or c == ',': - pass - - # positive signs are ignored - elif c == '+': - pass - - # neg - elif c == '-': - - if neg or have_value or have_hhmmss: - raise ValueError("only leading negative signs are allowed") - - neg = 1 - - # number (ascii codes) - elif ord(c) >= 48 and ord(c) <= 57: - - if have_dot: - - # we found a dot, but now its just a fraction - if len(unit): - number.append(c) - have_dot = 0 - else: - frac.append(c) - - elif not len(unit): - number.append(c) - - else: - r = timedelta_from_spec(number, frac, unit) - unit, number, frac = [], [c], [] - - result += timedelta_as_neg(r, neg) - - # hh:mm:ss. 
- elif c == ':': - - # we flip this off if we have a leading value - if have_value: - neg = 0 - - # we are in the pattern hh:mm:ss pattern - if len(number): - if current_unit is None: - current_unit = 'h' - m = 1000000000L * 3600 - elif current_unit == 'h': - current_unit = 'm' - m = 1000000000L * 60 - elif current_unit == 'm': - current_unit = 's' - m = 1000000000L - r = <int64_t> int(''.join(number)) * m - result += timedelta_as_neg(r, neg) - have_hhmmss = 1 - else: - raise ValueError("expecting hh:mm:ss format, " - "received: {0}".format(ts)) - - unit, number = [], [] - - # after the decimal point - elif c == '.': - - if len(number) and current_unit is not None: - - # by definition we had something like - # so we need to evaluate the final field from a - # hh:mm:ss (so current_unit is 'm') - if current_unit != 'm': - raise ValueError("expected hh:mm:ss format before .") - m = 1000000000L - r = <int64_t> int(''.join(number)) * m - result += timedelta_as_neg(r, neg) - have_value = 1 - unit, number, frac = [], [], [] - - have_dot = 1 - - # unit - else: - unit.append(c) - have_value = 1 - have_dot = 0 - - # we had a dot, but we have a fractional - # value since we have an unit - if have_dot and len(unit): - r = timedelta_from_spec(number, frac, unit) - result += timedelta_as_neg(r, neg) - - # we have a dot as part of a regular format - # e.g. hh:mm:ss.fffffff - elif have_dot: - - if ((len(number) or len(frac)) and not len(unit) and - current_unit is None): - raise ValueError("no units specified") - - if len(frac) > 0 and len(frac) <= 3: - m = 10**(3 -len(frac)) * 1000L * 1000L - elif len(frac) > 3 and len(frac) <= 6: - m = 10**(6 -len(frac)) * 1000L - else: - m = 10**(9 -len(frac)) - - r = <int64_t> int(''.join(frac)) * m - result += timedelta_as_neg(r, neg) - - # we have a regular format - # we must have seconds at this point (hence the unit is still 'm') - elif current_unit is not None: - if current_unit != 'm': - raise ValueError("expected hh:mm:ss format") - m = 1000000000L - r = <int64_t> int(''.join(number)) * m - result += timedelta_as_neg(r, neg) - - # we have a last abbreviation - elif len(unit): - if len(number): - r = timedelta_from_spec(number, frac, unit) - result += timedelta_as_neg(r, neg) - else: - raise ValueError("unit abbreviation w/o a number") - - # treat as nanoseconds - # but only if we don't have anything else - else: - if have_value: - raise ValueError("have leftover units") - if len(number): - r = timedelta_from_spec(number, frac, 'ns') - result += timedelta_as_neg(r, neg) - - return result cpdef convert_to_timedelta64(object ts, object unit): """ @@ -3412,49 +3180,6 @@ cdef inline _get_datetime64_nanos(object val): else: return ival -cpdef inline int64_t cast_from_unit(object ts, object unit) except? 
-1: - """ return a casting of the unit represented to nanoseconds - round the fractional part of a float to our precision, p """ - cdef: - int64_t m - int p - - if unit == 'D' or unit == 'd': - m = 1000000000L * 86400 - p = 6 - elif unit == 'h': - m = 1000000000L * 3600 - p = 6 - elif unit == 'm': - m = 1000000000L * 60 - p = 6 - elif unit == 's': - m = 1000000000L - p = 6 - elif unit == 'ms': - m = 1000000L - p = 3 - elif unit == 'us': - m = 1000L - p = 0 - elif unit == 'ns' or unit is None: - m = 1L - p = 0 - else: - raise ValueError("cannot cast unit {0}".format(unit)) - - # just give me the unit back - if ts is None: - return m - - # cast the unit, multiply base/frace separately - # to avoid precision issues from float -> int - base = <int64_t> ts - frac = ts -base - if p: - frac = round(frac, p) - return <int64_t> (base *m) + <int64_t> (frac *m) - def cast_to_nanoseconds(ndarray arr): cdef: diff --git a/pandas/_libs/tslibs/timedeltas.pxd b/pandas/_libs/tslibs/timedeltas.pxd new file mode 100644 index 0000000000000..7f1d6bc926894 --- /dev/null +++ b/pandas/_libs/tslibs/timedeltas.pxd @@ -0,0 +1,8 @@ +# -*- coding: utf-8 -*- +# cython: profile=False + +from numpy cimport int64_t + +# Exposed for tslib, not intended for outside use. +cdef parse_timedelta_string(object ts) +cpdef int64_t cast_from_unit(object ts, object unit) except? -1 diff --git a/pandas/_libs/tslibs/timedeltas.pyx b/pandas/_libs/tslibs/timedeltas.pyx new file mode 100644 index 0000000000000..1785c85da4949 --- /dev/null +++ b/pandas/_libs/tslibs/timedeltas.pyx @@ -0,0 +1,299 @@ +# -*- coding: utf-8 -*- +# cython: profile=False +import sys +cdef bint PY3 = (sys.version_info[0] >= 3) + +from cpython cimport PyUnicode_Check + +from numpy cimport int64_t + +cimport util + +# ---------------------------------------------------------------------- +# Constants + +# TODO: Get this from tslibs.nattype once available +_nat_strings = set(['NaT', 'nat', 'NAT', 'nan', 'NaN', 'NAN']) + +cdef int64_t NPY_NAT = util.get_nat() + +cdef dict timedelta_abbrevs = { 'D': 'd', + 'd': 'd', + 'days': 'd', + 'day': 'd', + 'hours': 'h', + 'hour': 'h', + 'hr': 'h', + 'h': 'h', + 'm': 'm', + 'minute': 'm', + 'min': 'm', + 'minutes': 'm', + 's': 's', + 'seconds': 's', + 'sec': 's', + 'second': 's', + 'ms': 'ms', + 'milliseconds': 'ms', + 'millisecond': 'ms', + 'milli': 'ms', + 'millis': 'ms', + 'us': 'us', + 'microseconds': 'us', + 'microsecond': 'us', + 'micro': 'us', + 'micros': 'us', + 'ns': 'ns', + 'nanoseconds': 'ns', + 'nano': 'ns', + 'nanos': 'ns', + 'nanosecond': 'ns'} + +# ---------------------------------------------------------------------- + + +cpdef inline int64_t cast_from_unit(object ts, object unit) except? 
-1: + """ return a casting of the unit represented to nanoseconds + round the fractional part of a float to our precision, p """ + cdef: + int64_t m + int p + + if unit == 'D' or unit == 'd': + m = 1000000000L * 86400 + p = 6 + elif unit == 'h': + m = 1000000000L * 3600 + p = 6 + elif unit == 'm': + m = 1000000000L * 60 + p = 6 + elif unit == 's': + m = 1000000000L + p = 6 + elif unit == 'ms': + m = 1000000L + p = 3 + elif unit == 'us': + m = 1000L + p = 0 + elif unit == 'ns' or unit is None: + m = 1L + p = 0 + else: + raise ValueError("cannot cast unit {0}".format(unit)) + + # just give me the unit back + if ts is None: + return m + + # cast the unit, multiply base/frace separately + # to avoid precision issues from float -> int + base = <int64_t> ts + frac = ts -base + if p: + frac = round(frac, p) + return <int64_t> (base *m) + <int64_t> (frac *m) + + +cdef inline parse_timedelta_string(object ts): + """ + Parse a regular format timedelta string. Return an int64_t (in ns) + or raise a ValueError on an invalid parse. + """ + + cdef: + unicode c + bint neg=0, have_dot=0, have_value=0, have_hhmmss=0 + object current_unit=None + int64_t result=0, m=0, r + list number=[], frac=[], unit=[] + + # neg : tracks if we have a leading negative for the value + # have_dot : tracks if we are processing a dot (either post hhmmss or + # inside an expression) + # have_value : track if we have at least 1 leading unit + # have_hhmmss : tracks if we have a regular format hh:mm:ss + + if len(ts) == 0 or ts in _nat_strings: + return NPY_NAT + + # decode ts if necessary + if not PyUnicode_Check(ts) and not PY3: + ts = str(ts).decode('utf-8') + + for c in ts: + + # skip whitespace / commas + if c == ' ' or c == ',': + pass + + # positive signs are ignored + elif c == '+': + pass + + # neg + elif c == '-': + + if neg or have_value or have_hhmmss: + raise ValueError("only leading negative signs are allowed") + + neg = 1 + + # number (ascii codes) + elif ord(c) >= 48 and ord(c) <= 57: + + if have_dot: + + # we found a dot, but now its just a fraction + if len(unit): + number.append(c) + have_dot = 0 + else: + frac.append(c) + + elif not len(unit): + number.append(c) + + else: + r = timedelta_from_spec(number, frac, unit) + unit, number, frac = [], [c], [] + + result += timedelta_as_neg(r, neg) + + # hh:mm:ss. 
+ elif c == ':': + + # we flip this off if we have a leading value + if have_value: + neg = 0 + + # we are in the pattern hh:mm:ss pattern + if len(number): + if current_unit is None: + current_unit = 'h' + m = 1000000000L * 3600 + elif current_unit == 'h': + current_unit = 'm' + m = 1000000000L * 60 + elif current_unit == 'm': + current_unit = 's' + m = 1000000000L + r = <int64_t> int(''.join(number)) * m + result += timedelta_as_neg(r, neg) + have_hhmmss = 1 + else: + raise ValueError("expecting hh:mm:ss format, " + "received: {0}".format(ts)) + + unit, number = [], [] + + # after the decimal point + elif c == '.': + + if len(number) and current_unit is not None: + + # by definition we had something like + # so we need to evaluate the final field from a + # hh:mm:ss (so current_unit is 'm') + if current_unit != 'm': + raise ValueError("expected hh:mm:ss format before .") + m = 1000000000L + r = <int64_t> int(''.join(number)) * m + result += timedelta_as_neg(r, neg) + have_value = 1 + unit, number, frac = [], [], [] + + have_dot = 1 + + # unit + else: + unit.append(c) + have_value = 1 + have_dot = 0 + + # we had a dot, but we have a fractional + # value since we have an unit + if have_dot and len(unit): + r = timedelta_from_spec(number, frac, unit) + result += timedelta_as_neg(r, neg) + + # we have a dot as part of a regular format + # e.g. hh:mm:ss.fffffff + elif have_dot: + + if ((len(number) or len(frac)) and not len(unit) + and current_unit is None): + raise ValueError("no units specified") + + if len(frac) > 0 and len(frac) <= 3: + m = 10**(3 -len(frac)) * 1000L * 1000L + elif len(frac) > 3 and len(frac) <= 6: + m = 10**(6 -len(frac)) * 1000L + else: + m = 10**(9 -len(frac)) + + r = <int64_t> int(''.join(frac)) * m + result += timedelta_as_neg(r, neg) + + # we have a regular format + # we must have seconds at this point (hence the unit is still 'm') + elif current_unit is not None: + if current_unit != 'm': + raise ValueError("expected hh:mm:ss format") + m = 1000000000L + r = <int64_t> int(''.join(number)) * m + result += timedelta_as_neg(r, neg) + + # we have a last abbreviation + elif len(unit): + if len(number): + r = timedelta_from_spec(number, frac, unit) + result += timedelta_as_neg(r, neg) + else: + raise ValueError("unit abbreviation w/o a number") + + # treat as nanoseconds + # but only if we don't have anything else + else: + if have_value: + raise ValueError("have leftover units") + if len(number): + r = timedelta_from_spec(number, frac, 'ns') + result += timedelta_as_neg(r, neg) + + return result + + +cdef inline int64_t timedelta_as_neg(int64_t value, bint neg): + """ + + Parameters + ---------- + value : int64_t of the timedelta value + neg : boolean if the a negative value + """ + if neg: + return -value + return value + + +cdef inline timedelta_from_spec(object number, object frac, object unit): + """ + + Parameters + ---------- + number : a list of number digits + frac : a list of frac digits + unit : a list of unit characters + """ + cdef object n + + try: + unit = ''.join(unit) + unit = timedelta_abbrevs[unit.lower()] + except KeyError: + raise ValueError("invalid abbreviation: {0}".format(unit)) + + n = ''.join(number) + '.' 
+ ''.join(frac) + return cast_from_unit(float(n), unit) diff --git a/setup.py b/setup.py index 365d387dc54d6..158ee9493b6ac 100755 --- a/setup.py +++ b/setup.py @@ -342,6 +342,7 @@ class CheckSDist(sdist_class): 'pandas/_libs/sparse.pyx', 'pandas/_libs/parsers.pyx', 'pandas/_libs/tslibs/strptime.pyx', + 'pandas/_libs/tslibs/timedeltas.pyx', 'pandas/_libs/tslibs/timezones.pyx', 'pandas/_libs/tslibs/fields.pyx', 'pandas/_libs/tslibs/frequencies.pyx', @@ -486,6 +487,7 @@ def pxd(name): 'depends': tseries_depends, 'sources': ['pandas/_libs/src/datetime/np_datetime.c', 'pandas/_libs/src/datetime/np_datetime_strings.c']}, + '_libs.tslibs.timedeltas': {'pyxfile': '_libs/tslibs/timedeltas'}, '_libs.tslibs.timezones': {'pyxfile': '_libs/tslibs/timezones'}, '_libs.tslibs.fields': {'pyxfile': '_libs/tslibs/fields', 'depends': tseries_depends,
Once #17793 is merged, the remaining timedelta functions can be moved: `convert_to_timedelta64` and `array_to_timedelta64`. Most of `Timedelta` and `_Timedelta` can also be moved over in a follow-up.
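For illustration, what the relocated `cast_from_unit` helper computes, inferred from the code above (it is a private helper and only importable once the new extension module is built):

```python
from pandas._libs.tslibs.timedeltas import cast_from_unit

# With ts=None the helper just returns the unit's size in nanoseconds.
assert cast_from_unit(None, 'd') == 86400 * 10**9

# Otherwise it scales ts to nanoseconds, rounding the fractional part
# separately to limit float precision loss.
assert cast_from_unit(1.5, 's') == 1500000000
```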
https://api.github.com/repos/pandas-dev/pandas/pulls/17827
2017-10-09T16:35:13Z
2017-10-21T14:40:42Z
2017-10-21T14:40:42Z
2017-10-27T22:11:22Z
Remove keep_tz kwarg from DatetimeIndex.to_frame
diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py index dae62176722e1..25897bee29845 100644 --- a/pandas/core/indexes/datetimes.py +++ b/pandas/core/indexes/datetimes.py @@ -915,46 +915,6 @@ def to_series(self, keep_tz=False): index=self._shallow_copy(), name=self.name) - def to_frame(self, index=True, keep_tz=False): - """ - Create a DataFrame with a column containing the DatetimeIndex. - - .. versionadded:: 0.21.0 - - Parameters - ---------- - index : boolean, default True - Set the index of the returned DataFrame - as the original DatetimeIndex. - - keep_tz : optional, defaults False. - return the data keeping the timezone. - - If keep_tz is True: - - If the timezone is not set, the resulting - Series will have a datetime64[ns] dtype. - - Otherwise the DataFrame will have an datetime64[ns, tz] dtype; - the tz will be preserved. - - If keep_tz is False: - - DataFrame will have a datetime64[ns] dtype. TZ aware - objects will have the tz removed. - - Returns - ------- - DataFrame : a DataFrame containing the original DatetimeIndex data. - """ - - from pandas import DataFrame - result = DataFrame(self._to_embed(keep_tz), columns=[self.name or 0]) - - if index: - result.index = self - return result - def _to_embed(self, keep_tz=False): """ return an array repr of this object, potentially casting to object
xref https://github.com/pandas-dev/pandas/pull/17815#discussion_r143396690
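The removed method overlapped with `DatetimeIndex.to_series` (shown just above it in the diff) followed by `Series.to_frame`. A rough, unofficial equivalent under that assumption:

```python
import pandas as pd

dti = pd.date_range('2017-01-01', periods=3, tz='US/Eastern')

# Approximates the removed DatetimeIndex.to_frame(keep_tz=True):
df = dti.to_series(keep_tz=True).to_frame()
```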
https://api.github.com/repos/pandas-dev/pandas/pulls/17826
2017-10-09T15:03:18Z
2017-10-10T07:34:37Z
2017-10-10T07:34:37Z
2017-10-10T07:40:43Z
TST: Local conftest for test_offsets.py
diff --git a/pandas/tests/tseries/conftest.py b/pandas/tests/tseries/conftest.py new file mode 100644 index 0000000000000..25446c24b28c0 --- /dev/null +++ b/pandas/tests/tseries/conftest.py @@ -0,0 +1,13 @@ +import pytest +import pandas.tseries.offsets as offsets + + +@pytest.fixture(params=[getattr(offsets, o) for o in offsets.__all__]) +def offset_types(request): + return request.param + + +@pytest.fixture(params=[None, 'UTC', 'Asia/Tokyo', 'US/Eastern', + 'dateutil/Asia/Tokyo', 'dateutil/US/Pacific']) +def tz(request): + return request.param diff --git a/pandas/tests/tseries/test_offsets.py b/pandas/tests/tseries/test_offsets.py index 543d21e162f04..c0e682c978610 100644 --- a/pandas/tests/tseries/test_offsets.py +++ b/pandas/tests/tseries/test_offsets.py @@ -101,15 +101,9 @@ def test_to_m8(): class Base(object): _offset = None - _offset_types = [getattr(offsets, o) for o in offsets.__all__] - timezones = [None, 'UTC', 'Asia/Tokyo', 'US/Eastern', 'dateutil/Asia/Tokyo', 'dateutil/US/Pacific'] - @property - def offset_types(self): - return self._offset_types - def _get_offset(self, klass, value=1, normalize=False): # create instance from offset class if klass is FY5253: @@ -134,7 +128,7 @@ def _get_offset(self, klass, value=1, normalize=False): klass = klass(normalize=normalize) return klass - def test_apply_out_of_range(self): + def test_apply_out_of_range(self, tz): if self._offset is None: return @@ -153,11 +147,10 @@ def test_apply_out_of_range(self): assert result.tzinfo is None # Check tz is preserved - for tz in self.timezones: - t = Timestamp('20080101', tz=tz) - result = t + offset - assert isinstance(result, datetime) - assert t.tzinfo == result.tzinfo + t = Timestamp('20080101', tz=tz) + result = t + offset + assert isinstance(result, datetime) + assert t.tzinfo == result.tzinfo except tslib.OutOfBoundsDatetime: raise @@ -214,42 +207,39 @@ def setup_method(self, method): 'Nano': Timestamp(np_datetime64_compat( '2011-01-01T09:00:00.000000001Z'))} - def test_return_type(self): - for offset in self.offset_types: - offset = self._get_offset(offset) + def test_return_type(self, offset_types): + offset = self._get_offset(offset_types) - # make sure that we are returning a Timestamp - result = Timestamp('20080101') + offset - assert isinstance(result, Timestamp) + # make sure that we are returning a Timestamp + result = Timestamp('20080101') + offset + assert isinstance(result, Timestamp) - # make sure that we are returning NaT - assert NaT + offset is NaT - assert offset + NaT is NaT + # make sure that we are returning NaT + assert NaT + offset is NaT + assert offset + NaT is NaT - assert NaT - offset is NaT - assert (-offset).apply(NaT) is NaT + assert NaT - offset is NaT + assert (-offset).apply(NaT) is NaT - def test_offset_n(self): - for offset_klass in self.offset_types: - offset = self._get_offset(offset_klass) - assert offset.n == 1 + def test_offset_n(self, offset_types): + offset = self._get_offset(offset_types) + assert offset.n == 1 - neg_offset = offset * -1 - assert neg_offset.n == -1 + neg_offset = offset * -1 + assert neg_offset.n == -1 - mul_offset = offset * 3 - assert mul_offset.n == 3 + mul_offset = offset * 3 + assert mul_offset.n == 3 - def test_offset_freqstr(self): - for offset_klass in self.offset_types: - offset = self._get_offset(offset_klass) + def test_offset_freqstr(self, offset_types): + offset = self._get_offset(offset_types) - freqstr = offset.freqstr - if freqstr not in ('<Easter>', - "<DateOffset: kwds={'days': 1}>", - 'LWOM-SAT', ): - code = 
get_offset(freqstr) - assert offset.rule_code == code + freqstr = offset.freqstr + if freqstr not in ('<Easter>', + "<DateOffset: kwds={'days': 1}>", + 'LWOM-SAT', ): + code = get_offset(freqstr) + assert offset.rule_code == code def _check_offsetfunc_works(self, offset, funcname, dt, expected, normalize=False): @@ -319,20 +309,19 @@ def _check_offsetfunc_works(self, offset, funcname, dt, expected, else: assert result == expected_localize - def test_apply(self): + def test_apply(self, offset_types): sdt = datetime(2011, 1, 1, 9, 0) ndt = np_datetime64_compat('2011-01-01 09:00Z') - for offset in self.offset_types: - for dt in [sdt, ndt]: - expected = self.expecteds[offset.__name__] - self._check_offsetfunc_works(offset, 'apply', dt, expected) + for dt in [sdt, ndt]: + expected = self.expecteds[offset_types.__name__] + self._check_offsetfunc_works(offset_types, 'apply', dt, expected) - expected = Timestamp(expected.date()) - self._check_offsetfunc_works(offset, 'apply', dt, expected, - normalize=True) + expected = Timestamp(expected.date()) + self._check_offsetfunc_works(offset_types, 'apply', dt, expected, + normalize=True) - def test_rollforward(self): + def test_rollforward(self, offset_types): expecteds = self.expecteds.copy() # result will not be changed if the target is on the offset @@ -366,16 +355,15 @@ def test_rollforward(self): sdt = datetime(2011, 1, 1, 9, 0) ndt = np_datetime64_compat('2011-01-01 09:00Z') - for offset in self.offset_types: - for dt in [sdt, ndt]: - expected = expecteds[offset.__name__] - self._check_offsetfunc_works(offset, 'rollforward', dt, - expected) - expected = norm_expected[offset.__name__] - self._check_offsetfunc_works(offset, 'rollforward', dt, - expected, normalize=True) + for dt in [sdt, ndt]: + expected = expecteds[offset_types.__name__] + self._check_offsetfunc_works(offset_types, 'rollforward', dt, + expected) + expected = norm_expected[offset_types.__name__] + self._check_offsetfunc_works(offset_types, 'rollforward', dt, + expected, normalize=True) - def test_rollback(self): + def test_rollback(self, offset_types): expecteds = {'BusinessDay': Timestamp('2010-12-31 09:00:00'), 'CustomBusinessDay': Timestamp('2010-12-31 09:00:00'), 'CustomBusinessMonthEnd': @@ -428,66 +416,62 @@ def test_rollback(self): sdt = datetime(2011, 1, 1, 9, 0) ndt = np_datetime64_compat('2011-01-01 09:00Z') - for offset in self.offset_types: - for dt in [sdt, ndt]: - expected = expecteds[offset.__name__] - self._check_offsetfunc_works(offset, 'rollback', dt, expected) + for dt in [sdt, ndt]: + expected = expecteds[offset_types.__name__] + self._check_offsetfunc_works(offset_types, 'rollback', dt, + expected) - expected = norm_expected[offset.__name__] - self._check_offsetfunc_works(offset, 'rollback', dt, expected, - normalize=True) + expected = norm_expected[offset_types.__name__] + self._check_offsetfunc_works(offset_types, 'rollback', dt, + expected, normalize=True) - def test_onOffset(self): - for offset in self.offset_types: - dt = self.expecteds[offset.__name__] - offset_s = self._get_offset(offset) - assert offset_s.onOffset(dt) - - # when normalize=True, onOffset checks time is 00:00:00 - offset_n = self._get_offset(offset, normalize=True) - assert not offset_n.onOffset(dt) - - if offset in (BusinessHour, CustomBusinessHour): - # In default BusinessHour (9:00-17:00), normalized time - # cannot be in business hour range - continue - date = datetime(dt.year, dt.month, dt.day) - assert offset_n.onOffset(date) + def test_onOffset(self, offset_types): + dt = 
self.expecteds[offset_types.__name__] + offset_s = self._get_offset(offset_types) + assert offset_s.onOffset(dt) + + # when normalize=True, onOffset checks time is 00:00:00 + offset_n = self._get_offset(offset_types, normalize=True) + assert not offset_n.onOffset(dt) - def test_add(self): + if offset_types in (BusinessHour, CustomBusinessHour): + # In default BusinessHour (9:00-17:00), normalized time + # cannot be in business hour range + return + date = datetime(dt.year, dt.month, dt.day) + assert offset_n.onOffset(date) + + def test_add(self, offset_types, tz): dt = datetime(2011, 1, 1, 9, 0) - for offset in self.offset_types: - offset_s = self._get_offset(offset) - expected = self.expecteds[offset.__name__] + offset_s = self._get_offset(offset_types) + expected = self.expecteds[offset_types.__name__] - result_dt = dt + offset_s - result_ts = Timestamp(dt) + offset_s - for result in [result_dt, result_ts]: - assert isinstance(result, Timestamp) - assert result == expected + result_dt = dt + offset_s + result_ts = Timestamp(dt) + offset_s + for result in [result_dt, result_ts]: + assert isinstance(result, Timestamp) + assert result == expected - for tz in self.timezones: - expected_localize = expected.tz_localize(tz) - result = Timestamp(dt, tz=tz) + offset_s - assert isinstance(result, Timestamp) - assert result == expected_localize + expected_localize = expected.tz_localize(tz) + result = Timestamp(dt, tz=tz) + offset_s + assert isinstance(result, Timestamp) + assert result == expected_localize - # normalize=True - offset_s = self._get_offset(offset, normalize=True) - expected = Timestamp(expected.date()) + # normalize=True + offset_s = self._get_offset(offset_types, normalize=True) + expected = Timestamp(expected.date()) - result_dt = dt + offset_s - result_ts = Timestamp(dt) + offset_s - for result in [result_dt, result_ts]: - assert isinstance(result, Timestamp) - assert result == expected + result_dt = dt + offset_s + result_ts = Timestamp(dt) + offset_s + for result in [result_dt, result_ts]: + assert isinstance(result, Timestamp) + assert result == expected - for tz in self.timezones: - expected_localize = expected.tz_localize(tz) - result = Timestamp(dt, tz=tz) + offset_s - assert isinstance(result, Timestamp) - assert result == expected_localize + expected_localize = expected.tz_localize(tz) + result = Timestamp(dt, tz=tz) + offset_s + assert isinstance(result, Timestamp) + assert result == expected_localize def test_pickle_v0_15_2(self): offsets = {'DateOffset': DateOffset(years=1),
- [x] closes xxxxx
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`

Continued from #17622 per comments. Looks like the `offset_types` property can be replaced entirely with a fixture.
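A trimmed sketch of how pytest consumes such parametrized fixtures: any test in the directory that names the fixture as an argument is run once per parameter (parameter list shortened here):

```python
import pytest

@pytest.fixture(params=['UTC', 'Asia/Tokyo'])  # shortened param list
def tz(request):
    return request.param

def test_tz_is_injected(tz):
    # pytest runs this test twice, once per fixture parameter.
    assert tz in ('UTC', 'Asia/Tokyo')
```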
https://api.github.com/repos/pandas-dev/pandas/pulls/17825
2017-10-09T07:11:13Z
2017-10-09T12:09:02Z
2017-10-09T12:09:02Z
2017-10-27T06:38:03Z
BUG: Coerce to numeric despite uint64 conflict
diff --git a/doc/source/whatsnew/v0.21.0.txt b/doc/source/whatsnew/v0.21.0.txt index 595fab9e18ea4..bf73c675697f4 100644 --- a/doc/source/whatsnew/v0.21.0.txt +++ b/doc/source/whatsnew/v0.21.0.txt @@ -828,6 +828,7 @@ Conversion - Bug in ``Timestamp.replace`` when replacing ``tzinfo`` around DST changes (:issue:`15683`) - Bug in ``Timedelta`` construction and arithmetic that would not propagate the ``Overflow`` exception (:issue:`17367`) - Bug in :meth:`~DataFrame.astype` converting to object dtype when passed extension type classes (`DatetimeTZDtype``, ``CategoricalDtype``) rather than instances. Now a ``TypeError`` is raised when a class is passed (:issue:`17780`). +- Bug in :meth:`to_numeric` in which elements were not always being coerced to numeric when ``errors='coerce'`` (:issue:`17007`, :issue:`17125`) Indexing ^^^^^^^^ diff --git a/pandas/_libs/src/inference.pyx b/pandas/_libs/src/inference.pyx index 7990fd3b1b5c9..b0a64e1ccc225 100644 --- a/pandas/_libs/src/inference.pyx +++ b/pandas/_libs/src/inference.pyx @@ -165,20 +165,8 @@ cdef class Seen(object): two conflict cases was also detected. However, we are trying to force conversion to a numeric dtype. """ - if self.uint_ and (self.null_ or self.sint_): - if not self.coerce_numeric: - return True - - if self.null_: - msg = ("uint64 array detected, and such an " - "array cannot contain NaN.") - else: # self.sint_ = 1 - msg = ("uint64 and negative values detected. " - "Cannot safely return a numeric array " - "without truncating data.") - - raise ValueError(msg) - return False + return (self.uint_ and (self.null_ or self.sint_) + and not self.coerce_numeric) cdef inline saw_null(self): """ @@ -1103,10 +1091,17 @@ def maybe_convert_numeric(ndarray[object] values, set na_values, seen.saw_int(val) if val >= 0: - uints[i] = val + if val <= oUINT64_MAX: + uints[i] = val + else: + seen.float_ = True if val <= oINT64_MAX: ints[i] = val + + if seen.sint_ and seen.uint_: + seen.float_ = True + elif util.is_bool_object(val): floats[i] = uints[i] = ints[i] = bools[i] = val seen.bool_ = True @@ -1154,6 +1149,8 @@ def maybe_convert_numeric(ndarray[object] values, set na_values, uints[i] = as_int if as_int <= oINT64_MAX: ints[i] = as_int + + seen.float_ = seen.float_ or (seen.uint_ and seen.sint_) else: seen.float_ = True except (TypeError, ValueError) as e: diff --git a/pandas/tests/dtypes/test_inference.py b/pandas/tests/dtypes/test_inference.py index 857f7a283aa95..70273f9e999cf 100644 --- a/pandas/tests/dtypes/test_inference.py +++ b/pandas/tests/dtypes/test_inference.py @@ -39,6 +39,11 @@ from pandas.util import testing as tm +@pytest.fixture(params=[True, False], ids=lambda val: str(val)) +def coerce(request): + return request.param + + def test_is_sequence(): is_seq = inference.is_sequence assert (is_seq((1, 2))) @@ -340,44 +345,38 @@ def test_convert_numeric_uint64(self): exp = np.array([2**63], dtype=np.uint64) tm.assert_numpy_array_equal(lib.maybe_convert_numeric(arr, set()), exp) - def test_convert_numeric_uint64_nan(self): - msg = 'uint64 array detected' - cases = [(np.array([2**63, np.nan], dtype=object), set()), - (np.array([str(2**63), np.nan], dtype=object), set()), - (np.array([np.nan, 2**63], dtype=object), set()), - (np.array([np.nan, str(2**63)], dtype=object), set()), - (np.array([2**63, 2**63 + 1], dtype=object), set([2**63])), - (np.array([str(2**63), str(2**63 + 1)], - dtype=object), set([2**63]))] - - for coerce in (True, False): - for arr, na_values in cases: - if coerce: - with tm.assert_raises_regex(ValueError, msg): - 
lib.maybe_convert_numeric(arr, na_values, - coerce_numeric=coerce) - else: - tm.assert_numpy_array_equal(lib.maybe_convert_numeric( - arr, na_values), arr) - - def test_convert_numeric_int64_uint64(self): - msg = 'uint64 and negative values detected' - cases = [np.array([2**63, -1], dtype=object), - np.array([str(2**63), -1], dtype=object), - np.array([str(2**63), str(-1)], dtype=object), - np.array([-1, 2**63], dtype=object), - np.array([-1, str(2**63)], dtype=object), - np.array([str(-1), str(2**63)], dtype=object)] - - for coerce in (True, False): - for case in cases: - if coerce: - with tm.assert_raises_regex(ValueError, msg): - lib.maybe_convert_numeric(case, set(), - coerce_numeric=coerce) - else: - tm.assert_numpy_array_equal(lib.maybe_convert_numeric( - case, set()), case) + @pytest.mark.parametrize("arr", [ + np.array([2**63, np.nan], dtype=object), + np.array([str(2**63), np.nan], dtype=object), + np.array([np.nan, 2**63], dtype=object), + np.array([np.nan, str(2**63)], dtype=object)]) + def test_convert_numeric_uint64_nan(self, coerce, arr): + expected = arr.astype(float) if coerce else arr.copy() + result = lib.maybe_convert_numeric(arr, set(), + coerce_numeric=coerce) + tm.assert_almost_equal(result, expected) + + def test_convert_numeric_uint64_nan_values(self, coerce): + arr = np.array([2**63, 2**63 + 1], dtype=object) + na_values = set([2**63]) + + expected = (np.array([np.nan, 2**63 + 1], dtype=float) + if coerce else arr.copy()) + result = lib.maybe_convert_numeric(arr, na_values, + coerce_numeric=coerce) + tm.assert_almost_equal(result, expected) + + @pytest.mark.parametrize("case", [ + np.array([2**63, -1], dtype=object), + np.array([str(2**63), -1], dtype=object), + np.array([str(2**63), str(-1)], dtype=object), + np.array([-1, 2**63], dtype=object), + np.array([-1, str(2**63)], dtype=object), + np.array([str(-1), str(2**63)], dtype=object)]) + def test_convert_numeric_int64_uint64(self, case, coerce): + expected = case.astype(float) if coerce else case.copy() + result = lib.maybe_convert_numeric(case, set(), coerce_numeric=coerce) + tm.assert_almost_equal(result, expected) def test_maybe_convert_objects_uint64(self): # see gh-4471 diff --git a/pandas/tests/tools/test_numeric.py b/pandas/tests/tools/test_numeric.py index 1d13ba93ba759..b306dba0be7f1 100644 --- a/pandas/tests/tools/test_numeric.py +++ b/pandas/tests/tools/test_numeric.py @@ -381,3 +381,28 @@ def test_downcast_limits(self): for dtype, downcast, min_max in dtype_downcast_min_max: series = pd.to_numeric(pd.Series(min_max), downcast=downcast) assert series.dtype == dtype + + def test_coerce_uint64_conflict(self): + # see gh-17007 and gh-17125 + # + # Still returns float despite the uint64-nan conflict, + # which would normally force the casting to object. + df = pd.DataFrame({"a": [200, 300, "", "NaN", 30000000000000000000]}) + expected = pd.Series([200, 300, np.nan, np.nan, + 30000000000000000000], dtype=float, name="a") + result = to_numeric(df["a"], errors="coerce") + tm.assert_series_equal(result, expected) + + s = pd.Series(["12345678901234567890", "1234567890", "ITEM"]) + expected = pd.Series([12345678901234567890, + 1234567890, np.nan], dtype=float) + result = to_numeric(s, errors="coerce") + tm.assert_series_equal(result, expected) + + # For completeness, check against "ignore" and "raise" + result = to_numeric(s, errors="ignore") + tm.assert_series_equal(result, s) + + msg = "Unable to parse string" + with tm.assert_raises_regex(ValueError, msg): + to_numeric(s, errors="raise")
Previously, `to_numeric` was not coercing elements to numeric if the conversion was going to be lossy (e.g. `uint64` combined with `nan`), even when `errors='coerce'`. Now the `errors` parameter takes precedence. Closes #17007. Closes #17125.
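The new behavior, lifted from the tests in the diff:

```python
import numpy as np
import pandas as pd

s = pd.Series(["12345678901234567890", "1234567890", "ITEM"])

# Previously this raised ValueError despite errors="coerce"; the
# uint64/NaN conflict now falls back to float64 instead.
result = pd.to_numeric(s, errors="coerce")
expected = pd.Series([12345678901234567890, 1234567890, np.nan],
                     dtype=float)
pd.testing.assert_series_equal(result, expected)
```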
https://api.github.com/repos/pandas-dev/pandas/pulls/17823
2017-10-09T03:09:39Z
2017-10-09T22:57:36Z
2017-10-09T22:57:36Z
2017-10-10T03:20:12Z
TST: Add the default separator test for PythonParser
diff --git a/doc/source/io.rst b/doc/source/io.rst index e6b51b7e2f45c..a7f8d9da15328 100644 --- a/doc/source/io.rst +++ b/doc/source/io.rst @@ -84,7 +84,8 @@ filepath_or_buffer : various sep : str, defaults to ``','`` for :func:`read_csv`, ``\t`` for :func:`read_table` Delimiter to use. If sep is ``None``, the C engine cannot automatically detect the separator, but the Python parsing engine can, meaning the latter will be - used automatically. In addition, separators longer than 1 character and + used and automatically detect the separator by Python's builtin sniffer tool, + :class:`python:csv.Sniffer`. In addition, separators longer than 1 character and different from ``'\s+'`` will be interpreted as regular expressions and will also force the use of the Python parsing engine. Note that regex delimiters are prone to ignoring quoted data. Regex example: ``'\\r\\t'``. diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py index 9c76d3126890c..867974500de9c 100755 --- a/pandas/io/parsers.py +++ b/pandas/io/parsers.py @@ -311,7 +311,8 @@ _sep_doc = r"""sep : str, default {default} Delimiter to use. If sep is None, the C engine cannot automatically detect the separator, but the Python parsing engine can, meaning the latter will - be used automatically. In addition, separators longer than 1 character and + be used and automatically detect the separator by Python's builtin sniffer + tool, ``csv.Sniffer``. In addition, separators longer than 1 character and different from ``'\s+'`` will be interpreted as regular expressions and will also force the use of the Python parsing engine. Note that regex delimiters are prone to ignoring quoted data. Regex example: ``'\r\t'`` diff --git a/pandas/tests/io/parser/python_parser_only.py b/pandas/tests/io/parser/python_parser_only.py index a0784d3aeae2d..0584a4d6fa3dc 100644 --- a/pandas/tests/io/parser/python_parser_only.py +++ b/pandas/tests/io/parser/python_parser_only.py @@ -19,6 +19,16 @@ class PythonParserTests(object): + def test_default_separator(self): + # GH17333 + # csv.Sniffer in Python treats 'o' as separator. + text = 'aob\n1o2\n3o4' + expected = DataFrame({'a': [1, 3], 'b': [2, 4]}) + + result = self.read_csv(StringIO(text), sep=None) + + tm.assert_frame_equal(result, expected) + def test_invalid_skipfooter(self): text = "a\n1\n2"
- [x] closes #17333
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
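What the new test exercises, as a standalone snippet (stdlib `io.StringIO` is used here; the engine is pinned to avoid the fallback warning):

```python
from io import StringIO
import pandas as pd

# With sep=None the Python engine defers to csv.Sniffer, which
# infers 'o' as the delimiter in this contrived input.
text = 'aob\n1o2\n3o4'
df = pd.read_csv(StringIO(text), sep=None, engine='python')
#    a  b
# 0  1  2
# 1  3  4
```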
https://api.github.com/repos/pandas-dev/pandas/pulls/17822
2017-10-09T02:41:49Z
2017-10-11T15:21:27Z
2017-10-11T15:21:27Z
2017-10-11T15:21:27Z
BUG: Fix default encoding for CSVFormatter.save
diff --git a/doc/source/whatsnew/v0.21.0.txt b/doc/source/whatsnew/v0.21.0.txt index 8b2c4d16f4e1a..ad184d0a6a792 100644 --- a/doc/source/whatsnew/v0.21.0.txt +++ b/doc/source/whatsnew/v0.21.0.txt @@ -334,6 +334,7 @@ I/O - Bug in :func:`read_csv` in which memory management issues in exception handling, under certain conditions, would cause the interpreter to segfault (:issue:`14696`, :issue:`16798`). - Bug in :func:`read_csv` when called with ``low_memory=False`` in which a CSV with at least one column > 2GB in size would incorrectly raise a ``MemoryError`` (:issue:`16798`). - Bug in :func:`read_csv` when called with a single-element list ``header`` would return a ``DataFrame`` of all NaN values (:issue:`7757`) +- Bug in :meth:`DataFrame.to_csv` defaulting to 'ascii' encoding in Python 3, instead of 'utf-8' (:issue:`17097`) - Bug in :func:`read_stata` where value labels could not be read when using an iterator (:issue:`16923`) - Bug in :func:`read_html` where import check fails when run in multiple threads (:issue:`16928`) diff --git a/pandas/io/formats/format.py b/pandas/io/formats/format.py index 733fd3bd39b52..73837efd633fe 100644 --- a/pandas/io/formats/format.py +++ b/pandas/io/formats/format.py @@ -1573,12 +1573,20 @@ def __init__(self, obj, path_or_buf=None, sep=",", na_rep='', def save(self): # create the writer & save + if self.encoding is None: + if compat.PY2: + encoding = 'ascii' + else: + encoding = 'utf-8' + else: + encoding = self.encoding + if hasattr(self.path_or_buf, 'write'): f = self.path_or_buf close = False else: f, handles = _get_handle(self.path_or_buf, self.mode, - encoding=self.encoding, + encoding=encoding, compression=self.compression) close = True @@ -1588,11 +1596,11 @@ def save(self): doublequote=self.doublequote, escapechar=self.escapechar, quotechar=self.quotechar) - if self.encoding is not None: - writer_kwargs['encoding'] = self.encoding - self.writer = UnicodeWriter(f, **writer_kwargs) - else: + if encoding == 'ascii': self.writer = csv.writer(f, **writer_kwargs) + else: + writer_kwargs['encoding'] = encoding + self.writer = UnicodeWriter(f, **writer_kwargs) self._save() diff --git a/pandas/tests/io/formats/test_to_csv.py b/pandas/tests/io/formats/test_to_csv.py index 1073fbcef5aec..b82d9895ddcf5 100644 --- a/pandas/tests/io/formats/test_to_csv.py +++ b/pandas/tests/io/formats/test_to_csv.py @@ -1,3 +1,5 @@ +# -*- coding: utf-8 -*- + from pandas import DataFrame import numpy as np import pandas as pd @@ -6,6 +8,21 @@ class TestToCSV(object): + def test_to_csv_defualt_encoding(self): + # GH17097 + df = DataFrame({'col': [u"AAAAA", u"ÄÄÄÄÄ", u"ßßßßß", u"聞聞聞聞聞"]}) + + with tm.ensure_clean('test.csv') as path: + # the default to_csv encoding in Python 2 is ascii, and that in + # Python 3 is uft-8. + if pd.compat.PY2: + # the encoding argument parameter should be utf-8 + with tm.assert_raises_regex(UnicodeEncodeError, 'ascii'): + df.to_csv(path) + else: + df.to_csv(path) + tm.assert_frame_equal(pd.read_csv(path, index_col=0), df) + def test_to_csv_quotechar(self): df = DataFrame({'col': [1, 2]}) expected = """\
- [x] closes #17097 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
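As a hedged sketch of the behavior change (the file name is a placeholder, not part of the PR):

```python
# -*- coding: utf-8 -*-
import pandas as pd

df = pd.DataFrame({"col": [u"ÄÄÄÄÄ", u"聞聞聞聞聞"]})

# With this fix, Python 3 writes utf-8 by default; Python 2 still
# defaults to ascii, so passing encoding explicitly stays portable.
df.to_csv("unicode.csv", encoding="utf-8")

roundtrip = pd.read_csv("unicode.csv", index_col=0)
```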
https://api.github.com/repos/pandas-dev/pandas/pulls/17821
2017-10-09T01:43:54Z
2017-10-11T15:19:57Z
2017-10-11T15:19:56Z
2017-10-11T15:19:57Z
DEPR: Deprecate tupleize_cols in read_csv
diff --git a/doc/source/io.rst b/doc/source/io.rst index 0aa4ea72e3b13..08d00138b7cd8 100644 --- a/doc/source/io.rst +++ b/doc/source/io.rst @@ -343,6 +343,10 @@ dialect : str or :class:`python:csv.Dialect` instance, default ``None`` override values, a ParserWarning will be issued. See :class:`python:csv.Dialect` documentation for more details. tupleize_cols : boolean, default ``False`` + .. deprecated:: 0.21.0 + + This argument will be removed and will always convert to MultiIndex + Leave a list of tuples on columns as is (default is to convert to a MultiIndex on the columns). diff --git a/doc/source/whatsnew/v0.21.0.txt b/doc/source/whatsnew/v0.21.0.txt index 595fab9e18ea4..f86847d8b8274 100644 --- a/doc/source/whatsnew/v0.21.0.txt +++ b/doc/source/whatsnew/v0.21.0.txt @@ -717,6 +717,7 @@ Deprecations - :func:`read_excel()` has deprecated ``sheetname`` in favor of ``sheet_name`` for consistency with ``.to_excel()`` (:issue:`10559`). - :func:`read_excel()` has deprecated ``parse_cols`` in favor of ``usecols`` for consistency with :func:`read_csv` (:issue:`4988`) +- :func:`read_csv()` has deprecated the ``tupleize_cols`` argument. Column tuples will always be converted to a ``MultiIndex`` (:issue:`17060`) - The ``convert`` parameter has been deprecated in the ``.take()`` method, as it was not being respected (:issue:`16948`) - ``pd.options.html.border`` has been deprecated in favor of ``pd.options.display.html.border`` (:issue:`15793`). - :func:`SeriesGroupBy.nth` has deprecated ``True`` in favor of ``'all'`` for its kwarg ``dropna`` (:issue:`11038`). diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py index c8b2987d591ef..4b6c358ea7dcd 100755 --- a/pandas/io/parsers.py +++ b/pandas/io/parsers.py @@ -260,8 +260,11 @@ override values, a ParserWarning will be issued. See csv.Dialect documentation for more details. tupleize_cols : boolean, default False + .. deprecated:: 0.21.0 + This argument will be removed and will always convert to MultiIndex + Leave a list of tuples on columns as is (default is to convert to - a Multi Index on the columns) + a MultiIndex on the columns) error_bad_lines : boolean, default True Lines with too many fields (e.g. a csv line with too many commas) will by default cause an exception to be raised, and no DataFrame will be returned. @@ -510,6 +513,7 @@ def _read(filepath_or_buffer, kwds): 'buffer_lines': None, 'error_bad_lines': True, 'warn_bad_lines': True, + 'tupleize_cols': False, 'float_precision': None } @@ -529,6 +533,7 @@ def _read(filepath_or_buffer, kwds): 'buffer_lines', 'compact_ints', 'use_unsigned', + 'tupleize_cols', } @@ -962,6 +967,9 @@ def _clean_options(self, options, engine): if arg == 'as_recarray': msg += ' Please call pd.to_csv(...).to_records() instead.' 
+ elif arg == 'tupleize_cols': + msg += (' Column tuples will then ' + 'always be converted to MultiIndex') if result.get(arg, parser_default) != parser_default: depr_warning += msg + '\n\n' diff --git a/pandas/tests/frame/test_to_csv.py b/pandas/tests/frame/test_to_csv.py index 6a4b1686a31e2..a61a157181253 100644 --- a/pandas/tests/frame/test_to_csv.py +++ b/pandas/tests/frame/test_to_csv.py @@ -555,8 +555,12 @@ def _make_frame(names=None): # tupleize_cols=True and index=False df = _make_frame(True) df.to_csv(path, tupleize_cols=True, index=False) - result = read_csv( - path, header=0, tupleize_cols=True, index_col=None) + + with tm.assert_produces_warning(FutureWarning, + check_stacklevel=False): + result = read_csv(path, header=0, + tupleize_cols=True, + index_col=None) result.columns = df.columns assert_frame_equal(df, result) @@ -576,8 +580,11 @@ def _make_frame(names=None): # column & index are multi-index (compatibility) df = mkdf(5, 3, r_idx_nlevels=2, c_idx_nlevels=4) df.to_csv(path, tupleize_cols=True) - result = read_csv(path, header=0, index_col=[ - 0, 1], tupleize_cols=True) + + with tm.assert_produces_warning(FutureWarning, + check_stacklevel=False): + result = read_csv(path, header=0, index_col=[0, 1], + tupleize_cols=True) result.columns = df.columns assert_frame_equal(df, result) diff --git a/pandas/tests/io/parser/header.py b/pandas/tests/io/parser/header.py index 50ae4dae541ac..ff3beb70b774f 100644 --- a/pandas/tests/io/parser/header.py +++ b/pandas/tests/io/parser/header.py @@ -105,13 +105,13 @@ def test_header_multi_index(self): R_l0_g4,R_l1_g4,R4C0,R4C1,R4C2 """ - df = self.read_csv(StringIO(data), header=[0, 1, 2, 3], index_col=[ - 0, 1], tupleize_cols=False) + df = self.read_csv(StringIO(data), header=[0, 1, 2, 3], + index_col=[0, 1]) tm.assert_frame_equal(df, expected) # skipping lines in the header - df = self.read_csv(StringIO(data), header=[0, 1, 2, 3], index_col=[ - 0, 1], tupleize_cols=False) + df = self.read_csv(StringIO(data), header=[0, 1, 2, 3], + index_col=[0, 1]) tm.assert_frame_equal(df, expected) # INVALID OPTIONS @@ -121,25 +121,22 @@ def test_header_multi_index(self): FutureWarning, check_stacklevel=False): pytest.raises(ValueError, self.read_csv, StringIO(data), header=[0, 1, 2, 3], - index_col=[0, 1], as_recarray=True, - tupleize_cols=False) + index_col=[0, 1], as_recarray=True) # names pytest.raises(ValueError, self.read_csv, StringIO(data), header=[0, 1, 2, 3], - index_col=[0, 1], names=['foo', 'bar'], - tupleize_cols=False) + index_col=[0, 1], names=['foo', 'bar']) # usecols pytest.raises(ValueError, self.read_csv, StringIO(data), header=[0, 1, 2, 3], - index_col=[0, 1], usecols=['foo', 'bar'], - tupleize_cols=False) + index_col=[0, 1], usecols=['foo', 'bar']) # non-numeric index_col pytest.raises(ValueError, self.read_csv, StringIO(data), header=[0, 1, 2, 3], - index_col=['foo', 'bar'], tupleize_cols=False) + index_col=['foo', 'bar']) def test_header_multiindex_common_format(self): diff --git a/pandas/tests/io/parser/python_parser_only.py b/pandas/tests/io/parser/python_parser_only.py index c3dc91b3f188c..267b589ee91f4 100644 --- a/pandas/tests/io/parser/python_parser_only.py +++ b/pandas/tests/io/parser/python_parser_only.py @@ -232,9 +232,7 @@ def test_none_delimiter(self): result = self.read_csv(StringIO(data), header=0, sep=None, error_bad_lines=False, - warn_bad_lines=True, - engine='python', - tupleize_cols=True) + warn_bad_lines=True) tm.assert_frame_equal(result, expected) def test_skipfooter_bad_row(self): diff --git 
a/pandas/tests/io/parser/test_unsupported.py b/pandas/tests/io/parser/test_unsupported.py index 5d248f2fef59c..2e73ce6aa19b0 100644 --- a/pandas/tests/io/parser/test_unsupported.py +++ b/pandas/tests/io/parser/test_unsupported.py @@ -127,32 +127,25 @@ def read(self): class TestDeprecatedFeatures(object): - def test_deprecated_args(self): - data = '1,2,3' - - # deprecated arguments with non-default values - deprecated = { - 'as_recarray': True, - 'buffer_lines': True, - 'compact_ints': True, - 'use_unsigned': True, - 'skip_footer': 1, - } - - engines = 'c', 'python' - - for engine in engines: - for arg, non_default_val in deprecated.items(): - if engine == 'c' and arg == 'skip_footer': - # unsupported --> exception is raised - continue - - if engine == 'python' and arg == 'buffer_lines': - # unsupported --> exception is raised - continue - - with tm.assert_produces_warning( - FutureWarning, check_stacklevel=False): - kwargs = {arg: non_default_val} - read_csv(StringIO(data), engine=engine, - **kwargs) + @pytest.mark.parametrize("engine", ["c", "python"]) + @pytest.mark.parametrize("kwargs", [{"as_recarray": True}, + {"buffer_lines": True}, + {"compact_ints": True}, + {"use_unsigned": True}, + {"tupleize_cols": True}, + {"skip_footer": 1}]) + def test_deprecated_args(self, engine, kwargs): + data = "1,2,3" + arg, _ = list(kwargs.items())[0] + + if engine == "c" and arg == "skip_footer": + # unsupported --> exception is raised + return + + if engine == "python" and arg == "buffer_lines": + # unsupported --> exception is raised + return + + with tm.assert_produces_warning( + FutureWarning, check_stacklevel=False): + read_csv(StringIO(data), engine=engine, **kwargs)
xref [#17060 (comment)](https://github.com/pandas-dev/pandas/pull/17060#issuecomment-317692667)
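For context, a small sketch of the round trip that now works without the deprecated flag (data is illustrative):

```python
import pandas as pd
from io import StringIO

columns = pd.MultiIndex.from_tuples([("a", "x"), ("a", "y")])
df = pd.DataFrame([[1, 2], [3, 4]], columns=columns)

# Header tuples are now always parsed back into a MultiIndex, so
# tupleize_cols is unnecessary on the read side.
result = pd.read_csv(StringIO(df.to_csv(index=False)), header=[0, 1])
```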
https://api.github.com/repos/pandas-dev/pandas/pulls/17820
2017-10-08T23:40:01Z
2017-10-09T19:17:19Z
2017-10-09T19:17:19Z
2017-10-09T19:27:16Z
ERR: Raise ValueError when week is passed in to_datetime format without day or year (#16774)
diff --git a/doc/source/whatsnew/v0.21.0.txt b/doc/source/whatsnew/v0.21.0.txt index 1e9c402dac73e..033e428bcbbb0 100644 --- a/doc/source/whatsnew/v0.21.0.txt +++ b/doc/source/whatsnew/v0.21.0.txt @@ -701,6 +701,7 @@ Other API Changes - :func:`Series.argmin` and :func:`Series.argmax` will now raise a ``TypeError`` when used with ``object`` dtypes, instead of a ``ValueError`` (:issue:`13595`) - :class:`Period` is now immutable, and will now raise an ``AttributeError`` when a user tries to assign a new value to the ``ordinal`` or ``freq`` attributes (:issue:`17116`). - :func:`to_datetime` when passed a tz-aware ``origin=`` kwarg will now raise a more informative ``ValueError`` rather than a ``TypeError`` (:issue:`16842`) +- :func:`to_datetime` now raises a ``ValueError`` when format includes ``%W`` or ``%U`` without also including day of the week and calendar year (:issue:`16774`) - Renamed non-functional ``index`` to ``index_col`` in :func:`read_stata` to improve API consistency (:issue:`16342`) - Bug in :func:`DataFrame.drop` caused boolean labels ``False`` and ``True`` to be treated as labels 0 and 1 respectively when dropping indices from a numeric index. This will now raise a ValueError (:issue:`16877`) - Restricted DateOffset keyword arguments. Previously, ``DateOffset`` subclasses allowed arbitrary keyword arguments which could lead to unexpected behavior. Now, only valid arguments will be accepted. (:issue:`17176`). diff --git a/pandas/_libs/tslibs/strptime.pyx b/pandas/_libs/tslibs/strptime.pyx index 20b24d6be9a58..59a7376280da0 100644 --- a/pandas/_libs/tslibs/strptime.pyx +++ b/pandas/_libs/tslibs/strptime.pyx @@ -83,6 +83,16 @@ def array_strptime(ndarray[object] values, object fmt, assert is_raise or is_ignore or is_coerce + if fmt is not None: + if '%W' in fmt or '%U' in fmt: + if '%Y' not in fmt and '%y' not in fmt: + raise ValueError("Cannot use '%W' or '%U' without " + "day and year") + if ('%A' not in fmt and '%a' not in fmt and '%w' not + in fmt): + raise ValueError("Cannot use '%W' or '%U' without " + "day and year") + global _TimeRE_cache, _regex_cache with _cache_lock: if _getlang() != _TimeRE_cache.locale_time.lang: diff --git a/pandas/tests/indexes/datetimes/test_tools.py b/pandas/tests/indexes/datetimes/test_tools.py index b8ce1f0af6ea8..330ec9f357655 100644 --- a/pandas/tests/indexes/datetimes/test_tools.py +++ b/pandas/tests/indexes/datetimes/test_tools.py @@ -372,6 +372,20 @@ def test_datetime_invalid_datatype(self): with pytest.raises(TypeError): pd.to_datetime(pd.to_datetime) + @pytest.mark.parametrize('date, format', + [('2017-20', '%Y-%W'), + ('20 Sunday', '%W %A'), + ('20 Sun', '%W %a'), + ('2017-21', '%Y-%U'), + ('20 Sunday', '%U %A'), + ('20 Sun', '%U %a')]) + def test_week_without_day_and_calendar_year(self, date, format): + # GH16774 + + msg = "Cannot use '%W' or '%U' without day and year" + with tm.assert_raises_regex(ValueError, msg): + pd.to_datetime(date, format=format) + class TestToDatetimeUnit(object):
- [x] closes #16774 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
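A brief sketch of the new validation (the resulting timestamp follows Python's `strptime` week-numbering rules):

```python
import pandas as pd

# Fully determined: week-of-year plus calendar year and weekday.
pd.to_datetime("2017 20 Sun", format="%Y %W %a")
# Timestamp('2017-05-21 00:00:00')

# Ambiguous: no weekday, so this now raises
# ValueError("Cannot use '%W' or '%U' without day and year")
# instead of silently guessing a date.
# pd.to_datetime("2017-20", format="%Y-%W")
```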
https://api.github.com/repos/pandas-dev/pandas/pulls/17819
2017-10-08T20:28:55Z
2017-10-14T18:53:05Z
2017-10-14T18:53:05Z
2017-10-14T19:48:55Z
Typo in error message
diff --git a/pandas/core/window.py b/pandas/core/window.py index 869296503225d..e3a091573aa2f 100644 --- a/pandas/core/window.py +++ b/pandas/core/window.py @@ -1138,8 +1138,8 @@ def _validate_freq(self): try: return to_offset(self.window) except (TypeError, ValueError): - raise ValueError("passed window {0} in not " - "compat with a datetimelike " + raise ValueError("passed window {0} is not " + "compatible with a datetimelike " "index".format(self.window)) _agg_doc = dedent("""
- [ ] closes #xxxx - [ ] tests added / passed - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/17817
2017-10-08T07:08:05Z
2017-10-08T11:07:25Z
2017-10-08T11:07:25Z
2017-10-08T11:07:31Z
CLN: Use pandas.core.common for None checks
diff --git a/pandas/core/common.py b/pandas/core/common.py index e0dc420bc53f8..7b96700313012 100644 --- a/pandas/core/common.py +++ b/pandas/core/common.py @@ -223,17 +223,36 @@ def _mut_exclusive(**kwargs): def _not_none(*args): + """Returns a generator consisting of the arguments that are not None""" return (arg for arg in args if arg is not None) def _any_none(*args): + """Returns a boolean indicating if any argument is None""" for arg in args: if arg is None: return True return False +def _all_none(*args): + """Returns a boolean indicating if all arguments are None""" + for arg in args: + if arg is not None: + return False + return True + + +def _any_not_none(*args): + """Returns a boolean indicating if any argument is not None""" + for arg in args: + if arg is not None: + return True + return False + + def _all_not_none(*args): + """Returns a boolean indicating if all arguments are not None""" for arg in args: if arg is None: return False @@ -241,6 +260,7 @@ def _all_not_none(*args): def _count_not_none(*args): + """Returns the count of arguments that are not None""" return sum(x is not None for x in args) @@ -459,13 +479,6 @@ def _apply_if_callable(maybe_callable, obj, **kwargs): return maybe_callable -def _all_none(*args): - for arg in args: - if arg is not None: - return False - return True - - def _where_compat(mask, arr1, arr2): if arr1.dtype == _NS_DTYPE and arr2.dtype == _NS_DTYPE: new_vals = np.where(mask, arr1.view('i8'), arr2.view('i8')) diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 5fe5718d46bcb..90e91e8cd6180 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -28,12 +28,10 @@ from pandas.core.dtypes.cast import maybe_promote, maybe_upcast_putmask from pandas.core.dtypes.missing import isna, notna from pandas.core.dtypes.generic import ABCSeries, ABCPanel, ABCDataFrame - -from pandas.core.common import (_all_not_none, - _values_from_object, - _maybe_box_datetimelike, - SettingWithCopyError, SettingWithCopyWarning, - AbstractMethodError) +from pandas.core.common import (_all_not_none, _count_not_none, + _maybe_box_datetimelike, _values_from_object, + AbstractMethodError, SettingWithCopyError, + SettingWithCopyWarning) from pandas.core.base import PandasObject, SelectionMixin from pandas.core.index import (Index, MultiIndex, _ensure_index, @@ -3250,7 +3248,7 @@ def filter(self, items=None, like=None, regex=None, axis=None): """ import re - nkw = sum([x is not None for x in [items, like, regex]]) + nkw = _count_not_none(items, like, regex) if nkw > 1: raise TypeError('Keyword arguments `items`, `like`, or `regex` ' 'are mutually exclusive') diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py index ccaf90b4482a7..3b7d3685db3b7 100644 --- a/pandas/core/groupby.py +++ b/pandas/core/groupby.py @@ -39,7 +39,8 @@ from pandas.core.dtypes.missing import isna, notna, _maybe_fill from pandas.core.common import (_values_from_object, AbstractMethodError, - _default_index) + _default_index, _not_none, _get_callable_name, + _asarray_tuplesafe) from pandas.core.base import (PandasObject, SelectionMixin, GroupByError, DataError, SpecificationError) @@ -60,7 +61,6 @@ from pandas.util._validators import validate_kwargs import pandas.core.algorithms as algorithms -import pandas.core.common as com from pandas.core.config import option_context from pandas.plotting._core import boxplot_frame_groupby @@ -877,10 +877,9 @@ def _concat_objects(self, keys, values, not_indexed_same=False): def reset_identity(values): # reset the identities of the components 
# of the values to prevent aliasing - for v in values: - if v is not None: - ax = v._get_axis(self.axis) - ax._reset_identity() + for v in _not_none(*values): + ax = v._get_axis(self.axis) + ax._reset_identity() return values if not not_indexed_same: @@ -1806,7 +1805,7 @@ def apply(self, f, data, axis=0): group_keys = self._get_group_keys() # oh boy - f_name = com._get_callable_name(f) + f_name = _get_callable_name(f) if (f_name not in _plotting_methods and hasattr(splitter, 'fast_apply') and axis == 0): try: @@ -2533,7 +2532,7 @@ def __init__(self, index, grouper=None, obj=None, name=None, level=None, self.grouper = self.obj[self.name] elif isinstance(self.grouper, (list, tuple)): - self.grouper = com._asarray_tuplesafe(self.grouper) + self.grouper = _asarray_tuplesafe(self.grouper) # a passed Categorical elif is_categorical_dtype(self.grouper): @@ -2739,7 +2738,7 @@ def _get_grouper(obj, key=None, axis=0, level=None, sort=True, if not any_callable and not all_in_columns_index and \ not any_arraylike and not any_groupers and \ match_axis_length and level is None: - keys = [com._asarray_tuplesafe(keys)] + keys = [_asarray_tuplesafe(keys)] if isinstance(level, (tuple, list)): if key is None: @@ -3028,7 +3027,7 @@ def _aggregate_multiple_funcs(self, arg, _level): columns.append(f) else: # protect against callables without names - columns.append(com._get_callable_name(f)) + columns.append(_get_callable_name(f)) arg = lzip(columns, arg) results = {} @@ -3686,14 +3685,13 @@ def _wrap_applied_output(self, keys, values, not_indexed_same=False): key_names = self.grouper.names # GH12824. - def first_non_None_value(values): + def first_not_none(values): try: - v = next(v for v in values if v is not None) + return next(_not_none(*values)) except StopIteration: return None - return v - v = first_non_None_value(values) + v = first_not_none(values) if v is None: # GH9684. If all values are None, then this will throw an error. 
@@ -3726,7 +3724,7 @@ def first_non_None_value(values): key_index = None # make Nones an empty object - v = first_non_None_value(values) + v = first_not_none(values) if v is None: return DataFrame() elif isinstance(v, NDFrame): diff --git a/pandas/core/indexes/api.py b/pandas/core/indexes/api.py index d20a0b0a2c73d..08cda8a06ba64 100644 --- a/pandas/core/indexes/api.py +++ b/pandas/core/indexes/api.py @@ -123,7 +123,7 @@ def _get_consensus_names(indexes): # find the non-none names, need to tupleify to make # the set hashable, then reverse on return consensus_names = set([tuple(i.names) for i in indexes - if any(n is not None for n in i.names)]) + if com._any_not_none(*i.names)]) if len(consensus_names) == 1: return list(list(consensus_names)[0]) return [None] * indexes[0].nlevels diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index df0e963e7628d..c3343f149005c 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -42,16 +42,15 @@ needs_i8_conversion, is_iterator, is_list_like, is_scalar) -from pandas.core.common import (is_bool_indexer, - _values_from_object, - _asarray_tuplesafe) +from pandas.core.common import (is_bool_indexer, _values_from_object, + _asarray_tuplesafe, _not_none, + _index_labels_to_array) from pandas.core.base import PandasObject, IndexOpsMixin import pandas.core.base as base from pandas.util._decorators import ( Appender, Substitution, cache_readonly, deprecate_kwarg) from pandas.core.indexes.frozen import FrozenList -import pandas.core.common as com import pandas.core.dtypes.concat as _concat import pandas.core.missing as missing import pandas.core.algorithms as algos @@ -3168,8 +3167,8 @@ def _join_multi(self, other, how, return_indexers=True): other_is_mi = isinstance(other, MultiIndex) # figure out join names - self_names = [n for n in self.names if n is not None] - other_names = [n for n in other.names if n is not None] + self_names = _not_none(*self.names) + other_names = _not_none(*other.names) overlap = list(set(self_names) & set(other_names)) # need at least 1 in common, but not more than 1 @@ -3714,7 +3713,7 @@ def drop(self, labels, errors='raise'): ------- dropped : Index """ - labels = com._index_labels_to_array(labels) + labels = _index_labels_to_array(labels) indexer = self.get_indexer(labels) mask = indexer == -1 if mask.any(): diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py index 06b208b4d174e..4cc59f5297058 100644 --- a/pandas/core/indexes/multi.py +++ b/pandas/core/indexes/multi.py @@ -21,7 +21,8 @@ is_scalar) from pandas.core.dtypes.missing import isna, array_equivalent from pandas.errors import PerformanceWarning, UnsortedIndexError -from pandas.core.common import (_values_from_object, +from pandas.core.common import (_any_not_none, + _values_from_object, is_bool_indexer, is_null_slice, is_true_slices) @@ -509,7 +510,7 @@ def _format_attrs(self): max_seq_items=False)), ('labels', ibase.default_pprint(self._labels, max_seq_items=False))] - if not all(name is None for name in self.names): + if _any_not_none(*self.names): attrs.append(('names', ibase.default_pprint(self.names))) if self.sortorder is not None: attrs.append(('sortorder', ibase.default_pprint(self.sortorder))) diff --git a/pandas/core/indexes/range.py b/pandas/core/indexes/range.py index 9f7bac641ae08..b2e55d4826670 100644 --- a/pandas/core/indexes/range.py +++ b/pandas/core/indexes/range.py @@ -12,6 +12,7 @@ from pandas import compat from pandas.compat import lrange, range from pandas.compat.numpy import function 
as nv +from pandas.core.common import _all_none from pandas.core.indexes.base import Index, _index_shared_docs from pandas.util._decorators import Appender, cache_readonly import pandas.core.dtypes.concat as _concat @@ -83,7 +84,7 @@ def _ensure_int(value, field): return new_value - if start is None and stop is None and step is None: + if _all_none(start, stop, step): msg = "RangeIndex(...) must be called with integers" raise TypeError(msg) elif start is None: diff --git a/pandas/core/panel.py b/pandas/core/panel.py index 1f22cb49d0196..997dd9c8e0f67 100644 --- a/pandas/core/panel.py +++ b/pandas/core/panel.py @@ -15,13 +15,13 @@ is_string_like, is_scalar) from pandas.core.dtypes.missing import notna -import pandas.core.common as com import pandas.core.ops as ops import pandas.core.missing as missing from pandas import compat from pandas.compat import (map, zip, range, u, OrderedDict) from pandas.compat.numpy import function as nv -from pandas.core.common import _try_sort, _default_index +from pandas.core.common import (_try_sort, _default_index, _all_not_none, + _any_not_none, _apply_if_callable) from pandas.core.frame import DataFrame from pandas.core.generic import NDFrame, _shared_docs from pandas.core.index import (Index, MultiIndex, _ensure_index, @@ -166,7 +166,7 @@ def _init_data(self, data, copy, dtype, **kwargs): axes = None if isinstance(data, BlockManager): - if any(x is not None for x in passed_axes): + if _any_not_none(*passed_axes): axes = [x if x is not None else y for x, y in zip(passed_axes, data.axes)] mgr = data @@ -178,7 +178,7 @@ def _init_data(self, data, copy, dtype, **kwargs): mgr = self._init_matrix(data, passed_axes, dtype=dtype, copy=copy) copy = False dtype = None - elif is_scalar(data) and all(x is not None for x in passed_axes): + elif is_scalar(data) and _all_not_none(*passed_axes): values = cast_scalar_to_array([len(x) for x in passed_axes], data, dtype=dtype) mgr = self._init_matrix(values, passed_axes, dtype=values.dtype, @@ -279,7 +279,7 @@ def from_dict(cls, data, intersect=False, orient='items', dtype=None): return cls(**d) def __getitem__(self, key): - key = com._apply_if_callable(key, self) + key = _apply_if_callable(key, self) if isinstance(self._info_axis, MultiIndex): return self._getitem_multilevel(key) @@ -594,7 +594,7 @@ def _box_item_values(self, key, values): return self._constructor_sliced(values, **d) def __setitem__(self, key, value): - key = com._apply_if_callable(key, self) + key = _apply_if_callable(key, self) shape = tuple(self.shape) if isinstance(value, self._constructor_sliced): value = value.reindex( @@ -616,7 +616,9 @@ def __setitem__(self, key, value): def _unpickle_panel_compat(self, state): # pragma: no cover "Unpickle the panel" - _unpickle = com._unpickle_array + from pandas.io.pickle import _unpickle_array + + _unpickle = _unpickle_array vals, items, major, minor = state items = _unpickle(items) diff --git a/pandas/core/reshape/concat.py b/pandas/core/reshape/concat.py index 4040c65136617..4c3c662c87373 100644 --- a/pandas/core/reshape/concat.py +++ b/pandas/core/reshape/concat.py @@ -241,7 +241,7 @@ def __init__(self, objs, axis=0, join='outer', join_axes=None, raise ValueError('No objects to concatenate') if keys is None: - objs = [obj for obj in objs if obj is not None] + objs = list(com._not_none(*objs)) else: # #1649 clean_keys = [] diff --git a/pandas/core/reshape/merge.py b/pandas/core/reshape/merge.py index 6bb6988a7442a..e409090e76944 100644 --- a/pandas/core/reshape/merge.py +++ b/pandas/core/reshape/merge.py @@ 
-1550,4 +1550,4 @@ def _should_fill(lname, rname): def _any(x): - return x is not None and len(x) > 0 and any([y is not None for y in x]) + return x is not None and com._any_not_none(*x) diff --git a/pandas/core/series.py b/pandas/core/series.py index 8499f8b55d2d0..76baa89f165d4 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -45,7 +45,8 @@ SettingWithCopyError, _maybe_box_datetimelike, _dict_compat, - standardize_mapping) + standardize_mapping, + _any_none) from pandas.core.index import (Index, MultiIndex, InvalidIndexError, Float64Index, _ensure_index) from pandas.core.indexing import check_bool_indexer, maybe_convert_indices @@ -713,7 +714,7 @@ def _get_with(self, key): def _get_values_tuple(self, key): # mpl hackaround - if any(k is None for k in key): + if _any_none(*key): return self._get_values(key) if not isinstance(self.index, MultiIndex): diff --git a/pandas/core/window.py b/pandas/core/window.py index e3a091573aa2f..5143dddc5e866 100644 --- a/pandas/core/window.py +++ b/pandas/core/window.py @@ -32,7 +32,7 @@ from pandas.core.base import (PandasObject, SelectionMixin, GroupByMixin) -import pandas.core.common as com +from pandas.core.common import _asarray_tuplesafe, _count_not_none import pandas._libs.window as _window from pandas import compat @@ -535,7 +535,7 @@ def _prep_window(self, **kwargs): window = self._get_window() if isinstance(window, (list, tuple, np.ndarray)): - return com._asarray_tuplesafe(window).astype(float) + return _asarray_tuplesafe(window).astype(float) elif is_integer(window): import scipy.signal as sig @@ -1972,8 +1972,7 @@ def dataframe_from_int_dict(data, frame_template): def _get_center_of_mass(com, span, halflife, alpha): - valid_count = len([x for x in [com, span, halflife, alpha] - if x is not None]) + valid_count = _count_not_none(com, span, halflife, alpha) if valid_count > 1: raise ValueError("com, span, halflife, and alpha " "are mutually exclusive") diff --git a/pandas/io/formats/excel.py b/pandas/io/formats/excel.py index 9e888c38edaa7..af24537cabf90 100644 --- a/pandas/io/formats/excel.py +++ b/pandas/io/formats/excel.py @@ -10,6 +10,7 @@ from pandas.compat import reduce from pandas.io.formats.css import CSSResolver, CSSWarning from pandas.io.formats.printing import pprint_thing +from pandas.core.common import _any_not_none from pandas.core.dtypes.common import is_float import pandas._libs.lib as lib from pandas import Index, MultiIndex, PeriodIndex @@ -548,8 +549,7 @@ def _format_hierarchical_rows(self): self.rowcounter += 1 # if index labels are not empty go ahead and dump - if (any(x is not None for x in index_labels) and - self.header is not False): + if _any_not_none(*index_labels) and self.header is not False: for cidx, name in enumerate(index_labels): yield ExcelCell(self.rowcounter - 1, cidx, name, diff --git a/pandas/io/formats/format.py b/pandas/io/formats/format.py index 4608c3fe0ceb8..c5d4a0ecf44ab 100644 --- a/pandas/io/formats/format.py +++ b/pandas/io/formats/format.py @@ -26,6 +26,7 @@ is_list_like) from pandas.core.dtypes.generic import ABCSparseArray from pandas.core.base import PandasObject +from pandas.core.common import _any_not_none, sentinel_factory from pandas.core.index import Index, MultiIndex, _ensure_index from pandas import compat from pandas.compat import (StringIO, lzip, range, map, zip, u, @@ -36,7 +37,6 @@ _stringify_path) from pandas.io.formats.printing import adjoin, justify, pprint_thing from pandas.io.formats.common import get_level_lengths -import pandas.core.common as com import 
pandas._libs.lib as lib from pandas._libs.tslib import (iNaT, Timestamp, Timedelta, format_array_from_datetime) @@ -1257,7 +1257,7 @@ def _column_header(): if self.fmt.sparsify: # GH3547 - sentinel = com.sentinel_factory() + sentinel = sentinel_factory() else: sentinel = None levels = self.columns.format(sparsify=sentinel, adjoin=False, @@ -1426,7 +1426,7 @@ def _write_hierarchical_rows(self, fmt_values, indent): if self.fmt.sparsify: # GH3547 - sentinel = com.sentinel_factory() + sentinel = sentinel_factory() levels = frame.index.format(sparsify=sentinel, adjoin=False, names=False) @@ -2352,7 +2352,7 @@ def single_row_table(row): # pragma: no cover def _has_names(index): if isinstance(index, MultiIndex): - return any([x is not None for x in index.names]) + return _any_not_none(*index.names) else: return index.name is not None diff --git a/pandas/io/formats/style.py b/pandas/io/formats/style.py index d7677e3642c26..2e87b3b925edd 100644 --- a/pandas/io/formats/style.py +++ b/pandas/io/formats/style.py @@ -27,7 +27,7 @@ from pandas.compat import range from pandas.core.config import get_option from pandas.core.generic import _shared_docs -import pandas.core.common as com +from pandas.core.common import _any_not_none, sentinel_factory from pandas.core.indexing import _maybe_numeric_slice, _non_reducing_slice from pandas.util._decorators import Appender try: @@ -259,8 +259,7 @@ def format_attr(pair): row_es.append(es) head.append(row_es) - if self.data.index.names and not all(x is None - for x in self.data.index.names): + if self.data.index.names and _any_not_none(*self.data.index.names): index_header_row = [] for c, name in enumerate(self.data.index.names): @@ -1204,7 +1203,7 @@ def _get_level_lengths(index): Result is a dictionary of (level, inital_position): span """ - sentinel = com.sentinel_factory() + sentinel = sentinel_factory() levels = index.format(sparsify=sentinel, adjoin=False, names=False) if index.nlevels == 1: diff --git a/pandas/io/json/table_schema.py b/pandas/io/json/table_schema.py index c3865afa9c0c0..9cec5b3d6ba49 100644 --- a/pandas/io/json/table_schema.py +++ b/pandas/io/json/table_schema.py @@ -3,6 +3,7 @@ http://specs.frictionlessdata.io/json-table-schema/ """ +from pandas.core.common import _all_not_none from pandas.core.dtypes.common import ( is_integer_dtype, is_timedelta64_dtype, is_numeric_dtype, is_bool_dtype, is_datetime64_dtype, is_datetime64tz_dtype, @@ -61,7 +62,7 @@ def as_json_table_type(x): def set_default_names(data): """Sets index names to 'index' for regular, or 'level_x' for Multi""" - if all(name is not None for name in data.index.names): + if _all_not_none(*data.index.names): return data data = data.copy() diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py index 39d088e00b219..ad8af05343808 100644 --- a/pandas/io/pytables.py +++ b/pandas/io/pytables.py @@ -34,7 +34,7 @@ from pandas.core.base import StringMixin from pandas.io.formats.printing import adjoin, pprint_thing from pandas.errors import PerformanceWarning -from pandas.core.common import _asarray_tuplesafe +from pandas.core.common import _asarray_tuplesafe, _all_none from pandas.core.algorithms import match, unique from pandas.core.categorical import Categorical, _factorize_from_iterables from pandas.core.internals import (BlockManager, make_block, @@ -905,7 +905,7 @@ def remove(self, key, where=None, start=None, stop=None): raise KeyError('No object named %s in the file' % key) # remove the node - if where is None and start is None and stop is None: + if _all_none(where, start, stop): 
s.group._f_remove(recursive=True) # delete from the table @@ -2363,7 +2363,7 @@ def delete(self, where=None, start=None, stop=None, **kwargs): support fully deleting the node in its entirety (only) - where specification must be None """ - if where is None and start is None and stop is None: + if _all_none(where, start, stop): self._handle.remove_node(self.group, recursive=True) return None diff --git a/pandas/plotting/_core.py b/pandas/plotting/_core.py index 0d77b5f41a08e..ad3c4f0ecb05f 100644 --- a/pandas/plotting/_core.py +++ b/pandas/plotting/_core.py @@ -20,7 +20,7 @@ is_iterator) from pandas.core.dtypes.generic import ABCSeries -from pandas.core.common import AbstractMethodError, _try_sort +from pandas.core.common import AbstractMethodError, _try_sort, _any_not_none from pandas.core.generic import _shared_docs, _shared_doc_kwargs from pandas.core.index import Index, MultiIndex @@ -607,7 +607,7 @@ def _plot(cls, ax, x, y, style=None, is_errorbar=False, **kwds): def _get_index_name(self): if isinstance(self.data.index, MultiIndex): name = self.data.index.names - if any(x is not None for x in name): + if _any_not_none(*name): name = ','.join([pprint_thing(x) for x in name]) else: name = None @@ -955,7 +955,7 @@ def _make_plot(self): it = self._iter_data() stacking_id = self._get_stacking_id() - is_errorbar = any(e is not None for e in self.errors.values()) + is_errorbar = _any_not_none(*self.errors.values()) colors = self._get_colors() for i, (label, y) in enumerate(it): diff --git a/pandas/tests/frame/test_to_csv.py b/pandas/tests/frame/test_to_csv.py index ab34ce877a726..a8449d6f874df 100644 --- a/pandas/tests/frame/test_to_csv.py +++ b/pandas/tests/frame/test_to_csv.py @@ -9,6 +9,7 @@ import numpy as np from pandas.compat import (lmap, range, lrange, StringIO, u) +from pandas.core.common import _all_none from pandas.errors import ParserError from pandas import (DataFrame, Index, Series, MultiIndex, Timestamp, date_range, read_csv, compat, to_datetime) @@ -570,7 +571,7 @@ def _make_frame(names=None): df = _make_frame(True) df.to_csv(path, tupleize_cols=False, index=False) result = read_csv(path, header=[0, 1], tupleize_cols=False) - assert all([x is None for x in result.columns.names]) + assert _all_none(*result.columns.names) result.columns.names = df.columns.names assert_frame_equal(df, result) diff --git a/pandas/tests/util/test_util.py b/pandas/tests/util/test_util.py index ffc9703abff41..659ce36de6bab 100644 --- a/pandas/tests/util/test_util.py +++ b/pandas/tests/util/test_util.py @@ -8,6 +8,7 @@ import pytest from pandas.compat import intern +from pandas.core.common import _all_none from pandas.util._move import move_into_mutable_buffer, BadMove, stolenbuf from pandas.util._decorators import deprecate_kwarg, make_signature from pandas.util._validators import (validate_args, validate_kwargs, @@ -437,7 +438,7 @@ def test_set_locale(self): pytest.skip("Only a single locale found, no point in " "trying to test setting another locale") - if all(x is None for x in self.current_locale): + if _all_none(*self.current_locale): # Not sure why, but on some travis runs with pytest, # getlocale() returned (None, None). 
pytest.skip("Current locale is not set.") diff --git a/pandas/util/testing.py b/pandas/util/testing.py index 3c23462e10d35..730d2782e85d2 100644 --- a/pandas/util/testing.py +++ b/pandas/util/testing.py @@ -33,6 +33,7 @@ is_list_like) from pandas.io.formats.printing import pprint_thing from pandas.core.algorithms import take_1d +from pandas.core.common import _all_not_none import pandas.compat as compat from pandas.compat import ( @@ -579,7 +580,7 @@ def set_locale(new_locale, lc_var=locale.LC_ALL): except ValueError: yield new_locale else: - if all(lc is not None for lc in normalized_locale): + if _all_not_none(*normalized_locale): yield '.'.join(normalized_locale) else: yield new_locale
- [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` Used `None`-checking functions from `pandas.core.common` where applicable. Common patterns changed: - `all(v is not None for v in values)` -> `_all_not_none(*values)` - Used `*` for argument unpacking in many cases; not sure if that's frowned upon. - `a is not None and b is not None and c is not None` -> `_all_not_none(a, b, c)` - Only did this for 3+ checks against `None`; left checks of 2 as-is. Also created an `_any_not_none` function similar to the other three that exist. I suppose it is equivalent to `not _all_none(...)`, but using `_any_not_none` is more readable and indicative of intent.
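For readers skimming the diff, the helpers reduce to the following equivalent one-liners (a sketch only; the in-tree versions are written as explicit loops, as shown in the hunks above):

```python
def _any_none(*args):
    """True if at least one argument is None."""
    return any(arg is None for arg in args)


def _all_none(*args):
    """True if every argument is None."""
    return all(arg is None for arg in args)


def _any_not_none(*args):
    """True if at least one argument is not None."""
    return any(arg is not None for arg in args)


def _all_not_none(*args):
    """True if no argument is None."""
    return all(arg is not None for arg in args)


def _count_not_none(*args):
    """Number of arguments that are not None."""
    return sum(arg is not None for arg in args)


# A call-site change in the spirit of the diff:
# before: nkw = sum([x is not None for x in [items, like, regex]])
# after:  nkw = _count_not_none(items, like, regex)
```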
https://api.github.com/repos/pandas-dev/pandas/pulls/17816
2017-10-08T02:20:49Z
2017-10-13T11:32:11Z
2017-10-13T11:32:11Z
2017-10-19T23:16:11Z
ENH: Add Index.to_frame method
diff --git a/doc/source/api.rst b/doc/source/api.rst index d98a18e6f7e36..646a28686bb06 100644 --- a/doc/source/api.rst +++ b/doc/source/api.rst @@ -1376,6 +1376,7 @@ Conversion Index.tolist Index.to_datetime Index.to_series + Index.to_frame Sorting ~~~~~~~ @@ -1591,6 +1592,7 @@ Conversion DatetimeIndex.to_perioddelta DatetimeIndex.to_pydatetime DatetimeIndex.to_series + DatetimeIndex.to_frame TimedeltaIndex -------------- @@ -1623,6 +1625,7 @@ Conversion TimedeltaIndex.round TimedeltaIndex.floor TimedeltaIndex.ceil + TimedeltaIndex.to_frame .. currentmodule:: pandas diff --git a/doc/source/whatsnew/v0.21.0.txt b/doc/source/whatsnew/v0.21.0.txt index 1e9c402dac73e..595fab9e18ea4 100644 --- a/doc/source/whatsnew/v0.21.0.txt +++ b/doc/source/whatsnew/v0.21.0.txt @@ -31,6 +31,7 @@ New features - Added ``skipna`` parameter to :func:`~pandas.api.types.infer_dtype` to support type inference in the presence of missing values (:issue:`17059`). - :class:`~pandas.Resampler.nearest` is added to support nearest-neighbor upsampling (:issue:`17496`). +- :class:`~pandas.Index` has added support for a ``to_frame`` method (:issue:`15230`) .. _whatsnew_0210.enhancements.infer_objects: diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 0a55559750d7c..df0e963e7628d 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -1005,6 +1005,29 @@ def to_series(self, **kwargs): index=self._shallow_copy(), name=self.name) + def to_frame(self, index=True): + """ + Create a DataFrame with a column containing the Index. + + .. versionadded:: 0.21.0 + + Parameters + ---------- + index : boolean, default True + Set the index of the returned DataFrame as the original Index. + + Returns + ------- + DataFrame : a DataFrame containing the original Index data. + """ + + from pandas import DataFrame + result = DataFrame(self._shallow_copy(), columns=[self.name or 0]) + + if index: + result.index = self + return result + def _to_embed(self, keep_tz=False): """ *this is an internal non-public method* diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py index 25897bee29845..dae62176722e1 100644 --- a/pandas/core/indexes/datetimes.py +++ b/pandas/core/indexes/datetimes.py @@ -915,6 +915,46 @@ def to_series(self, keep_tz=False): index=self._shallow_copy(), name=self.name) + def to_frame(self, index=True, keep_tz=False): + """ + Create a DataFrame with a column containing the DatetimeIndex. + + .. versionadded:: 0.21.0 + + Parameters + ---------- + index : boolean, default True + Set the index of the returned DataFrame + as the original DatetimeIndex. + + keep_tz : optional, defaults False. + return the data keeping the timezone. + + If keep_tz is True: + + If the timezone is not set, the resulting + Series will have a datetime64[ns] dtype. + + Otherwise the DataFrame will have an datetime64[ns, tz] dtype; + the tz will be preserved. + + If keep_tz is False: + + DataFrame will have a datetime64[ns] dtype. TZ aware + objects will have the tz removed. + + Returns + ------- + DataFrame : a DataFrame containing the original DatetimeIndex data. 
+ """ + + from pandas import DataFrame + result = DataFrame(self._to_embed(keep_tz), columns=[self.name or 0]) + + if index: + result.index = self + return result + def _to_embed(self, keep_tz=False): """ return an array repr of this object, potentially casting to object diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py index 4b6e31133ba4b..06b208b4d174e 100644 --- a/pandas/core/indexes/multi.py +++ b/pandas/core/indexes/multi.py @@ -1010,18 +1010,18 @@ def _to_safe_for_reshape(self): def to_frame(self, index=True): """ - Create a DataFrame with the columns the levels of the MultiIndex + Create a DataFrame with the levels of the MultiIndex as columns. .. versionadded:: 0.20.0 Parameters ---------- index : boolean, default True - return this MultiIndex as the index + Set the index of the returned DataFrame as the original MultiIndex. Returns ------- - DataFrame + DataFrame : a DataFrame containing the original MultiIndex data. """ from pandas import DataFrame diff --git a/pandas/tests/indexes/common.py b/pandas/tests/indexes/common.py index 970dd7b63225a..456e5a9bd6439 100644 --- a/pandas/tests/indexes/common.py +++ b/pandas/tests/indexes/common.py @@ -51,6 +51,21 @@ def test_to_series(self): assert s.index is not idx assert s.name == idx.name + def test_to_frame(self): + # see gh-15230 + idx = self.create_index() + name = idx.name or 0 + + df = idx.to_frame() + + assert df.index is idx + assert len(df.columns) == 1 + assert df.columns[0] == name + assert df[name].values is not idx.values + + df = idx.to_frame(index=False) + assert df.index is not idx + def test_shift(self): # GH8083 test the base class for shift
Title is self-explanatory. Closes #15230.
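A quick usage sketch of the new method:

```python
import pandas as pd

idx = pd.Index(["a", "b", "c"], name="letters")

# By default the Index itself becomes the frame's index.
idx.to_frame()
#         letters
# letters
# a             a
# b             b
# c             c

# index=False uses a default RangeIndex instead.
idx.to_frame(index=False)
#   letters
# 0       a
# 1       b
# 2       c
```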
https://api.github.com/repos/pandas-dev/pandas/pulls/17815
2017-10-08T01:58:02Z
2017-10-09T12:17:05Z
2017-10-09T12:17:05Z
2017-10-09T16:34:09Z
DOC: Fix docstring for tsplot
diff --git a/pandas/plotting/_timeseries.py b/pandas/plotting/_timeseries.py index 3d04973ed0009..0d087f5d10123 100644 --- a/pandas/plotting/_timeseries.py +++ b/pandas/plotting/_timeseries.py @@ -22,16 +22,22 @@ def tsplot(series, plotf, ax=None, **kwargs): """ - Plots a Series on the given Matplotlib axes or the current axes + Plots a Series on the given Matplotlib axes or the current axes. Parameters ---------- - axes : Axes series : Series - - Notes - _____ - Supports same kwargs as Axes.plot + plotf : function + Function to plot the given Series. + ax : matplotlib axes object, optional, default=None + Axes to plot upon. If none, plots on current axis. + kwargs + Additional keyword arguments passed to plotf. + + Returns + ------- + lines : list of matplotlib.lines.Line2D object + Resultant figure. """ # Used inferred freq is possible, need a test case for inferred
Browsing through pandas, I noticed a small plotting function whose docs were incomplete / out of date. This is a small update to the docstring for tsplot in plotting/_timeseries. - [ ] closes #xxxx - [ ] tests added / passed - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
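A hedged usage sketch matching the updated docstring (tsplot lives in a private module, so the import path is an assumption and may change between versions):

```python
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

from pandas.plotting._timeseries import tsplot  # private, not stable API

ts = pd.Series(np.random.randn(100).cumsum(),
               index=pd.date_range("2000-01-01", periods=100, freq="D"))

fig, ax = plt.subplots()
# plotf is invoked as plotf(ax, x, y, **kwargs); the unbound
# Axes.plot method matches that calling convention.
lines = tsplot(ts, plt.Axes.plot, ax=ax)
```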
https://api.github.com/repos/pandas-dev/pandas/pulls/17814
2017-10-07T20:24:53Z
2017-12-02T17:33:48Z
null
2017-12-04T11:17:17Z
DEPR: Deprecate from_csv in favor of read_csv
diff --git a/doc/source/whatsnew/v0.21.0.txt b/doc/source/whatsnew/v0.21.0.txt index ed3be71852299..2eefc7ec1b636 100644 --- a/doc/source/whatsnew/v0.21.0.txt +++ b/doc/source/whatsnew/v0.21.0.txt @@ -715,6 +715,7 @@ Other API Changes Deprecations ~~~~~~~~~~~~ +- :meth:`DataFrame.from_csv` and :meth:`Series.from_csv` have been deprecated in favor of :func:`read_csv()` (:issue:`4191`) - :func:`read_excel()` has deprecated ``sheetname`` in favor of ``sheet_name`` for consistency with ``.to_excel()`` (:issue:`10559`). - :func:`read_excel()` has deprecated ``parse_cols`` in favor of ``usecols`` for consistency with :func:`read_csv` (:issue:`4988`) - :func:`read_csv()` has deprecated the ``tupleize_cols`` argument. Column tuples will always be converted to a ``MultiIndex`` (:issue:`17060`) diff --git a/pandas/core/frame.py b/pandas/core/frame.py index d907492759dbd..c536cc9f2b82c 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -298,7 +298,7 @@ def _constructor(self): _constructor_sliced = Series _deprecations = NDFrame._deprecations | frozenset( - ['sortlevel', 'get_value', 'set_value']) + ['sortlevel', 'get_value', 'set_value', 'from_csv']) @property def _constructor_expanddim(self): @@ -1291,7 +1291,7 @@ def from_csv(cls, path, header=0, sep=',', index_col=0, parse_dates=True, encoding=None, tupleize_cols=False, infer_datetime_format=False): """ - Read CSV file (DISCOURAGED, please use :func:`pandas.read_csv` + Read CSV file (DEPRECATED, please use :func:`pandas.read_csv` instead). It is preferable to use the more powerful :func:`pandas.read_csv` @@ -1339,6 +1339,13 @@ def from_csv(cls, path, header=0, sep=',', index_col=0, parse_dates=True, y : DataFrame """ + + warnings.warn("from_csv is deprecated. Please use read_csv(...) " + "instead. Note that some of the default arguments are " + "different, so please refer to the documentation " + "for from_csv when changing your function calls", + FutureWarning, stacklevel=2) + from pandas.io.parsers import read_table return read_table(path, header=header, sep=sep, parse_dates=parse_dates, index_col=index_col, diff --git a/pandas/core/series.py b/pandas/core/series.py index 49b6a6651367b..be4066f0c39b9 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -147,7 +147,7 @@ class Series(base.IndexOpsMixin, generic.NDFrame): _metadata = ['name'] _accessors = frozenset(['dt', 'cat', 'str']) _deprecations = generic.NDFrame._deprecations | frozenset( - ['sortlevel', 'reshape', 'get_value', 'set_value']) + ['sortlevel', 'reshape', 'get_value', 'set_value', 'from_csv']) _allow_index_ops = True def __init__(self, data=None, index=None, dtype=None, name=None, @@ -2688,7 +2688,7 @@ def between(self, left, right, inclusive=True): def from_csv(cls, path, sep=',', parse_dates=True, header=None, index_col=0, encoding=None, infer_datetime_format=False): """ - Read CSV file (DISCOURAGED, please use :func:`pandas.read_csv` + Read CSV file (DEPRECATED, please use :func:`pandas.read_csv` instead). It is preferable to use the more powerful :func:`pandas.read_csv` @@ -2736,6 +2736,9 @@ def from_csv(cls, path, sep=',', parse_dates=True, header=None, ------- y : Series """ + + # We're calling `DataFrame.from_csv` in the implementation, + # which will propagate a warning regarding `from_csv` deprecation. 
from pandas.core.frame import DataFrame df = DataFrame.from_csv(path, header=header, index_col=index_col, sep=sep, parse_dates=parse_dates, diff --git a/pandas/tests/frame/test_to_csv.py b/pandas/tests/frame/test_to_csv.py index a61a157181253..ab34ce877a726 100644 --- a/pandas/tests/frame/test_to_csv.py +++ b/pandas/tests/frame/test_to_csv.py @@ -31,6 +31,21 @@ class TestDataFrameToCSV(TestData): + def read_csv(self, path, **kwargs): + params = dict(index_col=0, parse_dates=True) + params.update(**kwargs) + + return pd.read_csv(path, **params) + + def test_from_csv_deprecation(self): + # see gh-17812 + with ensure_clean('__tmp_from_csv_deprecation__') as path: + self.tsframe.to_csv(path) + + with tm.assert_produces_warning(FutureWarning): + depr_recons = DataFrame.from_csv(path) + assert_frame_equal(self.tsframe, depr_recons) + def test_to_csv_from_csv1(self): with ensure_clean('__tmp_to_csv_from_csv1__') as path: @@ -43,24 +58,25 @@ def test_to_csv_from_csv1(self): # test roundtrip self.tsframe.to_csv(path) - recons = DataFrame.from_csv(path) - + recons = self.read_csv(path) assert_frame_equal(self.tsframe, recons) self.tsframe.to_csv(path, index_label='index') - recons = DataFrame.from_csv(path, index_col=None) + recons = self.read_csv(path, index_col=None) + assert(len(recons.columns) == len(self.tsframe.columns) + 1) # no index self.tsframe.to_csv(path, index=False) - recons = DataFrame.from_csv(path, index_col=None) + recons = self.read_csv(path, index_col=None) assert_almost_equal(self.tsframe.values, recons.values) # corner case dm = DataFrame({'s1': Series(lrange(3), lrange(3)), 's2': Series(lrange(2), lrange(2))}) dm.to_csv(path) - recons = DataFrame.from_csv(path) + + recons = self.read_csv(path) assert_frame_equal(dm, recons) def test_to_csv_from_csv2(self): @@ -71,27 +87,26 @@ def test_to_csv_from_csv2(self): df = DataFrame(np.random.randn(3, 3), index=['a', 'a', 'b'], columns=['x', 'y', 'z']) df.to_csv(path) - result = DataFrame.from_csv(path) + result = self.read_csv(path) assert_frame_equal(result, df) midx = MultiIndex.from_tuples( [('A', 1, 2), ('A', 1, 2), ('B', 1, 2)]) df = DataFrame(np.random.randn(3, 3), index=midx, columns=['x', 'y', 'z']) + df.to_csv(path) - result = DataFrame.from_csv(path, index_col=[0, 1, 2], - parse_dates=False) - # TODO from_csv names index ['Unnamed: 1', 'Unnamed: 2'] should it - # ? 
+ result = self.read_csv(path, index_col=[0, 1, 2], + parse_dates=False) assert_frame_equal(result, df, check_names=False) # column aliases col_aliases = Index(['AA', 'X', 'Y', 'Z']) self.frame2.to_csv(path, header=col_aliases) - rs = DataFrame.from_csv(path) + + rs = self.read_csv(path) xp = self.frame2.copy() xp.columns = col_aliases - assert_frame_equal(xp, rs) pytest.raises(ValueError, self.frame2.to_csv, path, @@ -231,8 +246,9 @@ def make_dtnat_arr(n, nnat=None): with ensure_clean('1.csv') as pth: df = DataFrame(dict(a=s1, b=s2)) df.to_csv(pth, chunksize=chunksize) - recons = DataFrame.from_csv(pth)._convert(datetime=True, - coerce=True) + + recons = self.read_csv(pth)._convert(datetime=True, + coerce=True) assert_frame_equal(df, recons, check_names=False, check_less_precise=True) @@ -247,16 +263,17 @@ def _do_test(df, r_dtype=None, c_dtype=None, if rnlvl is not None: kwargs['index_col'] = lrange(rnlvl) kwargs['header'] = lrange(cnlvl) + with ensure_clean('__tmp_to_csv_moar__') as path: df.to_csv(path, encoding='utf8', chunksize=chunksize, tupleize_cols=False) - recons = DataFrame.from_csv( - path, tupleize_cols=False, **kwargs) + recons = self.read_csv(path, tupleize_cols=False, **kwargs) else: kwargs['header'] = 0 + with ensure_clean('__tmp_to_csv_moar__') as path: df.to_csv(path, encoding='utf8', chunksize=chunksize) - recons = DataFrame.from_csv(path, **kwargs) + recons = self.read_csv(path, **kwargs) def _to_uni(x): if not isinstance(x, compat.text_type): @@ -398,7 +415,7 @@ def test_to_csv_from_csv_w_some_infs(self): with ensure_clean() as path: self.frame.to_csv(path) - recons = DataFrame.from_csv(path) + recons = self.read_csv(path) # TODO to_csv drops column name assert_frame_equal(self.frame, recons, check_names=False) @@ -413,7 +430,7 @@ def test_to_csv_from_csv_w_all_infs(self): with ensure_clean() as path: self.frame.to_csv(path) - recons = DataFrame.from_csv(path) + recons = self.read_csv(path) # TODO to_csv drops column name assert_frame_equal(self.frame, recons, check_names=False) @@ -448,11 +465,13 @@ def test_to_csv_headers(self): to_df = DataFrame([[1, 2], [3, 4]], columns=['X', 'Y']) with ensure_clean('__tmp_to_csv_headers__') as path: from_df.to_csv(path, header=['X', 'Y']) - recons = DataFrame.from_csv(path) + recons = self.read_csv(path) + assert_frame_equal(to_df, recons) from_df.to_csv(path, index=False, header=['X', 'Y']) - recons = DataFrame.from_csv(path) + recons = self.read_csv(path) + recons.reset_index(inplace=True) assert_frame_equal(to_df, recons) @@ -471,13 +490,15 @@ def test_to_csv_multiindex(self): # round trip frame.to_csv(path) - df = DataFrame.from_csv(path, index_col=[0, 1], parse_dates=False) + + df = self.read_csv(path, index_col=[0, 1], + parse_dates=False) # TODO to_csv drops column name assert_frame_equal(frame, df, check_names=False) assert frame.index.names == df.index.names - # needed if setUP becomes a classmethod + # needed if setUp becomes a class method self.frame.index = old_index # try multiindex with dates @@ -487,21 +508,22 @@ def test_to_csv_multiindex(self): tsframe.index = MultiIndex.from_arrays(new_index) tsframe.to_csv(path, index_label=['time', 'foo']) - recons = DataFrame.from_csv(path, index_col=[0, 1]) + recons = self.read_csv(path, index_col=[0, 1]) + # TODO to_csv drops column name assert_frame_equal(tsframe, recons, check_names=False) # do not load index tsframe.to_csv(path) - recons = DataFrame.from_csv(path, index_col=None) + recons = self.read_csv(path, index_col=None) assert len(recons.columns) == 
len(tsframe.columns) + 2 # no index tsframe.to_csv(path, index=False) - recons = DataFrame.from_csv(path, index_col=None) + recons = self.read_csv(path, index_col=None) assert_almost_equal(recons.values, self.tsframe.values) - # needed if setUP becomes classmethod + # needed if setUp becomes class method self.tsframe.index = old_index with ensure_clean('__tmp_to_csv_multiindex__') as path: @@ -606,7 +628,8 @@ def _make_frame(names=None): with ensure_clean('__tmp_to_csv_multiindex__') as path: # empty tsframe[:0].to_csv(path) - recons = DataFrame.from_csv(path) + recons = self.read_csv(path) + exp = tsframe[:0] exp.index = [] @@ -631,7 +654,7 @@ def test_to_csv_withcommas(self): with ensure_clean('__tmp_to_csv_withcommas__.csv') as path: df.to_csv(path) - df2 = DataFrame.from_csv(path) + df2 = self.read_csv(path) assert_frame_equal(df2, df) def test_to_csv_mixed(self): @@ -746,7 +769,7 @@ def test_to_csv_wide_frame_formatting(self): def test_to_csv_bug(self): f1 = StringIO('a,1.0\nb,2.0') - df = DataFrame.from_csv(f1, header=None) + df = self.read_csv(f1, header=None) newdf = DataFrame({'t': df[df.columns[0]]}) with ensure_clean() as path: diff --git a/pandas/tests/series/test_io.py b/pandas/tests/series/test_io.py index 5b7fd1ec94a90..ad51261a47c5c 100644 --- a/pandas/tests/series/test_io.py +++ b/pandas/tests/series/test_io.py @@ -20,43 +20,73 @@ class TestSeriesToCSV(TestData): + def read_csv(self, path, **kwargs): + params = dict(squeeze=True, index_col=0, + header=None, parse_dates=True) + params.update(**kwargs) + + header = params.get("header") + out = pd.read_csv(path, **params) + + if header is None: + out.name = out.index.name = None + + return out + + def test_from_csv_deprecation(self): + # see gh-17812 + with ensure_clean() as path: + self.ts.to_csv(path) + + with tm.assert_produces_warning(FutureWarning, + check_stacklevel=False): + ts = self.read_csv(path) + depr_ts = Series.from_csv(path) + assert_series_equal(depr_ts, ts) + def test_from_csv(self): with ensure_clean() as path: self.ts.to_csv(path) - ts = Series.from_csv(path) + ts = self.read_csv(path) assert_series_equal(self.ts, ts, check_names=False) + assert ts.name is None assert ts.index.name is None - # GH10483 + with tm.assert_produces_warning(FutureWarning, + check_stacklevel=False): + depr_ts = Series.from_csv(path) + assert_series_equal(depr_ts, ts) + + # see gh-10483 self.ts.to_csv(path, header=True) - ts_h = Series.from_csv(path, header=0) - assert ts_h.name == 'ts' + ts_h = self.read_csv(path, header=0) + assert ts_h.name == "ts" self.series.to_csv(path) - series = Series.from_csv(path) - assert series.name is None - assert series.index.name is None + series = self.read_csv(path) assert_series_equal(self.series, series, check_names=False) + assert series.name is None assert series.index.name is None self.series.to_csv(path, header=True) - series_h = Series.from_csv(path, header=0) - assert series_h.name == 'series' + series_h = self.read_csv(path, header=0) + assert series_h.name == "series" - outfile = open(path, 'w') - outfile.write('1998-01-01|1.0\n1999-01-01|2.0') + outfile = open(path, "w") + outfile.write("1998-01-01|1.0\n1999-01-01|2.0") outfile.close() - series = Series.from_csv(path, sep='|') - checkseries = Series({datetime(1998, 1, 1): 1.0, - datetime(1999, 1, 1): 2.0}) - assert_series_equal(checkseries, series) - series = Series.from_csv(path, sep='|', parse_dates=False) - checkseries = Series({'1998-01-01': 1.0, '1999-01-01': 2.0}) - assert_series_equal(checkseries, series) + series = 
self.read_csv(path, sep="|") + check_series = Series({datetime(1998, 1, 1): 1.0, + datetime(1999, 1, 1): 2.0}) + assert_series_equal(check_series, series) + + series = self.read_csv(path, sep="|", parse_dates=False) + check_series = Series({"1998-01-01": 1.0, "1999-01-01": 2.0}) + assert_series_equal(check_series, series) def test_to_csv(self): import io @@ -76,20 +106,19 @@ def test_to_csv_unicode_index(self): buf = StringIO() s = Series([u("\u05d0"), "d2"], index=[u("\u05d0"), u("\u05d1")]) - s.to_csv(buf, encoding='UTF-8') + s.to_csv(buf, encoding="UTF-8") buf.seek(0) - s2 = Series.from_csv(buf, index_col=0, encoding='UTF-8') - + s2 = self.read_csv(buf, index_col=0, encoding="UTF-8") assert_series_equal(s, s2) def test_to_csv_float_format(self): with ensure_clean() as filename: ser = Series([0.123456, 0.234567, 0.567567]) - ser.to_csv(filename, float_format='%.2f') + ser.to_csv(filename, float_format="%.2f") - rs = Series.from_csv(filename) + rs = self.read_csv(filename) xp = Series([0.12, 0.23, 0.57]) assert_series_equal(rs, xp)
Deprecates `Series.from_csv` in favor of `pd.read_csv` (the tests above are updated accordingly, with a `read_csv` helper replicating the old defaults). Closes #4191.
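For reference, a minimal sketch of the migration this deprecation implies, mirroring the `read_csv` helper the patch adds to the tests (the `"ts.csv"` path is a throwaway name used only for illustration):

```python
import pandas as pd

# Write a tiny date-indexed series to CSV (no header, matching the
# old Series.to_csv default).
pd.Series([1.0, 2.0],
          index=pd.to_datetime(["1998-01-01", "1999-01-01"])).to_csv("ts.csv")

# Deprecated after this change (emits a FutureWarning):
#   ts = pd.Series.from_csv("ts.csv")

# Equivalent read_csv call, using the same defaults the test helper
# encodes; squeeze=True collapses the single data column to a Series.
ts = pd.read_csv("ts.csv", squeeze=True, index_col=0,
                 header=None, parse_dates=True)

# from_csv also left the name and index name unset when header is None.
ts.name = ts.index.name = None
```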
https://api.github.com/repos/pandas-dev/pandas/pulls/17812
2017-10-07T09:02:07Z
2017-10-10T04:49:22Z
2017-10-10T04:49:21Z
2017-10-10T04:52:58Z
Fix a small typo
diff --git a/pandas/core/reshape/reshape.py b/pandas/core/reshape/reshape.py index d280c4f3f73d7..4eb35daba2282 100644 --- a/pandas/core/reshape/reshape.py +++ b/pandas/core/reshape/reshape.py @@ -1080,7 +1080,7 @@ def get_dummies(data, prefix=None, prefix_sep='_', dummy_na=False, prefix : string, list of strings, or dict of strings, default None String to append DataFrame column names Pass a list with length equal to the number of columns - when calling get_dummies on a DataFrame. Alternativly, `prefix` + when calling get_dummies on a DataFrame. Alternatively, `prefix` can be a dictionary mapping column names to prefixes. prefix_sep : string, default '_' If appending prefix, separator/delimiter to use. Or pass a
- [x] closes #xxxx - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry This PR fixes a small typo: Alternativly -> Alternatively. Thanks for the awesome project!
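The docstring being corrected also documents the dict form of `prefix`; a small hedged illustration of that behavior (the column and category names here are made up for the example):

```python
import pandas as pd

df = pd.DataFrame({"color": ["red", "blue"], "size": ["S", "L"]})

# `prefix` as a dict maps each column name to its own prefix,
# as the (now correctly spelled) docstring describes.
dummies = pd.get_dummies(df, prefix={"color": "c", "size": "s"})
print(dummies.columns.tolist())
# ['c_blue', 'c_red', 's_L', 's_S']
```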
https://api.github.com/repos/pandas-dev/pandas/pulls/17811
2017-10-07T07:28:34Z
2017-10-07T11:02:58Z
2017-10-07T11:02:58Z
2017-10-08T02:06:24Z
[WIP] TST/MAINT: split up test_resample.py (GH17806)
diff --git a/pandas/tests/resample/__init__.py b/pandas/tests/resample/__init__.py new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/pandas/tests/resample/base.py b/pandas/tests/resample/base.py new file mode 100644 index 0000000000000..d15810316bc67 --- /dev/null +++ b/pandas/tests/resample/base.py @@ -0,0 +1,220 @@ +# pylint: disable=E1101 + +from datetime import datetime, timedelta + +import numpy as np +import pytest + +import pandas as pd +import pandas.util.testing as tm +from pandas import DataFrame, Series +from pandas.compat import range +from pandas.core.base import AbstractMethodError +from pandas.core.groupby import DataError +from pandas.core.indexes.period import PeriodIndex +from pandas.core.indexes.timedeltas import TimedeltaIndex +from pandas.tseries.frequencies import to_offset +from pandas.util.testing import (assert_almost_equal, assert_frame_equal, + assert_index_equal, assert_series_equal) + +from pandas.tests.resample.common import (downsample_methods, resample_methods, + upsample_methods) + + +class Base(object): + """ + base class for resampling testing, calling + .create_series() generates a series of each index type + """ + + def create_index(self, *args, **kwargs): + """ return the _index_factory created using the args, kwargs """ + factory = self._index_factory() + return factory(*args, **kwargs) + + @pytest.fixture + def _index_start(self): + return datetime(2005, 1, 1) + + @pytest.fixture + def _index_end(self): + return datetime(2005, 1, 10) + + @pytest.fixture + def _index_freq(self): + return 'D' + + @pytest.fixture + def index(self, _index_start, _index_end, _index_freq): + return self.create_index(_index_start, _index_end, freq=_index_freq) + + @pytest.fixture + def _series_name(self): + raise AbstractMethodError(self) + + @pytest.fixture + def _static_values(self, index): + return np.arange(len(index)) + + @pytest.fixture + def series(self, index, _series_name, _static_values): + return Series(_static_values, index=index, name=_series_name) + + @pytest.fixture + def frame(self, index, _static_values): + return DataFrame({'value': _static_values}, index=index) + + @pytest.fixture(params=[Series, DataFrame]) + def series_and_frame(self, request, index, _series_name, _static_values): + if request.param == Series: + return Series(_static_values, index=index, name=_series_name) + if request.param == DataFrame: + return DataFrame({'value': _static_values}, index=index) + + @pytest.mark.parametrize('freq', ['2D', '1H']) + def test_asfreq(self, series_and_frame, freq): + obj = series_and_frame + + result = obj.resample(freq).asfreq() + if freq == '2D': + new_index = obj.index.take(np.arange(0, len(obj.index), 2)) + new_index.freq = to_offset('2D') + else: + new_index = self.create_index(obj.index[0], obj.index[-1], + freq=freq) + expected = obj.reindex(new_index) + assert_almost_equal(result, expected) + + def test_asfreq_fill_value(self): + # test for fill value during resampling, issue 3715 + + s = self.create_series() + + result = s.resample('1H').asfreq() + new_index = self.create_index(s.index[0], s.index[-1], freq='1H') + expected = s.reindex(new_index) + assert_series_equal(result, expected) + + frame = s.to_frame('value') + frame.iloc[1] = None + result = frame.resample('1H').asfreq(fill_value=4.0) + new_index = self.create_index(frame.index[0], + frame.index[-1], freq='1H') + expected = frame.reindex(new_index, fill_value=4.0) + assert_frame_equal(result, expected) + + def test_resample_interpolate(self): + # # 12925 + df = 
self.create_series().to_frame('value') + assert_frame_equal( + df.resample('1T').asfreq().interpolate(), + df.resample('1T').interpolate()) + + def test_raises_on_non_datetimelike_index(self): + # this is a non datetimelike index + xp = DataFrame() + pytest.raises(TypeError, lambda: xp.resample('A').mean()) + + def test_resample_empty_series(self): + # GH12771 & GH12868 + + s = self.create_series()[:0] + + for freq in ['M', 'D', 'H']: + # need to test for ohlc from GH13083 + methods = [method for method in resample_methods + if method != 'ohlc'] + for method in methods: + result = getattr(s.resample(freq), method)() + + expected = s.copy() + expected.index = s.index._shallow_copy(freq=freq) + assert_index_equal(result.index, expected.index) + assert result.index.freq == expected.index.freq + assert_series_equal(result, expected, check_dtype=False) + + def test_resample_empty_dataframe(self): + # GH13212 + index = self.create_series().index[:0] + f = DataFrame(index=index) + + for freq in ['M', 'D', 'H']: + # count retains dimensions too + methods = downsample_methods + upsample_methods + for method in methods: + result = getattr(f.resample(freq), method)() + if method != 'size': + expected = f.copy() + else: + # GH14962 + expected = Series([]) + + expected.index = f.index._shallow_copy(freq=freq) + assert_index_equal(result.index, expected.index) + assert result.index.freq == expected.index.freq + assert_almost_equal(result, expected, check_dtype=False) + + # test size for GH13212 (currently stays as df) + + def test_resample_empty_dtypes(self): + + # Empty series were sometimes causing a segfault (for the functions + # with Cython bounds-checking disabled) or an IndexError. We just run + # them to ensure they no longer do. (GH #10228) + for index in tm.all_timeseries_index_generator(0): + for dtype in (np.float, np.int, np.object, 'datetime64[ns]'): + for how in downsample_methods + upsample_methods: + empty_series = pd.Series([], index, dtype) + try: + getattr(empty_series.resample('d'), how)() + except DataError: + # Ignore these since some combinations are invalid + # (ex: doing mean with dtype of np.object) + pass + + def test_resample_loffset_arg_type(self): + # GH 13218, 15002 + df = self.create_series().to_frame('value') + expected_means = [df.values[i:i + 2].mean() + for i in range(0, len(df.values), 2)] + expected_index = self.create_index(df.index[0], + periods=len(df.index) / 2, + freq='2D') + + # loffset coerces PeriodIndex to DateTimeIndex + if isinstance(expected_index, PeriodIndex): + expected_index = expected_index.to_timestamp() + + expected_index += timedelta(hours=2) + expected = DataFrame({'value': expected_means}, index=expected_index) + + for arg in ['mean', {'value': 'mean'}, ['mean']]: + + result_agg = df.resample('2D', loffset='2H').agg(arg) + + with tm.assert_produces_warning(FutureWarning, + check_stacklevel=False): + result_how = df.resample('2D', how=arg, loffset='2H') + + if isinstance(arg, list): + expected.columns = pd.MultiIndex.from_tuples([('value', + 'mean')]) + + # GH 13022, 7687 - TODO: fix resample w/ TimedeltaIndex + if isinstance(expected.index, TimedeltaIndex): + with pytest.raises(AssertionError): + assert_frame_equal(result_agg, expected) + assert_frame_equal(result_how, expected) + else: + assert_frame_equal(result_agg, expected) + assert_frame_equal(result_how, expected) + + def test_apply_to_empty_series(self): + # GH 14313 + series = self.create_series()[:0] + + for freq in ['M', 'D', 'H']: + result = series.resample(freq).apply(lambda x: 1) 
+ expected = series.resample(freq).apply(np.sum) + + assert_series_equal(result, expected, check_dtype=False) diff --git a/pandas/tests/resample/common.py b/pandas/tests/resample/common.py new file mode 100644 index 0000000000000..9d5ac771bb4f7 --- /dev/null +++ b/pandas/tests/resample/common.py @@ -0,0 +1,27 @@ +# pylint: disable=E1101 + +import numpy as np + +from pandas import Series +from pandas.core.indexes.datetimes import date_range +from pandas.core.indexes.period import period_range +from pandas.tseries.offsets import BDay + +bday = BDay() + +# The various methods we support +downsample_methods = ['min', 'max', 'first', 'last', 'sum', 'mean', 'sem', + 'median', 'prod', 'var', 'ohlc'] +upsample_methods = ['count', 'size'] +series_methods = ['nunique'] +resample_methods = downsample_methods + upsample_methods + series_methods + + +def _simple_ts(start, end, freq='D'): + rng = date_range(start, end, freq=freq) + return Series(np.random.randn(len(rng)), index=rng) + + +def _simple_pts(start, end, freq='D'): + rng = period_range(start, end, freq=freq) + return Series(np.random.randn(len(rng)), index=rng) diff --git a/pandas/tests/resample/test_datetime.py b/pandas/tests/resample/test_datetime.py new file mode 100644 index 0000000000000..08080bcb679bd --- /dev/null +++ b/pandas/tests/resample/test_datetime.py @@ -0,0 +1,1378 @@ +# pylint: disable=E1101 + +from datetime import datetime, timedelta +from functools import partial +from warnings import catch_warnings + +import numpy as np +import pytest + +import pandas as pd +import pandas.tseries.offsets as offsets +import pandas.util.testing as tm +from pandas import DataFrame, Index, Panel, Series, Timestamp, isna, notna +from pandas.compat import product, range +from pandas.core.indexes.datetimes import date_range +from pandas.core.indexes.period import period_range, Period +from pandas.core.indexes.timedeltas import timedelta_range +from pandas.core.resample import DatetimeIndex, TimeGrouper +from pandas.errors import UnsupportedFunctionCall +from pandas.tseries.offsets import Minute +from pandas.util.testing import (assert_almost_equal, assert_frame_equal, + assert_series_equal) + +from pandas.tests.resample.base import Base +from pandas.tests.resample.common import (bday, downsample_methods, + _simple_pts, _simple_ts) + + +class TestDatetimeIndex(Base): + _index_factory = lambda x: date_range + + @pytest.fixture + def _series_name(self): + return 'dti' + + def setup_method(self, method): + dti = DatetimeIndex(start=datetime(2005, 1, 1), + end=datetime(2005, 1, 10), freq='Min') + + self.series = Series(np.random.rand(len(dti)), dti) + + def create_series(self): + i = date_range(datetime(2005, 1, 1), + datetime(2005, 1, 10), freq='D') + + return Series(np.arange(len(i)), index=i, name='dti') + + def test_custom_grouper(self): + + dti = DatetimeIndex(freq='Min', start=datetime(2005, 1, 1), + end=datetime(2005, 1, 10)) + + s = Series(np.array([1] * len(dti)), index=dti, dtype='int64') + + b = TimeGrouper(Minute(5)) + g = s.groupby(b) + + # check all cython functions work + funcs = ['add', 'mean', 'prod', 'ohlc', 'min', 'max', 'var'] + for f in funcs: + g._cython_agg_general(f) + + b = TimeGrouper(Minute(5), closed='right', label='right') + g = s.groupby(b) + # check all cython functions work + funcs = ['add', 'mean', 'prod', 'ohlc', 'min', 'max', 'var'] + for f in funcs: + g._cython_agg_general(f) + + assert g.ngroups == 2593 + assert notna(g.mean()).all() + + # construct expected val + arr = [1] + [5] * 2592 + idx = dti[0:-1:5] + idx = 
idx.append(dti[-1:]) + expect = Series(arr, index=idx) + + # GH2763 - return in put dtype if we can + result = g.agg(np.sum) + assert_series_equal(result, expect) + + df = DataFrame(np.random.rand(len(dti), 10), + index=dti, dtype='float64') + r = df.groupby(b).agg(np.sum) + + assert len(r.columns) == 10 + assert len(r.index) == 2593 + + def test_resample_basic(self): + rng = date_range('1/1/2000 00:00:00', '1/1/2000 00:13:00', freq='min', + name='index') + s = Series(np.random.randn(14), index=rng) + result = s.resample('5min', closed='right', label='right').mean() + + exp_idx = date_range('1/1/2000', periods=4, freq='5min', name='index') + expected = Series([s[0], s[1:6].mean(), s[6:11].mean(), s[11:].mean()], + index=exp_idx) + assert_series_equal(result, expected) + assert result.index.name == 'index' + + result = s.resample('5min', closed='left', label='right').mean() + + exp_idx = date_range('1/1/2000 00:05', periods=3, freq='5min', + name='index') + expected = Series([s[:5].mean(), s[5:10].mean(), + s[10:].mean()], index=exp_idx) + assert_series_equal(result, expected) + + s = self.series + result = s.resample('5Min').last() + grouper = TimeGrouper(Minute(5), closed='left', label='left') + expect = s.groupby(grouper).agg(lambda x: x[-1]) + assert_series_equal(result, expect) + + def test_resample_how(self): + rng = date_range('1/1/2000 00:00:00', '1/1/2000 00:13:00', freq='min', + name='index') + s = Series(np.random.randn(14), index=rng) + grouplist = np.ones_like(s) + grouplist[0] = 0 + grouplist[1:6] = 1 + grouplist[6:11] = 2 + grouplist[11:] = 3 + args = downsample_methods + + def _ohlc(group): + if isna(group).all(): + return np.repeat(np.nan, 4) + return [group[0], group.max(), group.min(), group[-1]] + + inds = date_range('1/1/2000', periods=4, freq='5min', name='index') + + for arg in args: + if arg == 'ohlc': + func = _ohlc + else: + func = arg + try: + result = getattr(s.resample( + '5min', closed='right', label='right'), arg)() + + expected = s.groupby(grouplist).agg(func) + assert result.index.name == 'index' + if arg == 'ohlc': + expected = DataFrame(expected.values.tolist()) + expected.columns = ['open', 'high', 'low', 'close'] + expected.index = Index(inds, name='index') + assert_frame_equal(result, expected) + else: + expected.index = inds + assert_series_equal(result, expected) + except BaseException as exc: + + exc.args += ('how=%s' % arg,) + raise + + def test_numpy_compat(self): + # see gh-12811 + s = Series([1, 2, 3, 4, 5], index=date_range( + '20130101', periods=5, freq='s')) + r = s.resample('2s') + + msg = "numpy operations are not valid with resample" + + for func in ('min', 'max', 'sum', 'prod', + 'mean', 'var', 'std'): + tm.assert_raises_regex(UnsupportedFunctionCall, msg, + getattr(r, func), + func, 1, 2, 3) + tm.assert_raises_regex(UnsupportedFunctionCall, msg, + getattr(r, func), axis=1) + + def test_resample_how_callables(self): + # GH 7929 + data = np.arange(5, dtype=np.int64) + ind = pd.DatetimeIndex(start='2014-01-01', periods=len(data), freq='d') + df = pd.DataFrame({"A": data, "B": data}, index=ind) + + def fn(x, a=1): + return str(type(x)) + + class fn_class: + + def __call__(self, x): + return str(type(x)) + + df_standard = df.resample("M").apply(fn) + df_lambda = df.resample("M").apply(lambda x: str(type(x))) + df_partial = df.resample("M").apply(partial(fn)) + df_partial2 = df.resample("M").apply(partial(fn, a=2)) + df_class = df.resample("M").apply(fn_class()) + + assert_frame_equal(df_standard, df_lambda) + assert_frame_equal(df_standard, 
df_partial) + assert_frame_equal(df_standard, df_partial2) + assert_frame_equal(df_standard, df_class) + + def test_resample_with_timedeltas(self): + + expected = DataFrame({'A': np.arange(1480)}) + expected = expected.groupby(expected.index // 30).sum() + expected.index = pd.timedelta_range('0 days', freq='30T', periods=50) + + df = DataFrame({'A': np.arange(1480)}, index=pd.to_timedelta( + np.arange(1480), unit='T')) + result = df.resample('30T').sum() + + assert_frame_equal(result, expected) + + s = df['A'] + result = s.resample('30T').sum() + assert_series_equal(result, expected['A']) + + def test_resample_single_period_timedelta(self): + + s = Series(list(range(5)), index=pd.timedelta_range( + '1 day', freq='s', periods=5)) + result = s.resample('2s').sum() + expected = Series([1, 5, 4], index=pd.timedelta_range( + '1 day', freq='2s', periods=3)) + assert_series_equal(result, expected) + + def test_resample_timedelta_idempotency(self): + + # GH 12072 + index = pd.timedelta_range('0', periods=9, freq='10L') + series = pd.Series(range(9), index=index) + result = series.resample('10L').mean() + expected = series + assert_series_equal(result, expected) + + def test_resample_rounding(self): + # GH 8371 + # odd results when rounding is needed + + data = """date,time,value +11-08-2014,00:00:01.093,1 +11-08-2014,00:00:02.159,1 +11-08-2014,00:00:02.667,1 +11-08-2014,00:00:03.175,1 +11-08-2014,00:00:07.058,1 +11-08-2014,00:00:07.362,1 +11-08-2014,00:00:08.324,1 +11-08-2014,00:00:08.830,1 +11-08-2014,00:00:08.982,1 +11-08-2014,00:00:09.815,1 +11-08-2014,00:00:10.540,1 +11-08-2014,00:00:11.061,1 +11-08-2014,00:00:11.617,1 +11-08-2014,00:00:13.607,1 +11-08-2014,00:00:14.535,1 +11-08-2014,00:00:15.525,1 +11-08-2014,00:00:17.960,1 +11-08-2014,00:00:20.674,1 +11-08-2014,00:00:21.191,1""" + + from pandas.compat import StringIO + df = pd.read_csv(StringIO(data), parse_dates={'timestamp': [ + 'date', 'time']}, index_col='timestamp') + df.index.name = None + result = df.resample('6s').sum() + expected = DataFrame({'value': [ + 4, 9, 4, 2 + ]}, index=date_range('2014-11-08', freq='6s', periods=4)) + assert_frame_equal(result, expected) + + result = df.resample('7s').sum() + expected = DataFrame({'value': [ + 4, 10, 4, 1 + ]}, index=date_range('2014-11-08', freq='7s', periods=4)) + assert_frame_equal(result, expected) + + result = df.resample('11s').sum() + expected = DataFrame({'value': [ + 11, 8 + ]}, index=date_range('2014-11-08', freq='11s', periods=2)) + assert_frame_equal(result, expected) + + result = df.resample('13s').sum() + expected = DataFrame({'value': [ + 13, 6 + ]}, index=date_range('2014-11-08', freq='13s', periods=2)) + assert_frame_equal(result, expected) + + result = df.resample('17s').sum() + expected = DataFrame({'value': [ + 16, 3 + ]}, index=date_range('2014-11-08', freq='17s', periods=2)) + assert_frame_equal(result, expected) + + def test_resample_basic_from_daily(self): + # from daily + dti = DatetimeIndex(start=datetime(2005, 1, 1), + end=datetime(2005, 1, 10), freq='D', name='index') + + s = Series(np.random.rand(len(dti)), dti) + + # to weekly + result = s.resample('w-sun').last() + + assert len(result) == 3 + assert (result.index.dayofweek == [6, 6, 6]).all() + assert result.iloc[0] == s['1/2/2005'] + assert result.iloc[1] == s['1/9/2005'] + assert result.iloc[2] == s.iloc[-1] + + result = s.resample('W-MON').last() + assert len(result) == 2 + assert (result.index.dayofweek == [0, 0]).all() + assert result.iloc[0] == s['1/3/2005'] + assert result.iloc[1] == s['1/10/2005'] + + 
result = s.resample('W-TUE').last() + assert len(result) == 2 + assert (result.index.dayofweek == [1, 1]).all() + assert result.iloc[0] == s['1/4/2005'] + assert result.iloc[1] == s['1/10/2005'] + + result = s.resample('W-WED').last() + assert len(result) == 2 + assert (result.index.dayofweek == [2, 2]).all() + assert result.iloc[0] == s['1/5/2005'] + assert result.iloc[1] == s['1/10/2005'] + + result = s.resample('W-THU').last() + assert len(result) == 2 + assert (result.index.dayofweek == [3, 3]).all() + assert result.iloc[0] == s['1/6/2005'] + assert result.iloc[1] == s['1/10/2005'] + + result = s.resample('W-FRI').last() + assert len(result) == 2 + assert (result.index.dayofweek == [4, 4]).all() + assert result.iloc[0] == s['1/7/2005'] + assert result.iloc[1] == s['1/10/2005'] + + # to biz day + result = s.resample('B').last() + assert len(result) == 7 + assert (result.index.dayofweek == [4, 0, 1, 2, 3, 4, 0]).all() + + assert result.iloc[0] == s['1/2/2005'] + assert result.iloc[1] == s['1/3/2005'] + assert result.iloc[5] == s['1/9/2005'] + assert result.index.name == 'index' + + def test_resample_upsampling_picked_but_not_correct(self): + + # Test for issue #3020 + dates = date_range('01-Jan-2014', '05-Jan-2014', freq='D') + series = Series(1, index=dates) + + result = series.resample('D').mean() + assert result.index[0] == dates[0] + + # GH 5955 + # incorrect deciding to upsample when the axis frequency matches the + # resample frequency + + import datetime + s = Series(np.arange(1., 6), index=[datetime.datetime( + 1975, 1, i, 12, 0) for i in range(1, 6)]) + expected = Series(np.arange(1., 6), index=date_range( + '19750101', periods=5, freq='D')) + + result = s.resample('D').count() + assert_series_equal(result, Series(1, index=expected.index)) + + result1 = s.resample('D').sum() + result2 = s.resample('D').mean() + assert_series_equal(result1, expected) + assert_series_equal(result2, expected) + + def test_resample_frame_basic(self): + df = tm.makeTimeDataFrame() + + b = TimeGrouper('M') + g = df.groupby(b) + + # check all cython functions work + funcs = ['add', 'mean', 'prod', 'min', 'max', 'var'] + for f in funcs: + g._cython_agg_general(f) + + result = df.resample('A').mean() + assert_series_equal(result['A'], df['A'].resample('A').mean()) + + result = df.resample('M').mean() + assert_series_equal(result['A'], df['A'].resample('M').mean()) + + df.resample('M', kind='period').mean() + df.resample('W-WED', kind='period').mean() + + def test_resample_loffset(self): + rng = date_range('1/1/2000 00:00:00', '1/1/2000 00:13:00', freq='min') + s = Series(np.random.randn(14), index=rng) + + result = s.resample('5min', closed='right', label='right', + loffset=timedelta(minutes=1)).mean() + idx = date_range('1/1/2000', periods=4, freq='5min') + expected = Series([s[0], s[1:6].mean(), s[6:11].mean(), s[11:].mean()], + index=idx + timedelta(minutes=1)) + assert_series_equal(result, expected) + + expected = s.resample( + '5min', closed='right', label='right', + loffset='1min').mean() + assert_series_equal(result, expected) + + expected = s.resample( + '5min', closed='right', label='right', + loffset=Minute(1)).mean() + assert_series_equal(result, expected) + + assert result.index.freq == Minute(5) + + # from daily + dti = DatetimeIndex(start=datetime(2005, 1, 1), + end=datetime(2005, 1, 10), freq='D') + ser = Series(np.random.rand(len(dti)), dti) + + # to weekly + result = ser.resample('w-sun').last() + expected = ser.resample('w-sun', loffset=-bday).last() + assert result.index[0] - bday == 
expected.index[0] + + def test_resample_loffset_count(self): + # GH 12725 + start_time = '1/1/2000 00:00:00' + rng = date_range(start_time, periods=100, freq='S') + ts = Series(np.random.randn(len(rng)), index=rng) + + result = ts.resample('10S', loffset='1s').count() + + expected_index = ( + date_range(start_time, periods=10, freq='10S') + + timedelta(seconds=1) + ) + expected = pd.Series(10, index=expected_index) + + assert_series_equal(result, expected) + + # Same issue should apply to .size() since it goes through + # same code path + result = ts.resample('10S', loffset='1s').size() + + assert_series_equal(result, expected) + + def test_resample_upsample(self): + # from daily + dti = DatetimeIndex(start=datetime(2005, 1, 1), + end=datetime(2005, 1, 10), freq='D', name='index') + + s = Series(np.random.rand(len(dti)), dti) + + # to minutely, by padding + result = s.resample('Min').pad() + assert len(result) == 12961 + assert result[0] == s[0] + assert result[-1] == s[-1] + + assert result.index.name == 'index' + + def test_resample_how_method(self): + # GH9915 + s = pd.Series([11, 22], + index=[Timestamp('2015-03-31 21:48:52.672000'), + Timestamp('2015-03-31 21:49:52.739000')]) + expected = pd.Series([11, np.NaN, np.NaN, np.NaN, np.NaN, np.NaN, 22], + index=[Timestamp('2015-03-31 21:48:50'), + Timestamp('2015-03-31 21:49:00'), + Timestamp('2015-03-31 21:49:10'), + Timestamp('2015-03-31 21:49:20'), + Timestamp('2015-03-31 21:49:30'), + Timestamp('2015-03-31 21:49:40'), + Timestamp('2015-03-31 21:49:50')]) + assert_series_equal(s.resample("10S").mean(), expected) + + def test_resample_extra_index_point(self): + # GH 9756 + index = DatetimeIndex(start='20150101', end='20150331', freq='BM') + expected = DataFrame({'A': Series([21, 41, 63], index=index)}) + + index = DatetimeIndex(start='20150101', end='20150331', freq='B') + df = DataFrame( + {'A': Series(range(len(index)), index=index)}, dtype='int64') + result = df.resample('BM').last() + assert_frame_equal(result, expected) + + def test_upsample_with_limit(self): + rng = date_range('1/1/2000', periods=3, freq='5t') + ts = Series(np.random.randn(len(rng)), rng) + + result = ts.resample('t').ffill(limit=2) + expected = ts.reindex(result.index, method='ffill', limit=2) + assert_series_equal(result, expected) + + def test_nearest_upsample_with_limit(self): + rng = date_range('1/1/2000', periods=3, freq='5t') + ts = Series(np.random.randn(len(rng)), rng) + + result = ts.resample('t').nearest(limit=2) + expected = ts.reindex(result.index, method='nearest', limit=2) + assert_series_equal(result, expected) + + def test_resample_ohlc(self): + s = self.series + + grouper = TimeGrouper(Minute(5)) + expect = s.groupby(grouper).agg(lambda x: x[-1]) + result = s.resample('5Min').ohlc() + + assert len(result) == len(expect) + assert len(result.columns) == 4 + + xs = result.iloc[-2] + assert xs['open'] == s[-6] + assert xs['high'] == s[-6:-1].max() + assert xs['low'] == s[-6:-1].min() + assert xs['close'] == s[-2] + + xs = result.iloc[0] + assert xs['open'] == s[0] + assert xs['high'] == s[:5].max() + assert xs['low'] == s[:5].min() + assert xs['close'] == s[4] + + def test_resample_ohlc_result(self): + + # GH 12332 + index = pd.date_range('1-1-2000', '2-15-2000', freq='h') + index = index.union(pd.date_range('4-15-2000', '5-15-2000', freq='h')) + s = Series(range(len(index)), index=index) + + a = s.loc[:'4-15-2000'].resample('30T').ohlc() + assert isinstance(a, DataFrame) + + b = s.loc[:'4-14-2000'].resample('30T').ohlc() + assert isinstance(b, 
DataFrame) + + # GH12348 + # raising on odd period + rng = date_range('2013-12-30', '2014-01-07') + index = rng.drop([Timestamp('2014-01-01'), + Timestamp('2013-12-31'), + Timestamp('2014-01-04'), + Timestamp('2014-01-05')]) + df = DataFrame(data=np.arange(len(index)), index=index) + result = df.resample('B').mean() + expected = df.reindex(index=date_range(rng[0], rng[-1], freq='B')) + assert_frame_equal(result, expected) + + def test_resample_ohlc_dataframe(self): + df = ( + pd.DataFrame({ + 'PRICE': { + Timestamp('2011-01-06 10:59:05', tz=None): 24990, + Timestamp('2011-01-06 12:43:33', tz=None): 25499, + Timestamp('2011-01-06 12:54:09', tz=None): 25499}, + 'VOLUME': { + Timestamp('2011-01-06 10:59:05', tz=None): 1500000000, + Timestamp('2011-01-06 12:43:33', tz=None): 5000000000, + Timestamp('2011-01-06 12:54:09', tz=None): 100000000}}) + ).reindex_axis(['VOLUME', 'PRICE'], axis=1) + res = df.resample('H').ohlc() + exp = pd.concat([df['VOLUME'].resample('H').ohlc(), + df['PRICE'].resample('H').ohlc()], + axis=1, + keys=['VOLUME', 'PRICE']) + assert_frame_equal(exp, res) + + df.columns = [['a', 'b'], ['c', 'd']] + res = df.resample('H').ohlc() + exp.columns = pd.MultiIndex.from_tuples([ + ('a', 'c', 'open'), ('a', 'c', 'high'), ('a', 'c', 'low'), + ('a', 'c', 'close'), ('b', 'd', 'open'), ('b', 'd', 'high'), + ('b', 'd', 'low'), ('b', 'd', 'close')]) + assert_frame_equal(exp, res) + + # dupe columns fail atm + # df.columns = ['PRICE', 'PRICE'] + + def test_resample_dup_index(self): + + # GH 4812 + # dup columns with resample raising + df = DataFrame(np.random.randn(4, 12), index=[2000, 2000, 2000, 2000], + columns=[Period(year=2000, month=i + 1, freq='M') + for i in range(12)]) + df.iloc[3, :] = np.nan + result = df.resample('Q', axis=1).mean() + expected = df.groupby(lambda x: int((x.month - 1) / 3), axis=1).mean() + expected.columns = [ + Period(year=2000, quarter=i + 1, freq='Q') for i in range(4)] + assert_frame_equal(result, expected) + + def test_resample_reresample(self): + dti = DatetimeIndex(start=datetime(2005, 1, 1), + end=datetime(2005, 1, 10), freq='D') + s = Series(np.random.rand(len(dti)), dti) + bs = s.resample('B', closed='right', label='right').mean() + result = bs.resample('8H').mean() + assert len(result) == 22 + assert isinstance(result.index.freq, offsets.DateOffset) + assert result.index.freq == offsets.Hour(8) + + def test_resample_timestamp_to_period(self): + ts = _simple_ts('1/1/1990', '1/1/2000') + + result = ts.resample('A-DEC', kind='period').mean() + expected = ts.resample('A-DEC').mean() + expected.index = period_range('1990', '2000', freq='a-dec') + assert_series_equal(result, expected) + + result = ts.resample('A-JUN', kind='period').mean() + expected = ts.resample('A-JUN').mean() + expected.index = period_range('1990', '2000', freq='a-jun') + assert_series_equal(result, expected) + + result = ts.resample('M', kind='period').mean() + expected = ts.resample('M').mean() + expected.index = period_range('1990-01', '2000-01', freq='M') + assert_series_equal(result, expected) + + result = ts.resample('M', kind='period').mean() + expected = ts.resample('M').mean() + expected.index = period_range('1990-01', '2000-01', freq='M') + assert_series_equal(result, expected) + + def test_ohlc_5min(self): + def _ohlc(group): + if isna(group).all(): + return np.repeat(np.nan, 4) + return [group[0], group.max(), group.min(), group[-1]] + + rng = date_range('1/1/2000 00:00:00', '1/1/2000 5:59:50', freq='10s') + ts = Series(np.random.randn(len(rng)), index=rng) + + resampled 
= ts.resample('5min', closed='right', + label='right').ohlc() + + assert (resampled.loc['1/1/2000 00:00'] == ts[0]).all() + + exp = _ohlc(ts[1:31]) + assert (resampled.loc['1/1/2000 00:05'] == exp).all() + + exp = _ohlc(ts['1/1/2000 5:55:01':]) + assert (resampled.loc['1/1/2000 6:00:00'] == exp).all() + + def test_downsample_non_unique(self): + rng = date_range('1/1/2000', '2/29/2000') + rng2 = rng.repeat(5).values + ts = Series(np.random.randn(len(rng2)), index=rng2) + + result = ts.resample('M').mean() + + expected = ts.groupby(lambda x: x.month).mean() + assert len(result) == 2 + assert_almost_equal(result[0], expected[1]) + assert_almost_equal(result[1], expected[2]) + + def test_asfreq_non_unique(self): + # GH #1077 + rng = date_range('1/1/2000', '2/29/2000') + rng2 = rng.repeat(2).values + ts = Series(np.random.randn(len(rng2)), index=rng2) + + pytest.raises(Exception, ts.asfreq, 'B') + + def test_resample_axis1(self): + rng = date_range('1/1/2000', '2/29/2000') + df = DataFrame(np.random.randn(3, len(rng)), columns=rng, + index=['a', 'b', 'c']) + + result = df.resample('M', axis=1).mean() + expected = df.T.resample('M').mean().T + tm.assert_frame_equal(result, expected) + + def test_resample_panel(self): + rng = date_range('1/1/2000', '6/30/2000') + n = len(rng) + + with catch_warnings(record=True): + panel = Panel(np.random.randn(3, n, 5), + items=['one', 'two', 'three'], + major_axis=rng, + minor_axis=['a', 'b', 'c', 'd', 'e']) + + result = panel.resample('M', axis=1).mean() + + def p_apply(panel, f): + result = {} + for item in panel.items: + result[item] = f(panel[item]) + return Panel(result, items=panel.items) + + expected = p_apply(panel, lambda x: x.resample('M').mean()) + tm.assert_panel_equal(result, expected) + + panel2 = panel.swapaxes(1, 2) + result = panel2.resample('M', axis=2).mean() + expected = p_apply(panel2, + lambda x: x.resample('M', axis=1).mean()) + tm.assert_panel_equal(result, expected) + + def test_resample_panel_numpy(self): + rng = date_range('1/1/2000', '6/30/2000') + n = len(rng) + + with catch_warnings(record=True): + panel = Panel(np.random.randn(3, n, 5), + items=['one', 'two', 'three'], + major_axis=rng, + minor_axis=['a', 'b', 'c', 'd', 'e']) + + result = panel.resample('M', axis=1).apply(lambda x: x.mean(1)) + expected = panel.resample('M', axis=1).mean() + tm.assert_panel_equal(result, expected) + + panel = panel.swapaxes(1, 2) + result = panel.resample('M', axis=2).apply(lambda x: x.mean(2)) + expected = panel.resample('M', axis=2).mean() + tm.assert_panel_equal(result, expected) + + def test_resample_anchored_ticks(self): + # If a fixed delta (5 minute, 4 hour) evenly divides a day, we should + # "anchor" the origin at midnight so we get regular intervals rather + # than starting from the first timestamp which might start in the + # middle of a desired interval + + rng = date_range('1/1/2000 04:00:00', periods=86400, freq='s') + ts = Series(np.random.randn(len(rng)), index=rng) + ts[:2] = np.nan # so results are the same + + freqs = ['t', '5t', '15t', '30t', '4h', '12h'] + for freq in freqs: + result = ts[2:].resample(freq, closed='left', label='left').mean() + expected = ts.resample(freq, closed='left', label='left').mean() + assert_series_equal(result, expected) + + def test_resample_single_group(self): + mysum = lambda x: x.sum() + + rng = date_range('2000-1-1', '2000-2-10', freq='D') + ts = Series(np.random.randn(len(rng)), index=rng) + assert_series_equal(ts.resample('M').sum(), + ts.resample('M').apply(mysum)) + + rng = 
date_range('2000-1-1', '2000-1-10', freq='D') + ts = Series(np.random.randn(len(rng)), index=rng) + assert_series_equal(ts.resample('M').sum(), + ts.resample('M').apply(mysum)) + + # GH 3849 + s = Series([30.1, 31.6], index=[Timestamp('20070915 15:30:00'), + Timestamp('20070915 15:40:00')]) + expected = Series([0.75], index=[Timestamp('20070915')]) + result = s.resample('D').apply(lambda x: np.std(x)) + assert_series_equal(result, expected) + + def test_resample_base(self): + rng = date_range('1/1/2000 00:00:00', '1/1/2000 02:00', freq='s') + ts = Series(np.random.randn(len(rng)), index=rng) + + resampled = ts.resample('5min', base=2).mean() + exp_rng = date_range('12/31/1999 23:57:00', '1/1/2000 01:57', + freq='5min') + tm.assert_index_equal(resampled.index, exp_rng) + + def test_resample_base_with_timedeltaindex(self): + + # GH 10530 + rng = timedelta_range(start='0s', periods=25, freq='s') + ts = Series(np.random.randn(len(rng)), index=rng) + + with_base = ts.resample('2s', base=5).mean() + without_base = ts.resample('2s').mean() + + exp_without_base = timedelta_range(start='0s', end='25s', freq='2s') + exp_with_base = timedelta_range(start='5s', end='29s', freq='2s') + + tm.assert_index_equal(without_base.index, exp_without_base) + tm.assert_index_equal(with_base.index, exp_with_base) + + def test_resample_categorical_data_with_timedeltaindex(self): + # GH #12169 + df = DataFrame({'Group_obj': 'A'}, + index=pd.to_timedelta(list(range(20)), unit='s')) + df['Group'] = df['Group_obj'].astype('category') + result = df.resample('10s').agg(lambda x: (x.value_counts().index[0])) + expected = DataFrame({'Group_obj': ['A', 'A'], + 'Group': ['A', 'A']}, + index=pd.to_timedelta([0, 10], unit='s')) + expected = expected.reindex_axis(['Group_obj', 'Group'], 1) + tm.assert_frame_equal(result, expected) + + def test_resample_daily_anchored(self): + rng = date_range('1/1/2000 0:00:00', periods=10000, freq='T') + ts = Series(np.random.randn(len(rng)), index=rng) + ts[:2] = np.nan # so results are the same + + result = ts[2:].resample('D', closed='left', label='left').mean() + expected = ts.resample('D', closed='left', label='left').mean() + assert_series_equal(result, expected) + + def test_resample_to_period_monthly_buglet(self): + # GH #1259 + + rng = date_range('1/1/2000', '12/31/2000') + ts = Series(np.random.randn(len(rng)), index=rng) + + result = ts.resample('M', kind='period').mean() + exp_index = period_range('Jan-2000', 'Dec-2000', freq='M') + tm.assert_index_equal(result.index, exp_index) + + def test_period_with_agg(self): + + # aggregate a period resampler with a lambda + s2 = pd.Series(np.random.randint(0, 5, 50), + index=pd.period_range('2012-01-01', + freq='H', + periods=50), + dtype='float64') + + expected = s2.to_timestamp().resample('D').mean().to_period() + result = s2.resample('D').agg(lambda x: x.mean()) + assert_series_equal(result, expected) + + def test_resample_segfault(self): + # GH 8573 + # segfaulting in older versions + all_wins_and_wagers = [ + (1, datetime(2013, 10, 1, 16, 20), 1, 0), + (2, datetime(2013, 10, 1, 16, 10), 1, 0), + (2, datetime(2013, 10, 1, 18, 15), 1, 0), + (2, datetime(2013, 10, 1, 16, 10, 31), 1, 0)] + + df = pd.DataFrame.from_records(all_wins_and_wagers, + columns=("ID", "timestamp", "A", "B") + ).set_index("timestamp") + result = df.groupby("ID").resample("5min").sum() + expected = df.groupby("ID").apply(lambda x: x.resample("5min").sum()) + assert_frame_equal(result, expected) + + def test_resample_dtype_preservation(self): + + # GH 12202 + # 
validation tests for dtype preservation + + df = DataFrame({'date': pd.date_range(start='2016-01-01', + periods=4, freq='W'), + 'group': [1, 1, 2, 2], + 'val': Series([5, 6, 7, 8], + dtype='int32')} + ).set_index('date') + + result = df.resample('1D').ffill() + assert result.val.dtype == np.int32 + + result = df.groupby('group').resample('1D').ffill() + assert result.val.dtype == np.int32 + + def test_resample_dtype_coerceion(self): + + pytest.importorskip('scipy.interpolate') + + # GH 16361 + df = {"a": [1, 3, 1, 4]} + df = pd.DataFrame( + df, index=pd.date_range("2017-01-01", "2017-01-04")) + + expected = (df.astype("float64") + .resample("H") + .mean() + ["a"] + .interpolate("cubic") + ) + + result = df.resample("H")["a"].mean().interpolate("cubic") + tm.assert_series_equal(result, expected) + + result = df.resample("H").mean()["a"].interpolate("cubic") + tm.assert_series_equal(result, expected) + + def test_weekly_resample_buglet(self): + # #1327 + rng = date_range('1/1/2000', freq='B', periods=20) + ts = Series(np.random.randn(len(rng)), index=rng) + + resampled = ts.resample('W').mean() + expected = ts.resample('W-SUN').mean() + assert_series_equal(resampled, expected) + + def test_monthly_resample_error(self): + # #1451 + dates = date_range('4/16/2012 20:00', periods=5000, freq='h') + ts = Series(np.random.randn(len(dates)), index=dates) + # it works! + ts.resample('M') + + def test_nanosecond_resample_error(self): + # GH 12307 - Values falls after last bin when + # Resampling using pd.tseries.offsets.Nano as period + start = 1443707890427 + exp_start = 1443707890400 + indx = pd.date_range( + start=pd.to_datetime(start), + periods=10, + freq='100n' + ) + ts = pd.Series(range(len(indx)), index=indx) + r = ts.resample(pd.tseries.offsets.Nano(100)) + result = r.agg('mean') + + exp_indx = pd.date_range( + start=pd.to_datetime(exp_start), + periods=10, + freq='100n' + ) + exp = pd.Series(range(len(exp_indx)), index=exp_indx) + + assert_series_equal(result, exp) + + def test_resample_anchored_intraday(self): + # #1471, #1458 + + rng = date_range('1/1/2012', '4/1/2012', freq='100min') + df = DataFrame(rng.month, index=rng) + + result = df.resample('M').mean() + expected = df.resample( + 'M', kind='period').mean().to_timestamp(how='end') + tm.assert_frame_equal(result, expected) + + result = df.resample('M', closed='left').mean() + exp = df.tshift(1, freq='D').resample('M', kind='period').mean() + exp = exp.to_timestamp(how='end') + + tm.assert_frame_equal(result, exp) + + rng = date_range('1/1/2012', '4/1/2012', freq='100min') + df = DataFrame(rng.month, index=rng) + + result = df.resample('Q').mean() + expected = df.resample( + 'Q', kind='period').mean().to_timestamp(how='end') + tm.assert_frame_equal(result, expected) + + result = df.resample('Q', closed='left').mean() + expected = df.tshift(1, freq='D').resample('Q', kind='period', + closed='left').mean() + expected = expected.to_timestamp(how='end') + tm.assert_frame_equal(result, expected) + + ts = _simple_ts('2012-04-29 23:00', '2012-04-30 5:00', freq='h') + resampled = ts.resample('M').mean() + assert len(resampled) == 1 + + def test_resample_anchored_monthstart(self): + ts = _simple_ts('1/1/2000', '12/31/2002') + + freqs = ['MS', 'BMS', 'QS-MAR', 'AS-DEC', 'AS-JUN'] + + for freq in freqs: + ts.resample(freq).mean() + + def test_resample_anchored_multiday(self): + # When resampling a range spanning multiple days, ensure that the + # start date gets used to determine the offset. 
Fixes issue where + # a one day period is not a multiple of the frequency. + # + # See: https://github.com/pandas-dev/pandas/issues/8683 + + index = pd.date_range( + '2014-10-14 23:06:23.206', periods=3, freq='400L' + ) | pd.date_range( + '2014-10-15 23:00:00', periods=2, freq='2200L') + + s = pd.Series(np.random.randn(5), index=index) + + # Ensure left closing works + result = s.resample('2200L').mean() + assert result.index[-1] == pd.Timestamp('2014-10-15 23:00:02.000') + + # Ensure right closing works + result = s.resample('2200L', label='right').mean() + assert result.index[-1] == pd.Timestamp('2014-10-15 23:00:04.200') + + def test_corner_cases(self): + # miscellaneous test coverage + + rng = date_range('1/1/2000', periods=12, freq='t') + ts = Series(np.random.randn(len(rng)), index=rng) + + result = ts.resample('5t', closed='right', label='left').mean() + ex_index = date_range('1999-12-31 23:55', periods=4, freq='5t') + tm.assert_index_equal(result.index, ex_index) + + len0pts = _simple_pts('2007-01', '2010-05', freq='M')[:0] + # it works + result = len0pts.resample('A-DEC').mean() + assert len(result) == 0 + + # resample to periods + ts = _simple_ts('2000-04-28', '2000-04-30 11:00', freq='h') + result = ts.resample('M', kind='period').mean() + assert len(result) == 1 + assert result.index[0] == Period('2000-04', freq='M') + + def test_anchored_lowercase_buglet(self): + dates = date_range('4/16/2012 20:00', periods=50000, freq='s') + ts = Series(np.random.randn(len(dates)), index=dates) + # it works! + ts.resample('d').mean() + + def test_upsample_apply_functions(self): + # #1596 + rng = pd.date_range('2012-06-12', periods=4, freq='h') + + ts = Series(np.random.randn(len(rng)), index=rng) + + result = ts.resample('20min').aggregate(['mean', 'sum']) + assert isinstance(result, DataFrame) + + def test_resample_not_monotonic(self): + rng = pd.date_range('2012-06-12', periods=200, freq='h') + ts = Series(np.random.randn(len(rng)), index=rng) + + ts = ts.take(np.random.permutation(len(ts))) + + result = ts.resample('D').sum() + exp = ts.sort_index().resample('D').sum() + assert_series_equal(result, exp) + + def test_resample_median_bug_1688(self): + + for dtype in ['int64', 'int32', 'float64', 'float32']: + df = DataFrame([1, 2], index=[datetime(2012, 1, 1, 0, 0, 0), + datetime(2012, 1, 1, 0, 5, 0)], + dtype=dtype) + + result = df.resample("T").apply(lambda x: x.mean()) + exp = df.asfreq('T') + tm.assert_frame_equal(result, exp) + + result = df.resample("T").median() + exp = df.asfreq('T') + tm.assert_frame_equal(result, exp) + + def test_how_lambda_functions(self): + + ts = _simple_ts('1/1/2000', '4/1/2000') + + result = ts.resample('M').apply(lambda x: x.mean()) + exp = ts.resample('M').mean() + tm.assert_series_equal(result, exp) + + foo_exp = ts.resample('M').mean() + foo_exp.name = 'foo' + bar_exp = ts.resample('M').std() + bar_exp.name = 'bar' + + result = ts.resample('M').apply( + [lambda x: x.mean(), lambda x: x.std(ddof=1)]) + result.columns = ['foo', 'bar'] + tm.assert_series_equal(result['foo'], foo_exp) + tm.assert_series_equal(result['bar'], bar_exp) + + # this is a MI Series, so comparing the names of the results + # doesn't make sense + result = ts.resample('M').aggregate({'foo': lambda x: x.mean(), + 'bar': lambda x: x.std(ddof=1)}) + tm.assert_series_equal(result['foo'], foo_exp, check_names=False) + tm.assert_series_equal(result['bar'], bar_exp, check_names=False) + + def test_resample_unequal_times(self): + # #1772 + start = datetime(1999, 3, 1, 5) + # end hour is less 
than start + end = datetime(2012, 7, 31, 4) + bad_ind = date_range(start, end, freq="30min") + df = DataFrame({'close': 1}, index=bad_ind) + + # it works! + df.resample('AS').sum() + + def test_resample_consistency(self): + + # GH 6418 + # resample with bfill / limit / reindex consistency + + i30 = pd.date_range('2002-02-02', periods=4, freq='30T') + s = pd.Series(np.arange(4.), index=i30) + s[2] = np.NaN + + # Upsample by factor 3 with reindex() and resample() methods: + i10 = pd.date_range(i30[0], i30[-1], freq='10T') + + s10 = s.reindex(index=i10, method='bfill') + s10_2 = s.reindex(index=i10, method='bfill', limit=2) + rl = s.reindex_like(s10, method='bfill', limit=2) + r10_2 = s.resample('10Min').bfill(limit=2) + r10 = s.resample('10Min').bfill() + + # s10_2, r10, r10_2, rl should all be equal + assert_series_equal(s10_2, r10) + assert_series_equal(s10_2, r10_2) + assert_series_equal(s10_2, rl) + + def test_resample_timegrouper(self): + # GH 7227 + dates1 = [datetime(2014, 10, 1), datetime(2014, 9, 3), + datetime(2014, 11, 5), datetime(2014, 9, 5), + datetime(2014, 10, 8), datetime(2014, 7, 15)] + + dates2 = dates1[:2] + [pd.NaT] + dates1[2:4] + [pd.NaT] + dates1[4:] + dates3 = [pd.NaT] + dates1 + [pd.NaT] + + for dates in [dates1, dates2, dates3]: + df = DataFrame(dict(A=dates, B=np.arange(len(dates)))) + result = df.set_index('A').resample('M').count() + exp_idx = pd.DatetimeIndex(['2014-07-31', '2014-08-31', + '2014-09-30', + '2014-10-31', '2014-11-30'], + freq='M', name='A') + expected = DataFrame({'B': [1, 0, 2, 2, 1]}, index=exp_idx) + assert_frame_equal(result, expected) + + result = df.groupby(pd.Grouper(freq='M', key='A')).count() + assert_frame_equal(result, expected) + + df = DataFrame(dict(A=dates, B=np.arange(len(dates)), C=np.arange( + len(dates)))) + result = df.set_index('A').resample('M').count() + expected = DataFrame({'B': [1, 0, 2, 2, 1], 'C': [1, 0, 2, 2, 1]}, + index=exp_idx, columns=['B', 'C']) + assert_frame_equal(result, expected) + + result = df.groupby(pd.Grouper(freq='M', key='A')).count() + assert_frame_equal(result, expected) + + def test_resample_nunique(self): + + # GH 12352 + df = DataFrame({ + 'ID': {pd.Timestamp('2015-06-05 00:00:00'): '0010100903', + pd.Timestamp('2015-06-08 00:00:00'): '0010150847'}, + 'DATE': {pd.Timestamp('2015-06-05 00:00:00'): '2015-06-05', + pd.Timestamp('2015-06-08 00:00:00'): '2015-06-08'}}) + r = df.resample('D') + g = df.groupby(pd.Grouper(freq='D')) + expected = df.groupby(pd.Grouper(freq='D')).ID.apply(lambda x: + x.nunique()) + assert expected.name == 'ID' + + for t in [r, g]: + result = r.ID.nunique() + assert_series_equal(result, expected) + + result = df.ID.resample('D').nunique() + assert_series_equal(result, expected) + + result = df.ID.groupby(pd.Grouper(freq='D')).nunique() + assert_series_equal(result, expected) + + def test_resample_nunique_with_date_gap(self): + # GH 13453 + index = pd.date_range('1-1-2000', '2-15-2000', freq='h') + index2 = pd.date_range('4-15-2000', '5-15-2000', freq='h') + index3 = index.append(index2) + s = pd.Series(range(len(index3)), index=index3, dtype='int64') + r = s.resample('M') + + # Since all elements are unique, these should all be the same + results = [ + r.count(), + r.nunique(), + r.agg(pd.Series.nunique), + r.agg('nunique') + ] + + assert_series_equal(results[0], results[1]) + assert_series_equal(results[0], results[2]) + assert_series_equal(results[0], results[3]) + + def test_resample_group_info(self): # GH10914 + for n, k in product((10000, 100000), (10, 100, 1000)): + dr 
= date_range(start='2015-08-27', periods=n // 10, freq='T') + ts = Series(np.random.randint(0, n // k, n).astype('int64'), + index=np.random.choice(dr, n)) + + left = ts.resample('30T').nunique() + ix = date_range(start=ts.index.min(), end=ts.index.max(), + freq='30T') + + vals = ts.values + bins = np.searchsorted(ix.values, ts.index, side='right') + + sorter = np.lexsort((vals, bins)) + vals, bins = vals[sorter], bins[sorter] + + mask = np.r_[True, vals[1:] != vals[:-1]] + mask |= np.r_[True, bins[1:] != bins[:-1]] + + arr = np.bincount(bins[mask] - 1, + minlength=len(ix)).astype('int64', copy=False) + right = Series(arr, index=ix) + + assert_series_equal(left, right) + + def test_resample_size(self): + n = 10000 + dr = date_range('2015-09-19', periods=n, freq='T') + ts = Series(np.random.randn(n), index=np.random.choice(dr, n)) + + left = ts.resample('7T').size() + ix = date_range(start=left.index.min(), end=ts.index.max(), freq='7T') + + bins = np.searchsorted(ix.values, ts.index.values, side='right') + val = np.bincount(bins, minlength=len(ix) + 1)[1:].astype('int64', + copy=False) + + right = Series(val, index=ix) + assert_series_equal(left, right) + + def test_resample_across_dst(self): + # The test resamples a DatetimeIndex with values before and after a + # DST change + # Issue: 14682 + + # The DatetimeIndex we will start with + # (note that DST happens at 03:00+02:00 -> 02:00+01:00) + # 2016-10-30 02:23:00+02:00, 2016-10-30 02:23:00+01:00 + df1 = DataFrame([1477786980, 1477790580], columns=['ts']) + dti1 = DatetimeIndex(pd.to_datetime(df1.ts, unit='s') + .dt.tz_localize('UTC') + .dt.tz_convert('Europe/Madrid')) + + # The expected DatetimeIndex after resampling. + # 2016-10-30 02:00:00+02:00, 2016-10-30 02:00:00+01:00 + df2 = DataFrame([1477785600, 1477789200], columns=['ts']) + dti2 = DatetimeIndex(pd.to_datetime(df2.ts, unit='s') + .dt.tz_localize('UTC') + .dt.tz_convert('Europe/Madrid')) + df = DataFrame([5, 5], index=dti1) + + result = df.resample(rule='H').sum() + expected = DataFrame([5, 5], index=dti2) + + assert_frame_equal(result, expected) + + def test_resample_dst_anchor(self): + # 5172 + dti = DatetimeIndex([datetime(2012, 11, 4, 23)], tz='US/Eastern') + df = DataFrame([5], index=dti) + assert_frame_equal(df.resample(rule='D').sum(), + DataFrame([5], index=df.index.normalize())) + df.resample(rule='MS').sum() + assert_frame_equal( + df.resample(rule='MS').sum(), + DataFrame([5], index=DatetimeIndex([datetime(2012, 11, 1)], + tz='US/Eastern'))) + + dti = date_range('2013-09-30', '2013-11-02', freq='30Min', + tz='Europe/Paris') + values = range(dti.size) + df = DataFrame({"a": values, + "b": values, + "c": values}, index=dti, dtype='int64') + how = {"a": "min", "b": "max", "c": "count"} + + assert_frame_equal( + df.resample("W-MON").agg(how)[["a", "b", "c"]], + DataFrame({"a": [0, 48, 384, 720, 1056, 1394], + "b": [47, 383, 719, 1055, 1393, 1586], + "c": [48, 336, 336, 336, 338, 193]}, + index=date_range('9/30/2013', '11/4/2013', + freq='W-MON', tz='Europe/Paris')), + 'W-MON Frequency') + + assert_frame_equal( + df.resample("2W-MON").agg(how)[["a", "b", "c"]], + DataFrame({"a": [0, 48, 720, 1394], + "b": [47, 719, 1393, 1586], + "c": [48, 672, 674, 193]}, + index=date_range('9/30/2013', '11/11/2013', + freq='2W-MON', tz='Europe/Paris')), + '2W-MON Frequency') + + assert_frame_equal( + df.resample("MS").agg(how)[["a", "b", "c"]], + DataFrame({"a": [0, 48, 1538], + "b": [47, 1537, 1586], + "c": [48, 1490, 49]}, + index=date_range('9/1/2013', '11/1/2013', + freq='MS', 
tz='Europe/Paris')), + 'MS Frequency') + + assert_frame_equal( + df.resample("2MS").agg(how)[["a", "b", "c"]], + DataFrame({"a": [0, 1538], + "b": [1537, 1586], + "c": [1538, 49]}, + index=date_range('9/1/2013', '11/1/2013', + freq='2MS', tz='Europe/Paris')), + '2MS Frequency') + + df_daily = df['10/26/2013':'10/29/2013'] + assert_frame_equal( + df_daily.resample("D").agg({"a": "min", "b": "max", "c": "count"}) + [["a", "b", "c"]], + DataFrame({"a": [1248, 1296, 1346, 1394], + "b": [1295, 1345, 1393, 1441], + "c": [48, 50, 48, 48]}, + index=date_range('10/26/2013', '10/29/2013', + freq='D', tz='Europe/Paris')), + 'D Frequency') + + def test_resample_with_nat(self): + # GH 13020 + index = DatetimeIndex([pd.NaT, + '1970-01-01 00:00:00', + pd.NaT, + '1970-01-01 00:00:01', + '1970-01-01 00:00:02']) + frame = DataFrame([2, 3, 5, 7, 11], index=index) + + index_1s = DatetimeIndex(['1970-01-01 00:00:00', + '1970-01-01 00:00:01', + '1970-01-01 00:00:02']) + frame_1s = DataFrame([3, 7, 11], index=index_1s) + assert_frame_equal(frame.resample('1s').mean(), frame_1s) + + index_2s = DatetimeIndex(['1970-01-01 00:00:00', + '1970-01-01 00:00:02']) + frame_2s = DataFrame([5, 11], index=index_2s) + assert_frame_equal(frame.resample('2s').mean(), frame_2s) + + index_3s = DatetimeIndex(['1970-01-01 00:00:00']) + frame_3s = DataFrame([7], index=index_3s) + assert_frame_equal(frame.resample('3s').mean(), frame_3s) + + assert_frame_equal(frame.resample('60s').mean(), frame_3s) + + def test_resample_timedelta_values(self): + # GH 13119 + # check that timedelta dtype is preserved when NaT values are + # introduced by the resampling + + times = timedelta_range('1 day', '4 day', freq='4D') + df = DataFrame({'time': times}, index=times) + + times2 = timedelta_range('1 day', '4 day', freq='2D') + exp = Series(times2, index=times2, name='time') + exp.iloc[1] = pd.NaT + + res = df.resample('2D').first()['time'] + tm.assert_series_equal(res, exp) + res = df['time'].resample('2D').first() + tm.assert_series_equal(res, exp) + + def test_resample_datetime_values(self): + # GH 13119 + # check that datetime dtype is preserved when NaT values are + # introduced by the resampling + + dates = [datetime(2016, 1, 15), datetime(2016, 1, 19)] + df = DataFrame({'timestamp': dates}, index=dates) + + exp = Series([datetime(2016, 1, 15), pd.NaT, datetime(2016, 1, 19)], + index=date_range('2016-01-15', periods=3, freq='2D'), + name='timestamp') + + res = df.resample('2D').first()['timestamp'] + tm.assert_series_equal(res, exp) + res = df['timestamp'].resample('2D').first() + tm.assert_series_equal(res, exp) diff --git a/pandas/tests/resample/test_period.py b/pandas/tests/resample/test_period.py new file mode 100644 index 0000000000000..1332739263d8e --- /dev/null +++ b/pandas/tests/resample/test_period.py @@ -0,0 +1,677 @@ +# pylint: disable=E1101 + +from datetime import datetime, timedelta + +import dateutil +import numpy as np +import pytest +import pytz + +import pandas as pd +import pandas.util.testing as tm +from pandas import DataFrame, Series, Timestamp +from pandas.compat import lrange, product, range, zip +from pandas.core.indexes.datetimes import date_range +from pandas.core.indexes.period import period_range, PeriodIndex, Period +from pandas.core.resample import DatetimeIndex +from pandas.tseries.frequencies import DAYS, MONTHS +from pandas.util.testing import (assert_almost_equal, assert_frame_equal, + assert_series_equal) +from pandas._libs.period import IncompatibleFrequency + +from pandas.tests.resample.base import Base 
+from pandas.tests.resample.common import resample_methods, _simple_pts + + +class TestPeriodIndex(Base): + _index_factory = lambda x: period_range + + @pytest.fixture + def _series_name(self): + return 'pi' + + def create_series(self): + # TODO: replace calls to .create_series() by injecting the series + # fixture + i = period_range(datetime(2005, 1, 1), + datetime(2005, 1, 10), freq='D') + + return Series(np.arange(len(i)), index=i, name='pi') + + @pytest.mark.parametrize('freq', ['2D', '1H', '2H']) + @pytest.mark.parametrize('kind', ['period', None, 'timestamp']) + def test_asfreq(self, series_and_frame, freq, kind): + # GH 12884, 15944 + # make sure .asfreq() returns PeriodIndex (except kind='timestamp') + + obj = series_and_frame + if kind == 'timestamp': + expected = obj.to_timestamp().resample(freq).asfreq() + else: + start = obj.index[0].to_timestamp(how='start') + end = (obj.index[-1] + 1).to_timestamp(how='start') + new_index = date_range(start=start, end=end, freq=freq, + closed='left') + expected = obj.to_timestamp().reindex(new_index).to_period(freq) + result = obj.resample(freq, kind=kind).asfreq() + assert_almost_equal(result, expected) + + def test_asfreq_fill_value(self): + # test for fill value during resampling, issue 3715 + + s = self.create_series() + new_index = date_range(s.index[0].to_timestamp(how='start'), + (s.index[-1]).to_timestamp(how='start'), + freq='1H') + expected = s.to_timestamp().reindex(new_index, fill_value=4.0) + result = s.resample('1H', kind='timestamp').asfreq(fill_value=4.0) + assert_series_equal(result, expected) + + frame = s.to_frame('value') + new_index = date_range(frame.index[0].to_timestamp(how='start'), + (frame.index[-1]).to_timestamp(how='start'), + freq='1H') + expected = frame.to_timestamp().reindex(new_index, fill_value=3.0) + result = frame.resample('1H', kind='timestamp').asfreq(fill_value=3.0) + assert_frame_equal(result, expected) + + @pytest.mark.parametrize('freq', ['H', '12H', '2D', 'W']) + @pytest.mark.parametrize('kind', [None, 'period', 'timestamp']) + def test_selection(self, index, freq, kind): + # This is a bug, these should be implemented + # GH 14008 + df = pd.DataFrame({'date': index, + 'a': np.arange(len(index), dtype=np.int64)}, + index=pd.MultiIndex.from_arrays([ + np.arange(len(index), dtype=np.int64), + index], names=['v', 'd'])) + with pytest.raises(NotImplementedError): + df.resample(freq, on='date', kind=kind) + with pytest.raises(NotImplementedError): + df.resample(freq, level='d', kind=kind) + + def test_annual_upsample_D_s_f(self): + self._check_annual_upsample_cases('D', 'start', 'ffill') + + def test_annual_upsample_D_e_f(self): + self._check_annual_upsample_cases('D', 'end', 'ffill') + + def test_annual_upsample_D_s_b(self): + self._check_annual_upsample_cases('D', 'start', 'bfill') + + def test_annual_upsample_D_e_b(self): + self._check_annual_upsample_cases('D', 'end', 'bfill') + + def test_annual_upsample_B_s_f(self): + self._check_annual_upsample_cases('B', 'start', 'ffill') + + def test_annual_upsample_B_e_f(self): + self._check_annual_upsample_cases('B', 'end', 'ffill') + + def test_annual_upsample_B_s_b(self): + self._check_annual_upsample_cases('B', 'start', 'bfill') + + def test_annual_upsample_B_e_b(self): + self._check_annual_upsample_cases('B', 'end', 'bfill') + + def test_annual_upsample_M_s_f(self): + self._check_annual_upsample_cases('M', 'start', 'ffill') + + def test_annual_upsample_M_e_f(self): + self._check_annual_upsample_cases('M', 'end', 'ffill') + + def 
test_annual_upsample_M_s_b(self): + self._check_annual_upsample_cases('M', 'start', 'bfill') + + def test_annual_upsample_M_e_b(self): + self._check_annual_upsample_cases('M', 'end', 'bfill') + + def _check_annual_upsample_cases(self, targ, conv, meth, end='12/31/1991'): + for month in MONTHS: + ts = _simple_pts('1/1/1990', end, freq='A-%s' % month) + + result = getattr(ts.resample(targ, convention=conv), meth)() + expected = result.to_timestamp(targ, how=conv) + expected = expected.asfreq(targ, meth).to_period() + assert_series_equal(result, expected) + + def test_basic_downsample(self): + ts = _simple_pts('1/1/1990', '6/30/1995', freq='M') + result = ts.resample('a-dec').mean() + + expected = ts.groupby(ts.index.year).mean() + expected.index = period_range('1/1/1990', '6/30/1995', freq='a-dec') + assert_series_equal(result, expected) + + # this is ok + assert_series_equal(ts.resample('a-dec').mean(), result) + assert_series_equal(ts.resample('a').mean(), result) + + def test_not_subperiod(self): + # These are incompatible period rules for resampling + ts = _simple_pts('1/1/1990', '6/30/1995', freq='w-wed') + pytest.raises(ValueError, lambda: ts.resample('a-dec').mean()) + pytest.raises(ValueError, lambda: ts.resample('q-mar').mean()) + pytest.raises(ValueError, lambda: ts.resample('M').mean()) + pytest.raises(ValueError, lambda: ts.resample('w-thu').mean()) + + @pytest.mark.parametrize('freq', ['D', '2D']) + def test_basic_upsample(self, freq): + ts = _simple_pts('1/1/1990', '6/30/1995', freq='M') + result = ts.resample('a-dec').mean() + + resampled = result.resample(freq, convention='end').ffill() + expected = result.to_timestamp(freq, how='end') + expected = expected.asfreq(freq, 'ffill').to_period(freq) + assert_series_equal(resampled, expected) + + def test_upsample_with_limit(self): + rng = period_range('1/1/2000', periods=5, freq='A') + ts = Series(np.random.randn(len(rng)), rng) + + result = ts.resample('M', convention='end').ffill(limit=2) + expected = ts.asfreq('M').reindex(result.index, method='ffill', + limit=2) + assert_series_equal(result, expected) + + def test_annual_upsample(self): + ts = _simple_pts('1/1/1990', '12/31/1995', freq='A-DEC') + df = DataFrame({'a': ts}) + rdf = df.resample('D').ffill() + exp = df['a'].resample('D').ffill() + assert_series_equal(rdf['a'], exp) + + rng = period_range('2000', '2003', freq='A-DEC') + ts = Series([1, 2, 3, 4], index=rng) + + result = ts.resample('M').ffill() + ex_index = period_range('2000-01', '2003-12', freq='M') + + expected = ts.asfreq('M', how='start').reindex(ex_index, + method='ffill') + assert_series_equal(result, expected) + + def test_quarterly_upsample(self): + targets = ['D', 'B', 'M'] + + for month in MONTHS: + ts = _simple_pts('1/1/1990', '12/31/1995', freq='Q-%s' % month) + + for targ, conv in product(targets, ['start', 'end']): + result = ts.resample(targ, convention=conv).ffill() + expected = result.to_timestamp(targ, how=conv) + expected = expected.asfreq(targ, 'ffill').to_period() + assert_series_equal(result, expected) + + def test_monthly_upsample(self): + targets = ['D', 'B'] + + ts = _simple_pts('1/1/1990', '12/31/1995', freq='M') + + for targ, conv in product(targets, ['start', 'end']): + result = ts.resample(targ, convention=conv).ffill() + expected = result.to_timestamp(targ, how=conv) + expected = expected.asfreq(targ, 'ffill').to_period() + assert_series_equal(result, expected) + + def test_resample_basic(self): + # GH3609 + s = Series(range(100), index=date_range( + '20130101', freq='s', periods=100, 
name='idx'), dtype='float') + s[10:30] = np.nan + index = PeriodIndex([ + Period('2013-01-01 00:00', 'T'), + Period('2013-01-01 00:01', 'T')], name='idx') + expected = Series([34.5, 79.5], index=index) + result = s.to_period().resample('T', kind='period').mean() + assert_series_equal(result, expected) + result2 = s.resample('T', kind='period').mean() + assert_series_equal(result2, expected) + + @pytest.mark.parametrize('freq,expected_vals', [('M', [31, 29, 31, 9]), + ('2M', [31 + 29, 31 + 9])]) + def test_resample_count(self, freq, expected_vals): + # GH12774 + series = pd.Series(1, index=pd.period_range(start='2000', periods=100)) + result = series.resample(freq).count() + expected_index = pd.period_range(start='2000', freq=freq, + periods=len(expected_vals)) + expected = pd.Series(expected_vals, index=expected_index) + assert_series_equal(result, expected) + + def test_resample_same_freq(self): + + # GH12770 + series = pd.Series(range(3), index=pd.period_range( + start='2000', periods=3, freq='M')) + expected = series + + for method in resample_methods: + result = getattr(series.resample('M'), method)() + assert_series_equal(result, expected) + + def test_resample_incompat_freq(self): + + with pytest.raises(IncompatibleFrequency): + pd.Series(range(3), index=pd.period_range( + start='2000', periods=3, freq='M')).resample('W').mean() + + def test_with_local_timezone_pytz(self): + # see gh-5430 + local_timezone = pytz.timezone('America/Los_Angeles') + + start = datetime(year=2013, month=11, day=1, hour=0, minute=0, + tzinfo=pytz.utc) + # 1 day later + end = datetime(year=2013, month=11, day=2, hour=0, minute=0, + tzinfo=pytz.utc) + + index = pd.date_range(start, end, freq='H') + + series = pd.Series(1, index=index) + series = series.tz_convert(local_timezone) + result = series.resample('D', kind='period').mean() + + # Create the expected series + # Index is moved back a day with the timezone conversion from UTC to + # Pacific + expected_index = (pd.period_range(start=start, end=end, freq='D') - 1) + expected = pd.Series(1, index=expected_index) + assert_series_equal(result, expected) + + def test_with_local_timezone_dateutil(self): + # see gh-5430 + local_timezone = 'dateutil/America/Los_Angeles' + + start = datetime(year=2013, month=11, day=1, hour=0, minute=0, + tzinfo=dateutil.tz.tzutc()) + # 1 day later + end = datetime(year=2013, month=11, day=2, hour=0, minute=0, + tzinfo=dateutil.tz.tzutc()) + + index = pd.date_range(start, end, freq='H', name='idx') + + series = pd.Series(1, index=index) + series = series.tz_convert(local_timezone) + result = series.resample('D', kind='period').mean() + + # Create the expected series + # Index is moved back a day with the timezone conversion from UTC to + # Pacific + expected_index = (pd.period_range(start=start, end=end, freq='D', + name='idx') - 1) + expected = pd.Series(1, index=expected_index) + assert_series_equal(result, expected) + + def test_fill_method_and_how_upsample(self): + # GH2073 + s = Series(np.arange(9, dtype='int64'), + index=date_range('2010-01-01', periods=9, freq='Q')) + last = s.resample('M').ffill() + both = s.resample('M').ffill().resample('M').last().astype('int64') + assert_series_equal(last, both) + + def test_weekly_upsample(self): + targets = ['D', 'B'] + + for day in DAYS: + ts = _simple_pts('1/1/1990', '12/31/1995', freq='W-%s' % day) + + for targ, conv in product(targets, ['start', 'end']): + result = ts.resample(targ, convention=conv).ffill() + expected = result.to_timestamp(targ, how=conv) + expected = 
expected.asfreq(targ, 'ffill').to_period() + assert_series_equal(result, expected) + + def test_resample_to_timestamps(self): + ts = _simple_pts('1/1/1990', '12/31/1995', freq='M') + + result = ts.resample('A-DEC', kind='timestamp').mean() + expected = ts.to_timestamp(how='end').resample('A-DEC').mean() + assert_series_equal(result, expected) + + def test_resample_to_quarterly(self): + for month in MONTHS: + ts = _simple_pts('1990', '1992', freq='A-%s' % month) + quar_ts = ts.resample('Q-%s' % month).ffill() + + stamps = ts.to_timestamp('D', how='start') + qdates = period_range(ts.index[0].asfreq('D', 'start'), + ts.index[-1].asfreq('D', 'end'), + freq='Q-%s' % month) + + expected = stamps.reindex(qdates.to_timestamp('D', 's'), + method='ffill') + expected.index = qdates + + assert_series_equal(quar_ts, expected) + + # conforms, but different month + ts = _simple_pts('1990', '1992', freq='A-JUN') + + for how in ['start', 'end']: + result = ts.resample('Q-MAR', convention=how).ffill() + expected = ts.asfreq('Q-MAR', how=how) + expected = expected.reindex(result.index, method='ffill') + + # .to_timestamp('D') + # expected = expected.resample('Q-MAR').ffill() + + assert_series_equal(result, expected) + + def test_resample_fill_missing(self): + rng = PeriodIndex([2000, 2005, 2007, 2009], freq='A') + + s = Series(np.random.randn(4), index=rng) + + stamps = s.to_timestamp() + filled = s.resample('A').ffill() + expected = stamps.resample('A').ffill().to_period('A') + assert_series_equal(filled, expected) + + def test_cant_fill_missing_dups(self): + rng = PeriodIndex([2000, 2005, 2005, 2007, 2007], freq='A') + s = Series(np.random.randn(5), index=rng) + pytest.raises(Exception, lambda: s.resample('A').ffill()) + + @pytest.mark.parametrize('freq', ['5min']) + @pytest.mark.parametrize('kind', ['period', None, 'timestamp']) + def test_resample_5minute(self, freq, kind): + rng = period_range('1/1/2000', '1/5/2000', freq='T') + ts = Series(np.random.randn(len(rng)), index=rng) + expected = ts.to_timestamp().resample(freq).mean() + if kind != 'timestamp': + expected = expected.to_period(freq) + result = ts.resample(freq, kind=kind).mean() + assert_series_equal(result, expected) + + def test_upsample_daily_business_daily(self): + ts = _simple_pts('1/1/2000', '2/1/2000', freq='B') + + result = ts.resample('D').asfreq() + expected = ts.asfreq('D').reindex(period_range('1/3/2000', '2/1/2000')) + assert_series_equal(result, expected) + + ts = _simple_pts('1/1/2000', '2/1/2000') + result = ts.resample('H', convention='s').asfreq() + exp_rng = period_range('1/1/2000', '2/1/2000 23:00', freq='H') + expected = ts.asfreq('H', how='s').reindex(exp_rng) + assert_series_equal(result, expected) + + def test_resample_irregular_sparse(self): + dr = date_range(start='1/1/2012', freq='5min', periods=1000) + s = Series(np.array(100), index=dr) + # subset the data. 
+ subset = s[:'2012-01-04 06:55'] + + result = subset.resample('10min').apply(len) + expected = s.resample('10min').apply(len).loc[result.index] + assert_series_equal(result, expected) + + def test_resample_weekly_all_na(self): + rng = date_range('1/1/2000', periods=10, freq='W-WED') + ts = Series(np.random.randn(len(rng)), index=rng) + + result = ts.resample('W-THU').asfreq() + + assert result.isna().all() + + result = ts.resample('W-THU').asfreq().ffill()[:-1] + expected = ts.asfreq('W-THU').ffill() + assert_series_equal(result, expected) + + def test_resample_tz_localized(self): + dr = date_range(start='2012-4-13', end='2012-5-1') + ts = Series(lrange(len(dr)), dr) + + ts_utc = ts.tz_localize('UTC') + ts_local = ts_utc.tz_convert('America/Los_Angeles') + + result = ts_local.resample('W').mean() + + ts_local_naive = ts_local.copy() + ts_local_naive.index = [x.replace(tzinfo=None) + for x in ts_local_naive.index.to_pydatetime()] + + exp = ts_local_naive.resample( + 'W').mean().tz_localize('America/Los_Angeles') + + assert_series_equal(result, exp) + + # it works + result = ts_local.resample('D').mean() + + # #2245 + idx = date_range('2001-09-20 15:59', '2001-09-20 16:00', freq='T', + tz='Australia/Sydney') + s = Series([1, 2], index=idx) + + result = s.resample('D', closed='right', label='right').mean() + ex_index = date_range('2001-09-21', periods=1, freq='D', + tz='Australia/Sydney') + expected = Series([1.5], index=ex_index) + + assert_series_equal(result, expected) + + # for good measure + result = s.resample('D', kind='period').mean() + ex_index = period_range('2001-09-20', periods=1, freq='D') + expected = Series([1.5], index=ex_index) + assert_series_equal(result, expected) + + # GH 6397 + # comparing an offset that doesn't propagate tz's + rng = date_range('1/1/2011', periods=20000, freq='H') + rng = rng.tz_localize('EST') + ts = DataFrame(index=rng) + ts['first'] = np.random.randn(len(rng)) + ts['second'] = np.cumsum(np.random.randn(len(rng))) + expected = DataFrame( + { + 'first': ts.resample('A').sum()['first'], + 'second': ts.resample('A').mean()['second']}, + columns=['first', 'second']) + result = ts.resample( + 'A').agg({'first': np.sum, + 'second': np.mean}).reindex(columns=['first', 'second']) + assert_frame_equal(result, expected) + + def test_closed_left_corner(self): + # #1465 + s = Series(np.random.randn(21), + index=date_range(start='1/1/2012 9:30', + freq='1min', periods=21)) + s[0] = np.nan + + result = s.resample('10min', closed='left', label='right').mean() + exp = s[1:].resample('10min', closed='left', label='right').mean() + assert_series_equal(result, exp) + + result = s.resample('10min', closed='left', label='left').mean() + exp = s[1:].resample('10min', closed='left', label='left').mean() + + ex_index = date_range(start='1/1/2012 9:30', freq='10min', periods=3) + + tm.assert_index_equal(result.index, ex_index) + assert_series_equal(result, exp) + + def test_quarterly_resampling(self): + rng = period_range('2000Q1', periods=10, freq='Q-DEC') + ts = Series(np.arange(10), index=rng) + + result = ts.resample('A').mean() + exp = ts.to_timestamp().resample('A').mean().to_period() + assert_series_equal(result, exp) + + def test_resample_weekly_bug_1726(self): + # 8/6/12 is a Monday + ind = DatetimeIndex(start="8/6/2012", end="8/26/2012", freq="D") + n = len(ind) + data = [[x] * 5 for x in range(n)] + df = DataFrame(data, columns=['open', 'high', 'low', 'close', 'vol'], + index=ind) + + # it works! 
+ df.resample('W-MON', closed='left', label='left').first() + + def test_resample_bms_2752(self): + # GH2753 + foo = pd.Series(index=pd.bdate_range('20000101', '20000201')) + res1 = foo.resample("BMS").mean() + res2 = foo.resample("BMS").mean().resample("B").mean() + assert res1.index[0] == Timestamp('20000103') + assert res1.index[0] == res2.index[0] + + # def test_monthly_convention_span(self): + # rng = period_range('2000-01', periods=3, freq='M') + # ts = Series(np.arange(3), index=rng) + + # # hacky way to get same thing + # exp_index = period_range('2000-01-01', '2000-03-31', freq='D') + # expected = ts.asfreq('D', how='end').reindex(exp_index) + # expected = expected.fillna(method='bfill') + + # result = ts.resample('D', convention='span').mean() + + # assert_series_equal(result, expected) + + def test_default_right_closed_label(self): + end_freq = ['D', 'Q', 'M', 'D'] + end_types = ['M', 'A', 'Q', 'W'] + + for from_freq, to_freq in zip(end_freq, end_types): + idx = DatetimeIndex(start='8/15/2012', periods=100, freq=from_freq) + df = DataFrame(np.random.randn(len(idx), 2), idx) + + resampled = df.resample(to_freq).mean() + assert_frame_equal(resampled, df.resample(to_freq, closed='right', + label='right').mean()) + + def test_default_left_closed_label(self): + others = ['MS', 'AS', 'QS', 'D', 'H'] + others_freq = ['D', 'Q', 'M', 'H', 'T'] + + for from_freq, to_freq in zip(others_freq, others): + idx = DatetimeIndex(start='8/15/2012', periods=100, freq=from_freq) + df = DataFrame(np.random.randn(len(idx), 2), idx) + + resampled = df.resample(to_freq).mean() + assert_frame_equal(resampled, df.resample(to_freq, closed='left', + label='left').mean()) + + def test_all_values_single_bin(self): + # 2070 + index = period_range(start="2012-01-01", end="2012-12-31", freq="M") + s = Series(np.random.randn(len(index)), index=index) + + result = s.resample("A").mean() + tm.assert_almost_equal(result[0], s.mean()) + + def test_evenly_divisible_with_no_extra_bins(self): + # 4076 + # when the frequency is evenly divisible, sometimes extra bins + + df = DataFrame(np.random.randn(9, 3), + index=date_range('2000-1-1', periods=9)) + result = df.resample('5D').mean() + expected = pd.concat( + [df.iloc[0:5].mean(), df.iloc[5:].mean()], axis=1).T + expected.index = [Timestamp('2000-1-1'), Timestamp('2000-1-6')] + assert_frame_equal(result, expected) + + index = date_range(start='2001-5-4', periods=28) + df = DataFrame( + [{'REST_KEY': 1, 'DLY_TRN_QT': 80, 'DLY_SLS_AMT': 90, + 'COOP_DLY_TRN_QT': 30, 'COOP_DLY_SLS_AMT': 20}] * 28 + + [{'REST_KEY': 2, 'DLY_TRN_QT': 70, 'DLY_SLS_AMT': 10, + 'COOP_DLY_TRN_QT': 50, 'COOP_DLY_SLS_AMT': 20}] * 28, + index=index.append(index)).sort_index() + + index = date_range('2001-5-4', periods=4, freq='7D') + expected = DataFrame( + [{'REST_KEY': 14, 'DLY_TRN_QT': 14, 'DLY_SLS_AMT': 14, + 'COOP_DLY_TRN_QT': 14, 'COOP_DLY_SLS_AMT': 14}] * 4, + index=index) + result = df.resample('7D').count() + assert_frame_equal(result, expected) + + expected = DataFrame( + [{'REST_KEY': 21, 'DLY_TRN_QT': 1050, 'DLY_SLS_AMT': 700, + 'COOP_DLY_TRN_QT': 560, 'COOP_DLY_SLS_AMT': 280}] * 4, + index=index) + result = df.resample('7D').sum() + assert_frame_equal(result, expected) + + @pytest.mark.parametrize('kind', ['period', None, 'timestamp']) + @pytest.mark.parametrize('agg_arg', ['mean', {'value': 'mean'}, ['mean']]) + def test_loffset_returns_datetimeindex(self, frame, kind, agg_arg): + # make sure passing loffset returns DatetimeIndex in all cases + # basic method taken from 
Base.test_resample_loffset_arg_type() + df = frame + expected_means = [df.values[i:i + 2].mean() + for i in range(0, len(df.values), 2)] + expected_index = self.create_index(df.index[0], + periods=len(df.index) / 2, + freq='2D') + + # loffset coerces PeriodIndex to DateTimeIndex + expected_index = expected_index.to_timestamp() + expected_index += timedelta(hours=2) + expected = DataFrame({'value': expected_means}, index=expected_index) + + result_agg = df.resample('2D', loffset='2H', kind=kind).agg(agg_arg) + with tm.assert_produces_warning(FutureWarning, check_stacklevel=False): + result_how = df.resample('2D', how=agg_arg, loffset='2H', + kind=kind) + if isinstance(agg_arg, list): + expected.columns = pd.MultiIndex.from_tuples([('value', 'mean')]) + assert_frame_equal(result_agg, expected) + assert_frame_equal(result_how, expected) + + @pytest.mark.parametrize('freq, period_mult', [('H', 24), ('12H', 2)]) + @pytest.mark.parametrize('kind', [None, 'period']) + def test_upsampling_ohlc(self, freq, period_mult, kind): + # GH 13083 + pi = PeriodIndex(start='2000', freq='D', periods=10) + s = Series(range(len(pi)), index=pi) + expected = s.to_timestamp().resample(freq).ohlc().to_period(freq) + + # timestamp-based resampling doesn't include all sub-periods + # of the last original period, so extend accordingly: + new_index = PeriodIndex(start='2000', freq=freq, + periods=period_mult * len(pi)) + expected = expected.reindex(new_index) + result = s.resample(freq, kind=kind).ohlc() + assert_frame_equal(result, expected) + + @pytest.mark.parametrize('periods, values', + [([pd.NaT, '1970-01-01 00:00:00', pd.NaT, + '1970-01-01 00:00:02', '1970-01-01 00:00:03'], + [2, 3, 5, 7, 11]), + ([pd.NaT, pd.NaT, '1970-01-01 00:00:00', pd.NaT, + pd.NaT, pd.NaT, '1970-01-01 00:00:02', + '1970-01-01 00:00:03', pd.NaT, pd.NaT], + [1, 2, 3, 5, 6, 8, 7, 11, 12, 13])]) + @pytest.mark.parametrize('freq, expected_values', + [('1s', [3, np.NaN, 7, 11]), + ('2s', [3, int((7 + 11) / 2)]), + ('3s', [int((3 + 7) / 2), 11])]) + def test_resample_with_nat(self, periods, values, freq, expected_values): + # GH 13224 + index = PeriodIndex(periods, freq='S') + frame = DataFrame(values, index=index) + + expected_index = period_range('1970-01-01 00:00:00', + periods=len(expected_values), freq=freq) + expected = DataFrame(expected_values, index=expected_index) + result = frame.resample(freq).mean() + assert_frame_equal(result, expected) + + def test_resample_with_only_nat(self): + # GH 13224 + pi = PeriodIndex([pd.NaT] * 3, freq='S') + frame = DataFrame([2, 3, 5], index=pi) + expected_index = PeriodIndex(data=[], freq=pi.freq) + expected = DataFrame([], index=expected_index) + result = frame.resample('1s').mean() + assert_frame_equal(result, expected) diff --git a/pandas/tests/resample/test_resample_api.py b/pandas/tests/resample/test_resample_api.py new file mode 100644 index 0000000000000..b7721b9e08570 --- /dev/null +++ b/pandas/tests/resample/test_resample_api.py @@ -0,0 +1,650 @@ +# pylint: disable=E1101 + +from datetime import datetime + +import numpy as np +import pytest + +import pandas as pd +import pandas.util.testing as tm +from pandas import DataFrame, Series, Timestamp +from pandas.compat import range, OrderedDict +from pandas.core.base import SpecificationError +from pandas.core.dtypes.generic import ABCDataFrame, ABCSeries +from pandas.core.indexes.datetimes import date_range +from pandas.core.resample import DatetimeIndex, DatetimeIndexResampler +from pandas.util.testing import assert_frame_equal, assert_series_equal 
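Everything in the class that follows leans on the deferred-resampler pattern: `.resample()` returns a `Resampler` object and computes nothing until an aggregation method fires. A minimal sketch of that contract as the tests state it (illustrative only, not code from the patch):

```python
# Sketch of the deferred API that TestResampleAPI below exercises
# (illustrative, not part of the patch).
import numpy as np
import pandas as pd

idx = pd.date_range('2005-01-01', '2005-01-10', freq='Min')
s = pd.Series(np.random.rand(len(idx)), idx)

r = s.resample('H')        # a DatetimeIndexResampler; nothing computed yet
hourly = r.mean()          # the aggregation call triggers the binning
assert isinstance(hourly, pd.Series)
assert len(hourly) == 217  # 9 full days of hourly bins plus the end point

# Compound aggregations route through the same machinery, which is what
# test_agg cross-checks against the equivalent groupby(Grouper(...)) calls.
out = s.to_frame('A').resample('2D').agg({'A': ['mean', 'std']})
print(out)
```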
+ + +class TestResampleAPI(object): + + def setup_method(self, method): + dti = DatetimeIndex(start=datetime(2005, 1, 1), + end=datetime(2005, 1, 10), freq='Min') + + self.series = Series(np.random.rand(len(dti)), dti) + self.frame = DataFrame( + {'A': self.series, 'B': self.series, 'C': np.arange(len(dti))}) + + def test_str(self): + + r = self.series.resample('H') + assert ('DatetimeIndexResampler [freq=<Hour>, axis=0, closed=left, ' + 'label=left, convention=start, base=0]' in str(r)) + + def test_api(self): + + r = self.series.resample('H') + result = r.mean() + assert isinstance(result, Series) + assert len(result) == 217 + + r = self.series.to_frame().resample('H') + result = r.mean() + assert isinstance(result, DataFrame) + assert len(result) == 217 + + def test_api_changes_v018(self): + + # change from .resample(....., how=...) + # to .resample(......).how() + + r = self.series.resample('H') + assert isinstance(r, DatetimeIndexResampler) + + for how in ['sum', 'mean', 'prod', 'min', 'max', 'var', 'std']: + with tm.assert_produces_warning(FutureWarning, + check_stacklevel=False): + result = self.series.resample('H', how=how) + expected = getattr(self.series.resample('H'), how)() + tm.assert_series_equal(result, expected) + + with tm.assert_produces_warning(FutureWarning, + check_stacklevel=False): + result = self.series.resample('H', how='ohlc') + expected = self.series.resample('H').ohlc() + tm.assert_frame_equal(result, expected) + + # compat for pandas-like methods + for how in ['sort_values', 'isna']: + with tm.assert_produces_warning(FutureWarning, + check_stacklevel=False): + getattr(r, how)() + + # invalids as these can be setting operations + r = self.series.resample('H') + pytest.raises(ValueError, lambda: r.iloc[0]) + pytest.raises(ValueError, lambda: r.iat[0]) + pytest.raises(ValueError, lambda: r.loc[0]) + pytest.raises(ValueError, lambda: r.loc[ + Timestamp('2013-01-01 00:00:00', offset='H')]) + pytest.raises(ValueError, lambda: r.at[ + Timestamp('2013-01-01 00:00:00', offset='H')]) + + def f(): + r[0] = 5 + + pytest.raises(ValueError, f) + + # str/repr + r = self.series.resample('H') + with tm.assert_produces_warning(None): + str(r) + with tm.assert_produces_warning(None): + repr(r) + + with tm.assert_produces_warning(FutureWarning, + check_stacklevel=False): + tm.assert_numpy_array_equal(np.array(r), np.array(r.mean())) + + # masquerade as Series/DataFrame as needed for API compat + assert isinstance(self.series.resample('H'), ABCSeries) + assert not isinstance(self.frame.resample('H'), ABCSeries) + assert not isinstance(self.series.resample('H'), ABCDataFrame) + assert isinstance(self.frame.resample('H'), ABCDataFrame) + + # bin numeric ops + for op in ['__add__', '__mul__', '__truediv__', '__div__', '__sub__']: + + if getattr(self.series, op, None) is None: + continue + r = self.series.resample('H') + + with tm.assert_produces_warning(FutureWarning, + check_stacklevel=False): + assert isinstance(getattr(r, op)(2), pd.Series) + + # unary numeric ops + for op in ['__pos__', '__neg__', '__abs__', '__inv__']: + + if getattr(self.series, op, None) is None: + continue + r = self.series.resample('H') + + with tm.assert_produces_warning(FutureWarning, + check_stacklevel=False): + assert isinstance(getattr(r, op)(), pd.Series) + + # comparison ops + for op in ['__lt__', '__le__', '__gt__', '__ge__', '__eq__', '__ne__']: + r = self.series.resample('H') + + with tm.assert_produces_warning(FutureWarning, + check_stacklevel=False): + assert isinstance(getattr(r, op)(2), 
pd.Series) + + # IPython introspection shouldn't trigger warning GH 13618 + for op in ['_repr_json', '_repr_latex', + '_ipython_canary_method_should_not_exist_']: + r = self.series.resample('H') + with tm.assert_produces_warning(None): + getattr(r, op, None) + + # getitem compat + df = self.series.to_frame('foo') + + # same as prior versions for DataFrame + pytest.raises(KeyError, lambda: df.resample('H')[0]) + + # compat for Series + # but we cannot be sure that we need a warning here + with tm.assert_produces_warning(FutureWarning, + check_stacklevel=False): + result = self.series.resample('H')[0] + expected = self.series.resample('H').mean()[0] + assert result == expected + + with tm.assert_produces_warning(FutureWarning, + check_stacklevel=False): + result = self.series.resample('H')['2005-01-09 23:00:00'] + expected = self.series.resample('H').mean()['2005-01-09 23:00:00'] + assert result == expected + + def test_groupby_resample_api(self): + + # GH 12448 + # .groupby(...).resample(...) hitting warnings + # when appropriate + df = DataFrame({'date': pd.date_range(start='2016-01-01', + periods=4, + freq='W'), + 'group': [1, 1, 2, 2], + 'val': [5, 6, 7, 8]}).set_index('date') + + # replication step + i = pd.date_range('2016-01-03', periods=8).tolist() + \ + pd.date_range('2016-01-17', periods=8).tolist() + index = pd.MultiIndex.from_arrays([[1] * 8 + [2] * 8, i], + names=['group', 'date']) + expected = DataFrame({'val': [5] * 7 + [6] + [7] * 7 + [8]}, + index=index) + result = df.groupby('group').apply( + lambda x: x.resample('1D').ffill())[['val']] + assert_frame_equal(result, expected) + + def test_groupby_resample_on_api(self): + + # GH 15021 + # .groupby(...).resample(on=...) results in an unexpected + # keyword warning. + df = pd.DataFrame({'key': ['A', 'B'] * 5, + 'dates': pd.date_range('2016-01-01', periods=10), + 'values': np.random.randn(10)}) + + expected = df.set_index('dates').groupby('key').resample('D').mean() + + result = df.groupby('key').resample('D', on='dates').mean() + assert_frame_equal(result, expected) + + def test_plot_api(self): + tm._skip_if_no_mpl() + + # .resample(....).plot(...) + # hitting warnings + # GH 12448 + s = Series(np.random.randn(60), + index=date_range('2016-01-01', periods=60, freq='1min')) + with tm.assert_produces_warning(FutureWarning, + check_stacklevel=False): + result = s.resample('15min').plot() + tm.assert_is_valid_plot_return_object(result) + + with tm.assert_produces_warning(FutureWarning, + check_stacklevel=False): + result = s.resample('15min', how='sum').plot() + tm.assert_is_valid_plot_return_object(result) + + def test_getitem(self): + + r = self.frame.resample('H') + tm.assert_index_equal(r._selected_obj.columns, self.frame.columns) + + r = self.frame.resample('H')['B'] + assert r._selected_obj.name == self.frame.columns[1] + + # technically this is allowed + r = self.frame.resample('H')['A', 'B'] + tm.assert_index_equal(r._selected_obj.columns, + self.frame.columns[[0, 1]]) + + r = self.frame.resample('H')['A', 'B'] + tm.assert_index_equal(r._selected_obj.columns, + self.frame.columns[[0, 1]]) + + def test_select_bad_cols(self): + + g = self.frame.resample('H') + pytest.raises(KeyError, g.__getitem__, ['D']) + + pytest.raises(KeyError, g.__getitem__, ['A', 'D']) + with tm.assert_raises_regex(KeyError, '^[^A]+$'): + # A should not be referenced as a bad column... + # will have to rethink regex if you change message! 
+ g[['A', 'D']] + + def test_attribute_access(self): + + r = self.frame.resample('H') + tm.assert_series_equal(r.A.sum(), r['A'].sum()) + + # getting + pytest.raises(AttributeError, lambda: r.F) + + # setting + def f(): + r.F = 'bah' + + pytest.raises(ValueError, f) + + def test_api_compat_before_use(self): + + # make sure that we are setting the binner + # on these attributes + for attr in ['groups', 'ngroups', 'indices']: + rng = pd.date_range('1/1/2012', periods=100, freq='S') + ts = pd.Series(np.arange(len(rng)), index=rng) + rs = ts.resample('30s') + + # before use + getattr(rs, attr) + + # after grouper is initialized is ok + rs.mean() + getattr(rs, attr) + + def tests_skip_nuisance(self): + + df = self.frame + df['D'] = 'foo' + r = df.resample('H') + result = r[['A', 'B']].sum() + expected = pd.concat([r.A.sum(), r.B.sum()], axis=1) + assert_frame_equal(result, expected) + + expected = r[['A', 'B', 'C']].sum() + result = r.sum() + assert_frame_equal(result, expected) + + def test_downsample_but_actually_upsampling(self): + + # this is reindex / asfreq + rng = pd.date_range('1/1/2012', periods=100, freq='S') + ts = pd.Series(np.arange(len(rng), dtype='int64'), index=rng) + result = ts.resample('20s').asfreq() + expected = Series([0, 20, 40, 60, 80], + index=pd.date_range('2012-01-01 00:00:00', + freq='20s', + periods=5)) + assert_series_equal(result, expected) + + def test_combined_up_downsampling_of_irregular(self): + + # since we are really doing an operation like this + # ts2.resample('2s').mean().ffill() + # preserve these semantics + + rng = pd.date_range('1/1/2012', periods=100, freq='S') + ts = pd.Series(np.arange(len(rng)), index=rng) + ts2 = ts.iloc[[0, 1, 2, 3, 5, 7, 11, 15, 16, 25, 30]] + + with tm.assert_produces_warning(FutureWarning, + check_stacklevel=False): + result = ts2.resample('2s', how='mean', fill_method='ffill') + expected = ts2.resample('2s').mean().ffill() + assert_series_equal(result, expected) + + def test_transform(self): + + r = self.series.resample('20min') + expected = self.series.groupby( + pd.Grouper(freq='20min')).transform('mean') + result = r.transform('mean') + assert_series_equal(result, expected) + + def test_fillna(self): + + # need to upsample here + rng = pd.date_range('1/1/2012', periods=10, freq='2S') + ts = pd.Series(np.arange(len(rng), dtype='int64'), index=rng) + r = ts.resample('s') + + expected = r.ffill() + result = r.fillna(method='ffill') + assert_series_equal(result, expected) + + expected = r.bfill() + result = r.fillna(method='bfill') + assert_series_equal(result, expected) + + with pytest.raises(ValueError): + r.fillna(0) + + def test_apply_without_aggregation(self): + + # both resample and groupby should work w/o aggregation + r = self.series.resample('20min') + g = self.series.groupby(pd.Grouper(freq='20min')) + + for t in [g, r]: + result = t.apply(lambda x: x) + assert_series_equal(result, self.series) + + def test_agg_consistency(self): + + # make sure that we are consistent across + # similar aggregations with and w/o selection list + df = DataFrame(np.random.randn(1000, 3), + index=pd.date_range('1/1/2012', freq='S', periods=1000), + columns=['A', 'B', 'C']) + + r = df.resample('3T') + + with tm.assert_produces_warning(FutureWarning, + check_stacklevel=False): + expected = r[['A', 'B', 'C']].agg({'r1': 'mean', 'r2': 'sum'}) + result = r.agg({'r1': 'mean', 'r2': 'sum'}) + assert_frame_equal(result, expected) + + # TODO: once GH 14008 is fixed, move these tests into + # `Base` test class + def test_agg(self): + # test with
all three Resampler apis and TimeGrouper + + np.random.seed(1234) + index = date_range(datetime(2005, 1, 1), + datetime(2005, 1, 10), freq='D') + index.name = 'date' + df = pd.DataFrame(np.random.rand(10, 2), + columns=list('AB'), + index=index) + df_col = df.reset_index() + df_mult = df_col.copy() + df_mult.index = pd.MultiIndex.from_arrays([range(10), df.index], + names=['index', 'date']) + r = df.resample('2D') + cases = [ + r, + df_col.resample('2D', on='date'), + df_mult.resample('2D', level='date'), + df.groupby(pd.Grouper(freq='2D')) + ] + + a_mean = r['A'].mean() + a_std = r['A'].std() + a_sum = r['A'].sum() + b_mean = r['B'].mean() + b_std = r['B'].std() + b_sum = r['B'].sum() + + expected = pd.concat([a_mean, a_std, b_mean, b_std], axis=1) + expected.columns = pd.MultiIndex.from_product([['A', 'B'], + ['mean', 'std']]) + for t in cases: + result = t.aggregate([np.mean, np.std]) + assert_frame_equal(result, expected) + + expected = pd.concat([a_mean, b_std], axis=1) + for t in cases: + result = t.aggregate({'A': np.mean, + 'B': np.std}) + assert_frame_equal(result, expected, check_like=True) + + expected = pd.concat([a_mean, a_std], axis=1) + expected.columns = pd.MultiIndex.from_tuples([('A', 'mean'), + ('A', 'std')]) + for t in cases: + result = t.aggregate({'A': ['mean', 'std']}) + assert_frame_equal(result, expected) + + expected = pd.concat([a_mean, a_sum], axis=1) + expected.columns = ['mean', 'sum'] + for t in cases: + result = t['A'].aggregate(['mean', 'sum']) + assert_frame_equal(result, expected) + + expected = pd.concat([a_mean, a_sum], axis=1) + expected.columns = pd.MultiIndex.from_tuples([('A', 'mean'), + ('A', 'sum')]) + for t in cases: + with tm.assert_produces_warning(FutureWarning, + check_stacklevel=False): + result = t.aggregate({'A': {'mean': 'mean', 'sum': 'sum'}}) + assert_frame_equal(result, expected, check_like=True) + + expected = pd.concat([a_mean, a_sum, b_mean, b_sum], axis=1) + expected.columns = pd.MultiIndex.from_tuples([('A', 'mean'), + ('A', 'sum'), + ('B', 'mean2'), + ('B', 'sum2')]) + for t in cases: + with tm.assert_produces_warning(FutureWarning, + check_stacklevel=False): + result = t.aggregate({'A': {'mean': 'mean', 'sum': 'sum'}, + 'B': {'mean2': 'mean', 'sum2': 'sum'}}) + assert_frame_equal(result, expected, check_like=True) + + expected = pd.concat([a_mean, a_std, b_mean, b_std], axis=1) + expected.columns = pd.MultiIndex.from_tuples([('A', 'mean'), + ('A', 'std'), + ('B', 'mean'), + ('B', 'std')]) + for t in cases: + result = t.aggregate({'A': ['mean', 'std'], + 'B': ['mean', 'std']}) + assert_frame_equal(result, expected, check_like=True) + + expected = pd.concat([a_mean, a_sum, b_mean, b_sum], axis=1) + expected.columns = pd.MultiIndex.from_tuples([('r1', 'A', 'mean'), + ('r1', 'A', 'sum'), + ('r2', 'B', 'mean'), + ('r2', 'B', 'sum')]) + + def test_agg_misc(self): + # test with all three Resampler apis and TimeGrouper + + np.random.seed(1234) + index = date_range(datetime(2005, 1, 1), + datetime(2005, 1, 10), freq='D') + index.name = 'date' + df = pd.DataFrame(np.random.rand(10, 2), + columns=list('AB'), + index=index) + df_col = df.reset_index() + df_mult = df_col.copy() + df_mult.index = pd.MultiIndex.from_arrays([range(10), df.index], + names=['index', 'date']) + + r = df.resample('2D') + cases = [ + r, + df_col.resample('2D', on='date'), + df_mult.resample('2D', level='date'), + df.groupby(pd.Grouper(freq='2D')) + ] + + # passed lambda + for t in cases: + result = t.agg({'A': np.sum, + 'B': lambda x: np.std(x, ddof=1)}) + rcustom = 
t['B'].apply(lambda x: np.std(x, ddof=1)) + expected = pd.concat([r['A'].sum(), rcustom], axis=1) + assert_frame_equal(result, expected, check_like=True) + + # agg with renamers + expected = pd.concat([t['A'].sum(), + t['B'].sum(), + t['A'].mean(), + t['B'].mean()], + axis=1) + expected.columns = pd.MultiIndex.from_tuples([('result1', 'A'), + ('result1', 'B'), + ('result2', 'A'), + ('result2', 'B')]) + + for t in cases: + with tm.assert_produces_warning(FutureWarning, + check_stacklevel=False): + result = t[['A', 'B']].agg(OrderedDict([('result1', np.sum), + ('result2', np.mean)])) + assert_frame_equal(result, expected, check_like=True) + + # agg with different hows + expected = pd.concat([t['A'].sum(), + t['A'].std(), + t['B'].mean(), + t['B'].std()], + axis=1) + expected.columns = pd.MultiIndex.from_tuples([('A', 'sum'), + ('A', 'std'), + ('B', 'mean'), + ('B', 'std')]) + for t in cases: + result = t.agg(OrderedDict([('A', ['sum', 'std']), + ('B', ['mean', 'std'])])) + assert_frame_equal(result, expected, check_like=True) + + # equivalent of using a selection list / or not + for t in cases: + result = t[['A', 'B']].agg({'A': ['sum', 'std'], + 'B': ['mean', 'std']}) + assert_frame_equal(result, expected, check_like=True) + + # series like aggs + for t in cases: + with tm.assert_produces_warning(FutureWarning, + check_stacklevel=False): + result = t['A'].agg({'A': ['sum', 'std']}) + expected = pd.concat([t['A'].sum(), + t['A'].std()], + axis=1) + expected.columns = pd.MultiIndex.from_tuples([('A', 'sum'), + ('A', 'std')]) + assert_frame_equal(result, expected, check_like=True) + + expected = pd.concat([t['A'].agg(['sum', 'std']), + t['A'].agg(['mean', 'std'])], + axis=1) + expected.columns = pd.MultiIndex.from_tuples([('A', 'sum'), + ('A', 'std'), + ('B', 'mean'), + ('B', 'std')]) + with tm.assert_produces_warning(FutureWarning, + check_stacklevel=False): + result = t['A'].agg({'A': ['sum', 'std'], + 'B': ['mean', 'std']}) + assert_frame_equal(result, expected, check_like=True) + + # errors + # invalid names in the agg specification + for t in cases: + def f(): + with tm.assert_produces_warning(FutureWarning, + check_stacklevel=False): + t[['A']].agg({'A': ['sum', 'std'], + 'B': ['mean', 'std']}) + + pytest.raises(SpecificationError, f) + + def test_agg_nested_dicts(self): + + np.random.seed(1234) + index = date_range(datetime(2005, 1, 1), + datetime(2005, 1, 10), freq='D') + index.name = 'date' + df = pd.DataFrame(np.random.rand(10, 2), + columns=list('AB'), + index=index) + df_col = df.reset_index() + df_mult = df_col.copy() + df_mult.index = pd.MultiIndex.from_arrays([range(10), df.index], + names=['index', 'date']) + r = df.resample('2D') + cases = [ + r, + df_col.resample('2D', on='date'), + df_mult.resample('2D', level='date'), + df.groupby(pd.Grouper(freq='2D')) + ] + + for t in cases: + def f(): + t.aggregate({'r1': {'A': ['mean', 'sum']}, + 'r2': {'B': ['mean', 'sum']}}) + pytest.raises(ValueError, f) + + for t in cases: + expected = pd.concat([t['A'].mean(), t['A'].std(), t['B'].mean(), + t['B'].std()], axis=1) + expected.columns = pd.MultiIndex.from_tuples([('ra', 'mean'), ( + 'ra', 'std'), ('rb', 'mean'), ('rb', 'std')]) + + with tm.assert_produces_warning(FutureWarning, + check_stacklevel=False): + result = t[['A', 'B']].agg({'A': {'ra': ['mean', 'std']}, + 'B': {'rb': ['mean', 'std']}}) + assert_frame_equal(result, expected, check_like=True) + + with tm.assert_produces_warning(FutureWarning, + check_stacklevel=False): + result = t.agg({'A': {'ra': ['mean', 'std']}, + 'B': 
{'rb': ['mean', 'std']}}) + assert_frame_equal(result, expected, check_like=True) + + def test_selection_api_validation(self): + # GH 13500 + index = date_range(datetime(2005, 1, 1), + datetime(2005, 1, 10), freq='D') + df = pd.DataFrame({'date': index, + 'a': np.arange(len(index), dtype=np.int64)}, + index=pd.MultiIndex.from_arrays([ + np.arange(len(index), dtype=np.int64), + index], names=['v', 'd'])) + df_exp = pd.DataFrame({'a': np.arange(len(index), dtype=np.int64)}, + index=index) + + # non DatetimeIndex + with pytest.raises(TypeError): + df.resample('2D', level='v') + + with pytest.raises(ValueError): + df.resample('2D', on='date', level='d') + + with pytest.raises(TypeError): + df.resample('2D', on=['a', 'date']) + + with pytest.raises(KeyError): + df.resample('2D', level=['a', 'date']) + + # upsampling not allowed + with pytest.raises(ValueError): + df.resample('2D', level='d').asfreq() + + with pytest.raises(ValueError): + df.resample('2D', on='date').asfreq() + + exp = df_exp.resample('2D').sum() + exp.index.name = 'date' + assert_frame_equal(exp, df.resample('2D', on='date').sum()) + + exp.index.name = 'd' + assert_frame_equal(exp, df.resample('2D', level='d').sum()) diff --git a/pandas/tests/resample/test_resampler_grouper.py b/pandas/tests/resample/test_resampler_grouper.py new file mode 100644 index 0000000000000..9b7019024f533 --- /dev/null +++ b/pandas/tests/resample/test_resampler_grouper.py @@ -0,0 +1,238 @@ +# pylint: disable=E1101 + +from textwrap import dedent + +import numpy as np + +import pandas as pd +import pandas.util.testing as tm +from pandas import DataFrame +from pandas.compat import range +from pandas.core.indexes.datetimes import date_range +from pandas.util.testing import assert_frame_equal, assert_series_equal + + +class TestResamplerGrouper(object): + + def setup_method(self, method): + self.frame = DataFrame({'A': [1] * 20 + [2] * 12 + [3] * 8, + 'B': np.arange(40)}, + index=date_range('1/1/2000', + freq='s', + periods=40)) + + def test_back_compat_v180(self): + + df = self.frame + for how in ['sum', 'mean', 'prod', 'min', 'max', 'var', 'std']: + with tm.assert_produces_warning(FutureWarning, + check_stacklevel=False): + result = df.groupby('A').resample('4s', how=how) + expected = getattr(df.groupby('A').resample('4s'), how)() + assert_frame_equal(result, expected) + + with tm.assert_produces_warning(FutureWarning, + check_stacklevel=False): + result = df.groupby('A').resample('4s', how='mean', + fill_method='ffill') + expected = df.groupby('A').resample('4s').mean().ffill() + assert_frame_equal(result, expected) + + def test_tab_complete_ipython6_warning(self, ip): + from IPython.core.completer import provisionalcompleter + code = dedent("""\ + import pandas.util.testing as tm + s = tm.makeTimeSeries() + rs = s.resample("D") + """) + ip.run_code(code) + + with tm.assert_produces_warning(None): + with provisionalcompleter('ignore'): + list(ip.Completer.completions('rs.', 1)) + + def test_deferred_with_groupby(self): + + # GH 12486 + # support deferred resample ops with groupby + data = [['2010-01-01', 'A', 2], ['2010-01-02', 'A', 3], + ['2010-01-05', 'A', 8], ['2010-01-10', 'A', 7], + ['2010-01-13', 'A', 3], ['2010-01-01', 'B', 5], + ['2010-01-03', 'B', 2], ['2010-01-04', 'B', 1], + ['2010-01-11', 'B', 7], ['2010-01-14', 'B', 3]] + + df = DataFrame(data, columns=['date', 'id', 'score']) + df.date = pd.to_datetime(df.date) + f = lambda x: x.set_index('date').resample('D').asfreq() + expected = df.groupby('id').apply(f) + result = 
df.set_index('date').groupby('id').resample('D').asfreq() + assert_frame_equal(result, expected) + + df = DataFrame({'date': pd.date_range(start='2016-01-01', + periods=4, + freq='W'), + 'group': [1, 1, 2, 2], + 'val': [5, 6, 7, 8]}).set_index('date') + + f = lambda x: x.resample('1D').ffill() + expected = df.groupby('group').apply(f) + result = df.groupby('group').resample('1D').ffill() + assert_frame_equal(result, expected) + + def test_getitem(self): + g = self.frame.groupby('A') + + expected = g.B.apply(lambda x: x.resample('2s').mean()) + + result = g.resample('2s').B.mean() + assert_series_equal(result, expected) + + result = g.B.resample('2s').mean() + assert_series_equal(result, expected) + + result = g.resample('2s').mean().B + assert_series_equal(result, expected) + + def test_getitem_multiple(self): + + # GH 13174 + # multiple calls after selection causing an issue with aliasing + data = [{'id': 1, 'buyer': 'A'}, {'id': 2, 'buyer': 'B'}] + df = pd.DataFrame(data, index=pd.date_range('2016-01-01', periods=2)) + r = df.groupby('id').resample('1D') + result = r['buyer'].count() + expected = pd.Series([1, 1], + index=pd.MultiIndex.from_tuples( + [(1, pd.Timestamp('2016-01-01')), + (2, pd.Timestamp('2016-01-02'))], + names=['id', None]), + name='buyer') + assert_series_equal(result, expected) + + result = r['buyer'].count() + assert_series_equal(result, expected) + + def test_nearest(self): + + # GH 17496 + # Resample nearest + index = pd.date_range('1/1/2000', periods=3, freq='T') + result = pd.Series(range(3), index=index).resample('20s').nearest() + + expected = pd.Series( + np.array([0, 0, 1, 1, 1, 2, 2]), + index=pd.DatetimeIndex( + ['2000-01-01 00:00:00', '2000-01-01 00:00:20', + '2000-01-01 00:00:40', '2000-01-01 00:01:00', + '2000-01-01 00:01:20', '2000-01-01 00:01:40', + '2000-01-01 00:02:00'], + dtype='datetime64[ns]', + freq='20S')) + assert_series_equal(result, expected) + + def test_methods(self): + g = self.frame.groupby('A') + r = g.resample('2s') + + for f in ['first', 'last', 'median', 'sem', 'sum', 'mean', + 'min', 'max']: + result = getattr(r, f)() + expected = g.apply(lambda x: getattr(x.resample('2s'), f)()) + assert_frame_equal(result, expected) + + for f in ['size']: + result = getattr(r, f)() + expected = g.apply(lambda x: getattr(x.resample('2s'), f)()) + assert_series_equal(result, expected) + + for f in ['count']: + result = getattr(r, f)() + expected = g.apply(lambda x: getattr(x.resample('2s'), f)()) + assert_frame_equal(result, expected) + + # series only + for f in ['nunique']: + result = getattr(r.B, f)() + expected = g.B.apply(lambda x: getattr(x.resample('2s'), f)()) + assert_series_equal(result, expected) + + for f in ['nearest', 'backfill', 'ffill', 'asfreq']: + result = getattr(r, f)() + expected = g.apply(lambda x: getattr(x.resample('2s'), f)()) + assert_frame_equal(result, expected) + + result = r.ohlc() + expected = g.apply(lambda x: x.resample('2s').ohlc()) + assert_frame_equal(result, expected) + + for f in ['std', 'var']: + result = getattr(r, f)(ddof=1) + expected = g.apply(lambda x: getattr(x.resample('2s'), f)(ddof=1)) + assert_frame_equal(result, expected) + + def test_apply(self): + + g = self.frame.groupby('A') + r = g.resample('2s') + + # reduction + expected = g.resample('2s').sum() + + def f(x): + return x.resample('2s').sum() + + result = r.apply(f) + assert_frame_equal(result, expected) + + def f(x): + return x.resample('2s').apply(lambda y: y.sum()) + + result = g.apply(f) + assert_frame_equal(result, expected) + + def 
test_resample_groupby_with_label(self): + # GH 13235 + index = date_range('2000-01-01', freq='2D', periods=5) + df = DataFrame(index=index, + data={'col0': [0, 0, 1, 1, 2], 'col1': [1, 1, 1, 1, 1]} + ) + result = df.groupby('col0').resample('1W', label='left').sum() + + mi = [np.array([0, 0, 1, 2]), + pd.to_datetime(np.array(['1999-12-26', '2000-01-02', + '2000-01-02', '2000-01-02']) + ) + ] + mindex = pd.MultiIndex.from_arrays(mi, names=['col0', None]) + expected = DataFrame(data={'col0': [0, 0, 2, 2], 'col1': [1, 1, 2, 1]}, + index=mindex + ) + + assert_frame_equal(result, expected) + + def test_consistency_with_window(self): + + # consistent return values with window + df = self.frame + expected = pd.Int64Index([1, 2, 3], name='A') + result = df.groupby('A').resample('2s').mean() + assert result.index.nlevels == 2 + tm.assert_index_equal(result.index.levels[0], expected) + + result = df.groupby('A').rolling(20).mean() + assert result.index.nlevels == 2 + tm.assert_index_equal(result.index.levels[0], expected) + + def test_median_duplicate_columns(self): + # GH 14233 + + df = pd.DataFrame(np.random.randn(20, 3), + columns=list('aaa'), + index=pd.date_range('2012-01-01', + periods=20, freq='s')) + df2 = df.copy() + df2.columns = ['a', 'b', 'c'] + expected = df2.resample('5s').median() + result = df.resample('5s').median() + expected.columns = result.columns + assert_frame_equal(result, expected) diff --git a/pandas/tests/resample/test_time_grouper.py b/pandas/tests/resample/test_time_grouper.py new file mode 100644 index 0000000000000..dfee28e13b6a3 --- /dev/null +++ b/pandas/tests/resample/test_time_grouper.py @@ -0,0 +1,245 @@ +# pylint: disable=E1101 + +from datetime import datetime +from warnings import catch_warnings + +import numpy as np + +import pandas as pd +import pandas.util.testing as tm +from pandas import DataFrame, Panel, Series +from pandas.compat import zip +from pandas.core.indexes.datetimes import date_range +from pandas.core.resample import TimeGrouper +from pandas.util.testing import assert_frame_equal, assert_series_equal + + +class TestTimeGrouper(object): + + def setup_method(self, method): + self.ts = Series(np.random.randn(1000), + index=date_range('1/1/2000', periods=1000)) + + def test_apply(self): + with tm.assert_produces_warning(FutureWarning, + check_stacklevel=False): + grouper = pd.TimeGrouper(freq='A', label='right', closed='right') + + grouped = self.ts.groupby(grouper) + + f = lambda x: x.sort_values()[-3:] + + applied = grouped.apply(f) + expected = self.ts.groupby(lambda x: x.year).apply(f) + + applied.index = applied.index.droplevel(0) + expected.index = expected.index.droplevel(0) + assert_series_equal(applied, expected) + + def test_count(self): + self.ts[::3] = np.nan + + expected = self.ts.groupby(lambda x: x.year).count() + + with tm.assert_produces_warning(FutureWarning, + check_stacklevel=False): + grouper = pd.TimeGrouper(freq='A', label='right', closed='right') + result = self.ts.groupby(grouper).count() + expected.index = result.index + assert_series_equal(result, expected) + + result = self.ts.resample('A').count() + expected.index = result.index + assert_series_equal(result, expected) + + def test_numpy_reduction(self): + result = self.ts.resample('A', closed='right').prod() + + expected = self.ts.groupby(lambda x: x.year).agg(np.prod) + expected.index = result.index + + assert_series_equal(result, expected) + + def test_apply_iteration(self): + # #2300 + N = 1000 + ind = pd.date_range(start="2000-01-01", freq="D", periods=N) + df = 
DataFrame({'open': 1, 'close': 2}, index=ind) + tg = TimeGrouper('M') + + _, grouper, _ = tg._get_grouper(df) + + # Errors + grouped = df.groupby(grouper, group_keys=False) + f = lambda df: df['close'] / df['open'] + + # it works! + result = grouped.apply(f) + tm.assert_index_equal(result.index, df.index) + + def test_panel_aggregation(self): + ind = pd.date_range('1/1/2000', periods=100) + data = np.random.randn(2, len(ind), 4) + + with catch_warnings(record=True): + wp = Panel(data, items=['Item1', 'Item2'], major_axis=ind, + minor_axis=['A', 'B', 'C', 'D']) + + tg = TimeGrouper('M', axis=1) + _, grouper, _ = tg._get_grouper(wp) + bingrouped = wp.groupby(grouper) + binagg = bingrouped.mean() + + def f(x): + assert (isinstance(x, Panel)) + return x.mean(1) + + result = bingrouped.agg(f) + tm.assert_panel_equal(result, binagg) + + def test_fails_on_no_datetime_index(self): + index_names = ('Int64Index', 'Index', 'Float64Index', 'MultiIndex') + index_funcs = (tm.makeIntIndex, + tm.makeUnicodeIndex, tm.makeFloatIndex, + lambda m: tm.makeCustomIndex(m, 2)) + n = 2 + for name, func in zip(index_names, index_funcs): + index = func(n) + df = DataFrame({'a': np.random.randn(n)}, index=index) + with tm.assert_raises_regex(TypeError, + "Only valid with " + "DatetimeIndex, TimedeltaIndex " + "or PeriodIndex, but got an " + "instance of %r" % name): + df.groupby(TimeGrouper('D')) + + def test_aaa_group_order(self): + # GH 12840 + # check that TimeGrouper performs stable sorts + n = 20 + data = np.random.randn(n, 4) + df = DataFrame(data, columns=['A', 'B', 'C', 'D']) + df['key'] = [datetime(2013, 1, 1), datetime(2013, 1, 2), + datetime(2013, 1, 3), datetime(2013, 1, 4), + datetime(2013, 1, 5)] * 4 + grouped = df.groupby(TimeGrouper(key='key', freq='D')) + + tm.assert_frame_equal(grouped.get_group(datetime(2013, 1, 1)), + df[::5]) + tm.assert_frame_equal(grouped.get_group(datetime(2013, 1, 2)), + df[1::5]) + tm.assert_frame_equal(grouped.get_group(datetime(2013, 1, 3)), + df[2::5]) + tm.assert_frame_equal(grouped.get_group(datetime(2013, 1, 4)), + df[3::5]) + tm.assert_frame_equal(grouped.get_group(datetime(2013, 1, 5)), + df[4::5]) + + def test_aggregate_normal(self): + # check that TimeGrouper's aggregation is identical to a normal groupby + + n = 20 + data = np.random.randn(n, 4) + normal_df = DataFrame(data, columns=['A', 'B', 'C', 'D']) + normal_df['key'] = [1, 2, 3, 4, 5] * 4 + + dt_df = DataFrame(data, columns=['A', 'B', 'C', 'D']) + dt_df['key'] = [datetime(2013, 1, 1), datetime(2013, 1, 2), + datetime(2013, 1, 3), datetime(2013, 1, 4), + datetime(2013, 1, 5)] * 4 + + normal_grouped = normal_df.groupby('key') + dt_grouped = dt_df.groupby(TimeGrouper(key='key', freq='D')) + + for func in ['min', 'max', 'prod', 'var', 'std', 'mean']: + expected = getattr(normal_grouped, func)() + dt_result = getattr(dt_grouped, func)() + expected.index = date_range(start='2013-01-01', freq='D', + periods=5, name='key') + assert_frame_equal(expected, dt_result) + + for func in ['count', 'sum']: + expected = getattr(normal_grouped, func)() + expected.index = date_range(start='2013-01-01', freq='D', + periods=5, name='key') + dt_result = getattr(dt_grouped, func)() + assert_frame_equal(expected, dt_result) + + # GH 7453 + for func in ['size']: + expected = getattr(normal_grouped, func)() + expected.index = date_range(start='2013-01-01', freq='D', + periods=5, name='key') + dt_result = getattr(dt_grouped, func)() + assert_series_equal(expected, dt_result) + + # GH 7453 + for func in ['first', 'last']: + expected =
getattr(normal_grouped, func)() + expected.index = date_range(start='2013-01-01', freq='D', + periods=5, name='key') + dt_result = getattr(dt_grouped, func)() + assert_frame_equal(expected, dt_result) + + # when TimeGrouper is used, 'nth' doesn't work yet + + """ + for func in ['nth']: + expected = getattr(normal_grouped, func)(3) + expected.index = date_range(start='2013-01-01', + freq='D', periods=5, name='key') + dt_result = getattr(dt_grouped, func)(3) + assert_frame_equal(expected, dt_result) + """ + + def test_aggregate_with_nat(self): + # check that TimeGrouper's aggregation is identical to a normal groupby + + n = 20 + data = np.random.randn(n, 4).astype('int64') + normal_df = DataFrame(data, columns=['A', 'B', 'C', 'D']) + normal_df['key'] = [1, 2, np.nan, 4, 5] * 4 + + dt_df = DataFrame(data, columns=['A', 'B', 'C', 'D']) + dt_df['key'] = [datetime(2013, 1, 1), datetime(2013, 1, 2), pd.NaT, + datetime(2013, 1, 4), datetime(2013, 1, 5)] * 4 + + normal_grouped = normal_df.groupby('key') + dt_grouped = dt_df.groupby(TimeGrouper(key='key', freq='D')) + + for func in ['min', 'max', 'sum', 'prod']: + normal_result = getattr(normal_grouped, func)() + dt_result = getattr(dt_grouped, func)() + pad = DataFrame([[np.nan, np.nan, np.nan, np.nan]], index=[3], + columns=['A', 'B', 'C', 'D']) + expected = normal_result.append(pad) + expected = expected.sort_index() + expected.index = date_range(start='2013-01-01', freq='D', + periods=5, name='key') + assert_frame_equal(expected, dt_result) + + for func in ['count']: + normal_result = getattr(normal_grouped, func)() + pad = DataFrame([[0, 0, 0, 0]], index=[3], + columns=['A', 'B', 'C', 'D']) + expected = normal_result.append(pad) + expected = expected.sort_index() + expected.index = date_range(start='2013-01-01', freq='D', + periods=5, name='key') + dt_result = getattr(dt_grouped, func)() + assert_frame_equal(expected, dt_result) + + for func in ['size']: + normal_result = getattr(normal_grouped, func)() + pad = Series([0], index=[3]) + expected = normal_result.append(pad) + expected = expected.sort_index() + expected.index = date_range(start='2013-01-01', freq='D', + periods=5, name='key') + dt_result = getattr(dt_grouped, func)() + assert_series_equal(expected, dt_result) + # GH 9925 + assert dt_result.index.name == 'key' + + # if NaT is included, 'var', 'std', 'mean', 'first', 'last' + # and 'nth' don't work yet diff --git a/pandas/tests/resample/test_timedelta.py b/pandas/tests/resample/test_timedelta.py new file mode 100644 index 0000000000000..6fbe01059ef34 --- /dev/null +++ b/pandas/tests/resample/test_timedelta.py @@ -0,0 +1,43 @@ +# pylint: disable=E1101 + +import numpy as np +import pytest + +from pandas import DataFrame, Series +from pandas.core.indexes.timedeltas import timedelta_range +from pandas.util.testing import assert_frame_equal + +from pandas.tests.resample.base import Base + + +class TestTimedeltaIndex(Base): + _index_factory = lambda x: timedelta_range + + @pytest.fixture + def _index_start(self): + return '1 day' + + @pytest.fixture + def _index_end(self): + return '10 day' + + @pytest.fixture + def _series_name(self): + return 'tdi' + + def create_series(self): + i = timedelta_range('1 day', + '10 day', freq='D') + + return Series(np.arange(len(i)), index=i, name='tdi') + + def test_asfreq_bug(self): + import datetime as dt + df = DataFrame(data=[1, 3], + index=[dt.timedelta(), dt.timedelta(minutes=3)]) + result = df.resample('1T').asfreq() + expected = DataFrame(data=[1, np.nan, np.nan, 3], + index=timedelta_range('0
day', + periods=4, + freq='1T')) + assert_frame_equal(result, expected) diff --git a/pandas/tests/test_resample.py b/pandas/tests/test_resample.py deleted file mode 100644 index cd15203eccd82..0000000000000 --- a/pandas/tests/test_resample.py +++ /dev/null @@ -1,3382 +0,0 @@ -# pylint: disable=E1101 - -from warnings import catch_warnings -from datetime import datetime, timedelta -from functools import partial -from textwrap import dedent - -import pytz -import pytest -import dateutil -import numpy as np - -import pandas as pd -import pandas.tseries.offsets as offsets -import pandas.util.testing as tm -from pandas import (Series, DataFrame, Panel, Index, isna, - notna, Timestamp) - -from pandas.core.dtypes.generic import ABCSeries, ABCDataFrame -from pandas.compat import range, lrange, zip, product, OrderedDict -from pandas.core.base import SpecificationError, AbstractMethodError -from pandas.errors import UnsupportedFunctionCall -from pandas.core.groupby import DataError -from pandas.tseries.frequencies import MONTHS, DAYS -from pandas.tseries.frequencies import to_offset -from pandas.core.indexes.datetimes import date_range -from pandas.tseries.offsets import Minute, BDay -from pandas.core.indexes.period import period_range, PeriodIndex, Period -from pandas.core.resample import (DatetimeIndex, TimeGrouper, - DatetimeIndexResampler) -from pandas.core.indexes.timedeltas import timedelta_range, TimedeltaIndex -from pandas.util.testing import (assert_series_equal, assert_almost_equal, - assert_frame_equal, assert_index_equal) -from pandas._libs.period import IncompatibleFrequency - -bday = BDay() - -# The various methods we support -downsample_methods = ['min', 'max', 'first', 'last', 'sum', 'mean', 'sem', - 'median', 'prod', 'var', 'ohlc'] -upsample_methods = ['count', 'size'] -series_methods = ['nunique'] -resample_methods = downsample_methods + upsample_methods + series_methods - - -def _simple_ts(start, end, freq='D'): - rng = date_range(start, end, freq=freq) - return Series(np.random.randn(len(rng)), index=rng) - - -def _simple_pts(start, end, freq='D'): - rng = period_range(start, end, freq=freq) - return Series(np.random.randn(len(rng)), index=rng) - - -class TestResampleAPI(object): - - def setup_method(self, method): - dti = DatetimeIndex(start=datetime(2005, 1, 1), - end=datetime(2005, 1, 10), freq='Min') - - self.series = Series(np.random.rand(len(dti)), dti) - self.frame = DataFrame( - {'A': self.series, 'B': self.series, 'C': np.arange(len(dti))}) - - def test_str(self): - - r = self.series.resample('H') - assert ('DatetimeIndexResampler [freq=<Hour>, axis=0, closed=left, ' - 'label=left, convention=start, base=0]' in str(r)) - - def test_api(self): - - r = self.series.resample('H') - result = r.mean() - assert isinstance(result, Series) - assert len(result) == 217 - - r = self.series.to_frame().resample('H') - result = r.mean() - assert isinstance(result, DataFrame) - assert len(result) == 217 - - def test_api_changes_v018(self): - - # change from .resample(....., how=...) 
- # to .resample(......).how() - - r = self.series.resample('H') - assert isinstance(r, DatetimeIndexResampler) - - for how in ['sum', 'mean', 'prod', 'min', 'max', 'var', 'std']: - with tm.assert_produces_warning(FutureWarning, - check_stacklevel=False): - result = self.series.resample('H', how=how) - expected = getattr(self.series.resample('H'), how)() - tm.assert_series_equal(result, expected) - - with tm.assert_produces_warning(FutureWarning, - check_stacklevel=False): - result = self.series.resample('H', how='ohlc') - expected = self.series.resample('H').ohlc() - tm.assert_frame_equal(result, expected) - - # compat for pandas-like methods - for how in ['sort_values', 'isna']: - with tm.assert_produces_warning(FutureWarning, - check_stacklevel=False): - getattr(r, how)() - - # invalids as these can be setting operations - r = self.series.resample('H') - pytest.raises(ValueError, lambda: r.iloc[0]) - pytest.raises(ValueError, lambda: r.iat[0]) - pytest.raises(ValueError, lambda: r.loc[0]) - pytest.raises(ValueError, lambda: r.loc[ - Timestamp('2013-01-01 00:00:00', offset='H')]) - pytest.raises(ValueError, lambda: r.at[ - Timestamp('2013-01-01 00:00:00', offset='H')]) - - def f(): - r[0] = 5 - - pytest.raises(ValueError, f) - - # str/repr - r = self.series.resample('H') - with tm.assert_produces_warning(None): - str(r) - with tm.assert_produces_warning(None): - repr(r) - - with tm.assert_produces_warning(FutureWarning, - check_stacklevel=False): - tm.assert_numpy_array_equal(np.array(r), np.array(r.mean())) - - # masquerade as Series/DataFrame as needed for API compat - assert isinstance(self.series.resample('H'), ABCSeries) - assert not isinstance(self.frame.resample('H'), ABCSeries) - assert not isinstance(self.series.resample('H'), ABCDataFrame) - assert isinstance(self.frame.resample('H'), ABCDataFrame) - - # bin numeric ops - for op in ['__add__', '__mul__', '__truediv__', '__div__', '__sub__']: - - if getattr(self.series, op, None) is None: - continue - r = self.series.resample('H') - - with tm.assert_produces_warning(FutureWarning, - check_stacklevel=False): - assert isinstance(getattr(r, op)(2), pd.Series) - - # unary numeric ops - for op in ['__pos__', '__neg__', '__abs__', '__inv__']: - - if getattr(self.series, op, None) is None: - continue - r = self.series.resample('H') - - with tm.assert_produces_warning(FutureWarning, - check_stacklevel=False): - assert isinstance(getattr(r, op)(), pd.Series) - - # comparison ops - for op in ['__lt__', '__le__', '__gt__', '__ge__', '__eq__', '__ne__']: - r = self.series.resample('H') - - with tm.assert_produces_warning(FutureWarning, - check_stacklevel=False): - assert isinstance(getattr(r, op)(2), pd.Series) - - # IPython introspection shouldn't trigger warning GH 13618 - for op in ['_repr_json', '_repr_latex', - '_ipython_canary_method_should_not_exist_']: - r = self.series.resample('H') - with tm.assert_produces_warning(None): - getattr(r, op, None) - - # getitem compat - df = self.series.to_frame('foo') - - # same as prior versions for DataFrame - pytest.raises(KeyError, lambda: df.resample('H')[0]) - - # compat for Series - # but we cannot be sure that we need a warning here - with tm.assert_produces_warning(FutureWarning, - check_stacklevel=False): - result = self.series.resample('H')[0] - expected = self.series.resample('H').mean()[0] - assert result == expected - - with tm.assert_produces_warning(FutureWarning, - check_stacklevel=False): - result = self.series.resample('H')['2005-01-09 23:00:00'] - expected = 
self.series.resample('H').mean()['2005-01-09 23:00:00'] - assert result == expected - - def test_groupby_resample_api(self): - - # GH 12448 - # .groupby(...).resample(...) hitting warnings - # when appropriate - df = DataFrame({'date': pd.date_range(start='2016-01-01', - periods=4, - freq='W'), - 'group': [1, 1, 2, 2], - 'val': [5, 6, 7, 8]}).set_index('date') - - # replication step - i = pd.date_range('2016-01-03', periods=8).tolist() + \ - pd.date_range('2016-01-17', periods=8).tolist() - index = pd.MultiIndex.from_arrays([[1] * 8 + [2] * 8, i], - names=['group', 'date']) - expected = DataFrame({'val': [5] * 7 + [6] + [7] * 7 + [8]}, - index=index) - result = df.groupby('group').apply( - lambda x: x.resample('1D').ffill())[['val']] - assert_frame_equal(result, expected) - - def test_groupby_resample_on_api(self): - - # GH 15021 - # .groupby(...).resample(on=...) results in an unexpected - # keyword warning. - df = pd.DataFrame({'key': ['A', 'B'] * 5, - 'dates': pd.date_range('2016-01-01', periods=10), - 'values': np.random.randn(10)}) - - expected = df.set_index('dates').groupby('key').resample('D').mean() - - result = df.groupby('key').resample('D', on='dates').mean() - assert_frame_equal(result, expected) - - def test_plot_api(self): - tm._skip_if_no_mpl() - - # .resample(....).plot(...) - # hitting warnings - # GH 12448 - s = Series(np.random.randn(60), - index=date_range('2016-01-01', periods=60, freq='1min')) - with tm.assert_produces_warning(FutureWarning, - check_stacklevel=False): - result = s.resample('15min').plot() - tm.assert_is_valid_plot_return_object(result) - - with tm.assert_produces_warning(FutureWarning, - check_stacklevel=False): - result = s.resample('15min', how='sum').plot() - tm.assert_is_valid_plot_return_object(result) - - def test_getitem(self): - - r = self.frame.resample('H') - tm.assert_index_equal(r._selected_obj.columns, self.frame.columns) - - r = self.frame.resample('H')['B'] - assert r._selected_obj.name == self.frame.columns[1] - - # technically this is allowed - r = self.frame.resample('H')['A', 'B'] - tm.assert_index_equal(r._selected_obj.columns, - self.frame.columns[[0, 1]]) - - r = self.frame.resample('H')['A', 'B'] - tm.assert_index_equal(r._selected_obj.columns, - self.frame.columns[[0, 1]]) - - def test_select_bad_cols(self): - - g = self.frame.resample('H') - pytest.raises(KeyError, g.__getitem__, ['D']) - - pytest.raises(KeyError, g.__getitem__, ['A', 'D']) - with tm.assert_raises_regex(KeyError, '^[^A]+$'): - # A should not be referenced as a bad column... - # will have to rethink regex if you change message! 
- g[['A', 'D']] - - def test_attribute_access(self): - - r = self.frame.resample('H') - tm.assert_series_equal(r.A.sum(), r['A'].sum()) - - # getting - pytest.raises(AttributeError, lambda: r.F) - - # setting - def f(): - r.F = 'bah' - - pytest.raises(ValueError, f) - - def test_api_compat_before_use(self): - - # make sure that we are setting the binner - # on these attributes - for attr in ['groups', 'ngroups', 'indices']: - rng = pd.date_range('1/1/2012', periods=100, freq='S') - ts = pd.Series(np.arange(len(rng)), index=rng) - rs = ts.resample('30s') - - # before use - getattr(rs, attr) - - # after grouper is initialized is ok - rs.mean() - getattr(rs, attr) - - def tests_skip_nuisance(self): - - df = self.frame - df['D'] = 'foo' - r = df.resample('H') - result = r[['A', 'B']].sum() - expected = pd.concat([r.A.sum(), r.B.sum()], axis=1) - assert_frame_equal(result, expected) - - expected = r[['A', 'B', 'C']].sum() - result = r.sum() - assert_frame_equal(result, expected) - - def test_downsample_but_actually_upsampling(self): - - # this is reindex / asfreq - rng = pd.date_range('1/1/2012', periods=100, freq='S') - ts = pd.Series(np.arange(len(rng), dtype='int64'), index=rng) - result = ts.resample('20s').asfreq() - expected = Series([0, 20, 40, 60, 80], - index=pd.date_range('2012-01-01 00:00:00', - freq='20s', - periods=5)) - assert_series_equal(result, expected) - - def test_combined_up_downsampling_of_irregular(self): - - # since we are reallydoing an operation like this - # ts2.resample('2s').mean().ffill() - # preserve these semantics - - rng = pd.date_range('1/1/2012', periods=100, freq='S') - ts = pd.Series(np.arange(len(rng)), index=rng) - ts2 = ts.iloc[[0, 1, 2, 3, 5, 7, 11, 15, 16, 25, 30]] - - with tm.assert_produces_warning(FutureWarning, - check_stacklevel=False): - result = ts2.resample('2s', how='mean', fill_method='ffill') - expected = ts2.resample('2s').mean().ffill() - assert_series_equal(result, expected) - - def test_transform(self): - - r = self.series.resample('20min') - expected = self.series.groupby( - pd.Grouper(freq='20min')).transform('mean') - result = r.transform('mean') - assert_series_equal(result, expected) - - def test_fillna(self): - - # need to upsample here - rng = pd.date_range('1/1/2012', periods=10, freq='2S') - ts = pd.Series(np.arange(len(rng), dtype='int64'), index=rng) - r = ts.resample('s') - - expected = r.ffill() - result = r.fillna(method='ffill') - assert_series_equal(result, expected) - - expected = r.bfill() - result = r.fillna(method='bfill') - assert_series_equal(result, expected) - - with pytest.raises(ValueError): - r.fillna(0) - - def test_apply_without_aggregation(self): - - # both resample and groupby should work w/o aggregation - r = self.series.resample('20min') - g = self.series.groupby(pd.Grouper(freq='20min')) - - for t in [g, r]: - result = t.apply(lambda x: x) - assert_series_equal(result, self.series) - - def test_agg_consistency(self): - - # make sure that we are consistent across - # similar aggregations with and w/o selection list - df = DataFrame(np.random.randn(1000, 3), - index=pd.date_range('1/1/2012', freq='S', periods=1000), - columns=['A', 'B', 'C']) - - r = df.resample('3T') - - with tm.assert_produces_warning(FutureWarning, - check_stacklevel=False): - expected = r[['A', 'B', 'C']].agg({'r1': 'mean', 'r2': 'sum'}) - result = r.agg({'r1': 'mean', 'r2': 'sum'}) - assert_frame_equal(result, expected) - - # TODO: once GH 14008 is fixed, move these tests into - # `Base` test class - def test_agg(self): - # test with 
all three Resampler apis and TimeGrouper - - np.random.seed(1234) - index = date_range(datetime(2005, 1, 1), - datetime(2005, 1, 10), freq='D') - index.name = 'date' - df = pd.DataFrame(np.random.rand(10, 2), - columns=list('AB'), - index=index) - df_col = df.reset_index() - df_mult = df_col.copy() - df_mult.index = pd.MultiIndex.from_arrays([range(10), df.index], - names=['index', 'date']) - r = df.resample('2D') - cases = [ - r, - df_col.resample('2D', on='date'), - df_mult.resample('2D', level='date'), - df.groupby(pd.Grouper(freq='2D')) - ] - - a_mean = r['A'].mean() - a_std = r['A'].std() - a_sum = r['A'].sum() - b_mean = r['B'].mean() - b_std = r['B'].std() - b_sum = r['B'].sum() - - expected = pd.concat([a_mean, a_std, b_mean, b_std], axis=1) - expected.columns = pd.MultiIndex.from_product([['A', 'B'], - ['mean', 'std']]) - for t in cases: - result = t.aggregate([np.mean, np.std]) - assert_frame_equal(result, expected) - - expected = pd.concat([a_mean, b_std], axis=1) - for t in cases: - result = t.aggregate({'A': np.mean, - 'B': np.std}) - assert_frame_equal(result, expected, check_like=True) - - expected = pd.concat([a_mean, a_std], axis=1) - expected.columns = pd.MultiIndex.from_tuples([('A', 'mean'), - ('A', 'std')]) - for t in cases: - result = t.aggregate({'A': ['mean', 'std']}) - assert_frame_equal(result, expected) - - expected = pd.concat([a_mean, a_sum], axis=1) - expected.columns = ['mean', 'sum'] - for t in cases: - result = t['A'].aggregate(['mean', 'sum']) - assert_frame_equal(result, expected) - - expected = pd.concat([a_mean, a_sum], axis=1) - expected.columns = pd.MultiIndex.from_tuples([('A', 'mean'), - ('A', 'sum')]) - for t in cases: - with tm.assert_produces_warning(FutureWarning, - check_stacklevel=False): - result = t.aggregate({'A': {'mean': 'mean', 'sum': 'sum'}}) - assert_frame_equal(result, expected, check_like=True) - - expected = pd.concat([a_mean, a_sum, b_mean, b_sum], axis=1) - expected.columns = pd.MultiIndex.from_tuples([('A', 'mean'), - ('A', 'sum'), - ('B', 'mean2'), - ('B', 'sum2')]) - for t in cases: - with tm.assert_produces_warning(FutureWarning, - check_stacklevel=False): - result = t.aggregate({'A': {'mean': 'mean', 'sum': 'sum'}, - 'B': {'mean2': 'mean', 'sum2': 'sum'}}) - assert_frame_equal(result, expected, check_like=True) - - expected = pd.concat([a_mean, a_std, b_mean, b_std], axis=1) - expected.columns = pd.MultiIndex.from_tuples([('A', 'mean'), - ('A', 'std'), - ('B', 'mean'), - ('B', 'std')]) - for t in cases: - result = t.aggregate({'A': ['mean', 'std'], - 'B': ['mean', 'std']}) - assert_frame_equal(result, expected, check_like=True) - - expected = pd.concat([a_mean, a_sum, b_mean, b_sum], axis=1) - expected.columns = pd.MultiIndex.from_tuples([('r1', 'A', 'mean'), - ('r1', 'A', 'sum'), - ('r2', 'B', 'mean'), - ('r2', 'B', 'sum')]) - - def test_agg_misc(self): - # test with all three Resampler apis and TimeGrouper - - np.random.seed(1234) - index = date_range(datetime(2005, 1, 1), - datetime(2005, 1, 10), freq='D') - index.name = 'date' - df = pd.DataFrame(np.random.rand(10, 2), - columns=list('AB'), - index=index) - df_col = df.reset_index() - df_mult = df_col.copy() - df_mult.index = pd.MultiIndex.from_arrays([range(10), df.index], - names=['index', 'date']) - - r = df.resample('2D') - cases = [ - r, - df_col.resample('2D', on='date'), - df_mult.resample('2D', level='date'), - df.groupby(pd.Grouper(freq='2D')) - ] - - # passed lambda - for t in cases: - result = t.agg({'A': np.sum, - 'B': lambda x: np.std(x, ddof=1)}) - rcustom = 
t['B'].apply(lambda x: np.std(x, ddof=1)) - expected = pd.concat([r['A'].sum(), rcustom], axis=1) - assert_frame_equal(result, expected, check_like=True) - - # agg with renamers - expected = pd.concat([t['A'].sum(), - t['B'].sum(), - t['A'].mean(), - t['B'].mean()], - axis=1) - expected.columns = pd.MultiIndex.from_tuples([('result1', 'A'), - ('result1', 'B'), - ('result2', 'A'), - ('result2', 'B')]) - - for t in cases: - with tm.assert_produces_warning(FutureWarning, - check_stacklevel=False): - result = t[['A', 'B']].agg(OrderedDict([('result1', np.sum), - ('result2', np.mean)])) - assert_frame_equal(result, expected, check_like=True) - - # agg with different hows - expected = pd.concat([t['A'].sum(), - t['A'].std(), - t['B'].mean(), - t['B'].std()], - axis=1) - expected.columns = pd.MultiIndex.from_tuples([('A', 'sum'), - ('A', 'std'), - ('B', 'mean'), - ('B', 'std')]) - for t in cases: - result = t.agg(OrderedDict([('A', ['sum', 'std']), - ('B', ['mean', 'std'])])) - assert_frame_equal(result, expected, check_like=True) - - # equivalent of using a selection list / or not - for t in cases: - result = t[['A', 'B']].agg({'A': ['sum', 'std'], - 'B': ['mean', 'std']}) - assert_frame_equal(result, expected, check_like=True) - - # series like aggs - for t in cases: - with tm.assert_produces_warning(FutureWarning, - check_stacklevel=False): - result = t['A'].agg({'A': ['sum', 'std']}) - expected = pd.concat([t['A'].sum(), - t['A'].std()], - axis=1) - expected.columns = pd.MultiIndex.from_tuples([('A', 'sum'), - ('A', 'std')]) - assert_frame_equal(result, expected, check_like=True) - - expected = pd.concat([t['A'].agg(['sum', 'std']), - t['A'].agg(['mean', 'std'])], - axis=1) - expected.columns = pd.MultiIndex.from_tuples([('A', 'sum'), - ('A', 'std'), - ('B', 'mean'), - ('B', 'std')]) - with tm.assert_produces_warning(FutureWarning, - check_stacklevel=False): - result = t['A'].agg({'A': ['sum', 'std'], - 'B': ['mean', 'std']}) - assert_frame_equal(result, expected, check_like=True) - - # errors - # invalid names in the agg specification - for t in cases: - def f(): - with tm.assert_produces_warning(FutureWarning, - check_stacklevel=False): - t[['A']].agg({'A': ['sum', 'std'], - 'B': ['mean', 'std']}) - - pytest.raises(SpecificationError, f) - - def test_agg_nested_dicts(self): - - np.random.seed(1234) - index = date_range(datetime(2005, 1, 1), - datetime(2005, 1, 10), freq='D') - index.name = 'date' - df = pd.DataFrame(np.random.rand(10, 2), - columns=list('AB'), - index=index) - df_col = df.reset_index() - df_mult = df_col.copy() - df_mult.index = pd.MultiIndex.from_arrays([range(10), df.index], - names=['index', 'date']) - r = df.resample('2D') - cases = [ - r, - df_col.resample('2D', on='date'), - df_mult.resample('2D', level='date'), - df.groupby(pd.Grouper(freq='2D')) - ] - - for t in cases: - def f(): - t.aggregate({'r1': {'A': ['mean', 'sum']}, - 'r2': {'B': ['mean', 'sum']}}) - pytest.raises(ValueError, f) - - for t in cases: - expected = pd.concat([t['A'].mean(), t['A'].std(), t['B'].mean(), - t['B'].std()], axis=1) - expected.columns = pd.MultiIndex.from_tuples([('ra', 'mean'), ( - 'ra', 'std'), ('rb', 'mean'), ('rb', 'std')]) - - with tm.assert_produces_warning(FutureWarning, - check_stacklevel=False): - result = t[['A', 'B']].agg({'A': {'ra': ['mean', 'std']}, - 'B': {'rb': ['mean', 'std']}}) - assert_frame_equal(result, expected, check_like=True) - - with tm.assert_produces_warning(FutureWarning, - check_stacklevel=False): - result = t.agg({'A': {'ra': ['mean', 'std']}, - 'B': 
{'rb': ['mean', 'std']}}) - assert_frame_equal(result, expected, check_like=True) - - def test_selection_api_validation(self): - # GH 13500 - index = date_range(datetime(2005, 1, 1), - datetime(2005, 1, 10), freq='D') - df = pd.DataFrame({'date': index, - 'a': np.arange(len(index), dtype=np.int64)}, - index=pd.MultiIndex.from_arrays([ - np.arange(len(index), dtype=np.int64), - index], names=['v', 'd'])) - df_exp = pd.DataFrame({'a': np.arange(len(index), dtype=np.int64)}, - index=index) - - # non DatetimeIndex - with pytest.raises(TypeError): - df.resample('2D', level='v') - - with pytest.raises(ValueError): - df.resample('2D', on='date', level='d') - - with pytest.raises(TypeError): - df.resample('2D', on=['a', 'date']) - - with pytest.raises(KeyError): - df.resample('2D', level=['a', 'date']) - - # upsampling not allowed - with pytest.raises(ValueError): - df.resample('2D', level='d').asfreq() - - with pytest.raises(ValueError): - df.resample('2D', on='date').asfreq() - - exp = df_exp.resample('2D').sum() - exp.index.name = 'date' - assert_frame_equal(exp, df.resample('2D', on='date').sum()) - - exp.index.name = 'd' - assert_frame_equal(exp, df.resample('2D', level='d').sum()) - - -class Base(object): - """ - base class for resampling testing, calling - .create_series() generates a series of each index type - """ - - def create_index(self, *args, **kwargs): - """ return the _index_factory created using the args, kwargs """ - factory = self._index_factory() - return factory(*args, **kwargs) - - @pytest.fixture - def _index_start(self): - return datetime(2005, 1, 1) - - @pytest.fixture - def _index_end(self): - return datetime(2005, 1, 10) - - @pytest.fixture - def _index_freq(self): - return 'D' - - @pytest.fixture - def index(self, _index_start, _index_end, _index_freq): - return self.create_index(_index_start, _index_end, freq=_index_freq) - - @pytest.fixture - def _series_name(self): - raise AbstractMethodError(self) - - @pytest.fixture - def _static_values(self, index): - return np.arange(len(index)) - - @pytest.fixture - def series(self, index, _series_name, _static_values): - return Series(_static_values, index=index, name=_series_name) - - @pytest.fixture - def frame(self, index, _static_values): - return DataFrame({'value': _static_values}, index=index) - - @pytest.fixture(params=[Series, DataFrame]) - def series_and_frame(self, request, index, _series_name, _static_values): - if request.param == Series: - return Series(_static_values, index=index, name=_series_name) - if request.param == DataFrame: - return DataFrame({'value': _static_values}, index=index) - - @pytest.mark.parametrize('freq', ['2D', '1H']) - def test_asfreq(self, series_and_frame, freq): - obj = series_and_frame - - result = obj.resample(freq).asfreq() - if freq == '2D': - new_index = obj.index.take(np.arange(0, len(obj.index), 2)) - new_index.freq = to_offset('2D') - else: - new_index = self.create_index(obj.index[0], obj.index[-1], - freq=freq) - expected = obj.reindex(new_index) - assert_almost_equal(result, expected) - - def test_asfreq_fill_value(self): - # test for fill value during resampling, issue 3715 - - s = self.create_series() - - result = s.resample('1H').asfreq() - new_index = self.create_index(s.index[0], s.index[-1], freq='1H') - expected = s.reindex(new_index) - assert_series_equal(result, expected) - - frame = s.to_frame('value') - frame.iloc[1] = None - result = frame.resample('1H').asfreq(fill_value=4.0) - new_index = self.create_index(frame.index[0], - frame.index[-1], freq='1H') - expected 
= frame.reindex(new_index, fill_value=4.0) - assert_frame_equal(result, expected) - - def test_resample_interpolate(self): - # # 12925 - df = self.create_series().to_frame('value') - assert_frame_equal( - df.resample('1T').asfreq().interpolate(), - df.resample('1T').interpolate()) - - def test_raises_on_non_datetimelike_index(self): - # this is a non datetimelike index - xp = DataFrame() - pytest.raises(TypeError, lambda: xp.resample('A').mean()) - - def test_resample_empty_series(self): - # GH12771 & GH12868 - - s = self.create_series()[:0] - - for freq in ['M', 'D', 'H']: - # need to test for ohlc from GH13083 - methods = [method for method in resample_methods - if method != 'ohlc'] - for method in methods: - result = getattr(s.resample(freq), method)() - - expected = s.copy() - expected.index = s.index._shallow_copy(freq=freq) - assert_index_equal(result.index, expected.index) - assert result.index.freq == expected.index.freq - assert_series_equal(result, expected, check_dtype=False) - - def test_resample_empty_dataframe(self): - # GH13212 - index = self.create_series().index[:0] - f = DataFrame(index=index) - - for freq in ['M', 'D', 'H']: - # count retains dimensions too - methods = downsample_methods + upsample_methods - for method in methods: - result = getattr(f.resample(freq), method)() - if method != 'size': - expected = f.copy() - else: - # GH14962 - expected = Series([]) - - expected.index = f.index._shallow_copy(freq=freq) - assert_index_equal(result.index, expected.index) - assert result.index.freq == expected.index.freq - assert_almost_equal(result, expected, check_dtype=False) - - # test size for GH13212 (currently stays as df) - - def test_resample_empty_dtypes(self): - - # Empty series were sometimes causing a segfault (for the functions - # with Cython bounds-checking disabled) or an IndexError. We just run - # them to ensure they no longer do. 
(GH #10228) - for index in tm.all_timeseries_index_generator(0): - for dtype in (np.float, np.int, np.object, 'datetime64[ns]'): - for how in downsample_methods + upsample_methods: - empty_series = pd.Series([], index, dtype) - try: - getattr(empty_series.resample('d'), how)() - except DataError: - # Ignore these since some combinations are invalid - # (ex: doing mean with dtype of np.object) - pass - - def test_resample_loffset_arg_type(self): - # GH 13218, 15002 - df = self.create_series().to_frame('value') - expected_means = [df.values[i:i + 2].mean() - for i in range(0, len(df.values), 2)] - expected_index = self.create_index(df.index[0], - periods=len(df.index) / 2, - freq='2D') - - # loffset coerces PeriodIndex to DateTimeIndex - if isinstance(expected_index, PeriodIndex): - expected_index = expected_index.to_timestamp() - - expected_index += timedelta(hours=2) - expected = DataFrame({'value': expected_means}, index=expected_index) - - for arg in ['mean', {'value': 'mean'}, ['mean']]: - - result_agg = df.resample('2D', loffset='2H').agg(arg) - - with tm.assert_produces_warning(FutureWarning, - check_stacklevel=False): - result_how = df.resample('2D', how=arg, loffset='2H') - - if isinstance(arg, list): - expected.columns = pd.MultiIndex.from_tuples([('value', - 'mean')]) - - # GH 13022, 7687 - TODO: fix resample w/ TimedeltaIndex - if isinstance(expected.index, TimedeltaIndex): - with pytest.raises(AssertionError): - assert_frame_equal(result_agg, expected) - assert_frame_equal(result_how, expected) - else: - assert_frame_equal(result_agg, expected) - assert_frame_equal(result_how, expected) - - def test_apply_to_empty_series(self): - # GH 14313 - series = self.create_series()[:0] - - for freq in ['M', 'D', 'H']: - result = series.resample(freq).apply(lambda x: 1) - expected = series.resample(freq).apply(np.sum) - - assert_series_equal(result, expected, check_dtype=False) - - -class TestDatetimeIndex(Base): - _index_factory = lambda x: date_range - - @pytest.fixture - def _series_name(self): - return 'dti' - - def setup_method(self, method): - dti = DatetimeIndex(start=datetime(2005, 1, 1), - end=datetime(2005, 1, 10), freq='Min') - - self.series = Series(np.random.rand(len(dti)), dti) - - def create_series(self): - i = date_range(datetime(2005, 1, 1), - datetime(2005, 1, 10), freq='D') - - return Series(np.arange(len(i)), index=i, name='dti') - - def test_custom_grouper(self): - - dti = DatetimeIndex(freq='Min', start=datetime(2005, 1, 1), - end=datetime(2005, 1, 10)) - - s = Series(np.array([1] * len(dti)), index=dti, dtype='int64') - - b = TimeGrouper(Minute(5)) - g = s.groupby(b) - - # check all cython functions work - funcs = ['add', 'mean', 'prod', 'ohlc', 'min', 'max', 'var'] - for f in funcs: - g._cython_agg_general(f) - - b = TimeGrouper(Minute(5), closed='right', label='right') - g = s.groupby(b) - # check all cython functions work - funcs = ['add', 'mean', 'prod', 'ohlc', 'min', 'max', 'var'] - for f in funcs: - g._cython_agg_general(f) - - assert g.ngroups == 2593 - assert notna(g.mean()).all() - - # construct expected val - arr = [1] + [5] * 2592 - idx = dti[0:-1:5] - idx = idx.append(dti[-1:]) - expect = Series(arr, index=idx) - - # GH2763 - return in put dtype if we can - result = g.agg(np.sum) - assert_series_equal(result, expect) - - df = DataFrame(np.random.rand(len(dti), 10), - index=dti, dtype='float64') - r = df.groupby(b).agg(np.sum) - - assert len(r.columns) == 10 - assert len(r.index) == 2593 - - def test_resample_basic(self): - rng = date_range('1/1/2000 
00:00:00', '1/1/2000 00:13:00', freq='min', - name='index') - s = Series(np.random.randn(14), index=rng) - result = s.resample('5min', closed='right', label='right').mean() - - exp_idx = date_range('1/1/2000', periods=4, freq='5min', name='index') - expected = Series([s[0], s[1:6].mean(), s[6:11].mean(), s[11:].mean()], - index=exp_idx) - assert_series_equal(result, expected) - assert result.index.name == 'index' - - result = s.resample('5min', closed='left', label='right').mean() - - exp_idx = date_range('1/1/2000 00:05', periods=3, freq='5min', - name='index') - expected = Series([s[:5].mean(), s[5:10].mean(), - s[10:].mean()], index=exp_idx) - assert_series_equal(result, expected) - - s = self.series - result = s.resample('5Min').last() - grouper = TimeGrouper(Minute(5), closed='left', label='left') - expect = s.groupby(grouper).agg(lambda x: x[-1]) - assert_series_equal(result, expect) - - def test_resample_how(self): - rng = date_range('1/1/2000 00:00:00', '1/1/2000 00:13:00', freq='min', - name='index') - s = Series(np.random.randn(14), index=rng) - grouplist = np.ones_like(s) - grouplist[0] = 0 - grouplist[1:6] = 1 - grouplist[6:11] = 2 - grouplist[11:] = 3 - args = downsample_methods - - def _ohlc(group): - if isna(group).all(): - return np.repeat(np.nan, 4) - return [group[0], group.max(), group.min(), group[-1]] - - inds = date_range('1/1/2000', periods=4, freq='5min', name='index') - - for arg in args: - if arg == 'ohlc': - func = _ohlc - else: - func = arg - try: - result = getattr(s.resample( - '5min', closed='right', label='right'), arg)() - - expected = s.groupby(grouplist).agg(func) - assert result.index.name == 'index' - if arg == 'ohlc': - expected = DataFrame(expected.values.tolist()) - expected.columns = ['open', 'high', 'low', 'close'] - expected.index = Index(inds, name='index') - assert_frame_equal(result, expected) - else: - expected.index = inds - assert_series_equal(result, expected) - except BaseException as exc: - - exc.args += ('how=%s' % arg,) - raise - - def test_numpy_compat(self): - # see gh-12811 - s = Series([1, 2, 3, 4, 5], index=date_range( - '20130101', periods=5, freq='s')) - r = s.resample('2s') - - msg = "numpy operations are not valid with resample" - - for func in ('min', 'max', 'sum', 'prod', - 'mean', 'var', 'std'): - tm.assert_raises_regex(UnsupportedFunctionCall, msg, - getattr(r, func), - func, 1, 2, 3) - tm.assert_raises_regex(UnsupportedFunctionCall, msg, - getattr(r, func), axis=1) - - def test_resample_how_callables(self): - # GH 7929 - data = np.arange(5, dtype=np.int64) - ind = pd.DatetimeIndex(start='2014-01-01', periods=len(data), freq='d') - df = pd.DataFrame({"A": data, "B": data}, index=ind) - - def fn(x, a=1): - return str(type(x)) - - class fn_class: - - def __call__(self, x): - return str(type(x)) - - df_standard = df.resample("M").apply(fn) - df_lambda = df.resample("M").apply(lambda x: str(type(x))) - df_partial = df.resample("M").apply(partial(fn)) - df_partial2 = df.resample("M").apply(partial(fn, a=2)) - df_class = df.resample("M").apply(fn_class()) - - assert_frame_equal(df_standard, df_lambda) - assert_frame_equal(df_standard, df_partial) - assert_frame_equal(df_standard, df_partial2) - assert_frame_equal(df_standard, df_class) - - def test_resample_with_timedeltas(self): - - expected = DataFrame({'A': np.arange(1480)}) - expected = expected.groupby(expected.index // 30).sum() - expected.index = pd.timedelta_range('0 days', freq='30T', periods=50) - - df = DataFrame({'A': np.arange(1480)}, index=pd.to_timedelta( - 
np.arange(1480), unit='T')) - result = df.resample('30T').sum() - - assert_frame_equal(result, expected) - - s = df['A'] - result = s.resample('30T').sum() - assert_series_equal(result, expected['A']) - - def test_resample_single_period_timedelta(self): - - s = Series(list(range(5)), index=pd.timedelta_range( - '1 day', freq='s', periods=5)) - result = s.resample('2s').sum() - expected = Series([1, 5, 4], index=pd.timedelta_range( - '1 day', freq='2s', periods=3)) - assert_series_equal(result, expected) - - def test_resample_timedelta_idempotency(self): - - # GH 12072 - index = pd.timedelta_range('0', periods=9, freq='10L') - series = pd.Series(range(9), index=index) - result = series.resample('10L').mean() - expected = series - assert_series_equal(result, expected) - - def test_resample_rounding(self): - # GH 8371 - # odd results when rounding is needed - - data = """date,time,value -11-08-2014,00:00:01.093,1 -11-08-2014,00:00:02.159,1 -11-08-2014,00:00:02.667,1 -11-08-2014,00:00:03.175,1 -11-08-2014,00:00:07.058,1 -11-08-2014,00:00:07.362,1 -11-08-2014,00:00:08.324,1 -11-08-2014,00:00:08.830,1 -11-08-2014,00:00:08.982,1 -11-08-2014,00:00:09.815,1 -11-08-2014,00:00:10.540,1 -11-08-2014,00:00:11.061,1 -11-08-2014,00:00:11.617,1 -11-08-2014,00:00:13.607,1 -11-08-2014,00:00:14.535,1 -11-08-2014,00:00:15.525,1 -11-08-2014,00:00:17.960,1 -11-08-2014,00:00:20.674,1 -11-08-2014,00:00:21.191,1""" - - from pandas.compat import StringIO - df = pd.read_csv(StringIO(data), parse_dates={'timestamp': [ - 'date', 'time']}, index_col='timestamp') - df.index.name = None - result = df.resample('6s').sum() - expected = DataFrame({'value': [ - 4, 9, 4, 2 - ]}, index=date_range('2014-11-08', freq='6s', periods=4)) - assert_frame_equal(result, expected) - - result = df.resample('7s').sum() - expected = DataFrame({'value': [ - 4, 10, 4, 1 - ]}, index=date_range('2014-11-08', freq='7s', periods=4)) - assert_frame_equal(result, expected) - - result = df.resample('11s').sum() - expected = DataFrame({'value': [ - 11, 8 - ]}, index=date_range('2014-11-08', freq='11s', periods=2)) - assert_frame_equal(result, expected) - - result = df.resample('13s').sum() - expected = DataFrame({'value': [ - 13, 6 - ]}, index=date_range('2014-11-08', freq='13s', periods=2)) - assert_frame_equal(result, expected) - - result = df.resample('17s').sum() - expected = DataFrame({'value': [ - 16, 3 - ]}, index=date_range('2014-11-08', freq='17s', periods=2)) - assert_frame_equal(result, expected) - - def test_resample_basic_from_daily(self): - # from daily - dti = DatetimeIndex(start=datetime(2005, 1, 1), - end=datetime(2005, 1, 10), freq='D', name='index') - - s = Series(np.random.rand(len(dti)), dti) - - # to weekly - result = s.resample('w-sun').last() - - assert len(result) == 3 - assert (result.index.dayofweek == [6, 6, 6]).all() - assert result.iloc[0] == s['1/2/2005'] - assert result.iloc[1] == s['1/9/2005'] - assert result.iloc[2] == s.iloc[-1] - - result = s.resample('W-MON').last() - assert len(result) == 2 - assert (result.index.dayofweek == [0, 0]).all() - assert result.iloc[0] == s['1/3/2005'] - assert result.iloc[1] == s['1/10/2005'] - - result = s.resample('W-TUE').last() - assert len(result) == 2 - assert (result.index.dayofweek == [1, 1]).all() - assert result.iloc[0] == s['1/4/2005'] - assert result.iloc[1] == s['1/10/2005'] - - result = s.resample('W-WED').last() - assert len(result) == 2 - assert (result.index.dayofweek == [2, 2]).all() - assert result.iloc[0] == s['1/5/2005'] - assert result.iloc[1] == s['1/10/2005'] - 
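A minimal standalone sketch of the anchored-weekly semantics the surrounding assertions depend on (the series below is my own, not taken from the test): each 'W-XXX' bin ends on the named weekday, and that end date becomes the bin's label.

import numpy as np
import pandas as pd

s = pd.Series(np.arange(10.0),
              index=pd.date_range('2005-01-01', periods=10, freq='D'))

# Bins end on Mondays, so only 2005-01-03 and 2005-01-10 appear as labels,
# which is exactly what the dayofweek == 0 assertions check.
print(s.resample('W-MON').last())
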
- result = s.resample('W-THU').last() - assert len(result) == 2 - assert (result.index.dayofweek == [3, 3]).all() - assert result.iloc[0] == s['1/6/2005'] - assert result.iloc[1] == s['1/10/2005'] - - result = s.resample('W-FRI').last() - assert len(result) == 2 - assert (result.index.dayofweek == [4, 4]).all() - assert result.iloc[0] == s['1/7/2005'] - assert result.iloc[1] == s['1/10/2005'] - - # to biz day - result = s.resample('B').last() - assert len(result) == 7 - assert (result.index.dayofweek == [4, 0, 1, 2, 3, 4, 0]).all() - - assert result.iloc[0] == s['1/2/2005'] - assert result.iloc[1] == s['1/3/2005'] - assert result.iloc[5] == s['1/9/2005'] - assert result.index.name == 'index' - - def test_resample_upsampling_picked_but_not_correct(self): - - # Test for issue #3020 - dates = date_range('01-Jan-2014', '05-Jan-2014', freq='D') - series = Series(1, index=dates) - - result = series.resample('D').mean() - assert result.index[0] == dates[0] - - # GH 5955 - # incorrect deciding to upsample when the axis frequency matches the - # resample frequency - - import datetime - s = Series(np.arange(1., 6), index=[datetime.datetime( - 1975, 1, i, 12, 0) for i in range(1, 6)]) - expected = Series(np.arange(1., 6), index=date_range( - '19750101', periods=5, freq='D')) - - result = s.resample('D').count() - assert_series_equal(result, Series(1, index=expected.index)) - - result1 = s.resample('D').sum() - result2 = s.resample('D').mean() - assert_series_equal(result1, expected) - assert_series_equal(result2, expected) - - def test_resample_frame_basic(self): - df = tm.makeTimeDataFrame() - - b = TimeGrouper('M') - g = df.groupby(b) - - # check all cython functions work - funcs = ['add', 'mean', 'prod', 'min', 'max', 'var'] - for f in funcs: - g._cython_agg_general(f) - - result = df.resample('A').mean() - assert_series_equal(result['A'], df['A'].resample('A').mean()) - - result = df.resample('M').mean() - assert_series_equal(result['A'], df['A'].resample('M').mean()) - - df.resample('M', kind='period').mean() - df.resample('W-WED', kind='period').mean() - - def test_resample_loffset(self): - rng = date_range('1/1/2000 00:00:00', '1/1/2000 00:13:00', freq='min') - s = Series(np.random.randn(14), index=rng) - - result = s.resample('5min', closed='right', label='right', - loffset=timedelta(minutes=1)).mean() - idx = date_range('1/1/2000', periods=4, freq='5min') - expected = Series([s[0], s[1:6].mean(), s[6:11].mean(), s[11:].mean()], - index=idx + timedelta(minutes=1)) - assert_series_equal(result, expected) - - expected = s.resample( - '5min', closed='right', label='right', - loffset='1min').mean() - assert_series_equal(result, expected) - - expected = s.resample( - '5min', closed='right', label='right', - loffset=Minute(1)).mean() - assert_series_equal(result, expected) - - assert result.index.freq == Minute(5) - - # from daily - dti = DatetimeIndex(start=datetime(2005, 1, 1), - end=datetime(2005, 1, 10), freq='D') - ser = Series(np.random.rand(len(dti)), dti) - - # to weekly - result = ser.resample('w-sun').last() - expected = ser.resample('w-sun', loffset=-bday).last() - assert result.index[0] - bday == expected.index[0] - - def test_resample_loffset_count(self): - # GH 12725 - start_time = '1/1/2000 00:00:00' - rng = date_range(start_time, periods=100, freq='S') - ts = Series(np.random.randn(len(rng)), index=rng) - - result = ts.resample('10S', loffset='1s').count() - - expected_index = ( - date_range(start_time, periods=10, freq='10S') + - timedelta(seconds=1) - ) - expected = pd.Series(10, 
index=expected_index) - - assert_series_equal(result, expected) - - # Same issue should apply to .size() since it goes through - # same code path - result = ts.resample('10S', loffset='1s').size() - - assert_series_equal(result, expected) - - def test_resample_upsample(self): - # from daily - dti = DatetimeIndex(start=datetime(2005, 1, 1), - end=datetime(2005, 1, 10), freq='D', name='index') - - s = Series(np.random.rand(len(dti)), dti) - - # to minutely, by padding - result = s.resample('Min').pad() - assert len(result) == 12961 - assert result[0] == s[0] - assert result[-1] == s[-1] - - assert result.index.name == 'index' - - def test_resample_how_method(self): - # GH9915 - s = pd.Series([11, 22], - index=[Timestamp('2015-03-31 21:48:52.672000'), - Timestamp('2015-03-31 21:49:52.739000')]) - expected = pd.Series([11, np.NaN, np.NaN, np.NaN, np.NaN, np.NaN, 22], - index=[Timestamp('2015-03-31 21:48:50'), - Timestamp('2015-03-31 21:49:00'), - Timestamp('2015-03-31 21:49:10'), - Timestamp('2015-03-31 21:49:20'), - Timestamp('2015-03-31 21:49:30'), - Timestamp('2015-03-31 21:49:40'), - Timestamp('2015-03-31 21:49:50')]) - assert_series_equal(s.resample("10S").mean(), expected) - - def test_resample_extra_index_point(self): - # GH 9756 - index = DatetimeIndex(start='20150101', end='20150331', freq='BM') - expected = DataFrame({'A': Series([21, 41, 63], index=index)}) - - index = DatetimeIndex(start='20150101', end='20150331', freq='B') - df = DataFrame( - {'A': Series(range(len(index)), index=index)}, dtype='int64') - result = df.resample('BM').last() - assert_frame_equal(result, expected) - - def test_upsample_with_limit(self): - rng = date_range('1/1/2000', periods=3, freq='5t') - ts = Series(np.random.randn(len(rng)), rng) - - result = ts.resample('t').ffill(limit=2) - expected = ts.reindex(result.index, method='ffill', limit=2) - assert_series_equal(result, expected) - - def test_nearest_upsample_with_limit(self): - rng = date_range('1/1/2000', periods=3, freq='5t') - ts = Series(np.random.randn(len(rng)), rng) - - result = ts.resample('t').nearest(limit=2) - expected = ts.reindex(result.index, method='nearest', limit=2) - assert_series_equal(result, expected) - - def test_resample_ohlc(self): - s = self.series - - grouper = TimeGrouper(Minute(5)) - expect = s.groupby(grouper).agg(lambda x: x[-1]) - result = s.resample('5Min').ohlc() - - assert len(result) == len(expect) - assert len(result.columns) == 4 - - xs = result.iloc[-2] - assert xs['open'] == s[-6] - assert xs['high'] == s[-6:-1].max() - assert xs['low'] == s[-6:-1].min() - assert xs['close'] == s[-2] - - xs = result.iloc[0] - assert xs['open'] == s[0] - assert xs['high'] == s[:5].max() - assert xs['low'] == s[:5].min() - assert xs['close'] == s[4] - - def test_resample_ohlc_result(self): - - # GH 12332 - index = pd.date_range('1-1-2000', '2-15-2000', freq='h') - index = index.union(pd.date_range('4-15-2000', '5-15-2000', freq='h')) - s = Series(range(len(index)), index=index) - - a = s.loc[:'4-15-2000'].resample('30T').ohlc() - assert isinstance(a, DataFrame) - - b = s.loc[:'4-14-2000'].resample('30T').ohlc() - assert isinstance(b, DataFrame) - - # GH12348 - # raising on odd period - rng = date_range('2013-12-30', '2014-01-07') - index = rng.drop([Timestamp('2014-01-01'), - Timestamp('2013-12-31'), - Timestamp('2014-01-04'), - Timestamp('2014-01-05')]) - df = DataFrame(data=np.arange(len(index)), index=index) - result = df.resample('B').mean() - expected = df.reindex(index=date_range(rng[0], rng[-1], freq='B')) - 
assert_frame_equal(result, expected) - - def test_resample_ohlc_dataframe(self): - df = ( - pd.DataFrame({ - 'PRICE': { - Timestamp('2011-01-06 10:59:05', tz=None): 24990, - Timestamp('2011-01-06 12:43:33', tz=None): 25499, - Timestamp('2011-01-06 12:54:09', tz=None): 25499}, - 'VOLUME': { - Timestamp('2011-01-06 10:59:05', tz=None): 1500000000, - Timestamp('2011-01-06 12:43:33', tz=None): 5000000000, - Timestamp('2011-01-06 12:54:09', tz=None): 100000000}}) - ).reindex_axis(['VOLUME', 'PRICE'], axis=1) - res = df.resample('H').ohlc() - exp = pd.concat([df['VOLUME'].resample('H').ohlc(), - df['PRICE'].resample('H').ohlc()], - axis=1, - keys=['VOLUME', 'PRICE']) - assert_frame_equal(exp, res) - - df.columns = [['a', 'b'], ['c', 'd']] - res = df.resample('H').ohlc() - exp.columns = pd.MultiIndex.from_tuples([ - ('a', 'c', 'open'), ('a', 'c', 'high'), ('a', 'c', 'low'), - ('a', 'c', 'close'), ('b', 'd', 'open'), ('b', 'd', 'high'), - ('b', 'd', 'low'), ('b', 'd', 'close')]) - assert_frame_equal(exp, res) - - # dupe columns fail atm - # df.columns = ['PRICE', 'PRICE'] - - def test_resample_dup_index(self): - - # GH 4812 - # dup columns with resample raising - df = DataFrame(np.random.randn(4, 12), index=[2000, 2000, 2000, 2000], - columns=[Period(year=2000, month=i + 1, freq='M') - for i in range(12)]) - df.iloc[3, :] = np.nan - result = df.resample('Q', axis=1).mean() - expected = df.groupby(lambda x: int((x.month - 1) / 3), axis=1).mean() - expected.columns = [ - Period(year=2000, quarter=i + 1, freq='Q') for i in range(4)] - assert_frame_equal(result, expected) - - def test_resample_reresample(self): - dti = DatetimeIndex(start=datetime(2005, 1, 1), - end=datetime(2005, 1, 10), freq='D') - s = Series(np.random.rand(len(dti)), dti) - bs = s.resample('B', closed='right', label='right').mean() - result = bs.resample('8H').mean() - assert len(result) == 22 - assert isinstance(result.index.freq, offsets.DateOffset) - assert result.index.freq == offsets.Hour(8) - - def test_resample_timestamp_to_period(self): - ts = _simple_ts('1/1/1990', '1/1/2000') - - result = ts.resample('A-DEC', kind='period').mean() - expected = ts.resample('A-DEC').mean() - expected.index = period_range('1990', '2000', freq='a-dec') - assert_series_equal(result, expected) - - result = ts.resample('A-JUN', kind='period').mean() - expected = ts.resample('A-JUN').mean() - expected.index = period_range('1990', '2000', freq='a-jun') - assert_series_equal(result, expected) - - result = ts.resample('M', kind='period').mean() - expected = ts.resample('M').mean() - expected.index = period_range('1990-01', '2000-01', freq='M') - assert_series_equal(result, expected) - - result = ts.resample('M', kind='period').mean() - expected = ts.resample('M').mean() - expected.index = period_range('1990-01', '2000-01', freq='M') - assert_series_equal(result, expected) - - def test_ohlc_5min(self): - def _ohlc(group): - if isna(group).all(): - return np.repeat(np.nan, 4) - return [group[0], group.max(), group.min(), group[-1]] - - rng = date_range('1/1/2000 00:00:00', '1/1/2000 5:59:50', freq='10s') - ts = Series(np.random.randn(len(rng)), index=rng) - - resampled = ts.resample('5min', closed='right', - label='right').ohlc() - - assert (resampled.loc['1/1/2000 00:00'] == ts[0]).all() - - exp = _ohlc(ts[1:31]) - assert (resampled.loc['1/1/2000 00:05'] == exp).all() - - exp = _ohlc(ts['1/1/2000 5:55:01':]) - assert (resampled.loc['1/1/2000 6:00:00'] == exp).all() - - def test_downsample_non_unique(self): - rng = date_range('1/1/2000', '2/29/2000') 
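A standalone sketch (data and names are my own) of the behaviour this test pins down: bin-based downsampling never requires a unique or monotonic index, because each timestamp is mapped to its bin independently, so the five copies of every day simply contribute five values to that day's monthly bin.

import numpy as np
import pandas as pd

rng = pd.date_range('2000-01-01', '2000-02-29')
ts = pd.Series(np.random.randn(len(rng) * 5), index=rng.repeat(5))

monthly = ts.resample('M').mean()  # one row per month, duplicates and all
assert len(monthly) == 2           # Jan 2000 and Feb 2000
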
- rng2 = rng.repeat(5).values - ts = Series(np.random.randn(len(rng2)), index=rng2) - - result = ts.resample('M').mean() - - expected = ts.groupby(lambda x: x.month).mean() - assert len(result) == 2 - assert_almost_equal(result[0], expected[1]) - assert_almost_equal(result[1], expected[2]) - - def test_asfreq_non_unique(self): - # GH #1077 - rng = date_range('1/1/2000', '2/29/2000') - rng2 = rng.repeat(2).values - ts = Series(np.random.randn(len(rng2)), index=rng2) - - pytest.raises(Exception, ts.asfreq, 'B') - - def test_resample_axis1(self): - rng = date_range('1/1/2000', '2/29/2000') - df = DataFrame(np.random.randn(3, len(rng)), columns=rng, - index=['a', 'b', 'c']) - - result = df.resample('M', axis=1).mean() - expected = df.T.resample('M').mean().T - tm.assert_frame_equal(result, expected) - - def test_resample_panel(self): - rng = date_range('1/1/2000', '6/30/2000') - n = len(rng) - - with catch_warnings(record=True): - panel = Panel(np.random.randn(3, n, 5), - items=['one', 'two', 'three'], - major_axis=rng, - minor_axis=['a', 'b', 'c', 'd', 'e']) - - result = panel.resample('M', axis=1).mean() - - def p_apply(panel, f): - result = {} - for item in panel.items: - result[item] = f(panel[item]) - return Panel(result, items=panel.items) - - expected = p_apply(panel, lambda x: x.resample('M').mean()) - tm.assert_panel_equal(result, expected) - - panel2 = panel.swapaxes(1, 2) - result = panel2.resample('M', axis=2).mean() - expected = p_apply(panel2, - lambda x: x.resample('M', axis=1).mean()) - tm.assert_panel_equal(result, expected) - - def test_resample_panel_numpy(self): - rng = date_range('1/1/2000', '6/30/2000') - n = len(rng) - - with catch_warnings(record=True): - panel = Panel(np.random.randn(3, n, 5), - items=['one', 'two', 'three'], - major_axis=rng, - minor_axis=['a', 'b', 'c', 'd', 'e']) - - result = panel.resample('M', axis=1).apply(lambda x: x.mean(1)) - expected = panel.resample('M', axis=1).mean() - tm.assert_panel_equal(result, expected) - - panel = panel.swapaxes(1, 2) - result = panel.resample('M', axis=2).apply(lambda x: x.mean(2)) - expected = panel.resample('M', axis=2).mean() - tm.assert_panel_equal(result, expected) - - def test_resample_anchored_ticks(self): - # If a fixed delta (5 minute, 4 hour) evenly divides a day, we should - # "anchor" the origin at midnight so we get regular intervals rather - # than starting from the first timestamp which might start in the - # middle of a desired interval - - rng = date_range('1/1/2000 04:00:00', periods=86400, freq='s') - ts = Series(np.random.randn(len(rng)), index=rng) - ts[:2] = np.nan # so results are the same - - freqs = ['t', '5t', '15t', '30t', '4h', '12h'] - for freq in freqs: - result = ts[2:].resample(freq, closed='left', label='left').mean() - expected = ts.resample(freq, closed='left', label='left').mean() - assert_series_equal(result, expected) - - def test_resample_single_group(self): - mysum = lambda x: x.sum() - - rng = date_range('2000-1-1', '2000-2-10', freq='D') - ts = Series(np.random.randn(len(rng)), index=rng) - assert_series_equal(ts.resample('M').sum(), - ts.resample('M').apply(mysum)) - - rng = date_range('2000-1-1', '2000-1-10', freq='D') - ts = Series(np.random.randn(len(rng)), index=rng) - assert_series_equal(ts.resample('M').sum(), - ts.resample('M').apply(mysum)) - - # GH 3849 - s = Series([30.1, 31.6], index=[Timestamp('20070915 15:30:00'), - Timestamp('20070915 15:40:00')]) - expected = Series([0.75], index=[Timestamp('20070915')]) - result = s.resample('D').apply(lambda x: np.std(x)) - 
assert_series_equal(result, expected) - - def test_resample_base(self): - rng = date_range('1/1/2000 00:00:00', '1/1/2000 02:00', freq='s') - ts = Series(np.random.randn(len(rng)), index=rng) - - resampled = ts.resample('5min', base=2).mean() - exp_rng = date_range('12/31/1999 23:57:00', '1/1/2000 01:57', - freq='5min') - tm.assert_index_equal(resampled.index, exp_rng) - - def test_resample_base_with_timedeltaindex(self): - - # GH 10530 - rng = timedelta_range(start='0s', periods=25, freq='s') - ts = Series(np.random.randn(len(rng)), index=rng) - - with_base = ts.resample('2s', base=5).mean() - without_base = ts.resample('2s').mean() - - exp_without_base = timedelta_range(start='0s', end='25s', freq='2s') - exp_with_base = timedelta_range(start='5s', end='29s', freq='2s') - - tm.assert_index_equal(without_base.index, exp_without_base) - tm.assert_index_equal(with_base.index, exp_with_base) - - def test_resample_categorical_data_with_timedeltaindex(self): - # GH #12169 - df = DataFrame({'Group_obj': 'A'}, - index=pd.to_timedelta(list(range(20)), unit='s')) - df['Group'] = df['Group_obj'].astype('category') - result = df.resample('10s').agg(lambda x: (x.value_counts().index[0])) - expected = DataFrame({'Group_obj': ['A', 'A'], - 'Group': ['A', 'A']}, - index=pd.to_timedelta([0, 10], unit='s')) - expected = expected.reindex_axis(['Group_obj', 'Group'], 1) - tm.assert_frame_equal(result, expected) - - def test_resample_daily_anchored(self): - rng = date_range('1/1/2000 0:00:00', periods=10000, freq='T') - ts = Series(np.random.randn(len(rng)), index=rng) - ts[:2] = np.nan # so results are the same - - result = ts[2:].resample('D', closed='left', label='left').mean() - expected = ts.resample('D', closed='left', label='left').mean() - assert_series_equal(result, expected) - - def test_resample_to_period_monthly_buglet(self): - # GH #1259 - - rng = date_range('1/1/2000', '12/31/2000') - ts = Series(np.random.randn(len(rng)), index=rng) - - result = ts.resample('M', kind='period').mean() - exp_index = period_range('Jan-2000', 'Dec-2000', freq='M') - tm.assert_index_equal(result.index, exp_index) - - def test_period_with_agg(self): - - # aggregate a period resampler with a lambda - s2 = pd.Series(np.random.randint(0, 5, 50), - index=pd.period_range('2012-01-01', - freq='H', - periods=50), - dtype='float64') - - expected = s2.to_timestamp().resample('D').mean().to_period() - result = s2.resample('D').agg(lambda x: x.mean()) - assert_series_equal(result, expected) - - def test_resample_segfault(self): - # GH 8573 - # segfaulting in older versions - all_wins_and_wagers = [ - (1, datetime(2013, 10, 1, 16, 20), 1, 0), - (2, datetime(2013, 10, 1, 16, 10), 1, 0), - (2, datetime(2013, 10, 1, 18, 15), 1, 0), - (2, datetime(2013, 10, 1, 16, 10, 31), 1, 0)] - - df = pd.DataFrame.from_records(all_wins_and_wagers, - columns=("ID", "timestamp", "A", "B") - ).set_index("timestamp") - result = df.groupby("ID").resample("5min").sum() - expected = df.groupby("ID").apply(lambda x: x.resample("5min").sum()) - assert_frame_equal(result, expected) - - def test_resample_dtype_preservation(self): - - # GH 12202 - # validation tests for dtype preservation - - df = DataFrame({'date': pd.date_range(start='2016-01-01', - periods=4, freq='W'), - 'group': [1, 1, 2, 2], - 'val': Series([5, 6, 7, 8], - dtype='int32')} - ).set_index('date') - - result = df.resample('1D').ffill() - assert result.val.dtype == np.int32 - - result = df.groupby('group').resample('1D').ffill() - assert result.val.dtype == np.int32 - - def 
test_resample_dtype_coerceion(self): - - pytest.importorskip('scipy.interpolate') - - # GH 16361 - df = {"a": [1, 3, 1, 4]} - df = pd.DataFrame( - df, index=pd.date_range("2017-01-01", "2017-01-04")) - - expected = (df.astype("float64") - .resample("H") - .mean() - ["a"] - .interpolate("cubic") - ) - - result = df.resample("H")["a"].mean().interpolate("cubic") - tm.assert_series_equal(result, expected) - - result = df.resample("H").mean()["a"].interpolate("cubic") - tm.assert_series_equal(result, expected) - - def test_weekly_resample_buglet(self): - # #1327 - rng = date_range('1/1/2000', freq='B', periods=20) - ts = Series(np.random.randn(len(rng)), index=rng) - - resampled = ts.resample('W').mean() - expected = ts.resample('W-SUN').mean() - assert_series_equal(resampled, expected) - - def test_monthly_resample_error(self): - # #1451 - dates = date_range('4/16/2012 20:00', periods=5000, freq='h') - ts = Series(np.random.randn(len(dates)), index=dates) - # it works! - ts.resample('M') - - def test_nanosecond_resample_error(self): - # GH 12307 - Values falls after last bin when - # Resampling using pd.tseries.offsets.Nano as period - start = 1443707890427 - exp_start = 1443707890400 - indx = pd.date_range( - start=pd.to_datetime(start), - periods=10, - freq='100n' - ) - ts = pd.Series(range(len(indx)), index=indx) - r = ts.resample(pd.tseries.offsets.Nano(100)) - result = r.agg('mean') - - exp_indx = pd.date_range( - start=pd.to_datetime(exp_start), - periods=10, - freq='100n' - ) - exp = pd.Series(range(len(exp_indx)), index=exp_indx) - - assert_series_equal(result, exp) - - def test_resample_anchored_intraday(self): - # #1471, #1458 - - rng = date_range('1/1/2012', '4/1/2012', freq='100min') - df = DataFrame(rng.month, index=rng) - - result = df.resample('M').mean() - expected = df.resample( - 'M', kind='period').mean().to_timestamp(how='end') - tm.assert_frame_equal(result, expected) - - result = df.resample('M', closed='left').mean() - exp = df.tshift(1, freq='D').resample('M', kind='period').mean() - exp = exp.to_timestamp(how='end') - - tm.assert_frame_equal(result, exp) - - rng = date_range('1/1/2012', '4/1/2012', freq='100min') - df = DataFrame(rng.month, index=rng) - - result = df.resample('Q').mean() - expected = df.resample( - 'Q', kind='period').mean().to_timestamp(how='end') - tm.assert_frame_equal(result, expected) - - result = df.resample('Q', closed='left').mean() - expected = df.tshift(1, freq='D').resample('Q', kind='period', - closed='left').mean() - expected = expected.to_timestamp(how='end') - tm.assert_frame_equal(result, expected) - - ts = _simple_ts('2012-04-29 23:00', '2012-04-30 5:00', freq='h') - resampled = ts.resample('M').mean() - assert len(resampled) == 1 - - def test_resample_anchored_monthstart(self): - ts = _simple_ts('1/1/2000', '12/31/2002') - - freqs = ['MS', 'BMS', 'QS-MAR', 'AS-DEC', 'AS-JUN'] - - for freq in freqs: - ts.resample(freq).mean() - - def test_resample_anchored_multiday(self): - # When resampling a range spanning multiple days, ensure that the - # start date gets used to determine the offset. Fixes issue where - # a one day period is not a multiple of the frequency. 
- # - # See: https://github.com/pandas-dev/pandas/issues/8683 - - index = pd.date_range( - '2014-10-14 23:06:23.206', periods=3, freq='400L' - ) | pd.date_range( - '2014-10-15 23:00:00', periods=2, freq='2200L') - - s = pd.Series(np.random.randn(5), index=index) - - # Ensure left closing works - result = s.resample('2200L').mean() - assert result.index[-1] == pd.Timestamp('2014-10-15 23:00:02.000') - - # Ensure right closing works - result = s.resample('2200L', label='right').mean() - assert result.index[-1] == pd.Timestamp('2014-10-15 23:00:04.200') - - def test_corner_cases(self): - # miscellaneous test coverage - - rng = date_range('1/1/2000', periods=12, freq='t') - ts = Series(np.random.randn(len(rng)), index=rng) - - result = ts.resample('5t', closed='right', label='left').mean() - ex_index = date_range('1999-12-31 23:55', periods=4, freq='5t') - tm.assert_index_equal(result.index, ex_index) - - len0pts = _simple_pts('2007-01', '2010-05', freq='M')[:0] - # it works - result = len0pts.resample('A-DEC').mean() - assert len(result) == 0 - - # resample to periods - ts = _simple_ts('2000-04-28', '2000-04-30 11:00', freq='h') - result = ts.resample('M', kind='period').mean() - assert len(result) == 1 - assert result.index[0] == Period('2000-04', freq='M') - - def test_anchored_lowercase_buglet(self): - dates = date_range('4/16/2012 20:00', periods=50000, freq='s') - ts = Series(np.random.randn(len(dates)), index=dates) - # it works! - ts.resample('d').mean() - - def test_upsample_apply_functions(self): - # #1596 - rng = pd.date_range('2012-06-12', periods=4, freq='h') - - ts = Series(np.random.randn(len(rng)), index=rng) - - result = ts.resample('20min').aggregate(['mean', 'sum']) - assert isinstance(result, DataFrame) - - def test_resample_not_monotonic(self): - rng = pd.date_range('2012-06-12', periods=200, freq='h') - ts = Series(np.random.randn(len(rng)), index=rng) - - ts = ts.take(np.random.permutation(len(ts))) - - result = ts.resample('D').sum() - exp = ts.sort_index().resample('D').sum() - assert_series_equal(result, exp) - - def test_resample_median_bug_1688(self): - - for dtype in ['int64', 'int32', 'float64', 'float32']: - df = DataFrame([1, 2], index=[datetime(2012, 1, 1, 0, 0, 0), - datetime(2012, 1, 1, 0, 5, 0)], - dtype=dtype) - - result = df.resample("T").apply(lambda x: x.mean()) - exp = df.asfreq('T') - tm.assert_frame_equal(result, exp) - - result = df.resample("T").median() - exp = df.asfreq('T') - tm.assert_frame_equal(result, exp) - - def test_how_lambda_functions(self): - - ts = _simple_ts('1/1/2000', '4/1/2000') - - result = ts.resample('M').apply(lambda x: x.mean()) - exp = ts.resample('M').mean() - tm.assert_series_equal(result, exp) - - foo_exp = ts.resample('M').mean() - foo_exp.name = 'foo' - bar_exp = ts.resample('M').std() - bar_exp.name = 'bar' - - result = ts.resample('M').apply( - [lambda x: x.mean(), lambda x: x.std(ddof=1)]) - result.columns = ['foo', 'bar'] - tm.assert_series_equal(result['foo'], foo_exp) - tm.assert_series_equal(result['bar'], bar_exp) - - # this is a MI Series, so comparing the names of the results - # doesn't make sense - result = ts.resample('M').aggregate({'foo': lambda x: x.mean(), - 'bar': lambda x: x.std(ddof=1)}) - tm.assert_series_equal(result['foo'], foo_exp, check_names=False) - tm.assert_series_equal(result['bar'], bar_exp, check_names=False) - - def test_resample_unequal_times(self): - # #1772 - start = datetime(1999, 3, 1, 5) - # end hour is less than start - end = datetime(2012, 7, 31, 4) - bad_ind = date_range(start, 
end, freq="30min") - df = DataFrame({'close': 1}, index=bad_ind) - - # it works! - df.resample('AS').sum() - - def test_resample_consistency(self): - - # GH 6418 - # resample with bfill / limit / reindex consistency - - i30 = pd.date_range('2002-02-02', periods=4, freq='30T') - s = pd.Series(np.arange(4.), index=i30) - s[2] = np.NaN - - # Upsample by factor 3 with reindex() and resample() methods: - i10 = pd.date_range(i30[0], i30[-1], freq='10T') - - s10 = s.reindex(index=i10, method='bfill') - s10_2 = s.reindex(index=i10, method='bfill', limit=2) - rl = s.reindex_like(s10, method='bfill', limit=2) - r10_2 = s.resample('10Min').bfill(limit=2) - r10 = s.resample('10Min').bfill() - - # s10_2, r10, r10_2, rl should all be equal - assert_series_equal(s10_2, r10) - assert_series_equal(s10_2, r10_2) - assert_series_equal(s10_2, rl) - - def test_resample_timegrouper(self): - # GH 7227 - dates1 = [datetime(2014, 10, 1), datetime(2014, 9, 3), - datetime(2014, 11, 5), datetime(2014, 9, 5), - datetime(2014, 10, 8), datetime(2014, 7, 15)] - - dates2 = dates1[:2] + [pd.NaT] + dates1[2:4] + [pd.NaT] + dates1[4:] - dates3 = [pd.NaT] + dates1 + [pd.NaT] - - for dates in [dates1, dates2, dates3]: - df = DataFrame(dict(A=dates, B=np.arange(len(dates)))) - result = df.set_index('A').resample('M').count() - exp_idx = pd.DatetimeIndex(['2014-07-31', '2014-08-31', - '2014-09-30', - '2014-10-31', '2014-11-30'], - freq='M', name='A') - expected = DataFrame({'B': [1, 0, 2, 2, 1]}, index=exp_idx) - assert_frame_equal(result, expected) - - result = df.groupby(pd.Grouper(freq='M', key='A')).count() - assert_frame_equal(result, expected) - - df = DataFrame(dict(A=dates, B=np.arange(len(dates)), C=np.arange( - len(dates)))) - result = df.set_index('A').resample('M').count() - expected = DataFrame({'B': [1, 0, 2, 2, 1], 'C': [1, 0, 2, 2, 1]}, - index=exp_idx, columns=['B', 'C']) - assert_frame_equal(result, expected) - - result = df.groupby(pd.Grouper(freq='M', key='A')).count() - assert_frame_equal(result, expected) - - def test_resample_nunique(self): - - # GH 12352 - df = DataFrame({ - 'ID': {pd.Timestamp('2015-06-05 00:00:00'): '0010100903', - pd.Timestamp('2015-06-08 00:00:00'): '0010150847'}, - 'DATE': {pd.Timestamp('2015-06-05 00:00:00'): '2015-06-05', - pd.Timestamp('2015-06-08 00:00:00'): '2015-06-08'}}) - r = df.resample('D') - g = df.groupby(pd.Grouper(freq='D')) - expected = df.groupby(pd.Grouper(freq='D')).ID.apply(lambda x: - x.nunique()) - assert expected.name == 'ID' - - for t in [r, g]: - result = r.ID.nunique() - assert_series_equal(result, expected) - - result = df.ID.resample('D').nunique() - assert_series_equal(result, expected) - - result = df.ID.groupby(pd.Grouper(freq='D')).nunique() - assert_series_equal(result, expected) - - def test_resample_nunique_with_date_gap(self): - # GH 13453 - index = pd.date_range('1-1-2000', '2-15-2000', freq='h') - index2 = pd.date_range('4-15-2000', '5-15-2000', freq='h') - index3 = index.append(index2) - s = pd.Series(range(len(index3)), index=index3, dtype='int64') - r = s.resample('M') - - # Since all elements are unique, these should all be the same - results = [ - r.count(), - r.nunique(), - r.agg(pd.Series.nunique), - r.agg('nunique') - ] - - assert_series_equal(results[0], results[1]) - assert_series_equal(results[0], results[2]) - assert_series_equal(results[0], results[3]) - - def test_resample_group_info(self): # GH10914 - for n, k in product((10000, 100000), (10, 100, 1000)): - dr = date_range(start='2015-08-27', periods=n // 10, freq='T') - ts = 
Series(np.random.randint(0, n // k, n).astype('int64'), - index=np.random.choice(dr, n)) - - left = ts.resample('30T').nunique() - ix = date_range(start=ts.index.min(), end=ts.index.max(), - freq='30T') - - vals = ts.values - bins = np.searchsorted(ix.values, ts.index, side='right') - - sorter = np.lexsort((vals, bins)) - vals, bins = vals[sorter], bins[sorter] - - mask = np.r_[True, vals[1:] != vals[:-1]] - mask |= np.r_[True, bins[1:] != bins[:-1]] - - arr = np.bincount(bins[mask] - 1, - minlength=len(ix)).astype('int64', copy=False) - right = Series(arr, index=ix) - - assert_series_equal(left, right) - - def test_resample_size(self): - n = 10000 - dr = date_range('2015-09-19', periods=n, freq='T') - ts = Series(np.random.randn(n), index=np.random.choice(dr, n)) - - left = ts.resample('7T').size() - ix = date_range(start=left.index.min(), end=ts.index.max(), freq='7T') - - bins = np.searchsorted(ix.values, ts.index.values, side='right') - val = np.bincount(bins, minlength=len(ix) + 1)[1:].astype('int64', - copy=False) - - right = Series(val, index=ix) - assert_series_equal(left, right) - - def test_resample_across_dst(self): - # The test resamples a DatetimeIndex with values before and after a - # DST change - # Issue: 14682 - - # The DatetimeIndex we will start with - # (note that DST happens at 03:00+02:00 -> 02:00+01:00) - # 2016-10-30 02:23:00+02:00, 2016-10-30 02:23:00+01:00 - df1 = DataFrame([1477786980, 1477790580], columns=['ts']) - dti1 = DatetimeIndex(pd.to_datetime(df1.ts, unit='s') - .dt.tz_localize('UTC') - .dt.tz_convert('Europe/Madrid')) - - # The expected DatetimeIndex after resampling. - # 2016-10-30 02:00:00+02:00, 2016-10-30 02:00:00+01:00 - df2 = DataFrame([1477785600, 1477789200], columns=['ts']) - dti2 = DatetimeIndex(pd.to_datetime(df2.ts, unit='s') - .dt.tz_localize('UTC') - .dt.tz_convert('Europe/Madrid')) - df = DataFrame([5, 5], index=dti1) - - result = df.resample(rule='H').sum() - expected = DataFrame([5, 5], index=dti2) - - assert_frame_equal(result, expected) - - def test_resample_dst_anchor(self): - # 5172 - dti = DatetimeIndex([datetime(2012, 11, 4, 23)], tz='US/Eastern') - df = DataFrame([5], index=dti) - assert_frame_equal(df.resample(rule='D').sum(), - DataFrame([5], index=df.index.normalize())) - df.resample(rule='MS').sum() - assert_frame_equal( - df.resample(rule='MS').sum(), - DataFrame([5], index=DatetimeIndex([datetime(2012, 11, 1)], - tz='US/Eastern'))) - - dti = date_range('2013-09-30', '2013-11-02', freq='30Min', - tz='Europe/Paris') - values = range(dti.size) - df = DataFrame({"a": values, - "b": values, - "c": values}, index=dti, dtype='int64') - how = {"a": "min", "b": "max", "c": "count"} - - assert_frame_equal( - df.resample("W-MON").agg(how)[["a", "b", "c"]], - DataFrame({"a": [0, 48, 384, 720, 1056, 1394], - "b": [47, 383, 719, 1055, 1393, 1586], - "c": [48, 336, 336, 336, 338, 193]}, - index=date_range('9/30/2013', '11/4/2013', - freq='W-MON', tz='Europe/Paris')), - 'W-MON Frequency') - - assert_frame_equal( - df.resample("2W-MON").agg(how)[["a", "b", "c"]], - DataFrame({"a": [0, 48, 720, 1394], - "b": [47, 719, 1393, 1586], - "c": [48, 672, 674, 193]}, - index=date_range('9/30/2013', '11/11/2013', - freq='2W-MON', tz='Europe/Paris')), - '2W-MON Frequency') - - assert_frame_equal( - df.resample("MS").agg(how)[["a", "b", "c"]], - DataFrame({"a": [0, 48, 1538], - "b": [47, 1537, 1586], - "c": [48, 1490, 49]}, - index=date_range('9/1/2013', '11/1/2013', - freq='MS', tz='Europe/Paris')), - 'MS Frequency') - - assert_frame_equal( - 
df.resample("2MS").agg(how)[["a", "b", "c"]], - DataFrame({"a": [0, 1538], - "b": [1537, 1586], - "c": [1538, 49]}, - index=date_range('9/1/2013', '11/1/2013', - freq='2MS', tz='Europe/Paris')), - '2MS Frequency') - - df_daily = df['10/26/2013':'10/29/2013'] - assert_frame_equal( - df_daily.resample("D").agg({"a": "min", "b": "max", "c": "count"}) - [["a", "b", "c"]], - DataFrame({"a": [1248, 1296, 1346, 1394], - "b": [1295, 1345, 1393, 1441], - "c": [48, 50, 48, 48]}, - index=date_range('10/26/2013', '10/29/2013', - freq='D', tz='Europe/Paris')), - 'D Frequency') - - def test_resample_with_nat(self): - # GH 13020 - index = DatetimeIndex([pd.NaT, - '1970-01-01 00:00:00', - pd.NaT, - '1970-01-01 00:00:01', - '1970-01-01 00:00:02']) - frame = DataFrame([2, 3, 5, 7, 11], index=index) - - index_1s = DatetimeIndex(['1970-01-01 00:00:00', - '1970-01-01 00:00:01', - '1970-01-01 00:00:02']) - frame_1s = DataFrame([3, 7, 11], index=index_1s) - assert_frame_equal(frame.resample('1s').mean(), frame_1s) - - index_2s = DatetimeIndex(['1970-01-01 00:00:00', - '1970-01-01 00:00:02']) - frame_2s = DataFrame([5, 11], index=index_2s) - assert_frame_equal(frame.resample('2s').mean(), frame_2s) - - index_3s = DatetimeIndex(['1970-01-01 00:00:00']) - frame_3s = DataFrame([7], index=index_3s) - assert_frame_equal(frame.resample('3s').mean(), frame_3s) - - assert_frame_equal(frame.resample('60s').mean(), frame_3s) - - def test_resample_timedelta_values(self): - # GH 13119 - # check that timedelta dtype is preserved when NaT values are - # introduced by the resampling - - times = timedelta_range('1 day', '4 day', freq='4D') - df = DataFrame({'time': times}, index=times) - - times2 = timedelta_range('1 day', '4 day', freq='2D') - exp = Series(times2, index=times2, name='time') - exp.iloc[1] = pd.NaT - - res = df.resample('2D').first()['time'] - tm.assert_series_equal(res, exp) - res = df['time'].resample('2D').first() - tm.assert_series_equal(res, exp) - - def test_resample_datetime_values(self): - # GH 13119 - # check that datetime dtype is preserved when NaT values are - # introduced by the resampling - - dates = [datetime(2016, 1, 15), datetime(2016, 1, 19)] - df = DataFrame({'timestamp': dates}, index=dates) - - exp = Series([datetime(2016, 1, 15), pd.NaT, datetime(2016, 1, 19)], - index=date_range('2016-01-15', periods=3, freq='2D'), - name='timestamp') - - res = df.resample('2D').first()['timestamp'] - tm.assert_series_equal(res, exp) - res = df['timestamp'].resample('2D').first() - tm.assert_series_equal(res, exp) - - -class TestPeriodIndex(Base): - _index_factory = lambda x: period_range - - @pytest.fixture - def _series_name(self): - return 'pi' - - def create_series(self): - # TODO: replace calls to .create_series() by injecting the series - # fixture - i = period_range(datetime(2005, 1, 1), - datetime(2005, 1, 10), freq='D') - - return Series(np.arange(len(i)), index=i, name='pi') - - @pytest.mark.parametrize('freq', ['2D', '1H', '2H']) - @pytest.mark.parametrize('kind', ['period', None, 'timestamp']) - def test_asfreq(self, series_and_frame, freq, kind): - # GH 12884, 15944 - # make sure .asfreq() returns PeriodIndex (except kind='timestamp') - - obj = series_and_frame - if kind == 'timestamp': - expected = obj.to_timestamp().resample(freq).asfreq() - else: - start = obj.index[0].to_timestamp(how='start') - end = (obj.index[-1] + 1).to_timestamp(how='start') - new_index = date_range(start=start, end=end, freq=freq, - closed='left') - expected = obj.to_timestamp().reindex(new_index).to_period(freq) - 
result = obj.resample(freq, kind=kind).asfreq() - assert_almost_equal(result, expected) - - def test_asfreq_fill_value(self): - # test for fill value during resampling, issue 3715 - - s = self.create_series() - new_index = date_range(s.index[0].to_timestamp(how='start'), - (s.index[-1]).to_timestamp(how='start'), - freq='1H') - expected = s.to_timestamp().reindex(new_index, fill_value=4.0) - result = s.resample('1H', kind='timestamp').asfreq(fill_value=4.0) - assert_series_equal(result, expected) - - frame = s.to_frame('value') - new_index = date_range(frame.index[0].to_timestamp(how='start'), - (frame.index[-1]).to_timestamp(how='start'), - freq='1H') - expected = frame.to_timestamp().reindex(new_index, fill_value=3.0) - result = frame.resample('1H', kind='timestamp').asfreq(fill_value=3.0) - assert_frame_equal(result, expected) - - @pytest.mark.parametrize('freq', ['H', '12H', '2D', 'W']) - @pytest.mark.parametrize('kind', [None, 'period', 'timestamp']) - def test_selection(self, index, freq, kind): - # This is a bug, these should be implemented - # GH 14008 - df = pd.DataFrame({'date': index, - 'a': np.arange(len(index), dtype=np.int64)}, - index=pd.MultiIndex.from_arrays([ - np.arange(len(index), dtype=np.int64), - index], names=['v', 'd'])) - with pytest.raises(NotImplementedError): - df.resample(freq, on='date', kind=kind) - with pytest.raises(NotImplementedError): - df.resample(freq, level='d', kind=kind) - - def test_annual_upsample_D_s_f(self): - self._check_annual_upsample_cases('D', 'start', 'ffill') - - def test_annual_upsample_D_e_f(self): - self._check_annual_upsample_cases('D', 'end', 'ffill') - - def test_annual_upsample_D_s_b(self): - self._check_annual_upsample_cases('D', 'start', 'bfill') - - def test_annual_upsample_D_e_b(self): - self._check_annual_upsample_cases('D', 'end', 'bfill') - - def test_annual_upsample_B_s_f(self): - self._check_annual_upsample_cases('B', 'start', 'ffill') - - def test_annual_upsample_B_e_f(self): - self._check_annual_upsample_cases('B', 'end', 'ffill') - - def test_annual_upsample_B_s_b(self): - self._check_annual_upsample_cases('B', 'start', 'bfill') - - def test_annual_upsample_B_e_b(self): - self._check_annual_upsample_cases('B', 'end', 'bfill') - - def test_annual_upsample_M_s_f(self): - self._check_annual_upsample_cases('M', 'start', 'ffill') - - def test_annual_upsample_M_e_f(self): - self._check_annual_upsample_cases('M', 'end', 'ffill') - - def test_annual_upsample_M_s_b(self): - self._check_annual_upsample_cases('M', 'start', 'bfill') - - def test_annual_upsample_M_e_b(self): - self._check_annual_upsample_cases('M', 'end', 'bfill') - - def _check_annual_upsample_cases(self, targ, conv, meth, end='12/31/1991'): - for month in MONTHS: - ts = _simple_pts('1/1/1990', end, freq='A-%s' % month) - - result = getattr(ts.resample(targ, convention=conv), meth)() - expected = result.to_timestamp(targ, how=conv) - expected = expected.asfreq(targ, meth).to_period() - assert_series_equal(result, expected) - - def test_basic_downsample(self): - ts = _simple_pts('1/1/1990', '6/30/1995', freq='M') - result = ts.resample('a-dec').mean() - - expected = ts.groupby(ts.index.year).mean() - expected.index = period_range('1/1/1990', '6/30/1995', freq='a-dec') - assert_series_equal(result, expected) - - # this is ok - assert_series_equal(ts.resample('a-dec').mean(), result) - assert_series_equal(ts.resample('a').mean(), result) - - def test_not_subperiod(self): - # These are incompatible period rules for resampling - ts = _simple_pts('1/1/1990', 
'6/30/1995', freq='w-wed') - pytest.raises(ValueError, lambda: ts.resample('a-dec').mean()) - pytest.raises(ValueError, lambda: ts.resample('q-mar').mean()) - pytest.raises(ValueError, lambda: ts.resample('M').mean()) - pytest.raises(ValueError, lambda: ts.resample('w-thu').mean()) - - @pytest.mark.parametrize('freq', ['D', '2D']) - def test_basic_upsample(self, freq): - ts = _simple_pts('1/1/1990', '6/30/1995', freq='M') - result = ts.resample('a-dec').mean() - - resampled = result.resample(freq, convention='end').ffill() - expected = result.to_timestamp(freq, how='end') - expected = expected.asfreq(freq, 'ffill').to_period(freq) - assert_series_equal(resampled, expected) - - def test_upsample_with_limit(self): - rng = period_range('1/1/2000', periods=5, freq='A') - ts = Series(np.random.randn(len(rng)), rng) - - result = ts.resample('M', convention='end').ffill(limit=2) - expected = ts.asfreq('M').reindex(result.index, method='ffill', - limit=2) - assert_series_equal(result, expected) - - def test_annual_upsample(self): - ts = _simple_pts('1/1/1990', '12/31/1995', freq='A-DEC') - df = DataFrame({'a': ts}) - rdf = df.resample('D').ffill() - exp = df['a'].resample('D').ffill() - assert_series_equal(rdf['a'], exp) - - rng = period_range('2000', '2003', freq='A-DEC') - ts = Series([1, 2, 3, 4], index=rng) - - result = ts.resample('M').ffill() - ex_index = period_range('2000-01', '2003-12', freq='M') - - expected = ts.asfreq('M', how='start').reindex(ex_index, - method='ffill') - assert_series_equal(result, expected) - - def test_quarterly_upsample(self): - targets = ['D', 'B', 'M'] - - for month in MONTHS: - ts = _simple_pts('1/1/1990', '12/31/1995', freq='Q-%s' % month) - - for targ, conv in product(targets, ['start', 'end']): - result = ts.resample(targ, convention=conv).ffill() - expected = result.to_timestamp(targ, how=conv) - expected = expected.asfreq(targ, 'ffill').to_period() - assert_series_equal(result, expected) - - def test_monthly_upsample(self): - targets = ['D', 'B'] - - ts = _simple_pts('1/1/1990', '12/31/1995', freq='M') - - for targ, conv in product(targets, ['start', 'end']): - result = ts.resample(targ, convention=conv).ffill() - expected = result.to_timestamp(targ, how=conv) - expected = expected.asfreq(targ, 'ffill').to_period() - assert_series_equal(result, expected) - - def test_resample_basic(self): - # GH3609 - s = Series(range(100), index=date_range( - '20130101', freq='s', periods=100, name='idx'), dtype='float') - s[10:30] = np.nan - index = PeriodIndex([ - Period('2013-01-01 00:00', 'T'), - Period('2013-01-01 00:01', 'T')], name='idx') - expected = Series([34.5, 79.5], index=index) - result = s.to_period().resample('T', kind='period').mean() - assert_series_equal(result, expected) - result2 = s.resample('T', kind='period').mean() - assert_series_equal(result2, expected) - - @pytest.mark.parametrize('freq,expected_vals', [('M', [31, 29, 31, 9]), - ('2M', [31 + 29, 31 + 9])]) - def test_resample_count(self, freq, expected_vals): - # GH12774 - series = pd.Series(1, index=pd.period_range(start='2000', periods=100)) - result = series.resample(freq).count() - expected_index = pd.period_range(start='2000', freq=freq, - periods=len(expected_vals)) - expected = pd.Series(expected_vals, index=expected_index) - assert_series_equal(result, expected) - - def test_resample_same_freq(self): - - # GH12770 - series = pd.Series(range(3), index=pd.period_range( - start='2000', periods=3, freq='M')) - expected = series - - for method in resample_methods: - result = 
getattr(series.resample('M'), method)() - assert_series_equal(result, expected) - - def test_resample_incompat_freq(self): - - with pytest.raises(IncompatibleFrequency): - pd.Series(range(3), index=pd.period_range( - start='2000', periods=3, freq='M')).resample('W').mean() - - def test_with_local_timezone_pytz(self): - # see gh-5430 - local_timezone = pytz.timezone('America/Los_Angeles') - - start = datetime(year=2013, month=11, day=1, hour=0, minute=0, - tzinfo=pytz.utc) - # 1 day later - end = datetime(year=2013, month=11, day=2, hour=0, minute=0, - tzinfo=pytz.utc) - - index = pd.date_range(start, end, freq='H') - - series = pd.Series(1, index=index) - series = series.tz_convert(local_timezone) - result = series.resample('D', kind='period').mean() - - # Create the expected series - # Index is moved back a day with the timezone conversion from UTC to - # Pacific - expected_index = (pd.period_range(start=start, end=end, freq='D') - 1) - expected = pd.Series(1, index=expected_index) - assert_series_equal(result, expected) - - def test_with_local_timezone_dateutil(self): - # see gh-5430 - local_timezone = 'dateutil/America/Los_Angeles' - - start = datetime(year=2013, month=11, day=1, hour=0, minute=0, - tzinfo=dateutil.tz.tzutc()) - # 1 day later - end = datetime(year=2013, month=11, day=2, hour=0, minute=0, - tzinfo=dateutil.tz.tzutc()) - - index = pd.date_range(start, end, freq='H', name='idx') - - series = pd.Series(1, index=index) - series = series.tz_convert(local_timezone) - result = series.resample('D', kind='period').mean() - - # Create the expected series - # Index is moved back a day with the timezone conversion from UTC to - # Pacific - expected_index = (pd.period_range(start=start, end=end, freq='D', - name='idx') - 1) - expected = pd.Series(1, index=expected_index) - assert_series_equal(result, expected) - - def test_fill_method_and_how_upsample(self): - # GH2073 - s = Series(np.arange(9, dtype='int64'), - index=date_range('2010-01-01', periods=9, freq='Q')) - last = s.resample('M').ffill() - both = s.resample('M').ffill().resample('M').last().astype('int64') - assert_series_equal(last, both) - - def test_weekly_upsample(self): - targets = ['D', 'B'] - - for day in DAYS: - ts = _simple_pts('1/1/1990', '12/31/1995', freq='W-%s' % day) - - for targ, conv in product(targets, ['start', 'end']): - result = ts.resample(targ, convention=conv).ffill() - expected = result.to_timestamp(targ, how=conv) - expected = expected.asfreq(targ, 'ffill').to_period() - assert_series_equal(result, expected) - - def test_resample_to_timestamps(self): - ts = _simple_pts('1/1/1990', '12/31/1995', freq='M') - - result = ts.resample('A-DEC', kind='timestamp').mean() - expected = ts.to_timestamp(how='end').resample('A-DEC').mean() - assert_series_equal(result, expected) - - def test_resample_to_quarterly(self): - for month in MONTHS: - ts = _simple_pts('1990', '1992', freq='A-%s' % month) - quar_ts = ts.resample('Q-%s' % month).ffill() - - stamps = ts.to_timestamp('D', how='start') - qdates = period_range(ts.index[0].asfreq('D', 'start'), - ts.index[-1].asfreq('D', 'end'), - freq='Q-%s' % month) - - expected = stamps.reindex(qdates.to_timestamp('D', 's'), - method='ffill') - expected.index = qdates - - assert_series_equal(quar_ts, expected) - - # conforms, but different month - ts = _simple_pts('1990', '1992', freq='A-JUN') - - for how in ['start', 'end']: - result = ts.resample('Q-MAR', convention=how).ffill() - expected = ts.asfreq('Q-MAR', how=how) - expected = expected.reindex(result.index, 
method='ffill') - - # .to_timestamp('D') - # expected = expected.resample('Q-MAR').ffill() - - assert_series_equal(result, expected) - - def test_resample_fill_missing(self): - rng = PeriodIndex([2000, 2005, 2007, 2009], freq='A') - - s = Series(np.random.randn(4), index=rng) - - stamps = s.to_timestamp() - filled = s.resample('A').ffill() - expected = stamps.resample('A').ffill().to_period('A') - assert_series_equal(filled, expected) - - def test_cant_fill_missing_dups(self): - rng = PeriodIndex([2000, 2005, 2005, 2007, 2007], freq='A') - s = Series(np.random.randn(5), index=rng) - pytest.raises(Exception, lambda: s.resample('A').ffill()) - - @pytest.mark.parametrize('freq', ['5min']) - @pytest.mark.parametrize('kind', ['period', None, 'timestamp']) - def test_resample_5minute(self, freq, kind): - rng = period_range('1/1/2000', '1/5/2000', freq='T') - ts = Series(np.random.randn(len(rng)), index=rng) - expected = ts.to_timestamp().resample(freq).mean() - if kind != 'timestamp': - expected = expected.to_period(freq) - result = ts.resample(freq, kind=kind).mean() - assert_series_equal(result, expected) - - def test_upsample_daily_business_daily(self): - ts = _simple_pts('1/1/2000', '2/1/2000', freq='B') - - result = ts.resample('D').asfreq() - expected = ts.asfreq('D').reindex(period_range('1/3/2000', '2/1/2000')) - assert_series_equal(result, expected) - - ts = _simple_pts('1/1/2000', '2/1/2000') - result = ts.resample('H', convention='s').asfreq() - exp_rng = period_range('1/1/2000', '2/1/2000 23:00', freq='H') - expected = ts.asfreq('H', how='s').reindex(exp_rng) - assert_series_equal(result, expected) - - def test_resample_irregular_sparse(self): - dr = date_range(start='1/1/2012', freq='5min', periods=1000) - s = Series(np.array(100), index=dr) - # subset the data. 
- subset = s[:'2012-01-04 06:55'] - - result = subset.resample('10min').apply(len) - expected = s.resample('10min').apply(len).loc[result.index] - assert_series_equal(result, expected) - - def test_resample_weekly_all_na(self): - rng = date_range('1/1/2000', periods=10, freq='W-WED') - ts = Series(np.random.randn(len(rng)), index=rng) - - result = ts.resample('W-THU').asfreq() - - assert result.isna().all() - - result = ts.resample('W-THU').asfreq().ffill()[:-1] - expected = ts.asfreq('W-THU').ffill() - assert_series_equal(result, expected) - - def test_resample_tz_localized(self): - dr = date_range(start='2012-4-13', end='2012-5-1') - ts = Series(lrange(len(dr)), dr) - - ts_utc = ts.tz_localize('UTC') - ts_local = ts_utc.tz_convert('America/Los_Angeles') - - result = ts_local.resample('W').mean() - - ts_local_naive = ts_local.copy() - ts_local_naive.index = [x.replace(tzinfo=None) - for x in ts_local_naive.index.to_pydatetime()] - - exp = ts_local_naive.resample( - 'W').mean().tz_localize('America/Los_Angeles') - - assert_series_equal(result, exp) - - # it works - result = ts_local.resample('D').mean() - - # #2245 - idx = date_range('2001-09-20 15:59', '2001-09-20 16:00', freq='T', - tz='Australia/Sydney') - s = Series([1, 2], index=idx) - - result = s.resample('D', closed='right', label='right').mean() - ex_index = date_range('2001-09-21', periods=1, freq='D', - tz='Australia/Sydney') - expected = Series([1.5], index=ex_index) - - assert_series_equal(result, expected) - - # for good measure - result = s.resample('D', kind='period').mean() - ex_index = period_range('2001-09-20', periods=1, freq='D') - expected = Series([1.5], index=ex_index) - assert_series_equal(result, expected) - - # GH 6397 - # comparing an offset that doesn't propagate tz's - rng = date_range('1/1/2011', periods=20000, freq='H') - rng = rng.tz_localize('EST') - ts = DataFrame(index=rng) - ts['first'] = np.random.randn(len(rng)) - ts['second'] = np.cumsum(np.random.randn(len(rng))) - expected = DataFrame( - { - 'first': ts.resample('A').sum()['first'], - 'second': ts.resample('A').mean()['second']}, - columns=['first', 'second']) - result = ts.resample( - 'A').agg({'first': np.sum, - 'second': np.mean}).reindex(columns=['first', 'second']) - assert_frame_equal(result, expected) - - def test_closed_left_corner(self): - # #1465 - s = Series(np.random.randn(21), - index=date_range(start='1/1/2012 9:30', - freq='1min', periods=21)) - s[0] = np.nan - - result = s.resample('10min', closed='left', label='right').mean() - exp = s[1:].resample('10min', closed='left', label='right').mean() - assert_series_equal(result, exp) - - result = s.resample('10min', closed='left', label='left').mean() - exp = s[1:].resample('10min', closed='left', label='left').mean() - - ex_index = date_range(start='1/1/2012 9:30', freq='10min', periods=3) - - tm.assert_index_equal(result.index, ex_index) - assert_series_equal(result, exp) - - def test_quarterly_resampling(self): - rng = period_range('2000Q1', periods=10, freq='Q-DEC') - ts = Series(np.arange(10), index=rng) - - result = ts.resample('A').mean() - exp = ts.to_timestamp().resample('A').mean().to_period() - assert_series_equal(result, exp) - - def test_resample_weekly_bug_1726(self): - # 8/6/12 is a Monday - ind = DatetimeIndex(start="8/6/2012", end="8/26/2012", freq="D") - n = len(ind) - data = [[x] * 5 for x in range(n)] - df = DataFrame(data, columns=['open', 'high', 'low', 'close', 'vol'], - index=ind) - - # it works! 
- df.resample('W-MON', closed='left', label='left').first() - - def test_resample_bms_2752(self): - # GH2753 - foo = pd.Series(index=pd.bdate_range('20000101', '20000201')) - res1 = foo.resample("BMS").mean() - res2 = foo.resample("BMS").mean().resample("B").mean() - assert res1.index[0] == Timestamp('20000103') - assert res1.index[0] == res2.index[0] - - # def test_monthly_convention_span(self): - # rng = period_range('2000-01', periods=3, freq='M') - # ts = Series(np.arange(3), index=rng) - - # # hacky way to get same thing - # exp_index = period_range('2000-01-01', '2000-03-31', freq='D') - # expected = ts.asfreq('D', how='end').reindex(exp_index) - # expected = expected.fillna(method='bfill') - - # result = ts.resample('D', convention='span').mean() - - # assert_series_equal(result, expected) - - def test_default_right_closed_label(self): - end_freq = ['D', 'Q', 'M', 'D'] - end_types = ['M', 'A', 'Q', 'W'] - - for from_freq, to_freq in zip(end_freq, end_types): - idx = DatetimeIndex(start='8/15/2012', periods=100, freq=from_freq) - df = DataFrame(np.random.randn(len(idx), 2), idx) - - resampled = df.resample(to_freq).mean() - assert_frame_equal(resampled, df.resample(to_freq, closed='right', - label='right').mean()) - - def test_default_left_closed_label(self): - others = ['MS', 'AS', 'QS', 'D', 'H'] - others_freq = ['D', 'Q', 'M', 'H', 'T'] - - for from_freq, to_freq in zip(others_freq, others): - idx = DatetimeIndex(start='8/15/2012', periods=100, freq=from_freq) - df = DataFrame(np.random.randn(len(idx), 2), idx) - - resampled = df.resample(to_freq).mean() - assert_frame_equal(resampled, df.resample(to_freq, closed='left', - label='left').mean()) - - def test_all_values_single_bin(self): - # 2070 - index = period_range(start="2012-01-01", end="2012-12-31", freq="M") - s = Series(np.random.randn(len(index)), index=index) - - result = s.resample("A").mean() - tm.assert_almost_equal(result[0], s.mean()) - - def test_evenly_divisible_with_no_extra_bins(self): - # 4076 - # when the frequency is evenly divisible, sometimes extra bins - - df = DataFrame(np.random.randn(9, 3), - index=date_range('2000-1-1', periods=9)) - result = df.resample('5D').mean() - expected = pd.concat( - [df.iloc[0:5].mean(), df.iloc[5:].mean()], axis=1).T - expected.index = [Timestamp('2000-1-1'), Timestamp('2000-1-6')] - assert_frame_equal(result, expected) - - index = date_range(start='2001-5-4', periods=28) - df = DataFrame( - [{'REST_KEY': 1, 'DLY_TRN_QT': 80, 'DLY_SLS_AMT': 90, - 'COOP_DLY_TRN_QT': 30, 'COOP_DLY_SLS_AMT': 20}] * 28 + - [{'REST_KEY': 2, 'DLY_TRN_QT': 70, 'DLY_SLS_AMT': 10, - 'COOP_DLY_TRN_QT': 50, 'COOP_DLY_SLS_AMT': 20}] * 28, - index=index.append(index)).sort_index() - - index = date_range('2001-5-4', periods=4, freq='7D') - expected = DataFrame( - [{'REST_KEY': 14, 'DLY_TRN_QT': 14, 'DLY_SLS_AMT': 14, - 'COOP_DLY_TRN_QT': 14, 'COOP_DLY_SLS_AMT': 14}] * 4, - index=index) - result = df.resample('7D').count() - assert_frame_equal(result, expected) - - expected = DataFrame( - [{'REST_KEY': 21, 'DLY_TRN_QT': 1050, 'DLY_SLS_AMT': 700, - 'COOP_DLY_TRN_QT': 560, 'COOP_DLY_SLS_AMT': 280}] * 4, - index=index) - result = df.resample('7D').sum() - assert_frame_equal(result, expected) - - @pytest.mark.parametrize('kind', ['period', None, 'timestamp']) - @pytest.mark.parametrize('agg_arg', ['mean', {'value': 'mean'}, ['mean']]) - def test_loffset_returns_datetimeindex(self, frame, kind, agg_arg): - # make sure passing loffset returns DatetimeIndex in all cases - # basic method taken from 
Base.test_resample_loffset_arg_type() - df = frame - expected_means = [df.values[i:i + 2].mean() - for i in range(0, len(df.values), 2)] - expected_index = self.create_index(df.index[0], - periods=len(df.index) / 2, - freq='2D') - - # loffset coerces PeriodIndex to DateTimeIndex - expected_index = expected_index.to_timestamp() - expected_index += timedelta(hours=2) - expected = DataFrame({'value': expected_means}, index=expected_index) - - result_agg = df.resample('2D', loffset='2H', kind=kind).agg(agg_arg) - with tm.assert_produces_warning(FutureWarning, check_stacklevel=False): - result_how = df.resample('2D', how=agg_arg, loffset='2H', - kind=kind) - if isinstance(agg_arg, list): - expected.columns = pd.MultiIndex.from_tuples([('value', 'mean')]) - assert_frame_equal(result_agg, expected) - assert_frame_equal(result_how, expected) - - @pytest.mark.parametrize('freq, period_mult', [('H', 24), ('12H', 2)]) - @pytest.mark.parametrize('kind', [None, 'period']) - def test_upsampling_ohlc(self, freq, period_mult, kind): - # GH 13083 - pi = PeriodIndex(start='2000', freq='D', periods=10) - s = Series(range(len(pi)), index=pi) - expected = s.to_timestamp().resample(freq).ohlc().to_period(freq) - - # timestamp-based resampling doesn't include all sub-periods - # of the last original period, so extend accordingly: - new_index = PeriodIndex(start='2000', freq=freq, - periods=period_mult * len(pi)) - expected = expected.reindex(new_index) - result = s.resample(freq, kind=kind).ohlc() - assert_frame_equal(result, expected) - - @pytest.mark.parametrize('periods, values', - [([pd.NaT, '1970-01-01 00:00:00', pd.NaT, - '1970-01-01 00:00:02', '1970-01-01 00:00:03'], - [2, 3, 5, 7, 11]), - ([pd.NaT, pd.NaT, '1970-01-01 00:00:00', pd.NaT, - pd.NaT, pd.NaT, '1970-01-01 00:00:02', - '1970-01-01 00:00:03', pd.NaT, pd.NaT], - [1, 2, 3, 5, 6, 8, 7, 11, 12, 13])]) - @pytest.mark.parametrize('freq, expected_values', - [('1s', [3, np.NaN, 7, 11]), - ('2s', [3, int((7 + 11) / 2)]), - ('3s', [int((3 + 7) / 2), 11])]) - def test_resample_with_nat(self, periods, values, freq, expected_values): - # GH 13224 - index = PeriodIndex(periods, freq='S') - frame = DataFrame(values, index=index) - - expected_index = period_range('1970-01-01 00:00:00', - periods=len(expected_values), freq=freq) - expected = DataFrame(expected_values, index=expected_index) - result = frame.resample(freq).mean() - assert_frame_equal(result, expected) - - def test_resample_with_only_nat(self): - # GH 13224 - pi = PeriodIndex([pd.NaT] * 3, freq='S') - frame = DataFrame([2, 3, 5], index=pi) - expected_index = PeriodIndex(data=[], freq=pi.freq) - expected = DataFrame([], index=expected_index) - result = frame.resample('1s').mean() - assert_frame_equal(result, expected) - - -class TestTimedeltaIndex(Base): - _index_factory = lambda x: timedelta_range - - @pytest.fixture - def _index_start(self): - return '1 day' - - @pytest.fixture - def _index_end(self): - return '10 day' - - @pytest.fixture - def _series_name(self): - return 'tdi' - - def create_series(self): - i = timedelta_range('1 day', - '10 day', freq='D') - - return Series(np.arange(len(i)), index=i, name='tdi') - - def test_asfreq_bug(self): - import datetime as dt - df = DataFrame(data=[1, 3], - index=[dt.timedelta(), dt.timedelta(minutes=3)]) - result = df.resample('1T').asfreq() - expected = DataFrame(data=[1, np.nan, np.nan, 3], - index=timedelta_range('0 day', - periods=4, - freq='1T')) - assert_frame_equal(result, expected) - - -class TestResamplerGrouper(object): - - def 
setup_method(self, method): - self.frame = DataFrame({'A': [1] * 20 + [2] * 12 + [3] * 8, - 'B': np.arange(40)}, - index=date_range('1/1/2000', - freq='s', - periods=40)) - - def test_back_compat_v180(self): - - df = self.frame - for how in ['sum', 'mean', 'prod', 'min', 'max', 'var', 'std']: - with tm.assert_produces_warning(FutureWarning, - check_stacklevel=False): - result = df.groupby('A').resample('4s', how=how) - expected = getattr(df.groupby('A').resample('4s'), how)() - assert_frame_equal(result, expected) - - with tm.assert_produces_warning(FutureWarning, - check_stacklevel=False): - result = df.groupby('A').resample('4s', how='mean', - fill_method='ffill') - expected = df.groupby('A').resample('4s').mean().ffill() - assert_frame_equal(result, expected) - - def test_tab_complete_ipython6_warning(self, ip): - from IPython.core.completer import provisionalcompleter - code = dedent("""\ - import pandas.util.testing as tm - s = tm.makeTimeSeries() - rs = s.resample("D") - """) - ip.run_code(code) - - with tm.assert_produces_warning(None): - with provisionalcompleter('ignore'): - list(ip.Completer.completions('rs.', 1)) - - def test_deferred_with_groupby(self): - - # GH 12486 - # support deferred resample ops with groupby - data = [['2010-01-01', 'A', 2], ['2010-01-02', 'A', 3], - ['2010-01-05', 'A', 8], ['2010-01-10', 'A', 7], - ['2010-01-13', 'A', 3], ['2010-01-01', 'B', 5], - ['2010-01-03', 'B', 2], ['2010-01-04', 'B', 1], - ['2010-01-11', 'B', 7], ['2010-01-14', 'B', 3]] - - df = DataFrame(data, columns=['date', 'id', 'score']) - df.date = pd.to_datetime(df.date) - f = lambda x: x.set_index('date').resample('D').asfreq() - expected = df.groupby('id').apply(f) - result = df.set_index('date').groupby('id').resample('D').asfreq() - assert_frame_equal(result, expected) - - df = DataFrame({'date': pd.date_range(start='2016-01-01', - periods=4, - freq='W'), - 'group': [1, 1, 2, 2], - 'val': [5, 6, 7, 8]}).set_index('date') - - f = lambda x: x.resample('1D').ffill() - expected = df.groupby('group').apply(f) - result = df.groupby('group').resample('1D').ffill() - assert_frame_equal(result, expected) - - def test_getitem(self): - g = self.frame.groupby('A') - - expected = g.B.apply(lambda x: x.resample('2s').mean()) - - result = g.resample('2s').B.mean() - assert_series_equal(result, expected) - - result = g.B.resample('2s').mean() - assert_series_equal(result, expected) - - result = g.resample('2s').mean().B - assert_series_equal(result, expected) - - def test_getitem_multiple(self): - - # GH 13174 - # multiple calls after selection causing an issue with aliasing - data = [{'id': 1, 'buyer': 'A'}, {'id': 2, 'buyer': 'B'}] - df = pd.DataFrame(data, index=pd.date_range('2016-01-01', periods=2)) - r = df.groupby('id').resample('1D') - result = r['buyer'].count() - expected = pd.Series([1, 1], - index=pd.MultiIndex.from_tuples( - [(1, pd.Timestamp('2016-01-01')), - (2, pd.Timestamp('2016-01-02'))], - names=['id', None]), - name='buyer') - assert_series_equal(result, expected) - - result = r['buyer'].count() - assert_series_equal(result, expected) - - def test_nearest(self): - - # GH 17496 - # Resample nearest - index = pd.date_range('1/1/2000', periods=3, freq='T') - result = pd.Series(range(3), index=index).resample('20s').nearest() - - expected = pd.Series( - np.array([0, 0, 1, 1, 1, 2, 2]), - index=pd.DatetimeIndex( - ['2000-01-01 00:00:00', '2000-01-01 00:00:20', - '2000-01-01 00:00:40', '2000-01-01 00:01:00', - '2000-01-01 00:01:20', '2000-01-01 00:01:40', - '2000-01-01 00:02:00'], - 
dtype='datetime64[ns]', - freq='20S')) - assert_series_equal(result, expected) - - def test_methods(self): - g = self.frame.groupby('A') - r = g.resample('2s') - - for f in ['first', 'last', 'median', 'sem', 'sum', 'mean', - 'min', 'max']: - result = getattr(r, f)() - expected = g.apply(lambda x: getattr(x.resample('2s'), f)()) - assert_frame_equal(result, expected) - - for f in ['size']: - result = getattr(r, f)() - expected = g.apply(lambda x: getattr(x.resample('2s'), f)()) - assert_series_equal(result, expected) - - for f in ['count']: - result = getattr(r, f)() - expected = g.apply(lambda x: getattr(x.resample('2s'), f)()) - assert_frame_equal(result, expected) - - # series only - for f in ['nunique']: - result = getattr(r.B, f)() - expected = g.B.apply(lambda x: getattr(x.resample('2s'), f)()) - assert_series_equal(result, expected) - - for f in ['nearest', 'backfill', 'ffill', 'asfreq']: - result = getattr(r, f)() - expected = g.apply(lambda x: getattr(x.resample('2s'), f)()) - assert_frame_equal(result, expected) - - result = r.ohlc() - expected = g.apply(lambda x: x.resample('2s').ohlc()) - assert_frame_equal(result, expected) - - for f in ['std', 'var']: - result = getattr(r, f)(ddof=1) - expected = g.apply(lambda x: getattr(x.resample('2s'), f)(ddof=1)) - assert_frame_equal(result, expected) - - def test_apply(self): - - g = self.frame.groupby('A') - r = g.resample('2s') - - # reduction - expected = g.resample('2s').sum() - - def f(x): - return x.resample('2s').sum() - - result = r.apply(f) - assert_frame_equal(result, expected) - - def f(x): - return x.resample('2s').apply(lambda y: y.sum()) - - result = g.apply(f) - assert_frame_equal(result, expected) - - def test_resample_groupby_with_label(self): - # GH 13235 - index = date_range('2000-01-01', freq='2D', periods=5) - df = DataFrame(index=index, - data={'col0': [0, 0, 1, 1, 2], 'col1': [1, 1, 1, 1, 1]} - ) - result = df.groupby('col0').resample('1W', label='left').sum() - - mi = [np.array([0, 0, 1, 2]), - pd.to_datetime(np.array(['1999-12-26', '2000-01-02', - '2000-01-02', '2000-01-02']) - ) - ] - mindex = pd.MultiIndex.from_arrays(mi, names=['col0', None]) - expected = DataFrame(data={'col0': [0, 0, 2, 2], 'col1': [1, 1, 2, 1]}, - index=mindex - ) - - assert_frame_equal(result, expected) - - def test_consistency_with_window(self): - - # consistent return values with window - df = self.frame - expected = pd.Int64Index([1, 2, 3], name='A') - result = df.groupby('A').resample('2s').mean() - assert result.index.nlevels == 2 - tm.assert_index_equal(result.index.levels[0], expected) - - result = df.groupby('A').rolling(20).mean() - assert result.index.nlevels == 2 - tm.assert_index_equal(result.index.levels[0], expected) - - def test_median_duplicate_columns(self): - # GH 14233 - - df = pd.DataFrame(np.random.randn(20, 3), - columns=list('aaa'), - index=pd.date_range('2012-01-01', - periods=20, freq='s')) - df2 = df.copy() - df2.columns = ['a', 'b', 'c'] - expected = df2.resample('5s').median() - result = df.resample('5s').median() - expected.columns = result.columns - assert_frame_equal(result, expected) - - -class TestTimeGrouper(object): - - def setup_method(self, method): - self.ts = Series(np.random.randn(1000), - index=date_range('1/1/2000', periods=1000)) - - def test_apply(self): - with tm.assert_produces_warning(FutureWarning, - check_stacklevel=False): - grouper = pd.TimeGrouper(freq='A', label='right', closed='right') - - grouped = self.ts.groupby(grouper) - - f = lambda x: x.sort_values()[-3:] - - applied = 
grouped.apply(f) - expected = self.ts.groupby(lambda x: x.year).apply(f) - - applied.index = applied.index.droplevel(0) - expected.index = expected.index.droplevel(0) - assert_series_equal(applied, expected) - - def test_count(self): - self.ts[::3] = np.nan - - expected = self.ts.groupby(lambda x: x.year).count() - - with tm.assert_produces_warning(FutureWarning, - check_stacklevel=False): - grouper = pd.TimeGrouper(freq='A', label='right', closed='right') - result = self.ts.groupby(grouper).count() - expected.index = result.index - assert_series_equal(result, expected) - - result = self.ts.resample('A').count() - expected.index = result.index - assert_series_equal(result, expected) - - def test_numpy_reduction(self): - result = self.ts.resample('A', closed='right').prod() - - expected = self.ts.groupby(lambda x: x.year).agg(np.prod) - expected.index = result.index - - assert_series_equal(result, expected) - - def test_apply_iteration(self): - # #2300 - N = 1000 - ind = pd.date_range(start="2000-01-01", freq="D", periods=N) - df = DataFrame({'open': 1, 'close': 2}, index=ind) - tg = TimeGrouper('M') - - _, grouper, _ = tg._get_grouper(df) - - # Errors - grouped = df.groupby(grouper, group_keys=False) - f = lambda df: df['close'] / df['open'] - - # it works! - result = grouped.apply(f) - tm.assert_index_equal(result.index, df.index) - - def test_panel_aggregation(self): - ind = pd.date_range('1/1/2000', periods=100) - data = np.random.randn(2, len(ind), 4) - - with catch_warnings(record=True): - wp = Panel(data, items=['Item1', 'Item2'], major_axis=ind, - minor_axis=['A', 'B', 'C', 'D']) - - tg = TimeGrouper('M', axis=1) - _, grouper, _ = tg._get_grouper(wp) - bingrouped = wp.groupby(grouper) - binagg = bingrouped.mean() - - def f(x): - assert (isinstance(x, Panel)) - return x.mean(1) - - result = bingrouped.agg(f) - tm.assert_panel_equal(result, binagg) - - def test_fails_on_no_datetime_index(self): - index_names = ('Int64Index', 'Index', 'Float64Index', 'MultiIndex') - index_funcs = (tm.makeIntIndex, - tm.makeUnicodeIndex, tm.makeFloatIndex, - lambda m: tm.makeCustomIndex(m, 2)) - n = 2 - for name, func in zip(index_names, index_funcs): - index = func(n) - df = DataFrame({'a': np.random.randn(n)}, index=index) - with tm.assert_raises_regex(TypeError, - "Only valid with " - "DatetimeIndex, TimedeltaIndex " - "or PeriodIndex, but got an " - "instance of %r" % name): - df.groupby(TimeGrouper('D')) - - def test_aaa_group_order(self): - # GH 12840 - # check TimeGrouper perform stable sorts - n = 20 - data = np.random.randn(n, 4) - df = DataFrame(data, columns=['A', 'B', 'C', 'D']) - df['key'] = [datetime(2013, 1, 1), datetime(2013, 1, 2), - datetime(2013, 1, 3), datetime(2013, 1, 4), - datetime(2013, 1, 5)] * 4 - grouped = df.groupby(TimeGrouper(key='key', freq='D')) - - tm.assert_frame_equal(grouped.get_group(datetime(2013, 1, 1)), - df[::5]) - tm.assert_frame_equal(grouped.get_group(datetime(2013, 1, 2)), - df[1::5]) - tm.assert_frame_equal(grouped.get_group(datetime(2013, 1, 3)), - df[2::5]) - tm.assert_frame_equal(grouped.get_group(datetime(2013, 1, 4)), - df[3::5]) - tm.assert_frame_equal(grouped.get_group(datetime(2013, 1, 5)), - df[4::5]) - - def test_aggregate_normal(self): - # check TimeGrouper's aggregation is identical as normal groupby - - n = 20 - data = np.random.randn(n, 4) - normal_df = DataFrame(data, columns=['A', 'B', 'C', 'D']) - normal_df['key'] = [1, 2, 3, 4, 5] * 4 - - dt_df = DataFrame(data, columns=['A', 'B', 'C', 'D']) - dt_df['key'] = [datetime(2013, 1, 1), 
datetime(2013, 1, 2), - datetime(2013, 1, 3), datetime(2013, 1, 4), - datetime(2013, 1, 5)] * 4 - - normal_grouped = normal_df.groupby('key') - dt_grouped = dt_df.groupby(TimeGrouper(key='key', freq='D')) - - for func in ['min', 'max', 'prod', 'var', 'std', 'mean']: - expected = getattr(normal_grouped, func)() - dt_result = getattr(dt_grouped, func)() - expected.index = date_range(start='2013-01-01', freq='D', - periods=5, name='key') - assert_frame_equal(expected, dt_result) - - for func in ['count', 'sum']: - expected = getattr(normal_grouped, func)() - expected.index = date_range(start='2013-01-01', freq='D', - periods=5, name='key') - dt_result = getattr(dt_grouped, func)() - assert_frame_equal(expected, dt_result) - - # GH 7453 - for func in ['size']: - expected = getattr(normal_grouped, func)() - expected.index = date_range(start='2013-01-01', freq='D', - periods=5, name='key') - dt_result = getattr(dt_grouped, func)() - assert_series_equal(expected, dt_result) - - # GH 7453 - for func in ['first', 'last']: - expected = getattr(normal_grouped, func)() - expected.index = date_range(start='2013-01-01', freq='D', - periods=5, name='key') - dt_result = getattr(dt_grouped, func)() - assert_frame_equal(expected, dt_result) - - # if TimeGrouper is used included, 'nth' doesn't work yet - - """ - for func in ['nth']: - expected = getattr(normal_grouped, func)(3) - expected.index = date_range(start='2013-01-01', - freq='D', periods=5, name='key') - dt_result = getattr(dt_grouped, func)(3) - assert_frame_equal(expected, dt_result) - """ - - def test_aggregate_with_nat(self): - # check TimeGrouper's aggregation is identical as normal groupby - - n = 20 - data = np.random.randn(n, 4).astype('int64') - normal_df = DataFrame(data, columns=['A', 'B', 'C', 'D']) - normal_df['key'] = [1, 2, np.nan, 4, 5] * 4 - - dt_df = DataFrame(data, columns=['A', 'B', 'C', 'D']) - dt_df['key'] = [datetime(2013, 1, 1), datetime(2013, 1, 2), pd.NaT, - datetime(2013, 1, 4), datetime(2013, 1, 5)] * 4 - - normal_grouped = normal_df.groupby('key') - dt_grouped = dt_df.groupby(TimeGrouper(key='key', freq='D')) - - for func in ['min', 'max', 'sum', 'prod']: - normal_result = getattr(normal_grouped, func)() - dt_result = getattr(dt_grouped, func)() - pad = DataFrame([[np.nan, np.nan, np.nan, np.nan]], index=[3], - columns=['A', 'B', 'C', 'D']) - expected = normal_result.append(pad) - expected = expected.sort_index() - expected.index = date_range(start='2013-01-01', freq='D', - periods=5, name='key') - assert_frame_equal(expected, dt_result) - - for func in ['count']: - normal_result = getattr(normal_grouped, func)() - pad = DataFrame([[0, 0, 0, 0]], index=[3], - columns=['A', 'B', 'C', 'D']) - expected = normal_result.append(pad) - expected = expected.sort_index() - expected.index = date_range(start='2013-01-01', freq='D', - periods=5, name='key') - dt_result = getattr(dt_grouped, func)() - assert_frame_equal(expected, dt_result) - - for func in ['size']: - normal_result = getattr(normal_grouped, func)() - pad = Series([0], index=[3]) - expected = normal_result.append(pad) - expected = expected.sort_index() - expected.index = date_range(start='2013-01-01', freq='D', - periods=5, name='key') - dt_result = getattr(dt_grouped, func)() - assert_series_equal(expected, dt_result) - # GH 9925 - assert dt_result.index.name == 'key' - - # if NaT is included, 'var', 'std', 'mean', 'first','last' - # and 'nth' doesn't work yet
- [ ] closes #17806 - [ ] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/17808
2017-10-06T20:22:12Z
2018-01-21T18:14:16Z
null
2018-01-21T18:14:16Z
Implement npy_dtime.pyx
diff --git a/pandas/_libs/tslib.pyx b/pandas/_libs/tslib.pyx index 5269cddf8d2fd..7a335b19eb87b 100644 --- a/pandas/_libs/tslib.pyx +++ b/pandas/_libs/tslib.pyx @@ -47,7 +47,6 @@ from datetime cimport ( npy_datetime, is_leapyear, dayofweek, - check_dts_bounds, PANDAS_FR_ns, PyDateTime_Check, PyDate_Check, PyDateTime_IMPORT, @@ -58,6 +57,9 @@ from datetime cimport ( from datetime import timedelta, datetime from datetime import time as datetime_time +from tslibs.np_datetime cimport check_dts_bounds +from tslibs.np_datetime import OutOfBoundsDatetime + from khash cimport ( khiter_t, kh_destroy_int64, kh_put_int64, @@ -732,7 +734,7 @@ class Timestamp(_Timestamp): ts = convert_datetime_to_tsobject(ts_input, _tzinfo) value = ts.value + (dts.ps // 1000) if value != NPY_NAT: - _check_dts_bounds(&dts) + check_dts_bounds(&dts) return create_timestamp_from_ts(value, dts, _tzinfo, self.freq) @@ -1645,7 +1647,7 @@ cdef convert_to_tsobject(object ts, object tz, object unit, 'Timestamp'.format(ts, type(ts))) if obj.value != NPY_NAT: - _check_dts_bounds(&obj.dts) + check_dts_bounds(&obj.dts) if tz is not None: _localize_tso(obj, tz) @@ -1726,7 +1728,7 @@ cdef _TSObject convert_datetime_to_tsobject(datetime ts, object tz, obj.value += nanos obj.dts.ps = nanos * 1000 - _check_dts_bounds(&obj.dts) + check_dts_bounds(&obj.dts) return obj @@ -1762,12 +1764,12 @@ cpdef convert_str_to_tsobject(object ts, object tz, object unit, _string_to_dts(ts, &obj.dts, &out_local, &out_tzoffset) obj.value = pandas_datetimestruct_to_datetime( PANDAS_FR_ns, &obj.dts) - _check_dts_bounds(&obj.dts) + check_dts_bounds(&obj.dts) if out_local == 1: obj.tzinfo = pytz.FixedOffset(out_tzoffset) obj.value = tz_convert_single(obj.value, obj.tzinfo, 'UTC') if tz is None: - _check_dts_bounds(&obj.dts) + check_dts_bounds(&obj.dts) return obj else: # Keep the converter same as PyDateTime's @@ -1810,7 +1812,7 @@ def _test_parse_iso8601(object ts): _string_to_dts(ts, &obj.dts, &out_local, &out_tzoffset) obj.value = pandas_datetimestruct_to_datetime(PANDAS_FR_ns, &obj.dts) - _check_dts_bounds(&obj.dts) + check_dts_bounds(&obj.dts) if out_local == 1: obj.tzinfo = pytz.FixedOffset(out_tzoffset) obj.value = tz_convert_single(obj.value, obj.tzinfo, 'UTC') @@ -1897,18 +1899,6 @@ cpdef inline object _localize_pydatetime(object dt, object tz): return dt.replace(tzinfo=tz) -class OutOfBoundsDatetime(ValueError): - pass - -cdef inline _check_dts_bounds(pandas_datetimestruct *dts): - if check_dts_bounds(dts): - fmt = '%d-%.2d-%.2d %.2d:%.2d:%.2d' % (dts.year, dts.month, - dts.day, dts.hour, - dts.min, dts.sec) - raise OutOfBoundsDatetime( - 'Out of bounds nanosecond timestamp: %s' % fmt) - - def datetime_to_datetime64(ndarray[object] values): cdef: Py_ssize_t i, n = len(values) @@ -1933,13 +1923,13 @@ def datetime_to_datetime64(ndarray[object] values): _ts = convert_datetime_to_tsobject(val, None) iresult[i] = _ts.value - _check_dts_bounds(&_ts.dts) + check_dts_bounds(&_ts.dts) else: if inferred_tz is not None: raise ValueError('Cannot mix tz-aware with ' 'tz-naive values') iresult[i] = _pydatetime_to_dts(val, &dts) - _check_dts_bounds(&dts) + check_dts_bounds(&dts) else: raise TypeError('Unrecognized value type: %s' % type(val)) @@ -2252,7 +2242,7 @@ cpdef array_to_datetime(ndarray[object] values, errors='raise', _ts = convert_datetime_to_tsobject(val, None) iresult[i] = _ts.value try: - _check_dts_bounds(&_ts.dts) + check_dts_bounds(&_ts.dts) except ValueError: if is_coerce: iresult[i] = NPY_NAT @@ -2267,7 +2257,7 @@ cpdef 
array_to_datetime(ndarray[object] values, errors='raise', if is_timestamp(val): iresult[i] += val.nanosecond try: - _check_dts_bounds(&dts) + check_dts_bounds(&dts) except ValueError: if is_coerce: iresult[i] = NPY_NAT @@ -2277,7 +2267,7 @@ cpdef array_to_datetime(ndarray[object] values, errors='raise', elif PyDate_Check(val): iresult[i] = _date_to_datetime64(val, &dts) try: - _check_dts_bounds(&dts) + check_dts_bounds(&dts) seen_datetime = 1 except ValueError: if is_coerce: @@ -2334,7 +2324,7 @@ cpdef array_to_datetime(ndarray[object] values, errors='raise', tz = pytz.FixedOffset(out_tzoffset) value = tz_convert_single(value, tz, 'UTC') iresult[i] = value - _check_dts_bounds(&dts) + check_dts_bounds(&dts) except ValueError: # if requiring iso8601 strings, skip trying other formats if require_iso8601: @@ -2433,7 +2423,7 @@ cpdef array_to_datetime(ndarray[object] values, errors='raise', oresult[i] = parse_datetime_string(val, dayfirst=dayfirst, yearfirst=yearfirst) _pydatetime_to_dts(oresult[i], &dts) - _check_dts_bounds(&dts) + check_dts_bounds(&dts) except Exception: if is_raise: raise @@ -3239,7 +3229,7 @@ cdef inline _get_datetime64_nanos(object val): if unit != PANDAS_FR_ns: pandas_datetime_to_datetimestruct(ival, unit, &dts) - _check_dts_bounds(&dts) + check_dts_bounds(&dts) return pandas_datetimestruct_to_datetime(PANDAS_FR_ns, &dts) else: return ival @@ -3267,7 +3257,7 @@ def cast_to_nanoseconds(ndarray arr): if ivalues[i] != NPY_NAT: pandas_datetime_to_datetimestruct(ivalues[i], unit, &dts) iresult[i] = pandas_datetimestruct_to_datetime(PANDAS_FR_ns, &dts) - _check_dts_bounds(&dts) + check_dts_bounds(&dts) else: iresult[i] = NPY_NAT diff --git a/pandas/_libs/tslibs/np_datetime.pxd b/pandas/_libs/tslibs/np_datetime.pxd new file mode 100644 index 0000000000000..d4079aae68900 --- /dev/null +++ b/pandas/_libs/tslibs/np_datetime.pxd @@ -0,0 +1,16 @@ +# -*- coding: utf-8 -*- +# cython: profile=False + +from numpy cimport int64_t, int32_t + + +cdef extern from "../src/datetime/np_datetime.h": + ctypedef struct pandas_datetimestruct: + int64_t year + int32_t month, day, hour, min, sec, us, ps, as + + +cdef check_dts_bounds(pandas_datetimestruct *dts) + +cdef int64_t dtstruct_to_dt64(pandas_datetimestruct* dts) nogil +cdef void dt64_to_dtstruct(int64_t dt64, pandas_datetimestruct* out) nogil diff --git a/pandas/_libs/tslibs/np_datetime.pyx b/pandas/_libs/tslibs/np_datetime.pyx new file mode 100644 index 0000000000000..38158c816812d --- /dev/null +++ b/pandas/_libs/tslibs/np_datetime.pyx @@ -0,0 +1,82 @@ +# -*- coding: utf-8 -*- +# cython: profile=False + +from numpy cimport int64_t + +cdef extern from "numpy/ndarrayobject.h": + ctypedef int64_t npy_timedelta + ctypedef int64_t npy_datetime + +cdef extern from "../src/datetime/np_datetime.h": + ctypedef enum PANDAS_DATETIMEUNIT: + PANDAS_FR_Y + PANDAS_FR_M + PANDAS_FR_W + PANDAS_FR_D + PANDAS_FR_B + PANDAS_FR_h + PANDAS_FR_m + PANDAS_FR_s + PANDAS_FR_ms + PANDAS_FR_us + PANDAS_FR_ns + PANDAS_FR_ps + PANDAS_FR_fs + PANDAS_FR_as + + int cmp_pandas_datetimestruct(pandas_datetimestruct *a, + pandas_datetimestruct *b) + + npy_datetime pandas_datetimestruct_to_datetime(PANDAS_DATETIMEUNIT fr, + pandas_datetimestruct *d + ) nogil + + void pandas_datetime_to_datetimestruct(npy_datetime val, + PANDAS_DATETIMEUNIT fr, + pandas_datetimestruct *result) nogil + + pandas_datetimestruct _NS_MIN_DTS, _NS_MAX_DTS + +# ---------------------------------------------------------------------- + + +class OutOfBoundsDatetime(ValueError): + pass + + +cdef inline 
check_dts_bounds(pandas_datetimestruct *dts): + """Raises OutOfBoundsDatetime if the given date is outside the range that + can be represented by nanosecond-resolution 64-bit integers.""" + cdef: + bint error = False + + if (dts.year <= 1677 and + cmp_pandas_datetimestruct(dts, &_NS_MIN_DTS) == -1): + error = True + elif (dts.year >= 2262 and + cmp_pandas_datetimestruct(dts, &_NS_MAX_DTS) == 1): + error = True + + if error: + fmt = '%d-%.2d-%.2d %.2d:%.2d:%.2d' % (dts.year, dts.month, + dts.day, dts.hour, + dts.min, dts.sec) + raise OutOfBoundsDatetime( + 'Out of bounds nanosecond timestamp: {fmt}'.format(fmt=fmt)) + + +# ---------------------------------------------------------------------- +# Conversion + +cdef inline int64_t dtstruct_to_dt64(pandas_datetimestruct* dts) nogil: + """Convenience function to call pandas_datetimestruct_to_datetime + with the by-far-most-common frequency PANDAS_FR_ns""" + return pandas_datetimestruct_to_datetime(PANDAS_FR_ns, dts) + + +cdef inline void dt64_to_dtstruct(int64_t dt64, + pandas_datetimestruct* out) nogil: + """Convenience function to call pandas_datetime_to_datetimestruct + with the by-far-most-common frequency PANDAS_FR_ns""" + pandas_datetime_to_datetimestruct(dt64, PANDAS_FR_ns, out) + return diff --git a/pandas/_libs/tslibs/strptime.pyx b/pandas/_libs/tslibs/strptime.pyx index 59a7376280da0..4a141c7b56428 100644 --- a/pandas/_libs/tslibs/strptime.pyx +++ b/pandas/_libs/tslibs/strptime.pyx @@ -33,12 +33,8 @@ from numpy cimport ndarray, int64_t from datetime import date as datetime_date from datetime cimport datetime -# This is src/datetime.pxd -from datetime cimport ( - PANDAS_FR_ns, - check_dts_bounds, - pandas_datetimestruct, - pandas_datetimestruct_to_datetime) +from np_datetime cimport (check_dts_bounds, + dtstruct_to_dt64, pandas_datetimestruct) from util cimport is_string_object, get_nat @@ -333,18 +329,14 @@ def array_strptime(ndarray[object] values, object fmt, dts.us = us dts.ps = ns * 1000 - iresult[i] = pandas_datetimestruct_to_datetime(PANDAS_FR_ns, &dts) - if check_dts_bounds(&dts): + iresult[i] = dtstruct_to_dt64(&dts) + try: + check_dts_bounds(&dts) + except ValueError: if is_coerce: iresult[i] = NPY_NAT continue - else: - from pandas._libs.tslib import OutOfBoundsDatetime - fmt = '%d-%.2d-%.2d %.2d:%.2d:%.2d' % (dts.year, dts.month, - dts.day, dts.hour, - dts.min, dts.sec) - raise OutOfBoundsDatetime( - 'Out of bounds nanosecond timestamp: %s' % fmt) + raise return result diff --git a/setup.py b/setup.py index 8b3ae40f01a10..2843ab6587412 100755 --- a/setup.py +++ b/setup.py @@ -344,6 +344,7 @@ class CheckSDist(sdist_class): 'pandas/_libs/sparse.pyx', 'pandas/_libs/parsers.pyx', 'pandas/_libs/tslibs/strptime.pyx', + 'pandas/_libs/tslibs/np_datetime.pyx', 'pandas/_libs/tslibs/timedeltas.pyx', 'pandas/_libs/tslibs/timezones.pyx', 'pandas/_libs/tslibs/fields.pyx', @@ -469,12 +470,11 @@ def pxd(name): 'pandas/_libs/src/parse_helper.h', 'pandas/_libs/src/compat_helper.h'] - -tseries_depends = ['pandas/_libs/src/datetime/np_datetime.h', - 'pandas/_libs/src/datetime/np_datetime_strings.h', - 'pandas/_libs/src/datetime.pxd'] -npdt_srces = ['pandas/_libs/src/datetime/np_datetime.c', - 'pandas/_libs/src/datetime/np_datetime_strings.c'] +np_datetime_headers = ['pandas/_libs/src/datetime/np_datetime.h', + 'pandas/_libs/src/datetime/np_datetime_strings.h'] +np_datetime_sources = ['pandas/_libs/src/datetime/np_datetime.c', + 'pandas/_libs/src/datetime/np_datetime_strings.c'] +tseries_depends = np_datetime_headers + 
['pandas/_libs/src/datetime.pxd'] # some linux distros require it libraries = ['m'] if not is_platform_windows() else [] @@ -489,28 +489,31 @@ def pxd(name): _pxi_dep['hashtable'])}, '_libs.tslibs.strptime': {'pyxfile': '_libs/tslibs/strptime', 'depends': tseries_depends, - 'sources': npdt_srces}, + 'sources': np_datetime_sources}, '_libs.tslibs.offsets': {'pyxfile': '_libs/tslibs/offsets'}, '_libs.tslib': {'pyxfile': '_libs/tslib', 'pxdfiles': ['_libs/src/util', '_libs/lib'], 'depends': tseries_depends, - 'sources': npdt_srces}, + 'sources': np_datetime_sources}, + '_libs.tslibs.np_datetime': {'pyxfile': '_libs/tslibs/np_datetime', + 'depends': np_datetime_headers, + 'sources': np_datetime_sources}, '_libs.tslibs.timedeltas': {'pyxfile': '_libs/tslibs/timedeltas'}, '_libs.tslibs.timezones': {'pyxfile': '_libs/tslibs/timezones'}, '_libs.tslibs.fields': {'pyxfile': '_libs/tslibs/fields', 'depends': tseries_depends, - 'sources': npdt_srces}, + 'sources': np_datetime_sources}, '_libs.period': {'pyxfile': '_libs/period', 'depends': (tseries_depends + ['pandas/_libs/src/period_helper.h']), - 'sources': npdt_srces + [ + 'sources': np_datetime_sources + [ 'pandas/_libs/src/period_helper.c']}, '_libs.tslibs.parsing': {'pyxfile': '_libs/tslibs/parsing', 'pxdfiles': ['_libs/src/util']}, '_libs.tslibs.frequencies': {'pyxfile': '_libs/tslibs/frequencies', 'pxdfiles': ['_libs/src/util']}, '_libs.index': {'pyxfile': '_libs/index', - 'sources': npdt_srces, + 'sources': np_datetime_sources, 'pxdfiles': ['_libs/src/util', '_libs/hashtable'], 'depends': _pxi_dep['index']}, '_libs.algos': {'pyxfile': '_libs/algos', @@ -623,7 +626,7 @@ def pxd(name): 'pandas/_libs/src/ujson/python/JSONtoObj.c', 'pandas/_libs/src/ujson/lib/ultrajsonenc.c', 'pandas/_libs/src/ujson/lib/ultrajsondec.c'] + - npdt_srces), + np_datetime_sources), include_dirs=(['pandas/_libs/src/ujson/python', 'pandas/_libs/src/ujson/lib', 'pandas/_libs/src/datetime'] +
`npy_dtime` is a `util`-like module for `tslibs`. Actually, a better analogy is src/datetime.pxd, which it is ultimately intended to replace. Upcoming steps require that `_check_dts_bounds` and `OutOfBoundsDatetime` live in a module upstream from `tslib`. The exception `OutOfBoundsDatetime` cannot be defined in a pxd file, so this can't just be shunted into src/datetime.pxd. An advantage of using a .pyx file instead of a .pxd file is that the dependency specification in setup.py gets less fragile: after updating e.g. `tslibs.fields` to `cimport` from `npy_dtime`, it will no longer need `tseries_depends`, `pandas/_libs/src/datetime/np_datetime.c`, etc. in its `ext_data` entry. This also implements two small convenience functions, `dtstruct_to_dt64` and `dt64_to_dtstruct`, that pass through to `pandas_datetimestruct_to_datetime` and `pandas_datetime_to_datetimestruct` respectively in the overwhelmingly-most-common case where `unit=PANDAS_FR_ns`. To keep the footprint small, this PR only demonstrates their usage in one location, in `strptime`; the result is a lot less verbose and gets rid of a lot of unsightly wrapped lines.
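Not the cdef-level API itself, but a quick Python-level sketch of the window `check_dts_bounds` enforces (assuming pandas >= 0.20, where the exception is re-exported as `pandas.errors.OutOfBoundsDatetime`):

```python
import pandas as pd
from pandas.errors import OutOfBoundsDatetime

# int64 nanoseconds since the epoch can only represent dates between
# roughly 1677-09-21 and 2262-04-11; check_dts_bounds guards exactly
# that window and raises OutOfBoundsDatetime outside it.
pd.Timestamp('2200-01-01')        # well inside the window, fine

try:
    pd.Timestamp('2263-01-01')    # past the nanosecond-resolution max
except OutOfBoundsDatetime as exc:
    print(exc)                    # Out of bounds nanosecond timestamp: ...
```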
https://api.github.com/repos/pandas-dev/pandas/pulls/17805
2017-10-06T16:50:38Z
2017-10-29T21:36:55Z
2017-10-29T21:36:55Z
2017-12-08T19:39:07Z
BLD: fix setup.py for xref #17798
diff --git a/setup.py b/setup.py index 23457c6f4edc1..365d387dc54d6 100755 --- a/setup.py +++ b/setup.py @@ -720,7 +720,7 @@ def pxd(name): 'sas/data/*.sas7bdat', 'data/*.html', 'data/html_encoding/*.html', - 'json/data/*.json'], + 'json/data/*.json*'], 'pandas.tests.io.formats': ['data/*.csv'], 'pandas.tests.io.msgpack': ['data/*.mp'], 'pandas.tests.reshape': ['data/*.csv'],
xref #17798
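The widened glob matters because `*.json` misses the compressed fixture (`tsframe_v012.json.zip`) that #17798 added; a minimal sketch of the difference using `fnmatch`, which follows roughly the same wildcard rules as `package_data` patterns:

```python
import fnmatch

files = ['tsframe_v012.json', 'tsframe_v012.json.zip']
print(fnmatch.filter(files, '*.json'))    # ['tsframe_v012.json'] only
print(fnmatch.filter(files, '*.json*'))   # matches both, .zip included
```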
https://api.github.com/repos/pandas-dev/pandas/pulls/17804
2017-10-06T14:17:14Z
2017-10-06T15:22:34Z
2017-10-06T15:22:34Z
2017-10-06T15:46:42Z
DOC: sub-section on boolean Index array changes
diff --git a/doc/source/whatsnew/v0.21.0.txt b/doc/source/whatsnew/v0.21.0.txt index 94e4700a59f24..e6a8b070f9c44 100644 --- a/doc/source/whatsnew/v0.21.0.txt +++ b/doc/source/whatsnew/v0.21.0.txt @@ -356,6 +356,61 @@ Selection with all keys found is unchanged. s.loc[[1, 2]] +.. _whatsnew_0210.api_breaking.loc_with_index: + +Indexing with a Boolean Index +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Previously when passing a boolean ``Index`` to ``.loc``, if the index of the ``Series/DataFrame`` had ``boolean`` labels, +you would get a label based selection, potentially duplicating result labels, rather than a boolean indexing selection +(where ``True`` selects elements), this was inconsistent how a boolean numpy array indexed. The new behavior is to +act like a boolean numpy array indexer. (:issue:`17738`) + +Previous Behavior: + +.. ipython:: python + + s = pd.Series([1, 2, 3], index=[False, True, False]) + s + +.. code-block:: ipython + + In [59]: s.loc[pd.Index([True, False, True])] + Out[59]: + True 2 + False 1 + False 3 + True 2 + dtype: int64 + +Current Behavior + +.. ipython:: python + + s.loc[pd.Index([True, False, True])] + + +Furthermore, previously if you had an index that was non-numeric (e.g. strings), then a boolean Index would raise a ``KeyError``. +This will now be treated as a boolean indexer. + +Previously Behavior: + +.. ipython:: python + + s = pd.Series([1,2,3], index=['a', 'b', 'c']) + s + +.. code-block:: ipython + + In [39]: s.loc[pd.Index([True, False, True])] + KeyError: "None of [Index([True, False, True], dtype='object')] are in the [index]" + +Current Behavior + +.. ipython:: python + + s.loc[pd.Index([True, False, True])] + .. _whatsnew_0210.api_breaking.pandas_eval:
closes #17740
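A runnable sketch of the new rule described in the whatsnew text above (a boolean `Index` passed to `.loc` now acts as a boolean mask, even against non-boolean labels):

```python
import pandas as pd

s = pd.Series([1, 2, 3], index=['a', 'b', 'c'])

# Previously this raised KeyError because True/False are not labels of
# the index; now the boolean Index is treated as a mask and selects the
# 'a' and 'c' rows.
print(s.loc[pd.Index([True, False, True])])
```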
https://api.github.com/repos/pandas-dev/pandas/pulls/17803
2017-10-06T10:05:54Z
2017-10-06T15:37:25Z
2017-10-06T15:37:25Z
2017-10-06T15:39:01Z
DOC: some typos in whatsnew
diff --git a/doc/source/remote_data.rst b/doc/source/remote_data.rst index 9af66058a7aaa..aba70ccbcc9fb 100644 --- a/doc/source/remote_data.rst +++ b/doc/source/remote_data.rst @@ -13,18 +13,18 @@ DataReader The sub-package ``pandas.io.data`` was deprecated in v.0.17 and removed in `v.0.19 <http://pandas-docs.github.io/pandas-docs-travis/whatsnew.html#v0-19-0-october-2-2016>`__. - Instead there has been created a separately installable `pandas-datareader package -<https://github.com/pydata/pandas-datareader>`_. This will allow the data -modules to be independently updated on your pandas installation. +Instead there has been created a separately installable +`pandas-datareader package <https://github.com/pydata/pandas-datareader>`__. +This will allow the data modules to be independently updated on your pandas installation. - For code older than < 0.19 you should replace the imports of the following: +For code older than < 0.19 you should replace the imports of the following: - .. code-block:: python +.. code-block:: python - from pandas.io import data, wb + from pandas.io import data, wb - With: +With: - .. code-block:: python +.. code-block:: python - from pandas_datareader import data, wb + from pandas_datareader import data, wb diff --git a/doc/source/whatsnew/v0.21.0.txt b/doc/source/whatsnew/v0.21.0.txt index 5f0af8859a133..94e4700a59f24 100644 --- a/doc/source/whatsnew/v0.21.0.txt +++ b/doc/source/whatsnew/v0.21.0.txt @@ -344,7 +344,7 @@ Current Behavior 3 NaN dtype: float64 -The idiomatic way to achieve selecting potentially not-found elmenents is via ``.reindex()`` +The idiomatic way to achieve selecting potentially not-found elements is via ``.reindex()`` .. ipython:: python @@ -453,7 +453,7 @@ New Behaviour: Dtype Conversions ^^^^^^^^^^^^^^^^^ -Previously assignments, ``.where()`` and ``.fillna()`` with a ``bool`` assignment, would coerce to same the type (e.g. int / float), or raise for datetimelikes. These will now preseve the bools with ``object`` dtypes. (:issue:`16821`). +Previously assignments, ``.where()`` and ``.fillna()`` with a ``bool`` assignment, would coerce to same the type (e.g. int / float), or raise for datetimelikes. These will now preserve the bools with ``object`` dtypes. (:issue:`16821`). .. ipython:: python @@ -638,7 +638,7 @@ Other API Changes - :func:`read_csv` now treats ``'null'`` strings as missing values by default (:issue:`16471`) - :func:`read_csv` now treats ``'n/a'`` strings as missing values by default (:issue:`16078`) - :class:`pandas.HDFStore`'s string representation is now faster and less detailed. For the previous behavior, use ``pandas.HDFStore.info()``. (:issue:`16503`). -- Compression defaults in HDF stores now follow pytable standards. Default is no compression and if ``complib`` is missing and ``complevel`` > 0 ``zlib`` is used (:issue:`15943`) +- Compression defaults in HDF stores now follow pytables standards. Default is no compression and if ``complib`` is missing and ``complevel`` > 0 ``zlib`` is used (:issue:`15943`) - ``Index.get_indexer_non_unique()`` now returns a ndarray indexer rather than an ``Index``; this is consistent with ``Index.get_indexer()`` (:issue:`16819`) - Removed the ``@slow`` decorator from ``pandas.util.testing``, which caused issues for some downstream packages' test suites. Use ``@pytest.mark.slow`` instead, which achieves the same thing (:issue:`16850`) - Moved definition of ``MergeError`` to the ``pandas.errors`` module. 
@@ -666,7 +666,7 @@ Deprecations - ``cdate_range`` has been deprecated in favor of :func:`bdate_range`, which has gained ``weekmask`` and ``holidays`` parameters for building custom frequency date ranges. See the :ref:`documentation <timeseries.custom-freq-ranges>` for more details (:issue:`17596`) - passing ``categories`` or ``ordered`` kwargs to :func:`Series.astype` is deprecated, in favor of passing a :ref:`CategoricalDtype <whatsnew_0210.enhancements.categorical_dtype>` (:issue:`17636`) - ``.get_value`` and ``.set_value`` on ``Series``, ``DataFrame``, ``Panel``, ``SparseSeries``, and ``SparseDataFrame`` are deprecated in favor of using ``.iat[]`` or ``.at[]`` accessors (:issue:`15269`) -- Passing a non-existant column in ``.to_excel(..., columns=)`` is deprecated and will raise a ``KeyError`` in the future (:issue:`17295`) +- Passing a non-existent column in ``.to_excel(..., columns=)`` is deprecated and will raise a ``KeyError`` in the future (:issue:`17295`) - ``raise_on_error`` parameter to :func:`Series.where`, :func:`Series.mask`, :func:`DataFrame.where`, :func:`DataFrame.mask` is deprecated, in favor of ``errors=`` (:issue:`14968`) .. _whatsnew_0210.deprecations.select: @@ -768,7 +768,7 @@ Conversion - Bug in :attr:`Timestamp.weekday_name` returning a UTC-based weekday name when localized to a timezone (:issue:`17354`) - Bug in ``Timestamp.replace`` when replacing ``tzinfo`` around DST changes (:issue:`15683`) - Bug in ``Timedelta`` construction and arithmetic that would not propagate the ``Overflow`` exception (:issue:`17367`) -- Bug in :meth:`~DataFrame.astype` converting to object dtype when passeed extension type classes (`DatetimeTZDtype``, ``CategoricalDtype``) rather than instances. Now a ``TypeError`` is raised when a class is passed (:issue:`17780`). +- Bug in :meth:`~DataFrame.astype` converting to object dtype when passed extension type classes (`DatetimeTZDtype``, ``CategoricalDtype``) rather than instances. Now a ``TypeError`` is raised when a class is passed (:issue:`17780`). Indexing ^^^^^^^^ @@ -827,7 +827,7 @@ Groupby/Resample/Rolling - Bug in ``.rolling(...).quantile()`` which incorrectly used different defaults than :func:`Series.quantile()` and :func:`DataFrame.quantile()` (:issue:`9413`, :issue:`16211`) - Bug in ``groupby.transform()`` that would coerce boolean dtypes back to float (:issue:`16875`) - Bug in ``Series.resample(...).apply()`` where an empty ``Series`` modified the source index and did not return the name of a ``Series`` (:issue:`14313`) -- Bug in ``.rolling(...).apply(...)`` with a ``DataFrame`` with a ``DatetimeIndex``, a ``window`` of a timedelta-convertible and ``min_periods >= 1` (:issue:`15305`) +- Bug in ``.rolling(...).apply(...)`` with a ``DataFrame`` with a ``DatetimeIndex``, a ``window`` of a timedelta-convertible and ``min_periods >= 1`` (:issue:`15305`) - Bug in ``DataFrame.groupby`` where index and column keys were not recognized correctly when the number of keys equaled the number of elements on the groupby axis (:issue:`16859`) - Bug in ``groupby.nunique()`` with ``TimeGrouper`` which cannot handle ``NaT`` correctly (:issue:`17575`) - Bug in ``DataFrame.groupby`` where a single level selection from a ``MultiIndex`` unexpectedly sorts (:issue:`17537`)
https://api.github.com/repos/pandas-dev/pandas/pulls/17802
2017-10-06T07:11:25Z
2017-10-06T08:03:30Z
2017-10-06T08:03:30Z
2017-10-06T08:06:57Z
Closes #17788: Fix resample's deprecated `how` parameter documentation
diff --git a/doc/source/timeseries.rst b/doc/source/timeseries.rst index d2d5ee344591a..36ffe8806f373 100644 --- a/doc/source/timeseries.rst +++ b/doc/source/timeseries.rst @@ -1456,8 +1456,9 @@ The ``resample`` function is very flexible and allows you to specify many different parameters to control the frequency conversion and resampling operation. -The ``how`` parameter can be a function name or numpy array function that takes -an array and produces aggregated values: +Any function available via :ref:`dispatching <groupby.dispatch>` is available as +a method of the returned object, including ``sum``, ``mean``, ``std``, ``sem``, +``max``, ``min``, ``median``, ``first``, ``last``, ``ohlc``: .. ipython:: python @@ -1467,9 +1468,6 @@ an array and produces aggregated values: ts.resample('5Min').max() -Any function available via :ref:`dispatching <groupby.dispatch>` can be given to -the ``how`` parameter by name, including ``sum``, ``mean``, ``std``, ``sem``, -``max``, ``min``, ``median``, ``first``, ``last``, ``ohlc``. For downsampling, ``closed`` can be set to 'left' or 'right' to specify which end of the interval is closed:
Doc-only pull request that closes #17788.
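For context, a small sketch of the method-based spelling the updated docs now describe (the series construction here is illustrative):

```python
import numpy as np
import pandas as pd

rng = pd.date_range('1/1/2012', periods=100, freq='S')
ts = pd.Series(np.random.randint(0, 500, len(rng)), index=rng)

# The deprecated ts.resample('5Min', how='max') spelling is replaced by
# calling the dispatched method on the resampler object:
print(ts.resample('5Min').max())
```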
https://api.github.com/repos/pandas-dev/pandas/pulls/17801
2017-10-05T23:48:03Z
2017-10-06T01:47:13Z
2017-10-06T01:47:13Z
2017-10-06T11:05:42Z
API: Added axis argument to rename, reindex
diff --git a/doc/source/basics.rst b/doc/source/basics.rst index 0990d2bd15ee6..be9d1a5d83b85 100644 --- a/doc/source/basics.rst +++ b/doc/source/basics.rst @@ -1217,6 +1217,15 @@ following can be done: This means that the reindexed Series's index is the same Python object as the DataFrame's index. +.. versionadded:: 0.21.0 + +:meth:`DataFrame.reindex` also supports an "axis-style" calling convention, +where you specify a single ``labels`` argument and the ``axis`` it applies to. + +.. ipython:: python + + df.reindex(['c', 'f', 'b'], axis='index') + df.reindex(['three', 'two', 'one'], axis='columns') .. seealso:: @@ -1413,12 +1422,23 @@ Series can also be used: .. ipython:: python - df.rename(columns={'one' : 'foo', 'two' : 'bar'}, - index={'a' : 'apple', 'b' : 'banana', 'd' : 'durian'}) + df.rename(columns={'one': 'foo', 'two': 'bar'}, + index={'a': 'apple', 'b': 'banana', 'd': 'durian'}) If the mapping doesn't include a column/index label, it isn't renamed. Also extra labels in the mapping don't throw an error. +.. versionadded:: 0.21.0 + +:meth:`DataFrame.rename` also supports an "axis-style" calling convention, where +you specify a single ``mapper`` and the ``axis`` to apply that mapping to. + +.. ipython:: python + + df.rename({'one': 'foo', 'two': 'bar'}, axis='columns'}) + df.rename({'a': 'apple', 'b': 'banana', 'd': 'durian'}, axis='columns'}) + + The :meth:`~DataFrame.rename` method also provides an ``inplace`` named parameter that is by default ``False`` and copies the underlying data. Pass ``inplace=True`` to rename the data in place. diff --git a/doc/source/whatsnew/v0.21.0.txt b/doc/source/whatsnew/v0.21.0.txt index 1c4af579d16dc..f04410ef63531 100644 --- a/doc/source/whatsnew/v0.21.0.txt +++ b/doc/source/whatsnew/v0.21.0.txt @@ -111,6 +111,40 @@ For example: # the following is now equivalent df.drop(columns=['B', 'C']) +.. _whatsnew_0210.enhancements.rename_reindex_axis: + +``rename``, ``reindex`` now also accept axis keyword +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +The :meth:`DataFrame.rename` and :meth:`DataFrame.reindex` methods have gained +the ``axis`` keyword to specify the axis to target with the operation +(:issue:`12392`). + +Here's ``rename``: + +.. ipython:: python + + df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]}) + df.rename(str.lower, axis='columns') + df.rename(id, axis='index') + +And ``reindex``: + +.. ipython:: python + + df.reindex(['A', 'B', 'C'], axis='columns') + df.reindex([0, 1, 3], axis='index') + +The "index, columns" style continues to work as before. + +.. ipython:: python + + df.rename(index=id, columns=str.lower) + df.reindex(index=[0, 1, 3], columns=['A', 'B', 'C']) + +We *highly* encourage using named arguments to avoid confusion when using either +style. + .. 
_whatsnew_0210.enhancements.categorical_dtype: ``CategoricalDtype`` for specifying categoricals diff --git a/pandas/core/frame.py b/pandas/core/frame.py index c536cc9f2b82c..94ff70f287fbe 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -65,6 +65,7 @@ _values_from_object, _maybe_box_datetimelike, _dict_compat, + _all_not_none, standardize_mapping) from pandas.core.generic import NDFrame, _shared_docs from pandas.core.index import (Index, MultiIndex, _ensure_index, @@ -111,7 +112,13 @@ optional_by=""" by : str or list of str Name or list of names which refer to the axis items.""", - versionadded_to_excel='') + versionadded_to_excel='', + optional_labels="""labels : array-like, optional + New labels / index to conform the axis specified by 'axis' to.""", + optional_axis="""axis : int or str, optional + Axis to target. Can be either the axis name ('index', 'columns') + or number (0, 1).""", +) _numeric_only_doc = """numeric_only : boolean, default None Include only float, int, boolean data. If None, will attempt to use @@ -2776,6 +2783,47 @@ def reindexer(value): return np.atleast_2d(np.asarray(value)) + def _validate_axis_style_args(self, arg, arg_name, index, columns, + axis, method_name): + if axis is not None: + # Using "axis" style, along with a positional arg + # Both index and columns should be None then + axis = self._get_axis_name(axis) + if index is not None or columns is not None: + msg = ( + "Can't specify both 'axis' and 'index' or 'columns'. " + "Specify either\n" + "\t.{method_name}.rename({arg_name}, axis=axis), or\n" + "\t.{method_name}.rename(index=index, columns=columns)" + ).format(arg_name=arg_name, method_name=method_name) + raise TypeError(msg) + if axis == 'index': + index = arg + elif axis == 'columns': + columns = arg + + elif _all_not_none(arg, index, columns): + msg = ( + "Cannot specify all of '{arg_name}', 'index', and 'columns'. " + "Specify either {arg_name} and 'axis', or 'index' and " + "'columns'." + ).format(arg_name=arg_name) + raise TypeError(msg) + + elif _all_not_none(arg, index): + # This is the "ambiguous" case, so emit a warning + msg = ( + "Interpreting call to '.{method_name}(a, b)' as " + "'.{method_name}(index=a, columns=b)'. " + "Use keyword arguments to remove any ambiguity." + ).format(method_name=method_name) + warnings.warn(msg, stacklevel=3) + index, columns = arg, index + elif index is None: + # This is for the default axis, like reindex([0, 1]) + index = arg + return index, columns + @property def _series(self): result = {} @@ -2902,7 +2950,11 @@ def align(self, other, join='outer', axis=None, level=None, copy=True, broadcast_axis=broadcast_axis) @Appender(_shared_docs['reindex'] % _shared_doc_kwargs) - def reindex(self, index=None, columns=None, **kwargs): + def reindex(self, labels=None, index=None, columns=None, axis=None, + **kwargs): + index, columns = self._validate_axis_style_args(labels, 'labels', + index, columns, + axis, 'reindex') return super(DataFrame, self).reindex(index=index, columns=columns, **kwargs) @@ -2914,8 +2966,84 @@ def reindex_axis(self, labels, axis=0, method=None, level=None, copy=True, method=method, level=level, copy=copy, limit=limit, fill_value=fill_value) - @Appender(_shared_docs['rename'] % _shared_doc_kwargs) - def rename(self, index=None, columns=None, **kwargs): + def rename(self, mapper=None, index=None, columns=None, axis=None, + **kwargs): + """Alter axes labels. + + Function / dict values must be unique (1-to-1). Labels not contained in + a dict / Series will be left as-is. 
Extra labels listed don't throw an + error. + + See the :ref:`user guide <basics.rename>` for more. + + Parameters + ---------- + mapper, index, columns : dict-like or function, optional + dict-like or functions transformations to apply to + that axis' values. Use either ``mapper`` and ``axis`` to + specify the axis to target with ``mapper``, or ``index`` and + ``columns``. + axis : int or str, optional + Axis to target with ``mapper``. Can be either the axis name + ('index', 'columns') or number (0, 1). The default is 'index'. + copy : boolean, default True + Also copy underlying data + inplace : boolean, default False + Whether to return a new %(klass)s. If True then value of copy is + ignored. + level : int or level name, default None + In case of a MultiIndex, only rename labels in the specified + level. + + Returns + ------- + renamed : DataFrame + + See Also + -------- + pandas.DataFrame.rename_axis + + Examples + -------- + + ``DataFrame.rename`` supports two calling conventions + + * ``(index=index_mapper, columns=columns_mapper, ...) + * ``(mapper, axis={'index', 'columns'}, ...) + + We *highly* recommend using keyword arguments to clarify your + intent. + + >>> df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]}) + >>> df.rename(index=str, columns={"A": "a", "B": "c"}) + a c + 0 1 4 + 1 2 5 + 2 3 6 + + >>> df.rename(index=str, columns={"A": "a", "C": "c"}) + a B + 0 1 4 + 1 2 5 + 2 3 6 + + Using axis-style parameters + + >>> df.rename(str.lower, axis='columns') + a b + 0 1 4 + 1 2 5 + 2 3 6 + + >>> df.rename({1: 2, 2: 4}, axis='index') + A B + 0 1 4 + 2 2 5 + 4 3 6 + """ + index, columns = self._validate_axis_style_args(mapper, 'mapper', + index, columns, + axis, 'rename') return super(DataFrame, self).rename(index=index, columns=columns, **kwargs) diff --git a/pandas/core/generic.py b/pandas/core/generic.py index bc0f10a3f79ab..9d9d8334fcaf4 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -742,11 +742,13 @@ def swaplevel(self, i=-2, j=-1, axis=0): Parameters ---------- + %(optional_mapper)s %(axes)s : scalar, list-like, dict-like or function, optional Scalar or list-like will alter the ``Series.name`` attribute, and raise on DataFrame or Panel. dict-like or functions are transformations to apply to that axis' values + %(optional_axis)s copy : boolean, default True Also copy underlying data inplace : boolean, default False @@ -766,6 +768,7 @@ def swaplevel(self, i=-2, j=-1, axis=0): Examples -------- + >>> s = pd.Series([1, 2, 3]) >>> s 0 1 @@ -787,27 +790,58 @@ def swaplevel(self, i=-2, j=-1, axis=0): 3 2 5 3 dtype: int64 + + Since ``DataFrame`` doesn't have a ``.name`` attribute, + only mapping-type arguments are allowed. + >>> df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]}) >>> df.rename(2) Traceback (most recent call last): ... TypeError: 'int' object is not callable + + ``DataFrame.rename`` supports two calling conventions + + * ``(index=index_mapper, columns=columns_mapper, ...) + * ``(mapper, axis={'index', 'columns'}, ...) + + We *highly* recommend using keyword arguments to clarify your + intent. + >>> df.rename(index=str, columns={"A": "a", "B": "c"}) a c 0 1 4 1 2 5 2 3 6 + >>> df.rename(index=str, columns={"A": "a", "C": "c"}) a B 0 1 4 1 2 5 2 3 6 + + Using axis-style parameters + + >>> df.rename(str.lower, axis='columns') + a b + 0 1 4 + 1 2 5 + 2 3 6 + + >>> df.rename({1: 2, 2: 4}, axis='index') + A B + 0 1 4 + 2 2 5 + 4 3 6 + + See the :ref:`user guide <basics.rename>` for more. 
""" @Appender(_shared_docs['rename'] % dict(axes='axes keywords for this' - ' object', klass='NDFrame')) + ' object', klass='NDFrame', + optional_mapper='', + optional_axis='')) def rename(self, *args, **kwargs): - axes, kwargs = self._construct_axes_from_arguments(args, kwargs) copy = kwargs.pop('copy', True) inplace = kwargs.pop('inplace', False) @@ -886,6 +920,7 @@ def rename_axis(self, mapper, axis=0, copy=True, inplace=False): Examples -------- + >>> df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]}) >>> df.rename_axis("foo") # scalar, alters df.index.name A B @@ -2746,10 +2781,11 @@ def sort_index(self, axis=0, level=None, ascending=True, inplace=False, Parameters ---------- - %(axes)s : array-like, optional (can be specified in order, or as - keywords) + %(optional_labels)s + %(axes)s : array-like, optional (should be specified using keywords) New labels / index to conform to. Preferably an Index object to avoid duplicating data + %(optional_axis)s method : {None, 'backfill'/'bfill', 'pad'/'ffill', 'nearest'}, optional method to use for filling holes in reindexed DataFrame. Please note: this is only applicable to DataFrames/Series with a @@ -2781,6 +2817,14 @@ def sort_index(self, axis=0, level=None, ascending=True, inplace=False, Examples -------- + ``DataFrame.reindex`` supports two calling conventions + + * ``(index=index_labels, columns=column_labels, ...) + * ``(labels, axis={'index', 'columns'}, ...) + + We *highly* recommend using keyword arguments to clarify your + intent. + Create a dataframe with some fictional data. >>> index = ['Firefox', 'Chrome', 'Safari', 'IE10', 'Konqueror'] @@ -2831,6 +2875,26 @@ def sort_index(self, axis=0, level=None, ascending=True, inplace=False, IE10 404 0.08 Chrome 200 0.02 + We can also reindex the columns. + + >>> df.reindex(columns=['http_status', 'user_agent']) + http_status user_agent + Firefox 200 NaN + Chrome 200 NaN + Safari 404 NaN + IE10 404 NaN + Konqueror 301 NaN + + Or we can use "axis-style" keyword arguments + + >>> df.reindex(['http_status', 'user_agent'], axis="columns") + http_status user_agent + Firefox 200 NaN + Chrome 200 NaN + Safari 404 NaN + IE10 404 NaN + Konqueror 301 NaN + To further illustrate the filling functionality in ``reindex``, we will create a dataframe with a monotonically increasing index (for example, a sequence @@ -2893,6 +2957,8 @@ def sort_index(self, axis=0, level=None, ascending=True, inplace=False, desired indexes. If you do want to fill in the ``NaN`` values present in the original dataframe, use the ``fillna()`` method. + See the :ref:`user guide <basics.reindexing>` for more. 
+ Returns ------- reindexed : %(klass)s @@ -2901,7 +2967,9 @@ def sort_index(self, axis=0, level=None, ascending=True, inplace=False, # TODO: Decide if we care about having different examples for different # kinds - @Appender(_shared_docs['reindex'] % dict(axes="axes", klass="NDFrame")) + @Appender(_shared_docs['reindex'] % dict(axes="axes", klass="NDFrame", + optional_labels="", + optional_axis="")) def reindex(self, *args, **kwargs): # construct the args diff --git a/pandas/core/panel.py b/pandas/core/panel.py index 14fba9560cae2..b2f50eaf733d8 100644 --- a/pandas/core/panel.py +++ b/pandas/core/panel.py @@ -39,7 +39,8 @@ _shared_doc_kwargs = dict( axes='items, major_axis, minor_axis', klass="Panel", - axes_single_arg="{0, 1, 2, 'items', 'major_axis', 'minor_axis'}") + axes_single_arg="{0, 1, 2, 'items', 'major_axis', 'minor_axis'}", + optional_mapper='', optional_axis='', optional_labels='') _shared_doc_kwargs['args_transpose'] = ("three positional arguments: each one" "of\n%s" % _shared_doc_kwargs['axes_single_arg']) diff --git a/pandas/core/series.py b/pandas/core/series.py index be4066f0c39b9..93afdc5151b35 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -85,7 +85,7 @@ inplace="""inplace : boolean, default False If True, performs operation inplace and returns None.""", unique='np.ndarray', duplicated='Series', - optional_by='', + optional_by='', optional_mapper='', optional_labels='', optional_axis='', versionadded_to_excel='\n .. versionadded:: 0.20.0\n') @@ -2525,8 +2525,67 @@ def align(self, other, join='outer', axis=None, level=None, copy=True, limit=limit, fill_axis=fill_axis, broadcast_axis=broadcast_axis) - @Appender(generic._shared_docs['rename'] % _shared_doc_kwargs) def rename(self, index=None, **kwargs): + """Alter Series index labels or name + + Function / dict values must be unique (1-to-1). Labels not contained in + a dict / Series will be left as-is. Extra labels listed don't throw an + error. + + Alternatively, change ``Series.name`` with a scalar value. + + See the :ref:`user guide <basics.rename>` for more. + + Parameters + ---------- + index : scalar, hashable sequence, dict-like or function, optional + dict-like or functions are transformations to apply to + the index. + Scalar or hashable sequence-like will alter the ``Series.name`` + attribute. + copy : boolean, default True + Also copy underlying data + inplace : boolean, default False + Whether to return a new %(klass)s. If True then value of copy is + ignored. + level : int or level name, default None + In case of a MultiIndex, only rename labels in the specified + level. 
+ + Returns + ------- + renamed : Series (new object) + + See Also + -------- + pandas.Series.rename_axis + + Examples + -------- + + >>> s = pd.Series([1, 2, 3]) + >>> s + 0 1 + 1 2 + 2 3 + dtype: int64 + >>> s.rename("my_name") # scalar, changes Series.name + 0 1 + 1 2 + 2 3 + Name: my_name, dtype: int64 + >>> s.rename(lambda x: x ** 2) # function, changes labels + 0 1 + 1 2 + 4 3 + dtype: int64 + >>> s.rename({1: 3, 2: 5}) # mapping, changes labels + 0 1 + 3 2 + 5 3 + dtype: int64 + + """ kwargs['inplace'] = validate_bool_kwarg(kwargs.get('inplace', False), 'inplace') diff --git a/pandas/core/sparse/series.py b/pandas/core/sparse/series.py index 3255bd6bd17e8..5c76cca08f609 100644 --- a/pandas/core/sparse/series.py +++ b/pandas/core/sparse/series.py @@ -35,7 +35,8 @@ _shared_doc_kwargs = dict(axes='index', klass='SparseSeries', - axes_single_arg="{0, 'index'}") + axes_single_arg="{0, 'index'}", + optional_labels='', optional_axis='') # ----------------------------------------------------------------------------- # Wrapper function for Series arithmetic methods diff --git a/pandas/tests/frame/test_alter_axes.py b/pandas/tests/frame/test_alter_axes.py index 27906838abb2d..feb32324ff1b1 100644 --- a/pandas/tests/frame/test_alter_axes.py +++ b/pandas/tests/frame/test_alter_axes.py @@ -837,6 +837,106 @@ def test_rename_objects(self): assert 'FOO' in renamed assert 'foo' not in renamed + def test_rename_axis_style(self): + # https://github.com/pandas-dev/pandas/issues/12392 + df = pd.DataFrame({"A": [1, 2], "B": [1, 2]}, index=['X', 'Y']) + expected = pd.DataFrame({"a": [1, 2], "b": [1, 2]}, index=['X', 'Y']) + + result = df.rename(str.lower, axis=1) + assert_frame_equal(result, expected) + + result = df.rename(str.lower, axis='columns') + assert_frame_equal(result, expected) + + result = df.rename({"A": 'a', 'B': 'b'}, axis=1) + assert_frame_equal(result, expected) + + result = df.rename({"A": 'a', 'B': 'b'}, axis='columns') + assert_frame_equal(result, expected) + + # Index + expected = pd.DataFrame({"A": [1, 2], "B": [1, 2]}, index=['x', 'y']) + result = df.rename(str.lower, axis=0) + assert_frame_equal(result, expected) + + result = df.rename(str.lower, axis='index') + assert_frame_equal(result, expected) + + result = df.rename({'X': 'x', 'Y': 'y'}, axis=0) + assert_frame_equal(result, expected) + + result = df.rename({'X': 'x', 'Y': 'y'}, axis='index') + assert_frame_equal(result, expected) + + def test_rename_mapper_multi(self): + df = pd.DataFrame({"A": ['a', 'b'], "B": ['c', 'd'], + 'C': [1, 2]}).set_index(["A", "B"]) + result = df.rename(str.upper) + expected = df.rename(index=str.upper) + assert_frame_equal(result, expected) + + def test_rename_positional_named(self): + # https://github.com/pandas-dev/pandas/issues/12392 + df = pd.DataFrame({"a": [1, 2], "b": [1, 2]}, index=['X', 'Y']) + result = df.rename(str.lower, columns=str.upper) + expected = pd.DataFrame({"A": [1, 2], "B": [1, 2]}, index=['x', 'y']) + assert_frame_equal(result, expected) + + def test_rename_axis_style_raises(self): + # https://github.com/pandas-dev/pandas/issues/12392 + df = pd.DataFrame({"A": [1, 2], "B": [1, 2]}, index=['0', '1']) + + # Named target and axis + with tm.assert_raises_regex(TypeError, None): + df.rename(index=str.lower, axis=1) + + with tm.assert_raises_regex(TypeError, None): + df.rename(index=str.lower, axis='columns') + + with tm.assert_raises_regex(TypeError, None): + df.rename(index=str.lower, axis='columns') + + with tm.assert_raises_regex(TypeError, None): + df.rename(columns=str.lower, 
axis='columns') + + with tm.assert_raises_regex(TypeError, None): + df.rename(index=str.lower, axis=0) + + # Multiple targets and axis + with tm.assert_raises_regex(TypeError, None): + df.rename(str.lower, str.lower, axis='columns') + + # Too many targets + with tm.assert_raises_regex(TypeError, None): + df.rename(str.lower, str.lower, str.lower) + + def test_reindex_api_equivalence(self): + # equivalence of the labels/axis and index/columns API's + df = DataFrame([[1, 2, 3], [3, 4, 5], [5, 6, 7]], + index=['a', 'b', 'c'], + columns=['d', 'e', 'f']) + + res1 = df.reindex(['b', 'a']) + res2 = df.reindex(index=['b', 'a']) + res3 = df.reindex(labels=['b', 'a']) + res4 = df.reindex(labels=['b', 'a'], axis=0) + res5 = df.reindex(['b', 'a'], axis=0) + for res in [res2, res3, res4, res5]: + tm.assert_frame_equal(res1, res) + + res1 = df.reindex(columns=['e', 'd']) + res2 = df.reindex(['e', 'd'], axis=1) + res3 = df.reindex(labels=['e', 'd'], axis=1) + for res in [res2, res3]: + tm.assert_frame_equal(res1, res) + + res1 = df.reindex(index=['b', 'a'], columns=['e', 'd']) + res2 = df.reindex(columns=['e', 'd'], index=['b', 'a']) + res3 = df.reindex(labels=['b', 'a'], axis=0).reindex(labels=['e', 'd'], + axis=1) + for res in [res2, res3]: + tm.assert_frame_equal(res1, res) + def test_assign_columns(self): self.frame['hi'] = 'there' @@ -860,6 +960,14 @@ def test_set_index_preserve_categorical_dtype(self): result = result.reindex(columns=df.columns) tm.assert_frame_equal(result, df) + def test_ambiguous_warns(self): + df = pd.DataFrame({"A": [1, 2]}) + with tm.assert_produces_warning(UserWarning): + df.rename(id, id) + + with tm.assert_produces_warning(UserWarning): + df.rename({0: 10}, {"A": "B"}) + class TestIntervalIndex(object): diff --git a/pandas/tests/frame/test_axis_select_reindex.py b/pandas/tests/frame/test_axis_select_reindex.py index f9a4275d14f55..38ed8ee20bc50 100644 --- a/pandas/tests/frame/test_axis_select_reindex.py +++ b/pandas/tests/frame/test_axis_select_reindex.py @@ -447,6 +447,98 @@ def test_reindex_dups(self): # reindex fails pytest.raises(ValueError, df.reindex, index=list(range(len(df)))) + def test_reindex_axis_style(self): + # https://github.com/pandas-dev/pandas/issues/12392 + df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]}) + expected = pd.DataFrame({"A": [1, 2, np.nan], "B": [4, 5, np.nan]}, + index=[0, 1, 3]) + result = df.reindex([0, 1, 3]) + assert_frame_equal(result, expected) + + result = df.reindex([0, 1, 3], axis=0) + assert_frame_equal(result, expected) + + result = df.reindex([0, 1, 3], axis='index') + assert_frame_equal(result, expected) + + def test_reindex_positional_warns(self): + # https://github.com/pandas-dev/pandas/issues/12392 + df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]}) + expected = pd.DataFrame({"A": [1., 2], 'B': [4., 5], + "C": [np.nan, np.nan]}) + with tm.assert_produces_warning(UserWarning): + result = df.reindex([0, 1], ['A', 'B', 'C']) + + assert_frame_equal(result, expected) + + def test_reindex_axis_style_raises(self): + # https://github.com/pandas-dev/pandas/issues/12392 + df = pd.DataFrame({"A": [1, 2, 3], 'B': [4, 5, 6]}) + with tm.assert_raises_regex(TypeError, 'reindex'): + df.reindex([0, 1], ['A'], axis=1) + + with tm.assert_raises_regex(TypeError, 'reindex'): + df.reindex([0, 1], ['A'], axis='index') + + with tm.assert_raises_regex(TypeError, 'reindex'): + df.reindex(index=[0, 1], axis='index') + + with tm.assert_raises_regex(TypeError, 'reindex'): + df.reindex(index=[0, 1], axis='columns') + + with 
tm.assert_raises_regex(TypeError, 'reindex'): + df.reindex(columns=[0, 1], axis='columns') + + with tm.assert_raises_regex(TypeError, 'reindex'): + df.reindex(index=[0, 1], columns=[0, 1], axis='columns') + + with tm.assert_raises_regex(TypeError, 'Cannot specify all'): + df.reindex([0, 1], [0], ['A']) + + # Mixing styles + with tm.assert_raises_regex(TypeError, 'reindex'): + df.reindex(index=[0, 1], axis='index') + + with tm.assert_raises_regex(TypeError, 'reindex'): + df.reindex(index=[0, 1], axis='columns') + + def test_reindex_single_named_indexer(self): + # https://github.com/pandas-dev/pandas/issues/12392 + df = pd.DataFrame({"A": [1, 2, 3], "B": [1, 2, 3]}) + result = df.reindex([0, 1], columns=['A']) + expected = pd.DataFrame({"A": [1, 2]}) + assert_frame_equal(result, expected) + + def test_reindex_api_equivalence(self): + # https://github.com/pandas-dev/pandas/issues/12392 + # equivalence of the labels/axis and index/columns API's + df = DataFrame([[1, 2, 3], [3, 4, 5], [5, 6, 7]], + index=['a', 'b', 'c'], + columns=['d', 'e', 'f']) + + res1 = df.reindex(['b', 'a']) + res2 = df.reindex(index=['b', 'a']) + res3 = df.reindex(labels=['b', 'a']) + res4 = df.reindex(labels=['b', 'a'], axis=0) + res5 = df.reindex(['b', 'a'], axis=0) + for res in [res2, res3, res4, res5]: + tm.assert_frame_equal(res1, res) + + res1 = df.reindex(columns=['e', 'd']) + res2 = df.reindex(['e', 'd'], axis=1) + res3 = df.reindex(labels=['e', 'd'], axis=1) + for res in [res2, res3]: + tm.assert_frame_equal(res1, res) + + with tm.assert_produces_warning(UserWarning) as m: + res1 = df.reindex(['b', 'a'], ['e', 'd']) + assert 'reindex' in str(m[0].message) + res2 = df.reindex(columns=['e', 'd'], index=['b', 'a']) + res3 = df.reindex(labels=['b', 'a'], axis=0).reindex(labels=['e', 'd'], + axis=1) + for res in [res2, res3]: + tm.assert_frame_equal(res1, res) + def test_align(self): af, bf = self.frame.align(self.frame) assert af._data is not self.frame._data @@ -974,21 +1066,21 @@ def test_reindex_with_nans(self): def test_reindex_multi(self): df = DataFrame(np.random.randn(3, 3)) - result = df.reindex(lrange(4), lrange(4)) + result = df.reindex(index=lrange(4), columns=lrange(4)) expected = df.reindex(lrange(4)).reindex(columns=lrange(4)) assert_frame_equal(result, expected) df = DataFrame(np.random.randint(0, 10, (3, 3))) - result = df.reindex(lrange(4), lrange(4)) + result = df.reindex(index=lrange(4), columns=lrange(4)) expected = df.reindex(lrange(4)).reindex(columns=lrange(4)) assert_frame_equal(result, expected) df = DataFrame(np.random.randint(0, 10, (3, 3))) - result = df.reindex(lrange(2), lrange(2)) + result = df.reindex(index=lrange(2), columns=lrange(2)) expected = df.reindex(lrange(2)).reindex(columns=lrange(2)) assert_frame_equal(result, expected)
xref: https://github.com/pandas-dev/pandas/issues/12392

I want to test this a bit further and clean up the code a bit, but figured I'd put this up as I'm going offline for a few hours. Any comments on the general approach of adding `*args` and then manually validating that the parameters are consistent?
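A short sketch of the two calling conventions the patch enables, mirroring the whatsnew examples:

```python
import pandas as pd

df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})

# axis-style and index/columns-style calls are equivalent:
df.rename(str.lower, axis='columns')  # same as df.rename(columns=str.lower)
df.reindex([0, 1, 3], axis='index')   # same as df.reindex(index=[0, 1, 3])

# mixing the two styles is rejected by _validate_axis_style_args:
try:
    df.rename(index=str.lower, axis=1)
except TypeError as exc:
    print(exc)
```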
https://api.github.com/repos/pandas-dev/pandas/pulls/17800
2017-10-05T22:15:24Z
2017-10-10T15:17:58Z
2017-10-10T15:17:58Z
2017-10-24T13:26:04Z
DOC: Add examples to MultiIndex.slice_locs + note that Index.slice_locs requires a monotonic index
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index c4e1398d0178f..29aace2a94c5a 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -3572,6 +3572,19 @@ def slice_locs(self, start=None, end=None, step=None, kind=None): ------- start, end : int + Notes + ----- + This method only works if the index is monotonic or unique. + + Examples + --------- + >>> idx = pd.Index(list('abcd')) + >>> idx.slice_locs(start='b', end='c') + (1, 3) + + See Also + -------- + Index.get_loc : Get location for a single label """ inc = (step is None or step >= 0) diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py index 9ffac0832062d..f091a2b74596f 100644 --- a/pandas/core/indexes/multi.py +++ b/pandas/core/indexes/multi.py @@ -1924,7 +1924,9 @@ def get_slice_bound(self, label, side, kind): def slice_locs(self, start=None, end=None, step=None, kind=None): """ For an ordered MultiIndex, compute the slice locations for input - labels. They can be tuples representing partial levels, e.g. for a + labels. + + The input labels can be tuples representing partial levels, e.g. for a MultiIndex with 3 levels, you can pass a single value (corresponding to the first level), or a 1-, 2-, or 3-tuple. @@ -1944,7 +1946,32 @@ def slice_locs(self, start=None, end=None, step=None, kind=None): Notes ----- - This function assumes that the data is sorted by the first level + This method only works if the MultiIndex is properly lex-sorted. So, + if only the first 2 levels of a 3-level MultiIndex are lexsorted, + you can only pass two levels to ``.slice_locs``. + + Examples + -------- + >>> mi = pd.MultiIndex.from_arrays([list('abbd'), list('deff')], + ... names=['A', 'B']) + + Get the slice locations from the beginning of 'b' in the first level + until the end of the multiindex: + + >>> mi.slice_locs(start='b') + (1, 4) + + Like above, but stop at the end of 'b' in the first level and 'f' in + the second level: + + >>> mi.slice_locs(start='b', end=('b', 'f')) + (1, 3) + + See Also + -------- + MultiIndex.get_loc : Get location for a label or a tuple of labels. + MultiIndex.get_locs : Get location for a label/slice/list/mask or a + sequence of such. """ # This function adds nothing to its parent implementation (the magic # happens in get_slice_bound method), but it adds meaningful doc. @@ -2015,6 +2042,8 @@ def get_loc(self, key, method=None): See also -------- Index.get_loc : get_loc method for (single-level) index. + MultiIndex.slice_locs : Get slice location given start label(s) and + end label(s). MultiIndex.get_locs : Get location for a label/slice/list/mask or a sequence of such. """ @@ -2368,6 +2397,8 @@ def get_locs(self, seq): See also -------- MultiIndex.get_loc : Get location for a label or a tuple of labels. + MultiIndex.slice_locs : Get slice location given start label(s) and + end label(s). """ # must be lexsorted to at least as many levels
Clarify ``.slice_locs`` and add examples of how to use the method. Also, for ``pd.Index``, clarify that using ``.slice_locs`` requires the index to be monotonic or unique.
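The examples being added, condensed into one runnable snippet:

```python
import pandas as pd

idx = pd.Index(list('abcd'))
print(idx.slice_locs(start='b', end='c'))        # (1, 3)

mi = pd.MultiIndex.from_arrays([list('abbd'), list('deff')],
                               names=['A', 'B'])
print(mi.slice_locs(start='b'))                  # (1, 4)
print(mi.slice_locs(start='b', end=('b', 'f')))  # (1, 3)
```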
https://api.github.com/repos/pandas-dev/pandas/pulls/17799
2017-10-05T21:52:29Z
2017-10-06T11:46:32Z
2017-10-06T11:46:32Z
2017-10-09T21:00:02Z
ENH: Add transparent compression to json reading/writing
diff --git a/doc/source/whatsnew/v0.21.0.txt b/doc/source/whatsnew/v0.21.0.txt index 7fbf2533428dc..8d6d7947b6892 100644 --- a/doc/source/whatsnew/v0.21.0.txt +++ b/doc/source/whatsnew/v0.21.0.txt @@ -195,7 +195,7 @@ Other Enhancements - :func:`read_json` now accepts a ``chunksize`` parameter that can be used when ``lines=True``. If ``chunksize`` is passed, read_json now returns an iterator which reads in ``chunksize`` lines with each iteration. (:issue:`17048`) - :meth:`DataFrame.assign` will preserve the original order of ``**kwargs`` for Python 3.6+ users instead of sorting the column names - Improved the import time of pandas by about 2.25x (:issue:`16764`) - +- :func:`read_json` and :func:`to_json` now accept a ``compression`` argument which allows them to transparently handle compressed files. (:issue:`17798`) .. _whatsnew_0210.api_breaking: diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 942a9ff279092..c7ae9bbee9013 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -1258,7 +1258,7 @@ def _repr_latex_(self): def to_json(self, path_or_buf=None, orient=None, date_format=None, double_precision=10, force_ascii=True, date_unit='ms', - default_handler=None, lines=False): + default_handler=None, lines=False, compression=None): """ Convert the object to a JSON string. @@ -1320,6 +1320,12 @@ def to_json(self, path_or_buf=None, orient=None, date_format=None, .. versionadded:: 0.19.0 + compression : {None, 'gzip', 'bz2', 'xz'} + A string representing the compression to use in the output file, + only used when the first argument is a filename + + .. versionadded:: 0.21.0 + Returns ------- same type as input object with filtered info axis @@ -1372,7 +1378,7 @@ def to_json(self, path_or_buf=None, orient=None, date_format=None, double_precision=double_precision, force_ascii=force_ascii, date_unit=date_unit, default_handler=default_handler, - lines=lines) + lines=lines, compression=compression) def to_hdf(self, path_or_buf, key, **kwargs): """Write the contained data to an HDF5 file using HDFStore. 
diff --git a/pandas/io/json/json.py b/pandas/io/json/json.py index ab74b265b6a06..be39f4baba0fb 100644 --- a/pandas/io/json/json.py +++ b/pandas/io/json/json.py @@ -9,7 +9,8 @@ from pandas import compat, isna from pandas import Series, DataFrame, to_datetime, MultiIndex from pandas.io.common import (get_filepath_or_buffer, _get_handle, - _stringify_path, BaseIterator) + _infer_compression, _stringify_path, + BaseIterator) from pandas.io.parsers import _validate_integer from pandas.core.common import AbstractMethodError from pandas.core.reshape.concat import concat @@ -27,7 +28,7 @@ # interface to/from def to_json(path_or_buf, obj, orient=None, date_format='epoch', double_precision=10, force_ascii=True, date_unit='ms', - default_handler=None, lines=False): + default_handler=None, lines=False, compression=None): path_or_buf = _stringify_path(path_or_buf) if lines and orient != 'records': @@ -54,8 +55,11 @@ def to_json(path_or_buf, obj, orient=None, date_format='epoch', s = _convert_to_line_delimits(s) if isinstance(path_or_buf, compat.string_types): - with open(path_or_buf, 'w') as fh: + fh, handles = _get_handle(path_or_buf, 'w', compression=compression) + try: fh.write(s) + finally: + fh.close() elif path_or_buf is None: return s else: @@ -178,7 +182,7 @@ def write(self): def read_json(path_or_buf=None, orient=None, typ='frame', dtype=True, convert_axes=True, convert_dates=True, keep_default_dates=True, numpy=False, precise_float=False, date_unit=None, encoding=None, - lines=False, chunksize=None): + lines=False, chunksize=None, compression='infer'): """ Convert a JSON string to pandas object @@ -277,6 +281,15 @@ def read_json(path_or_buf=None, orient=None, typ='frame', dtype=True, .. versionadded:: 0.21.0 + compression : {'infer', 'gzip', 'bz2', 'zip', 'xz', None}, default 'infer' + For on-the-fly decompression of on-disk data. If 'infer', then use + gzip, bz2, zip or xz if path_or_buf is a string ending in + '.gz', '.bz2', '.zip', or 'xz', respectively, and no decompression + otherwise. If using 'zip', the ZIP file must contain only one data + file to be read in. Set to None for no decompression. + + .. versionadded:: 0.21.0 + Returns ------- result : Series or DataFrame, depending on the value of `typ`. 
@@ -334,15 +347,17 @@ def read_json(path_or_buf=None, orient=None, typ='frame', dtype=True, {"index": "row 2", "col 1": "c", "col 2": "d"}]}' """ - filepath_or_buffer, _, _ = get_filepath_or_buffer(path_or_buf, - encoding=encoding) + compression = _infer_compression(path_or_buf, compression) + filepath_or_buffer, _, compression = get_filepath_or_buffer( + path_or_buf, encoding=encoding, compression=compression, + ) json_reader = JsonReader( filepath_or_buffer, orient=orient, typ=typ, dtype=dtype, convert_axes=convert_axes, convert_dates=convert_dates, keep_default_dates=keep_default_dates, numpy=numpy, precise_float=precise_float, date_unit=date_unit, encoding=encoding, - lines=lines, chunksize=chunksize + lines=lines, chunksize=chunksize, compression=compression, ) if chunksize: @@ -361,7 +376,7 @@ class JsonReader(BaseIterator): """ def __init__(self, filepath_or_buffer, orient, typ, dtype, convert_axes, convert_dates, keep_default_dates, numpy, precise_float, - date_unit, encoding, lines, chunksize): + date_unit, encoding, lines, chunksize, compression): self.path_or_buf = filepath_or_buffer self.orient = orient @@ -374,6 +389,7 @@ def __init__(self, filepath_or_buffer, orient, typ, dtype, convert_axes, self.precise_float = precise_float self.date_unit = date_unit self.encoding = encoding + self.compression = compression self.lines = lines self.chunksize = chunksize self.nrows_seen = 0 @@ -415,20 +431,20 @@ def _get_data_from_filepath(self, filepath_or_buffer): data = filepath_or_buffer + exists = False if isinstance(data, compat.string_types): try: exists = os.path.exists(filepath_or_buffer) - # gh-5874: if the filepath is too long will raise here except (TypeError, ValueError): pass - else: - if exists: - data, _ = _get_handle(filepath_or_buffer, 'r', - encoding=self.encoding) - self.should_close = True - self.open_stream = data + if exists or self.compression is not None: + data, _ = _get_handle(filepath_or_buffer, 'r', + encoding=self.encoding, + compression=self.compression) + self.should_close = True + self.open_stream = data return data diff --git a/pandas/tests/io/json/data/tsframe_v012.json.zip b/pandas/tests/io/json/data/tsframe_v012.json.zip new file mode 100644 index 0000000000000..100ba0c87b2ba Binary files /dev/null and b/pandas/tests/io/json/data/tsframe_v012.json.zip differ diff --git a/pandas/tests/io/json/test_compression.py b/pandas/tests/io/json/test_compression.py new file mode 100644 index 0000000000000..e9976da6f6774 --- /dev/null +++ b/pandas/tests/io/json/test_compression.py @@ -0,0 +1,133 @@ +import pytest +import moto + +import pandas as pd +from pandas import compat +import pandas.util.testing as tm +from pandas.util.testing import assert_frame_equal, assert_raises_regex + + +COMPRESSION_TYPES = [None, 'bz2', 'gzip', 'xz'] + + +def decompress_file(path, compression): + if compression is None: + f = open(path, 'rb') + elif compression == 'gzip': + import gzip + f = gzip.GzipFile(path, 'rb') + elif compression == 'bz2': + import bz2 + f = bz2.BZ2File(path, 'rb') + elif compression == 'xz': + lzma = compat.import_lzma() + f = lzma.open(path, 'rb') + else: + msg = 'Unrecognized compression type: {}'.format(compression) + raise ValueError(msg) + + result = f.read().decode('utf8') + f.close() + return result + + +@pytest.mark.parametrize('compression', COMPRESSION_TYPES) +def test_compression_roundtrip(compression): + if compression == 'xz': + tm._skip_if_no_lzma() + + df = pd.DataFrame([[0.123456, 0.234567, 0.567567], + [12.32112, 123123.2, 321321.2]], + 
index=['A', 'B'], columns=['X', 'Y', 'Z']) + + with tm.ensure_clean() as path: + df.to_json(path, compression=compression) + assert_frame_equal(df, pd.read_json(path, compression=compression)) + + # explicitly ensure file was compressed. + uncompressed_content = decompress_file(path, compression) + assert_frame_equal(df, pd.read_json(uncompressed_content)) + + +def test_compress_zip_value_error(): + df = pd.DataFrame([[0.123456, 0.234567, 0.567567], + [12.32112, 123123.2, 321321.2]], + index=['A', 'B'], columns=['X', 'Y', 'Z']) + + with tm.ensure_clean() as path: + import zipfile + pytest.raises(zipfile.BadZipfile, df.to_json, path, compression="zip") + + +def test_read_zipped_json(): + uncompressed_path = tm.get_data_path("tsframe_v012.json") + uncompressed_df = pd.read_json(uncompressed_path) + + compressed_path = tm.get_data_path("tsframe_v012.json.zip") + compressed_df = pd.read_json(compressed_path, compression='zip') + + assert_frame_equal(uncompressed_df, compressed_df) + + +@pytest.mark.parametrize('compression', COMPRESSION_TYPES) +def test_with_s3_url(compression): + boto3 = pytest.importorskip('boto3') + pytest.importorskip('s3fs') + if compression == 'xz': + tm._skip_if_no_lzma() + + df = pd.read_json('{"a": [1, 2, 3], "b": [4, 5, 6]}') + with moto.mock_s3(): + conn = boto3.resource("s3", region_name="us-east-1") + bucket = conn.create_bucket(Bucket="pandas-test") + + with tm.ensure_clean() as path: + df.to_json(path, compression=compression) + with open(path, 'rb') as f: + bucket.put_object(Key='test-1', Body=f) + + roundtripped_df = pd.read_json('s3://pandas-test/test-1', + compression=compression) + assert_frame_equal(df, roundtripped_df) + + +@pytest.mark.parametrize('compression', COMPRESSION_TYPES) +def test_lines_with_compression(compression): + if compression == 'xz': + tm._skip_if_no_lzma() + + with tm.ensure_clean() as path: + df = pd.read_json('{"a": [1, 2, 3], "b": [4, 5, 6]}') + df.to_json(path, orient='records', lines=True, compression=compression) + roundtripped_df = pd.read_json(path, lines=True, + compression=compression) + assert_frame_equal(df, roundtripped_df) + + +@pytest.mark.parametrize('compression', COMPRESSION_TYPES) +def test_chunksize_with_compression(compression): + if compression == 'xz': + tm._skip_if_no_lzma() + + with tm.ensure_clean() as path: + df = pd.read_json('{"a": ["foo", "bar", "baz"], "b": [4, 5, 6]}') + df.to_json(path, orient='records', lines=True, compression=compression) + + roundtripped_df = pd.concat(pd.read_json(path, lines=True, chunksize=1, + compression=compression)) + assert_frame_equal(df, roundtripped_df) + + +def test_write_unsupported_compression_type(): + df = pd.read_json('{"a": [1, 2, 3], "b": [4, 5, 6]}') + with tm.ensure_clean() as path: + msg = "Unrecognized compression type: unsupported" + assert_raises_regex(ValueError, msg, df.to_json, + path, compression="unsupported") + + +def test_read_unsupported_compression_type(): + with tm.ensure_clean() as path: + msg = "Unrecognized compression type: unsupported" + assert_raises_regex(ValueError, msg, pd.read_json, + path, compression="unsupported") diff --git a/pandas/tests/io/json/test_readlines.py b/pandas/tests/io/json/test_readlines.py index d14355b07cf20..95f23e82fced0 100644 --- a/pandas/tests/io/json/test_readlines.py +++ b/pandas/tests/io/json/test_readlines.py @@ -128,7 +128,7 @@ def test_readjson_chunks_closes(chunksize): path, orient=None, typ="frame", dtype=True, convert_axes=True, convert_dates=True, keep_default_dates=True, numpy=False, 
precise_float=False, date_unit=None, encoding=None, - lines=True, chunksize=chunksize) + lines=True, chunksize=chunksize, compression=None) reader.read() assert reader.open_stream.closed, "didn't close stream with \ chunksize = %s" % chunksize
This works in the same way as the argument to ``read_csv`` and ``to_csv``. I've added tests confirming that it works with both file paths and S3 URLs (obviously there will be edge cases I've missed - please let me know if there are important ones that I should add coverage for). The implementation is mostly plumbing, using the logic that was already in place for the same functionality in `read_csv`.

- [x] closes #15644
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
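A minimal round-trip sketch of the new keyword (the file name is illustrative):

```python
import pandas as pd

df = pd.DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6]})
df.to_json('frame.json.gz', compression='gzip')

# read_json defaults to compression='infer', so the .gz suffix alone is
# enough for it to decompress transparently on the way back in:
roundtripped = pd.read_json('frame.json.gz')
```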
https://api.github.com/repos/pandas-dev/pandas/pulls/17798
2017-10-05T20:07:05Z
2017-10-06T14:08:23Z
2017-10-06T14:08:22Z
2017-10-06T14:15:34Z
BUG/API: Raise when extension class passed to astype
diff --git a/doc/source/whatsnew/v0.21.0.txt b/doc/source/whatsnew/v0.21.0.txt index 7fbf2533428dc..5f0af8859a133 100644 --- a/doc/source/whatsnew/v0.21.0.txt +++ b/doc/source/whatsnew/v0.21.0.txt @@ -768,6 +768,7 @@ Conversion - Bug in :attr:`Timestamp.weekday_name` returning a UTC-based weekday name when localized to a timezone (:issue:`17354`) - Bug in ``Timestamp.replace`` when replacing ``tzinfo`` around DST changes (:issue:`15683`) - Bug in ``Timedelta`` construction and arithmetic that would not propagate the ``Overflow`` exception (:issue:`17367`) +- Bug in :meth:`~DataFrame.astype` converting to object dtype when passeed extension type classes (`DatetimeTZDtype``, ``CategoricalDtype``) rather than instances. Now a ``TypeError`` is raised when a class is passed (:issue:`17780`). Indexing ^^^^^^^^ diff --git a/pandas/core/internals.py b/pandas/core/internals.py index a8f1a0c78c238..689f5521e1ccb 100644 --- a/pandas/core/internals.py +++ b/pandas/core/internals.py @@ -1,6 +1,7 @@ import warnings import copy from warnings import catch_warnings +import inspect import itertools import re import operator @@ -552,6 +553,11 @@ def _astype(self, dtype, copy=False, errors='raise', values=None, list(errors_legal_values), errors)) raise ValueError(invalid_arg) + if inspect.isclass(dtype) and issubclass(dtype, ExtensionDtype): + msg = ("Expected an instance of {}, but got the class instead. " + "Try instantiating 'dtype'.".format(dtype.__name__)) + raise TypeError(msg) + # may need to convert to categorical # this is only called for non-categoricals if self.is_categorical_astype(dtype): diff --git a/pandas/tests/frame/test_dtypes.py b/pandas/tests/frame/test_dtypes.py index 5941b2ab7c2cb..abb528f0d2179 100644 --- a/pandas/tests/frame/test_dtypes.py +++ b/pandas/tests/frame/test_dtypes.py @@ -612,6 +612,20 @@ def test_astype_duplicate_col(self): expected = concat([a1_str, b, a2_str], axis=1) assert_frame_equal(result, expected) + @pytest.mark.parametrize("cls", [ + pd.api.types.CategoricalDtype, + pd.api.types.DatetimeTZDtype, + pd.api.types.IntervalDtype + ]) + def test_astype_categoricaldtype_class_raises(self, cls): + df = DataFrame({"A": ['a', 'a', 'b', 'c']}) + xpr = "Expected an instance of {}".format(cls.__name__) + with tm.assert_raises_regex(TypeError, xpr): + df.astype({"A": cls}) + + with tm.assert_raises_regex(TypeError, xpr): + df['A'].astype(cls) + def test_timedeltas(self): df = DataFrame(dict(A=Series(date_range('2012-1-1', periods=3, freq='D')),
Closes https://github.com/pandas-dev/pandas/issues/17780
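As a quick sketch of the behavior change (mirroring the new test, not part of the patch itself): passing the extension *class* now raises, while an instance still converts.

```python
import pandas as pd
from pandas.api.types import CategoricalDtype

df = pd.DataFrame({"A": ["a", "a", "b", "c"]})

try:
    df.astype({"A": CategoricalDtype})  # the class itself -> TypeError
except TypeError as exc:
    print(exc)  # "Expected an instance of CategoricalDtype, ..."

converted = df.astype({"A": CategoricalDtype()})  # an instance works
assert str(converted["A"].dtype) == "category"
```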
https://api.github.com/repos/pandas-dev/pandas/pulls/17796
2017-10-05T19:41:58Z
2017-10-05T23:11:16Z
2017-10-05T23:11:15Z
2017-10-27T12:04:28Z
DOC: Column indexes should use the same metadata as columns
diff --git a/doc/source/developer.rst b/doc/source/developer.rst index a695366d9ada3..9c214020ab43d 100644 --- a/doc/source/developer.rst +++ b/doc/source/developer.rst @@ -45,20 +45,19 @@ So that a ``pandas.DataFrame`` can be faithfully reconstructed, we store a .. code-block:: text {'index_columns': ['__index_level_0__', '__index_level_1__', ...], - 'column_index_names': [<column index level name 0>, <column index level name 1>, ...], - 'column_index_dtypes': [<dtype 0>, <dtype 1>, ..., <dtype N>] + 'column_indexes': [<ci0>, <ci1>, ..., <ciN>], 'columns': [<c0>, <c1>, ...], 'pandas_version': $VERSION} -Here, ``<c0>`` and so forth are dictionaries containing the metadata for each -column. This has JSON form: +Here, ``<c0>``/``<ci0>`` and so forth are dictionaries containing the metadata +for each column. This has JSON form: .. code-block:: text {'name': column_name, 'pandas_type': pandas_type, 'numpy_type': numpy_type, - 'metadata': type_metadata} + 'metadata': metadata} ``pandas_type`` is the logical type of the column, and is one of: @@ -75,7 +74,7 @@ result of ``str(dtype)`` for the underlying NumPy array that holds the data. So for ``datetimetz`` this is ``datetime64[ns]`` and for categorical, it may be any of the supported integer categorical types. -The ``type_metadata`` is ``None`` except for: +The ``metadata`` field is ``None`` except for: * ``datetimetz``: ``{'timezone': zone, 'unit': 'ns'}``, e.g. ``{'timezone', 'America/New_York', 'unit': 'ns'}``. The ``'unit'`` is optional, and if @@ -108,8 +107,12 @@ As an example of fully-formed metadata: .. code-block:: text {'index_columns': ['__index_level_0__'], - 'column_index_names': [None], - 'column_index_dtypes': ['object'], + 'column_indexes': [ + {'name': None, + 'pandas_type': 'string', + 'numpy_type': 'object', + 'metadata': None} + ], 'columns': [ {'name': 'c0', 'pandas_type': 'int8',
null
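For readers who want to see this metadata in the wild, a hedged sketch of inspecting it with pyarrow (`example.parquet` is a hypothetical file written by pyarrow from a DataFrame; this assumes the key-value metadata is exposed via `FileMetaData.metadata` under the `b'pandas'` key):

```python
import json
import pyarrow.parquet as pq

file_meta = pq.read_metadata('example.parquet')
pandas_meta = json.loads(file_meta.metadata[b'pandas'].decode('utf-8'))

# Should match the schema documented above.
print(pandas_meta['index_columns'])
print(pandas_meta['column_indexes'])
print(pandas_meta['columns'])
```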
https://api.github.com/repos/pandas-dev/pandas/pulls/17795
2017-10-05T16:53:09Z
2017-10-05T17:28:22Z
2017-10-05T17:28:22Z
2017-10-05T17:28:25Z
have _NaT subclass datetime directly
diff --git a/pandas/_libs/tslib.pyx b/pandas/_libs/tslib.pyx index 53cde4f9b6b65..5269cddf8d2fd 100644 --- a/pandas/_libs/tslib.pyx +++ b/pandas/_libs/tslib.pyx @@ -30,7 +30,7 @@ from util cimport (is_integer_object, is_float_object, is_datetime64_object, is_timedelta64_object, INT64_MAX) cimport util -from cpython.datetime cimport PyTZInfo_Check +from cpython.datetime cimport PyDelta_Check, PyTZInfo_Check # this is our datetime.pxd from datetime cimport ( pandas_datetimestruct, @@ -50,7 +50,6 @@ from datetime cimport ( check_dts_bounds, PANDAS_FR_ns, PyDateTime_Check, PyDate_Check, - PyDelta_Check, # PyDelta_Check(x) --> isinstance(x, timedelta) PyDateTime_IMPORT, timedelta, datetime ) @@ -840,6 +839,7 @@ class NaTType(_NaT): base = _NaT.__new__(cls, 1, 1, 1) base.value = NPY_NAT + base.freq = None return base @@ -862,6 +862,12 @@ class NaTType(_NaT): def __long__(self): return NPY_NAT + def __reduce_ex__(self, protocol): + # python 3.6 compat + # http://bugs.python.org/issue28730 + # now __reduce_ex__ is defined and higher priority than __reduce__ + return self.__reduce__() + def __reduce__(self): return (__nat_unpickle, (None, )) @@ -997,6 +1003,16 @@ class NaTType(_NaT): tz_localize = _make_nat_func('tz_localize', Timestamp) replace = _make_nat_func('replace', Timestamp) + def to_datetime(self): + """ + DEPRECATED: use :meth:`to_pydatetime` instead. + + Convert a Timestamp object to a native Python datetime object. + """ + warnings.warn("to_datetime is deprecated. Use self.to_pydatetime()", + FutureWarning, stacklevel=2) + return self.to_pydatetime(warn=False) + def __nat_unpickle(*args): # return constant defined in the module @@ -1143,9 +1159,9 @@ cdef class _Timestamp(datetime): int ndim if isinstance(other, _Timestamp): - if other is NaT: - return _cmp_nat_dt(other, self, _reverse_ops[op]) ots = other + elif other is NaT: + return _cmp_nat_dt(other, self, _reverse_ops[op]) elif PyDateTime_Check(other): if self.nanosecond == 0: val = self.to_pydatetime() @@ -1448,8 +1464,7 @@ _nat_scalar_rules[Py_GE] = False cdef _nat_divide_op(self, other): - if (PyDelta_Check(other) or - is_timedelta64_object(other) or other is NaT): + if PyDelta_Check(other) or is_timedelta64_object(other) or other is NaT: return np.nan if is_integer_object(other) or is_float_object(other): return NaT @@ -1461,7 +1476,10 @@ cdef _nat_rdivide_op(self, other): return NotImplemented -cdef class _NaT(_Timestamp): +cdef class _NaT(datetime): + cdef readonly: + int64_t value + object freq def __hash__(_NaT self): # py3k needs this defined here @@ -1475,34 +1493,52 @@ cdef class _NaT(_Timestamp): if ndim == 0: if is_datetime64_object(other): - other = Timestamp(other) + return _nat_scalar_rules[op] else: raise TypeError('Cannot compare type %r with type %r' % (type(self).__name__, type(other).__name__)) return PyObject_RichCompare(other, self, _reverse_ops[op]) def __add__(self, other): - try: - if PyDateTime_Check(other): - return NaT - result = _Timestamp.__add__(self, other) - # Timestamp.__add__ doesn't return DatetimeIndex/TimedeltaIndex - if result is NotImplemented: - return result - except (OverflowError, OutOfBoundsDatetime): - pass + if PyDateTime_Check(other): + return NaT + + elif hasattr(other, 'delta'): + # Timedelta, offsets.Tick, offsets.Week + return NaT + elif getattr(other, '_typ', None) in ['dateoffset', 'series', + 'period', 'datetimeindex', + 'timedeltaindex']: + # Duplicate logic in _Timestamp.__add__ to avoid needing + # to subclass; allows us to @final(_Timestamp.__add__) + return NotImplemented 
return NaT def __sub__(self, other): - if PyDateTime_Check(other) or PyDelta_Check(other): + # Duplicate some logic from _Timestamp.__sub__ to avoid needing + # to subclass; allows us to @final(_Timestamp.__sub__) + if PyDateTime_Check(other): + return NaT + elif PyDelta_Check(other): return NaT - try: - result = _Timestamp.__sub__(self, other) - # Timestamp.__sub__ may return DatetimeIndex/TimedeltaIndex - if result is NotImplemented or hasattr(result, '_typ'): - return result - except (OverflowError, OutOfBoundsDatetime): - pass + + elif getattr(other, '_typ', None) == 'datetimeindex': + # a Timestamp-DatetimeIndex -> yields a negative TimedeltaIndex + return -other.__sub__(self) + + elif getattr(other, '_typ', None) == 'timedeltaindex': + # a Timestamp-TimedeltaIndex -> yields a negative TimedeltaIndex + return (-other).__add__(self) + + elif hasattr(other, 'delta'): + # offsets.Tick, offsets.Week + neg_other = -other + return self + neg_other + + elif getattr(other, '_typ', None) in ['period', + 'periodindex', 'dateoffset']: + return NotImplemented + return NaT def __pos__(self): @@ -1525,6 +1561,14 @@ cdef class _NaT(_Timestamp): return NaT return NotImplemented + @property + def asm8(self): + return np.datetime64(NPY_NAT, 'ns') + + def to_datetime64(self): + """ Returns a numpy.datetime64 object with 'ns' precision """ + return np.datetime64('NaT') + # lightweight C object to hold datetime & int64 pair cdef class _TSObject:
The main change in this PR is to have `_NaT` subclass `datetime` directly instead of subclassing `_Timestamp`; everything else is just to preserve the current behavior. Why, you ask? Because once this is OKed, the entire `NaT` machinery can be cut and pasted into a stand-alone module whose only intra-pandas dependency is `util`. The only non-trivial behavior is in `__add__` and `__sub__`, where `_NaT` currently dispatches to `_Timestamp` for a few cases; this PR re-implements those cases directly in `_NaT`. While that does mean a small amount of repetition, it opens up the possibility of using `@cython.final` for potential optimization of `_Timestamp.__add__`/`__sub__`. `NaTType.to_datetime` could use `_make_nat_func` if we were OK with it not producing the same warning as `Timestamp.to_datetime`. - [ ] closes #17435 - [ ] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
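A few spot checks of the behavior being preserved (a sketch of current `NaT` semantics that the subclass switch must not change):

```python
import pandas as pd
from datetime import datetime, timedelta

# Arithmetic with datetimes/timedeltas still yields NaT.
assert (pd.NaT + timedelta(days=1)) is pd.NaT
assert (pd.NaT - datetime(2017, 1, 1)) is pd.NaT
assert (pd.NaT + pd.Timedelta('1D')) is pd.NaT

# Comparisons against NaT remain False across the board.
assert not (pd.NaT == pd.NaT)
assert not (pd.NaT < pd.Timestamp('2017-01-01'))
```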
https://api.github.com/repos/pandas-dev/pandas/pulls/17793
2017-10-05T05:28:39Z
2017-10-28T15:34:44Z
2017-10-28T15:34:44Z
2017-10-28T17:04:47Z
DOC: Clarifying use of categorical data in describe docstring (#16722)
diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 5dd770b2600a0..ed2a592a64efe 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -6352,20 +6352,22 @@ def describe(self, percentiles=None, include=None, exclude=None): - A list-like of dtypes : Limits the results to the provided data types. To limit the result to numeric types submit - ``numpy.number``. To limit it instead to categorical - objects submit the ``numpy.object`` data type. Strings + ``numpy.number``. To limit it instead to object columns submit + the ``numpy.object`` data type. Strings can also be used in the style of - ``select_dtypes`` (e.g. ``df.describe(include=['O'])``) + ``select_dtypes`` (e.g. ``df.describe(include=['O'])``). To + select pandas categorical columns, use ``'category'`` - None (default) : The result will include all numeric columns. exclude : list-like of dtypes or None (default), optional, A black list of data types to omit from the result. Ignored for ``Series``. Here are the options: - A list-like of dtypes : Excludes the provided data types - from the result. To select numeric types submit - ``numpy.number``. To select categorical objects submit the data + from the result. To exclude numeric types submit + ``numpy.number``. To exclude object columns submit the data type ``numpy.object``. Strings can also be used in the style of - ``select_dtypes`` (e.g. ``df.describe(include=['O'])``) + ``select_dtypes`` (e.g. ``df.describe(include=['O'])``). To + exclude pandas categorical columns, use ``'category'`` - None (default) : The result will exclude nothing. Returns @@ -6390,9 +6392,11 @@ def describe(self, percentiles=None, include=None, exclude=None): among those with the highest count. For mixed data types provided via a ``DataFrame``, the default is to - return only an analysis of numeric columns. If ``include='all'`` - is provided as an option, the result will include a union of - attributes of each type. + return only an analysis of numeric columns. If the dataframe consists + only of object and categorical data without any numeric columns, the + default is to return an analysis of both the object and categorical + columns. If ``include='all'`` is provided as an option, the result + will include a union of attributes of each type. The `include` and `exclude` parameters can be used to limit which columns in a ``DataFrame`` are analyzed for the output. @@ -6442,8 +6446,10 @@ def describe(self, percentiles=None, include=None, exclude=None): Describing a ``DataFrame``. By default only numeric fields are returned. - >>> df = pd.DataFrame([[1, 'a'], [2, 'b'], [3, 'c']], - ... columns=['numeric', 'object']) + >>> df = pd.DataFrame({ 'object': ['a', 'b', 'c'], + ... 'numeric': [1, 2, 3], + ... 'categorical': pd.Categorical(['d','e','f']) + ... }) >>> df.describe() numeric count 3.0 @@ -6458,18 +6464,18 @@ def describe(self, percentiles=None, include=None, exclude=None): Describing all columns of a ``DataFrame`` regardless of data type. >>> df.describe(include='all') - numeric object - count 3.0 3 - unique NaN 3 - top NaN b - freq NaN 1 - mean 2.0 NaN - std 1.0 NaN - min 1.0 NaN - 25% 1.5 NaN - 50% 2.0 NaN - 75% 2.5 NaN - max 3.0 NaN + categorical numeric object + count 3 3.0 3 + unique 3 NaN 3 + top f NaN c + freq 1 NaN 1 + mean NaN 2.0 NaN + std NaN 1.0 NaN + min NaN 1.0 NaN + 25% NaN 1.5 NaN + 50% NaN 2.0 NaN + 75% NaN 2.5 NaN + max NaN 3.0 NaN Describing a column from a ``DataFrame`` by accessing it as an attribute. 
@@ -6504,30 +6510,42 @@ def describe(self, percentiles=None, include=None, exclude=None): object count 3 unique 3 - top b + top c freq 1 + Including only categorical columns from a ``DataFrame`` description. + + >>> df.describe(include=['category']) + categorical + count 3 + unique 3 + top f + freq 1 + Excluding numeric columns from a ``DataFrame`` description. >>> df.describe(exclude=[np.number]) - object - count 3 - unique 3 - top b - freq 1 + categorical object + count 3 3 + unique 3 3 + top f c + freq 1 1 Excluding object columns from a ``DataFrame`` description. >>> df.describe(exclude=[np.object]) - numeric - count 3.0 - mean 2.0 - std 1.0 - min 1.0 - 25% 1.5 - 50% 2.0 - 75% 2.5 - max 3.0 + categorical numeric + count 3 3.0 + unique 3 NaN + top f NaN + freq 1 NaN + mean NaN 2.0 + std NaN 1.0 + min NaN 1.0 + 25% NaN 1.5 + 50% NaN 2.0 + 75% NaN 2.5 + max NaN 3.0 See Also --------
- [ ] closes #16722
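A runnable sketch mirroring the updated docstring examples:

```python
import numpy as np
import pandas as pd

df = pd.DataFrame({'object': ['a', 'b', 'c'],
                   'numeric': [1, 2, 3],
                   'categorical': pd.Categorical(['d', 'e', 'f'])})

print(df.describe())                      # numeric column only (default)
print(df.describe(include=['category']))  # categorical column only
print(df.describe(exclude=[np.number]))   # object + categorical columns
```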
https://api.github.com/repos/pandas-dev/pandas/pulls/17789
2017-10-04T20:38:50Z
2017-10-05T10:27:06Z
2017-10-05T10:27:06Z
2017-10-05T22:08:23Z
ENH: SparseDataFrame/SparseSeries value assignment
diff --git a/doc/source/whatsnew/v0.24.0.txt b/doc/source/whatsnew/v0.24.0.txt index 9b70bda82e247..fa20a110133ce 100644 --- a/doc/source/whatsnew/v0.24.0.txt +++ b/doc/source/whatsnew/v0.24.0.txt @@ -175,6 +175,7 @@ Other Enhancements (:issue:`21627`) - New method :meth:`HDFStore.walk` will recursively walk the group hierarchy of an HDF5 file (:issue:`10932`) - :func:`read_html` copies cell data across ``colspan`` and ``rowspan``, and it treats all-``th`` table rows as headers if ``header`` kwarg is not given and there is no ``thead`` (:issue:`17054`) +- :class:`SparseDataFrame` and :class:`SparseSeries` support value assignment (:issue:`21818`) - :meth:`Series.nlargest`, :meth:`Series.nsmallest`, :meth:`DataFrame.nlargest`, and :meth:`DataFrame.nsmallest` now accept the value ``"all"`` for the ``keep`` argument. This keeps all ties for the nth largest/smallest value (:issue:`16818`) - :class:`IntervalIndex` has gained the :meth:`~IntervalIndex.set_closed` method to change the existing ``closed`` value (:issue:`21670`) - :func:`~DataFrame.to_csv`, :func:`~Series.to_csv`, :func:`~DataFrame.to_json`, and :func:`~Series.to_json` now support ``compression='infer'`` to infer compression based on filename extension (:issue:`15008`). diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 700562386c838..47f07c01b7785 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -785,11 +785,9 @@ def iterrows(self): iteritems : Iterate over (column name, Series) pairs. """ - columns = self.columns - klass = self._constructor_sliced - for k, v in zip(self.index, self.values): - s = klass(v, index=columns, name=k) - yield k, s + iloc = self.iloc + for i, k in enumerate(self.index): + yield k, iloc[i] def itertuples(self, index=True, name="Pandas"): """ @@ -2550,9 +2548,7 @@ def set_value(self, index, col, value, takeable=False): Returns ------- - frame : DataFrame - If label pair is contained, will be reference to calling DataFrame, - otherwise a new object + self : DataFrame """ warnings.warn("set_value is deprecated and will be removed " "in a future release. 
Please use " @@ -2765,7 +2761,7 @@ def _getitem_multilevel(self, key): return self._get_item_cache(key) def _getitem_frame(self, key): - if key.values.size and not is_bool_dtype(key.values): + if key.size and not key.dtypes.map(is_bool_dtype).all(): raise ValueError('Must pass DataFrame with boolean values only') return self.where(key) @@ -3153,7 +3149,7 @@ def _setitem_frame(self, key, value): ) key = self._constructor(key, **self._construct_axes_dict()) - if key.values.size and not is_bool_dtype(key.values): + if key.size and not key.dtypes.map(is_bool_dtype).all(): raise TypeError( 'Must pass DataFrame or 2-d ndarray with boolean values only' ) diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py index f0635014b166b..1404792546a46 100644 --- a/pandas/core/internals/blocks.py +++ b/pandas/core/internals/blocks.py @@ -921,6 +921,9 @@ def _is_empty_indexer(indexer): if _is_empty_indexer(indexer): pass + elif is_sparse(values): + values = values.set_values(indexer, value) + # setting a single element for each dim and with a rhs that could # be say a list # GH 6043 @@ -3154,6 +3157,17 @@ def _astype(self, dtype, copy=False, errors='raise', values=None, return self.make_block_same_class(values=values, placement=self.mgr_locs) + def _can_hold_element(self, element): + return np.can_cast(np.asarray(element).dtype, self.sp_values.dtype) + + def _try_coerce_result(self, result): + if (isinstance(result, np.ndarray) and + np.ndim(result) == 1 and + not is_sparse(result)): + result = SparseArray(result, kind=self.kind, + fill_value=self.fill_value) + return result + def __len__(self): try: return self.sp_index.length @@ -3246,6 +3260,63 @@ def sparse_reindex(self, new_index): return self.make_block_same_class(values, sparse_index=new_index, placement=self.mgr_locs) + def where(self, other, cond, align=True, errors='raise', + try_cast=False, axis=0, transpose=False, mgr=None): + """ + evaluate the block; return result block(s) from the result + + Parameters + ---------- + other : a ndarray/object + cond : the condition to respect + align : boolean, perform alignment on other/cond + errors : str, {'raise', 'ignore'}, default 'raise' + - ``raise`` : allow exceptions to be raised + - ``ignore`` : suppress exceptions. On error return original object + + axis : int + transpose : boolean + Set to True if self is stored with axes reversed + + Returns + ------- + a new sparse block(s), the result of the func + """ + cond = getattr(cond, 'values', cond) + # For SparseBlock, self.values is always 1D. + # If cond was a frame, its 2D values would incorrectly broadcast + # later on. 
+ if self.values.ndim == 1 and any(ax == 1 for ax in cond.shape): + cond = cond.ravel() + + return super(SparseBlock, self).where( + other, cond, align=align, errors=errors, try_cast=try_cast, + axis=axis, transpose=transpose, mgr=mgr) + + def putmask(self, mask, new, align=True, inplace=False, axis=0, + transpose=False, mgr=None): + """ + putmask the data to the block; we must be a single block and not + generate other blocks + + return the resulting block + + Parameters + ---------- + mask : the condition to respect + new : a ndarray/object + align : boolean, perform alignment on other/cond, default is True + inplace : perform inplace modification, default is False + + Returns + ------- + a new block, the result of the putmask + """ + _, _, new, _ = self._try_coerce_args(self.values, new) + indexer = mask.to_dense().values.ravel().nonzero()[0] + block = self.setitem(indexer, new) + return [block] + # ----------------------------------------------------------------- # Constructor Helpers diff --git a/pandas/core/series.py b/pandas/core/series.py index 4b4fccccda4a0..9e70a476898f0 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -1071,9 +1071,7 @@ def set_value(self, label, value, takeable=False): Returns ------- - series : Series - If label is contained, will be reference to calling Series, - otherwise a new object + self : Series """ warnings.warn("set_value is deprecated and will be removed " "in a future release. Please use " diff --git a/pandas/core/sparse/array.py b/pandas/core/sparse/array.py index 6f0ffbff22028..8fba29034aeb3 100644 --- a/pandas/core/sparse/array.py +++ b/pandas/core/sparse/array.py @@ -37,6 +37,7 @@ import pandas.core.algorithms as algos import pandas.core.ops as ops import pandas.io.formats.printing as printing +from pandas.errors import PerformanceWarning from pandas.util._decorators import Appender from pandas.core.indexes.base import _index_shared_docs @@ -369,6 +370,53 @@ def get_values(self, fill=None): """ return a dense representation """ return self.to_dense(fill=fill) + def set_values(self, indexer, value): + """ + Return new SparseArray with indexed values set to `value`. + + Returns + ------- + SparseArray + A new sparse array with indexer positions filled with value. 
+ """ + # If indexer is not a single int position, easiest to handle via dense + if not is_scalar(indexer): + warnings.warn( + 'Setting SparseSeries/Array values is inefficient when ' + 'indexing with multiple keys because the whole series ' + 'is made dense interim.', + PerformanceWarning, stacklevel=2) + + values = self.to_dense() + values[indexer] = value + return SparseArray(values, kind=self.kind, + fill_value=self.fill_value) + + # If label already in sparse index, just switch the value on a copy + idx = self.sp_index.lookup(indexer) + if idx != -1: + self.sp_values[idx] = value + return self + + warnings.warn( + 'Setting new SparseSeries values is inefficient ' + '(a copy of data is made).', PerformanceWarning, stacklevel=2) + + # Otherwise, construct a new array, and insert the new value in the + # correct position + indices = self.sp_index.to_int_index().indices + pos = np.searchsorted(indices, indexer) + + indices = np.insert(indices, pos, indexer) + sp_values = np.insert(self.sp_values, pos, value) + + # Length can be increased when adding a new value into index + length = max(self.sp_index.length, indexer + 1) + sp_index = _make_index(length, indices, self.kind) + + return SparseArray(sp_values, sparse_index=sp_index, + fill_value=self.fill_value) + def to_dense(self, fill=None): """ Convert SparseArray to a NumPy array. @@ -544,6 +592,10 @@ def astype(self, dtype=None, copy=True): return self._simple_new(sp_values, self.sp_index, fill_value=fill_value) + def tolist(self): + """Return *dense* self as list""" + return self.values.tolist() + def copy(self, deep=True): """ Make a copy of the SparseArray. Only the actual sparse values need to diff --git a/pandas/core/sparse/frame.py b/pandas/core/sparse/frame.py index 58e3001bcfe6a..d43f1ce5ffef0 100644 --- a/pandas/core/sparse/frame.py +++ b/pandas/core/sparse/frame.py @@ -330,10 +330,11 @@ def _apply_columns(self, func): return self._constructor( data=new_data, index=self.index, columns=self.columns, - default_fill_value=self.default_fill_value).__finalize__(self) + default_fill_value=self.default_fill_value, + default_kind=self.default_kind).__finalize__(self) - def astype(self, dtype): - return self._apply_columns(lambda x: x.astype(dtype)) + def astype(self, dtype, **kwargs): + return self._apply_columns(lambda x: x.astype(dtype, **kwargs)) def copy(self, deep=True): """ @@ -464,44 +465,6 @@ def _get_value(self, index, col, takeable=False): return series._get_value(index, takeable=takeable) _get_value.__doc__ = get_value.__doc__ - def set_value(self, index, col, value, takeable=False): - """ - Put single value at passed column and index - - .. deprecated:: 0.21.0 - - Please use .at[] or .iat[] accessors. - - Parameters - ---------- - index : row label - col : column label - value : scalar value - takeable : interpret the index/col as indexers, default False - - Notes - ----- - This method *always* returns a new object. It is currently not - particularly efficient (and potentially very expensive) but is provided - for API compatibility with DataFrame - - Returns - ------- - frame : DataFrame - """ - warnings.warn("set_value is deprecated and will be removed " - "in a future release. 
Please use " - ".at[] or .iat[] accessors instead", FutureWarning, - stacklevel=2) - return self._set_value(index, col, value, takeable=takeable) - - def _set_value(self, index, col, value, takeable=False): - dense = self.to_dense()._set_value( - index, col, value, takeable=takeable) - return dense.to_sparse(kind=self._default_kind, - fill_value=self._default_fill_value) - _set_value.__doc__ = set_value.__doc__ - def _slice(self, slobj, axis=0, kind=None): if axis == 0: new_index = self.index[slobj] @@ -576,7 +539,8 @@ def _combine_frame(self, other, func, fill_value=None, level=None): return self._constructor(data=new_data, index=new_index, columns=new_columns, - default_fill_value=new_fill_value + default_fill_value=new_fill_value, + default_kind=self.default_kind, ).__finalize__(self) def _combine_match_index(self, other, func, level=None): @@ -605,7 +569,8 @@ def _combine_match_index(self, other, func, level=None): return self._constructor( new_data, index=new_index, columns=self.columns, - default_fill_value=fill_value).__finalize__(self) + default_fill_value=fill_value, + default_kind=self.default_kind).__finalize__(self) def _combine_match_columns(self, other, func, level=None, try_cast=True): # patched version of DataFrame._combine_match_columns to account for @@ -629,7 +594,8 @@ def _combine_match_columns(self, other, func, level=None, try_cast=True): return self._constructor( new_data, index=self.index, columns=union, - default_fill_value=self.default_fill_value).__finalize__(self) + default_fill_value=self.default_fill_value, + default_kind=self.default_kind).__finalize__(self) def _combine_const(self, other, func, errors='raise', try_cast=True): return self._apply_columns(lambda x: func(x, other)) @@ -673,7 +639,8 @@ def _reindex_index(self, index, method, copy, level, fill_value=np.nan, return self._constructor( new_series, index=index, columns=self.columns, - default_fill_value=self._default_fill_value).__finalize__(self) + default_fill_value=self._default_fill_value, + default_kind=self.default_kind).__finalize__(self) def _reindex_columns(self, columns, method, copy, level, fill_value=None, limit=None, takeable=False): @@ -693,7 +660,8 @@ def _reindex_columns(self, columns, method, copy, level, fill_value=None, sdict = {k: v for k, v in compat.iteritems(self) if k in columns} return self._constructor( sdict, index=self.index, columns=columns, - default_fill_value=self._default_fill_value).__finalize__(self) + default_fill_value=self._default_fill_value, + default_kind=self.default_kind).__finalize__(self) def _reindex_with_indexers(self, reindexers, method=None, fill_value=None, limit=None, copy=False, allow_dups=False): @@ -725,8 +693,10 @@ def _reindex_with_indexers(self, reindexers, method=None, fill_value=None, else: new_arrays[col] = self[col] - return self._constructor(new_arrays, index=index, - columns=columns).__finalize__(self) + return self._constructor( + new_arrays, index=index, columns=columns, + default_fill_value=self.default_fill_value, + default_kind=self.default_kind).__finalize__(self) def _join_compat(self, other, on=None, how='left', lsuffix='', rsuffix='', sort=False): diff --git a/pandas/core/sparse/series.py b/pandas/core/sparse/series.py index 8ac5d81f23bb2..78558f5e7546d 100644 --- a/pandas/core/sparse/series.py +++ b/pandas/core/sparse/series.py @@ -18,7 +18,6 @@ import pandas.core.common as com import pandas.core.indexes.base as ibase import pandas.core.ops as ops -import pandas._libs.index as libindex from pandas.util._decorators import Appender 
from pandas.core.sparse.array import ( @@ -278,8 +277,15 @@ def __array_wrap__(self, result, context=None): else: fill_value = self.fill_value + # Only reuse old sparse index if result size matches + # (fails e.g. for ~sparseseries) + if np.size(result) == self.sp_index.npoints: + sp_index = self.sp_index + else: + sp_index = None + return self._constructor(result, index=self.index, - sparse_index=self.sp_index, + sparse_index=sp_index, fill_value=fill_value, copy=False).__finalize__(self) @@ -480,44 +486,25 @@ def set_value(self, label, value, takeable=False): Returns ------- - series : SparseSeries + self : SparseSeries """ warnings.warn("set_value is deprecated and will be removed " "in a future release. Please use " ".at[] or .iat[] accessors instead", FutureWarning, stacklevel=2) + self._data = self._data.copy() return self._set_value(label, value, takeable=takeable) def _set_value(self, label, value, takeable=False): - values = self.to_dense() - - # if the label doesn't exist, we will create a new object here - # and possibly change the index - new_values = values._set_value(label, value, takeable=takeable) - if new_values is not None: - values = new_values - new_index = values.index - values = SparseArray(values, fill_value=self.fill_value, - kind=self.kind) - self._data = SingleBlockManager(values, new_index) - self._index = new_index + try: + idx = self.index.get_loc(label) + except KeyError: + idx = len(self) + self._data.axes[0] = self._data.index.append(Index([label])) + self._data = self._data.setitem(indexer=idx, value=value) + return self _set_value.__doc__ = set_value.__doc__ - def _set_values(self, key, value): - - # this might be inefficient as we have to recreate the sparse array - # rather than setting individual elements, but have to convert - # the passed slice/boolean that's in dense space into a sparse indexer - # not sure how to do that! - if isinstance(key, Series): - key = key.values - - values = self.values.to_dense() - values[key] = libindex.convert_scalar(values, value) - values = SparseArray(values, fill_value=self.fill_value, - kind=self.kind) - self._data = SingleBlockManager(values, self.index) - def to_dense(self, sparse_only=False): """ Convert SparseSeries to a Series. 
diff --git a/pandas/tests/sparse/frame/test_frame.py b/pandas/tests/sparse/frame/test_frame.py index be5a1710119ee..07b8267ee7510 100644 --- a/pandas/tests/sparse/frame/test_frame.py +++ b/pandas/tests/sparse/frame/test_frame.py @@ -457,7 +457,6 @@ def test_iloc(self): iframe.iloc[:, 0].sp_index) def test_set_value(self): - # ok, as the index gets converted to object frame = self.frame.copy() with tm.assert_produces_warning(FutureWarning, @@ -471,7 +470,6 @@ def test_set_value(self): with tm.assert_produces_warning(FutureWarning, check_stacklevel=False): res = self.frame.set_value('foobar', 'B', 1.5) - assert res is not self.frame assert res.index[-1] == 'foobar' with tm.assert_produces_warning(FutureWarning, check_stacklevel=False): @@ -480,9 +478,8 @@ def test_set_value(self): with tm.assert_produces_warning(FutureWarning, check_stacklevel=False): res2 = res.set_value('foobar', 'qux', 1.5) - assert res2 is not res tm.assert_index_equal(res2.columns, - pd.Index(list(self.frame.columns) + ['qux'])) + pd.Index(list(self.frame.columns))) with tm.assert_produces_warning(FutureWarning, check_stacklevel=False): assert res2.get_value('foobar', 'qux') == 1.5 @@ -1302,3 +1299,54 @@ def test_assign_with_sparse_frame(self): for column in res.columns: assert type(res[column]) is SparseSeries + + +def _test_assignment(kind, indexer, key=None): + arr = np.array([[1, nan], + [nan, 1]]) + df = DataFrame(arr, copy=True) + sdf = SparseDataFrame(arr, default_kind=kind).to_sparse(kind=kind) + + def get_indexer(df): + return getattr(df, indexer) if indexer else df + + if key is None: + key = pd.isnull(sdf).to_sparse() + + get_indexer(sdf)[key] = 2 + + get_indexer(df)[key] = 2 + res = df.to_sparse(kind=kind) + + tm.assert_sp_frame_equal(sdf, res) + + +@pytest.fixture(params=['integer', 'block']) +def spindex_kind(request): + return request.param + + +@pytest.mark.parametrize('indexer', ['iat']) +@pytest.mark.parametrize('key', [(0, 0)]) +def test_frame_assignment_at(spindex_kind, indexer, key): + _test_assignment(spindex_kind, indexer, key) + + +@pytest.mark.parametrize('indexer', ['at', 'loc', 'iloc']) +@pytest.mark.parametrize('key', [0, + [0, 1], + [True, False]]) +def test_frame_assignment_loc(spindex_kind, indexer, key): + _test_assignment(spindex_kind, indexer, key) + + +@pytest.mark.parametrize('key', [None, + [True, False]]) +def test_frame_assignment_setitem(spindex_kind, key): + _test_assignment(spindex_kind, None, key) + + +@pytest.mark.parametrize('indexer', ['loc', 'at']) +@pytest.mark.parametrize('key', [3]) +def test_frame_assignment_extend_index(spindex_kind, indexer, key): + _test_assignment(spindex_kind, indexer, key) diff --git a/pandas/tests/sparse/series/test_series.py b/pandas/tests/sparse/series/test_series.py index 921c30234660f..916fca5d8182c 100644 --- a/pandas/tests/sparse/series/test_series.py +++ b/pandas/tests/sparse/series/test_series.py @@ -484,7 +484,6 @@ def test_get_get_value(self): self.bseries.get_value(10), self.bseries[10]) def test_set_value(self): - idx = self.btseries.index[7] with tm.assert_produces_warning(FutureWarning, check_stacklevel=False): @@ -1456,3 +1455,29 @@ def test_constructor_dict_datetime64_index(datetime_type): expected = SparseSeries(values, map(pd.Timestamp, dates)) tm.assert_sp_series_equal(result, expected) + + +@pytest.mark.parametrize('kind', ['integer', 'block']) +@pytest.mark.parametrize('indexer', [None, 'loc', 'iloc', 'at', 'iat']) +@pytest.mark.parametrize('key', [0, [0, 1], 2, [2, 3], + np.r_[True, False, False, False], + np.r_[False, 
False, False, True]]) +def test_series_assignment(kind, indexer, key): + is_multikey = np.asarray(key).ndim > 0 + skip_multikey = 'at' in (indexer or '') + if is_multikey and skip_multikey: + return + + arr = np.array([0., 0., nan, nan]) + ss = SparseSeries(arr, kind=kind) + assert len(ss.sp_index.to_int_index().indices) == 2 + + res = arr.copy() + res[key] = 1 + res = SparseSeries(res, kind=kind) + + ss_setitem = getattr(ss, indexer) if indexer else ss + + ss_setitem[key] = 1 + + tm.assert_sp_series_equal(ss, res) diff --git a/pandas/tests/sparse/test_array.py b/pandas/tests/sparse/test_array.py index 2790464e2f811..f82491e8ac7e1 100644 --- a/pandas/tests/sparse/test_array.py +++ b/pandas/tests/sparse/test_array.py @@ -928,3 +928,9 @@ def test_ufunc_args(self): sparse = SparseArray([1, -1, 0, -2], fill_value=0) result = SparseArray([2, 0, 1, -1], fill_value=1) tm.assert_sp_array_equal(np.add(sparse, 1), result) + + def test_tolist(self): + sparse = SparseArray([1, np.nan, 2, np.nan, -2]) + assert isinstance(sparse.tolist(), list) + tm.assert_numpy_array_equal(np.array(sparse.tolist()), + np.array([1, np.nan, 2, np.nan, -2])) diff --git a/pandas/tests/sparse/test_format.py b/pandas/tests/sparse/test_format.py index d983bd209085a..238d7e2f9be05 100644 --- a/pandas/tests/sparse/test_format.py +++ b/pandas/tests/sparse/test_format.py @@ -9,7 +9,6 @@ is_platform_32bit) from pandas.core.config import option_context - use_32bit_repr = is_platform_windows() or is_platform_32bit()
- [x] closes https://github.com/pandas-dev/pandas/issues/21818 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry Works, as far as possible, by routing through `SparseBlock.setitem()`, which calls into `SparseArray.set_values()`; that in turn returns a new (replacement) `SparseArray` object.
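A sketch of the intended usage (this PR was not merged, so treat it as proposed behavior rather than a released API; the `PerformanceWarning` for multi-key assignment comes from `SparseArray.set_values()` above):

```python
import numpy as np
import pandas as pd

ss = pd.SparseSeries(np.array([0., 0., np.nan, np.nan]), kind='block')

ss.iloc[0] = 1.0   # single position: stays on the sparse path
ss[[2, 3]] = 2.0   # multiple keys: densified interim, may warn
```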
https://api.github.com/repos/pandas-dev/pandas/pulls/17785
2017-10-04T16:31:31Z
2018-11-26T03:55:35Z
null
2018-11-26T03:55:35Z
API: Change str for CategoricalDtype to category
diff --git a/doc/source/whatsnew/v0.21.0.txt b/doc/source/whatsnew/v0.21.0.txt index 812bc2e031d78..1c9e876d77bf8 100644 --- a/doc/source/whatsnew/v0.21.0.txt +++ b/doc/source/whatsnew/v0.21.0.txt @@ -157,11 +157,10 @@ The values have been correctly interpreted as integers. The ``.dtype`` property of a ``Categorical``, ``CategoricalIndex`` or a ``Series`` with categorical type will now return an instance of -``CategoricalDtype``. For the most part, this is backwards compatible, though -the string repr has changed. If you were previously using ``str(s.dtype) == -'category'`` to detect categorical data, switch to -:func:`pandas.api.types.is_categorical_dtype`, which is compatible with the old -and new ``CategoricalDtype``. +``CategoricalDtype``. This change should be backwards compatible, though the +repr has changed. ``str(CategoricalDtype())`` is still the string +``'category'``, but the preferred way to detect categorical data is to use +:func:`pandas.api.types.is_categorical_dtype`. See the :ref:`CategoricalDtype docs <categorical.categoricaldtype>` for more. diff --git a/pandas/core/dtypes/dtypes.py b/pandas/core/dtypes/dtypes.py index d2487905caced..4d97b7d17a6dc 100644 --- a/pandas/core/dtypes/dtypes.py +++ b/pandas/core/dtypes/dtypes.py @@ -220,7 +220,7 @@ def __eq__(self, other): # both unordered; this could probably be optimized / cached return hash(self) == hash(other) - def __unicode__(self): + def __repr__(self): tpl = u'CategoricalDtype(categories={}ordered={})' if self.categories is None: data = u"None, " diff --git a/pandas/tests/dtypes/test_dtypes.py b/pandas/tests/dtypes/test_dtypes.py index be3e5fdc467d3..0b9e2c9fe5ffc 100644 --- a/pandas/tests/dtypes/test_dtypes.py +++ b/pandas/tests/dtypes/test_dtypes.py @@ -1,4 +1,5 @@ # -*- coding: utf-8 -*- +import re import pytest from itertools import product @@ -649,3 +650,10 @@ def test_from_categorical_dtype_both(self): result = CategoricalDtype._from_categorical_dtype( c1, categories=[1, 2], ordered=False) assert result == CategoricalDtype([1, 2], ordered=False) + + def test_str_vs_repr(self): + c1 = CategoricalDtype(['a', 'b']) + assert str(c1) == 'category' + # Py2 will have unicode prefixes + pat = r"CategoricalDtype\(categories=\[.*\], ordered=False\)" + assert re.match(pat, repr(c1)) diff --git a/pandas/tests/series/test_analytics.py b/pandas/tests/series/test_analytics.py index 9f5e4f2ac4b6e..6495d748e3823 100644 --- a/pandas/tests/series/test_analytics.py +++ b/pandas/tests/series/test_analytics.py @@ -1784,7 +1784,8 @@ class TestNLargestNSmallest(object): # not supported on some archs # Series([3., 2, 1, 2, 5], dtype='complex256'), Series([3., 2, 1, 2, 5], dtype='complex128'), - Series(list('abcde'))]) + Series(list('abcde')), + Series(list('abcde'), dtype='category')]) def test_error(self, r): dt = r.dtype msg = ("Cannot use method 'n(larg|small)est' with " @@ -1795,16 +1796,6 @@ def test_error(self, r): with tm.assert_raises_regex(TypeError, msg): method(arg) - def test_error_categorical_dtype(self): - # same as test_error, but regex hard to escape properly - msg = ("Cannot use method 'n(larg|small)est' with dtype " - "CategoricalDtype.+") - with tm.assert_raises_regex(TypeError, msg): - Series(list('ab'), dtype='category').nlargest(2) - - with tm.assert_raises_regex(TypeError, msg): - Series(list('ab'), dtype='category').nsmallest(2) - @pytest.mark.parametrize( "s", [v for k, v in s_main_dtypes().iteritems()])
Better compatibility with older versions. Closes #17782
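In short, a sketch of the resulting behavior (matching the test added above):

```python
from pandas.api.types import CategoricalDtype

dtype = CategoricalDtype(['a', 'b'])
assert str(dtype) == 'category'  # old string alias still works
print(repr(dtype))  # CategoricalDtype(categories=['a', 'b'], ordered=False)
```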
https://api.github.com/repos/pandas-dev/pandas/pulls/17783
2017-10-04T14:58:31Z
2017-10-05T18:35:56Z
2017-10-05T18:35:55Z
2017-10-06T11:42:05Z
Use argument dtype to inform coercion
diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py index c2cf6afc1a7b5..f3b11e52cdd7a 100644 --- a/pandas/core/dtypes/cast.py +++ b/pandas/core/dtypes/cast.py @@ -483,6 +483,39 @@ def infer_dtype_from_array(arr, pandas_dtype=False): return arr.dtype, arr +def maybe_infer_dtype_type(element): + """Try to infer an object's dtype, for use in arithmetic ops + + Uses `element.dtype` if that's available. + Objects implementing the iterator protocol are cast to a NumPy array, + and from there the array's type is used. + + Parameters + ---------- + element : object + Possibly has a `.dtype` attribute, and possibly the iterator + protocol. + + Returns + ------- + tipo : type + + Examples + -------- + >>> from collections import namedtuple + >>> Foo = namedtuple("Foo", "dtype") + >>> maybe_infer_dtype_type(Foo(np.dtype("i8"))) + numpy.int64 + """ + tipo = None + if hasattr(element, 'dtype'): + tipo = element.dtype + elif is_list_like(element): + element = np.asarray(element) + tipo = element.dtype + return tipo + + def maybe_upcast(values, fill_value=np.nan, dtype=None, copy=False): """ provide explict type promotion and coercion diff --git a/pandas/core/internals.py b/pandas/core/internals.py index 1fddf985f0cdb..90de4ded18f8c 100644 --- a/pandas/core/internals.py +++ b/pandas/core/internals.py @@ -44,7 +44,8 @@ soft_convert_objects, maybe_convert_objects, astype_nansafe, - find_common_type) + find_common_type, + maybe_infer_dtype_type) from pandas.core.dtypes.missing import ( isna, notna, array_equivalent, _isna_compat, @@ -629,10 +630,9 @@ def convert(self, copy=True, **kwargs): def _can_hold_element(self, element): """ require the same dtype as ourselves """ dtype = self.values.dtype.type - if is_list_like(element): - element = np.asarray(element) - tipo = element.dtype.type - return issubclass(tipo, dtype) + tipo = maybe_infer_dtype_type(element) + if tipo is not None: + return issubclass(tipo.type, dtype) return isinstance(element, dtype) def _try_cast_result(self, result, dtype=None): @@ -1806,11 +1806,10 @@ class FloatBlock(FloatOrComplexBlock): _downcast_dtype = 'int64' def _can_hold_element(self, element): - if is_list_like(element): - element = np.asarray(element) - tipo = element.dtype.type - return (issubclass(tipo, (np.floating, np.integer)) and - not issubclass(tipo, (np.datetime64, np.timedelta64))) + tipo = maybe_infer_dtype_type(element) + if tipo is not None: + return (issubclass(tipo.type, (np.floating, np.integer)) and + not issubclass(tipo.type, (np.datetime64, np.timedelta64))) return (isinstance(element, (float, int, np.floating, np.int_)) and not isinstance(element, (bool, np.bool_, datetime, timedelta, np.datetime64, np.timedelta64))) @@ -1856,9 +1855,9 @@ class ComplexBlock(FloatOrComplexBlock): is_complex = True def _can_hold_element(self, element): - if is_list_like(element): - element = np.array(element) - return issubclass(element.dtype.type, + tipo = maybe_infer_dtype_type(element) + if tipo is not None: + return issubclass(tipo.type, (np.floating, np.integer, np.complexfloating)) return (isinstance(element, (float, int, complex, np.float_, np.int_)) and @@ -1874,12 +1873,12 @@ class IntBlock(NumericBlock): _can_hold_na = False def _can_hold_element(self, element): - if is_list_like(element): - element = np.array(element) - tipo = element.dtype.type - return (issubclass(tipo, np.integer) and - not issubclass(tipo, (np.datetime64, np.timedelta64)) and - self.dtype.itemsize >= element.dtype.itemsize) + tipo = maybe_infer_dtype_type(element) + if tipo 
is not None: + return (issubclass(tipo.type, np.integer) and + not issubclass(tipo.type, (np.datetime64, + np.timedelta64)) and + self.dtype.itemsize >= tipo.itemsize) return is_integer(element) def should_store(self, value): @@ -1917,10 +1916,9 @@ def _box_func(self): return lambda x: tslib.Timedelta(x, unit='ns') def _can_hold_element(self, element): - if is_list_like(element): - element = np.array(element) - tipo = element.dtype.type - return issubclass(tipo, np.timedelta64) + tipo = maybe_infer_dtype_type(element) + if tipo is not None: + return issubclass(tipo.type, np.timedelta64) return isinstance(element, (timedelta, np.timedelta64)) def fillna(self, value, **kwargs): @@ -2018,9 +2016,9 @@ class BoolBlock(NumericBlock): _can_hold_na = False def _can_hold_element(self, element): - if is_list_like(element): - element = np.asarray(element) - return issubclass(element.dtype.type, np.bool_) + tipo = maybe_infer_dtype_type(element) + if tipo is not None: + return issubclass(tipo.type, np.bool_) return isinstance(element, (bool, np.bool_)) def should_store(self, value): @@ -2450,7 +2448,9 @@ def _astype(self, dtype, mgr=None, **kwargs): return super(DatetimeBlock, self)._astype(dtype=dtype, **kwargs) def _can_hold_element(self, element): - if is_list_like(element): + tipo = maybe_infer_dtype_type(element) + if tipo is not None: + # TODO: this still uses asarray, instead of dtype.type element = np.array(element) return element.dtype == _NS_DTYPE or element.dtype == np.int64 return (is_integer(element) or isinstance(element, datetime) or diff --git a/pandas/tests/internals/test_internals.py b/pandas/tests/internals/test_internals.py index f40fc151676da..c182db35c0c89 100644 --- a/pandas/tests/internals/test_internals.py +++ b/pandas/tests/internals/test_internals.py @@ -2,6 +2,7 @@ # pylint: disable=W0102 from datetime import datetime, date +import operator import sys import pytest import numpy as np @@ -1213,3 +1214,64 @@ def assert_add_equals(val, inc, result): with pytest.raises(ValueError): BlockPlacement(slice(2, None, -1)).add(-1) + + +class DummyElement(object): + def __init__(self, value, dtype): + self.value = value + self.dtype = np.dtype(dtype) + + def __array__(self): + return np.array(self.value, dtype=self.dtype) + + def __str__(self): + return "DummyElement({}, {})".format(self.value, self.dtype) + + def __repr__(self): + return str(self) + + def astype(self, dtype, copy=False): + self.dtype = dtype + return self + + def view(self, dtype): + return type(self)(self.value.view(dtype), dtype) + + def any(self, axis=None): + return bool(self.value) + + +class TestCanHoldElement(object): + @pytest.mark.parametrize('value, dtype', [ + (1, 'i8'), + (1.0, 'f8'), + (1j, 'complex128'), + (True, 'bool'), + (np.timedelta64(20, 'ns'), '<m8[ns]'), + (np.datetime64(20, 'ns'), '<M8[ns]'), + ]) + @pytest.mark.parametrize('op', [ + operator.add, + operator.sub, + operator.mul, + operator.truediv, + operator.mod, + operator.pow, + ], ids=lambda x: x.__name__) + def test_binop_other(self, op, value, dtype): + skip = {(operator.add, 'bool'), + (operator.sub, 'bool'), + (operator.mul, 'bool'), + (operator.truediv, 'bool'), + (operator.mod, 'i8'), + (operator.mod, 'complex128'), + (operator.mod, '<M8[ns]'), + (operator.mod, '<m8[ns]'), + (operator.pow, 'bool')} + if (op, dtype) in skip: + pytest.skip("Invalid combination {},{}".format(op, dtype)) + e = DummyElement(value, dtype) + s = pd.DataFrame({"A": [e.value, e.value]}, dtype=e.dtype) + result = op(s, e).dtypes + expected = op(s, value).dtypes 
+ assert_series_equal(result, expected)
Master: ```python >>> import dask.dataframe as dd >>> s = dd.core.Scalar({('s', 0): 10}, 's', 'i8') >>> pdf = pd.DataFrame({'a': [1, 2, 3, 4, 5, 6, 7], ... 'b': [7, 6, 5, 4, 3, 2, 1]}) >>> (pdf + s).dtypes a object b object dtype: object ``` Head: ``` >>> (pdf + s).dtypes a int64 b int64 dtype: object ``` This is more consistent with 0.20.3, while keeping the benefits from https://github.com/pandas-dev/pandas/pull/16821 Closes https://github.com/pandas-dev/pandas/issues/17767 --- I may play around with this a bit more. I feel like this is a bit too numpy specific, for example, adding a `DataFrame` and a `pa.Scalar` still fails: ```python In [39]: x = pa.array([1])[0] In [40]: df = pd.DataFrame({"A": [1, 2]}) In [41]: df + x --------------------------------------------------------------------------- TypeError Traceback (most recent call last) ~/Envs/pandas-dev/lib/python3.6/site-packages/pandas/pandas/core/internals.py in eval(self, func, other, raise_on_error, try_cast, mgr) 1292 try: -> 1293 values, values_mask, other, other_mask = self._try_coerce_args( 1294 transf(values), other) ~/Envs/pandas-dev/lib/python3.6/site-packages/pandas/pandas/core/internals.py in _try_coerce_args(self, values, other) 677 raise TypeError("cannot convert {} to an {}".format( --> 678 type(other).__name__, 679 type(self).__name__.lower().replace('Block', ''))) TypeError: cannot convert Int64Value to an intblock During handling of the above exception, another exception occurred: TypeError Traceback (most recent call last) ~/Envs/pandas-dev/lib/python3.6/site-packages/pandas/pandas/core/ops.py in na_op(x, y) 1199 result = expressions.evaluate(op, str_rep, x, y, -> 1200 raise_on_error=True, **eval_kwargs) 1201 except TypeError: ~/Envs/pandas-dev/lib/python3.6/site-packages/pandas/pandas/core/computation/expressions.py in evaluate(op, op_str, a, b, raise_on_error, use_numexpr, **eval_kwargs) 210 return _evaluate(op, op_str, a, b, raise_on_error=raise_on_error, --> 211 **eval_kwargs) 212 return _evaluate_standard(op, op_str, a, b, raise_on_error=raise_on_error) ~/Envs/pandas-dev/lib/python3.6/site-packages/pandas/pandas/core/computation/expressions.py in _evaluate_standard(op, op_str, a, b, raise_on_error, **eval_kwargs) 63 with np.errstate(all='ignore'): ---> 64 return op(a, b) 65 TypeError: unsupported operand type(s) for +: 'int' and 'pyarrow.lib.Int64Value' During handling of the above exception, another exception occurred: TypeError Traceback (most recent call last) ~/Envs/pandas-dev/lib/python3.6/site-packages/pandas/pandas/core/internals.py in eval(self, func, other, raise_on_error, try_cast, mgr) 1351 try: -> 1352 with np.errstate(all='ignore'): 1353 result = get_result(other) ~/Envs/pandas-dev/lib/python3.6/site-packages/pandas/pandas/core/internals.py in get_result(other) 1320 result = func(values, other) -> 1321 else: 1322 result = func(values, other) ~/Envs/pandas-dev/lib/python3.6/site-packages/pandas/pandas/core/ops.py in na_op(x, y) 1225 with np.errstate(all='ignore'): -> 1226 result[mask] = op(xrav, y) 1227 else: TypeError: unsupported operand type(s) for +: 'int' and 'pyarrow.lib.Int64Value' During handling of the above exception, another exception occurred: TypeError Traceback (most recent call last) <ipython-input-41-a01bb1dd67a6> in <module>() ----> 1 df + x ~/Envs/pandas-dev/lib/python3.6/site-packages/pandas/pandas/core/ops.py in f(self, other, axis, level, fill_value) 1263 self = self.fillna(fill_value) 1264 -> 1265 return self._combine_const(other, na_op) 1266 1267 f.__name__ = name 
~/Envs/pandas-dev/lib/python3.6/site-packages/pandas/pandas/core/frame.py in _combine_const(self, other, func, raise_on_error, try_cast) 3836 new_data = self._data.eval(func=func, other=other, 3837 raise_on_error=raise_on_error, -> 3838 try_cast=try_cast) 3839 return self._constructor(new_data) 3840 ~/Envs/pandas-dev/lib/python3.6/site-packages/pandas/pandas/core/internals.py in eval(self, **kwargs) 3375 return self.apply('where', **kwargs) 3376 -> 3377 def eval(self, **kwargs): 3378 return self.apply('eval', **kwargs) 3379 ~/Envs/pandas-dev/lib/python3.6/site-packages/pandas/pandas/core/internals.py in apply(self, f, axes, filter, do_integrity_check, consolidate, **kwargs) 3269 copy=align_copy) 3270 -> 3271 kwargs['mgr'] = self 3272 applied = getattr(b, f)(**kwargs) 3273 result_blocks = _extend_blocks(applied, result_blocks) ~/Envs/pandas-dev/lib/python3.6/site-packages/pandas/pandas/core/internals.py in eval(self, func, other, raise_on_error, try_cast, mgr) 1296 block = self.coerce_to_target_dtype(orig_other) 1297 return block.eval(func, orig_other, -> 1298 raise_on_error=raise_on_error, 1299 try_cast=try_cast, mgr=mgr) 1300 ~/Envs/pandas-dev/lib/python3.6/site-packages/pandas/pandas/core/internals.py in eval(self, func, other, raise_on_error, try_cast, mgr) 1357 except ValueError as detail: 1358 raise -> 1359 except Exception as detail: 1360 result = handle_error() 1361 ~/Envs/pandas-dev/lib/python3.6/site-packages/pandas/pandas/core/internals.py in handle_error() 1340 if raise_on_error: 1341 # The 'detail' variable is defined in outer scope. -> 1342 raise TypeError('Could not operate %s with block values %s' % 1343 (repr(other), str(detail))) # noqa 1344 else: TypeError: Could not operate 1 with block values unsupported operand type(s) for +: 'int' and 'pyarrow.lib.Int64Value' ``` Do we want that to work?
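For context, a sketch of the helper's contract, extending the docstring example above (the exact default integer dtype is platform-dependent):

```python
from collections import namedtuple
import numpy as np
from pandas.core.dtypes.cast import maybe_infer_dtype_type

Foo = namedtuple('Foo', 'dtype')

# dtype-carrying objects report their dtype directly...
assert maybe_infer_dtype_type(Foo(np.dtype('i8'))) == np.dtype('int64')
# ...list-likes are coerced through np.asarray...
assert maybe_infer_dtype_type(np.array([1.0])) == np.dtype('float64')
# ...and plain scalars yield None, falling back to isinstance checks.
assert maybe_infer_dtype_type(1) is None
```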
https://api.github.com/repos/pandas-dev/pandas/pulls/17779
2017-10-04T13:03:33Z
2017-10-05T11:30:07Z
2017-10-05T11:30:07Z
2017-10-05T12:43:21Z
DEPR: Deprecate parse_cols in read_excel
diff --git a/doc/source/io.rst b/doc/source/io.rst index 8fe5685b33aff..0aa4ea72e3b13 100644 --- a/doc/source/io.rst +++ b/doc/source/io.rst @@ -2800,21 +2800,21 @@ Parsing Specific Columns It is often the case that users will insert columns to do temporary computations in Excel and you may not want to read in those columns. `read_excel` takes -a `parse_cols` keyword to allow you to specify a subset of columns to parse. +a `usecols` keyword to allow you to specify a subset of columns to parse. -If `parse_cols` is an integer, then it is assumed to indicate the last column +If `usecols` is an integer, then it is assumed to indicate the last column to be parsed. .. code-block:: python - read_excel('path_to_file.xls', 'Sheet1', parse_cols=2) + read_excel('path_to_file.xls', 'Sheet1', usecols=2) -If `parse_cols` is a list of integers, then it is assumed to be the file column +If `usecols` is a list of integers, then it is assumed to be the file column indices to be parsed. .. code-block:: python - read_excel('path_to_file.xls', 'Sheet1', parse_cols=[0, 2, 3]) + read_excel('path_to_file.xls', 'Sheet1', usecols=[0, 2, 3]) Parsing Dates diff --git a/doc/source/whatsnew/v0.21.0.txt b/doc/source/whatsnew/v0.21.0.txt index 61c05d1b226e0..52b8437ec98b1 100644 --- a/doc/source/whatsnew/v0.21.0.txt +++ b/doc/source/whatsnew/v0.21.0.txt @@ -658,6 +658,7 @@ Deprecations ~~~~~~~~~~~~ - :func:`read_excel()` has deprecated ``sheetname`` in favor of ``sheet_name`` for consistency with ``.to_excel()`` (:issue:`10559`). +- :func:`read_excel()` has deprecated ``parse_cols`` in favor of ``usecols`` for consistency with :func:`read_csv` (:issue:`4988`) - The ``convert`` parameter has been deprecated in the ``.take()`` method, as it was not being respected (:issue:`16948`) - ``pd.options.html.border`` has been deprecated in favor of ``pd.options.display.html.border`` (:issue:`15793`). - :func:`SeriesGroupBy.nth` has deprecated ``True`` in favor of ``'all'`` for its kwarg ``dropna`` (:issue:`11038`). diff --git a/pandas/io/excel.py b/pandas/io/excel.py index 41e3b5283a532..c8d0e42a022ba 100644 --- a/pandas/io/excel.py +++ b/pandas/io/excel.py @@ -31,7 +31,7 @@ import pandas.compat.openpyxl_compat as openpyxl_compat from warnings import warn from distutils.version import LooseVersion -from pandas.util._decorators import Appender +from pandas.util._decorators import Appender, deprecate_kwarg from textwrap import fill __all__ = ["read_excel", "ExcelWriter", "ExcelFile"] @@ -86,7 +86,7 @@ Column (0-indexed) to use as the row labels of the DataFrame. Pass None if there is no such column. If a list is passed, those columns will be combined into a ``MultiIndex``. If a - subset of data is selected with ``parse_cols``, index_col + subset of data is selected with ``usecols``, index_col is based on the subset. names : array-like, default None List of column names to use. If file contains no header row, @@ -115,6 +115,10 @@ .. versionadded:: 0.19.0 parse_cols : int or list, default None + .. deprecated:: 0.21.0 + Pass in `usecols` instead. 
+ +usecols : int or list, default None * If None then parse all columns, * If int then indicates last column to be parsed * If list of ints then indicates list of column numbers to be parsed @@ -205,8 +209,9 @@ def get_writer(engine_name): @Appender(_read_excel_doc) +@deprecate_kwarg("parse_cols", "usecols") def read_excel(io, sheet_name=0, header=0, skiprows=None, skip_footer=0, - index_col=None, names=None, parse_cols=None, parse_dates=False, + index_col=None, names=None, usecols=None, parse_dates=False, date_parser=None, na_values=None, thousands=None, convert_float=True, converters=None, dtype=None, true_values=None, false_values=None, engine=None, @@ -226,7 +231,7 @@ def read_excel(io, sheet_name=0, header=0, skiprows=None, skip_footer=0, return io._parse_excel( sheetname=sheet_name, header=header, skiprows=skiprows, names=names, - index_col=index_col, parse_cols=parse_cols, parse_dates=parse_dates, + index_col=index_col, usecols=usecols, parse_dates=parse_dates, date_parser=date_parser, na_values=na_values, thousands=thousands, convert_float=convert_float, skip_footer=skip_footer, converters=converters, dtype=dtype, true_values=true_values, @@ -295,7 +300,7 @@ def __fspath__(self): return self._io def parse(self, sheet_name=0, header=0, skiprows=None, skip_footer=0, - names=None, index_col=None, parse_cols=None, parse_dates=False, + names=None, index_col=None, usecols=None, parse_dates=False, date_parser=None, na_values=None, thousands=None, convert_float=True, converters=None, true_values=None, false_values=None, squeeze=False, **kwds): @@ -309,7 +314,7 @@ def parse(self, sheet_name=0, header=0, skiprows=None, skip_footer=0, return self._parse_excel(sheetname=sheet_name, header=header, skiprows=skiprows, names=names, index_col=index_col, - parse_cols=parse_cols, + usecols=usecols, parse_dates=parse_dates, date_parser=date_parser, na_values=na_values, thousands=thousands, @@ -321,7 +326,7 @@ def parse(self, sheet_name=0, header=0, skiprows=None, skip_footer=0, squeeze=squeeze, **kwds) - def _should_parse(self, i, parse_cols): + def _should_parse(self, i, usecols): def _range2cols(areas): """ @@ -347,15 +352,15 @@ def _excel2num(x): cols.append(_excel2num(rng)) return cols - if isinstance(parse_cols, int): - return i <= parse_cols - elif isinstance(parse_cols, compat.string_types): - return i in _range2cols(parse_cols) + if isinstance(usecols, int): + return i <= usecols + elif isinstance(usecols, compat.string_types): + return i in _range2cols(usecols) else: - return i in parse_cols + return i in usecols def _parse_excel(self, sheetname=0, header=0, skiprows=None, names=None, - skip_footer=0, index_col=None, parse_cols=None, + skip_footer=0, index_col=None, usecols=None, parse_dates=False, date_parser=None, na_values=None, thousands=None, convert_float=True, true_values=None, false_values=None, verbose=False, dtype=None, @@ -470,10 +475,10 @@ def _parse_cell(cell_contents, cell_typ): row = [] for j, (value, typ) in enumerate(zip(sheet.row_values(i), sheet.row_types(i))): - if parse_cols is not None and j not in should_parse: - should_parse[j] = self._should_parse(j, parse_cols) + if usecols is not None and j not in should_parse: + should_parse[j] = self._should_parse(j, usecols) - if parse_cols is None or should_parse[j]: + if usecols is None or should_parse[j]: row.append(_parse_cell(value, typ)) data.append(row) diff --git a/pandas/tests/io/test_excel.py b/pandas/tests/io/test_excel.py index 4e25fe0371718..f21f638799e57 100644 --- a/pandas/tests/io/test_excel.py +++ 
b/pandas/tests/io/test_excel.py @@ -158,56 +158,74 @@ def setup_method(self, method): self.check_skip() super(ReadingTestsBase, self).setup_method(method) - def test_parse_cols_int(self): + def test_usecols_int(self): dfref = self.get_csv_refdf('test1') dfref = dfref.reindex(columns=['A', 'B', 'C']) - df1 = self.get_exceldf('test1', 'Sheet1', index_col=0, parse_cols=3) + df1 = self.get_exceldf('test1', 'Sheet1', index_col=0, usecols=3) df2 = self.get_exceldf('test1', 'Sheet2', skiprows=[1], index_col=0, - parse_cols=3) + usecols=3) + + with tm.assert_produces_warning(FutureWarning): + df3 = self.get_exceldf('test1', 'Sheet2', skiprows=[1], + index_col=0, parse_cols=3) + # TODO add index to xls file) tm.assert_frame_equal(df1, dfref, check_names=False) tm.assert_frame_equal(df2, dfref, check_names=False) + tm.assert_frame_equal(df3, dfref, check_names=False) - def test_parse_cols_list(self): + def test_usecols_list(self): dfref = self.get_csv_refdf('test1') dfref = dfref.reindex(columns=['B', 'C']) df1 = self.get_exceldf('test1', 'Sheet1', index_col=0, - parse_cols=[0, 2, 3]) + usecols=[0, 2, 3]) df2 = self.get_exceldf('test1', 'Sheet2', skiprows=[1], index_col=0, - parse_cols=[0, 2, 3]) + usecols=[0, 2, 3]) + + with tm.assert_produces_warning(FutureWarning): + df3 = self.get_exceldf('test1', 'Sheet2', skiprows=[1], + index_col=0, parse_cols=[0, 2, 3]) + # TODO add index to xls file) tm.assert_frame_equal(df1, dfref, check_names=False) tm.assert_frame_equal(df2, dfref, check_names=False) + tm.assert_frame_equal(df3, dfref, check_names=False) - def test_parse_cols_str(self): + def test_usecols_str(self): dfref = self.get_csv_refdf('test1') df1 = dfref.reindex(columns=['A', 'B', 'C']) df2 = self.get_exceldf('test1', 'Sheet1', index_col=0, - parse_cols='A:D') + usecols='A:D') df3 = self.get_exceldf('test1', 'Sheet2', skiprows=[1], index_col=0, - parse_cols='A:D') + usecols='A:D') + + with tm.assert_produces_warning(FutureWarning): + df4 = self.get_exceldf('test1', 'Sheet2', skiprows=[1], + index_col=0, parse_cols='A:D') + # TODO add index to xls, read xls ignores index name ? 
tm.assert_frame_equal(df2, df1, check_names=False) tm.assert_frame_equal(df3, df1, check_names=False) + tm.assert_frame_equal(df4, df1, check_names=False) df1 = dfref.reindex(columns=['B', 'C']) df2 = self.get_exceldf('test1', 'Sheet1', index_col=0, - parse_cols='A,C,D') + usecols='A,C,D') df3 = self.get_exceldf('test1', 'Sheet2', skiprows=[1], index_col=0, - parse_cols='A,C,D') + usecols='A,C,D') # TODO add index to xls file tm.assert_frame_equal(df2, df1, check_names=False) tm.assert_frame_equal(df3, df1, check_names=False) df1 = dfref.reindex(columns=['B', 'C']) df2 = self.get_exceldf('test1', 'Sheet1', index_col=0, - parse_cols='A,C:D') + usecols='A,C:D') df3 = self.get_exceldf('test1', 'Sheet2', skiprows=[1], index_col=0, - parse_cols='A,C:D') + usecols='A,C:D') tm.assert_frame_equal(df2, df1, check_names=False) tm.assert_frame_equal(df3, df1, check_names=False) @@ -457,14 +475,14 @@ def test_read_one_empty_col_no_header(self): actual_header_none = read_excel( path, 'no_header', - parse_cols=[0], + usecols=[0], header=None ) actual_header_zero = read_excel( path, 'no_header', - parse_cols=[0], + usecols=[0], header=0 ) expected = DataFrame() @@ -486,14 +504,14 @@ def test_read_one_empty_col_with_header(self): actual_header_none = read_excel( path, 'with_header', - parse_cols=[0], + usecols=[0], header=None ) actual_header_zero = read_excel( path, 'with_header', - parse_cols=[0], + usecols=[0], header=0 ) expected_header_none = DataFrame(pd.Series([0], dtype='int64'))
Will now use "usecols" just like in read_csv. closes #4988.
https://api.github.com/repos/pandas-dev/pandas/pulls/17774
2017-10-04T06:31:07Z
2017-10-04T23:56:21Z
2017-10-04T23:56:21Z
2017-10-05T03:37:43Z
ERR: Clarify exceptions for invalid datetimelike operations
diff --git a/pandas/core/indexes/datetimelike.py b/pandas/core/indexes/datetimelike.py index c3232627fce74..d5b4525e8a1eb 100644 --- a/pandas/core/indexes/datetimelike.py +++ b/pandas/core/indexes/datetimelike.py @@ -621,7 +621,9 @@ def _convert_scalar_indexer(self, key, kind=None): ._convert_scalar_indexer(key, kind=kind)) def _add_datelike(self, other): - raise AbstractMethodError(self) + raise TypeError("cannot add {0} and {1}" + .format(type(self).__name__, + type(other).__name__)) def _sub_datelike(self, other): raise AbstractMethodError(self) @@ -647,16 +649,13 @@ def __add__(self, other): return other._add_delta(self) raise TypeError("cannot add TimedeltaIndex and {typ}" .format(typ=type(other))) - elif isinstance(other, Index): - raise TypeError("cannot add {typ1} and {typ2}" - .format(typ1=type(self).__name__, - typ2=type(other).__name__)) elif isinstance(other, (DateOffset, timedelta, np.timedelta64, Timedelta)): return self._add_delta(other) elif is_integer(other): return self.shift(other) - elif isinstance(other, (Timestamp, datetime)): + elif isinstance(other, (Index, Timestamp, datetime, + np.datetime64)): return self._add_datelike(other) else: # pragma: no cover return NotImplemented diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py index 9127864eab8a1..ef3af2acf46a7 100644 --- a/pandas/core/indexes/datetimes.py +++ b/pandas/core/indexes/datetimes.py @@ -747,7 +747,9 @@ def _add_datelike(self, other): # adding a timedeltaindex to a datetimelike if other is libts.NaT: return self._nat_new(box=True) - raise TypeError("cannot add a datelike to a DatetimeIndex") + raise TypeError("cannot add {0} and {1}" + .format(type(self).__name__, + type(other).__name__)) def _sub_datelike(self, other): # subtract a datetime from myself, yielding a TimedeltaIndex diff --git a/pandas/tests/indexes/datetimes/test_ops.py b/pandas/tests/indexes/datetimes/test_ops.py index 86e65feec04f3..f0f9b8ff1fc6d 100644 --- a/pandas/tests/indexes/datetimes/test_ops.py +++ b/pandas/tests/indexes/datetimes/test_ops.py @@ -434,31 +434,51 @@ def test_add_iadd(self): tm.assert_index_equal(rng, expected) idx = DatetimeIndex(['2011-01-01', '2011-01-02']) - msg = "cannot add a datelike to a DatetimeIndex" + msg = "cannot add DatetimeIndex and Timestamp" with tm.assert_raises_regex(TypeError, msg): idx + Timestamp('2011-01-01') with tm.assert_raises_regex(TypeError, msg): Timestamp('2011-01-01') + idx - def test_add_dti_dti(self): - # previously performed setop (deprecated in 0.16.0), now raises - # TypeError (GH14164) - - dti = date_range('20130101', periods=3) - dti_tz = date_range('20130101', periods=3).tz_localize('US/Eastern') - - with pytest.raises(TypeError): - dti + dti - - with pytest.raises(TypeError): - dti_tz + dti_tz - - with pytest.raises(TypeError): - dti_tz + dti - - with pytest.raises(TypeError): - dti + dti_tz + @pytest.mark.parametrize('addend', [ + datetime(2011, 1, 1), + DatetimeIndex(['2011-01-01', '2011-01-02']), + DatetimeIndex(['2011-01-01', '2011-01-02']) + .tz_localize('US/Eastern'), + np.datetime64('2011-01-01'), + Timestamp('2011-01-01'), + ]) + def test_add_datetimelike_and_dti(self, addend): + # issue #9631 + + dti = DatetimeIndex(['2011-01-01', '2011-01-02']) + msg = 'cannot add DatetimeIndex and {0}'.format( + type(addend).__name__) + with tm.assert_raises_regex(TypeError, msg): + dti + addend + with tm.assert_raises_regex(TypeError, msg): + addend + dti + + @pytest.mark.parametrize('addend', [ + datetime(2011, 1, 1), + DatetimeIndex(['2011-01-01', 
'2011-01-02']), + DatetimeIndex(['2011-01-01', '2011-01-02']) + .tz_localize('US/Eastern'), + np.datetime64('2011-01-01'), + Timestamp('2011-01-01'), + ]) + def test_add_datetimelike_and_dti_tz(self, addend): + # issue #9631 + + dti_tz = DatetimeIndex(['2011-01-01', '2011-01-02']) \ + .tz_localize('US/Eastern') + msg = 'cannot add DatetimeIndex and {0}'.format( + type(addend).__name__) + with tm.assert_raises_regex(TypeError, msg): + dti_tz + addend + with tm.assert_raises_regex(TypeError, msg): + addend + dti_tz def test_difference(self): for tz in self.tz:
Closes #9631.
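A sketch of the clarified exception (message text taken from the updated tests):

```python
import pandas as pd

dti = pd.DatetimeIndex(['2011-01-01', '2011-01-02'])

try:
    # Adding any datetime-like (Timestamp, datetime, np.datetime64, Index)
    # now raises a TypeError naming both operands.
    dti + pd.Timestamp('2011-01-01')
except TypeError as err:
    print(err)  # cannot add DatetimeIndex and Timestamp
```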
https://api.github.com/repos/pandas-dev/pandas/pulls/17772
2017-10-03T23:45:01Z
2017-10-08T16:15:52Z
2017-10-08T16:15:52Z
2017-10-08T16:15:55Z
Move fields functions out of the way
diff --git a/pandas/_libs/tslib.pyx b/pandas/_libs/tslib.pyx index 096ebe9a5627b..9fd1f367d013d 100644 --- a/pandas/_libs/tslib.pyx +++ b/pandas/_libs/tslib.pyx @@ -90,6 +90,9 @@ from tslibs.timezones cimport ( get_timezone, get_utcoffset, maybe_get_tz, get_dst_info ) +from tslibs.fields import ( + get_date_name_field, get_start_end_field, get_date_field, + build_field_sarray) cdef inline object create_timestamp_from_ts( @@ -3830,48 +3833,6 @@ cdef inline bisect_right_i8(int64_t *data, int64_t val, Py_ssize_t n): # Accessors #---------------------------------------------------------------------- -def build_field_sarray(ndarray[int64_t] dtindex): - """ - Datetime as int64 representation to a structured array of fields - """ - cdef: - Py_ssize_t i, count = 0 - pandas_datetimestruct dts - ndarray[int32_t] years, months, days, hours, minutes, seconds, mus - - count = len(dtindex) - - sa_dtype = [('Y', 'i4'), # year - ('M', 'i4'), # month - ('D', 'i4'), # day - ('h', 'i4'), # hour - ('m', 'i4'), # min - ('s', 'i4'), # second - ('u', 'i4')] # microsecond - - out = np.empty(count, dtype=sa_dtype) - - years = out['Y'] - months = out['M'] - days = out['D'] - hours = out['h'] - minutes = out['m'] - seconds = out['s'] - mus = out['u'] - - for i in range(count): - pandas_datetime_to_datetimestruct(dtindex[i], PANDAS_FR_ns, &dts) - years[i] = dts.year - months[i] = dts.month - days[i] = dts.day - hours[i] = dts.hour - minutes[i] = dts.min - seconds[i] = dts.sec - mus[i] = dts.us - - return out - - def get_time_micros(ndarray[int64_t] dtindex): """ Datetime as int64 representation to a structured array of fields @@ -3891,453 +3852,6 @@ def get_time_micros(ndarray[int64_t] dtindex): return micros -@cython.wraparound(False) -@cython.boundscheck(False) -def get_date_field(ndarray[int64_t] dtindex, object field): - """ - Given a int64-based datetime index, extract the year, month, etc., - field and return an array of these values. 
- """ - cdef: - Py_ssize_t i, count = 0 - ndarray[int32_t] out - ndarray[int32_t, ndim=2] _month_offset - int isleap, isleap_prev - pandas_datetimestruct dts - int mo_off, doy, dow, woy - - _month_offset = np.array( - [[ 0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334, 365 ], - [ 0, 31, 60, 91, 121, 152, 182, 213, 244, 274, 305, 335, 366 ]], - dtype=np.int32 ) - - count = len(dtindex) - out = np.empty(count, dtype='i4') - - if field == 'Y': - with nogil: - for i in range(count): - if dtindex[i] == NPY_NAT: out[i] = -1; continue - - pandas_datetime_to_datetimestruct( - dtindex[i], PANDAS_FR_ns, &dts) - out[i] = dts.year - return out - - elif field == 'M': - with nogil: - for i in range(count): - if dtindex[i] == NPY_NAT: out[i] = -1; continue - - pandas_datetime_to_datetimestruct( - dtindex[i], PANDAS_FR_ns, &dts) - out[i] = dts.month - return out - - elif field == 'D': - with nogil: - for i in range(count): - if dtindex[i] == NPY_NAT: out[i] = -1; continue - - pandas_datetime_to_datetimestruct( - dtindex[i], PANDAS_FR_ns, &dts) - out[i] = dts.day - return out - - elif field == 'h': - with nogil: - for i in range(count): - if dtindex[i] == NPY_NAT: out[i] = -1; continue - - pandas_datetime_to_datetimestruct( - dtindex[i], PANDAS_FR_ns, &dts) - out[i] = dts.hour - return out - - elif field == 'm': - with nogil: - for i in range(count): - if dtindex[i] == NPY_NAT: out[i] = -1; continue - - pandas_datetime_to_datetimestruct( - dtindex[i], PANDAS_FR_ns, &dts) - out[i] = dts.min - return out - - elif field == 's': - with nogil: - for i in range(count): - if dtindex[i] == NPY_NAT: out[i] = -1; continue - - pandas_datetime_to_datetimestruct( - dtindex[i], PANDAS_FR_ns, &dts) - out[i] = dts.sec - return out - - elif field == 'us': - with nogil: - for i in range(count): - if dtindex[i] == NPY_NAT: out[i] = -1; continue - - pandas_datetime_to_datetimestruct( - dtindex[i], PANDAS_FR_ns, &dts) - out[i] = dts.us - return out - - elif field == 'ns': - with nogil: - for i in range(count): - if dtindex[i] == NPY_NAT: out[i] = -1; continue - - pandas_datetime_to_datetimestruct( - dtindex[i], PANDAS_FR_ns, &dts) - out[i] = dts.ps / 1000 - return out - elif field == 'doy': - with nogil: - for i in range(count): - if dtindex[i] == NPY_NAT: out[i] = -1; continue - - pandas_datetime_to_datetimestruct( - dtindex[i], PANDAS_FR_ns, &dts) - isleap = is_leapyear(dts.year) - out[i] = _month_offset[isleap, dts.month -1] + dts.day - return out - - elif field == 'dow': - with nogil: - for i in range(count): - if dtindex[i] == NPY_NAT: out[i] = -1; continue - - pandas_datetime_to_datetimestruct( - dtindex[i], PANDAS_FR_ns, &dts) - out[i] = dayofweek(dts.year, dts.month, dts.day) - return out - - elif field == 'woy': - with nogil: - for i in range(count): - if dtindex[i] == NPY_NAT: out[i] = -1; continue - - pandas_datetime_to_datetimestruct( - dtindex[i], PANDAS_FR_ns, &dts) - isleap = is_leapyear(dts.year) - isleap_prev = is_leapyear(dts.year - 1) - mo_off = _month_offset[isleap, dts.month - 1] - doy = mo_off + dts.day - dow = dayofweek(dts.year, dts.month, dts.day) - - #estimate - woy = (doy - 1) - dow + 3 - if woy >= 0: - woy = woy / 7 + 1 - - # verify - if woy < 0: - if (woy > -2) or (woy == -2 and isleap_prev): - woy = 53 - else: - woy = 52 - elif woy == 53: - if 31 - dts.day + dow < 3: - woy = 1 - - out[i] = woy - return out - - elif field == 'q': - with nogil: - for i in range(count): - if dtindex[i] == NPY_NAT: out[i] = -1; continue - - pandas_datetime_to_datetimestruct( - dtindex[i], PANDAS_FR_ns, &dts) - 
out[i] = dts.month - out[i] = ((out[i] - 1) / 3) + 1 - return out - - elif field == 'dim': - with nogil: - for i in range(count): - if dtindex[i] == NPY_NAT: out[i] = -1; continue - - pandas_datetime_to_datetimestruct( - dtindex[i], PANDAS_FR_ns, &dts) - out[i] = days_in_month(dts) - return out - elif field == 'is_leap_year': - return _isleapyear_arr(get_date_field(dtindex, 'Y')) - - raise ValueError("Field %s not supported" % field) - - -@cython.wraparound(False) -def get_start_end_field(ndarray[int64_t] dtindex, object field, - object freqstr=None, int month_kw=12): - """ - Given an int64-based datetime index return array of indicators - of whether timestamps are at the start/end of the month/quarter/year - (defined by frequency). - """ - cdef: - Py_ssize_t i - int count = 0 - bint is_business = 0 - int end_month = 12 - int start_month = 1 - ndarray[int8_t] out - ndarray[int32_t, ndim=2] _month_offset - bint isleap - pandas_datetimestruct dts - int mo_off, dom, doy, dow, ldom - - _month_offset = np.array( - [[ 0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334, 365 ], - [ 0, 31, 60, 91, 121, 152, 182, 213, 244, 274, 305, 335, 366 ]], - dtype=np.int32 ) - - count = len(dtindex) - out = np.zeros(count, dtype='int8') - - if freqstr: - if freqstr == 'C': - raise ValueError( - "Custom business days is not supported by %s" % field) - is_business = freqstr[0] == 'B' - - # YearBegin(), BYearBegin() use month = starting month of year. - # QuarterBegin(), BQuarterBegin() use startingMonth = starting - # month of year. Other offests use month, startingMonth as ending - # month of year. - - if (freqstr[0:2] in ['MS', 'QS', 'AS']) or ( - freqstr[1:3] in ['MS', 'QS', 'AS']): - end_month = 12 if month_kw == 1 else month_kw - 1 - start_month = month_kw - else: - end_month = month_kw - start_month = (end_month % 12) + 1 - else: - end_month = 12 - start_month = 1 - - if field == 'is_month_start': - if is_business: - for i in range(count): - if dtindex[i] == NPY_NAT: out[i] = 0; continue - - pandas_datetime_to_datetimestruct( - dtindex[i], PANDAS_FR_ns, &dts) - dom = dts.day - dow = dayofweek(dts.year, dts.month, dts.day) - - if (dom == 1 and dow < 5) or (dom <= 3 and dow == 0): - out[i] = 1 - return out.view(bool) - else: - for i in range(count): - if dtindex[i] == NPY_NAT: out[i] = 0; continue - - pandas_datetime_to_datetimestruct( - dtindex[i], PANDAS_FR_ns, &dts) - dom = dts.day - - if dom == 1: - out[i] = 1 - return out.view(bool) - - elif field == 'is_month_end': - if is_business: - for i in range(count): - if dtindex[i] == NPY_NAT: out[i] = 0; continue - - pandas_datetime_to_datetimestruct( - dtindex[i], PANDAS_FR_ns, &dts) - isleap = is_leapyear(dts.year) - mo_off = _month_offset[isleap, dts.month - 1] - dom = dts.day - doy = mo_off + dom - ldom = _month_offset[isleap, dts.month] - dow = dayofweek(dts.year, dts.month, dts.day) - - if (ldom == doy and dow < 5) or ( - dow == 4 and (ldom - doy <= 2)): - out[i] = 1 - return out.view(bool) - else: - for i in range(count): - if dtindex[i] == NPY_NAT: out[i] = 0; continue - - pandas_datetime_to_datetimestruct( - dtindex[i], PANDAS_FR_ns, &dts) - isleap = is_leapyear(dts.year) - mo_off = _month_offset[isleap, dts.month - 1] - dom = dts.day - doy = mo_off + dom - ldom = _month_offset[isleap, dts.month] - - if ldom == doy: - out[i] = 1 - return out.view(bool) - - elif field == 'is_quarter_start': - if is_business: - for i in range(count): - if dtindex[i] == NPY_NAT: out[i] = 0; continue - - pandas_datetime_to_datetimestruct( - dtindex[i], PANDAS_FR_ns, 
&dts) - dom = dts.day - dow = dayofweek(dts.year, dts.month, dts.day) - - if ((dts.month - start_month) % 3 == 0) and ( - (dom == 1 and dow < 5) or (dom <= 3 and dow == 0)): - out[i] = 1 - return out.view(bool) - else: - for i in range(count): - if dtindex[i] == NPY_NAT: out[i] = 0; continue - - pandas_datetime_to_datetimestruct( - dtindex[i], PANDAS_FR_ns, &dts) - dom = dts.day - - if ((dts.month - start_month) % 3 == 0) and dom == 1: - out[i] = 1 - return out.view(bool) - - elif field == 'is_quarter_end': - if is_business: - for i in range(count): - if dtindex[i] == NPY_NAT: out[i] = 0; continue - - pandas_datetime_to_datetimestruct( - dtindex[i], PANDAS_FR_ns, &dts) - isleap = is_leapyear(dts.year) - mo_off = _month_offset[isleap, dts.month - 1] - dom = dts.day - doy = mo_off + dom - ldom = _month_offset[isleap, dts.month] - dow = dayofweek(dts.year, dts.month, dts.day) - - if ((dts.month - end_month) % 3 == 0) and ( - (ldom == doy and dow < 5) or ( - dow == 4 and (ldom - doy <= 2))): - out[i] = 1 - return out.view(bool) - else: - for i in range(count): - if dtindex[i] == NPY_NAT: out[i] = 0; continue - - pandas_datetime_to_datetimestruct( - dtindex[i], PANDAS_FR_ns, &dts) - isleap = is_leapyear(dts.year) - mo_off = _month_offset[isleap, dts.month - 1] - dom = dts.day - doy = mo_off + dom - ldom = _month_offset[isleap, dts.month] - - if ((dts.month - end_month) % 3 == 0) and (ldom == doy): - out[i] = 1 - return out.view(bool) - - elif field == 'is_year_start': - if is_business: - for i in range(count): - if dtindex[i] == NPY_NAT: out[i] = 0; continue - - pandas_datetime_to_datetimestruct( - dtindex[i], PANDAS_FR_ns, &dts) - dom = dts.day - dow = dayofweek(dts.year, dts.month, dts.day) - - if (dts.month == start_month) and ( - (dom == 1 and dow < 5) or (dom <= 3 and dow == 0)): - out[i] = 1 - return out.view(bool) - else: - for i in range(count): - if dtindex[i] == NPY_NAT: out[i] = 0; continue - - pandas_datetime_to_datetimestruct( - dtindex[i], PANDAS_FR_ns, &dts) - dom = dts.day - - if (dts.month == start_month) and dom == 1: - out[i] = 1 - return out.view(bool) - - elif field == 'is_year_end': - if is_business: - for i in range(count): - if dtindex[i] == NPY_NAT: out[i] = 0; continue - - pandas_datetime_to_datetimestruct( - dtindex[i], PANDAS_FR_ns, &dts) - isleap = is_leapyear(dts.year) - dom = dts.day - mo_off = _month_offset[isleap, dts.month - 1] - doy = mo_off + dom - dow = dayofweek(dts.year, dts.month, dts.day) - ldom = _month_offset[isleap, dts.month] - - if (dts.month == end_month) and ( - (ldom == doy and dow < 5) or ( - dow == 4 and (ldom - doy <= 2))): - out[i] = 1 - return out.view(bool) - else: - for i in range(count): - if dtindex[i] == NPY_NAT: out[i] = 0; continue - - pandas_datetime_to_datetimestruct( - dtindex[i], PANDAS_FR_ns, &dts) - isleap = is_leapyear(dts.year) - mo_off = _month_offset[isleap, dts.month - 1] - dom = dts.day - doy = mo_off + dom - ldom = _month_offset[isleap, dts.month] - - if (dts.month == end_month) and (ldom == doy): - out[i] = 1 - return out.view(bool) - - raise ValueError("Field %s not supported" % field) - - -@cython.wraparound(False) -@cython.boundscheck(False) -def get_date_name_field(ndarray[int64_t] dtindex, object field): - """ - Given a int64-based datetime index, return array of strings of date - name based on requested field (e.g. 
weekday_name) - """ - cdef: - Py_ssize_t i, count = 0 - ndarray[object] out - pandas_datetimestruct dts - int dow - - _dayname = np.array( - ['Monday', 'Tuesday', 'Wednesday', 'Thursday', - 'Friday', 'Saturday', 'Sunday'], - dtype=np.object_ ) - - count = len(dtindex) - out = np.empty(count, dtype=object) - - if field == 'weekday_name': - for i in range(count): - if dtindex[i] == NPY_NAT: out[i] = np.nan; continue - - pandas_datetime_to_datetimestruct(dtindex[i], PANDAS_FR_ns, &dts) - dow = dayofweek(dts.year, dts.month, dts.day) - out[i] = _dayname[dow] - return out - - raise ValueError("Field %s not supported" % field) - - cdef int64_t DAY_NS = 86400000000000LL @@ -4471,18 +3985,6 @@ def dates_normalized(ndarray[int64_t] stamps, tz=None): #---------------------------------------------------------------------- -cpdef _isleapyear_arr(ndarray years): - cdef: - ndarray[int8_t] out - - # to make NaT result as False - out = np.zeros(len(years), dtype='int8') - out[np.logical_or(years % 400 == 0, - np.logical_and(years % 4 == 0, - years % 100 > 0))] = 1 - return out.view(bool) - - def monthrange(int64_t year, int64_t month): cdef: int64_t days diff --git a/pandas/_libs/tslibs/fields.pyx b/pandas/_libs/tslibs/fields.pyx new file mode 100644 index 0000000000000..3ea414b2d4a70 --- /dev/null +++ b/pandas/_libs/tslibs/fields.pyx @@ -0,0 +1,537 @@ +# -*- coding: utf-8 -*- +# cython: profile=False +# cython: linetrace=False +# distutils: define_macros=CYTHON_TRACE=0 +# distutils: define_macros=CYTHON_TRACE_NOGIL=0 +""" +Functions for accessing attributes of Timestamp/datetime64/datetime-like +objects and arrays +""" + +cimport cython +from cython cimport Py_ssize_t + +import numpy as np +cimport numpy as np +from numpy cimport ndarray, int64_t, int32_t, int8_t +np.import_array() + + +from datetime cimport ( + pandas_datetimestruct, + pandas_datetime_to_datetimestruct, + PANDAS_FR_ns, + days_per_month_table, + is_leapyear, + dayofweek) + +cimport util + +cdef int64_t NPY_NAT = util.get_nat() + + +def build_field_sarray(ndarray[int64_t] dtindex): + """ + Datetime as int64 representation to a structured array of fields + """ + cdef: + Py_ssize_t i, count = 0 + pandas_datetimestruct dts + ndarray[int32_t] years, months, days, hours, minutes, seconds, mus + + count = len(dtindex) + + sa_dtype = [('Y', 'i4'), # year + ('M', 'i4'), # month + ('D', 'i4'), # day + ('h', 'i4'), # hour + ('m', 'i4'), # min + ('s', 'i4'), # second + ('u', 'i4')] # microsecond + + out = np.empty(count, dtype=sa_dtype) + + years = out['Y'] + months = out['M'] + days = out['D'] + hours = out['h'] + minutes = out['m'] + seconds = out['s'] + mus = out['u'] + + for i in range(count): + pandas_datetime_to_datetimestruct(dtindex[i], PANDAS_FR_ns, &dts) + years[i] = dts.year + months[i] = dts.month + days[i] = dts.day + hours[i] = dts.hour + minutes[i] = dts.min + seconds[i] = dts.sec + mus[i] = dts.us + + return out + + +@cython.wraparound(False) +@cython.boundscheck(False) +def get_date_name_field(ndarray[int64_t] dtindex, object field): + """ + Given a int64-based datetime index, return array of strings of date + name based on requested field (e.g. 
weekday_name) + """ + cdef: + Py_ssize_t i, count = 0 + ndarray[object] out + pandas_datetimestruct dts + int dow + + _dayname = np.array( + ['Monday', 'Tuesday', 'Wednesday', 'Thursday', + 'Friday', 'Saturday', 'Sunday'], + dtype=np.object_) + + count = len(dtindex) + out = np.empty(count, dtype=object) + + if field == 'weekday_name': + for i in range(count): + if dtindex[i] == NPY_NAT: + out[i] = np.nan + continue + + pandas_datetime_to_datetimestruct(dtindex[i], PANDAS_FR_ns, &dts) + dow = dayofweek(dts.year, dts.month, dts.day) + out[i] = _dayname[dow] + return out + + raise ValueError("Field %s not supported" % field) + + +@cython.wraparound(False) +def get_start_end_field(ndarray[int64_t] dtindex, object field, + object freqstr=None, int month_kw=12): + """ + Given an int64-based datetime index return array of indicators + of whether timestamps are at the start/end of the month/quarter/year + (defined by frequency). + """ + cdef: + Py_ssize_t i + int count = 0 + bint is_business = 0 + int end_month = 12 + int start_month = 1 + ndarray[int8_t] out + ndarray[int32_t, ndim=2] _month_offset + bint isleap + pandas_datetimestruct dts + int mo_off, dom, doy, dow, ldom + + _month_offset = np.array( + [[ 0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334, 365 ], + [ 0, 31, 60, 91, 121, 152, 182, 213, 244, 274, 305, 335, 366 ]], + dtype=np.int32) + + count = len(dtindex) + out = np.zeros(count, dtype='int8') + + if freqstr: + if freqstr == 'C': + raise ValueError( + "Custom business days is not supported by %s" % field) + is_business = freqstr[0] == 'B' + + # YearBegin(), BYearBegin() use month = starting month of year. + # QuarterBegin(), BQuarterBegin() use startingMonth = starting + # month of year. Other offests use month, startingMonth as ending + # month of year. 
+ + if (freqstr[0:2] in ['MS', 'QS', 'AS']) or ( + freqstr[1:3] in ['MS', 'QS', 'AS']): + end_month = 12 if month_kw == 1 else month_kw - 1 + start_month = month_kw + else: + end_month = month_kw + start_month = (end_month % 12) + 1 + else: + end_month = 12 + start_month = 1 + + if field == 'is_month_start': + if is_business: + for i in range(count): + if dtindex[i] == NPY_NAT: out[i] = 0; continue + + pandas_datetime_to_datetimestruct( + dtindex[i], PANDAS_FR_ns, &dts) + dom = dts.day + dow = dayofweek(dts.year, dts.month, dts.day) + + if (dom == 1 and dow < 5) or (dom <= 3 and dow == 0): + out[i] = 1 + return out.view(bool) + else: + for i in range(count): + if dtindex[i] == NPY_NAT: out[i] = 0; continue + + pandas_datetime_to_datetimestruct( + dtindex[i], PANDAS_FR_ns, &dts) + dom = dts.day + + if dom == 1: + out[i] = 1 + return out.view(bool) + + elif field == 'is_month_end': + if is_business: + for i in range(count): + if dtindex[i] == NPY_NAT: out[i] = 0; continue + + pandas_datetime_to_datetimestruct( + dtindex[i], PANDAS_FR_ns, &dts) + isleap = is_leapyear(dts.year) + mo_off = _month_offset[isleap, dts.month - 1] + dom = dts.day + doy = mo_off + dom + ldom = _month_offset[isleap, dts.month] + dow = dayofweek(dts.year, dts.month, dts.day) + + if (ldom == doy and dow < 5) or ( + dow == 4 and (ldom - doy <= 2)): + out[i] = 1 + return out.view(bool) + else: + for i in range(count): + if dtindex[i] == NPY_NAT: out[i] = 0; continue + + pandas_datetime_to_datetimestruct( + dtindex[i], PANDAS_FR_ns, &dts) + isleap = is_leapyear(dts.year) + mo_off = _month_offset[isleap, dts.month - 1] + dom = dts.day + doy = mo_off + dom + ldom = _month_offset[isleap, dts.month] + + if ldom == doy: + out[i] = 1 + return out.view(bool) + + elif field == 'is_quarter_start': + if is_business: + for i in range(count): + if dtindex[i] == NPY_NAT: out[i] = 0; continue + + pandas_datetime_to_datetimestruct( + dtindex[i], PANDAS_FR_ns, &dts) + dom = dts.day + dow = dayofweek(dts.year, dts.month, dts.day) + + if ((dts.month - start_month) % 3 == 0) and ( + (dom == 1 and dow < 5) or (dom <= 3 and dow == 0)): + out[i] = 1 + return out.view(bool) + else: + for i in range(count): + if dtindex[i] == NPY_NAT: out[i] = 0; continue + + pandas_datetime_to_datetimestruct( + dtindex[i], PANDAS_FR_ns, &dts) + dom = dts.day + + if ((dts.month - start_month) % 3 == 0) and dom == 1: + out[i] = 1 + return out.view(bool) + + elif field == 'is_quarter_end': + if is_business: + for i in range(count): + if dtindex[i] == NPY_NAT: out[i] = 0; continue + + pandas_datetime_to_datetimestruct( + dtindex[i], PANDAS_FR_ns, &dts) + isleap = is_leapyear(dts.year) + mo_off = _month_offset[isleap, dts.month - 1] + dom = dts.day + doy = mo_off + dom + ldom = _month_offset[isleap, dts.month] + dow = dayofweek(dts.year, dts.month, dts.day) + + if ((dts.month - end_month) % 3 == 0) and ( + (ldom == doy and dow < 5) or ( + dow == 4 and (ldom - doy <= 2))): + out[i] = 1 + return out.view(bool) + else: + for i in range(count): + if dtindex[i] == NPY_NAT: out[i] = 0; continue + + pandas_datetime_to_datetimestruct( + dtindex[i], PANDAS_FR_ns, &dts) + isleap = is_leapyear(dts.year) + mo_off = _month_offset[isleap, dts.month - 1] + dom = dts.day + doy = mo_off + dom + ldom = _month_offset[isleap, dts.month] + + if ((dts.month - end_month) % 3 == 0) and (ldom == doy): + out[i] = 1 + return out.view(bool) + + elif field == 'is_year_start': + if is_business: + for i in range(count): + if dtindex[i] == NPY_NAT: out[i] = 0; continue + + 
pandas_datetime_to_datetimestruct( + dtindex[i], PANDAS_FR_ns, &dts) + dom = dts.day + dow = dayofweek(dts.year, dts.month, dts.day) + + if (dts.month == start_month) and ( + (dom == 1 and dow < 5) or (dom <= 3 and dow == 0)): + out[i] = 1 + return out.view(bool) + else: + for i in range(count): + if dtindex[i] == NPY_NAT: out[i] = 0; continue + + pandas_datetime_to_datetimestruct( + dtindex[i], PANDAS_FR_ns, &dts) + dom = dts.day + + if (dts.month == start_month) and dom == 1: + out[i] = 1 + return out.view(bool) + + elif field == 'is_year_end': + if is_business: + for i in range(count): + if dtindex[i] == NPY_NAT: out[i] = 0; continue + + pandas_datetime_to_datetimestruct( + dtindex[i], PANDAS_FR_ns, &dts) + isleap = is_leapyear(dts.year) + dom = dts.day + mo_off = _month_offset[isleap, dts.month - 1] + doy = mo_off + dom + dow = dayofweek(dts.year, dts.month, dts.day) + ldom = _month_offset[isleap, dts.month] + + if (dts.month == end_month) and ( + (ldom == doy and dow < 5) or ( + dow == 4 and (ldom - doy <= 2))): + out[i] = 1 + return out.view(bool) + else: + for i in range(count): + if dtindex[i] == NPY_NAT: out[i] = 0; continue + + pandas_datetime_to_datetimestruct( + dtindex[i], PANDAS_FR_ns, &dts) + isleap = is_leapyear(dts.year) + mo_off = _month_offset[isleap, dts.month - 1] + dom = dts.day + doy = mo_off + dom + ldom = _month_offset[isleap, dts.month] + + if (dts.month == end_month) and (ldom == doy): + out[i] = 1 + return out.view(bool) + + raise ValueError("Field %s not supported" % field) + + +@cython.wraparound(False) +@cython.boundscheck(False) +def get_date_field(ndarray[int64_t] dtindex, object field): + """ + Given a int64-based datetime index, extract the year, month, etc., + field and return an array of these values. + """ + cdef: + Py_ssize_t i, count = 0 + ndarray[int32_t] out + ndarray[int32_t, ndim=2] _month_offset + int isleap, isleap_prev + pandas_datetimestruct dts + int mo_off, doy, dow, woy + + _month_offset = np.array( + [[ 0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334, 365 ], + [ 0, 31, 60, 91, 121, 152, 182, 213, 244, 274, 305, 335, 366 ]], + dtype=np.int32 ) + + count = len(dtindex) + out = np.empty(count, dtype='i4') + + if field == 'Y': + with nogil: + for i in range(count): + if dtindex[i] == NPY_NAT: out[i] = -1; continue + + pandas_datetime_to_datetimestruct( + dtindex[i], PANDAS_FR_ns, &dts) + out[i] = dts.year + return out + + elif field == 'M': + with nogil: + for i in range(count): + if dtindex[i] == NPY_NAT: out[i] = -1; continue + + pandas_datetime_to_datetimestruct( + dtindex[i], PANDAS_FR_ns, &dts) + out[i] = dts.month + return out + + elif field == 'D': + with nogil: + for i in range(count): + if dtindex[i] == NPY_NAT: out[i] = -1; continue + + pandas_datetime_to_datetimestruct( + dtindex[i], PANDAS_FR_ns, &dts) + out[i] = dts.day + return out + + elif field == 'h': + with nogil: + for i in range(count): + if dtindex[i] == NPY_NAT: out[i] = -1; continue + + pandas_datetime_to_datetimestruct( + dtindex[i], PANDAS_FR_ns, &dts) + out[i] = dts.hour + return out + + elif field == 'm': + with nogil: + for i in range(count): + if dtindex[i] == NPY_NAT: out[i] = -1; continue + + pandas_datetime_to_datetimestruct( + dtindex[i], PANDAS_FR_ns, &dts) + out[i] = dts.min + return out + + elif field == 's': + with nogil: + for i in range(count): + if dtindex[i] == NPY_NAT: out[i] = -1; continue + + pandas_datetime_to_datetimestruct( + dtindex[i], PANDAS_FR_ns, &dts) + out[i] = dts.sec + return out + + elif field == 'us': + with nogil: + for i in 
range(count): + if dtindex[i] == NPY_NAT: out[i] = -1; continue + + pandas_datetime_to_datetimestruct( + dtindex[i], PANDAS_FR_ns, &dts) + out[i] = dts.us + return out + + elif field == 'ns': + with nogil: + for i in range(count): + if dtindex[i] == NPY_NAT: out[i] = -1; continue + + pandas_datetime_to_datetimestruct( + dtindex[i], PANDAS_FR_ns, &dts) + out[i] = dts.ps / 1000 + return out + elif field == 'doy': + with nogil: + for i in range(count): + if dtindex[i] == NPY_NAT: out[i] = -1; continue + + pandas_datetime_to_datetimestruct( + dtindex[i], PANDAS_FR_ns, &dts) + isleap = is_leapyear(dts.year) + out[i] = _month_offset[isleap, dts.month -1] + dts.day + return out + + elif field == 'dow': + with nogil: + for i in range(count): + if dtindex[i] == NPY_NAT: out[i] = -1; continue + + pandas_datetime_to_datetimestruct( + dtindex[i], PANDAS_FR_ns, &dts) + out[i] = dayofweek(dts.year, dts.month, dts.day) + return out + + elif field == 'woy': + with nogil: + for i in range(count): + if dtindex[i] == NPY_NAT: out[i] = -1; continue + + pandas_datetime_to_datetimestruct( + dtindex[i], PANDAS_FR_ns, &dts) + isleap = is_leapyear(dts.year) + isleap_prev = is_leapyear(dts.year - 1) + mo_off = _month_offset[isleap, dts.month - 1] + doy = mo_off + dts.day + dow = dayofweek(dts.year, dts.month, dts.day) + + #estimate + woy = (doy - 1) - dow + 3 + if woy >= 0: + woy = woy / 7 + 1 + + # verify + if woy < 0: + if (woy > -2) or (woy == -2 and isleap_prev): + woy = 53 + else: + woy = 52 + elif woy == 53: + if 31 - dts.day + dow < 3: + woy = 1 + + out[i] = woy + return out + + elif field == 'q': + with nogil: + for i in range(count): + if dtindex[i] == NPY_NAT: out[i] = -1; continue + + pandas_datetime_to_datetimestruct( + dtindex[i], PANDAS_FR_ns, &dts) + out[i] = dts.month + out[i] = ((out[i] - 1) / 3) + 1 + return out + + elif field == 'dim': + with nogil: + for i in range(count): + if dtindex[i] == NPY_NAT: out[i] = -1; continue + + pandas_datetime_to_datetimestruct( + dtindex[i], PANDAS_FR_ns, &dts) + out[i] = days_in_month(dts) + return out + elif field == 'is_leap_year': + return isleapyear_arr(get_date_field(dtindex, 'Y')) + + raise ValueError("Field %s not supported" % field) + + +cdef inline int days_in_month(pandas_datetimestruct dts) nogil: + return days_per_month_table[is_leapyear(dts.year)][dts.month -1] + + +cpdef isleapyear_arr(ndarray years): + """vectorized version of isleapyear; NaT evaluates as False""" + cdef: + ndarray[int8_t] out + + out = np.zeros(len(years), dtype='int8') + out[np.logical_or(years % 400 == 0, + np.logical_and(years % 4 == 0, + years % 100 > 0))] = 1 + return out.view(bool) diff --git a/pandas/core/indexes/period.py b/pandas/core/indexes/period.py index fb47d1db48610..e6fc47845012a 100644 --- a/pandas/core/indexes/period.py +++ b/pandas/core/indexes/period.py @@ -35,6 +35,7 @@ from pandas._libs.period import (Period, IncompatibleFrequency, get_period_field_arr, _validate_end_alias, _quarter_to_myear) +from pandas._libs.tslibs.fields import isleapyear_arr from pandas.core.base import _shared_docs from pandas.core.indexes.base import _index_shared_docs, _ensure_index @@ -589,7 +590,7 @@ def to_datetime(self, dayfirst=False): @property def is_leap_year(self): """ Logical indicating if the date belongs to a leap year """ - return tslib._isleapyear_arr(np.asarray(self.year)) + return isleapyear_arr(np.asarray(self.year)) @property def start_time(self): diff --git a/setup.py b/setup.py index 793aa089e708f..f58f8cbb5519d 100755 --- a/setup.py +++ b/setup.py @@ -343,6 
+343,7 @@ class CheckSDist(sdist_class): 'pandas/_libs/parsers.pyx', 'pandas/_libs/tslibs/strptime.pyx', 'pandas/_libs/tslibs/timezones.pyx', + 'pandas/_libs/tslibs/fields.pyx', 'pandas/_libs/tslibs/frequencies.pyx', 'pandas/_libs/tslibs/parsing.pyx', 'pandas/io/sas/sas.pyx'] @@ -486,6 +487,10 @@ def pxd(name): 'sources': ['pandas/_libs/src/datetime/np_datetime.c', 'pandas/_libs/src/datetime/np_datetime_strings.c']}, '_libs.tslibs.timezones': {'pyxfile': '_libs/tslibs/timezones'}, + '_libs.tslibs.fields': {'pyxfile': '_libs/tslibs/fields', + 'depends': tseries_depends, + 'sources': ['pandas/_libs/src/datetime/np_datetime.c', + 'pandas/_libs/src/datetime/np_datetime_strings.c']}, '_libs.period': {'pyxfile': '_libs/period', 'depends': (tseries_depends + ['pandas/_libs/src/period_helper.h']),
Notes:

- The only usage in `tslib` of `get_date_name_field` is for `Timestamp.weekday` and can be simplified with a more direct call.
- Several of the functions from `tslibs.fields` imported into `tslib` are unused. Updating the external imports to use the new locations is for a follow-up; see the sketch below.
- There is some flake8 cleanup to be done in the new `tslibs.fields`, but that is being saved for a follow-up. For now this is just a cut/paste.
- `get_start_end_field` will be largely unnecessary if/when the suggestion to deprecate `Timestamp.freq` in #15146 is adopted.

- [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
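For reference, downstream call sites would eventually import from the new module roughly like this (a sketch; only the module path changes, not the behavior):

```python
import numpy as np
from pandas._libs.tslibs.fields import get_date_field, isleapyear_arr

# The field functions expect an i8 view of datetime64[ns] values.
stamps = np.array(['2017-01-01', '2017-06-15'],
                  dtype='datetime64[ns]').view('i8')

get_date_field(stamps, 'M')             # array([1, 6], dtype=int32)
isleapyear_arr(np.array([2016, 2017]))  # array([ True, False])
```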
https://api.github.com/repos/pandas-dev/pandas/pulls/17770
2017-10-03T22:13:45Z
2017-10-04T23:53:51Z
2017-10-04T23:53:51Z
2017-10-30T16:22:42Z
DOC: Add column name metadata to spec
diff --git a/doc/source/developer.rst b/doc/source/developer.rst index 78c12b7e23b37..a695366d9ada3 100644 --- a/doc/source/developer.rst +++ b/doc/source/developer.rst @@ -45,6 +45,8 @@ So that a ``pandas.DataFrame`` can be faithfully reconstructed, we store a .. code-block:: text {'index_columns': ['__index_level_0__', '__index_level_1__', ...], + 'column_index_names': [<column index level name 0>, <column index level name 1>, ...], + 'column_index_dtypes': [<dtype 0>, <dtype 1>, ..., <dtype N>] 'columns': [<c0>, <c1>, ...], 'pandas_version': $VERSION} @@ -106,6 +108,8 @@ As an example of fully-formed metadata: .. code-block:: text {'index_columns': ['__index_level_0__'], + 'column_index_names': [None], + 'column_index_dtypes': ['object'], 'columns': [ {'name': 'c0', 'pandas_type': 'int8',
This adds two metadata fields (`column_index_names`, `column_index_dtypes`) to the spec to allow faithful reproduction of the names of column indexes. cc @wesm @martindurant @jreback
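A sketch of the resulting metadata for a frame with a single, unnamed column index (per-column keys follow the existing spec example; version string illustrative):

```python
# Stored under the 'pandas' key in the Arrow/Parquet schema metadata.
metadata = {
    'index_columns': ['__index_level_0__'],
    'column_index_names': [None],       # new: name of each column index level
    'column_index_dtypes': ['object'],  # new: dtype of each column index level
    'columns': [
        {'name': 'c0', 'pandas_type': 'int8',
         'numpy_type': 'int8', 'metadata': None},
    ],
    'pandas_version': '0.21.0',
}
```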
https://api.github.com/repos/pandas-dev/pandas/pulls/17769
2017-10-03T19:44:20Z
2017-10-04T23:55:18Z
2017-10-04T23:55:18Z
2017-10-05T16:47:04Z
Revert "CI: pin pytables to valid build (#17760)"
diff --git a/ci/requirements-3.6.run b/ci/requirements-3.6.run index 721d0c1ad8101..822144a80bc9a 100644 --- a/ci/requirements-3.6.run +++ b/ci/requirements-3.6.run @@ -7,7 +7,7 @@ xlsxwriter xlrd xlwt numexpr -pytables=3.4.2=np113py36_1 +pytables matplotlib lxml html5lib
This reverts commit 6d30d5f425ddfaf143b8bd878f81395852b50cd9. Closes #17757.
https://api.github.com/repos/pandas-dev/pandas/pulls/17768
2017-10-03T19:40:49Z
2017-10-05T18:35:20Z
2017-10-05T18:35:20Z
2017-10-05T18:37:13Z
BUG: Validate the justify parameter in to_html
diff --git a/doc/source/whatsnew/v0.21.0.txt b/doc/source/whatsnew/v0.21.0.txt index 0d4eaa90d7ab3..2470c04fb97e8 100644 --- a/doc/source/whatsnew/v0.21.0.txt +++ b/doc/source/whatsnew/v0.21.0.txt @@ -777,7 +777,8 @@ I/O - Bug in :func:`read_stata` where the index was not set (:issue:`16342`) - Bug in :func:`read_html` where import check fails when run in multiple threads (:issue:`16928`) - Bug in :func:`read_csv` where automatic delimiter detection caused a ``TypeError`` to be thrown when a bad line was encountered rather than the correct error message (:issue:`13374`) -- Bug in ``DataFrame.to_html()`` with ``notebook=True`` where DataFrames with named indices or non-MultiIndex indices had undesired horizontal or vertical alignment for column or row labels, respectively (:issue:`16792`) +- Bug in :meth:`DataFrame.to_html` with ``notebook=True`` where DataFrames with named indices or non-MultiIndex indices had undesired horizontal or vertical alignment for column or row labels, respectively (:issue:`16792`) +- Bug in :meth:`DataFrame.to_html` in which there was no validation of the ``justify`` parameter (:issue:`17527`) - Bug in :func:`HDFStore.select` when reading a contiguous mixed-data table featuring VLArray (:issue:`17021`) Plotting diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 01e83821d4524..778a3dc9046a3 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -1685,6 +1685,10 @@ def to_html(self, buf=None, columns=None, col_space=None, header=True, .. versionadded:: 0.19.0 """ + if (justify is not None and + justify not in fmt._VALID_JUSTIFY_PARAMETERS): + raise ValueError("Invalid value for justify parameter") + formatter = fmt.DataFrameFormatter(self, buf=buf, columns=columns, col_space=col_space, na_rep=na_rep, formatters=formatters, diff --git a/pandas/io/formats/format.py b/pandas/io/formats/format.py index 386d9c3ffe30d..e8ea0714b1dda 100644 --- a/pandas/io/formats/format.py +++ b/pandas/io/formats/format.py @@ -79,9 +79,15 @@ line_width : int, optional Width to wrap a line in characters, default no wrap""" +_VALID_JUSTIFY_PARAMETERS = ("left", "right", "center", "justify", + "justify-all", "start", "end", "inherit", + "match-parent", "initial", "unset") + justify_docstring = """ - justify : {'left', 'right'}, default None - Left or right-justify the column labels. If None uses the option from + justify : {'left', 'right', 'center', 'justify', + 'justify-all', 'start', 'end', 'inherit', + 'match-parent', 'initial', 'unset'}, default None + How to justify the column labels. 
If None uses the option from the print configuration (controlled by set_option), 'right' out of the box.""" diff --git a/pandas/tests/io/formats/test_to_html.py b/pandas/tests/io/formats/test_to_html.py index 194b5ba3e0276..0c8ea98a44d50 100644 --- a/pandas/tests/io/formats/test_to_html.py +++ b/pandas/tests/io/formats/test_to_html.py @@ -1557,15 +1557,16 @@ def test_to_html_multiindex(self): assert result == expected - def test_to_html_justify(self): + @pytest.mark.parametrize("justify", fmt._VALID_JUSTIFY_PARAMETERS) + def test_to_html_justify(self, justify): df = DataFrame({'A': [6, 30000, 2], 'B': [1, 2, 70000], 'C': [223442, 0, 1]}, columns=['A', 'B', 'C']) - result = df.to_html(justify='left') + result = df.to_html(justify=justify) expected = ('<table border="1" class="dataframe">\n' ' <thead>\n' - ' <tr style="text-align: left;">\n' + ' <tr style="text-align: {justify};">\n' ' <th></th>\n' ' <th>A</th>\n' ' <th>B</th>\n' @@ -1592,41 +1593,18 @@ def test_to_html_justify(self): ' <td>1</td>\n' ' </tr>\n' ' </tbody>\n' - '</table>') + '</table>'.format(justify=justify)) assert result == expected - result = df.to_html(justify='right') - expected = ('<table border="1" class="dataframe">\n' - ' <thead>\n' - ' <tr style="text-align: right;">\n' - ' <th></th>\n' - ' <th>A</th>\n' - ' <th>B</th>\n' - ' <th>C</th>\n' - ' </tr>\n' - ' </thead>\n' - ' <tbody>\n' - ' <tr>\n' - ' <th>0</th>\n' - ' <td>6</td>\n' - ' <td>1</td>\n' - ' <td>223442</td>\n' - ' </tr>\n' - ' <tr>\n' - ' <th>1</th>\n' - ' <td>30000</td>\n' - ' <td>2</td>\n' - ' <td>0</td>\n' - ' </tr>\n' - ' <tr>\n' - ' <th>2</th>\n' - ' <td>2</td>\n' - ' <td>70000</td>\n' - ' <td>1</td>\n' - ' </tr>\n' - ' </tbody>\n' - '</table>') - assert result == expected + @pytest.mark.parametrize("justify", ["super-right", "small-left", + "noinherit", "tiny", "pandas"]) + def test_to_html_invalid_justify(self, justify): + # see gh-17527 + df = DataFrame() + msg = "Invalid value for justify parameter" + + with tm.assert_raises_regex(ValueError, msg): + df.to_html(justify=justify) def test_to_html_index(self): index = ['foo', 'bar', 'baz']
Closes #17527. cc @TomAugspurger
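A sketch of the validated behavior (error message as raised in `DataFrame.to_html`):

```python
import pandas as pd

df = pd.DataFrame({'A': [6, 30000, 2]})

# Any CSS text-align keyword is accepted, not just 'left'/'right'.
html = df.to_html(justify='center')

try:
    df.to_html(justify='pandas')
except ValueError as err:
    print(err)  # Invalid value for justify parameter
```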
https://api.github.com/repos/pandas-dev/pandas/pulls/17766
2017-10-03T16:32:24Z
2017-10-03T20:42:53Z
2017-10-03T20:42:52Z
2017-10-04T01:52:33Z
Implement NaT properties/methods directly
diff --git a/pandas/_libs/tslib.pyx b/pandas/_libs/tslib.pyx index 745632cf3d719..0ae4548c14f43 100644 --- a/pandas/_libs/tslib.pyx +++ b/pandas/_libs/tslib.pyx @@ -1,5 +1,8 @@ # -*- coding: utf-8 -*- # cython: profile=False +# cython: linetrace=False +# distutils: define_macros=CYTHON_TRACE=0 +# distutils: define_macros=CYTHON_TRACE_NOGIL=0 cimport numpy as np from numpy cimport (int8_t, int32_t, int64_t, import_array, ndarray, @@ -79,7 +82,6 @@ PyDateTime_IMPORT cdef int64_t NPY_NAT = util.get_nat() iNaT = NPY_NAT - from tslibs.timezones cimport ( is_utc, is_tzlocal, is_fixed_offset, treat_tz_as_dateutil, treat_tz_as_pytz, @@ -780,6 +782,32 @@ class Timestamp(_Timestamp): _nat_strings = set(['NaT', 'nat', 'NAT', 'nan', 'NaN', 'NAN']) +def _make_nat_func(func_name, cls): + def f(*args, **kwargs): + return NaT + f.__name__ = func_name + f.__doc__ = getattr(cls, func_name).__doc__ + return f + + +def _make_nan_func(func_name, cls): + def f(*args, **kwargs): + return np.nan + f.__name__ = func_name + f.__doc__ = getattr(cls, func_name).__doc__ + return f + + +def _make_error_func(func_name, cls): + def f(*args, **kwargs): + raise ValueError("NaTType does not support " + func_name) + + f.__name__ = func_name + if cls is not None: + f.__doc__ = getattr(cls, func_name).__doc__ + return f + + class NaTType(_NaT): """(N)ot-(A)-(T)ime, the time equivalent of NaN""" @@ -862,6 +890,90 @@ class NaTType(_NaT): return NaT return NotImplemented + # ---------------------------------------------------------------------- + # inject the Timestamp field properties + # these by definition return np.nan + + year = property(fget=lambda self: np.nan) + quarter = property(fget=lambda self: np.nan) + month = property(fget=lambda self: np.nan) + day = property(fget=lambda self: np.nan) + hour = property(fget=lambda self: np.nan) + minute = property(fget=lambda self: np.nan) + second = property(fget=lambda self: np.nan) + millisecond = property(fget=lambda self: np.nan) + microsecond = property(fget=lambda self: np.nan) + nanosecond = property(fget=lambda self: np.nan) + + week = property(fget=lambda self: np.nan) + dayofyear = property(fget=lambda self: np.nan) + weekofyear = property(fget=lambda self: np.nan) + days_in_month = property(fget=lambda self: np.nan) + daysinmonth = property(fget=lambda self: np.nan) + dayofweek = property(fget=lambda self: np.nan) + weekday_name = property(fget=lambda self: np.nan) + + # inject Timedelta properties + days = property(fget=lambda self: np.nan) + seconds = property(fget=lambda self: np.nan) + microseconds = property(fget=lambda self: np.nan) + nanoseconds = property(fget=lambda self: np.nan) + + # inject pd.Period properties + qyear = property(fget=lambda self: np.nan) + + # ---------------------------------------------------------------------- + # GH9513 NaT methods (except to_datetime64) to raise, return np.nan, or + # return NaT create functions that raise, for binding to NaTType + # These are the ones that can get their docstrings from datetime. 
+ + # nan methods + weekday = _make_nan_func('weekday', datetime) + isoweekday = _make_nan_func('isoweekday', datetime) + + # _nat_methods + date = _make_nat_func('date', datetime) + + utctimetuple = _make_error_func('utctimetuple', datetime) + timetz = _make_error_func('timetz', datetime) + timetuple = _make_error_func('timetuple', datetime) + strptime = _make_error_func('strptime', datetime) + strftime = _make_error_func('strftime', datetime) + isocalendar = _make_error_func('isocalendar', datetime) + dst = _make_error_func('dst', datetime) + ctime = _make_error_func('ctime', datetime) + time = _make_error_func('time', datetime) + toordinal = _make_error_func('toordinal', datetime) + tzname = _make_error_func('tzname', datetime) + utcoffset = _make_error_func('utcoffset', datetime) + + # Timestamp has empty docstring for some methods. + utcfromtimestamp = _make_error_func('utcfromtimestamp', None) + fromtimestamp = _make_error_func('fromtimestamp', None) + combine = _make_error_func('combine', None) + utcnow = _make_error_func('utcnow', None) + + if PY3: + timestamp = _make_error_func('timestamp', datetime) + + # GH9513 NaT methods (except to_datetime64) to raise, return np.nan, or + # return NaT create functions that raise, for binding to NaTType + astimezone = _make_error_func('astimezone', Timestamp) + fromordinal = _make_error_func('fromordinal', Timestamp) + + # _nat_methods + to_pydatetime = _make_nat_func('to_pydatetime', Timestamp) + + now = _make_nat_func('now', Timestamp) + today = _make_nat_func('today', Timestamp) + round = _make_nat_func('round', Timestamp) + floor = _make_nat_func('floor', Timestamp) + ceil = _make_nat_func('ceil', Timestamp) + + tz_convert = _make_nat_func('tz_convert', Timestamp) + tz_localize = _make_nat_func('tz_localize', Timestamp) + replace = _make_nat_func('replace', Timestamp) + def __nat_unpickle(*args): # return constant defined in the module @@ -1320,6 +1432,7 @@ cdef _nat_rdivide_op(self, other): return np.nan return NotImplemented + cdef class _NaT(_Timestamp): def __hash__(_NaT self): @@ -1537,7 +1650,7 @@ cdef _TSObject convert_datetime_to_tsobject(datetime ts, object tz, if is_timestamp(ts): obj.value += ts.nanosecond obj.dts.ps = ts.nanosecond * 1000 - + if nanos: obj.value += nanos obj.dts.ps = nanos * 1000 @@ -3255,95 +3368,6 @@ cpdef convert_to_timedelta64(object ts, object unit): return ts.astype('timedelta64[ns]') -#---------------------------------------------------------------------- -# NaT methods/property setups - - -# inject the Timestamp field properties -# these by definition return np.nan -fields = ['year', 'quarter', 'month', 'day', 'hour', - 'minute', 'second', 'millisecond', 'microsecond', 'nanosecond', - 'week', 'dayofyear', 'weekofyear', 'days_in_month', 'daysinmonth', - 'dayofweek', 'weekday_name', 'days', 'seconds', 'microseconds', - 'nanoseconds', 'qyear'] -for field in fields: - prop = property(fget=lambda self: np.nan) - setattr(NaTType, field, prop) - - -# define how we are handling NaT methods & inject -# to the NaTType class; these can return NaT, np.nan -# or raise respectively -_nat_methods = ['date', 'now', 'replace', 'to_pydatetime', - 'today', 'round', 'floor', 'ceil', 'tz_convert', - 'tz_localize'] -_nan_methods = ['weekday', 'isoweekday'] -_implemented_methods = [ - 'to_datetime', 'to_datetime64', 'isoformat', 'total_seconds'] -_implemented_methods.extend(_nat_methods) -_implemented_methods.extend(_nan_methods) - - -def _get_docstring(_method_name): - # NaT serves double duty as Timestamp & Timedelta - # 
missing value, so need to acquire doc-strings for both - - try: - return getattr(Timestamp, _method_name).__doc__ - except AttributeError: - pass - - try: - return getattr(Timedelta, _method_name).__doc__ - except AttributeError: - pass - - return None - - -for _method_name in _nat_methods: - - def _make_nat_func(func_name): - def f(*args, **kwargs): - return NaT - f.__name__ = func_name - f.__doc__ = _get_docstring(func_name) - return f - - setattr(NaTType, _method_name, _make_nat_func(_method_name)) - - -for _method_name in _nan_methods: - - def _make_nan_func(func_name): - def f(*args, **kwargs): - return np.nan - f.__name__ = func_name - f.__doc__ = _get_docstring(func_name) - return f - - setattr(NaTType, _method_name, _make_nan_func(_method_name)) - - -# GH9513 NaT methods (except to_datetime64) to raise, return np.nan, or -# return NaT create functions that raise, for binding to NaTType -for _maybe_method_name in dir(NaTType): - _maybe_method = getattr(NaTType, _maybe_method_name) - if (callable(_maybe_method) - and not _maybe_method_name.startswith("_") - and _maybe_method_name not in _implemented_methods): - - def _make_error_func(func_name): - def f(*args, **kwargs): - raise ValueError("NaTType does not support " + func_name) - f.__name__ = func_name - f.__doc__ = _get_docstring(func_name) - return f - - setattr(NaTType, _maybe_method_name, - _make_error_func(_maybe_method_name)) - - #---------------------------------------------------------------------- # Conversion routines diff --git a/pandas/tests/scalar/test_nat.py b/pandas/tests/scalar/test_nat.py index 6f852f2b394e1..135e4c544de41 100644 --- a/pandas/tests/scalar/test_nat.py +++ b/pandas/tests/scalar/test_nat.py @@ -10,6 +10,8 @@ from pandas.util import testing as tm from pandas._libs.tslib import iNaT +from pandas.compat import callable + @pytest.mark.parametrize('nat, idx', [(Timestamp('NaT'), DatetimeIndex), (Timedelta('NaT'), TimedeltaIndex), @@ -156,6 +158,53 @@ def test_NaT_methods(): assert NaT.isoformat() == 'NaT' +def test_NaT_docstrings(): + # GH#17327 + nat_names = dir(NaT) + + # NaT should have *most* of the Timestamp methods, with matching + # docstrings. The attributes that are not expected to be present in NaT + # are private methods plus `ts_expected` below. + ts_names = dir(Timestamp) + ts_missing = [x for x in ts_names if x not in nat_names and + not x.startswith('_')] + ts_missing.sort() + ts_expected = ['freqstr', 'normalize', 'offset', + 'to_julian_date', 'to_period', 'tz'] + assert ts_missing == ts_expected + + ts_overlap = [x for x in nat_names if x in ts_names and + not x.startswith('_') and + callable(getattr(Timestamp, x))] + for name in ts_overlap: + tsdoc = getattr(Timestamp, name).__doc__ + natdoc = getattr(NaT, name).__doc__ + assert tsdoc == natdoc + + # NaT should have *most* of the Timedelta methods, with matching + # docstrings. The attributes that are not expected to be present in NaT + # are private methods plus `td_expected` below. + # For methods that are both Timestamp and Timedelta methods, the + # Timestamp docstring takes priority. 
+ td_names = dir(Timedelta) + td_missing = [x for x in td_names if x not in nat_names and + not x.startswith('_')] + td_missing.sort() + td_expected = ['components', 'delta', 'is_populated', + 'to_pytimedelta', 'to_timedelta64', 'view'] + assert td_missing == td_expected + + td_overlap = [x for x in nat_names if x in td_names and + x not in ts_names and # Timestamp __doc__ takes priority + not x.startswith('_') and + callable(getattr(Timedelta, x))] + assert td_overlap == ['total_seconds'] + for name in td_overlap: + tddoc = getattr(Timedelta, name).__doc__ + natdoc = getattr(NaT, name).__doc__ + assert tddoc == natdoc + + @pytest.mark.parametrize('klass', [Timestamp, Timedelta]) def test_isoformat(klass):
The dynamically injected NaT methods cause build errors, referenced in #17756; this implements the properties/methods directly in the class body instead.

- [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
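The injected behavior, now explicit in the class body (a sketch; return values per the new `_make_*` helpers):

```python
import pandas as pd

pd.NaT.year          # nan -- field properties return np.nan
pd.NaT.weekday()     # nan -- _make_nan_func
pd.NaT.date()        # NaT -- _make_nat_func
pd.NaT.round('D')    # NaT -- _make_nat_func, Timestamp docstring

try:
    pd.NaT.ctime()   # _make_error_func
except ValueError as err:
    print(err)       # NaTType does not support ctime
```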
https://api.github.com/repos/pandas-dev/pandas/pulls/17765
2017-10-03T16:20:21Z
2017-10-05T00:13:32Z
2017-10-05T00:13:32Z
2017-10-30T16:25:12Z
COMPAT: Suppress .take() warning for numpy < 1.12
diff --git a/pandas/tests/sparse/test_series.py b/pandas/tests/sparse/test_series.py index 8c0ed322028e8..13dab68b2e5b4 100644 --- a/pandas/tests/sparse/test_series.py +++ b/pandas/tests/sparse/test_series.py @@ -9,7 +9,8 @@ import numpy as np import pandas as pd -from pandas import Series, DataFrame, bdate_range, isna, compat +from pandas import (Series, DataFrame, bdate_range, + isna, compat, _np_version_under1p12) from pandas.tseries.offsets import BDay import pandas.util.testing as tm from pandas.compat import range @@ -527,8 +528,13 @@ def test_numpy_take(self): sp = SparseSeries([1.0, 2.0, 3.0]) indices = [1, 2] - tm.assert_series_equal(np.take(sp, indices, axis=0).to_dense(), - np.take(sp.to_dense(), indices, axis=0)) + # gh-17352: older versions of numpy don't properly + # pass in arguments to downstream .take() implementations. + warning = FutureWarning if _np_version_under1p12 else None + + with tm.assert_produces_warning(warning, check_stacklevel=False): + tm.assert_series_equal(np.take(sp, indices, axis=0).to_dense(), + np.take(sp.to_dense(), indices, axis=0)) msg = "the 'out' parameter is not supported" tm.assert_raises_regex(ValueError, msg, np.take,
Follow-up to #17352.
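The gating pattern, mirroring the test change (a sketch; runnable against the affected pandas/numpy versions):

```python
import numpy as np
import pandas.util.testing as tm
from pandas import SparseSeries, _np_version_under1p12

sp = SparseSeries([1.0, 2.0, 3.0])

# gh-17352: older numpy doesn't properly pass arguments through to
# downstream .take() implementations, which surfaces as a FutureWarning,
# so the expected warning is version-gated rather than hard-coded.
warning = FutureWarning if _np_version_under1p12 else None

with tm.assert_produces_warning(warning, check_stacklevel=False):
    result = np.take(sp, [1, 2], axis=0)
```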
https://api.github.com/repos/pandas-dev/pandas/pulls/17764
2017-10-03T15:52:47Z
2017-10-03T19:49:08Z
2017-10-03T19:49:08Z
2017-10-04T01:52:22Z
TST: remove warnings, xref #15747
diff --git a/pandas/tests/series/test_indexing.py b/pandas/tests/series/test_indexing.py index 86211612a5955..09ba0e197438d 100644 --- a/pandas/tests/series/test_indexing.py +++ b/pandas/tests/series/test_indexing.py @@ -854,11 +854,15 @@ def test_basic_getitem_with_labels(self): s = Series(np.random.randn(10), index=lrange(0, 20, 2)) inds = [0, 2, 5, 7, 8] arr_inds = np.array([0, 2, 5, 7, 8]) - result = s[inds] + with tm.assert_produces_warning(FutureWarning, + check_stacklevel=False): + result = s[inds] expected = s.reindex(inds) assert_series_equal(result, expected) - result = s[arr_inds] + with tm.assert_produces_warning(FutureWarning, + check_stacklevel=False): + result = s[arr_inds] expected = s.reindex(arr_inds) assert_series_equal(result, expected)
https://api.github.com/repos/pandas-dev/pandas/pulls/17761
2017-10-03T11:38:19Z
2017-10-03T13:01:06Z
2017-10-03T13:01:06Z
2017-10-03T13:01:06Z
CI: pin pytables to valid build
diff --git a/ci/requirements-3.6.run b/ci/requirements-3.6.run index 822144a80bc9a..721d0c1ad8101 100644 --- a/ci/requirements-3.6.run +++ b/ci/requirements-3.6.run @@ -7,7 +7,7 @@ xlsxwriter xlrd xlwt numexpr -pytables +pytables=3.4.2=np113py36_1 matplotlib lxml html5lib
xref #17757
https://api.github.com/repos/pandas-dev/pandas/pulls/17760
2017-10-03T10:21:01Z
2017-10-03T10:48:58Z
2017-10-03T10:48:58Z
2017-10-03T10:48:58Z
TST: remove bunch of warnings for .astype(.....), xref #17636
diff --git a/pandas/tests/indexing/test_categorical.py b/pandas/tests/indexing/test_categorical.py index 6874fedaa705f..ab6e76c221102 100644 --- a/pandas/tests/indexing/test_categorical.py +++ b/pandas/tests/indexing/test_categorical.py @@ -8,6 +8,7 @@ Categorical, CategoricalIndex) from pandas.util.testing import assert_series_equal, assert_frame_equal from pandas.util import testing as tm +from pandas.api.types import CategoricalDtype as CDT class TestCategoricalIndex(object): @@ -16,27 +17,24 @@ def setup_method(self, method): self.df = DataFrame({'A': np.arange(6, dtype='int64'), 'B': Series(list('aabbca')).astype( - 'category', categories=list( - 'cab'))}).set_index('B') + CDT(list('cab')))}).set_index('B') self.df2 = DataFrame({'A': np.arange(6, dtype='int64'), 'B': Series(list('aabbca')).astype( - 'category', categories=list( - 'cabe'))}).set_index('B') + CDT(list('cabe')))}).set_index('B') self.df3 = DataFrame({'A': np.arange(6, dtype='int64'), 'B': (Series([1, 1, 2, 1, 3, 2]) - .astype('category', categories=[3, 2, 1], - ordered=True))}).set_index('B') + .astype(CDT([3, 2, 1], ordered=True))) + }).set_index('B') self.df4 = DataFrame({'A': np.arange(6, dtype='int64'), 'B': (Series([1, 1, 2, 1, 3, 2]) - .astype('category', categories=[3, 2, 1], - ordered=False))}).set_index('B') + .astype(CDT([3, 2, 1], ordered=False))) + }).set_index('B') def test_loc_scalar(self): result = self.df.loc['a'] expected = (DataFrame({'A': [0, 1, 5], 'B': (Series(list('aaa')) - .astype('category', - categories=list('cab')))}) + .astype(CDT(list('cab'))))}) .set_index('B')) assert_frame_equal(result, expected) @@ -44,8 +42,7 @@ def test_loc_scalar(self): df.loc['a'] = 20 expected = (DataFrame({'A': [20, 20, 2, 3, 4, 20], 'B': (Series(list('aabbca')) - .astype('category', - categories=list('cab')))}) + .astype(CDT(list('cab'))))}) .set_index('B')) assert_frame_equal(df, expected) @@ -319,13 +316,13 @@ def test_reindexing(self): result = self.df2.reindex(Categorical(['a', 'd'], categories=cats)) expected = DataFrame({'A': [0, 1, 5, np.nan], 'B': Series(list('aaad')).astype( - 'category', categories=cats)}).set_index('B') + CDT(cats))}).set_index('B') assert_frame_equal(result, expected, check_index_type=True) result = self.df2.reindex(Categorical(['a'], categories=cats)) expected = DataFrame({'A': [0, 1, 5], 'B': Series(list('aaa')).astype( - 'category', categories=cats)}).set_index('B') + CDT(cats))}).set_index('B') assert_frame_equal(result, expected, check_index_type=True) result = self.df2.reindex(['a', 'b', 'e']) @@ -348,16 +345,15 @@ def test_reindexing(self): ['a', 'd'], categories=cats, ordered=True)) expected = DataFrame( {'A': [0, 1, 5, np.nan], - 'B': Series(list('aaad')).astype('category', categories=cats, - ordered=True)}).set_index('B') + 'B': Series(list('aaad')).astype( + CDT(cats, ordered=True))}).set_index('B') assert_frame_equal(result, expected, check_index_type=True) result = self.df2.reindex(Categorical( ['a', 'd'], categories=['a', 'd'])) expected = DataFrame({'A': [0, 1, 5, np.nan], 'B': Series(list('aaad')).astype( - 'category', categories=['a', 'd' - ])}).set_index('B') + CDT(['a', 'd']))}).set_index('B') assert_frame_equal(result, expected, check_index_type=True) # passed duplicate indexers are not allowed diff --git a/pandas/tests/reshape/test_merge.py b/pandas/tests/reshape/test_merge.py index df75983a29d80..ed99814afd20a 100644 --- a/pandas/tests/reshape/test_merge.py +++ b/pandas/tests/reshape/test_merge.py @@ -16,6 +16,7 @@ from pandas.core.dtypes.common import 
is_categorical_dtype, is_object_dtype from pandas import DataFrame, Index, MultiIndex, Series, Categorical import pandas.util.testing as tm +from pandas.api.types import CategoricalDtype as CDT N = 50 @@ -1414,7 +1415,7 @@ def left(): return DataFrame( {'X': Series(np.random.choice( ['foo', 'bar'], - size=(10,))).astype('category', categories=['foo', 'bar']), + size=(10,))).astype(CDT(['foo', 'bar'])), 'Y': np.random.choice(['one', 'two', 'three'], size=(10,))}) @@ -1422,8 +1423,7 @@ def left(): def right(): np.random.seed(1234) return DataFrame( - {'X': Series(['foo', 'bar']).astype('category', - categories=['foo', 'bar']), + {'X': Series(['foo', 'bar']).astype(CDT(['foo', 'bar'])), 'Z': [1, 2]}) @@ -1468,9 +1468,8 @@ def test_other_columns(self, left, right): @pytest.mark.parametrize( 'change', [lambda x: x, - lambda x: x.astype('category', - categories=['foo', 'bar', 'bah']), - lambda x: x.astype('category', ordered=True)]) + lambda x: x.astype(CDT(['foo', 'bar', 'bah'])), + lambda x: x.astype(CDT(ordered=True))]) @pytest.mark.parametrize('how', ['inner', 'outer', 'left', 'right']) def test_dtype_on_merged_different(self, change, how, left, right): # our merging columns, X now has 2 different dtypes diff --git a/pandas/tests/reshape/test_pivot.py b/pandas/tests/reshape/test_pivot.py index bd8a999ce2330..07d3052c16756 100644 --- a/pandas/tests/reshape/test_pivot.py +++ b/pandas/tests/reshape/test_pivot.py @@ -1,3 +1,4 @@ + from datetime import datetime, date, timedelta import pytest @@ -13,6 +14,7 @@ from pandas.compat import range, product import pandas.util.testing as tm from pandas.tseries.util import pivot_annual, isleapyear +from pandas.api.types import CategoricalDtype as CDT class TestPivotTable(object): @@ -98,13 +100,12 @@ def test_pivot_table_dropna_categoricals(self): 'B': [1, 2, 3, 1, 2, 3, 1, 2, 3], 'C': range(0, 9)}) - df['A'] = df['A'].astype('category', ordered=False, - categories=categories) + df['A'] = df['A'].astype(CDT(categories, ordered=False)) result_true = df.pivot_table(index='B', columns='A', values='C', dropna=True) expected_columns = Series(['a', 'b', 'c'], name='A') - expected_columns = expected_columns.astype('category', ordered=False, - categories=categories) + expected_columns = expected_columns.astype( + CDT(categories, ordered=False)) expected_index = Series([1, 2, 3], name='B') expected_true = DataFrame([[0.0, 3.0, 6.0], [1.0, 4.0, 7.0], diff --git a/pandas/tests/reshape/test_tile.py b/pandas/tests/reshape/test_tile.py index 91000747b41bb..4edce8af92f84 100644 --- a/pandas/tests/reshape/test_tile.py +++ b/pandas/tests/reshape/test_tile.py @@ -9,6 +9,7 @@ Interval, IntervalIndex, Categorical, cut, qcut, date_range) import pandas.util.testing as tm +from pandas.api.types import CategoricalDtype as CDT from pandas.core.algorithms import quantile import pandas.core.reshape.tile as tmod @@ -299,7 +300,7 @@ def test_cut_return_intervals(self): exp_bins = np.linspace(0, 8, num=4).round(3) exp_bins[0] -= 0.008 exp = Series(IntervalIndex.from_breaks(exp_bins, closed='right').take( - [0, 0, 0, 1, 1, 1, 2, 2, 2])).astype('category', ordered=True) + [0, 0, 0, 1, 1, 1, 2, 2, 2])).astype(CDT(ordered=True)) tm.assert_series_equal(res, exp) def test_qcut_return_intervals(self): @@ -308,7 +309,7 @@ def test_qcut_return_intervals(self): exp_levels = np.array([Interval(-0.001, 2.664), Interval(2.664, 5.328), Interval(5.328, 8)]) exp = Series(exp_levels.take([0, 0, 0, 1, 1, 1, 2, 2, 2])).astype( - 'category', ordered=True) + CDT(ordered=True)) tm.assert_series_equal(res, 
exp) def test_series_retbins(self): @@ -316,14 +317,14 @@ def test_series_retbins(self): s = Series(np.arange(4)) result, bins = cut(s, 2, retbins=True) expected = Series(IntervalIndex.from_breaks( - [-0.003, 1.5, 3], closed='right').repeat(2)).astype('category', - ordered=True) + [-0.003, 1.5, 3], closed='right').repeat(2)).astype( + CDT(ordered=True)) tm.assert_series_equal(result, expected) result, bins = qcut(s, 2, retbins=True) expected = Series(IntervalIndex.from_breaks( - [-0.001, 1.5, 3], closed='right').repeat(2)).astype('category', - ordered=True) + [-0.001, 1.5, 3], closed='right').repeat(2)).astype( + CDT(ordered=True)) tm.assert_series_equal(result, expected) def test_qcut_duplicates_bin(self): @@ -351,7 +352,7 @@ def test_single_quantile(self): result = qcut(s, 1) intervals = IntervalIndex([Interval(8.999, 9.0), Interval(8.999, 9.0)], closed='right') - expected = Series(intervals).astype('category', ordered=True) + expected = Series(intervals).astype(CDT(ordered=True)) tm.assert_series_equal(result, expected) s = Series([-9., -9.]) @@ -361,7 +362,7 @@ def test_single_quantile(self): result = qcut(s, 1) intervals = IntervalIndex([Interval(-9.001, -9.0), Interval(-9.001, -9.0)], closed='right') - expected = Series(intervals).astype('category', ordered=True) + expected = Series(intervals).astype(CDT(ordered=True)) tm.assert_series_equal(result, expected) s = Series([0., 0.]) @@ -371,7 +372,7 @@ def test_single_quantile(self): result = qcut(s, 1) intervals = IntervalIndex([Interval(-0.001, 0.0), Interval(-0.001, 0.0)], closed='right') - expected = Series(intervals).astype('category', ordered=True) + expected = Series(intervals).astype(CDT(ordered=True)) tm.assert_series_equal(result, expected) s = Series([9]) @@ -380,7 +381,7 @@ def test_single_quantile(self): tm.assert_series_equal(result, expected) result = qcut(s, 1) intervals = IntervalIndex([Interval(8.999, 9.0)], closed='right') - expected = Series(intervals).astype('category', ordered=True) + expected = Series(intervals).astype(CDT(ordered=True)) tm.assert_series_equal(result, expected) s = Series([-9]) @@ -389,7 +390,7 @@ def test_single_quantile(self): tm.assert_series_equal(result, expected) result = qcut(s, 1) intervals = IntervalIndex([Interval(-9.001, -9.0)], closed='right') - expected = Series(intervals).astype('category', ordered=True) + expected = Series(intervals).astype(CDT(ordered=True)) tm.assert_series_equal(result, expected) s = Series([0]) @@ -398,7 +399,7 @@ def test_single_quantile(self): tm.assert_series_equal(result, expected) result = qcut(s, 1) intervals = IntervalIndex([Interval(-0.001, 0.0)], closed='right') - expected = Series(intervals).astype('category', ordered=True) + expected = Series(intervals).astype(CDT(ordered=True)) tm.assert_series_equal(result, expected) def test_single_bin(self): @@ -450,7 +451,7 @@ def test_datetime_cut(self): Timestamp('2013-01-02 08:00:00')), Interval(Timestamp('2013-01-02 08:00:00'), Timestamp('2013-01-03 00:00:00'))])) - .astype('category', ordered=True)) + .astype(CDT(ordered=True))) tm.assert_series_equal(result, expected) @@ -479,7 +480,7 @@ def test_datetime_bin(self): Series(IntervalIndex.from_intervals([ Interval(Timestamp(bin_data[0]), Timestamp(bin_data[1])), Interval(Timestamp(bin_data[1]), Timestamp(bin_data[2]))])) - .astype('category', ordered=True)) + .astype(CDT(ordered=True))) for conv in [Timestamp, Timestamp, np.datetime64]: bins = [conv(v) for v in bin_data]
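The change throughout is mechanical; a minimal before/after sketch of the pattern being swapped in:

```python
import pandas as pd
from pandas.api.types import CategoricalDtype as CDT

s = pd.Series(list('aabbca'))

# Deprecated form that produced the warnings being removed:
#   s.astype('category', categories=list('cab'), ordered=False)

# Replacement: pass a CategoricalDtype instance to astype.
result = s.astype(CDT(list('cab'), ordered=False))
print(result.dtype)  # category with categories ['c', 'a', 'b'], unordered
```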
https://api.github.com/repos/pandas-dev/pandas/pulls/17759
2017-10-03T09:59:08Z
2017-10-03T10:54:31Z
2017-10-03T10:54:31Z
2017-10-03T10:54:31Z
Rearrange _NaT to be valid python; add attributes tests
diff --git a/pandas/_libs/tslib.pyx b/pandas/_libs/tslib.pyx index 745632cf3d719..53d553f31cccb 100644 --- a/pandas/_libs/tslib.pyx +++ b/pandas/_libs/tslib.pyx @@ -777,98 +777,6 @@ class Timestamp(_Timestamp): return self + other -_nat_strings = set(['NaT', 'nat', 'NAT', 'nan', 'NaN', 'NAN']) - - -class NaTType(_NaT): - """(N)ot-(A)-(T)ime, the time equivalent of NaN""" - - def __new__(cls): - cdef _NaT base - - base = _NaT.__new__(cls, 1, 1, 1) - base.value = NPY_NAT - - return base - - def __repr__(self): - return 'NaT' - - def __str__(self): - return 'NaT' - - def isoformat(self, sep='T'): - # This allows Timestamp(ts.isoformat()) to always correctly roundtrip. - return 'NaT' - - def __hash__(self): - return NPY_NAT - - def __int__(self): - return NPY_NAT - - def __long__(self): - return NPY_NAT - - def __reduce__(self): - return (__nat_unpickle, (None, )) - - def total_seconds(self): - """ - Total duration of timedelta in seconds (to ns precision) - """ - # GH 10939 - return np.nan - - @property - def is_leap_year(self): - return False - - @property - def is_month_start(self): - return False - - @property - def is_quarter_start(self): - return False - - @property - def is_year_start(self): - return False - - @property - def is_month_end(self): - return False - - @property - def is_quarter_end(self): - return False - - @property - def is_year_end(self): - return False - - def __rdiv__(self, other): - return _nat_rdivide_op(self, other) - - def __rtruediv__(self, other): - return _nat_rdivide_op(self, other) - - def __rfloordiv__(self, other): - return _nat_rdivide_op(self, other) - - def __rmul__(self, other): - if is_integer_object(other) or is_float_object(other): - return NaT - return NotImplemented - - -def __nat_unpickle(*args): - # return constant defined in the module - return NaT - -NaT = NaTType() - cdef inline bint _checknull_with_nat(object val): """ utility to check if a value is a nat or not """ return val is None or ( @@ -921,19 +829,6 @@ cpdef object get_value_box(ndarray arr, object loc): return util.get_value_1d(arr, i) -# Add the min and max fields at the class level -cdef int64_t _NS_UPPER_BOUND = INT64_MAX -# the smallest value we could actually represent is -# INT64_MIN + 1 == -9223372036854775807 -# but to allow overflow free conversion with a microsecond resolution -# use the smallest value with a 0 nanosecond unit (0s in last 3 digits) -cdef int64_t _NS_LOWER_BOUND = -9223372036854775000 - -# Resolution is in nanoseconds -Timestamp.min = Timestamp(_NS_LOWER_BOUND) -Timestamp.max = Timestamp(_NS_UPPER_BOUND) - - #---------------------------------------------------------------------- # Frequency inference @@ -1297,7 +1192,8 @@ cdef PyTypeObject* ts_type = <PyTypeObject*> Timestamp cdef inline bint is_timestamp(object o): return Py_TYPE(o) == ts_type # isinstance(o, Timestamp) - +#---------------------------------------------------------------------- +# NaT Construction cdef bint _nat_scalar_rules[6] _nat_scalar_rules[Py_EQ] = False @@ -1385,6 +1281,208 @@ cdef class _NaT(_Timestamp): return NotImplemented +class NaTType(_NaT): + """(N)ot-(A)-(T)ime, the time equivalent of NaN""" + + def __new__(cls): + cdef _NaT base + + base = _NaT.__new__(cls, 1, 1, 1) + base.value = NPY_NAT + + return base + + def __repr__(self): + return 'NaT' + + def __str__(self): + return 'NaT' + + def isoformat(self, sep='T'): + # This allows Timestamp(ts.isoformat()) to always correctly roundtrip. 
+ return 'NaT' + + def __hash__(self): + return NPY_NAT + + def __int__(self): + return NPY_NAT + + def __long__(self): + return NPY_NAT + + def __reduce__(self): + return (__nat_unpickle, (None, )) + + def total_seconds(self): + """ + Total duration of timedelta in seconds (to ns precision) + """ + # GH 10939 + return np.nan + + @property + def is_leap_year(self): + return False + + @property + def is_month_start(self): + return False + + @property + def is_quarter_start(self): + return False + + @property + def is_year_start(self): + return False + + @property + def is_month_end(self): + return False + + @property + def is_quarter_end(self): + return False + + @property + def is_year_end(self): + return False + + def __rdiv__(self, other): + return _nat_rdivide_op(self, other) + + def __rtruediv__(self, other): + return _nat_rdivide_op(self, other) + + def __rfloordiv__(self, other): + return _nat_rdivide_op(self, other) + + def __rmul__(self, other): + if is_integer_object(other) or is_float_object(other): + return NaT + return NotImplemented + + +_nat_strings = set(['NaT', 'nat', 'NAT', 'nan', 'NaN', 'NAN']) + + +def __nat_unpickle(*args): + # return constant defined in the module + return NaT + +NaT = NaTType() + + +#---------------------------------------------------------------------- +# NaT methods/property setups + + +# inject the Timestamp field properties +# these by definition return np.nan +fields = ['year', 'quarter', 'month', 'day', 'hour', + 'minute', 'second', 'millisecond', 'microsecond', 'nanosecond', + 'week', 'dayofyear', 'weekofyear', 'days_in_month', 'daysinmonth', + 'dayofweek', 'weekday_name', 'days', 'seconds', 'microseconds', + 'nanoseconds', 'qyear'] +for field in fields: + prop = property(fget=lambda self: np.nan) + setattr(NaTType, field, prop) + + +# define how we are handling NaT methods & inject +# to the NaTType class; these can return NaT, np.nan +# or raise respectively +_nat_methods = ['date', 'now', 'replace', 'to_pydatetime', + 'today', 'round', 'floor', 'ceil', 'tz_convert', + 'tz_localize'] +_nan_methods = ['weekday', 'isoweekday'] +_implemented_methods = [ + 'to_datetime', 'to_datetime64', 'isoformat', 'total_seconds'] +_implemented_methods.extend(_nat_methods) +_implemented_methods.extend(_nan_methods) + + +def _get_docstring(_method_name): + # NaT serves double duty as Timestamp & Timedelta + # missing value, so need to acquire doc-strings for both + + try: + return getattr(Timestamp, _method_name).__doc__ + except AttributeError: + pass + + try: + return getattr(Timedelta, _method_name).__doc__ + except AttributeError: + pass + + return None + + +for _method_name in _nat_methods: + + def _make_nat_func(func_name): + def f(*args, **kwargs): + return NaT + f.__name__ = func_name + f.__doc__ = _get_docstring(func_name) + return f + + setattr(NaTType, _method_name, _make_nat_func(_method_name)) + + +for _method_name in _nan_methods: + + def _make_nan_func(func_name): + def f(*args, **kwargs): + return np.nan + f.__name__ = func_name + f.__doc__ = _get_docstring(func_name) + return f + + setattr(NaTType, _method_name, _make_nan_func(_method_name)) + + +# GH9513 NaT methods (except to_datetime64) to raise, return np.nan, or +# return NaT create functions that raise, for binding to NaTType +for _maybe_method_name in dir(NaTType): + _maybe_method = getattr(NaTType, _maybe_method_name) + if (callable(_maybe_method) + and not _maybe_method_name.startswith("_") + and _maybe_method_name not in _implemented_methods): + + def _make_error_func(func_name): + 
def f(*args, **kwargs): + raise ValueError("NaTType does not support " + func_name) + f.__name__ = func_name + f.__doc__ = _get_docstring(func_name) + return f + + setattr(NaTType, _maybe_method_name, + _make_error_func(_maybe_method_name)) + + +#---------------------------------------------------------------------- +# Add the min and max fields at the class level +cdef int64_t _NS_UPPER_BOUND = INT64_MAX +# the smallest value we could actually represent is +# INT64_MIN + 1 == -9223372036854775807 +# but to allow overflow free conversion with a microsecond resolution +# use the smallest value with a 0 nanosecond unit (0s in last 3 digits) +cdef int64_t _NS_LOWER_BOUND = -9223372036854775000 + +cdef pandas_datetimestruct _NS_MIN_DTS, _NS_MAX_DTS +pandas_datetime_to_datetimestruct(_NS_LOWER_BOUND, PANDAS_FR_ns, &_NS_MIN_DTS) +pandas_datetime_to_datetimestruct(_NS_UPPER_BOUND, PANDAS_FR_ns, &_NS_MAX_DTS) + +# Resolution is in nanoseconds +Timestamp.min = Timestamp(_NS_LOWER_BOUND) +Timestamp.max = Timestamp(_NS_UPPER_BOUND) +# These cannot be defined until after `NaT` is defined. + + +#---------------------------------------------------------------------- # lightweight C object to hold datetime & int64 pair cdef class _TSObject: cdef: @@ -3255,95 +3353,6 @@ cpdef convert_to_timedelta64(object ts, object unit): return ts.astype('timedelta64[ns]') -#---------------------------------------------------------------------- -# NaT methods/property setups - - -# inject the Timestamp field properties -# these by definition return np.nan -fields = ['year', 'quarter', 'month', 'day', 'hour', - 'minute', 'second', 'millisecond', 'microsecond', 'nanosecond', - 'week', 'dayofyear', 'weekofyear', 'days_in_month', 'daysinmonth', - 'dayofweek', 'weekday_name', 'days', 'seconds', 'microseconds', - 'nanoseconds', 'qyear'] -for field in fields: - prop = property(fget=lambda self: np.nan) - setattr(NaTType, field, prop) - - -# define how we are handling NaT methods & inject -# to the NaTType class; these can return NaT, np.nan -# or raise respectively -_nat_methods = ['date', 'now', 'replace', 'to_pydatetime', - 'today', 'round', 'floor', 'ceil', 'tz_convert', - 'tz_localize'] -_nan_methods = ['weekday', 'isoweekday'] -_implemented_methods = [ - 'to_datetime', 'to_datetime64', 'isoformat', 'total_seconds'] -_implemented_methods.extend(_nat_methods) -_implemented_methods.extend(_nan_methods) - - -def _get_docstring(_method_name): - # NaT serves double duty as Timestamp & Timedelta - # missing value, so need to acquire doc-strings for both - - try: - return getattr(Timestamp, _method_name).__doc__ - except AttributeError: - pass - - try: - return getattr(Timedelta, _method_name).__doc__ - except AttributeError: - pass - - return None - - -for _method_name in _nat_methods: - - def _make_nat_func(func_name): - def f(*args, **kwargs): - return NaT - f.__name__ = func_name - f.__doc__ = _get_docstring(func_name) - return f - - setattr(NaTType, _method_name, _make_nat_func(_method_name)) - - -for _method_name in _nan_methods: - - def _make_nan_func(func_name): - def f(*args, **kwargs): - return np.nan - f.__name__ = func_name - f.__doc__ = _get_docstring(func_name) - return f - - setattr(NaTType, _method_name, _make_nan_func(_method_name)) - - -# GH9513 NaT methods (except to_datetime64) to raise, return np.nan, or -# return NaT create functions that raise, for binding to NaTType -for _maybe_method_name in dir(NaTType): - _maybe_method = getattr(NaTType, _maybe_method_name) - if (callable(_maybe_method) - and not 
_maybe_method_name.startswith("_") - and _maybe_method_name not in _implemented_methods): - - def _make_error_func(func_name): - def f(*args, **kwargs): - raise ValueError("NaTType does not support " + func_name) - f.__name__ = func_name - f.__doc__ = _get_docstring(func_name) - return f - - setattr(NaTType, _maybe_method_name, - _make_error_func(_maybe_method_name)) - - #---------------------------------------------------------------------- # Conversion routines diff --git a/pandas/tests/scalar/test_nat.py b/pandas/tests/scalar/test_nat.py index 6f852f2b394e1..c1c07ce1c9998 100644 --- a/pandas/tests/scalar/test_nat.py +++ b/pandas/tests/scalar/test_nat.py @@ -156,6 +156,44 @@ def test_NaT_methods(): assert NaT.isoformat() == 'NaT' +def test_NaT_docstrings(): + nat_names = dir(NaT) + + ts_names = dir(Timestamp) + ts_missing = [x for x in ts_names if x not in nat_names and + not x.startswith('_')] + ts_missing.sort() + ts_expected = ['freqstr', 'normalize', 'offset', + 'to_julian_date', 'to_period', 'tz'] + assert ts_missing == ts_expected + + ts_overlap = [x for x in nat_names if x in ts_names and + not x.startswith('_') and + callable(getattr(Timestamp, x))] + for name in ts_overlap: + tsdoc = getattr(Timestamp, name).__doc__ + natdoc = getattr(NaT, name).__doc__ + assert tsdoc == natdoc + + td_names = dir(Timedelta) + td_missing = [x for x in td_names if x not in nat_names and + not x.startswith('_')] + td_missing.sort() + td_expected = ['components', 'delta', 'is_populated', + 'to_pytimedelta', 'to_timedelta64', 'view'] + assert td_missing == td_expected + + td_overlap = [x for x in nat_names if x in td_names and + x not in ts_names and # Timestamp __doc__ takes priority + not x.startswith('_') and + callable(getattr(Timedelta, x))] + assert td_overlap == ['total_seconds'] + for name in td_overlap: + tddoc = getattr(Timedelta, name).__doc__ + natdoc = getattr(NaT, name).__doc__ + assert tddoc == natdoc + + @pytest.mark.parametrize('klass', [Timestamp, Timedelta]) def test_isoformat(klass):
At the moment the organization of `tslib` is not valid python: child classes are defined above parent classes. This PR fixes that for `_NaT` and `NaTType`. This is necessary because trying to implement more important changes in `NaTType` is leading to build errors: ``` pandas/_libs/tslib.c:103540:3: error: use of undeclared identifier '__pyx_base'; did you mean '__pyx_k_base'? __pyx_base.nanosecond = __pyx_t_12; ``` _Hopefully_ fixing this will fix the build error. This also implements tests for NaT namespace/docstrings. - [ ] closes #xxxx - [ ] tests added / passed - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
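For reference, the injected wrappers give `NaT` three behaviors; a sketch of what each family does at runtime (the `strftime` case is inferred from the catch-all error wrapper over inherited datetime methods):

```python
import numpy as np
import pandas as pd

# _nat_methods -> return NaT
assert pd.NaT.round() is pd.NaT

# _nan_methods -> return np.nan
assert np.isnan(pd.NaT.weekday())

# any other public callable on NaTType -> wrapped to raise
try:
    pd.NaT.strftime('%Y-%m-%d')
except ValueError as err:
    print(err)  # NaTType does not support strftime
```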
https://api.github.com/repos/pandas-dev/pandas/pulls/17756
2017-10-03T00:17:32Z
2017-10-03T21:29:32Z
null
2017-10-30T16:25:46Z
BUG: Implement PeriodEngine to fix PeriodIndex truncate bug
diff --git a/doc/source/whatsnew/v0.22.0.txt b/doc/source/whatsnew/v0.22.0.txt index c41da4d67afe5..5c64b0a55c09b 100644 --- a/doc/source/whatsnew/v0.22.0.txt +++ b/doc/source/whatsnew/v0.22.0.txt @@ -100,7 +100,7 @@ Conversion Indexing ^^^^^^^^ -- +- Bug in :func:`PeriodIndex.truncate` which raises ``TypeError`` when ``PeriodIndex`` is monotonic (:issue:`17717`) - - diff --git a/pandas/_libs/index.pyx b/pandas/_libs/index.pyx index e98c0131e9c44..78eb7b3ae483e 100644 --- a/pandas/_libs/index.pyx +++ b/pandas/_libs/index.pyx @@ -17,7 +17,7 @@ from tslib cimport _to_i8 from hashtable cimport HashTable -from pandas._libs import algos, hashtable as _hash +from pandas._libs import algos, period as periodlib, hashtable as _hash from pandas._libs.tslib import Timestamp, Timedelta from datetime import datetime, timedelta @@ -270,13 +270,16 @@ cdef class IndexEngine: values = self._get_index_values() self.mapping = self._make_hash_table(len(values)) - self.mapping.map_locations(values) + self._call_map_locations(values) if len(self.mapping) == len(values): self.unique = 1 self.need_unique_check = 0 + cpdef _call_map_locations(self, values): + self.mapping.map_locations(values) + def clear_mapping(self): self.mapping = None self.need_monotonic_check = 1 @@ -490,6 +493,53 @@ cdef class TimedeltaEngine(DatetimeEngine): cdef _get_box_dtype(self): return 'm8[ns]' + +cdef class PeriodEngine(Int64Engine): + + cdef _get_index_values(self): + return super(PeriodEngine, self).vgetter() + + cpdef _call_map_locations(self, values): + super(PeriodEngine, self)._call_map_locations(values.view('i8')) + + def _call_monotonic(self, values): + return super(PeriodEngine, self)._call_monotonic(values.view('i8')) + + def get_indexer(self, values): + cdef ndarray[int64_t, ndim=1] ordinals + + super(PeriodEngine, self)._ensure_mapping_populated() + + freq = super(PeriodEngine, self).vgetter().freq + ordinals = periodlib.extract_ordinals(values, freq) + + return self.mapping.lookup(ordinals) + + def get_pad_indexer(self, other, limit=None): + freq = super(PeriodEngine, self).vgetter().freq + ordinal = periodlib.extract_ordinals(other, freq) + + return algos.pad_int64(self._get_index_values(), + np.asarray(ordinal), limit=limit) + + def get_backfill_indexer(self, other, limit=None): + freq = super(PeriodEngine, self).vgetter().freq + ordinal = periodlib.extract_ordinals(other, freq) + + return algos.backfill_int64(self._get_index_values(), + np.asarray(ordinal), limit=limit) + + def get_indexer_non_unique(self, targets): + freq = super(PeriodEngine, self).vgetter().freq + ordinal = periodlib.extract_ordinals(targets, freq) + ordinal_array = np.asarray(ordinal) + + return super(PeriodEngine, self).get_indexer_non_unique(ordinal_array) + + cdef _get_index_values_for_bool_indexer(self): + return self._get_index_values().view('i8') + + cpdef convert_scalar(ndarray arr, object value): # we don't turn integers # into datetimes/timedeltas diff --git a/pandas/_libs/index_class_helper.pxi.in b/pandas/_libs/index_class_helper.pxi.in index 76c0deef7ebee..b9fc0ddd7ea1c 100644 --- a/pandas/_libs/index_class_helper.pxi.in +++ b/pandas/_libs/index_class_helper.pxi.in @@ -66,7 +66,7 @@ cdef class {{name}}Engine(IndexEngine): raise KeyError(val) {{endif}} - values = self._get_index_values() + values = self._get_index_values_for_bool_indexer() n = len(values) result = np.empty(n, dtype=bool) @@ -86,6 +86,9 @@ cdef class {{name}}Engine(IndexEngine): return last_true return result + + cdef _get_index_values_for_bool_indexer(self): + return 
self._get_index_values() {{endif}} {{endfor}} diff --git a/pandas/core/indexes/period.py b/pandas/core/indexes/period.py index 148ca2725fbdc..c4938b556c8dd 100644 --- a/pandas/core/indexes/period.py +++ b/pandas/core/indexes/period.py @@ -31,7 +31,7 @@ import pandas.tseries.offsets as offsets from pandas._libs.lib import infer_dtype -from pandas._libs import tslib, period +from pandas._libs import tslib, period, index as libindex from pandas._libs.period import (Period, IncompatibleFrequency, get_period_field_arr, _validate_end_alias, _quarter_to_myear) @@ -192,6 +192,8 @@ class PeriodIndex(DatelikeOps, DatetimeIndexOpsMixin, Int64Index): freq = None + _engine_type = libindex.PeriodEngine + __eq__ = _period_index_cmp('__eq__') __ne__ = _period_index_cmp('__ne__', nat_result=True) __lt__ = _period_index_cmp('__lt__') @@ -275,6 +277,10 @@ def __new__(cls, data=None, ordinal=None, freq=None, start=None, end=None, data = period.extract_ordinals(data, freq) return cls._from_ordinals(data, name=name, freq=freq) + @cache_readonly + def _engine(self): + return self._engine_type(lambda: self, len(self)) + @classmethod def _generate_range(cls, start, end, periods, freq, fields): if freq is not None: diff --git a/pandas/tests/indexes/period/test_indexing.py b/pandas/tests/indexes/period/test_indexing.py index efc13a56cd77e..d99eba3e2d5e9 100644 --- a/pandas/tests/indexes/period/test_indexing.py +++ b/pandas/tests/indexes/period/test_indexing.py @@ -6,7 +6,7 @@ import pandas as pd from pandas.util import testing as tm from pandas.compat import lrange -from pandas._libs import tslib +from pandas._libs import tslib, tslibs from pandas import (PeriodIndex, Series, DatetimeIndex, period_range, Period) @@ -310,3 +310,197 @@ def test_take_fill_value(self): with pytest.raises(IndexError): idx.take(np.array([1, -5])) + + def test_get_loc(self): + # GH 17717 + p0 = pd.Period('2017-09-01') + p1 = pd.Period('2017-09-02') + p2 = pd.Period('2017-09-03') + + # get the location of p1/p2 from + # monotonic increasing PeriodIndex with non-duplicate + idx0 = pd.PeriodIndex([p0, p1, p2]) + expected_idx1_p1 = 1 + expected_idx1_p2 = 2 + + assert idx0.get_loc(p1) == expected_idx1_p1 + assert idx0.get_loc(str(p1)) == expected_idx1_p1 + assert idx0.get_loc(p2) == expected_idx1_p2 + assert idx0.get_loc(str(p2)) == expected_idx1_p2 + + pytest.raises(tslibs.parsing.DateParseError, idx0.get_loc, 'foo') + pytest.raises(KeyError, idx0.get_loc, 1.1) + pytest.raises(TypeError, idx0.get_loc, idx0) + + # get the location of p1/p2 from + # monotonic increasing PeriodIndex with duplicate + idx1 = pd.PeriodIndex([p1, p1, p2]) + expected_idx1_p1 = slice(0, 2) + expected_idx1_p2 = 2 + + assert idx1.get_loc(p1) == expected_idx1_p1 + assert idx1.get_loc(str(p1)) == expected_idx1_p1 + assert idx1.get_loc(p2) == expected_idx1_p2 + assert idx1.get_loc(str(p2)) == expected_idx1_p2 + + pytest.raises(tslibs.parsing.DateParseError, idx1.get_loc, 'foo') + pytest.raises(KeyError, idx1.get_loc, 1.1) + pytest.raises(TypeError, idx1.get_loc, idx1) + + # get the location of p1/p2 from + # non-monotonic increasing/decreasing PeriodIndex with duplicate + idx2 = pd.PeriodIndex([p2, p1, p2]) + expected_idx2_p1 = 1 + expected_idx2_p2 = np.array([True, False, True]) + + assert idx2.get_loc(p1) == expected_idx2_p1 + assert idx2.get_loc(str(p1)) == expected_idx2_p1 + tm.assert_numpy_array_equal(idx2.get_loc(p2), expected_idx2_p2) + tm.assert_numpy_array_equal(idx2.get_loc(str(p2)), expected_idx2_p2) + + def test_is_monotonic_increasing(self): + # GH 17717 + p0 = 
pd.Period('2017-09-01') + p1 = pd.Period('2017-09-02') + p2 = pd.Period('2017-09-03') + + idx_inc0 = pd.PeriodIndex([p0, p1, p2]) + idx_inc1 = pd.PeriodIndex([p0, p1, p1]) + idx_dec0 = pd.PeriodIndex([p2, p1, p0]) + idx_dec1 = pd.PeriodIndex([p2, p1, p1]) + idx = pd.PeriodIndex([p1, p2, p0]) + + assert idx_inc0.is_monotonic_increasing + assert idx_inc1.is_monotonic_increasing + assert not idx_dec0.is_monotonic_increasing + assert not idx_dec1.is_monotonic_increasing + assert not idx.is_monotonic_increasing + + def test_is_monotonic_decreasing(self): + # GH 17717 + p0 = pd.Period('2017-09-01') + p1 = pd.Period('2017-09-02') + p2 = pd.Period('2017-09-03') + + idx_inc0 = pd.PeriodIndex([p0, p1, p2]) + idx_inc1 = pd.PeriodIndex([p0, p1, p1]) + idx_dec0 = pd.PeriodIndex([p2, p1, p0]) + idx_dec1 = pd.PeriodIndex([p2, p1, p1]) + idx = pd.PeriodIndex([p1, p2, p0]) + + assert not idx_inc0.is_monotonic_decreasing + assert not idx_inc1.is_monotonic_decreasing + assert idx_dec0.is_monotonic_decreasing + assert idx_dec1.is_monotonic_decreasing + assert not idx.is_monotonic_decreasing + + def test_is_unique(self): + # GH 17717 + p0 = pd.Period('2017-09-01') + p1 = pd.Period('2017-09-02') + p2 = pd.Period('2017-09-03') + + idx0 = pd.PeriodIndex([p0, p1, p2]) + assert idx0.is_unique + + idx1 = pd.PeriodIndex([p1, p1, p2]) + assert not idx1.is_unique + + def test_contains(self): + # GH 17717 + p0 = pd.Period('2017-09-01') + p1 = pd.Period('2017-09-02') + p2 = pd.Period('2017-09-03') + p3 = pd.Period('2017-09-04') + + ps0 = [p0, p1, p2] + idx0 = pd.PeriodIndex(ps0) + + for p in ps0: + assert idx0.contains(p) + assert p in idx0 + + assert idx0.contains(str(p)) + assert str(p) in idx0 + + assert idx0.contains('2017-09-01 00:00:01') + assert '2017-09-01 00:00:01' in idx0 + + assert idx0.contains('2017-09') + assert '2017-09' in idx0 + + assert not idx0.contains(p3) + assert p3 not in idx0 + + def test_get_value(self): + # GH 17717 + p0 = pd.Period('2017-09-01') + p1 = pd.Period('2017-09-02') + p2 = pd.Period('2017-09-03') + + idx0 = pd.PeriodIndex([p0, p1, p2]) + input0 = np.array([1, 2, 3]) + expected0 = 2 + + result0 = idx0.get_value(input0, p1) + assert result0 == expected0 + + idx1 = pd.PeriodIndex([p1, p1, p2]) + input1 = np.array([1, 2, 3]) + expected1 = np.array([1, 2]) + + result1 = idx1.get_value(input1, p1) + tm.assert_numpy_array_equal(result1, expected1) + + idx2 = pd.PeriodIndex([p1, p2, p1]) + input2 = np.array([1, 2, 3]) + expected2 = np.array([1, 3]) + + result2 = idx2.get_value(input2, p1) + tm.assert_numpy_array_equal(result2, expected2) + + def test_get_indexer(self): + # GH 17717 + p1 = pd.Period('2017-09-01') + p2 = pd.Period('2017-09-04') + p3 = pd.Period('2017-09-07') + + tp0 = pd.Period('2017-08-31') + tp1 = pd.Period('2017-09-02') + tp2 = pd.Period('2017-09-05') + tp3 = pd.Period('2017-09-09') + + idx = pd.PeriodIndex([p1, p2, p3]) + + tm.assert_numpy_array_equal(idx.get_indexer(idx), + np.array([0, 1, 2], dtype=np.intp)) + + target = pd.PeriodIndex([tp0, tp1, tp2, tp3]) + tm.assert_numpy_array_equal(idx.get_indexer(target, 'pad'), + np.array([-1, 0, 1, 2], dtype=np.intp)) + tm.assert_numpy_array_equal(idx.get_indexer(target, 'backfill'), + np.array([0, 1, 2, -1], dtype=np.intp)) + tm.assert_numpy_array_equal(idx.get_indexer(target, 'nearest'), + np.array([0, 0, 1, 2], dtype=np.intp)) + + res = idx.get_indexer(target, 'nearest', + tolerance=pd.Timedelta('1 day')) + tm.assert_numpy_array_equal(res, + np.array([0, 0, 1, -1], dtype=np.intp)) + + def test_get_indexer_non_unique(self): + # 
GH 17717 + p1 = pd.Period('2017-09-02') + p2 = pd.Period('2017-09-03') + p3 = pd.Period('2017-09-04') + p4 = pd.Period('2017-09-05') + + idx1 = pd.PeriodIndex([p1, p2, p1]) + idx2 = pd.PeriodIndex([p2, p1, p3, p4]) + + result = idx1.get_indexer_non_unique(idx2) + expected_indexer = np.array([1, 0, 2, -1, -1], dtype=np.int64) + expected_missing = np.array([2, 3], dtype=np.int64) + + tm.assert_numpy_array_equal(result[0], expected_indexer) + tm.assert_numpy_array_equal(result[1], expected_missing) diff --git a/pandas/tests/series/test_period.py b/pandas/tests/series/test_period.py index e907b0edd5c6a..b4ff25d2630b8 100644 --- a/pandas/tests/series/test_period.py +++ b/pandas/tests/series/test_period.py @@ -249,3 +249,33 @@ def test_align_series(self): msg = "Input has different freq=D from PeriodIndex\\(freq=A-DEC\\)" with tm.assert_raises_regex(period.IncompatibleFrequency, msg): ts + ts.asfreq('D', how="end") + + def test_truncate(self): + # GH 17717 + idx1 = pd.PeriodIndex([ + pd.Period('2017-09-02'), + pd.Period('2017-09-02'), + pd.Period('2017-09-03') + ]) + series1 = pd.Series([1, 2, 3], index=idx1) + result1 = series1.truncate(after='2017-09-02') + + expected_idx1 = pd.PeriodIndex([ + pd.Period('2017-09-02'), + pd.Period('2017-09-02') + ]) + tm.assert_series_equal(result1, pd.Series([1, 2], index=expected_idx1)) + + idx2 = pd.PeriodIndex([ + pd.Period('2017-09-03'), + pd.Period('2017-09-02'), + pd.Period('2017-09-03') + ]) + series2 = pd.Series([1, 2, 3], index=idx2) + result2 = series2.truncate(after='2017-09-02') + + expected_idx2 = pd.PeriodIndex([ + pd.Period('2017-09-03'), + pd.Period('2017-09-02') + ]) + tm.assert_series_equal(result2, pd.Series([1, 2], index=expected_idx2))
- [x] closes #17717 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
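A sketch of the fixed behavior, mirroring the new `test_truncate`:

```python
import pandas as pd
import pandas.util.testing as tm

idx = pd.PeriodIndex([pd.Period('2017-09-02'),
                      pd.Period('2017-09-02'),
                      pd.Period('2017-09-03')])
series = pd.Series([1, 2, 3], index=idx)

# A monotonic PeriodIndex previously raised TypeError here (GH 17717);
# with the PeriodEngine the label-based truncation works as expected.
result = series.truncate(after='2017-09-02')
tm.assert_series_equal(result, pd.Series([1, 2], index=idx[:2]))
```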
https://api.github.com/repos/pandas-dev/pandas/pulls/17755
2017-10-02T23:35:04Z
2017-11-04T17:07:36Z
2017-11-04T17:07:36Z
2017-11-06T13:57:51Z
API: change IntervalIndex.contains to work elementwise
diff --git a/doc/source/reference/arrays.rst b/doc/source/reference/arrays.rst index 77a87cafb9258..bf9520c54040d 100644 --- a/doc/source/reference/arrays.rst +++ b/doc/source/reference/arrays.rst @@ -335,6 +335,7 @@ A collection of intervals may be stored in an :class:`arrays.IntervalArray`. arrays.IntervalArray.from_arrays arrays.IntervalArray.from_tuples arrays.IntervalArray.from_breaks + arrays.IntervalArray.contains arrays.IntervalArray.overlaps arrays.IntervalArray.set_closed arrays.IntervalArray.to_tuples diff --git a/doc/source/reference/indexing.rst b/doc/source/reference/indexing.rst index bbac964e8a201..65860eb5c2f51 100644 --- a/doc/source/reference/indexing.rst +++ b/doc/source/reference/indexing.rst @@ -248,7 +248,6 @@ IntervalIndex components IntervalIndex.from_arrays IntervalIndex.from_tuples IntervalIndex.from_breaks - IntervalIndex.contains IntervalIndex.left IntervalIndex.right IntervalIndex.mid @@ -260,6 +259,7 @@ IntervalIndex components IntervalIndex.get_loc IntervalIndex.get_indexer IntervalIndex.set_closed + IntervalIndex.contains IntervalIndex.overlaps IntervalIndex.to_tuples diff --git a/doc/source/whatsnew/v0.25.0.rst b/doc/source/whatsnew/v0.25.0.rst index 008f6f0b8643e..48d0675c86d0c 100644 --- a/doc/source/whatsnew/v0.25.0.rst +++ b/doc/source/whatsnew/v0.25.0.rst @@ -615,6 +615,7 @@ Other deprecations - :attr:`Series.imag` and :attr:`Series.real` are deprecated. (:issue:`18262`) - :meth:`Series.put` is deprecated. (:issue:`18262`) - :meth:`Index.item` and :meth:`Series.item` is deprecated. (:issue:`18262`) +- :meth:`Index.contains` is deprecated. Use ``key in index`` (``__contains__``) instead (:issue:`17753`). .. _whatsnew_0250.prior_deprecations: diff --git a/pandas/core/arrays/interval.py b/pandas/core/arrays/interval.py index aaa4124182598..8ed28065ee7aa 100644 --- a/pandas/core/arrays/interval.py +++ b/pandas/core/arrays/interval.py @@ -79,6 +79,7 @@ from_arrays from_tuples from_breaks +contains overlaps set_closed to_tuples @@ -1017,6 +1018,52 @@ def repeat(self, repeats, axis=None): right_repeat = self.right.repeat(repeats) return self._shallow_copy(left=left_repeat, right=right_repeat) + _interval_shared_docs['contains'] = """ + Check elementwise if the Intervals contain the value. + + Return a boolean mask whether the value is contained in the Intervals + of the %(klass)s. + + .. versionadded:: 0.25.0 + + Parameters + ---------- + other : scalar + The value to check whether it is contained in the Intervals. + + Returns + ------- + boolean array + + See Also + -------- + Interval.contains : Check whether Interval object contains value. + %(klass)s.overlaps : Check if an Interval overlaps the values in the + %(klass)s. + + Examples + -------- + >>> intervals = pd.%(qualname)s.from_tuples([(0, 1), (1, 3), (2, 4)]) + >>> intervals + %(klass)s([(0, 1], (1, 3], (2, 4]], + closed='right', + dtype='interval[int64]') + >>> intervals.contains(0.5) + array([ True, False, False]) + """ + + @Appender(_interval_shared_docs['contains'] % _shared_docs_kwargs) + def contains(self, other): + if isinstance(other, Interval): + raise NotImplementedError( + 'contains not implemented for two intervals' + ) + + return ( + (self.left < other if self.open_left else self.left <= other) & + (other < self.right if self.open_right else other <= self.right) + ) + _interval_shared_docs['overlaps'] = """ Check elementwise if an Interval overlaps the values in the %(klass)s. 
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 4b7582fcf7cc0..f122810a2fe21 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -4015,13 +4015,6 @@ def is_type_compatible(self, kind): >>> idx Int64Index([1, 2, 3, 4], dtype='int64') - >>> idx.contains(2) - True - >>> idx.contains(6) - False - - This is equivalent to: - >>> 2 in idx True >>> 6 in idx @@ -4036,8 +4029,21 @@ def __contains__(self, key): except (OverflowError, TypeError, ValueError): return False - @Appender(_index_shared_docs['contains'] % _index_doc_kwargs) def contains(self, key): + """ + Return a boolean indicating whether the provided key is in the index. + + .. deprecated:: 0.25.0 + Use ``key in index`` instead of ``index.contains(key)``. + + Returns + ------- + bool + """ + warnings.warn( + "The 'contains' method is deprecated and will be removed in a " + "future version. Use 'key in index' instead of " + "'index.contains(key)'", FutureWarning, stacklevel=2) return key in self def __hash__(self): diff --git a/pandas/core/indexes/category.py b/pandas/core/indexes/category.py index 3d3774ce48e8b..4040d889b91eb 100644 --- a/pandas/core/indexes/category.py +++ b/pandas/core/indexes/category.py @@ -385,10 +385,6 @@ def __contains__(self, key): return contains(self, key, container=self._engine) - @Appender(_index_shared_docs['contains'] % _index_doc_kwargs) - def contains(self, key): - return key in self - def __array__(self, dtype=None): """ the array interface, return my values """ return np.array(self._data, dtype=dtype) diff --git a/pandas/core/indexes/datetimelike.py b/pandas/core/indexes/datetimelike.py index 7c90fb11aa1bf..e141f7b5c5b23 100644 --- a/pandas/core/indexes/datetimelike.py +++ b/pandas/core/indexes/datetimelike.py @@ -221,8 +221,6 @@ def __contains__(self, key): except (KeyError, TypeError, ValueError): return False - contains = __contains__ - # Try to run function on index first, and then on elements of index # Especially important for group-by functionality def map(self, mapper, na_action=None): diff --git a/pandas/core/indexes/interval.py b/pandas/core/indexes/interval.py index 777fa2eadd289..9f9ebcf67cee6 100644 --- a/pandas/core/indexes/interval.py +++ b/pandas/core/indexes/interval.py @@ -139,7 +139,7 @@ def func(intvidx_self, other, sort=False): name=_index_doc_kwargs['name'], versionadded="0.20.0", extra_attributes="is_overlapping\nvalues\n", - extra_methods="contains\n", + extra_methods="", examples=textwrap.dedent("""\ Examples -------- @@ -291,27 +291,6 @@ def __contains__(self, key): except KeyError: return False - def contains(self, key): - """ - Return a boolean indicating if the key is IN the index - - We accept / allow keys to be not *just* actual - objects. 
- - Parameters - ---------- - key : int, float, Interval - - Returns - ------- - boolean - """ - try: - self.get_loc(key) - return True - except KeyError: - return False - @Appender(_interval_shared_docs['to_tuples'] % dict( return_type="Index", examples=""" @@ -1137,6 +1116,10 @@ def equals(self, other): self.right.equals(other.right) and self.closed == other.closed) + @Appender(_interval_shared_docs['contains'] % _index_doc_kwargs) + def contains(self, other): + return self._data.contains(other) + @Appender(_interval_shared_docs['overlaps'] % _index_doc_kwargs) def overlaps(self, other): return self._data.overlaps(other) diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py index a06d304fb5a22..628cf500621d8 100644 --- a/pandas/core/indexes/multi.py +++ b/pandas/core/indexes/multi.py @@ -922,8 +922,6 @@ def __contains__(self, key): except (LookupError, TypeError, ValueError): return False - contains = __contains__ - @Appender(_index_shared_docs['_shallow_copy']) def _shallow_copy(self, values=None, **kwargs): if values is not None: diff --git a/pandas/core/indexes/period.py b/pandas/core/indexes/period.py index dc11099c3e903..f61b2e679f0c8 100644 --- a/pandas/core/indexes/period.py +++ b/pandas/core/indexes/period.py @@ -433,8 +433,6 @@ def __contains__(self, key): except Exception: return False - contains = __contains__ - @cache_readonly def _int64index(self): return Int64Index._simple_new(self.asi8, name=self.name) diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py index 1539feb2e0856..7e199c6c9f66b 100755 --- a/pandas/core/indexing.py +++ b/pandas/core/indexing.py @@ -2393,7 +2393,7 @@ def convert_to_index_sliceable(obj, key): elif isinstance(key, str): # we are an actual column - if obj._data.items.contains(key): + if key in obj._data.items: return None # We might have a datetimelike string that we can translate to a diff --git a/pandas/tests/indexes/interval/test_interval.py b/pandas/tests/indexes/interval/test_interval.py index b2f409837344a..a5e9f5902f565 100644 --- a/pandas/tests/indexes/interval/test_interval.py +++ b/pandas/tests/indexes/interval/test_interval.py @@ -753,23 +753,28 @@ def test_contains(self): assert Interval(3, 5) not in i assert Interval(-1, 0, closed='left') not in i - # To be removed, replaced by test_interval_new.py (see #16316, #16386) - def testcontains(self): + def test_contains_method(self): # can select values that are IN the range of a value i = IntervalIndex.from_arrays([0, 1], [1, 2]) - assert i.contains(0.1) - assert i.contains(0.5) - assert i.contains(1) - assert i.contains(Interval(0, 1)) - assert i.contains(Interval(0, 2)) + expected = np.array([False, False], dtype='bool') + actual = i.contains(0) + tm.assert_numpy_array_equal(actual, expected) + actual = i.contains(3) + tm.assert_numpy_array_equal(actual, expected) - # these overlaps completely - assert i.contains(Interval(0, 3)) - assert i.contains(Interval(1, 3)) + expected = np.array([True, False], dtype='bool') + actual = i.contains(0.5) + tm.assert_numpy_array_equal(actual, expected) + actual = i.contains(1) + tm.assert_numpy_array_equal(actual, expected) - assert not i.contains(20) - assert not i.contains(-20) + # __contains__ not implemented for "interval in interval", follow + # that for the contains method for now + with pytest.raises( + NotImplementedError, + match='contains not implemented for two'): + i.contains(Interval(0, 1)) def test_dropna(self, closed): @@ -939,11 +944,9 @@ def test_datetime(self, tz): assert iv_false not in index # .contains 
does check individual points - assert not index.contains(Timestamp('2000-01-01', tz=tz)) - assert index.contains(Timestamp('2000-01-01T12', tz=tz)) - assert index.contains(Timestamp('2000-01-02', tz=tz)) - assert index.contains(iv_true) - assert not index.contains(iv_false) + assert not index.contains(Timestamp('2000-01-01', tz=tz)).any() + assert index.contains(Timestamp('2000-01-01T12', tz=tz)).any() + assert index.contains(Timestamp('2000-01-02', tz=tz)).any() # test get_indexer start = Timestamp('1999-12-31T12:00', tz=tz) diff --git a/pandas/tests/indexes/period/test_indexing.py b/pandas/tests/indexes/period/test_indexing.py index 0801b36402870..27a690e58b70f 100644 --- a/pandas/tests/indexes/period/test_indexing.py +++ b/pandas/tests/indexes/period/test_indexing.py @@ -464,19 +464,13 @@ def test_contains(self): idx0 = pd.PeriodIndex(ps0) for p in ps0: - assert idx0.contains(p) assert p in idx0 - - assert idx0.contains(str(p)) assert str(p) in idx0 - assert idx0.contains('2017-09-01 00:00:01') assert '2017-09-01 00:00:01' in idx0 - assert idx0.contains('2017-09') assert '2017-09' in idx0 - assert not idx0.contains(p3) assert p3 not in idx0 def test_get_value(self): diff --git a/pandas/tests/indexes/test_base.py b/pandas/tests/indexes/test_base.py index 1de20dc765655..fc55887a933f8 100644 --- a/pandas/tests/indexes/test_base.py +++ b/pandas/tests/indexes/test_base.py @@ -2156,6 +2156,11 @@ def test_tab_complete_warning(self, ip): with provisionalcompleter('ignore'): list(ip.Completer.completions('idx.', 4)) + def test_deprecated_contains(self): + for index in self.indices.values(): + with tm.assert_produces_warning(FutureWarning): + index.contains(1) + class TestMixedIntIndex(Base): # Mostly the tests from common.py for which the results differ diff --git a/pandas/tests/indexes/test_range.py b/pandas/tests/indexes/test_range.py index 5f7f10e881ced..de756bf720a81 100644 --- a/pandas/tests/indexes/test_range.py +++ b/pandas/tests/indexes/test_range.py @@ -271,10 +271,12 @@ def test_cached_data(self): 91 in idx assert idx._cached_data is None - idx.contains(90) + with tm.assert_produces_warning(FutureWarning): + idx.contains(90) assert idx._cached_data is None - idx.contains(91) + with tm.assert_produces_warning(FutureWarning): + idx.contains(91) assert idx._cached_data is None idx.all()
xref https://github.com/pandas-dev/pandas/issues/16316 This is more of a proof-of-concept, to see if there is agreement on the behaviour change (I suppose the implementation itself can be easily vectorized based on the left/right attributes). cc @zfrenchee @shoyer @buyology @jreback
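A sketch of the proposed elementwise behavior, taken from the new docstring example in the diff:

```python
import pandas as pd

intervals = pd.IntervalIndex.from_tuples([(0, 1), (1, 3), (2, 4)])

# contains() now checks a scalar against each Interval and returns a
# boolean mask instead of a single membership result.
print(intervals.contains(0.5))  # array([ True, False, False])

# Scalar membership of an Interval still uses `in` (__contains__).
print(pd.Interval(0, 1) in intervals)  # True
```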
https://api.github.com/repos/pandas-dev/pandas/pulls/17753
2017-10-02T21:06:26Z
2019-07-01T23:56:50Z
2019-07-01T23:56:50Z
2019-07-02T12:41:30Z
CI: Unpin Miniconda for CI
diff --git a/appveyor.yml b/appveyor.yml index f1259f271ee39..a1f8886f6d068 100644 --- a/appveyor.yml +++ b/appveyor.yml @@ -59,7 +59,7 @@ install: # install our build environment - cmd: conda config --set show_channel_urls true --set always_yes true --set changeps1 false - # - cmd: conda update -q conda + - cmd: conda update -q conda - cmd: conda config --set ssl_verify false # add the pandas channel *before* defaults to have defaults take priority diff --git a/ci/install.ps1 b/ci/install.ps1 index b784b4ebf5e6a..64ec7f81884cd 100644 --- a/ci/install.ps1 +++ b/ci/install.ps1 @@ -7,7 +7,7 @@ $MINICONDA_URL = "http://repo.continuum.io/miniconda/" function DownloadMiniconda ($python_version, $platform_suffix) { $webclient = New-Object System.Net.WebClient - $filename = "Miniconda3-4.3.21-Windows-" + $platform_suffix + ".exe" + $filename = "Miniconda3-latest-Windows-" + $platform_suffix + ".exe" $url = $MINICONDA_URL + $filename $basedir = $pwd.Path + "\" @@ -85,7 +85,7 @@ function UpdateConda ($python_home) { function main () { InstallMiniconda "3.5" $env:PYTHON_ARCH $env:CONDA_ROOT - # UpdateConda $env:CONDA_ROOT + UpdateConda $env:CONDA_ROOT InstallCondaPackages $env:CONDA_ROOT "conda-build jinja2 anaconda-client" } diff --git a/ci/install_circle.sh b/ci/install_circle.sh index eba98be561397..fd79f907625e9 100755 --- a/ci/install_circle.sh +++ b/ci/install_circle.sh @@ -10,9 +10,7 @@ echo "[Using clean Miniconda install]" rm -rf "$MINICONDA_DIR" # install miniconda -# wget http://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh -q -O miniconda.sh || exit 1 -# Pin miniconda -wget https://repo.continuum.io/miniconda/Miniconda2-4.3.21-Linux-x86_64.sh -q -O miniconda.sh || exit 1 +wget http://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh -q -O miniconda.sh || exit 1 bash miniconda.sh -b -p "$MINICONDA_DIR" || exit 1 export PATH="$MINICONDA_DIR/bin:$PATH" @@ -20,7 +18,7 @@ export PATH="$MINICONDA_DIR/bin:$PATH" echo "[update conda]" conda config --set ssl_verify false || exit 1 conda config --set always_yes true --set changeps1 false || exit 1 -# conda update -q conda +conda update -q conda # add the pandas channel to take priority # to add extra packages diff --git a/ci/install_travis.sh b/ci/install_travis.sh index faf404ddcd293..b85263daa1eac 100755 --- a/ci/install_travis.sh +++ b/ci/install_travis.sh @@ -34,13 +34,9 @@ fi # install miniconda if [ "${TRAVIS_OS_NAME}" == "osx" ]; then - # temporarily pin miniconda - # time wget http://repo.continuum.io/miniconda/Miniconda3-latest-MacOSX-x86_64.sh -O miniconda.sh || exit 1 - time wget https://repo.continuum.io/miniconda/Miniconda2-4.3.21-MacOSX-x86_64.sh -O miniconda.sh || exit 1 + time wget http://repo.continuum.io/miniconda/Miniconda3-latest-MacOSX-x86_64.sh -O miniconda.sh || exit 1 else - # temporarily pin miniconda - # time wget http://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh -O miniconda.sh || exit 1 - time wget https://repo.continuum.io/miniconda/Miniconda2-4.3.21-Linux-x86_64.sh -O miniconda.sh || exit 1 + time wget http://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh -O miniconda.sh || exit 1 fi time bash miniconda.sh -b -p "$MINICONDA_DIR" || exit 1 @@ -52,7 +48,7 @@ echo echo "[update conda]" conda config --set ssl_verify false || exit 1 conda config --set quiet true --set always_yes true --set changeps1 false || exit 1 -# conda update -q conda +conda update -q conda echo echo "[add channels]" diff --git a/ci/requirements-2.7.run b/ci/requirements-2.7.run index 
7152cb2c8b605..a68e1d256058d 100644 --- a/ci/requirements-2.7.run +++ b/ci/requirements-2.7.run @@ -8,7 +8,7 @@ matplotlib openpyxl=1.6.2 xlrd=0.9.2 sqlalchemy=0.9.6 -lxml=3.2.1 +lxml scipy xlsxwriter=0.5.2 s3fs diff --git a/ci/requirements-2.7_LOCALE.run b/ci/requirements-2.7_LOCALE.run index 00006106f7009..978bbf6a051c5 100644 --- a/ci/requirements-2.7_LOCALE.run +++ b/ci/requirements-2.7_LOCALE.run @@ -8,5 +8,5 @@ xlrd=0.9.2 bottleneck=1.0.0 matplotlib=1.4.3 sqlalchemy=0.8.1 -lxml=3.2.1 +lxml scipy diff --git a/ci/requirements-2.7_WIN.run b/ci/requirements-2.7_WIN.run index a81542ee5006c..c4ca7fc736bb1 100644 --- a/ci/requirements-2.7_WIN.run +++ b/ci/requirements-2.7_WIN.run @@ -8,7 +8,7 @@ matplotlib openpyxl xlrd sqlalchemy -lxml=3.2.1 +lxml scipy xlsxwriter s3fs
Closes https://github.com/pandas-dev/pandas/issues/17696 xref https://github.com/pandas-dev/pandas/pull/17700
https://api.github.com/repos/pandas-dev/pandas/pulls/17752
2017-10-02T20:46:33Z
2017-10-05T18:37:16Z
2017-10-05T18:37:16Z
2017-10-27T12:04:46Z
DOC: Changing forking instructions to https (#16419)
diff --git a/doc/source/contributing.rst b/doc/source/contributing.rst index e172d0d2a71a2..d8d57a8bfffdd 100644 --- a/doc/source/contributing.rst +++ b/doc/source/contributing.rst @@ -101,7 +101,7 @@ You will need your own fork to work on the code. Go to the `pandas project page <https://github.com/pandas-dev/pandas>`_ and hit the ``Fork`` button. You will want to clone your fork to your machine:: - git clone git@github.com:your-user-name/pandas.git pandas-yourname + git clone https://github.com/your-user-name/pandas.git pandas-yourname cd pandas-yourname git remote add upstream git://github.com/pandas-dev/pandas.git
- [ ] closes #16419
https://api.github.com/repos/pandas-dev/pandas/pulls/17751
2017-10-02T20:40:12Z
2017-10-03T01:21:41Z
2017-10-03T01:21:41Z
2017-10-03T19:06:49Z
Explicitly define cmp_pandas_datetimestruct
diff --git a/pandas/_libs/src/datetime/np_datetime.c b/pandas/_libs/src/datetime/np_datetime.c index ffb901981f939..f8254ed9d8418 100644 --- a/pandas/_libs/src/datetime/np_datetime.c +++ b/pandas/_libs/src/datetime/np_datetime.c @@ -269,8 +269,8 @@ static void set_datetimestruct_days(npy_int64 days, /* * Compares two pandas_datetimestruct objects chronologically */ -int cmp_pandas_datetimestruct(pandas_datetimestruct *a, - pandas_datetimestruct *b) { +int cmp_pandas_datetimestruct(const pandas_datetimestruct *a, + const pandas_datetimestruct *b) { if (a->year > b->year) { return 1; } else if (a->year < b->year) { diff --git a/pandas/_libs/src/datetime/np_datetime.h b/pandas/_libs/src/datetime/np_datetime.h index a20bff60126aa..af3d2e0f01c1b 100644 --- a/pandas/_libs/src/datetime/np_datetime.h +++ b/pandas/_libs/src/datetime/np_datetime.h @@ -99,6 +99,14 @@ convert_datetimestruct_to_datetime(pandas_datetime_metadata *meta, npy_int64 get_datetimestruct_days(const pandas_datetimestruct *dts); + +/* + * Compares two pandas_datetimestruct objects chronologically + */ +int cmp_pandas_datetimestruct(const pandas_datetimestruct *a, + const pandas_datetimestruct *b); + + /* * Adjusts a datetimestruct based on a minutes offset. Assumes * the current values are valid. diff --git a/setup.py b/setup.py index 793aa089e708f..80be007ba2115 100755 --- a/setup.py +++ b/setup.py @@ -511,7 +511,7 @@ def pxd(name): 'pxdfiles': ['_libs/src/util', '_libs/hashtable'], 'depends': _pxi_dep['join']}, '_libs.reshape': {'pyxfile': '_libs/reshape', - 'depends': _pxi_dep['reshape'], 'include': []}, + 'depends': _pxi_dep['reshape']}, '_libs.interval': {'pyxfile': '_libs/interval', 'pxdfiles': ['_libs/hashtable'], 'depends': _pxi_dep['interval']}, @@ -527,7 +527,7 @@ def pxd(name): 'pandas/_libs/src/parser/io.c']}, '_libs.sparse': {'pyxfile': '_libs/sparse', 'depends': (['pandas/_libs/sparse.pyx'] + - _pxi_dep['sparse']), 'include': []}, + _pxi_dep['sparse'])}, '_libs.testing': {'pyxfile': '_libs/testing', 'depends': ['pandas/_libs/testing.pyx']}, '_libs.hashing': {'pyxfile': '_libs/hashing',
Fixes build warning ``` pandas/_libs/tslib.c:78967:17: warning: implicit declaration of function 'cmp_pandas_datetimestruct' is invalid in C99 [-Wimplicit-function-declaration] ``` Declaring the prototype in `np_datetime.h` (with `const` parameters to match the definition) makes the function visible to the generated `tslib.c`, so the compiler no longer falls back to an implicit declaration.
https://api.github.com/repos/pandas-dev/pandas/pulls/17750
2017-10-02T19:44:17Z
2017-10-03T19:57:06Z
2017-10-03T19:57:06Z
2017-10-30T16:25:26Z
Adding lxml to requirements_dev.txt
diff --git a/pandas/tests/io/test_html.py b/pandas/tests/io/test_html.py index 6fc080c8d9090..effa5ef4184be 100644 --- a/pandas/tests/io/test_html.py +++ b/pandas/tests/io/test_html.py @@ -953,6 +953,7 @@ def test_importcheck_thread_safety(): # see gh-16928 # force import check by reinitalising global vars in html.py + pytest.importorskip('lxml') reload(pandas.io.html) filename = os.path.join(DATA_PATH, 'valid_markup.html')
- [x] closes #17747 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
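For context on the one-line fix above: `pytest.importorskip` imports the named module and returns it, or skips the test outright when the import fails, which is why a single call is enough to guard the `reload` of `pandas.io.html`. A minimal sketch of the same guard pattern, assuming only pytest and (optionally) lxml are installed; the test body is illustrative, not the actual pandas test:

```python
import pytest


def test_parses_html_table():
    # Skip, rather than error, when the optional dependency is missing;
    # importorskip returns the imported module on success.
    etree = pytest.importorskip('lxml.etree')

    root = etree.fromstring('<table><tr><td>1</td></tr></table>')
    assert root.tag == 'table'
```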
https://api.github.com/repos/pandas-dev/pandas/pulls/17748
2017-10-02T18:28:05Z
2017-11-01T01:23:10Z
2017-11-01T01:23:10Z
2017-11-01T01:23:13Z
Move frequencies functions to cython
diff --git a/pandas/_libs/tslibs/frequencies.pxd b/pandas/_libs/tslibs/frequencies.pxd index 974eb4ab45df0..98d600c540ace 100644 --- a/pandas/_libs/tslibs/frequencies.pxd +++ b/pandas/_libs/tslibs/frequencies.pxd @@ -1,4 +1,10 @@ # -*- coding: utf-8 -*- # cython: profile=False +cpdef object get_rule_month(object source, object default=*) + cpdef get_freq_code(freqstr) +cpdef object get_freq(object freq) +cpdef str get_base_alias(freqstr) +cpdef int get_to_timestamp_base(int base) +cpdef str get_freq_str(base, mult=*) diff --git a/pandas/_libs/tslibs/frequencies.pyx b/pandas/_libs/tslibs/frequencies.pyx index 2a700d52eaaf3..cce3600371300 100644 --- a/pandas/_libs/tslibs/frequencies.pyx +++ b/pandas/_libs/tslibs/frequencies.pyx @@ -6,9 +6,12 @@ cimport cython import numpy as np cimport numpy as np +from numpy cimport int64_t np.import_array() -from util cimport is_integer_object +from util cimport is_integer_object, is_string_object + +from ccalendar import MONTH_NUMBERS # ---------------------------------------------------------------------- # Constants @@ -23,6 +26,22 @@ _INVALID_FREQ_ERROR = "Invalid frequency: {0}" # --------------------------------------------------------------------- # Period codes + +class FreqGroup(object): + FR_ANN = 1000 + FR_QTR = 2000 + FR_MTH = 3000 + FR_WK = 4000 + FR_BUS = 5000 + FR_DAY = 6000 + FR_HR = 7000 + FR_MIN = 8000 + FR_SEC = 9000 + FR_MS = 10000 + FR_US = 11000 + FR_NS = 12000 + + # period frequency constants corresponding to scikits timeseries # originals _period_code_map = { @@ -125,8 +144,8 @@ cpdef get_freq_code(freqstr): ------- return : tuple of base frequency code and stride (mult) - Example - ------- + Examples + -------- >>> get_freq_code('3D') (6000, 3) @@ -203,3 +222,292 @@ cpdef _period_str_to_code(freqstr): return _period_code_map[freqstr] except KeyError: raise ValueError(_INVALID_FREQ_ERROR.format(freqstr)) + + +cpdef str get_freq_str(base, mult=1): + """ + Return the summary string associated with this offset code, possibly + adjusted by a multiplier. + + Parameters + ---------- + base : int (member of FreqGroup) + + Returns + ------- + freq_str : str + + Examples + -------- + >>> get_freq_str(1000) + 'A-DEC' + + >>> get_freq_str(2000, 2) + '2Q-DEC' + + >>> get_freq_str("foo") + """ + code = _reverse_period_code_map.get(base) + if mult == 1: + return code + return str(mult) + code + + +cpdef str get_base_alias(freqstr): + """ + Returns the base frequency alias, e.g., '5D' -> 'D' + + Parameters + ---------- + freqstr : str + + Returns + ------- + base_alias : str + """ + return _base_and_stride(freqstr)[0] + + +cpdef int get_to_timestamp_base(int base): + """ + Return frequency code group used for base of to_timestamp against + frequency code. + + Parameters + ---------- + base : int (member of FreqGroup) + + Returns + ------- + base : int + + Examples + -------- + # Return day freq code against longer freq than day + >>> get_to_timestamp_base(get_freq_code('D')[0]) + 6000 + >>> get_to_timestamp_base(get_freq_code('W')[0]) + 6000 + >>> get_to_timestamp_base(get_freq_code('M')[0]) + 6000 + + # Return second freq code against hour between second + >>> get_to_timestamp_base(get_freq_code('H')[0]) + 9000 + >>> get_to_timestamp_base(get_freq_code('S')[0]) + 9000 + """ + if base < FreqGroup.FR_BUS: + return FreqGroup.FR_DAY + elif FreqGroup.FR_HR <= base <= FreqGroup.FR_SEC: + return FreqGroup.FR_SEC + return base + + +cpdef object get_freq(object freq): + """ + Return frequency code of given frequency str. 
+ If input is not string, return input as it is. + + Examples + -------- + >>> get_freq('A') + 1000 + + >>> get_freq('3A') + 1000 + """ + if is_string_object(freq): + base, mult = get_freq_code(freq) + freq = base + return freq + + +# ---------------------------------------------------------------------- +# Frequency comparison + +cpdef bint is_subperiod(source, target): + """ + Returns True if downsampling is possible between source and target + frequencies + + Parameters + ---------- + source : string or DateOffset + Frequency converting from + target : string or DateOffset + Frequency converting to + + Returns + ------- + is_subperiod : boolean + """ + + if target is None or source is None: + return False + source = _maybe_coerce_freq(source) + target = _maybe_coerce_freq(target) + + if _is_annual(target): + if _is_quarterly(source): + return _quarter_months_conform(get_rule_month(source), + get_rule_month(target)) + return source in {'D', 'C', 'B', 'M', 'H', 'T', 'S', 'L', 'U', 'N'} + elif _is_quarterly(target): + return source in {'D', 'C', 'B', 'M', 'H', 'T', 'S', 'L', 'U', 'N'} + elif _is_monthly(target): + return source in {'D', 'C', 'B', 'H', 'T', 'S', 'L', 'U', 'N'} + elif _is_weekly(target): + return source in {target, 'D', 'C', 'B', 'H', 'T', 'S', 'L', 'U', 'N'} + elif target == 'B': + return source in {'B', 'H', 'T', 'S', 'L', 'U', 'N'} + elif target == 'C': + return source in {'C', 'H', 'T', 'S', 'L', 'U', 'N'} + elif target == 'D': + return source in {'D', 'H', 'T', 'S', 'L', 'U', 'N'} + elif target == 'H': + return source in {'H', 'T', 'S', 'L', 'U', 'N'} + elif target == 'T': + return source in {'T', 'S', 'L', 'U', 'N'} + elif target == 'S': + return source in {'S', 'L', 'U', 'N'} + elif target == 'L': + return source in {'L', 'U', 'N'} + elif target == 'U': + return source in {'U', 'N'} + elif target == 'N': + return source in {'N'} + + +cpdef bint is_superperiod(source, target): + """ + Returns True if upsampling is possible between source and target + frequencies + + Parameters + ---------- + source : string + Frequency converting from + target : string + Frequency converting to + + Returns + ------- + is_superperiod : boolean + """ + if target is None or source is None: + return False + source = _maybe_coerce_freq(source) + target = _maybe_coerce_freq(target) + + if _is_annual(source): + if _is_annual(target): + return get_rule_month(source) == get_rule_month(target) + + if _is_quarterly(target): + smonth = get_rule_month(source) + tmonth = get_rule_month(target) + return _quarter_months_conform(smonth, tmonth) + return target in {'D', 'C', 'B', 'M', 'H', 'T', 'S', 'L', 'U', 'N'} + elif _is_quarterly(source): + return target in {'D', 'C', 'B', 'M', 'H', 'T', 'S', 'L', 'U', 'N'} + elif _is_monthly(source): + return target in {'D', 'C', 'B', 'H', 'T', 'S', 'L', 'U', 'N'} + elif _is_weekly(source): + return target in {source, 'D', 'C', 'B', 'H', 'T', 'S', 'L', 'U', 'N'} + elif source == 'B': + return target in {'D', 'C', 'B', 'H', 'T', 'S', 'L', 'U', 'N'} + elif source == 'C': + return target in {'D', 'C', 'B', 'H', 'T', 'S', 'L', 'U', 'N'} + elif source == 'D': + return target in {'D', 'C', 'B', 'H', 'T', 'S', 'L', 'U', 'N'} + elif source == 'H': + return target in {'H', 'T', 'S', 'L', 'U', 'N'} + elif source == 'T': + return target in {'T', 'S', 'L', 'U', 'N'} + elif source == 'S': + return target in {'S', 'L', 'U', 'N'} + elif source == 'L': + return target in {'L', 'U', 'N'} + elif source == 'U': + return target in {'U', 'N'} + elif source == 'N': + return target in 
{'N'} + + +cdef str _maybe_coerce_freq(code): + """ we might need to coerce a code to a rule_code + and uppercase it + + Parameters + ---------- + source : string or DateOffset + Frequency converting from + + Returns + ------- + code : string + """ + assert code is not None + if getattr(code, '_typ', None) == 'dateoffset': + # i.e. isinstance(code, ABCDateOffset): + code = code.rule_code + return code.upper() + + +cdef bint _quarter_months_conform(str source, str target): + snum = MONTH_NUMBERS[source] + tnum = MONTH_NUMBERS[target] + return snum % 3 == tnum % 3 + + +cdef bint _is_annual(str rule): + rule = rule.upper() + return rule == 'A' or rule.startswith('A-') + + +cdef bint _is_quarterly(str rule): + rule = rule.upper() + return rule == 'Q' or rule.startswith('Q-') or rule.startswith('BQ') + + +cdef bint _is_monthly(str rule): + rule = rule.upper() + return rule == 'M' or rule == 'BM' + + +cdef bint _is_weekly(str rule): + rule = rule.upper() + return rule == 'W' or rule.startswith('W-') + + +# ---------------------------------------------------------------------- + +cpdef object get_rule_month(object source, object default='DEC'): + """ + Return starting month of given freq, default is December. + + Parameters + ---------- + source : object + default : object (default "DEC") + + Returns + ------- + rule_month: object (usually string) + + Examples + -------- + >>> get_rule_month('D') + 'DEC' + + >>> get_rule_month('A-JAN') + 'JAN' + """ + if hasattr(source, 'freqstr'): + source = source.freqstr + source = source.upper() + if '-' not in source: + return default + else: + return source.split('-')[1] diff --git a/pandas/_libs/tslibs/period.pyx b/pandas/_libs/tslibs/period.pyx index 42570e355e2bf..46365035a0b9a 100644 --- a/pandas/_libs/tslibs/period.pyx +++ b/pandas/_libs/tslibs/period.pyx @@ -28,14 +28,16 @@ cimport util from util cimport is_period_object, is_string_object, INT32_MIN from pandas._libs.missing cimport is_null_datetimelike + from timestamps import Timestamp from timezones cimport is_utc, is_tzlocal, get_utcoffset, get_dst_info from timedeltas cimport delta_to_nanoseconds from ccalendar import MONTH_NUMBERS -from parsing import (parse_time_string, NAT_SENTINEL, - _get_rule_month) -from frequencies cimport get_freq_code +from frequencies cimport (get_freq_code, get_base_alias, + get_to_timestamp_base, get_freq_str, + get_rule_month) +from parsing import parse_time_string, NAT_SENTINEL from resolution import resolution, Resolution from nattype import nat_strings, NaT, iNaT from nattype cimport _nat_scalar_rules, NPY_NAT @@ -572,7 +574,7 @@ cdef class _Period(object): if isinstance(freq, (int, tuple)): code, stride = get_freq_code(freq) - freq = frequencies._get_freq_str(code, stride) + freq = get_freq_str(code, stride) freq = frequencies.to_offset(freq) @@ -630,7 +632,7 @@ cdef class _Period(object): raise IncompatibleFrequency(msg.format(self.freqstr)) elif isinstance(other, offsets.DateOffset): freqstr = other.rule_code - base = frequencies.get_base_alias(freqstr) + base = get_base_alias(freqstr) if base == self.freq.rule_code: ordinal = self.ordinal + other.n return Period(ordinal=ordinal, freq=self.freq) @@ -756,7 +758,7 @@ cdef class _Period(object): if freq is None: base, mult = get_freq_code(self.freq) - freq = frequencies.get_to_timestamp_base(base) + freq = get_to_timestamp_base(base) base, mult = get_freq_code(freq) val = self.asfreq(freq, how) @@ -1149,7 +1151,7 @@ def _quarter_to_myear(year, quarter, freq): if quarter <= 0 or quarter > 4: raise 
ValueError('Quarter must be 1 <= q <= 4') - mnum = MONTH_NUMBERS[_get_rule_month(freq)] + 1 + mnum = MONTH_NUMBERS[get_rule_month(freq)] + 1 month = (mnum + (quarter - 1) * 3) % 12 + 1 if month > mnum: year -= 1 diff --git a/pandas/_libs/tslibs/resolution.pyx b/pandas/_libs/tslibs/resolution.pyx index 9cb2c450524fb..6eb867377bf54 100644 --- a/pandas/_libs/tslibs/resolution.pyx +++ b/pandas/_libs/tslibs/resolution.pyx @@ -23,7 +23,7 @@ from timezones cimport (is_utc, is_tzlocal, maybe_get_tz, get_dst_info, get_utcoffset) from fields import build_field_sarray from conversion import tz_convert -from ccalendar import DAYS, MONTH_ALIASES, int_to_weekday +from ccalendar import MONTH_ALIASES, int_to_weekday from pandas._libs.properties import cache_readonly from pandas._libs.tslib import Timestamp diff --git a/pandas/tests/indexes/datetimes/test_ops.py b/pandas/tests/indexes/datetimes/test_ops.py index 41d0dd38cd5f6..a7a6e3caab727 100644 --- a/pandas/tests/indexes/datetimes/test_ops.py +++ b/pandas/tests/indexes/datetimes/test_ops.py @@ -161,7 +161,7 @@ def test_round(self): tm.assert_index_equal(rng.round(freq='H'), expected_rng) assert elt.round(freq='H') == expected_elt - msg = pd.tseries.frequencies._INVALID_FREQ_ERROR + msg = pd._libs.tslibs.frequencies._INVALID_FREQ_ERROR with tm.assert_raises_regex(ValueError, msg): rng.round(freq='foo') with tm.assert_raises_regex(ValueError, msg): diff --git a/pandas/tests/indexes/period/test_tools.py b/pandas/tests/indexes/period/test_tools.py index 9df23948ae627..0e72cadb5d494 100644 --- a/pandas/tests/indexes/period/test_tools.py +++ b/pandas/tests/indexes/period/test_tools.py @@ -5,10 +5,11 @@ import pandas.util.testing as tm import pandas.core.indexes.period as period from pandas.compat import lrange -from pandas.tseries.frequencies import get_freq -from pandas._libs.tslibs.ccalendar import MONTHS +from pandas._libs.tslibs.frequencies import get_freq from pandas._libs.tslibs.period import period_ordinal, period_asfreq +from pandas._libs.tslibs.ccalendar import MONTHS + from pandas import (PeriodIndex, Period, DatetimeIndex, Timestamp, Series, date_range, to_datetime, period_range) @@ -369,7 +370,7 @@ def test_to_period_monthish(self): prng = rng.to_period() assert prng.freq == 'M' - msg = pd.tseries.frequencies._INVALID_FREQ_ERROR + msg = pd._libs.tslibs.frequencies._INVALID_FREQ_ERROR with tm.assert_raises_regex(ValueError, msg): date_range('01-Jan-2012', periods=8, freq='EOM') diff --git a/pandas/tests/indexes/timedeltas/test_ops.py b/pandas/tests/indexes/timedeltas/test_ops.py index fac3745ba4fb4..081e299caa876 100644 --- a/pandas/tests/indexes/timedeltas/test_ops.py +++ b/pandas/tests/indexes/timedeltas/test_ops.py @@ -114,7 +114,7 @@ def test_round(self): tm.assert_index_equal(td.round(freq='H'), expected_rng) assert elt.round(freq='H') == expected_elt - msg = pd.tseries.frequencies._INVALID_FREQ_ERROR + msg = pd._libs.tslibs.frequencies._INVALID_FREQ_ERROR with tm.assert_raises_regex(ValueError, msg): td.round(freq='foo') with tm.assert_raises_regex(ValueError, msg): diff --git a/pandas/tests/scalar/test_period.py b/pandas/tests/scalar/test_period.py index 792eb0d49077f..ce733829c2315 100644 --- a/pandas/tests/scalar/test_period.py +++ b/pandas/tests/scalar/test_period.py @@ -514,7 +514,7 @@ def test_period_deprecated_freq(self): "U": ["MICROSECOND", "MICROSECONDLY", "microsecond"], "N": ["NANOSECOND", "NANOSECONDLY", "nanosecond"]} - msg = pd.tseries.frequencies._INVALID_FREQ_ERROR + msg = pd._libs.tslibs.frequencies._INVALID_FREQ_ERROR for 
exp, freqs in iteritems(cases): for freq in freqs: with tm.assert_raises_regex(ValueError, msg): @@ -758,7 +758,7 @@ def test_properties_weekly_legacy(self): exp = Period(freq='W', year=2012, month=2, day=1) assert exp.days_in_month == 29 - msg = pd.tseries.frequencies._INVALID_FREQ_ERROR + msg = pd._libs.tslibs.frequencies._INVALID_FREQ_ERROR with tm.assert_raises_regex(ValueError, msg): Period(freq='WK', year=2007, month=1, day=7) diff --git a/pandas/tests/scalar/test_period_asfreq.py b/pandas/tests/scalar/test_period_asfreq.py index 32cea60c333b7..a2819a3478f79 100644 --- a/pandas/tests/scalar/test_period_asfreq.py +++ b/pandas/tests/scalar/test_period_asfreq.py @@ -1,7 +1,7 @@ import pandas as pd from pandas import Period, offsets from pandas.util import testing as tm -from pandas.tseries.frequencies import _period_code_map +from pandas._libs.tslibs.frequencies import _period_code_map class TestFreqConversion(object): @@ -293,13 +293,13 @@ def test_conv_weekly(self): assert ival_W.asfreq('W') == ival_W - msg = pd.tseries.frequencies._INVALID_FREQ_ERROR + msg = pd._libs.tslibs.frequencies._INVALID_FREQ_ERROR with tm.assert_raises_regex(ValueError, msg): ival_W.asfreq('WK') def test_conv_weekly_legacy(self): # frequency conversion tests: from Weekly Frequency - msg = pd.tseries.frequencies._INVALID_FREQ_ERROR + msg = pd._libs.tslibs.frequencies._INVALID_FREQ_ERROR with tm.assert_raises_regex(ValueError, msg): Period(freq='WK', year=2007, month=1, day=1) @@ -706,7 +706,7 @@ def test_asfreq_MS(self): assert initial.asfreq(freq="M", how="S") == Period('2013-01', 'M') - msg = pd.tseries.frequencies._INVALID_FREQ_ERROR + msg = pd._libs.tslibs.frequencies._INVALID_FREQ_ERROR with tm.assert_raises_regex(ValueError, msg): initial.asfreq(freq="MS", how="S") diff --git a/pandas/tests/scalar/test_timestamp.py b/pandas/tests/scalar/test_timestamp.py index 4f4f2648d3834..a3e9a0442ea0b 100644 --- a/pandas/tests/scalar/test_timestamp.py +++ b/pandas/tests/scalar/test_timestamp.py @@ -16,9 +16,12 @@ import pandas.util.testing as tm import pandas.util._test_decorators as td -from pandas.tseries import offsets, frequencies -from pandas._libs.tslibs.timezones import get_timezone, dateutil_gettz as gettz + +from pandas.tseries import offsets + from pandas._libs.tslibs import conversion, period +from pandas._libs.tslibs.timezones import get_timezone, dateutil_gettz as gettz +from pandas._libs.tslibs.frequencies import _INVALID_FREQ_ERROR from pandas.compat import long, PY3 from pandas.util.testing import assert_series_equal @@ -753,8 +756,7 @@ def _check_round(freq, expected): ('S', Timestamp('2000-01-05 05:09:15'))]: _check_round(freq, expected) - msg = frequencies._INVALID_FREQ_ERROR - with tm.assert_raises_regex(ValueError, msg): + with tm.assert_raises_regex(ValueError, _INVALID_FREQ_ERROR): stamp.round('foo') def test_class_ops_pytz(self): diff --git a/pandas/tests/tseries/offsets/test_fiscal.py b/pandas/tests/tseries/offsets/test_fiscal.py index f71480e1f83a5..c084cccbb74ac 100644 --- a/pandas/tests/tseries/offsets/test_fiscal.py +++ b/pandas/tests/tseries/offsets/test_fiscal.py @@ -10,7 +10,8 @@ import pandas.util.testing as tm from pandas import Timestamp -from pandas.tseries.frequencies import get_offset, _INVALID_FREQ_ERROR +from pandas.tseries.frequencies import get_offset +from pandas._libs.tslibs.frequencies import _INVALID_FREQ_ERROR from pandas.tseries.offsets import FY5253Quarter, FY5253 from pandas._libs.tslibs.offsets import WeekDay diff --git a/pandas/tests/tseries/offsets/test_offsets.py 
b/pandas/tests/tseries/offsets/test_offsets.py index 1a032182319f2..23e627aeba017 100644 --- a/pandas/tests/tseries/offsets/test_offsets.py +++ b/pandas/tests/tseries/offsets/test_offsets.py @@ -11,8 +11,9 @@ from pandas.compat.numpy import np_datetime64_compat from pandas.core.series import Series -from pandas.tseries.frequencies import (_offset_map, get_freq_code, get_offset, - _get_freq_str, _INVALID_FREQ_ERROR) +from pandas._libs.tslibs.frequencies import (get_freq_code, get_freq_str, + _INVALID_FREQ_ERROR) +from pandas.tseries.frequencies import _offset_map, get_offset from pandas.core.indexes.datetimes import ( _to_m8, DatetimeIndex, _daterange_cache) import pandas._libs.tslibs.offsets as liboffsets @@ -2825,7 +2826,7 @@ def test_rule_code(self): code, stride = get_freq_code('3' + k) assert isinstance(code, int) assert stride == 3 - assert k == _get_freq_str(code) + assert k == get_freq_str(code) def test_dateoffset_misc(): diff --git a/pandas/tests/tseries/test_frequencies.py b/pandas/tests/tseries/test_frequencies.py index 2486895086b2f..92d7eb15c929c 100644 --- a/pandas/tests/tseries/test_frequencies.py +++ b/pandas/tests/tseries/test_frequencies.py @@ -7,6 +7,9 @@ from pandas import (Index, DatetimeIndex, Timestamp, Series, date_range, period_range) +from pandas._libs.tslibs.frequencies import (_period_code_map, + _INVALID_FREQ_ERROR) +from pandas._libs.tslibs.ccalendar import MONTHS from pandas._libs.tslibs import resolution import pandas.tseries.frequencies as frequencies from pandas.core.tools.datetimes import to_datetime @@ -284,87 +287,6 @@ def test_rule_aliases(): assert rule == offsets.Micro(10) -def test_get_rule_month(): - result = frequencies._get_rule_month('W') - assert (result == 'DEC') - result = frequencies._get_rule_month(offsets.Week()) - assert (result == 'DEC') - - result = frequencies._get_rule_month('D') - assert (result == 'DEC') - result = frequencies._get_rule_month(offsets.Day()) - assert (result == 'DEC') - - result = frequencies._get_rule_month('Q') - assert (result == 'DEC') - result = frequencies._get_rule_month(offsets.QuarterEnd(startingMonth=12)) - print(result == 'DEC') - - result = frequencies._get_rule_month('Q-JAN') - assert (result == 'JAN') - result = frequencies._get_rule_month(offsets.QuarterEnd(startingMonth=1)) - assert (result == 'JAN') - - result = frequencies._get_rule_month('A-DEC') - assert (result == 'DEC') - result = frequencies._get_rule_month('Y-DEC') - assert (result == 'DEC') - result = frequencies._get_rule_month(offsets.YearEnd()) - assert (result == 'DEC') - - result = frequencies._get_rule_month('A-MAY') - assert (result == 'MAY') - result = frequencies._get_rule_month('Y-MAY') - assert (result == 'MAY') - result = frequencies._get_rule_month(offsets.YearEnd(month=5)) - assert (result == 'MAY') - - -def test_period_str_to_code(): - assert (frequencies._period_str_to_code('A') == 1000) - assert (frequencies._period_str_to_code('A-DEC') == 1000) - assert (frequencies._period_str_to_code('A-JAN') == 1001) - assert (frequencies._period_str_to_code('Y') == 1000) - assert (frequencies._period_str_to_code('Y-DEC') == 1000) - assert (frequencies._period_str_to_code('Y-JAN') == 1001) - - assert (frequencies._period_str_to_code('Q') == 2000) - assert (frequencies._period_str_to_code('Q-DEC') == 2000) - assert (frequencies._period_str_to_code('Q-FEB') == 2002) - - def _assert_depr(freq, expected, aliases): - assert isinstance(aliases, list) - assert (frequencies._period_str_to_code(freq) == expected) - - msg = 
frequencies._INVALID_FREQ_ERROR - for alias in aliases: - with tm.assert_raises_regex(ValueError, msg): - frequencies._period_str_to_code(alias) - - _assert_depr("M", 3000, ["MTH", "MONTH", "MONTHLY"]) - - assert (frequencies._period_str_to_code('W') == 4000) - assert (frequencies._period_str_to_code('W-SUN') == 4000) - assert (frequencies._period_str_to_code('W-FRI') == 4005) - - _assert_depr("B", 5000, ["BUS", "BUSINESS", "BUSINESSLY", "WEEKDAY"]) - _assert_depr("D", 6000, ["DAY", "DLY", "DAILY"]) - _assert_depr("H", 7000, ["HR", "HOUR", "HRLY", "HOURLY"]) - - _assert_depr("T", 8000, ["minute", "MINUTE", "MINUTELY"]) - assert (frequencies._period_str_to_code('Min') == 8000) - - _assert_depr("S", 9000, ["sec", "SEC", "SECOND", "SECONDLY"]) - _assert_depr("L", 10000, ["MILLISECOND", "MILLISECONDLY"]) - assert (frequencies._period_str_to_code('ms') == 10000) - - _assert_depr("U", 11000, ["MICROSECOND", "MICROSECONDLY"]) - assert (frequencies._period_str_to_code('US') == 11000) - - _assert_depr("N", 12000, ["NANOSECOND", "NANOSECONDLY"]) - assert (frequencies._period_str_to_code('NS') == 12000) - - class TestFrequencyCode(object): def test_freq_code(self): @@ -380,7 +302,7 @@ def test_freq_code(self): assert frequencies.get_freq('W-MON') == 4001 assert frequencies.get_freq('W-FRI') == 4005 - for freqstr, code in compat.iteritems(frequencies._period_code_map): + for freqstr, code in compat.iteritems(_period_code_map): result = frequencies.get_freq(freqstr) assert result == code @@ -875,40 +797,10 @@ def test_legacy_offset_warnings(self): 'WOM@4THU', 'WOM@1FRI', 'WOM@2FRI', 'WOM@3FRI', 'WOM@4FRI'] - msg = frequencies._INVALID_FREQ_ERROR + msg = _INVALID_FREQ_ERROR for freq in freqs: with tm.assert_raises_regex(ValueError, msg): frequencies.get_offset(freq) with tm.assert_raises_regex(ValueError, msg): date_range('2011-01-01', periods=5, freq=freq) - - -MONTHS = ['JAN', 'FEB', 'MAR', 'APR', 'MAY', 'JUN', 'JUL', 'AUG', 'SEP', 'OCT', - 'NOV', 'DEC'] - - -def test_is_superperiod_subperiod(): - - # input validation - assert not (frequencies.is_superperiod(offsets.YearEnd(), None)) - assert not (frequencies.is_subperiod(offsets.MonthEnd(), None)) - assert not (frequencies.is_superperiod(None, offsets.YearEnd())) - assert not (frequencies.is_subperiod(None, offsets.MonthEnd())) - assert not (frequencies.is_superperiod(None, None)) - assert not (frequencies.is_subperiod(None, None)) - - assert (frequencies.is_superperiod(offsets.YearEnd(), offsets.MonthEnd())) - assert (frequencies.is_subperiod(offsets.MonthEnd(), offsets.YearEnd())) - - assert (frequencies.is_superperiod(offsets.Hour(), offsets.Minute())) - assert (frequencies.is_subperiod(offsets.Minute(), offsets.Hour())) - - assert (frequencies.is_superperiod(offsets.Second(), offsets.Milli())) - assert (frequencies.is_subperiod(offsets.Milli(), offsets.Second())) - - assert (frequencies.is_superperiod(offsets.Milli(), offsets.Micro())) - assert (frequencies.is_subperiod(offsets.Micro(), offsets.Milli())) - - assert (frequencies.is_superperiod(offsets.Micro(), offsets.Nano())) - assert (frequencies.is_subperiod(offsets.Nano(), offsets.Micro())) diff --git a/pandas/tests/tseries/test_libfrequencies.py b/pandas/tests/tseries/test_libfrequencies.py new file mode 100644 index 0000000000000..601d542da3095 --- /dev/null +++ b/pandas/tests/tseries/test_libfrequencies.py @@ -0,0 +1,116 @@ +# -*- coding: utf-8 -*- + +import pandas.util.testing as tm + +from pandas.tseries import offsets +from pandas._libs.tslibs.frequencies import (get_rule_month, + 
_period_str_to_code, + _INVALID_FREQ_ERROR, + is_superperiod, is_subperiod) + + +def assert_aliases_deprecated(freq, expected, aliases): + assert isinstance(aliases, list) + assert (_period_str_to_code(freq) == expected) + + for alias in aliases: + with tm.assert_raises_regex(ValueError, _INVALID_FREQ_ERROR): + _period_str_to_code(alias) + + +def test_get_rule_month(): + result = get_rule_month('W') + assert (result == 'DEC') + result = get_rule_month(offsets.Week()) + assert (result == 'DEC') + + result = get_rule_month('D') + assert (result == 'DEC') + result = get_rule_month(offsets.Day()) + assert (result == 'DEC') + + result = get_rule_month('Q') + assert (result == 'DEC') + result = get_rule_month(offsets.QuarterEnd(startingMonth=12)) + + result = get_rule_month('Q-JAN') + assert (result == 'JAN') + result = get_rule_month(offsets.QuarterEnd(startingMonth=1)) + assert (result == 'JAN') + + result = get_rule_month('A-DEC') + assert (result == 'DEC') + result = get_rule_month('Y-DEC') + assert (result == 'DEC') + result = get_rule_month(offsets.YearEnd()) + assert (result == 'DEC') + + result = get_rule_month('A-MAY') + assert (result == 'MAY') + result = get_rule_month('Y-MAY') + assert (result == 'MAY') + result = get_rule_month(offsets.YearEnd(month=5)) + assert (result == 'MAY') + + +def test_period_str_to_code(): + assert (_period_str_to_code('A') == 1000) + assert (_period_str_to_code('A-DEC') == 1000) + assert (_period_str_to_code('A-JAN') == 1001) + assert (_period_str_to_code('Y') == 1000) + assert (_period_str_to_code('Y-DEC') == 1000) + assert (_period_str_to_code('Y-JAN') == 1001) + + assert (_period_str_to_code('Q') == 2000) + assert (_period_str_to_code('Q-DEC') == 2000) + assert (_period_str_to_code('Q-FEB') == 2002) + + assert_aliases_deprecated("M", 3000, ["MTH", "MONTH", "MONTHLY"]) + + assert (_period_str_to_code('W') == 4000) + assert (_period_str_to_code('W-SUN') == 4000) + assert (_period_str_to_code('W-FRI') == 4005) + + assert_aliases_deprecated("B", 5000, ["BUS", "BUSINESS", + "BUSINESSLY", "WEEKDAY"]) + assert_aliases_deprecated("D", 6000, ["DAY", "DLY", "DAILY"]) + assert_aliases_deprecated("H", 7000, ["HR", "HOUR", "HRLY", "HOURLY"]) + + assert_aliases_deprecated("T", 8000, ["minute", "MINUTE", "MINUTELY"]) + assert (_period_str_to_code('Min') == 8000) + + assert_aliases_deprecated("S", 9000, ["sec", "SEC", "SECOND", "SECONDLY"]) + assert_aliases_deprecated("L", 10000, ["MILLISECOND", "MILLISECONDLY"]) + assert (_period_str_to_code('ms') == 10000) + + assert_aliases_deprecated("U", 11000, ["MICROSECOND", "MICROSECONDLY"]) + assert (_period_str_to_code('US') == 11000) + + assert_aliases_deprecated("N", 12000, ["NANOSECOND", "NANOSECONDLY"]) + assert (_period_str_to_code('NS') == 12000) + + +def test_is_superperiod_subperiod(): + + # input validation + assert not (is_superperiod(offsets.YearEnd(), None)) + assert not (is_subperiod(offsets.MonthEnd(), None)) + assert not (is_superperiod(None, offsets.YearEnd())) + assert not (is_subperiod(None, offsets.MonthEnd())) + assert not (is_superperiod(None, None)) + assert not (is_subperiod(None, None)) + + assert (is_superperiod(offsets.YearEnd(), offsets.MonthEnd())) + assert (is_subperiod(offsets.MonthEnd(), offsets.YearEnd())) + + assert (is_superperiod(offsets.Hour(), offsets.Minute())) + assert (is_subperiod(offsets.Minute(), offsets.Hour())) + + assert (is_superperiod(offsets.Second(), offsets.Milli())) + assert (is_subperiod(offsets.Milli(), offsets.Second())) + + assert (is_superperiod(offsets.Milli(), 
offsets.Micro())) + assert (is_subperiod(offsets.Micro(), offsets.Milli())) + + assert (is_superperiod(offsets.Micro(), offsets.Nano())) + assert (is_subperiod(offsets.Nano(), offsets.Micro())) diff --git a/pandas/tseries/frequencies.py b/pandas/tseries/frequencies.py index 4d1dd422be946..0cffd818202ed 100644 --- a/pandas/tseries/frequencies.py +++ b/pandas/tseries/frequencies.py @@ -13,37 +13,22 @@ is_datetime64_dtype) from pandas.tseries.offsets import DateOffset -import pandas.tseries.offsets as offsets from pandas._libs.tslib import Timedelta -from pandas._libs.tslibs.frequencies import ( # noqa - get_freq_code, _base_and_stride, _period_str_to_code, - _INVALID_FREQ_ERROR, opattern, _lite_rule_alias, _dont_uppercase, - _period_code_map, _reverse_period_code_map) + +import pandas._libs.tslibs.frequencies as libfreqs +from pandas._libs.tslibs.frequencies import ( # noqa, semi-public API + get_freq, get_base_alias, get_to_timestamp_base, get_freq_code, + FreqGroup, + is_subperiod, is_superperiod) + from pandas._libs.tslibs.resolution import (Resolution, _FrequencyInferer, _TimedeltaFrequencyInferer) -from pandas._libs.tslibs.parsing import _get_rule_month -from pandas._libs.tslibs.ccalendar import MONTH_NUMBERS from pytz import AmbiguousTimeError -class FreqGroup(object): - FR_ANN = 1000 - FR_QTR = 2000 - FR_MTH = 3000 - FR_WK = 4000 - FR_BUS = 5000 - FR_DAY = 6000 - FR_HR = 7000 - FR_MIN = 8000 - FR_SEC = 9000 - FR_MS = 10000 - FR_US = 11000 - FR_NS = 12000 - - RESO_NS = 0 RESO_US = 1 RESO_MS = 2 @@ -52,61 +37,6 @@ class FreqGroup(object): RESO_HR = 5 RESO_DAY = 6 - -def get_to_timestamp_base(base): - """ - Return frequency code group used for base of to_timestamp against - frequency code. - - Example - ------- - # Return day freq code against longer freq than day - >>> get_to_timestamp_base(get_freq_code('D')[0]) - 6000 - >>> get_to_timestamp_base(get_freq_code('W')[0]) - 6000 - >>> get_to_timestamp_base(get_freq_code('M')[0]) - 6000 - - # Return second freq code against hour between second - >>> get_to_timestamp_base(get_freq_code('H')[0]) - 9000 - >>> get_to_timestamp_base(get_freq_code('S')[0]) - 9000 - """ - if base < FreqGroup.FR_BUS: - return FreqGroup.FR_DAY - if FreqGroup.FR_HR <= base <= FreqGroup.FR_SEC: - return FreqGroup.FR_SEC - return base - - -def get_freq(freq): - """ - Return frequency code of given frequency str. - If input is not string, return input as it is. 
- - Example - ------- - >>> get_freq('A') - 1000 - - >>> get_freq('3A') - 1000 - """ - if isinstance(freq, compat.string_types): - base, mult = get_freq_code(freq) - freq = base - return freq - - -def _get_freq_str(base, mult=1): - code = _reverse_period_code_map.get(base) - if mult == 1: - return code - return str(mult) + code - - # --------------------------------------------------------------------- # Offset names ("time rules") and related functions @@ -195,7 +125,7 @@ def to_offset(freq): stride = freq[1] if isinstance(stride, compat.string_types): name, stride = stride, name - name, _ = _base_and_stride(name) + name, _ = libfreqs._base_and_stride(name) delta = get_offset(name) * stride elif isinstance(freq, timedelta): @@ -212,13 +142,13 @@ def to_offset(freq): else: delta = delta + offset except Exception: - raise ValueError(_INVALID_FREQ_ERROR.format(freq)) + raise ValueError(libfreqs._INVALID_FREQ_ERROR.format(freq)) else: delta = None stride_sign = None try: - splitted = re.split(opattern, freq) + splitted = re.split(libfreqs.opattern, freq) if splitted[-1] != '' and not splitted[-1].isspace(): # the last element must be blank raise ValueError('last element must be blank') @@ -226,7 +156,7 @@ def to_offset(freq): splitted[2::4]): if sep != '' and not sep.isspace(): raise ValueError('separator must be spaces') - prefix = _lite_rule_alias.get(name) or name + prefix = libfreqs._lite_rule_alias.get(name) or name if stride_sign is None: stride_sign = -1 if stride.startswith('-') else 1 if not stride: @@ -243,21 +173,14 @@ def to_offset(freq): else: delta = delta + offset except Exception: - raise ValueError(_INVALID_FREQ_ERROR.format(freq)) + raise ValueError(libfreqs._INVALID_FREQ_ERROR.format(freq)) if delta is None: - raise ValueError(_INVALID_FREQ_ERROR.format(freq)) + raise ValueError(libfreqs._INVALID_FREQ_ERROR.format(freq)) return delta -def get_base_alias(freqstr): - """ - Returns the base frequency alias, e.g., '5D' -> 'D' - """ - return _base_and_stride(freqstr)[0] - - def get_offset(name): """ Return DateOffset object associated with rule name @@ -266,12 +189,12 @@ def get_offset(name): -------- get_offset('EOM') --> BMonthEnd(1) """ - if name not in _dont_uppercase: + if name not in libfreqs._dont_uppercase: name = name.upper() - name = _lite_rule_alias.get(name, name) - name = _lite_rule_alias.get(name.lower(), name) + name = libfreqs._lite_rule_alias.get(name, name) + name = libfreqs._lite_rule_alias.get(name.lower(), name) else: - name = _lite_rule_alias.get(name, name) + name = libfreqs._lite_rule_alias.get(name, name) if name not in _offset_map: try: @@ -282,7 +205,7 @@ def get_offset(name): offset = klass._from_name(*split[1:]) except (ValueError, TypeError, KeyError): # bad prefix or suffix - raise ValueError(_INVALID_FREQ_ERROR.format(name)) + raise ValueError(libfreqs._INVALID_FREQ_ERROR.format(name)) # cache _offset_map[name] = offset # do not return cache because it's mutable @@ -345,158 +268,3 @@ def infer_freq(index, warn=True): inferer = _FrequencyInferer(index, warn=warn) return inferer.get_freq() - - -def _maybe_coerce_freq(code): - """ we might need to coerce a code to a rule_code - and uppercase it - - Parameters - ---------- - source : string - Frequency converting from - - Returns - ------- - string code - """ - - assert code is not None - if isinstance(code, offsets.DateOffset): - code = code.rule_code - return code.upper() - - -def is_subperiod(source, target): - """ - Returns True if downsampling is possible between source and target - frequencies - 
- Parameters - ---------- - source : string - Frequency converting from - target : string - Frequency converting to - - Returns - ------- - is_subperiod : boolean - """ - - if target is None or source is None: - return False - source = _maybe_coerce_freq(source) - target = _maybe_coerce_freq(target) - - if _is_annual(target): - if _is_quarterly(source): - return _quarter_months_conform(_get_rule_month(source), - _get_rule_month(target)) - return source in ['D', 'C', 'B', 'M', 'H', 'T', 'S', 'L', 'U', 'N'] - elif _is_quarterly(target): - return source in ['D', 'C', 'B', 'M', 'H', 'T', 'S', 'L', 'U', 'N'] - elif _is_monthly(target): - return source in ['D', 'C', 'B', 'H', 'T', 'S', 'L', 'U', 'N'] - elif _is_weekly(target): - return source in [target, 'D', 'C', 'B', 'H', 'T', 'S', 'L', 'U', 'N'] - elif target == 'B': - return source in ['B', 'H', 'T', 'S', 'L', 'U', 'N'] - elif target == 'C': - return source in ['C', 'H', 'T', 'S', 'L', 'U', 'N'] - elif target == 'D': - return source in ['D', 'H', 'T', 'S', 'L', 'U', 'N'] - elif target == 'H': - return source in ['H', 'T', 'S', 'L', 'U', 'N'] - elif target == 'T': - return source in ['T', 'S', 'L', 'U', 'N'] - elif target == 'S': - return source in ['S', 'L', 'U', 'N'] - elif target == 'L': - return source in ['L', 'U', 'N'] - elif target == 'U': - return source in ['U', 'N'] - elif target == 'N': - return source in ['N'] - - -def is_superperiod(source, target): - """ - Returns True if upsampling is possible between source and target - frequencies - - Parameters - ---------- - source : string - Frequency converting from - target : string - Frequency converting to - - Returns - ------- - is_superperiod : boolean - """ - if target is None or source is None: - return False - source = _maybe_coerce_freq(source) - target = _maybe_coerce_freq(target) - - if _is_annual(source): - if _is_annual(target): - return _get_rule_month(source) == _get_rule_month(target) - - if _is_quarterly(target): - smonth = _get_rule_month(source) - tmonth = _get_rule_month(target) - return _quarter_months_conform(smonth, tmonth) - return target in ['D', 'C', 'B', 'M', 'H', 'T', 'S', 'L', 'U', 'N'] - elif _is_quarterly(source): - return target in ['D', 'C', 'B', 'M', 'H', 'T', 'S', 'L', 'U', 'N'] - elif _is_monthly(source): - return target in ['D', 'C', 'B', 'H', 'T', 'S', 'L', 'U', 'N'] - elif _is_weekly(source): - return target in [source, 'D', 'C', 'B', 'H', 'T', 'S', 'L', 'U', 'N'] - elif source == 'B': - return target in ['D', 'C', 'B', 'H', 'T', 'S', 'L', 'U', 'N'] - elif source == 'C': - return target in ['D', 'C', 'B', 'H', 'T', 'S', 'L', 'U', 'N'] - elif source == 'D': - return target in ['D', 'C', 'B', 'H', 'T', 'S', 'L', 'U', 'N'] - elif source == 'H': - return target in ['H', 'T', 'S', 'L', 'U', 'N'] - elif source == 'T': - return target in ['T', 'S', 'L', 'U', 'N'] - elif source == 'S': - return target in ['S', 'L', 'U', 'N'] - elif source == 'L': - return target in ['L', 'U', 'N'] - elif source == 'U': - return target in ['U', 'N'] - elif source == 'N': - return target in ['N'] - - -def _is_annual(rule): - rule = rule.upper() - return rule == 'A' or rule.startswith('A-') - - -def _quarter_months_conform(source, target): - snum = MONTH_NUMBERS[source] - tnum = MONTH_NUMBERS[target] - return snum % 3 == tnum % 3 - - -def _is_quarterly(rule): - rule = rule.upper() - return rule == 'Q' or rule.startswith('Q-') or rule.startswith('BQ') - - -def _is_monthly(rule): - rule = rule.upper() - return rule == 'M' or rule == 'BM' - - -def _is_weekly(rule): - rule = 
rule.upper() - return rule == 'W' or rule.startswith('W-')
Move some more functions from `tseries.frequencies` up to `tslibs.frequencies`, and update the imports in `_libs.period` to cimports. - [x] closes #xxxx - [ ] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
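Because the relocated helpers are declared `cpdef`, they stay callable from Python after the move. A hedged smoke test, assuming the extension builds as in this diff; the expected values are taken from the doctests and the period-code map above:

```python
from pandas._libs.tslibs.frequencies import (
    get_freq, get_rule_month, is_subperiod, is_superperiod)

assert get_freq('A') == 1000            # annual period code, per the doctest
assert get_rule_month('A-MAY') == 'MAY'
assert is_subperiod('M', 'A')           # monthly downsamples to annual
assert is_superperiod('H', 'T')         # hourly upsamples to minutely
```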
https://api.github.com/repos/pandas-dev/pandas/pulls/17746
2017-10-02T17:26:27Z
2018-01-05T00:35:20Z
2018-01-05T00:35:20Z
2018-01-23T04:40:38Z
Remove unused imports
diff --git a/pandas/_libs/hashtable.pyx b/pandas/_libs/hashtable.pyx index 9aeb700dd5923..b6b81055f89b2 100644 --- a/pandas/_libs/hashtable.pyx +++ b/pandas/_libs/hashtable.pyx @@ -50,12 +50,6 @@ cnp.import_ufunc() cdef int64_t iNaT = util.get_nat() _SIZE_HINT_LIMIT = (1 << 20) + 7 -cdef extern from "datetime.h": - bint PyDateTime_Check(object o) - void PyDateTime_IMPORT() - -PyDateTime_IMPORT - cdef size_t _INIT_VEC_CAP = 128 diff --git a/pandas/_libs/lib.pyx b/pandas/_libs/lib.pyx index 503badd0ca8bc..e7e92b7ae987a 100644 --- a/pandas/_libs/lib.pyx +++ b/pandas/_libs/lib.pyx @@ -48,7 +48,6 @@ cdef double NAN = nan # this is our tseries.pxd from datetime cimport ( get_timedelta64_value, get_datetime64_value, - npy_timedelta, npy_datetime, PyDateTime_Check, PyDate_Check, PyTime_Check, PyDelta_Check, PyDateTime_IMPORT) @@ -62,11 +61,8 @@ from interval import Interval cdef int64_t NPY_NAT = util.get_nat() -ctypedef unsigned char UChar - cimport util -from util cimport (is_array, _checknull, _checknan, INT64_MAX, - INT64_MIN, UINT8_MAX) +from util cimport is_array, _checknull, _checknan cdef extern from "math.h": double sqrt(double x) diff --git a/pandas/_libs/period.pyx b/pandas/_libs/period.pyx index 725da22104efc..7760df5144117 100644 --- a/pandas/_libs/period.pyx +++ b/pandas/_libs/period.pyx @@ -7,8 +7,7 @@ from cpython cimport ( PyObject_RichCompareBool, Py_EQ, Py_NE) -from numpy cimport (int8_t, int32_t, int64_t, import_array, ndarray, - NPY_INT64, NPY_DATETIME, NPY_TIMEDELTA) +from numpy cimport int64_t, import_array, ndarray import numpy as np import_array() @@ -23,12 +22,11 @@ from datetime cimport ( pandas_datetimestruct, pandas_datetimestruct_to_datetime, pandas_datetime_to_datetimestruct, - PANDAS_FR_ns, - INT32_MIN) + PANDAS_FR_ns) cimport util -from util cimport is_period_object, is_string_object +from util cimport is_period_object, is_string_object, INT32_MIN from lib cimport is_null_datetimelike from pandas._libs import tslib @@ -90,12 +88,8 @@ cdef extern from "period_helper.h": int microseconds, int picoseconds, int freq) nogil except INT32_MIN - int64_t get_python_ordinal(int64_t period_ordinal, - int freq) except INT32_MIN - int get_date_info(int64_t ordinal, int freq, date_info *dinfo) nogil except INT32_MIN - double getAbsTime(int, int64_t, int64_t) int pyear(int64_t ordinal, int freq) except INT32_MIN int pqyear(int64_t ordinal, int freq) except INT32_MIN diff --git a/pandas/_libs/tslib.pyx b/pandas/_libs/tslib.pyx index ff20ea287bd9d..745632cf3d719 100644 --- a/pandas/_libs/tslib.pyx +++ b/pandas/_libs/tslib.pyx @@ -21,7 +21,6 @@ from cpython cimport ( cdef extern from "Python.h": cdef PyTypeObject *Py_TYPE(object) -# this is our datetime.pxd from libc.stdlib cimport free from util cimport (is_integer_object, is_float_object, is_datetime64_object, @@ -65,11 +64,8 @@ from .tslibs.parsing import parse_datetime_string cimport cython -import time - from pandas.compat import iteritems, callable -import operator import collections import warnings @@ -933,10 +929,6 @@ cdef int64_t _NS_UPPER_BOUND = INT64_MAX # use the smallest value with a 0 nanosecond unit (0s in last 3 digits) cdef int64_t _NS_LOWER_BOUND = -9223372036854775000 -cdef pandas_datetimestruct _NS_MIN_DTS, _NS_MAX_DTS -pandas_datetime_to_datetimestruct(_NS_LOWER_BOUND, PANDAS_FR_ns, &_NS_MIN_DTS) -pandas_datetime_to_datetimestruct(_NS_UPPER_BOUND, PANDAS_FR_ns, &_NS_MAX_DTS) - # Resolution is in nanoseconds Timestamp.min = Timestamp(_NS_LOWER_BOUND) Timestamp.max = Timestamp(_NS_UPPER_BOUND)
- [ ] closes #xxxx - [ ] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/17745
2017-10-02T16:56:29Z
2017-10-03T11:25:46Z
2017-10-03T11:25:46Z
2017-10-30T16:25:22Z
DEPR: deprecate raise_on_error in .where/.mask in favor of errors=
diff --git a/doc/source/whatsnew/v0.21.0.txt b/doc/source/whatsnew/v0.21.0.txt index 66b44d4d391e1..e86eae0a5e593 100644 --- a/doc/source/whatsnew/v0.21.0.txt +++ b/doc/source/whatsnew/v0.21.0.txt @@ -666,8 +666,9 @@ Deprecations - ``pd.TimeGrouper`` is deprecated in favor of :class:`pandas.Grouper` (:issue:`16747`) - ``cdate_range`` has been deprecated in favor of :func:`bdate_range`, which has gained ``weekmask`` and ``holidays`` parameters for building custom frequency date ranges. See the :ref:`documentation <timeseries.custom-freq-ranges>` for more details (:issue:`17596`) - passing ``categories`` or ``ordered`` kwargs to :func:`Series.astype` is deprecated, in favor of passing a :ref:`CategoricalDtype <whatsnew_0210.enhancements.categorical_dtype>` (:issue:`17636`) -- Passing a non-existent column in ``.to_excel(..., columns=)`` is deprecated and will raise a ``KeyError`` in the future (:issue:`17295`) - ``.get_value`` and ``.set_value`` on ``Series``, ``DataFrame``, ``Panel``, ``SparseSeries``, and ``SparseDataFrame`` are deprecated in favor of using ``.iat[]`` or ``.at[]`` accessors (:issue:`15269`) +- Passing a non-existant column in ``.to_excel(..., columns=)`` is deprecated and will raise a ``KeyError`` in the future (:issue:`17295`) +- ``raise_on_error`` parameter to :func:`Series.where`, :func:`Series.mask`, :func:`DataFrame.where`, :func:`DataFrame.mask` is deprecated, in favor of ``errors=`` (:issue:`14968`) .. _whatsnew_0210.deprecations.select: diff --git a/pandas/core/computation/expressions.py b/pandas/core/computation/expressions.py index 2196fb5917a44..c74da6379e32f 100644 --- a/pandas/core/computation/expressions.py +++ b/pandas/core/computation/expressions.py @@ -56,7 +56,7 @@ def set_numexpr_threads(n=None): ne.set_num_threads(n) -def _evaluate_standard(op, op_str, a, b, raise_on_error=True, **eval_kwargs): +def _evaluate_standard(op, op_str, a, b, **eval_kwargs): """ standard evaluation """ if _TEST_MODE: _store_test_result(False) @@ -89,7 +89,7 @@ def _can_use_numexpr(op, op_str, a, b, dtype_check): return False -def _evaluate_numexpr(op, op_str, a, b, raise_on_error=False, truediv=True, +def _evaluate_numexpr(op, op_str, a, b, truediv=True, reversed=False, **eval_kwargs): result = None @@ -111,25 +111,22 @@ def _evaluate_numexpr(op, op_str, a, b, raise_on_error=False, truediv=True, except ValueError as detail: if 'unknown type object' in str(detail): pass - except Exception as detail: - if raise_on_error: - raise if _TEST_MODE: _store_test_result(result is not None) if result is None: - result = _evaluate_standard(op, op_str, a, b, raise_on_error) + result = _evaluate_standard(op, op_str, a, b) return result -def _where_standard(cond, a, b, raise_on_error=True): +def _where_standard(cond, a, b): return np.where(_values_from_object(cond), _values_from_object(a), _values_from_object(b)) -def _where_numexpr(cond, a, b, raise_on_error=False): +def _where_numexpr(cond, a, b): result = None if _can_use_numexpr(None, 'where', a, b, 'where'): @@ -147,11 +144,10 @@ def _where_numexpr(cond, a, b, raise_on_error=False): if 'unknown type object' in str(detail): pass except Exception as detail: - if raise_on_error: - raise TypeError(str(detail)) + raise TypeError(str(detail)) if result is None: - result = _where_standard(cond, a, b, raise_on_error) + result = _where_standard(cond, a, b) return result @@ -189,7 +185,7 @@ def _bool_arith_check(op_str, a, b, not_allowed=frozenset(('/', '//', '**')), return True -def evaluate(op, op_str, a, b, raise_on_error=False, 
use_numexpr=True, +def evaluate(op, op_str, a, b, use_numexpr=True, **eval_kwargs): """ evaluate and return the expression of the op on a and b @@ -200,19 +196,16 @@ def evaluate(op, op_str, a, b, raise_on_error=False, use_numexpr=True, op_str: the string version of the op a : left operand b : right operand - raise_on_error : pass the error to the higher level if indicated - (default is False), otherwise evaluate the op with and - return the results use_numexpr : whether to try to use numexpr (default True) """ + use_numexpr = use_numexpr and _bool_arith_check(op_str, a, b) if use_numexpr: - return _evaluate(op, op_str, a, b, raise_on_error=raise_on_error, - **eval_kwargs) - return _evaluate_standard(op, op_str, a, b, raise_on_error=raise_on_error) + return _evaluate(op, op_str, a, b, **eval_kwargs) + return _evaluate_standard(op, op_str, a, b) -def where(cond, a, b, raise_on_error=False, use_numexpr=True): +def where(cond, a, b, use_numexpr=True): """ evaluate the where condition cond on a and b Parameters @@ -221,15 +214,12 @@ def where(cond, a, b, raise_on_error=False, use_numexpr=True): cond : a boolean array a : return if cond is True b : return if cond is False - raise_on_error : pass the error to the higher level if indicated - (default is False), otherwise evaluate the op with and - return the results use_numexpr : whether to try to use numexpr (default True) """ if use_numexpr: - return _where(cond, a, b, raise_on_error=raise_on_error) - return _where_standard(cond, a, b, raise_on_error=raise_on_error) + return _where(cond, a, b) + return _where_standard(cond, a, b) def set_test_mode(v=True): diff --git a/pandas/core/frame.py b/pandas/core/frame.py index a77c002b625cb..142ccf1f034bc 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -3862,9 +3862,9 @@ def _combine_match_columns(self, other, func, level=None, try_cast=try_cast) return self._constructor(new_data) - def _combine_const(self, other, func, raise_on_error=True, try_cast=True): + def _combine_const(self, other, func, errors='raise', try_cast=True): new_data = self._data.eval(func=func, other=other, - raise_on_error=raise_on_error, + errors=errors, try_cast=try_cast) return self._constructor(new_data) @@ -4035,8 +4035,7 @@ def combiner(x, y, needs_i8_conversion=False): else: mask = isna(x_values) - return expressions.where(mask, y_values, x_values, - raise_on_error=True) + return expressions.where(mask, y_values, x_values) return self.combine(other, combiner, overwrite=False) @@ -4091,8 +4090,7 @@ def update(self, other, join='left', overwrite=True, filter_func=None, if mask.all(): continue - self[col] = expressions.where(mask, this, that, - raise_on_error=True) + self[col] = expressions.where(mask, this, that) # ---------------------------------------------------------------------- # Misc methods diff --git a/pandas/core/generic.py b/pandas/core/generic.py index eecdd8a6109e9..942a9ff279092 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -5758,7 +5758,7 @@ def _align_series(self, other, join='outer', axis=None, level=None, return left.__finalize__(self), right.__finalize__(other) def _where(self, cond, other=np.nan, inplace=False, axis=None, level=None, - try_cast=False, raise_on_error=True): + errors='raise', try_cast=False): """ Equivalent to public method `where`, except that `other` is not applied as a function even if callable. Used in __setitem__. 
@@ -5887,7 +5887,7 @@ def _where(self, cond, other=np.nan, inplace=False, axis=None, level=None, else: new_data = self._data.where(other=other, cond=cond, align=align, - raise_on_error=raise_on_error, + errors=errors, try_cast=try_cast, axis=block_axis, transpose=self._AXIS_REVERSED) @@ -5924,12 +5924,21 @@ def _where(self, cond, other=np.nan, inplace=False, axis=None, level=None, Whether to perform the operation in place on the data axis : alignment axis if needed, default None level : alignment level if needed, default None + errors : str, {'raise', 'ignore'}, default 'raise' + - ``raise`` : allow exceptions to be raised + - ``ignore`` : suppress exceptions. On error return original object + + Note that currently this parameter won't affect + the results and will always coerce to a suitable dtype. + try_cast : boolean, default False try to cast the result back to the input type (if possible), raise_on_error : boolean, default True Whether to raise on invalid data types (e.g. trying to where on strings) + .. deprecated:: 0.21.0 + Returns ------- wh : same type as caller @@ -6005,24 +6014,46 @@ def _where(self, cond, other=np.nan, inplace=False, axis=None, level=None, cond_rev="False", name='where', name_other='mask')) def where(self, cond, other=np.nan, inplace=False, axis=None, level=None, - try_cast=False, raise_on_error=True): + errors='raise', try_cast=False, raise_on_error=None): + + if raise_on_error is not None: + warnings.warn( + "raise_on_error is deprecated in " + "favor of errors='raise|ignore'", + FutureWarning, stacklevel=2) + + if raise_on_error: + errors = 'raise' + else: + errors = 'ignore' other = com._apply_if_callable(other, self) - return self._where(cond, other, inplace, axis, level, try_cast, - raise_on_error) + return self._where(cond, other, inplace, axis, level, + errors=errors, try_cast=try_cast) @Appender(_shared_docs['where'] % dict(_shared_doc_kwargs, cond="False", cond_rev="True", name='mask', name_other='where')) def mask(self, cond, other=np.nan, inplace=False, axis=None, level=None, - try_cast=False, raise_on_error=True): + errors='raise', try_cast=False, raise_on_error=None): + + if raise_on_error is not None: + warnings.warn( + "raise_on_error is deprecated in " + "favor of errors='raise|ignore'", + FutureWarning, stacklevel=2) + + if raise_on_error: + errors = 'raise' + else: + errors = 'ignore' inplace = validate_bool_kwarg(inplace, 'inplace') cond = com._apply_if_callable(cond, self) return self.where(~cond, other=other, inplace=inplace, axis=axis, level=level, try_cast=try_cast, - raise_on_error=raise_on_error) + errors=errors) _shared_docs['shift'] = (""" Shift index by desired number of periods with an optional time freq diff --git a/pandas/core/internals.py b/pandas/core/internals.py index 90de4ded18f8c..a8f1a0c78c238 100644 --- a/pandas/core/internals.py +++ b/pandas/core/internals.py @@ -533,10 +533,16 @@ def astype(self, dtype, copy=False, errors='raise', values=None, **kwargs): **kwargs) def _astype(self, dtype, copy=False, errors='raise', values=None, - klass=None, mgr=None, raise_on_error=False, **kwargs): + klass=None, mgr=None, **kwargs): """ - Coerce to the new type (if copy=True, return a new copy) - raise on an except if raise == True + Coerce to the new type + + dtype : str, dtype convertible + copy : boolean, default False + copy if indicated + errors : str, {'raise', 'ignore'}, default 'ignore' + - ``raise`` : allow exceptions to be raised + - ``ignore`` : suppress exceptions. 
On error return original object """ errors_legal_values = ('raise', 'ignore') @@ -1248,7 +1254,7 @@ def shift(self, periods, axis=0, mgr=None): return [self.make_block(new_values, fastpath=True)] - def eval(self, func, other, raise_on_error=True, try_cast=False, mgr=None): + def eval(self, func, other, errors='raise', try_cast=False, mgr=None): """ evaluate the block; return result block from the result @@ -1256,8 +1262,10 @@ def eval(self, func, other, raise_on_error=True, try_cast=False, mgr=None): ---------- func : how to combine self, other other : a ndarray/object - raise_on_error : if True, raise when I can't perform the function, - False by default (and just return the data that we had coming in) + errors : str, {'raise', 'ignore'}, default 'raise' + - ``raise`` : allow exceptions to be raised + - ``ignore`` : suppress exceptions. On error return original object + try_cast : try casting the results to the input type Returns @@ -1295,7 +1303,7 @@ def eval(self, func, other, raise_on_error=True, try_cast=False, mgr=None): except TypeError: block = self.coerce_to_target_dtype(orig_other) return block.eval(func, orig_other, - raise_on_error=raise_on_error, + errors=errors, try_cast=try_cast, mgr=mgr) # get the result, may need to transpose the other @@ -1337,7 +1345,7 @@ def get_result(other): # error handler if we have an issue operating with the function def handle_error(): - if raise_on_error: + if errors == 'raise': # The 'detail' variable is defined in outer scope. raise TypeError('Could not operate %s with block values %s' % (repr(other), str(detail))) # noqa @@ -1383,7 +1391,7 @@ def handle_error(): result = _block_shape(result, ndim=self.ndim) return [self.make_block(result, fastpath=True, )] - def where(self, other, cond, align=True, raise_on_error=True, + def where(self, other, cond, align=True, errors='raise', try_cast=False, axis=0, transpose=False, mgr=None): """ evaluate the block; return result block(s) from the result @@ -1393,8 +1401,10 @@ def where(self, other, cond, align=True, raise_on_error=True, other : a ndarray/object cond : the condition to respect align : boolean, perform alignment on other/cond - raise_on_error : if True, raise when I can't perform the function, - False by default (and just return the data that we had coming in) + errors : str, {'raise', 'ignore'}, default 'raise' + - ``raise`` : allow exceptions to be raised + - ``ignore`` : suppress exceptions. 
On error return original object + axis : int transpose : boolean Set to True if self is stored with axes reversed @@ -1404,6 +1414,7 @@ def where(self, other, cond, align=True, raise_on_error=True, a new block(s), the result of the func """ import pandas.core.computation.expressions as expressions + assert errors in ['raise', 'ignore'] values = self.values orig_other = other @@ -1436,9 +1447,9 @@ def func(cond, values, other): try: return self._try_coerce_result(expressions.where( - cond, values, other, raise_on_error=True)) + cond, values, other)) except Exception as detail: - if raise_on_error: + if errors == 'raise': raise TypeError('Could not operate [%s] with block values ' '[%s]' % (repr(other), str(detail))) else: @@ -1454,10 +1465,10 @@ def func(cond, values, other): except TypeError: # we cannot coerce, return a compat dtype - # we are explicity ignoring raise_on_error here + # we are explicity ignoring errors block = self.coerce_to_target_dtype(other) blocks = block.where(orig_other, cond, align=align, - raise_on_error=raise_on_error, + errors=errors, try_cast=try_cast, axis=axis, transpose=transpose) return self._maybe_downcast(blocks, 'infer') @@ -2745,7 +2756,7 @@ def sp_index(self): def kind(self): return self.values.kind - def _astype(self, dtype, copy=False, raise_on_error=True, values=None, + def _astype(self, dtype, copy=False, errors='raise', values=None, klass=None, mgr=None, **kwargs): if values is None: values = self.values diff --git a/pandas/core/ops.py b/pandas/core/ops.py index 506b9267f32b4..f0bd2477eec07 100644 --- a/pandas/core/ops.py +++ b/pandas/core/ops.py @@ -671,8 +671,7 @@ def na_op(x, y): import pandas.core.computation.expressions as expressions try: - result = expressions.evaluate(op, str_rep, x, y, - raise_on_error=True, **eval_kwargs) + result = expressions.evaluate(op, str_rep, x, y, **eval_kwargs) except TypeError: if isinstance(y, (np.ndarray, ABCSeries, pd.Index)): dtype = find_common_type([x.dtype, y.dtype]) @@ -1196,8 +1195,7 @@ def na_op(x, y): import pandas.core.computation.expressions as expressions try: - result = expressions.evaluate(op, str_rep, x, y, - raise_on_error=True, **eval_kwargs) + result = expressions.evaluate(op, str_rep, x, y, **eval_kwargs) except TypeError: xrav = x.ravel() if isinstance(y, (np.ndarray, ABCSeries)): @@ -1329,7 +1327,7 @@ def f(self, other): # straight boolean comparisions we want to allow all columns # (regardless of dtype to pass thru) See #4537 for discussion. res = self._combine_const(other, func, - raise_on_error=False, + errors='ignore', try_cast=False) return res.fillna(True).astype(bool) @@ -1354,8 +1352,7 @@ def na_op(x, y): import pandas.core.computation.expressions as expressions try: - result = expressions.evaluate(op, str_rep, x, y, - raise_on_error=True, **eval_kwargs) + result = expressions.evaluate(op, str_rep, x, y, **eval_kwargs) except TypeError: # TODO: might need to find_common_type here? 
@@ -1385,8 +1382,7 @@ def na_op(x, y): import pandas.core.computation.expressions as expressions try: - result = expressions.evaluate(op, str_rep, x, y, - raise_on_error=True) + result = expressions.evaluate(op, str_rep, x, y) except TypeError: xrav = x.ravel() result = np.empty(x.size, dtype=bool) diff --git a/pandas/core/panel.py b/pandas/core/panel.py index fad6a39223b9e..14fba9560cae2 100644 --- a/pandas/core/panel.py +++ b/pandas/core/panel.py @@ -1527,7 +1527,7 @@ def na_op(x, y): try: result = expressions.evaluate(op, str_rep, x, y, - raise_on_error=True, + errors='raise', **eval_kwargs) except TypeError: result = op(x, y) diff --git a/pandas/core/series.py b/pandas/core/series.py index 43b7f1d043e4d..49b6a6651367b 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -253,7 +253,7 @@ def __init__(self, data=None, index=None, dtype=None, name=None, # create/copy the manager if isinstance(data, SingleBlockManager): if dtype is not None: - data = data.astype(dtype=dtype, raise_on_error=False, + data = data.astype(dtype=dtype, errors='ignore', copy=copy) elif copy: data = data.copy() diff --git a/pandas/core/sparse/frame.py b/pandas/core/sparse/frame.py index a43aad9a0204e..1b45b180b8dc1 100644 --- a/pandas/core/sparse/frame.py +++ b/pandas/core/sparse/frame.py @@ -638,7 +638,7 @@ def _combine_match_columns(self, other, func, level=None, fill_value=None, new_data, index=self.index, columns=union, default_fill_value=self.default_fill_value).__finalize__(self) - def _combine_const(self, other, func, raise_on_error=True, try_cast=True): + def _combine_const(self, other, func, errors='raise', try_cast=True): return self._apply_columns(lambda x: func(x, other)) def _reindex_index(self, index, method, copy, level, fill_value=np.nan, diff --git a/pandas/tests/series/test_indexing.py b/pandas/tests/series/test_indexing.py index 0ca319565e24b..75ae47ed2fdc1 100644 --- a/pandas/tests/series/test_indexing.py +++ b/pandas/tests/series/test_indexing.py @@ -1096,6 +1096,17 @@ def test_take(self): with tm.assert_produces_warning(FutureWarning): s.take([-1, 3, 4], convert=False) + def test_where_raise_on_error_deprecation(self): + + # gh-14968 + # deprecation of raise_on_error + s = Series(np.random.randn(5)) + cond = s > 0 + with tm.assert_produces_warning(FutureWarning): + s.where(cond, raise_on_error=True) + with tm.assert_produces_warning(FutureWarning): + s.mask(cond, raise_on_error=True) + def test_where(self): s = Series(np.random.randn(5)) cond = s > 0 diff --git a/pandas/tests/series/test_missing.py b/pandas/tests/series/test_missing.py index 01bf7274fd384..bd4e8b23f31b4 100644 --- a/pandas/tests/series/test_missing.py +++ b/pandas/tests/series/test_missing.py @@ -292,15 +292,15 @@ def test_fillna_consistency(self): dtype='object') assert_series_equal(result, expected) - # where (we ignore the raise_on_error) + # where (we ignore the errors=) result = s.where([True, False], Timestamp('20130101', tz='US/Eastern'), - raise_on_error=False) + errors='ignore') assert_series_equal(result, expected) result = s.where([True, False], Timestamp('20130101', tz='US/Eastern'), - raise_on_error=True) + errors='ignore') assert_series_equal(result, expected) # with a non-datetime diff --git a/pandas/tests/test_expressions.py b/pandas/tests/test_expressions.py index 2b972477ae999..6d2607962dfb0 100644 --- a/pandas/tests/test_expressions.py +++ b/pandas/tests/test_expressions.py @@ -124,6 +124,7 @@ def run_binary(self, df, other, assert_func, test_flex=False, expr._MIN_ELEMENTS = 0 
expr.set_test_mode(True) operations = ['gt', 'lt', 'ge', 'le', 'eq', 'ne'] + for arith in operations: if test_flex: op = lambda x, y: getattr(df, arith)(y)
closes #14968
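As a usage sketch of the rename this PR makes (illustrative code, not part of the diff; `s` and `cond` are made-up names, and the post-deprecation `Series.where` signature with an `errors=` keyword is assumed):

```python
import numpy as np
import pandas as pd

s = pd.Series(np.random.randn(5))
cond = s > 0

# deprecated spelling: now emits a FutureWarning
# s.where(cond, raise_on_error=True)

# replacement keyword: errors takes 'raise' or 'ignore'
result = s.where(cond, errors='raise')
```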
https://api.github.com/repos/pandas-dev/pandas/pulls/17744
2017-10-02T16:24:11Z
2017-10-05T18:36:29Z
2017-10-05T18:36:29Z
2017-10-05T18:38:05Z
DEPR: passing categories or ordered kwargs to Series.astype is deprecated
diff --git a/doc/source/whatsnew/v0.21.0.txt b/doc/source/whatsnew/v0.21.0.txt index d69a5c22acc03..28d4be7fd1850 100644 --- a/doc/source/whatsnew/v0.21.0.txt +++ b/doc/source/whatsnew/v0.21.0.txt @@ -572,6 +572,7 @@ Deprecations - :func:`DataFrame.as_blocks` is deprecated, as this is exposing the internal implementation (:issue:`17302`) - ``pd.TimeGrouper`` is deprecated in favor of :class:`pandas.Grouper` (:issue:`16747`) - ``cdate_range`` has been deprecated in favor of :func:`bdate_range`, which has gained ``weekmask`` and ``holidays`` parameters for building custom frequency date ranges. See the :ref:`documentation <timeseries.custom-freq-ranges>` for more details (:issue:`17596`) +- passing ``categories`` or ``ordered`` kwargs to :func:`Series.astype` is deprecated, in favor of passing a :ref:`CategoricalDtype <whatsnew_0210.enhancements.categorical_dtype>` (:issue:`17636`) .. _whatsnew_0210.deprecations.argmin_min: diff --git a/pandas/core/internals.py b/pandas/core/internals.py index 9e348819ce5a3..71c752c328402 100644 --- a/pandas/core/internals.py +++ b/pandas/core/internals.py @@ -1,3 +1,4 @@ +import warnings import copy from warnings import catch_warnings import itertools @@ -548,12 +549,20 @@ def _astype(self, dtype, copy=False, errors='raise', values=None, # may need to convert to categorical # this is only called for non-categoricals if self.is_categorical_astype(dtype): - if (('categories' in kwargs or 'ordered' in kwargs) and - isinstance(dtype, CategoricalDtype)): - raise TypeError("Cannot specify a CategoricalDtype and also " - "`categories` or `ordered`. Use " - "`dtype=CategoricalDtype(categories, ordered)`" - " instead.") + + # deprecated 17636 + if ('categories' in kwargs or 'ordered' in kwargs): + if isinstance(dtype, CategoricalDtype): + raise TypeError( + "Cannot specify a CategoricalDtype and also " + "`categories` or `ordered`. 
Use " + "`dtype=CategoricalDtype(categories, ordered)`" + " instead.") + warnings.warn("specifying 'categories' or 'ordered' in " + ".astype() is deprecated; pass a " + "CategoricalDtype instead", + FutureWarning, stacklevel=7) + kwargs = kwargs.copy() categories = getattr(dtype, 'categories', None) ordered = getattr(dtype, 'ordered', False) diff --git a/pandas/tests/frame/test_sorting.py b/pandas/tests/frame/test_sorting.py index e6f823bf6fac2..a98439797dc28 100644 --- a/pandas/tests/frame/test_sorting.py +++ b/pandas/tests/frame/test_sorting.py @@ -8,6 +8,7 @@ import pandas as pd from pandas.compat import lrange +from pandas.api.types import CategoricalDtype from pandas import (DataFrame, Series, MultiIndex, Timestamp, date_range, NaT, IntervalIndex) @@ -513,7 +514,7 @@ def test_sort_index_categorical_index(self): df = (DataFrame({'A': np.arange(6, dtype='int64'), 'B': Series(list('aabbca')) - .astype('category', categories=list('cab'))}) + .astype(CategoricalDtype(list('cab')))}) .set_index('B')) result = df.sort_index() diff --git a/pandas/tests/series/test_dtypes.py b/pandas/tests/series/test_dtypes.py index 3099c02e4aabd..b20c1817e5671 100644 --- a/pandas/tests/series/test_dtypes.py +++ b/pandas/tests/series/test_dtypes.py @@ -186,6 +186,16 @@ def test_astype_dict_like(self, dtype_class): with pytest.raises(KeyError): s.astype(dt5) + def test_astype_categories_deprecation(self): + + # deprecated 17636 + s = Series(['a', 'b', 'a']) + expected = s.astype(CategoricalDtype(['a', 'b'], ordered=True)) + with tm.assert_produces_warning(FutureWarning, + check_stacklevel=False): + result = s.astype('category', categories=['a', 'b'], ordered=True) + tm.assert_series_equal(result, expected) + def test_astype_categoricaldtype(self): s = Series(['a', 'b', 'a']) result = s.astype(CategoricalDtype(['a', 'b'], ordered=True)) diff --git a/pandas/tests/series/test_rank.py b/pandas/tests/series/test_rank.py index 128a4cdd845e6..e45acdedbd2a9 100644 --- a/pandas/tests/series/test_rank.py +++ b/pandas/tests/series/test_rank.py @@ -7,7 +7,8 @@ from numpy import nan import numpy as np -from pandas import (Series, date_range, NaT) +from pandas import Series, date_range, NaT +from pandas.api.types import CategoricalDtype from pandas.compat import product from pandas.util.testing import assert_series_equal @@ -123,35 +124,25 @@ def test_rank_categorical(self): exp_desc = Series([6., 5., 4., 3., 2., 1.]) ordered = Series( ['first', 'second', 'third', 'fourth', 'fifth', 'sixth'] - ).astype( - 'category', - categories=['first', 'second', 'third', - 'fourth', 'fifth', 'sixth'], - ordered=True - ) + ).astype(CategoricalDtype(categories=['first', 'second', 'third', + 'fourth', 'fifth', 'sixth'], + ordered=True)) assert_series_equal(ordered.rank(), exp) assert_series_equal(ordered.rank(ascending=False), exp_desc) # Unordered categoricals should be ranked as objects - unordered = Series( - ['first', 'second', 'third', 'fourth', 'fifth', 'sixth'], - ).astype( - 'category', - categories=['first', 'second', 'third', - 'fourth', 'fifth', 'sixth'], - ordered=False - ) + unordered = Series(['first', 'second', 'third', 'fourth', + 'fifth', 'sixth']).astype( + CategoricalDtype(categories=['first', 'second', 'third', + 'fourth', 'fifth', 'sixth'], + ordered=False)) exp_unordered = Series([2., 4., 6., 3., 1., 5.]) res = unordered.rank() assert_series_equal(res, exp_unordered) unordered1 = Series( [1, 2, 3, 4, 5, 6], - ).astype( - 'category', - categories=[1, 2, 3, 4, 5, 6], - ordered=False - ) + ).astype(CategoricalDtype([1, 
2, 3, 4, 5, 6], False)) exp_unordered1 = Series([1., 2., 3., 4., 5., 6.]) res1 = unordered1.rank() assert_series_equal(res1, exp_unordered1) @@ -159,14 +150,8 @@ def test_rank_categorical(self): # Test na_option for rank data na_ser = Series( ['first', 'second', 'third', 'fourth', 'fifth', 'sixth', np.NaN] - ).astype( - 'category', - categories=[ - 'first', 'second', 'third', 'fourth', - 'fifth', 'sixth', 'seventh' - ], - ordered=True - ) + ).astype(CategoricalDtype(['first', 'second', 'third', 'fourth', + 'fifth', 'sixth', 'seventh'], True)) exp_top = Series([2., 3., 4., 5., 6., 7., 1.]) exp_bot = Series([1., 2., 3., 4., 5., 6., 7.]) @@ -195,13 +180,8 @@ def test_rank_categorical(self): ) # Test with pct=True - na_ser = Series( - ['first', 'second', 'third', 'fourth', np.NaN], - ).astype( - 'category', - categories=['first', 'second', 'third', 'fourth'], - ordered=True - ) + na_ser = Series(['first', 'second', 'third', 'fourth', np.NaN]).astype( + CategoricalDtype(['first', 'second', 'third', 'fourth'], True)) exp_top = Series([0.4, 0.6, 0.8, 1., 0.2]) exp_bot = Series([0.2, 0.4, 0.6, 0.8, 1.]) exp_keep = Series([0.25, 0.5, 0.75, 1., np.NaN]) diff --git a/pandas/tests/test_categorical.py b/pandas/tests/test_categorical.py index d43901ea091b7..df32437a03f04 100644 --- a/pandas/tests/test_categorical.py +++ b/pandas/tests/test_categorical.py @@ -84,17 +84,17 @@ def test_getitem_category_type(self): # get slice result = s.iloc[0:2] - expected = pd.Series([1, 2]).astype('category', categories=[1, 2, 3]) + expected = pd.Series([1, 2]).astype(CategoricalDtype([1, 2, 3])) tm.assert_series_equal(result, expected) # get list of indexes result = s.iloc[[0, 1]] - expected = pd.Series([1, 2]).astype('category', categories=[1, 2, 3]) + expected = pd.Series([1, 2]).astype(CategoricalDtype([1, 2, 3])) tm.assert_series_equal(result, expected) # get boolean array result = s.iloc[[True, False, False]] - expected = pd.Series([1]).astype('category', categories=[1, 2, 3]) + expected = pd.Series([1]).astype(CategoricalDtype([1, 2, 3])) tm.assert_series_equal(result, expected) def test_setitem(self): @@ -2042,12 +2042,12 @@ def test_creation_astype(self): l = ["a", "b", "c", "a"] s = pd.Series(l) exp = pd.Series(Categorical(l, ordered=True)) - res = s.astype('category', ordered=True) + res = s.astype(CategoricalDtype(None, ordered=True)) tm.assert_series_equal(res, exp) exp = pd.Series(Categorical( l, categories=list('abcdef'), ordered=True)) - res = s.astype('category', categories=list('abcdef'), ordered=True) + res = s.astype(CategoricalDtype(list('abcdef'), ordered=True)) tm.assert_series_equal(res, exp) def test_construction_series(self): @@ -4228,11 +4228,11 @@ def test_concat_preserve(self): b = Series(list('aabbca')) df2 = DataFrame({'A': a, - 'B': b.astype('category', categories=list('cab'))}) + 'B': b.astype(CategoricalDtype(list('cab')))}) res = pd.concat([df2, df2]) - exp = DataFrame({'A': pd.concat([a, a]), - 'B': pd.concat([b, b]).astype( - 'category', categories=list('cab'))}) + exp = DataFrame( + {'A': pd.concat([a, a]), + 'B': pd.concat([b, b]).astype(CategoricalDtype(list('cab')))}) tm.assert_frame_equal(res, exp) def test_categorical_index_preserver(self): @@ -4241,13 +4241,13 @@ def test_categorical_index_preserver(self): b = Series(list('aabbca')) df2 = DataFrame({'A': a, - 'B': b.astype('category', categories=list('cab')) + 'B': b.astype(CategoricalDtype(list('cab'))) }).set_index('B') result = pd.concat([df2, df2]) - expected = DataFrame({'A': pd.concat([a, a]), - 'B': pd.concat([b, 
b]).astype( - 'category', categories=list('cab')) - }).set_index('B') + expected = DataFrame( + {'A': pd.concat([a, a]), + 'B': pd.concat([b, b]).astype(CategoricalDtype(list('cab'))) + }).set_index('B') tm.assert_frame_equal(result, expected) # wrong catgories @@ -4290,7 +4290,7 @@ def test_merge(self): cright = right.copy() cright['d'] = cright['d'].astype('category') result = pd.merge(left, cright, how='left', left_on='b', right_on='c') - expected['d'] = expected['d'].astype('category', categories=['null']) + expected['d'] = expected['d'].astype(CategoricalDtype(['null'])) tm.assert_frame_equal(result, expected) # cat-object
closes #17636
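A before/after sketch of the deprecated call, mirroring the test added above (illustrative only):

```python
import pandas as pd
from pandas.api.types import CategoricalDtype

s = pd.Series(['a', 'b', 'a'])

# deprecated: now emits a FutureWarning
# s.astype('category', categories=['a', 'b'], ordered=True)

# preferred: pass a CategoricalDtype instead
result = s.astype(CategoricalDtype(['a', 'b'], ordered=True))
```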
https://api.github.com/repos/pandas-dev/pandas/pulls/17742
2017-10-02T13:29:47Z
2017-10-03T01:14:19Z
2017-10-03T01:14:19Z
2017-10-03T08:57:42Z
allow neg index on str_get
diff --git a/doc/source/whatsnew/v0.21.0.txt b/doc/source/whatsnew/v0.21.0.txt index d69a5c22acc03..a5d5dc28e177d 100644 --- a/doc/source/whatsnew/v0.21.0.txt +++ b/doc/source/whatsnew/v0.21.0.txt @@ -668,6 +668,7 @@ Indexing - Bug in ``IntervalIndex`` where performing a scalar lookup fails for included right endpoints of non-overlapping monotonic decreasing indexes (:issue:`16417`, :issue:`17271`) - Bug in :meth:`DataFrame.first_valid_index` and :meth:`DataFrame.last_valid_index` when no valid entry (:issue:`17400`) - Bug in :func:`Series.rename` when called with a `callable`, incorrectly alters the name of the `Series`, rather than the name of the `Index`. (:issue:`17407`) +- Bug in :func:`String.str_get` raises `index out of range` error instead of inserting NaNs when using a negative index. (:issue:`17704`) I/O ^^^ diff --git a/pandas/core/strings.py b/pandas/core/strings.py index 021f88d1aec00..abef6f6086dbd 100644 --- a/pandas/core/strings.py +++ b/pandas/core/strings.py @@ -1260,7 +1260,7 @@ def str_get(arr, i): ------- items : Series/Index of objects """ - f = lambda x: x[i] if len(x) > i else np.nan + f = lambda x: x[i] if len(x) > i >= -len(x) else np.nan return _na_map(f, arr) diff --git a/pandas/tests/test_strings.py b/pandas/tests/test_strings.py index ec2b0b75b9eed..f1b97081b6d93 100644 --- a/pandas/tests/test_strings.py +++ b/pandas/tests/test_strings.py @@ -2484,6 +2484,19 @@ def test_get(self): expected = Series([u('b'), u('d'), np.nan, u('g')]) tm.assert_series_equal(result, expected) + # bounds testing + values = Series(['1_2_3_4_5', '6_7_8_9_10', '11_12']) + + # positive index + result = values.str.split('_').str.get(2) + expected = Series(['3', '8', np.nan]) + tm.assert_series_equal(result, expected) + + # negative index + result = values.str.split('_').str.get(-3) + expected = Series(['3', '8', np.nan]) + tm.assert_series_equal(result, expected) + def test_more_contains(self): # PR #1179 s = Series(['A', 'B', 'C', 'Aaba', 'Baca', '', NA,
- [x] closes #17704
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
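A small sketch of the fixed behavior, mirroring the bounds test above (names are illustrative):

```python
import numpy as np
import pandas as pd

values = pd.Series(['1_2_3_4_5', '6_7_8_9_10', '11_12'])

# a negative index that is out of bounds for an element now
# yields NaN instead of raising "index out of range"
result = values.str.split('_').str.get(-3)
expected = pd.Series(['3', '8', np.nan])
```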
https://api.github.com/repos/pandas-dev/pandas/pulls/17741
2017-10-02T13:28:39Z
2017-10-02T19:21:42Z
2017-10-02T19:21:42Z
2017-10-02T20:00:35Z
DEPR: deprecate .get_value and .set_value for Series, DataFrame, Panel & Sparse
diff --git a/doc/source/whatsnew/v0.21.0.txt b/doc/source/whatsnew/v0.21.0.txt index d7789bfbfd04c..66b44d4d391e1 100644 --- a/doc/source/whatsnew/v0.21.0.txt +++ b/doc/source/whatsnew/v0.21.0.txt @@ -666,7 +666,8 @@ Deprecations - ``pd.TimeGrouper`` is deprecated in favor of :class:`pandas.Grouper` (:issue:`16747`) - ``cdate_range`` has been deprecated in favor of :func:`bdate_range`, which has gained ``weekmask`` and ``holidays`` parameters for building custom frequency date ranges. See the :ref:`documentation <timeseries.custom-freq-ranges>` for more details (:issue:`17596`) - passing ``categories`` or ``ordered`` kwargs to :func:`Series.astype` is deprecated, in favor of passing a :ref:`CategoricalDtype <whatsnew_0210.enhancements.categorical_dtype>` (:issue:`17636`) -- Passing a non-existant column in ``.to_excel(..., columns=)`` is deprecated and will raise a ``KeyError`` in the future (:issue:`17295`) +- Passing a non-existent column in ``.to_excel(..., columns=)`` is deprecated and will raise a ``KeyError`` in the future (:issue:`17295`) +- ``.get_value`` and ``.set_value`` on ``Series``, ``DataFrame``, ``Panel``, ``SparseSeries``, and ``SparseDataFrame`` are deprecated in favor of using ``.iat[]`` or ``.at[]`` accessors (:issue:`15269`) .. _whatsnew_0210.deprecations.select: diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 778a3dc9046a3..a77c002b625cb 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -297,7 +297,8 @@ def _constructor(self): return DataFrame _constructor_sliced = Series - _deprecations = NDFrame._deprecations | frozenset(['sortlevel']) + _deprecations = NDFrame._deprecations | frozenset( + ['sortlevel', 'get_value', 'set_value']) @property def _constructor_expanddim(self): @@ -1922,6 +1923,10 @@ def get_value(self, index, col, takeable=False): """ Quickly retrieve single value at passed column and index + .. deprecated:: 0.21.0 + + Please use .at[] or .iat[] accessors. + Parameters ---------- index : row label @@ -1933,6 +1938,14 @@ def get_value(self, index, col, takeable=False): value : scalar value """ + warnings.warn("get_value is deprecated and will be removed " + "in a future release. Please use " + ".at[] or .iat[] accessors instead", FutureWarning, + stacklevel=2) + return self._get_value(index, col, takeable=takeable) + + def _get_value(self, index, col, takeable=False): + if takeable: series = self._iget_item_cache(col) return _maybe_box_datetimelike(series._values[index]) @@ -1948,12 +1961,17 @@ def get_value(self, index, col, takeable=False): # use positional col = self.columns.get_loc(col) index = self.index.get_loc(index) - return self.get_value(index, col, takeable=True) + return self._get_value(index, col, takeable=True) + _get_value.__doc__ = get_value.__doc__ def set_value(self, index, col, value, takeable=False): """ Put single value at passed column and index + .. deprecated:: 0.21.0 + + Please use .at[] or .iat[] accessors. + Parameters ---------- index : row label @@ -1967,10 +1985,17 @@ def set_value(self, index, col, value, takeable=False): If label pair is contained, will be reference to calling DataFrame, otherwise a new object """ + warnings.warn("set_value is deprecated and will be removed " + "in a future release. 
Please use " + ".at[] or .iat[] accessors instead", FutureWarning, + stacklevel=2) + return self._set_value(index, col, value, takeable=takeable) + + def _set_value(self, index, col, value, takeable=False): try: if takeable is True: series = self._iget_item_cache(col) - return series.set_value(index, value, takeable=True) + return series._set_value(index, value, takeable=True) series = self._get_item_cache(col) engine = self.index._engine @@ -1983,6 +2008,7 @@ def set_value(self, index, col, value, takeable=False): self._item_cache.pop(col, None) return self + _set_value.__doc__ = set_value.__doc__ def _ixs(self, i, axis=0): """ @@ -2791,7 +2817,7 @@ def lookup(self, row_labels, col_labels): else: result = np.empty(n, dtype='O') for i, (r, c) in enumerate(zip(row_labels, col_labels)): - result[i] = self.get_value(r, c) + result[i] = self._get_value(r, c) if is_object_dtype(result): result = lib.maybe_convert_objects(result) diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py index 199aa9cfca506..f1a3fe81a4540 100755 --- a/pandas/core/indexing.py +++ b/pandas/core/indexing.py @@ -112,7 +112,7 @@ def __getitem__(self, key): key = tuple(com._apply_if_callable(x, self.obj) for x in key) try: - values = self.obj.get_value(*key) + values = self.obj._get_value(*key) if is_scalar(values): return values except Exception: @@ -1542,7 +1542,7 @@ def _is_scalar_access(self, key): def _getitem_scalar(self, key): # a fast-path to scalar access # if not, raise - values = self.obj.get_value(*key) + values = self.obj._get_value(*key) return values def _get_partial_string_timestamp_match_key(self, key, labels): @@ -1701,7 +1701,7 @@ def _is_scalar_access(self, key): def _getitem_scalar(self, key): # a fast-path to scalar access # if not, raise - values = self.obj.get_value(*key, takeable=True) + values = self.obj._get_value(*key, takeable=True) return values def _is_valid_integer(self, key, axis): @@ -1866,7 +1866,7 @@ def __getitem__(self, key): raise ValueError('Invalid call for scalar access (getting)!') key = self._convert_key(key) - return self.obj.get_value(*key, takeable=self._takeable) + return self.obj._get_value(*key, takeable=self._takeable) def __setitem__(self, key, value): if isinstance(key, tuple): @@ -1883,7 +1883,7 @@ def __setitem__(self, key, value): '(setting)!') key = list(self._convert_key(key, is_setter=True)) key.append(value) - self.obj.set_value(*key, takeable=self._takeable) + self.obj._set_value(*key, takeable=self._takeable) class _AtIndexer(_ScalarAccessIndexer): diff --git a/pandas/core/panel.py b/pandas/core/panel.py index 68733a3a8b94e..fad6a39223b9e 100644 --- a/pandas/core/panel.py +++ b/pandas/core/panel.py @@ -470,6 +470,10 @@ def get_value(self, *args, **kwargs): """ Quickly retrieve single value at (item, major, minor) location + .. deprecated:: 0.21.0 + + Please use .at[] or .iat[] accessors. + Parameters ---------- item : item label (panel item) @@ -481,6 +485,13 @@ def get_value(self, *args, **kwargs): ------- value : scalar value """ + warnings.warn("get_value is deprecated and will be removed " + "in a future release. 
Please use " + ".at[] or .iat[] accessors instead", FutureWarning, + stacklevel=2) + return self._get_value(*args, **kwargs) + + def _get_value(self, *args, **kwargs): nargs = len(args) nreq = self._AXIS_LEN @@ -500,12 +511,17 @@ def get_value(self, *args, **kwargs): else: lower = self._get_item_cache(args[0]) - return lower.get_value(*args[1:], takeable=takeable) + return lower._get_value(*args[1:], takeable=takeable) + _get_value.__doc__ = get_value.__doc__ def set_value(self, *args, **kwargs): """ Quickly set single value at (item, major, minor) location + .. deprecated:: 0.21.0 + + Please use .at[] or .iat[] accessors. + Parameters ---------- item : item label (panel item) @@ -520,6 +536,13 @@ def set_value(self, *args, **kwargs): If label combo is contained, will be reference to calling Panel, otherwise a new object """ + warnings.warn("set_value is deprecated and will be removed " + "in a future release. Please use " + ".at[] or .iat[] accessors instead", FutureWarning, + stacklevel=2) + return self._set_value(*args, **kwargs) + + def _set_value(self, *args, **kwargs): # require an arg for each axis and the value nargs = len(args) nreq = self._AXIS_LEN + 1 @@ -540,7 +563,7 @@ def set_value(self, *args, **kwargs): else: lower = self._get_item_cache(args[0]) - lower.set_value(*args[1:], takeable=takeable) + lower._set_value(*args[1:], takeable=takeable) return self except KeyError: axes = self._expand_axes(args) @@ -553,7 +576,8 @@ def set_value(self, *args, **kwargs): if made_bigger: maybe_cast_item(result, args[0], likely_dtype) - return result.set_value(*args) + return result._set_value(*args) + _set_value.__doc__ = set_value.__doc__ def _box_item_values(self, key, values): if self.ndim == values.ndim: diff --git a/pandas/core/series.py b/pandas/core/series.py index 58cac46f63d7e..43b7f1d043e4d 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -147,7 +147,7 @@ class Series(base.IndexOpsMixin, generic.NDFrame): _metadata = ['name'] _accessors = frozenset(['dt', 'cat', 'str']) _deprecations = generic.NDFrame._deprecations | frozenset( - ['sortlevel', 'reshape']) + ['sortlevel', 'reshape', 'get_value', 'set_value']) _allow_index_ops = True def __init__(self, data=None, index=None, dtype=None, name=None, @@ -902,6 +902,10 @@ def get_value(self, label, takeable=False): """ Quickly retrieve single value at passed index label + .. deprecated:: 0.21.0 + + Please use .at[] or .iat[] accessors. + Parameters ---------- index : label @@ -911,9 +915,17 @@ def get_value(self, label, takeable=False): ------- value : scalar value """ + warnings.warn("get_value is deprecated and will be removed " + "in a future release. Please use " + ".at[] or .iat[] accessors instead", FutureWarning, + stacklevel=2) + return self._get_value(label, takeable=takeable) + + def _get_value(self, label, takeable=False): if takeable is True: return _maybe_box_datetimelike(self._values[label]) return self.index.get_value(self._values, label) + _get_value.__doc__ = get_value.__doc__ def set_value(self, label, value, takeable=False): """ @@ -921,6 +933,10 @@ def set_value(self, label, value, takeable=False): new object is created with the label placed at the end of the result index + .. deprecated:: 0.21.0 + + Please use .at[] or .iat[] accessors. 
+ Parameters ---------- label : object @@ -935,17 +951,25 @@ def set_value(self, label, value, takeable=False): If label is contained, will be reference to calling Series, otherwise a new object """ + warnings.warn("set_value is deprecated and will be removed " + "in a future release. Please use " + ".at[] or .iat[] accessors instead", FutureWarning, + stacklevel=2) + return self._set_value(label, value, takeable=takeable) + + def _set_value(self, label, value, takeable=False): try: if takeable: self._values[label] = value else: self.index._engine.set_value(self._values, label, value) - return self except KeyError: # set using a non-recursive method self.loc[label] = value - return self + + return self + _set_value.__doc__ = set_value.__doc__ def reset_index(self, level=None, drop=False, name=None, inplace=False): """ diff --git a/pandas/core/sparse/frame.py b/pandas/core/sparse/frame.py index 7aa49efa82f61..a43aad9a0204e 100644 --- a/pandas/core/sparse/frame.py +++ b/pandas/core/sparse/frame.py @@ -5,6 +5,7 @@ from __future__ import division # pylint: disable=E1101,E1103,W0231,E0202 +import warnings from pandas.compat import lmap from pandas import compat import numpy as np @@ -430,19 +431,47 @@ def __getitem__(self, key): else: return self._get_item_cache(key) - @Appender(DataFrame.get_value.__doc__, indents=0) def get_value(self, index, col, takeable=False): + """ + Quickly retrieve single value at passed column and index + + .. deprecated:: 0.21.0 + + Please use .at[] or .iat[] accessors. + + Parameters + ---------- + index : row label + col : column label + takeable : interpret the index/col as indexers, default False + + Returns + ------- + value : scalar value + """ + warnings.warn("get_value is deprecated and will be removed " + "in a future release. Please use " + ".at[] or .iat[] accessors instead", FutureWarning, + stacklevel=2) + return self._get_value(index, col, takeable=takeable) + + def _get_value(self, index, col, takeable=False): if takeable is True: series = self._iget_item_cache(col) else: series = self._get_item_cache(col) - return series.get_value(index, takeable=takeable) + return series._get_value(index, takeable=takeable) + _get_value.__doc__ = get_value.__doc__ def set_value(self, index, col, value, takeable=False): """ Put single value at passed column and index + .. deprecated:: 0.21.0 + + Please use .at[] or .iat[] accessors. + Parameters ---------- index : row label @@ -460,9 +489,18 @@ def set_value(self, index, col, value, takeable=False): ------- frame : DataFrame """ - dense = self.to_dense().set_value(index, col, value, takeable=takeable) + warnings.warn("set_value is deprecated and will be removed " + "in a future release. 
Please use " + ".at[] or .iat[] accessors instead", FutureWarning, + stacklevel=2) + return self._set_value(index, col, value, takeable=takeable) + + def _set_value(self, index, col, value, takeable=False): + dense = self.to_dense()._set_value( + index, col, value, takeable=takeable) return dense.to_sparse(kind=self._default_kind, fill_value=self._default_fill_value) + _set_value.__doc__ = set_value.__doc__ def _slice(self, slobj, axis=0, kind=None): if axis == 0: diff --git a/pandas/core/sparse/series.py b/pandas/core/sparse/series.py index 5166dc927989e..3255bd6bd17e8 100644 --- a/pandas/core/sparse/series.py +++ b/pandas/core/sparse/series.py @@ -426,7 +426,7 @@ def _get_values(self, indexer): return self[indexer] def _set_with_engine(self, key, value): - return self.set_value(key, value) + return self._set_value(key, value) def abs(self): """ @@ -466,6 +466,10 @@ def get_value(self, label, takeable=False): """ Retrieve single value at passed index label + .. deprecated:: 0.21.0 + + Please use .at[] or .iat[] accessors. + Parameters ---------- index : label @@ -475,8 +479,17 @@ def get_value(self, label, takeable=False): ------- value : scalar value """ + warnings.warn("get_value is deprecated and will be removed " + "in a future release. Please use " + ".at[] or .iat[] accessors instead", FutureWarning, + stacklevel=2) + + return self._get_value(label, takeable=takeable) + + def _get_value(self, label, takeable=False): loc = label if takeable is True else self.index.get_loc(label) return self._get_val_at(loc) + _get_value.__doc__ = get_value.__doc__ def set_value(self, label, value, takeable=False): """ @@ -484,6 +497,10 @@ def set_value(self, label, value, takeable=False): new object is created with the label placed at the end of the result index + .. deprecated:: 0.21.0 + + Please use .at[] or .iat[] accessors. + Parameters ---------- label : object @@ -501,11 +518,18 @@ def set_value(self, label, value, takeable=False): ------- series : SparseSeries """ + warnings.warn("set_value is deprecated and will be removed " + "in a future release. 
Please use " + ".at[] or .iat[] accessors instead", FutureWarning, + stacklevel=2) + return self._set_value(label, value, takeable=takeable) + + def _set_value(self, label, value, takeable=False): values = self.to_dense() # if the label doesn't exist, we will create a new object here # and possibily change the index - new_values = values.set_value(label, value, takeable=takeable) + new_values = values._set_value(label, value, takeable=takeable) if new_values is not None: values = new_values new_index = values.index @@ -513,6 +537,7 @@ def set_value(self, label, value, takeable=False): kind=self.kind) self._data = SingleBlockManager(values, new_index) self._index = new_index + _set_value.__doc__ = set_value.__doc__ def _set_values(self, key, value): diff --git a/pandas/tests/frame/test_api.py b/pandas/tests/frame/test_api.py index 5ea8230ced41b..be6d81c63ae1e 100644 --- a/pandas/tests/frame/test_api.py +++ b/pandas/tests/frame/test_api.py @@ -69,7 +69,9 @@ def test_getitem_pop_assign_name(self): def test_get_value(self): for idx in self.frame.index: for col in self.frame.columns: - result = self.frame.get_value(idx, col) + with tm.assert_produces_warning(FutureWarning, + check_stacklevel=False): + result = self.frame.get_value(idx, col) expected = self.frame[col][idx] tm.assert_almost_equal(result, expected) diff --git a/pandas/tests/frame/test_constructors.py b/pandas/tests/frame/test_constructors.py index d0cd1899a0a3c..7f1cc12ec4277 100644 --- a/pandas/tests/frame/test_constructors.py +++ b/pandas/tests/frame/test_constructors.py @@ -258,8 +258,12 @@ def test_constructor_dict(self): # Dict with None value frame_none = DataFrame(dict(a=None), index=[0]) frame_none_list = DataFrame(dict(a=[None]), index=[0]) - assert frame_none.get_value(0, 'a') is None - assert frame_none_list.get_value(0, 'a') is None + with tm.assert_produces_warning(FutureWarning, + check_stacklevel=False): + assert frame_none.get_value(0, 'a') is None + with tm.assert_produces_warning(FutureWarning, + check_stacklevel=False): + assert frame_none_list.get_value(0, 'a') is None tm.assert_frame_equal(frame_none, frame_none_list) # GH10856 @@ -509,7 +513,9 @@ def test_nested_dict_frame_constructor(self): data = {} for col in df.columns: for row in df.index: - data.setdefault(col, {})[row] = df.get_value(row, col) + with tm.assert_produces_warning(FutureWarning, + check_stacklevel=False): + data.setdefault(col, {})[row] = df.get_value(row, col) result = DataFrame(data, columns=rng) tm.assert_frame_equal(result, df) @@ -517,7 +523,9 @@ def test_nested_dict_frame_constructor(self): data = {} for col in df.columns: for row in df.index: - data.setdefault(row, {})[col] = df.get_value(row, col) + with tm.assert_produces_warning(FutureWarning, + check_stacklevel=False): + data.setdefault(row, {})[col] = df.get_value(row, col) result = DataFrame(data, index=rng).T tm.assert_frame_equal(result, df) diff --git a/pandas/tests/frame/test_indexing.py b/pandas/tests/frame/test_indexing.py index dd2759cd3ef8e..d00f56830a6fa 100644 --- a/pandas/tests/frame/test_indexing.py +++ b/pandas/tests/frame/test_indexing.py @@ -1629,7 +1629,9 @@ def test_getitem_list_duplicates(self): def test_get_value(self): for idx in self.frame.index: for col in self.frame.columns: - result = self.frame.get_value(idx, col) + with tm.assert_produces_warning(FutureWarning, + check_stacklevel=False): + result = self.frame.get_value(idx, col) expected = self.frame[col][idx] assert result == expected @@ -1637,7 +1639,9 @@ def test_lookup(self): def alt(df, rows, 
cols, dtype): result = [] for r, c in zip(rows, cols): - result.append(df.get_value(r, c)) + with tm.assert_produces_warning(FutureWarning, + check_stacklevel=False): + result.append(df.get_value(r, c)) return np.array(result, dtype=dtype) def testit(df): @@ -1671,32 +1675,48 @@ def testit(df): def test_set_value(self): for idx in self.frame.index: for col in self.frame.columns: - self.frame.set_value(idx, col, 1) + with tm.assert_produces_warning(FutureWarning, + check_stacklevel=False): + self.frame.set_value(idx, col, 1) assert self.frame[col][idx] == 1 def test_set_value_resize(self): - res = self.frame.set_value('foobar', 'B', 0) + with tm.assert_produces_warning(FutureWarning, + check_stacklevel=False): + res = self.frame.set_value('foobar', 'B', 0) assert res is self.frame assert res.index[-1] == 'foobar' - assert res.get_value('foobar', 'B') == 0 + with tm.assert_produces_warning(FutureWarning, + check_stacklevel=False): + assert res.get_value('foobar', 'B') == 0 self.frame.loc['foobar', 'qux'] = 0 - assert self.frame.get_value('foobar', 'qux') == 0 + with tm.assert_produces_warning(FutureWarning, + check_stacklevel=False): + assert self.frame.get_value('foobar', 'qux') == 0 res = self.frame.copy() - res3 = res.set_value('foobar', 'baz', 'sam') + with tm.assert_produces_warning(FutureWarning, + check_stacklevel=False): + res3 = res.set_value('foobar', 'baz', 'sam') assert res3['baz'].dtype == np.object_ res = self.frame.copy() - res3 = res.set_value('foobar', 'baz', True) + with tm.assert_produces_warning(FutureWarning, + check_stacklevel=False): + res3 = res.set_value('foobar', 'baz', True) assert res3['baz'].dtype == np.object_ res = self.frame.copy() - res3 = res.set_value('foobar', 'baz', 5) + with tm.assert_produces_warning(FutureWarning, + check_stacklevel=False): + res3 = res.set_value('foobar', 'baz', 5) assert is_float_dtype(res3['baz']) assert isna(res3['baz'].drop(['foobar'])).all() - pytest.raises(ValueError, res3.set_value, 'foobar', 'baz', 'sam') + with tm.assert_produces_warning(FutureWarning, + check_stacklevel=False): + pytest.raises(ValueError, res3.set_value, 'foobar', 'baz', 'sam') def test_set_value_with_index_dtype_change(self): df_orig = DataFrame(randn(3, 3), index=lrange(3), columns=list('ABC')) @@ -1704,7 +1724,9 @@ def test_set_value_with_index_dtype_change(self): # this is actually ambiguous as the 2 is interpreted as a positional # so column is not created df = df_orig.copy() - df.set_value('C', 2, 1.0) + with tm.assert_produces_warning(FutureWarning, + check_stacklevel=False): + df.set_value('C', 2, 1.0) assert list(df.index) == list(df_orig.index) + ['C'] # assert list(df.columns) == list(df_orig.columns) + [2] @@ -1715,7 +1737,9 @@ def test_set_value_with_index_dtype_change(self): # create both new df = df_orig.copy() - df.set_value('C', 'D', 1.0) + with tm.assert_produces_warning(FutureWarning, + check_stacklevel=False): + df.set_value('C', 'D', 1.0) assert list(df.index) == list(df_orig.index) + ['C'] assert list(df.columns) == list(df_orig.columns) + ['D'] @@ -1728,8 +1752,9 @@ def test_get_set_value_no_partial_indexing(self): # partial w/ MultiIndex raise exception index = MultiIndex.from_tuples([(0, 1), (0, 2), (1, 1), (1, 2)]) df = DataFrame(index=index, columns=lrange(4)) - pytest.raises(KeyError, df.get_value, 0, 1) - # pytest.raises(KeyError, df.set_value, 0, 1, 0) + with tm.assert_produces_warning(FutureWarning, + check_stacklevel=False): + pytest.raises(KeyError, df.get_value, 0, 1) def test_single_element_ix_dont_upcast(self): 
self.frame['E'] = 1 diff --git a/pandas/tests/series/test_indexing.py b/pandas/tests/series/test_indexing.py index 93e7b81163b54..0ca319565e24b 100644 --- a/pandas/tests/series/test_indexing.py +++ b/pandas/tests/series/test_indexing.py @@ -800,13 +800,17 @@ def test_setitem_dtypes(self): def test_set_value(self): idx = self.ts.index[10] - res = self.ts.set_value(idx, 0) + with tm.assert_produces_warning(FutureWarning, + check_stacklevel=False): + res = self.ts.set_value(idx, 0) assert res is self.ts assert self.ts[idx] == 0 # equiv s = self.series.copy() - res = s.set_value('foobar', 0) + with tm.assert_produces_warning(FutureWarning, + check_stacklevel=False): + res = s.set_value('foobar', 0) assert res is s assert res.index[-1] == 'foobar' assert res['foobar'] == 0 @@ -2632,8 +2636,12 @@ def test_series_set_value(self): dates = [datetime(2001, 1, 1), datetime(2001, 1, 2)] index = DatetimeIndex(dates) - s = Series().set_value(dates[0], 1.) - s2 = s.set_value(dates[1], np.nan) + with tm.assert_produces_warning(FutureWarning, + check_stacklevel=False): + s = Series().set_value(dates[0], 1.) + with tm.assert_produces_warning(FutureWarning, + check_stacklevel=False): + s2 = s.set_value(dates[1], np.nan) exp = Series([1., np.nan], index=index) diff --git a/pandas/tests/sparse/test_frame.py b/pandas/tests/sparse/test_frame.py index ed4a3a9e5f75f..ef94e2f78278d 100644 --- a/pandas/tests/sparse/test_frame.py +++ b/pandas/tests/sparse/test_frame.py @@ -430,22 +430,32 @@ def test_set_value(self): # ok, as the index gets converted to object frame = self.frame.copy() - res = frame.set_value('foobar', 'B', 1.5) + with tm.assert_produces_warning(FutureWarning, + check_stacklevel=False): + res = frame.set_value('foobar', 'B', 1.5) assert res.index.dtype == 'object' res = self.frame res.index = res.index.astype(object) - res = self.frame.set_value('foobar', 'B', 1.5) + with tm.assert_produces_warning(FutureWarning, + check_stacklevel=False): + res = self.frame.set_value('foobar', 'B', 1.5) assert res is not self.frame assert res.index[-1] == 'foobar' - assert res.get_value('foobar', 'B') == 1.5 + with tm.assert_produces_warning(FutureWarning, + check_stacklevel=False): + assert res.get_value('foobar', 'B') == 1.5 - res2 = res.set_value('foobar', 'qux', 1.5) + with tm.assert_produces_warning(FutureWarning, + check_stacklevel=False): + res2 = res.set_value('foobar', 'qux', 1.5) assert res2 is not res tm.assert_index_equal(res2.columns, pd.Index(list(self.frame.columns) + ['qux'])) - assert res2.get_value('foobar', 'qux') == 1.5 + with tm.assert_produces_warning(FutureWarning, + check_stacklevel=False): + assert res2.get_value('foobar', 'qux') == 1.5 def test_fancy_index_misc(self): # axis = 0 diff --git a/pandas/tests/sparse/test_series.py b/pandas/tests/sparse/test_series.py index 13dab68b2e5b4..7c7399317809f 100644 --- a/pandas/tests/sparse/test_series.py +++ b/pandas/tests/sparse/test_series.py @@ -465,15 +465,22 @@ def test_get_get_value(self): expected = self.btseries.to_dense()[dt] tm.assert_almost_equal(result, expected) - tm.assert_almost_equal(self.bseries.get_value(10), self.bseries[10]) + with tm.assert_produces_warning(FutureWarning, + check_stacklevel=False): + tm.assert_almost_equal( + self.bseries.get_value(10), self.bseries[10]) def test_set_value(self): idx = self.btseries.index[7] - self.btseries.set_value(idx, 0) + with tm.assert_produces_warning(FutureWarning, + check_stacklevel=False): + self.btseries.set_value(idx, 0) assert self.btseries[idx] == 0 - self.iseries.set_value('foobar', 
0) + with tm.assert_produces_warning(FutureWarning, + check_stacklevel=False): + self.iseries.set_value('foobar', 0) assert self.iseries.index[-1] == 'foobar' assert self.iseries['foobar'] == 0 diff --git a/pandas/tests/test_panel.py b/pandas/tests/test_panel.py index a6113f231f8f2..c8e056f156218 100644 --- a/pandas/tests/test_panel.py +++ b/pandas/tests/test_panel.py @@ -405,7 +405,9 @@ def test_get_value(self): for item in self.panel.items: for mjr in self.panel.major_axis[::2]: for mnr in self.panel.minor_axis: - result = self.panel.get_value(item, mjr, mnr) + with tm.assert_produces_warning(FutureWarning, + check_stacklevel=False): + result = self.panel.get_value(item, mjr, mnr) expected = self.panel[item][mnr][mjr] assert_almost_equal(result, expected) @@ -867,16 +869,17 @@ def test_comp(func): test_comp(operator.le) def test_get_value(self): - for item in self.panel.items: - for mjr in self.panel.major_axis[::2]: - for mnr in self.panel.minor_axis: - result = self.panel.get_value(item, mjr, mnr) - expected = self.panel[item][mnr][mjr] - assert_almost_equal(result, expected) - with tm.assert_raises_regex(TypeError, - "There must be an argument " - "for each axis"): - self.panel.get_value('a') + with catch_warnings(record=True): + for item in self.panel.items: + for mjr in self.panel.major_axis[::2]: + for mnr in self.panel.minor_axis: + result = self.panel.get_value(item, mjr, mnr) + expected = self.panel[item][mnr][mjr] + assert_almost_equal(result, expected) + with tm.assert_raises_regex(TypeError, + "There must be an argument " + "for each axis"): + self.panel.get_value('a') def test_set_value(self): with catch_warnings(record=True):
closes #15269
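For reference, a sketch of the recommended replacements (illustrative frame, not from the diff):

```python
import numpy as np
import pandas as pd

df = pd.DataFrame(np.arange(6).reshape(2, 3),
                  index=['r0', 'r1'], columns=['a', 'b', 'c'])

# deprecated, now emit a FutureWarning:
# df.get_value('r0', 'b')
# df.set_value('r0', 'b', 99)

# replacements: label-based .at[] and positional .iat[]
val = df.at['r0', 'b']
df.at['r0', 'b'] = 99
pos = df.iat[0, 1]
```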
https://api.github.com/repos/pandas-dev/pandas/pulls/17739
2017-10-02T12:30:58Z
2017-10-05T12:18:20Z
2017-10-05T12:18:20Z
2017-10-05T12:20:01Z
BUG: Regression in .loc accepting a boolean Index as an indexer
diff --git a/doc/source/whatsnew/v0.21.0.txt b/doc/source/whatsnew/v0.21.0.txt index c8a0a6bff5cc7..23ff7cb9f34d4 100644 --- a/doc/source/whatsnew/v0.21.0.txt +++ b/doc/source/whatsnew/v0.21.0.txt @@ -667,6 +667,7 @@ Indexing - Bug in ``IntervalIndex`` where performing a scalar lookup fails for included right endpoints of non-overlapping monotonic decreasing indexes (:issue:`16417`, :issue:`17271`) - Bug in :meth:`DataFrame.first_valid_index` and :meth:`DataFrame.last_valid_index` when no valid entry (:issue:`17400`) - Bug in :func:`Series.rename` when called with a `callable`, incorrectly alters the name of the `Series`, rather than the name of the `Index`. (:issue:`17407`) +- Regression in ``.loc`` accepting a boolean ``Index`` as an indexer (:issue:`17131`) I/O ^^^ diff --git a/pandas/core/common.py b/pandas/core/common.py index 0f7b86f5e74a0..2686ad370e1ed 100644 --- a/pandas/core/common.py +++ b/pandas/core/common.py @@ -15,7 +15,7 @@ from pandas import compat from pandas.compat import long, zip, iteritems from pandas.core.config import get_option -from pandas.core.dtypes.generic import ABCSeries +from pandas.core.dtypes.generic import ABCSeries, ABCIndex from pandas.core.dtypes.common import _NS_DTYPE from pandas.core.dtypes.inference import _iterable_not_string from pandas.core.dtypes.missing import isna, isnull, notnull # noqa @@ -182,7 +182,7 @@ def _maybe_box_datetimelike(value): def is_bool_indexer(key): - if isinstance(key, (ABCSeries, np.ndarray)): + if isinstance(key, (ABCSeries, np.ndarray, ABCIndex)): if key.dtype == np.object_: key = np.asarray(_values_from_object(key)) diff --git a/pandas/tests/indexing/test_loc.py b/pandas/tests/indexing/test_loc.py index 17316a714e260..95d6a24e68425 100644 --- a/pandas/tests/indexing/test_loc.py +++ b/pandas/tests/indexing/test_loc.py @@ -317,6 +317,23 @@ def test_loc_getitem_label_slice(self): self.check_result('mixed slice', 'loc', slice(2, 4, 2), 'ix', slice( 2, 4, 2), typs=['mixed'], axes=0, fails=TypeError) + def test_loc_index(self): + # gh-17131 + # a boolean index should index like a boolean numpy array + + df = DataFrame( + np.random.random(size=(5, 10)), + index=["alpha_0", "alpha_1", "alpha_2", "beta_0", "beta_1"]) + + mask = df.index.map(lambda x: "alpha" in x) + expected = df.loc[np.array(mask)] + + result = df.loc[mask] + tm.assert_frame_equal(result, expected) + + result = df.loc[mask.values] + tm.assert_frame_equal(result, expected) + def test_loc_general(self): df = DataFrame(
closes #17131
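A sketch of the regression case, adapted from the test added above (names are illustrative):

```python
import numpy as np
import pandas as pd

df = pd.DataFrame(np.random.random((5, 10)),
                  index=['alpha_0', 'alpha_1', 'alpha_2',
                         'beta_0', 'beta_1'])

# Index.map returns a boolean Index here; with the fix it is
# treated like a boolean numpy array again
mask = df.index.map(lambda x: 'alpha' in x)
result = df.loc[mask]  # same rows as df.loc[np.asarray(mask)]
```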
https://api.github.com/repos/pandas-dev/pandas/pulls/17738
2017-10-02T10:32:10Z
2017-10-02T11:59:16Z
2017-10-02T11:59:16Z
2017-10-02T12:50:33Z
Fixed the memory usage explanation of categorical in gotchas from O(n…
diff --git a/doc/source/categorical.rst b/doc/source/categorical.rst index cadbc895354b7..c5bbc3c004675 100644 --- a/doc/source/categorical.rst +++ b/doc/source/categorical.rst @@ -979,7 +979,7 @@ Memory Usage .. _categorical.memory: -The memory usage of a ``Categorical`` is proportional to the number of categories times the length of the data. In contrast, +The memory usage of a ``Categorical`` is proportional to the number of categories plus the length of the data. In contrast, an ``object`` dtype is a constant times the length of the data. .. ipython:: python
…m) to O(n+m)

- [ ] closes #17705
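A quick illustration of the corrected claim (illustrative data; byte counts vary by platform):

```python
import pandas as pd

s = pd.Series(['foo', 'bar', 'baz'] * 100000)

# object dtype: memory grows as a constant times the length n
obj_bytes = s.memory_usage(deep=True)

# category dtype: an integer codes array of length n plus the
# m distinct category values, i.e. O(n + m), not O(n * m)
cat_bytes = s.astype('category').memory_usage(deep=True)
assert cat_bytes < obj_bytes
```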
https://api.github.com/repos/pandas-dev/pandas/pulls/17736
2017-10-02T06:37:32Z
2017-10-02T10:18:22Z
2017-10-02T10:18:22Z
2017-10-02T10:18:24Z
CLN: replace %s syntax with .format in pandas.core: categorical, common, config, config_init
diff --git a/pandas/core/categorical.py b/pandas/core/categorical.py index 61e28dde2e34c..5619f15ac85d9 100644 --- a/pandas/core/categorical.py +++ b/pandas/core/categorical.py @@ -263,7 +263,8 @@ def __init__(self, values, categories=None, ordered=None, dtype=None, if dtype == 'category': dtype = CategoricalDtype(categories, ordered) else: - raise ValueError("Unknown `dtype` {}".format(dtype)) + msg = "Unknown `dtype` {dtype}" + raise ValueError(msg.format(dtype=dtype)) elif categories is not None or ordered is not None: raise ValueError("Cannot specify both `dtype` and `categories`" " or `ordered`.") @@ -931,9 +932,9 @@ def add_categories(self, new_categories, inplace=False): new_categories = [new_categories] already_included = set(new_categories) & set(self.dtype.categories) if len(already_included) != 0: - msg = ("new categories must not include old categories: %s" % - str(already_included)) - raise ValueError(msg) + msg = ("new categories must not include old categories: " + "{already_included!s}") + raise ValueError(msg.format(already_included=already_included)) new_categories = list(self.dtype.categories) + list(new_categories) new_dtype = CategoricalDtype(new_categories, self.ordered) @@ -989,8 +990,8 @@ def remove_categories(self, removals, inplace=False): new_categories = [x for x in new_categories if notna(x)] if len(not_included) != 0: - raise ValueError("removals must all be in old categories: %s" % - str(not_included)) + msg = "removals must all be in old categories: {not_included!s}" + raise ValueError(msg.format(not_included=not_included)) return self.set_categories(new_categories, ordered=self.ordered, rename=False, inplace=inplace) @@ -1443,7 +1444,8 @@ def sort_values(self, inplace=False, ascending=True, na_position='last'): """ inplace = validate_bool_kwarg(inplace, 'inplace') if na_position not in ['last', 'first']: - raise ValueError('invalid na_position: {!r}'.format(na_position)) + msg = 'invalid na_position: {na_position!r}' + raise ValueError(msg.format(na_position=na_position)) codes = np.sort(self._codes) if not ascending: @@ -1653,9 +1655,10 @@ def _tidy_repr(self, max_vals=10, footer=True): head = self[:num]._get_repr(length=False, footer=False) tail = self[-(max_vals - num):]._get_repr(length=False, footer=False) - result = '%s, ..., %s' % (head[:-1], tail[1:]) + result = u('{head}, ..., {tail}').format(head=head[:-1], tail=tail[1:]) if footer: - result = '%s\n%s' % (result, self._repr_footer()) + result = u('{result}\n{footer}').format(result=result, + footer=self._repr_footer()) return compat.text_type(result) @@ -1683,7 +1686,8 @@ def _repr_categories_info(self): dtype = getattr(self.categories, 'dtype_str', str(self.categories.dtype)) - levheader = "Categories (%d, %s): " % (len(self.categories), dtype) + levheader = "Categories ({length}, {dtype}): ".format( + length=len(self.categories), dtype=dtype) width, height = get_terminal_size() max_width = get_option("display.width") or width if com.in_ipython_frontend(): @@ -1708,7 +1712,8 @@ def _repr_categories_info(self): def _repr_footer(self): - return u('Length: %d\n%s') % (len(self), self._repr_categories_info()) + return u('Length: {length}\n{info}').format( + length=len(self), info=self._repr_categories_info()) def _get_repr(self, length=True, na_rep='NaN', footer=True): from pandas.io.formats import format as fmt @@ -1725,9 +1730,8 @@ def __unicode__(self): elif len(self._codes) > 0: result = self._get_repr(length=len(self) > _maxlen) else: - result = ('[], %s' % - self._get_repr(length=False, - 
footer=True, ).replace("\n", ", ")) + msg = self._get_repr(length=False, footer=True).replace("\n", ", ") + result = ('[], {repr_msg}'.format(repr_msg=msg)) return result @@ -1869,8 +1873,8 @@ def _reduce(self, op, name, axis=0, skipna=True, numeric_only=None, """ perform the reduction type operation """ func = getattr(self, name, None) if func is None: - raise TypeError("Categorical cannot perform the operation " - "{op}".format(op=name)) + msg = 'Categorical cannot perform the operation {op}' + raise TypeError(msg.format(op=name)) return func(numeric_only=numeric_only, **kwds) def min(self, numeric_only=None, **kwargs): diff --git a/pandas/core/common.py b/pandas/core/common.py index 515a401096120..0f7b86f5e74a0 100644 --- a/pandas/core/common.py +++ b/pandas/core/common.py @@ -96,8 +96,8 @@ def __init__(self, class_instance): self.class_instance = class_instance def __str__(self): - return ("This method must be defined in the concrete class of %s" % - self.class_instance.__class__.__name__) + msg = "This method must be defined in the concrete class of {name}" + return (msg.format(name=self.class_instance.__class__.__name__)) def flatten(l): @@ -150,8 +150,8 @@ def _maybe_match_name(a, b): def _get_info_slice(obj, indexer): """Slice the info axis of `obj` with `indexer`.""" if not hasattr(obj, '_info_axis_number'): - raise TypeError('object of type %r has no info axis' % - type(obj).__name__) + msg = 'object of type {typ!r} has no info axis' + raise TypeError(msg.format(typ=type(obj).__name__)) slices = [slice(None)] * obj.ndim slices[obj._info_axis_number] = indexer return tuple(slices) @@ -214,8 +214,8 @@ def _mut_exclusive(**kwargs): label1, val1 = item1 label2, val2 = item2 if val1 is not None and val2 is not None: - raise TypeError('mutually exclusive arguments: %r and %r' % - (label1, label2)) + msg = 'mutually exclusive arguments: {label1!r} and {label2!r}' + raise TypeError(msg.format(label1=label1, label2=label2)) elif val1 is not None: return val1 else: @@ -517,7 +517,7 @@ def standardize_mapping(into): collections.defaultdict, into.default_factory) into = type(into) if not issubclass(into, collections.Mapping): - raise TypeError('unsupported type: {}'.format(into)) + raise TypeError('unsupported type: {into}'.format(into=into)) elif into == collections.defaultdict: raise TypeError( 'to_dict() only accepts initialized defaultdicts') diff --git a/pandas/core/config.py b/pandas/core/config.py index b406f6724aa6d..2354b7ca04e7f 100644 --- a/pandas/core/config.py +++ b/pandas/core/config.py @@ -80,7 +80,7 @@ def _get_single_key(pat, silent): if len(keys) == 0: if not silent: _warn_if_deprecated(pat) - raise OptionError('No such keys(s): %r' % pat) + raise OptionError('No such keys(s): {pat!r}'.format(pat=pat)) if len(keys) > 1: raise OptionError('Pattern matched multiple keys') key = keys[0] @@ -112,8 +112,8 @@ def _set_option(*args, **kwargs): silent = kwargs.pop('silent', False) if kwargs: - raise TypeError('_set_option() got an unexpected keyword ' - 'argument "{0}"'.format(list(kwargs.keys())[0])) + msg = '_set_option() got an unexpected keyword argument "{kwarg}"' + raise TypeError(msg.format(list(kwargs.keys())[0])) for k, v in zip(args[::2], args[1::2]): key = _get_single_key(k, silent) @@ -436,9 +436,11 @@ def register_option(key, defval, doc='', validator=None, cb=None): key = key.lower() if key in _registered_options: - raise OptionError("Option '%s' has already been registered" % key) + msg = "Option '{key}' has already been registered" + raise 
OptionError(msg.format(key=key)) if key in _reserved_keys: - raise OptionError("Option '%s' is a reserved key" % key) + msg = "Option '{key}' is a reserved key" + raise OptionError(msg.format(key=key)) # the default value should be legal if validator: @@ -449,22 +451,21 @@ def register_option(key, defval, doc='', validator=None, cb=None): for k in path: if not bool(re.match('^' + tokenize.Name + '$', k)): - raise ValueError("%s is not a valid identifier" % k) + raise ValueError("{k} is not a valid identifier".format(k=k)) if keyword.iskeyword(k): - raise ValueError("%s is a python keyword" % k) + raise ValueError("{k} is a python keyword".format(k=k)) cursor = _global_config + msg = "Path prefix to option '{option}' is already an option" for i, p in enumerate(path[:-1]): if not isinstance(cursor, dict): - raise OptionError("Path prefix to option '%s' is already an option" - % '.'.join(path[:i])) + raise OptionError(msg.format(option='.'.join(path[:i]))) if p not in cursor: cursor[p] = {} cursor = cursor[p] if not isinstance(cursor, dict): - raise OptionError("Path prefix to option '%s' is already an option" % - '.'.join(path[:-1])) + raise OptionError(msg.format(option='.'.join(path[:-1]))) cursor[path[-1]] = defval # initialize @@ -516,8 +517,8 @@ def deprecate_option(key, msg=None, rkey=None, removal_ver=None): key = key.lower() if key in _deprecated_options: - raise OptionError("Option '%s' has already been defined as deprecated." - % key) + msg = "Option '{key}' has already been defined as deprecated." + raise OptionError(msg.format(key=key)) _deprecated_options[key] = DeprecatedOption(key, msg, rkey, removal_ver) @@ -614,11 +615,12 @@ def _warn_if_deprecated(key): print(d.msg) warnings.warn(d.msg, DeprecationWarning) else: - msg = "'%s' is deprecated" % key + msg = "'{key}' is deprecated".format(key=key) if d.removal_ver: - msg += ' and will be removed in %s' % d.removal_ver + msg += (' and will be removed in {version}' + .format(version=d.removal_ver)) if d.rkey: - msg += ", please use '%s' instead." % d.rkey + msg += ", please use '{rkey}' instead.".format(rkey=d.rkey) else: msg += ', please refrain from using it.' @@ -633,7 +635,7 @@ def _build_option_description(k): o = _get_registered_option(k) d = _get_deprecated_option(k) - s = u('%s ') % k + s = u('{k} ').format(k=k) if o.doc: s += '\n'.join(o.doc.strip().split('\n')) @@ -641,12 +643,13 @@ def _build_option_description(k): s += 'No description available.' 
if o: - s += u('\n [default: %s] [currently: %s]') % (o.defval, - _get_option(k, True)) + s += (u('\n [default: {default}] [currently: {current}]') + .format(default=o.defval, current=_get_option(k, True))) if d: s += u('\n (Deprecated') - s += (u(', use `%s` instead.') % d.rkey if d.rkey else '') + s += (u(', use `{rkey}` instead.') + .format(rkey=d.rkey if d.rkey else '')) s += u(')') s += '\n\n' @@ -718,7 +721,7 @@ def config_prefix(prefix): def wrap(func): def inner(key, *args, **kwds): - pkey = '%s.%s' % (prefix, key) + pkey = '{prefix}.{key}'.format(prefix=prefix, key=key) return func(pkey, *args, **kwds) return inner @@ -754,7 +757,8 @@ def is_type_factory(_type): def inner(x): if type(x) != _type: - raise ValueError("Value must have type '%s'" % str(_type)) + msg = "Value must have type '{typ!s}'" + raise ValueError(msg.format(typ=_type)) return inner @@ -777,11 +781,12 @@ def is_instance_factory(_type): from pandas.io.formats.printing import pprint_thing type_repr = "|".join(map(pprint_thing, _type)) else: - type_repr = "'%s'" % _type + type_repr = "'{typ}'".format(typ=_type) def inner(x): if not isinstance(x, _type): - raise ValueError("Value must be an instance of %s" % type_repr) + msg = "Value must be an instance of {type_repr}" + raise ValueError(msg.format(type_repr=type_repr)) return inner @@ -797,10 +802,10 @@ def inner(x): if not any([c(x) for c in callables]): pp_values = pp("|".join(lmap(pp, legal_values))) - msg = "Value must be one of {0}".format(pp_values) + msg = "Value must be one of {pp_values}" if len(callables): msg += " or a callable" - raise ValueError(msg) + raise ValueError(msg.format(pp_values=pp_values)) return inner diff --git a/pandas/core/config_init.py b/pandas/core/config_init.py index ea5c213dbe057..5652424a8f75b 100644 --- a/pandas/core/config_init.py +++ b/pandas/core/config_init.py @@ -453,10 +453,10 @@ def use_inf_as_na_cb(key): cf.register_option(ext + '.writer', default, doc, validator=str) def _register_xlsx(engine, other): - cf.register_option('xlsx.writer', engine, - writer_engine_doc.format(ext='xlsx', default=engine, - others=", '%s'" % other), - validator=str) + others = ", '{other}'".format(other=other) + doc = writer_engine_doc.format(ext='xlsx', default=engine, + others=others) + cf.register_option('xlsx.writer', engine, doc, validator=str) try: # better memory footprint
Progress towards #16130 - [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` Replaced `%s` syntax with `.format` in pandas.core: `categorical.py`, `common.py`, `config.py`, `config_init.py`. Additionally, made some of the existing positional `.format` code more explicit.
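For reference, the conversion pattern applied throughout looks like this (a minimal sketch, not copied from any single call site):

```python
key = "display.width"

# old: positional %-interpolation
msg_old = "Option '%s' is a reserved key" % key

# new: str.format with an explicit named placeholder
msg_new = "Option '{key}' is a reserved key".format(key=key)

assert msg_old == msg_new
```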
https://api.github.com/repos/pandas-dev/pandas/pulls/17735
2017-10-02T06:05:07Z
2017-10-02T10:19:54Z
2017-10-02T10:19:54Z
2017-10-02T13:39:29Z
TST: add backward compat for offset testing for pickles
diff --git a/pandas/tests/io/data/legacy_pickle/0.17.0/0.17.0_x86_64_darwin_3.5.3.pickle b/pandas/tests/io/data/legacy_pickle/0.17.0/0.17.0_x86_64_darwin_3.5.3.pickle new file mode 100644 index 0000000000000..537864af7028b Binary files /dev/null and b/pandas/tests/io/data/legacy_pickle/0.17.0/0.17.0_x86_64_darwin_3.5.3.pickle differ diff --git a/pandas/tests/io/data/legacy_pickle/0.18.1/0.18.1_x86_64_darwin_3.5.2.pickle b/pandas/tests/io/data/legacy_pickle/0.18.1/0.18.1_x86_64_darwin_3.5.2.pickle index db1d17a8b67c3..20af597c57a1b 100644 Binary files a/pandas/tests/io/data/legacy_pickle/0.18.1/0.18.1_x86_64_darwin_3.5.2.pickle and b/pandas/tests/io/data/legacy_pickle/0.18.1/0.18.1_x86_64_darwin_3.5.2.pickle differ diff --git a/pandas/tests/io/generate_legacy_storage_files.py b/pandas/tests/io/generate_legacy_storage_files.py index 996965999724e..1cb2081409312 100755 --- a/pandas/tests/io/generate_legacy_storage_files.py +++ b/pandas/tests/io/generate_legacy_storage_files.py @@ -1,6 +1,39 @@ #!/usr/env/bin python -""" self-contained to write legacy storage (pickle/msgpack) files """ +""" +self-contained to write legacy storage (pickle/msgpack) files + +To use this script. Create an environment where you want +generate pickles, say its for 0.18.1, with your pandas clone +in ~/pandas + +. activate pandas_0.18.1 +cd ~/ + +$ python pandas/pandas/tests/io/generate_legacy_storage_files.py \ + pandas/pandas/tests/io/data/legacy_pickle/0.18.1/ pickle + +This script generates a storage file for the current arch, system, +and python version + pandas version: 0.18.1 + output dir : pandas/pandas/tests/io/data/legacy_pickle/0.18.1/ + storage format: pickle +created pickle file: 0.18.1_x86_64_darwin_3.5.2.pickle + +The idea here is you are using the *current* version of the +generate_legacy_storage_files with an *older* version of pandas to +generate a pickle file. We will then check this file into a current +branch, and test using test_pickle.py. This will load the *older* +pickles and test versus the current data that is generated +(with master). These are then compared. + +If we have cases where we changed the signature (e.g. we renamed +offset -> freq in Timestamp). Then we have to conditionally execute +in the generate_legacy_storage_files.py to make it +run under the older AND the newer version. 
+ +""" + from __future__ import print_function from warnings import catch_warnings from distutils.version import LooseVersion @@ -9,6 +42,11 @@ Index, MultiIndex, bdate_range, to_msgpack, date_range, period_range, Timestamp, NaT, Categorical, Period) +from pandas.tseries.offsets import ( + DateOffset, Hour, Minute, Day, + MonthBegin, MonthEnd, YearBegin, + YearEnd, Week, + QuarterBegin, QuarterEnd) from pandas.compat import u import os import sys @@ -151,10 +189,28 @@ def create_data(): timestamp = dict(normal=Timestamp('2011-01-01'), nat=NaT, - tz=Timestamp('2011-01-01', tz='US/Eastern'), - freq=Timestamp('2011-01-01', freq='D'), - both=Timestamp('2011-01-01', tz='Asia/Tokyo', - freq='M')) + tz=Timestamp('2011-01-01', tz='US/Eastern')) + + if _loose_version < '0.19.2': + timestamp['freq'] = Timestamp('2011-01-01', offset='D') + timestamp['both'] = Timestamp('2011-01-01', tz='Asia/Tokyo', + offset='M') + else: + timestamp['freq'] = Timestamp('2011-01-01', freq='D') + timestamp['both'] = Timestamp('2011-01-01', tz='Asia/Tokyo', + freq='M') + + off = {'DateOffset': DateOffset(years=1), + 'MonthBegin': MonthBegin(1), + 'MonthEnd': MonthEnd(1), + 'QuarterBegin': QuarterBegin(1), + 'QuarterEnd': QuarterEnd(1), + 'Day': Day(1), + 'YearBegin': YearBegin(1), + 'YearEnd': YearEnd(1), + 'Week': Week(1), + 'Hour': Hour(1), + 'Minute': Minute(1)} return dict(series=series, frame=frame, @@ -166,7 +222,8 @@ def create_data(): ts=_create_sp_tsseries()), sp_frame=dict(float=_create_sp_frame()), cat=cat, - timestamp=timestamp) + timestamp=timestamp, + offsets=off) def create_pickle_data(): diff --git a/pandas/tests/io/test_pickle.py b/pandas/tests/io/test_pickle.py index d56b36779efe7..91c1f19f5caab 100644 --- a/pandas/tests/io/test_pickle.py +++ b/pandas/tests/io/test_pickle.py @@ -193,26 +193,18 @@ def legacy_pickle_versions(): for v in os.listdir(path): p = os.path.join(path, v) if os.path.isdir(p): - yield v + for f in os.listdir(p): + yield (v, f) -@pytest.mark.parametrize('version', legacy_pickle_versions()) -def test_pickles(current_pickle_data, version): +@pytest.mark.parametrize('version, f', legacy_pickle_versions()) +def test_pickles(current_pickle_data, version, f): if not is_platform_little_endian(): pytest.skip("known failure on non-little endian") - pth = tm.get_data_path('legacy_pickle/{0}'.format(version)) - n = 0 - for f in os.listdir(pth): - vf = os.path.join(pth, f) - with catch_warnings(record=True): - data = compare(current_pickle_data, vf, version) - - if data is None: - continue - n += 1 - assert n > 0, ('Pickle files are not ' - 'tested: {version}'.format(version=version)) + vf = tm.get_data_path('legacy_pickle/{}/{}'.format(version, f)) + with catch_warnings(record=True): + compare(current_pickle_data, vf, version) def test_round_trip_current(current_pickle_data):
closes #17721
https://api.github.com/repos/pandas-dev/pandas/pulls/17733
2017-10-01T20:43:33Z
2017-10-01T22:58:02Z
2017-10-01T22:58:01Z
2017-10-01T22:58:57Z
DOC: Fixed typo in documentation for 'pandas.DataFrame.replace'
diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 2fb0e348c01c0..6fd4f3eeb6b90 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -4260,7 +4260,7 @@ def replace(self, to_replace=None, value=None, inplace=False, limit=None, dicts of such objects are also allowed. inplace : boolean, default False If True, in place. Note: this will modify any - other views on this object (e.g. a column form a DataFrame). + other views on this object (e.g. a column from a DataFrame). Returns the caller if this is True. limit : int, default None Maximum size gap to forward or backward fill
* Replaced 'form' with 'from' in the description of the 'inplace' parameter. - [ ] tests added / passed - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/17731
2017-10-01T11:33:48Z
2017-10-01T12:25:40Z
2017-10-01T12:25:40Z
2017-10-01T12:25:46Z
BUG: GH17525 Function _get_standard_colors resets global random seed
diff --git a/doc/source/whatsnew/v0.21.0.txt b/doc/source/whatsnew/v0.21.0.txt index e0e0c18052550..da3cf965cb81a 100644 --- a/doc/source/whatsnew/v0.21.0.txt +++ b/doc/source/whatsnew/v0.21.0.txt @@ -614,6 +614,7 @@ Plotting - Line plots no longer assume monotonic x data when calculating xlims, they show the entire lines now even for unsorted x data. (:issue:`11310`, :issue:`11471`) - With matplotlib 2.0.0 and above, calculation of x limits for line plots is left to matplotlib, so that its new default settings are applied. (:issue:`15495`) - Bug in ``Series.plot.bar`` or ``DataFramee.plot.bar`` with ``y`` not respecting user-passed ``color`` (:issue:`16822`) +- Bug causing ``plotting.parallel_coordinates`` to reset the random seed when using random colors (:issue:`17525`) Groupby/Resample/Rolling diff --git a/pandas/plotting/_style.py b/pandas/plotting/_style.py index 8cb4e30e0d91c..bd581c93f1011 100644 --- a/pandas/plotting/_style.py +++ b/pandas/plotting/_style.py @@ -9,7 +9,7 @@ import numpy as np from pandas.core.dtypes.common import is_list_like -from pandas.compat import range, lrange, lmap +from pandas.compat import lrange, lmap import pandas.compat as compat from pandas.plotting._compat import _mpl_ge_2_0_0 @@ -111,11 +111,13 @@ def _get_standard_colors(num_colors=None, colormap=None, color_type='default', if isinstance(colors, compat.string_types): colors = list(colors) elif color_type == 'random': - import random + from pandas.core.common import _random_state def random_color(column): - random.seed(column) - return [random.random() for _ in range(3)] + """ Returns a random color represented as a list of length 3""" + # GH17525 use common._random_state to avoid resetting the seed + rs = _random_state(column) + return rs.rand(3).tolist() colors = lmap(random_color, lrange(num_colors)) else: diff --git a/pandas/tests/plotting/test_misc.py b/pandas/tests/plotting/test_misc.py index c4795ea1e1eca..957369a20f16e 100644 --- a/pandas/tests/plotting/test_misc.py +++ b/pandas/tests/plotting/test_misc.py @@ -284,3 +284,20 @@ def test_subplot_titles(self): title=title[:-1]) title_list = [ax.get_title() for sublist in plot for ax in sublist] assert title_list == title[:3] + [''] + + def test_get_standard_colors_random_seed(self): + # GH17525 + df = DataFrame(np.zeros((10, 10))) + + # Make sure that the random seed isn't reset by _get_standard_colors + plotting.parallel_coordinates(df, 0) + rand1 = random.random() + plotting.parallel_coordinates(df, 0) + rand2 = random.random() + assert rand1 != rand2 + + # Make sure it produces the same colors every time it's called + from pandas.plotting._style import _get_standard_colors + color1 = _get_standard_colors(1, color_type='random') + color2 = _get_standard_colors(1, color_type='random') + assert color1 == color2
- [X] closes #17525 - [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [X] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/17730
2017-09-30T21:33:48Z
2017-10-03T08:07:09Z
2017-10-03T08:07:09Z
2017-10-03T11:19:55Z
BUG: GH17525 Function _get_standard_colors resets random seed
diff --git a/doc/source/whatsnew/v0.21.0.txt b/doc/source/whatsnew/v0.21.0.txt index e0e0c18052550..d5f8c3027d5af 100644 --- a/doc/source/whatsnew/v0.21.0.txt +++ b/doc/source/whatsnew/v0.21.0.txt @@ -614,6 +614,7 @@ Plotting - Line plots no longer assume monotonic x data when calculating xlims, they show the entire lines now even for unsorted x data. (:issue:`11310`, :issue:`11471`) - With matplotlib 2.0.0 and above, calculation of x limits for line plots is left to matplotlib, so that its new default settings are applied. (:issue:`15495`) - Bug in ``Series.plot.bar`` or ``DataFramee.plot.bar`` with ``y`` not respecting user-passed ``color`` (:issue:`16822`) +- Bug in ``plotting._style._get_standard_colors`` resetting the random seed when generating random colors (:issue:`17525`) Groupby/Resample/Rolling diff --git a/pandas/plotting/_style.py b/pandas/plotting/_style.py index 8cb4e30e0d91c..c8472a6fdb364 100644 --- a/pandas/plotting/_style.py +++ b/pandas/plotting/_style.py @@ -114,7 +114,6 @@ def _get_standard_colors(num_colors=None, colormap=None, color_type='default', import random def random_color(column): - random.seed(column) return [random.random() for _ in range(3)] colors = lmap(random_color, lrange(num_colors))
- [X] closes #17525 - [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [X] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/17729
2017-09-30T20:58:50Z
2017-09-30T21:22:52Z
null
2023-05-11T01:16:30Z
REF/INT: concat blocks of same type while preserving block type
diff --git a/pandas/core/dtypes/concat.py b/pandas/core/dtypes/concat.py index f6f956832eebe..93993fd0a0cab 100644 --- a/pandas/core/dtypes/concat.py +++ b/pandas/core/dtypes/concat.py @@ -63,11 +63,12 @@ def get_dtype_kinds(l): return typs -def _get_series_result_type(result): +def _get_series_result_type(result, objs=None): """ return appropriate class of Series concat input is either dict or array-like """ + # concat Series with axis 1 if isinstance(result, dict): # concat Series with axis 1 if all(is_sparse(c) for c in compat.itervalues(result)): @@ -77,13 +78,12 @@ def _get_series_result_type(result): from pandas.core.frame import DataFrame return DataFrame - elif is_sparse(result): - # concat Series with axis 1 + # otherwise it is a SingleBlockManager (axis = 0) + if result._block.is_sparse: from pandas.core.sparse.api import SparseSeries return SparseSeries else: - from pandas.core.series import Series - return Series + return objs[0]._constructor def _get_frame_result_type(result, objs): diff --git a/pandas/core/internals.py b/pandas/core/internals.py index 689f5521e1ccb..f4f231be570c2 100644 --- a/pandas/core/internals.py +++ b/pandas/core/internals.py @@ -102,6 +102,7 @@ class Block(PandasObject): _validate_ndim = True _ftype = 'dense' _holder = None + _concatenator = staticmethod(np.concatenate) def __init__(self, values, placement, ndim=None, fastpath=False): if ndim is None: @@ -314,6 +315,15 @@ def ftype(self): def merge(self, other): return _merge_blocks([self, other]) + def concat_same_type(self, to_concat, placement=None): + """ + Concatenate list of single blocks of the same type. + """ + values = self._concatenator([blk.values for blk in to_concat], + axis=self.ndim - 1) + return self.make_block_same_class( + values, placement=placement or slice(0, len(values), 1)) + def reindex_axis(self, indexer, method=None, axis=1, fill_value=None, limit=None, mask_info=None): """ @@ -2309,6 +2319,7 @@ class CategoricalBlock(NonConsolidatableMixIn, ObjectBlock): _verify_integrity = True _can_hold_na = True _holder = Categorical + _concatenator = staticmethod(_concat._concat_categorical) def __init__(self, values, placement, fastpath=False, **kwargs): @@ -2432,6 +2443,17 @@ def to_native_types(self, slicer=None, na_rep='', quoting=None, **kwargs): # we are expected to return a 2-d ndarray return values.reshape(1, len(values)) + def concat_same_type(self, to_concat, placement=None): + """ + Concatenate list of single blocks of the same type. + """ + values = self._concatenator([blk.values for blk in to_concat], + axis=self.ndim - 1) + # not using self.make_block_same_class as values can be object dtype + return make_block( + values, placement=placement or slice(0, len(values), 1), + ndim=self.ndim) + class DatetimeBlock(DatetimeLikeBlockMixin, Block): __slots__ = () @@ -2571,6 +2593,7 @@ class DatetimeTZBlock(NonConsolidatableMixIn, DatetimeBlock): """ implement a datetime64 block with a tz attribute """ __slots__ = () _holder = DatetimeIndex + _concatenator = staticmethod(_concat._concat_datetime) is_datetimetz = True def __init__(self, values, placement, ndim=2, **kwargs): @@ -2711,6 +2734,16 @@ def shift(self, periods, axis=0, mgr=None): return [self.make_block_same_class(new_values, placement=self.mgr_locs)] + def concat_same_type(self, to_concat, placement=None): + """ + Concatenate list of single blocks of the same type. 
+ """ + values = self._concatenator([blk.values for blk in to_concat], + axis=self.ndim - 1) + # not using self.make_block_same_class as values can be non-tz dtype + return make_block( + values, placement=placement or slice(0, len(values), 1)) + class SparseBlock(NonConsolidatableMixIn, Block): """ implement as a list of sparse arrays of the same dtype """ @@ -2721,6 +2754,7 @@ class SparseBlock(NonConsolidatableMixIn, Block): _can_hold_na = True _ftype = 'sparse' _holder = SparseArray + _concatenator = staticmethod(_concat._concat_sparse) @property def shape(self): @@ -4517,6 +4551,45 @@ def fast_xs(self, loc): """ return self._block.values[loc] + def concat(self, to_concat, new_axis): + """ + Concatenate a list of SingleBlockManagers into a single + SingleBlockManager. + + Used for pd.concat of Series objects with axis=0. + + Parameters + ---------- + to_concat : list of SingleBlockManagers + new_axis : Index of the result + + Returns + ------- + SingleBlockManager + + """ + non_empties = [x for x in to_concat if len(x) > 0] + + # check if all series are of the same block type: + if len(non_empties) > 0: + blocks = [obj.blocks[0] for obj in non_empties] + + if all([type(b) is type(blocks[0]) for b in blocks[1:]]): # noqa + new_block = blocks[0].concat_same_type(blocks) + else: + values = [x.values for x in blocks] + values = _concat._concat_compat(values) + new_block = make_block( + values, placement=slice(0, len(values), 1)) + else: + values = [x._block.values for x in to_concat] + values = _concat._concat_compat(values) + new_block = make_block( + values, placement=slice(0, len(values), 1)) + + mgr = SingleBlockManager(new_block, new_axis) + return mgr + def construction_error(tot_items, block_shape, axes, e=None): """ raise a helpful message about our construction """ @@ -5105,13 +5178,42 @@ def concatenate_block_managers(mgrs_indexers, axes, concat_axis, copy): [get_mgr_concatenation_plan(mgr, indexers) for mgr, indexers in mgrs_indexers], concat_axis) - blocks = [make_block( - concatenate_join_units(join_units, concat_axis, copy=copy), - placement=placement) for placement, join_units in concat_plan] + blocks = [] + + for placement, join_units in concat_plan: + + if is_uniform_join_units(join_units): + b = join_units[0].block.concat_same_type( + [ju.block for ju in join_units], placement=placement) + else: + b = make_block( + concatenate_join_units(join_units, concat_axis, copy=copy), + placement=placement) + blocks.append(b) return BlockManager(blocks, axes) +def is_uniform_join_units(join_units): + """ + Check if the join units consist of blocks of uniform type that can + be concatenated using Block.concat_same_type instead of the generic + concatenate_join_units (which uses `_concat._concat_compat`). + + """ + return ( + # all blocks need to have the same type + all([type(ju.block) is type(join_units[0].block) for ju in join_units]) and # noqa + # no blocks that would get missing values (can lead to type upcasts) + all([not ju.is_na for ju in join_units]) and + # no blocks with indexers (as then the dimensions do not fit) + all([not ju.indexers for ju in join_units]) and + # disregard Panels + all([ju.block.ndim <= 2 for ju in join_units]) and + # only use this path when there is something to concatenate + len(join_units) > 1) + + def get_empty_dtype_and_na(join_units): """ Return dtype and N/A values to use when concatenating specified units. 
diff --git a/pandas/core/reshape/concat.py b/pandas/core/reshape/concat.py index 4040c65136617..c54763f8ebde1 100644 --- a/pandas/core/reshape/concat.py +++ b/pandas/core/reshape/concat.py @@ -362,20 +362,12 @@ def get_result(self): # stack blocks if self.axis == 0: - # concat Series with length to keep dtype as much - non_empties = [x for x in self.objs if len(x) > 0] - if len(non_empties) > 0: - values = [x._values for x in non_empties] - else: - values = [x._values for x in self.objs] - new_data = _concat._concat_compat(values) - name = com._consensus_name_attr(self.objs) - cons = _concat._get_series_result_type(new_data) - return (cons(new_data, index=self.new_axes[0], - name=name, dtype=new_data.dtype) - .__finalize__(self, method='concat')) + mgr = self.objs[0]._data.concat([x._data for x in self.objs], + self.new_axes) + cons = _concat._get_series_result_type(mgr, self.objs) + return cons(mgr, name=name).__finalize__(self, method='concat') # combine as columns in a frame else: diff --git a/pandas/tests/internals/test_external_block.py b/pandas/tests/internals/test_external_block.py index cccde76c3e1d9..d98b293ed8daa 100644 --- a/pandas/tests/internals/test_external_block.py +++ b/pandas/tests/internals/test_external_block.py @@ -4,14 +4,26 @@ import numpy as np import pandas as pd -from pandas.core.internals import Block, BlockManager, SingleBlockManager +from pandas.core.internals import ( + Block, BlockManager, SingleBlockManager, NonConsolidatableMixIn) -class CustomBlock(Block): +class CustomBlock(NonConsolidatableMixIn, Block): + + _holder = np.ndarray def formatting_values(self): return np.array(["Val: {}".format(i) for i in self.values]) + def concat_same_type(self, to_concat, placement=None): + """ + Always concatenate disregarding self.ndim as the values are + always 1D in this custom Block + """ + values = np.concatenate([blk.values for blk in to_concat]) + return self.make_block_same_class( + values, placement=placement or slice(0, len(values), 1)) + def test_custom_repr(): values = np.arange(3, dtype='int64') @@ -23,7 +35,30 @@ def test_custom_repr(): assert repr(s) == '0 Val: 0\n1 Val: 1\n2 Val: 2\ndtype: int64' # dataframe - block = CustomBlock(values.reshape(1, -1), placement=slice(0, 1)) + block = CustomBlock(values, placement=slice(0, 1)) blk_mgr = BlockManager([block], [['col'], range(3)]) df = pd.DataFrame(blk_mgr) assert repr(df) == ' col\n0 Val: 0\n1 Val: 1\n2 Val: 2' + + +def test_concat_series(): + # GH17728 + values = np.arange(3, dtype='int64') + block = CustomBlock(values, placement=slice(0, 3)) + s = pd.Series(block, pd.RangeIndex(3), fastpath=True) + + res = pd.concat([s, s]) + assert isinstance(res._data.blocks[0], CustomBlock) + + +def test_concat_dataframe(): + # GH17728 + df = pd.DataFrame({'a': [1, 2, 3]}) + blocks = df._data.blocks + values = np.arange(3, dtype='int64') + custom_block = CustomBlock(values, placement=slice(1, 2)) + blocks = blocks + (custom_block, ) + block_manager = BlockManager(blocks, [pd.Index(['a', 'b']), df.index]) + df = pd.DataFrame(block_manager) + res = pd.concat([df, df]) + assert isinstance(res._data.blocks[1], CustomBlock)
Related to https://github.com/pandas-dev/pandas/issues/17283 The goal is to get `pd.concat([list of Series])` working with Series backed by an external block type. Currently the values are always converted to plain arrays, concatenated, and converted back to a block with block type inference. This is a proof-of-concept to check whether something like this would be approved. Tests will still break because `concat_same_type` hasn't been specialized for categorical data yet; see the sketch below.
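As a minimal sketch of the behaviour this aims to generalize (using a built-in extension-like block, categorical, rather than a truly external one):

```python
import pandas as pd

# Two Series whose single blocks have the same type and identical categories.
s1 = pd.Series(pd.Categorical(['a', 'b'], categories=['a', 'b']))
s2 = pd.Series(pd.Categorical(['b', 'a'], categories=['a', 'b']))

# With Block.concat_same_type, same-typed blocks are concatenated directly,
# so the result keeps the category dtype instead of being re-inferred.
res = pd.concat([s1, s2], ignore_index=True)
print(res.dtype)  # category
```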
https://api.github.com/repos/pandas-dev/pandas/pulls/17728
2017-09-30T15:26:19Z
2017-10-12T21:02:14Z
2017-10-12T21:02:14Z
2018-01-11T17:29:50Z
repr string for pd.Grouper
diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py index 967685c4e11bf..f9ac0e67fcf05 100644 --- a/pandas/core/groupby.py +++ b/pandas/core/groupby.py @@ -9,7 +9,7 @@ from pandas.compat import ( zip, range, lzip, - callable, map + callable, map, signature ) from pandas import compat @@ -234,6 +234,8 @@ class Grouper(object): >>> df.groupby(Grouper(level='date', freq='60s', axis=1)) """ + _attributes = ['key', 'level', 'freq', 'axis', 'sort'] + def __new__(cls, *args, **kwargs): if kwargs.get('freq') is not None: from pandas.core.resample import TimeGrouper @@ -333,6 +335,17 @@ def _set_grouper(self, obj, sort=False): def groups(self): return self.grouper.groups + def __repr__(self): + grouper_defaults = compat.signature(self.__init__).defaults + sd = self.__dict__ + attrs = collections.OrderedDict() + for k, v in zip(self._attributes, grouper_defaults): + if k in sd and sd[k] != v: + attrs[k] = sd[k] + attrs = ", ".join("{}={!r}".format(k, v) for k, v in attrs.items()) + cls_name = self.__class__.__name__ + return "{}({})".format(cls_name, attrs) + class GroupByPlot(PandasObject): """ diff --git a/pandas/core/resample.py b/pandas/core/resample.py index 5a571f9077999..56402ea979255 100644 --- a/pandas/core/resample.py +++ b/pandas/core/resample.py @@ -1,4 +1,5 @@ from datetime import timedelta +import collections import numpy as np import warnings import copy @@ -1026,13 +1027,18 @@ class TimeGrouper(Grouper): directly from the associated object """ + _attributes = ['key', 'level', 'freq', 'axis', 'sort', 'closed', 'label', + 'how', 'nperiods', 'fill_method', 'limit', + 'loffset', 'kind', 'convention', 'base'] + _end_types = {'M', 'A', 'Q', 'BM', 'BA', 'BQ', 'W'} + def __init__(self, freq='Min', closed=None, label=None, how='mean', nperiods=None, axis=0, fill_method=None, limit=None, loffset=None, kind=None, convention=None, base=0, **kwargs): freq = to_offset(freq) - end_types = set(['M', 'A', 'Q', 'BM', 'BA', 'BQ', 'W']) + end_types = self._end_types rule = freq.rule_code if (rule in end_types or ('-' in rule and rule[:rule.find('-')] in end_types)): @@ -1047,6 +1053,7 @@ def __init__(self, freq='Min', closed=None, label=None, how='mean', label = 'left' self.closed = closed + self.freq = freq self.label = label self.nperiods = nperiods self.kind = kind diff --git a/pandas/tests/test_resample.py b/pandas/tests/test_resample.py index ba1a2ad1f42e2..ff012c75e1531 100644 --- a/pandas/tests/test_resample.py +++ b/pandas/tests/test_resample.py @@ -3177,6 +3177,14 @@ def setup_method(self, method): self.ts = Series(np.random.randn(1000), index=date_range('1/1/2000', periods=1000)) + def test_timegrouper_repr(self): + # Added in GH17727 + result = repr(TimeGrouper(key='key', freq='50Min', label='right')) + expected = ("TimeGrouper(key='key', freq=<50 * Minutes>, axis=0," + " sort=True, closed='left', label='right', how='mean', " + "loffset=None)") + assert result == expected + def test_apply(self): with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
- [ ] closes #xxxx
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry

**Edit: I've made a full proposal (sans some discussion points below)**

Currently the repr for ``Grouper`` and ``TimeGrouper`` is not pretty:

```python
>>> pd.Grouper(key='key')
<pandas.core.groupby.Grouper at 0x248d5ebfd30>
>>> pd.Grouper(key='key', freq='50Min')
<pandas.core.resample.TimeGrouper at 0x248d68d95c0>
```

I propose adding a ``Grouper.__repr__``, so the repr will look like:

```python
>>> pd.Grouper(key='key')
Grouper(key='key')
>>> pd.Grouper(key='key', freq='50Min')
TimeGrouper(key='key', freq='50T')
>>> pd.Grouper(key='key', freq='50Min', label='right')
TimeGrouper(key='key', freq='50T', label='right')
```

The repr shows the instantiation form, so users can copy the repr and paste it back in to recreate the object, which is always nice. See the attached PR. Tests are still missing. Comments welcome. Two points:

* The repr calculation is a bit heavy, so I've cached it. Don't know if that is going overboard?
* Is ``TimeGrouper`` deprecated?
https://api.github.com/repos/pandas-dev/pandas/pulls/17727
2017-09-30T12:57:07Z
2017-11-09T22:31:19Z
null
2017-12-11T08:09:16Z
BUG: fixed check `_is_unorderable_exception`
diff --git a/pandas/core/dtypes/common.py b/pandas/core/dtypes/common.py index 37f99bd344e6c..2962c05e1b4fd 100644 --- a/pandas/core/dtypes/common.py +++ b/pandas/core/dtypes/common.py @@ -1158,7 +1158,9 @@ def _is_unorderable_exception(e): """ if PY36: - return "'>' not supported between instances of" in str(e) + str_e = str(e) + return ("'>' not supported between instances of" in str_e or + "'<' not supported between instances of" in str_e) elif PY3: return 'unorderable' in str(e)
With the upgrade to NumPy 1.13.2, the error message raised when comparing unorderable types changed from using `"'>' not supported between instances of"` to using `"'<' not supported between instances of"`. This PR checks for both. See [GH-17046](https://github.com/pandas-dev/pandas/issues/17046) for discussion. This caused the test `test_basic_indexing` to fail when running the test suite with NumPy 1.13.2. Here is a reproducer:

```
import pandas
import pytest
import numpy as np


def test_basic_indexing():
    s = pandas.Series(np.random.randn(5), index=['a', 'b', 'a', 'a', 'b'])
    pytest.raises(IndexError, s.__getitem__, 5)
    pytest.raises(IndexError, s.__setitem__, 5, 0)
    pytest.raises(KeyError, s.__getitem__, 'c')
    s = s.sort_index()
    pytest.raises(IndexError, s.__getitem__, 5)
    pytest.raises(IndexError, s.__setitem__, 5, 0)
    # this part was failing


test_basic_indexing()
```
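For context, this is the kind of message being matched (a sketch; which operator appears depends on which comparison path reports the error, hence checking both):

```python
try:
    [] > 1  # unorderable comparison under Python 3
except TypeError as e:
    # Python 3.6+: "'>' not supported between instances of 'list' and 'int'";
    # some NumPy comparison paths report '<' instead
    print(e)
```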
https://api.github.com/repos/pandas-dev/pandas/pulls/17724
2017-09-29T20:53:36Z
2017-10-03T13:00:49Z
null
2017-10-03T13:00:49Z
Update clipboard Qt-bindings for flexiblity and Python3 compatibility
diff --git a/ci/requirements-3.6_BUILD_TEST.sh b/ci/requirements-3.6_BUILD_TEST.sh index 84dd27c50d587..2a3adeff836ee 100644 --- a/ci/requirements-3.6_BUILD_TEST.sh +++ b/ci/requirements-3.6_BUILD_TEST.sh @@ -4,4 +4,4 @@ source activate pandas echo "install 36 BUILD_TEST" -conda install -n pandas -c conda-forge pyarrow dask +conda install -n pandas -c conda-forge pyarrow dask pyqt qtpy diff --git a/doc/source/install.rst b/doc/source/install.rst index c805f84d0faaa..b8968e18aecb0 100644 --- a/doc/source/install.rst +++ b/doc/source/install.rst @@ -251,12 +251,13 @@ Optional Dependencies * `Jinja2 <http://jinja.pocoo.org/>`__: Template engine for conditional HTML formatting. * `s3fs <http://s3fs.readthedocs.io/>`__: necessary for Amazon S3 access (s3fs >= 0.0.7). * `blosc <https://pypi.python.org/pypi/blosc>`__: for msgpack compression using ``blosc`` -* One of `PyQt4 - <http://www.riverbankcomputing.com/software/pyqt/download>`__, `PySide - <http://qt-project.org/wiki/Category:LanguageBindings::PySide>`__, `pygtk - <http://www.pygtk.org/>`__, `xsel - <http://www.vergenet.net/~conrad/software/xsel/>`__, or `xclip - <https://github.com/astrand/xclip/>`__: necessary to use +* One of + `qtpy <https://github.com/spyder-ide/qtpy>`__ (requires PyQt or PySide), + `PyQt5 <https://www.riverbankcomputing.com/software/pyqt/download5>`__, + `PyQt4 <http://www.riverbankcomputing.com/software/pyqt/download>`__, + `pygtk <http://www.pygtk.org/>`__, + `xsel <http://www.vergenet.net/~conrad/software/xsel/>`__, or + `xclip <https://github.com/astrand/xclip/>`__: necessary to use :func:`~pandas.read_clipboard`. Most package managers on Linux distributions will have ``xclip`` and/or ``xsel`` immediately available for installation. * For Google BigQuery I/O - see `here <https://pandas-gbq.readthedocs.io/en/latest/install.html#dependencies>`__ diff --git a/doc/source/io.rst b/doc/source/io.rst index c94d5bc75d4fc..5390fc3399e23 100644 --- a/doc/source/io.rst +++ b/doc/source/io.rst @@ -3053,7 +3053,7 @@ We can see that we got the same content back, which we had earlier written to th .. note:: - You may need to install xclip or xsel (with gtk or PyQt4 modules) on Linux to use these methods. + You may need to install xclip or xsel (with gtk, PyQt5, PyQt4 or qtpy) on Linux to use these methods. .. _io.pickle: diff --git a/doc/source/whatsnew/v0.22.0.txt b/doc/source/whatsnew/v0.22.0.txt index 90032a692fd15..9b7226f0fe594 100644 --- a/doc/source/whatsnew/v0.22.0.txt +++ b/doc/source/whatsnew/v0.22.0.txt @@ -44,6 +44,7 @@ Other Enhancements - :class:`pandas.io.formats.style.Styler` now has method ``hide_columns()`` to determine whether columns will be hidden in output (:issue:`14194`) - Improved wording of ``ValueError`` raised in :func:`to_datetime` when ``unit=`` is passed with a non-convertible value (:issue:`14350`) - :func:`Series.fillna` now accepts a Series or a dict as a ``value`` for a categorical dtype (:issue:`17033`) +- :func:`pandas.read_clipboard` updated to use qtpy, falling back to PyQt5 and then PyQt4, adding compatibility with Python3 and multiple python-qt bindings (:issue:`17722`) .. _whatsnew_0220.api_breaking: diff --git a/pandas/io/clipboard/__init__.py b/pandas/io/clipboard/__init__.py index 4066a3be5e850..37d398f20ef41 100644 --- a/pandas/io/clipboard/__init__.py +++ b/pandas/io/clipboard/__init__.py @@ -18,7 +18,8 @@ On Linux, install xclip or xsel via package manager. For example, in Debian: sudo apt-get install xclip -Otherwise on Linux, you will need the gtk or PyQt4 modules installed. 
+Otherwise on Linux, you will need the gtk, qtpy or PyQt modules installed. +qtpy also requires a python-qt-bindings module: PyQt4, PyQt5, PySide, PySide2 gtk and PyQt4 modules are not available for Python 3, and this module does not work with PyGObject yet. @@ -34,9 +35,9 @@ init_klipper_clipboard, init_no_clipboard) from .windows import init_windows_clipboard -# `import PyQt4` sys.exit()s if DISPLAY is not in the environment. +# `import qtpy` sys.exit()s if DISPLAY is not in the environment. # Thus, we need to detect the presence of $DISPLAY manually -# and not load PyQt4 if it is absent. +# and not load qtpy if it is absent. HAS_DISPLAY = os.getenv("DISPLAY", False) CHECK_CMD = "where" if platform.system() == "Windows" else "which" @@ -68,9 +69,23 @@ def determine_clipboard(): return init_gtk_clipboard() try: - # Check if PyQt4 is installed - import PyQt4 # noqa + # qtpy is a small abstraction layer that lets you write + # applications using a single api call to either PyQt or PySide + # https://pypi.python.org/pypi/QtPy + import qtpy # noqa except ImportError: + # If qtpy isn't installed, fall back on importing PyQt5, or PyQt5 + try: + import PyQt5 # noqa + except ImportError: + try: + import PyQt4 # noqa + except ImportError: + pass # fail fast for all non-ImportError exceptions. + else: + return init_qt_clipboard() + else: + return init_qt_clipboard() pass else: return init_qt_clipboard() diff --git a/pandas/io/clipboard/clipboards.py b/pandas/io/clipboard/clipboards.py index e32380a383374..285d93e3ca497 100644 --- a/pandas/io/clipboard/clipboards.py +++ b/pandas/io/clipboard/clipboards.py @@ -46,10 +46,19 @@ def paste_gtk(): def init_qt_clipboard(): # $DISPLAY should exist - from PyQt4.QtGui import QApplication - # use the global instance if it exists - app = QApplication.instance() or QApplication([]) + # Try to import from qtpy, but if that fails try PyQt5 then PyQt4 + try: + from qtpy.QtWidgets import QApplication + except ImportError: + try: + from PyQt5.QtWidgets import QApplication + except ImportError: + from PyQt4.QtGui import QApplication + + app = QApplication.instance() + if app is None: + app = QApplication([]) def copy_qt(text): cb = app.clipboard()
This should prevent conflicts with other Qt-binding packages when embedding pandas in a Qt-based GUI, and should also provide compatibility with Python 3, since PyQt4 may not be available for the latest releases. - [ ] closes #17722
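The heart of the change is the import cascade below, condensed from the diff: try qtpy first, then PyQt5, then PyQt4, and reuse an existing QApplication if one is running:

```python
try:
    from qtpy.QtWidgets import QApplication  # abstracts over PyQt/PySide
except ImportError:
    try:
        from PyQt5.QtWidgets import QApplication
    except ImportError:
        from PyQt4.QtGui import QApplication

# use the global QApplication instance if it exists, else create one
app = QApplication.instance()
if app is None:
    app = QApplication([])
```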
https://api.github.com/repos/pandas-dev/pandas/pulls/17723
2017-09-29T18:40:37Z
2017-11-24T22:17:03Z
2017-11-24T22:17:03Z
2017-11-25T00:42:02Z
Separate out _convert_datetime_to_tsobject
diff --git a/pandas/_libs/tslib.pyx b/pandas/_libs/tslib.pyx index 4c34d0fcb1e5f..60b7c3cfcb6c7 100644 --- a/pandas/_libs/tslib.pyx +++ b/pandas/_libs/tslib.pyx @@ -728,7 +728,7 @@ class Timestamp(_Timestamp): # reconstruct & check bounds ts_input = datetime(dts.year, dts.month, dts.day, dts.hour, dts.min, dts.sec, dts.us, tzinfo=_tzinfo) - ts = convert_to_tsobject(ts_input, _tzinfo, None, 0, 0) + ts = convert_datetime_to_tsobject(ts_input, _tzinfo) value = ts.value + (dts.ps // 1000) if value != NPY_NAT: _check_dts_bounds(&dts) @@ -1475,52 +1475,11 @@ cdef convert_to_tsobject(object ts, object tz, object unit, obj.value = ts pandas_datetime_to_datetimestruct(ts, PANDAS_FR_ns, &obj.dts) elif PyDateTime_Check(ts): - if tz is not None: - # sort of a temporary hack - if ts.tzinfo is not None: - if (hasattr(tz, 'normalize') and - hasattr(ts.tzinfo, '_utcoffset')): - ts = tz.normalize(ts) - obj.value = _pydatetime_to_dts(ts, &obj.dts) - obj.tzinfo = ts.tzinfo - else: #tzoffset - try: - tz = ts.astimezone(tz).tzinfo - except: - pass - obj.value = _pydatetime_to_dts(ts, &obj.dts) - ts_offset = get_utcoffset(ts.tzinfo, ts) - obj.value -= _delta_to_nanoseconds(ts_offset) - tz_offset = get_utcoffset(tz, ts) - obj.value += _delta_to_nanoseconds(tz_offset) - pandas_datetime_to_datetimestruct(obj.value, - PANDAS_FR_ns, &obj.dts) - obj.tzinfo = tz - elif not is_utc(tz): - ts = _localize_pydatetime(ts, tz) - obj.value = _pydatetime_to_dts(ts, &obj.dts) - obj.tzinfo = ts.tzinfo - else: - # UTC - obj.value = _pydatetime_to_dts(ts, &obj.dts) - obj.tzinfo = pytz.utc - else: - obj.value = _pydatetime_to_dts(ts, &obj.dts) - obj.tzinfo = ts.tzinfo - - if obj.tzinfo is not None and not is_utc(obj.tzinfo): - offset = get_utcoffset(obj.tzinfo, ts) - obj.value -= _delta_to_nanoseconds(offset) - - if is_timestamp(ts): - obj.value += ts.nanosecond - obj.dts.ps = ts.nanosecond * 1000 - _check_dts_bounds(&obj.dts) - return obj + return convert_datetime_to_tsobject(ts, tz) elif PyDate_Check(ts): # Keep the converter same as PyDateTime's ts = datetime.combine(ts, datetime_time()) - return convert_to_tsobject(ts, tz, None, 0, 0) + return convert_datetime_to_tsobject(ts, tz) elif getattr(ts, '_typ', None) == 'period': raise ValueError( "Cannot convert Period to Timestamp " @@ -1538,6 +1497,83 @@ cdef convert_to_tsobject(object ts, object tz, object unit, return obj +cdef _TSObject convert_datetime_to_tsobject(datetime ts, object tz, + int32_t nanos=0): + """ + Convert a datetime (or Timestamp) input `ts`, along with optional timezone + object `tz` to a _TSObject. + + The optional argument `nanos` allows for cases where datetime input + needs to be supplemented with higher-precision information. 
+ + Parameters + ---------- + ts : datetime or Timestamp + Value to be converted to _TSObject + tz : tzinfo or None + timezone for the timezone-aware output + nanos : int32_t, default is 0 + nanoseconds supplement the precision of the datetime input ts + + Returns + ------- + obj : _TSObject + """ + cdef: + _TSObject obj = _TSObject() + + if tz is not None: + tz = maybe_get_tz(tz) + + # sort of a temporary hack + if ts.tzinfo is not None: + if (hasattr(tz, 'normalize') and + hasattr(ts.tzinfo, '_utcoffset')): + ts = tz.normalize(ts) + obj.value = _pydatetime_to_dts(ts, &obj.dts) + obj.tzinfo = ts.tzinfo + else: + # tzoffset + try: + tz = ts.astimezone(tz).tzinfo + except: + pass + obj.value = _pydatetime_to_dts(ts, &obj.dts) + ts_offset = get_utcoffset(ts.tzinfo, ts) + obj.value -= int(ts_offset.total_seconds() * 1e9) + tz_offset = get_utcoffset(tz, ts) + obj.value += int(tz_offset.total_seconds() * 1e9) + pandas_datetime_to_datetimestruct(obj.value, + PANDAS_FR_ns, &obj.dts) + obj.tzinfo = tz + elif not is_utc(tz): + ts = _localize_pydatetime(ts, tz) + obj.value = _pydatetime_to_dts(ts, &obj.dts) + obj.tzinfo = ts.tzinfo + else: + # UTC + obj.value = _pydatetime_to_dts(ts, &obj.dts) + obj.tzinfo = pytz.utc + else: + obj.value = _pydatetime_to_dts(ts, &obj.dts) + obj.tzinfo = ts.tzinfo + + if obj.tzinfo is not None and not is_utc(obj.tzinfo): + offset = get_utcoffset(obj.tzinfo, ts) + obj.value -= int(offset.total_seconds() * 1e9) + + if is_timestamp(ts): + obj.value += ts.nanosecond + obj.dts.ps = ts.nanosecond * 1000 + + if nanos: + obj.value += nanos + obj.dts.ps = nanos * 1000 + + _check_dts_bounds(&obj.dts) + return obj + + cpdef convert_str_to_tsobject(object ts, object tz, object unit, dayfirst=False, yearfirst=False): """ ts must be a string """ @@ -1558,11 +1594,12 @@ cpdef convert_str_to_tsobject(object ts, object tz, object unit, elif ts == 'now': # Issue 9000, we short-circuit rather than going # into np_datetime_strings which returns utc - ts = Timestamp.now(tz) + ts = datetime.now(tz) elif ts == 'today': # Issue 9000, we short-circuit rather than going # into np_datetime_strings which returns a normalized datetime - ts = Timestamp.today(tz) + ts = datetime.now(tz) + # equiv: datetime.today().replace(tzinfo=tz) else: try: _string_to_dts(ts, &obj.dts, &out_local, &out_tzoffset) @@ -1577,7 +1614,15 @@ cpdef convert_str_to_tsobject(object ts, object tz, object unit, return obj else: # Keep the converter same as PyDateTime's - ts = Timestamp(obj.value, tz=obj.tzinfo) + obj = convert_to_tsobject(obj.value, obj.tzinfo, + None, 0, 0) + dtime = datetime(obj.dts.year, obj.dts.month, obj.dts.day, + obj.dts.hour, obj.dts.min, obj.dts.sec, + obj.dts.us, obj.tzinfo) + obj = convert_datetime_to_tsobject(dtime, tz, + nanos=obj.dts.ps / 1000) + return obj + else: ts = obj.value if tz is not None: @@ -1726,7 +1771,7 @@ def datetime_to_datetime64(ndarray[object] values): else: inferred_tz = get_timezone(val.tzinfo) - _ts = convert_to_tsobject(val, None, None, 0, 0) + _ts = convert_datetime_to_tsobject(val, None) iresult[i] = _ts.value _check_dts_bounds(&_ts.dts) else: @@ -2046,7 +2091,7 @@ cpdef array_to_datetime(ndarray[object] values, errors='raise', seen_datetime=1 if val.tzinfo is not None: if utc_convert: - _ts = convert_to_tsobject(val, None, 'ns', 0, 0) + _ts = convert_datetime_to_tsobject(val, None) iresult[i] = _ts.value try: _check_dts_bounds(&_ts.dts) @@ -2155,7 +2200,7 @@ cpdef array_to_datetime(ndarray[object] values, errors='raise', raise TypeError("invalid string coercion to 
datetime") try: - _ts = convert_to_tsobject(py_dt, None, None, 0, 0) + _ts = convert_datetime_to_tsobject(py_dt, None) iresult[i] = _ts.value except ValueError: if is_coerce:
A bunch of calls to `convert_to_tsobject` are made in cases where we already know that the input is a `datetime` object. This PR separates out the `datetime` case into a new function `_convert_datetime_to_tsobject`. Second, to make the dependency between `_TSObject` and `Timestamp` one-way, this removes the three references to `Timestamp` that are made in `convert_str_to_tsobject`. Verifying the last of these changes may be non-trivial. This is a prelude to separating the `_TSObject` logic out into `tslibs.conversion`. - [ ] closes #xxxx - [ ] tests added / passed - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
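As a rough illustration of the inputs that now take the dedicated `datetime` path (a sketch using the public API, not the cdef function itself):

```python
from datetime import datetime
import pytz
import pandas as pd

dt = datetime(2017, 9, 29, 12, 0, tzinfo=pytz.utc)
ts = pd.Timestamp(dt)  # datetime input hits the datetime-only branch
print(ts)              # 2017-09-29 12:00:00+00:00
print(ts.value)        # the i8 nanosecond value that a _TSObject carries
```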
https://api.github.com/repos/pandas-dev/pandas/pulls/17715
2017-09-29T02:49:22Z
2017-10-02T12:43:40Z
2017-10-02T12:43:40Z
2017-10-30T16:25:23Z
Doc: Further improvements for IntervalIndex and Interval
diff --git a/doc/source/advanced.rst b/doc/source/advanced.rst index 799d04859cc2a..cfdb53ec7e4b1 100644 --- a/doc/source/advanced.rst +++ b/doc/source/advanced.rst @@ -833,12 +833,21 @@ Of course if you need integer based selection, then use ``iloc`` IntervalIndex ~~~~~~~~~~~~~ +:class:`IntervalIndex` together with its own dtype, ``interval`` as well as the +:class:`Interval` scalar type, allow first-class support in pandas for interval +notation. + +The ``IntervalIndex`` allows some unique indexing and is also used as a +return type for the categories in :func:`cut` and :func:`qcut`. + .. versionadded:: 0.20.0 .. warning:: These indexing behaviors are provisional and may change in a future version of pandas. +An ``IntervalIndex`` can be used in ``Series`` and in ``DataFrame`` as the index. + .. ipython:: python df = pd.DataFrame({'A': [1, 2, 3, 4]}, @@ -860,6 +869,20 @@ If you select a lable *contained* within an interval, this will also select the df.loc[2.5] df.loc[[2.5, 3.5]] +``Interval`` and ``IntervalIndex`` are used by ``cut`` and ``qcut``: + +.. ipython:: python + + c = pd.cut(range(4), bins=2) + c + c.categories + +Furthermore, ``IntervalIndex`` allows one to bin *other* data with these same +bins, with ``NaN`` representing a missing value similar to other dtypes. + +.. ipython:: python + + pd.cut([0, 3, 5, 1], bins=c.categories) Miscellaneous indexing FAQ -------------------------- diff --git a/pandas/_libs/interval.pyx b/pandas/_libs/interval.pyx index 306597031817d..264a983fe4d53 100644 --- a/pandas/_libs/interval.pyx +++ b/pandas/_libs/interval.pyx @@ -51,17 +51,35 @@ cdef class Interval(IntervalMixin): .. versionadded:: 0.20.0 - Attributes + Parameters ---------- - left, right : values - Left and right bounds for each interval. + left : value + Left bound for interval. + right : value + Right bound for interval. closed : {'left', 'right', 'both', 'neither'} Whether the interval is closed on the left-side, right-side, both or neither. Defaults to 'right'. + Examples + -------- + >>> iv = pd.Interval(left=0, right=5) + >>> iv + Interval(0, 5, closed='right') + >>> 2.5 in iv + True + + >>> year_2017 = pd.Interval(pd.Timestamp('2017-01-01'), + ... pd.Timestamp('2017-12-31'), closed='both') + >>> pd.Timestamp('2017-01-01 00:00') in year_2017 + True + See Also -------- - IntervalIndex : an Index of intervals that are all closed on the same side. + IntervalIndex : an Index of ``interval`` s that are all closed on the same + side. + cut, qcut : convert arrays of continuous data into categoricals/series of + ``Interval``. """ cdef readonly object left, right diff --git a/pandas/core/indexes/interval.py b/pandas/core/indexes/interval.py index a697ed7888f90..29699f664bbf3 100644 --- a/pandas/core/indexes/interval.py +++ b/pandas/core/indexes/interval.py @@ -105,8 +105,10 @@ class IntervalIndex(IntervalMixin, Index): .. versionadded:: 0.20.0 - Warning: the indexing behaviors are provisional and may change in - a future version of pandas. + .. warning:: + + The indexing behaviors are provisional and may change in + a future version of pandas. 
Attributes ---------- @@ -147,15 +149,11 @@ class IntervalIndex(IntervalMixin, Index): -------- Index Interval : A bounded slice-like interval - interval_range : Function to create a fixed frequency IntervalIndex - IntervalIndex.from_arrays : Construct an IntervalIndex from a left and - right array - IntervalIndex.from_breaks : Construct an IntervalIndex from an array of - splits - IntervalIndex.from_intervals : Construct an IntervalIndex from an array of - Interval objects - IntervalIndex.from_tuples : Construct an IntervalIndex from a list/array of - tuples + interval_range : Function to create a fixed frequency + IntervalIndex, IntervalIndex.from_arrays, IntervalIndex.from_breaks, + IntervalIndex.from_intervals, IntervalIndex.from_tuples + cut, qcut : convert arrays of continuous data into categoricals/series of + ``Interval``. """ _typ = 'intervalindex' _comparables = ['name']
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`

This follows #17706. Further changes are:

* Took text from the whatsnew for v0.20 and used it in advanced.rst to give a fuller explanation of ``IntervalIndex`` and ``Interval``.
* In interval.pyx, the "Attributes" section of the doc string didn't parse properly, because sphinx doesn't like having "left" and "right" on the same line. They're now on separate lines, and the section was renamed to 'Parameters' (see the sketch after this list). Compare the current rendering to the source: http://pandas-docs.github.io/pandas-docs-travis/generated/pandas.Interval.html#pandas.Interval.
* Some examples were added to ``Interval``.
* The warning text on ``IntervalIndex`` is now parsed properly.
* The "See Also" section had become quite large; it felt more reader-friendly to combine the ``from_*`` methods into one line.
* Added ``cut`` and ``qcut`` to 'See Also'.

This will conclude my contributions to ``IntervalIndex``.
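For illustration, here is the docstring change condensed from the diff (a sketch of the two forms, not the full docstring):

```
# before: two names share one entry; sphinx mis-renders it
Attributes
----------
left, right : values
    Left and right bounds for each interval.

# after: one entry per name, section renamed
Parameters
----------
left : value
    Left bound for interval.
right : value
    Right bound for interval.
```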
https://api.github.com/repos/pandas-dev/pandas/pulls/17714
2017-09-29T01:41:22Z
2017-09-29T15:00:09Z
2017-09-29T15:00:09Z
2017-09-29T16:08:11Z
update imports of DateParseError, remove unused imports from tslib
diff --git a/pandas/_libs/tslib.pyx b/pandas/_libs/tslib.pyx index 4c34d0fcb1e5f..b0b70bb810204 100644 --- a/pandas/_libs/tslib.pyx +++ b/pandas/_libs/tslib.pyx @@ -1,12 +1,9 @@ # -*- coding: utf-8 -*- # cython: profile=False -import warnings - cimport numpy as np from numpy cimport (int8_t, int32_t, int64_t, import_array, ndarray, - float64_t, - NPY_INT64, NPY_DATETIME, NPY_TIMEDELTA) + float64_t, NPY_DATETIME, NPY_TIMEDELTA) import numpy as np import sys @@ -16,12 +13,10 @@ from cpython cimport ( PyTypeObject, PyFloat_Check, PyComplex_Check, - PyLong_Check, PyObject_RichCompareBool, PyObject_RichCompare, Py_GT, Py_GE, Py_EQ, Py_NE, Py_LT, Py_LE, - PyUnicode_Check, - PyUnicode_AsUTF8String) + PyUnicode_Check) cdef extern from "Python.h": cdef PyTypeObject *Py_TYPE(object) @@ -38,7 +33,6 @@ from datetime cimport ( pandas_datetimestruct, pandas_datetime_to_datetimestruct, pandas_datetimestruct_to_datetime, - cmp_pandas_datetimestruct, days_per_month_table, get_datetime64_value, get_timedelta64_value, @@ -68,23 +62,12 @@ from khash cimport ( kh_resize_int64, kh_get_int64) from .tslibs.parsing import parse_datetime_string -from .tslibs.parsing import DateParseError # noqa cimport cython -import re import time -# dateutil compat -from dateutil.tz import (tzoffset, tzlocal as _dateutil_tzlocal, - tzutc as _dateutil_tzutc, - tzstr as _dateutil_tzstr) - -from dateutil.relativedelta import relativedelta -from dateutil.parser import DEFAULTPARSER - -from pandas.compat import (parse_date, string_types, iteritems, - StringIO, callable) +from pandas.compat import iteritems, callable import operator import collections @@ -97,9 +80,6 @@ import_array() # import datetime C API PyDateTime_IMPORT -# in numpy 1.7, will prob need the following: -# numpy_pydatetime_import - cdef int64_t NPY_NAT = util.get_nat() iNaT = NPY_NAT @@ -318,7 +298,7 @@ class Timestamp(_Timestamp): tz : string / timezone object, default None Timezone to localize to """ - if isinstance(tz, string_types): + if util.is_string_object(tz): tz = maybe_get_tz(tz) return cls(datetime.now(tz)) @@ -613,7 +593,7 @@ class Timestamp(_Timestamp): if self.tzinfo is None: # tz naive, localize tz = maybe_get_tz(tz) - if not isinstance(ambiguous, string_types): + if not util.is_string_object(ambiguous): ambiguous = [ambiguous] value = tz_localize_to_utc(np.array([self.value], dtype='i8'), tz, ambiguous=ambiguous, errors=errors)[0] @@ -2426,8 +2406,8 @@ class Timedelta(_Timedelta): raise TypeError( "Invalid type {0}. 
Must be int or float.".format(type(v))) - kwargs = dict([ (k, _to_py_int_float(v)) - for k, v in iteritems(kwargs) ]) + kwargs = dict([(k, _to_py_int_float(v)) + for k, v in iteritems(kwargs)]) try: nano = kwargs.pop('nanoseconds', 0) @@ -3682,7 +3662,7 @@ def tz_localize_to_utc(ndarray[int64_t] vals, object tz, object ambiguous=None, result[i] = v - delta return result - if isinstance(ambiguous, string_types): + if util.is_string_object(ambiguous): if ambiguous == 'infer': infer_dst = True elif ambiguous == 'NaT': diff --git a/pandas/core/tools/datetimes.py b/pandas/core/tools/datetimes.py index 97ac8445faf4c..8fe28aa400613 100644 --- a/pandas/core/tools/datetimes.py +++ b/pandas/core/tools/datetimes.py @@ -8,6 +8,7 @@ from pandas._libs.tslibs import parsing from pandas._libs.tslibs.parsing import ( # noqa parse_time_string, + DateParseError, _format_is_iso, _guess_datetime_format) @@ -561,7 +562,6 @@ def calc_with_mask(carg, mask): return None -DateParseError = tslib.DateParseError normalize_date = tslib.normalize_date # Fixed time formats for time parsing diff --git a/pandas/tests/indexes/datetimes/test_tools.py b/pandas/tests/indexes/datetimes/test_tools.py index bdfe6b5b09e45..b8ce1f0af6ea8 100644 --- a/pandas/tests/indexes/datetimes/test_tools.py +++ b/pandas/tests/indexes/datetimes/test_tools.py @@ -1335,13 +1335,13 @@ def test_parsers_monthfreq(self): def test_parsers_quarterly_with_freq(self): msg = ('Incorrect quarterly string is given, quarter ' 'must be between 1 and 4: 2013Q5') - with tm.assert_raises_regex(tslib.DateParseError, msg): + with tm.assert_raises_regex(parsing.DateParseError, msg): tools.parse_time_string('2013Q5') # GH 5418 msg = ('Unable to retrieve month information from given freq: ' 'INVLD-L-DEC-SAT') - with tm.assert_raises_regex(tslib.DateParseError, msg): + with tm.assert_raises_regex(parsing.DateParseError, msg): tools.parse_time_string('2013Q1', freq='INVLD-L-DEC-SAT') cases = {('2013Q2', None): datetime(2013, 4, 1), diff --git a/pandas/tests/scalar/test_period.py b/pandas/tests/scalar/test_period.py index c17a216df44cb..28d85c52604d9 100644 --- a/pandas/tests/scalar/test_period.py +++ b/pandas/tests/scalar/test_period.py @@ -11,6 +11,7 @@ from pandas.compat.numpy import np_datetime64_compat from pandas._libs import tslib, period as libperiod +from pandas._libs.tslibs.parsing import DateParseError from pandas import Period, Timestamp, offsets from pandas.tseries.frequencies import DAYS, MONTHS @@ -886,8 +887,8 @@ def test_constructor_infer_freq(self): def test_badinput(self): pytest.raises(ValueError, Period, '-2000', 'A') - pytest.raises(tslib.DateParseError, Period, '0', 'A') - pytest.raises(tslib.DateParseError, Period, '1/1/-2000', 'A') + pytest.raises(DateParseError, Period, '0', 'A') + pytest.raises(DateParseError, Period, '1/1/-2000', 'A') def test_multiples(self): result1 = Period('1989', freq='2A')
Start checking off the todo list in #17652 - [ ] closes #xxxx - [ ] tests added / passed - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/17713
2017-09-29T01:30:58Z
2017-09-29T10:04:42Z
2017-09-29T10:04:42Z
2017-10-30T16:23:15Z
Add missing file to _pyxfiles, delete commented-out code
diff --git a/setup.py b/setup.py index d25ae4a5fb45c..793aa089e708f 100755 --- a/setup.py +++ b/setup.py @@ -341,6 +341,7 @@ class CheckSDist(sdist_class): 'pandas/_libs/window.pyx', 'pandas/_libs/sparse.pyx', 'pandas/_libs/parsers.pyx', + 'pandas/_libs/tslibs/strptime.pyx', 'pandas/_libs/tslibs/timezones.pyx', 'pandas/_libs/tslibs/frequencies.pyx', 'pandas/_libs/tslibs/parsing.pyx', @@ -349,14 +350,6 @@ class CheckSDist(sdist_class): def initialize_options(self): sdist_class.initialize_options(self) - ''' - self._pyxfiles = [] - for root, dirs, files in os.walk('pandas'): - for f in files: - if f.endswith('.pyx'): - self._pyxfiles.append(pjoin(root, f)) - ''' - def run(self): if 'cython' in cmdclass: self.run_command('cython')
- [ ] closes #xxxx - [ ] tests added / passed - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/17712
2017-09-28T23:54:26Z
2017-09-29T10:06:00Z
2017-09-29T10:06:00Z
2017-10-30T16:23:16Z
remove unused time conversion funcs
diff --git a/pandas/_libs/index.pyx b/pandas/_libs/index.pyx index 884117799ec5b..c96251a0293d6 100644 --- a/pandas/_libs/index.pyx +++ b/pandas/_libs/index.pyx @@ -13,29 +13,19 @@ cimport util import numpy as np -cimport tslib +from tslib cimport _to_i8 from hashtable cimport HashTable -from tslibs.timezones cimport is_utc, get_utcoffset -from pandas._libs import tslib, algos, hashtable as _hash +from pandas._libs import algos, hashtable as _hash from pandas._libs.tslib import Timestamp, Timedelta from datetime import datetime, timedelta -from datetime cimport (get_datetime64_value, _pydatetime_to_dts, - pandas_datetimestruct) - from cpython cimport PyTuple_Check, PyList_Check -cdef extern from "datetime.h": - bint PyDateTime_Check(object o) - void PyDateTime_IMPORT() - cdef int64_t iNaT = util.get_nat() -PyDateTime_IMPORT - cdef extern from "Python.h": int PySlice_Check(object) @@ -540,23 +530,6 @@ cpdef convert_scalar(ndarray arr, object value): return value -cdef inline _to_i8(object val): - cdef pandas_datetimestruct dts - try: - return val.value - except AttributeError: - if util.is_datetime64_object(val): - return get_datetime64_value(val) - elif PyDateTime_Check(val): - tzinfo = getattr(val, 'tzinfo', None) - # Save the original date value so we can get the utcoffset from it. - ival = _pydatetime_to_dts(val, &dts) - if tzinfo is not None and not is_utc(tzinfo): - offset = get_utcoffset(tzinfo, val) - ival -= tslib._delta_to_nanoseconds(offset) - return ival - return val - cdef class MultiIndexObjectEngine(ObjectEngine): """ diff --git a/pandas/_libs/lib.pyx b/pandas/_libs/lib.pyx index 01548e17d39ab..503badd0ca8bc 100644 --- a/pandas/_libs/lib.pyx +++ b/pandas/_libs/lib.pyx @@ -45,19 +45,11 @@ cdef double NaN = <double> np.NaN cdef double nan = NaN cdef double NAN = nan -from datetime import datetime as pydatetime - # this is our tseries.pxd from datetime cimport ( get_timedelta64_value, get_datetime64_value, npy_timedelta, npy_datetime, PyDateTime_Check, PyDate_Check, PyTime_Check, PyDelta_Check, - PyDateTime_GET_YEAR, - PyDateTime_GET_MONTH, - PyDateTime_GET_DAY, - PyDateTime_DATE_GET_HOUR, - PyDateTime_DATE_GET_MINUTE, - PyDateTime_DATE_GET_SECOND, PyDateTime_IMPORT) @@ -132,61 +124,6 @@ def memory_usage_of_objects(ndarray[object, ndim=1] arr): s += arr[i].__sizeof__() return s -#---------------------------------------------------------------------- -# datetime / io related - -cdef int _EPOCH_ORD = 719163 - -from datetime import date as pydate - -cdef inline int64_t gmtime(object date): - cdef int y, m, d, h, mn, s, days - - y = PyDateTime_GET_YEAR(date) - m = PyDateTime_GET_MONTH(date) - d = PyDateTime_GET_DAY(date) - h = PyDateTime_DATE_GET_HOUR(date) - mn = PyDateTime_DATE_GET_MINUTE(date) - s = PyDateTime_DATE_GET_SECOND(date) - - days = pydate(y, m, 1).toordinal() - _EPOCH_ORD + d - 1 - return ((<int64_t> (((days * 24 + h) * 60 + mn))) * 60 + s) * 1000 - - -cpdef object to_datetime(int64_t timestamp): - return pydatetime.utcfromtimestamp(timestamp / 1000.0) - - -cpdef object to_timestamp(object dt): - return gmtime(dt) - - -def array_to_timestamp(ndarray[object, ndim=1] arr): - cdef int i, n - cdef ndarray[int64_t, ndim=1] result - - n = len(arr) - result = np.empty(n, dtype=np.int64) - - for i from 0 <= i < n: - result[i] = gmtime(arr[i]) - - return result - - -def time64_to_datetime(ndarray[int64_t, ndim=1] arr): - cdef int i, n - cdef ndarray[object, ndim=1] result - - n = len(arr) - result = np.empty(n, dtype=object) - - for i from 0 <= i < n: - result[i] = 
to_datetime(arr[i]) - - return result - - #---------------------------------------------------------------------- # isnull / notnull related diff --git a/pandas/_libs/tslib.pxd b/pandas/_libs/tslib.pxd index ee8adfe67bb5e..147320b108cc8 100644 --- a/pandas/_libs/tslib.pxd +++ b/pandas/_libs/tslib.pxd @@ -4,3 +4,5 @@ cdef convert_to_tsobject(object, object, object, bint, bint) cpdef convert_to_timedelta64(object, object) cdef bint _nat_scalar_rules[6] cdef bint _check_all_nulls(obj) + +cdef _to_i8(object val) diff --git a/pandas/_libs/tslib.pyx b/pandas/_libs/tslib.pyx index 4c34d0fcb1e5f..9730456b0c4c4 100644 --- a/pandas/_libs/tslib.pyx +++ b/pandas/_libs/tslib.pyx @@ -3436,7 +3436,18 @@ def cast_to_nanoseconds(ndarray arr): return result -def pydt_to_i8(object pydt): +cdef inline _to_i8(object val): + cdef pandas_datetimestruct dts + try: + return val.value + except AttributeError: + if is_datetime64_object(val): + return get_datetime64_value(val) + elif PyDateTime_Check(val): + return Timestamp(val).value + return val + +cpdef pydt_to_i8(object pydt): """ Convert to int64 representation compatible with numpy datetime64; converts to UTC diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py index ea69116ec363d..ca1b4d031d3ce 100644 --- a/pandas/io/pytables.py +++ b/pandas/io/pytables.py @@ -25,7 +25,7 @@ import numpy as np from pandas import (Series, DataFrame, Panel, Panel4D, Index, - MultiIndex, Int64Index, isna, concat, + MultiIndex, Int64Index, isna, concat, to_datetime, SparseSeries, SparseDataFrame, PeriodIndex, DatetimeIndex, TimedeltaIndex) from pandas.core import config @@ -4529,7 +4529,7 @@ def _unconvert_index(data, kind, encoding=None): def _unconvert_index_legacy(data, kind, legacy=False, encoding=None): kind = _ensure_decoded(kind) if kind == u('datetime'): - index = lib.time64_to_datetime(data) + index = to_datetime(data) elif kind in (u('integer')): index = np.asarray(data, dtype=object) elif kind in (u('string')):
Takes the place of #17708, removing the functions instead of moving them. `pydt_to_i8` had to be made `cpdef` because the function it replaces in `_libs.index` was `cdef`'d. - [ ] closes #xxxx - [ ] tests added / passed - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
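For illustration, the behavioral core of the rewritten `_to_i8` is that all tz handling now goes through `Timestamp`. A minimal sketch of that equivalence using only public API (values chosen arbitrarily; this is not the compiled function itself):

```python
from datetime import datetime, timedelta, timezone
import pandas as pd

# naive datetime -> nanoseconds since the epoch
naive = datetime(2017, 9, 28, 12, 0)
assert pd.Timestamp(naive).value == pd.Timestamp('2017-09-28 12:00').value

# tz-aware datetime: Timestamp folds the utcoffset in, replacing the
# manual `ival -= _delta_to_nanoseconds(offset)` arithmetic that was removed
aware = datetime(2017, 9, 28, 12, 0, tzinfo=timezone(timedelta(hours=2)))
assert pd.Timestamp(aware).value == pd.Timestamp('2017-09-28 10:00').value
```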
https://api.github.com/repos/pandas-dev/pandas/pulls/17711
2017-09-28T21:41:04Z
2017-10-01T20:47:44Z
2017-10-01T20:47:44Z
2017-10-30T16:23:08Z
Delay import
diff --git a/.travis.yml b/.travis.yml index 034e2a32bb75c..fe1a2950dbf08 100644 --- a/.travis.yml +++ b/.travis.yml @@ -121,6 +121,8 @@ script: - ci/script_single.sh - ci/script_multi.sh - ci/lint.sh + - echo "checking imports" + - source activate pandas && python ci/check_imports.py - echo "script done" after_success: diff --git a/ci/check_imports.py b/ci/check_imports.py new file mode 100644 index 0000000000000..a83436e7d258c --- /dev/null +++ b/ci/check_imports.py @@ -0,0 +1,36 @@ +""" +Check that certain modules are not loaded by `import pandas` +""" +import sys + +blacklist = { + 'bs4', + 'html5lib', + 'ipython', + 'jinja2', + 'lxml', + 'matplotlib', + 'numexpr', + 'openpyxl', + 'py', + 'pytest', + 's3fs', + 'scipy', + 'tables', + 'xlrd', + 'xlsxwriter', + 'xlwt', +} + + +def main(): + import pandas # noqa + + modules = set(x.split('.')[0] for x in sys.modules) + imported = modules & blacklist + if modules & blacklist: + sys.exit("Imported {}".format(imported)) + + +if __name__ == '__main__': + main() diff --git a/doc/source/whatsnew/v0.21.0.txt b/doc/source/whatsnew/v0.21.0.txt index c8a0a6bff5cc7..6b968a3f1ae32 100644 --- a/doc/source/whatsnew/v0.21.0.txt +++ b/doc/source/whatsnew/v0.21.0.txt @@ -164,6 +164,7 @@ Other Enhancements - :func:`read_excel` raises ``ImportError`` with a better message if ``xlrd`` is not installed. (:issue:`17613`) - :func:`read_json` now accepts a ``chunksize`` parameter that can be used when ``lines=True``. If ``chunksize`` is passed, read_json now returns an iterator which reads in ``chunksize`` lines with each iteration. (:issue:`17048`) - :meth:`DataFrame.assign` will preserve the original order of ``**kwargs`` for Python 3.6+ users instead of sorting the column names +- Improved the import time of pandas by about 2.25x (:issue:`16764`) .. _whatsnew_0210.api_breaking: @@ -559,6 +560,8 @@ Other API Changes - :func:`to_datetime` when passed a tz-aware ``origin=`` kwarg will now raise a more informative ``ValueError`` rather than a ``TypeError`` (:issue:`16842`) - Renamed non-functional ``index`` to ``index_col`` in :func:`read_stata` to improve API consistency (:issue:`16342`) - Bug in :func:`DataFrame.drop` caused boolean labels ``False`` and ``True`` to be treated as labels 0 and 1 respectively when dropping indices from a numeric index. This will now raise a ValueError (:issue:`16877`) +- Pandas no longer registers matplotlib converters on import. The converters + will be registered and used when the first plot is drawn (:issue:`17710`) ..
_whatsnew_0210.deprecations: diff --git a/pandas/core/computation/__init__.py b/pandas/core/computation/__init__.py index e13faf890d1f8..e69de29bb2d1d 100644 --- a/pandas/core/computation/__init__.py +++ b/pandas/core/computation/__init__.py @@ -1,23 +0,0 @@ - -import warnings -from distutils.version import LooseVersion - -_NUMEXPR_INSTALLED = False -_MIN_NUMEXPR_VERSION = "2.4.6" - -try: - import numexpr as ne - ver = ne.__version__ - _NUMEXPR_INSTALLED = ver >= LooseVersion(_MIN_NUMEXPR_VERSION) - - if not _NUMEXPR_INSTALLED: - warnings.warn( - "The installed version of numexpr {ver} is not supported " - "in pandas and will be not be used\nThe minimum supported " - "version is {min_ver}\n".format( - ver=ver, min_ver=_MIN_NUMEXPR_VERSION), UserWarning) - -except ImportError: # pragma: no cover - pass - -__all__ = ['_NUMEXPR_INSTALLED'] diff --git a/pandas/core/computation/check.py b/pandas/core/computation/check.py new file mode 100644 index 0000000000000..bb8cc74bad3c2 --- /dev/null +++ b/pandas/core/computation/check.py @@ -0,0 +1,22 @@ +import warnings +from distutils.version import LooseVersion + +_NUMEXPR_INSTALLED = False +_MIN_NUMEXPR_VERSION = "2.4.6" + +try: + import numexpr as ne + ver = ne.__version__ + _NUMEXPR_INSTALLED = ver >= LooseVersion(_MIN_NUMEXPR_VERSION) + + if not _NUMEXPR_INSTALLED: + warnings.warn( + "The installed version of numexpr {ver} is not supported " + "in pandas and will be not be used\nThe minimum supported " + "version is {min_ver}\n".format( + ver=ver, min_ver=_MIN_NUMEXPR_VERSION), UserWarning) + +except ImportError: # pragma: no cover + pass + +__all__ = ['_NUMEXPR_INSTALLED'] diff --git a/pandas/core/computation/eval.py b/pandas/core/computation/eval.py index d391764794c1c..a5df6aea055ab 100644 --- a/pandas/core/computation/eval.py +++ b/pandas/core/computation/eval.py @@ -5,8 +5,6 @@ import tokenize from pandas.io.formats.printing import pprint_thing -from pandas.core.computation import _NUMEXPR_INSTALLED -from pandas.core.computation.expr import Expr, _parsers, tokenize_string from pandas.core.computation.scope import _ensure_scope from pandas.compat import string_types from pandas.core.computation.engines import _engines @@ -32,6 +30,7 @@ def _check_engine(engine): string engine """ + from pandas.core.computation.check import _NUMEXPR_INSTALLED if engine is None: if _NUMEXPR_INSTALLED: @@ -69,6 +68,8 @@ def _check_parser(parser): KeyError * If an invalid parser is passed """ + from pandas.core.computation.expr import _parsers + if parser not in _parsers: raise KeyError('Invalid parser {parser!r} passed, valid parsers are' ' {valid}'.format(parser=parser, valid=_parsers.keys())) @@ -129,6 +130,8 @@ def _convert_expression(expr): def _check_for_locals(expr, stack_level, parser): + from pandas.core.computation.expr import tokenize_string + at_top_of_stack = stack_level == 0 not_pandas_parser = parser != 'pandas' @@ -252,6 +255,7 @@ def eval(expr, parser='pandas', engine=None, truediv=True, pandas.DataFrame.query pandas.DataFrame.eval """ + from pandas.core.computation.expr import Expr inplace = validate_bool_kwarg(inplace, "inplace") diff --git a/pandas/core/computation/expressions.py b/pandas/core/computation/expressions.py index 8ddc625887a51..2196fb5917a44 100644 --- a/pandas/core/computation/expressions.py +++ b/pandas/core/computation/expressions.py @@ -9,7 +9,7 @@ import warnings import numpy as np from pandas.core.common import _values_from_object -from pandas.core.computation import _NUMEXPR_INSTALLED +from pandas.core.computation.check 
import _NUMEXPR_INSTALLED from pandas.core.config import get_option if _NUMEXPR_INSTALLED: diff --git a/pandas/core/config_init.py b/pandas/core/config_init.py index 5652424a8f75b..33531e80449d8 100644 --- a/pandas/core/config_init.py +++ b/pandas/core/config_init.py @@ -437,34 +437,36 @@ def use_inf_as_na_cb(key): writer_engine_doc = """ : string The default Excel writer engine for '{ext}' files. Available options: - '{default}' (the default){others}. -""" - -with cf.config_prefix('io.excel'): - # going forward, will be additional writers - for ext, options in [('xls', ['xlwt']), ('xlsm', ['openpyxl'])]: - default = options.pop(0) - if options: - options = " " + ", ".join(options) - else: - options = "" - doc = writer_engine_doc.format(ext=ext, default=default, - others=options) - cf.register_option(ext + '.writer', default, doc, validator=str) - - def _register_xlsx(engine, other): - others = ", '{other}'".format(other=other) - doc = writer_engine_doc.format(ext='xlsx', default=engine, - others=others) - cf.register_option('xlsx.writer', engine, doc, validator=str) - - try: - # better memory footprint - import xlsxwriter # noqa - _register_xlsx('xlsxwriter', 'openpyxl') - except ImportError: - # fallback - _register_xlsx('openpyxl', 'xlsxwriter') + auto, {others}. +""" + +_xls_options = ['xlwt'] +_xlsm_options = ['openpyxl'] +_xlsx_options = ['openpyxl', 'xlsxwriter'] + + +with cf.config_prefix("io.excel.xls"): + cf.register_option("writer", "auto", + writer_engine_doc.format( + ext='xls', + others=', '.join(_xls_options)), + validator=str) + +with cf.config_prefix("io.excel.xlsm"): + cf.register_option("writer", "auto", + writer_engine_doc.format( + ext='xlsm', + others=', '.join(_xlsm_options)), + validator=str) + + +with cf.config_prefix("io.excel.xlsx"): + cf.register_option("writer", "auto", + writer_engine_doc.format( + ext='xlsx', + others=', '.join(_xlsx_options)), + validator=str) + # Set up the io.parquet specific configuration. 
parquet_engine_doc = """ diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 5d439f88bca15..01e83821d4524 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -76,9 +76,7 @@ create_block_manager_from_blocks) from pandas.core.series import Series from pandas.core.categorical import Categorical -import pandas.core.computation.expressions as expressions import pandas.core.algorithms as algorithms -from pandas.core.computation.eval import eval as _eval from pandas.compat import (range, map, zip, lrange, lmap, lzip, StringIO, u, OrderedDict, raise_with_traceback) from pandas import compat @@ -2296,6 +2294,8 @@ def eval(self, expr, inplace=False, **kwargs): >>> df.eval('a + b') >>> df.eval('c = a + b') """ + from pandas.core.computation.eval import eval as _eval + inplace = validate_bool_kwarg(inplace, 'inplace') resolvers = kwargs.pop('resolvers', None) kwargs['level'] = kwargs.pop('level', 0) + 1 @@ -3840,6 +3840,7 @@ def _combine_const(self, other, func, raise_on_error=True, try_cast=True): def _compare_frame_evaluate(self, other, func, str_rep, try_cast=True): + import pandas.core.computation.expressions as expressions # unique if self.columns.is_unique: @@ -3992,6 +3993,7 @@ def combine_first(self, other): ------- combined : DataFrame """ + import pandas.core.computation.expressions as expressions def combiner(x, y, needs_i8_conversion=False): x_values = x.values if hasattr(x, 'values') else x @@ -4027,6 +4029,7 @@ def update(self, other, join='left', overwrite=True, filter_func=None, If True, will raise an error if the DataFrame and other both contain data in the same place. """ + import pandas.core.computation.expressions as expressions # TODO: Support other joins if join != 'left': # pragma: no cover raise NotImplementedError("Only left join is supported") diff --git a/pandas/core/internals.py b/pandas/core/internals.py index 9e348819ce5a3..12ac7a5fd9f20 100644 --- a/pandas/core/internals.py +++ b/pandas/core/internals.py @@ -66,7 +66,6 @@ from pandas._libs.tslib import Timedelta from pandas._libs.lib import BlockPlacement -import pandas.core.computation.expressions as expressions from pandas.util._decorators import cache_readonly from pandas.util._validators import validate_bool_kwarg from pandas import compat @@ -1395,6 +1394,8 @@ def where(self, other, cond, align=True, raise_on_error=True, ------- a new block(s), the result of the func """ + import pandas.core.computation.expressions as expressions + values = self.values orig_other = other if transpose: diff --git a/pandas/core/ops.py b/pandas/core/ops.py index d37acf48ed9c2..506b9267f32b4 100644 --- a/pandas/core/ops.py +++ b/pandas/core/ops.py @@ -16,7 +16,6 @@ from pandas import compat from pandas.util._decorators import Appender -import pandas.core.computation.expressions as expressions from pandas.compat import bind_method import pandas.core.missing as missing @@ -668,8 +667,9 @@ def _arith_method_SERIES(op, name, str_rep, fill_zeros=None, default_axis=None, Wrapper function for Series arithmetic operations, to avoid code duplication. 
""" - def na_op(x, y): + import pandas.core.computation.expressions as expressions + try: result = expressions.evaluate(op, str_rep, x, y, raise_on_error=True, **eval_kwargs) @@ -1193,6 +1193,8 @@ def to_series(right): def _arith_method_FRAME(op, name, str_rep=None, default_axis='columns', fill_zeros=None, **eval_kwargs): def na_op(x, y): + import pandas.core.computation.expressions as expressions + try: result = expressions.evaluate(op, str_rep, x, y, raise_on_error=True, **eval_kwargs) @@ -1349,6 +1351,8 @@ def _arith_method_PANEL(op, name, str_rep=None, fill_zeros=None, # copied from Series na_op above, but without unnecessary branch for # non-scalar def na_op(x, y): + import pandas.core.computation.expressions as expressions + try: result = expressions.evaluate(op, str_rep, x, y, raise_on_error=True, **eval_kwargs) @@ -1378,6 +1382,8 @@ def f(self, other): def _comp_method_PANEL(op, name, str_rep=None, masker=False): def na_op(x, y): + import pandas.core.computation.expressions as expressions + try: result = expressions.evaluate(op, str_rep, x, y, raise_on_error=True) diff --git a/pandas/core/panel.py b/pandas/core/panel.py index a3e35492ad9af..68733a3a8b94e 100644 --- a/pandas/core/panel.py +++ b/pandas/core/panel.py @@ -15,7 +15,6 @@ is_string_like, is_scalar) from pandas.core.dtypes.missing import notna -import pandas.core.computation.expressions as expressions import pandas.core.common as com import pandas.core.ops as ops import pandas.core.missing as missing @@ -1500,6 +1499,8 @@ def _add_aggregate_operations(cls, use_numexpr=True): def _panel_arith_method(op, name, str_rep=None, default_axis=None, fill_zeros=None, **eval_kwargs): def na_op(x, y): + import pandas.core.computation.expressions as expressions + try: result = expressions.evaluate(op, str_rep, x, y, raise_on_error=True, diff --git a/pandas/io/common.py b/pandas/io/common.py index 69a7e69ea724b..534c1e0671150 100644 --- a/pandas/io/common.py +++ b/pandas/io/common.py @@ -19,13 +19,6 @@ # gh-12665: Alias for now and remove later. CParserError = ParserError - -try: - from s3fs import S3File - need_text_wrapping = (BytesIO, S3File) -except ImportError: - need_text_wrapping = (BytesIO,) - # common NA values # no longer excluding inf representations # '1.#INF','-1.#INF', '1.#INF000000', @@ -34,19 +27,6 @@ 'N/A', 'n/a', 'NA', '#NA', 'NULL', 'null', 'NaN', '-NaN', 'nan', '-nan', '' ]) -try: - import pathlib - _PATHLIB_INSTALLED = True -except ImportError: - _PATHLIB_INSTALLED = False - - -try: - from py.path import local as LocalPath - _PY_PATH_INSTALLED = True -except: - _PY_PATH_INSTALLED = False - if compat.PY3: from urllib.request import urlopen, pathname2url @@ -167,6 +147,18 @@ def _stringify_path(filepath_or_buffer): Any other object is passed through unchanged, which includes bytes, strings, buffers, or anything else that's not even path-like. """ + try: + import pathlib + _PATHLIB_INSTALLED = True + except ImportError: + _PATHLIB_INSTALLED = False + + try: + from py.path import local as LocalPath + _PY_PATH_INSTALLED = True + except ImportError: + _PY_PATH_INSTALLED = False + if hasattr(filepath_or_buffer, '__fspath__'): return filepath_or_buffer.__fspath__() if _PATHLIB_INSTALLED and isinstance(filepath_or_buffer, pathlib.Path): @@ -322,6 +314,11 @@ def _get_handle(path_or_buf, mode, encoding=None, compression=None, handles : list of file-like objects A list of file-like object that were openned in this function. 
""" + try: + from s3fs import S3File + need_text_wrapping = (BytesIO, S3File) + except ImportError: + need_text_wrapping = (BytesIO,) handles = list() f = path_or_buf diff --git a/pandas/io/excel.py b/pandas/io/excel.py index afecd76c498ef..41e3b5283a532 100644 --- a/pandas/io/excel.py +++ b/pandas/io/excel.py @@ -170,6 +170,16 @@ def register_writer(klass): _writer_extensions.append(ext) +def _get_default_writer(ext): + _default_writers = {'xlsx': 'openpyxl', 'xlsm': 'openpyxl', 'xls': 'xlwt'} + try: + import xlsxwriter # noqa + _default_writers['xlsx'] = 'xlsxwriter' + except ImportError: + pass + return _default_writers[ext] + + def get_writer(engine_name): if engine_name == 'openpyxl': try: @@ -690,8 +700,10 @@ class ExcelWriter(object): # ExcelWriter. def __new__(cls, path, engine=None, **kwargs): # only switch class if generic(ExcelWriter) + if issubclass(cls, ExcelWriter): - if engine is None: + if engine is None or (isinstance(engine, string_types) and + engine == 'auto'): if isinstance(path, string_types): ext = os.path.splitext(path)[-1][1:] else: @@ -700,6 +712,8 @@ def __new__(cls, path, engine=None, **kwargs): try: engine = config.get_option('io.excel.{ext}.writer' .format(ext=ext)) + if engine == 'auto': + engine = _get_default_writer(ext) except KeyError: error = ValueError("No engine for filetype: '{ext}'" .format(ext=ext)) diff --git a/pandas/plotting/__init__.py b/pandas/plotting/__init__.py index c3cbedb0fc28c..8f98e297e3e66 100644 --- a/pandas/plotting/__init__.py +++ b/pandas/plotting/__init__.py @@ -4,12 +4,6 @@ # flake8: noqa -try: # mpl optional - from pandas.plotting import _converter - _converter.register() # needs to override so set_xlim works with str/number -except ImportError: - pass - from pandas.plotting._misc import (scatter_matrix, radviz, andrews_curves, bootstrap_plot, parallel_coordinates, lag_plot, diff --git a/pandas/plotting/_core.py b/pandas/plotting/_core.py index aa919d600ec52..211d9777e7515 100644 --- a/pandas/plotting/_core.py +++ b/pandas/plotting/_core.py @@ -33,19 +33,23 @@ from pandas.plotting._compat import (_mpl_ge_1_3_1, _mpl_ge_1_5_0, _mpl_ge_2_0_0) -from pandas.plotting._style import (mpl_stylesheet, plot_params, +from pandas.plotting._style import (plot_params, _get_standard_colors) from pandas.plotting._tools import (_subplots, _flatten, table, _handle_shared_axes, _get_all_lines, _get_xlim, _set_ticks_props, format_date_labels) +_registered = False -if _mpl_ge_1_5_0(): - # Compat with mp 1.5, which uses cycler. 
- import cycler - colors = mpl_stylesheet.pop('axes.color_cycle') - mpl_stylesheet['axes.prop_cycle'] = cycler.cycler('color', colors) + +def _setup(): + # delay the import of matplotlib until necessary + global _registered + if not _registered: + from pandas.plotting import _converter + _converter.register() + _registered = True def _get_standard_kind(kind): @@ -95,6 +99,7 @@ def __init__(self, data, kind=None, by=None, subplots=False, sharex=None, secondary_y=False, colormap=None, table=False, layout=None, **kwds): + _setup() self.data = data self.by = by @@ -2056,6 +2061,7 @@ def boxplot_frame(self, column=None, by=None, ax=None, fontsize=None, rot=0, grid=True, figsize=None, layout=None, return_type=None, **kwds): import matplotlib.pyplot as plt + _setup() ax = boxplot(self, column=column, by=by, ax=ax, fontsize=fontsize, grid=grid, rot=rot, figsize=figsize, layout=layout, return_type=return_type, **kwds) @@ -2151,7 +2157,7 @@ def hist_frame(data, column=None, by=None, grid=True, xlabelsize=None, kwds : other plotting keyword arguments To be passed to hist function """ - + _setup() if by is not None: axes = grouped_hist(data, column=column, by=by, ax=ax, grid=grid, figsize=figsize, sharex=sharex, sharey=sharey, @@ -2348,6 +2354,7 @@ def boxplot_frame_groupby(grouped, subplots=True, column=None, fontsize=None, >>> grouped = df.unstack(level='lvl1').groupby(level=0, axis=1) >>> boxplot_frame_groupby(grouped, subplots=False) """ + _setup() if subplots is True: naxes = len(grouped) fig, axes = _subplots(naxes=naxes, squeeze=False, diff --git a/pandas/plotting/_style.py b/pandas/plotting/_style.py index 8cb4e30e0d91c..f1d53da5f1396 100644 --- a/pandas/plotting/_style.py +++ b/pandas/plotting/_style.py @@ -14,73 +14,6 @@ from pandas.plotting._compat import _mpl_ge_2_0_0 -# Extracted from https://gist.github.com/huyng/816622 -# this is the rcParams set when setting display.with_mpl_style -# to True.
-mpl_stylesheet = { - 'axes.axisbelow': True, - 'axes.color_cycle': ['#348ABD', - '#7A68A6', - '#A60628', - '#467821', - '#CF4457', - '#188487', - '#E24A33'], - 'axes.edgecolor': '#bcbcbc', - 'axes.facecolor': '#eeeeee', - 'axes.grid': True, - 'axes.labelcolor': '#555555', - 'axes.labelsize': 'large', - 'axes.linewidth': 1.0, - 'axes.titlesize': 'x-large', - 'figure.edgecolor': 'white', - 'figure.facecolor': 'white', - 'figure.figsize': (6.0, 4.0), - 'figure.subplot.hspace': 0.5, - 'font.family': 'monospace', - 'font.monospace': ['Andale Mono', - 'Nimbus Mono L', - 'Courier New', - 'Courier', - 'Fixed', - 'Terminal', - 'monospace'], - 'font.size': 10, - 'interactive': True, - 'keymap.all_axes': ['a'], - 'keymap.back': ['left', 'c', 'backspace'], - 'keymap.forward': ['right', 'v'], - 'keymap.fullscreen': ['f'], - 'keymap.grid': ['g'], - 'keymap.home': ['h', 'r', 'home'], - 'keymap.pan': ['p'], - 'keymap.save': ['s'], - 'keymap.xscale': ['L', 'k'], - 'keymap.yscale': ['l'], - 'keymap.zoom': ['o'], - 'legend.fancybox': True, - 'lines.antialiased': True, - 'lines.linewidth': 1.0, - 'patch.antialiased': True, - 'patch.edgecolor': '#EEEEEE', - 'patch.facecolor': '#348ABD', - 'patch.linewidth': 0.5, - 'toolbar': 'toolbar2', - 'xtick.color': '#555555', - 'xtick.direction': 'in', - 'xtick.major.pad': 6.0, - 'xtick.major.size': 0.0, - 'xtick.minor.pad': 6.0, - 'xtick.minor.size': 0.0, - 'ytick.color': '#555555', - 'ytick.direction': 'in', - 'ytick.major.pad': 6.0, - 'ytick.major.size': 0.0, - 'ytick.minor.pad': 6.0, - 'ytick.minor.size': 0.0 -} - - def _get_standard_colors(num_colors=None, colormap=None, color_type='default', color=None): import matplotlib.pyplot as plt diff --git a/pandas/tests/computation/test_compat.py b/pandas/tests/computation/test_compat.py index ed569625177d3..af39ee9815313 100644 --- a/pandas/tests/computation/test_compat.py +++ b/pandas/tests/computation/test_compat.py @@ -5,13 +5,13 @@ from pandas.core.computation.engines import _engines import pandas.core.computation.expr as expr -from pandas.core.computation import _MIN_NUMEXPR_VERSION +from pandas.core.computation.check import _MIN_NUMEXPR_VERSION def test_compat(): # test we have compat with our version of nu - from pandas.core.computation import _NUMEXPR_INSTALLED + from pandas.core.computation.check import _NUMEXPR_INSTALLED try: import numexpr as ne ver = ne.__version__ diff --git a/pandas/tests/frame/test_query_eval.py b/pandas/tests/frame/test_query_eval.py index f0f1a2df27e93..a6c36792ef074 100644 --- a/pandas/tests/frame/test_query_eval.py +++ b/pandas/tests/frame/test_query_eval.py @@ -17,7 +17,7 @@ makeCustomDataframe as mkdf) import pandas.util.testing as tm -from pandas.core.computation import _NUMEXPR_INSTALLED +from pandas.core.computation.check import _NUMEXPR_INSTALLED from pandas.tests.frame.common import TestData diff --git a/pandas/util/_tester.py b/pandas/util/_tester.py index aeb4259a9edae..d18467f17ec5b 100644 --- a/pandas/util/_tester.py +++ b/pandas/util/_tester.py @@ -7,21 +7,19 @@ PKG = os.path.dirname(os.path.dirname(__file__)) -try: - import pytest -except ImportError: - def test(): +def test(extra_args=None): + try: + import pytest + except ImportError: raise ImportError("Need pytest>=3.0 to run tests") -else: - def test(extra_args=None): - cmd = ['--skip-slow', '--skip-network'] - if extra_args: - if not isinstance(extra_args, list): - extra_args = [extra_args] - cmd = extra_args - cmd += [PKG] - print("running: pytest {}".format(' '.join(cmd))) - sys.exit(pytest.main(cmd)) + cmd = 
['--skip-slow', '--skip-network'] + if extra_args: + if not isinstance(extra_args, list): + extra_args = [extra_args] + cmd = extra_args + cmd += [PKG] + print("running: pytest {}".format(' '.join(cmd))) + sys.exit(pytest.main(cmd)) __all__ = ['test'] diff --git a/pandas/util/testing.py b/pandas/util/testing.py index c5f73ca0e885b..202c9473eea12 100644 --- a/pandas/util/testing.py +++ b/pandas/util/testing.py @@ -41,8 +41,6 @@ StringIO, PY3 ) -from pandas.core.computation import expressions as expr - from pandas import (bdate_range, CategoricalIndex, Categorical, IntervalIndex, DatetimeIndex, TimedeltaIndex, PeriodIndex, RangeIndex, Index, MultiIndex, @@ -2660,7 +2658,11 @@ def __exit__(self, exc_type, exc_value, traceback): @contextmanager -def use_numexpr(use, min_elements=expr._MIN_ELEMENTS): +def use_numexpr(use, min_elements=None): + from pandas.core.computation import expressions as expr + if min_elements is None: + min_elements = expr._MIN_ELEMENTS + olduse = expr._USE_NUMEXPR oldmin = expr._MIN_ELEMENTS expr.set_use_numexpr(use)
Closes https://github.com/pandas-dev/pandas/issues/16764 Improves import performance by delaying the import of matplotlib, s3fs, pytest, and openpyxl. Also removes our old MPL style sheet; the option was deprecated and removed, but the stylesheet hung around. Master: ``` In [1]: %time import numpy CPU times: user 36.3 ms, sys: 13.2 ms, total: 49.5 ms Wall time: 68.4 ms In [2]: %time import pandas CPU times: user 306 ms, sys: 52.2 ms, total: 358 ms Wall time: 392 ms ``` This PR (head): ``` In [1]: %time import numpy CPU times: user 37.7 ms, sys: 12.4 ms, total: 50 ms Wall time: 69 ms In [2]: %time import pandas CPU times: user 166 ms, sys: 40.9 ms, total: 207 ms Wall time: 245 ms ``` I can shave off more by hardcoding `pandas.__version__`. Maybe that's worthwhile for releases? ``` In [2]: %time import pandas CPU times: user 133 ms, sys: 30 ms, total: 163 ms Wall time: 173 ms ```
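The main saving comes from the register-on-first-plot pattern added to `pandas/plotting/_core.py`. A stripped-down sketch of that pattern, mirroring the diff above (the exact module layout is pandas-internal):

```python
_registered = False


def _setup():
    # Delay the heavy matplotlib-backed import until a plot is actually
    # drawn, so that `import pandas` itself stays cheap.
    global _registered
    if not _registered:
        from pandas.plotting import _converter  # deferred import
        _converter.register()
        _registered = True
```

Every plotting entry point in the diff (the plot class `__init__`, `boxplot_frame`, `hist_frame`, `boxplot_frame_groupby`) then calls `_setup()` first, so the registration cost is paid once, on first use.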
https://api.github.com/repos/pandas-dev/pandas/pulls/17710
2017-09-28T20:16:25Z
2017-10-02T13:32:53Z
2017-10-02T13:32:53Z
2017-10-25T22:28:40Z
BUG: Implement interpolating NaT values in datetime series
diff --git a/doc/source/whatsnew/v0.21.0.txt b/doc/source/whatsnew/v0.21.0.txt index dae93feb48b02..37d67e93b86a7 100644 --- a/doc/source/whatsnew/v0.21.0.txt +++ b/doc/source/whatsnew/v0.21.0.txt @@ -163,6 +163,7 @@ Other Enhancements - :func:`Categorical.rename_categories` now accepts a dict-like argument as `new_categories` and only updates the categories found in that dict. (:issue:`17336`) - :func:`read_excel` raises ``ImportError`` with a better message if ``xlrd`` is not installed. (:issue:`17613`) - :meth:`DataFrame.assign` will preserve the original order of ``**kwargs`` for Python 3.6+ users instead of sorting the column names +- Implement interpolating ``NaT`` values in ``datetime`` series (:issue:`11701`) .. _whatsnew_0210.api_breaking: diff --git a/pandas/core/series.py b/pandas/core/series.py index a05324142b223..31825f2aa3e45 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -19,6 +19,7 @@ is_integer, is_integer_dtype, is_float_dtype, is_extension_type, is_datetimetz, + is_datetime64_dtype, is_datetime64tz_dtype, is_timedelta64_dtype, is_list_like, @@ -36,6 +37,7 @@ maybe_cast_to_datetime, maybe_castable) from pandas.core.dtypes.missing import isna, notna, remove_na_arraylike +from pandas.core.tools.datetimes import to_datetime from pandas.core.common import (is_bool_indexer, _default_index, _asarray_tuplesafe, @@ -2734,6 +2736,15 @@ def from_csv(cls, path, sep=',', parse_dates=True, header=None, return result + def interpolate(self, *args, **kwargs): + if (is_datetime64_dtype(self) or + is_datetime64tz_dtype(self)) and self.isnull().any(): + s2 = self.astype('i8').astype('f8') + s2[self.isnull()] = np.nan + return to_datetime(s2.interpolate(*args, **kwargs)) + else: + return super(Series, self).interpolate(*args, **kwargs) + def to_csv(self, path=None, index=True, sep=",", na_rep='', float_format=None, header=False, index_label=None, mode='w', encoding=None, date_format=None, decimal='.'): diff --git a/pandas/tests/series/test_missing.py b/pandas/tests/series/test_missing.py index 01bf7274fd384..3c74c0a8993c8 100644 --- a/pandas/tests/series/test_missing.py +++ b/pandas/tests/series/test_missing.py @@ -1218,3 +1218,12 @@ def test_series_interpolate_intraday(self): result = ts.reindex(new_index).interpolate(method='time') tm.assert_numpy_array_equal(result.values, exp.values) + + def test_series_interpolate_nat(self): + # GH 11701 + for tz in [None, 'UTC', 'Europe/Paris']: + expected = pd.Series(pd.date_range('2015-01-01', '2015-01-30', tz=tz)) + result = expected.copy() + result[[3, 4, 5, 13, 14, 15]] = pd.NaT + result = result.interpolate() + tm.assert_series_equal(result, expected)
- [ ] closes #11701 - [ ] tests added / passed - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
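The whole trick is numeric round-tripping: view the datetimes as int64 nanoseconds, swap the NaT sentinel for a real NaN, interpolate as floats, and convert back. A self-contained sketch mirroring the diff (valid for the pandas of this era; newer versions are stricter about casting NaT to integer):

```python
import numpy as np
import pandas as pd

s = pd.Series(pd.date_range('2015-01-01', periods=6))
s[[2, 3]] = pd.NaT

# int64 nanoseconds since the epoch, then float so the NaT slots
# (an integer sentinel) can be replaced by NaN
f = s.astype('i8').astype('f8')
f[s.isnull()] = np.nan

result = pd.to_datetime(f.interpolate())
```

Note that for tz-aware input the int64 view is in UTC nanoseconds, so this sketch comes back tz-naive in UTC.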
https://api.github.com/repos/pandas-dev/pandas/pulls/17709
2017-09-28T19:13:19Z
2017-11-11T16:24:10Z
null
2017-11-11T16:24:10Z
Move time conversion funcs to tslibs.conversion
diff --git a/pandas/_libs/index.pyx b/pandas/_libs/index.pyx index 884117799ec5b..9e55f2c3631b5 100644 --- a/pandas/_libs/index.pyx +++ b/pandas/_libs/index.pyx @@ -13,28 +13,19 @@ cimport util import numpy as np -cimport tslib - from hashtable cimport HashTable from tslibs.timezones cimport is_utc, get_utcoffset +from tslibs.conversion cimport _to_i8 from pandas._libs import tslib, algos, hashtable as _hash from pandas._libs.tslib import Timestamp, Timedelta from datetime import datetime, timedelta -from datetime cimport (get_datetime64_value, _pydatetime_to_dts, - pandas_datetimestruct) - from cpython cimport PyTuple_Check, PyList_Check -cdef extern from "datetime.h": - bint PyDateTime_Check(object o) - void PyDateTime_IMPORT() - cdef int64_t iNaT = util.get_nat() -PyDateTime_IMPORT cdef extern from "Python.h": int PySlice_Check(object) @@ -540,23 +531,6 @@ cpdef convert_scalar(ndarray arr, object value): return value -cdef inline _to_i8(object val): - cdef pandas_datetimestruct dts - try: - return val.value - except AttributeError: - if util.is_datetime64_object(val): - return get_datetime64_value(val) - elif PyDateTime_Check(val): - tzinfo = getattr(val, 'tzinfo', None) - # Save the original date value so we can get the utcoffset from it. - ival = _pydatetime_to_dts(val, &dts) - if tzinfo is not None and not is_utc(tzinfo): - offset = get_utcoffset(tzinfo, val) - ival -= tslib._delta_to_nanoseconds(offset) - return ival - return val - cdef class MultiIndexObjectEngine(ObjectEngine): """ diff --git a/pandas/_libs/lib.pyx b/pandas/_libs/lib.pyx index 01548e17d39ab..47acc63cc5742 100644 --- a/pandas/_libs/lib.pyx +++ b/pandas/_libs/lib.pyx @@ -45,8 +45,6 @@ cdef double NaN = <double> np.NaN cdef double nan = NaN cdef double NAN = nan -from datetime import datetime as pydatetime - # this is our tseries.pxd from datetime cimport ( get_timedelta64_value, get_datetime64_value, @@ -134,58 +132,10 @@ def memory_usage_of_objects(ndarray[object, ndim=1] arr): #---------------------------------------------------------------------- # datetime / io related - -cdef int _EPOCH_ORD = 719163 - -from datetime import date as pydate - -cdef inline int64_t gmtime(object date): - cdef int y, m, d, h, mn, s, days - - y = PyDateTime_GET_YEAR(date) - m = PyDateTime_GET_MONTH(date) - d = PyDateTime_GET_DAY(date) - h = PyDateTime_DATE_GET_HOUR(date) - mn = PyDateTime_DATE_GET_MINUTE(date) - s = PyDateTime_DATE_GET_SECOND(date) - - days = pydate(y, m, 1).toordinal() - _EPOCH_ORD + d - 1 - return ((<int64_t> (((days * 24 + h) * 60 + mn))) * 60 + s) * 1000 - - -cpdef object to_datetime(int64_t timestamp): - return pydatetime.utcfromtimestamp(timestamp / 1000.0) - - -cpdef object to_timestamp(object dt): - return gmtime(dt) - - -def array_to_timestamp(ndarray[object, ndim=1] arr): - cdef int i, n - cdef ndarray[int64_t, ndim=1] result - - n = len(arr) - result = np.empty(n, dtype=np.int64) - - for i from 0 <= i < n: - result[i] = gmtime(arr[i]) - - return result - - -def time64_to_datetime(ndarray[int64_t, ndim=1] arr): - cdef int i, n - cdef ndarray[object, ndim=1] result - - n = len(arr) - result = np.empty(n, dtype=object) - - for i from 0 <= i < n: - result[i] = to_datetime(arr[i]) - - return result - +from tslibs.conversion import ( # noqa + time64_to_datetime, + array_to_timestamp, to_timestamp, to_datetime) +from tslibs.conversion cimport to_timestamp, to_datetime, gmtime # noqa #---------------------------------------------------------------------- # isnull / notnull related diff --git 
a/pandas/_libs/tslibs/conversion.pxd b/pandas/_libs/tslibs/conversion.pxd new file mode 100644 index 0000000000000..c81f530e0b25d --- /dev/null +++ b/pandas/_libs/tslibs/conversion.pxd @@ -0,0 +1,10 @@ +# -*- coding: utf-8 -*- +# cython: profile=False + +from numpy cimport int64_t + +cdef int64_t gmtime(object date) +cpdef object to_datetime(int64_t timestamp) +cpdef object to_timestamp(object dt) + +cdef _to_i8(object val) diff --git a/pandas/_libs/tslibs/conversion.pyx b/pandas/_libs/tslibs/conversion.pyx new file mode 100644 index 0000000000000..62d9609bb77e8 --- /dev/null +++ b/pandas/_libs/tslibs/conversion.pyx @@ -0,0 +1,104 @@ +# -*- coding: utf-8 -*- +# cython: profile=False + + +from datetime import ( + date as pydate, + datetime as pydatetime) + +from cpython.datetime cimport ( + PyDateTime_Check, + PyDateTime_GET_YEAR, + PyDateTime_GET_MONTH, + PyDateTime_GET_DAY, + PyDateTime_DATE_GET_HOUR, + PyDateTime_DATE_GET_MINUTE, + PyDateTime_DATE_GET_SECOND, + PyDateTime_IMPORT) +PyDateTime_IMPORT + +from datetime cimport (get_datetime64_value, _pydatetime_to_dts, + pandas_datetimestruct) + +import numpy as np +cimport numpy as cnp +from numpy cimport int64_t, ndarray +cnp.import_array() + +cimport util + +from timezones cimport get_utcoffset, is_utc + +# ---------------------------------------------------------------------- +# Constants +cdef int _EPOCH_ORD = 719163 + +# ---------------------------------------------------------------------- +# Non-pandas-specific + +cpdef object to_datetime(int64_t timestamp): + return pydatetime.utcfromtimestamp(timestamp / 1000.0) + + +cdef inline int64_t gmtime(object date): + cdef int y, m, d, h, mn, s, days + + y = PyDateTime_GET_YEAR(date) + m = PyDateTime_GET_MONTH(date) + d = PyDateTime_GET_DAY(date) + h = PyDateTime_DATE_GET_HOUR(date) + mn = PyDateTime_DATE_GET_MINUTE(date) + s = PyDateTime_DATE_GET_SECOND(date) + + days = pydate(y, m, 1).toordinal() - _EPOCH_ORD + d - 1 + return ((<int64_t> (((days * 24 + h) * 60 + mn))) * 60 + s) * 1000 + + +cpdef object to_timestamp(object dt): + return gmtime(dt) + + +def array_to_timestamp(ndarray[object, ndim=1] arr): + cdef int i, n + cdef ndarray[int64_t, ndim=1] result + + n = len(arr) + result = np.empty(n, dtype=np.int64) + + for i in range(n): + result[i] = gmtime(arr[i]) + + return result + + +def time64_to_datetime(ndarray[int64_t, ndim=1] arr): + cdef int i, n + cdef ndarray[object, ndim=1] result + + n = len(arr) + result = np.empty(n, dtype=object) + + for i in range(n): + result[i] = to_datetime(arr[i]) + + return result + + +# ---------------------------------------------------------------------- + +cdef inline _to_i8(object val): + cdef pandas_datetimestruct dts + try: + return val.value + except AttributeError: + if util.is_datetime64_object(val): + return get_datetime64_value(val) + elif PyDateTime_Check(val): + tzinfo = getattr(val, 'tzinfo', None) + # Save the original date value so we can get the utcoffset from it. 
+ ival = _pydatetime_to_dts(val, &dts) + if tzinfo is not None and not is_utc(tzinfo): + offset = get_utcoffset(tzinfo, val) + ival -= int(offset.total_seconds() * 1e9) + return ival + return val diff --git a/setup.py b/setup.py index d25ae4a5fb45c..b66f74126d26a 100755 --- a/setup.py +++ b/setup.py @@ -341,6 +341,7 @@ class CheckSDist(sdist_class): 'pandas/_libs/window.pyx', 'pandas/_libs/sparse.pyx', 'pandas/_libs/parsers.pyx', + 'pandas/_libs/tslibs/conversion.pyx', 'pandas/_libs/tslibs/timezones.pyx', 'pandas/_libs/tslibs/frequencies.pyx', 'pandas/_libs/tslibs/parsing.pyx', @@ -492,6 +493,7 @@ def pxd(name): 'depends': tseries_depends, 'sources': ['pandas/_libs/src/datetime/np_datetime.c', 'pandas/_libs/src/datetime/np_datetime_strings.c']}, + '_libs.tslibs.conversion': {'pyxfile': '_libs/tslibs/conversion'}, '_libs.tslibs.timezones': {'pyxfile': '_libs/tslibs/timezones'}, '_libs.period': {'pyxfile': '_libs/period', 'depends': (tseries_depends +
Eventually a bunch of `_TSObject` logic belongs in `tslibs.conversion`. To start off small, this just moves a few out-of-place functions from `_libs.lib` and `_libs.index`. The changes that are not pure cut-and-paste: in `_to_i8`, `tslib._delta_to_nanoseconds(offset)` became `int(offset.total_seconds() * 1e9)`; in `array_to_timestamp` and `time64_to_datetime`, `for i from 0 <= i < n:` became `for i in range(n):`. Right now the functions moved from `lib` are imported with a `# noqa`. If OK, I'll follow up by updating the imports elsewhere. - [ ] closes #xxxx - [ ] tests added / passed - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
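The first substitution can be sanity-checked from plain Python; for the offsets a `tzinfo.utcoffset` typically returns (whole minutes, far below float precision limits at this magnitude), the arithmetic is exact:

```python
from datetime import timedelta

offset = timedelta(hours=5, minutes=30)  # e.g. a +05:30 utcoffset
assert int(offset.total_seconds() * 1e9) == (5 * 3600 + 30 * 60) * 10**9
```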
https://api.github.com/repos/pandas-dev/pandas/pulls/17708
2017-09-28T16:37:44Z
2017-09-28T23:38:06Z
null
2017-09-29T00:24:05Z
DOC: Improved doc string for IntervalIndex + related changes
diff --git a/pandas/_libs/interval.pyx b/pandas/_libs/interval.pyx index bfbda9696ff2b..306597031817d 100644 --- a/pandas/_libs/interval.pyx +++ b/pandas/_libs/interval.pyx @@ -58,6 +58,10 @@ cdef class Interval(IntervalMixin): closed : {'left', 'right', 'both', 'neither'} Whether the interval is closed on the left-side, right-side, both or neither. Defaults to 'right'. + + See Also + -------- + IntervalIndex : an Index of intervals that are all closed on the same side. """ cdef readonly object left, right diff --git a/pandas/core/indexes/interval.py b/pandas/core/indexes/interval.py index 55ed2342571ab..a697ed7888f90 100644 --- a/pandas/core/indexes/interval.py +++ b/pandas/core/indexes/interval.py @@ -120,9 +120,42 @@ class IntervalIndex(IntervalMixin, Index): copy : boolean, default False Copy the meta-data + Examples + --------- + A new ``IntervalIndex`` is typically constructed using + :func:`interval_range`: + + >>> pd.interval_range(start=0, end=5) + IntervalIndex([(0, 1], (1, 2], (2, 3], (3, 4], (4, 5]] + closed='right', dtype='interval[int64]') + + It may also be constructed using one of the constructor + methods :meth:`IntervalIndex.from_arrays`, + :meth:`IntervalIndex.from_breaks`, :meth:`IntervalIndex.from_intervals` + and :meth:`IntervalIndex.from_tuples`. + + See further examples in the doc strings of ``interval_range`` and the + mentioned constructor methods. + + Notes + ------ + See the `user guide + <http://pandas.pydata.org/pandas-docs/stable/advanced.html#intervalindex>`_ + for more. + See Also -------- Index + Interval : A bounded slice-like interval + interval_range : Function to create a fixed frequency IntervalIndex + IntervalIndex.from_arrays : Construct an IntervalIndex from a left and + right array + IntervalIndex.from_breaks : Construct an IntervalIndex from an array of + splits + IntervalIndex.from_intervals : Construct an IntervalIndex from an array of + Interval objects + IntervalIndex.from_tuples : Construct an IntervalIndex from a list/array of + tuples """ _typ = 'intervalindex' _comparables = ['name'] @@ -319,11 +352,20 @@ def from_breaks(cls, breaks, closed='right', name=None, copy=False): Examples -------- + >>> pd.IntervalIndex.from_breaks([0, 1, 2, 3]) + IntervalIndex([(0, 1], (1, 2], (2, 3]] + closed='right', + dtype='interval[int64]') - >>> IntervalIndex.from_breaks([0, 1, 2, 3]) - IntervalIndex(left=[0, 1, 2], - right=[1, 2, 3], - closed='right') + See Also + -------- + interval_range : Function to create a fixed frequency IntervalIndex + IntervalIndex.from_arrays : Construct an IntervalIndex from a left and + right array + IntervalIndex.from_intervals : Construct an IntervalIndex from an array + of Interval objects + IntervalIndex.from_tuples : Construct an IntervalIndex from a + list/array of tuples """ breaks = np.asarray(breaks) return cls.from_arrays(breaks[:-1], breaks[1:], closed, @@ -350,11 +392,20 @@ def from_arrays(cls, left, right, closed='right', name=None, copy=False): Examples -------- + >>> pd.IntervalIndex.from_arrays([0, 1, 2], [1, 2, 3]) + IntervalIndex([(0, 1], (1, 2], (2, 3]] + closed='right', + dtype='interval[int64]') - >>> IntervalIndex.from_arrays([0, 1, 2], [1, 2, 3]) - IntervalIndex(left=[0, 1, 2], - right=[1, 2, 3], - closed='right') + See Also + -------- + interval_range : Function to create a fixed frequency IntervalIndex + IntervalIndex.from_breaks : Construct an IntervalIndex from an array of + splits + IntervalIndex.from_intervals : Construct an IntervalIndex from an array + of Interval objects + 
IntervalIndex.from_tuples : Construct an IntervalIndex from a + list/array of tuples """ left = np.asarray(left) right = np.asarray(right) @@ -378,19 +429,27 @@ def from_intervals(cls, data, name=None, copy=False): Examples -------- - - >>> IntervalIndex.from_intervals([Interval(0, 1), Interval(1, 2)]) - IntervalIndex(left=[0, 1], - right=[1, 2], - closed='right') + >>> pd.IntervalIndex.from_intervals([pd.Interval(0, 1), + ... pd.Interval(1, 2)]) + IntervalIndex([(0, 1], (1, 2]] + closed='right', dtype='interval[int64]') The generic Index constructor work identically when it infers an array of all intervals: - >>> Index([Interval(0, 1), Interval(1, 2)]) - IntervalIndex(left=[0, 1], - right=[1, 2], - closed='right') + >>> pd.Index([pd.Interval(0, 1), pd.Interval(1, 2)]) + IntervalIndex([(0, 1], (1, 2]] + closed='right', dtype='interval[int64]') + + See Also + -------- + interval_range : Function to create a fixed frequency IntervalIndex + IntervalIndex.from_arrays : Construct an IntervalIndex from a left and + right array + IntervalIndex.from_breaks : Construct an IntervalIndex from an array of + splits + IntervalIndex.from_tuples : Construct an IntervalIndex from a + list/array of tuples """ data = np.asarray(data) left, right, closed = intervals_to_interval_bounds(data) @@ -415,7 +474,19 @@ def from_tuples(cls, data, closed='right', name=None, copy=False): Examples -------- + >>> pd.IntervalIndex.from_tuples([(0, 1), (1,2)]) + IntervalIndex([(0, 1], (1, 2]], + closed='right', dtype='interval[int64]') + See Also + -------- + interval_range : Function to create a fixed frequency IntervalIndex + IntervalIndex.from_arrays : Construct an IntervalIndex from a left and + right array + IntervalIndex.from_breaks : Construct an IntervalIndex from an array of + splits + IntervalIndex.from_intervals : Construct an IntervalIndex from an array + of Interval objects """ left = [] right = [] @@ -1121,7 +1192,6 @@ def interval_range(start=None, end=None, periods=None, freq=None, Examples -------- - Numeric ``start`` and ``end`` is supported. >>> pd.interval_range(start=0, end=5) @@ -1159,6 +1229,10 @@ def interval_range(start=None, end=None, periods=None, freq=None, >>> pd.interval_range(end=5, periods=4, closed='both') IntervalIndex([[1, 2], [2, 3], [3, 4], [4, 5]] closed='both', dtype='interval[int64]') + + See Also + -------- + IntervalIndex : an Index of intervals that are all closed on the same side. """ if com._count_not_none(start, end, periods) != 2: raise ValueError('Of the three parameters: start, end, and periods, '
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` This PR improves the doc string for ``IntervalIndex``, plus related changes.
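As the new docstring examples emphasize, the alternate constructors all agree for equivalent inputs; a quick sketch consolidating those examples:

```python
import pandas as pd

a = pd.IntervalIndex.from_breaks([0, 1, 2, 3])
b = pd.IntervalIndex.from_arrays([0, 1, 2], [1, 2, 3])
c = pd.IntervalIndex.from_tuples([(0, 1), (1, 2), (2, 3)])

# all three build the same right-closed int64 intervals
assert a.equals(b) and a.equals(c)
```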
https://api.github.com/repos/pandas-dev/pandas/pulls/17706
2017-09-28T13:27:14Z
2017-09-28T19:26:39Z
2017-09-28T19:26:39Z
2017-10-09T21:00:10Z
DEPR: deprecate pd.TimeGrouper
diff --git a/doc/source/whatsnew/v0.21.0.txt b/doc/source/whatsnew/v0.21.0.txt index ae55b4a0aa469..dae93feb48b02 100644 --- a/doc/source/whatsnew/v0.21.0.txt +++ b/doc/source/whatsnew/v0.21.0.txt @@ -492,6 +492,7 @@ Deprecations - ``pd.options.html.border`` has been deprecated in favor of ``pd.options.display.html.border`` (:issue:`15793`). - :func:`SeriesGroupBy.nth` has deprecated ``True`` in favor of ``'all'`` for its kwarg ``dropna`` (:issue:`11038`). - :func:`DataFrame.as_blocks` is deprecated, as this is exposing the internal implementation (:issue:`17302`) +- ``pd.TimeGrouper`` is deprecated in favor of :class:`pandas.Grouper` (:issue:`16747`) .. _whatsnew_0210.deprecations.argmin_min diff --git a/pandas/core/api.py b/pandas/core/api.py index 6a32d3763ffb1..a012ccce83965 100644 --- a/pandas/core/api.py +++ b/pandas/core/api.py @@ -33,7 +33,6 @@ from pandas.tseries.offsets import DateOffset from pandas.core.tools.datetimes import to_datetime from pandas.core.tools.timedeltas import to_timedelta -from pandas.core.resample import TimeGrouper # see gh-14094. from pandas.util._depr_module import _DeprecatedModule @@ -52,8 +51,8 @@ # deprecation, xref #13790 def match(*args, **kwargs): - import warnings + import warnings warnings.warn("pd.match() is deprecated and will be removed " "in a future version", FutureWarning, stacklevel=2) @@ -64,8 +63,20 @@ def match(*args, **kwargs): def groupby(*args, **kwargs): import warnings - warnings.warn("pd.groupby() is deprecated and will be removed " + warnings.warn("pd.groupby() is deprecated and will be removed; " "Please use the Series.groupby() or " "DataFrame.groupby() methods", FutureWarning, stacklevel=2) return args[0].groupby(*args[1:], **kwargs) + + +# deprecation, xref +class TimeGrouper(object): + + def __new__(cls, *args, **kwargs): + from pandas.core.resample import TimeGrouper + import warnings + warnings.warn("pd.TimeGrouper is deprecated and will be removed; " + "Please use pd.Grouper(freq=...)", + FutureWarning, stacklevel=2) + return TimeGrouper(*args, **kwargs) diff --git a/pandas/tests/api/test_api.py b/pandas/tests/api/test_api.py index cbc73615811a2..c593290410b96 100644 --- a/pandas/tests/api/test_api.py +++ b/pandas/tests/api/test_api.py @@ -47,11 +47,11 @@ class TestPDApi(Base): 'Grouper', 'HDFStore', 'Index', 'Int64Index', 'MultiIndex', 'Period', 'PeriodIndex', 'RangeIndex', 'UInt64Index', 'Series', 'SparseArray', 'SparseDataFrame', - 'SparseSeries', 'TimeGrouper', 'Timedelta', + 'SparseSeries', 'Timedelta', 'TimedeltaIndex', 'Timestamp', 'Interval', 'IntervalIndex'] # these are already deprecated; awaiting removal - deprecated_classes = ['WidePanel', 'Panel4D', + deprecated_classes = ['WidePanel', 'Panel4D', 'TimeGrouper', 'SparseList', 'Expr', 'Term'] # these should be deprecated in the future @@ -184,6 +184,11 @@ def test_groupby(self): check_stacklevel=False): pd.groupby(pd.Series([1, 2, 3]), [1, 1, 1]) + def test_TimeGrouper(self): + with tm.assert_produces_warning(FutureWarning, + check_stacklevel=False): + pd.TimeGrouper(freq='D') + # GH 15940 def test_get_store(self): diff --git a/pandas/tests/groupby/test_groupby.py b/pandas/tests/groupby/test_groupby.py index 8957beacab376..d91cff436dee2 100644 --- a/pandas/tests/groupby/test_groupby.py +++ b/pandas/tests/groupby/test_groupby.py @@ -3335,7 +3335,7 @@ def test_groupby_with_empty(self): index = pd.DatetimeIndex(()) data = () series = pd.Series(data, index) - grouper = pd.core.resample.TimeGrouper('D') + grouper = pd.Grouper(freq='D') grouped = 
series.groupby(grouper) assert next(iter(grouped), None) is None @@ -3354,7 +3354,7 @@ def test_groupby_with_small_elem(self): df = pd.DataFrame({'event': ['start', 'start'], 'change': [1234, 5678]}, index=pd.DatetimeIndex(['2014-09-10', '2013-10-10'])) - grouped = df.groupby([pd.TimeGrouper(freq='M'), 'event']) + grouped = df.groupby([pd.Grouper(freq='M'), 'event']) assert len(grouped.groups) == 2 assert grouped.ngroups == 2 assert (pd.Timestamp('2014-09-30'), 'start') in grouped.groups @@ -3369,7 +3369,7 @@ def test_groupby_with_small_elem(self): 'change': [1234, 5678, 9123]}, index=pd.DatetimeIndex(['2014-09-10', '2013-10-10', '2014-09-15'])) - grouped = df.groupby([pd.TimeGrouper(freq='M'), 'event']) + grouped = df.groupby([pd.Grouper(freq='M'), 'event']) assert len(grouped.groups) == 2 assert grouped.ngroups == 2 assert (pd.Timestamp('2014-09-30'), 'start') in grouped.groups @@ -3385,7 +3385,7 @@ def test_groupby_with_small_elem(self): 'change': [1234, 5678, 9123]}, index=pd.DatetimeIndex(['2014-09-10', '2013-10-10', '2014-08-05'])) - grouped = df.groupby([pd.TimeGrouper(freq='M'), 'event']) + grouped = df.groupby([pd.Grouper(freq='M'), 'event']) assert len(grouped.groups) == 3 assert grouped.ngroups == 3 assert (pd.Timestamp('2014-09-30'), 'start') in grouped.groups @@ -3682,9 +3682,9 @@ def test_nunique_with_timegrouper(self): Timestamp('2016-06-28 16:09:30'), Timestamp('2016-06-28 16:46:28')], 'data': ['1', '2', '3']}).set_index('time') - result = test.groupby(pd.TimeGrouper(freq='h'))['data'].nunique() + result = test.groupby(pd.Grouper(freq='h'))['data'].nunique() expected = test.groupby( - pd.TimeGrouper(freq='h') + pd.Grouper(freq='h') )['data'].apply(pd.Series.nunique) tm.assert_series_equal(result, expected) diff --git a/pandas/tests/groupby/test_timegrouper.py b/pandas/tests/groupby/test_timegrouper.py index f83a3fcd0668d..fafcbf947e3df 100644 --- a/pandas/tests/groupby/test_timegrouper.py +++ b/pandas/tests/groupby/test_timegrouper.py @@ -52,10 +52,10 @@ def test_groupby_with_timegrouper(self): assert_frame_equal(result1, expected) df_sorted = df.sort_index() - result2 = df_sorted.groupby(pd.TimeGrouper(freq='5D')).sum() + result2 = df_sorted.groupby(pd.Grouper(freq='5D')).sum() assert_frame_equal(result2, expected) - result3 = df.groupby(pd.TimeGrouper(freq='5D')).sum() + result3 = df.groupby(pd.Grouper(freq='5D')).sum() assert_frame_equal(result3, expected) def test_groupby_with_timegrouper_methods(self): @@ -80,7 +80,7 @@ def test_groupby_with_timegrouper_methods(self): for df in [df_original, df_sorted]: df = df.set_index('Date', drop=False) - g = df.groupby(pd.TimeGrouper('6M')) + g = df.groupby(pd.Grouper(freq='6M')) assert g.group_keys assert isinstance(g.grouper, pd.core.groupby.BinGrouper) groups = g.groups @@ -265,11 +265,11 @@ def test_timegrouper_with_reg_groups(self): ['date', 'user_id']).sort_index().astype('int64') expected.name = 'whole_cost' - result1 = df.sort_index().groupby([pd.TimeGrouper(freq=freq), + result1 = df.sort_index().groupby([pd.Grouper(freq=freq), 'user_id'])['whole_cost'].sum() assert_series_equal(result1, expected) - result2 = df.groupby([pd.TimeGrouper(freq=freq), 'user_id'])[ + result2 = df.groupby([pd.Grouper(freq=freq), 'user_id'])[ 'whole_cost'].sum() assert_series_equal(result2, expected) @@ -340,7 +340,7 @@ def sumfunc_series(x): return pd.Series([x['value'].sum()], ('sum',)) expected = df.groupby(pd.Grouper(key='date')).apply(sumfunc_series) - result = (df_dt.groupby(pd.TimeGrouper(freq='M', key='date')) + result = 
(df_dt.groupby(pd.Grouper(freq='M', key='date')) .apply(sumfunc_series)) assert_frame_equal(result.reset_index(drop=True), expected.reset_index(drop=True)) @@ -358,8 +358,10 @@ def sumfunc_value(x): return x.value.sum() expected = df.groupby(pd.Grouper(key='date')).apply(sumfunc_value) - result = (df_dt.groupby(pd.TimeGrouper(freq='M', key='date')) - .apply(sumfunc_value)) + with tm.assert_produces_warning(FutureWarning, + check_stacklevel=False): + result = (df_dt.groupby(pd.TimeGrouper(freq='M', key='date')) + .apply(sumfunc_value)) assert_series_equal(result.reset_index(drop=True), expected.reset_index(drop=True)) @@ -617,7 +619,7 @@ def test_nunique_with_timegrouper_and_nat(self): Timestamp('2016-06-28 16:46:28')], 'data': ['1', '2', '3']}) - grouper = pd.TimeGrouper(key='time', freq='h') + grouper = pd.Grouper(key='time', freq='h') result = test.groupby(grouper)['data'].nunique() expected = test[test.time.notnull()].groupby(grouper)['data'].nunique() tm.assert_series_equal(result, expected) diff --git a/pandas/tests/groupby/test_transform.py b/pandas/tests/groupby/test_transform.py index 267b67972c640..4b821dade6eae 100644 --- a/pandas/tests/groupby/test_transform.py +++ b/pandas/tests/groupby/test_transform.py @@ -57,7 +57,7 @@ def demean(arr): # GH 8430 df = tm.makeTimeDataFrame() - g = df.groupby(pd.TimeGrouper('M')) + g = df.groupby(pd.Grouper(freq='M')) g.transform(lambda x: x - 1) # GH 9700 diff --git a/pandas/tests/test_resample.py b/pandas/tests/test_resample.py index 28a68a0a6e36d..7449beb8f97df 100644 --- a/pandas/tests/test_resample.py +++ b/pandas/tests/test_resample.py @@ -1983,8 +1983,8 @@ def test_resample_nunique(self): pd.Timestamp('2015-06-08 00:00:00'): '2015-06-08'}}) r = df.resample('D') g = df.groupby(pd.Grouper(freq='D')) - expected = df.groupby(pd.TimeGrouper('D')).ID.apply(lambda x: - x.nunique()) + expected = df.groupby(pd.Grouper(freq='D')).ID.apply(lambda x: + x.nunique()) assert expected.name == 'ID' for t in [r, g]: @@ -3075,7 +3075,9 @@ def setup_method(self, method): index=date_range('1/1/2000', periods=1000)) def test_apply(self): - grouper = TimeGrouper('A', label='right', closed='right') + with tm.assert_produces_warning(FutureWarning, + check_stacklevel=False): + grouper = pd.TimeGrouper(freq='A', label='right', closed='right') grouped = self.ts.groupby(grouper) @@ -3093,7 +3095,9 @@ def test_count(self): expected = self.ts.groupby(lambda x: x.year).count() - grouper = TimeGrouper('A', label='right', closed='right') + with tm.assert_produces_warning(FutureWarning, + check_stacklevel=False): + grouper = pd.TimeGrouper(freq='A', label='right', closed='right') result = self.ts.groupby(grouper).count() expected.index = result.index assert_series_equal(result, expected)
closes #16747
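The mechanics: `pd.TimeGrouper` becomes a shell class whose `__new__` warns and returns the real implementation, so existing call sites keep working while the warning steers users toward `pd.Grouper(freq=...)`. A generic sketch of this deprecate-a-class pattern (class names here are illustrative):

```python
import warnings


class Grouper(object):
    """The replacement class."""
    def __init__(self, freq=None):
        self.freq = freq


class TimeGrouper(object):
    # Never actually instantiated: __new__ emits the warning, then
    # hands back an instance of the replacement class instead.
    def __new__(cls, *args, **kwargs):
        warnings.warn("TimeGrouper is deprecated; use Grouper(freq=...)",
                      FutureWarning, stacklevel=2)
        return Grouper(*args, **kwargs)
```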
https://api.github.com/repos/pandas-dev/pandas/pulls/17703
2017-09-28T10:26:16Z
2017-09-28T11:50:33Z
2017-09-28T11:50:33Z
2017-09-28T11:51:34Z
CI: Pin miniconda version
diff --git a/appveyor.yml b/appveyor.yml index a1f8886f6d068..f1259f271ee39 100644 --- a/appveyor.yml +++ b/appveyor.yml @@ -59,7 +59,7 @@ install: # install our build environment - cmd: conda config --set show_channel_urls true --set always_yes true --set changeps1 false - - cmd: conda update -q conda + # - cmd: conda update -q conda - cmd: conda config --set ssl_verify false # add the pandas channel *before* defaults to have defaults take priority diff --git a/ci/install.ps1 b/ci/install.ps1 index 64ec7f81884cd..b784b4ebf5e6a 100644 --- a/ci/install.ps1 +++ b/ci/install.ps1 @@ -7,7 +7,7 @@ $MINICONDA_URL = "http://repo.continuum.io/miniconda/" function DownloadMiniconda ($python_version, $platform_suffix) { $webclient = New-Object System.Net.WebClient - $filename = "Miniconda3-latest-Windows-" + $platform_suffix + ".exe" + $filename = "Miniconda3-4.3.21-Windows-" + $platform_suffix + ".exe" $url = $MINICONDA_URL + $filename $basedir = $pwd.Path + "\" @@ -85,7 +85,7 @@ function UpdateConda ($python_home) { function main () { InstallMiniconda "3.5" $env:PYTHON_ARCH $env:CONDA_ROOT - UpdateConda $env:CONDA_ROOT + # UpdateConda $env:CONDA_ROOT InstallCondaPackages $env:CONDA_ROOT "conda-build jinja2 anaconda-client" } diff --git a/ci/install_circle.sh b/ci/install_circle.sh index fd79f907625e9..eba98be561397 100755 --- a/ci/install_circle.sh +++ b/ci/install_circle.sh @@ -10,7 +10,9 @@ echo "[Using clean Miniconda install]" rm -rf "$MINICONDA_DIR" # install miniconda -wget http://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh -q -O miniconda.sh || exit 1 +# wget http://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh -q -O miniconda.sh || exit 1 +# Pin miniconda +wget https://repo.continuum.io/miniconda/Miniconda2-4.3.21-Linux-x86_64.sh -q -O miniconda.sh || exit 1 bash miniconda.sh -b -p "$MINICONDA_DIR" || exit 1 export PATH="$MINICONDA_DIR/bin:$PATH" @@ -18,7 +20,7 @@ export PATH="$MINICONDA_DIR/bin:$PATH" echo "[update conda]" conda config --set ssl_verify false || exit 1 conda config --set always_yes true --set changeps1 false || exit 1 -conda update -q conda +# conda update -q conda # add the pandas channel to take priority # to add extra packages diff --git a/ci/install_travis.sh b/ci/install_travis.sh index b85263daa1eac..faf404ddcd293 100755 --- a/ci/install_travis.sh +++ b/ci/install_travis.sh @@ -34,9 +34,13 @@ fi # install miniconda if [ "${TRAVIS_OS_NAME}" == "osx" ]; then - time wget http://repo.continuum.io/miniconda/Miniconda3-latest-MacOSX-x86_64.sh -O miniconda.sh || exit 1 + # temporarily pin miniconda + # time wget http://repo.continuum.io/miniconda/Miniconda3-latest-MacOSX-x86_64.sh -O miniconda.sh || exit 1 + time wget https://repo.continuum.io/miniconda/Miniconda2-4.3.21-MacOSX-x86_64.sh -O miniconda.sh || exit 1 else - time wget http://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh -O miniconda.sh || exit 1 + # temporarily pin miniconda + # time wget http://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh -O miniconda.sh || exit 1 + time wget https://repo.continuum.io/miniconda/Miniconda2-4.3.21-Linux-x86_64.sh -O miniconda.sh || exit 1 fi time bash miniconda.sh -b -p "$MINICONDA_DIR" || exit 1 @@ -48,7 +52,7 @@ echo echo "[update conda]" conda config --set ssl_verify false || exit 1 conda config --set quiet true --set always_yes true --set changeps1 false || exit 1 -conda update -q conda +# conda update -q conda echo echo "[add channels]" diff --git a/ci/requirements-2.7_SLOW.run b/ci/requirements-2.7_SLOW.run 
index f7708283ad04a..db95a6ccb2314 100644 --- a/ci/requirements-2.7_SLOW.run +++ b/ci/requirements-2.7_SLOW.run @@ -16,4 +16,4 @@ s3fs psycopg2 pymysql html5lib -beautiful-soup +beautifulsoup4 diff --git a/ci/requirements-2.7_WIN.run b/ci/requirements-2.7_WIN.run index f953682f52d45..a81542ee5006c 100644 --- a/ci/requirements-2.7_WIN.run +++ b/ci/requirements-2.7_WIN.run @@ -14,5 +14,5 @@ xlsxwriter s3fs bottleneck html5lib -beautiful-soup +beautifulsoup4 jinja2=2.8 diff --git a/ci/requirements_all.txt b/ci/requirements_all.txt index b153b6989df86..e13afd619f105 100644 --- a/ci/requirements_all.txt +++ b/ci/requirements_all.txt @@ -13,7 +13,7 @@ xlrd xlwt html5lib patsy -beautiful-soup +beautifulsoup4 numpy cython scipy diff --git a/pandas/tests/io/test_pytables.py b/pandas/tests/io/test_pytables.py index ff21afc11d220..c5729d421758e 100644 --- a/pandas/tests/io/test_pytables.py +++ b/pandas/tests/io/test_pytables.py @@ -799,6 +799,10 @@ def test_complibs(self): # Remove lzo if its not available on this platform if not tables.which_lib_version('lzo'): all_complibs.remove('lzo') + # Remove bzip2 if its not available on this platform + if not tables.which_lib_version("bzip2"): + all_complibs.remove("bzip2") + all_levels = range(0, 10) all_tests = [(lib, lvl) for lib in all_complibs for lvl in all_levels]
xref #17696
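The `test_complibs` change above skips compression libraries that the pinned environment may not provide. A minimal sketch of that availability guard, assuming PyTables is installed (`tables.which_lib_version` returns ``None`` when a compression library was not compiled in; the candidate list here is illustrative):

```python
import tables

# Start from a candidate list and drop whatever this platform lacks.
all_complibs = ['blosc', 'bzip2', 'lzo', 'zlib']
for lib in ('lzo', 'bzip2'):
    if not tables.which_lib_version(lib):
        all_complibs.remove(lib)

print(all_complibs)  # only the libraries actually usable for HDF5 tests
```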
https://api.github.com/repos/pandas-dev/pandas/pulls/17700
2017-09-27T22:17:55Z
2017-09-28T09:59:26Z
2017-09-28T09:59:26Z
2017-09-28T10:53:58Z
try installing bzip2
diff --git a/ci/install_circle.sh b/ci/install_circle.sh index fd79f907625e9..d7b3a6d07c60d 100755 --- a/ci/install_circle.sh +++ b/ci/install_circle.sh @@ -69,6 +69,9 @@ time conda install -n pandas pytest>=3.1.0 || exit 1 source activate pandas time pip install moto || exit 1 +# https://github.com/conda/conda/issues/6030 +conda install gcc_linux-64 + # build but don't install echo "[build em]" time python setup.py build_ext --inplace || exit 1 diff --git a/ci/install_travis.sh b/ci/install_travis.sh index b85263daa1eac..8460d2124348e 100755 --- a/ci/install_travis.sh +++ b/ci/install_travis.sh @@ -148,6 +148,9 @@ if [ -e ${REQ} ]; then time bash $REQ || exit 1 fi +# test commit +conda install bzip2 + # remove any installed pandas package # w/o removing anything else echo
Just testing this out. xref #17696
https://api.github.com/repos/pandas-dev/pandas/pulls/17698
2017-09-27T21:00:22Z
2017-09-28T10:31:32Z
null
2017-09-28T10:58:29Z
TST: add datetimelike tests for tz-aware DatetimeIndex
diff --git a/pandas/tests/indexes/common.py b/pandas/tests/indexes/common.py index 90618cd6e235f..aca7060ea1a3d 100644 --- a/pandas/tests/indexes/common.py +++ b/pandas/tests/indexes/common.py @@ -34,6 +34,12 @@ def verify_pickle(self, index): unpickled = tm.round_trip_pickle(index) assert index.equals(unpickled) + def _fix_tz(self, new_index, orig_index): + if hasattr(orig_index, 'tz'): + assert new_index.tz is None + new_index = new_index.tz_localize('UTC').tz_convert(orig_index.tz) + return new_index + def test_pickle_compat_construction(self): # this is testing for pickle compat if self._holder is None: @@ -278,6 +284,8 @@ def test_ensure_copied_data(self): index_type = index.__class__ result = index_type(index.values, copy=True, **init_kwargs) + result = self._fix_tz(result, index) + tm.assert_index_equal(index, result) tm.assert_numpy_array_equal(index.values, result.values, check_same='copy') @@ -501,11 +509,13 @@ def test_repeat(self): rep = 2 i = self.create_index() expected = pd.Index(i.values.repeat(rep), name=i.name) + expected = self._fix_tz(expected, i) tm.assert_index_equal(i.repeat(rep), expected) i = self.create_index() rep = np.arange(len(i)) expected = pd.Index(i.values.repeat(rep), name=i.name) + expected = self._fix_tz(expected, i) tm.assert_index_equal(i.repeat(rep), expected) def test_numpy_repeat(self): @@ -726,7 +736,9 @@ def test_equals(self): assert not idx.equals(np.array(idx)) # Cannot pass in non-int64 dtype to RangeIndex - if not isinstance(idx, RangeIndex): + # In tz-aware DatetimeIndex constructor, + # subarr.tz: ValueError: cannot localize from non-UTC data + if not isinstance(idx, RangeIndex) and not hasattr(idx, 'tz'): same_values = Index(idx, dtype=object) assert idx.equals(same_values) assert same_values.equals(idx) diff --git a/pandas/tests/indexes/datetimelike.py b/pandas/tests/indexes/datetimelike.py index 114940009377c..d24eac89caffa 100644 --- a/pandas/tests/indexes/datetimelike.py +++ b/pandas/tests/indexes/datetimelike.py @@ -22,7 +22,7 @@ def test_str(self): if hasattr(idx, 'tz'): if idx.tz is not None: - assert idx.tz in str(idx) + assert str(idx.tz) in str(idx) if hasattr(idx, 'freq'): assert "freq='%s'" % idx.freqstr in str(idx) diff --git a/pandas/tests/indexes/datetimes/test_datetimelike.py b/pandas/tests/indexes/datetimes/test_datetimelike.py index 538e10e6011ec..94e5e05ff3b91 100644 --- a/pandas/tests/indexes/datetimes/test_datetimelike.py +++ b/pandas/tests/indexes/datetimes/test_datetimelike.py @@ -76,3 +76,85 @@ def test_union(self): for case in cases: result = first.union(case) assert tm.equalContents(result, everything) + + +class TestDatetimeTZIndex(DatetimeLike): + _holder = DatetimeIndex + + def setup_method(self, method): + # TODO: Consider adding another test instance that crosses + # DST boundary? Currently such fails a lot of tests. 
+ tz = "US/Eastern" + self.indices = dict(index=tm.makeDateIndex(10).tz_localize(tz), + index_dec=date_range('20130101', periods=10, + freq='-1D').tz_localize(tz)) + self.setup_indices() + + def test_pickle_compat_construction(self): + pass + + def test_repr_roundtrip(self): + # idx = self.create_index() + # tm.assert_index_equal(eval(repr(idx)), idx) + + # The constructor is re-localizing to the dtype's TZ: + # Index values are different (100.0 %) + # [left]: DatetimeIndex(['2013-01-01 05:00:00-05:00', + # '2013-01-02 05:00:00-05:00', + # '2013-01-03 05:00:00-05:00', + # '2013-01-04 05:00:00-05:00', + # '2013-01-05 05:00:00-05:00'], + # dtype='datetime64[ns, US/Eastern]', freq='D') + # [right]: DatetimeIndex(['2013-01-01 00:00:00-05:00', + # '2013-01-02 00:00:00-05:00', + # '2013-01-03 00:00:00-05:00', + # '2013-01-04 00:00:00-05:00', + # '2013-01-05 00:00:00-05:00'], + # dtype='datetime64[ns, US/Eastern]', freq='D') + pass + + # Disable following tests currently because they test comparing values + # with numpy arrays etc which are tz-naive, leading to issues. + def test_intersection_base(self): + # Subset of the full test + for name, idx in pd.compat.iteritems(self.indices): + first = idx[:5] + second = idx[:3] + intersect = first.intersection(second) + + assert tm.equalContents(intersect, second) + # TODO: intersection with numpy array doesn't place nice + # because of the mixture of tz-naive and tz-aware timestamps + + def test_union_base(self): + pass + + def test_symmetric_difference(self): + pass + + def test_shift(self): + + # test shift for datetimeIndex and non datetimeIndex + # GH8083 + + drange = self.create_index() + result = drange.shift(1) + expected = DatetimeIndex(['2013-01-02', '2013-01-03', '2013-01-04', + '2013-01-05', + '2013-01-06'], freq='D', dtype=drange.dtype) + tm.assert_index_equal(result, expected) + + result = drange.shift(-1) + expected = DatetimeIndex(['2012-12-31', '2013-01-01', '2013-01-02', + '2013-01-03', '2013-01-04'], + freq='D', dtype=drange.dtype) + tm.assert_index_equal(result, expected) + + result = drange.shift(3, freq='2D') + expected = DatetimeIndex(['2013-01-07', '2013-01-08', '2013-01-09', + '2013-01-10', + '2013-01-11'], freq='D', dtype=drange.dtype) + tm.assert_index_equal(result, expected) + + def create_index(self): + return date_range('20130101', periods=5).tz_localize("US/Eastern")
This commit just makes quick patches to get tests working for subclassing common index tests for tz-aware DatetimeIndex, by either ignoring tests entirely or explicitly converting to the DatetimeIndex's TZ. This is split off from PR #17583, where we were trying to add another common test. Advice is requested on whether to also make fixes upstream or to keep these fixes localized to the test suite.

- [ ] closes #xxxx
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
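The `_fix_tz` helper recovers the timezone that reconstruction from raw values drops. A minimal sketch of the same round trip, assuming a pytz-style zone name:

```python
import pandas as pd

orig = pd.date_range('20130101', periods=5, tz='US/Eastern')

# Rebuilding from the raw datetime64 values loses the zone; the
# underlying values are stored in UTC.
rebuilt = pd.Index(orig.values)
assert rebuilt.tz is None

# _fix_tz localizes to UTC first, then converts back to the original zone.
fixed = rebuilt.tz_localize('UTC').tz_convert(orig.tz)
assert fixed.equals(orig)
```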
https://api.github.com/repos/pandas-dev/pandas/pulls/17694
2017-09-27T16:31:40Z
2017-11-12T18:19:39Z
null
2017-11-12T18:19:40Z
DEPR: Deprecate cdate_range and merge into bdate_range
diff --git a/doc/source/api.rst b/doc/source/api.rst index 4ffeb5035912f..28d4567027572 100644 --- a/doc/source/api.rst +++ b/doc/source/api.rst @@ -218,7 +218,6 @@ Top-level dealing with datetimelike to_timedelta date_range bdate_range - cdate_range period_range timedelta_range infer_freq diff --git a/doc/source/timeseries.rst b/doc/source/timeseries.rst index 7399deb1319d8..d2d5ee344591a 100644 --- a/doc/source/timeseries.rst +++ b/doc/source/timeseries.rst @@ -76,21 +76,21 @@ Overview Following table shows the type of time-related classes pandas can handle and how to create them. -================= =============================== ================================================== +================= =============================== =================================================================== Class Remarks How to create -================= =============================== ================================================== -``Timestamp`` Represents a single time stamp ``to_datetime``, ``Timestamp`` -``DatetimeIndex`` Index of ``Timestamp`` ``to_datetime``, ``date_range``, ``DatetimeIndex`` +================= =============================== =================================================================== +``Timestamp`` Represents a single timestamp ``to_datetime``, ``Timestamp`` +``DatetimeIndex`` Index of ``Timestamp`` ``to_datetime``, ``date_range``, ``bdate_range``, ``DatetimeIndex`` ``Period`` Represents a single time span ``Period`` ``PeriodIndex`` Index of ``Period`` ``period_range``, ``PeriodIndex`` -================= =============================== ================================================== +================= =============================== =================================================================== .. _timeseries.representation: -Time Stamps vs. Time Spans --------------------------- +Timestamps vs. Time Spans +------------------------- -Time-stamped data is the most basic type of timeseries data that associates +Timestamped data is the most basic type of time series data that associates values with points in time. For pandas objects it means using the points in time. @@ -149,10 +149,10 @@ future releases. Converting to Timestamps ------------------------ -To convert a Series or list-like object of date-like objects e.g. strings, +To convert a ``Series`` or list-like object of date-like objects e.g. strings, epochs, or a mixture, you can use the ``to_datetime`` function. When passed -a Series, this returns a Series (with the same index), while a list-like -is converted to a DatetimeIndex: +a ``Series``, this returns a ``Series`` (with the same index), while a list-like +is converted to a ``DatetimeIndex``: .. ipython:: python @@ -175,11 +175,9 @@ you can pass the ``dayfirst`` flag: can't be parsed with the day being first it will be parsed as if ``dayfirst`` were False. -If you pass a single string to ``to_datetime``, it returns single ``Timestamp``. - -Also, ``Timestamp`` can accept the string input. -Note that ``Timestamp`` doesn't accept string parsing option like ``dayfirst`` -or ``format``, use ``to_datetime`` if these are required. +If you pass a single string to ``to_datetime``, it returns a single ``Timestamp``. +``Timestamp`` can also accept string input, but it doesn't accept string parsing +options like ``dayfirst`` or ``format``, so use ``to_datetime`` if these are required. .. 
ipython:: python @@ -191,9 +189,7 @@ Providing a Format Argument ~~~~~~~~~~~~~~~~~~~~~~~~~~~ In addition to the required datetime string, a ``format`` argument can be passed to ensure specific parsing. -It will potentially speed up the conversion considerably. - -For example: +This could also potentially speed up the conversion considerably. .. ipython:: python @@ -203,7 +199,7 @@ For example: For more information on how to specify the ``format`` options, see https://docs.python.org/3/library/datetime.html#strftime-and-strptime-behavior. -Assembling datetime from multiple DataFrame columns +Assembling Datetime from Multiple DataFrame Columns ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. versionadded:: 0.18.1 @@ -238,28 +234,24 @@ Invalid Data In version 0.17.0, the default for ``to_datetime`` is now ``errors='raise'``, rather than ``errors='ignore'``. This means that invalid parsing will raise rather that return the original input as in previous versions. -Pass ``errors='coerce'`` to convert invalid data to ``NaT`` (not a time): - -Raise when unparseable, this is the default +The default behavior, ``errors='raise'``, is to raise when unparseable: .. code-block:: ipython In [2]: pd.to_datetime(['2009/07/31', 'asd'], errors='raise') ValueError: Unknown string format -Return the original input when unparseable +Pass ``errors='ignore'`` to return the original input when unparseable: -.. code-block:: ipython +.. ipython:: python - In [4]: pd.to_datetime(['2009/07/31', 'asd'], errors='ignore') - Out[4]: array(['2009/07/31', 'asd'], dtype=object) + pd.to_datetime(['2009/07/31', 'asd'], errors='ignore') -Return NaT for input when unparseable +Pass ``errors='coerce'`` to convert unparseable data to ``NaT`` (not a time): -.. code-block:: ipython +.. ipython:: python - In [6]: pd.to_datetime(['2009/07/31', 'asd'], errors='coerce') - Out[6]: DatetimeIndex(['2009-07-31', 'NaT'], dtype='datetime64[ns]', freq=None) + pd.to_datetime(['2009/07/31', 'asd'], errors='coerce') .. _timeseries.converting.epoch: @@ -267,12 +259,11 @@ Return NaT for input when unparseable Epoch Timestamps ~~~~~~~~~~~~~~~~ -It's also possible to convert integer or float epoch times. The default unit -for these is nanoseconds (since these are how ``Timestamp`` s are stored). However, -often epochs are stored in another ``unit`` which can be specified. These are computed -from the starting point specified by the :ref:`Origin Parameter <timeseries.origin>`. - -Typical epoch stored units +pandas supports converting integer or float epoch times to ``Timestamp`` and +``DatetimeIndex``. The default unit is nanoseconds, since that is how ``Timestamp`` +objects are stored internally. However, epochs are often stored in another ``unit`` +which can be specified. These are computed from the starting point specified by the +``origin`` parameter. .. ipython:: python @@ -299,6 +290,10 @@ Typical epoch stored units pd.to_datetime([1490195805.433, 1490195805.433502912], unit='s') pd.to_datetime(1490195805433502912, unit='ns') +.. seealso:: + + :ref:`timeseries.origin` + .. _timeseries.converting.epoch_inverse: From Timestamps to Epoch @@ -319,15 +314,13 @@ We convert the ``DatetimeIndex`` to an ``int64`` array, then divide by the conve .. _timeseries.origin: -Using the Origin Parameter -~~~~~~~~~~~~~~~~~~~~~~~~~~ +Using the ``origin`` Parameter +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. versionadded:: 0.20.0 Using the ``origin`` parameter, one can specify an alternative starting point for creation -of a ``DatetimeIndex``. 
- -Start with 1960-01-01 as the starting date +of a ``DatetimeIndex``. For example, to use 1960-01-01 as the starting date: .. ipython:: python @@ -345,8 +338,8 @@ Commonly called 'unix epoch' or POSIX time. Generating Ranges of Timestamps ------------------------------- -To generate an index with time stamps, you can use either the DatetimeIndex or -Index constructor and pass in a list of datetime objects: +To generate an index with timestamps, you can use either the ``DatetimeIndex`` or +``Index`` constructor and pass in a list of datetime objects: .. ipython:: python @@ -360,37 +353,36 @@ Index constructor and pass in a list of datetime objects: index = pd.Index(dates) index -Practically, this becomes very cumbersome because we often need a very long +In practice this becomes very cumbersome because we often need a very long index with a large number of timestamps. If we need timestamps on a regular -frequency, we can use the pandas functions ``date_range`` and ``bdate_range`` -to create timestamp indexes. +frequency, we can use the :func:`date_range` and :func:`bdate_range` functions +to create a ``DatetimeIndex``. The default frequency for ``date_range`` is a +**calendar day** while the default for ``bdate_range`` is a **business day**: .. ipython:: python - index = pd.date_range('2000-1-1', periods=1000, freq='M') + start = datetime(2011, 1, 1) + end = datetime(2012, 1, 1) + + index = pd.date_range(start, end) index - index = pd.bdate_range('2012-1-1', periods=250) + index = pd.bdate_range(start, end) index -Convenience functions like ``date_range`` and ``bdate_range`` utilize a -variety of frequency aliases. The default frequency for ``date_range`` is a -**calendar day** while the default for ``bdate_range`` is a **business day** +Convenience functions like ``date_range`` and ``bdate_range`` can utilize a +variety of :ref:`frequency aliases <timeseries.offset_aliases>`: .. ipython:: python - start = datetime(2011, 1, 1) - end = datetime(2012, 1, 1) - - rng = pd.date_range(start, end) - rng + pd.date_range(start, periods=1000, freq='M') - rng = pd.bdate_range(start, end) - rng + pd.bdate_range(start, periods=250, freq='BQS') ``date_range`` and ``bdate_range`` make it easy to generate a range of dates -using various combinations of parameters like ``start``, ``end``, -``periods``, and ``freq``: +using various combinations of parameters like ``start``, ``end``, ``periods``, +and ``freq``. The start and end dates are strictly inclusive, so dates outside +of those specified will not be generated: .. ipython:: python @@ -402,15 +394,45 @@ using various combinations of parameters like ``start``, ``end``, pd.bdate_range(start=start, periods=20) -The start and end dates are strictly inclusive. So it will not generate any -dates outside of those dates if specified. +.. _timeseries.custom-freq-ranges: + +Custom Frequency Ranges +~~~~~~~~~~~~~~~~~~~~~~~ + +.. warning:: + + This functionality was originally exclusive to ``cdate_range``, which is + deprecated as of version 0.21.0 in favor of ``bdate_range``. Note that + ``cdate_range`` only utilizes the ``weekmask`` and ``holidays`` parameters + when custom business day, 'C', is passed as the frequency string. Support has + been expanded with ``bdate_range`` to work with any custom frequency string. + +.. versionadded:: 0.21.0 + +``bdate_range`` can also generate a range of custom frequency dates by using +the ``weekmask`` and ``holidays`` parameters. These parameters will only be +used if a custom frequency string is passed. + +.. 
ipython:: python + + weekmask = 'Mon Wed Fri' + + holidays = [datetime(2011, 1, 5), datetime(2011, 3, 14)] + + pd.bdate_range(start, end, freq='C', weekmask=weekmask, holidays=holidays) + + pd.bdate_range(start, end, freq='CBMS', weekmask=weekmask) + +.. seealso:: + + :ref:`timeseries.custombusinessdays` .. _timeseries.timestamp-limits: -Timestamp limitations +Timestamp Limitations --------------------- -Since pandas represents timestamps in nanosecond resolution, the timespan that +Since pandas represents timestamps in nanosecond resolution, the time span that can be represented using a 64-bit integer is limited to approximately 584 years: .. ipython:: python @@ -418,7 +440,9 @@ can be represented using a 64-bit integer is limited to approximately 584 years: pd.Timestamp.min pd.Timestamp.max -See :ref:`here <timeseries.oob>` for ways to represent data outside these bound. +.. seealso:: + + :ref:`timeseries.oob` .. _timeseries.datetimeindex: @@ -426,20 +450,20 @@ Indexing -------- One of the main uses for ``DatetimeIndex`` is as an index for pandas objects. -The ``DatetimeIndex`` class contains many timeseries related optimizations: +The ``DatetimeIndex`` class contains many time series related optimizations: - A large range of dates for various offsets are pre-computed and cached under the hood in order to make generating subsequent date ranges very fast (just have to grab a slice) - Fast shifting using the ``shift`` and ``tshift`` method on pandas objects - - Unioning of overlapping DatetimeIndex objects with the same frequency is + - Unioning of overlapping ``DatetimeIndex`` objects with the same frequency is very fast (important for fast data alignment) - Quick access to date fields via properties such as ``year``, ``month``, etc. - Regularization functions like ``snap`` and very fast ``asof`` logic -DatetimeIndex objects has all the basic functionality of regular Index objects -and a smorgasbord of advanced timeseries-specific methods for easy frequency -processing. +``DatetimeIndex`` objects have all the basic functionality of regular ``Index`` +objects, and a smorgasbord of advanced time series specific methods for easy +frequency processing. .. seealso:: :ref:`Reindexing methods <basics.reindexing>` @@ -447,8 +471,7 @@ processing. .. note:: While pandas does not force you to have a sorted date index, some of these - methods may have unexpected or incorrect behavior if the dates are - unsorted. So please be careful. + methods may have unexpected or incorrect behavior if the dates are unsorted. ``DatetimeIndex`` can be used like a regular index and offers all of its intelligent functionality like selection, slicing, etc. @@ -466,7 +489,7 @@ intelligent functionality like selection, slicing, etc. Partial String Indexing ~~~~~~~~~~~~~~~~~~~~~~~ -You can pass in dates and strings that parse to dates as indexing parameters: +Dates and strings that parse to timestamps can be passed as indexing parameters: .. ipython:: python @@ -485,9 +508,9 @@ the year or year and month as strings: ts['2011-6'] -This type of slicing will work on a DataFrame with a ``DateTimeIndex`` as well. Since the +This type of slicing will work on a ``DataFrame`` with a ``DatetimeIndex`` as well. Since the partial string selection is a form of label slicing, the endpoints **will be** included. This -would include matching times on an included date. Here's an example: +would include matching times on an included date: .. 
ipython:: python @@ -523,7 +546,7 @@ We are stopping on the included end-point as it is part of the index .. versionadded:: 0.18.0 -DatetimeIndex Partial String Indexing also works on DataFrames with a ``MultiIndex``. For example: +``DatetimeIndex`` partial string indexing also works on a ``DataFrame`` with a ``MultiIndex``: .. ipython:: python @@ -541,14 +564,14 @@ DatetimeIndex Partial String Indexing also works on DataFrames with a ``MultiInd .. _timeseries.slice_vs_exact_match: -Slice vs. exact match +Slice vs. Exact Match ~~~~~~~~~~~~~~~~~~~~~ .. versionchanged:: 0.20.0 -The same string used as an indexing parameter can be treated either as a slice or as an exact match depending on the resolution of an index. If the string is less accurate than the index, it will be treated as a slice, otherwise as an exact match. +The same string used as an indexing parameter can be treated either as a slice or as an exact match depending on the resolution of the index. If the string is less accurate than the index, it will be treated as a slice, otherwise as an exact match. -For example, let us consider ``Series`` object which index has minute resolution. +Consider a ``Series`` object with a minute resolution index: .. ipython:: python @@ -593,7 +616,7 @@ If the timestamp string is treated as a slice, it can be used to index ``DataFra .. warning:: - However if the string is treated as an exact match, the selection in ``DataFrame``'s ``[]`` will be column-wise and not row-wise, see :ref:`Indexing Basics <indexing.basics>`. For example ``dft_minute['2011-12-31 23:59']`` will raise ``KeyError`` as ``'2012-12-31 23:59'`` has the same resolution as index and there is no column with such name: + However, if the string is treated as an exact match, the selection in ``DataFrame``'s ``[]`` will be column-wise and not row-wise, see :ref:`Indexing Basics <indexing.basics>`. For example ``dft_minute['2011-12-31 23:59']`` will raise ``KeyError`` as ``'2012-12-31 23:59'`` has the same resolution as the index and there is no column with such name: To *always* have unambiguous selection, whether the row is treated as a slice or a single selection, use ``.loc``. @@ -616,7 +639,7 @@ Note also that ``DatetimeIndex`` resolution cannot be less precise than day. Exact Indexing ~~~~~~~~~~~~~~ -As discussed in previous section, indexing a ``DateTimeIndex`` with a partial string depends on the "accuracy" of the period, in other words how specific the interval is in relation to the resolution of the index. In contrast, indexing with ``Timestamp`` or ``datetime`` objects is exact, because the objects have exact meaning. These also follow the semantics of *including both endpoints*. +As discussed in previous section, indexing a ``DatetimeIndex`` with a partial string depends on the "accuracy" of the period, in other words how specific the interval is in relation to the resolution of the index. In contrast, indexing with ``Timestamp`` or ``datetime`` objects is exact, because the objects have exact meaning. These also follow the semantics of *including both endpoints*. These ``Timestamp`` and ``datetime`` objects have exact ``hours, minutes,`` and ``seconds``, even though they were not explicitly specified (they are ``0``). 
@@ -640,8 +663,8 @@ A ``truncate`` convenience function is provided that is equivalent to slicing: ts.truncate(before='10/31/2011', after='12/31/2011') -Even complicated fancy indexing that breaks the DatetimeIndex's frequency -regularity will result in a ``DatetimeIndex`` (but frequency is lost): +Even complicated fancy indexing that breaks the ``DatetimeIndex`` frequency +regularity will result in a ``DatetimeIndex``, although frequency is lost: .. ipython:: python @@ -652,7 +675,7 @@ regularity will result in a ``DatetimeIndex`` (but frequency is lost): Time/Date Components -------------------- -There are several time/date properties that one can access from ``Timestamp`` or a collection of timestamps like a ``DateTimeIndex``. +There are several time/date properties that one can access from ``Timestamp`` or a collection of timestamps like a ``DatetimeIndex``. .. csv-table:: :header: "Property", "Description" @@ -688,10 +711,10 @@ Furthermore, if you have a ``Series`` with datetimelike values, then you can acc .. _timeseries.offsets: -DateOffset objects +DateOffset Objects ------------------ -In the preceding examples, we created DatetimeIndex objects at various +In the preceding examples, we created ``DatetimeIndex`` objects at various frequencies by passing in :ref:`frequency strings <timeseries.offset_aliases>` like 'M', 'W', and 'BM to the ``freq`` keyword. Under the hood, these frequency strings are being translated into an instance of pandas ``DateOffset``, @@ -704,7 +727,7 @@ which represents a regular frequency increment. Specific offset logic like DateOffset, "Generic offset class, defaults to 1 calendar day" BDay, "business day (weekday)" - CDay, "custom business day (experimental)" + CDay, "custom business day" Week, "one week, optionally anchored on a day of the week" WeekOfMonth, "the x-th day of the y-th week of each month" LastWeekOfMonth, "the x-th day of the last week of each month" @@ -805,7 +828,7 @@ These operations (``apply``, ``rollforward`` and ``rollback``) preserves time (h hour.apply(pd.Timestamp('2014-01-01 23:00')) -Parametric offsets +Parametric Offsets ~~~~~~~~~~~~~~~~~~ Some of the offsets can be "parameterized" when created to result in different @@ -840,7 +863,7 @@ Another example is parameterizing ``YearEnd`` with the specific ending month: .. _timeseries.offsetseries: -Using offsets with ``Series`` / ``DatetimeIndex`` +Using Offsets with ``Series`` / ``DatetimeIndex`` ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Offsets can be used with either a ``Series`` or ``DatetimeIndex`` to @@ -1091,7 +1114,7 @@ frequencies. We will refer to these aliases as *offset aliases*. :widths: 15, 100 "B", "business day frequency" - "C", "custom business day frequency (experimental)" + "C", "custom business day frequency" "D", "calendar day frequency" "W", "weekly frequency" "M", "month end frequency" @@ -1326,10 +1349,10 @@ or calendars with additional rules. .. _timeseries.advanced_datetime: -Time series-related instance methods +Time Series-Related Instance Methods ------------------------------------ -Shifting / lagging +Shifting / Lagging ~~~~~~~~~~~~~~~~~~ One may want to *shift* or *lag* the values in a time series back and forward in @@ -1360,7 +1383,7 @@ all the dates in the index by a specified number of offsets: Note that with ``tshift``, the leading entry is no longer NaN because the data is not being realigned. -Frequency conversion +Frequency Conversion ~~~~~~~~~~~~~~~~~~~~ The primary function for changing frequencies is the ``asfreq`` function. 
@@ -1381,13 +1404,13 @@ method for any gaps that may appear after the frequency conversion ts.asfreq(BDay(), method='pad') -Filling forward / backward +Filling Forward / Backward ~~~~~~~~~~~~~~~~~~~~~~~~~~ Related to ``asfreq`` and ``reindex`` is the ``fillna`` function documented in the :ref:`missing data section <missing_data.fillna>`. -Converting to Python datetimes +Converting to Python Datetimes ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ``DatetimeIndex`` can be converted to an array of Python native datetime.datetime objects using the @@ -1471,10 +1494,10 @@ labels. ts.resample('5Min', label='left', loffset='1s').mean() The ``axis`` parameter can be set to 0 or 1 and allows you to resample the -specified axis for a DataFrame. +specified axis for a ``DataFrame``. ``kind`` can be set to 'timestamp' or 'period' to convert the resulting index -to/from time-stamp and time-span representations. By default ``resample`` +to/from timestamp and time span representations. By default ``resample`` retains the input representation. ``convention`` can be set to 'start' or 'end' when resampling period data @@ -1482,8 +1505,8 @@ retains the input representation. frequency periods. -Up Sampling -~~~~~~~~~~~ +Upsampling +~~~~~~~~~~ For upsampling, you can specify a way to upsample and the ``limit`` parameter to interpolate over the gaps that are created: @@ -1559,13 +1582,13 @@ We can select a specific column or columns using standard getitem. r[['A','B']].mean() -You can pass a list or dict of functions to do aggregation with, outputting a DataFrame: +You can pass a list or dict of functions to do aggregation with, outputting a ``DataFrame``: .. ipython:: python r['A'].agg([np.sum, np.mean, np.std]) -On a resampled DataFrame, you can pass a list of functions to apply to each +On a resampled ``DataFrame``, you can pass a list of functions to apply to each column, which produces an aggregated result with a hierarchical index: .. ipython:: python @@ -1573,7 +1596,7 @@ column, which produces an aggregated result with a hierarchical index: r.agg([np.sum, np.mean]) By passing a dict to ``aggregate`` you can apply a different aggregation to the -columns of a DataFrame: +columns of a ``DataFrame``: .. ipython:: python :okexcept: @@ -1890,7 +1913,7 @@ frequencies ``Q-JAN`` through ``Q-DEC``. .. _timeseries.interchange: -Converting between Representations +Converting Between Representations ---------------------------------- Timestamped data can be converted to PeriodIndex-ed data using ``to_period`` @@ -1934,7 +1957,7 @@ the quarter end: .. _timeseries.oob: -Representing out-of-bounds spans +Representing Out-of-Bounds Spans -------------------------------- If you have data that is outside of the ``Timestamp`` bounds, see :ref:`Timestamp limitations <timeseries.timestamp-limits>`, @@ -2031,7 +2054,7 @@ which gives you more control over which time zone is used: rng_dateutil.tz == tz_dateutil Timestamps, like Python's ``datetime.datetime`` object can be either time zone -naive or time zone aware. Naive time series and DatetimeIndex objects can be +naive or time zone aware. Naive time series and ``DatetimeIndex`` objects can be *localized* using ``tz_localize``: .. 
ipython:: python @@ -2099,8 +2122,8 @@ Localization of ``Timestamp`` functions just like ``DatetimeIndex`` and ``Series rng[5].tz_localize('Asia/Shanghai') -Operations between Series in different time zones will yield UTC -Series, aligning the data on the UTC timestamps: +Operations between ``Series`` in different time zones will yield UTC +``Series``, aligning the data on the UTC timestamps: .. ipython:: python @@ -2180,7 +2203,7 @@ constructor as well as ``tz_localize``. .. _timeseries.timezone_series: -TZ aware Dtypes +TZ Aware Dtypes ~~~~~~~~~~~~~~~ .. versionadded:: 0.17.0 diff --git a/doc/source/whatsnew/v0.21.0.txt b/doc/source/whatsnew/v0.21.0.txt index c8a0a6bff5cc7..d69a5c22acc03 100644 --- a/doc/source/whatsnew/v0.21.0.txt +++ b/doc/source/whatsnew/v0.21.0.txt @@ -488,7 +488,7 @@ Additionally, DataFrames with datetime columns that were parsed by :func:`read_s Consistency of Range Functions ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -In previous versions, there were some inconsistencies between the various range functions: :func:`date_range`, :func:`bdate_range`, :func:`cdate_range`, :func:`period_range`, :func:`timedelta_range`, and :func:`interval_range`. (:issue:`17471`). +In previous versions, there were some inconsistencies between the various range functions: :func:`date_range`, :func:`bdate_range`, :func:`period_range`, :func:`timedelta_range`, and :func:`interval_range`. (:issue:`17471`). One of the inconsistent behaviors occurred when the ``start``, ``end`` and ``period`` parameters were all specified, potentially leading to ambiguous ranges. When all three parameters were passed, ``interval_range`` ignored the ``period`` parameter, ``period_range`` ignored the ``end`` parameter, and the other range functions raised. To promote consistency among the range functions, and avoid potentially ambiguous ranges, ``interval_range`` and ``period_range`` will now raise when all three parameters are passed. @@ -571,8 +571,9 @@ Deprecations - :func:`SeriesGroupBy.nth` has deprecated ``True`` in favor of ``'all'`` for its kwarg ``dropna`` (:issue:`11038`). - :func:`DataFrame.as_blocks` is deprecated, as this is exposing the internal implementation (:issue:`17302`) - ``pd.TimeGrouper`` is deprecated in favor of :class:`pandas.Grouper` (:issue:`16747`) +- ``cdate_range`` has been deprecated in favor of :func:`bdate_range`, which has gained ``weekmask`` and ``holidays`` parameters for building custom frequency date ranges. See the :ref:`documentation <timeseries.custom-freq-ranges>` for more details (:issue:`17596`) -.. _whatsnew_0210.deprecations.argmin_min +.. 
_whatsnew_0210.deprecations.argmin_min: Series.argmax and Series.argmin ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -738,9 +739,9 @@ Numeric Categorical ^^^^^^^^^^^ -- Bug in :func:`Series.isin` when called with a categorical (:issue`16639`) +- Bug in :func:`Series.isin` when called with a categorical (:issue:`16639`) - Bug in the categorical constructor with empty values and categories causing the ``.categories`` to be an empty ``Float64Index`` rather than an empty ``Index`` with object dtype (:issue:`17248`) -- Bug in categorical operations with :ref:`Series.cat <categorical.cat>' not preserving the original Series' name (:issue:`17509`) +- Bug in categorical operations with :ref:`Series.cat <categorical.cat>` not preserving the original Series' name (:issue:`17509`) PyPy ^^^^ diff --git a/pandas/core/api.py b/pandas/core/api.py index a012ccce83965..2f818a400162b 100644 --- a/pandas/core/api.py +++ b/pandas/core/api.py @@ -16,8 +16,7 @@ PeriodIndex, NaT) from pandas.core.indexes.period import Period, period_range, pnow from pandas.core.indexes.timedeltas import Timedelta, timedelta_range -from pandas.core.indexes.datetimes import (Timestamp, date_range, bdate_range, - cdate_range) +from pandas.core.indexes.datetimes import Timestamp, date_range, bdate_range from pandas.core.indexes.interval import Interval, interval_range from pandas.core.series import Series diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py index 9127864eab8a1..1419da3fa8861 100644 --- a/pandas/core/indexes/datetimes.py +++ b/pandas/core/indexes/datetimes.py @@ -17,6 +17,7 @@ is_period_dtype, is_bool_dtype, is_string_dtype, + is_string_like, is_list_like, is_scalar, pandas_dtype, @@ -37,7 +38,8 @@ Resolution) from pandas.core.indexes.datetimelike import ( DatelikeOps, TimelikeOps, DatetimeIndexOpsMixin) -from pandas.tseries.offsets import DateOffset, generate_range, Tick, CDay +from pandas.tseries.offsets import ( + DateOffset, generate_range, Tick, CDay, prefix_mapping) from pandas.core.tools.datetimes import ( parse_time_string, normalize_date, to_time) from pandas.core.tools.timedeltas import to_timedelta @@ -2049,7 +2051,8 @@ def date_range(start=None, end=None, periods=None, freq='D', tz=None, def bdate_range(start=None, end=None, periods=None, freq='B', tz=None, - normalize=True, name=None, closed=None, **kwargs): + normalize=True, name=None, weekmask=None, holidays=None, + closed=None, **kwargs): """ Return a fixed frequency DatetimeIndex, with business day as the default frequency @@ -2071,6 +2074,20 @@ def bdate_range(start=None, end=None, periods=None, freq='B', tz=None, Normalize start/end dates to midnight before generating date range name : string, default None Name of the resulting DatetimeIndex + weekmask : string or None, default None + Weekmask of valid business days, passed to ``numpy.busdaycalendar``, + only used when custom frequency strings are passed. The default + value None is equivalent to 'Mon Tue Wed Thu Fri' + + .. versionadded:: 0.21.0 + + holidays : list-like or None, default None + Dates to exclude from the set of valid business days, passed to + ``numpy.busdaycalendar``, only used when custom frequency strings + are passed + + .. 
versionadded:: 0.21.0 + closed : string, default None Make the interval closed with respect to the given frequency to the 'left', 'right', or both sides (None) @@ -2088,6 +2105,18 @@ def bdate_range(start=None, end=None, periods=None, freq='B', tz=None, rng : DatetimeIndex """ + if is_string_like(freq) and freq.startswith('C'): + try: + weekmask = weekmask or 'Mon Tue Wed Thu Fri' + freq = prefix_mapping[freq](holidays=holidays, weekmask=weekmask) + except (KeyError, TypeError): + msg = 'invalid custom frequency string: {freq}'.format(freq=freq) + raise ValueError(msg) + elif holidays or weekmask: + msg = ('a custom frequency string is required when holidays or ' + 'weekmask are passed, got frequency {freq}').format(freq=freq) + raise ValueError(msg) + return DatetimeIndex(start=start, end=end, periods=periods, freq=freq, tz=tz, normalize=normalize, name=name, closed=closed, **kwargs) @@ -2099,6 +2128,8 @@ def cdate_range(start=None, end=None, periods=None, freq='C', tz=None, Return a fixed frequency DatetimeIndex, with CustomBusinessDay as the default frequency + .. deprecated:: 0.21.0 + Parameters ---------- start : string or datetime-like, default None @@ -2137,6 +2168,9 @@ def cdate_range(start=None, end=None, periods=None, freq='C', tz=None, ------- rng : DatetimeIndex """ + warnings.warn("cdate_range is deprecated and will be removed in a future " + "version, instead use pd.bdate_range(..., freq='{freq}')" + .format(freq=freq), FutureWarning, stacklevel=2) if freq == 'C': holidays = kwargs.pop('holidays', []) diff --git a/pandas/tests/api/test_api.py b/pandas/tests/api/test_api.py index c593290410b96..fad455d6391c3 100644 --- a/pandas/tests/api/test_api.py +++ b/pandas/tests/api/test_api.py @@ -63,7 +63,7 @@ class TestPDApi(Base): # top-level functions funcs = ['bdate_range', 'concat', 'crosstab', 'cut', 'date_range', 'interval_range', 'eval', - 'factorize', 'get_dummies', 'cdate_range', + 'factorize', 'get_dummies', 'infer_freq', 'isna', 'isnull', 'lreshape', 'melt', 'notna', 'notnull', 'offsets', 'merge', 'merge_ordered', 'merge_asof', @@ -240,3 +240,13 @@ def test_deprecation_access_func(self): [c1, c2], sort_categories=True, ignore_order=True) + + +class TestCDateRange(object): + + def test_deprecation_cdaterange(self): + # GH17596 + from pandas.core.indexes.datetimes import cdate_range + with tm.assert_produces_warning(FutureWarning, + check_stacklevel=False): + cdate_range('2017-01-01', '2017-12-31') diff --git a/pandas/tests/indexes/datetimes/test_date_range.py b/pandas/tests/indexes/datetimes/test_date_range.py index c373942cb4c63..3b40ef092f364 100644 --- a/pandas/tests/indexes/datetimes/test_date_range.py +++ b/pandas/tests/indexes/datetimes/test_date_range.py @@ -1,6 +1,5 @@ """ -test date_range, bdate_range, cdate_range -construction from the convenience range functions +test date_range, bdate_range construction from the convenience range functions """ import pytest @@ -12,10 +11,9 @@ import pandas as pd import pandas.util.testing as tm from pandas import compat -from pandas.core.indexes.datetimes import bdate_range, cdate_range -from pandas import date_range, offsets, DatetimeIndex, Timestamp -from pandas.tseries.offsets import (generate_range, CDay, BDay, - DateOffset, MonthEnd) +from pandas import date_range, bdate_range, offsets, DatetimeIndex, Timestamp +from pandas.tseries.offsets import (generate_range, CDay, BDay, DateOffset, + MonthEnd, prefix_mapping) from pandas.tests.series.common import TestData @@ -241,9 +239,6 @@ def test_precision_finer_than_offset(self): 
class TestBusinessDateRange(object): - def setup_method(self, method): - self.rng = bdate_range(START, END) - def test_constructor(self): bdate_range(START, END, freq=BDay()) bdate_range(START, periods=20, freq=BDay()) @@ -258,33 +253,31 @@ def test_constructor(self): def test_naive_aware_conflicts(self): naive = bdate_range(START, END, freq=BDay(), tz=None) - aware = bdate_range(START, END, freq=BDay(), - tz="Asia/Hong_Kong") - tm.assert_raises_regex(TypeError, "tz-naive.*tz-aware", - naive.join, aware) - tm.assert_raises_regex(TypeError, "tz-naive.*tz-aware", - aware.join, naive) + aware = bdate_range(START, END, freq=BDay(), tz="Asia/Hong_Kong") + + msg = 'tz-naive.*tz-aware' + with tm.assert_raises_regex(TypeError, msg): + naive.join(aware) + + with tm.assert_raises_regex(TypeError, msg): + aware.join(naive) def test_cached_range(self): DatetimeIndex._cached_range(START, END, offset=BDay()) DatetimeIndex._cached_range(START, periods=20, offset=BDay()) DatetimeIndex._cached_range(end=START, periods=20, offset=BDay()) - tm.assert_raises_regex(TypeError, "offset", - DatetimeIndex._cached_range, - START, END) + with tm.assert_raises_regex(TypeError, "offset"): + DatetimeIndex._cached_range(START, END) - tm.assert_raises_regex(TypeError, "specify period", - DatetimeIndex._cached_range, START, - offset=BDay()) + with tm.assert_raises_regex(TypeError, "specify period"): + DatetimeIndex._cached_range(START, offset=BDay()) - tm.assert_raises_regex(TypeError, "specify period", - DatetimeIndex._cached_range, end=END, - offset=BDay()) + with tm.assert_raises_regex(TypeError, "specify period"): + DatetimeIndex._cached_range(end=END, offset=BDay()) - tm.assert_raises_regex(TypeError, "start or end", - DatetimeIndex._cached_range, periods=20, - offset=BDay()) + with tm.assert_raises_regex(TypeError, "start or end"): + DatetimeIndex._cached_range(periods=20, offset=BDay()) def test_cached_range_bug(self): rng = date_range('2010-09-01 05:00:00', periods=50, @@ -300,8 +293,9 @@ def test_timezone_comparaison_bug(self): def test_timezone_comparaison_assert(self): start = Timestamp('20130220 10:00', tz='US/Eastern') - pytest.raises(AssertionError, date_range, start, periods=2, - tz='Europe/Berlin') + msg = 'Inferred time zone not equal to passed time zone' + with tm.assert_raises_regex(AssertionError, msg): + date_range(start, periods=2, tz='Europe/Berlin') def test_misc(self): end = datetime(2009, 5, 13) @@ -315,14 +309,17 @@ def test_misc(self): def test_date_parse_failure(self): badly_formed_date = '2007/100/1' - pytest.raises(ValueError, Timestamp, badly_formed_date) + with pytest.raises(ValueError): + Timestamp(badly_formed_date) + + with pytest.raises(ValueError): + bdate_range(start=badly_formed_date, periods=10) - pytest.raises(ValueError, bdate_range, start=badly_formed_date, - periods=10) - pytest.raises(ValueError, bdate_range, end=badly_formed_date, - periods=10) - pytest.raises(ValueError, bdate_range, badly_formed_date, - badly_formed_date) + with pytest.raises(ValueError): + bdate_range(end=badly_formed_date, periods=10) + + with pytest.raises(ValueError): + bdate_range(badly_formed_date, badly_formed_date) def test_daterange_bug_456(self): # GH #456 @@ -334,8 +331,9 @@ def test_daterange_bug_456(self): assert isinstance(result, DatetimeIndex) def test_error_with_zero_monthends(self): - pytest.raises(ValueError, date_range, '1/1/2000', '1/1/2001', - freq=MonthEnd(0)) + msg = 'Offset <0 \* MonthEnds> did not increment date' + with tm.assert_raises_regex(ValueError, msg): + 
date_range('1/1/2000', '1/1/2001', freq=MonthEnd(0)) def test_range_bug(self): # GH #770 @@ -343,8 +341,8 @@ def test_range_bug(self): result = date_range("2011-1-1", "2012-1-31", freq=offset) start = datetime(2011, 1, 1) - exp_values = [start + i * offset for i in range(5)] - tm.assert_index_equal(result, DatetimeIndex(exp_values)) + expected = DatetimeIndex([start + i * offset for i in range(5)]) + tm.assert_index_equal(result, expected) def test_range_tz_pytz(self): # see gh-2906 @@ -525,20 +523,18 @@ def test_freq_divides_end_in_nanos(self): class TestCustomDateRange(object): - def setup_method(self, method): - self.rng = cdate_range(START, END) def test_constructor(self): - cdate_range(START, END, freq=CDay()) - cdate_range(START, periods=20, freq=CDay()) - cdate_range(end=START, periods=20, freq=CDay()) + bdate_range(START, END, freq=CDay()) + bdate_range(START, periods=20, freq=CDay()) + bdate_range(end=START, periods=20, freq=CDay()) msg = 'periods must be a number, got C' with tm.assert_raises_regex(TypeError, msg): date_range('2011-1-1', '2012-1-1', 'C') with tm.assert_raises_regex(TypeError, msg): - cdate_range('2011-1-1', '2012-1-1', 'C') + bdate_range('2011-1-1', '2012-1-1', 'C') def test_cached_range(self): DatetimeIndex._cached_range(START, END, offset=CDay()) @@ -547,66 +543,93 @@ def test_cached_range(self): DatetimeIndex._cached_range(end=START, periods=20, offset=CDay()) - pytest.raises(Exception, DatetimeIndex._cached_range, START, END) + # with pytest.raises(TypeError): + with tm.assert_raises_regex(TypeError, "offset"): + DatetimeIndex._cached_range(START, END) - pytest.raises(Exception, DatetimeIndex._cached_range, START, - freq=CDay()) + # with pytest.raises(TypeError): + with tm.assert_raises_regex(TypeError, "specify period"): + DatetimeIndex._cached_range(START, offset=CDay()) - pytest.raises(Exception, DatetimeIndex._cached_range, end=END, - freq=CDay()) + # with pytest.raises(TypeError): + with tm.assert_raises_regex(TypeError, "specify period"): + DatetimeIndex._cached_range(end=END, offset=CDay()) - pytest.raises(Exception, DatetimeIndex._cached_range, periods=20, - freq=CDay()) + # with pytest.raises(TypeError): + with tm.assert_raises_regex(TypeError, "start or end"): + DatetimeIndex._cached_range(periods=20, offset=CDay()) def test_misc(self): end = datetime(2009, 5, 13) - dr = cdate_range(end=end, periods=20) + dr = bdate_range(end=end, periods=20, freq='C') firstDate = end - 19 * CDay() assert len(dr) == 20 assert dr[0] == firstDate assert dr[-1] == end - def test_date_parse_failure(self): - badly_formed_date = '2007/100/1' - - pytest.raises(ValueError, Timestamp, badly_formed_date) - - pytest.raises(ValueError, cdate_range, start=badly_formed_date, - periods=10) - pytest.raises(ValueError, cdate_range, end=badly_formed_date, - periods=10) - pytest.raises(ValueError, cdate_range, badly_formed_date, - badly_formed_date) - def test_daterange_bug_456(self): # GH #456 - rng1 = cdate_range('12/5/2011', '12/5/2011') - rng2 = cdate_range('12/2/2011', '12/5/2011') + rng1 = bdate_range('12/5/2011', '12/5/2011', freq='C') + rng2 = bdate_range('12/2/2011', '12/5/2011', freq='C') rng2.offset = CDay() result = rng1.union(rng2) assert isinstance(result, DatetimeIndex) def test_cdaterange(self): - rng = cdate_range('2013-05-01', periods=3) - xp = DatetimeIndex(['2013-05-01', '2013-05-02', '2013-05-03']) - tm.assert_index_equal(xp, rng) + result = bdate_range('2013-05-01', periods=3, freq='C') + expected = DatetimeIndex(['2013-05-01', '2013-05-02', '2013-05-03']) + 
tm.assert_index_equal(result, expected) def test_cdaterange_weekmask(self): - rng = cdate_range('2013-05-01', periods=3, - weekmask='Sun Mon Tue Wed Thu') - xp = DatetimeIndex(['2013-05-01', '2013-05-02', '2013-05-05']) - tm.assert_index_equal(xp, rng) + result = bdate_range('2013-05-01', periods=3, freq='C', + weekmask='Sun Mon Tue Wed Thu') + expected = DatetimeIndex(['2013-05-01', '2013-05-02', '2013-05-05']) + tm.assert_index_equal(result, expected) + + # raise with non-custom freq + msg = ('a custom frequency string is required when holidays or ' + 'weekmask are passed, got frequency B') + with tm.assert_raises_regex(ValueError, msg): + bdate_range('2013-05-01', periods=3, + weekmask='Sun Mon Tue Wed Thu') def test_cdaterange_holidays(self): - rng = cdate_range('2013-05-01', periods=3, holidays=['2013-05-01']) - xp = DatetimeIndex(['2013-05-02', '2013-05-03', '2013-05-06']) - tm.assert_index_equal(xp, rng) + result = bdate_range('2013-05-01', periods=3, freq='C', + holidays=['2013-05-01']) + expected = DatetimeIndex(['2013-05-02', '2013-05-03', '2013-05-06']) + tm.assert_index_equal(result, expected) + + # raise with non-custom freq + msg = ('a custom frequency string is required when holidays or ' + 'weekmask are passed, got frequency B') + with tm.assert_raises_regex(ValueError, msg): + bdate_range('2013-05-01', periods=3, holidays=['2013-05-01']) def test_cdaterange_weekmask_and_holidays(self): - rng = cdate_range('2013-05-01', periods=3, - weekmask='Sun Mon Tue Wed Thu', - holidays=['2013-05-01']) - xp = DatetimeIndex(['2013-05-02', '2013-05-05', '2013-05-06']) - tm.assert_index_equal(xp, rng) + result = bdate_range('2013-05-01', periods=3, freq='C', + weekmask='Sun Mon Tue Wed Thu', + holidays=['2013-05-01']) + expected = DatetimeIndex(['2013-05-02', '2013-05-05', '2013-05-06']) + tm.assert_index_equal(result, expected) + + # raise with non-custom freq + msg = ('a custom frequency string is required when holidays or ' + 'weekmask are passed, got frequency B') + with tm.assert_raises_regex(ValueError, msg): + bdate_range('2013-05-01', periods=3, + weekmask='Sun Mon Tue Wed Thu', + holidays=['2013-05-01']) + + @pytest.mark.parametrize('freq', [freq for freq in prefix_mapping + if freq.startswith('C')]) + def test_all_custom_freq(self, freq): + # should not raise + bdate_range(START, END, freq=freq, weekmask='Mon Wed Fri', + holidays=['2009-03-14']) + + bad_freq = freq + 'FOO' + msg = 'invalid custom frequency string: {freq}' + with tm.assert_raises_regex(ValueError, msg.format(freq=bad_freq)): + bdate_range(START, END, freq=bad_freq) diff --git a/pandas/tests/indexes/datetimes/test_ops.py b/pandas/tests/indexes/datetimes/test_ops.py index 86e65feec04f3..7cb051d351444 100644 --- a/pandas/tests/indexes/datetimes/test_ops.py +++ b/pandas/tests/indexes/datetimes/test_ops.py @@ -10,7 +10,6 @@ import pandas._libs.tslib as tslib import pandas.util.testing as tm from pandas.errors import PerformanceWarning -from pandas.core.indexes.datetimes import cdate_range from pandas import (DatetimeIndex, PeriodIndex, Series, Timestamp, Timedelta, date_range, TimedeltaIndex, _np_version_under1p10, Index, datetime, Float64Index, offsets, bdate_range) @@ -1208,7 +1207,7 @@ def test_identical(self): class TestCustomDatetimeIndex(object): def setup_method(self, method): - self.rng = cdate_range(START, END) + self.rng = bdate_range(START, END, freq='C') def test_comparison(self): d = self.rng[10] @@ -1277,10 +1276,11 @@ def test_summary(self): self.rng[2:2].summary() def test_summary_pytz(self): - 
cdate_range('1/1/2005', '1/1/2009', tz=pytz.utc).summary() + bdate_range('1/1/2005', '1/1/2009', freq='C', tz=pytz.utc).summary() def test_summary_dateutil(self): - cdate_range('1/1/2005', '1/1/2009', tz=dateutil.tz.tzutc()).summary() + bdate_range('1/1/2005', '1/1/2009', freq='C', + tz=dateutil.tz.tzutc()).summary() def test_equals(self): assert not self.rng.equals(list(self.rng)) diff --git a/pandas/tests/indexes/datetimes/test_setops.py b/pandas/tests/indexes/datetimes/test_setops.py index 4ffd2e1cd1e61..ff436e0501849 100644 --- a/pandas/tests/indexes/datetimes/test_setops.py +++ b/pandas/tests/indexes/datetimes/test_setops.py @@ -4,7 +4,6 @@ import pandas as pd import pandas.util.testing as tm -from pandas.core.indexes.datetimes import cdate_range from pandas import (DatetimeIndex, date_range, Series, bdate_range, DataFrame, Int64Index, Index, to_datetime) from pandas.tseries.offsets import Minute, BMonthEnd, MonthEnd @@ -345,7 +344,7 @@ def test_month_range_union_tz_dateutil(self): class TestCustomDatetimeIndex(object): def setup_method(self, method): - self.rng = cdate_range(START, END) + self.rng = bdate_range(START, END, freq='C') def test_union(self): # overlapping @@ -412,7 +411,7 @@ def test_outer_join(self): def test_intersection_bug(self): # GH #771 - a = cdate_range('11/30/2011', '12/31/2011') - b = cdate_range('12/10/2011', '12/20/2011') + a = bdate_range('11/30/2011', '12/31/2011', freq='C') + b = bdate_range('12/10/2011', '12/20/2011', freq='C') result = a.intersection(b) tm.assert_index_equal(result, b) diff --git a/pandas/tseries/offsets.py b/pandas/tseries/offsets.py index ea37434e3a8d9..3a2a613986dca 100644 --- a/pandas/tseries/offsets.py +++ b/pandas/tseries/offsets.py @@ -2987,6 +2987,7 @@ def generate_range(start=None, end=None, periods=None, CustomBusinessHour, # 'CBH' MonthEnd, # 'M' MonthBegin, # 'MS' + Nano, # 'N' SemiMonthEnd, # 'SM' SemiMonthBegin, # 'SMS' Week, # 'W' @@ -3002,5 +3003,3 @@ def generate_range(start=None, end=None, periods=None, FY5253, FY5253Quarter, ]) - -prefix_mapping['N'] = Nano
- [X] closes #17596
- [X] tests added / passed
- [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [X] whatsnew entry

There is a bit going on here, so see the summary below.

Deprecated `cdate_range`:
- Removed it from the API (it was only added to the API after the 0.20.3 release)
- Added a `FutureWarning` to `cdate_range` indicating deprecation, with a test for it
- Converted other tests using `cdate_range` to use `bdate_range` instead
- Cleaned up the entire test script to make it more consistent with others (i.e. using `with` contexts)
- Removed a reference to `cdate_range` from the current whatsnew (from a previous commit I wrote)

Expanded functionality of `bdate_range` (see the sketch below):
- Added the additional `cdate_range` parameters (`weekmask` and `holidays`) for custom frequency date ranges
- Expanded that functionality beyond what was previously present in `cdate_range`:
  - `cdate_range` only supported custom business day ('C') with the `weekmask` and `holidays` parameters; `bdate_range` now supports any custom frequency string, by checking whether the passed freq starts with 'C'.
  - Added a test that should catch it if a new non-custom frequency starting with 'C' is ever added.
  - Didn't add this to `cdate_range`; left that as-is.
- Raise a `ValueError` if non-default `weekmask` or `holidays` are passed without a custom frequency, with a test for this too

Edits to `timeseries.rst`:
- Added a custom frequency `bdate_range` example to the "Generating Ranges of Timestamps" section
- Reordered some of the examples in that section, trying to order them by simplicity
- Otherwise, mostly small rewording changes and other small fixes (ticks, capitalization, etc.)
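A quick sketch of the expanded `bdate_range` API, mirroring the example added to `timeseries.rst` in this PR:

```python
from datetime import datetime
import pandas as pd

start = datetime(2011, 1, 1)
end = datetime(2012, 1, 1)

weekmask = 'Mon Wed Fri'
holidays = [datetime(2011, 1, 5), datetime(2011, 3, 14)]

# Custom business days -- previously spelled cdate_range(start, end, ...)
pd.bdate_range(start, end, freq='C', weekmask=weekmask, holidays=holidays)

# Any custom frequency string now works, e.g. custom business month start
pd.bdate_range(start, end, freq='CBMS', weekmask=weekmask)

# Passing weekmask/holidays without a custom frequency raises:
# ValueError: a custom frequency string is required when holidays or
# weekmask are passed, got frequency B
```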
https://api.github.com/repos/pandas-dev/pandas/pulls/17691
2017-09-27T05:48:25Z
2017-10-02T11:28:31Z
2017-10-02T11:28:30Z
2017-10-02T13:39:09Z
typo fix evalute_compare-->evaluate_compare
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index dba616c2d15e6..c4e1398d0178f 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -3742,7 +3742,7 @@ def _evaluate_with_timedelta_like(self, other, op, opstr): def _evaluate_with_datetime_like(self, other, op, opstr): raise TypeError("can only perform ops with datetime like values") - def _evalute_compare(self, op): + def _evaluate_compare(self, op): raise base.AbstractMethodError(self) @classmethod
It's never actually called, and if it were, it would just raise `AbstractMethodError`.

- [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
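For context, a minimal self-contained sketch of the abstract-stub pattern involved (using a stand-in error class rather than pandas' real `AbstractMethodError`): the method exists only to be overridden, so the misspelled name was never reachable through normal dispatch.

```python
class AbstractMethodError(NotImplementedError):
    """Stand-in for pandas' AbstractMethodError."""
    def __init__(self, class_instance):
        super(AbstractMethodError, self).__init__(
            'This method must be defined in the concrete class of %s'
            % type(class_instance).__name__)

class IndexBase(object):
    def _evaluate_compare(self, op):
        # Abstract stub: concrete index classes are expected to override it.
        raise AbstractMethodError(self)
```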
https://api.github.com/repos/pandas-dev/pandas/pulls/17688
2017-09-27T01:56:31Z
2017-09-27T10:20:44Z
2017-09-27T10:20:44Z
2017-10-30T16:23:19Z
BUG: remove tab completion for deprecated functions
diff --git a/pandas/core/accessor.py b/pandas/core/accessor.py index c8476841bfce4..7a2da9655cc4a 100644 --- a/pandas/core/accessor.py +++ b/pandas/core/accessor.py @@ -10,10 +10,11 @@ class DirNamesMixin(object): _accessors = frozenset([]) + _deprecations = frozenset([]) def _dir_deletions(self): """ delete unwanted __dir__ for this object """ - return self._accessors + return self._accessors | self._deprecations def _dir_additions(self): """ add addtional __dir__ for this object """ diff --git a/pandas/core/categorical.py b/pandas/core/categorical.py index 8b055e9ae59c3..011aa74632296 100644 --- a/pandas/core/categorical.py +++ b/pandas/core/categorical.py @@ -231,6 +231,7 @@ class Categorical(PandasObject): # ops, which raise __array_priority__ = 1000 _dtype = CategoricalDtype() + _deprecations = frozenset(['labels']) _typ = 'categorical' def __init__(self, values, categories=None, ordered=None, dtype=None, @@ -412,13 +413,6 @@ def dtype(self): """The :ref:`~pandas.api.types.CategoricalDtype` for this instance""" return self._dtype - def __dir__(self): - # Avoid IPython warnings for deprecated properties - # https://github.com/pandas-dev/pandas/issues/16409 - rv = set(dir(type(self))) - rv.discard("labels") - return sorted(rv) - @property def _constructor(self): return Categorical diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 912dbdb9de705..579d9f10d5875 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -299,6 +299,7 @@ def _constructor(self): return DataFrame _constructor_sliced = Series + _deprecations = NDFrame._deprecations | frozenset(['sortlevel']) @property def _constructor_expanddim(self): diff --git a/pandas/core/generic.py b/pandas/core/generic.py index a7be145f21083..2fb0e348c01c0 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -112,6 +112,8 @@ class NDFrame(PandasObject, SelectionMixin): '__array_interface__'] _internal_names_set = set(_internal_names) _accessors = frozenset([]) + _deprecations = frozenset(['as_blocks', 'blocks', + 'consolidate', 'convert_objects']) _metadata = [] is_copy = None diff --git a/pandas/core/series.py b/pandas/core/series.py index db8ee2529ef57..89add1ef4c590 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -145,6 +145,8 @@ class Series(base.IndexOpsMixin, generic.NDFrame): """ _metadata = ['name'] _accessors = frozenset(['dt', 'cat', 'str']) + _deprecations = generic.NDFrame._deprecations | frozenset( + ['sortlevel', 'reshape']) _allow_index_ops = True def __init__(self, data=None, index=None, dtype=None, name=None, diff --git a/pandas/tests/frame/test_api.py b/pandas/tests/frame/test_api.py index 230a5806ccb2e..5ea8230ced41b 100644 --- a/pandas/tests/frame/test_api.py +++ b/pandas/tests/frame/test_api.py @@ -438,3 +438,14 @@ def _check_f(base, f): # rename f = lambda x: x.rename({1: 'foo'}, inplace=True) _check_f(d.copy(), f) + + def test_tab_complete_warning(self, ip): + # https://github.com/pandas-dev/pandas/issues/16409 + pytest.importorskip('IPython', minversion="6.0.0") + from IPython.core.completer import provisionalcompleter + + code = "import pandas as pd; df = pd.DataFrame()" + ip.run_code(code) + with tm.assert_produces_warning(None): + with provisionalcompleter('ignore'): + list(ip.Completer.completions('df.', 1)) diff --git a/pandas/tests/series/test_api.py b/pandas/tests/series/test_api.py index d0805e2bb54d2..56b8a90ec0c9f 100644 --- a/pandas/tests/series/test_api.py +++ b/pandas/tests/series/test_api.py @@ -407,3 +407,14 @@ def test_empty_method(self): for 
full_series in [pd.Series([1]), pd.Series(index=[1])]: assert not full_series.empty + + def test_tab_complete_warning(self, ip): + # https://github.com/pandas-dev/pandas/issues/16409 + pytest.importorskip('IPython', minversion="6.0.0") + from IPython.core.completer import provisionalcompleter + + code = "import pandas as pd; s = pd.Series()" + ip.run_code(code) + with tm.assert_produces_warning(None): + with provisionalcompleter('ignore'): + list(ip.Completer.completions('s.', 1))
closes #17674
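For readers skimming the diff: each class declares its deprecated attribute names in a `_deprecations` frozenset, which `DirNamesMixin._dir_deletions` subtracts from `__dir__`, so IPython's completer never evaluates the deprecated properties and never triggers their warnings. A simplified, self-contained sketch of that idea (not pandas' exact code):

```python
import warnings


class DirNamesMixin(object):
    _deprecations = frozenset()

    def __dir__(self):
        # Drop deprecated names from the attribute listing so tab
        # completion never evaluates them (and never warns).
        return sorted(set(super().__dir__()) - self._deprecations)


class Frame(DirNamesMixin):
    _deprecations = frozenset(['sortlevel'])

    @property
    def sortlevel(self):
        warnings.warn('sortlevel is deprecated', FutureWarning)


f = Frame()
assert 'sortlevel' not in dir(f)   # hidden from completers
f.sortlevel                        # direct access still warns
```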
https://api.github.com/repos/pandas-dev/pandas/pulls/17683
2017-09-26T13:51:02Z
2017-09-26T16:25:08Z
2017-09-26T16:25:08Z
2017-09-26T16:27:47Z
DOC: Add references for different index types + examples for pd.Index
diff --git a/doc/source/api.rst b/doc/source/api.rst index 28d4567027572..d98a18e6f7e36 100644 --- a/doc/source/api.rst +++ b/doc/source/api.rst @@ -1626,6 +1626,52 @@ Conversion .. currentmodule:: pandas +PeriodIndex +-------------- + +.. autosummary:: + :toctree: generated/ + :template: autosummary/class_without_autosummary.rst + + PeriodIndex + +Attributes +~~~~~~~~~~ +.. autosummary:: + :toctree: generated/ + + PeriodIndex.day + PeriodIndex.dayofweek + PeriodIndex.dayofyear + PeriodIndex.days_in_month + PeriodIndex.daysinmonth + PeriodIndex.end_time + PeriodIndex.freq + PeriodIndex.freqstr + PeriodIndex.hour + PeriodIndex.is_leap_year + PeriodIndex.minute + PeriodIndex.month + PeriodIndex.quarter + PeriodIndex.qyear + PeriodIndex.second + PeriodIndex.start_time + PeriodIndex.week + PeriodIndex.weekday + PeriodIndex.weekofyear + PeriodIndex.year + +Methods +~~~~~~~ +.. autosummary:: + :toctree: generated/ + + PeriodIndex.asfreq + PeriodIndex.strftime + PeriodIndex.to_timestamp + PeriodIndex.tz_convert + PeriodIndex.tz_localize + Scalars ------- @@ -1653,13 +1699,11 @@ Attributes Period.is_leap_year Period.minute Period.month - Period.now Period.ordinal Period.quarter Period.qyear Period.second Period.start_time - Period.strftime Period.week Period.weekday Period.weekofyear @@ -1671,6 +1715,7 @@ Methods :toctree: generated/ Period.asfreq + Period.now Period.strftime Period.to_timestamp diff --git a/doc/sphinxext/numpydoc/numpydoc.py b/doc/sphinxext/numpydoc/numpydoc.py index f06915997c616..680983cdf6443 100755 --- a/doc/sphinxext/numpydoc/numpydoc.py +++ b/doc/sphinxext/numpydoc/numpydoc.py @@ -45,7 +45,7 @@ def mangle_docstrings(app, what, name, obj, options, lines, # PANDAS HACK (to remove the list of methods/attributes for Categorical) no_autosummary = [".Categorical", "CategoricalIndex", "IntervalIndex", "RangeIndex", "Int64Index", "UInt64Index", - "Float64Index"] + "Float64Index", "PeriodIndex"] if what == "class" and any(name.endswith(n) for n in no_autosummary): cfg['class_members_list'] = False diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index c4e1398d0178f..cc917ea3503bc 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -123,6 +123,23 @@ class Index(IndexOpsMixin, PandasObject): Notes ----- An Index instance can **only** contain hashable objects + + Examples + -------- + >>> pd.Index([1, 2, 3]) + Int64Index([1, 2, 3], dtype='int64') + + >>> pd.Index(list('abc')) + Index(['a', 'b', 'c'], dtype='object') + + See Also + --------- + RangeIndex : Index implementing a monotonic integer range + CategoricalIndex : Index of :class:`Categorical` s. + MultiIndex : A multi-level, or hierarchical, Index + IntervalIndex : an Index of :class:`Interval` s. + DatetimeIndex, TimedeltaIndex, PeriodIndex + Int64Index, UInt64Index, Float64Index """ # To hand over control to subclasses _join_precedence = 1 diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py index 1419da3fa8861..862bc51ada9d2 100644 --- a/pandas/core/indexes/datetimes.py +++ b/pandas/core/indexes/datetimes.py @@ -208,9 +208,14 @@ class DatetimeIndex(DatelikeOps, TimelikeOps, DatetimeIndexOpsMixin, Notes ----- - To learn more about the frequency strings, please see `this link <http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases>`__. 
+ + See Also + --------- + Index : The base pandas Index type + TimedeltaIndex : Index of timedelta64 data + PeriodIndex : Index of Period data """ _typ = 'datetimeindex' diff --git a/pandas/core/indexes/interval.py b/pandas/core/indexes/interval.py index 29699f664bbf3..7bf7cfce515a1 100644 --- a/pandas/core/indexes/interval.py +++ b/pandas/core/indexes/interval.py @@ -147,7 +147,7 @@ class IntervalIndex(IntervalMixin, Index): See Also -------- - Index + Index : The base pandas Index type Interval : A bounded slice-like interval interval_range : Function to create a fixed frequency IntervalIndex, IntervalIndex.from_arrays, IntervalIndex.from_breaks, diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py index 9ffac0832062d..d200642a9f28f 100644 --- a/pandas/core/indexes/multi.py +++ b/pandas/core/indexes/multi.py @@ -95,6 +95,7 @@ class MultiIndex(Index): MultiIndex.from_product : Create a MultiIndex from the cartesian product of iterables MultiIndex.from_tuples : Convert list of tuples to a MultiIndex + Index : The base pandas Index type """ # initialize to zero-length tuples to make everything work diff --git a/pandas/core/indexes/numeric.py b/pandas/core/indexes/numeric.py index 142e0f36c66ec..9fc47ad7b773c 100644 --- a/pandas/core/indexes/numeric.py +++ b/pandas/core/indexes/numeric.py @@ -108,19 +108,21 @@ def is_all_dates(self): Make a copy of input ndarray name : object Name to be stored in the index + Notes ----- An Index instance can **only** contain hashable objects. + + See also + -------- + Index : The base pandas Index type """ _int64_descr_args = dict( klass='Int64Index', ltype='integer', dtype='int64', - extra="""This is the default index type used - by the DataFrame and Series ctors when no explicit - index is provided by the user. -""" + extra='' ) diff --git a/pandas/core/indexes/period.py b/pandas/core/indexes/period.py index e6fc47845012a..b70b4c4e4067c 100644 --- a/pandas/core/indexes/period.py +++ b/pandas/core/indexes/period.py @@ -164,6 +164,13 @@ class PeriodIndex(DatelikeOps, DatetimeIndexOpsMixin, Int64Index): >>> idx = PeriodIndex(year=year_arr, quarter=q_arr) >>> idx2 = PeriodIndex(start='2000', end='2010', freq='A') + + See Also + --------- + Index : The base pandas Index type + Period : Represents a period of time + DatetimeIndex : Index with datetime64 data + TimedeltaIndex : Index of timedelta64 data """ _box_scalars = True _typ = 'periodindex' diff --git a/pandas/core/indexes/range.py b/pandas/core/indexes/range.py index a3b899d58255b..9f7bac641ae08 100644 --- a/pandas/core/indexes/range.py +++ b/pandas/core/indexes/range.py @@ -23,9 +23,14 @@ class RangeIndex(Int64Index): """ - Immutable Index implementing a monotonic range. RangeIndex is a - memory-saving special case of Int64Index limited to representing - monotonic ranges. + Immutable Index implementing a monotonic integer range. + + RangeIndex is a memory-saving special case of Int64Index limited to + representing monotonic ranges. Using RangeIndex may in some instances + improve computing speed. + + This is the default index type used + by DataFrame and Series when no explicit index is provided by the user. Parameters ---------- @@ -38,6 +43,10 @@ class RangeIndex(Int64Index): copy : bool, default False Unused, accepted for homogeneity with other index types. 
+ See Also + -------- + Index : The base pandas Index type + Int64Index : Index of int64 data """ _typ = 'rangeindex' diff --git a/pandas/core/indexes/timedeltas.py b/pandas/core/indexes/timedeltas.py index 12b7936503ad7..89757c2bf40da 100644 --- a/pandas/core/indexes/timedeltas.py +++ b/pandas/core/indexes/timedeltas.py @@ -114,6 +114,13 @@ class TimedeltaIndex(DatetimeIndexOpsMixin, TimelikeOps, Int64Index): To learn more about the frequency strings, please see `this link <http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases>`__. + + See Also + --------- + Index : The base pandas Index type + Timedelta : Represents a duration between two dates or times. + DatetimeIndex : Index of datetime64 data + PeriodIndex : Index of Period data """ _typ = 'timedeltaindex'
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`

Added "See Also" references for the different index types, to make it easier to find the correct index type when in doubt, and added simple examples for `pd.Index` (a brief sketch of the documented behavior follows).
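The behavior the new docstring examples document, runnable as-is (outputs shown are for the pandas of this era; newer versions print a plain `Index` with an int64 dtype instead of `Int64Index`):

```python
import pandas as pd

# pd.Index infers a concrete index subtype from its data.
print(pd.Index([1, 2, 3]))     # Int64Index([1, 2, 3], dtype='int64')
print(pd.Index(list('abc')))   # Index(['a', 'b', 'c'], dtype='object')

# RangeIndex is the memory-saving default when no index is given.
print(pd.DataFrame({'a': [1, 2]}).index)  # RangeIndex(start=0, stop=2, step=1)
```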
https://api.github.com/repos/pandas-dev/pandas/pulls/17680
2017-09-26T11:08:16Z
2017-10-06T07:15:33Z
2017-10-06T07:15:33Z
2017-10-09T21:00:05Z
DOC: correct grammar in unicode section
diff --git a/doc/source/options.rst b/doc/source/options.rst index f042e4d3f5120..2da55a5a658a4 100644 --- a/doc/source/options.rst +++ b/doc/source/options.rst @@ -474,10 +474,10 @@ Unicode Formatting Enabling this option will affect the performance for printing of DataFrame and Series (about 2 times slower). Use only when it is actually required. -Some East Asian countries use Unicode characters its width is corresponding to 2 alphabets. -If DataFrame or Series contains these characters, default output cannot be aligned properly. +Some East Asian countries use Unicode characters whose width corresponds to two Latin characters. +If a DataFrame or Series contains these characters, the default output mode may not align them properly. -.. note:: Screen captures are attached for each outputs to show the actual results. +.. note:: Screen captures are attached for each output to show the actual results. .. ipython:: python @@ -486,8 +486,9 @@ If DataFrame or Series contains these characters, default output cannot be align .. image:: _static/option_unicode01.png -Enable ``display.unicode.east_asian_width`` allows pandas to check each character's "East Asian Width" property. -These characters can be aligned properly by checking this property, but it takes longer time than standard ``len`` function. +Enabling ``display.unicode.east_asian_width`` allows pandas to check each character's "East Asian Width" property. +These characters can be aligned properly by setting this option to ``True``. However, this will result in longer render +times than the standard ``len`` function. .. ipython:: python @@ -496,9 +497,10 @@ These characters can be aligned properly by checking this property, but it takes .. image:: _static/option_unicode02.png -In addition, Unicode contains characters which width is "Ambiguous". These character's width should be either 1 or 2 depending on terminal setting or encoding. Because this cannot be distinguished from Python, ``display.unicode.ambiguous_as_wide`` option is added to handle this. +In addition, Unicode characters whose width is "Ambiguous" can either be 1 or 2 characters wide depending on the +terminal setting or encoding. The option ``display.unicode.ambiguous_as_wide`` can be used to handle the ambiguity. -By default, "Ambiguous" character's width, "¡" (inverted exclamation) in below example, is regarded as 1. +By default, an "Ambiguous" character's width, such as "¡" (inverted exclamation) in the example below, is taken to be 1. .. ipython:: python @@ -507,7 +509,10 @@ By default, "Ambiguous" character's width, "¡" (inverted exclamation) in below .. image:: _static/option_unicode03.png -Enabling ``display.unicode.ambiguous_as_wide`` lets pandas to figure these character's width as 2. Note that this option will be effective only when ``display.unicode.east_asian_width`` is enabled. Confirm starting position has been changed, but is not aligned properly because the setting is mismatched with this environment. +Enabling ``display.unicode.ambiguous_as_wide`` makes pandas interpret these characters' widths to be 2. +(Note that this option will only be effective when ``display.unicode.east_asian_width`` is enabled.) + +However, setting this option incorrectly for your terminal will cause these characters to be aligned incorrectly: .. ipython:: python
Rewrite some sentences in the Unicode section for grammatical correctness.
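The two options the rewritten section covers can be exercised directly; a small sketch (the option names are real pandas options, the sample data is arbitrary):

```python
import pandas as pd

df = pd.DataFrame({u'国籍': [u'日本', u'米国'],
                   u'名前': [u'山田', u'Smith']})

# Measure East Asian wide characters as two columns when rendering.
pd.set_option('display.unicode.east_asian_width', True)
# Treat "Ambiguous"-width characters (e.g. '¡') as wide too;
# this only takes effect together with the option above.
pd.set_option('display.unicode.ambiguous_as_wide', True)
print(df)
```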
https://api.github.com/repos/pandas-dev/pandas/pulls/17678
2017-09-26T04:50:37Z
2017-09-26T10:34:34Z
2017-09-26T10:34:34Z
2017-09-26T10:34:39Z
ENH: simultaneously unpivot multiple groups of columns with melt
diff --git a/doc/source/reshaping.rst b/doc/source/reshaping.rst index 1b81d83bb76c7..2a5fc732ccbb4 100644 --- a/doc/source/reshaping.rst +++ b/doc/source/reshaping.rst @@ -265,24 +265,59 @@ the right thing: Reshaping by Melt ----------------- -The top-level :func:`melt` and :func:`~DataFrame.melt` functions are useful to +The top-level :func:`melt` function and the equivalent :func:`DataFrame.melt` method are useful to massage a DataFrame into a format where one or more columns are identifier variables, while all other columns, considered measured variables, are "unpivoted" to the -row axis, leaving just two non-identifier columns, "variable" and "value". The -names of those columns can be customized by supplying the ``var_name`` and +row axis, leaving just two non-identifier columns, "variable" and "value". + +For instance, it is possible to unpivot the fruit columns (``Mango``, ``Orange``, and ``Watermelon``) into a single column +with their corresponding values in another. + +.. ipython:: python + + df = pd.DataFrame({'State': ['Texas', 'Florida', 'Alabama'], + 'Mango':[4, 10, 90], + 'Orange': [10, 8, 14], + 'Watermelon':[40, 99, 43]}, + columns=['State', 'Mango', 'Orange', 'Watermelon']) + + df + + df.melt(id_vars='State', value_vars=['Mango', 'Orange', 'Watermelon']) + +The resulting names of the unpivoted columns can be customized by supplying strings to the ``var_name`` and ``value_name`` parameters. -For instance, +.. ipython:: python + + df.melt(id_vars='State', value_vars=['Mango', 'Orange', 'Watermelon'], + var_name='Fruit', value_name='Pounds') + +.. versionadded:: 0.22.0 + +Passing a list of lists to `value_vars` allows you to simultaneously melt +independent column groups. The following DataFrame contains an addtional column grouping of drinks (``Gin`` and ``Vokda``) +that may be unpivoted along with the fruit columns. The groups need not be the same size. Additionally, +the ``var_name`` and ``value_name`` parameters may be passed a list of strings to name each of the returned +variable and value columns. .. ipython:: python - cheese = pd.DataFrame({'first' : ['John', 'Mary'], - 'last' : ['Doe', 'Bo'], - 'height' : [5.5, 6.0], - 'weight' : [130, 150]}) - cheese - cheese.melt(id_vars=['first', 'last']) - cheese.melt(id_vars=['first', 'last'], var_name='quantity') + df = pd.DataFrame({'State': ['Texas', 'Florida', 'Alabama'], + 'Mango':[4, 10, 90], + 'Orange': [10, 8, 14], + 'Watermelon':[40, 99, 43], + 'Gin':[16, 200, 34], + 'Vodka':[20, 33, 18]}, + columns=['State', 'Mango', 'Orange', 'Watermelon', + 'Gin', 'Vodka']) + + df + + df.melt(id_vars='State', + value_vars=[['Mango', 'Orange', 'Watermelon'], ['Gin', 'Vodka']], + var_name=['Fruit', 'Drink'], + value_name=['Pounds', 'Ounces']) Another way to transform is to use the ``wide_to_long`` panel data convenience function. diff --git a/doc/source/whatsnew/v0.22.0.txt b/doc/source/whatsnew/v0.22.0.txt index 93fd218bd7743..1a540167ed28d 100644 --- a/doc/source/whatsnew/v0.22.0.txt +++ b/doc/source/whatsnew/v0.22.0.txt @@ -8,12 +8,50 @@ deprecations, new features, enhancements, and performance improvements along with a large number of bug fixes. We recommend that all users upgrade to this version. +Highlights include: + +- The :meth:`DataFrame.melt` method and top-level :func:`melt` function can now simultaneously unpivot independent groups of columns, see :ref:`here <whatsnew_0220.enhancements.melt>`. + +.. contents:: What's new in v0.22.0 + :local: + :backlinks: none + :depth: 2 + .. 
_whatsnew_0220.enhancements: New features ~~~~~~~~~~~~ -- +.. _whatsnew_0220.enhancements.melt: + +Simultaneous unpivoting of independent groups of columns with ``melt`` +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +Previously, ``melt`` was only able to unpivot a single group of columns. This was done by passing all the column names in the group as a list to the ``value_vars`` parameter. + +In the following DataFrame, there are two groups, fruits (``Mango``, ``Orange``, ``Watermelon``) and drinks (``Gin``, ``Vodka``) that can each be unpivoted into their own column. Previously, ``melt`` could only unpivot a single column grouping: + +.. ipython:: python + + df = pd.DataFrame({'State': ['Texas', 'Florida', 'Alabama'], + 'Mango':[4, 10, 90], + 'Orange': [10, 8, 14], + 'Watermelon':[40, 99, 43], + 'Gin':[16, 200, 34], + 'Vodka':[20, 33, 18]}, + columns=['State', 'Mango', 'Orange', + 'Watermelon', 'Gin', 'Vodka']) + + df.melt(id_vars='State', value_vars=['Mango', 'Orange', 'Watermelon'], + var_name='Fruit', value_name='Pounds') + +Now, ``melt`` can unpivot any number of column groups by passing a list of lists to the ``value_vars`` parameter. The resulting unpivoted columns can be named by passing a list to ``var_name``. The corresponding values of each group may also be named by passing a list to ``value_name``. Notice that the column groups need not be equal in length: + +.. ipython:: python + + df.melt(id_vars='State', + value_vars=[['Mango', 'Orange', 'Watermelon'], ['Gin', 'Vodka']], + var_name=['Fruit', 'Drink'], + value_name=['Pounds', 'Ounces']) - - diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 9ce6b6148be56..3085327f63bb9 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -4587,10 +4587,10 @@ def unstack(self, level=-1, fill_value=None): leaving identifier variables set. This function is useful to massage a DataFrame into a format where one - or more columns are identifier variables (`id_vars`), while all other - columns, considered measured variables (`value_vars`), are "unpivoted" to - the row axis, leaving just two non-identifier columns, 'variable' and - 'value'. + or more columns are identifier variables (`id_vars`), while other groups of + columns, considered measured variables (`value_vars`), are "unpivoted" so + that each group consists of two new columns, a 'variable', labeled by + `var_name`, and its corresponding 'value', labeled by `value_name`. %(versionadded)s Parameters @@ -4599,13 +4599,14 @@ def unstack(self, level=-1, fill_value=None): id_vars : tuple, list, or ndarray, optional Column(s) to use as identifier variables. value_vars : tuple, list, or ndarray, optional - Column(s) to unpivot. If not specified, uses all columns that - are not set as `id_vars`. - var_name : scalar - Name to use for the 'variable' column. If None it uses + Column(s) to unpivot. If list of lists, simultaneously unpivot + each sublist into its own variable column. If not specified, uses all + columns that are not set as `id_vars`. + var_name : scalar or list + Name(s) to use for the 'variable' column(s). If None it uses ``frame.columns.name`` or 'variable'. - value_name : scalar, default 'value' - Name to use for the 'value' column. + value_name : scalar or list, default 'value' + Name(s) to use for the 'value' column(s). col_level : int or string, optional If columns are a MultiIndex then use this level to melt. @@ -4673,6 +4674,31 @@ def unstack(self, level=-1, fill_value=None): 1 b B E 3 2 c B E 5 + .. 
versionadded:: 0.22.0 + + Simultaneously melt multiple groups of columns: + + >>> df2 = pd.DataFrame({'City': ['Houston', 'Miami'], + 'Mango':[4, 10], + 'Orange': [10, 8], + 'Gin':[16, 200], + 'Vodka':[20, 33]}, + columns=['City','Mango', 'Orange', 'Gin', 'Vodka']) + >>> df2 + City Mango Orange Gin Vodka + 0 Houston 4 10 16 20 + 1 Miami 10 8 200 33 + + >>> %(caller)sid_vars='City', + value_vars=[['Mango', 'Orange'], ['Gin', 'Vodka']], + var_name=['Fruit', 'Drink'], + value_name=['Pounds', 'Ounces']) + City Fruit Pounds Drink Ounces + 0 Houston Mango 4 Gin 16 + 1 Miami Mango 10 Gin 200 + 2 Houston Orange 10 Vodka 20 + 3 Miami Orange 8 Vodka 33 + """) @Appender(_shared_docs['melt'] % diff --git a/pandas/core/reshape/melt.py b/pandas/core/reshape/melt.py index 46edc0b96b7c2..42733112bfc5a 100644 --- a/pandas/core/reshape/melt.py +++ b/pandas/core/reshape/melt.py @@ -6,7 +6,8 @@ from pandas import compat from pandas.core.categorical import Categorical -from pandas.core.dtypes.generic import ABCMultiIndex +from pandas.core.frame import DataFrame +from pandas.core.index import MultiIndex from pandas.core.frame import _shared_docs from pandas.util._decorators import Appender @@ -16,72 +17,264 @@ from pandas.core.tools.numeric import to_numeric -@Appender(_shared_docs['melt'] % - dict(caller='pd.melt(df, ', - versionadded="", - other='DataFrame.melt')) -def melt(frame, id_vars=None, value_vars=None, var_name=None, - value_name='value', col_level=None): - # TODO: what about the existing index? - if id_vars is not None: - if not is_list_like(id_vars): - id_vars = [id_vars] - elif (isinstance(frame.columns, ABCMultiIndex) and - not isinstance(id_vars, list)): - raise ValueError('id_vars must be a list of tuples when columns' - ' are a MultiIndex') +# ensure that the the correct tuple/list is used when selecting +# from a MultiIndex/Index frame +def check_vars(frame, var, var_string, num_col_levels): + for v in var: + if num_col_levels > 1: + if not isinstance(v, tuple): + raise ValueError('{} must be a list of tuples' + ' when columns are a MultiIndex' + .format(var_string)) + elif len(v) != num_col_levels: + raise ValueError('all tuples in {} must be length {}' + .format(var_string, + frame.columns.nlevels)) else: - id_vars = list(id_vars) - else: - id_vars = [] - - if value_vars is not None: - if not is_list_like(value_vars): - value_vars = [value_vars] - elif (isinstance(frame.columns, ABCMultiIndex) and - not isinstance(value_vars, list)): - raise ValueError('value_vars must be a list of tuples when' - ' columns are a MultiIndex') - else: - value_vars = list(value_vars) - frame = frame.loc[:, id_vars + value_vars] + if is_list_like(v) and len(v) > 1: + raise ValueError('DataFrame has only a single level of ' + 'columns. {} is not a column'.format(v)) + + +def melt_one(frame, id_vars=None, value_vars=None, var_name=None, + value_name='value', col_level=None, extra_group=0, + var_end=None): + """ + melts exactly one group. Parameters are a single list not + a list of lists like the main melt function + """ + + # TODO: what about the existing index? + # Assume all column levels used when none given + if len(col_level) == 0: + num_col_levels = frame.columns.nlevels else: - frame = frame.copy() + num_col_levels = len(col_level) - if col_level is not None: # allow list or other? 
- # frame is a copy - frame.columns = frame.columns.get_level_values(col_level) + check_vars(frame, id_vars, 'id_vars', num_col_levels) + check_vars(frame, value_vars, 'value_vars', num_col_levels) - if var_name is None: - if isinstance(frame.columns, ABCMultiIndex): - if len(frame.columns.names) == len(set(frame.columns.names)): - var_name = frame.columns.names + if var_name != [] and len(var_name) != num_col_levels: + raise ValueError('Length of var_name must match effective number of ' + 'column levels.') + + # allow both integer location and label for column levels + if col_level != []: + droplevels = list(range(frame.columns.nlevels)) + for level in col_level: + if isinstance(level, int): + droplevels.remove(level) + else: + droplevels.remove(frame.columns.names.index(level)) + if droplevels != []: + frame = frame.copy() + frame.columns = frame.columns.droplevel(droplevels) + + for iv in id_vars: + if iv not in frame.columns: + raise KeyError('{} not in columns'.format(iv)) + + if value_vars != []: + for vv in value_vars: + if vv not in frame.columns: + raise KeyError('{} not in columns'.format(vv)) + + # use column level names if available, if not auto-name them + if var_name == []: + names = list(frame.columns.names) + if len(names) == 1: + if names[0] is None: + var_name.append('variable') else: - var_name = ['variable_{i}'.format(i=i) - for i in range(len(frame.columns.names))] + var_name.append(names[0]) + elif names.count(None) == 1: + names[names.index(None)] = 'variable' + var_name = names + else: + # small API break - use column level names when avaialable + missing_name_count = 0 + for name in names: + if name is None: + var_name.append('variable_{}'.format(missing_name_count)) + missing_name_count += 1 + else: + var_name.append(name) + + # when using default var_name, append int to make unique + if var_end is not None: + var_name = [vn + '_' + str(var_end) for vn in var_name] + + N = len(frame) + + # find integer location of all the melted columns + non_id_ilocs = [] + if value_vars != []: + for v in value_vars: + for i, v1 in enumerate(frame.columns): + if v == v1: + non_id_ilocs.append(i) + else: + if id_vars == []: + non_id_ilocs = list(range(frame.shape[1])) else: - var_name = [frame.columns.name if frame.columns.name is not None - else 'variable'] - if isinstance(var_name, compat.string_types): - var_name = [var_name] + for i, v in enumerate(frame.columns): + if v not in id_vars: + non_id_ilocs.append(i) - N, K = frame.shape - K -= len(id_vars) + K = len(non_id_ilocs) mdata = {} + mcolumns = [] + + # id_vars do not get melted, but need to repeat for each + # column in melted group. 
extra_group is used for cases + # when first group is not the longest for col in id_vars: - mdata[col] = np.tile(frame.pop(col).values, K) + pandas_obj = frame[col] + if isinstance(pandas_obj, DataFrame): + for i in range(pandas_obj.shape[1]): + col_name = col + '_id_' + str(i) + mdata[col_name] = np.tile(pandas_obj.iloc[:, i].values, + K + extra_group) + mcolumns.append(col_name) + else: + mdata[col] = np.tile(pandas_obj, K + extra_group) + mcolumns.append(col) - mcolumns = id_vars + var_name + [value_name] + # melt all the columns into one long array + values = np.concatenate([frame.iloc[:, i] for i in non_id_ilocs]) + if extra_group > 0: + values = np.concatenate((values, np.full([N * extra_group], np.nan))) + mdata[value_name[0]] = values - mdata[value_name] = frame.values.ravel('F') + # the column names of the melted groups need to repeat for i, col in enumerate(var_name): - # asanyarray will keep the columns as an Index - mdata[col] = np.asanyarray(frame.columns - ._get_level_values(i)).repeat(N) + values = frame.columns[non_id_ilocs]._get_level_values(i) - from pandas import DataFrame - return DataFrame(mdata, columns=mcolumns) + if isinstance(values, MultiIndex): + # asanyarray will keep the columns as an Index + values = np.asanyarray(values).repeat(N) + else: + # faster to use lists than np.repeat? + data_list = [] + for v in values.tolist(): + data_list.extend([v] * N) + values = data_list + + # Append missing values for any groups shorter than largest + if extra_group > 0: + values = np.concatenate((values, + np.full([N * extra_group], np.nan))) + mdata[col] = values + mcolumns += var_name + value_name + + return mdata, mcolumns + + +def convert_to_list(val): + if val is None: + return [] + elif isinstance(val, np.ndarray): + return val.tolist() + elif not is_list_like(val): + return [val] + else: + return list(val) + + +@Appender(_shared_docs['melt'] % + dict(caller='pd.melt(df, ', + versionadded="", + other='DataFrame.melt')) +def melt(frame, id_vars=None, value_vars=None, var_name=None, + value_name='value', col_level=None): + + # much easier to handle parameters when they are all lists + id_vars = convert_to_list(id_vars) + value_vars = convert_to_list(value_vars) + var_name = convert_to_list(var_name) + value_name = convert_to_list(value_name) + col_level = convert_to_list(col_level) + + # Whan a list of list is passed, assume multiple melt groups + if value_vars != [] and isinstance(value_vars[0], list): + if var_name != []: + if len(value_vars) != len(var_name): + raise ValueError('Number of inner lists of value_vars must ' + 'equal length of var_name ' + '{} != {}'.format(len(value_vars), + len(var_name))) + else: + # for consistency, when the default var_name is used + var_name = [[]] * len(value_vars) + + if len(value_name) > 1: + if len(value_vars) != len(value_name): + raise ValueError('Number of inner lists of value_vars must ' + 'equal length of value_name ' + '{} != {}'.format(len(value_vars), + len(value_name))) + # allow for value_name to be a single item and attach int to it + else: + value_name = [value_name[0] + '_' + str(i) + for i in range(len(value_vars))] + + # get the total number of columns in each melt group + # This is not just the length of the list because this function + # handles columns with the same names, which is commong when + # using multiindex frames + value_vars_length = [] + for vv in value_vars: + count = 0 + for col in frame.columns.values: + if col in vv: + count += 1 + value_vars_length.append(count) + + # Need the max number of 
columns for all the melt groups to + # correctly append NaNs to end of unbalanced melt groups + max_group_len = max(value_vars_length) + + # store each melted group as a dictionary in a list + mdata_list = [] + + # store columns from each melted group in a list of lists + mcolumns_list = [] + + # individually melt each group + vars_zipped = zip(value_vars, var_name, value_name, value_vars_length) + for i, (val_v, var_n, val_n, vvl) in enumerate(vars_zipped): + var_n = convert_to_list(var_n) + val_n = convert_to_list(val_n) + + # only melt the id_vars for the first group + id_vars_ = [] if i > 0 else id_vars + + # append int at end of var_name to make unique + var_end = i if var_n == [] else None + + md, mc = melt_one(frame, id_vars=id_vars_, value_vars=val_v, + var_name=var_n, value_name=val_n, + col_level=col_level, + extra_group=max_group_len - vvl, + var_end=var_end) + + mdata_list.append(md) + mcolumns_list.append(mc) + + # make one large dictionary with all data for constructor + mdata = {} + for d in mdata_list: + mdata.update(d) + + mcolumns = [e for lst in mcolumns_list for e in lst] + return DataFrame(mdata, columns=mcolumns) + + else: + mdata, mcolumns = melt_one(frame, id_vars=id_vars, + value_vars=value_vars, var_name=var_name, + value_name=value_name, col_level=col_level) + return DataFrame(mdata, columns=mcolumns) def lreshape(data, groups, dropna=True, label=None): diff --git a/pandas/tests/reshape/test_melt.py b/pandas/tests/reshape/test_melt.py index b7422dfd7e911..14d60fcda9d4f 100644 --- a/pandas/tests/reshape/test_melt.py +++ b/pandas/tests/reshape/test_melt.py @@ -30,6 +30,33 @@ def setup_method(self, method): self.df1.columns = [list('ABC'), list('abc')] self.df1.columns.names = ['CAP', 'low'] + self.df2 = DataFrame( + {'City': ['Houston', 'Austin', 'Hoover'], + 'State': ['Texas', 'Texas', 'Alabama'], + 'Name': ['Aria', 'Penelope', 'Niko'], + 'Mango': [4, 10, 90], + 'Orange': [10, 8, 14], + 'Watermelon': [40, 99, 43], + 'Gin': [16, 200, 34], + 'Vodka': [20, 33, 18]}, columns=['City', 'State', 'Name', 'Mango', + 'Orange', 'Watermelon', 'Gin', + 'Vodka']) + + self.df3 = DataFrame( + {'group': ['a', 'b', 'c'], + 'exp_1': [4, 10, -9], + 'exp_2': [10, 8, 14], + 'res_1': [8, 5, 4], + 'res_3': [11, 0, 7]}, columns=['group', 'exp_1', 'exp_2', + 'res_1', 'res_3']) + + self.df4 = self.df2.copy() + self.df4.columns = pd.MultiIndex.from_arrays([list('aabbcccd'), + list('ffffgggg'), + self.df4.columns], + names=[None, None, + 'some vars']) + def test_top_level_method(self): result = melt(self.df) assert result.columns.tolist() == ['variable', 'value'] @@ -212,6 +239,63 @@ def test_multiindex(self): res = self.df1.melt() assert res.columns.tolist() == ['CAP', 'low', 'value'] + def test_simultaneous_melt(self): + data = {'City': ['Houston', 'Austin', 'Hoover', 'Houston', 'Austin', + 'Hoover', 'Houston', 'Austin', 'Hoover'], + 'State': ['Texas', 'Texas', 'Alabama', 'Texas', 'Texas', + 'Alabama', 'Texas', 'Texas', 'Alabama'], + 'Fruit': ['Mango', 'Mango', 'Mango', 'Orange', 'Orange', + 'Orange', 'Watermelon', 'Watermelon', 'Watermelon'], + 'Pounds': [4, 10, 90, 10, 8, 14, 40, 99, 43], + 'Drink': ['Gin', 'Gin', 'Gin', 'Vodka', 'Vodka', 'Vodka', + 'nan', 'nan', 'nan'], + 'Ounces': [16.0, 200.0, 34.0, 20.0, 33.0, 18.0, nan, + nan, nan]} + expected1 = DataFrame(data, columns=['City', 'State', 'Fruit', + 'Pounds', 'Drink', 'Ounces']) + result1 = self.df2.melt(id_vars=['City', 'State'], + value_vars=[['Mango', 'Orange', 'Watermelon'], + ['Gin', 'Vodka']], + var_name=['Fruit', 'Drink'], + 
value_name=['Pounds', 'Ounces']) + tm.assert_frame_equal(result1, expected1) + + # single item groups + result2 = self.df2.melt(id_vars='State', + value_vars=[['Mango'], ['Vodka']], + var_name=['Fruit', 'Drink']) + + data = {'Drink': ['Vodka', 'Vodka', 'Vodka'], + 'Fruit': ['Mango', 'Mango', 'Mango'], + 'State': ['Texas', 'Texas', 'Alabama'], + 'value_0': [4, 10, 90], + 'value_1': [20, 33, 18]} + expected2 = DataFrame(data, columns=['State', 'Fruit', 'value_0', + 'Drink', 'value_1']) + tm.assert_frame_equal(result2, expected2) + + with pytest.raises(ValueError): + self.df2.melt(id_vars='State', + value_vars=[['Vodka'], ['Mango', 'Name'], + ['Orange', 'Watermelon']], + var_name=['Fruit', 'Drink']) + + def test_melt_multiindex(self): + data = {('a', 'f', 'State'): ['Texas', 'Texas', 'Alabama', + 'Texas', 'Texas', 'Alabama'], + 'variable_0': ['b', 'b', 'b', 'c', 'c', 'c'], + 'variable_1': ['f', 'f', 'f', 'g', 'g', 'g'], + 'some vars': ['Name', 'Name', 'Name', 'Watermelon', + 'Watermelon', 'Watermelon'], + 'value': ['Aria', 'Penelope', 'Niko', 40, 99, 43]} + expected = DataFrame(data, columns=[('a', 'f', 'State'), 'variable_0', + 'variable_1', 'some vars', + 'value']) + result = self.df4.melt(id_vars=[('a', 'f', 'State')], + value_vars=[('b', 'f', 'Name'), + ('c', 'g', 'Watermelon')]) + tm.assert_frame_equal(expected, result) + class TestLreshape(object):
- [x] closes #17676
- [x] tests added / passed
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
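Since this PR was not merged, the proposed list-of-lists `value_vars` is not available in released pandas; the effect can be approximated for equal-sized column groups by melting each group separately and concatenating. A hedged sketch, with column names taken from the examples above:

```python
import pandas as pd

df = pd.DataFrame({'State': ['Texas', 'Florida', 'Alabama'],
                   'Mango': [4, 10, 90],
                   'Orange': [10, 8, 14],
                   'Gin': [16, 200, 34],
                   'Vodka': [20, 33, 18]})

# Melt each column group on its own, then align row-wise; this
# matches the proposed behavior only when the groups are the
# same size (unequal groups would need NaN padding, as in the PR).
fruit = df.melt(id_vars='State', value_vars=['Mango', 'Orange'],
                var_name='Fruit', value_name='Pounds')
drink = df.melt(value_vars=['Gin', 'Vodka'],
                var_name='Drink', value_name='Ounces')
print(pd.concat([fruit, drink], axis=1))
```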
https://api.github.com/repos/pandas-dev/pandas/pulls/17677
2017-09-25T23:51:25Z
2018-02-10T18:43:05Z
null
2018-02-10T18:43:06Z