| id (string, 30-32 chars) | content (string, 139-2.8k chars) |
|---|---|
codereview_new_python_data_10667
|
def test_repartition(axis, dtype):
if dtype == "DataFrame":
results = {
- None: ([4, 0, 0, 0], [3, 0, 0, 0]),
- 0: ([4, 0, 0, 0], [2, 1]),
- 1: ([2, 2], [3, 0, 0, 0]),
}
else:
results = {
- None: ([4, 0, 0, 0], [1]),
- 0: ([4, 0, 0, 0], [1]),
1: ([2, 2], [1]),
}
# in some cases empty partitions aren't filtered so it's better to use
IIUC empty partitions are not filtered out only in cases when there's not enough metadata to do so:
https://github.com/modin-project/modin/blob/f137564496ceff581a312de9af5ecbd143c7ee32/modin/core/dataframe/pandas/dataframe/dataframe.py#L471-L478
`._repartition` seems to have all the required metadata in the end, so why is this change needed?
def test_repartition(axis, dtype):
if dtype == "DataFrame":
results = {
+ None: ([4], [3]),
+ 0: ([4], [2, 1]),
+ 1: ([2, 2], [3]),
}
else:
results = {
+ None: ([4], [1]),
+ 0: ([4], [1]),
1: ([2, 2], [1]),
}
# in some cases empty partitions aren't filtered so it's better to use
|
codereview_new_python_data_10668
|
def apply_full_axis(
Setting it to True disables shuffling data from one partition to another.
synchronize : boolean, default: True
Synchronize external indexes (`new_index`, `new_columns`) with internal indexes.
Returns
-------
```suggestion
Synchronize external indexes (`new_index`, `new_columns`) with internal indexes.
This could be used when you're certain that the indices in partitions are equal to
the provided hints in order to save time on syncing them.
```
def apply_full_axis(
Setting it to True disables shuffling data from one partition to another.
synchronize : boolean, default: True
Synchronize external indexes (`new_index`, `new_columns`) with internal indexes.
+ This could be used when you're certain that the indices in partitions are equal to
+ the provided hints in order to save time on syncing them.
Returns
-------
|
codereview_new_python_data_10669
|
def repartition(self, axis=None):
new_index=self._modin_frame._index_cache,
new_columns=self._modin_frame._columns_cache,
keep_partitioning=False,
- synchronize=False,
)
)
return new_query_compiler
```suggestion
sync_labels=False,
```
def repartition(self, axis=None):
new_index=self._modin_frame._index_cache,
new_columns=self._modin_frame._columns_cache,
keep_partitioning=False,
+ sync_labels=False,
)
)
return new_query_compiler
|
codereview_new_python_data_10670
|
def check_parameters_support(
if read_kwargs.get("skipfooter"):
if read_kwargs.get("nrows") or read_kwargs.get("engine") == "c":
- return (False, "raise exception by pandas itself")
skiprows_supported = True
if is_list_like(skiprows_md) and skiprows_md[0] < header_size:
```suggestion
return (False, "Exception is raised by pandas itself")
```
def check_parameters_support(
if read_kwargs.get("skipfooter"):
if read_kwargs.get("nrows") or read_kwargs.get("engine") == "c":
+ return (False, "Exception is raised by pandas itself")
skiprows_supported = True
if is_list_like(skiprows_md) and skiprows_md[0] < header_size:
|
codereview_new_python_data_10671
|
def _read_csv_check_support(
)
if read_csv_kwargs.get("skipfooter") and read_csv_kwargs.get("nrows"):
- return (False, "raise exception by pandas itself")
for arg, def_value in cls.read_csv_unsup_defaults.items():
if read_csv_kwargs[arg] != def_value:
```suggestion
return (False, "Exception is raised by pandas itself")
```
def _read_csv_check_support(
)
if read_csv_kwargs.get("skipfooter") and read_csv_kwargs.get("nrows"):
+ return (False, "Exception is raised by pandas itself")
for arg, def_value in cls.read_csv_unsup_defaults.items():
if read_csv_kwargs[arg] != def_value:
|
codereview_new_python_data_10672
|
def _read(cls, io, **kwargs):
if index_col is not None:
column_names = column_names.drop(column_names[index_col])
- if not all(column_names) or kwargs["usecols"]:
# some column names are empty, use pandas reader to take the names from it
pandas_kw["nrows"] = 1
df = pandas.read_excel(io, **pandas_kw)
Previously, there was no processing of the `usecols` parameter in the dispatcher. However, for the correct calculation of the columns, we can use pandas, just as we do for `read_csv`.
def _read(cls, io, **kwargs):
if index_col is not None:
column_names = column_names.drop(column_names[index_col])
+ if not all(column_names) or kwargs.get("usecols"):
# some column names are empty, use pandas reader to take the names from it
pandas_kw["nrows"] = 1
df = pandas.read_excel(io, **pandas_kw)
|
codereview_new_python_data_10673
|
def _read(cls, io, **kwargs):
if index_col is not None:
column_names = column_names.drop(column_names[index_col])
- if not all(column_names) or kwargs["usecols"]:
# some column names are empty, use pandas reader to take the names from it
pandas_kw["nrows"] = 1
df = pandas.read_excel(io, **pandas_kw)
```suggestion
if not all(column_names) or kwargs.get("usecols"):
```
?
def _read(cls, io, **kwargs):
if index_col is not None:
column_names = column_names.drop(column_names[index_col])
+ if not all(column_names) or kwargs.get("usecols"):
# some column names are empty, use pandas reader to take the names from it
pandas_kw["nrows"] = 1
df = pandas.read_excel(io, **pandas_kw)
|
codereview_new_python_data_10674
|
def modin_df_almost_equals_pandas(modin_df, pandas_df):
)
-def try_almost_equals_compare(df1, df2):
"""Compare two dataframes as nearly equal if possible, otherwise compare as completely equal."""
# `modin_df_almost_equals_pandas` is numeric-only comparator
dtypes1, dtypes2 = map(
```suggestion
def try_modin_df_almost_equals_compare(df1, df2):
```
def modin_df_almost_equals_pandas(modin_df, pandas_df):
)
+def try_modin_df_almost_equals_compare(df1, df2):
"""Compare two dataframes as nearly equal if possible, otherwise compare as completely equal."""
# `modin_df_almost_equals_pandas` is numeric-only comparator
dtypes1, dtypes2 = map(
|
codereview_new_python_data_10675
|
def walk(
elif depth == 1:
if len(value) != 2:
raise ValueError(
- f"Incorrect rename format. Renamer must consist of exactly two elements, got {len(value)=}."
)
func_name, func = value
yield key, func, func_name, col_renaming_required
```suggestion
f"Incorrect rename format. Renamer must consist of exactly two elements, got: {len(value)}."
```
def walk(
elif depth == 1:
if len(value) != 2:
raise ValueError(
+ f"Incorrect rename format. Renamer must consist of exactly two elements, got: {len(value)}."
)
func_name, func = value
yield key, func, func_name, col_renaming_required
|
codereview_new_python_data_10676
|
def try_modin_df_almost_equals_compare(df1, df2):
if all(is_numeric_dtype(dtype) for dtype in dtypes1) and all(
is_numeric_dtype(dtype) for dtype in dtypes2
):
- return modin_df_almost_equals_pandas(df1, df2)
- else:
- return df_equals(df1, df2)
def df_is_empty(df):
To fix CodeQL warning:
```suggestion
modin_df_almost_equals_pandas(df1, df2)
```
def try_modin_df_almost_equals_compare(df1, df2):
if all(is_numeric_dtype(dtype) for dtype in dtypes1) and all(
is_numeric_dtype(dtype) for dtype in dtypes2
):
+ modin_df_almost_equals_pandas(df1, df2)
+ df_equals(df1, df2)
def df_is_empty(df):
|
codereview_new_python_data_10677
|
def try_modin_df_almost_equals_compare(df1, df2):
if all(is_numeric_dtype(dtype) for dtype in dtypes1) and all(
is_numeric_dtype(dtype) for dtype in dtypes2
):
- return modin_df_almost_equals_pandas(df1, df2)
- else:
- return df_equals(df1, df2)
def df_is_empty(df):
`df_equals`: this function does not return anything.
```suggestion
df_equals(df1, df2)
```
def try_modin_df_almost_equals_compare(df1, df2):
if all(is_numeric_dtype(dtype) for dtype in dtypes1) and all(
is_numeric_dtype(dtype) for dtype in dtypes2
):
+ modin_df_almost_equals_pandas(df1, df2)
+ df_equals(df1, df2)
def df_is_empty(df):
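For context on the static-analysis concern behind this record, here is a small standalone illustration (plain Python, not Modin code): returning the result of a function that only performs assertions and implicitly returns None is misleading, so the call is better written as a plain statement.

```python
def check_positive(x):
    # Performs an assertion and implicitly returns None.
    assert x > 0

def wrapper_bad(x):
    return check_positive(x)  # flagged style: propagates a meaningless None

def wrapper_good(x):
    check_positive(x)  # just call it as a statement

assert wrapper_bad(1) is None and wrapper_good(1) is None
```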
|
codereview_new_python_data_10678
|
def try_modin_df_almost_equals_compare(df1, df2):
is_numeric_dtype(dtype) for dtype in dtypes2
):
modin_df_almost_equals_pandas(df1, df2)
df_equals(df1, df2)
```suggestion
modin_df_almost_equals_pandas(df1, df2)
else:
df_equals(df1, df2)
```
def try_modin_df_almost_equals_compare(df1, df2):
is_numeric_dtype(dtype) for dtype in dtypes2
):
modin_df_almost_equals_pandas(df1, df2)
+ else:
df_equals(df1, df2)
|
codereview_new_python_data_10679
|
def groupby_agg(
# Defaulting to pandas in case of an empty frame as we can't process it properly.
# Higher API level won't pass empty data here unless the frame has delayed
# computations. So we apparently lose some laziness here (due to index access)
- # because of the disability to process empty groupby natively.
if len(self.columns) == 0 or len(self.index) == 0:
return super().groupby_agg(
by, agg_func, axis, groupby_kwargs, agg_args, agg_kwargs, how, drop
```suggestion
# because of the inability to process empty groupby natively.
```
def groupby_agg(
# Defaulting to pandas in case of an empty frame as we can't process it properly.
# Higher API level won't pass empty data here unless the frame has delayed
# computations. So we apparently lose some laziness here (due to index access)
+ # because of the inability to process empty groupby natively.
if len(self.columns) == 0 or len(self.index) == 0:
return super().groupby_agg(
by, agg_func, axis, groupby_kwargs, agg_args, agg_kwargs, how, drop
|
codereview_new_python_data_10680
|
def modin_df_almost_equals_pandas(modin_df, pandas_df):
def try_modin_df_almost_equals_compare(df1, df2):
"""Compare two dataframes as nearly equal if possible, otherwise compare as completely equal."""
# `modin_df_almost_equals_pandas` is numeric-only comparator
- dtypes1, dtypes2 = map(
- lambda df: dtype if is_list_like(dtype := df.dtypes) else [dtype], (df1, df2)
- )
- if all(is_numeric_dtype(dtype) for dtype in dtypes1) and all(
- is_numeric_dtype(dtype) for dtype in dtypes2
- ):
modin_df_almost_equals_pandas(df1, df2)
else:
df_equals(df1, df2)
```suggestion
dtypes1, dtypes2 = [dtype if is_list_like(dtype := df.dtypes) else [dtype] for df in (df1, df2)]
```
?..
def modin_df_almost_equals_pandas(modin_df, pandas_df):
def try_modin_df_almost_equals_compare(df1, df2):
"""Compare two dataframes as nearly equal if possible, otherwise compare as completely equal."""
# `modin_df_almost_equals_pandas` is numeric-only comparator
+ dtypes1, dtypes2 = [
+ dtype if is_list_like(dtype := df.dtypes) else [dtype] for df in (df1, df2)
+ ]
+ if all(map(is_numeric_dtype, dtypes1)) and all(map(is_numeric_dtype, dtypes2)):
modin_df_almost_equals_pandas(df1, df2)
else:
df_equals(df1, df2)
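As a side note on the comprehension suggested above, here is a small self-contained illustration (using plain pandas, not Modin) of how the walrus operator lets `obj.dtypes` be evaluated once per element and reused inside the same comprehension expression.

```python
import pandas as pd
from pandas.api.types import is_list_like

objs = (pd.DataFrame({"a": [1], "b": [1.0]}), pd.Series([1.0]))
# `dtype := obj.dtypes` binds the value so it can be wrapped in a list
# when it is a single dtype (Series case) or kept as-is (DataFrame case).
dtypes_per_obj = [
    dtype if is_list_like(dtype := obj.dtypes) else [dtype] for obj in objs
]
assert len(dtypes_per_obj[0]) == 2 and len(dtypes_per_obj[1]) == 1
```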
|
codereview_new_python_data_10681
|
def modin_df_almost_equals_pandas(modin_df, pandas_df):
def try_modin_df_almost_equals_compare(df1, df2):
"""Compare two dataframes as nearly equal if possible, otherwise compare as completely equal."""
# `modin_df_almost_equals_pandas` is numeric-only comparator
- dtypes1, dtypes2 = map(
- lambda df: dtype if is_list_like(dtype := df.dtypes) else [dtype], (df1, df2)
- )
- if all(is_numeric_dtype(dtype) for dtype in dtypes1) and all(
- is_numeric_dtype(dtype) for dtype in dtypes2
- ):
modin_df_almost_equals_pandas(df1, df2)
else:
df_equals(df1, df2)
```suggestion
if all(map(is_numeric_dtype, dtypes1)) and all(map(is_numeric_dtype, dtypes2)):
```
?
def modin_df_almost_equals_pandas(modin_df, pandas_df):
def try_modin_df_almost_equals_compare(df1, df2):
"""Compare two dataframes as nearly equal if possible, otherwise compare as completely equal."""
# `modin_df_almost_equals_pandas` is numeric-only comparator
+ dtypes1, dtypes2 = [
+ dtype if is_list_like(dtype := df.dtypes) else [dtype] for df in (df1, df2)
+ ]
+ if all(map(is_numeric_dtype, dtypes1)) and all(map(is_numeric_dtype, dtypes2)):
modin_df_almost_equals_pandas(df1, df2)
else:
df_equals(df1, df2)
|
codereview_new_python_data_10682
|
def test___gt__(data):
@pytest.mark.parametrize("count_elements", [0, 1, 10])
-@pytest.mark.parametrize("converter", [int, float])
-def test___int__and__float__(converter, count_elements):
eval_general(
*create_test_series(test_data["int_data"]),
- lambda df: converter(df[:count_elements]),
)
I would rather we test int and float methods separately. We have a test for bool separately as well.
def test___gt__(data):
@pytest.mark.parametrize("count_elements", [0, 1, 10])
+def test___int__(count_elements):
+ eval_general(
+ *create_test_series(test_data["float_data"]),
+ lambda df: int(df[:count_elements]),
+ )
+
+
+@pytest.mark.parametrize("count_elements", [0, 1, 10])
+def test___float__(count_elements):
eval_general(
*create_test_series(test_data["int_data"]),
+ lambda df: float(df[:count_elements]),
)
|
codereview_new_python_data_10683
|
def test___gt__(data):
@pytest.mark.parametrize("count_elements", [0, 1, 10])
def test___int__(count_elements):
eval_general(
- *create_test_series(test_data["float_nan_data"]),
- lambda df: int(df[:count_elements]),
)
@pytest.mark.parametrize("count_elements", [0, 1, 10])
def test___float__(count_elements):
eval_general(
- *create_test_series(test_data["int_data"]),
- lambda df: float(df[:count_elements]),
)
Actually, I have two comments here.
1) Why do we need to test a series with the number of elements other than 1?
2) If a series consists of one np.nan, we just check exception equality. Should we test both a valid number and np.nan in the series?
def test___gt__(data):
@pytest.mark.parametrize("count_elements", [0, 1, 10])
def test___int__(count_elements):
eval_general(
+ *create_test_series([1.5] * count_elements),
+ lambda df: int(df),
)
@pytest.mark.parametrize("count_elements", [0, 1, 10])
def test___float__(count_elements):
eval_general(
+ *create_test_series([1] * count_elements),
+ lambda df: float(df),
)
|
codereview_new_python_data_10684
|
def test___gt__(data):
@pytest.mark.parametrize("count_elements", [0, 1, 10])
def test___int__(count_elements):
- eval_general(
- *create_test_series([1.5] * count_elements),
- lambda df: int(df),
- )
@pytest.mark.parametrize("count_elements", [0, 1, 10])
def test___float__(count_elements):
- eval_general(
- *create_test_series([1] * count_elements),
- lambda df: float(df),
- )
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
## Unnecessary lambda
This 'lambda' is just a simple wrapper around a callable object. Use that object directly.
[Show more details](https://github.com/modin-project/modin/security/code-scanning/496)
def test___gt__(data):
@pytest.mark.parametrize("count_elements", [0, 1, 10])
def test___int__(count_elements):
+ eval_general(*create_test_series([1.5] * count_elements), int)
@pytest.mark.parametrize("count_elements", [0, 1, 10])
def test___float__(count_elements):
+ eval_general(*create_test_series([1] * count_elements), float)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
|
codereview_new_python_data_10685
|
def test___gt__(data):
@pytest.mark.parametrize("count_elements", [0, 1, 10])
def test___int__(count_elements):
- eval_general(
- *create_test_series([1.5] * count_elements),
- lambda df: int(df),
- )
@pytest.mark.parametrize("count_elements", [0, 1, 10])
def test___float__(count_elements):
- eval_general(
- *create_test_series([1] * count_elements),
- lambda df: float(df),
- )
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
## Unnecessary lambda
This 'lambda' is just a simple wrapper around a callable object. Use that object directly.
[Show more details](https://github.com/modin-project/modin/security/code-scanning/497)
def test___gt__(data):
@pytest.mark.parametrize("count_elements", [0, 1, 10])
def test___int__(count_elements):
+ eval_general(*create_test_series([1.5] * count_elements), int)
@pytest.mark.parametrize("count_elements", [0, 1, 10])
def test___float__(count_elements):
+ eval_general(*create_test_series([1] * count_elements), float)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
|
codereview_new_python_data_10686
|
def show_versions(as_json: Union[str, bool] = False) -> None:
print(f"{k:<{maxlen}}: {v}")
-def int_to_float32(dtype: np.dtype) -> np.dtype:
"""
Check if a datatype is a variant of integer.
- If dtype is integer function returns float32 datatype if not returns the
argument datatype itself
Parameters
Should this be `int_to_float64`?
def show_versions(as_json: Union[str, bool] = False) -> None:
print(f"{k:<{maxlen}}: {v}")
+def int_to_float64(dtype: np.dtype) -> np.dtype:
"""
Check if a datatype is a variant of integer.
+ If dtype is integer function returns float64 datatype if not returns the
argument datatype itself
Parameters
|
codereview_new_python_data_10687
|
def show_versions(as_json: Union[str, bool] = False) -> None:
print(f"{k:<{maxlen}}: {v}")
-def int_to_float32(dtype: np.dtype) -> np.dtype:
"""
Check if a datatype is a variant of integer.
- If dtype is integer function returns float32 datatype if not returns the
argument datatype itself
Parameters
```suggestion
If dtype is integer function returns float64 datatype if not returns the
```
def show_versions(as_json: Union[str, bool] = False) -> None:
print(f"{k:<{maxlen}}: {v}")
+def int_to_float64(dtype: np.dtype) -> np.dtype:
"""
Check if a datatype is a variant of integer.
+ If dtype is integer function returns float64 datatype if not returns the
argument datatype itself
Parameters
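To make the renamed helper concrete, here is a minimal standalone sketch of what an `int_to_float64`-style function could look like; this is an illustrative assumption, not the actual Modin implementation.

```python
import numpy as np

def int_to_float64_sketch(dtype: np.dtype) -> np.dtype:
    # If the dtype is any integer variant, return float64; otherwise
    # return the argument dtype unchanged.
    return np.dtype(np.float64) if np.issubdtype(dtype, np.integer) else dtype

assert int_to_float64_sketch(np.dtype(np.int32)) == np.dtype(np.float64)
assert int_to_float64_sketch(np.dtype(np.float32)) == np.dtype(np.float32)
```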
|
codereview_new_python_data_10688
|
def compute_dtypes_common_cast(first, second) -> np.dtype:
-----
The dtypes of the operands are supposed to be known.
"""
- dtypes_first = dict(zip(first.columns, first._modin_frame._dtypes))
- dtypes_second = dict(zip(second.columns, second._modin_frame._dtypes))
columns_first = set(first.columns)
columns_second = set(second.columns)
common_columns = columns_first.intersection(columns_second)
```suggestion
dtypes_first = first._modin_frame._dtypes.to_dict()
dtypes_second = second._modin_frame._dtypes.to_dict()
```
or we could even remove this entirely and just use the following further down:
```python
dtypes_first = first._modin_frame._dtypes
dtypes_first.loc[col]
```
def compute_dtypes_common_cast(first, second) -> np.dtype:
-----
The dtypes of the operands are supposed to be known.
"""
+ dtypes_first = first._modin_frame._dtypes.to_dict()
+ dtypes_second = second._modin_frame._dtypes.to_dict()
columns_first = set(first.columns)
columns_second = set(second.columns)
common_columns = columns_first.intersection(columns_second)
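As a quick standalone sanity check of the equivalence behind this suggestion (plain pandas, not Modin internals): for a dtypes Series, `.to_dict()` produces the same column-to-dtype mapping as zipping columns with dtypes.

```python
import pandas as pd

df = pd.DataFrame({"a": [1], "b": [1.0]})
assert dict(zip(df.columns, df.dtypes)) == df.dtypes.to_dict()
```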
|
codereview_new_python_data_10689
|
def values(self): # noqa: RT01, D200
"""
import modin.pandas as pd
- if isinstance(self.dtype, pandas.core.dtypes.dtypes.ExtensionDtype):
return self._default_to_pandas("values")
data = self.to_numpy()
Modin doesn't have a `core` module.
def values(self): # noqa: RT01, D200
"""
import modin.pandas as pd
+ if isinstance(
+ self.dtype, pandas.core.dtypes.dtypes.ExtensionDtype
+ ) and not isinstance(self.dtype, pd.CategoricalDtype):
return self._default_to_pandas("values")
data = self.to_numpy()
|
codereview_new_python_data_10690
|
def partitioned_file(
List with the next elements:
int : partition start read byte
int : partition end read byte
- pandas.DataFrame
Dataframe from which metadata can be retrieved. Can be None if `read_callback_kw=None`.
"""
if read_callback_kw is not None and pre_reading != 0:
```suggestion
pandas.DataFrame or None
```
def partitioned_file(
List with the next elements:
int : partition start read byte
int : partition end read byte
+ pandas.DataFrame or None
Dataframe from which metadata can be retrieved. Can be None if `read_callback_kw=None`.
"""
if read_callback_kw is not None and pre_reading != 0:
|
codereview_new_python_data_10691
|
def default_to_pandas(self, pandas_op, *args, **kwargs):
"""
op_name = getattr(pandas_op, "__name__", str(pandas_op))
ErrorMessage.default_to_pandas(op_name)
- args = (a.to_pandas() if isinstance(a, type(self)) else a for a in args)
- kwargs = {
- k: v.to_pandas() if isinstance(v, type(self)) else v
- for k, v in kwargs.items()
- }
- result = pandas_op(self.to_pandas(), *args, **kwargs)
if isinstance(result, pandas.Series):
if result.name is None:
result.name = MODIN_UNNAMED_SERIES_LABEL
let's use more comprehensive approach
```suggestion
args = try_cast_to_pandas(args)
kwargs = try_cast_to_pandas(kwargs)
```
https://github.com/modin-project/modin/blob/1a57a542ad0413fae2591d4b4928b652effd01db/modin/utils.py#L480
def default_to_pandas(self, pandas_op, *args, **kwargs):
"""
op_name = getattr(pandas_op, "__name__", str(pandas_op))
ErrorMessage.default_to_pandas(op_name)
+ args = try_cast_to_pandas(args)
+ kwargs = try_cast_to_pandas(kwargs)
+ result = pandas_op(try_cast_to_pandas(self), *args, **kwargs)
if isinstance(result, pandas.Series):
if result.name is None:
result.name = MODIN_UNNAMED_SERIES_LABEL
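The real `try_cast_to_pandas` lives in `modin/utils.py` (linked above); the following is only a rough illustrative sketch of the idea it generalizes, namely converting Modin objects found inside containers to pandas while leaving everything else untouched.

```python
def try_cast_to_pandas_sketch(obj):
    # Anything that knows how to convert itself is converted.
    if hasattr(obj, "to_pandas"):
        return obj.to_pandas()
    # Containers are walked recursively so nested Modin objects are found.
    if isinstance(obj, (list, tuple)):
        return type(obj)(try_cast_to_pandas_sketch(o) for o in obj)
    if isinstance(obj, dict):
        return {k: try_cast_to_pandas_sketch(v) for k, v in obj.items()}
    return obj

assert try_cast_to_pandas_sketch({"x": [1, 2]}) == {"x": [1, 2]}
```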
|
codereview_new_python_data_10692
|
def default_to_pandas(self, pandas_op, *args, **kwargs):
"""
op_name = getattr(pandas_op, "__name__", str(pandas_op))
ErrorMessage.default_to_pandas(op_name)
- args = (a.to_pandas() if isinstance(a, type(self)) else a for a in args)
- kwargs = {
- k: v.to_pandas() if isinstance(v, type(self)) else v
- for k, v in kwargs.items()
- }
- result = pandas_op(self.to_pandas(), *args, **kwargs)
if isinstance(result, pandas.Series):
if result.name is None:
result.name = MODIN_UNNAMED_SERIES_LABEL
```suggestion
result = pandas_op(try_cast_to_pandas(self), *args, **kwargs)
```
def default_to_pandas(self, pandas_op, *args, **kwargs):
"""
op_name = getattr(pandas_op, "__name__", str(pandas_op))
ErrorMessage.default_to_pandas(op_name)
+ args = try_cast_to_pandas(args)
+ kwargs = try_cast_to_pandas(kwargs)
+ result = pandas_op(try_cast_to_pandas(self), *args, **kwargs)
if isinstance(result, pandas.Series):
if result.name is None:
result.name = MODIN_UNNAMED_SERIES_LABEL
|
codereview_new_python_data_10693
|
def from_labels(self) -> "PandasDataframe":
if "index" not in self.columns
else "level_{}".format(0)
]
- names = tuple(level_names) if len(level_names) > 1 else level_names[0]
- new_dtypes = self.index.to_frame(name=names).dtypes
- new_dtypes = pandas.concat([new_dtypes, self.dtypes])
# We will also use the `new_column_names` in the calculation of the internal metadata, so this is a
# lightweight way of ensuring the metadata matches.
I'm wondering: should we do this all the time, or only when the `self._dtypes` cache is accessible?
def from_labels(self) -> "PandasDataframe":
if "index" not in self.columns
else "level_{}".format(0)
]
+ new_dtypes = None
+ if self._dtypes is not None:
+ names = tuple(level_names) if len(level_names) > 1 else level_names[0]
+ new_dtypes = self.index.to_frame(name=names).dtypes
+ new_dtypes = pandas.concat([new_dtypes, self._dtypes])
# We will also use the `new_column_names` in the calculation of the internal metadata, so this is a
# lightweight way of ensuring the metadata matches.
|
codereview_new_python_data_10694
|
def from_labels(self) -> "PandasDataframe":
if "index" not in self.columns
else "level_{}".format(0)
]
- names = tuple(level_names) if len(level_names) > 1 else level_names[0]
- new_dtypes = self.index.to_frame(name=names).dtypes
- new_dtypes = pandas.concat([new_dtypes, self.dtypes])
# We will also use the `new_column_names` in the calculation of the internal metadata, so this is a
# lightweight way of ensuring the metadata matches.
```suggestion
new_dtypes = None
if self._dtypes is not None:
names = tuple(level_names) if len(level_names) > 1 else level_names[0]
new_dtypes = self.index.to_frame(name=names).dtypes
new_dtypes = pandas.concat([new_dtypes, self._dtypes])
```
def from_labels(self) -> "PandasDataframe":
if "index" not in self.columns
else "level_{}".format(0)
]
+ new_dtypes = None
+ if self._dtypes is not None:
+ names = tuple(level_names) if len(level_names) > 1 else level_names[0]
+ new_dtypes = self.index.to_frame(name=names).dtypes
+ new_dtypes = pandas.concat([new_dtypes, self._dtypes])
# We will also use the `new_column_names` in the calculation of the internal metadata, so this is a
# lightweight way of ensuring the metadata matches.
|
codereview_new_python_data_10695
|
def test_mean_with_datetime(by_func):
def test_groupby_mad_warn():
- modin_df = pd.DataFrame(test_groupby_data)
md_grp = modin_df.groupby(by=modin_df.columns[0])
msg = "The 'mad' method is deprecated and will be removed in a future version."
- with pytest.warns(FutureWarning, match=msg):
- md_grp.mad()
def test_groupby_backfill_warn():
I wish we would test against pandas, too, to make sure we warn the same thing
def test_mean_with_datetime(by_func):
def test_groupby_mad_warn():
+ modin_df, pandas_df = create_test_dfs(test_groupby_data)
md_grp = modin_df.groupby(by=modin_df.columns[0])
+ pd_grp = pandas_df.groupby(by=pandas_df.columns[0])
msg = "The 'mad' method is deprecated and will be removed in a future version."
+ for grp_obj in (md_grp, pd_grp):
+ with pytest.warns(FutureWarning, match=msg):
+ grp_obj.mad()
def test_groupby_backfill_warn():
|
codereview_new_python_data_10696
|
def values(self): # noqa: RT01, D200
Return Series as ndarray or ndarray-like depending on the dtype.
"""
data = self.to_numpy()
- if isinstance(self.dtype, pandas.CategoricalDtype):
- data = pandas.Categorical(data, dtype=self.dtype)
return data
def add(self, other, level=None, fill_value=None, axis=0): # noqa: PR01, RT01, D200
Can we use "our own" categorical class somehow? I know that `modin.pandas.Categorical` is just an alias for the pandas class, but maybe it would change in the future and so then we wouldn't need to rename this all over the project.
```suggestion
if isinstance(self.dtype, pd.CategoricalDtype):
data = pd.Categorical(data, dtype=self.dtype)
```
def values(self): # noqa: RT01, D200
Return Series as ndarray or ndarray-like depending on the dtype.
"""
data = self.to_numpy()
+ if isinstance(self.dtype, pd.CategoricalDtype):
+ data = pd.Categorical(data, dtype=self.dtype)
return data
def add(self, other, level=None, fill_value=None, axis=0): # noqa: PR01, RT01, D200
|
codereview_new_python_data_10697
|
import sys
from ipykernel import kernelspec
default_make_ipkernel_cmd = kernelspec.make_ipkernel_cmd
-def new_make_ipkernel_cmd(
mod="ipykernel_launcher", executable=None, extra_arguments=None
):
mpi_arguments = ["mpiexec", "-n", "1"]
arguments = default_make_ipkernel_cmd(mod, executable, extra_arguments)
return mpi_arguments + arguments
-kernelspec.make_ipkernel_cmd = new_make_ipkernel_cmd
if __name__ == "__main__":
kernel_name = "python3mpi"
```suggestion
def custom_make_ipkernel_cmd():
mpiexec_args = ["mpiexec", "-n", "1"]
default_args = default_make_ipkernel_cmd()
return mpiexec_args + default_args
```
import sys
from ipykernel import kernelspec
+
default_make_ipkernel_cmd = kernelspec.make_ipkernel_cmd
+def custom_make_ipkernel_cmd(
mod="ipykernel_launcher", executable=None, extra_arguments=None
):
mpi_arguments = ["mpiexec", "-n", "1"]
arguments = default_make_ipkernel_cmd(mod, executable, extra_arguments)
return mpi_arguments + arguments
+kernelspec.make_ipkernel_cmd = custom_make_ipkernel_cmd
if __name__ == "__main__":
kernel_name = "python3mpi"
|
codereview_new_python_data_10698
|
import sys
from ipykernel import kernelspec
default_make_ipkernel_cmd = kernelspec.make_ipkernel_cmd
-def new_make_ipkernel_cmd(
mod="ipykernel_launcher", executable=None, extra_arguments=None
):
mpi_arguments = ["mpiexec", "-n", "1"]
arguments = default_make_ipkernel_cmd(mod, executable, extra_arguments)
return mpi_arguments + arguments
-kernelspec.make_ipkernel_cmd = new_make_ipkernel_cmd
if __name__ == "__main__":
kernel_name = "python3mpi"
```suggestion
kernelspec.make_ipkernel_cmd = custom_make_ipkernel_cmd
```
import sys
from ipykernel import kernelspec
+
default_make_ipkernel_cmd = kernelspec.make_ipkernel_cmd
+def custom_make_ipkernel_cmd(
mod="ipykernel_launcher", executable=None, extra_arguments=None
):
mpi_arguments = ["mpiexec", "-n", "1"]
arguments = default_make_ipkernel_cmd(mod, executable, extra_arguments)
return mpi_arguments + arguments
+kernelspec.make_ipkernel_cmd = custom_make_ipkernel_cmd
if __name__ == "__main__":
kernel_name = "python3mpi"
|
codereview_new_python_data_10699
|
import sys
from ipykernel import kernelspec
default_make_ipkernel_cmd = kernelspec.make_ipkernel_cmd
-def new_make_ipkernel_cmd(
mod="ipykernel_launcher", executable=None, extra_arguments=None
):
mpi_arguments = ["mpiexec", "-n", "1"]
arguments = default_make_ipkernel_cmd(mod, executable, extra_arguments)
return mpi_arguments + arguments
-kernelspec.make_ipkernel_cmd = new_make_ipkernel_cmd
if __name__ == "__main__":
kernel_name = "python3mpi"
Add a licence.
import sys
from ipykernel import kernelspec
+
default_make_ipkernel_cmd = kernelspec.make_ipkernel_cmd
+def custom_make_ipkernel_cmd(
mod="ipykernel_launcher", executable=None, extra_arguments=None
):
mpi_arguments = ["mpiexec", "-n", "1"]
arguments = default_make_ipkernel_cmd(mod, executable, extra_arguments)
return mpi_arguments + arguments
+kernelspec.make_ipkernel_cmd = custom_make_ipkernel_cmd
if __name__ == "__main__":
kernel_name = "python3mpi"
|
codereview_new_python_data_10700
|
import sys
from ipykernel import kernelspec
default_make_ipkernel_cmd = kernelspec.make_ipkernel_cmd
-def new_make_ipkernel_cmd(
mod="ipykernel_launcher", executable=None, extra_arguments=None
):
mpi_arguments = ["mpiexec", "-n", "1"]
arguments = default_make_ipkernel_cmd(mod, executable, extra_arguments)
return mpi_arguments + arguments
-kernelspec.make_ipkernel_cmd = new_make_ipkernel_cmd
if __name__ == "__main__":
kernel_name = "python3mpi"
```suggestion
default_make_ipkernel_cmd = kernelspec.make_ipkernel_cmd
```
import sys
from ipykernel import kernelspec
+
default_make_ipkernel_cmd = kernelspec.make_ipkernel_cmd
+def custom_make_ipkernel_cmd(
mod="ipykernel_launcher", executable=None, extra_arguments=None
):
mpi_arguments = ["mpiexec", "-n", "1"]
arguments = default_make_ipkernel_cmd(mod, executable, extra_arguments)
return mpi_arguments + arguments
+kernelspec.make_ipkernel_cmd = custom_make_ipkernel_cmd
if __name__ == "__main__":
kernel_name = "python3mpi"
|
codereview_new_python_data_10701
|
from nbconvert.preprocessors import ExecutePreprocessor
test_dataset_path = "taxi.csv"
-kernel_name = (
- os.environ["MODIN_KERNEL_NAME"] if "MODIN_KERNEL_NAME" in os.environ else None
-)
-ep = ExecutePreprocessor(
- timeout=600, kernel_name=kernel_name if kernel_name else "python3"
-)
download_taxi_dataset = f"""import os
import urllib.request
```suggestion
# the kernel name "python3mpi" must match the one
# that is set up in `examples/tutorial/jupyter/execution/pandas_on_unidist/setup_kernel.py`
# for `Unidist` engine
kernel_name = "python3" if cfg.Engine.get() != "Unidist" else "python3mpi"
ep = ExecutePreprocessor(timeout=600, kernel_name=kernel_name)
```
from nbconvert.preprocessors import ExecutePreprocessor
test_dataset_path = "taxi.csv"
+# the kernel name "python3mpi" must match the one
+# that is set up in `examples/tutorial/jupyter/execution/pandas_on_unidist/setup_kernel.py`
+# for `Unidist` engine
+kernel_name = "python3" if cfg.Engine.get() != "Unidist" else "python3mpi"
+ep = ExecutePreprocessor(timeout=600, kernel_name=kernel_name)
download_taxi_dataset = f"""import os
import urllib.request
|
codereview_new_python_data_10702
|
_execute_notebook,
test_dataset_path,
download_taxi_dataset,
- change_kernel,
)
# the kernel name "python3mpi" must match the one
# that is set up in `examples/tutorial/jupyter/execution/pandas_on_unidist/setup_kernel.py`
# for `Unidist` engine
-change_kernel(kernel_name="python3mpi")
local_notebooks_dir = "examples/tutorial/jupyter/execution/pandas_on_unidist/local"
I prefer to use my suggestion in order not to create the executor twice.
_execute_notebook,
test_dataset_path,
download_taxi_dataset,
+ set_kernel,
)
# the kernel name "python3mpi" must match the one
# that is set up in `examples/tutorial/jupyter/execution/pandas_on_unidist/setup_kernel.py`
# for `Unidist` engine
+set_kernel(kernel_name="python3mpi")
local_notebooks_dir = "examples/tutorial/jupyter/execution/pandas_on_unidist/local"
|
codereview_new_python_data_10703
|
def custom_make_ipkernel_cmd(*args, **kwargs):
"""
- Build modifyied Popen command list for launching an IPython kernel with mpi.
-
Returns
-------
array
- A Popen command list
Notes
-----
```suggestion
Build modified Popen command list for launching an IPython kernel with MPI.
```
def custom_make_ipkernel_cmd(*args, **kwargs):
"""
+ Build modified Popen command list for launching an IPython kernel with MPI.
+
+ Parameters
+ ----------
+ *args : iterable
+ Additional positional arguments to be passed in `default_make_ipkernel_cmd`.
+ **kwargs : dict
+ Additional keyword arguments to be passed in `default_make_ipkernel_cmd`.
+
Returns
-------
array
+ A Popen command list.
Notes
-----
|
codereview_new_python_data_10704
|
def custom_make_ipkernel_cmd(*args, **kwargs):
"""
- Build modifyied Popen command list for launching an IPython kernel with mpi.
-
Returns
-------
array
- A Popen command list
Notes
-----
```suggestion
Parameters
----------
*args : iterable
Additional positional arguments to be passed in `default_make_ipkernel_cmd`.
**kwargs : dict
Additional keyword arguments to be passed in `default_make_ipkernel_cmd`.
Returns
-------
array
A Popen command list.
```
def custom_make_ipkernel_cmd(*args, **kwargs):
"""
+ Build modified Popen command list for launching an IPython kernel with MPI.
+
+ Parameters
+ ----------
+ *args : iterable
+ Additional positional arguments to be passed in `default_make_ipkernel_cmd`.
+ **kwargs : dict
+ Additional keyword arguments to be passed in `default_make_ipkernel_cmd`.
+
Returns
-------
array
+ A Popen command list.
Notes
-----
|
codereview_new_python_data_10705
|
_execute_notebook,
test_dataset_path,
download_taxi_dataset,
- change_kernel,
)
# the kernel name "python3mpi" must match the one
# that is set up in `examples/tutorial/jupyter/execution/pandas_on_unidist/setup_kernel.py`
# for `Unidist` engine
-change_kernel(kernel_name="python3mpi")
local_notebooks_dir = "examples/tutorial/jupyter/execution/pandas_on_unidist/local"
```suggestion
set_kernel,
```
_execute_notebook,
test_dataset_path,
download_taxi_dataset,
+ set_kernel,
)
# the kernel name "python3mpi" must match the one
# that is set up in `examples/tutorial/jupyter/execution/pandas_on_unidist/setup_kernel.py`
# for `Unidist` engine
+set_kernel(kernel_name="python3mpi")
local_notebooks_dir = "examples/tutorial/jupyter/execution/pandas_on_unidist/local"
|
codereview_new_python_data_10706
|
def call_progress_bar(result_parts, line_no):
elif modin_engine == "Unidist":
from unidist import wait
else:
- raise RuntimeError(
f"ProgressBar feature is not supported for {modin_engine} engine."
)
We probably need to enable a test for this engine?
def call_progress_bar(result_parts, line_no):
elif modin_engine == "Unidist":
from unidist import wait
else:
+ raise NotImplementedError(
f"ProgressBar feature is not supported for {modin_engine} engine."
)
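Since the comment above asks about enabling a test, here is a rough sketch of what such a test could look like; `call_progress_bar_stub` is a hypothetical stand-in used only to illustrate the error path, not the real Modin function or test.

```python
import pytest

def call_progress_bar_stub(engine):
    # Stand-in for the real dispatch, illustrating only the unsupported-engine path.
    if engine not in ("Ray", "Dask", "Unidist"):
        raise NotImplementedError(
            f"ProgressBar feature is not supported for {engine} engine."
        )

def test_progress_bar_unsupported_engine():
    with pytest.raises(NotImplementedError, match="not supported"):
        call_progress_bar_stub("Python")
```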
|
codereview_new_python_data_10707
|
# ANY KIND, either express or implied. See the License for the specific language
# governing permissions and limitations under the License.
-"""The module for working with displaying progress bars for Modin execution engines."""
import os
import time
@modin-project/modin-core, does anyone remember why the file has the suffix `aqp`?
@noah-kuo, tagging you as the original author.
# ANY KIND, either express or implied. See the License for the specific language
# governing permissions and limitations under the License.
+"""
+The module for working with displaying progress bars for Modin execution engines.
+
+Modin Automatic Query Progress (AQP).
+"""
import os
import time
|
codereview_new_python_data_10708
|
def check_partition_column(partition_column, cols):
if k == partition_column:
if v == "int":
return
- raise InvalidPartitionColumn(
- "partition_column must be int, and not {0}".format(v)
- )
raise InvalidPartitionColumn(
- "partition_column {0} not found in the query".format(partition_column)
)
```suggestion
f"partition_column must be int, and not {v}"
```
def check_partition_column(partition_column, cols):
if k == partition_column:
if v == "int":
return
+ raise InvalidPartitionColumn(f"partition_column must be int, and not {v}")
raise InvalidPartitionColumn(
+ f"partition_column {partition_column} not found in the query"
)
|
codereview_new_python_data_10709
|
def test_map(data, na_values):
# Index into list objects
df_equals(
- modin_series_lists.map(lambda list: list[0]),
- pandas_series_lists.map(lambda list: list[0]),
)
```suggestion
modin_series_lists.map(lambda lst: lst[0]),
pandas_series_lists.map(lambda lst: lst[0]),
```
better not to shadow builtins; also, why this change?
def test_map(data, na_values):
# Index into list objects
df_equals(
+ modin_series_lists.map(lambda lst: lst[0]),
+ pandas_series_lists.map(lambda lst: lst[0]),
)
|
codereview_new_python_data_10710
|
def groupby_agg(
how="axis_wise",
drop=False,
):
- # Defaulting to pandas in case of an empty frame
if len(self.columns) == 0 or len(self.index) == 0:
return super().groupby_agg(
by, agg_func, axis, groupby_kwargs, agg_args, agg_kwargs, how, drop
We don't default to pandas at the front end in case of a lazy query compiler, so there are still cases where empty data could leak into the QC.
def groupby_agg(
how="axis_wise",
drop=False,
):
+ # Defaulting to pandas in case of an empty frame as we can't process it properly.
+ # Higher API level won't pass empty data here unless the frame has delayed
+ # computations. So we apparently lose some laziness here (due to index access)
+ # because of the disability to process empty groupby natively.
if len(self.columns) == 0 or len(self.index) == 0:
return super().groupby_agg(
by, agg_func, axis, groupby_kwargs, agg_args, agg_kwargs, how, drop
|
codereview_new_python_data_10711
|
def test_index_of_empty_frame():
md_df, pd_df = create_test_dfs(
{}, index=pandas.Index([], name="index name"), columns=["a", "b"]
)
- assert md_df.empty and md_df.empty
- df_equals(md_df.index, md_df.index)
# Test on an empty frame produced by Modin's logic
data = test_data_values[0]
uh? why double-check `md_df.empty`?.. should the second one be `pd_df.empty`?
def test_index_of_empty_frame():
md_df, pd_df = create_test_dfs(
{}, index=pandas.Index([], name="index name"), columns=["a", "b"]
)
+ assert md_df.empty and pd_df.empty
+ df_equals(md_df.index, pd_df.index)
# Test on an empty frame produced by Modin's logic
data = test_data_values[0]
|
codereview_new_python_data_10712
|
def test_index_of_empty_frame():
md_df, pd_df = create_test_dfs(
{}, index=pandas.Index([], name="index name"), columns=["a", "b"]
)
- assert md_df.empty and md_df.empty
- df_equals(md_df.index, md_df.index)
# Test on an empty frame produced by Modin's logic
data = test_data_values[0]
same, should it be comparing `md_df` and `pd_df`?
def test_index_of_empty_frame():
md_df, pd_df = create_test_dfs(
{}, index=pandas.Index([], name="index name"), columns=["a", "b"]
)
+ assert md_df.empty and pd_df.empty
+ df_equals(md_df.index, pd_df.index)
# Test on an empty frame produced by Modin's logic
data = test_data_values[0]
|
codereview_new_python_data_10713
|
from .arr import *
from .math import *
from .constants import *
## 'import *' may pollute namespace
Import pollutes the enclosing namespace, as the imported module [modin.numpy.arr](1) does not define '__all__'.
[Show more details](https://github.com/modin-project/modin/security/code-scanning/470)
+# Licensed to Modin Development Team under one or more contributor license agreements.
+# See the NOTICE file distributed with this work for additional information regarding
+# copyright ownership. The Modin Development Team licenses this file to you under the
+# Apache License, Version 2.0 (the "License"); you may not use this file except in
+# compliance with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software distributed under
+# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
+# ANY KIND, either express or implied. See the License for the specific language
+# governing permissions and limitations under the License.
+
from .arr import *
from .math import *
from .constants import *
|
codereview_new_python_data_10714
|
from .arr import *
from .math import *
from .constants import *
## 'import *' may pollute namespace
Import pollutes the enclosing namespace, as the imported module [modin.numpy.math](1) does not define '__all__'.
[Show more details](https://github.com/modin-project/modin/security/code-scanning/471)
+# Licensed to Modin Development Team under one or more contributor license agreements.
+# See the NOTICE file distributed with this work for additional information regarding
+# copyright ownership. The Modin Development Team licenses this file to you under the
+# Apache License, Version 2.0 (the "License"); you may not use this file except in
+# compliance with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software distributed under
+# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
+# ANY KIND, either express or implied. See the License for the specific language
+# governing permissions and limitations under the License.
+
from .arr import *
from .math import *
from .constants import *
|
codereview_new_python_data_10715
|
from .arr import *
from .math import *
from .constants import *
## 'import *' may pollute namespace
Import pollutes the enclosing namespace, as the imported module [modin.numpy.constants](1) does not define '__all__'.
[Show more details](https://github.com/modin-project/modin/security/code-scanning/472)
+# Licensed to Modin Development Team under one or more contributor license agreements.
+# See the NOTICE file distributed with this work for additional information regarding
+# copyright ownership. The Modin Development Team licenses this file to you under the
+# Apache License, Version 2.0 (the "License"); you may not use this file except in
+# compliance with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software distributed under
+# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
+# ANY KIND, either express or implied. See the License for the specific language
+# governing permissions and limitations under the License.
+
from .arr import *
from .math import *
from .constants import *
|
codereview_new_python_data_10716
|
def to_numpy(
Convert the `BasePandasDataset` to a NumPy array.
"""
from modin.config import ExperimentalNumPyAPI
if ExperimentalNumPyAPI.get():
from ..numpy.arr import array
return array(_query_compiler=self._query_compiler, _ndim=2)
-
return self._query_compiler.to_numpy(
dtype=dtype,
copy=copy,
this should be imported at the top of the file, not in the middle of nowhere
def to_numpy(
Convert the `BasePandasDataset` to a NumPy array.
"""
from modin.config import ExperimentalNumPyAPI
+
if ExperimentalNumPyAPI.get():
from ..numpy.arr import array
return array(_query_compiler=self._query_compiler, _ndim=2)
+
return self._query_compiler.to_numpy(
dtype=dtype,
copy=copy,
|
codereview_new_python_data_10717
|
def to_numpy(
Convert the `BasePandasDataset` to a NumPy array.
"""
from modin.config import ExperimentalNumPyAPI
if ExperimentalNumPyAPI.get():
from ..numpy.arr import array
return array(_query_compiler=self._query_compiler, _ndim=2)
-
return self._query_compiler.to_numpy(
dtype=dtype,
copy=copy,
I would say this should be put into `modin/pandas/dataframe.py`, not in its base class; `BaseDataset` should contain only common methods for DF and Series, and this piece differs
def to_numpy(
Convert the `BasePandasDataset` to a NumPy array.
"""
from modin.config import ExperimentalNumPyAPI
+
if ExperimentalNumPyAPI.get():
from ..numpy.arr import array
return array(_query_compiler=self._query_compiler, _ndim=2)
+
return self._query_compiler.to_numpy(
dtype=dtype,
copy=copy,
|
codereview_new_python_data_10718
|
def to_numpy(
Return the NumPy ndarray representing the values in this Series or Index.
"""
from modin.config import ExperimentalNumPyAPI
if not ExperimentalNumPyAPI.get():
return (
super(Series, self)
the import should be done at the top of the file
def to_numpy(
Return the NumPy ndarray representing the values in this Series or Index.
"""
from modin.config import ExperimentalNumPyAPI
+
if not ExperimentalNumPyAPI.get():
return (
super(Series, self)
|
codereview_new_python_data_10719
|
class TestReadFromPostgres(EnvironmentVariable, type=bool):
varname = "MODIN_TEST_READ_FROM_POSTGRES"
default = False
class ExperimentalNumPyAPI(EnvironmentVariable, type=bool):
"""Set to true to use Modin's experimental NumPy API."""
varname = "MODIN_EXPERIMENTAL_NUMPY_API"
default = False
class ReadSqlEngine(EnvironmentVariable, type=str):
"""Engine to run `read_sql`."""
Requires changes in https://modin.readthedocs.io/en/stable/flow/modin/config.html?
class TestReadFromPostgres(EnvironmentVariable, type=bool):
varname = "MODIN_TEST_READ_FROM_POSTGRES"
default = False
+
class ExperimentalNumPyAPI(EnvironmentVariable, type=bool):
"""Set to true to use Modin's experimental NumPy API."""
varname = "MODIN_EXPERIMENTAL_NUMPY_API"
default = False
+
class ReadSqlEngine(EnvironmentVariable, type=str):
"""Engine to run `read_sql`."""
|
codereview_new_python_data_10720
|
def where(condition, x=None, y=None):
- if condition:
return x
- if not condition:
return y
if hasattr(condition, "where"):
return condition.where(x=x, y=y)
This seems incorrect, since `where` is supposed to check the `condition` array element-wise. Even if the argument here were a modin.numpy array that has the `where` attribute, this check happens too early for that to matter.
```python
>>> import numpy
>>> numpy.where([1, 0], [1, 1], [2, 2])
array([1, 2])
>>> import modin.numpy as np
>>> np.where([1,0], [1,1],[2,2])
[1, 1]
```
def where(condition, x=None, y=None):
+ if condition is True:
return x
+ if condition is False:
return y
if hasattr(condition, "where"):
return condition.where(x=x, y=y)
|
codereview_new_python_data_10721
|
newaxis,
pi,
)
Do we need to define `__all__` here?
newaxis,
pi,
)
+
+__all__ = [
+ "Inf",
+ "Infinity",
+ "NAN",
+ "NINF",
+ "NZERO",
+ "NaN",
+ "PINF",
+ "PZERO",
+ "e",
+ "euler_gamma",
+ "inf",
+ "infty",
+ "nan",
+ "newaxis",
+ "pi",
+]
|
codereview_new_python_data_10722
|
# governing permissions and limitations under the License.
"""Module houses array creation methods for Modin's NumPy API."""
import numpy
from modin.error_message import ErrorMessage
from .arr import array
## Cyclic import
Import of module [modin.numpy.arr](1) begins an import cycle.
[Show more details](https://github.com/modin-project/modin/security/code-scanning/537)
# governing permissions and limitations under the License.
"""Module houses array creation methods for Modin's NumPy API."""
+
import numpy
+
from modin.error_message import ErrorMessage
from .arr import array
|
codereview_new_python_data_10723
|
# governing permissions and limitations under the License.
"""Module houses array shaping methods for Modin's NumPy API."""
import numpy
from modin.error_message import ErrorMessage
## Cyclic import
Import of module [modin.numpy.arr](1) begins an import cycle.
[Show more details](https://github.com/modin-project/modin/security/code-scanning/555)
# governing permissions and limitations under the License.
"""Module houses array shaping methods for Modin's NumPy API."""
+
import numpy
from modin.error_message import ErrorMessage
|
codereview_new_python_data_10724
|
# governing permissions and limitations under the License.
"""Module houses array shaping methods for Modin's NumPy API."""
import numpy
from modin.error_message import ErrorMessage
```suggestion
"""Module houses array shaping methods for Modin's NumPy API."""
import numpy
```
# governing permissions and limitations under the License.
"""Module houses array shaping methods for Modin's NumPy API."""
+
import numpy
from modin.error_message import ErrorMessage
|
codereview_new_python_data_10725
|
# governing permissions and limitations under the License.
"""Module houses array creation methods for Modin's NumPy API."""
import numpy
from modin.error_message import ErrorMessage
from .arr import array
```suggestion
"""Module houses array creation methods for Modin's NumPy API."""
import numpy
from modin.error_message import ErrorMessage
from .arr import array
```
# governing permissions and limitations under the License.
"""Module houses array creation methods for Modin's NumPy API."""
+
import numpy
+
from modin.error_message import ErrorMessage
from .arr import array
|
codereview_new_python_data_10726
|
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific language
# governing permissions and limitations under the License.
"""Module houses ``array`` class, that is distributed version of ``numpy.array``."""
from math import prod
```suggestion
"""Module houses ``array`` class, that is distributed version of ``numpy.array``."""
```
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific language
# governing permissions and limitations under the License.
+
"""Module houses ``array`` class, that is distributed version of ``numpy.array``."""
from math import prod
|
codereview_new_python_data_10727
|
def initialize_ray(
ray_init_kwargs = {
"num_cpus": CpuCount.get(),
"num_gpus": GpuCount.get(),
- "include_dashboard": True,
"ignore_reinit_error": True,
"object_store_memory": object_store_memory,
"_redis_password": redis_password,
Why? Also, this is not related to the issue.
def initialize_ray(
ray_init_kwargs = {
"num_cpus": CpuCount.get(),
"num_gpus": GpuCount.get(),
+ "include_dashboard": False,
"ignore_reinit_error": True,
"object_store_memory": object_store_memory,
"_redis_password": redis_password,
|
codereview_new_python_data_10728
|
from .partition import PandasOnRayDataframePartition
from .partition_manager import PandasOnRayDataframePartitionManager
-from .virtual_partition import (
- PandasOnRayDataframeVirtualPartition,
- PandasOnRayDataframeColumnPartition,
- PandasOnRayDataframeRowPartition,
-)
__all__ = [
"PandasOnRayDataframePartition",
"PandasOnRayDataframePartitionManager",
- "PandasOnRayDataframeVirtualPartition",
- "PandasOnRayDataframeColumnPartition",
- "PandasOnRayDataframeRowPartition",
]
same as for dask and also not related to the issue
from .partition import PandasOnRayDataframePartition
from .partition_manager import PandasOnRayDataframePartitionManager
__all__ = [
"PandasOnRayDataframePartition",
"PandasOnRayDataframePartitionManager",
]
|
codereview_new_python_data_10729
|
def initialize_ray(
ray_init_kwargs = {
"num_cpus": CpuCount.get(),
"num_gpus": GpuCount.get(),
- "include_dashboard": True,
"ignore_reinit_error": True,
"object_store_memory": object_store_memory,
"_redis_password": redis_password,
```suggestion
"include_dashboard": False,
```
def initialize_ray(
ray_init_kwargs = {
"num_cpus": CpuCount.get(),
"num_gpus": GpuCount.get(),
+ "include_dashboard": False,
"ignore_reinit_error": True,
"object_store_memory": object_store_memory,
"_redis_password": redis_password,
|
codereview_new_python_data_10730
|
# We have to explicitly mock subclass implementations of wait_partitions.
if engine == "Ray":
wait_method = (
- "modin.core.execution.ray.implementations."
- + "pandas_on_ray.partitioning."
+ "PandasOnRayDataframePartitionManager.wait_partitions"
)
elif engine == "Dask":
Let's make this change in a separate PR where we define a public interface for the Ray engine (e.g., expose the partition classes).
# We have to explicitly mock subclass implementations of wait_partitions.
if engine == "Ray":
wait_method = (
+ "modin.core.execution.ray.implementations.pandas_on_ray."
+ + "partitioning.partition_manager."
+ "PandasOnRayDataframePartitionManager.wait_partitions"
)
elif engine == "Dask":
|
codereview_new_python_data_10731
|
if Engine.get() == "Ray":
import ray
- from modin.core.execution.ray.implementations.pandas_on_ray.partitioning import (
PandasOnRayDataframePartition,
)
- from modin.core.execution.ray.implementations.pandas_on_ray.partitioning import (
PandasOnRayDataframeColumnPartition,
PandasOnRayDataframeRowPartition,
)
same for ray
if Engine.get() == "Ray":
import ray
+ from modin.core.execution.ray.implementations.pandas_on_ray.partitioning.partition import (
PandasOnRayDataframePartition,
)
+ from modin.core.execution.ray.implementations.pandas_on_ray.partitioning.virtual_partition import (
PandasOnRayDataframeColumnPartition,
PandasOnRayDataframeRowPartition,
)
|
codereview_new_python_data_10732
|
def fn(
method = kwargs.get("method")
if isinstance(result, pandas.Series):
- result = result.to_frame(method or MODIN_UNNAMED_SERIES_LABEL)
if not as_index:
if isinstance(by, pandas.Series):
this was actually one of those cases where we were still using the "0" column name rather than `MODIN_UNNAMED_SERIES_LABEL`
```python
>>> pd.Series([1, 2, 3]).to_frame().columns
RangeIndex(start=0, stop=1, step=1)
```
def fn(
method = kwargs.get("method")
if isinstance(result, pandas.Series):
+ result = result.to_frame(MODIN_UNNAMED_SERIES_LABEL)
if not as_index:
if isinstance(by, pandas.Series):
|
codereview_new_python_data_10733
|
def fn(
method = kwargs.get("method")
if isinstance(result, pandas.Series):
- result = result.to_frame(method or MODIN_UNNAMED_SERIES_LABEL)
if not as_index:
if isinstance(by, pandas.Series):
So `method` can be used as a name for `to_frame`. Is this a mistake or not?
def fn(
method = kwargs.get("method")
if isinstance(result, pandas.Series):
+ result = result.to_frame(MODIN_UNNAMED_SERIES_LABEL)
if not as_index:
if isinstance(by, pandas.Series):
|
codereview_new_python_data_10734
|
"TimeReindexMethod",
"TimeFillnaMethodDataframe",
"TimeDropDuplicatesDataframe",
- "TimeSimpleReshape",
"TimeReplace",
# IO benchmarks
"TimeReadCsvSkiprows",
```suggestion
"TimeStack",
"TimeUnstack",
```
"TimeReindexMethod",
"TimeFillnaMethodDataframe",
"TimeDropDuplicatesDataframe",
+ "TimeStack",
+ "TimeUnstack",
"TimeReplace",
# IO benchmarks
"TimeReadCsvSkiprows",
|
codereview_new_python_data_10735
|
def do_relabel(obj_to_relabel):
agg_kwargs=kwargs,
how="axis_wise",
)
- return result if not do_relabel else do_relabel(result)
agg = aggregate
```suggestion
return do_relabel(result) if do_relabel else result
```
positive conditions are easier to read
def do_relabel(obj_to_relabel):
agg_kwargs=kwargs,
how="axis_wise",
)
+ return do_relabel(result) if do_relabel else result
agg = aggregate
|
codereview_new_python_data_10736
|
class FactoryDispatcher(object):
@classmethod
def get_factory(cls) -> factories.BaseFactory:
"""Get current factory."""
- Engine.subscribe(cls._update_factory)
- StorageFormat.subscribe(cls._update_factory)
return cls.__factory
@classmethod
Formerly, these were in global scope at the end of this file. I moved them here because we want to subscribe after the user has possibly added extra engines.
class FactoryDispatcher(object):
@classmethod
def get_factory(cls) -> factories.BaseFactory:
"""Get current factory."""
+ if cls.__factory is None:
+ Engine.subscribe(cls._update_factory)
+ StorageFormat.subscribe(cls._update_factory)
return cls.__factory
@classmethod
|
codereview_new_python_data_10737
|
class FactoryDispatcher(object):
@classmethod
def get_factory(cls) -> factories.BaseFactory:
"""Get current factory."""
- Engine.subscribe(cls._update_factory)
- StorageFormat.subscribe(cls._update_factory)
return cls.__factory
@classmethod
This would subscribe multiple times, while originally it was one callback per module import (hence one callback per lifetime); maybe `Parameter.subscribe()` should be modified to only add a callback once? As now it just adds them to a list to maintain a FIFO order.
class FactoryDispatcher(object):
@classmethod
def get_factory(cls) -> factories.BaseFactory:
"""Get current factory."""
+ if cls.__factory is None:
+ Engine.subscribe(cls._update_factory)
+ StorageFormat.subscribe(cls._update_factory)
return cls.__factory
@classmethod
|
codereview_new_python_data_10738
|
def time_timedelta_nanoseconds(self, shape):
execute(self.series.dt.nanoseconds)
-class TimeSetCategories:
-
- params = [get_benchmark_shapes("TimeSetCategories")]
- param_names = ["shape"]
-
def setup(self, shape):
rows = shape[0]
arr = [f"s{i:04d}" for i in np.random.randint(0, rows // 10, size=rows)]
self.ts = IMPL.Series(arr).astype("category")
execute(self.ts)
def time_set_categories(self, shape):
execute(self.ts.cat.set_categories(self.ts.cat.categories[::2]))
-class TimeRemoveCategories:
-
params = [get_benchmark_shapes("TimeRemoveCategories")]
param_names = ["shape"]
- def setup(self, shape):
- rows = shape[0]
- arr = [f"s{i:04d}" for i in np.random.randint(0, rows // 10, size=rows)]
- self.ts = IMPL.Series(arr).astype("category")
- execute(self.ts)
-
def time_remove_categories(self, shape):
execute(self.ts.cat.remove_categories(self.ts.cat.categories[::2]))
```suggestion
class BaseCategories:
def setup(self, shape):
rows = shape[0]
arr = [f"s{i:04d}" for i in np.random.randint(0, rows // 10, size=rows)]
self.ts = IMPL.Series(arr).astype("category")
execute(self.ts)
class TimeSetCategories(BaseCategories):
params = [get_benchmark_shapes("TimeSetCategories")]
param_names = ["shape"]
def time_set_categories(self, shape):
execute(self.ts.cat.set_categories(self.ts.cat.categories[::2]))
```
def time_timedelta_nanoseconds(self, shape):
execute(self.series.dt.nanoseconds)
+class BaseCategories:
def setup(self, shape):
rows = shape[0]
arr = [f"s{i:04d}" for i in np.random.randint(0, rows // 10, size=rows)]
self.ts = IMPL.Series(arr).astype("category")
execute(self.ts)
+
+class TimeSetCategories(BaseCategories):
+ params = [get_benchmark_shapes("TimeSetCategories")]
+ param_names = ["shape"]
+
def time_set_categories(self, shape):
execute(self.ts.cat.set_categories(self.ts.cat.categories[::2]))
+class TimeRemoveCategories(BaseCategories):
params = [get_benchmark_shapes("TimeRemoveCategories")]
param_names = ["shape"]
def time_remove_categories(self, shape):
execute(self.ts.cat.remove_categories(self.ts.cat.categories[::2]))
|
codereview_new_python_data_10739
|
def time_timedelta_nanoseconds(self, shape):
execute(self.series.dt.nanoseconds)
-class TimeSetCategories:
-
- params = [get_benchmark_shapes("TimeSetCategories")]
- param_names = ["shape"]
-
def setup(self, shape):
rows = shape[0]
arr = [f"s{i:04d}" for i in np.random.randint(0, rows // 10, size=rows)]
self.ts = IMPL.Series(arr).astype("category")
execute(self.ts)
def time_set_categories(self, shape):
execute(self.ts.cat.set_categories(self.ts.cat.categories[::2]))
-class TimeRemoveCategories:
-
params = [get_benchmark_shapes("TimeRemoveCategories")]
param_names = ["shape"]
- def setup(self, shape):
- rows = shape[0]
- arr = [f"s{i:04d}" for i in np.random.randint(0, rows // 10, size=rows)]
- self.ts = IMPL.Series(arr).astype("category")
- execute(self.ts)
-
def time_remove_categories(self, shape):
execute(self.ts.cat.remove_categories(self.ts.cat.categories[::2]))
```suggestion
class TimeRemoveCategories(BaseCategories):
params = [get_benchmark_shapes("TimeRemoveCategories")]
param_names = ["shape"]
def time_remove_categories(self, shape):
execute(self.ts.cat.remove_categories(self.ts.cat.categories[::2]))
```
def time_timedelta_nanoseconds(self, shape):
execute(self.series.dt.nanoseconds)
+class BaseCategories:
def setup(self, shape):
rows = shape[0]
arr = [f"s{i:04d}" for i in np.random.randint(0, rows // 10, size=rows)]
self.ts = IMPL.Series(arr).astype("category")
execute(self.ts)
+
+class TimeSetCategories(BaseCategories):
+ params = [get_benchmark_shapes("TimeSetCategories")]
+ param_names = ["shape"]
+
def time_set_categories(self, shape):
execute(self.ts.cat.set_categories(self.ts.cat.categories[::2]))
+class TimeRemoveCategories(BaseCategories):
params = [get_benchmark_shapes("TimeRemoveCategories")]
param_names = ["shape"]
def time_remove_categories(self, shape):
execute(self.ts.cat.remove_categories(self.ts.cat.categories[::2]))
|
codereview_new_python_data_10740
|
def time_timedelta_nanoseconds(self, shape):
execute(self.series.dt.nanoseconds)
-class TimeSetCategories:
-
- params = [get_benchmark_shapes("TimeSetCategories")]
- param_names = ["shape"]
-
def setup(self, shape):
rows = shape[0]
arr = [f"s{i:04d}" for i in np.random.randint(0, rows // 10, size=rows)]
self.ts = IMPL.Series(arr).astype("category")
execute(self.ts)
def time_set_categories(self, shape):
execute(self.ts.cat.set_categories(self.ts.cat.categories[::2]))
-class TimeRemoveCategories:
-
params = [get_benchmark_shapes("TimeRemoveCategories")]
param_names = ["shape"]
- def setup(self, shape):
- rows = shape[0]
- arr = [f"s{i:04d}" for i in np.random.randint(0, rows // 10, size=rows)]
- self.ts = IMPL.Series(arr).astype("category")
- execute(self.ts)
-
def time_remove_categories(self, shape):
execute(self.ts.cat.remove_categories(self.ts.cat.categories[::2]))
In Modin, the same mechanism is used for these functions. It is enough to test at least one of these.
```suggestion
def time_timedelta_seconds(self, shape):
execute(self.series.dt.seconds)
```
def time_timedelta_nanoseconds(self, shape):
execute(self.series.dt.nanoseconds)
+class BaseCategories:
def setup(self, shape):
rows = shape[0]
arr = [f"s{i:04d}" for i in np.random.randint(0, rows // 10, size=rows)]
self.ts = IMPL.Series(arr).astype("category")
execute(self.ts)
+
+class TimeSetCategories(BaseCategories):
+ params = [get_benchmark_shapes("TimeSetCategories")]
+ param_names = ["shape"]
+
def time_set_categories(self, shape):
execute(self.ts.cat.set_categories(self.ts.cat.categories[::2]))
+class TimeRemoveCategories(BaseCategories):
params = [get_benchmark_shapes("TimeRemoveCategories")]
param_names = ["shape"]
def time_remove_categories(self, shape):
execute(self.ts.cat.remove_categories(self.ts.cat.categories[::2]))
|
codereview_new_python_data_10741
|
def _repartition(self, axis: Optional[int] = None):
DataFrame or Series
The repartitioned dataframe or series, depending on the original type.
"""
- if axis not in (0, 1, None):
- raise NotImplementedError
if StorageFormat.get() == "Hdk":
# Hdk uses only one partition, it makes
Can you pass the error message to the exception to be more informative?
def _repartition(self, axis: Optional[int] = None):
DataFrame or Series
The repartitioned dataframe or series, depending on the original type.
"""
+ allowed_axis_values = (0, 1, None)
+ if axis not in allowed_axis_values:
+ raise NotImplementedError(
+ f"passed `axis` parameter: {axis}, but should be one of {allowed_axis_values}"
+ )
if StorageFormat.get() == "Hdk":
# Hdk uses only one partition, it makes
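A sketch of how the more informative message could be exercised in a test (this is not an existing test in the suite, just an illustration using `pytest.raises` with `match`):
```python
import re
import modin.pandas as pd
import pytest


def test_repartition_invalid_axis():
    df = pd.DataFrame([[1, 2], [3, 4]])
    # axis=2 is outside the allowed (0, 1, None), so the message should name it.
    with pytest.raises(NotImplementedError, match=re.escape("passed `axis` parameter: 2")):
        df._repartition(axis=2)
```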
|
codereview_new_python_data_10742
|
def _repartition(self, axis: Optional[int] = None):
Parameters
----------
- axis : int, optional
Returns
-------
```suggestion
axis : {0, 1}, optional
```
def _repartition(self, axis: Optional[int] = None):
Parameters
----------
+ axis : {0, 1}, optional
Returns
-------
|
codereview_new_python_data_10743
|
def get_indices(cls, axis, partitions, index_func=None):
new_idx = [idx.apply(func) for idx in target[0]] if len(target) else []
new_idx = cls.get_objects_from_partitions(new_idx)
# filter empty indexes
- new_idx = list(filter(lambda idx: len(idx), new_idx))
- # TODO FIX INFORMATION LEAK!!!!1!!1!!
- total_idx = new_idx[0].append(new_idx[1:]) if new_idx else new_idx
return total_idx, new_idx
@classmethod
## Unnecessary lambda
This 'lambda' is just a simple wrapper around a callable object. Use that object directly.
[Show more details](https://github.com/modin-project/modin/security/code-scanning/445)
def get_indices(cls, axis, partitions, index_func=None):
new_idx = [idx.apply(func) for idx in target[0]] if len(target) else []
new_idx = cls.get_objects_from_partitions(new_idx)
# filter empty indexes
+ total_idx = list(filter(len, new_idx))
+ if len(total_idx) > 0:
+ # TODO FIX INFORMATION LEAK!!!!1!!1!!
+ total_idx = total_idx[0].append(total_idx[1:])
return total_idx, new_idx
@classmethod
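The CodeQL hint boils down to `len` already being a callable, so the lambda wrapper adds nothing; a quick standalone check:
```python
indexes = [["a", "b"], [], ["c"], []]

# The lambda wrapper and the bare builtin behave identically here.
assert list(filter(lambda idx: len(idx), indexes)) == list(filter(len, indexes))
print(list(filter(len, indexes)))  # [['a', 'b'], ['c']]
```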
|
codereview_new_python_data_10744
|
class TimeDropDuplicatesDataframe:
param_names = ["shape"]
def setup(self, shape):
- N = shape[0] // 10
K = 10
- key1 = tm.makeStringIndex(N).values.repeat(K)
- key2 = tm.makeStringIndex(N).values.repeat(K)
- self.df = IMPL.DataFrame(
- {"key1": key1, "key2": key2, "value": np.random.randn(N * K)}
- )
execute(self.df)
def time_drop_dups(self, shape):
`self.df` should have a number of columns equal to `shape[1]`
class TimeDropDuplicatesDataframe:
param_names = ["shape"]
def setup(self, shape):
+ rows,cols = shape
+ N = rows // 10
K = 10
+ self.df = IMPL.DataFrame()
+ for col in range(cols-1): # dataframe would have cols-1 keys(strings) and one value(int) column
+ self.df["key"+str(col+1)] = tm.makeStringIndex(N).values.repeat(K)
+ self.df["value"] = np.random.randn(N * K)
execute(self.df)
def time_drop_dups(self, shape):
|
codereview_new_python_data_10745
|
def setup(self, shape):
execute(self.df)
def time_drop_dups(self, shape):
- execute(self.df.drop_duplicates(["key1", "key2"]))
def time_drop_dups_inplace(self, shape):
- self.df.drop_duplicates(["key1", "key2"], inplace=True)
execute(self.df)
Let's perform the operation on all columns:
```suggestion
execute(self.df.drop_duplicates())
```
def setup(self, shape):
execute(self.df)
def time_drop_dups(self, shape):
+ execute(self.df.drop_duplicates(self.df.columns[:-1]))
def time_drop_dups_inplace(self, shape):
+ self.df.drop_duplicates(self.df.columns[:-1], inplace=True)
execute(self.df)
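For context, a tiny plain-pandas example of the two forms being benchmarked, deduplicating on a subset of key columns versus on all columns:
```python
import pandas

df = pandas.DataFrame(
    {"key1": ["a", "a", "b"], "key2": ["x", "x", "y"], "value": [1, 2, 3]}
)

# Subset form: rows 0 and 1 collide on the key columns, only the first is kept.
print(df.drop_duplicates(["key1", "key2"]).shape)  # (2, 3)

# Full form: the differing `value` column keeps all three rows.
print(df.drop_duplicates().shape)  # (3, 3)
```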
|
codereview_new_python_data_10746
|
def setup(self, shape):
execute(self.df)
def time_drop_dups(self, shape):
- execute(self.df.drop_duplicates(["key1", "key2"]))
def time_drop_dups_inplace(self, shape):
- self.df.drop_duplicates(["key1", "key2"], inplace=True)
execute(self.df)
Same
```suggestion
self.df.drop_duplicates(inplace=True)
```
def setup(self, shape):
execute(self.df)
def time_drop_dups(self, shape):
+ execute(self.df.drop_duplicates(self.df.columns[:-1]))
def time_drop_dups_inplace(self, shape):
+ self.df.drop_duplicates(self.df.columns[:-1], inplace=True)
execute(self.df)
|
codereview_new_python_data_10747
|
def _wrap_aggregation(
DataFrame or Series
Returns the same type as `self._df`.
"""
- if not isinstance(numeric_only, NumericOnly):
- numeric_only = NumericOnly(numeric_only)
agg_args = tuple() if agg_args is None else agg_args
agg_kwargs = dict() if agg_kwargs is None else agg_kwargs
I wonder if we can unconditionally cast to `NumericOnly(numeric_only)`, even if it already is a proper enum...
def _wrap_aggregation(
DataFrame or Series
Returns the same type as `self._df`.
"""
+ numeric_only = NumericOnly(numeric_only)
agg_args = tuple() if agg_args is None else agg_args
agg_kwargs = dict() if agg_kwargs is None else agg_kwargs
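The unconditional cast works because Python's `Enum` constructor is idempotent: passing an existing member back through it returns the very same member. A standalone check with a stand-in enum (not Modin's actual `NumericOnly`):
```python
from enum import Enum


class NumericOnlyDemo(Enum):
    TRUE = True
    FALSE = False
    NONE = None


# Constructing from a raw value and from an existing member both yield the member.
assert NumericOnlyDemo(True) is NumericOnlyDemo.TRUE
assert NumericOnlyDemo(NumericOnlyDemo.TRUE) is NumericOnlyDemo.TRUE
```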
|
codereview_new_python_data_10748
|
class BasePandasDataset(ClassLogger):
# but lives in "pandas" namespace.
_pandas_class = pandas.core.generic.NDFrame
- # TODO(https://github.com/modin-project/modin/issues/4821):
- # make this cache_readonly
- @property
def _is_dataframe(self) -> bool:
"""
Tell whether this is a dataframe.
`cache_readonly` is accessible from `pandas.util`, so why can't we just import and use it instead of adding a TODO? It's not specific to any Python version (I'm _not_ suggesting `cached_property` from `functools`, as I find the pandas version safer even if a little slower)
class BasePandasDataset(ClassLogger):
# but lives in "pandas" namespace.
_pandas_class = pandas.core.generic.NDFrame
+ @pandas.util.cache_readonly
def _is_dataframe(self) -> bool:
"""
Tell whether this is a dataframe.
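A small sketch of how `pandas.util.cache_readonly` behaves as a per-instance cached, read-only property (a standalone class for illustration, not Modin's `BasePandasDataset`):
```python
import pandas


class Example:
    @pandas.util.cache_readonly
    def expensive_flag(self):
        print("computed once")
        return True


obj = Example()
obj.expensive_flag  # prints "computed once" and caches the result on the instance
obj.expensive_flag  # served from the cache, nothing is recomputed
```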
|
codereview_new_python_data_10749
|
def broadcast_item(
index_values = obj.index[row_lookup]
if not index_values.equals(item.index):
axes_to_reindex["index"] = index_values
- if need_columns_reindex and isinstance(item, (pandas.DataFrame, DataFrame)):
column_values = obj.columns[col_lookup]
if not column_values.equals(item.columns):
axes_to_reindex["columns"] = column_values
this looks super-weird, should we really support both pandas and Modin versions of `DataFrame` here?
def broadcast_item(
index_values = obj.index[row_lookup]
if not index_values.equals(item.index):
axes_to_reindex["index"] = index_values
+ if need_columns_reindex and isinstance(item, DataFrame):
column_values = obj.columns[col_lookup]
if not column_values.equals(item.columns):
axes_to_reindex["columns"] = column_values
|
codereview_new_python_data_10750
|
def new_col_adder(df, partition_id):
NotImplementedError,
match="Dynamic repartitioning is currently only supported for DataFrames with 1 partition.",
):
- _ = pipeline.compute_batch()
def test_fan_out(self):
"""Check that the fan_out argument is appropriately handled."""
```suggestion
pipeline.compute_batch()
```
Why assign anything at all?
def new_col_adder(df, partition_id):
NotImplementedError,
match="Dynamic repartitioning is currently only supported for DataFrames with 1 partition.",
):
+ pipeline.compute_batch()
def test_fan_out(self):
"""Check that the fan_out argument is appropriately handled."""
|
codereview_new_python_data_10751
|
def test_astype(data):
"data", [["A", "A", "B", "B", "A"], [1, 1, 2, 1, 2, 2, 3, 1, 2, 1, 2]]
)
def test_astype_categorical(data):
- modin_df, pandas_df = pd.Series(data), pandas.Series(data)
modin_result = modin_df.astype("category")
pandas_result = pandas_df.astype("category")
```suggestion
modin_df, pandas_df = create_test_series(data)
```
def test_astype(data):
"data", [["A", "A", "B", "B", "A"], [1, 1, 2, 1, 2, 2, 3, 1, 2, 1, 2]]
)
def test_astype_categorical(data):
+ modin_df, pandas_df = create_test_series(data)
modin_result = modin_df.astype("category")
pandas_result = pandas_df.astype("category")
|
codereview_new_python_data_10752
|
def initialize_ray(
What password to use when connecting to Redis.
If not specified, ``modin.config.RayRedisPassword`` is used.
"""
extra_init_kw = {"runtime_env": {"env_vars": {"__MODIN_AUTOIMPORT_PANDAS__": "1"}}}
if not ray.is_initialized() or override_is_cluster:
- # TODO(https://github.com/ray-project/ray/issues/28216): remove this
- # workaround once Ray gives a better way to suppress task errors.
- # Ideally we would not set global environment variables.
- # If user has explicitly set _RAY_IGNORE_UNHANDLED_ERRORS_VAR, don't
- # don't override its value.
- if _RAY_IGNORE_UNHANDLED_ERRORS_VAR not in os.environ:
- os.environ[_RAY_IGNORE_UNHANDLED_ERRORS_VAR] = "1"
cluster = override_is_cluster or IsRayCluster.get()
redis_address = override_redis_address or RayRedisAddress.get()
redis_password = (
What if ray is already initialized by the user? Do we want to suppress errors in remote tasks in that case?
def initialize_ray(
What password to use when connecting to Redis.
If not specified, ``modin.config.RayRedisPassword`` is used.
"""
+ # TODO(https://github.com/ray-project/ray/issues/28216): remove this
+ # workaround once Ray gives a better way to suppress task errors.
+ # Ideally we would not set global environment variables.
+ # If user has explicitly set _RAY_IGNORE_UNHANDLED_ERRORS_VAR, don't
+ # don't override its value.
+ if _RAY_IGNORE_UNHANDLED_ERRORS_VAR not in os.environ:
+ os.environ[_RAY_IGNORE_UNHANDLED_ERRORS_VAR] = "1"
extra_init_kw = {"runtime_env": {"env_vars": {"__MODIN_AUTOIMPORT_PANDAS__": "1"}}}
if not ray.is_initialized() or override_is_cluster:
cluster = override_is_cluster or IsRayCluster.get()
redis_address = override_redis_address or RayRedisAddress.get()
redis_password = (
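The `if ... not in os.environ` guard above is equivalent to a single `setdefault` call; a one-line sketch of the same "respect a user-provided value" pattern (the string value of the constant is assumed here purely for illustration):
```python
import os

_RAY_IGNORE_UNHANDLED_ERRORS_VAR = "RAY_IGNORE_UNHANDLED_ERRORS"  # assumed value

# Set the variable only if the user has not already set it explicitly.
os.environ.setdefault(_RAY_IGNORE_UNHANDLED_ERRORS_VAR, "1")
```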
|
codereview_new_python_data_10753
|
def execute(
return
partitions = df._query_compiler._modin_frame._partitions.flatten()
if len(partitions) > 0 and hasattr(partitions[0], "wait"):
- all(map(lambda partition: partition.wait() or True, partitions))
return
# compatibility with old Modin versions
I would suggest using either a simple for-loop or a dedicated partition manager's method; I don't see a reason to use this complicated `all(map(...))` approach
```suggestion
df._query_compiler._modin_frame._partition_mgr_cls.wait_partitions(partitions)
```
btw, the further code in this function still uses the incorrect logic
def execute(
return
partitions = df._query_compiler._modin_frame._partitions.flatten()
if len(partitions) > 0 and hasattr(partitions[0], "wait"):
+ df._query_compiler._modin_frame._partition_mgr_cls.wait_partitions(partitions)
return
# compatibility with old Modin versions
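For comparison, the simple for-loop alternative the reviewer mentions would look roughly like this (a sketch over the `partitions` array from the snippet above, not the partition manager's actual implementation):
```python
# Same intent as `all(map(lambda partition: partition.wait() or True, partitions))`,
# but without abusing all()/map() for their side effects.
for partition in partitions:
    partition.wait()
```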
|
codereview_new_python_data_10754
|
def execute(
df._query_compiler._modin_frame._execute()
return
partitions = df._query_compiler._modin_frame._partitions.flatten()
- if len(partitions) > 0 and hasattr(partitions[0], "wait"):
- df._query_compiler._modin_frame._partition_mgr_cls.wait_partitions(
- partitions
- )
return
# compatibility with old Modin versions
we probably should change the condition then to something like:
```python
if len(partitions) and hasattr(partition_mgr_cls, "wait_partitions"):
```
def execute(
df._query_compiler._modin_frame._execute()
return
partitions = df._query_compiler._modin_frame._partitions.flatten()
+ mgr_cls = df._query_compiler._modin_frame._partition_mgr_cls
+ if len(partitions) and hasattr(mgr_cls, "wait_partitions"):
+ mgr_cls.wait_partitions(partitions)
return
# compatibility with old Modin versions
|
codereview_new_python_data_10755
|
def test_dict(self):
assert mdt == "category"
assert isinstance(mdt, pandas.CategoricalDtype)
assert pandas.api.types.is_categorical_dtype(mdt)
if type(mdt) != pandas.CategoricalDtype:
# This is a lazy proxy.
- # Make sure the table is not materialized after
- # the instance check and comparison with string.
assert mdt._table is not None
assert mdt == pdt
assert pdt == mdt
- assert str(mdt) == str(pdt)
assert repr(mdt) == repr(pdt)
can we put the `str` case in the "lazy" section of the test? We would like to be able to print `df.dtypes`, which triggers `str(dtype)` for every dtype, without materialization
def test_dict(self):
assert mdt == "category"
assert isinstance(mdt, pandas.CategoricalDtype)
assert pandas.api.types.is_categorical_dtype(mdt)
+ assert str(mdt) == str(pdt)
if type(mdt) != pandas.CategoricalDtype:
# This is a lazy proxy.
+ # Make sure the table is not materialized yet.
assert mdt._table is not None
assert mdt == pdt
assert pdt == mdt
assert repr(mdt) == repr(pdt)
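The test leans on the proxy deferring any expensive work until it is genuinely needed; a generic sketch of that lazy-proxy idea (not Modin's `LazyProxyCategoricalDtype`, whose internals differ) might look like:
```python
class LazyProxy:
    """Defer building an expensive object until something forces it."""

    def __init__(self, builder):
        self._builder = builder
        self._materialized = None

    def _materialize(self):
        if self._materialized is None:
            self._materialized = self._builder()
        return self._materialized

    def __eq__(self, other):
        # This sketch simply materializes on comparison; a smarter proxy can
        # answer cheap checks (instance checks, string comparisons) lazily.
        return self._materialize() == other


proxy = LazyProxy(lambda: ["a", "b", "c"])
assert proxy._materialized is None   # nothing has been built yet
assert proxy == ["a", "b", "c"]      # the comparison triggers materialization
```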
|
codereview_new_python_data_10756
|
def test_dict(self):
assert pandas.api.types.is_categorical_dtype(mdt)
assert str(mdt) == str(pdt)
- if type(mdt) != pandas.CategoricalDtype:
- # This is a lazy proxy.
- # Make sure the table is not materialized yet.
- assert mdt._table is not None
assert mdt == pdt
assert pdt == mdt
why do we need this if-conditioning if we intend to test a lazy proxy here?
```suggestion
assert type(mdt) == LazyProxyCategoricalDtype
# This is a lazy proxy.
# Make sure the table is not materialized yet.
assert mdt._table is not None
```
def test_dict(self):
assert pandas.api.types.is_categorical_dtype(mdt)
assert str(mdt) == str(pdt)
+ # Make sure the lazy proxy dtype is not materialized yet.
+ assert type(mdt) != pandas.CategoricalDtype
+ assert mdt._table is not None
assert mdt == pdt
assert pdt == mdt
|
codereview_new_python_data_10757
|
# ANY KIND, either express or implied. See the License for the specific language
# governing permissions and limitations under the License.
-"""Utilities for internal use by te hdk_on_native module."""
import pandas
```suggestion
"""Utilities for internal use by the ``HdkOnNativeDataframe``."""
```
# ANY KIND, either express or implied. See the License for the specific language
# governing permissions and limitations under the License.
+"""Utilities for internal use by the ``HdkOnNativeDataframe``."""
import pandas
|
codereview_new_python_data_10758
|
def ml(train_final, test_final):
evals=watchlist,
feval=func_loss,
early_stopping_rounds=10,
- verbose_eval=1000,
)
yp = clf.predict(dvalid)
```suggestion
verbose_eval=None,
```
Decided to leave as is to not issue a lot of warnings which might be kind of confusing to users.
def ml(train_final, test_final):
evals=watchlist,
feval=func_loss,
early_stopping_rounds=10,
+ verbose_eval=None,
)
yp = clf.predict(dvalid)
|
codereview_new_python_data_10759
|
def ml(train_final, test_final):
evals=watchlist,
feval=func_loss,
early_stopping_rounds=10,
- verbose_eval=1000,
)
yp = clf.predict(dvalid)
```suggestion
verbose_eval=None,
```
Decided to leave as is to not issue a lot of warnings which might be kind of confusing to users.
def ml(train_final, test_final):
evals=watchlist,
feval=func_loss,
early_stopping_rounds=10,
+ verbose_eval=None,
)
yp = clf.predict(dvalid)
|
codereview_new_python_data_10760
|
def test_info(data, verbose, max_cols, memory_usage, null_counts):
assert modin_info[1:] == pandas_info[1:]
-def test_info_default_cols():
- # Covers https://github.com/modin-project/modin/issues/5137
- with io.StringIO() as first, io.StringIO() as second:
- data = np.random.randint(0, 100, (10, 10))
- eval_general(
- pd.DataFrame(data),
- pandas.DataFrame(data),
- operation=lambda df, **kwargs: df.info(**kwargs),
- buf=lambda df: second if isinstance(df, pandas.DataFrame) else first,
- )
- modin_info = first.getvalue().splitlines()
- pandas_info = second.getvalue().splitlines()
-
- assert modin_info[0] == str(pd.DataFrame)
- assert pandas_info[0] == str(pandas.DataFrame)
- assert modin_info[1:] == pandas_info[1:]
-
-
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize("skipna", bool_arg_values, ids=bool_arg_keys)
@pytest.mark.parametrize("numeric_only", bool_arg_values, ids=bool_arg_keys)
Should we delete this test case?
def test_info(data, verbose, max_cols, memory_usage, null_counts):
assert modin_info[1:] == pandas_info[1:]
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize("skipna", bool_arg_values, ids=bool_arg_keys)
@pytest.mark.parametrize("numeric_only", bool_arg_values, ids=bool_arg_keys)
|
codereview_new_python_data_10761
|
def test_add_does_not_change_original_series_name():
s2 = pd.Series(2, name=2)
original_s1 = s1.copy(deep=True)
original_s2 = s2.copy(deep=True)
- s1 + s2
df_equals(s1, original_s1)
df_equals(s2, original_s2)
## Statement has no effect
This statement has no effect.
[Show more details](https://github.com/modin-project/modin/security/code-scanning/385)
def test_add_does_not_change_original_series_name():
s2 = pd.Series(2, name=2)
original_s1 = s1.copy(deep=True)
original_s2 = s2.copy(deep=True)
+ _ = s1 + s2
df_equals(s1, original_s1)
df_equals(s2, original_s2)
|
codereview_new_python_data_10762
|
def test_add_does_not_change_original_series_name():
s2 = pd.Series(2, name=2)
original_s1 = s1.copy(deep=True)
original_s2 = s2.copy(deep=True)
- s1 + s2
df_equals(s1, original_s1)
df_equals(s2, original_s2)
Maybe make the change to silence the CodeQL warning?
```suggestion
_ = s1 + s2
```
def test_add_does_not_change_original_series_name():
s2 = pd.Series(2, name=2)
original_s1 = s1.copy(deep=True)
original_s2 = s2.copy(deep=True)
+ _ = s1 + s2
df_equals(s1, original_s1)
df_equals(s2, original_s2)
|
codereview_new_python_data_10763
|
def test_add_does_not_change_original_series_name():
s2 = pd.Series(2, name=2)
original_s1 = s1.copy(deep=True)
original_s2 = s2.copy(deep=True)
- s1 + s2
df_equals(s1, original_s1)
df_equals(s2, original_s2)
why not do something like `s1.add(s2)` or something along those lines?
def test_add_does_not_change_original_series_name():
s2 = pd.Series(2, name=2)
original_s1 = s1.copy(deep=True)
original_s2 = s2.copy(deep=True)
+ _ = s1 + s2
df_equals(s1, original_s1)
df_equals(s2, original_s2)
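A sketch of the trade-off the reviewer raises: an explicit method call reads as intentional and does not trip the "statement has no effect" check, while the bare operator expression does, even though both compute the same sum:
```python
import pandas

s1 = pandas.Series(1, name=1)
s2 = pandas.Series(2, name=2)

# Operator form: CodeQL flags the discarded expression unless it is assigned.
_ = s1 + s2

# Method form: an ordinary call, so the discarded result is not flagged,
# though it no longer exercises the `__add__`/`__radd__` operator path directly.
s1.add(s2)
```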
|
codereview_new_python_data_10764
|
def test_add_custom_class():
)
def test_non_commutative_multiply():
# This test checks that mul and rmul do different things when
# multiplication is not commutative, e.g. for adding a string to a string.
# For context see https://github.com/modin-project/modin/issues/5238
modin_df, pandas_df = create_test_dfs([1], dtype=int)
integer = NonCommutativeMultiplyInteger(2)
- # It's tricky to get the non commutative integer class implementation
- # right, so before we do the actual test, check that the operation
- # we care about is really not commmutative in pandas.
- assert not (integer * pandas_df).equals(pandas_df * integer)
eval_general(modin_df, pandas_df, lambda s: integer * s)
eval_general(modin_df, pandas_df, lambda s: s * integer)
this should be a separate test that checks that the implementation of `NonCommutativeMultiplyInteger` is correct
def test_add_custom_class():
)
+def test_non_commutative_multiply_pandas():
+ # The non commutative integer class implementation is tricky. Check that
+ # multiplying such an integer with a pandas dataframe is really not
+ # commutative.
+ pandas_df = pd.DataFrame([[1]], dtype=int)
+ integer = NonCommutativeMultiplyInteger(2)
+ assert not (integer * pandas_df).equals(pandas_df * integer)
+
+
def test_non_commutative_multiply():
# This test checks that mul and rmul do different things when
# multiplication is not commutative, e.g. for adding a string to a string.
# For context see https://github.com/modin-project/modin/issues/5238
modin_df, pandas_df = create_test_dfs([1], dtype=int)
integer = NonCommutativeMultiplyInteger(2)
eval_general(modin_df, pandas_df, lambda s: integer * s)
eval_general(modin_df, pandas_df, lambda s: s * integer)
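For context, a self-contained sketch of what a deliberately non-commutative multiply wrapper can look like; this is an illustration only, not the test suite's actual `NonCommutativeMultiplyInteger`:
```python
class NonCommutativeInt:
    """Integer wrapper whose left- and right-multiplication differ on purpose."""

    def __init__(self, value):
        self.value = value

    def __mul__(self, other):
        # NonCommutativeInt * other
        return other * self.value + 1

    def __rmul__(self, other):
        # other * NonCommutativeInt
        return other * self.value


n = NonCommutativeInt(2)
assert n * 3 == 7   # __mul__ adds one, so the order of operands matters
assert 3 * n == 6   # __rmul__ does not
```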
|
codereview_new_python_data_10765
|
def test_non_commutative_add_string_to_series(data):
eval_general(*create_test_series(data), lambda s: s + "string")
def test_non_commutative_multiply():
# This test checks that mul and rmul do different things when
# multiplication is not commutative, e.g. for adding a string to a string.
# For context see https://github.com/modin-project/modin/issues/5238
modin_series, pandas_series = create_test_series(1, dtype=int)
integer = NonCommutativeMultiplyInteger(2)
- # It's tricky to get the non commutative integer class implementation
- # right, so before we do the actual test, check that the operation
- # we care about is really not commmutative in pandas.
- assert not (integer * pandas_series).equals(pandas_series * integer)
eval_general(modin_series, pandas_series, lambda s: integer * s)
eval_general(modin_series, pandas_series, lambda s: s * integer)
again, this should be a separate test that checks the implementation of the non-commutative int
def test_non_commutative_add_string_to_series(data):
eval_general(*create_test_series(data), lambda s: s + "string")
+def test_non_commutative_multiply_pandas():
+ # The non commutative integer class implementation is tricky. Check that
+ # multiplying such an integer with a pandas series is really not
+ # commutative.
+ pandas_series = pd.DataFrame([[1]], dtype=int)
+ integer = NonCommutativeMultiplyInteger(2)
+ assert not (integer * pandas_series).equals(pandas_series * integer)
+
+
def test_non_commutative_multiply():
# This test checks that mul and rmul do different things when
# multiplication is not commutative, e.g. for adding a string to a string.
# For context see https://github.com/modin-project/modin/issues/5238
modin_series, pandas_series = create_test_series(1, dtype=int)
integer = NonCommutativeMultiplyInteger(2)
eval_general(modin_series, pandas_series, lambda s: integer * s)
eval_general(modin_series, pandas_series, lambda s: s * integer)
|
codereview_new_python_data_10766
|
def compare(self, other, **kwargs):
return self.__constructor__(
self._modin_frame.broadcast_apply_full_axis(
0,
- lambda l, r: pandas.DataFrame.compare(l, r, **kwargs),
other._modin_frame,
)
)
```suggestion
lambda l, r: pandas.DataFrame.compare(l, other=r, **kwargs),
```
for clarity?
def compare(self, other, **kwargs):
return self.__constructor__(
self._modin_frame.broadcast_apply_full_axis(
0,
+ lambda l, r: pandas.DataFrame.compare(l, other=r, **kwargs),
other._modin_frame,
)
)
|