Columns: id (string, length 30 to 32), content (string, length 139 to 2.8k)
codereview_new_python_data_13360
def _entropy(self, kappa):
        The default limits of integration are endpoints of the interval of
        width ``2*pi`` centered at `loc` (e.g. ``[-pi, pi]`` when ``loc=0``).\n\n""")
-    def expect(self, func=None, args=(), loc=0, scale=1, lb=-np.pi, ub=np.pi,
                conditional=False, **kwds):
        _a, _b = -np.pi, np.pi

I think this is what was intended, given the notes above.

```suggestion
    def expect(self, func=None, args=(), loc=0, scale=1, lb=None, ub=None,
```

def _entropy(self, kappa):
        The default limits of integration are endpoints of the interval of
        width ``2*pi`` centered at `loc` (e.g. ``[-pi, pi]`` when ``loc=0``).\n\n""")
+    def expect(self, func=None, args=(), loc=0, scale=1, lb=None, ub=None,
                conditional=False, **kwds):
        _a, _b = -np.pi, np.pi
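The review above is about defaulting `lb`/`ub` to the distribution's natural
2*pi interval. Here is a minimal sketch of that defaulting logic, assuming
only what the quoted docstring states (an interval of width ``2*pi`` centered
at `loc`); the helper name is illustrative, not SciPy's:

```python
import numpy as np

def _default_limits(loc, lb=None, ub=None):
    # Hypothetical helper: when no limits are given, fall back to the
    # 2*pi-wide interval centered at loc, as the docstring describes.
    lb = loc - np.pi if lb is None else lb
    ub = loc + np.pi if ub is None else ub
    return lb, ub

print(_default_limits(loc=0))  # (-3.141592653589793, 3.141592653589793)
print(_default_limits(loc=2))  # (2 - pi, 2 + pi)
```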
codereview_new_python_data_13361
def expect(self, func=None, args=(), loc=0, scale=1, lb=None, ub=None,
        >>> from scipy.stats import vonmises
        >>> res = vonmises(loc=2, kappa=1).expect(lambda x: np.exp(1j*x),
        ...                                       complex_func=True)
        (-0.18576377217422957+0.40590124735052263j)
        >>> np.angle(res)  # location of the (circular) distribution

```suggestion
        >>> res
        (-0.18576377217422957+0.40590124735052263j)
```

def expect(self, func=None, args=(), loc=0, scale=1, lb=None, ub=None,
        >>> from scipy.stats import vonmises
        >>> res = vonmises(loc=2, kappa=1).expect(lambda x: np.exp(1j*x),
        ...                                       complex_func=True)
+        >>> res
        (-0.18576377217422957+0.40590124735052263j)
        >>> np.angle(res)  # location of the (circular) distribution
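Why the reviewer adds ``>>> res``: in a doctest, an output line must follow an
expression that produces it, and an assignment echoes nothing. A self-contained
illustration of the mechanics:

```python
import doctest

def f():
    """
    >>> res = 1 + 2j   # an assignment echoes nothing
    >>> res            # the expression's repr is compared to the next line
    (1+2j)
    """

doctest.run_docstring_examples(f, {}, verbose=False)  # silent: it passes
```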
codereview_new_python_data_13362
    PearsonRResult
    FitResult
    OddsRatioResult
-    Ttest_Result

"""

__all__ = ['BinomTestResult', 'RelativeRiskResult', 'TukeyHSDResult',
           'PearsonRResult', 'FitResult', 'OddsRatioResult',
-           'Ttest_Result']

from ._binomtest import BinomTestResult
from ._odds_ratio import OddsRatioResult
from ._relative_risk import RelativeRiskResult
from ._hypotests import TukeyHSDResult
-from ._stats_py import PearsonRResult, Ttest_Result
from ._fit import FitResult

```suggestion
from ._stats_py import PearsonRResult, TtestResult
```

    PearsonRResult
    FitResult
    OddsRatioResult
+    TtestResult

"""

__all__ = ['BinomTestResult', 'RelativeRiskResult', 'TukeyHSDResult',
           'PearsonRResult', 'FitResult', 'OddsRatioResult',
+           'TtestResult']

from ._binomtest import BinomTestResult
from ._odds_ratio import OddsRatioResult
from ._relative_risk import RelativeRiskResult
from ._hypotests import TukeyHSDResult
+from ._stats_py import PearsonRResult, TtestResult
from ._fit import FitResult
codereview_new_python_data_13363
    PearsonRResult
    FitResult
    OddsRatioResult
-    Ttest_Result

"""

__all__ = ['BinomTestResult', 'RelativeRiskResult', 'TukeyHSDResult',
           'PearsonRResult', 'FitResult', 'OddsRatioResult',
-           'Ttest_Result']

from ._binomtest import BinomTestResult
from ._odds_ratio import OddsRatioResult
from ._relative_risk import RelativeRiskResult
from ._hypotests import TukeyHSDResult
-from ._stats_py import PearsonRResult, Ttest_Result
from ._fit import FitResult

```suggestion
           'TtestResult']
```

    PearsonRResult
    FitResult
    OddsRatioResult
+    TtestResult

"""

__all__ = ['BinomTestResult', 'RelativeRiskResult', 'TukeyHSDResult',
           'PearsonRResult', 'FitResult', 'OddsRatioResult',
+           'TtestResult']

from ._binomtest import BinomTestResult
from ._odds_ratio import OddsRatioResult
from ._relative_risk import RelativeRiskResult
from ._hypotests import TukeyHSDResult
+from ._stats_py import PearsonRResult, TtestResult
from ._fit import FitResult
codereview_new_python_data_13364
    PearsonRResult
    FitResult
    OddsRatioResult
-    Ttest_Result

"""

__all__ = ['BinomTestResult', 'RelativeRiskResult', 'TukeyHSDResult',
           'PearsonRResult', 'FitResult', 'OddsRatioResult',
-           'Ttest_Result']

from ._binomtest import BinomTestResult
from ._odds_ratio import OddsRatioResult
from ._relative_risk import RelativeRiskResult
from ._hypotests import TukeyHSDResult
-from ._stats_py import PearsonRResult, Ttest_Result
from ._fit import FitResult

```suggestion
    TtestResult
```

    PearsonRResult
    FitResult
    OddsRatioResult
+    TtestResult

"""

__all__ = ['BinomTestResult', 'RelativeRiskResult', 'TukeyHSDResult',
           'PearsonRResult', 'FitResult', 'OddsRatioResult',
+           'TtestResult']

from ._binomtest import BinomTestResult
from ._odds_ratio import OddsRatioResult
from ._relative_risk import RelativeRiskResult
from ._hypotests import TukeyHSDResult
+from ._stats_py import PearsonRResult, TtestResult
from ._fit import FitResult
codereview_new_python_data_13365
class rv_histogram(rv_continuous):
        The second containing the (n+1) bin boundaries
        In particular the return value np.histogram is accepted

-    .. versionadded:: 1.10.0
-
    Notes
    -----
    There are no additional shape parameters except for the loc and scale.

```suggestion
```

This was included in the PR gh-16768 that is being backported, but it is
unrelated and should not be backported. Otherwise, the stats stuff looks good
to me.

class rv_histogram(rv_continuous):
        The second containing the (n+1) bin boundaries
        In particular the return value np.histogram is accepted

    Notes
    -----
    There are no additional shape parameters except for the loc and scale.
codereview_new_python_data_13366
def shgo(func, bounds, args=(), constraints=None, n=None, iters=1,
    ...         {'type': 'eq', 'fun': h1})
    >>> bounds = [(0, 1.0),]*4
    >>> res = shgo(f, bounds, iters=3, constraints=cons)
     message: Optimization terminated successfully.
     success: True
         fun: 29.894378159142136

```suggestion
    >>> res
     message: Optimization terminated successfully.
```

def shgo(func, bounds, args=(), constraints=None, n=None, iters=1,
    ...         {'type': 'eq', 'fun': h1})
    >>> bounds = [(0, 1.0),]*4
    >>> res = shgo(f, bounds, iters=3, constraints=cons)
+    >>> res
     message: Optimization terminated successfully.
     success: True
         fun: 29.894378159142136
codereview_new_python_data_13367
def least_squares(
        is applied), a sparse matrix (csr_matrix preferred for performance) or
        a `scipy.sparse.linalg.LinearOperator`.
    bounds : 2-tuple of array_like or `Bounds`, optional
-        There are two ways to specify bounds for methods 'trf' and 'dogbox':
            1. Instance of `Bounds` class
            2. Lower and upper bounds on independent variables. Defaults to no

```suggestion
        There are two ways to specify bounds:
```

Is this a wrong copy-paste?

def least_squares(
        is applied), a sparse matrix (csr_matrix preferred for performance) or
        a `scipy.sparse.linalg.LinearOperator`.
    bounds : 2-tuple of array_like or `Bounds`, optional
+        There are two ways to specify bounds:
            1. Instance of `Bounds` class
            2. Lower and upper bounds on independent variables. Defaults to no
codereview_new_python_data_13368
def test_bounds_shape(self):
                              method=self.method)
        assert_allclose(res.x, [0.0, 0.5], atol=1e-5)

-    def test_bound_instances(self):
        res = least_squares(fun_trivial, 0.5, bounds=Bounds())
        assert_allclose(res.x, 0.0, atol=1e-4)

```suggestion
    def test_bounds_instances(self):
```

Very minor point.

def test_bounds_shape(self):
                              method=self.method)
        assert_allclose(res.x, [0.0, 0.5], atol=1e-5)

+    def test_bounds_instances(self):
        res = least_squares(fun_trivial, 0.5, bounds=Bounds())
        assert_allclose(res.x, 0.0, atol=1e-4)
codereview_new_python_data_13369
class InterpolatedUnivariateSpline(UnivariateSpline):
        2-sequence specifying the boundary of the approximation interval. If
        None (default), ``bbox=[x[0], x[-1]]``.
    k : int, optional
-        Degree of the smoothing spline. Must be 1 <= `k` <= 5. Default is
-        `k` = 3, a cubic spline.
    ext : int or str, optional
        Controls the extrapolation mode for elements
        not in the interval defined by the knot sequence.

```suggestion
        Degree of the smoothing spline. Must be ``1 <= k <= 5``. Default is
```

class InterpolatedUnivariateSpline(UnivariateSpline):
        2-sequence specifying the boundary of the approximation interval. If
        None (default), ``bbox=[x[0], x[-1]]``.
    k : int, optional
+        Degree of the smoothing spline. Must be ``1 <= k <= 5``. Default is
+        ``k = 3`, a cubic spline.
    ext : int or str, optional
        Controls the extrapolation mode for elements
        not in the interval defined by the knot sequence.
codereview_new_python_data_13370
class InterpolatedUnivariateSpline(UnivariateSpline):
        2-sequence specifying the boundary of the approximation interval. If
        None (default), ``bbox=[x[0], x[-1]]``.
    k : int, optional
-        Degree of the smoothing spline. Must be 1 <= `k` <= 5. Default is
-        `k` = 3, a cubic spline.
    ext : int or str, optional
        Controls the extrapolation mode for elements
        not in the interval defined by the knot sequence.

```suggestion
        ``k = 3`, a cubic spline.
```

class InterpolatedUnivariateSpline(UnivariateSpline):
        2-sequence specifying the boundary of the approximation interval. If
        None (default), ``bbox=[x[0], x[-1]]``.
    k : int, optional
+        Degree of the smoothing spline. Must be ``1 <= k <= 5``. Default is
+        ``k = 3`, a cubic spline.
    ext : int or str, optional
        Controls the extrapolation mode for elements
        not in the interval defined by the knot sequence.
codereview_new_python_data_13371
class InterpolatedUnivariateSpline(UnivariateSpline):
        None (default), ``bbox=[x[0], x[-1]]``.
    k : int, optional
        Degree of the smoothing spline. Must be ``1 <= k <= 5``. Default is
-        ``k = 3`, a cubic spline.
    ext : int or str, optional
        Controls the extrapolation mode for elements
        not in the interval defined by the knot sequence.

```suggestion
        ``k = 3``, a cubic spline.
```

class InterpolatedUnivariateSpline(UnivariateSpline):
        None (default), ``bbox=[x[0], x[-1]]``.
    k : int, optional
        Degree of the smoothing spline. Must be ``1 <= k <= 5``. Default is
+        ``k = 3``, a cubic spline.
    ext : int or str, optional
        Controls the extrapolation mode for elements
        not in the interval defined by the knot sequence.
codereview_new_python_data_13372
def ppf(self, q, *args, **kwds):
        cond = cond0 & cond1
        output = np.full(shape(cond), fill_value=self.badvalue, dtype='d')
        # output type 'd' to handle nin and inf
-        place(output, (q == 0), _a - 1 + loc)
        place(output, cond2, _b + loc)
        if np.any(cond):
            goodargs = argsreduce(cond, *((q,)+args+(loc,)))

```suggestion
        place(output, (q == 0)*(cond == cond), _a-1 + loc)
```

def ppf(self, q, *args, **kwds):
        cond = cond0 & cond1
        output = np.full(shape(cond), fill_value=self.badvalue, dtype='d')
        # output type 'd' to handle nin and inf
+        place(output, (q == 0)*(cond == cond), _a-1 + loc)
        place(output, cond2, _b + loc)
        if np.any(cond):
            goodargs = argsreduce(cond, *((q,)+args+(loc,)))
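One plausible reading of why the suggestion multiplies by ``(cond == cond)``:
`np.place` needs a boolean mask matching the output array's size, and when `q`
is scalar-like, ``(q == 0)`` alone has the wrong shape. A small self-contained
sketch of that broadcasting trick (the values here are made up):

```python
import numpy as np

q = np.array(0.0)                    # scalar-like q
cond = np.ones((2, 3), dtype=bool)   # stands in for cond0 & cond1
output = np.full(cond.shape, -1.0)

# (q == 0) alone is a 0-d mask; multiplying by the all-True array
# (cond == cond) broadcasts it to the full output shape.
mask = (q == 0) * (cond == cond)
np.place(output, mask, 5.0)
print(output)  # every entry replaced with 5.0
```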
codereview_new_python_data_13373
def design_matrix(cls, x, t, k, extrapolate=False):
        else:
            int_dtype = np.int64

        # Preallocate indptr and indices
-        indices = np.zeros(n * (k + 1), dtype=int_dtype)
        indptr = np.arange(0, (n + 1) * (k + 1), k + 1, dtype=int_dtype)

        data, indices, indptr = _bspl._make_design_matrix(

np.empty to signal the intent to fill in?

def design_matrix(cls, x, t, k, extrapolate=False):
        else:
            int_dtype = np.int64

        # Preallocate indptr and indices
+        indices = np.empty(n * (k + 1), dtype=int_dtype)
        indptr = np.arange(0, (n + 1) * (k + 1), k + 1, dtype=int_dtype)

        data, indices, indptr = _bspl._make_design_matrix(
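The reviewer's point in one snippet: `np.zeros` pays to initialize memory that
is immediately overwritten, while `np.empty` allocates without initializing
and signals that the buffer will be fully written before it is read. A tiny
illustration:

```python
import numpy as np

buf = np.empty(6, dtype=np.int64)  # contents are arbitrary until assigned
buf[:] = np.arange(6)              # fill every slot before any use
print(buf)                         # [0 1 2 3 4 5]
```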
codereview_new_python_data_13374
def ttest_1samp(a, popmean, axis=0, nan_policy='propagate',
    df = n - 1

    mean = np.mean(a, axis)
-    d = mean - popmean[..., 0]  # popmean is an array because of decorator
    v = _var(a, axis, ddof=1)
    denom = np.sqrt(v / n)

Hmm, the behavior was buggy before (see
https://github.com/scipy/scipy/pull/16835/files#r945519181) but we still have
a problem with this code: if the user were to pass `popmean` of the same shape
as `a`, for instance, then only the 0th element along `axis` would be
considered.

def ttest_1samp(a, popmean, axis=0, nan_policy='propagate',
    df = n - 1

    mean = np.mean(a, axis)
+    try:
+        popmean = np.squeeze(popmean, axis=axis)
+    except ValueError as e:
+        raise ValueError("`popmean.shape[axis]` must equal 1.") from e
+    d = mean - popmean
    v = _var(a, axis, ddof=1)
    denom = np.sqrt(v / n)
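The fix above leans on a documented `np.squeeze` behavior: with an explicit
`axis`, it removes only that axis and raises `ValueError` when the axis does
not have length 1, which is exactly the validation the new code wants. A
minimal demonstration:

```python
import numpy as np

popmean = np.zeros((4, 1, 3))
print(np.squeeze(popmean, axis=1).shape)  # (4, 3): the length-1 axis is removed

try:
    np.squeeze(popmean, axis=0)           # axis 0 has length 4, not 1
except ValueError as e:
    print("ValueError:", e)
```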
codereview_new_python_data_13375
def circstd(samples, high=2*pi, low=0, axis=None, nan_policy='propagate', *,
    ...                       0.104, -0.136, -0.867, 0.012, 0.105])
    >>> circstd_1 = circstd(samples_1)
    >>> circstd_2 = circstd(samples_2)
-    >>> fig, (left, right) = plt.subplots(ncols=2)

    Plot the samples.

```suggestion
    >>> circstd_2 = circstd(samples_2)

    Plot the samples.
```

def circstd(samples, high=2*pi, low=0, axis=None, nan_policy='propagate', *,
    ...                       0.104, -0.136, -0.867, 0.012, 0.105])
    >>> circstd_1 = circstd(samples_1)
    >>> circstd_2 = circstd(samples_2)

    Plot the samples.
codereview_new_python_data_13376
class of similar problems can be solved together.

    Examples
    --------
    >>> import numpy as np
-    >>> from scipy import optimize
    >>> import matplotlib.pyplot as plt

    >>> def f(x):
    ...     return (x**3 - 1)  # only one real root at x = 1

This plt import should be next to the np import.

class of similar problems can be solved together.

    Examples
    --------
    >>> import numpy as np
    >>> import matplotlib.pyplot as plt
+    >>> from scipy import optimize

    >>> def f(x):
    ...     return (x**3 - 1)  # only one real root at x = 1
codereview_new_python_data_13377
def circmean(samples, high=2*pi, low=0, axis=None, nan_policy='propagate'):

    Plot and compare the results of *circmean* and the regular *mean*.

-    >>> plt.plot(np.cos(np.linspace(0, 2*np.pi, 500),
-    ...          np.sin(np.linspace(0, 2*np.pi, 500),
    ...          c='k')
    >>> plt.scatter(np.cos(angles) , np.sin(angles), c='k')
    >>> plt.scatter(np.cos(circmean), np.sin(circmean), c='b',

```suggestion
    >>> plt.plot(np.cos(np.linspace(0, 2*np.pi, 500)),
    ...          np.sin(np.linspace(0, 2*np.pi, 500)),
```

def circmean(samples, high=2*pi, low=0, axis=None, nan_policy='propagate'):

    Plot and compare the results of *circmean* and the regular *mean*.

+    >>> plt.plot(np.cos(np.linspace(0, 2*np.pi, 500)),
+    ...          np.sin(np.linspace(0, 2*np.pi, 500)),
    ...          c='k')
    >>> plt.scatter(np.cos(angles) , np.sin(angles), c='k')
    >>> plt.scatter(np.cos(circmean), np.sin(circmean), c='b',
codereview_new_python_data_13378
def test_infeasible_prob_16609():
_msg_iter = "Iteration limit reached. (HiGHS Status 14:"


-@pytest.mark.skipif(np.intp(0).itemsize < 8, reason="Unhandled 32-bit GCC FP bug")
@pytest.mark.slow
@pytest.mark.parametrize(["options", "msg"], [({"time_limit": 1}, _msg_time),
                                              ({"node_limit": 10}, _msg_iter)])

```suggestion
@pytest.mark.skipif(np.intp(0).itemsize < 8,
                    reason="Unhandled 32-bit GCC FP bug")
```

def test_infeasible_prob_16609():
_msg_iter = "Iteration limit reached. (HiGHS Status 14:"


+@pytest.mark.skipif(np.intp(0).itemsize < 8,
+                    reason="Unhandled 32-bit GCC FP bug")
@pytest.mark.slow
@pytest.mark.parametrize(["options", "msg"], [({"time_limit": 1}, _msg_time),
                                              ({"node_limit": 10}, _msg_iter)])
codereview_new_python_data_13379
def test_infeasible_prob_16609():
@pytest.mark.skipif(np.intp(0).itemsize < 8,
                    reason="Unhandled 32-bit GCC FP bug")
@pytest.mark.slow
-@pytest.mark.parametrize(["options", "msg"], [({"time_limit": 1}, _msg_time),
-                                              ({"node_limit": 10}, _msg_iter)])
def test_milp_timeout_16545(options, msg):
    # Ensure solution is not thrown away if MILP solver times out
    # -- see gh-16545

```suggestion
@pytest.mark.parametrize(["options", "msg"], [({"time_limit": 10}, _msg_time),
                                              ({"node_limit": 1}, _msg_iter)])
```

def test_infeasible_prob_16609():
@pytest.mark.skipif(np.intp(0).itemsize < 8,
                    reason="Unhandled 32-bit GCC FP bug")
@pytest.mark.slow
+@pytest.mark.parametrize(["options", "msg"], [({"time_limit": 10}, _msg_time),
+                                              ({"node_limit": 1}, _msg_iter)])
def test_milp_timeout_16545(options, msg):
    # Ensure solution is not thrown away if MILP solver times out
    # -- see gh-16545
codereview_new_python_data_13380
def test_infeasible_prob_16609():
@pytest.mark.skipif(np.intp(0).itemsize < 8,
                    reason="Unhandled 32-bit GCC FP bug")
@pytest.mark.slow
@pytest.mark.parametrize(["options", "msg"], [({"time_limit": 10}, _msg_time),
                                              ({"node_limit": 1}, _msg_iter)])
def test_milp_timeout_16545(options, msg):

```suggestion
@pytest.mark.slow
@pytest.mark.timeout(360)
```

I added a greater timeout to allow Azure to find at least one feasible
solution.

def test_infeasible_prob_16609():
@pytest.mark.skipif(np.intp(0).itemsize < 8,
                    reason="Unhandled 32-bit GCC FP bug")
@pytest.mark.slow
+@pytest.mark.timeout(360)
@pytest.mark.parametrize(["options", "msg"], [({"time_limit": 10}, _msg_time),
                                              ({"node_limit": 1}, _msg_iter)])
def test_milp_timeout_16545(options, msg):
codereview_new_python_data_13381
def design_matrix(cls, x, t, k, extrapolate=False):
        # of knots.
        if len(t) < 2 * k + 2:
            raise ValueError(f"Length t is not enough for k={k}.")

-        if not np.isfinite(t).all():
-            raise ValueError("Knots should not have nans or infs.")

        if extrapolate == 'periodic':
            # With periodic extrapolation we map x to the segment

This check is already done in `_as_float_array` (lines 410-411), so I suggest
removing it.

def design_matrix(cls, x, t, k, extrapolate=False):
        # of knots.
        if len(t) < 2 * k + 2:
            raise ValueError(f"Length t is not enough for k={k}.")

        if extrapolate == 'periodic':
            # With periodic extrapolation we map x to the segment
codereview_new_python_data_13382
def skew_d(d):  # skewness in terms of delta
        # MoM won't provide a good guess. Get out early.
        s = stats.skew(data)
        s_max = skew_d(1)
-        if np.abs(s) >= s_max and method == "mle":
            return super().fit(data, *args, **kwds)

        # If method is method of moments, we don't need the user's guesses.

All of this is just moved up from below.

def skew_d(d):  # skewness in terms of delta
        # MoM won't provide a good guess. Get out early.
        s = stats.skew(data)
        s_max = skew_d(1)
+        if abs(s) >= s_max and method == "mle" and fa is None and not args:
            return super().fit(data, *args, **kwds)

        # If method is method of moments, we don't need the user's guesses.
codereview_new_python_data_13383
def skew_d(d):  # skewness in terms of delta
        # MoM won't provide a good guess. Get out early.
        s = stats.skew(data)
        s_max = skew_d(1)
-        if np.abs(s) >= s_max and method == "mle":
            return super().fit(data, *args, **kwds)

        # If method is method of moments, we don't need the user's guesses.

Here's the fix for MLE.

def skew_d(d):  # skewness in terms of delta
        # MoM won't provide a good guess. Get out early.
        s = stats.skew(data)
        s_max = skew_d(1)
+        if abs(s) >= s_max and method == "mle" and fa is None and not args:
            return super().fit(data, *args, **kwds)

        # If method is method of moments, we don't need the user's guesses.
codereview_new_python_data_13384
def skew_d(d):  # skewness in terms of delta
        # MoM won't provide a good guess. Get out early.
        s = stats.skew(data)
        s_max = skew_d(1)
-        if np.abs(s) >= s_max and method == "mle":
            return super().fit(data, *args, **kwds)

        # If method is method of moments, we don't need the user's guesses.

```suggestion
        if abs(s) >= s_max and method == "mle" and fa is None and not args:
```

def skew_d(d):  # skewness in terms of delta
        # MoM won't provide a good guess. Get out early.
        s = stats.skew(data)
        s_max = skew_d(1)
+        if abs(s) >= s_max and method == "mle" and fa is None and not args:
            return super().fit(data, *args, **kwds)

        # If method is method of moments, we don't need the user's guesses.
codereview_new_python_data_13385
def test_incompatible_x_y(self, k):
    def test_broken_x(self, k):
        x = [0, 1, 1, 2, 3, 4]  # duplicates
        y = [0, 1, 2, 3, 4, 5]
-        with assert_raises(ValueError, match="Expect x to not have duplicates"):
            make_interp_spline(x, y, k=k)

        x = [0, 2, 1, 3, 4, 5]  # unsorted

```suggestion
        with assert_raises(ValueError, match="x to not have duplicates"):
```

line too long, can just shorten to this

def test_incompatible_x_y(self, k):
    def test_broken_x(self, k):
        x = [0, 1, 1, 2, 3, 4]  # duplicates
        y = [0, 1, 2, 3, 4, 5]
+        with assert_raises(ValueError, match="x to not have duplicates"):
            make_interp_spline(x, y, k=k)

        x = [0, 2, 1, 3, 4, 5]  # unsorted
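The shortened string is safe because a `match` argument is a regex applied
with `re.search`, so any substring of the message matches; this sketch assumes
the test's `assert_raises` behaves like `pytest.raises` in that respect:

```python
import pytest

def broken_x():
    raise ValueError("Expect x to not have duplicates")

# match is searched within the message, so a shorter pattern still matches
with pytest.raises(ValueError, match="x to not have duplicates"):
    broken_x()
```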
codereview_new_python_data_13386
def dblquad(func, a, b, gfun, hfun, args=(), epsabs=1.49e-8, epsrel=1.49e-8):
    :math:`\\iint^{\\infty}_{-\\infty} e^{-(x^{2} + y^{2})} \\,dy\\,dx`:

    >>> import numpy as np
-    >>> from scipy import integrate
    >>> f = lambda x, y: np.exp(-(x ** 2 + y ** 2))
    >>> integrate.dblquad(f, -np.inf, np.inf, -np.inf, np.inf)
        (3.141592653589777, 2.5173086737433208e-08)

Please move the NumPy import to the top and remove the `integrate` import, as
it is already present.

def dblquad(func, a, b, gfun, hfun, args=(), epsabs=1.49e-8, epsrel=1.49e-8):
    :math:`\\iint^{\\infty}_{-\\infty} e^{-(x^{2} + y^{2})} \\,dy\\,dx`:

    >>> import numpy as np
    >>> f = lambda x, y: np.exp(-(x ** 2 + y ** 2))
    >>> integrate.dblquad(f, -np.inf, np.inf, -np.inf, np.inf)
        (3.141592653589777, 2.5173086737433208e-08)
codereview_new_python_data_13387
def dblquad(func, a, b, gfun, hfun, args=(), epsabs=1.49e-8, epsrel=1.49e-8):
    :math:`\\iint^{\\infty}_{-\\infty} e^{-(x^{2} + y^{2})} \\,dy\\,dx`:

    >>> import numpy as np
-    >>> from scipy import integrate
    >>> f = lambda x, y: np.exp(-(x ** 2 + y ** 2))
    >>> integrate.dblquad(f, -np.inf, np.inf, -np.inf, np.inf)
        (3.141592653589777, 2.5173086737433208e-08)

```suggestion
    :math:`\\iint^{+\\infty}_{-\\infty} e^{-(x^{2} + y^{2})} \\,dy\\,dx`:
```

def dblquad(func, a, b, gfun, hfun, args=(), epsabs=1.49e-8, epsrel=1.49e-8):
    :math:`\\iint^{\\infty}_{-\\infty} e^{-(x^{2} + y^{2})} \\,dy\\,dx`:

    >>> import numpy as np
    >>> f = lambda x, y: np.exp(-(x ** 2 + y ** 2))
    >>> integrate.dblquad(f, -np.inf, np.inf, -np.inf, np.inf)
        (3.141592653589777, 2.5173086737433208e-08)
codereview_new_python_data_13388
def statistic(i, axis=-1, data=data_iv, unpaired_statistic=statistic):
    message = ("Either `bootstrap_result.bootstrap_distribution.size` or "
               "`n_resamples` must be positive.")
-    if n_resamples_int == 0 and (not bootstrap_result or
-            not bootstrap_result.bootstrap_distribution.size):
        raise ValueError(message)

    random_state = check_random_state(random_state)

`scipy/stats/_resampling.py:218:13: E128 continuation line under-indented for visual indent`

```suggestion
    if ((not bootstrap_result or
         not bootstrap_result.bootstrap_distribution.size)
            and n_resamples_int == 0):
```

Other suggestions welcome.

def statistic(i, axis=-1, data=data_iv, unpaired_statistic=statistic):
    message = ("Either `bootstrap_result.bootstrap_distribution.size` or "
               "`n_resamples` must be positive.")
+    if ((not bootstrap_result or
+         not bootstrap_result.bootstrap_distribution.size)
+            and n_resamples_int == 0):
        raise ValueError(message)

    random_state = check_random_state(random_state)
codereview_new_python_data_13389
def test_concatenate():
def test_concatenate_wrong_type():
    with pytest.raises(TypeError, match='Rotation objects only'):
        Rotation.concatenate([Rotation.identity(), 1, None])
-
-
def test_len_and_bool():
    rotation_single = Rotation([0, 0, 0, 1])
    rotation_multi = Rotation([[0, 0, 0, 1], [0, 0, 0, 1]])
-
    with pytest.raises(TypeError, match="Single rotation has no len()."):
        len(rotation_single)
    assert len(rotation_multi) == 2
-
    assert rotation_single
    assert rotation_multi

Could you add here:

```
# Regression test for gh-16663
```

Also, there are a couple of whitespace-related lint failures to resolve. Then
this looks good to go.

def test_concatenate():
def test_concatenate_wrong_type():
    with pytest.raises(TypeError, match='Rotation objects only'):
        Rotation.concatenate([Rotation.identity(), 1, None])
+
+
+# Regression test for gh-16663
def test_len_and_bool():
    rotation_single = Rotation([0, 0, 0, 1])
    rotation_multi = Rotation([[0, 0, 0, 1], [0, 0, 0, 1]])
+
    with pytest.raises(TypeError, match="Single rotation has no len()."):
        len(rotation_single)
    assert len(rotation_multi) == 2
+
    assert rotation_single
    assert rotation_multi
codereview_new_python_data_13390
def _compute_covariance(self):
                self._data_covariance[::-1, ::-1]).T[::-1, ::-1]

        self.covariance = self._data_covariance * self.factor**2
-        self.cho_cov = self._data_cho_cov * self.factor
        self.log_det = 2*np.log(np.diag(self.cho_cov
                                        * np.sqrt(2*pi))).sum()

```suggestion
        self.cho_cov = (self._data_cho_cov * self.factor).astype(np.float64)
```

def _compute_covariance(self):
                self._data_covariance[::-1, ::-1]).T[::-1, ::-1]

        self.covariance = self._data_covariance * self.factor**2
+        self.cho_cov = (self._data_cho_cov * self.factor).astype(np.float64)
        self.log_det = 2*np.log(np.diag(self.cho_cov
                                        * np.sqrt(2*pi))).sum()
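The record gives no rationale for the cast; one plausible reading, shown here
purely as an assumption, is dtype control: a lower-precision dataset would
otherwise propagate its dtype into the Cholesky factor and the log-determinant
computed from it.

```python
import numpy as np

# A float32 covariance yields a float32 Cholesky factor...
cov32 = np.eye(2, dtype=np.float32)
cho = np.linalg.cholesky(cov32)
print(cho.dtype)  # float32

# ...so without an explicit cast, quantities derived from it (such as a
# log-determinant) would also be computed in float32.
print((cho * np.float32(0.5)).astype(np.float64).dtype)  # float64
```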
codereview_new_python_data_13391
def test_gaussian_kde_subclassing():
    assert_array_almost_equal_nulp(ys, y2, nulp=10)

    # subclass 3 was removed because we have no obligation to maintain support
-    # for manual invocation of private methods

    # subclass 4
    kde4 = _kde_subclass4(x1)

```suggestion
    # for user invocation of private methods
```

def test_gaussian_kde_subclassing():
    assert_array_almost_equal_nulp(ys, y2, nulp=10)

    # subclass 3 was removed because we have no obligation to maintain support
+    # for user invocation of private methods

    # subclass 4
    kde4 = _kde_subclass4(x1)
codereview_new_python_data_13392
def _compute_covariance(self):
    @property
    def inv_cov(self):
        self.factor = self.covariance_factor()
        self._data_covariance = atleast_2d(cov(self.dataset, rowvar=1,
                                           bias=False, aweights=self.weights))

```suggestion
    def inv_cov(self):
        # Re-compute from scratch each time because I'm not sure how this is
        # used in the wild. (Perhaps users change the `dataset`, since it's
        # not a private attribute?) `_compute_covariance` used to recalculate
        # all these, so we'll recalculate everything now that this is a
        # property.
```

def _compute_covariance(self):
    @property
    def inv_cov(self):
+        # Re-compute from scratch each time because I'm not sure how this is
+        # used in the wild. (Perhaps users change the `dataset`, since it's
+        # not a private attribute?) `_compute_covariance` used to recalculate
+        # all these, so we'll recalculate everything now that this is a
+        # property.
        self.factor = self.covariance_factor()
        self._data_covariance = atleast_2d(cov(self.dataset, rowvar=1,
                                           bias=False, aweights=self.weights))
codereview_new_python_data_13393
def test_svd_LM_ones_matrix(self, shape, dtype):
        # Check some generic properties of svd.
        if (self.solver == 'arpack' and dtype is complex):
-            pytest.skip("ARPACK has additional restriction for complex dtype")
        _check_svds(A, k, U, s, VH, check_usvh_A=True, check_svd=False)

        # Check that the largest singular value is near sqrt(n*m)

```suggestion
            pytest.skip("The ARPACK-based svds does not reliably produce "
                        "orthogonal vectors in VH when there are repeated "
                        "singular values")
```

def test_svd_LM_ones_matrix(self, shape, dtype):
        # Check some generic properties of svd.
        if (self.solver == 'arpack' and dtype is complex):
+            pytest.skip("The ARPACK-based svds does not reliably produce "
+                        "orthogonal vectors in VH when there are repeated "
+                        "singular values")
        _check_svds(A, k, U, s, VH, check_usvh_A=True, check_svd=False)

        # Check that the largest singular value is near sqrt(n*m)
codereview_new_python_data_13394
def test_sg_filter_valid_window_length_3d():
    savgol_filter(x, window_length=29, polyorder=3, mode='interp')

-        with pytest.raises(ValueError, match='window_length must be less than'):
        # window_length is more than x.shape[-1].
        savgol_filter(x, window_length=31, polyorder=3, mode='interp')

```suggestion
    with pytest.raises(ValueError, match='window_length must be less than'):
```

def test_sg_filter_valid_window_length_3d():
    savgol_filter(x, window_length=29, polyorder=3, mode='interp')

+    with pytest.raises(ValueError, match='window_length must be less than'):
        # window_length is more than x.shape[-1].
        savgol_filter(x, window_length=31, polyorder=3, mode='interp')
codereview_new_python_data_13395
def anderson_ksamp(samples, midrank=True):
    distribution can be rejected at the 5% level because the returned
    test value is greater than the critical value for 5% (1.961) but
    not at the 2.5% level. The interpolation gives an approximate
-    p-value of 5.0%.

    >>> res = stats.anderson_ksamp([rng.normal(size=50),
    ... rng.normal(size=30), rng.normal(size=20)])

Thanks for updating this.

def anderson_ksamp(samples, midrank=True):
    distribution can be rejected at the 5% level because the returned
    test value is greater than the critical value for 5% (1.961) but
    not at the 2.5% level. The interpolation gives an approximate
+    p-value of 4.99%.

    >>> res = stats.anderson_ksamp([rng.normal(size=50),
    ... rng.normal(size=30), rng.normal(size=20)])
codereview_new_python_data_13396
def test_pvalue_literature(self):
        assert_allclose(pvalue, 1/1001)

    @pytest.mark.slow
-    @pytest.mark.parametrize("is_twosamp", [True, False])
-    def test_alias(self, is_twosamp):
-        x = np.arange(100)
-        y = np.arange(100)
-        res = stats.multiscale_graphcorr(x, y, is_twosamp=is_twosamp)
        assert_equal(res.stat, res.statistic)

Do we need a new test or can we tack this on to an existing test?

def test_pvalue_literature(self):
        assert_allclose(pvalue, 1/1001)

    @pytest.mark.slow
+    def test_alias(self):
+        np.random.seed(12345678)
+
+        # generate x and y
+        x, y = self._simulations(samps=100, dims=1, sim_type="linear")
+
+        res = stats.multiscale_graphcorr(x, y, random_state=1)
        assert_equal(res.stat, res.statistic)
codereview_new_python_data_13397
def curve_fit(f, xdata, ydata, p0=None, sigma=None, absolute_sigma=False,
        The model function, f(x, ...). It must take the independent
        variable as the first argument and the parameters to fit as
        separate remaining arguments.
-    xdata : array_like or object
        The independent variable where the data is measured.
        Should usually be an M-length sequence or an (k,M)-shaped array for
        functions with k predictors, and each element should be float

Agreed that "any object" is far too broad. I wonder if we should also remove
the `or object` in the type info after `xdata` above as well? I don't think we
normally use type info that broad in docstrings.

def curve_fit(f, xdata, ydata, p0=None, sigma=None, absolute_sigma=False,
        The model function, f(x, ...). It must take the independent
        variable as the first argument and the parameters to fit as
        separate remaining arguments.
+    xdata : array_like
        The independent variable where the data is measured.
        Should usually be an M-length sequence or an (k,M)-shaped array for
        functions with k predictors, and each element should be float
codereview_new_python_data_14677
def testConvertTimeValueFromSparkToH2OAndBack(spark, hc, timeZone, sparkType):
    df = spark.createDataFrame(data, ['strings']).select(col('strings').cast(sparkType).alias('time'))
    hf = hc.asH2OFrame(df)
-    hfResultString = hf.__unicode__()
-    hfParsedItems = hfResultString.split('\n')[1:5]
    hfParsedItems.sort()
    assert hfParsedItems == expected

I'm not sure the code is expressive enough; the specified upper bound seems
weird to me. I would probably write something like:

`# skip column name`
`hfParsedItems = hfResultString.split('\n')[1:]`

def testConvertTimeValueFromSparkToH2OAndBack(spark, hc, timeZone, sparkType):
    df = spark.createDataFrame(data, ['strings']).select(col('strings').cast(sparkType).alias('time'))
    hf = hc.asH2OFrame(df)
+    hfResultString = hf.to_str()
+    hfParsedItems = hfResultString.split('\n')[1:-2]
    hfParsedItems.sort()
    assert hfParsedItems == expected
codereview_new_python_data_14678
def testConvertTimeValueFromSparkToH2OAndBack(spark, hc, timeZone, sparkType):
    df = spark.createDataFrame(data, ['strings']).select(col('strings').cast(sparkType).alias('time'))
    hf = hc.asH2OFrame(df)
-    hfResultString = hf.__unicode__()
-    hfParsedItems = hfResultString.split('\n')[1:5]
    hfParsedItems.sort()
    assert hfParsedItems == expected

Why do you use `__unicode__()` here? This is a remnant from Py2.7 and is
obsolete. Is SW also supporting Py2.7? FYI, `hf.to_str()` should now provide a
correct unicode representation in all versions of Py.

def testConvertTimeValueFromSparkToH2OAndBack(spark, hc, timeZone, sparkType):
    df = spark.createDataFrame(data, ['strings']).select(col('strings').cast(sparkType).alias('time'))
    hf = hc.asH2OFrame(df)
+    hfResultString = hf.to_str()
+    hfParsedItems = hfResultString.split('\n')[1:-2]
    hfParsedItems.sort()
    assert hfParsedItems == expected
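A small sketch of the slicing adopted in these two records, with a made-up
rendering standing in for the real `H2OFrame.to_str()` output (whose exact
footer lines are an assumption here):

```python
# Hypothetical table rendering: header, data rows, blank line, footer.
rendered = ("time\n"
            "2020-01-01 00:00:00\n"
            "2020-01-02 00:00:00\n"
            "\n"
            "[2 rows x 1 column]")

rows = rendered.split('\n')[1:-2]  # skip column name; drop blank line + footer
print(rows)  # ['2020-01-01 00:00:00', '2020-01-02 00:00:00']
```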
codereview_new_python_data_14680
def __add_url_to_classloader(gateway, url):
    jvm = gateway.jvm
    loader = jvm.Thread.currentThread().getContextClassLoader()
    logger = Initializer.__get_logger(jvm)
    while loader:
        try:
-            urlClassLoaderClass = jvm.py4j.reflection.ReflectionUtil.classForName("java.net.URLClassLoader")
            classClass = gateway.jvm.Class
            classArray = gateway.new_array(classClass, 1)
            classArray[0] = url.getClass()

Why do we have this in the while-loop? The instance of urlClassLoaderClass
will not change in the loop.

def __add_url_to_classloader(gateway, url):
    jvm = gateway.jvm
    loader = jvm.Thread.currentThread().getContextClassLoader()
    logger = Initializer.__get_logger(jvm)
+    urlClassLoaderClass = jvm.py4j.reflection.ReflectionUtil.classForName("java.net.URLClassLoader")
    while loader:
        try:
            classClass = gateway.jvm.Class
            classArray = gateway.new_array(classClass, 1)
            classArray[0] = url.getClass()
codereview_new_python_data_14681
def __add_url_to_classloader(gateway, url):
    jvm = gateway.jvm
    loader = jvm.Thread.currentThread().getContextClassLoader()
    logger = Initializer.__get_logger(jvm)
    while loader:
        try:
-            urlClassLoaderClass = jvm.py4j.reflection.ReflectionUtil.classForName("java.net.URLClassLoader")
            classClass = gateway.jvm.Class
            classArray = gateway.new_array(classClass, 1)
            classArray[0] = url.getClass()

Is it possible this will actually load the class using a different
classloader, resulting in a different instance of the class object?

def __add_url_to_classloader(gateway, url):
    jvm = gateway.jvm
    loader = jvm.Thread.currentThread().getContextClassLoader()
    logger = Initializer.__get_logger(jvm)
+    urlClassLoaderClass = jvm.py4j.reflection.ReflectionUtil.classForName("java.net.URLClassLoader")
    while loader:
        try:
            classClass = gateway.jvm.Class
            classArray = gateway.new_array(classClass, 1)
            classArray[0] = url.getClass()
codereview_new_python_data_14682
def __add_url_to_classloader(gateway, url):
    jvm = gateway.jvm
    loader = jvm.Thread.currentThread().getContextClassLoader()
    logger = Initializer.__get_logger(jvm)
    while loader:
        try:
-            urlClassLoaderClass = jvm.py4j.reflection.ReflectionUtil.classForName("java.net.URLClassLoader")
            classClass = gateway.jvm.Class
            classArray = gateway.new_array(classClass, 1)
            classArray[0] = url.getClass()

If addURL were public, we could just use `loader.getMethod`, correct? Is this
why we need to look up the method on URLClassLoader directly?

def __add_url_to_classloader(gateway, url):
    jvm = gateway.jvm
    loader = jvm.Thread.currentThread().getContextClassLoader()
    logger = Initializer.__get_logger(jvm)
+    urlClassLoaderClass = jvm.py4j.reflection.ReflectionUtil.classForName("java.net.URLClassLoader")
    while loader:
        try:
            classClass = gateway.jvm.Class
            classArray = gateway.new_array(classClass, 1)
            classArray[0] = url.getClass()
codereview_new_python_data_14685
def test_h2o_mojo_pipeline_contributions(spark):
    mojo = H2OMOJOPipelineModel.createFromMojo(mojo_path, settings)
    df = spark.read.csv(data_path, header=True, inferSchema=True)
-    predictions_and_contributions = mojo.transform(df).select("prediction.*")
    feature_columns = 1
    prediction_columns = 4
    bias = 1
    contribution_columns = prediction_columns * (feature_columns + bias)
-    assert prediction_columns + contribution_columns == len(predictions_and_contributions.columns)
-
-    contributions = [c for c in predictions_and_contributions.columns if c.startswith("contrib_")]
-    assert contribution_columns == len(contributions)
\ No newline at end of file

nit: missing new line

def test_h2o_mojo_pipeline_contributions(spark):
    mojo = H2OMOJOPipelineModel.createFromMojo(mojo_path, settings)
    df = spark.read.csv(data_path, header=True, inferSchema=True)
+    contributions = mojo.transform(df).select("contribution.*")
    feature_columns = 1
    prediction_columns = 4
    bias = 1
    contribution_columns = prediction_columns * (feature_columns + bias)
\ No newline at end of file
+    assert contribution_columns == len(contributions.columns)
+    assert all(c.startswith("contrib_") for c in contributions.columns)
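The ``select("contribution.*")`` pattern above expands a struct column into one
column per field, which is standard PySpark behavior. A self-contained sketch
with a fabricated two-field struct (the field names are illustrative):

```python
from pyspark.sql import SparkSession
import pyspark.sql.functions as F

spark = SparkSession.builder.master("local[1]").getOrCreate()
df = spark.range(1).select(
    F.struct(F.lit(0.1).alias("contrib_a"),
             F.lit(0.2).alias("contrib_b")).alias("contribution"))

# "contribution.*" flattens the struct into its fields.
print(df.select("contribution.*").columns)  # ['contrib_a', 'contrib_b']
```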
codereview_new_python_data_14743
def test_is_start_event(self):
        self.assertEqual(17, len(starts))

    def test_is_end_event(self):
-        starts = [e for e in LOG if log_utils.is_end_event(e)]
-        self.assertEqual(17, len(starts))

    def test_filter_and_sort_log_entries(self):
        filtered = log_utils.filter_and_sort_log_entries(LOG)

Nit: `ends`

def test_is_start_event(self):
        self.assertEqual(17, len(starts))

    def test_is_end_event(self):
+        ends = [e for e in LOG if log_utils.is_end_event(e)]
+        self.assertEqual(17, len(ends))

    def test_filter_and_sort_log_entries(self):
        filtered = log_utils.filter_and_sort_log_entries(LOG)
codereview_new_python_data_14744
def build_windows_from_events(backpressure_events, window_width_in_hours=1):
    """
-    Generate histogram-friendly time windows with counts of backpressuring durations within each window.

    :param backpressure_events: a list of BackpressureEvents to be broken up into time windows
    :param window_width_in_hours: how wide each time window should be in hours

I think what's in this PR is correct, but it's not what I would think of as a
histogram. I imagine that would be something like binning the backpressure
durations to get a sense of how long they are when they happen (and is there
anything interesting in the duration distribution). Is there a different way I
should be thinking about this?

def build_windows_from_events(backpressure_events, window_width_in_hours=1):
    """
+    Generate barchart-friendly time windows with counts of backpressuring durations within each window.

    :param backpressure_events: a list of BackpressureEvents to be broken up into time windows
    :param window_width_in_hours: how wide each time window should be in hours
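A sketch of the distinction drawn in this exchange: counting events per fixed
time window (what the function builds) versus binning the event durations
themselves (what the reviewer calls a histogram). The event tuples and bin
edges below are made up for illustration, not the project's real data model.

```python
from collections import Counter
from datetime import datetime, timedelta

# (start time, duration in seconds) of some made-up backpressure events
events = [(datetime(2023, 1, 1, 0, 10), 30),
          (datetime(2023, 1, 1, 0, 40), 90),
          (datetime(2023, 1, 1, 2, 5), 15)]
epoch = datetime(2023, 1, 1)

# Barchart-style: count events per fixed-width time window.
window = timedelta(hours=1)
per_window = Counter((start - epoch) // window for start, _ in events)
print(dict(per_window))  # {0: 2, 2: 1}

# Histogram-style: bin the durations to see their distribution.
edges = [0, 30, 60, 120]
per_bin = Counter(sum(d >= e for e in edges) - 1 for _, d in events)
print(dict(per_bin))  # bin index -> count: {1: 1, 2: 1, 0: 1}
```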