diff --git a/doc/whats_new/v0.24.rst b/doc/whats_new/v0.24.rst
index 60bf06cef8d02..74684c2416bed 100644
--- a/doc/whats_new/v0.24.rst
+++ b/doc/whats_new/v0.24.rst
@@ -76,6 +76,15 @@ Changelog
   redundant with the `dictionary` attribute and constructor parameter.
   :pr:`17679` by :user:`Xavier Dupré `.
 
+:mod:`sklearn.dummy`
+....................
+
+- |Enhancement| Add a parameter `interpolation` to
+  :class:`dummy.DummyRegressor` to choose the type of interpolation used with
+  the strategies `median` and `quantile`. Note that from 0.26 onwards, the
+  interpolation will always be `'linear'`, whether or not `sample_weight` is
+  passed. :pr:`17775` by :user:`Guillaume Lemaitre `.
+
 :mod:`sklearn.ensemble`
 .......................
 
@@ -248,6 +257,15 @@ Changelog
   :meth:`tree.DecisionTreeRegressor.fit`, and has no effect.
  :pr:`17614` by :user:`Juan Carlos Alfaro Jiménez `.
 
+:mod:`sklearn.utils`
+....................
+
+- |Enhancement| :func:`sklearn.utils.stats._weighted_percentile` takes a new
+  parameter `interpolation` that allows choosing how to interpolate the
+  percentile value when it lies between two data points.
+  :pr:`17768` by :user:`Guillaume Lemaitre ` and
+  :user:`Michael Recachinas `.
+
 Code and Documentation Contributors
 -----------------------------------
diff --git a/setup.cfg b/setup.cfg
index f086993b26a29..95e4417b816e1 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -12,7 +12,7 @@ addopts =
     --ignore examples
     --ignore maint_tools
     --doctest-modules
-    --disable-pytest-warnings
+    # --disable-pytest-warnings
     -rxXs
 
 filterwarnings =
diff --git a/sklearn/dummy.py b/sklearn/dummy.py
index cee7294ab5afd..912afba5f364b 100644
--- a/sklearn/dummy.py
+++ b/sklearn/dummy.py
@@ -410,22 +410,42 @@ class DummyRegressor(MultiOutputMixin, RegressorMixin, BaseEstimator):
         * "constant": always predicts a constant value that is provided by
           the user.
 
-    constant : int or float or array-like of shape (n_outputs,)
+    constant : int or float or array-like of shape (n_outputs,), default=None
         The explicit constant as predicted by the "constant" strategy. This
         parameter is useful only for the "constant" strategy.
 
-    quantile : float in [0.0, 1.0]
+    quantile : float, default=None
         The quantile to predict using the "quantile" strategy. A quantile of
         0.5 corresponds to the median, while 0.0 to the minimum and 1.0 to
         the maximum.
 
+    interpolation : {"linear", "lower", "higher", "nearest"}, default=None
+        When `strategy="median"` or `strategy="quantile"`, this parameter is
+        the interpolation method to use when the desired median or quantile
+        lies between data points `i` and `j`:
+
+        * `"linear"`: `i + (j - i) * fraction`, where `fraction` is the
+          fractional part of the index surrounded by `i` and `j`;
+        * `"lower"`: `i`;
+        * `"higher"`: `j`;
+        * `"nearest"`: `i` or `j`, whichever is nearest.
+
+        By default, if `sample_weight` is `None`, `interpolation="linear"`,
+        otherwise `interpolation="nearest"`.
+
+        .. versionadded:: 0.24
+
+        .. versionchanged:: 0.24
+           From 0.26, `interpolation` will default to `"linear"`, whether the
+           regressor is fitted with or without `sample_weight`.
+
     Attributes
     ----------
-    constant_ : array, shape (1, n_outputs)
+    constant_ : array of shape (1, n_outputs)
         Mean or median or quantile of the training targets or constant value
         given by the user.
 
-    n_outputs_ : int,
+    n_outputs_ : int
         Number of outputs.
 
     Examples
@@ -443,10 +463,12 @@ class DummyRegressor(MultiOutputMixin, RegressorMixin, BaseEstimator):
     0.0
     """
    @_deprecate_positional_args
-    def __init__(self, *, strategy="mean", constant=None, quantile=None):
+    def __init__(self, *, strategy="mean", constant=None, quantile=None,
+                 interpolation=None):
         self.strategy = strategy
         self.constant = constant
         self.quantile = quantile
+        self.interpolation = interpolation
 
     def fit(self, X, y, sample_weight=None):
         """Fit the random regressor.
@@ -485,29 +507,63 @@ def fit(self, X, y, sample_weight=None):
         if sample_weight is not None:
             sample_weight = _check_sample_weight(sample_weight, X)
 
+        # FIXME: change the default interpolation to "linear" in 0.26
+        if self.strategy in ("median", "quantile"):
+            if sample_weight is not None:
+                if self.interpolation is None:
+                    warnings.warn(
+                        "From 0.26 and onward, interpolation will be 'linear' "
+                        "by default when fitting with some sample weights. You"
+                        " can force `interpolation='linear'` to get the new "
+                        "behaviour and silence this warning.",
+                        FutureWarning
+                    )
+                    interpolation = "nearest"
+                else:
+                    interpolation = self.interpolation
+            else:
+                interpolation = (
+                    "linear" if self.interpolation is None
+                    else self.interpolation
+                )
+
         if self.strategy == "mean":
             self.constant_ = np.average(y, axis=0, weights=sample_weight)
 
         elif self.strategy == "median":
             if sample_weight is None:
-                self.constant_ = np.median(y, axis=0)
+                self.constant_ = np.percentile(
+                    y, q=50.0, axis=0, interpolation=interpolation,
+                )
             else:
-                self.constant_ = [_weighted_percentile(y[:, k], sample_weight,
-                                                       percentile=50.)
-                                  for k in range(self.n_outputs_)]
+                self.constant_ = [
+                    _weighted_percentile(
+                        y[:, k], sample_weight, percentile=50.,
+                        interpolation=interpolation,
+                    )
+                    for k in range(self.n_outputs_)
+                ]
 
         elif self.strategy == "quantile":
             if self.quantile is None or not np.isscalar(self.quantile):
-                raise ValueError("Quantile must be a scalar in the range "
-                                 "[0.0, 1.0], but got %s." % self.quantile)
+                raise ValueError(
+                    f"Quantile must be a scalar in the range [0.0, 1.0], "
+                    f"but got {self.quantile}."
+                )
 
             percentile = self.quantile * 100.0
             if sample_weight is None:
-                self.constant_ = np.percentile(y, axis=0, q=percentile)
+                self.constant_ = np.percentile(
+                    y, q=percentile, axis=0, interpolation=interpolation,
+                )
             else:
-                self.constant_ = [_weighted_percentile(y[:, k], sample_weight,
-                                                       percentile=percentile)
-                                  for k in range(self.n_outputs_)]
+                self.constant_ = [
+                    _weighted_percentile(
+                        y[:, k], sample_weight, percentile=percentile,
+                        interpolation=interpolation,
+                    )
+                    for k in range(self.n_outputs_)
+                ]
 
         elif self.strategy == "constant":
             if self.constant is None:
diff --git a/sklearn/ensemble/tests/test_gradient_boosting.py b/sklearn/ensemble/tests/test_gradient_boosting.py
index fc4bf59671ef0..f098649464197 100644
--- a/sklearn/ensemble/tests/test_gradient_boosting.py
+++ b/sklearn/ensemble/tests/test_gradient_boosting.py
@@ -229,7 +229,7 @@ def check_regression_dataset(loss, subsample):
         y_pred = reg.predict(X_reg)
         mse = mean_squared_error(y_reg, y_pred)
-        assert mse < 0.04
+        assert mse < 0.05
 
         if last_y_pred is not None:
             # FIXME: We temporarily bypass this test. This is due to the fact
diff --git a/sklearn/tests/test_dummy.py b/sklearn/tests/test_dummy.py
index 280ade175bc4a..868b15277f30d 100644
--- a/sklearn/tests/test_dummy.py
+++ b/sklearn/tests/test_dummy.py
@@ -664,12 +664,20 @@ def test_dummy_regressor_sample_weight(n_samples=10):
     est = DummyRegressor(strategy="mean").fit(X, y, sample_weight)
     assert est.constant_ == np.average(y, weights=sample_weight)
 
-    est = DummyRegressor(strategy="median").fit(X, y, sample_weight)
-    assert est.constant_ == _weighted_percentile(y, sample_weight, 50.)
-
-    est = DummyRegressor(strategy="quantile", quantile=.95).fit(X, y,
-                                                                sample_weight)
-    assert est.constant_ == _weighted_percentile(y, sample_weight, 95.)
+    interpolation = "linear"
+    est = DummyRegressor(strategy="median", interpolation=interpolation)
+    est.fit(X, y, sample_weight)
+    assert est.constant_ == _weighted_percentile(
+        y, sample_weight, 50., interpolation=interpolation,
+    )
+
+    est = DummyRegressor(
+        strategy="quantile", quantile=.95, interpolation=interpolation,
+    )
+    est.fit(X, y, sample_weight)
+    assert est.constant_ == _weighted_percentile(
+        y, sample_weight, 95., interpolation=interpolation,
+    )
 
 
 def test_dummy_regressor_on_3D_array():
@@ -764,3 +772,64 @@ def test_n_features_in_(Dummy):
     assert not hasattr(d, 'n_features_in_')
     d.fit(X, y)
     assert d.n_features_in_ is None
+
+
+@pytest.mark.filterwarnings("ignore:From 0.26 and onward, interpolation will")
+@pytest.mark.parametrize(
+    "strategy, quantile", [("median", 0.5), ("quantile", 0.9)]
+)
+def test_dummy_regressor_default_legacy_behaviour(strategy, quantile):
+    # DummyRegressor will interpolate in the following manner:
+    # * 'linear' if we are using np.median and np.percentile, which is the
+    #   case when `sample_weight` is None;
+    # * 'nearest' if we are using `_weighted_percentile`, which is the case
+    #   when `sample_weight` is not None.
+
+    rng = np.random.RandomState(seed=1)
+
+    n_samples = 100
+    X = [[0]] * n_samples
+    y = rng.rand(n_samples)
+    sample_weight = rng.rand(n_samples)
+
+    params = {"strategy": strategy, "quantile": quantile}
+    regressor = DummyRegressor(**params)
+    percentile = quantile * 100
+
+    regressor.fit(X, y)
+    assert regressor.constant_ == pytest.approx(
+        np.percentile(y, q=percentile, axis=0)
+    )
+
+    regressor.fit(X, y, sample_weight=sample_weight)
+    assert regressor.constant_ == pytest.approx(
+        _weighted_percentile(
+            y, sample_weight, percentile=percentile, interpolation="nearest",
+        )
+    )
+
+
+@pytest.mark.parametrize(
+    "strategy, quantile", [("median", 0.5), ("quantile", 0.9)]
+)
+@pytest.mark.parametrize(
+    "interpolation, WarningType, expected_n_warnings",
+    [(None, FutureWarning, 1), ("linear", None, 0)]
+)
+def test_dummy_regressor_future_warning_interpolation(
+    strategy, quantile, interpolation, WarningType, expected_n_warnings,
+):
+    rng = np.random.RandomState(seed=1)
+
+    n_samples = 100
+    X = [[0]] * n_samples
+    y = rng.rand(n_samples)
+    sample_weight = rng.rand(n_samples)
+
+    regressor = DummyRegressor(
+        strategy=strategy, quantile=quantile, interpolation=interpolation,
+    )
+
+    with pytest.warns(WarningType) as record:
+        regressor.fit(X, y, sample_weight=sample_weight)
+    assert len(record) == expected_n_warnings
diff --git a/sklearn/utils/stats.py b/sklearn/utils/stats.py
index 7b44575e97b33..bea2b52341daf 100644
--- a/sklearn/utils/stats.py
+++ b/sklearn/utils/stats.py
@@ -1,10 +1,13 @@
+from collections.abc import Iterable
+
 import numpy as np
 
 from .extmath import stable_cumsum
 from .fixes import _take_along_axis
 
 
-def _weighted_percentile(array, sample_weight, percentile=50):
+def _weighted_percentile(array, sample_weight, percentile=50,
+                         interpolation="nearest"):
     """Compute weighted percentile
 
     Computes lower weighted percentile. If `array` is a 2D array, the
@@ -15,47 +18,143 @@ def _weighted_percentile(array, sample_weight, percentile=50):
 
     Parameters
     ----------
-    array : 1D or 2D array
+    array : ndarray of shape (n,) or (n, m)
         Values to take the weighted percentile of.
 
-    sample_weight: 1D or 2D array
+    sample_weight : ndarray of shape (n,) or (n, m)
         Weights for each value in `array`. Must be same shape as `array` or
         of shape `(array.shape[0],)`.
 
-    percentile: int, default=50
+    percentile : int or float, default=50
         Percentile to compute. Must be value between 0 and 100.
 
+    interpolation : {"linear", "lower", "higher", "nearest"}, default="nearest"
+        The interpolation method to use when the percentile lies between
+        data points `i` and `j`:
+
+        * `"linear"`: `i + (j - i) * fraction`, where `fraction` is the
+          fractional part of the index surrounded by `i` and `j`;
+        * `"lower"`: `i`;
+        * `"higher"`: `j`;
+        * `"nearest"`: `i` or `j`, whichever is nearest (default).
+
+        .. versionadded:: 0.24
+
     Returns
     -------
-    percentile : int if `array` 1D, ndarray if `array` 2D
+    percentile_value : float or int if `array` of shape (n,), otherwise\
+            ndarray of shape (m,)
         Weighted percentile.
     """
+    possible_interpolation = ("linear", "lower", "higher", "nearest")
+    if interpolation not in possible_interpolation:
+        raise ValueError(
+            f"'interpolation' should be one of "
+            f"{', '.join(possible_interpolation)}. Got '{interpolation}' "
+            f"instead."
+        )
+
+    if np.any(np.count_nonzero(sample_weight, axis=0) < 1):
+        raise ValueError(
+            "All weights cannot be null when computing a weighted percentile."
+        )
+
     n_dim = array.ndim
     if n_dim == 0:
         return array[()]
     if array.ndim == 1:
         array = array.reshape((-1, 1))
-    # When sample_weight 1D, repeat for each array.shape[1]
     if (array.shape != sample_weight.shape and
             array.shape[0] == sample_weight.shape[0]):
+        # when `sample_weight` is 1D, we repeat it for each column of `array`
         sample_weight = np.tile(sample_weight, (array.shape[1], 1)).T
+
+    n_rows, n_cols = array.shape
+
     sorted_idx = np.argsort(array, axis=0)
     sorted_weights = _take_along_axis(sample_weight, sorted_idx, axis=0)
+
+    percentile = np.array([percentile / 100] * n_cols)
+    cum_weights = stable_cumsum(sorted_weights, axis=0)
+
+    def _squeeze_arr(arr, n_dim):
+        return arr[0] if n_dim == 1 else arr
+
+    # The percentile can be computed with three different alternatives:
+    # https://en.wikipedia.org/wiki/Percentile
+    # These three alternatives depend on the value of a parameter C. NumPy
+    # uses the variant where C=0, which gives a strictly monotonically
+    # increasing function defined as:
+    # P = (x - 1) / (N - 1); x in [1, N]
+    # The weighted percentile changes this formula by taking the weights
+    # into account instead of the data frequency:
+    # P_w = (x - w) / (S_w - w), x in [1, N], w being the weight and S_w
+    # being the sum of the weights.
+    adjusted_percentile = (cum_weights - sorted_weights)
+    with np.errstate(invalid="ignore"):
+        adjusted_percentile /= cum_weights[-1] - sorted_weights
+        nan_mask = np.isnan(adjusted_percentile)
+        adjusted_percentile[nan_mask] = 1
+
+    if interpolation in ("lower", "higher", "nearest"):
+        percentile_idx = np.array([
+            np.searchsorted(adjusted_percentile[:, col], percentile[col],
+                            side="left")
+            for col in range(n_cols)
+        ])
+
+        if interpolation == "lower" and np.all(percentile < 1):
+            # P = 100 is a corner case for "lower"
+            percentile_idx -= 1
+        elif interpolation == "nearest" and np.all(percentile < 1):
+            for col in range(n_cols):
+                error_higher = abs(
+                    adjusted_percentile[percentile_idx[col], col] -
+                    percentile[col]
+                )
+                error_lower = abs(
+                    adjusted_percentile[percentile_idx[col] - 1, col] -
+                    percentile[col]
+                )
+                if error_higher >= error_lower:
+                    percentile_idx[col] -= 1
+
+        percentile_idx = np.apply_along_axis(
+            lambda x: np.clip(x, 0, n_rows - 1), axis=0,
+            arr=percentile_idx
+        )
+
+        percentile_value = array[
+            sorted_idx[percentile_idx, np.arange(n_cols)],
+            np.arange(n_cols)
+        ]
+        percentile_value = _squeeze_arr(percentile_value, n_dim)
+
+    else:  # interpolation == "linear"
+        percentile_value = np.array([
+            np.interp(
+                x=percentile[col],
+                xp=adjusted_percentile[:, col],
+                fp=array[sorted_idx[:, col], col],
+            )
+            for col in range(n_cols)
+        ])
+
+        percentile_value = _squeeze_arr(percentile_value, n_dim)
+
+    single_sample_weight = np.count_nonzero(sample_weight, axis=0)
+    if np.any(single_sample_weight == 1):
+        # edge case where a single weight is non-zero, in which case the
+        # methods above fail
+        if not isinstance(percentile_value, Iterable):
+            percentile_value = _squeeze_arr(
+                array[np.nonzero(sample_weight)], n_dim
+            )
+        else:
+            percentile_value = np.array([
+                array[np.flatnonzero(sample_weight[:, col])[0], col]
+                if n_nonzero == 1 else percentile_value[col]
+                for col, n_nonzero in enumerate(single_sample_weight)
+            ])
 
-    # Find index of median prediction for each sample
-    weight_cdf = stable_cumsum(sorted_weights, axis=0)
-    adjusted_percentile = percentile / 100 * weight_cdf[-1]
-    percentile_idx = np.array([
-        np.searchsorted(weight_cdf[:, i], adjusted_percentile[i])
-        for i in range(weight_cdf.shape[1])
-    ])
-    percentile_idx = np.array(percentile_idx)
-    # In rare cases, percentile_idx equals to sorted_idx.shape[0]
-    max_idx = sorted_idx.shape[0] - 1
-    percentile_idx = np.apply_along_axis(lambda x: np.clip(x, 0, max_idx),
-                                         axis=0, arr=percentile_idx)
-
-    col_index = np.arange(array.shape[1])
-    percentile_in_sorted = sorted_idx[percentile_idx, col_index]
-    percentile = array[percentile_in_sorted, col_index]
-    return percentile[0] if n_dim == 1 else percentile
+    return percentile_value
diff --git a/sklearn/utils/tests/test_stats.py b/sklearn/utils/tests/test_stats.py
index fe0d267393db0..bf999098e372d 100644
--- a/sklearn/utils/tests/test_stats.py
+++ b/sklearn/utils/tests/test_stats.py
@@ -1,11 +1,15 @@
 import numpy as np
 from numpy.testing import assert_allclose
-from pytest import approx
+import pytest
 
 from sklearn.utils.stats import _weighted_percentile
 
 
-def test_weighted_percentile():
+@pytest.mark.parametrize(
+    "interpolation, expected_median",
+    [("lower", 0), ("linear", 1), ("higher", 1)]
+)
+def test_weighted_percentile(interpolation, expected_median):
     y = np.empty(102, dtype=np.float64)
     y[:50] = 0
     y[-51:] = 2
@@ -13,28 +17,21 @@ def test_weighted_percentile():
     y[50] = 1
     sw = np.ones(102, dtype=np.float64)
     sw[-1] = 0.0
-    score = _weighted_percentile(y, sw, 50)
-    assert approx(score) == 1
+    score = _weighted_percentile(y, sw, 50, interpolation=interpolation)
+    assert score == pytest.approx(expected_median)
 
 
-def test_weighted_percentile_equal():
-    y = np.empty(102, dtype=np.float64)
-    y.fill(0.0)
+@pytest.mark.parametrize(
+    "interpolation", ["linear", "lower", "higher", "nearest"]
+)
+def test_weighted_percentile_constant_data(interpolation):
+    y = np.zeros(102, dtype=np.float64)
     sw = np.ones(102, dtype=np.float64)
     sw[-1] = 0.0
-    score = _weighted_percentile(y, sw, 50)
+    score = _weighted_percentile(y, sw, 50, interpolation=interpolation)
     assert score == 0
 
 
-def test_weighted_percentile_zero_weight():
-    y = np.empty(102, dtype=np.float64)
-    y.fill(1.0)
-    sw = np.ones(102, dtype=np.float64)
-    sw.fill(0.0)
-    score = _weighted_percentile(y, sw, 50)
-    assert approx(score) == 1.0
-
-
 def test_weighted_median_equal_weights():
     # Checks weighted percentile=0.5 is same as median when weights equal
     rng = np.random.RandomState(0)
@@ -44,7 +41,7 @@ def test_weighted_median_equal_weights():
 
     median = np.median(x)
     w_median = _weighted_percentile(x, weights)
-    assert median == approx(w_median)
+    assert median == pytest.approx(w_median)
 
 
 def test_weighted_median_integer_weights():
@@ -58,10 +55,13 @@ def test_weighted_median_integer_weights():
 
     median = np.median(x_manual)
     w_median = _weighted_percentile(x, weights)
-    assert median == approx(w_median)
+    assert median == pytest.approx(w_median)
 
 
-def test_weighted_percentile_2d():
+@pytest.mark.parametrize(
+    "interpolation", ["linear", "lower", "higher", "nearest"]
+)
+def test_weighted_percentile_2d(interpolation):
     # Check for when array 2D and sample_weight 1D
     rng = np.random.RandomState(0)
     x1 = rng.randint(10, size=10)
@@ -70,20 +70,138 @@ def test_weighted_percentile_2d():
     x2 = rng.randint(20, size=10)
     x_2d = np.vstack((x1, x2)).T
 
-    w_median = _weighted_percentile(x_2d, w1)
+    w_median = _weighted_percentile(x_2d, w1, interpolation=interpolation)
     p_axis_0 = [
-        _weighted_percentile(x_2d[:, i], w1)
+        _weighted_percentile(x_2d[:, i], w1, interpolation=interpolation)
         for i in range(x_2d.shape[1])
     ]
     assert_allclose(w_median, p_axis_0)
 
-    # Check when array and sample_weight boht 2D
+    # Check when array and sample_weight both 2D
     w2 = rng.choice(5, size=10)
     w_2d = np.vstack((w1, w2)).T
-    w_median = _weighted_percentile(x_2d, w_2d)
+    w_median = _weighted_percentile(x_2d, w_2d, interpolation=interpolation)
     p_axis_0 = [
-        _weighted_percentile(x_2d[:, i], w_2d[:, i])
+        _weighted_percentile(x_2d[:, i], w_2d[:, i],
+                             interpolation=interpolation)
         for i in range(x_2d.shape[1])
     ]
     assert_allclose(w_median, p_axis_0)
+
+
+def test_weighted_percentile_np_median():
+    # check that our weighted percentile leads to the same result as the
+    # unweighted NumPy implementation with unit weights for the median
+    rng = np.random.RandomState(42)
+    X = rng.randn(10)
+    X.sort()
+    sample_weight = np.ones(X.shape)
+
+    np_median = np.median(X)
+    sklearn_median = _weighted_percentile(
+        X, sample_weight, percentile=50.0, interpolation="linear"
+    )
+
+    assert sklearn_median == pytest.approx(np_median)
+
+
+@pytest.mark.parametrize(
+    "interpolation", ["linear", "lower", "higher", "nearest"]
+)
+@pytest.mark.parametrize("percentile", np.arange(0, 101, 2.5))
+def test_weighted_percentile_np_percentile(interpolation, percentile):
+    rng = np.random.RandomState(0)
+    X = rng.randn(10)
+    X.sort()
+    sample_weight = np.ones(X.shape)
+
+    np_percentile = np.percentile(X, percentile, interpolation=interpolation)
+    sklearn_percentile = _weighted_percentile(
+        X, sample_weight, percentile=percentile, interpolation=interpolation,
+    )
+
+    assert sklearn_percentile == pytest.approx(np_percentile)
+
+
+def test_weighted_percentile_wrong_interpolation():
+    err_msg = "'interpolation' should be one of"
+    with pytest.raises(ValueError, match=err_msg):
+        X = np.random.randn(10)
+        sample_weight = np.ones(X.shape)
+        _weighted_percentile(X, sample_weight, 50, interpolation="xxxx")
+
+
+@pytest.mark.parametrize("percentile", np.arange(2.5, 100, 2.5))
+def test_weighted_percentile_non_unit_weight(percentile):
+    # check the cumulative sum of the weights on the left and right sides of
+    # the percentile
+    rng = np.random.RandomState(42)
+    X = rng.randn(1000)
+    X.sort()
+    sample_weight = rng.uniform(1, 30, X.shape)
+    sample_weight = sample_weight / sample_weight.sum()
+    sample_weight *= 100
+
+    percentile_value = _weighted_percentile(
+        X, sample_weight, percentile, interpolation="linear"
+    )
+    X_percentile_idx = np.searchsorted(X, percentile_value)
+    assert sample_weight[:X_percentile_idx - 1].sum() < percentile
+    assert sample_weight[:X_percentile_idx + 1].sum() > percentile
+
+
+@pytest.mark.parametrize("n_features", [None, 2])
+@pytest.mark.parametrize(
+    "interpolation", ["linear", "higher", "lower", "nearest"]
+)
+@pytest.mark.parametrize("percentile", np.arange(0, 101, 25))
+def test_weighted_percentile_single_weight(n_features, interpolation,
+                                           percentile):
+    rng = np.random.RandomState(42)
+    X = rng.randn(10) if n_features is None else rng.randn(10, n_features)
+    X.sort(axis=0)
+    sample_weight = np.zeros(X.shape)
+    pos_weight_idx = 4
+    sample_weight[pos_weight_idx] = 1
+
+    percentile_value = _weighted_percentile(
+        X, sample_weight, percentile=percentile, interpolation=interpolation
+    )
+    assert percentile_value == pytest.approx(X[pos_weight_idx])
+
+
+@pytest.mark.parametrize("n_features", [None, 2])
+def test_weighted_percentile_all_null_weight(n_features):
+    rng = np.random.RandomState(42)
+    X = rng.randn(10) if n_features is None else rng.randn(10, n_features)
+    sample_weight = np.zeros(X.shape)
+
+    err_msg = "All weights cannot be null when computing a weighted percentile"
+    with pytest.raises(ValueError, match=err_msg):
+        _weighted_percentile(X, sample_weight, 50)
+
+
+@pytest.mark.parametrize("percentile", [0, 25, 50, 75, 100])
+def test_weighted_percentile_equivalence_weights_repeated_samples(percentile):
+    interpolation = "nearest"
+    X_repeated = np.array([1, 2, 2, 3, 3, 3, 4, 4])
+    sample_weight_unit = np.ones(X_repeated.shape[0])
+    p_npy_repeated = np.percentile(
+        X_repeated, percentile, interpolation=interpolation
+    )
+    p_sklearn_repeated = _weighted_percentile(
+        X_repeated, sample_weight_unit, percentile,
+        interpolation=interpolation,
+    )
+
+    assert p_sklearn_repeated == pytest.approx(p_npy_repeated)
+
+    X = np.array([1, 2, 3, 4])
+    sample_weight = np.array([1, 2, 3, 2])
+    p_sklearn_weighted = _weighted_percentile(
+        X, sample_weight, percentile, interpolation=interpolation,
+    )
+
+    assert p_sklearn_weighted == pytest.approx(p_npy_repeated)
+    assert p_sklearn_weighted == pytest.approx(p_sklearn_repeated)
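
Reviewer-side note (not part of the patch): a minimal standalone sketch of
the adjusted-percentile formula used in `_weighted_percentile` above. With
unit weights, `P_w = (x - w) / (S_w - w)` reduces to the `P = (x - 1) / (N - 1)`
definition named in the code comment, so `interpolation="linear"` should
match `np.percentile` exactly:

    import numpy as np

    rng = np.random.RandomState(0)
    x = np.sort(rng.randn(11))
    w = np.ones_like(x)  # unit weights

    # P_w = (S_i - w_i) / (S_n - w_i), with S_i the cumulative weight at i
    cum_w = np.cumsum(w)
    adjusted = (cum_w - w) / (cum_w[-1] - w)

    # with unit weights this reduces to i / (N - 1)
    assert np.allclose(adjusted, np.arange(x.size) / (x.size - 1))

    # so linear interpolation over (adjusted, x) agrees with np.percentile
    for p in (0, 25, 50, 75, 100):
        assert np.isclose(np.interp(p / 100, adjusted, x), np.percentile(x, p))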
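
And a usage sketch of the user-facing change, assuming this branch is
installed: fitting a weighted median without picking an `interpolation`
emits the FutureWarning added in `DummyRegressor.fit`, while opting in to
the future default silences it.

    import warnings

    import numpy as np
    from sklearn.dummy import DummyRegressor

    rng = np.random.RandomState(42)
    X, y = [[0]] * 50, rng.rand(50)
    sample_weight = rng.rand(50)

    # no interpolation given + sample_weight -> legacy "nearest" + warning
    with warnings.catch_warnings(record=True) as record:
        warnings.simplefilter("always")
        DummyRegressor(strategy="median").fit(X, y, sample_weight=sample_weight)
    assert any(issubclass(w.category, FutureWarning) for w in record)

    # forcing the future default avoids the warning
    reg = DummyRegressor(strategy="median", interpolation="linear")
    reg.fit(X, y, sample_weight=sample_weight)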