MNT update Travis dependencies to latest available versions (#11714) · scikit-learn/scikit-learn@c1738a3 · GitHub

Commit c1738a3

naoyak authored and jnothman committed
MNT update Travis dependencies to latest available versions (#11714)
1 parent 391209b commit c1738a3

File tree: 7 files changed, +20 −25 lines

.travis.yml

Lines changed: 5 additions & 5 deletions

@@ -38,13 +38,13 @@ matrix:
            NUMPY_VERSION="1.10.4" SCIPY_VERSION="0.16.1" CYTHON_VERSION="0.25.2"
            PILLOW_VERSION="4.0.0" COVERAGE=true
       if: type != cron
-    # This environment tests the newest supported Anaconda release.
+    # This environment tests the latest available dependencies.
     # It runs tests requiring pandas and PyAMG.
     # It also runs with the site joblib instead of the vendored copy of joblib.
-    - env: DISTRIB="conda" PYTHON_VERSION="3.6.2" INSTALL_MKL="true"
-           NUMPY_VERSION="1.14.2" SCIPY_VERSION="1.0.0" PANDAS_VERSION="0.20.3"
-           CYTHON_VERSION="0.26.1" PYAMG_VERSION="3.3.2" PILLOW_VERSION="4.3.0"
-           JOBLIB_VERSION="0.12" COVERAGE=true
+    - env: DISTRIB="conda" PYTHON_VERSION="*" INSTALL_MKL="true"
+           NUMPY_VERSION="*" SCIPY_VERSION="*" PANDAS_VERSION="*"
+           CYTHON_VERSION="*" PYAMG_VERSION="*" PILLOW_VERSION="*"
+           JOBLIB_VERSION="*" COVERAGE=true
            CHECK_PYTEST_SOFT_DEPENDENCY="true" TEST_DOCSTRINGS="true"
            SKLEARN_SITE_JOBLIB=1
       if: type != cron
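With every version variable set to "*", conda resolves the newest release available at build time instead of a fixed pin. A hedged sketch (not part of this commit) of a step such a build could run to record what "latest" resolved to:

```python
# Illustrative only: print the resolved versions of the packages the diff
# un-pins, so CI logs show what "latest" meant for a given build.
import numpy, scipy, pandas, Cython

for mod in (numpy, scipy, pandas, Cython):
    print(mod.__name__, mod.__version__)
```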

examples/applications/plot_prediction_latency.py

Lines changed: 4 additions & 5 deletions

@@ -26,7 +26,6 @@
 
 from sklearn.preprocessing import StandardScaler
 from sklearn.model_selection import train_test_split
-from scipy.stats import scoreatpercentile
 from sklearn.datasets.samples_generator import make_regression
 from sklearn.ensemble.forest import RandomForestRegressor
 from sklearn.linear_model.ridge import Ridge
@@ -50,7 +49,7 @@ def atomic_benchmark_estimator(estimator, X_test, verbose=False):
         estimator.predict(instance)
         runtimes[i] = time.time() - start
     if verbose:
-        print("atomic_benchmark runtimes:", min(runtimes), scoreatpercentile(
+        print("atomic_benchmark runtimes:", min(runtimes), np.percentile(
             runtimes, 50), max(runtimes))
     return runtimes
 
@@ -65,7 +64,7 @@ def bulk_benchmark_estimator(estimator, X_test, n_bulk_repeats, verbose):
         runtimes[i] = time.time() - start
     runtimes = np.array(list(map(lambda x: x / float(n_instances), runtimes)))
     if verbose:
-        print("bulk_benchmark runtimes:", min(runtimes), scoreatpercentile(
+        print("bulk_benchmark runtimes:", min(runtimes), np.percentile(
             runtimes, 50), max(runtimes))
     return runtimes
 
@@ -207,8 +206,8 @@ def n_feature_influence(estimators, n_train, n_test, n_features, percentile):
         estimator.fit(X_train, y_train)
         gc.collect()
         runtimes = bulk_benchmark_estimator(estimator, X_test, 30, False)
-        percentiles[cls_name][n] = 1e6 * scoreatpercentile(runtimes,
-                                                           percentile)
+        percentiles[cls_name][n] = 1e6 * np.percentile(runtimes,
+                                                       percentile)
     return percentiles
 
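This file and every remaining file in the commit make the same substitution, so it is worth noting why it is safe: under their default interpolation modes ('fraction' for scipy's deprecated scoreatpercentile, 'linear' for numpy), the two functions compute the same value. A minimal sanity check, not part of the commit:

```python
# Sanity check (illustrative, not from the commit): np.percentile matches
# scipy.stats.scoreatpercentile under the defaults used throughout this diff.
import numpy as np
from scipy.stats import scoreatpercentile

rng = np.random.RandomState(0)
runtimes = rng.rand(1000)

old = scoreatpercentile(runtimes, 50)  # scipy, default 'fraction' interpolation
new = np.percentile(runtimes, 50)      # numpy, default 'linear' interpolation
assert np.isclose(old, new)
```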
sklearn/covariance/elliptic_envelope.py

Lines changed: 1 addition & 2 deletions

@@ -106,8 +106,7 @@ def fit(self, X, y=None):
         y : (ignored)
         """
         super(EllipticEnvelope, self).fit(X)
-        self.offset_ = sp.stats.scoreatpercentile(
-            -self.dist_, 100. * self.contamination)
+        self.offset_ = np.percentile(-self.dist_, 100. * self.contamination)
         return self
 
     def decision_function(self, X, raw_values=None):
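The pattern in this hunk recurs in the outlier detectors below: offset_ is set to the contamination-quantile of the outlier scores, so roughly a fraction `contamination` of the training samples score below it. A toy sketch with synthetic scores (not sklearn internals):

```python
# Toy illustration of the offset_ computation above; `scores` stands in
# for -self.dist_ and is not real Mahalanobis-distance output.
import numpy as np

contamination = 0.1
scores = np.random.RandomState(42).randn(1000)

offset = np.percentile(scores, 100. * contamination)
print((scores < offset).mean())  # close to 0.1 by construction
```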

sklearn/ensemble/gradient_boosting.py

Lines changed: 3 additions & 4 deletions

@@ -38,7 +38,6 @@
 import numbers
 import numpy as np
 
-from scipy import stats
 from scipy.sparse import csc_matrix
 from scipy.sparse import csr_matrix
 from scipy.sparse import issparse
@@ -91,7 +90,7 @@ def fit(self, X, y, sample_weight=None):
             Individual weights for each sample
         """
         if sample_weight is None:
-            self.quantile = stats.scoreatpercentile(y, self.alpha * 100.0)
+            self.quantile = np.percentile(y, self.alpha * 100.0)
         else:
             self.quantile = _weighted_percentile(y, sample_weight,
                                                  self.alpha * 100.0)
@@ -608,7 +607,7 @@ def __call__(self, y, pred, sample_weight=None):
         gamma = self.gamma
         if gamma is None:
             if sample_weight is None:
-                gamma = stats.scoreatpercentile(np.abs(diff), self.alpha * 100)
+                gamma = np.percentile(np.abs(diff), self.alpha * 100)
             else:
                 gamma = _weighted_percentile(np.abs(diff), sample_weight, self.alpha * 100)
 
@@ -641,7 +640,7 @@ def negative_gradient(self, y, pred, sample_weight=None, **kargs):
         pred = pred.ravel()
         diff = y - pred
         if sample_weight is None:
-            gamma = stats.scoreatpercentile(np.abs(diff), self.alpha * 100)
+            gamma = np.percentile(np.abs(diff), self.alpha * 100)
         else:
             gamma = _weighted_percentile(np.abs(diff), sample_weight, self.alpha * 100)
         gamma_mask = np.abs(diff) <= gamma
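Note that when sample_weight is given, these branches fall back to sklearn's private _weighted_percentile helper rather than np.percentile. A rough sketch of what a weighted percentile computes, under the assumption that it picks the smallest value whose cumulative weight reaches the requested fraction (the real helper may differ in detail):

```python
# Hypothetical weighted-percentile sketch; NOT sklearn's _weighted_percentile.
import numpy as np

def weighted_percentile_sketch(a, weights, percentile):
    """Smallest value whose cumulative weight reaches `percentile`% of the total."""
    order = np.argsort(a)
    a_sorted = np.asarray(a, dtype=float)[order]
    cum = np.cumsum(np.asarray(weights, dtype=float)[order])
    return a_sorted[np.searchsorted(cum, percentile / 100.0 * cum[-1])]

y = np.array([1., 2., 3., 4.])
print(weighted_percentile_sketch(y, np.ones_like(y), 50))  # 2.0 for unit weights
```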

sklearn/ensemble/iforest.py

Lines changed: 4 additions & 4 deletions

@@ -267,8 +267,8 @@ def fit(self, X, y=None, sample_weight=None):
                                  "'auto' when behaviour == 'old'.")
 
             self.offset_ = -0.5
-            self._threshold_ = sp.stats.scoreatpercentile(
-                self.decision_function(X), 100. * self._contamination)
+            self._threshold_ = np.percentile(self.decision_function(X),
+                                             100. * self._contamination)
 
             return self
 
@@ -281,8 +281,8 @@ def fit(self, X, y=None, sample_weight=None):
 
         # else, define offset_ wrt contamination parameter, so that the
         # threshold_ attribute is implicitly 0 and is not needed anymore:
-        self.offset_ = sp.stats.scoreatpercentile(
-            self.score_samples(X), 100. * self._contamination)
+        self.offset_ = np.percentile(self.score_samples(X),
+                                     100. * self._contamination)
 
         return self

sklearn/feature_selection/univariate_selection.py

Lines changed: 1 addition & 2 deletions

@@ -438,8 +438,7 @@ def _get_support_mask(self):
             return np.zeros(len(self.scores_), dtype=np.bool)
 
         scores = _clean_nans(self.scores_)
-        threshold = stats.scoreatpercentile(scores,
-                                            100 - self.percentile)
+        threshold = np.percentile(scores, 100 - self.percentile)
         mask = scores > threshold
         ties = np.where(scores == threshold)[0]
         if len(ties):
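For context, _get_support_mask keeps the top self.percentile of feature scores: values strictly above the threshold are kept, and the tie handling that follows admits just enough threshold-equal scores to reach the requested fraction. A toy reconstruction of that logic (the tie-breaking detail here is an assumption, not the verbatim sklearn implementation):

```python
# Toy reconstruction of the selection logic above; mirrors the diff's
# threshold/ties structure but is not the verbatim sklearn code.
import numpy as np

percentile = 50
scores = np.array([0.1, 0.4, 0.4, 0.9])

threshold = np.percentile(scores, 100 - percentile)
mask = scores > threshold
ties = np.where(scores == threshold)[0]
if len(ties):
    max_feats = int(len(scores) * percentile / 100)
    mask[ties[:max_feats - mask.sum()]] = True
print(mask)  # exactly half the features kept: [False  True False  True]
```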

sklearn/neighbors/lof.py

Lines changed: 2 additions & 3 deletions

@@ -4,7 +4,6 @@
 
 import numpy as np
 import warnings
-from scipy.stats import scoreatpercentile
 
 from .base import NeighborsBase
 from .base import KNeighborsMixin
@@ -262,8 +261,8 @@ def fit(self, X, y=None):
             # inliers score around -1 (the higher, the less abnormal).
             self.offset_ = -1.5
         else:
-            self.offset_ = scoreatpercentile(
-                self.negative_outlier_factor_, 100. * self._contamination)
+            self.offset_ = np.percentile(self.negative_outlier_factor_,
+                                         100. * self._contamination)
 
         return self
