moved versionadded in the proper places · scikit-learn/scikit-learn@667fb3a · GitHub

Commit 667fb3a: moved versionadded in the proper places
Parent: 803d68d

File tree: 8 files changed, +43 -20 lines

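Every hunk below follows the same pattern: the `.. versionadded:: 0.18` directive moves from the tail of the docstring to just below the summary, ahead of the Parameters section, where numpydoc/Sphinx renders it with the description rather than after the references. A minimal sketch of the resulting convention (the class and parameter are illustrative, not from this commit):

    class Example(object):
        """One-line summary of the estimator.

        .. versionadded:: 0.18

        Parameters
        ----------
        alpha : float, default: 1.0
            An illustrative parameter.
        """

        def __init__(self, alpha=1.0):
            self.alpha = alpha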

sklearn/datasets/kddcup99.py
Lines changed: 2 additions & 1 deletion

@@ -116,6 +116,8 @@ def fetch_kddcup99(subset=None, shuffle=False, random_state=None,
     Targets str, 'normal.' or name of the anomaly type
     ================ ==========================================
 
+    .. versionadded:: 0.18
+
     Parameters
     ----------
     subset : None, 'SA', 'SF', 'http', 'smtp'
@@ -157,7 +159,6 @@ def fetch_kddcup99(subset=None, shuffle=False, random_state=None,
     Intrusions in Unlabeled Data (2002) by Eleazar Eskin, Andrew Arnold,
     Michael Prerau, Leonid Portnoy, Sal Stolfo
 
-    .. versionadded:: 0.18
     """
     kddcup99 = _fetch_brute_kddcup99(shuffle=shuffle, percent10=percent10,
                                      download_if_missing=download_if_missing)
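
As a usage sketch (not part of the commit), the loader documented above fetches the KDD Cup '99 network-intrusion data and returns a Bunch; the first call downloads the dataset, so network access is assumed:

    from sklearn.datasets import fetch_kddcup99

    # 'SA' is an anomaly-detection subset; targets are 'normal.' or an
    # anomaly-type name, as the table in the docstring above describes.
    data = fetch_kddcup99(subset='SA', shuffle=True, random_state=0)
    print(data.data.shape)
    print(data.target[:5])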

sklearn/ensemble/forest.py
Lines changed: 5 additions & 1 deletion

@@ -73,13 +73,15 @@ class calls the ``fit`` method of each sub-estimator on random samples
 
 MAX_INT = np.iinfo(np.int32).max
 
+
 def _generate_sample_indices(random_state, n_samples):
     """Private function used to _parallel_build_trees function."""
     random_instance = check_random_state(random_state)
     sample_indices = random_instance.randint(0, n_samples, n_samples)
 
     return sample_indices
 
+
 def _generate_unsampled_indices(random_state, n_samples):
     """Private function used to forest._set_oob_score function."""
     sample_indices = _generate_sample_indices(random_state, n_samples)
@@ -90,6 +92,7 @@ def _generate_unsampled_indices(random_state, n_samples):
 
     return unsampled_indices
 
+
 def _parallel_build_trees(tree, forest, X, y, sample_weight, tree_idx, n_trees,
                           verbose=0, class_weight=None):
     """Private function used to fit a single tree in parallel."""
@@ -181,6 +184,8 @@ def apply(self, X):
     def decision_path(self, X):
         """Return the decision path in the forest
 
+        .. versionadded:: 0.18
+
         Parameters
         ----------
         X : array-like or sparse matrix, shape = [n_samples, n_features]
@@ -198,7 +203,6 @@ def decision_path(self, X):
            The columns from indicator[n_nodes_ptr[i]:n_nodes_ptr[i+1]]
            gives the indicator value for the i-th estimator.
 
-        .. versionadded:: 0.18
         """
        X = self._validate_X_predict(X)
        indicators = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
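
For context, `decision_path` on a fitted forest returns a sparse node-indicator matrix together with the per-estimator column offsets described above; a short sketch (not part of the commit):

    from sklearn.datasets import make_classification
    from sklearn.ensemble import RandomForestClassifier

    X, y = make_classification(n_samples=100, random_state=0)
    forest = RandomForestClassifier(n_estimators=3, random_state=0).fit(X, y)

    # indicator: sparse CSR matrix of shape [n_samples, total n_nodes];
    # columns n_nodes_ptr[i]:n_nodes_ptr[i+1] belong to the i-th tree.
    indicator, n_nodes_ptr = forest.decision_path(X)
    first_tree_nodes = indicator[:, n_nodes_ptr[0]:n_nodes_ptr[1]]
    print(first_tree_nodes.shape)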

sklearn/ensemble/iforest.py
Lines changed: 2 additions & 1 deletion

@@ -44,6 +44,8 @@ class IsolationForest(BaseBagging):
 
     Read more in the :ref:`User Guide <isolation_forest>`.
 
+    .. versionadded:: 0.18
+
     Parameters
     ----------
     n_estimators : int, optional (default=100)
@@ -106,7 +108,6 @@ class IsolationForest(BaseBagging):
         anomaly detection." ACM Transactions on Knowledge Discovery from
         Data (TKDD) 6.1 (2012): 3.
 
-    .. versionadded:: 0.18
     """
 
     def __init__(self,
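
IsolationForest itself is new in 0.18, hence the class-level directive; a usage sketch (not part of the commit):

    import numpy as np
    from sklearn.ensemble import IsolationForest

    rng = np.random.RandomState(0)
    X_train = rng.randn(100, 2)
    X_test = rng.uniform(low=-6, high=6, size=(10, 2))

    clf = IsolationForest(n_estimators=100, random_state=rng).fit(X_train)
    print(clf.predict(X_test))            # +1 for inliers, -1 for outliers
    print(clf.decision_function(X_test))  # the underlying anomaly score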

sklearn/gaussian_process/gpc.py
Lines changed: 2 additions & 1 deletion

@@ -45,6 +45,8 @@ class _BinaryGaussianProcessClassifierLaplace(BaseEstimator):
     Currently, the implementation is restricted to using the logistic link
     function.
 
+    .. versionadded:: 0.18
+
     Parameters
     ----------
     kernel : kernel object
@@ -139,7 +141,6 @@ def optimizer(obj_func, initial_theta, bounds):
     log_marginal_likelihood_value_: float
         The log-marginal-likelihood of ``self.kernel_.theta``
 
-    .. versionadded:: 0.18
     """
     def __init__(self, kernel=None, optimizer="fmin_l_bfgs_b",
                  n_restarts_optimizer=0, max_iter_predict=100,
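
The Laplace-approximation class above is private; users normally reach it through the public GaussianProcessClassifier wrapper. A sketch (not part of the commit):

    from sklearn.datasets import make_classification
    from sklearn.gaussian_process import GaussianProcessClassifier
    from sklearn.gaussian_process.kernels import RBF

    X, y = make_classification(n_samples=50, n_features=4, random_state=0)
    # 1.0 * RBF(...) composes a ConstantKernel with an RBF kernel.
    gpc = GaussianProcessClassifier(kernel=1.0 * RBF(length_scale=1.0))
    gpc.fit(X, y)
    print(gpc.predict_proba(X[:3]))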

sklearn/gaussian_process/gpr.py
Lines changed: 2 additions & 1 deletion

@@ -35,6 +35,8 @@ class GaussianProcessRegressor(BaseEstimator, RegressorMixin):
 
     Read more in the :ref:`User Guide <gaussian_process>`.
 
+    .. versionadded:: 0.18
+
     Parameters
     ----------
     kernel : kernel object
@@ -126,7 +128,6 @@ def optimizer(obj_func, initial_theta, bounds):
     log_marginal_likelihood_value_: float
         The log-marginal-likelihood of ``self.kernel_.theta``
 
-    .. versionadded:: 0.18
     """
     def __init__(self, kernel=None, alpha=1e-10,
                  optimizer="fmin_l_bfgs_b", n_restarts_optimizer=0,
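
A usage sketch for the regressor (not part of the commit), including the `log_marginal_likelihood_value_` attribute named in the hunk above:

    import numpy as np
    from sklearn.gaussian_process import GaussianProcessRegressor
    from sklearn.gaussian_process.kernels import RBF, WhiteKernel

    X = np.linspace(0, 10, 20).reshape(-1, 1)
    y = np.sin(X).ravel()

    kernel = RBF(length_scale=1.0) + WhiteKernel(noise_level=1e-3)
    gpr = GaussianProcessRegressor(kernel=kernel, n_restarts_optimizer=2)
    gpr.fit(X, y)
    # return_std=True also yields the posterior predictive std. deviation.
    y_mean, y_std = gpr.predict(X, return_std=True)
    print(gpr.log_marginal_likelihood_value_)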

sklearn/gaussian_process/kernels.py
Lines changed: 24 additions & 12 deletions

@@ -49,6 +49,8 @@ class Hyperparameter(namedtuple('Hyperparameter',
                                 'n_elements', 'fixed'))):
     """A kernel hyperparameter's specification in form of a namedtuple.
 
+    .. versionadded:: 0.18
+
     Attributes
     ----------
     name : string
@@ -77,7 +79,6 @@ class Hyperparameter(namedtuple('Hyperparameter',
         changed during hyperparameter tuning. If None is passed, the "fixed" is
         derived based on the given bounds.
 
-    .. versionadded:: 0.18
     """
     # A raw namedtuple is very memory efficient as it packs the attributes
     # in a struct to get rid of the __dict__ of attributes in particular it
@@ -636,6 +637,8 @@ class Sum(KernelOperator):
     The resulting kernel is defined as
     k_sum(X, Y) = k1(X, Y) + k2(X, Y)
 
+    .. versionadded:: 0.18
+
     Parameters
     ----------
     k1 : Kernel object
@@ -644,7 +647,6 @@ class Sum(KernelOperator):
     k2 : Kernel object
         The second base-kernel of the sum-kernel
 
-    .. versionadded:: 0.18
     """
 
     def __call__(self, X, Y=None, eval_gradient=False):
@@ -709,6 +711,8 @@ class Product(KernelOperator):
     The resulting kernel is defined as
     k_prod(X, Y) = k1(X, Y) * k2(X, Y)
 
+    .. versionadded:: 0.18
+
     Parameters
     ----------
     k1 : Kernel object
@@ -717,7 +721,6 @@ class Product(KernelOperator):
     k2 : Kernel object
         The second base-kernel of the product-kernel
 
-    .. versionadded:: 0.18
     """
 
     def __call__(self, X, Y=None, eval_gradient=False):
@@ -783,6 +786,8 @@ class Exponentiation(Kernel):
     The resulting kernel is defined as
     k_exp(X, Y) = k(X, Y) ** exponent
 
+    .. versionadded:: 0.18
+
     Parameters
     ----------
     kernel : Kernel object
@@ -791,7 +796,6 @@ class Exponentiation(Kernel):
     exponent : float
         The exponent for the base kernel
 
-    .. versionadded:: 0.18
     """
     def __init__(self, kernel, exponent):
         self.kernel = kernel
@@ -942,6 +946,8 @@ class ConstantKernel(StationaryKernelMixin, Kernel):
 
     k(x_1, x_2) = constant_value for all x_1, x_2
 
+    .. versionadded:: 0.18
+
     Parameters
     ----------
     constant_value : float, default: 1.0
@@ -951,7 +957,6 @@ class ConstantKernel(StationaryKernelMixin, Kernel):
     constant_value_bounds : pair of floats >= 0, default: (1e-5, 1e5)
         The lower and upper bound on constant_value
 
-    .. versionadded:: 0.18
     """
     def __init__(self, constant_value=1.0, constant_value_bounds=(1e-5, 1e5)):
         self.constant_value = constant_value
@@ -1036,6 +1041,8 @@ class WhiteKernel(StationaryKernelMixin, Kernel):
 
     k(x_1, x_2) = noise_level if x_1 == x_2 else 0
 
+    .. versionadded:: 0.18
+
     Parameters
     ----------
     noise_level : float, default: 1.0
@@ -1044,7 +1051,6 @@ class WhiteKernel(StationaryKernelMixin, Kernel):
     noise_level_bounds : pair of floats >= 0, default: (1e-5, 1e5)
         The lower and upper bound on noise_level
 
-    .. versionadded:: 0.18
     """
     def __init__(self, noise_level=1.0, noise_level_bounds=(1e-5, 1e5)):
         self.noise_level = noise_level
@@ -1137,6 +1143,8 @@ class RBF(StationaryKernelMixin, NormalizedKernelMixin, Kernel):
     kernel as covariance function have mean square derivatives of all orders,
     and are thus very smooth.
 
+    .. versionadded:: 0.18
+
     Parameters
     -----------
     length_scale : float or array with shape (n_features,), default: 1.0
@@ -1147,7 +1155,6 @@ class RBF(StationaryKernelMixin, NormalizedKernelMixin, Kernel):
     length_scale_bounds : pair of floats >= 0, default: (1e-5, 1e5)
         The lower and upper bound on length_scale
 
-    .. versionadded:: 0.18
     """
     def __init__(self, length_scale=1.0, length_scale_bounds=(1e-5, 1e5)):
         self.length_scale = length_scale
@@ -1249,6 +1256,8 @@ class Matern(RBF):
     See Rasmussen and Williams 2006, pp84 for details regarding the
     different variants of the Matern kernel.
 
+    .. versionadded:: 0.18
+
     Parameters
     -----------
     length_scale : float or array with shape (n_features,), default: 1.0
@@ -1271,7 +1280,6 @@ class Matern(RBF):
         Bessel function. Furthermore, in contrast to l, nu is kept fixed to
         its initial value and not optimized.
 
-    .. versionadded:: 0.18
     """
     def __init__(self, length_scale=1.0, length_scale_bounds=(1e-5, 1e5),
                  nu=1.5):
@@ -1395,6 +1403,8 @@ class RationalQuadratic(StationaryKernelMixin, NormalizedKernelMixin, Kernel):
 
     k(x_i, x_j) = (1 + d(x_i, x_j)^2 / (2*alpha * length_scale^2))^-alpha
 
+    .. versionadded:: 0.18
+
     Parameters
     ----------
     length_scale : float > 0, default: 1.0
@@ -1409,7 +1419,6 @@ class RationalQuadratic(StationaryKernelMixin, NormalizedKernelMixin, Kernel):
     alpha_bounds : pair of floats >= 0, default: (1e-5, 1e5)
         The lower and upper bound on alpha
 
-    .. versionadded:: 0.18
     """
     def __init__(self, length_scale=1.0, alpha=1.0,
                  length_scale_bounds=(1e-5, 1e5), alpha_bounds=(1e-5, 1e5)):
@@ -1505,6 +1514,8 @@ class ExpSineSquared(StationaryKernelMixin, NormalizedKernelMixin, Kernel):
 
     k(x_i, x_j) = exp(-2 sin(\pi / periodicity * d(x_i, x_j)) / length_scale)^2
 
+    .. versionadded:: 0.18
+
     Parameters
     ----------
     length_scale : float > 0, default: 1.0
@@ -1519,7 +1530,6 @@ class ExpSineSquared(StationaryKernelMixin, NormalizedKernelMixin, Kernel):
     periodicity_bounds : pair of floats >= 0, default: (1e-5, 1e5)
         The lower and upper bound on periodicity
 
-    .. versionadded:: 0.18
     """
     def __init__(self, length_scale=1.0, periodicity=1.0,
                  length_scale_bounds=(1e-5, 1e5),
@@ -1621,6 +1631,8 @@ class DotProduct(Kernel):
 
     The DotProduct kernel is commonly combined with exponentiation.
 
+    .. versionadded:: 0.18
+
     Parameters
     ----------
     sigma_0 : float >= 0, default: 1.0
@@ -1630,7 +1642,6 @@ class DotProduct(Kernel):
     sigma_0_bounds : pair of floats >= 0, default: (1e-5, 1e5)
         The lower and upper bound on l
 
-    .. versionadded:: 0.18
     """
 
     def __init__(self, sigma_0=1.0, sigma_0_bounds=(1e-5, 1e5)):
@@ -1739,6 +1750,8 @@ class PairwiseKernel(Kernel):
     kernel parameters are set directly at initialization and are kept
     fixed.
 
+    .. versionadded:: 0.18
+
     Parameters
     ----------
     gamma: float >= 0, default: 1.0
@@ -1761,7 +1774,6 @@ class PairwiseKernel(Kernel):
         All entries of this dict (if any) are passed as keyword arguments to
         the pairwise kernel function.
 
-    .. versionadded:: 0.18
     """
 
     def __init__(self, gamma=1.0, gamma_bounds=(1e-5, 1e5), metric="linear",
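
The Sum, Product and Exponentiation operators above are rarely instantiated directly; they are produced by the overloaded `+`, `*` and `**` operators on kernel objects, and they expose their tunable parameters as the Hyperparameter namedtuples documented at the top of the file. A sketch (not part of the commit):

    from sklearn.gaussian_process.kernels import (ConstantKernel, RBF,
                                                  WhiteKernel)

    # '+' builds a Sum, '*' a Product, '**' an Exponentiation.
    k = (ConstantKernel(1.0) * RBF(length_scale=1.0) ** 2
         + WhiteKernel(noise_level=0.1))
    print(k)                  # readable composition of the three kernels
    print(k.hyperparameters)  # list of Hyperparameter namedtuples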

sklearn/neural_network/multilayer_perceptron.py
Lines changed: 4 additions & 2 deletions

@@ -685,6 +685,8 @@ class MLPClassifier(BaseMultilayerPerceptron, ClassifierMixin):
     This model optimizes the log-loss function using LBFGS or stochastic
     gradient descent.
 
+    .. versionadded:: 0.18
+
     Parameters
     ----------
     hidden_layer_sizes : tuple, length = n_layers - 2, default (100,)
@@ -871,7 +873,6 @@ class MLPClassifier(BaseMultilayerPerceptron, ClassifierMixin):
     Kingma, Diederik, and Jimmy Ba. "Adam: A method for stochastic
     optimization." arXiv preprint arXiv:1412.6980 (2014).
 
-    .. versionadded:: 0.18
     """
     def __init__(self, hidden_layer_sizes=(100,), activation="relu",
                  solver='adam', alpha=0.0001,
@@ -1029,6 +1030,8 @@ class MLPRegressor(BaseMultilayerPerceptron, RegressorMixin):
     This model optimizes the squared-loss using LBFGS or stochastic gradient
     descent.
 
+    .. versionadded:: 0.18
+
     Parameters
     ----------
     hidden_layer_sizes : tuple, length = n_layers - 2, default (100,)
@@ -1212,7 +1215,6 @@ class MLPRegressor(BaseMultilayerPerceptron, RegressorMixin):
     Kingma, Diederik, and Jimmy Ba. "Adam: A method for stochastic
     optimization." arXiv preprint arXiv:1412.6980 (2014).
 
-    .. versionadded:: 0.18
     """
     def __init__(self, hidden_layer_sizes=(100,), activation="relu",
                  solver='adam', alpha=0.0001,
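
Both MLP estimators are new in 0.18, which is why the directive sits at class level; a classifier sketch (not part of the commit):

    from sklearn.datasets import load_digits
    from sklearn.neural_network import MLPClassifier

    digits = load_digits()
    # Two hidden layers of 50 units each; 'adam' is the default solver.
    clf = MLPClassifier(hidden_layer_sizes=(50, 50), alpha=1e-4,
                        random_state=0)
    clf.fit(digits.data, digits.target)
    print(clf.score(digits.data, digits.target))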

sklearn/tree/tree.py
Lines changed: 2 additions & 1 deletion

@@ -486,6 +486,8 @@ def apply(self, X, check_input=True):
     def decision_path(self, X, check_input=True):
         """Return the decision path in the tree
 
+        .. versionadded:: 0.18
+
         Parameters
         ----------
         X : array_like or sparse matrix, shape = [n_samples, n_features]
@@ -503,7 +505,6 @@ def decision_path(self, X, check_input=True):
            Return a node indicator matrix where non zero elements
            indicates that the samples goes through the nodes.
 
-        .. versionadded:: 0.18
        """
        X = self._validate_X_predict(X, check_input)
        return self.tree_.decision_path(X)
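
The single-tree `decision_path` returns just the sparse indicator, without the `n_nodes_ptr` offsets the forest version adds; a sketch (not part of the commit):

    from sklearn.datasets import load_iris
    from sklearn.tree import DecisionTreeClassifier

    iris = load_iris()
    tree = DecisionTreeClassifier(random_state=0).fit(iris.data, iris.target)

    # Sparse [n_samples, n_nodes] matrix: entry (i, j) is nonzero when
    # sample i passes through node j on its way to a leaf.
    node_indicator = tree.decision_path(iris.data)
    print(node_indicator.shape)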
