8000 COSMIT pep8 · seckcoder/scikit-learn@2297306 · GitHub
[go: up one dir, main page]

Skip to content

Commit 2297306

Browse files
committed
COSMIT pep8
1 parent 4fb6c88 commit 2297306

File tree

11 files changed

+23
-27
lines changed

11 files changed

+23
-27
lines changed

examples/exercises/plot_cv_diabetes.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -61,8 +61,8 @@
6161
print("subsets of the data:")
6262
for k, (train, test) in enumerate(k_fold):
6363
lasso_cv.fit(X[train], y[train])
64-
print("[fold {0}] alpha: {1:.5f}, score: {2:.5f}".\
65-
format(k, lasso_cv.alpha_, lasso_cv.score(X[test], y[test])))
64+
print("[fold {0}] alpha: {1:.5f}, score: {2:.5f}".
65+
format(k, lasso_cv.alpha_, lasso_cv.score(X[test], y[test])))
6666
print()
6767
print("Answer: Not very much since we obtained different alphas for different")
6868
print("subsets of the data and moreover, the scores for these alphas differ")

sklearn/cluster/dbscan_.py

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -7,7 +7,6 @@
77
#
88
# License: BSD
99

10-
import warnings
1110
import numpy as np
1211

1312
from ..base import BaseEstimator, ClusterMixin

sklearn/datasets/__init__.py

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -80,6 +80,7 @@
8080
'make_friedman1',
8181
'make_friedman2',
8282
'make_friedman3',
83+
'make_gaussian_quantiles',
8384
'make_hastie_10_2',
8485
'make_low_rank_matrix',
8586
'make_moons',

sklearn/ensemble/__init__.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -24,4 +24,4 @@
2424
"ExtraTreesRegressor", "GradientBoostingClassifier",
2525
"GradientBoostingRegressor", "AdaBoostClassifier",
2626
"AdaBoostRegressor", "forest", "gradient_boosting",
27-
"partial_dependence", ]
27+
"partial_dependence", "weight_boosting"]

sklearn/ensemble/tests/test_weight_boosting.py

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -6,7 +6,6 @@
66
from numpy.testing import assert_array_equal
77
from numpy.testing import assert_array_almost_equal
88
from numpy.testing import assert_equal
9-
from nose.tools import assert_true
109
from nose.tools import assert_raises
1110

1211
from sklearn.dummy import DummyClassifier

sklearn/feature_extraction/tests/test_text.py

Lines changed: 17 additions & 17 deletions
Original file line numberDiff line numberDiff line change
@@ -174,8 +174,8 @@ def test_unicode_decode_error():
174174
ca = CountVectorizer(analyzer='char', ngram_range=(3, 6),
175175
charset='ascii').build_analyzer()
176176
assert_raises(UnicodeDecodeError, ca, text_bytes)
177-
178-
177+
178+
179179
def test_char_ngram_analyzer():
180180
cnga = CountVectorizer(analyzer='char', strip_accents='unicode',
181181
ngram_range=(3, 6)).build_analyzer()
@@ -244,7 +244,7 @@ def test_countvectorizer_custom_vocabulary_pipeline():
244244
assert_equal(set(pipe.named_steps['count'].vocabulary_),
245245
set(what_we_like))
246246
assert_equal(X.shape[1], len(what_we_like))
247-
247+
248248

249249
def test_countvectorizer_stop_words():
250250
cv = CountVectorizer()
@@ -404,11 +404,11 @@ def test_vectorizer():
404404
t2 = TfidfTransformer(norm='l1', use_idf=False)
405405
tf = t2.fit(counts_train).transform(counts_train).toarray()
406406
assert_equal(t2.idf_, None)
407-
407+
408408
# test idf transform with unlearned idf vector
409409
t3 = TfidfTransformer(use_idf=True)
410410
assert_raises(ValueError, t3.transform, counts_train)
411-
411+
412412
# test idf transform with incompatible n_features
413413
X = [[1, 1, 5],
414414
[1, 1, 0]]
@@ -437,23 +437,23 @@ def test_vectorizer():
437437
# test transform on unfitted vectorizer with empty vocabulary
438438
v3 = CountVectorizer(vocabulary=None)
439439
assert_raises(ValueError, v3.transform, train_data)
440-
440+
441441
# ascii preprocessor?
442442
v3.set_params(strip_accents='ascii', lowercase=False)
443443
assert_equal(v3.build_preprocessor(), strip_accents_ascii)
444-
444+
445445
# error on bad strip_accents param
446446
v3.set_params(strip_accents='_gabbledegook_', preprocessor=None)
447447
assert_raises(ValueError, v3.build_preprocessor)
448-
448+
449449
# error with bad analyzer type
450450
v3.set_params = '_invalid_analyzer_type_'
451-
assert_raises(ValueError, v3.build_analyzer)
452-
453-
451+
assert_raises(ValueError, v3.build_analyzer)
452+
453+
454454
def test_tfidf_vectorizer_setters():
455-
tv = TfidfVectorizer(norm='l2', use_idf=False,
456-
smooth_idf=False, sublinear_tf=False)
455+
tv = TfidfVectorizer(norm='l2', use_idf=False, smooth_idf=False,
456+
sublinear_tf=False)
457457
tv.norm = 'l1'
458458
assert_equal(tv._tfidf.norm, 'l1')
459459
tv.use_idf = True
@@ -462,7 +462,7 @@ def test_tfidf_vectorizer_setters():
462462
assert_true(tv._tfidf.smooth_idf)
463463
tv.sublinear_tf = True
464464
assert_true(tv._tfidf.sublinear_tf)
465-
465+
466466

467467
def test_hashing_vectorizer():
468468
v = HashingVectorizer()
@@ -504,11 +504,11 @@ def test_hashing_vectorizer():
504504

505505
def test_feature_names():
506506
cv = CountVectorizer(max_df=0.5, min_df=1)
507-
507+
508508
# test for Value error on unfitted/empty vocabulary
509509
assert_raises(ValueError, cv.get_feature_names)
510-
511-
X = cv.fit_transform(ALL_FOOD_DOCS)
510+
511+
X = cv.fit_transform(ALL_FOOD_DOCS)
512512
n_samples, n_features = X.shape
513513
assert_equal(len(cv.vocabulary_), n_features)
514514

sklearn/feature_extraction/text.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -916,7 +916,7 @@ def transform(self, X, copy=True):
916916

917917
if self.use_idf:
918918
if not hasattr(self, "_idf_diag"):
919-
raise ValueError("idf vector not fitted")
919+
raise ValueError("idf vector not fitted")
920920
expected_n_features = self._idf_diag.shape[0]
921921
if n_features != expected_n_features:
922922
raise ValueError("Input has n_features=%d while the model"

sklearn/hmm.py

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -13,7 +13,6 @@
1313
"""
1414

1515
import string
16-
import warnings
1716

1817
import numpy as np
1918

sklearn/preprocessing.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -614,7 +614,7 @@ def _is_multilabel(y):
614614
# versions of Numpy might want to register ndarray as a Sequence
615615
return (not isinstance(y[0], np.ndarray) and isinstance(y[0], Sequence) and
616616
not isinstance(y[0], string_types) or
617-
_is_label_indicator_matrix(y))
617+
_is_label_indicator_matrix(y))
618618

619619

620620
class OneHotEncoder(BaseEstimator, TransformerMixin):

sklearn/tree/tests/test_tree.py

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -8,7 +8,6 @@
88
from numpy.testing import assert_almost_equal
99
from numpy.testing import assert_equal
1010
from nose.tools import assert_raises
11-
from nose.tools import assert_true
1211

1312
from sklearn import tree
1413
from sklearn import datasets

sklearn/utils/testing.py

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -12,7 +12,6 @@
1212

1313
import urllib2
1414
import scipy as sp
15-
from StringIO import StringIO
1615
from functools import wraps
1716

1817
import sklearn

0 commit comments

Comments (0)