MNT Use raise from in 19 modules by cool-RR · Pull Request #17835 · scikit-learn/scikit-learn · GitHub

MNT Use raise from in 19 modules #17835


Merged
1 commit merged on Aug 25, 2020
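The change itself is mechanical across the 19 modules: inside an `except` block, `raise NewError(...)` becomes `raise NewError(...) from e`, which records the caught exception as `__cause__` and makes the traceback read "The above exception was the direct cause of the following exception:" instead of the implicit "During handling of the above exception, another exception occurred:". A minimal standalone sketch of the idiom (not taken from the diff):

    registry = {'square': lambda x: x * x}

    def get_op(name):
        try:
            return registry[name]
        except KeyError as e:
            # Explicit chaining: the KeyError is attached as __cause__
            # and printed first in the traceback.
            raise ValueError('Unknown op %r' % name) from e

    try:
        get_op('cube')
    except ValueError as err:
        assert isinstance(err.__cause__, KeyError)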
8 changes: 4 additions & 4 deletions benchmarks/bench_tsne_mnist.py
@@ -106,7 +106,7 @@ def sanitize(filename):
     if args.bhtsne:
         try:
             from bhtsne.bhtsne import run_bh_tsne
-        except ImportError:
+        except ImportError as e:
             raise ImportError("""\
 If you want comparison with the reference implementation, build the
 binary from source (https://github.com/lvdmaaten/bhtsne) in the folder
@@ -117,7 +117,7 @@ def sanitize(filename):
     $ g++ sptree.cpp tsne.cpp tsne_main.cpp -o bh_tsne -O2
     $ touch __init__.py
     $ cd ..
-""")
+""") from e

     def bhtsne(X):
         """Wrapper for the reference lvdmaaten/bhtsne implementation."""
@@ -131,10 +131,10 @@ def bhtsne(X):

     try:
         from memory_profiler import profile
-    except ImportError:
+    except ImportError as e:
         raise ImportError("To run the benchmark with `--profile`, you "
                           "need to install `memory_profiler`. Please "
-                          "run `pip install memory_profiler`.")
+                          "run `pip install memory_profiler`.") from e
     methods = [(n, profile(m)) for n, m in methods]

     data_size = [100, 500, 1000, 5000, 10000]
4 changes: 2 additions & 2 deletions doc/tutorial/machine_learning_map/parse_path.py
@@ -54,8 +54,8 @@ def Sequence(token):
 def convertToFloat(s, loc, toks):
     try:
         return float(toks[0])
-    except:
-        raise ParseException(loc, "invalid float format %s"%toks[0])
+    except BaseException as e:
+        raise ParseException(loc, "invalid float format %s" % toks[0]) from e

 exponent = CaselessLiteral("e")+Optional(sign)+Word(nums)
4 changes: 2 additions & 2 deletions sklearn/_build_utils/__init__.py
@@ -25,9 +25,9 @@ def _check_cython_version():
                CYTHON_MIN_VERSION)
     try:
         import Cython
-    except ModuleNotFoundError:
+    except ModuleNotFoundError as e:
         # Re-raise with more informative error message instead:
-        raise ModuleNotFoundError(message)
+        raise ModuleNotFoundError(message) from e

     if LooseVersion(Cython.__version__) < CYTHON_MIN_VERSION:
         message += (' The current version of Cython is {} installed in {}.'
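The `_build_utils` hunk above re-raises the same exception type with a more helpful message; `from e` keeps the terse original import error attached as the cause. A standalone sketch of that idiom, with illustrative names rather than scikit-learn's actual helpers:

    MIN_VERSION = '0.28.5'

    def check_cython():
        message = ('Please install Cython with a version >= {} in order '
                   'to build from source.'.format(MIN_VERSION))
        try:
            import Cython
        except ModuleNotFoundError as e:
            # Same type, richer message; the original failure stays
            # visible as the explicit cause in the traceback.
            raise ModuleNotFoundError(message) from e
        return Cython.__version__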
5 changes: 3 additions & 2 deletions sklearn/cluster/_agglomerative.py
@@ -430,10 +430,11 @@ def linkage_tree(X, connectivity=None, n_clusters=None, linkage='complete',
                        'single': None}  # Single linkage is handled differently
     try:
         join_func = linkage_choices[linkage]
-    except KeyError:
+    except KeyError as e:
         raise ValueError(
             'Unknown linkage option, linkage should be one '
-            'of %s, but %s was given' % (linkage_choices.keys(), linkage))
+            'of %s, but %s was given' % (linkage_choices.keys(), linkage)
+        ) from e

     if affinity == 'cosine' and np.any(~np.any(X, axis=1)):
         raise ValueError(
4 changes: 2 additions & 2 deletions sklearn/cluster/_bicluster.py
@@ -466,11 +466,11 @@ def _check_parameters(self):
             r, c = self.n_clusters
             int(r)
             int(c)
-        except (ValueError, TypeError):
+        except (ValueError, TypeError) as e:
             raise ValueError("Incorrect parameter n_clusters has value:"
                              " {}. It should either be a single integer"
                              " or an iterable with two integers:"
-                             " (n_row_clusters, n_column_clusters)")
+                             " (n_row_clusters, n_column_clusters)") from e
         if self.n_components < 1:
             raise ValueError("Parameter n_components must be greater than 0,"
                              " but its value is {}".format(self.n_components))
10 changes: 6 additions & 4 deletions sklearn/compose/_column_transformer.py
@@ -467,7 +467,7 @@ def _fit_transform(self, X, y, func, fitted=False):
                 self._iter(fitted=fitted, replace_strings=True), 1))
         except ValueError as e:
             if "Expected 2D array, got 1D array instead" in str(e):
-                raise ValueError(_ERR_MSG_1DCOLUMN)
+                raise ValueError(_ERR_MSG_1DCOLUMN) from e
             else:
                 raise

@@ -629,9 +629,11 @@ def _hstack(self, Xs):
                                       accept_sparse=True,
                                       force_all_finite=False)
                          for X in Xs]
-            except ValueError:
-                raise ValueError("For a sparse output, all columns should"
-                                 " be a numeric or convertible to a numeric.")
+            except ValueError as e:
+                raise ValueError(
+                    "For a sparse output, all columns should "
+                    "be a numeric or convertible to a numeric."
+                ) from e

             return sparse.hstack(converted_Xs).tocsr()
         else:
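The `_fit_transform` hunk shows the selective variant of the pattern: a ValueError whose message is recognized gets re-wrapped (now with `from e`), while anything else propagates unchanged via a bare `raise`. A minimal sketch of that shape, with an illustrative helper and messages:

    import numpy as np

    def check_2d(col):
        arr = np.asarray(col)
        if arr.ndim != 2:
            raise ValueError('Expected 2D array, got %dD array instead' % arr.ndim)
        return arr

    def transform_all(columns):
        try:
            return [check_2d(c) for c in columns]
        except ValueError as e:
            if 'Expected 2D array' in str(e):
                # Known failure mode: re-wrap with a friendlier message,
                # keeping the original exception as the explicit cause.
                raise ValueError('Each column must be 2D; reshape a single '
                                 'column with reshape(-1, 1).') from e
            # Any other ValueError propagates unchanged.
            raise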
21 changes: 11 additions & 10 deletions sklearn/datasets/_samples_generator.py
@@ -650,9 +650,9 @@ def make_circles(n_samples=100, *, shuffle=True, noise=None, random_state=None,
     else:
         try:
             n_samples_out, n_samples_in = n_samples
-        except ValueError:
+        except ValueError as e:
             raise ValueError('`n_samples` can be either an int or '
-                             'a two-element tuple.')
+                             'a two-element tuple.') from e

     generator = check_random_state(random_state)
     # so as not to have the first point = last point, we set endpoint=False
@@ -715,9 +715,9 @@ def make_moons(n_samples=100, *, shuffle=True, noise=None, random_state=None):
     else:
         try:
             n_samples_out, n_samples_in = n_samples
-        except ValueError:
+        except ValueError as e:
             raise ValueError('`n_samples` can be either an int or '
-                             'a two-element tuple.')
+                             'a two-element tuple.') from e

     generator = check_random_state(random_state)

@@ -845,13 +845,14 @@ def make_blobs(n_samples=100, n_features=2, *, centers=None, cluster_std=1.0,
                                          size=(n_centers, n_features))
         try:
             assert len(centers) == n_centers
-        except TypeError:
+        except TypeError as e:
             raise ValueError("Parameter `centers` must be array-like. "
-                             "Got {!r} instead".format(centers))
-        except AssertionError:
-            raise ValueError("Length of `n_samples` not consistent"
-                             " with number of centers. Got n_samples = {} "
-                             "and centers = {}".format(n_samples, centers))
+                             "Got {!r} instead".format(centers)) from e
+        except AssertionError as e:
+            raise ValueError(
+                f"Length of `n_samples` not consistent with number of "
+                f"centers. Got n_samples = {n_samples} and centers = {centers}"
+            ) from e
         else:
             centers = check_array(centers)
             n_features = centers.shape[1]
13 changes: 7 additions & 6 deletions sklearn/ensemble/_gb.py
@@ -456,8 +456,9 @@ def fit(self, X, y, sample_weight=None, monitor=None):
                            "weights.".format(self.init_.__class__.__name__))
                 try:
                     self.init_.fit(X, y, sample_weight=sample_weight)
-                except TypeError:  # regular estimator without SW support
-                    raise ValueError(msg)
+                except TypeError as e:
+                    # regular estimator without SW support
+                    raise ValueError(msg) from e
                 except ValueError as e:
                     if "pass parameters to specific steps of "\
                        "your pipeline using the "\
@@ -1219,9 +1220,9 @@ def predict_proba(self, X):
             return self.loss_._raw_prediction_to_proba(raw_predictions)
         except NotFittedError:
             raise
-        except AttributeError:
+        except AttributeError as e:
             raise AttributeError('loss=%r does not support predict_proba' %
-                                 self.loss)
+                                 self.loss) from e

     def predict_log_proba(self, X):
         """Predict class log-probabilities for X.
@@ -1270,9 +1271,9 @@ def staged_predict_proba(self, X):
             yield self.loss_._raw_prediction_to_proba(raw_predictions)
         except NotFittedError:
             raise
-        except AttributeError:
+        except AttributeError as e:
             raise AttributeError('loss=%r does not support predict_proba' %
-                                 self.loss)
+                                 self.loss) from e


 class GradientBoostingRegressor(RegressorMixin, BaseGradientBoosting):
4 changes: 2 additions & 2 deletions sklearn/ensemble/_weight_boosting.py
@@ -255,11 +255,11 @@ def feature_importances_(self):
                        in zip(self.estimator_weights_, self.estimators_))
                     / norm)

-        except AttributeError:
+        except AttributeError as e:
             raise AttributeError(
                 "Unable to compute feature importances "
                 "since base_estimator does not have a "
-                "feature_importances_ attribute")
+                "feature_importances_ attribute") from e


 def _samme_proba(estimator, n_classes, X):
30 changes: 15 additions & 15 deletions sklearn/externals/_arff.py
@@ -265,8 +265,8 @@ def _escape_sub_callback(match):
     if len(s) == 2:
         try:
             return _ESCAPE_SUB_MAP[s]
-        except KeyError:
-            raise ValueError('Unsupported escape sequence: %s' % s)
+        except KeyError as e:
+            raise ValueError('Unsupported escape sequence: %s' % s) from e
     if s[1] == 'u':
         return unichr(int(s[2:], 16))
     else:
@@ -303,8 +303,8 @@ def _parse_values(s):
             # an ARFF syntax error in sparse data
             for match in _RE_SPARSE_KEY_VALUES.finditer(s):
                 if not match.group(1):
-                    raise BadLayout('Error parsing %r' % match.group())
-            raise BadLayout('Unknown parsing error')
+                    raise BadLayout('Error parsing %r' % match.group()) from exc
+            raise BadLayout('Unknown parsing error') from exc
         else:
             # an ARFF syntax error
             for match in _RE_DENSE_VALUES.finditer(s):
@@ -449,8 +449,8 @@ def __init__(self, values):
     def __call__(self, value):
         try:
             return self.values[value]
-        except KeyError:
-            raise BadNominalValue(value)
+        except KeyError as e:
+            raise BadNominalValue(value) from e


 class NominalConversor(object):
@@ -498,7 +498,7 @@ def _decode_values(values, conversors):
                       in zip(conversors, values)]
         except ValueError as exc:
             if 'float: ' in str(exc):
-                raise BadNumericalValue()
+                raise BadNumericalValue from exc
         return values

     def encode_data(self, data, attributes):
@@ -557,11 +557,11 @@ def decode_rows(self, stream, conversors):
                           for key, value in zip(row_cols, values)]
             except ValueError as exc:
                 if 'float: ' in str(exc):
-                    raise BadNumericalValue()
+                    raise BadNumericalValue from exc
                 raise
-            except IndexError:
+            except IndexError as e:
                 # conversor out of range
-                raise BadDataFormat(row)
+                raise BadDataFormat(row) from e

             data.extend(values)
             rows.extend([i] * len(values))
@@ -617,11 +617,11 @@ def decode_rows(self, stream, conversors):
                           for key, value in values.items()}
             except ValueError as exc:
                 if 'float: ' in str(exc):
-                    raise BadNumericalValue()
+                    raise BadNumericalValue from exc
                 raise
-            except IndexError:
+            except IndexError as e:
                 # conversor out of range
-                raise BadDataFormat(row)
+                raise BadDataFormat(row) from e

     def encode_data(self, data, attributes):
         current_row = 0
@@ -772,8 +772,8 @@ def _decode_attribute(self, s):
         if _RE_TYPE_NOMINAL.match(type_):
             try:
                 type_ = _parse_values(type_.strip('{} '))
-            except Exception:
-                raise BadAttributeType()
+            except Exception as e:
+                raise BadAttributeType from e
             if isinstance(type_, dict):
                 raise BadAttributeType()
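Several `_arff.py` hunks chain from a bare class, e.g. `raise BadNumericalValue from exc`: when the operand of `raise` is an exception class rather than an instance, Python instantiates it with no arguments before attaching the cause. A sketch of that standard language behavior, outside the PR:

    class BadNumericalValue(Exception):
        pass

    def parse(value):
        try:
            return float(value)
        except ValueError as exc:
            # Raising the class (no parentheses) instantiates it with no
            # arguments, then sets exc as its __cause__.
            raise BadNumericalValue from exc

    try:
        parse('not-a-number')
    except BadNumericalValue as err:
        assert type(err.__cause__) is ValueError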
8 changes: 4 additions & 4 deletions sklearn/externals/_lobpcg.py
@@ -384,8 +384,8 @@ def lobpcg(A, X,
     try:
         # gramYBY is a Cholesky factor from now on...
         gramYBY = cho_factor(gramYBY)
-    except LinAlgError:
-        raise ValueError('cannot handle linearly dependent constraints')
+    except LinAlgError as e:
+        raise ValueError('cannot handle linearly dependent constraints') from e

     _applyConstraints(blockVectorX, gramYBY, blockVectorBY, blockVectorY)

@@ -610,8 +610,8 @@ def _handle_gramA_gramB_verbosity(gramA, gramB):
         try:
             _lambda, eigBlockVector = eigh(gramA, gramB,
                                            check_finite=False)
-        except LinAlgError:
-            raise ValueError('eigh has failed in lobpcg iterations')
+        except LinAlgError as e:
+            raise ValueError('eigh has failed in lobpcg iterations') from e

         ii = _get_indx(_lambda, sizeX, largest)
         if verbosityLevel > 10:
4 changes: 2 additions & 2 deletions sklearn/inspection/_partial_dependence.py
@@ -164,9 +164,9 @@ def _partial_dependence_brute(est, grid, features, X, response_method):
             predictions.append(pred)
             # average over samples
             averaged_predictions.append(np.mean(pred, axis=0))
-    except NotFittedError:
+    except NotFittedError as e:
         raise ValueError(
-            "'estimator' parameter must be a fitted estimator")
+            "'estimator' parameter must be a fitted estimator") from e

     n_samples = X.shape[0]
12 changes: 7 additions & 5 deletions sklearn/inspection/_plot/partial_dependence.py
@@ -269,8 +269,8 @@ def convert_feature(fx):
         if isinstance(fx, str):
             try:
                 fx = feature_names.index(fx)
-            except ValueError:
-                raise ValueError('Feature %s not in feature_names' % fx)
+            except ValueError as e:
+                raise ValueError('Feature %s not in feature_names' % fx) from e
         return int(fx)

     # convert features into a seq of int tuples
@@ -280,9 +280,11 @@ def convert_feature(fx):
             fxs = (fxs,)
         try:
             fxs = tuple(convert_feature(fx) for fx in fxs)
-        except TypeError:
-            raise ValueError('Each entry in features must be either an int, '
-                             'a string, or an iterable of size at most 2.')
+        except TypeError as e:
+            raise ValueError(
+                'Each entry in features must be either an int, '
+                'a string, or an iterable of size at most 2.'
+            ) from e
         if not 1 <= np.size(fxs) <= 2:
             raise ValueError('Each entry in features must be either an int, '
                              'a string, or an iterable of size at most 2.')
12 changes: 6 additions & 6 deletions sklearn/linear_model/_stochastic_gradient.py
@@ -163,22 +163,22 @@ def _get_loss_function(self, loss):
                     'squared_epsilon_insensitive'):
                 args = (self.epsilon, )
             return loss_class(*args)
-        except KeyError:
-            raise ValueError("The loss %s is not supported. " % loss)
+        except KeyError as e:
+            raise ValueError("The loss %s is not supported. " % loss) from e

     def _get_learning_rate_type(self, learning_rate):
         try:
             return LEARNING_RATE_TYPES[learning_rate]
-        except KeyError:
+        except KeyError as e:
             raise ValueError("learning rate %s "
-                             "is not supported. " % learning_rate)
+                             "is not supported. " % learning_rate) from e

     def _get_penalty_type(self, penalty):
         penalty = str(penalty).lower()
         try:
             return PENALTY_TYPES[penalty]
-        except KeyError:
-            raise ValueError("Penalty %s is not supported. " % penalty)
+        except KeyError as e:
+            raise ValueError("Penalty %s is not supported. " % penalty) from e

     def _allocate_parameter_mem(self, n_classes, n_features, coef_init=None,
                                 intercept_init=None):
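One practical payoff of explicit chaining is that tests can assert on the cause instead of parsing traceback text. A hedged sketch, assuming pytest; this test is not part of the PR, and the lookup table here is a stand-in for the module's PENALTY_TYPES:

    import pytest

    PENALTY_TYPES = {'none': 0, 'l2': 2, 'l1': 1, 'elasticnet': 3}

    def get_penalty_type(penalty):
        try:
            return PENALTY_TYPES[str(penalty).lower()]
        except KeyError as e:
            raise ValueError('Penalty %s is not supported. ' % penalty) from e

    def test_unknown_penalty_chains_keyerror():
        with pytest.raises(ValueError) as excinfo:
            get_penalty_type('l3')
        # The original lookup failure is preserved as the explicit cause.
        assert isinstance(excinfo.value.__cause__, KeyError)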
17 changes: 8 additions & 9 deletions sklearn/manifold/_locally_linear.py
@@ -169,15 +169,14 @@ def null_space(M, k, k_skip=1, eigen_solver='arpack', tol=1E-6, max_iter=100,
             eigen_values, eigen_vectors = eigsh(M, k + k_skip, sigma=0.0,
                                                 tol=tol, maxiter=max_iter,
                                                 v0=v0)
-        except RuntimeError as msg:
-            raise ValueError("Error in determining null-space with ARPACK. "
-                             "Error message: '%s'. "
-                             "Note that eigen_solver='arpack' can fail when "
-                             "the weight matrix is singular or otherwise "
-                             "ill-behaved. In that case, eigen_solver='dense' "
-                             "is recommended. See online documentation for "
-                             "more information."
-                             % msg)
+        except RuntimeError as e:
+            raise ValueError(
+                "Error in determining null-space with ARPACK. Error message: "
+                "'%s'. Note that eigen_solver='arpack' can fail when the "
+                "weight matrix is singular or otherwise ill-behaved. In that "
+                "case, eigen_solver='dense' is recommended. See online "
+                "documentation for more information." % e
+            ) from e

         return eigen_vectors[:, k_skip:], np.sum(eigen_values[k_skip:])
     elif eigen_solver == 'dense':