From 9d82cb8ad96049ab63bfe49177ed6229bf284dc2 Mon Sep 17 00:00:00 2001
From: Ram Rachum
Date: Sun, 5 Jul 2020 11:23:57 +0300
Subject: [PATCH] MNT Use raise from in 18 modules

---
 benchmarks/bench_tsne_mnist.py                |  8 ++---
 .../machine_learning_map/parse_path.py        |  4 +--
 sklearn/_build_utils/__init__.py              |  4 +--
 sklearn/cluster/_agglomerative.py             |  5 ++--
 sklearn/cluster/_bicluster.py                 |  4 +--
 sklearn/compose/_column_transformer.py        | 10 ++++---
 sklearn/datasets/_samples_generator.py        | 21 ++++++-------
 sklearn/ensemble/_gb.py                       | 13 ++++----
 sklearn/ensemble/_weight_boosting.py          |  4 +--
 sklearn/externals/_arff.py                    | 30 +++++++++----------
 sklearn/externals/_lobpcg.py                  |  8 ++---
 sklearn/inspection/_partial_dependence.py     |  4 +--
 .../inspection/_plot/partial_dependence.py    | 12 ++++----
 sklearn/linear_model/_stochastic_gradient.py  | 12 ++++----
 sklearn/manifold/_locally_linear.py           | 17 +++++------
 sklearn/manifold/_spectral_embedding.py       |  4 +--
 16 files changed, 83 insertions(+), 77 deletions(-)

diff --git a/benchmarks/bench_tsne_mnist.py b/benchmarks/bench_tsne_mnist.py
index 8f58a3a41a7e3..1f1dc5143d177 100644
--- a/benchmarks/bench_tsne_mnist.py
+++ b/benchmarks/bench_tsne_mnist.py
@@ -106,7 +106,7 @@ def sanitize(filename):
     if args.bhtsne:
         try:
             from bhtsne.bhtsne import run_bh_tsne
-        except ImportError:
+        except ImportError as e:
             raise ImportError("""\
 If you want comparison with the reference implementation, build the binary
 from source (https://github.com/lvdmaaten/bhtsne) in the folder
@@ -117,7 +117,7 @@ def sanitize(filename):
 $ g++ sptree.cpp tsne.cpp tsne_main.cpp -o bh_tsne -O2
 $ touch __init__.py
 $ cd ..
-""")
+""") from e
 
         def bhtsne(X):
             """Wrapper for the reference lvdmaaten/bhtsne implementation."""
@@ -131,10 +131,10 @@ def bhtsne(X):
 
         try:
             from memory_profiler import profile
-        except ImportError:
+        except ImportError as e:
             raise ImportError("To run the benchmark with `--profile`, you "
                               "need to install `memory_profiler`. Please "
-                              "run `pip install memory_profiler`.")
+                              "run `pip install memory_profiler`.") from e
         methods = [(n, profile(m)) for n, m in methods]
 
     data_size = [100, 500, 1000, 5000, 10000]
diff --git a/doc/tutorial/machine_learning_map/parse_path.py b/doc/tutorial/machine_learning_map/parse_path.py
index 8d03c0e7629dc..770fd1481f53b 100644
--- a/doc/tutorial/machine_learning_map/parse_path.py
+++ b/doc/tutorial/machine_learning_map/parse_path.py
@@ -54,8 +54,8 @@ def Sequence(token):
 def convertToFloat(s, loc, toks):
     try:
         return float(toks[0])
-    except:
-        raise ParseException(loc, "invalid float format %s"%toks[0])
+    except BaseException as e:
+        raise ParseException(loc, "invalid float format %s" % toks[0]) from e
 
 exponent = CaselessLiteral("e")+Optional(sign)+Word(nums)
diff --git a/sklearn/_build_utils/__init__.py b/sklearn/_build_utils/__init__.py
index e76c06eac66a9..cf7ec241fc189 100644
--- a/sklearn/_build_utils/__init__.py
+++ b/sklearn/_build_utils/__init__.py
@@ -25,9 +25,9 @@ def _check_cython_version():
                CYTHON_MIN_VERSION)
     try:
         import Cython
-    except ModuleNotFoundError:
+    except ModuleNotFoundError as e:
         # Re-raise with more informative error message instead:
-        raise ModuleNotFoundError(message)
+        raise ModuleNotFoundError(message) from e
 
     if LooseVersion(Cython.__version__) < CYTHON_MIN_VERSION:
         message += (' The current version of Cython is {} installed in {}.'
diff --git a/sklearn/cluster/_agglomerative.py b/sklearn/cluster/_agglomerative.py
index fd9241e0b7267..98ca69ea3dd33 100644
--- a/sklearn/cluster/_agglomerative.py
+++ b/sklearn/cluster/_agglomerative.py
@@ -430,10 +430,11 @@ def linkage_tree(X, connectivity=None, n_clusters=None, linkage='complete',
                        'single': None}  # Single linkage is handled differently
     try:
         join_func = linkage_choices[linkage]
-    except KeyError:
+    except KeyError as e:
         raise ValueError(
             'Unknown linkage option, linkage should be one '
-            'of %s, but %s was given' % (linkage_choices.keys(), linkage))
+            'of %s, but %s was given' % (linkage_choices.keys(), linkage)
+        ) from e
 
     if affinity == 'cosine' and np.any(~np.any(X, axis=1)):
         raise ValueError(
diff --git a/sklearn/cluster/_bicluster.py b/sklearn/cluster/_bicluster.py
index f75bd8511bc05..d80463f211aba 100644
--- a/sklearn/cluster/_bicluster.py
+++ b/sklearn/cluster/_bicluster.py
@@ -466,11 +466,11 @@ def _check_parameters(self):
             r, c = self.n_clusters
             int(r)
             int(c)
-        except (ValueError, TypeError):
+        except (ValueError, TypeError) as e:
             raise ValueError("Incorrect parameter n_clusters has value:"
                              " {}. It should either be a single integer"
                              " or an iterable with two integers:"
-                             " (n_row_clusters, n_column_clusters)")
+                             " (n_row_clusters, n_column_clusters)") from e
         if self.n_components < 1:
             raise ValueError("Parameter n_components must be greater than 0,"
                              " but its value is {}".format(self.n_components))
diff --git a/sklearn/compose/_column_transformer.py b/sklearn/compose/_column_transformer.py
index 66c155f1f82b7..7d94330e1cca2 100644
--- a/sklearn/compose/_column_transformer.py
+++ b/sklearn/compose/_column_transformer.py
@@ -467,7 +467,7 @@ def _fit_transform(self, X, y, func, fitted=False):
                 self._iter(fitted=fitted, replace_strings=True), 1))
         except ValueError as e:
             if "Expected 2D array, got 1D array instead" in str(e):
-                raise ValueError(_ERR_MSG_1DCOLUMN)
+                raise ValueError(_ERR_MSG_1DCOLUMN) from e
             else:
                 raise
 
@@ -629,9 +629,11 @@ def _hstack(self, Xs):
                                 accept_sparse=True,
                                 force_all_finite=False)
                     for X in Xs]
-            except ValueError:
-                raise ValueError("For a sparse output, all columns should"
-                                 " be a numeric or convertible to a numeric.")
+            except ValueError as e:
+                raise ValueError(
+                    "For a sparse output, all columns should "
+                    "be a numeric or convertible to a numeric."
+                ) from e
 
             return sparse.hstack(converted_Xs).tocsr()
         else:
diff --git a/sklearn/datasets/_samples_generator.py b/sklearn/datasets/_samples_generator.py
index 694e2db645d72..f3c28e8a14129 100644
--- a/sklearn/datasets/_samples_generator.py
+++ b/sklearn/datasets/_samples_generator.py
@@ -650,9 +650,9 @@ def make_circles(n_samples=100, *, shuffle=True, noise=None, random_state=None,
     else:
         try:
             n_samples_out, n_samples_in = n_samples
-        except ValueError:
+        except ValueError as e:
             raise ValueError('`n_samples` can be either an int or '
-                             'a two-element tuple.')
+                             'a two-element tuple.') from e
 
     generator = check_random_state(random_state)
     # so as not to have the first point = last point, we set endpoint=False
@@ -715,9 +715,9 @@ def make_moons(n_samples=100, *, shuffle=True, noise=None, random_state=None):
     else:
         try:
             n_samples_out, n_samples_in = n_samples
-        except ValueError:
+        except ValueError as e:
             raise ValueError('`n_samples` can be either an int or '
-                             'a two-element tuple.')
+                             'a two-element tuple.') from e
 
     generator = check_random_state(random_state)
 
@@ -845,13 +845,14 @@ def make_blobs(n_samples=100, n_features=2, *, centers=None, cluster_std=1.0,
                                     size=(n_centers, n_features))
         try:
             assert len(centers) == n_centers
-        except TypeError:
+        except TypeError as e:
             raise ValueError("Parameter `centers` must be array-like. "
-                             "Got {!r} instead".format(centers))
-        except AssertionError:
-            raise ValueError("Length of `n_samples` not consistent"
-                             " with number of centers. Got n_samples = {} "
-                             "and centers = {}".format(n_samples, centers))
+                             "Got {!r} instead".format(centers)) from e
+        except AssertionError as e:
+            raise ValueError(
+                f"Length of `n_samples` not consistent with number of "
+                f"centers. Got n_samples = {n_samples} and centers = {centers}"
+            ) from e
     else:
         centers = check_array(centers)
         n_features = centers.shape[1]
diff --git a/sklearn/ensemble/_gb.py b/sklearn/ensemble/_gb.py
index e050a83e59f75..e0c658e5dcac9 100644
--- a/sklearn/ensemble/_gb.py
+++ b/sklearn/ensemble/_gb.py
@@ -456,8 +456,9 @@ def fit(self, X, y, sample_weight=None, monitor=None):
                     "weights.".format(self.init_.__class__.__name__))
                 try:
                     self.init_.fit(X, y, sample_weight=sample_weight)
-                except TypeError:  # regular estimator without SW support
-                    raise ValueError(msg)
+                except TypeError as e:
+                    # regular estimator without SW support
+                    raise ValueError(msg) from e
                 except ValueError as e:
                     if "pass parameters to specific steps of "\
                        "your pipeline using the "\
@@ -1219,9 +1220,9 @@ def predict_proba(self, X):
             return self.loss_._raw_prediction_to_proba(raw_predictions)
         except NotFittedError:
             raise
-        except AttributeError:
+        except AttributeError as e:
             raise AttributeError('loss=%r does not support predict_proba' %
-                                 self.loss)
+                                 self.loss) from e
 
     def predict_log_proba(self, X):
         """Predict class log-probabilities for X.
@@ -1270,9 +1271,9 @@ def staged_predict_proba(self, X):
                 yield self.loss_._raw_prediction_to_proba(raw_predictions)
         except NotFittedError:
             raise
-        except AttributeError:
+        except AttributeError as e:
             raise AttributeError('loss=%r does not support predict_proba' %
-                                 self.loss)
+                                 self.loss) from e
 
 
 class GradientBoostingRegressor(RegressorMixin, BaseGradientBoosting):
diff --git a/sklearn/ensemble/_weight_boosting.py b/sklearn/ensemble/_weight_boosting.py
index 01c47dba9860b..f0da44301de6b 100644
--- a/sklearn/ensemble/_weight_boosting.py
+++ b/sklearn/ensemble/_weight_boosting.py
@@ -255,11 +255,11 @@ def feature_importances_(self):
                        in zip(self.estimator_weights_, self.estimators_))
                    / norm)
 
-        except AttributeError:
+        except AttributeError as e:
             raise AttributeError(
                 "Unable to compute feature importances "
                 "since base_estimator does not have a "
-                "feature_importances_ attribute")
+                "feature_importances_ attribute") from e
 
 
 def _samme_proba(estimator, n_classes, X):
diff --git a/sklearn/externals/_arff.py b/sklearn/externals/_arff.py
index 015920d393a21..8330eec8adb87 100644
--- a/sklearn/externals/_arff.py
+++ b/sklearn/externals/_arff.py
@@ -265,8 +265,8 @@ def _escape_sub_callback(match):
     if len(s) == 2:
         try:
             return _ESCAPE_SUB_MAP[s]
-        except KeyError:
-            raise ValueError('Unsupported escape sequence: %s' % s)
+        except KeyError as e:
+            raise ValueError('Unsupported escape sequence: %s' % s) from e
     if s[1] == 'u':
         return unichr(int(s[2:], 16))
     else:
@@ -303,8 +303,8 @@ def _parse_values(s):
         # an ARFF syntax error in sparse data
         for match in _RE_SPARSE_KEY_VALUES.finditer(s):
             if not match.group(1):
-                raise BadLayout('Error parsing %r' % match.group())
-        raise BadLayout('Unknown parsing error')
+                raise BadLayout('Error parsing %r' % match.group()) from exc
+        raise BadLayout('Unknown parsing error') from exc
     else:
         # an ARFF syntax error
         for match in _RE_DENSE_VALUES.finditer(s):
@@ -449,8 +449,8 @@ def __init__(self, values):
     def __call__(self, value):
         try:
             return self.values[value]
-        except KeyError:
-            raise BadNominalValue(value)
+        except KeyError as e:
+            raise BadNominalValue(value) from e
 
 
 class NominalConversor(object):
@@ -498,7 +498,7 @@ def _decode_values(values, conversors):
                       in zip(conversors, values)]
         except ValueError as exc:
             if 'float: ' in str(exc):
-                raise BadNumericalValue()
+                raise BadNumericalValue from exc
         return values
 
     def encode_data(self, data, attributes):
@@ -557,11 +557,11 @@ def decode_rows(self, stream, conversors):
                           for key, value in zip(row_cols, values)]
             except ValueError as exc:
                 if 'float: ' in str(exc):
-                    raise BadNumericalValue()
+                    raise BadNumericalValue from exc
                 raise
-            except IndexError:
+            except IndexError as e:
                 # conversor out of range
-                raise BadDataFormat(row)
+                raise BadDataFormat(row) from e
 
             data.extend(values)
             rows.extend([i] * len(values))
@@ -617,11 +617,11 @@ def decode_rows(self, stream, conversors):
                           for key, value in values.items()}
             except ValueError as exc:
                 if 'float: ' in str(exc):
-                    raise BadNumericalValue()
+                    raise BadNumericalValue from exc
                 raise
-            except IndexError:
+            except IndexError as e:
                 # conversor out of range
-                raise BadDataFormat(row)
+                raise BadDataFormat(row) from e
 
     def encode_data(self, data, attributes):
         current_row = 0
@@ -772,8 +772,8 @@ def _decode_attribute(self, s):
         if _RE_TYPE_NOMINAL.match(type_):
             try:
                 type_ = _parse_values(type_.strip('{} '))
-            except Exception:
-                raise BadAttributeType()
+            except Exception as e:
+                raise BadAttributeType from e
 
             if isinstance(type_, dict):
                 raise BadAttributeType()
diff --git a/sklearn/externals/_lobpcg.py b/sklearn/externals/_lobpcg.py
index 4e0d0ad19b753..fe9f2e1c4aa93 100644
--- a/sklearn/externals/_lobpcg.py
+++ b/sklearn/externals/_lobpcg.py
@@ -384,8 +384,8 @@ def lobpcg(A, X,
         try:
             # gramYBY is a Cholesky factor from now on...
             gramYBY = cho_factor(gramYBY)
-        except LinAlgError:
-            raise ValueError('cannot handle linearly dependent constraints')
+        except LinAlgError as e:
+            raise ValueError('cannot handle linearly dependent constraints') from e
 
         _applyConstraints(blockVectorX, gramYBY, blockVectorBY, blockVectorY)
 
@@ -610,8 +610,8 @@ def _handle_gramA_gramB_verbosity(gramA, gramB):
         try:
             _lambda, eigBlockVector = eigh(gramA, gramB, check_finite=False)
-        except LinAlgError:
-            raise ValueError('eigh has failed in lobpcg iterations')
+        except LinAlgError as e:
+            raise ValueError('eigh has failed in lobpcg iterations') from e
 
         ii = _get_indx(_lambda, sizeX, largest)
         if verbosityLevel > 10:
diff --git a/sklearn/inspection/_partial_dependence.py b/sklearn/inspection/_partial_dependence.py
index f75fa71fe0f87..b24c7718b93ac 100644
--- a/sklearn/inspection/_partial_dependence.py
+++ b/sklearn/inspection/_partial_dependence.py
@@ -164,9 +164,9 @@ def _partial_dependence_brute(est, grid, features, X, response_method):
             predictions.append(pred)
             # average over samples
             averaged_predictions.append(np.mean(pred, axis=0))
-    except NotFittedError:
+    except NotFittedError as e:
         raise ValueError(
-            "'estimator' parameter must be a fitted estimator")
+            "'estimator' parameter must be a fitted estimator") from e
 
     n_samples = X.shape[0]
 
diff --git a/sklearn/inspection/_plot/partial_dependence.py b/sklearn/inspection/_plot/partial_dependence.py
index eea705e1c90a4..58bdbec82b891 100644
--- a/sklearn/inspection/_plot/partial_dependence.py
+++ b/sklearn/inspection/_plot/partial_dependence.py
@@ -269,8 +269,8 @@ def convert_feature(fx):
         if isinstance(fx, str):
             try:
                 fx = feature_names.index(fx)
-            except ValueError:
-                raise ValueError('Feature %s not in feature_names' % fx)
+            except ValueError as e:
+                raise ValueError('Feature %s not in feature_names' % fx) from e
         return int(fx)
 
     # convert features into a seq of int tuples
@@ -280,9 +280,11 @@ def convert_feature(fx):
             fxs = (fxs,)
         try:
             fxs = tuple(convert_feature(fx) for fx in fxs)
-        except TypeError:
-            raise ValueError('Each entry in features must be either an int, '
-                             'a string, or an iterable of size at most 2.')
+        except TypeError as e:
+            raise ValueError(
+                'Each entry in features must be either an int, '
+                'a string, or an iterable of size at most 2.'
+            ) from e
         if not 1 <= np.size(fxs) <= 2:
             raise ValueError('Each entry in features must be either an int, '
                              'a string, or an iterable of size at most 2.')
diff --git a/sklearn/linear_model/_stochastic_gradient.py b/sklearn/linear_model/_stochastic_gradient.py
index cb311bb641b22..f48f7e8b1514e 100644
--- a/sklearn/linear_model/_stochastic_gradient.py
+++ b/sklearn/linear_model/_stochastic_gradient.py
@@ -163,22 +163,22 @@ def _get_loss_function(self, loss):
                         'squared_epsilon_insensitive'):
                 args = (self.epsilon, )
             return loss_class(*args)
-        except KeyError:
-            raise ValueError("The loss %s is not supported. " % loss)
+        except KeyError as e:
+            raise ValueError("The loss %s is not supported. " % loss) from e
 
     def _get_learning_rate_type(self, learning_rate):
         try:
             return LEARNING_RATE_TYPES[learning_rate]
-        except KeyError:
+        except KeyError as e:
             raise ValueError("learning rate %s "
-                             "is not supported. " % learning_rate)
" % learning_rate) from e def _get_penalty_type(self, penalty): penalty = str(penalty).lower() try: return PENALTY_TYPES[penalty] - except KeyError: - raise ValueError("Penalty %s is not supported. " % penalty) + except KeyError as e: + raise ValueError("Penalty %s is not supported. " % penalty) from e def _allocate_parameter_mem(self, n_classes, n_features, coef_init=None, intercept_init=None): diff --git a/sklearn/manifold/_locally_linear.py b/sklearn/manifold/_locally_linear.py index 6feff24bd7241..a2d3e63060413 100644 --- a/sklearn/manifold/_locally_linear.py +++ b/sklearn/manifold/_locally_linear.py @@ -169,15 +169,14 @@ def null_space(M, k, k_skip=1, eigen_solver='arpack', tol=1E-6, max_iter=100, eigen_values, eigen_vectors = eigsh(M, k + k_skip, sigma=0.0, tol=tol, maxiter=max_iter, v0=v0) - except RuntimeError as msg: - raise ValueError("Error in determining null-space with ARPACK. " - "Error message: '%s'. " - "Note that eigen_solver='arpack' can fail when " - "the weight matrix is singular or otherwise " - "ill-behaved. In that case, eigen_solver='dense' " - "is recommended. See online documentation for " - "more information." - % msg) + except RuntimeError as e: + raise ValueError( + "Error in determining null-space with ARPACK. Error message: " + "'%s'. Note that eigen_solver='arpack' can fail when the " + "weight matrix is singular or otherwise ill-behaved. In that " + "case, eigen_solver='dense' is recommended. See online " + "documentation for more information." % e + ) from e return eigen_vectors[:, k_skip:], np.sum(eigen_values[k_skip:]) elif eigen_solver == 'dense': diff --git a/sklearn/manifold/_spectral_embedding.py b/sklearn/manifold/_spectral_embedding.py index 069db608797a3..51beb85ec970c 100644 --- a/sklearn/manifold/_spectral_embedding.py +++ b/sklearn/manifold/_spectral_embedding.py @@ -216,10 +216,10 @@ def spectral_embedding(adjacency, *, n_components=8, eigen_solver=None, try: from pyamg import smoothed_aggregation_solver - except ImportError: + except ImportError as e: if eigen_solver == "amg": raise ValueError("The eigen_solver was set to 'amg', but pyamg is " - "not available.") + "not available.") from e if eigen_solver is None: eigen_solver = 'arpack'