Apply assorted ruff/flake8-simplify rules (SIM) (#10364) · pydata/xarray@245be4b · GitHub

Commit 245be4b

Apply assorted ruff/flake8-simplify rules (SIM) (#10364)
* Apply ruff/flake8-simplify rule SIM102: Use a single `if` statement instead of nested `if` statements
* Apply ruff/flake8-simplify rule SIM102: Use a single `if` statement instead of nested `if` statements. While the `if` statement may become more complex, the following lines get unindented. I feel this improves readability in most cases. Of course, the alternative would be to let the programmer decide about readability.
* Apply ruff/flake8-simplify rule SIM105: Use `contextlib.suppress(OutOfBoundsDatetime, ValueError)` instead of `try`-`except`-`pass`. Sometimes this adds an import (contextlib). Is it worth it?
* Apply ruff/flake8-simplify rule SIM114: Combine `if` branches using logical `or` operator
* Apply ruff/flake8-simplify rule SIM201: Use `... != ...` instead of `not ... == ...`
* Apply ruff/flake8-simplify rule SIM210: Remove unnecessary `True if ... else False`
* Apply ruff/flake8-simplify rule SIM211: Use `not ...` instead of `False if ... else True`
* Apply ruff/flake8-simplify rule SIM910: Use `.get(...)` instead of `.get(..., None)`
1 parent f7b0bd8 commit 245be4b

30 files changed (+166 / -201 lines)
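SIM211 is the one rule from the commit message with no hunk visible on this page. A minimal, hypothetical sketch of what it rewrites (invented example, not xarray code):

```python
# SIM211: negate the condition directly instead of `False if ... else True`.
descending = True

# before: ascending = False if descending else True
ascending = not descending
```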

xarray/backends/h5netcdf_.py

Lines changed: 10 additions & 11 deletions
@@ -68,17 +68,16 @@ def _read_attributes(h5netcdf_var):
     # bytes attributes to strings
     attrs = {}
     for k, v in h5netcdf_var.attrs.items():
-        if k not in ["_FillValue", "missing_value"]:
-            if isinstance(v, bytes):
-                try:
-                    v = v.decode("utf-8")
-                except UnicodeDecodeError:
-                    emit_user_level_warning(
-                        f"'utf-8' codec can't decode bytes for attribute "
-                        f"{k!r} of h5netcdf object {h5netcdf_var.name!r}, "
-                        f"returning bytes undecoded.",
-                        UnicodeWarning,
-                    )
+        if k not in ["_FillValue", "missing_value"] and isinstance(v, bytes):
+            try:
+                v = v.decode("utf-8")
+            except UnicodeDecodeError:
+                emit_user_level_warning(
+                    f"'utf-8' codec can't decode bytes for attribute "
+                    f"{k!r} of h5netcdf object {h5netcdf_var.name!r}, "
+                    f"returning bytes undecoded.",
+                    UnicodeWarning,
+                )
         attrs[k] = v
     return attrs
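The SIM102 pattern in isolation, as a hypothetical sketch with invented names: the nested conditions merge with `and`, and the body loses one level of indentation.

```python
# Hypothetical SIM102 example: two nested ifs become a single condition.
attrs = {"title": b"ocean", "_FillValue": b"\x00"}

for k, v in list(attrs.items()):
    # before:
    #     if k != "_FillValue":
    #         if isinstance(v, bytes):
    #             attrs[k] = v.decode("utf-8")
    if k != "_FillValue" and isinstance(v, bytes):
        attrs[k] = v.decode("utf-8")
```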

xarray/backends/locks.py

Lines changed: 1 addition & 3 deletions
@@ -118,9 +118,7 @@ def _get_lock_maker(scheduler=None):
     dask.utils.get_scheduler_lock
     """
 
-    if scheduler is None:
-        return _get_threaded_lock
-    elif scheduler == "threaded":
+    if scheduler is None or scheduler == "threaded":
         return _get_threaded_lock
     elif scheduler == "multiprocessing":
         return _get_multiprocessing_lock
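SIM114 in isolation, a hedged sketch with invented names: two branches with identical bodies collapse into one condition joined by `or`.

```python
# Hypothetical SIM114 example: identical branch bodies merged with `or`.
def lock_kind(scheduler=None):
    # before:
    #     if scheduler is None:
    #         return "threaded"
    #     elif scheduler == "threaded":
    #         return "threaded"
    if scheduler is None or scheduler == "threaded":
        return "threaded"
    if scheduler == "multiprocessing":
        return "multiprocessing"
    raise KeyError(f"unrecognized scheduler: {scheduler!r}")
```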

xarray/backends/netcdf3.py

Lines changed: 4 additions & 5 deletions
@@ -111,11 +111,10 @@ def _maybe_prepare_times(var):
     data = var.data
     if data.dtype.kind in "iu":
         units = var.attrs.get("units", None)
-        if units is not None:
-            if coding.variables._is_time_like(units):
-                mask = data == np.iinfo(np.int64).min
-                if mask.any():
-                    data = np.where(mask, var.attrs.get("_FillValue", np.nan), data)
+        if units is not None and coding.variables._is_time_like(units):
+            mask = data == np.iinfo(np.int64).min
+            if mask.any():
+                data = np.where(mask, var.attrs.get("_FillValue", np.nan), data)
     return data

xarray/backends/zarr.py

Lines changed: 2 additions & 2 deletions
@@ -562,7 +562,7 @@ def _validate_datatypes_for_zarr_append(vname, existing_var, new_var):
         # in the dataset, and with dtypes which are not known to be easy-to-append, necessitate
         # exact dtype equality, as checked below.
         pass
-    elif not new_var.dtype == existing_var.dtype:
+    elif new_var.dtype != existing_var.dtype:
         raise ValueError(
             f"Mismatched dtypes for variable {vname} between Zarr store on disk "
             f"and dataset to append. Store has dtype {existing_var.dtype} but "
@@ -1233,7 +1233,7 @@ def set_variables(self, variables, check_encoding_set, writer, unlimited_dims=None):
         else:
             encoded_attrs[DIMENSION_KEY] = dims
 
-        encoding["overwrite"] = True if self._mode == "w" else False
+        encoding["overwrite"] = self._mode == "w"
 
         zarr_array = self._create_new_array(
             name=name,
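Both zarr.py changes in miniature, a hypothetical sketch using plain numpy dtypes rather than Zarr variables:

```python
# Hypothetical sketch of SIM201 and SIM210.
import numpy as np

existing, new = np.dtype("float32"), np.dtype("float64")

# SIM201: write `!=` instead of negating an equality test.
mismatched = new != existing  # before: not new == existing

# SIM210: a comparison already yields a bool, so the ternary is redundant.
mode = "w"
overwrite = mode == "w"  # before: True if mode == "w" else False
```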

xarray/coding/frequencies.py

Lines changed: 1 addition & 1 deletion
@@ -137,7 +137,7 @@ def get_freq(self):
             return self._infer_daily_rule()
         # There is no possible intraday frequency with a non-unique delta
         # Different from pandas: we don't need to manage DST and business offsets in cftime
-        elif not len(self.deltas) == 1:
+        elif len(self.deltas) != 1:
             return None
 
         if _is_multiple(delta, _ONE_HOUR):

xarray/coding/times.py

Lines changed: 10 additions & 12 deletions
@@ -1,5 +1,6 @@
 from __future__ import annotations
 
+import contextlib
 import re
 import warnings
 from collections.abc import Callable, Hashable
@@ -429,12 +430,11 @@ def _check_date_is_after_shift(
     # if we are outside the well-defined date range
     # proleptic_gregorian and standard/gregorian are only equivalent
     # if reference date and date range is >= 1582-10-15
-    if calendar != "proleptic_gregorian":
-        if date < type(date)(1582, 10, 15):
-            raise OutOfBoundsDatetime(
-                f"Dates before 1582-10-15 cannot be decoded "
-                f"with pandas using {calendar!r} calendar: {date}"
-            )
+    if calendar != "proleptic_gregorian" and date < type(date)(1582, 10, 15):
+        raise OutOfBoundsDatetime(
+            f"Dates before 1582-10-15 cannot be decoded "
+            f"with pandas using {calendar!r} calendar: {date}"
+        )
@@ -929,12 +929,10 @@ def _cleanup_netcdf_time_units(units: str) -> str:
     time_units = time_units.lower()
     if not time_units.endswith("s"):
         time_units = f"{time_units}s"
-    try:
+    # don't worry about reifying the units if they're out of bounds or
+    # formatted badly
+    with contextlib.suppress(OutOfBoundsDatetime, ValueError):
         units = f"{time_units} since {format_timestamp(ref_date)}"
-    except (OutOfBoundsDatetime, ValueError):
-        # don't worry about reifying the units if they're out of bounds or
-        # formatted badly
-        pass
     return units
@@ -1412,7 +1410,7 @@ def decode(self, variable: Variable, name: T_Name = None) -> Variable:
 
 
 def has_timedelta64_encoding_dtype(attrs_or_encoding: dict) -> bool:
-    dtype = attrs_or_encoding.get("dtype", None)
+    dtype = attrs_or_encoding.get("dtype")
     return isinstance(dtype, str) and dtype.startswith("timedelta64")
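The two remaining rules touched in times.py, sketched with invented data rather than the xarray helpers:

```python
# Hypothetical sketch of SIM105 and SIM910.
import contextlib

encoding = {"dtype": "timedelta64[ns]"}

# SIM910: .get(key) already defaults to None; the explicit None is noise.
dtype = encoding.get("dtype")  # before: encoding.get("dtype", None)

# SIM105: contextlib.suppress replaces a try/except whose handler is `pass`.
with contextlib.suppress(KeyError):  # before: try: ... except KeyError: pass
    del encoding["calendar"]
```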

xarray/computation/computation.py

Lines changed: 4 additions & 6 deletions
@@ -144,9 +144,8 @@ def cov(
             "Only xr.DataArray is supported."
             f"Given {[type(arr) for arr in [da_a, da_b]]}."
         )
-    if weights is not None:
-        if not isinstance(weights, DataArray):
-            raise TypeError(f"Only xr.DataArray is supported. Given {type(weights)}.")
+    if weights is not None and not isinstance(weights, DataArray):
+        raise TypeError(f"Only xr.DataArray is supported. Given {type(weights)}.")
     return _cov_corr(da_a, da_b, weights=weights, dim=dim, ddof=ddof, method="cov")
@@ -248,9 +247,8 @@ def corr(
             "Only xr.DataArray is supported."
             f"Given {[type(arr) for arr in [da_a, da_b]]}."
         )
-    if weights is not None:
-        if not isinstance(weights, DataArray):
-            raise TypeError(f"Only xr.DataArray is supported. Given {type(weights)}.")
+    if weights is not None and not isinstance(weights, DataArray):
+        raise TypeError(f"Only xr.DataArray is supported. Given {type(weights)}.")
     return _cov_corr(da_a, da_b, weights=weights, dim=dim, method="corr")

xarray/conventions.py

Lines changed: 17 additions & 16 deletions
@@ -178,7 +178,7 @@ def decode_cf_variable(
     if isinstance(decode_times, CFDatetimeCoder):
         decode_timedelta = CFTimedeltaCoder(time_unit=decode_times.time_unit)
     else:
-        decode_timedelta = True if decode_times else False
+        decode_timedelta = bool(decode_times)
 
     if concat_characters:
         if stack_char_dim:
@@ -276,12 +276,11 @@ def _update_bounds_attributes(variables: T_Variables) -> None:
         attrs = v.attrs
         units = attrs.get("units")
         has_date_units = isinstance(units, str) and "since" in units
-        if has_date_units and "bounds" in attrs:
-            if attrs["bounds"] in variables:
-                bounds_attrs = variables[attrs["bounds"]].attrs
-                bounds_attrs.setdefault("units", attrs["units"])
-                if "calendar" in attrs:
-                    bounds_attrs.setdefault("calendar", attrs["calendar"])
+        if has_date_units and "bounds" in attrs and attrs["bounds"] in variables:
+            bounds_attrs = variables[attrs["bounds"]].attrs
+            bounds_attrs.setdefault("units", attrs["units"])
+            if "calendar" in attrs:
+                bounds_attrs.setdefault("calendar", attrs["calendar"])
@@ -325,12 +324,11 @@ def _update_bounds_encoding(variables: T_Variables) -> None:
             f"{name} before writing to a file.",
         )
 
-        if has_date_units and "bounds" in attrs:
-            if attrs["bounds"] in variables:
-                bounds_encoding = variables[attrs["bounds"]].encoding
-                bounds_encoding.setdefault("units", encoding["units"])
-                if "calendar" in encoding:
-                    bounds_encoding.setdefault("calendar", encoding["calendar"])
+        if has_date_units and "bounds" in attrs and attrs["bounds"] in variables:
+            bounds_encoding = variables[attrs["bounds"]].encoding
+            bounds_encoding.setdefault("units", encoding["units"])
+            if "calendar" in encoding:
+                bounds_encoding.setdefault("calendar", encoding["calendar"])
@@ -805,8 +803,11 @@ def cf_encoder(variables: T_Variables, attributes: T_Attrs):
                 "leap_year",
                 "month_lengths",
             ]:
-                if attr in new_vars[bounds].attrs and attr in var.attrs:
-                    if new_vars[bounds].attrs[attr] == var.attrs[attr]:
-                        new_vars[bounds].attrs.pop(attr)
+                if (
+                    attr in new_vars[bounds].attrs
+                    and attr in var.attrs
+                    and new_vars[bounds].attrs[attr] == var.attrs[attr]
+                ):
+                    new_vars[bounds].attrs.pop(attr)
 
     return new_vars, attributes
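When merging nested `if`s produces a long condition, the commit wraps it in parentheses with one clause per line, as in the cf_encoder hunk above. A hypothetical standalone version:

```python
# Hypothetical: a long SIM102-merged condition, parenthesized one clause per line.
attrs = {"units": "days since 2000-01-01", "bounds": "time_bnds"}
variables = {"time_bnds": {}}
has_date_units = "since" in attrs["units"]

if (
    has_date_units
    and "bounds" in attrs
    and attrs["bounds"] in variables
):
    variables[attrs["bounds"]].setdefault("units", attrs["units"])
```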

xarray/core/dataset.py

Lines changed: 12 additions & 13 deletions
@@ -8141,19 +8141,18 @@ def quantile(
         for name, var in self.variables.items():
             reduce_dims = [d for d in var.dims if d in dims]
             if reduce_dims or not var.dims:
-                if name not in self.coords:
-                    if (
-                        not numeric_only
-                        or np.issubdtype(var.dtype, np.number)
-                        or var.dtype == np.bool_
-                    ):
-                        variables[name] = var.quantile(
-                            q,
-                            dim=reduce_dims,
-                            method=method,
-                            keep_attrs=keep_attrs,
-                            skipna=skipna,
-                        )
+                if name not in self.coords and (
+                    not numeric_only
+                    or np.issubdtype(var.dtype, np.number)
+                    or var.dtype == np.bool_
+                ):
+                    variables[name] = var.quantile(
+                        q,
+                        dim=reduce_dims,
+                        method=method,
+                        keep_attrs=keep_attrs,
+                        skipna=skipna,
+                    )
 
             else:
                 variables[name] = var

xarray/core/formatting.py

Lines changed: 2 additions & 3 deletions
@@ -1054,9 +1054,8 @@ def diff_datatree_repr(a: DataTree, b: DataTree, compat):
         f"Left and right {type(a).__name__} objects are not {_compat_to_str(compat)}"
     ]
 
-    if compat == "identical":
-        if diff_name := diff_name_summary(a, b):
-            summary.append(diff_name)
+    if compat == "identical" and (diff_name := diff_name_summary(a, b)):
+        summary.append(diff_name)
 
     treestructure_diff = diff_treestructure(a, b)
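SIM102 also composes with the walrus operator, as in the formatting.py hunk: the inner `if` that binds a name folds into the combined condition. A hypothetical sketch:

```python
# Hypothetical: SIM102 with an assignment expression in the inner condition.
def diff_name_summary(a, b):
    return "" if a == b else f"names differ: {a!r} vs {b!r}"

summary = []
compat, a, b = "identical", "tree1", "tree2"

# before:
#     if compat == "identical":
#         if diff_name := diff_name_summary(a, b):
#             summary.append(diff_name)
if compat == "identical" and (diff_name := diff_name_summary(a, b)):
    summary.append(diff_name)
```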

xarray/core/groupby.py

Lines changed: 5 additions & 6 deletions
@@ -977,13 +977,12 @@ def _maybe_reindex(self, combined):
         indexers = {}
         for grouper in self.groupers:
             index = combined._indexes.get(grouper.name, None)
-            if has_missing_groups and index is not None:
+            if (has_missing_groups and index is not None) or (
+                len(self.groupers) > 1
+                and not isinstance(grouper.full_index, pd.RangeIndex)
+                and not index.index.equals(grouper.full_index)
+            ):
                 indexers[grouper.name] = grouper.full_index
-            elif len(self.groupers) > 1:
-                if not isinstance(
-                    grouper.full_index, pd.RangeIndex
-                ) and not index.index.equals(grouper.full_index):
-                    indexers[grouper.name] = grouper.full_index
         if indexers:
             combined = combined.reindex(**indexers)
         return combined

xarray/core/missing.py

Lines changed: 5 additions & 6 deletions
@@ -364,11 +364,10 @@ def interp_na(
         # Convert to float
         max_gap = timedelta_to_numeric(max_gap)
 
-        if not use_coordinate:
-            if not isinstance(max_gap, Number | np.number):
-                raise TypeError(
-                    f"Expected integer or floating point max_gap since use_coordinate=False. Received {max_type}."
-                )
+        if not use_coordinate and not isinstance(max_gap, Number | np.number):
+            raise TypeError(
+                f"Expected integer or floating point max_gap since use_coordinate=False. Received {max_type}."
+            )
 
     # method
     index = get_clean_interp_index(self, dim, use_coordinate=use_coordinate)
@@ -499,7 +498,7 @@ def _get_interpolator(
     # take higher dimensional data but scipy.interp1d can.
     if (
         method == "linear"
-        and not kwargs.get("fill_value") == "extrapolate"
+        and kwargs.get("fill_value") != "extrapolate"
         and not vectorizeable_only
     ):
         kwargs.update(method=method)

xarray/core/parallel.py

Lines changed: 8 additions & 6 deletions
@@ -363,12 +363,14 @@ def _wrapper(
 
         # check that index lengths and values are as expected
         for name, index in result._indexes.items():
-            if name in expected["shapes"]:
-                if result.sizes[name] != expected["shapes"][name]:
-                    raise ValueError(
-                        f"Received dimension {name!r} of length {result.sizes[name]}. "
-                        f"Expected length {expected['shapes'][name]}."
-                    )
+            if (
+                name in expected["shapes"]
+                and result.sizes[name] != expected["shapes"][name]
+            ):
+                raise ValueError(
+                    f"Received dimension {name!r} of length {result.sizes[name]}. "
+                    f"Expected length {expected['shapes'][name]}."
+                )
 
         # ChainMap wants MutableMapping, but xindexes is Mapping
         merged_indexes = collections.ChainMap(

xarray/core/utils.py

Lines changed: 1 addition & 3 deletions
@@ -704,10 +704,8 @@ def try_read_magic_number_from_path(pathlike, count=8) -> bytes | None:
 def try_read_magic_number_from_file_or_path(filename_or_obj, count=8) -> bytes | None:
     magic_number = try_read_magic_number_from_path(filename_or_obj, count)
     if magic_number is None:
-        try:
+        with contextlib.suppress(TypeError):
             magic_number = read_magic_number_from_file(filename_or_obj, count)
-        except TypeError:
-            pass
     return magic_number

xarray/core/variable.py

Lines changed: 17 additions & 18 deletions
@@ -168,15 +168,14 @@ def as_variable(
             f"explicit list of dimensions: {obj!r}"
         )
 
-    if auto_convert:
-        if name is not None and name in obj.dims and obj.ndim == 1:
-            # automatically convert the Variable into an Index
-            emit_user_level_warning(
-                f"variable {name!r} with name matching its dimension will not be "
-                "automatically converted into an `IndexVariable` object in the future.",
-                FutureWarning,
-            )
-            obj = obj.to_index_variable()
+    if auto_convert and name is not None and name in obj.dims and obj.ndim == 1:
+        # automatically convert the Variable into an Index
+        emit_user_level_warning(
+            f"variable {name!r} with name matching its dimension will not be "
+            "automatically converted into an `IndexVariable` object in the future.",
+            FutureWarning,
+        )
+        obj = obj.to_index_variable()
 
     return obj
@@ -2930,15 +2929,15 @@ def broadcast_variables(*variables: Variable) -> tuple[Variable, ...]:
 
 
 def _broadcast_compat_data(self, other):
-    if not OPTIONS["arithmetic_broadcast"]:
-        if (isinstance(other, Variable) and self.dims != other.dims) or (
-            is_duck_array(other) and self.ndim != other.ndim
-        ):
-            raise ValueError(
-                "Broadcasting is necessary but automatic broadcasting is disabled via "
-                "global option `'arithmetic_broadcast'`. "
-                "Use `xr.set_options(arithmetic_broadcast=True)` to enable automatic broadcasting."
-            )
+    if not OPTIONS["arithmetic_broadcast"] and (
+        (isinstance(other, Variable) and self.dims != other.dims)
+        or (is_duck_array(other) and self.ndim != other.ndim)
+    ):
+        raise ValueError(
+            "Broadcasting is necessary but automatic broadcasting is disabled via "
+            "global option `'arithmetic_broadcast'`. "
+            "Use `xr.set_options(arithmetic_broadcast=True)` to enable automatic broadcasting."
+        )
 
     if all(hasattr(other, attr) for attr in ["dims", "data", "shape", "encoding"]):
         # `other` satisfies the necessary Variable API for broadcast_variables

0 commit comments