Revert "Enforce ruff/flake8-simplify rules (SIM)" #10476

Merged · 1 commit · Jun 30, 2025

4 changes: 0 additions & 4 deletions pyproject.toml

@@ -256,7 +256,6 @@ extend-select = [
     "PIE", # flake8-pie
     "TID", # flake8-tidy-imports (absolute imports)
     "PYI", # flake8-pyi
-    "SIM", # flake8-simplify
     "FLY", # flynt
     "I", # isort
     "PERF", # Perflint
@@ -278,9 +277,6 @@ ignore = [
     "PIE790", # unnecessary pass statement
     "PYI019", # use `Self` instead of custom TypeVar
     "PYI041", # use `float` instead of `int | float`
-    "SIM108", # use ternary operator instead of `if`-`else`-block
-    "SIM117", # use a single `with` statement instead of nested `with` statements
-    "SIM300", # yoda condition detected
     "PERF203", # try-except within a loop incurs performance overhead
     "E402", # module level import not at top of file
     "E731", # do not assign a lambda expression, use a def

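Context for the revert: the pyproject.toml hunks above drop the SIM family from ruff's `extend-select` list and remove the three now-unused `ignore` entries. As a rough illustration of what those rules enforce — a sketch of ruff's documented behavior, not code from this PR, with hypothetical values `x` and `name`:

# Illustrative only: patterns flagged by the rules named above.
from contextlib import nullcontext

x, name = 2, "t2m"  # hypothetical values for demonstration

# SIM108: an if/else assignment that could be a ternary.
if x > 0:
    sign = 1
else:
    sign = -1
sign = 1 if x > 0 else -1  # ruff's suggested form

# SIM117: nested `with` statements that could be combined.
with nullcontext():
    with nullcontext():
        pass
with nullcontext(), nullcontext():  # suggested form
    pass

# SIM300: a "Yoda condition" (constant on the left of a comparison).
if "t2m" == name:  # flagged; suggested: name == "t2m"
    pass
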
2 changes: 1 addition & 1 deletion xarray/backends/netCDF4_.py

@@ -298,7 +298,7 @@ def _extract_nc4_variable_encoding(
         del encoding["chunksizes"]

     var_has_unlim_dim = any(dim in unlimited_dims for dim in variable.dims)
-    if not raise_on_invalid and var_has_unlim_dim and "contiguous" in encoding:
+    if not raise_on_invalid and var_has_unlim_dim and "contiguous" in encoding.keys():
         del encoding["contiguous"]

     for k in safe_to_drop:

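This hunk, like most of the `.keys()` restorations in the files below, reverses the rewrite ruff applies under SIM118 (membership test or iteration over `dict.keys()`). A minimal sketch of the pattern, not code from this PR:

# Illustrative only: the membership test SIM118 flags.
encoding = {"contiguous": True, "chunksizes": None}

if "contiguous" in encoding.keys():  # flagged by SIM118
    print("found, explicit .keys()")
if "contiguous" in encoding:  # ruff's suggested form
    print("found, implicit")
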
8 changes: 4 additions & 4 deletions xarray/backends/pydap_.py

@@ -349,14 +349,14 @@ def group_fqn(store, path=None, g_fqn=None) -> dict[str, str]:
     if not g_fqn:
         g_fqn = {}
     groups = [
-        var.id for var in store.values() if isinstance(var, GroupType)
+        store[key].id
+        for key in store.keys()
+        if isinstance(store[key], GroupType)
     ]
     for g in groups:
         g_fqn.update({g: path})
         subgroups = [
-            key
-            for key, var in store[g].items()
-            if isinstance(var, GroupType)
+            var for var in store[g] if isinstance(store[g][var], GroupType)
         ]
         if len(subgroups) > 0:
             npath = path + g

2 changes: 1 addition & 1 deletion xarray/coding/times.py

@@ -733,7 +733,7 @@ def infer_calendar_name(dates) -> CFCalendar:
     """Given an array of datetimes, infer the CF calendar name"""
     if is_np_datetime_like(dates.dtype):
         return "proleptic_gregorian"
-    elif dates.dtype == np.dtype("O") and dates.size > 0:  # noqa: SIM102
+    elif dates.dtype == np.dtype("O") and dates.size > 0:
         # Logic copied from core.common.contains_cftime_datetimes.
         if cftime is not None:
             sample = np.asarray(dates).flat[0]

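With SIM disabled in pyproject.toml, the `# noqa: SIM102` suppression above becomes unused and is dropped. SIM102 flags a nested `if` that could be collapsed into a single condition; a minimal sketch, not code from this PR:

# Illustrative only: the collapsible nested `if` SIM102 flags.
a, b = True, False

if a:  # flagged: could be collapsed to `if a and b:`
    if b:
        print("both")
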
2 changes: 1 addition & 1 deletion xarray/computation/rolling.py

@@ -1079,7 +1079,7 @@ def __init__(
         self.side = side
         self.boundary = boundary

-        missing_dims = tuple(dim for dim in windows if dim not in self.obj.dims)
+        missing_dims = tuple(dim for dim in windows.keys() if dim not in self.obj.dims)
         if missing_dims:
             raise ValueError(
                 f"Window dimensions {missing_dims} not found in {self.obj.__class__.__name__} "

4 changes: 3 additions & 1 deletion xarray/core/common.py

@@ -1247,7 +1247,9 @@ def _dataset_indexer(dim: Hashable) -> DataArray:
            _dataarray_indexer if isinstance(cond, DataArray) else _dataset_indexer
        )

-        indexers = {dim: _get_indexer(dim) for dim in cond.sizes}
+        indexers = {}
+        for dim in cond.sizes.keys():
+            indexers[dim] = _get_indexer(dim)

        self = self.isel(**indexers)
        cond = cond.isel(**indexers)

2 changes: 1 addition & 1 deletion xarray/core/dataarray.py

@@ -212,7 +212,7 @@ def _check_data_shape(
        data_shape = tuple(
            (
                as_variable(coords[k], k, auto_convert=False).size
-                if k in coords
+                if k in coords.keys()
                else 1
            )
            for k in dims

15 changes: 7 additions & 8 deletions xarray/core/dataset.py

@@ -4086,20 +4086,21 @@ def _rename(
        is raised at the right stack level.
        """
        name_dict = either_dict_or_kwargs(name_dict, names, "rename")
-        for k, new_k in name_dict.items():
+        for k in name_dict.keys():
            if k not in self and k not in self.dims:
                raise ValueError(
                    f"cannot rename {k!r} because it is not a "
                    "variable or dimension in this dataset"
                )

            create_dim_coord = False
+            new_k = name_dict[k]

            if k == new_k:
                continue  # Same name, nothing to do

            if k in self.dims and new_k in self._coord_names:
-                coord_dims = self._variables[new_k].dims
+                coord_dims = self._variables[name_dict[k]].dims
                if coord_dims == (k,):
                    create_dim_coord = True
            elif k in self._coord_names and new_k in self.dims:
@@ -4109,7 +4110,7 @@ def _rename(

            if create_dim_coord:
                warnings.warn(
-                    f"rename {k!r} to {new_k!r} does not create an index "
+                    f"rename {k!r} to {name_dict[k]!r} does not create an index "
                    "anymore. Try using swap_dims instead or use set_index "
                    "after rename to create an indexed coordinate.",
                    UserWarning,
@@ -8975,18 +8976,16 @@ def pad(
                variables[name] = var
            elif name in self.data_vars:
                if utils.is_dict_like(constant_values):
-                    if name in constant_values:
+                    if name in constant_values.keys():
                        filtered_constant_values = constant_values[name]
                    elif not set(var.dims).isdisjoint(constant_values.keys()):
                        filtered_constant_values = {
-                            k: v  # type: ignore[misc]
-                            for k, v in constant_values.items()
-                            if k in var.dims
+                            k: v for k, v in constant_values.items() if k in var.dims
                        }
                    else:
                        filtered_constant_values = 0  # TODO: https://github.com/pydata/xarray/pull/9353#discussion_r1724018352
                else:
-                    filtered_constant_values = constant_values  # type: ignore[assignment]
+                    filtered_constant_values = constant_values
                variables[name] = var.pad(
                    pad_width=var_pad_width,
                    mode=mode,

14 changes: 6 additions & 8 deletions xarray/core/formatting.py

@@ -989,10 +989,9 @@ def diff_array_repr(a, b, compat):
    ):
        summary.append(coords_diff)

-    if compat == "identical" and (
-        attrs_diff := diff_attrs_repr(a.attrs, b.attrs, compat)
-    ):
-        summary.append(attrs_diff)
+    if compat == "identical":
+        if attrs_diff := diff_attrs_repr(a.attrs, b.attrs, compat):
+            summary.append(attrs_diff)

    return "\n".join(summary)

@@ -1030,10 +1029,9 @@ def diff_dataset_repr(a, b, compat):
    ):
        summary.append(data_diff)

-    if compat == "identical" and (
-        attrs_diff := diff_attrs_repr(a.attrs, b.attrs, compat)
-    ):
-        summary.append(attrs_diff)
+    if compat == "identical":
+        if attrs_diff := diff_attrs_repr(a.attrs, b.attrs, compat):
+            summary.append(attrs_diff)

    return "\n".join(summary)

2 changes: 1 addition & 1 deletion xarray/core/indexes.py

@@ -1247,7 +1247,7 @@ def create_variables(
                level = name
                dtype = self.level_coords_dtype[name]  # type: ignore[index]  # TODO: are Hashables ok?

-            var = variables.get(name)
+            var = variables.get(name, None)
            if var is not None:
                attrs = var.attrs
                encoding = var.encoding

2 changes: 1 addition & 1 deletion xarray/core/parallel.py

@@ -624,7 +624,7 @@ def _wrapper(
        {**hlg.layers, **new_layers},
        dependencies={
            **hlg.dependencies,
-            **{name: {gname} for name in new_layers},
+            **{name: {gname} for name in new_layers.keys()},
        },
    )

Expand Down
27 changes: 16 additions & 11 deletions xarray/core/resample_cftime.py
Original file line number Diff line number Diff line change
Expand Up @@ -84,17 +84,22 @@ def __init__(
self.freq = to_offset(freq)
self.origin = origin

if (
isinstance(self.freq, MonthEnd | QuarterEnd | YearEnd)
or
# The backward resample sets ``closed`` to ``'right'`` by default
# since the last value should be considered as the edge point for
# the last bin. When origin in "end" or "end_day", the value for a
# specific ``cftime.datetime`` index stands for the resample result
# from the current ``cftime.datetime`` minus ``freq`` to the current
# ``cftime.datetime`` with a right close.
self.origin in ["end", "end_day"]
):
if isinstance(self.freq, MonthEnd | QuarterEnd | YearEnd):
if closed is None:
self.closed = "right"
else:
self.closed = closed
if label is None:
self.label = "right"
else:
self.label = label
# The backward resample sets ``closed`` to ``'right'`` by default
# since the last value should be considered as the edge point for
# the last bin. When origin in "end" or "end_day", the value for a
# specific ``cftime.datetime`` index stands for the resample result
# from the current ``cftime.datetime`` minus ``freq`` to the current
# ``cftime.datetime`` with a right close.
elif self.origin in ["end", "end_day"]:
if closed is None:
self.closed = "right"
else:
Expand Down
14 changes: 8 additions & 6 deletions xarray/groupers.py

@@ -701,23 +701,25 @@ def find_independent_seasons(seasons: Sequence[str]) -> Sequence[SeasonsGroup]:
    grouped = defaultdict(list)
    codes = defaultdict(list)
    seen: set[tuple[int, ...]] = set()
+    idx = 0
    # This is quadratic, but the number of seasons is at most 12
    for i, current in enumerate(season_inds):
        # Start with a group
        if current not in seen:
-            grouped[i].append(current)
-            codes[i].append(i)
+            grouped[idx].append(current)
+            codes[idx].append(i)
            seen.add(current)

        # Loop through remaining groups, and look for overlaps
        for j, second in enumerate(season_inds[i:]):
-            if not (set(chain(*grouped[i])) & set(second)) and second not in seen:
-                grouped[i].append(second)
-                codes[i].append(j + i)
+            if not (set(chain(*grouped[idx])) & set(second)) and second not in seen:
+                grouped[idx].append(second)
+                codes[idx].append(j + i)
                seen.add(second)
            if len(seen) == len(seasons):
                break
-        # found all non-overlapping groups for this row start over
+        # found all non-overlapping groups for this row, increment and start over
+        idx += 1

    grouped_ints = tuple(tuple(idx) for idx in grouped.values() if idx)
    return [

2 changes: 1 addition & 1 deletion xarray/plot/facetgrid.py

@@ -549,7 +549,7 @@ def map_plot1d(
        )

        if add_legend:
-            use_legend_elements = func.__name__ != "hist"
+            use_legend_elements = not func.__name__ == "hist"
            if use_legend_elements:
                self.add_legend(
                    use_legend_elements=use_legend_elements,

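This hunk, together with the matching one in xarray/tests/test_dask.py below, restores the `not x == y` spelling that ruff rewrites under SIM201 (prefer `x != y`). A minimal sketch, not code from this PR:

# Illustrative only: the negated equality comparison SIM201 flags.
name = "hist"

flag = not name == "hist"  # flagged by SIM201
flag = name != "hist"      # ruff's suggested form
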
9 changes: 4 additions & 5 deletions xarray/plot/utils.py

@@ -419,10 +419,9 @@ def _infer_xy_labels(
    _assert_valid_xy(darray, x, "x")
    _assert_valid_xy(darray, y, "y")

-    if darray._indexes.get(x, 1) is darray._indexes.get(y, 2) and isinstance(
-        darray._indexes[x], PandasMultiIndex
-    ):
-        raise ValueError("x and y cannot be levels of the same MultiIndex")
+    if darray._indexes.get(x, 1) is darray._indexes.get(y, 2):
+        if isinstance(darray._indexes[x], PandasMultiIndex):
+            raise ValueError("x and y cannot be levels of the same MultiIndex")

    return x, y

@@ -1821,7 +1820,7 @@ def _guess_coords_to_plot(
    """
    coords_to_plot_exist = {k: v for k, v in coords_to_plot.items() if v is not None}
    available_coords = tuple(
-        k for k in darray.coords if k not in coords_to_plot_exist.values()
+        k for k in darray.coords.keys() if k not in coords_to_plot_exist.values()
    )

    # If dims_plot[k] isn't defined then fill with one of the available dims, unless

2 changes: 1 addition & 1 deletion xarray/structure/merge.py

@@ -300,7 +300,7 @@ def merge_collected(
            variables = [variable for variable, _ in elements_list]
            try:
                merged_vars[name] = unique_variable(
-                    name, variables, compat, equals.get(name)
+                    name, variables, compat, equals.get(name, None)
                )
            except MergeError:
                if compat != "minimal":

6 changes: 3 additions & 3 deletions xarray/tests/test_backends.py

@@ -2671,13 +2671,13 @@ def test_hidden_zarr_keys(self) -> None:
        # check that a variable hidden attribute is present and correct
        # JSON only has a single array type, which maps to list in Python.
        # In contrast, dims in xarray is always a tuple.
-        for var in expected.variables:
+        for var in expected.variables.keys():
            dims = zarr_group[var].attrs[self.DIMENSION_KEY]
            assert dims == list(expected[var].dims)

        with xr.decode_cf(store):
            # make sure it is hidden
-            for var in expected.variables:
+            for var in expected.variables.keys():
                assert self.DIMENSION_KEY not in expected[var].attrs

        # put it back and try removing from a variable
@@ -3731,7 +3731,7 @@ def test_chunk_key_encoding_v2(self) -> None:

        # Verify the chunk keys in store use the slash separator
        if not has_zarr_v3:
-            chunk_keys = [k for k in store if k.startswith("var1/")]
+            chunk_keys = [k for k in store.keys() if k.startswith("var1/")]
            assert len(chunk_keys) > 0
            for key in chunk_keys:
                assert "/" in key

22 changes: 11 additions & 11 deletions xarray/tests/test_backends_datatree.py

@@ -66,7 +66,7 @@ def assert_chunks_equal(
            and node1.variables[name].chunksizes == node2.variables[name].chunksizes
        )
        for path, (node1, node2) in xr.group_subtrees(actual, expected)
-        for name in node1.variables
+        for name in node1.variables.keys()
    }

    assert all(comparison.values()), diff_chunks(comparison, actual, expected)
@@ -312,9 +312,9 @@ def test_open_groups(self, unaligned_datatree_nc) -> None:
        unaligned_dict_of_datasets = open_groups(unaligned_datatree_nc)

        # Check that group names are keys in the dictionary of `xr.Datasets`
-        assert "/" in unaligned_dict_of_datasets
-        assert "/Group1" in unaligned_dict_of_datasets
-        assert "/Group1/subgroup1" in unaligned_dict_of_datasets
+        assert "/" in unaligned_dict_of_datasets.keys()
+        assert "/Group1" in unaligned_dict_of_datasets.keys()
+        assert "/Group1/subgroup1" in unaligned_dict_of_datasets.keys()
        # Check that group name returns the correct datasets
        with xr.open_dataset(unaligned_datatree_nc, group="/") as expected:
            assert_identical(unaligned_dict_of_datasets["/"], expected)
@@ -453,9 +453,9 @@ def test_open_groups(self, url=unaligned_datatree_url) -> None:
        unaligned_dict_of_datasets = open_groups(url, engine=self.engine)

        # Check that group names are keys in the dictionary of `xr.Datasets`
-        assert "/" in unaligned_dict_of_datasets
-        assert "/Group1" in unaligned_dict_of_datasets
-        assert "/Group1/subgroup1" in unaligned_dict_of_datasets
+        assert "/" in unaligned_dict_of_datasets.keys()
+        assert "/Group1" in unaligned_dict_of_datasets.keys()
+        assert "/Group1/subgroup1" in unaligned_dict_of_datasets.keys()
        # Check that group name returns the correct datasets
        with xr.open_dataset(url, engine=self.engine, group="/") as expected:
            assert_identical(unaligned_dict_of_datasets["/"], expected)
@@ -782,10 +782,10 @@ def test_open_groups(self, unaligned_datatree_zarr_factory, zarr_format) -> None:
        storepath = unaligned_datatree_zarr_factory(zarr_format=zarr_format)
        unaligned_dict_of_datasets = open_groups(storepath, engine="zarr")

-        assert "/" in unaligned_dict_of_datasets
-        assert "/Group1" in unaligned_dict_of_datasets
-        assert "/Group1/subgroup1" in unaligned_dict_of_datasets
-        assert "/Group2" in unaligned_dict_of_datasets
+        assert "/" in unaligned_dict_of_datasets.keys()
+        assert "/Group1" in unaligned_dict_of_datasets.keys()
+        assert "/Group1/subgroup1" in unaligned_dict_of_datasets.keys()
+        assert "/Group2" in unaligned_dict_of_datasets.keys()
        # Check that group name returns the correct datasets
        with xr.open_dataset(storepath, group="/", engine="zarr") as expected:
            assert_identical(unaligned_dict_of_datasets["/"], expected)

4 changes: 2 additions & 2 deletions xarray/tests/test_coding.py

@@ -94,8 +94,8 @@ def test_coder_roundtrip() -> None:
    assert_identical(original, roundtripped)


-@pytest.mark.parametrize("dtype", ["u1", "u2", "i1", "i2", "f2", "f4"])
-@pytest.mark.parametrize("dtype2", ["f4", "f8"])
+@pytest.mark.parametrize("dtype", "u1 u2 i1 i2 f2 f4".split())
+@pytest.mark.parametrize("dtype2", "f4 f8".split())
def test_scaling_converts_to_float(dtype: str, dtype2: str) -> None:
    dt = np.dtype(dtype2)
    original = xr.Variable(

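The parametrize hunks above restore the `"...".split()` idiom that ruff flags as SIM905 (prefer a static list literal). A minimal sketch, not code from this PR:

# Illustrative only: the static str.split() call SIM905 flags.
dtypes = "u1 u2 i1 i2 f2 f4".split()           # flagged by SIM905
dtypes = ["u1", "u2", "i1", "i2", "f2", "f4"]  # suggested form
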
4 changes: 2 additions & 2 deletions xarray/tests/test_combine.py

@@ -29,8 +29,8 @@

def assert_combined_tile_ids_equal(dict1, dict2):
    assert len(dict1) == len(dict2)
-    for k in dict1:
-        assert k in dict2
+    for k in dict1.keys():
+        assert k in dict2.keys()
        assert_equal(dict1[k], dict2[k])

2 changes: 1 addition & 1 deletion xarray/tests/test_dask.py

@@ -1636,7 +1636,7 @@ def test_normalize_token_with_backend(map_ds):
    with create_tmp_file(allow_cleanup_failure=ON_WINDOWS) as tmp_file:
        map_ds.to_netcdf(tmp_file)
        read = xr.open_dataset(tmp_file)
-        assert dask.base.tokenize(map_ds) != dask.base.tokenize(read)
+        assert not dask.base.tokenize(map_ds) == dask.base.tokenize(read)
        read.close()