STY: manual fixes for newly flagged violations of UP031 #5064

Open · wants to merge 3 commits into main
Changes from 1 commit
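UP031 is the ruff (pyupgrade) rule that flags printf-style "%" string formatting and asks for f-strings or str.format instead. As a minimal illustration of the pattern applied by hand throughout this PR (the example is lifted from the unstructured_mesh.py hunk below):

mesh_id = 7
"UnstructuredMesh_%04i" % (mesh_id)   # 'UnstructuredMesh_0007', printf-style formatting flagged by UP031
f"UnstructuredMesh_{mesh_id:04}"      # 'UnstructuredMesh_0007', the f-string replacement used in this PR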
8 changes: 4 additions & 4 deletions yt/data_objects/construction_data_containers.py
@@ -2591,7 +2591,7 @@ def _export_ply(
]
f.write(b"ply\n")
f.write(b"format binary_little_endian 1.0\n")
line = "element vertex %i\n" % (nv)
line = f"element vertex {nv}\n"
f.write(line.encode("latin-1"))
f.write(b"property float x\n")
f.write(b"property float y\n")
@@ -2612,7 +2612,7 @@ def _export_ply(
)
else:
v = np.empty(self.vertices.shape[1], dtype=vs[:3])
line = "element face %i\n" % (nv / 3)
line = f"element face {nv/3}\n"
Member:
Should this be explicitly specified as follows?

Suggested change
line = f"element face {nv/3}\n"
line = f"element face {nv/3:.0f}\n"

Or

Suggested change
line = f"element face {nv/3}\n"
line = f"element face {int(nv/3)}\n"

nv/3 may indeed be a float rather than an int.

Member Author:
Nice catch. I think your second suggestion is more in line with the original. Thanks!
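For reference, a quick interpreter check of the variants discussed in this thread (nv = 12 is an illustrative value; in _export_ply it is the vertex count):

nv = 12
"element face %i\n" % (nv / 3)     # 'element face 4\n'   (%i truncates the float to an integer)
f"element face {nv/3}\n"           # 'element face 4.0\n' (a plain f-string renders the float)
f"element face {nv/3:.0f}\n"       # 'element face 4\n'   (rounds, but still formats a float)
f"element face {int(nv/3)}\n"      # 'element face 4\n'   (explicit int, closest to the original %i)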

f.write(line.encode("latin-1"))
f.write(b"property list uchar int vertex_indices\n")
if color_field is not None and sample_type == "face":
@@ -2751,9 +2751,9 @@ def export_sketchfab(
# to a file.
if self.vertices.shape[1] > 1e7:
tfi = 0
fn = "temp_model_%03i.ply" % tfi
fn = f"temp_model_{tfi:03}.ply"
while os.path.exists(fn):
fn = "temp_model_%03i.ply" % tfi
fn = f"temp_model_{tfi:03}.ply"
tfi += 1
open(fn, "wb").write(ply_file.read())
raise YTTooManyVertices(self.vertices.shape[1], fn)
4 changes: 2 additions & 2 deletions yt/data_objects/index_subobjects/unstructured_mesh.py
@@ -48,7 +48,7 @@ def _check_consistency(self):
mylog.debug("Connectivity is consistent.")

def __repr__(self):
return "UnstructuredMesh_%04i" % (self.mesh_id)
return f"UnstructuredMesh_{self.mesh_id:04}"

def get_global_startindex(self):
"""
@@ -151,7 +151,7 @@ class SemiStructuredMesh(UnstructuredMesh):
_container_fields = ("dx", "dy", "dz")

def __repr__(self):
return "SemiStructuredMesh_%04i" % (self.mesh_id)
return f"SemiStructuredMesh_{self.mesh_id:04}"

def _generate_container_field(self, field):
if self._current_chunk is None:
2 changes: 1 addition & 1 deletion yt/data_objects/level_sets/clump_handling.py
@@ -253,7 +253,7 @@ def save_as_dataset(self, filename=None, fields=None):
"""

ds = self.data.ds
keyword = "%s_clump_%d" % (str(ds), self.clump_id)
keyword = f"{ds}_clump_{self.clump_id}"
Member:
Same here: isn't f"{self.clump_id}" different from "%d" % self.clump_id when self.clump_id isn't an int?
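To illustrate the difference raised here (hypothetical value; when clump_id is an int the two spellings agree):

clump_id = 3.0
"%d" % clump_id    # '3'   (%d coerces the float to an integer)
f"{clump_id}"      # '3.0' (an f-string just calls str())
f"{clump_id:d}"    # raises ValueError ('d' is not a valid format code for a float)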

filename = get_output_filename(filename, keyword, ".h5")

# collect clump info fields
4 changes: 2 additions & 2 deletions yt/data_objects/particle_trajectories.py
@@ -335,7 +335,7 @@ def trajectory_from_index(self, index):
"""
mask = np.isin(self.indices, (index,), assume_unique=True)
if not np.any(mask):
print("The particle index %d is not in the list!" % (index))
print(f"The particle index {index} is not in the list!")
Contributor:
I know you're going for a 1:1 refactor here... but what about moving this string into the actual IndexError below?

Member Author:
Good idea, but I'd prefer to do it in a follow-up PR. I'm worried about making a breaking change for anyone and unintentionally hiding it behind a giant refactor.
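A sketch of the suggested follow-up, shown only to illustrate the idea (not part of this PR; the exact wording would be decided in that later change):

if not np.any(mask):
    raise IndexError(f"The particle index {index} is not in the list!")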

raise IndexError
fields = sorted(self.field_data.keys())
traj = {}
@@ -375,7 +375,7 @@ def write_out(self, filename_base):
[self.times[it]] + [self[field][ix, it] for field in fields]
)
)
fid = open(filename_base + "_%d.dat" % self.indices[ix], "w")
fid = open(f"{filename_base}_{self.indices[ix]}.dat", "w")
fid.writelines(outlines)
fid.close()
del fid
2 changes: 1 addition & 1 deletion yt/data_objects/region_expression.py
@@ -115,7 +115,7 @@ def _create_slice(self, slice_tuple):
dim = self.ds.dimensionality
if dim < 2:
raise ValueError(
"Can not create a slice from data with dimensionality '%d'" % dim
f"Can not create a slice from data with dimensionality '{dim}'"
)
if dim == 2:
coord = self.ds.domain_center[2]
4 changes: 2 additions & 2 deletions yt/data_objects/selection_objects/data_selection_objects.py
@@ -66,8 +66,8 @@ def __init__(self, ds, field_parameters, data_source=None):
if data_source._dimensionality < self._dimensionality:
raise RuntimeError(
"Attempted to construct a DataContainer with a data_source "
"of lower dimensionality (%u vs %u)"
% (data_source._dimensionality, self._dimensionality)
"of lower dimensionality "
f"({data_source._dimensionality} vs {self._dimensionality})"
)
self.field_parameters.update(data_source.field_parameters)
self.quantities = DerivedQuantityCollection(self)
6 changes: 3 additions & 3 deletions yt/fields/interpolated_fields.py
@@ -30,9 +30,9 @@ def add_interpolated_field(

if len(axes_fields) != len(axes_data) or len(axes_fields) != len(table_data.shape):
raise RuntimeError(
"Data dimension mismatch: data is %d, "
"%d axes data provided, and %d axes fields provided."
% (len(table_data.shape), len(axes_data), len(axes_fields))
f"Data dimension mismatch: data is {len(table_data.shape)}, "
f"{len(axes_data)} axes data provided, "
f"and {len(axes_fields)} axes fields provided."
)

int_class = _int_class[len(table_data.shape)]
4 changes: 2 additions & 2 deletions yt/fields/xray_emission_fields.py
@@ -20,7 +20,7 @@


def _get_data_file(table_type, data_dir=None):
data_file = "%s_emissivity_v%d.h5" % (table_type, data_version[table_type])
data_file = f"{table_type}_emissivity_v{data_version[table_type]}.h5"
if data_dir is None:
supp_data_dir = ytcfg.get("yt", "supp_data_dir")
data_dir = supp_data_dir if os.path.exists(supp_data_dir) else "."
@@ -43,7 +43,7 @@ def __str__(self):

class ObsoleteDataException(YTException):
def __init__(self, table_type):
data_file = "%s_emissivity_v%d.h5" % (table_type, data_version[table_type])
data_file = f"{table_type}_emissivity_v{data_version[table_type]}.h5"
self.msg = "X-ray emissivity data is out of date.\n"
self.msg += f"Download the latest data from {data_url}/{data_file}."

2 changes: 1 addition & 1 deletion yt/frontends/adaptahop/data_structures.py
@@ -294,7 +294,7 @@ def __init__(self, ptype, particle_identifier, parent_ds, halo_ds):
super().__init__(parent_ds, {})

def __repr__(self):
return "%s_%s_%09d" % (self.ds, self.ptype, self.particle_identifier)
return f"{self.ds}_{self.ptype}_{self.particle_identifier:09}"

def __getitem__(self, key):
return self.region[key]
12 changes: 5 additions & 7 deletions yt/frontends/amrex/data_structures.py
@@ -196,9 +196,7 @@ def _generate_particle_fields(self, extra_field_names):
self.known_int_fields = self.known_int_fields[0 : self.num_int_base]

# these are extra integer fields
extra_int_fields = [
"particle_int_comp%d" % i for i in range(self.num_int_extra)
]
extra_int_fields = [f"particle_int_comp{i}" for i in range(self.num_int_extra)]
self.known_int_fields.extend(
[(self.particle_type, field) for field in extra_int_fields]
)
@@ -216,7 +214,7 @@ def _generate_particle_fields(self, extra_field_names):
assert len(extra_field_names) == self.num_real_extra
else:
extra_field_names = [
"particle_real_comp%d" % i for i in range(self.num_real_extra)
f"particle_real_comp{i}" for i in range(self.num_real_extra)
]

self.known_real_fields.extend(
@@ -1478,7 +1476,7 @@ def __init__(self, header_fn):
if len(line) == 1:
line = f.readline()
continue
self.data["species_%d" % i] = [float(val) for val in line]
self.data[f"species_{i}"] = [float(val) for val in line]
i = i + 1
line = f.readline()

@@ -1497,8 +1495,8 @@ def __init__(self, ds, dataset_type="boxlib_native"):
for key, val in self.warpx_header.data.items():
if key.startswith("species_"):
i = int(key.split("_")[-1])
charge_name = "particle%.1d_charge" % i
Member Author:
This is a weird one. As far as I understand, %.1d is accepted but meaningless, and simply equivalent to %d.
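A quick check of that claim (i = 5 is an illustrative value):

i = 5
"particle%.1d_charge" % i   # 'particle5_charge'
"particle%d_charge" % i     # 'particle5_charge' (identical: a precision of 1 never adds padding)
"%.3d" % i                  # '005' (precision pads with zeros only when it exceeds the number of digits)
f"particle{i}_charge"       # 'particle5_charge' (the f-string used in this PR)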

mass_name = "particle%.1d_mass" % i
charge_name = f"particle{i}_charge"
mass_name = f"particle{i}_mass"
self.parameters[charge_name] = val[0]
self.parameters[mass_name] = val[1]

22 changes: 11 additions & 11 deletions yt/frontends/amrvac/fields.py
@@ -33,7 +33,7 @@ def _velocity(field, data, idir, prefix=None):
# used to generalize to dust fields
if prefix is None:
prefix = ""
moment = data["gas", "%smoment_%d" % (prefix, idir)]
moment = data["gas", f"{prefix}moment_{idir}"]
rho = data["gas", f"{prefix}density"]

mask1 = rho == 0
@@ -59,12 +59,12 @@ class AMRVACFieldInfo(FieldInfoContainer):
# for now, define a finite family of dust fields (up to 100 species)
MAXN_DUST_SPECIES = 100
known_dust_fields = [
("rhod%d" % idust, (code_density, ["dust%d_density" % idust], None))
(f"rhod{idust}", (code_density, [f"dust{idust}_density"], None))
for idust in range(1, MAXN_DUST_SPECIES + 1)
] + [
(
"m%dd%d" % (idir, idust),
(code_moment, ["dust%d_moment_%d" % (idust, idir)], None),
f"m{idir}d{idust}",
(code_moment, [f"dust{idust}_moment_{idir}"], None),
)
for idust in range(1, MAXN_DUST_SPECIES + 1)
for idir in (1, 2, 3)
@@ -91,12 +91,12 @@ def _setup_velocity_fields(self, idust=None):
if idust is None:
dust_flag = dust_label = ""
else:
dust_flag = "d%d" % idust
dust_label = "dust%d_" % idust
dust_flag = f"d{idust}"
dust_label = f"dust{idust}_"

us = self.ds.unit_system
for idir, alias in enumerate(direction_aliases[self.ds.geometry], start=1):
if ("amrvac", "m%d%s" % (idir, dust_flag)) not in self.field_list:
if ("amrvac", f"m{idir}{dust_flag}") not in self.field_list:
break
velocity_fn = functools.partial(_velocity, idir=idir, prefix=dust_label)
self.add_field(
@@ -107,20 +107,20 @@ def _setup_velocity_fields(self, idust=None):
sampling_type="cell",
)
self.alias(
("gas", "%svelocity_%d" % (dust_label, idir)),
("gas", f"{dust_label}velocity_{idir}"),
("gas", f"{dust_label}velocity_{alias}"),
units=us["velocity"],
)
self.alias(
("gas", f"{dust_label}moment_{alias}"),
("gas", "%smoment_%d" % (dust_label, idir)),
("gas", f"{dust_label}moment_{idir}"),
units=us["density"] * us["velocity"],
)

def _setup_dust_fields(self):
idust = 1
imax = self.__class__.MAXN_DUST_SPECIES
while ("amrvac", "rhod%d" % idust) in self.field_list:
while ("amrvac", f"rhod{idust}") in self.field_list:
if idust > imax:
mylog.error(
"Only the first %d dust species are currently read by yt. "
@@ -138,7 +138,7 @@ def _setup_dust_fields(self):
def _total_dust_density(field, data):
tot = np.zeros_like(data["gas", "density"])
for idust in range(1, n_dust_found + 1):
tot += data["dust%d_density" % idust]
tot += data[f"dust{idust}_density"]
return tot

self.add_field(
4 changes: 2 additions & 2 deletions yt/frontends/art/data_structures.py
@@ -320,7 +320,7 @@ def _parse_parameter_file(self):
self.parameters["wspecies"] = wspecies[:n]
self.parameters["lspecies"] = lspecies[:n]
for specie in range(n):
self.particle_types.append("specie%i" % specie)
self.particle_types.append(f"specie{specie}")
Member Author:
I believe that in English, 'species' is actually invariable, but I'm keeping bug-for-bug compatibility in this pure refactor.

Contributor:
The etymology rabbit hole for "specie" is kinda fun. I could be convinced that it's more appropriate to use specie for particle types in a numerical simulation: maybe having a variety of particle types is more similar to minting different types of coins than it is to variations in taxonomical classification. But most likely species was meant here :)

self.particle_types_raw = tuple(self.particle_types)
ls_nonzero = np.diff(lspecies)[: n - 1]
ls_nonzero = np.append(lspecies[0], ls_nonzero)
@@ -611,7 +611,7 @@ def _parse_parameter_file(self):
else:
particle_header_vals[a1] = arr[:a2]
for specie in range(n):
self.particle_types.append("specie%i" % specie)
self.particle_types.append(f"specie{specie}")
self.particle_types_raw = tuple(self.particle_types)
ls_nonzero = np.diff(lspecies)[: n - 1]
ls_nonzero = np.append(lspecies[0], ls_nonzero)
4 changes: 2 additions & 2 deletions yt/frontends/artio/data_structures.py
@@ -443,13 +443,13 @@ def _parse_parameter_file(self):
if self.artio_parameters["num_primary_variables"][species] > 0:
self.particle_variables[species].extend(
self.artio_parameters[
"species_%02d_primary_variable_labels" % (species,)
f"species_{species:02}_primary_variable_labels"
]
)
if self.artio_parameters["num_secondary_variables"][species] > 0:
self.particle_variables[species].extend(
self.artio_parameters[
"species_%02d_secondary_variable_labels" % (species,)
f"species_{species:02}_secondary_variable_labels"
]
)

3 changes: 2 additions & 1 deletion yt/frontends/athena/data_structures.py
@@ -297,7 +297,8 @@ def _parse_index(self):
gridread["dimensions"][gridread["dimensions"] == 0] = 1
if np.prod(gridread["dimensions"]) != gridread["ncells"]:
mylog.error(
"product of dimensions %i not equal to number of cells %i",
f"product of dimensions {np.prod(grid['dimensions'])} "
f"not equal to number of cells {grid['ncells']}",
chrishavlin marked this conversation as resolved.
np.prod(gridread["dimensions"]),
gridread["ncells"],
)
6 changes: 3 additions & 3 deletions yt/frontends/athena_pp/fields.py
@@ -31,8 +31,8 @@ def setup_fluid_fields(self):
# Add velocity fields
vel_prefix = "velocity"
for i, comp in enumerate(self.ds.coordinates.axis_order):
vel_field = ("athena_pp", "vel%d" % (i + 1))
mom_field = ("athena_pp", "mom%d" % (i + 1))
vel_field = ("athena_pp", f"vel{i+1}")
mom_field = ("athena_pp", f"mom{i+1}")
if vel_field in self.field_list:
self.add_output_field(
vel_field, sampling_type="cell", units="code_length/code_time"
@@ -113,5 +113,5 @@ def _temperature(field, data):
)

setup_magnetic_field_aliases(
self, "athena_pp", ["Bcc%d" % ax for ax in (1, 2, 3)]
self, "athena_pp", [f"Bcc{ax}" for ax in (1, 2, 3)]
)
4 changes: 1 addition & 3 deletions yt/frontends/chombo/data_structures.py
@@ -330,9 +330,7 @@ def _determine_periodic(self):
is_periodic = np.array([True, True, True])
for dir in range(self.dimensionality):
try:
is_periodic[dir] = self._handle["/level_0"].attrs[
"is_periodic_%d" % dir
]
is_periodic[dir] = self._handle["/level_0"].attrs[f"is_periodic_{dir}"]
except KeyError:
is_periodic[dir] = True
self._periodicity = tuple(is_periodic)
4 changes: 2 additions & 2 deletions yt/frontends/chombo/io.py
@@ -33,7 +33,7 @@ def box_size(corners):
num_comp = self._handle.attrs["num_components"]
level = 0
while True:
lname = "level_%i" % level
lname = f"level_{level}"
if lname not in self._handle:
break
boxes = self._handle["level_0"]["boxes"][()]
@@ -87,7 +87,7 @@ def particle_field_index(self):
return self._particle_field_index

def _read_data(self, grid, field):
lstring = "level_%i" % grid.Level
lstring = f"level_{grid.Level}"
lev = self._handle[lstring]
dims = grid.ActiveDimensions
shape = dims + 2 * self.ghost
2 changes: 1 addition & 1 deletion yt/frontends/eagle/fields.py
@@ -137,7 +137,7 @@ def _ion_density(field, data):
index = eaglenetwork_ion_lookup[ion]

# Ion to hydrogen number density ratio
ion_chem = data[ftype, "Chemistry_%03i" % index]
ion_chem = data[ftype, f"Chemistry_{index:03}"]

# Mass of a single ion
if ion[0:2].isalpha():
4 changes: 2 additions & 2 deletions yt/frontends/enzo/data_structures.py
@@ -574,8 +574,8 @@ def _parse_index(self):
self.grids = np.empty(len(grids), dtype="object")
for i, grid in enumerate(grids):
if (i % 1e4) == 0:
mylog.debug("Prepared % 7i / % 7i grids", i, self.num_grids)
grid.filename = "Inline_processor_%07i" % (self.grid_procs[i, 0])
mylog.debug(f"Prepared {i:>7} / {self.num_grids:>7} grids")
chrishavlin marked this conversation as resolved.
grid.filename = f"Inline_processor_{self.grid_procs[i, 0]:07}"
grid._prepare_grid()
grid._setup_dx()
grid.proc_num = self.grid_procs[i, 0]