[CodeStyle][C416][C417] rewrite unnecessary comprehension with function call and use generator instead of map #52140

Merged: 10 commits, Mar 30, 2023
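For context: C416 (from flake8-comprehensions, enabled here via ruff) flags comprehensions that merely re-emit their elements, and C417 flags map() calls with a lambda that a comprehension or generator expresses more directly. A minimal sketch of both patterns and their rewrites, with illustrative names only:

items = ["a", "b", "c"]

# C416: an identity comprehension is just a copy; the constructor says so directly.
copied = [x for x in items]       # flagged by C416
copied = list(items)              # preferred rewrite

indices = [i for i in range(10)]  # flagged by C416
indices = list(range(10))         # preferred rewrite

# C417: map() with a lambda reads better as a comprehension, or as a bare
# generator when fed to all()/any().
upper = list(map(lambda s: s.upper(), items))  # flagged by C417
upper = [s.upper() for s in items]             # preferred rewrite

ok = all(map(lambda n: n >= 0, [1, 2, 3]))     # flagged by C417
ok = all(n >= 0 for n in [1, 2, 3])            # preferred rewrite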
@@ -59,7 +59,7 @@ def find_arch_range(min_arch, max_arch):


def find_max_arch(arch):
arch = list(sorted(arch))
arch = sorted(arch)
idx = DEFAULT_ARCH.index(arch[-1])
if idx == len(DEFAULT_ARCH) - 1:
return MAX_ARCH
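A side note on the hunk above: sorted() already returns a new list, so the list() wrapper was pure overhead (ruff tracks this pattern as the related rule C413). A quick check:

arch = ("70", "75", "80")
assert sorted(arch) == list(sorted(arch))  # identical contents
assert isinstance(sorted(arch), list)      # sorted() already builds a list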
18 changes: 3 additions & 15 deletions pyproject.toml
@@ -33,21 +33,7 @@ select = [
"F401",

# Comprehensions
"C400",
"C401",
"C402",
"C403",
"C404",
"C405",
"C408",
"C409",
"C410",
"C411",
# "C413",
# "C414",
# "C415",
# "C416",
# "C417",
"C4",

# Pyupgrade
"UP001",
@@ -174,5 +160,7 @@ unfixable = [
"python/paddle/fluid/tests/unittests/dygraph_to_static/test_slice.py" = ["UP034"]
# Ignore version check in setup.py
"setup.py" = ["UP036"]
# Ignore unnecessary comprehension in dy2st unittest test_loop
"python/paddle/fluid/tests/unittests/dygraph_to_static/test_loop.py" = ["C416"]
# Ignore unnecessary lambda in dy2st unittest test_lambda
"python/paddle/fluid/tests/unittests/dygraph_to_static/test_lambda.py" = ["PLC3002"]
4 changes: 2 additions & 2 deletions python/paddle/distributed/auto_parallel/dist_saver.py
@@ -170,8 +170,8 @@ def save_inference_model(self, path, feed_vars, fetch_vars, exe, **kwargs):
global_block = dist_main_prog.global_block()

ops = global_block.ops
feed_vars_names = list(map(lambda x: x.name, feed_vars))
fetch_vars_names = list(map(lambda x: x.name, fetch_vars))
feed_vars_names = [x.name for x in feed_vars]
fetch_vars_names = [x.name for x in fetch_vars]

last_idx = -1
for idx, op in enumerate(ops):
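The dist_saver change above is the bread-and-butter C417 rewrite: extracting an attribute with `map(lambda x: x.name, ...)` becomes a comprehension, dropping one lambda call per element. An equivalence sketch with stand-in objects:

from types import SimpleNamespace

feed_vars = [SimpleNamespace(name="x"), SimpleNamespace(name="label")]
assert [v.name for v in feed_vars] == list(map(lambda v: v.name, feed_vars))
assert [v.name for v in feed_vars] == ["x", "label"]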
14 changes: 7 additions & 7 deletions python/paddle/distributed/auto_parallel/dist_tensor.py
@@ -39,7 +39,7 @@ def _validate_sizes_and_dist_attr(
):
if not (
isinstance(sizes, (list, tuple))
and all(map(lambda x: isinstance(x, int) and x >= 0, sizes))
and all(isinstance(x, int) and x >= 0 for x in sizes)
):
raise ValueError(
"The sizes must be list or tuple and item in sizes must be non-negative integer, but got {}".format(
@@ -48,7 +48,7 @@ def _validate_sizes_and_dist_attr(
)
if not (
isinstance(dims_mapping, (list, tuple))
and all(map(lambda x: isinstance(x, int) and x >= -1, dims_mapping))
and all(isinstance(x, int) and x >= -1 for x in dims_mapping)
):
raise ValueError(
"The dims_mapping must be list or tuple and item in dims_mapping must >= -1, but got {}".format(
@@ -57,7 +57,7 @@ def _validate_sizes_and_dist_attr(
)
if not (
isinstance(processes, (list, tuple))
and all(map(lambda x: isinstance(x, int) and x >= 0, processes))
and all(isinstance(x, int) and x >= 0 for x in processes)
):
raise ValueError(
"The processes must be list or tuple and item in processes must be integer, but got {}".format(
@@ -66,7 +66,7 @@ def _validate_sizes_and_dist_attr(
)
if not (
isinstance(topology, (list, tuple))
and all(map(lambda x: isinstance(x, int) and x > 0, topology))
and all(isinstance(x, int) and x > 0 for x in topology)
):
raise ValueError(
"The topology must be list or tuple and item in topology must be non-negative integer, but got {}".format(
@@ -162,9 +162,9 @@ def get_local_shard(
len(local_sizes), len(local_offsets)
)

local_end_offsets = list(
map(lambda x: x[0] + x[1], zip(local_offsets, local_sizes))
)
local_end_offsets = [
x[0] + x[1] for x in zip(local_offsets, local_sizes)
]
local_shard = list(zip(local_offsets, local_end_offsets))
return local_shard

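The validation rewrites in dist_tensor are behavior-preserving: all() is lazy with either form and stops at the first failing element; the generator version just drops the per-element lambda call and reads as a plain predicate. For example:

sizes = [4, -1, 10**6]

# Both forms short-circuit at -1; only the generator version avoids
# calling through a lambda for each element.
assert not all(map(lambda x: isinstance(x, int) and x >= 0, sizes))
assert not all(isinstance(x, int) and x >= 0 for x in sizes)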
2 changes: 1 addition & 1 deletion python/paddle/distributed/auto_parallel/planner.py
@@ -337,7 +337,7 @@ def enum_valid_dist_attr_for_program(
vars = program.global_block().vars

processes = reduce(lambda x, y: x * y, process_mesh_topology)
global_group = [i for i in range(processes)]
global_group = list(range(processes))
global_process_mesh = None
pipeline_process_meshes = None

42 changes: 18 additions & 24 deletions python/paddle/distributed/auto_parallel/reshard.py
@@ -1340,15 +1340,13 @@ def need_reshard(self, dist_tensor, dist_attr, op_input=True, dist_op=None):
if op_input:
op_input_dims_mapping = dist_attr[1]
if all(
map(
lambda x: x,
[
tensor_dims_mapping,
tensor_process_mesh,
op_input_dims_mapping,
op_process_mesh,
],
)
x
for x in [
tensor_dims_mapping,
tensor_process_mesh,
op_input_dims_mapping,
op_process_mesh,
]
):
# judge whether need reshard by dims_mapping
if tensor_dims_mapping != op_input_dims_mapping:
@@ -1379,15 +1377,13 @@ def need_reshard(self, dist_tensor, dist_attr, op_input=True, dist_op=None):
else:
op_output_dims_mapping = dist_attr[1]
if all(
map(
lambda x: x,
[
tensor_dims_mapping,
tensor_process_mesh,
op_output_dims_mapping,
op_process_mesh,
],
)
x
for x in [
tensor_dims_mapping,
tensor_process_mesh,
op_output_dims_mapping,
op_process_mesh,
]
):
if tensor_dims_mapping != op_output_dims_mapping:
raise ValueError(
@@ -1554,7 +1550,7 @@ def find_op_desc_seq(self, dist_tensor, dist_attr, serial=False):
i += 1

if i == len(has_used):
has_used = list(map(lambda x: False, has_used))
has_used = [False for x in has_used]
to_send_process = process_list[0]
has_used[0] = True
assert (
@@ -1744,11 +1740,9 @@ def parse_op_desc(
if isinstance(op_desc, AllGatherOpDesc): # noqa: F401
if var_name not in self.has_allgather.keys():
self.has_allgather[var_name] = []
if not self.has_allgather[
var_name
] or op_desc.group not in list(
map(lambda x: x[0], self.has_allgather[var_name])
):
if not self.has_allgather[var_name] or op_desc.group not in [
x[0] for x in self.has_allgather[var_name]
]:
if op_desc.is_bool:
# for bool data allgather, cast to int64 -> allgather -> cast bool
out_cast = Inserter.insert_cast_op(
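Two reshard rewrites above stay deliberately minimal and could arguably go further: `all(x for x in [a, b, c, d])` is just a truthiness check over a literal list, and `[False for x in has_used]` builds a constant-valued list. Equivalent simpler forms, assuming plain sequences:

a = b = c = d = [0]  # stand-ins for the dims_mapping/process_mesh values
assert all(x for x in [a, b, c, d]) == all([a, b, c, d])

has_used = [True, True, False]
assert [False for _ in has_used] == [False] * len(has_used)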
@@ -290,7 +290,7 @@ def _generate_dims_mapping_candidates(
return self._cached_dims_mapping_candidates[key]
candidates = []
dims_mapping = [-1 for i in range(dims_mapping_len)]
dims_list = [i for i in range(process_mesh_len)]
dims_list = list(range(process_mesh_len))
visited = [False for i in range(process_mesh_len)]
self._generate_dims_mapping_candidates_helper(
dims_mapping, dims_list, 0, visited, candidates
@@ -1631,13 +1631,12 @@ def _complete_sub_update_program(self, sub_program_dist_context):
Most of the logic is the same as the update completion in the completer.
"""
world_ranks = ProcessMesh(
[
i
for i in range(
list(
range(
self._cluster.get_num_machines()
* self._cluster._num_devices_per_machine
)
]
)
)
dist_tensors = sub_program_dist_context._dist_tensors_for_program

@@ -1958,10 +1957,9 @@ def prepare(self):
self.device_meshes_list.append([])
for device_mesh in device_meshes:
devices = reduce(lambda x, y: x * y, device_mesh)
processes = [
i
for i in range(has_used_devices, has_used_devices + devices)
]
processes = list(
range(has_used_devices, has_used_devices + devices)
)
device_mesh_shape = (
device_mesh
if device_mesh[0] != 1
@@ -117,7 +117,7 @@ def get_state(self):
{"class_name": v.__class__.__name__, "state": v.get_state()}
for v in self._variables.values()
],
"values": {k: v for (k, v) in self.values.items()},
"values": dict(self.values.items()),
}

@classmethod
@@ -126,7 +126,7 @@ def from_state(cls, state):
for v in state["variables"]:
v = _deserialize_tunable_variable(v)
ts._variables[v.name] = v
ts._values = {k: v for (k, v) in state["values"].items()}
ts._values = dict(state["values"].items())
return ts


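In the tunable-space hunks above, `{k: v for (k, v) in d.items()}` collapses to `dict(d.items())`; `dict(d)` alone would do the same, since dict() copies any mapping. All three produce a new, independent dict:

values = {"lr": 0.1, "depth": 3}
assert dict(values.items()) == {k: v for (k, v) in values.items()} == dict(values)
assert dict(values) is not values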
@@ -115,7 +115,7 @@ def __init__(self, name, values, default=None):
default = bool(default)
else:
self._is_unknown_type = True
self._indices = [i for i in range(len(values))]
self._indices = list(range(len(values)))
self.values = values

if default is not None and default not in values:
2 changes: 1 addition & 1 deletion python/paddle/distributed/auto_parallel/utils.py
@@ -1684,7 +1684,7 @@ def _compute_runtime(op_cost, op, vars):
shape = info[
shape_left_boundary + 1 : shape_right_boundary
].split(",")
shape = list(map(lambda x: int(x.strip()), shape))
shape = [int(x.strip()) for x in shape]
dtype_factor = 1
total_static_input_size += reduce(lambda x, y: x * y, shape)
if op.type == "c_embedding":
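For the _compute_runtime hunk: the surrounding code slices a bracketed shape out of a description string, and the rewrite converts each fragment with a comprehension instead of map(). A standalone sketch of that parsing step, with the `info` format assumed from context:

info = "var lod_tensor shape: [32, 128 , 1] dtype: float32"
left = info.find("[")
right = info.find("]")
shape = [int(x.strip()) for x in info[left + 1 : right].split(",")]
assert shape == [32, 128, 1]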
4 changes: 1 addition & 3 deletions python/paddle/distributed/cloud_utils.py
@@ -87,9 +87,7 @@ def get_cloud_cluster(args_node_ips, args_node_ip, args_port, selected_devices):

if started_port is None:
started_port = 6170
ports = [
x for x in range(started_port, started_port + len(selected_devices))
]
ports = list(range(started_port, started_port + len(selected_devices)))
trainer_endpoints = []
for ip in node_ips:
trainer_endpoints.append(["%s:%d" % (ip, port) for port in ports])
4 changes: 1 addition & 3 deletions python/paddle/distributed/fleet/ascend_utils.py
@@ -129,9 +129,7 @@ def get_cloud_cluster(
device_count = 1

devices_per_proc = [str(x) for x in range(device_count)]
free_ports = [
x for x in range(start_port, start_port + len(devices_per_proc))
]
free_ports = list(range(start_port, start_port + len(devices_per_proc)))

trainer_endpoints = []
for ip in node_ips:
2 changes: 1 addition & 1 deletion python/paddle/distributed/fleet/base/util_factory.py
@@ -382,7 +382,7 @@ def _proto_check(self, config):
if paddle.static.io.is_persistable(v)
]
pruned_vars = OrderedDict(pruned_vars)
pruned_vars_name = [name for name in pruned_vars]
pruned_vars_name = list(pruned_vars)
print("persistable vars in pruned program: {}".format(pruned_vars_name))

# feed and fetch op is added in pruned program when pruning, not need to be found in train program
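`list(pruned_vars)` in util_factory works because iterating a mapping yields its keys, so it matches both the old comprehension and `list(pruned_vars.keys())`, preserving insertion order:

from collections import OrderedDict

pruned_vars = OrderedDict([("fc_0.w_0", 1), ("fc_0.b_0", 2)])
assert list(pruned_vars) == [name for name in pruned_vars] == ["fc_0.w_0", "fc_0.b_0"]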
4 changes: 1 addition & 3 deletions python/paddle/distributed/fleet/cloud_utils.py
@@ -75,9 +75,7 @@ def get_cloud_cluster(

if started_port is None:
started_port = 6170
ports = [
x for x in range(started_port, started_port + len(devices_per_proc))
]
ports = list(range(started_port, started_port + len(devices_per_proc)))
trainer_endpoints = []
for ip in node_ips:
trainer_endpoints.append(["%s:%d" % (ip, port) for port in ports])
4 changes: 2 additions & 2 deletions python/paddle/distributed/fleet/elastic/manager.py
@@ -338,7 +338,7 @@ def _host_to_endpoints(
ip = endpoints
port = start_port

ports = [x for x in range(port, port + len(devices_per_proc))]
ports = list(range(port, port + len(devices_per_proc)))
endpoint_list.extend(["%s:%d" % (ip, port) for port in ports])

dist_endpoints = ','.join(endpoint_list)
@@ -360,7 +360,7 @@ def exit(self, completed=False):
self.etcd.cancel_watch(watch)
self.etcd.delete(self.host_path)

hosts = [i for i in self.etcd.get_prefix(self.node_prefix)]
hosts = list(self.etcd.get_prefix(self.node_prefix))
if len(hosts) == 0:
self.etcd.delete_prefix(self.prefix)

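The elastic-manager change applies the same idea to an arbitrary iterable: `list(it)` materializes whatever etcd's get_prefix() yields, exactly as the old identity comprehension did. Sketch with a stand-in generator (no etcd needed; data is hypothetical):

def get_prefix(prefix):  # stand-in for etcd.get_prefix()
    yield from [(b"host-1", object()), (b"host-2", object())]

hosts = list(get_prefix("/paddle/nodes/"))
assert len(hosts) == 2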
4 changes: 1 addition & 3 deletions python/paddle/distributed/fleet/launch.py
@@ -314,9 +314,7 @@ def get_cluster_from_args(args, device_mode, devices_per_proc):
if os.environ.get('FLAGS_START_PORT') is not None:
start_port = int(os.environ.get('FLAGS_START_PORT'))

free_ports = [
x for x in range(start_port, start_port + len(devices_per_proc))
]
free_ports = list(range(start_port, start_port + len(devices_per_proc)))

trainer_endpoints = []
for ip in node_ips:
38 changes: 13 additions & 25 deletions python/paddle/distributed/fleet/launch_utils.py
@@ -951,7 +951,7 @@ def get_device_proc_info(args):
if args.nproc_per_node is None:
devices_per_proc = [0]
else:
devices_per_proc = [x for x in range(0, args.nproc_per_node)]
devices_per_proc = list(range(0, args.nproc_per_node))
else:
raise AssertionError(
"Can't support device_mode:{}, support only cpu|gpu|xpu now.".format(
@@ -1107,20 +1107,14 @@ def get_mapped_cluster_from_args_without_rank_mapping(args, device_mode):
node_rank = node_ips.index(ip)
if os.environ.get('PADDLE_PORT') is not None:
start_port = int(os.getenv("PADDLE_PORT", ""))
free_ports = [
x
for x in range(
start_port, start_port + len(node_ranks[node_rank])
)
]
free_ports = list(
range(start_port, start_port + len(node_ranks[node_rank]))
)
elif os.environ.get('FLAGS_START_PORT') is not None:
start_port = int(os.environ.get('FLAGS_START_PORT'))
free_ports = [
x
for x in range(
start_port, start_port + len(node_ranks[node_rank])
)
]
free_ports = list(
range(start_port, start_port + len(node_ranks[node_rank]))
)
else:
free_ports = find_free_ports(len(node_ranks[node_rank]))
trainer_endpoints.append(["%s:%d" % (ip, port) for port in free_ports])
@@ -1250,20 +1244,14 @@ def get_mapped_cluster_from_args_with_rank_mapping(args, device_mode):
node_rank = node_ips.index(ip)
if os.environ.get('PADDLE_PORT') is not None:
start_port = int(os.getenv("PADDLE_PORT", ""))
free_ports = [
x
for x in range(
start_port, start_port + len(node_ranks[node_rank])
)
]
free_ports = list(
range(start_port, start_port + len(node_ranks[node_rank]))
)
elif os.environ.get('FLAGS_START_PORT') is not None:
start_port = int(os.environ.get('FLAGS_START_PORT'))
free_ports = [
x
for x in range(
start_port, start_port + len(node_ranks[node_rank])
)
]
free_ports = list(
range(start_port, start_port + len(node_ranks[node_rank]))
)
else:
free_ports = find_free_ports(len(node_ranks[node_rank]))
trainer_endpoints.append(["%s:%d" % (ip, port) for port in free_ports])
@@ -196,7 +196,7 @@ def parse_program(
HcomGroupConfig(
name="hcom_group_0",
nranks=fleet.world_size(),
rank_ids=[x for x in range(fleet.world_size())],
rank_ids=list(range(fleet.world_size())),
)
)
