[CodeStyle][Ruff][BUAA][G-[59-67]] Fix ruff RUF005 diagnostic for 9 files in `python/paddle` (#67282)
Fripping authored Aug 13, 2024
1 parent 16ffe81 commit 8a8cec5
Showing 9 changed files with 29 additions and 24 deletions.
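
Ruff's RUF005 rule flags list concatenation of the form `[x] + seq` and suggests building the list with iterable unpacking, `[x, *seq]`, instead. A minimal sketch of the rewrite applied throughout this commit (the data values here are illustrative, not from the Paddle sources):

```python
cutoffs = [10, 100, 1000]  # illustrative data

# Before (flagged by RUF005): list concatenation.
before = [0] + cutoffs

# After: iterable unpacking inside a list literal.
after = [0, *cutoffs]

assert before == after == [0, 10, 100, 1000]

# Unpacking also accepts any iterable, while `+` requires both
# operands to be lists:
shape = (3, 224, 224)  # a tuple
padded = [1, *shape]   # fine
# [1] + shape          # would raise TypeError

assert padded == [1, 3, 224, 224]
```

The two forms are equivalent for lists, so every hunk below is a behavior-preserving style fix.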
python/paddle/nn/functional/loss.py (2 changes: 1 addition & 1 deletion)
@@ -4546,7 +4546,7 @@ def adaptive_log_softmax_with_loss(
output = paddle.zeros([batch_size], dtype=input.dtype)
gather_inds = paddle.empty([batch_size], dtype=label.dtype)

- cutoff_values = [0] + cutoffs
+ cutoff_values = [0, *cutoffs]
for i in range(len(cutoff_values) - 1):
low_idx = cutoff_values[i]
high_idx = cutoff_values[i + 1]
python/paddle/nn/functional/pooling.py (24 changes: 12 additions & 12 deletions)
@@ -183,9 +183,9 @@ def _update_padding_nd(padding, num_dims, channel_last=False, ceil_mode=False):
def _expand_low_nd_padding(padding):
# 1d to 2d fake input
if len(padding) == 2:
- padding = [0] * 2 + padding
+ padding = [0, 0, *padding]
elif len(padding) == 1:
- padding = [0] + padding
+ padding = [0, *padding]
else:
raise ValueError(
f"The size of padding's dimension should be 1 or 2. But got padding={padding}"
@@ -252,12 +252,12 @@ def avg_pool1d(
_check_input(x, 3)
x = unsqueeze(x, [2])
kernel_size = convert_to_list(kernel_size, 1, 'kernel_size')
- kernel_size = [1] + kernel_size
+ kernel_size = [1, *kernel_size]
if stride is None:
stride = kernel_size
else:
stride = convert_to_list(stride, 1, 'pool_stride')
- stride = [1] + stride
+ stride = [1, *stride]

_check_value_limitation(kernel_size, "kernel_size", min_limit=1e-3)
_check_value_limitation(stride, "stride", min_limit=1e-3)
@@ -630,11 +630,11 @@ def max_pool1d(
data_format = "NCHW"
_check_input(x, 3)
x = unsqueeze(x, [2])
- kernel_size = [1] + convert_to_list(kernel_size, 1, 'pool_size')
+ kernel_size = [1, *convert_to_list(kernel_size, 1, "pool_size")]
if stride is None:
stride = kernel_size
else:
- stride = [1] + convert_to_list(stride, 1, 'pool_stride')
+ stride = [1, *convert_to_list(stride, 1, "pool_stride")]

padding, padding_algorithm = _update_padding_nd(
padding, 1, ceil_mode=ceil_mode
@@ -825,11 +825,11 @@ def max_unpool1d(
data_format = "NCHW"
x = unsqueeze(x, [2])
indices = unsqueeze(indices, [2])
- kernel_size = [1] + convert_to_list(kernel_size, 1, 'pool_size')
+ kernel_size = [1, *convert_to_list(kernel_size, 1, "pool_size")]
if stride is None:
stride = kernel_size
else:
- stride = [1] + convert_to_list(stride, 1, 'pool_stride')
+ stride = [1, *convert_to_list(stride, 1, 'pool_stride')]
padding, padding_algorithm = _update_padding_nd(padding, 1)
# use 2d to implenment 1d should expand padding in advance.
padding = _expand_low_nd_padding(padding)
@@ -1477,7 +1477,7 @@ def adaptive_avg_pool1d(
"""
pool_type = 'avg'
_check_input(x, 3)
- pool_size = [1] + convert_to_list(output_size, 1, 'pool_size')
+ pool_size = [1, *convert_to_list(output_size, 1, "pool_size")]

x = unsqueeze(x, [2])
if in_dynamic_or_pir_mode():
@@ -1847,7 +1847,7 @@ def adaptive_max_pool1d(
"""
_check_input(x, 3)

- pool_size = [1] + convert_to_list(output_size, 1, 'pool_size')
+ pool_size = [1, *convert_to_list(output_size, 1, "pool_size")]

x = unsqueeze(x, [2])
if in_dynamic_or_pir_mode():
@@ -2470,12 +2470,12 @@ def lp_pool1d(
_check_input(x, 3)
x = unsqueeze(x, [axis])
kernel_size = convert_to_list(kernel_size, 1, 'kernel_size')
- kernel_size = [1] + kernel_size
+ kernel_size = [1, *kernel_size]
if stride is None:
stride = kernel_size
else:
stride = convert_to_list(stride, 1, 'pool_stride')
- stride = [1] + stride
+ stride = [1, *stride]

_check_value_limitation(kernel_size, "kernel_size", min_limit=1e-3)
_check_value_limitation(stride, "stride", min_limit=1e-3)
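
All seven pooling hunks share one shape: a 1-D kernel, stride, or output size is lifted to 2-D by prepending a size-1 dimension, and the `[1] + ...` concatenation becomes `[1, *...]`. A hedged sketch with `convert_to_list` stubbed out (the real helper lives in Paddle's utils and also validates its input):

```python
def convert_to_list(value, n, name):
    # Stub: normalize an int or a sequence to a list of n ints.
    return [value] * n if isinstance(value, int) else list(value)

kernel_size = [1, *convert_to_list(3, 1, "pool_size")]  # was: [1] + convert_to_list(...)
assert kernel_size == [1, 3]  # fake 2-D dim + the real 1-D kernel
```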
python/paddle/nn/layer/conv.py (6 changes: 4 additions & 2 deletions)
@@ -148,7 +148,8 @@ def __init__(
filter_shape = [
self._in_channels,
out_channels // groups,
- ] + self._kernel_size
+ *self._kernel_size,
+ ]
else:
if in_channels % groups != 0:
raise ValueError("in_channels must be divisible by groups.")
@@ -167,7 +168,8 @@ def __init__(
filter_shape = [
out_channels,
in_channels // groups,
- ] + self._kernel_size
+ *self._kernel_size,
+ ]

def _get_default_param_initializer():
if transposed:
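
The conv hunks show the same rewrite inside a multi-line list literal; the spread expression simply becomes one more element, and it can sit anywhere in the literal (the sparse conv hunks below place it first). A sketch with made-up channel counts:

```python
in_channels, out_channels, groups = 4, 8, 2
kernel_size = [3, 3]  # illustrative 2-D kernel

# was: filter_shape = [in_channels, out_channels // groups] + kernel_size
filter_shape = [
    in_channels,
    out_channels // groups,
    *kernel_size,
]
assert filter_shape == [4, 4, 3, 3]
```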
python/paddle/nn/layer/loss.py (2 changes: 1 addition & 1 deletion)
@@ -2530,7 +2530,7 @@ def __init__(

self.in_features = in_features
self.n_classes = n_classes
- self.cutoffs = cutoffs + [n_classes]
+ self.cutoffs = [*cutoffs, n_classes]
self.div_value = div_value
self._weight_attr = weight_attr
self._bias_attr = bias_attr
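
Here the user-supplied sequence comes first and the literal element is appended, showing RUF005 covers both directions. A sketch with illustrative adaptive-softmax cutoffs:

```python
cutoffs = [10, 100]  # illustrative cutoffs
n_classes = 1000

# was: self.cutoffs = cutoffs + [n_classes]
all_cutoffs = [*cutoffs, n_classes]
assert all_cutoffs == [10, 100, 1000]
```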
python/paddle/nn/layer/rnn.py (4 changes: 2 additions & 2 deletions)
@@ -169,7 +169,7 @@ def _maybe_copy(state: Tensor, new_state: Tensor, step_mask: Tensor) -> Tensor:


def _transpose_batch_time(x: Tensor) -> Tensor:
- perm = [1, 0] + list(range(2, len(x.shape)))
+ perm = [1, 0, *list(range(2, len(x.shape)))]
return paddle.transpose(x, perm)
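
One observation: with unpacking, the inner `list(...)` is redundant, since `*` consumes the range directly; the commit keeps it, presumably to stay a purely mechanical fix. A sketch of the permutation for a rank-4 tensor:

```python
ndim = 4  # e.g. a [seq, batch, h, w] tensor

perm = [1, 0, *list(range(2, ndim))]  # as written in the commit
assert perm == [1, 0, 2, 3]           # swaps the first two axes

assert [1, 0, *range(2, ndim)] == perm  # equivalent, without list()
```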


@@ -650,7 +650,7 @@ def _is_shape_sequence(seq):
class Shape:
def __init__(self, shape):
self.shape = (
- list(shape) if shape[0] == -1 else ([-1] + list(shape))
+ list(shape) if shape[0] == -1 else ([-1, *list(shape)])
)

# nested structure of shapes
python/paddle/optimizer/asgd.py (2 changes: 1 addition & 1 deletion)
@@ -176,7 +176,7 @@ def _create_accumulators(self, block, parameters):
p_new,
p.dtype,
0,
- [self._n] + list(p.shape),
+ [self._n, *list(p.shape)],
)

self._add_accumulator(
python/paddle/quantization/imperative/qat.py (5 changes: 3 additions & 2 deletions)
@@ -658,8 +658,9 @@ def _gather_scales(self, program, scope, fetch_targets):

def _gather_input_scale():
target_ops = []
skip_ops = utils.fake_quantize_dequantize_op_types + [
"moving_average_abs_max_scale"
skip_ops = [
*utils.fake_quantize_dequantize_op_types,
"moving_average_abs_max_scale",
]
for block in program.blocks:
for op in block.ops:
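
This hunk splices a shared module-level list into a new literal with one extra entry; the multi-line form with a trailing comma keeps future additions to one-line diffs. A sketch with a stand-in for `utils.fake_quantize_dequantize_op_types` (values are illustrative, not taken from Paddle):

```python
# Stand-in for utils.fake_quantize_dequantize_op_types.
fake_quantize_dequantize_op_types = [
    "fake_quantize_dequantize_abs_max",
    "fake_quantize_dequantize_moving_average_abs_max",
]

# was: skip_ops = fake_quantize_dequantize_op_types + ["moving_average_abs_max_scale"]
skip_ops = [
    *fake_quantize_dequantize_op_types,
    "moving_average_abs_max_scale",
]
assert skip_ops[-1] == "moving_average_abs_max_scale"
```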
python/paddle/sparse/nn/layer/conv.py (6 changes: 4 additions & 2 deletions)
@@ -104,7 +104,8 @@ def __init__(
)

# the sparse conv restricts the shape is [D, H, W, in_channels, out_channels]
- filter_shape = self._kernel_size + [
+ filter_shape = [
+ *self._kernel_size,
self._in_channels,
self._out_channels,
]
@@ -235,7 +236,8 @@ def __init__(
)

# the sparse conv restricts the shape is [H, W, in_channels, out_channels]
- filter_shape = self._kernel_size + [
+ filter_shape = [
+ *self._kernel_size,
self._in_channels,
self._out_channels,
]
python/paddle/static/input.py (2 changes: 1 addition & 1 deletion)
@@ -341,7 +341,7 @@ def batch(self, batch_size: int | Size1) -> Self:
f"type(batch_size) shall be `int`, but received {type(batch_size).__name__}."
)

- new_shape = [batch_size] + list(self.shape)
+ new_shape = [batch_size, *list(self.shape)]
self.shape = tuple(new_shape)

return self
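
As in the asgd.py hunk, the spliced sequence is not a plain list (`self.shape` is stored as a tuple, per the `self.shape = tuple(new_shape)` two lines later), so unpacking makes the `list(...)` wrapper that `+` required strictly unnecessary. A sketch:

```python
shape = (3, 224, 224)  # illustrative InputSpec shape (a tuple)
batch_size = 32

new_shape = [batch_size, *list(shape)]  # as written in the commit
assert new_shape == [32, 3, 224, 224]

assert [batch_size, *shape] == new_shape  # * unpacks the tuple directly
```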
