diff --git a/python/paddle/nn/functional/loss.py b/python/paddle/nn/functional/loss.py
index d2b31524b062f..63d4a8e9bb25b 100644
--- a/python/paddle/nn/functional/loss.py
+++ b/python/paddle/nn/functional/loss.py
@@ -4546,7 +4546,7 @@ def adaptive_log_softmax_with_loss(
     output = paddle.zeros([batch_size], dtype=input.dtype)
     gather_inds = paddle.empty([batch_size], dtype=label.dtype)
 
-    cutoff_values = [0] + cutoffs
+    cutoff_values = [0, *cutoffs]
     for i in range(len(cutoff_values) - 1):
         low_idx = cutoff_values[i]
         high_idx = cutoff_values[i + 1]
diff --git a/python/paddle/nn/functional/pooling.py b/python/paddle/nn/functional/pooling.py
index cb6111e9cddaa..72df74b50e07e 100755
--- a/python/paddle/nn/functional/pooling.py
+++ b/python/paddle/nn/functional/pooling.py
@@ -183,9 +183,9 @@ def _update_padding_nd(padding, num_dims, channel_last=False, ceil_mode=False):
 def _expand_low_nd_padding(padding):
     # 1d to 2d fake input
     if len(padding) == 2:
-        padding = [0] * 2 + padding
+        padding = [0, 0, *padding]
     elif len(padding) == 1:
-        padding = [0] + padding
+        padding = [0, *padding]
     else:
         raise ValueError(
             f"The size of padding's dimension should be 1 or 2. But got padding={padding}"
@@ -252,12 +252,12 @@ def avg_pool1d(
     _check_input(x, 3)
     x = unsqueeze(x, [2])
     kernel_size = convert_to_list(kernel_size, 1, 'kernel_size')
-    kernel_size = [1] + kernel_size
+    kernel_size = [1, *kernel_size]
     if stride is None:
         stride = kernel_size
     else:
         stride = convert_to_list(stride, 1, 'pool_stride')
-        stride = [1] + stride
+        stride = [1, *stride]
 
     _check_value_limitation(kernel_size, "kernel_size", min_limit=1e-3)
     _check_value_limitation(stride, "stride", min_limit=1e-3)
@@ -630,11 +630,11 @@ def max_pool1d(
     data_format = "NCHW"
     _check_input(x, 3)
     x = unsqueeze(x, [2])
-    kernel_size = [1] + convert_to_list(kernel_size, 1, 'pool_size')
+    kernel_size = [1, *convert_to_list(kernel_size, 1, "pool_size")]
     if stride is None:
         stride = kernel_size
     else:
-        stride = [1] + convert_to_list(stride, 1, 'pool_stride')
+        stride = [1, *convert_to_list(stride, 1, "pool_stride")]
 
     padding, padding_algorithm = _update_padding_nd(
         padding, 1, ceil_mode=ceil_mode
@@ -825,11 +825,11 @@ def max_unpool1d(
     data_format = "NCHW"
     x = unsqueeze(x, [2])
     indices = unsqueeze(indices, [2])
-    kernel_size = [1] + convert_to_list(kernel_size, 1, 'pool_size')
+    kernel_size = [1, *convert_to_list(kernel_size, 1, "pool_size")]
     if stride is None:
         stride = kernel_size
     else:
-        stride = [1] + convert_to_list(stride, 1, 'pool_stride')
+        stride = [1, *convert_to_list(stride, 1, 'pool_stride')]
     padding, padding_algorithm = _update_padding_nd(padding, 1)
     # use 2d to implenment 1d should expand padding in advance.
     padding = _expand_low_nd_padding(padding)
@@ -1477,7 +1477,7 @@ def adaptive_avg_pool1d(
     """
     pool_type = 'avg'
     _check_input(x, 3)
-    pool_size = [1] + convert_to_list(output_size, 1, 'pool_size')
+    pool_size = [1, *convert_to_list(output_size, 1, "pool_size")]
     x = unsqueeze(x, [2])
 
     if in_dynamic_or_pir_mode():
@@ -1847,7 +1847,7 @@ def adaptive_max_pool1d(
     """
     _check_input(x, 3)
 
-    pool_size = [1] + convert_to_list(output_size, 1, 'pool_size')
+    pool_size = [1, *convert_to_list(output_size, 1, "pool_size")]
 
     x = unsqueeze(x, [2])
     if in_dynamic_or_pir_mode():
@@ -2470,12 +2470,12 @@ def lp_pool1d(
     _check_input(x, 3)
     x = unsqueeze(x, [axis])
     kernel_size = convert_to_list(kernel_size, 1, 'kernel_size')
-    kernel_size = [1] + kernel_size
+    kernel_size = [1, *kernel_size]
    if stride is None:
         stride = kernel_size
     else:
         stride = convert_to_list(stride, 1, 'pool_stride')
-        stride = [1] + stride
+        stride = [1, *stride]
 
     _check_value_limitation(kernel_size, "kernel_size", min_limit=1e-3)
     _check_value_limitation(stride, "stride", min_limit=1e-3)
diff --git a/python/paddle/nn/layer/conv.py b/python/paddle/nn/layer/conv.py
index 4f1c678ee2267..ad82cef0750ec 100644
--- a/python/paddle/nn/layer/conv.py
+++ b/python/paddle/nn/layer/conv.py
@@ -148,7 +148,8 @@ def __init__(
             filter_shape = [
                 self._in_channels,
                 out_channels // groups,
-            ] + self._kernel_size
+                *self._kernel_size,
+            ]
         else:
             if in_channels % groups != 0:
                 raise ValueError("in_channels must be divisible by groups.")
@@ -167,7 +168,8 @@ def __init__(
             filter_shape = [
                 out_channels,
                 in_channels // groups,
-            ] + self._kernel_size
+                *self._kernel_size,
+            ]
 
         def _get_default_param_initializer():
             if transposed:
diff --git a/python/paddle/nn/layer/loss.py b/python/paddle/nn/layer/loss.py
index 14846d7d4c2fb..9290f7f6c9ede 100644
--- a/python/paddle/nn/layer/loss.py
+++ b/python/paddle/nn/layer/loss.py
@@ -2530,7 +2530,7 @@ def __init__(
 
         self.in_features = in_features
         self.n_classes = n_classes
-        self.cutoffs = cutoffs + [n_classes]
+        self.cutoffs = [*cutoffs, n_classes]
         self.div_value = div_value
         self._weight_attr = weight_attr
         self._bias_attr = bias_attr
diff --git a/python/paddle/nn/layer/rnn.py b/python/paddle/nn/layer/rnn.py
index e26613a83c125..7e89a23f86ad7 100644
--- a/python/paddle/nn/layer/rnn.py
+++ b/python/paddle/nn/layer/rnn.py
@@ -169,7 +169,7 @@ def _maybe_copy(state: Tensor, new_state: Tensor, step_mask: Tensor) -> Tensor:
 
 
 def _transpose_batch_time(x: Tensor) -> Tensor:
-    perm = [1, 0] + list(range(2, len(x.shape)))
+    perm = [1, 0, *list(range(2, len(x.shape)))]
     return paddle.transpose(x, perm)
 
 
@@ -650,7 +650,7 @@ def _is_shape_sequence(seq):
     class Shape:
         def __init__(self, shape):
             self.shape = (
-                list(shape) if shape[0] == -1 else ([-1] + list(shape))
+                list(shape) if shape[0] == -1 else ([-1, *list(shape)])
             )
 
     # nested structure of shapes
diff --git a/python/paddle/optimizer/asgd.py b/python/paddle/optimizer/asgd.py
index cd6b3b15e5bea..3f8d25b6857c9 100644
--- a/python/paddle/optimizer/asgd.py
+++ b/python/paddle/optimizer/asgd.py
@@ -176,7 +176,7 @@ def _create_accumulators(self, block, parameters):
                 p_new,
                 p.dtype,
                 0,
-                [self._n] + list(p.shape),
+                [self._n, *list(p.shape)],
             )
 
             self._add_accumulator(
diff --git a/python/paddle/quantization/imperative/qat.py b/python/paddle/quantization/imperative/qat.py
index 7ffe86f9a23e2..d103bf937c091 100644
--- a/python/paddle/quantization/imperative/qat.py
+++ b/python/paddle/quantization/imperative/qat.py
@@ -658,8 +658,9 @@ def _gather_scales(self, program, scope, fetch_targets):
 
         def _gather_input_scale():
             target_ops = []
-            skip_ops = utils.fake_quantize_dequantize_op_types + [
-                "moving_average_abs_max_scale"
+            skip_ops = [
+                *utils.fake_quantize_dequantize_op_types,
+                "moving_average_abs_max_scale",
             ]
             for block in program.blocks:
                 for op in block.ops:
diff --git a/python/paddle/sparse/nn/layer/conv.py b/python/paddle/sparse/nn/layer/conv.py
index 0f4126ae35a10..99b22c2188279 100644
--- a/python/paddle/sparse/nn/layer/conv.py
+++ b/python/paddle/sparse/nn/layer/conv.py
@@ -104,7 +104,8 @@ def __init__(
             )
 
         # the sparse conv restricts the shape is [D, H, W, in_channels, out_channels]
-        filter_shape = self._kernel_size + [
+        filter_shape = [
+            *self._kernel_size,
             self._in_channels,
             self._out_channels,
         ]
@@ -235,7 +236,8 @@ def __init__(
             )
 
         # the sparse conv restricts the shape is [H, W, in_channels, out_channels]
-        filter_shape = self._kernel_size + [
+        filter_shape = [
+            *self._kernel_size,
             self._in_channels,
             self._out_channels,
         ]
diff --git a/python/paddle/static/input.py b/python/paddle/static/input.py
index 763608bec3ce6..be21dbcf7e7c4 100644
--- a/python/paddle/static/input.py
+++ b/python/paddle/static/input.py
@@ -341,7 +341,7 @@ def batch(self, batch_size: int | Size1) -> Self:
                 f"type(batch_size) shall be `int`, but received {type(batch_size).__name__}."
             )
 
-        new_shape = [batch_size] + list(self.shape)
+        new_shape = [batch_size, *list(self.shape)]
         self.shape = tuple(new_shape)
         return self
 
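
Note: every hunk above applies the same mechanical rewrite, replacing `+` list concatenation with PEP 448 iterable unpacking inside the list literal (the pattern Ruff's RUF005 rule flags). A minimal sketch of why the two forms are equivalent; the variable names and values below are hypothetical stand-ins, not Paddle's own:

```python
# Sketch only: `cutoffs` and `shape` are hypothetical example values.
cutoffs = [10, 100, 1000]

# Before: `+` allocates a throwaway [0] list, then a second concatenated
# list, and requires both operands to be lists.
old_style = [0] + cutoffs

# After: unpacking builds the result in a single literal and accepts any
# iterable (tuple, range, ...), not just a list.
new_style = [0, *cutoffs]

assert old_style == new_style == [0, 10, 100, 1000]

# Unpacking also composes anywhere in the literal, which is what lets the
# conv hunks splice *self._kernel_size between other elements:
shape = (3, 3)
filter_shape = [*shape, 4, 8]  # [3, 3, 4, 8]
```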