diff --git a/paddle/fluid/eager/auto_code_generator/generate_file_structures.py b/paddle/fluid/eager/auto_code_generator/generate_file_structures.py
index 3de8e587eea297..1feb5d80c884ec 100644
--- a/paddle/fluid/eager/auto_code_generator/generate_file_structures.py
+++ b/paddle/fluid/eager/auto_code_generator/generate_file_structures.py
@@ -143,7 +143,9 @@ def GenerateFileStructureForIntermediateDygraph(eager_dir, split_count):
         for i in range(split_count):
             f.write("nodes" + str(i + 1) + ".cc ")
         f.write("${fluid_manual_nodes} DEPS ${eager_deps} ${fluid_deps})\n")
-        f.write("add_dependencies(dygraph_node copy_dygraph_node)\n")
+        f.write(
+            "add_dependencies(dygraph_node copy_dygraph_node copy_dygraph_forward_functions)\n"
+        )
 
     with open(forwards_level_cmakelist_path, "w") as f:
         f.write("add_custom_target(\n")
@@ -181,7 +183,7 @@ def GenerateFileStructureForIntermediateDygraph(eager_dir, split_count):
             "${fluid_manual_functions} DEPS ${eager_deps} ${fluid_deps} ${GLOB_OP_LIB} ${GLOB_OPERATOR_DEPS})\n"
         )
         f.write(
-            "add_dependencies(dygraph_function copy_dygraph_forward_functions)\n"
+            "add_dependencies(dygraph_function copy_dygraph_forward_functions copy_dygraph_node)\n"
         )
 
     with open(generated_level_cmakelist_path, "w") as f:
diff --git a/paddle/fluid/eager/to_static/run_program_op_func.h b/paddle/fluid/eager/to_static/run_program_op_func.h
index fad6f7bd31e435..3b5fc14c04901f 100644
--- a/paddle/fluid/eager/to_static/run_program_op_func.h
+++ b/paddle/fluid/eager/to_static/run_program_op_func.h
@@ -60,11 +60,6 @@ inline void run_program_ad_func(
    std::vector<paddle::framework::Scope*>& step_scope,  // NOLINT
    std::vector<paddle::Tensor*>& dout,                  // NOLINT
    const paddle::framework::AttributeMap& attrs) {
-  VLOG(2) << "start run run_program";
-  // Call forward function
-  RunProgramAPI(x, params, out, step_scope, dout, attrs);
-  VLOG(2) << "start run run_program grad";
-
   // Prepare Autograd Meta
   auto deref_out = details::DereferenceTensors(out);
   std::vector<egr::AutogradMeta*> p_autograd_x =
@@ -78,6 +73,13 @@ inline void run_program_ad_func(
   bool require_any_grad = egr::EagerUtils::ComputeRequireGrad(
       trace_backward, &p_autograd_x, &p_autograd_params);
 
+  VLOG(2) << "start run run_program with require_any_grad = "
+          << require_any_grad;
+  // Call forward function
+  // If require_any_grad is false, don't save any intermediate vars.
+  RunProgramAPI(x, params, out, step_scope, dout, require_any_grad, attrs);
+  VLOG(2) << "start run run_program grad";
+
   if (require_any_grad) {
     egr::EagerUtils::PassStopGradient(false, &p_autograd_outs);
     // Create GradOpNode (1 means [out_grad], 2 means [x_grad, paramx_grad])
diff --git a/paddle/fluid/eager/to_static/run_program_op_node.h b/paddle/fluid/eager/to_static/run_program_op_node.h
index 86bad770e812bc..2220be3c876a1d 100644
--- a/paddle/fluid/eager/to_static/run_program_op_node.h
+++ b/paddle/fluid/eager/to_static/run_program_op_node.h
@@ -283,6 +283,7 @@ inline void RunProgramAPI(
    std::vector<paddle::Tensor *> &out,                   // NOLINT
    std::vector<paddle::framework::Scope *> &step_scope,  // NOLINT
    std::vector<paddle::Tensor *> &dout,                  // NOLINT
+    bool require_any_grad,
    const paddle::framework::AttributeMap &attrs) {
   VLOG(2) << "RunProgramOpKernel Compute";
   // In the original run_program OP, the default value of the is_test
@@ -436,8 +437,10 @@ inline void RunProgramAPI(
 
   VLOG(3) << paddle::framework::GenScopeTreeDebugInfo(out_scope_vec->front());
 
-  if (is_test || !egr::Controller::Instance().HasGrad()) {
-    VLOG(4) << "is test, set this scope can reused";
+  if (is_test || !require_any_grad) {
+    VLOG(4) << "don't require any grad, set this scope can be reused";
+    VLOG(4) << "is_test: " << is_test
+            << ", require_any_grad: " << require_any_grad;
     global_inner_scope->SetCanReuesd(true);
     details::GcScope(global_inner_scope);
   } else {
@@ -576,7 +579,8 @@ inline void RunProgramGradAPI(
                      *backward_global_block,
                      global_inner_scope);
     VLOG(4) << "after backward gc all vars";
-    global_inner_scope->SetCanReuesd(true);
+    global_inner_scope->SetCanReuesd(
+        false);  // can't be reused until `~GradNodeRunProgram` is called
     details::GcScope(global_inner_scope);
   }
 }
@@ -586,7 +590,15 @@ class GradNodeRunProgram : public egr::GradNodeBase {
   GradNodeRunProgram(size_t bwd_in_slot_num, size_t bwd_out_slot_num)
       : egr::GradNodeBase(bwd_in_slot_num, bwd_out_slot_num) {}
 
-  ~GradNodeRunProgram() override = default;
+  ~GradNodeRunProgram() {
+    auto *out_scope_vec = &step_scope_;
+    // Normally out_scope_vec->size() == 1. For safety, we add a for-loop here.
+    for (size_t i = 0; i < out_scope_vec->size(); ++i) {
+      paddle::framework::Scope *global_inner_scope = out_scope_vec->at(i);
+      global_inner_scope->SetCanReuesd(true);  // set this to reuse the scope.
+      details::GcScope(global_inner_scope);
+    }
+  }
 
   // Functor: perform backward computations
   virtual paddle::small_vector<std::vector<paddle::Tensor>,
                                egr::kSlotSmallVectorSize>
diff --git a/paddle/fluid/prim/api/api.yaml b/paddle/fluid/prim/api/api.yaml
index 529d024b8b8a08..e463bf791d313b 100644
--- a/paddle/fluid/prim/api/api.yaml
+++ b/paddle/fluid/prim/api/api.yaml
@@ -2,6 +2,12 @@
 - subtract
 - multiply
 - divide
+- less_equal
+- less_than
+- equal
+- not_equal
+- greater_equal
+- greater_than
 - bitwise_and
 - bitwise_not
 - bitwise_or
@@ -33,3 +39,4 @@
 - put_along_axis
 - greater_than
 - less_equal
+- where
diff --git a/paddle/fluid/prim/api/composite_backward/composite_backward_api.h b/paddle/fluid/prim/api/composite_backward/composite_backward_api.h
index 304102b733b8c9..ef280e71fa7cb4 100644
--- a/paddle/fluid/prim/api/composite_backward/composite_backward_api.h
+++ b/paddle/fluid/prim/api/composite_backward/composite_backward_api.h
@@ -30,6 +30,18 @@ using Tensor = paddle::Tensor;
 using IntArray = paddle::experimental::IntArrayBase<paddle::Tensor>;
 //  This function should have as same signature as phi, which defined in
 //  paddle/phi/api/backward/backward_api.h
+template <typename T>
+void relu_grad(const Tensor& out, const Tensor& out_grad, Tensor* x_grad) {
+  if (x_grad) {
+    auto condition = greater_than<T>(
+        out, full<T>(phi::vectorize(out.dims()), 0.0, out.dtype()));
+    auto res = where<T>(condition,
+                        out_grad,
+                        full<T>(phi::vectorize(out.dims()), 0.0, out.dtype()));
+    set_output<T>(res, x_grad);
+  }
+}
+
 template <typename T>
 void softmax_grad(const Tensor& out,
                   const Tensor& out_grad,
diff --git a/paddle/fluid/prim/tests/CMakeLists.txt b/paddle/fluid/prim/tests/CMakeLists.txt
index 07098c92e05230..92845d5bd81c51 100644
--- a/paddle/fluid/prim/tests/CMakeLists.txt
+++ b/paddle/fluid/prim/tests/CMakeLists.txt
@@ -38,7 +38,8 @@ cc_test_old(
   static_global_utils
   static_tensor_operants
   tensor_api
-  operants_manager)
+  operants_manager
+  generated_static_op)
 
 if(NOT (NOT WITH_PYTHON AND ON_INFER))
   cc_library(
diff --git a/paddle/fluid/prim/tests/test_eager_prim.cc b/paddle/fluid/prim/tests/test_eager_prim.cc
index b9c898a74e1827..98061cb0eff7c3 100644
--- a/paddle/fluid/prim/tests/test_eager_prim.cc
+++ b/paddle/fluid/prim/tests/test_eager_prim.cc
@@ -35,6 +35,12 @@ PD_DECLARE_KERNEL(tanh_grad, CPU, ALL_LAYOUT);
 PD_DECLARE_KERNEL(pow, CPU, ALL_LAYOUT);
 PD_DECLARE_KERNEL(scale, CPU, ALL_LAYOUT);
 PD_DECLARE_KERNEL(multiply, CPU, ALL_LAYOUT);
+PD_DECLARE_KERNEL(less_equal, CPU, ALL_LAYOUT);
+PD_DECLARE_KERNEL(less_than, CPU, ALL_LAYOUT);
+PD_DECLARE_KERNEL(equal, CPU, ALL_LAYOUT);
+PD_DECLARE_KERNEL(not_equal, CPU, ALL_LAYOUT);
+PD_DECLARE_KERNEL(greater_equal, CPU, ALL_LAYOUT);
+PD_DECLARE_KERNEL(greater_than, CPU, ALL_LAYOUT);
 PD_DECLARE_KERNEL(bitwise_and, CPU, ALL_LAYOUT);
 PD_DECLARE_KERNEL(bitwise_or, CPU, ALL_LAYOUT);
 PD_DECLARE_KERNEL(bitwise_xor, CPU, ALL_LAYOUT);
@@ -46,6 +52,12 @@ PD_DECLARE_KERNEL(tanh_grad, GPU, ALL_LAYOUT);
 PD_DECLARE_KERNEL(pow, GPU, ALL_LAYOUT);
 PD_DECLARE_KERNEL(scale, GPU, ALL_LAYOUT);
 PD_DECLARE_KERNEL(multiply, KPS, ALL_LAYOUT);
+PD_DECLARE_KERNEL(less_equal, KPS, ALL_LAYOUT);
+PD_DECLARE_KERNEL(less_than, KPS, ALL_LAYOUT);
+PD_DECLARE_KERNEL(equal, KPS, ALL_LAYOUT);
+PD_DECLARE_KERNEL(not_equal, KPS, ALL_LAYOUT);
+PD_DECLARE_KERNEL(greater_equal, KPS, ALL_LAYOUT);
+PD_DECLARE_KERNEL(greater_than, KPS, ALL_LAYOUT);
 PD_DECLARE_KERNEL(bitwise_and, KPS, ALL_LAYOUT);
 PD_DECLARE_KERNEL(bitwise_or, KPS, ALL_LAYOUT);
 PD_DECLARE_KERNEL(bitwise_xor, KPS, ALL_LAYOUT);
@@ -151,6 +163,50 @@ TEST(EagerPrim, LogicalOperantsTest) {
EXPECT_EQ(out0.data()[0], out1.data()[0]); } +TEST(EagerPrim, CompareOperantsTest) { + // 1. Initialized + eager_test::InitEnv(paddle::platform::CPUPlace()); + FLAGS_tensor_operants_mode = "eager"; + paddle::prim::InitTensorOperants(); + // 2. pre + paddle::framework::DDim ddim = phi::make_ddim({4, 16, 16, 32}); + paddle::Tensor tensor0 = + ::egr::egr_utils_api::CreateTensorWithValue(ddim, + paddle::platform::CPUPlace(), + phi::DataType::INT32, + phi::DataLayout::NCHW, + 1 /*value*/, + true /*is_leaf*/); + ::egr::egr_utils_api::RetainGradForTensor(tensor0); + paddle::Tensor tensor1 = + ::egr::egr_utils_api::CreateTensorWithValue(ddim, + paddle::platform::CPUPlace(), + phi::DataType::INT32, + phi::DataLayout::NCHW, + 0 /*value*/, + true /*is_leaf*/); + ::egr::egr_utils_api::RetainGradForTensor(tensor1); + // 3. Run Forward once + paddle::Tensor out0 = (tensor0 < tensor1); + paddle::Tensor out1 = less_than_ad_func(tensor0, tensor1); + EXPECT_EQ(out0.data()[0], out1.data()[0]); + out0 = (tensor0 <= tensor1); + out1 = less_equal_ad_func(tensor0, tensor1); + EXPECT_EQ(out0.data()[0], out1.data()[0]); + out0 = (tensor0 == tensor1); + out1 = equal_ad_func(tensor0, tensor1); + EXPECT_EQ(out0.data()[0], out1.data()[0]); + out0 = (tensor0 != tensor1); + out1 = not_equal_ad_func(tensor0, tensor1); + EXPECT_EQ(out0.data()[0], out1.data()[0]); + out0 = (tensor0 > tensor1); + out1 = greater_than_ad_func(tensor0, tensor1); + EXPECT_EQ(out0.data()[0], out1.data()[0]); + out0 = (tensor0 >= tensor1); + out1 = greater_equal_ad_func(tensor0, tensor1); + EXPECT_EQ(out0.data()[0], out1.data()[0]); +} + TEST(EagerPrim, TestFlags) { PrimCommonUtils::SetBwdPrimEnabled(true); ASSERT_TRUE(PrimCommonUtils::IsBwdPrimEnabled()); diff --git a/paddle/fluid/prim/tests/test_static_prim.cc b/paddle/fluid/prim/tests/test_static_prim.cc index d687781df20699..1ae7303d10d751 100644 --- a/paddle/fluid/prim/tests/test_static_prim.cc +++ b/paddle/fluid/prim/tests/test_static_prim.cc @@ -38,6 +38,12 @@ PD_DECLARE_KERNEL(scale, CPU, ALL_LAYOUT); PD_DECLARE_KERNEL(subtract, CPU, ALL_LAYOUT); PD_DECLARE_KERNEL(multiply, CPU, ALL_LAYOUT); PD_DECLARE_KERNEL(concat, CPU, ALL_LAYOUT); +PD_DECLARE_KERNEL(less_equal, CPU, ALL_LAYOUT); +PD_DECLARE_KERNEL(less_than, CPU, ALL_LAYOUT); +PD_DECLARE_KERNEL(equal, CPU, ALL_LAYOUT); +PD_DECLARE_KERNEL(not_equal, CPU, ALL_LAYOUT); +PD_DECLARE_KERNEL(greater_equal, CPU, ALL_LAYOUT); +PD_DECLARE_KERNEL(greater_than, CPU, ALL_LAYOUT); PD_DECLARE_KERNEL(bitwise_and, CPU, ALL_LAYOUT); PD_DECLARE_KERNEL(bitwise_or, CPU, ALL_LAYOUT); PD_DECLARE_KERNEL(bitwise_xor, CPU, ALL_LAYOUT); @@ -51,6 +57,12 @@ PD_DECLARE_KERNEL(scale, GPU, ALL_LAYOUT); PD_DECLARE_KERNEL(subtract, KPS, ALL_LAYOUT); PD_DECLARE_KERNEL(multiply, KPS, ALL_LAYOUT); PD_DECLARE_KERNEL(concat, GPU, ALL_LAYOUT); +PD_DECLARE_KERNEL(less_equal, KPS, ALL_LAYOUT); +PD_DECLARE_KERNEL(less_than, KPS, ALL_LAYOUT); +PD_DECLARE_KERNEL(equal, KPS, ALL_LAYOUT); +PD_DECLARE_KERNEL(not_equal, KPS, ALL_LAYOUT); +PD_DECLARE_KERNEL(greater_equal, KPS, ALL_LAYOUT); +PD_DECLARE_KERNEL(greater_than, KPS, ALL_LAYOUT); PD_DECLARE_KERNEL(bitwise_and, KPS, ALL_LAYOUT); PD_DECLARE_KERNEL(bitwise_or, KPS, ALL_LAYOUT); PD_DECLARE_KERNEL(bitwise_xor, KPS, ALL_LAYOUT); @@ -429,6 +441,99 @@ TEST(StaticCompositeGradMaker, LogicalOperantsTest) { std::size_t(1)); } +TEST(StaticCompositeGradMaker, CompareOperantsTest) { + // Initialized environment + FLAGS_tensor_operants_mode = "static"; + paddle::OperantsManager::Instance().static_operants.reset( + new 
paddle::prim::StaticTensorOperants()); + + TestBaseProgram base_program = TestBaseProgram(); + auto* target_block = base_program.GetBlock(0); + std::vector shape = {2, 2}; + StaticCompositeContext::Instance().SetBlock(target_block); + Tensor x0 = prim::empty( + shape, phi::DataType::INT32, phi::CPUPlace()); + std::string x0_name = + std::static_pointer_cast(x0.impl())->Name(); + Tensor x1 = prim::empty( + shape, phi::DataType::INT32, phi::CPUPlace()); + std::string x1_name = + std::static_pointer_cast(x1.impl())->Name(); + Tensor x2 = prim::empty( + shape, phi::DataType::INT32, phi::CPUPlace()); + std::string x2_name = + std::static_pointer_cast(x2.impl())->Name(); + Tensor x3 = prim::empty( + shape, phi::DataType::INT32, phi::CPUPlace()); + std::string x3_name = + std::static_pointer_cast(x3.impl())->Name(); + Tensor x4 = prim::empty( + shape, phi::DataType::INT32, phi::CPUPlace()); + std::string x4_name = + std::static_pointer_cast(x4.impl())->Name(); + Tensor x5 = prim::empty( + shape, phi::DataType::INT32, phi::CPUPlace()); + std::string x5_name = + std::static_pointer_cast(x5.impl())->Name(); + Tensor x6 = prim::empty( + shape, phi::DataType::INT32, phi::CPUPlace()); + std::string x6_name = + std::static_pointer_cast(x6.impl())->Name(); + + Tensor out_less = (x0 < x1); + Tensor out_less_equal = (out_less <= x2); + Tensor out_equal = (out_less_equal == x3); + Tensor out_not_equal = (out_equal != x4); + Tensor out_greater = (out_not_equal > x5); + Tensor out_greater_equal = (out_greater >= x6); + + ASSERT_EQ(target_block->AllOps().size(), static_cast(6)); + ASSERT_EQ(target_block->AllOps()[0]->Type(), "less_than"); + ASSERT_EQ(target_block->AllOps()[0]->Inputs().at("X").size(), + static_cast(1)); + ASSERT_EQ(target_block->AllOps()[0]->Inputs().at("X")[0], x0_name); + ASSERT_EQ(target_block->AllOps()[0]->Inputs().at("Y").size(), + static_cast(1)); + ASSERT_EQ(target_block->AllOps()[0]->Inputs().at("Y")[0], x1_name); + ASSERT_EQ(target_block->AllOps()[0]->Outputs().at("Out").size(), + std::size_t(1)); + + ASSERT_EQ(target_block->AllOps()[1]->Type(), "less_equal"); + ASSERT_EQ(target_block->AllOps()[1]->Inputs().at("Y").size(), + static_cast(1)); + ASSERT_EQ(target_block->AllOps()[1]->Inputs().at("Y")[0], x2_name); + ASSERT_EQ(target_block->AllOps()[1]->Outputs().at("Out").size(), + std::size_t(1)); + + ASSERT_EQ(target_block->AllOps()[2]->Type(), "equal"); + ASSERT_EQ(target_block->AllOps()[2]->Inputs().at("Y").size(), + static_cast(1)); + ASSERT_EQ(target_block->AllOps()[2]->Inputs().at("Y")[0], x3_name); + ASSERT_EQ(target_block->AllOps()[2]->Outputs().at("Out").size(), + std::size_t(1)); + + ASSERT_EQ(target_block->AllOps()[3]->Type(), "not_equal"); + ASSERT_EQ(target_block->AllOps()[3]->Inputs().at("Y").size(), + static_cast(1)); + ASSERT_EQ(target_block->AllOps()[3]->Inputs().at("Y")[0], x4_name); + ASSERT_EQ(target_block->AllOps()[3]->Outputs().at("Out").size(), + std::size_t(1)); + + ASSERT_EQ(target_block->AllOps()[4]->Type(), "greater_than"); + ASSERT_EQ(target_block->AllOps()[4]->Inputs().at("Y").size(), + static_cast(1)); + ASSERT_EQ(target_block->AllOps()[4]->Inputs().at("Y")[0], x5_name); + ASSERT_EQ(target_block->AllOps()[4]->Outputs().at("Out").size(), + std::size_t(1)); + + ASSERT_EQ(target_block->AllOps()[5]->Type(), "greater_equal"); + ASSERT_EQ(target_block->AllOps()[5]->Inputs().at("Y").size(), + static_cast(1)); + ASSERT_EQ(target_block->AllOps()[5]->Inputs().at("Y")[0], x6_name); + ASSERT_EQ(target_block->AllOps()[5]->Outputs().at("Out").size(), + std::size_t(1)); +} 
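
The eager and static CompareOperantsTest cases above pin the new C++ comparison operants to their functional counterparts. For reference, the same agreement can be checked from Python with only public APIs; a minimal dygraph sketch (shapes and fill values mirror the eager test):

    import numpy as np
    import paddle

    x = paddle.full([4, 16, 16, 32], 1, dtype='int32')
    y = paddle.full([4, 16, 16, 32], 0, dtype='int32')

    # Each overloaded operator should agree with its functional counterpart,
    # which is the Python-level analogue of what the C++ tests assert.
    pairs = [
        (x < y, paddle.less_than(x, y)),
        (x <= y, paddle.less_equal(x, y)),
        (x == y, paddle.equal(x, y)),
        (x != y, paddle.not_equal(x, y)),
        (x > y, paddle.greater_than(x, y)),
        (x >= y, paddle.greater_equal(x, y)),
    ]
    for op_out, fn_out in pairs:
        np.testing.assert_array_equal(op_out.numpy(), fn_out.numpy())
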
+ TEST(StaticPrim, TestFlags) { PrimCommonUtils::SetBwdPrimEnabled(true); ASSERT_TRUE(PrimCommonUtils::IsBwdPrimEnabled()); @@ -445,6 +550,12 @@ USE_OP_ITSELF(elementwise_mul); USE_OP_ITSELF(elementwise_sub); USE_OP_ITSELF(elementwise_pow); USE_OP_ITSELF(scale); +USE_OP_ITSELF(less_equal); +USE_OP_ITSELF(less_than); +USE_OP_ITSELF(equal); +USE_OP_ITSELF(not_equal); +USE_OP_ITSELF(greater_equal); +USE_OP_ITSELF(greater_than); USE_OP_ITSELF(bitwise_xor); USE_OP_ITSELF(bitwise_and); USE_OP_ITSELF(bitwise_not); diff --git a/paddle/phi/api/include/tensor.h b/paddle/phi/api/include/tensor.h index 67a607f5880e23..d3943750fd21ef 100644 --- a/paddle/phi/api/include/tensor.h +++ b/paddle/phi/api/include/tensor.h @@ -534,29 +534,23 @@ class PADDLE_API Tensor final { * @return Tensor */ Tensor operator+(const Tensor& other) const; - Tensor operator-(const Tensor& other) const; - Tensor operator*(const Tensor& other) const; - Tensor operator/(const Tensor& other) const; - Tensor operator+(const Scalar& other) const; - Tensor operator-(const Scalar& other) const; - Tensor operator*(const Scalar& other) const; - Tensor operator/(const Scalar& other) const; - + Tensor operator<(const Tensor& other) const; + Tensor operator<=(const Tensor& other) const; + Tensor operator==(const Tensor& other) const; + Tensor operator!=(const Tensor& other) const; + Tensor operator>(const Tensor& other) const; + Tensor operator>=(const Tensor& other) const; Tensor operator-() const; - Tensor operator~() const; - Tensor operator&(const Tensor& other) const; - Tensor operator|(const Tensor& other) const; - Tensor operator^(const Tensor& other) const; /* Part 8: Autograd methods */ @@ -678,6 +672,12 @@ class PADDLE_API Tensor final { Tensor divide(const Scalar& y) const; Tensor multiply(const Scalar& y) const; Tensor subtract(const Scalar& y) const; + Tensor less_equal(const Tensor& y) const; + Tensor less_than(const Tensor& y) const; + Tensor equal(const Tensor& y) const; + Tensor not_equal(const Tensor& y) const; + Tensor greater_equal(const Tensor& y) const; + Tensor greater_than(const Tensor& y) const; Tensor bitwise_and(const Tensor& y) const; Tensor bitwise_or(const Tensor& y) const; Tensor bitwise_xor(const Tensor& y) const; diff --git a/paddle/phi/api/include/tensor_utils.h b/paddle/phi/api/include/tensor_utils.h index 43a19a09c14482..3fcd8b2a8de661 100644 --- a/paddle/phi/api/include/tensor_utils.h +++ b/paddle/phi/api/include/tensor_utils.h @@ -33,12 +33,11 @@ using Deleter = std::function; * @param dtype The data type of the tensor, should correspond to data type of * `data`. See PD_FOR_EACH_DATA_TYPE in `phi/common/data_type.h` * @param layout The data layout of the tensor. - * @param place The place where the tensor is located, should correspond to - * place of `data`. - * If `place` use the default value, it will be inferred from - * `data`, However,the feature is only supported on CPU or GPU. - * So make sure that `place` is equal to the place of `data` when - * using other devices. + * @param place The place where the tensor is located. + * If `place` is default value, it will be inferred from `data`, + * However,the feature is only supported on CPU or GPU. + * If `place` is not default value, make sure that `place` is equal + * to the place of `data` * @param deleter A function or function object that will be called to free the * memory buffer. 
* diff --git a/paddle/phi/api/lib/tensor_utils.cc b/paddle/phi/api/lib/tensor_utils.cc index c083e5be5f1d8f..b8d25e4f22b100 100644 --- a/paddle/phi/api/lib/tensor_utils.cc +++ b/paddle/phi/api/lib/tensor_utils.cc @@ -14,8 +14,6 @@ limitations under the License. */ #include "paddle/phi/api/include/tensor_utils.h" -#include - #include "paddle/phi/api/lib/api_registry.h" #include "paddle/phi/core/dense_tensor.h" @@ -72,8 +70,6 @@ PADDLE_API Tensor from_blob(void* data, phi::errors::InvalidArgument( "shape cannot be constructed from a Tensor.")); - // TODO(huangjiyi): We need copy data to specified place when - // the input place is different with place of data. phi::Place data_place; if (place.GetType() == phi::AllocationType::UNDEFINED || place.GetType() == phi::AllocationType::CPU || diff --git a/paddle/phi/api/yaml/backward.yaml b/paddle/phi/api/yaml/backward.yaml index cb4c0e085f4537..29b65fa9b3e31e 100644 --- a/paddle/phi/api/yaml/backward.yaml +++ b/paddle/phi/api/yaml/backward.yaml @@ -1142,6 +1142,7 @@ kernel : func : relu_grad backward: relu_double_grad + composite: relu_grad(out, out_grad, x_grad) inplace : (out_grad -> x_grad) - backward_op : renorm_grad diff --git a/paddle/phi/api/yaml/generator/tensor_operants_gen.py b/paddle/phi/api/yaml/generator/tensor_operants_gen.py index 5df297bbc950c2..38618bc9a3a027 100644 --- a/paddle/phi/api/yaml/generator/tensor_operants_gen.py +++ b/paddle/phi/api/yaml/generator/tensor_operants_gen.py @@ -144,6 +144,30 @@ class TensorOperantsBase { return paddle::OperantsManager::Instance().subtract(static_cast(*this), y); } +Tensor Tensor::operator<(const Tensor &other) const { + return less_than(other); +} + +Tensor Tensor::operator<=(const Tensor &other) const { + return less_equal(other); +} + +Tensor Tensor::operator==(const Tensor &other) const { + return equal(other); +} + +Tensor Tensor::operator!=(const Tensor &other) const { + return not_equal(other); +} + +Tensor Tensor::operator>(const Tensor &other) const { + return greater_than(other); +} + +Tensor Tensor::operator>=(const Tensor &other) const { + return greater_equal(other); +} + Tensor Tensor::operator-() const { return scale(-1.0, 0.0, true); } diff --git a/paddle/phi/api/yaml/tensor_operants.yaml b/paddle/phi/api/yaml/tensor_operants.yaml index 629408c4f40173..8c0b59fcdc4e45 100644 --- a/paddle/phi/api/yaml/tensor_operants.yaml +++ b/paddle/phi/api/yaml/tensor_operants.yaml @@ -4,6 +4,12 @@ - subtract - multiply - divide +- less_equal +- less_than +- equal +- not_equal +- greater_equal +- greater_than - bitwise_and - bitwise_not - bitwise_or diff --git a/paddle/phi/backends/xpu/xpu_context.cc b/paddle/phi/backends/xpu/xpu_context.cc index eec65d0d6a3eb5..acb8ae8db3b3a3 100644 --- a/paddle/phi/backends/xpu/xpu_context.cc +++ b/paddle/phi/backends/xpu/xpu_context.cc @@ -60,6 +60,8 @@ struct XPUContext::Impl { return false; } std::string cur_thread_name = phi::GetCurrentThreadName(); + VLOG(3) << "XPU Dataloader: current thread at Get Context = " + << phi::GetCurrentThreadName(); bool is_dataloader_thread = (cur_thread_name.substr(0, 10) == "Dataloader"); return is_dataloader_thread; } @@ -93,6 +95,7 @@ struct XPUContext::Impl { xpu::destroy_context(ctx); ctx = nullptr; } + xdl_context_map_.clear(); } } @@ -100,8 +103,7 @@ struct XPUContext::Impl { XPUStream stream() const { if (IsDataloader()) { - std::string cur_thread_name = phi::GetCurrentThreadName(); - xpu::Context* ctx_t = GetXdlCtx(cur_thread_name); + xpu::Context* ctx_t = GetXdlCtx(); return ctx_t->xpu_stream; } return 
context_->xpu_stream; @@ -120,12 +122,9 @@ struct XPUContext::Impl { // Overload GetXContext function to set and get // contexts of XPU Dataloader threads, and keep old GetXContext Method xpu::Context* GetXContext() { - std::string cur_thread_name = phi::GetCurrentThreadName(); - VLOG(3) << "XPU Dataloader: current thread at Get Context = " - << phi::GetCurrentThreadName(); if (IsDataloader()) { - SetXdlCtx(cur_thread_name); - xpu::Context* ctx_t = GetXdlCtx(cur_thread_name); + SetXdlCtx(); + xpu::Context* ctx_t = GetXdlCtx(); PD_CHECK(ctx_t != nullptr, "the xpu dataloader context is nullptr."); return ctx_t; } @@ -135,20 +134,15 @@ struct XPUContext::Impl { } void Wait() const { - backends::xpu::XPUDeviceGuard guard(place_.GetDeviceId()); - PD_CHECK(context_ != nullptr, "the xpu context is nullptr."); - xpu_wait(context_->xpu_stream); - } - - // Overload Wait for xpu wait on XPU Dataloader threads streams - void Wait() { if (IsDataloader()) { - std::string cur_thread_name = phi::GetCurrentThreadName(); - SetXdlCtx(cur_thread_name); - xpu::Context* ctx_t = GetXdlCtx(cur_thread_name); - PD_CHECK(ctx_t != nullptr, "the xpu dataloader context is nullptr."); - xpu_wait(GetXdlCtx(cur_thread_name)->xpu_stream); + xpu::Context* ctx_t = GetXdlCtx(); + if (ctx_t) { + PD_CHECK(ctx_t != nullptr, "the xpu dataloader context is nullptr."); + xpu_wait(ctx_t->xpu_stream); + } + return; } + backends::xpu::XPUDeviceGuard guard(place_.GetDeviceId()); PD_CHECK(context_ != nullptr, "the xpu context is nullptr."); xpu_wait(context_->xpu_stream); @@ -191,22 +185,24 @@ struct XPUContext::Impl { for (const auto& tp : thread_map) { std::string t_name = tp.second; if (t_name.substr(0, 10) == "Dataloader") { - SetXdlCtx(t_name); + SetXdlCtx(); } } } - void SetXdlCtx(std::string thread_name) { - if (xdl_context_map_.find(thread_name) == xdl_context_map_.end()) { + void SetXdlCtx() { + auto pid = phi::GetProcessId(); + if (xdl_context_map_.find(pid) == xdl_context_map_.end()) { xpu::Context* ctx_t = xpu::create_context(); - xdl_context_map_[thread_name] = ctx_t; + xdl_context_map_[pid] = ctx_t; } } - xpu::Context* GetXdlCtx(const std::string thread_name) const { - return (xdl_context_map_.find(thread_name) == xdl_context_map_.end()) + xpu::Context* GetXdlCtx() const { + auto pid = phi::GetProcessId(); + return (xdl_context_map_.find(pid) == xdl_context_map_.end()) ? nullptr - : xdl_context_map_.find(thread_name)->second; + : xdl_context_map_.find(pid)->second; } std::vector GetAllXdlCtxs() { @@ -221,7 +217,7 @@ struct XPUContext::Impl { Place place_; backends::xpu::XPUVersion xpu_version_; xpu::Context* context_{nullptr}; - std::unordered_map xdl_context_map_; + std::unordered_map xdl_context_map_; // NOTE: Distributed communicator, distributed framework manages its // resources, XPUContext only holds references. 
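
The xpu_context.cc hunk above re-keys the cached Dataloader contexts by `phi::GetProcessId()` instead of the worker's thread-name string, so lookups no longer depend on threads keeping a "Dataloader"-prefixed name. The caching pattern, sketched in Python for brevity (these names are illustrative, not Paddle API):

    import os

    _xdl_context_map = {}  # pid -> device context, mirroring xdl_context_map_

    def get_or_create_context(create_context):
        # Return this process's context, creating it on first use,
        # as SetXdlCtx/GetXdlCtx now do with phi::GetProcessId().
        pid = os.getpid()
        if pid not in _xdl_context_map:
            _xdl_context_map[pid] = create_context()
        return _xdl_context_map[pid]
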
diff --git a/paddle/phi/kernels/xpu/pool_grad_kernel.cc b/paddle/phi/kernels/xpu/pool_grad_kernel.cc index 6f937b93e19762..dfea57231560ad 100644 --- a/paddle/phi/kernels/xpu/pool_grad_kernel.cc +++ b/paddle/phi/kernels/xpu/pool_grad_kernel.cc @@ -340,22 +340,41 @@ void Pool3dGradKernel(const Context& ctx, PADDLE_ENFORCE_XDNN_SUCCESS(r, "adaptive_pool3d_grad"); } else { if (pooling_type == "max") { - r = xpu::max_pool3d_grad( - ctx.x_context(), - reinterpret_cast(x.data()), - reinterpret_cast(out.data()), - index_data, - reinterpret_cast(dout.data()), - reinterpret_cast(dx->data()), - n, - c, - in_d, - in_h, - in_w, - kernel_size, - strides, - paddings, - !channel_last); + if (kernel_size[0] == 1 && kernel_size.size() == 3 && + strides.size() == 3 && paddings.size() == 6) { + r = xpu::max_pool2d_grad( + ctx.x_context(), + reinterpret_cast(x.data()), + reinterpret_cast(out.data()), + index_data, + reinterpret_cast(dout.data()), + reinterpret_cast(dx->data()), + n, + c * in_d, + in_h, + in_w, + {kernel_size[1], kernel_size[2]}, + {strides[1], strides[2]}, + {paddings[2], paddings[3], paddings[4], paddings[5]}, + !channel_last); + } else { + r = xpu::max_pool3d_grad( + ctx.x_context(), + reinterpret_cast(x.data()), + reinterpret_cast(out.data()), + index_data, + reinterpret_cast(dout.data()), + reinterpret_cast(dx->data()), + n, + c, + in_d, + in_h, + in_w, + kernel_size, + strides, + paddings, + !channel_last); + } } else if (pooling_type == "avg") { r = xpu::avg_pool3d_grad( ctx.x_context(), diff --git a/python/paddle/common_ops_import.py b/python/paddle/common_ops_import.py index c8fffd9a246365..62ee1eb0cee042 100644 --- a/python/paddle/common_ops_import.py +++ b/python/paddle/common_ops_import.py @@ -33,7 +33,6 @@ in_dygraph_mode, ) from paddle.fluid.layer_helper import LayerHelper # noqa: F401 -from paddle.fluid.layers import fill_constant # noqa: F401 from paddle.fluid.layers.layer_function_generator import ( # noqa: F401 templatedoc, ) diff --git a/python/paddle/distributed/auto_parallel/process_group.py b/python/paddle/distributed/auto_parallel/process_group.py index 7f5ed62fa4efec..91990205504adf 100644 --- a/python/paddle/distributed/auto_parallel/process_group.py +++ b/python/paddle/distributed/auto_parallel/process_group.py @@ -17,8 +17,8 @@ import paddle from paddle import _legacy_C_ops from paddle.framework import core, in_dygraph_mode +from paddle.tensor import fill_constant -from ...fluid.layers.tensor import fill_constant from ..collective import _get_global_env, _new_ring_id diff --git a/python/paddle/distributed/fleet/utils/hybrid_parallel_inference.py b/python/paddle/distributed/fleet/utils/hybrid_parallel_inference.py index d348d6a8f3e2e3..f3cafbec5f7845 100644 --- a/python/paddle/distributed/fleet/utils/hybrid_parallel_inference.py +++ b/python/paddle/distributed/fleet/utils/hybrid_parallel_inference.py @@ -50,9 +50,9 @@ class HybridParallelInferenceHelper: # while op pattern with paddle.fluid.device_guard(f'{device}:all'): # init global cond - max_len = layers.fill_constant(shape=[1], dtype="int64", value=10, force_cpu=False) - step_idx = layers.fill_constant(shape=[1], dtype="int64", value=0, force_cpu=False) - cond_int = layers.fill_constant(shape=[1], dtype="int64", value=0, force_cpu=False, name="cond_int") + max_len = paddle.full(shape=[1], dtype="int64", fill_value=10) + step_idx = paddle.full(shape=[1], dtype="int64", fill_value=0) + cond_int = paddle.full(shape=[1], dtype="int64", fill_value=0, name="cond_int") cond = layers.cast(step_idx < max_len, dtype="bool") 
while_op = layers.While(cond, is_test=True) @@ -124,14 +124,14 @@ class HybridParallelInferenceHelper: X = paddle.static.data(name='X', shape=[None, 2], dtype='float32') with paddle.fluid.device_guard(f'{device}:all'): - max_len = layers.fill_constant( - shape=[1], dtype="int64", value=5, force_cpu=False, name="n") - step_idx = layers.fill_constant( - shape=[1], dtype="int64", value=0, force_cpu=False, name="i") + max_len = paddle.full( + shape=[1], dtype="int64", fill_value=5, name="n") + step_idx = paddle.full( + shape=[1], dtype="int64", fill_value=0, name="i") data = paddle.tensor.array_write(X, step_idx) - cond_int = layers.fill_constant(shape=[1], dtype="int64", value=0, force_cpu=False, name="cond_int") + cond_int = paddle.full(shape=[1], dtype="int64", fill_value=0, name="cond_int") cond = paddle.less_than(x=step_idx, y=max_len) while_op = layers.While(cond, is_test=True) diff --git a/python/paddle/distributed/launch/controllers/collective.py b/python/paddle/distributed/launch/controllers/collective.py index 170f19123e3f12..de4db754b72cea 100644 --- a/python/paddle/distributed/launch/controllers/collective.py +++ b/python/paddle/distributed/launch/controllers/collective.py @@ -35,9 +35,9 @@ def build_pod(self): and self.ctx.args.start_port and self.ctx.args.ips ): - self._build_pod_with_args() + return self._build_pod_with_args() else: - self._build_pod_with_master() + return self._build_pod_with_master() def _build_pod_with_args(self): self.pod.replicas = self.pod_replicas() diff --git a/python/paddle/distributed/passes/auto_parallel_gradient_merge.py b/python/paddle/distributed/passes/auto_parallel_gradient_merge.py index f807127f45eaef..4bf460d1b42889 100644 --- a/python/paddle/distributed/passes/auto_parallel_gradient_merge.py +++ b/python/paddle/distributed/passes/auto_parallel_gradient_merge.py @@ -25,7 +25,6 @@ set_var_dist_attr, ) from paddle.distributed.fleet.meta_optimizers.common import OP_ROLE_KEY, OpRole -from paddle.fluid import layers from paddle.framework import core from paddle.static import device_guard @@ -284,7 +283,7 @@ def true_apply_gradient(): # clear gradient_merge_vars for param, new_grad in new_params_to_grads: - layers.fill_constant( + paddle.tensor.fill_constant( shape=new_grad.shape, dtype=new_grad.dtype, value=0.0, diff --git a/python/paddle/fluid/contrib/layers/metric_op.py b/python/paddle/fluid/contrib/layers/metric_op.py index a2b23167dc322c..8aa8098090e83e 100755 --- a/python/paddle/fluid/contrib/layers/metric_op.py +++ b/python/paddle/fluid/contrib/layers/metric_op.py @@ -14,7 +14,7 @@ """ Contrib layers just related to metric. 
""" - +import paddle import warnings import paddle from paddle.fluid.layer_helper import LayerHelper @@ -79,7 +79,7 @@ def ctr_metric_bundle(input, label, ins_tag_weight=None): """ if ins_tag_weight is None: - ins_tag_weight = tensor.fill_constant( + ins_tag_weight = paddle.tensor.fill_constant( shape=[1, 1], dtype="float32", value=1.0 ) diff --git a/python/paddle/fluid/contrib/tests/test_multi_precision_fp16_train.py b/python/paddle/fluid/contrib/tests/test_multi_precision_fp16_train.py index ef21c51c9a2b54..46d11d37918cc0 100644 --- a/python/paddle/fluid/contrib/tests/test_multi_precision_fp16_train.py +++ b/python/paddle/fluid/contrib/tests/test_multi_precision_fp16_train.py @@ -287,10 +287,10 @@ def decorate_with_data_loader(self): iterable=False, use_double_buffer=False, ) - zero_var = fluid.layers.fill_constant( + zero_var = paddle.tensor.fill_constant( shape=[1], dtype='int64', value=0 ) - one_var = fluid.layers.fill_constant( + one_var = paddle.tensor.fill_constant( shape=[1], dtype='int64', value=1 ) with fluid.layers.control_flow.Switch() as switch: diff --git a/python/paddle/fluid/layers/control_flow.py b/python/paddle/fluid/layers/control_flow.py index d98f90ed7d7f4f..0697b53914f17d 100755 --- a/python/paddle/fluid/layers/control_flow.py +++ b/python/paddle/fluid/layers/control_flow.py @@ -15,7 +15,6 @@ from ..wrapped_decorator import signature_safe_contextmanager from .layer_function_generator import templatedoc -from .tensor import fill_constant from .. import core from ..framework import ( Program, @@ -925,11 +924,12 @@ class While: .. code-block:: python import paddle.fluid as fluid + import paddle import numpy as np - i = fluid.layers.fill_constant(shape=[1], dtype='int64', value=0) # loop counter + i = paddle.full(shape=[1], dtype='int64', fill_value=0) # loop counter - loop_len = fluid.layers.fill_constant(shape=[1],dtype='int64', value=10) # loop length + loop_len = paddle.full(shape=[1],dtype='int64', fill_value=10) # loop length cond = paddle.less_than(x=i, y=loop_len) while_op = fluid.layers.While(cond=cond) @@ -952,11 +952,11 @@ class While: import numpy as np paddle.enable_static() - i = fluid.layers.fill_constant(shape=[1], dtype='int64', value=0) - loop_len = fluid.layers.fill_constant(shape=[1], dtype='int64', value=10) - one = fluid.layers.fill_constant(shape=[1], dtype='float32', value=1) + i = paddle.full(shape=[1], dtype='int64', fill_value=0) + loop_len = paddle.full(shape=[1], dtype='int64', fill_value=10) + one = paddle.full(shape=[1], dtype='float32', fill_value=1) data = fluid.data(name='data', shape=[1], dtype='float32') - sums = fluid.layers.fill_constant(shape=[1], dtype='float32', value=0) # Define the variable to be obtained ouside of While, which name should be different from the variable inside the While to be obtained + sums = paddle.full(shape=[1], dtype='float32', fill_value=0) # Define the variable to be obtained ouside of While, which name should be different from the variable inside the While to be obtained cond = paddle.less_than(x=i, y=loop_len) while_op = fluid.layers.While(cond=cond) @@ -1537,13 +1537,15 @@ class Switch: .. 
code-block:: python ''' + import paddle + import paddle.fluid as fluid with fluid.layers.Switch() as switch: with switch.case(cond1): - i = fluid.layers.fill_constant(shape=[1], dtype='int64', value=1) + i = paddle.full(shape=[1], dtype='int64', fill_value=1) with switch.case(cond2): - i = fluid.layers.fill_constant(shape=[1], dtype='int64', value=2) + i = paddle.full(shape=[1], dtype='int64', fill_value=2) with switch.default(): - i = fluid.layers.fill_constant(shape=[1], dtype='int64', value=0) + i = paddle.full(shape=[1], dtype='int64', fill_value=0) ''' Args: @@ -1561,20 +1563,20 @@ class Switch: dtype='float32', persistable=True, name="learning_rate") - zero_var = fluid.layers.fill_constant( - shape=[1], dtype='float32', value=0.0) - one_var = fluid.layers.fill_constant( - shape=[1], dtype='float32', value=1.0) - two_var = fluid.layers.fill_constant( - shape=[1], dtype='float32', value=2.0) + zero_var = paddle.full( + shape=[1], dtype='float32', fill_value=0.0) + one_var = paddle.full( + shape=[1], dtype='float32', fill_value=1.0) + two_var = paddle.full( + shape=[1], dtype='float32', fill_value=2.0) global_step = fluid.layers.autoincreased_step_counter(counter_name='@LR_DECAY_COUNTER@', begin=0, step=1) with fluid.layers.control_flow.Switch() as switch: with switch.case(global_step == zero_var): - fluid.layers.assign(input=one_var, output=lr) + paddle.assign(input=one_var, output=lr) with switch.default(): - fluid.layers.assign(input=two_var, output=lr) + paddle.assign(input=two_var, output=lr) exe = fluid.Executor(fluid.CPUPlace()) exe.run(fluid.default_startup_program()) diff --git a/python/paddle/fluid/layers/learning_rate_scheduler.py b/python/paddle/fluid/layers/learning_rate_scheduler.py index 104266600181ef..6d86fa9448c170 100644 --- a/python/paddle/fluid/layers/learning_rate_scheduler.py +++ b/python/paddle/fluid/layers/learning_rate_scheduler.py @@ -352,10 +352,10 @@ def polynomial_decay( if cycle: div_res = paddle.ceil(global_step / decay_steps) - zero_var = tensor.fill_constant( + zero_var = paddle.tensor.fill_constant( shape=[1], dtype='float32', value=0.0 ) - one_var = tensor.fill_constant( + one_var = paddle.tensor.fill_constant( shape=[1], dtype='float32', value=1.0 ) @@ -364,7 +364,7 @@ def polynomial_decay( paddle.assign(one_var, output=div_res) decay_steps = decay_steps * div_res else: - decay_steps_var = tensor.fill_constant( + decay_steps_var = paddle.tensor.fill_constant( shape=[1], dtype='float32', value=float(decay_steps) ) global_step = paddle.minimum(x=global_step, y=decay_steps_var) @@ -435,21 +435,21 @@ def piecewise_decay(boundaries, values): with control_flow.Switch() as switch: for i in range(len(boundaries)): - boundary_val = tensor.fill_constant( + boundary_val = paddle.tensor.fill_constant( shape=[1], dtype='float32', value=float(boundaries[i]), force_cpu=True, ) with switch.case(global_step < boundary_val): - tensor.fill_constant( + paddle.tensor.fill_constant( shape=[1], dtype="float32", value=float(values[i]), out=lr, ) with switch.default(): - tensor.fill_constant( + paddle.tensor.fill_constant( shape=[1], dtype="float32", value=float(values[len(values) - 1]), @@ -598,7 +598,7 @@ def linear_lr_warmup(learning_rate, warmup_steps, start_lr, end_lr): paddle.assign(decayed_lr, lr) with switch.default(): if not isinstance(learning_rate, Variable): - learning_rate = tensor.fill_constant( + learning_rate = paddle.tensor.fill_constant( shape=[1], dtype=dtype, value=float(learning_rate) ) paddle.assign(learning_rate, lr) diff --git 
a/python/paddle/fluid/layers/nn.py b/python/paddle/fluid/layers/nn.py index f2f763e3b60f0e..f253721aeecce7 100644 --- a/python/paddle/fluid/layers/nn.py +++ b/python/paddle/fluid/layers/nn.py @@ -41,7 +41,7 @@ templatedoc, _generate_doc_string_, ) -from .tensor import fill_constant, zeros +from .tensor import zeros from .. import unique_name from .. import core from ...utils import deprecated diff --git a/python/paddle/fluid/layers/tensor.py b/python/paddle/fluid/layers/tensor.py index d3d5911c19cb63..372cefac712a3d 100644 --- a/python/paddle/fluid/layers/tensor.py +++ b/python/paddle/fluid/layers/tensor.py @@ -39,142 +39,10 @@ __all__ = [ 'fill_constant_batch_size_like', - 'fill_constant', 'zeros', ] -def fill_constant(shape, dtype, value, force_cpu=False, out=None, name=None): - """ - - This OP creates a Tensor with specified `shape` and `dtype`, and - initializes it with a constant specified by `value`. - - The attribute `stop_gradient` of the created Tensor is set to True. - - Args: - shape(list|tuple|Tensor): Shape of the output Tensor, the data type of ``shape`` is int32 or int64. - If ``shape`` is a list or tuple, the elements of it should be integers or Tensors with shape [1]. - If ``shape`` is an Tensor, it should be an 1-D Tensor with date type int32 or int64. - dtype(np.dtype|str): Data type of the output Tensor which can - be float16, float32, float64, uint8, int16, int32, int64. - value(bool|float|int|Tensor): The constant value used to initialize - the Tensor to be created. If ``value`` is an Tensor, it should be an 1-D Tensor. - force_cpu(bool, optional): data should be on CPU if it's true, default value is False. - out(Tensor, optional): Optional output which can be any created - Tensor that meets the requirements to store the result of operation. - if ``out`` is None, a new Tensor will be create to store the result. - name(str, optional): The default value is None. Normally there is no need for user to set this - property. For more information, please refer to :ref:`api_guide_Name`. - - Returns: - Tensor: Tensor which is created according to shape and dtype. - - Examples: - .. code-block:: python - - import paddle.fluid as fluid - # attr shape is a list which doesn't contain Tensor. - data1 = fluid.layers.fill_constant(shape=[2,1], value=0, dtype='int64') # data1=[[0],[0]] - data2 = fluid.layers.fill_constant(shape=[2,1], value=5, dtype='int64', out=data1) - # data1=[[5], [5]] data2=[[5], [5]] - - # attr shape is a list which contains Tensor. - positive_2 = fluid.layers.fill_constant([1], "int32", 2) - data3 = fluid.layers.fill_constant(shape=[1, positive_2], dtype='float32', value=1.5) # data3=[[1.5, 1.5]] - - # attr shape is a Tensor. - shape = fluid.layers.fill_constant([2], "int32", 2) # shape=[2,2] - data4 = fluid.layers.fill_constant(shape=shape, dtype='bool', value=True) # data4=[[True,True],[True,True]] - - # attr value is a Tensor. 
- val = fluid.layers.fill_constant([1], "float32", 2.0) # val=[2.0] - data5 = fluid.layers.fill_constant(shape=[2,1], value=val, dtype='float32') #data5=[[2.0],[2.0]] - """ - - if in_dygraph_mode(): - place = _current_expected_place() - if force_cpu: - place = core.CPUPlace() - if isinstance(shape, (list, tuple)): - shape = paddle.utils.convert_shape_to_list(shape) - - if not isinstance(dtype, core.VarDesc.VarType): - dtype = convert_np_dtype_to_dtype_(dtype) - - if out is None: - out = _C_ops.full(shape, float(value), dtype, place) - out.stop_gradient = True - return out - - if out is not None: - # final state mode is support out is not None. - _C_ops.full_(out, shape, float(value), dtype, place) - out.stop_gradient = True - return out - else: - attrs = {'force_cpu': force_cpu} - dtype = convert_dtype(dtype) - if not isinstance(value, Variable): - if dtype in ['uint8', 'int16', 'int32', 'int64']: - attrs['str_value'] = str(int(value)) - attrs['value'] = int(value) - else: - attrs['str_value'] = str(float(value)) - attrs['value'] = float(value) - - helper = LayerHelper("fill_constant", **locals()) - inputs = {} - if isinstance(value, Variable): - if convert_dtype(value.dtype) != dtype: - value = paddle.cast(value, dtype) - inputs['ValueTensor'] = value - - paddle.utils.check_shape(shape) - check_dtype( - dtype, - 'dtype', - [ - 'bool', - 'float16', - 'float32', - 'float64', - 'uint8', - 'int16', - 'int32', - 'int64', - 'complex64', - 'complex128', - 'uint16', - ], - 'fill_constant', - ) - check_type(shape, 'shape', (Variable, list, tuple), 'fill_constant') - - if out is not None: - check_variable_and_dtype( - out, 'out', [convert_dtype(dtype)], 'fill_constant' - ) - - helper = LayerHelper("fill_constant", **locals()) - paddle.utils.get_shape_tensor_inputs( - inputs=inputs, attrs=attrs, shape=shape, op_type='fill_constant' - ) - - if out is None: - out = helper.create_variable_for_type_inference(dtype=dtype) - attrs['dtype'] = out.dtype - helper.append_op( - type='fill_constant', - inputs=inputs, - outputs={'Out': [out]}, - attrs=attrs, - stop_gradient=True, - ) - out.stop_gradient = True - return out - - @deprecated(since='1.8.0', update_to="paddle.fluid.layers.fill_constant") @templatedoc() def fill_constant_batch_size_like( @@ -214,8 +82,9 @@ def fill_constant_batch_size_like( .. code-block:: python + import paddle import paddle.fluid as fluid - like = fluid.layers.fill_constant(shape=[1,2], value=10, dtype='int64') #like=[[10, 10]] + like = paddle.full(shape=[1,2], fill_value=10, dtype='int64') #like=[[10, 10]] data = fluid.layers.fill_constant_batch_size_like( input=like, shape=[1], value=0, dtype='int64') #like=[[10, 10]] data=[0] @@ -279,10 +148,16 @@ def zeros(shape, dtype, force_cpu=False, name=None): .. 
code-block:: python import paddle.fluid as fluid + import paddle data = fluid.layers.zeros(shape=[3, 2], dtype='float32') # [[0., 0.], [0., 0.], [0., 0.]] # shape is a Tensor - shape = fluid.layers.fill_constant(shape=[2], dtype='int32', value=2) + shape = paddle.full(shape=[2], dtype='int32', fill_value=2) data1 = fluid.layers.zeros(shape=shape, dtype='int32') #[[0, 0], [0, 0]] """ - return fill_constant(value=0.0, **locals()) + # TODO: remove zeros + from paddle.tensor import fill_constant + + return fill_constant( + value=0.0, shape=shape, dtype=dtype, force_cpu=force_cpu, name=name + ) diff --git a/python/paddle/fluid/optimizer.py b/python/paddle/fluid/optimizer.py index 6a97ab0cbd87da..181058b8920e29 100755 --- a/python/paddle/fluid/optimizer.py +++ b/python/paddle/fluid/optimizer.py @@ -236,7 +236,7 @@ def state_dict(self): None, name='global_step', dtype='int32' ) - tensor.fill_constant( + paddle.tensor.fill_constant( [1], "int32", self._learning_rate.step_num, out=var_temp ) @@ -7393,11 +7393,11 @@ def minimize(self, loss, startup_program=None): paddle.increment(x=step, value=1.0) # lookahead - zero_var = layers.fill_constant( + zero_var = paddle.tensor.fill_constant( shape=[1], dtype='float32', value=0.0 ) - one_var = layers.fill_constant( + one_var = paddle.tensor.fill_constant( shape=[1], dtype='float32', value=1.0 ) @@ -7747,7 +7747,7 @@ def true_apply_gradient(): # clear gradient_merge_vars for param, new_grad in new_params_grads: - layers.fill_constant( + paddle.tensor.fill_constant( shape=new_grad.shape, dtype=new_grad.dtype, value=0.0, diff --git a/python/paddle/fluid/tests/custom_op/custom_tensor_operator.cc b/python/paddle/fluid/tests/custom_op/custom_tensor_operator.cc index 3036fc89dea5a3..c6fa6fabefef86 100644 --- a/python/paddle/fluid/tests/custom_op/custom_tensor_operator.cc +++ b/python/paddle/fluid/tests/custom_op/custom_tensor_operator.cc @@ -453,3 +453,93 @@ PD_BUILD_OP(custom_logical_not) .Inputs({"X"}) .Outputs({"Out"}) .SetKernelFn(PD_KERNEL(NotForward)); + +// out = (x < y) +std::vector LessThanForward(const paddle::Tensor& x, + const paddle::Tensor& y) { + if (x.is_cpu() || x.is_gpu()) { + return {x < y}; + } else { + PD_THROW("Not implemented."); + } +} + +PD_BUILD_OP(custom_less_than) + .Inputs({"X", "Y"}) + .Outputs({"Out"}) + .SetKernelFn(PD_KERNEL(LessThanForward)); + +// out = (x <= y) +std::vector LessEqualForward(const paddle::Tensor& x, + const paddle::Tensor& y) { + if (x.is_cpu() || x.is_gpu()) { + return {x <= y}; + } else { + PD_THROW("Not implemented."); + } +} + +PD_BUILD_OP(custom_less_equal) + .Inputs({"X", "Y"}) + .Outputs({"Out"}) + .SetKernelFn(PD_KERNEL(LessEqualForward)); + +// out = (x == y) +std::vector EqualForward(const paddle::Tensor& x, + const paddle::Tensor& y) { + if (x.is_cpu() || x.is_gpu()) { + return {x == y}; + } else { + PD_THROW("Not implemented."); + } +} + +PD_BUILD_OP(custom_equal) + .Inputs({"X", "Y"}) + .Outputs({"Out"}) + .SetKernelFn(PD_KERNEL(EqualForward)); + +// out = (x != y) +std::vector NotEqualForward(const paddle::Tensor& x, + const paddle::Tensor& y) { + if (x.is_cpu() || x.is_gpu()) { + return {x != y}; + } else { + PD_THROW("Not implemented."); + } +} + +PD_BUILD_OP(custom_not_equal) + .Inputs({"X", "Y"}) + .Outputs({"Out"}) + .SetKernelFn(PD_KERNEL(NotEqualForward)); + +// out = (x > y) +std::vector GreaterThanForward(const paddle::Tensor& x, + const paddle::Tensor& y) { + if (x.is_cpu() || x.is_gpu()) { + return {x > y}; + } else { + PD_THROW("Not implemented."); + } +} + 
+PD_BUILD_OP(custom_greater_than) + .Inputs({"X", "Y"}) + .Outputs({"Out"}) + .SetKernelFn(PD_KERNEL(GreaterThanForward)); + +// out = (x >= y) +std::vector GreaterEqualForward(const paddle::Tensor& x, + const paddle::Tensor& y) { + if (x.is_cpu() || x.is_gpu()) { + return {x >= y}; + } else { + PD_THROW("Not implemented."); + } +} + +PD_BUILD_OP(custom_greater_equal) + .Inputs({"X", "Y"}) + .Outputs({"Out"}) + .SetKernelFn(PD_KERNEL(GreaterEqualForward)); diff --git a/python/paddle/fluid/tests/custom_op/test_custom_relu_op_jit.py b/python/paddle/fluid/tests/custom_op/test_custom_relu_op_jit.py index 05827d77914e04..04ec4dcde86eb3 100644 --- a/python/paddle/fluid/tests/custom_op/test_custom_relu_op_jit.py +++ b/python/paddle/fluid/tests/custom_op/test_custom_relu_op_jit.py @@ -17,7 +17,13 @@ import numpy as np from test_custom_relu_op_setup import custom_relu_dynamic, custom_relu_static -from utils import IS_MAC, extra_cc_args, extra_nvcc_args, paddle_includes +from utils import ( + IS_MAC, + extra_cc_args, + extra_nvcc_args, + paddle_includes, + paddle_libraries, +) import paddle from paddle.utils.cpp_extension import get_build_directory, load @@ -44,6 +50,7 @@ name='custom_relu_module_jit', sources=sources, extra_include_paths=paddle_includes, # add for Coverage CI + extra_library_paths=paddle_libraries, extra_cxx_cflags=extra_cc_args, # test for cc flags extra_cuda_cflags=extra_nvcc_args, # test for nvcc flags verbose=True, diff --git a/python/paddle/fluid/tests/custom_op/test_custom_tensor_operator.py b/python/paddle/fluid/tests/custom_op/test_custom_tensor_operator.py index bad85eb5986321..1df0986a38dd6b 100644 --- a/python/paddle/fluid/tests/custom_op/test_custom_tensor_operator.py +++ b/python/paddle/fluid/tests/custom_op/test_custom_tensor_operator.py @@ -247,6 +247,7 @@ def test_all(self): self._test_static() self._test_dynamic() self._test_logical_operants() + self._test_compare_operants() def _test_static(self): for device in self.devices: @@ -355,6 +356,38 @@ def _test_logical_operants(self): pd_out = paddle.bitwise_not(x) np.testing.assert_equal(out.numpy(), pd_out.numpy()) + def _test_compare_operants(self): + for device in self.devices: + paddle.set_device(device) + np_x = paddle.randint(0, 2, [4, 8]) + x = paddle.to_tensor(np_x, dtype="int32") + np_y = paddle.randint(0, 2, [4, 8]) + y = paddle.to_tensor(np_y, dtype="int32") + + out = self.custom_module.custom_less_than(x, y) + pd_out = paddle.less_than(x, y) + np.testing.assert_equal(out.numpy(), pd_out.numpy()) + + out = self.custom_module.custom_less_equal(x, y) + pd_out = paddle.less_equal(x, y) + np.testing.assert_equal(out.numpy(), pd_out.numpy()) + + out = self.custom_module.custom_equal(x, y) + pd_out = paddle.equal(x, y) + np.testing.assert_equal(out.numpy(), pd_out.numpy()) + + out = self.custom_module.custom_not_equal(x, y) + pd_out = paddle.not_equal(x, y) + np.testing.assert_equal(out.numpy(), pd_out.numpy()) + + out = self.custom_module.custom_greater_than(x, y) + pd_out = paddle.greater_than(x, y) + np.testing.assert_equal(out.numpy(), pd_out.numpy()) + + out = self.custom_module.custom_greater_equal(x, y) + pd_out = paddle.greater_equal(x, y) + np.testing.assert_equal(out.numpy(), pd_out.numpy()) + if __name__ == '__main__': unittest.main() diff --git a/python/paddle/fluid/tests/custom_op/utils.py b/python/paddle/fluid/tests/custom_op/utils.py index 69530da6047962..7e199f3a6114d1 100644 --- a/python/paddle/fluid/tests/custom_op/utils.py +++ b/python/paddle/fluid/tests/custom_op/utils.py @@ -25,6 +25,7 @@ # 
paddle include directory. Because the following path is generated after installing # PaddlePaddle whl. So here we specific `include_dirs` to avoid errors in CI. paddle_includes = [] +paddle_libraries = [] for site_packages_path in getsitepackages(): paddle_includes.append( os.path.join(site_packages_path, 'paddle', 'include') @@ -32,6 +33,7 @@ paddle_includes.append( os.path.join(site_packages_path, 'paddle', 'include', 'third_party') ) + paddle_libraries.append(os.path.join(site_packages_path, 'paddle', 'libs')) # Test for extra compile args extra_cc_args = ['-w', '-g'] if not IS_WINDOWS else ['/w'] diff --git a/python/paddle/fluid/tests/unittests/ascend_group.py b/python/paddle/fluid/tests/unittests/ascend_group.py index adbfdf889ab442..02efeaeb138c43 100644 --- a/python/paddle/fluid/tests/unittests/ascend_group.py +++ b/python/paddle/fluid/tests/unittests/ascend_group.py @@ -96,7 +96,9 @@ def init_communicator( with fluid.program_guard(main_program): op_type = "c_allreduce_sum" - data = fluid.layers.fill_constant(shape=[1], dtype='float32', value=2.5) + data = paddle.tensor.fill_constant( + shape=[1], dtype='float32', value=2.5 + ) helper = LayerHelper(op_type, **locals()) helper.append_op( type=op_type, diff --git a/python/paddle/fluid/tests/unittests/auto_parallel/test_to_static.py b/python/paddle/fluid/tests/unittests/auto_parallel/test_to_static.py index dd593bca3e7fdf..bfb51ae60a4676 100644 --- a/python/paddle/fluid/tests/unittests/auto_parallel/test_to_static.py +++ b/python/paddle/fluid/tests/unittests/auto_parallel/test_to_static.py @@ -25,6 +25,7 @@ from paddle.fluid.framework import _non_static_mode from paddle.io import Dataset from paddle.jit.dy2static.utils import is_paddle_func +from paddle.nn import Sequential from paddle.static import InputSpec batch_size = 4 @@ -199,6 +200,9 @@ def test_is_paddle_func(self): self.assertFalse(is_paddle_func(proxy_layer._train)) self.assertFalse(is_paddle_func(proxy_layer._eval)) self.assertFalse(is_paddle_func(proxy_layer._predict)) + # test for nn.Sequential + net = Sequential(('mlp', mlp)) + self.assertFalse(is_paddle_func(net)) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/auto_parallel/test_while_op_partition.py b/python/paddle/fluid/tests/unittests/auto_parallel/test_while_op_partition.py index 560ab9cf3cb9ac..1596977a8ea2d1 100644 --- a/python/paddle/fluid/tests/unittests/auto_parallel/test_while_op_partition.py +++ b/python/paddle/fluid/tests/unittests/auto_parallel/test_while_op_partition.py @@ -117,11 +117,11 @@ def get_program(): with fluid.program_guard(train_program, start_program): # 循环计数器 - i = fluid.layers.fill_constant(shape=[1], dtype='int64', value=0) + i = paddle.tensor.fill_constant(shape=[1], dtype='int64', value=0) auto.shard_tensor(i, _g_process_mesh, [None]) # 循环次数 - loop_len = fluid.layers.fill_constant( + loop_len = paddle.tensor.fill_constant( shape=[1], dtype='int64', value=epoch_num ) auto.shard_tensor(loop_len, _g_process_mesh, [None]) diff --git a/python/paddle/fluid/tests/unittests/check_nan_inf_base.py b/python/paddle/fluid/tests/unittests/check_nan_inf_base.py index 3b559bdd268c2e..c62cc3340eec23 100644 --- a/python/paddle/fluid/tests/unittests/check_nan_inf_base.py +++ b/python/paddle/fluid/tests/unittests/check_nan_inf_base.py @@ -50,7 +50,7 @@ def net(): y = paddle.static.data(name="y", shape=[-1, 1], dtype='int64') # test int64 value - zero = fluid.layers.fill_constant(shape=[1], dtype='int64', value=0) + zero = paddle.tensor.fill_constant(shape=[1], dtype='int64', 
diff --git a/python/paddle/fluid/tests/unittests/check_nan_inf_base.py b/python/paddle/fluid/tests/unittests/check_nan_inf_base.py
index 3b559bdd268c2e..c62cc3340eec23 100644
--- a/python/paddle/fluid/tests/unittests/check_nan_inf_base.py
+++ b/python/paddle/fluid/tests/unittests/check_nan_inf_base.py
@@ -50,7 +50,7 @@ def net():
     y = paddle.static.data(name="y", shape=[-1, 1], dtype='int64')

     # test int64 value
-    zero = fluid.layers.fill_constant(shape=[1], dtype='int64', value=0)
+    zero = paddle.tensor.fill_constant(shape=[1], dtype='int64', value=0)

     # test float16 value
     fp16_zero = paddle.cast(zero, dtype='float16')
diff --git a/python/paddle/fluid/tests/unittests/collective/collective_scatter_api.py b/python/paddle/fluid/tests/unittests/collective/collective_scatter_api.py
index de7ec2cfcd377d..5263a79b997284 100644
--- a/python/paddle/fluid/tests/unittests/collective/collective_scatter_api.py
+++ b/python/paddle/fluid/tests/unittests/collective/collective_scatter_api.py
@@ -16,7 +16,6 @@

 import paddle
 import paddle.fluid as fluid
-import paddle.fluid.layers as layers

 paddle.enable_static()

@@ -32,7 +31,7 @@ def get_model(self, main_prog, startup_program, rank):
             shape=[10, 1000],
             dtype='float32',
         )
-        toutdata = layers.fill_constant(
+        toutdata = paddle.tensor.fill_constant(
             shape=[5, 1000], dtype='float32', value=1.0
         )
         tensor_list = None
diff --git a/python/paddle/fluid/tests/unittests/collective/collective_sendrecv_op_array.py b/python/paddle/fluid/tests/unittests/collective/collective_sendrecv_op_array.py
index ff2f40ec5a6353..4912af238eaafe 100644
--- a/python/paddle/fluid/tests/unittests/collective/collective_sendrecv_op_array.py
+++ b/python/paddle/fluid/tests/unittests/collective/collective_sendrecv_op_array.py
@@ -41,7 +41,7 @@ def get_model(self, main_prog, startup_program):
         data1 = paddle.assign(np.array([[3, 4, 5]], dtype='float32'))
         data2 = paddle.assign(np.array([[0, 1, 2]], dtype='float32'))
         tensor_array = paddle.tensor.create_array(dtype='float32')
-        i = fluid.layers.fill_constant(shape=[1], dtype='int64', value=0)
+        i = paddle.tensor.fill_constant(shape=[1], dtype='int64', value=0)
         paddle.tensor.array_write(data1, i, tensor_array)
         paddle.tensor.array_write(data2, i + 1, tensor_array)
         if self.rank == 0:
diff --git a/python/paddle/fluid/tests/unittests/collective/fleet/hybrid_parallel_inference_helper.py b/python/paddle/fluid/tests/unittests/collective/fleet/hybrid_parallel_inference_helper.py
index b68e2059d8f0f6..6ce4d654b30d71 100644
--- a/python/paddle/fluid/tests/unittests/collective/fleet/hybrid_parallel_inference_helper.py
+++ b/python/paddle/fluid/tests/unittests/collective/fleet/hybrid_parallel_inference_helper.py
@@ -19,7 +19,6 @@

 import paddle
 import paddle.distributed.fleet as fleet
-import paddle.fluid.layers as layers
 from paddle.distributed.fleet.utils.hybrid_parallel_inference import (
     HybridParallelInferenceHelper,
 )
@@ -66,16 +65,16 @@ def test_hybrid_parallel_inference_helper_mp1pp2(self):
             )

             with paddle.fluid.device_guard(f'{device}:all'):
-                max_len = layers.fill_constant(
+                max_len = paddle.tensor.fill_constant(
                     shape=[1], dtype="int64", value=2, force_cpu=False, name="n"
                 )
-                step_idx = layers.fill_constant(
+                step_idx = paddle.tensor.fill_constant(
                     shape=[1], dtype="int64", value=0, force_cpu=False, name="i"
                 )
                 data = paddle.tensor.array_write(X, step_idx)

-                cond_int = layers.fill_constant(
+                cond_int = paddle.tensor.fill_constant(
                     shape=[1],
                     dtype="int64",
                     value=0,
diff --git a/python/paddle/fluid/tests/unittests/dist_fleet_simnet_bow.py b/python/paddle/fluid/tests/unittests/dist_fleet_simnet_bow.py
index 22ae37010f795a..083bd0a8a01bd5 100644
--- a/python/paddle/fluid/tests/unittests/dist_fleet_simnet_bow.py
+++ b/python/paddle/fluid/tests/unittests/dist_fleet_simnet_bow.py
@@ -59,7 +59,7 @@ def get_acc(cos_q_nt, cos_q_pt, batch_size):
     cond_3 = paddle.sum(cond)
     acc = paddle.divide(
         cond_3,
-        fluid.layers.fill_constant(
+        paddle.tensor.fill_constant(
             shape=[1], value=batch_size * 1.0, dtype='float64'
         ),
         name="simnet_acc",
diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/ifelse_simple_func.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/ifelse_simple_func.py
index 5d22d8b5198fb3..0d36a20b6febca 100644
--- a/python/paddle/fluid/tests/unittests/dygraph_to_static/ifelse_simple_func.py
+++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/ifelse_simple_func.py
@@ -60,7 +60,7 @@ def dyfunc_with_if_else2(x, col=100):
             # TODO: Don't support return non-Tensor in Tensor-dependent `if` statement currently.
             # `x` is Tensor, `col` is not Tensor, and `col` is the return value of `true_fn` after transformed.
             # col = -1
-            col = fluid.layers.fill_constant(shape=[1], value=-1, dtype="int64")
+            col = paddle.tensor.fill_constant(shape=[1], value=-1, dtype="int64")
     if paddle.mean(x).numpy() > x.numpy()[row][col]:
         y = paddle.nn.functional.relu(x)
     else:
@@ -149,7 +149,7 @@ def dyfunc_with_if_else_with_list_geneator(x):
 def nested_if_else(x_v):
     batch_size = 16
     feat_size = x_v.shape[-1]
-    bias = fluid.layers.fill_constant([feat_size], dtype='float32', value=1)
+    bias = paddle.tensor.fill_constant([feat_size], dtype='float32', value=1)
     if x_v.shape[0] != batch_size:
         # TODO: Don't support return non-Tensor in Tensor-dependent `if` statement currently.
         # `x_v.shape[0]` is not Tensor, and `batch_size` is the return value of `true_fn` after transformed.
@@ -160,14 +160,14 @@ def nested_if_else(x_v):
     # if tensor.shape is [1], now support to compare with numpy.
     if paddle.mean(x_v).numpy() < 0:
         y = x_v + bias
-        w = fluid.layers.fill_constant([feat_size], dtype='float32', value=10)
+        w = paddle.tensor.fill_constant([feat_size], dtype='float32', value=10)
         if y.numpy()[0] < 10:
             tmp = y * w
             y = paddle.nn.functional.relu(tmp)
             if paddle.mean(y).numpy() < batch_size:
                 y = paddle.abs(y)
             else:
-                tmp = fluid.layers.fill_constant(
+                tmp = paddle.tensor.fill_constant(
                     y.shape, dtype='float32', value=-1
                 )
                 y = y - tmp
@@ -185,13 +185,13 @@ def nested_if_else_2(x):
     x_shape_0 = x.shape[0]
     if x_shape_0 < 1:
         if paddle.shape(y).numpy()[0] < 1:
-            res = fluid.layers.fill_constant(
+            res = paddle.tensor.fill_constant(
                 value=2, shape=x.shape, dtype="int32"
             )
             # `z` is a new var here.
             z = y + 1
         else:
-            res = fluid.layers.fill_constant(
+            res = paddle.tensor.fill_constant(
                 value=3, shape=x.shape, dtype="int32"
             )
     else:
@@ -218,13 +218,13 @@ def nested_if_else_3(x):
     else:
         y_shape = paddle.shape(y)
         if y_shape.numpy()[0] < 1:
-            res = fluid.layers.fill_constant(
+            res = paddle.tensor.fill_constant(
                 value=2, shape=x.shape, dtype="int32"
             )
             # `z` is created in above code block.
             z = y + 1
         else:
-            res = fluid.layers.fill_constant(
+            res = paddle.tensor.fill_constant(
                 value=3, shape=x.shape, dtype="int32"
             )
             # `out` is a new var.
@@ -259,14 +259,14 @@ def forward(self, input):
             )
         )
-        self.constant_vars['bias'] = fluid.layers.fill_constant(
+        self.constant_vars['bias'] = paddle.tensor.fill_constant(
             [5], dtype='float32', value=1
         )
         # Control flow `if` statement
         fc_out = self.fc(input)
         if paddle.mean(fc_out).numpy() < 0:
             y = fc_out + self.constant_vars['bias']
-            self.constant_vars['w'] = fluid.layers.fill_constant(
+            self.constant_vars['w'] = paddle.tensor.fill_constant(
                 [5], dtype='float32', value=10
             )
             if y.numpy()[0] < self.alpha:
@@ -277,12 +277,12 @@ def forward(self, input):
             # Nested `if/else`
             if y.numpy()[-1] < self.alpha:
                 # Modify variable of class
-                self.constant_vars['w'] = fluid.layers.fill_constant(
+                self.constant_vars['w'] = paddle.tensor.fill_constant(
                     [hidden_dim], dtype='float32', value=9
                 )
                 y = paddle.abs(y)
             else:
-                tmp = fluid.layers.fill_constant(
+                tmp = paddle.tensor.fill_constant(
                     y.shape, dtype='float32', value=-1
                 )
                 y = y - tmp
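Note for reviewers: these dy2static fixtures all hinge on Tensor-dependent control flow. A self-contained sketch (illustrative, not taken from the suite) of the pattern they exercise — an `if` on a Tensor condition that `to_static` lowers into a cond op:

```python
import numpy as np
import paddle

@paddle.jit.to_static
def relu_or_negate(x):
    # Tensor-dependent branch: rewritten into paddle.static.nn.cond by dy2static.
    if paddle.mean(x) > 0:
        y = paddle.nn.functional.relu(x)
    else:
        y = -x
    return y

out = relu_or_negate(paddle.to_tensor(np.array([1.0, -2.0], dtype="float32")))
print(out.numpy())  # [1. 0.]
```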
diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/simnet_dygraph_model.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/simnet_dygraph_model.py
index 5dee9a56d8df85..ab669f5eee029c 100644
--- a/python/paddle/fluid/tests/unittests/dygraph_to_static/simnet_dygraph_model.py
+++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/simnet_dygraph_model.py
@@ -15,7 +15,6 @@
 from functools import reduce

 import paddle
-import paddle.fluid as fluid
 import paddle.fluid.param_attr as attr
 from paddle.common_ops_import import Variable
 from paddle.fluid.dygraph import Layer
@@ -214,7 +213,7 @@ def ops(self, input, shape, dtype, value):
         shape = list(shape)
         input_shape = paddle.shape(input)
         shape[0] = input_shape[0]
-        constant = fluid.layers.fill_constant(shape, dtype, value)
+        constant = paddle.tensor.fill_constant(shape, dtype, value)
         return constant
diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/simnet_dygraph_model_v2.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/simnet_dygraph_model_v2.py
index d8c5956357827c..f9b69eb06ff82a 100644
--- a/python/paddle/fluid/tests/unittests/dygraph_to_static/simnet_dygraph_model_v2.py
+++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/simnet_dygraph_model_v2.py
@@ -210,7 +210,7 @@ def ops(self, input, shape, dtype, value):
         shape = list(shape)
         input_shape = paddle.shape(input)
         shape[0] = input_shape[0]
-        constant = paddle.fluid.layers.fill_constant(shape, dtype, value)
+        constant = paddle.tensor.fill_constant(shape, dtype, value)
         return constant
diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_break_continue.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_break_continue.py
index 03f89bb84fc429..e7cff9cf032f87 100644
--- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_break_continue.py
+++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_break_continue.py
@@ -62,7 +62,7 @@ def test_continue_in_for_at_end(x):
 def test_continue_in_while(x):
     x = fluid.dygraph.to_variable(x)
-    i = fluid.layers.fill_constant(shape=[1], dtype='int32', value=0)
+    i = paddle.tensor.fill_constant(shape=[1], dtype='int32', value=0)
     while i < 10:
         i += 1
         if i > 5:
@@ -94,7 +94,7 @@ def test_break_in_for_at_end(x):
 def test_break_in_while(x):
     x = fluid.dygraph.to_variable(x)
-    i = fluid.layers.fill_constant(shape=[1], dtype='int32', value=0)
+    i = paddle.tensor.fill_constant(shape=[1], dtype='int32', value=0)
     while i < 10:
         i += 1
         if i > 5:
@@ -116,8 +116,8 @@ def test_break_continue_in_for(x):
             break
         x += 10086

-    a = fluid.layers.fill_constant(shape=[1], dtype='int32', value=0)
-    b = fluid.layers.fill_constant(shape=[1], dtype='int32', value=3)
+    a = paddle.tensor.fill_constant(shape=[1], dtype='int32', value=0)
+    b = paddle.tensor.fill_constant(shape=[1], dtype='int32', value=3)
     # b = 10
     # TODO: add Raise Error and suggestion for usage:
     #   Py for contains break/continue depends on control-flow.
@@ -192,7 +192,7 @@ def test_optim_break_in_for(x):
 def test_optim_break_in_while(x):
     x = paddle.to_tensor(x)
-    i = fluid.layers.fill_constant(shape=[1], dtype='int32', value=0)
+    i = paddle.tensor.fill_constant(shape=[1], dtype='int32', value=0)
     while i < 10:
         if i > 5:
             break
diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_cinn_prim_layer_norm.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_cinn_prim_layer_norm.py
index 78fea41662e49a..2de94fdcbb193d 100644
--- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_cinn_prim_layer_norm.py
+++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_cinn_prim_layer_norm.py
@@ -12,7 +12,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-import platform
 import unittest

 import numpy as np
@@ -171,28 +170,24 @@ def check_prim(self, net, use_prim):
         self.assertTrue('layer_norm' not in fwd_ops)

     def test_cinn_prim(self):
-        plat = platform.system()
-        if plat == "Linux":
-            for dtype in self.dtypes:
-                if paddle.device.get_device() == "cpu":
-                    print("need pass this case")
-                    continue
-                x_n, w_n, b_n = generate_data(dtype)
-                self.x = paddle.to_tensor(x_n)
-                self.w = paddle.to_tensor(w_n)
-                self.b = paddle.to_tensor(b_n)
-                self.x.stop_gradient = False
-                dy_res = self.train(use_prim=False)
-                cinn_res = self.train(use_prim=True)
-
-                np.testing.assert_allclose(
-                    cinn_res,
-                    dy_res,
-                    rtol=TOLERANCE[dtype]['rtol'],
-                    atol=TOLERANCE[dtype]['atol'],
-                )
-        else:
-            pass
+        for dtype in self.dtypes:
+            if paddle.device.get_device() == "cpu":
+                print("need pass this case")
+                continue
+            x_n, w_n, b_n = generate_data(dtype)
+            self.x = paddle.to_tensor(x_n)
+            self.w = paddle.to_tensor(w_n)
+            self.b = paddle.to_tensor(b_n)
+            self.x.stop_gradient = False
+            dy_res = self.train(use_prim=False)
+            cinn_res = self.train(use_prim=True)
+
+            np.testing.assert_allclose(
+                cinn_res,
+                dy_res,
+                rtol=TOLERANCE[dtype]['rtol'],
+                atol=TOLERANCE[dtype]['atol'],
+            )


 if __name__ == '__main__':
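Note for reviewers: with the Linux-only guard removed, the prim/CINN tests follow a single shape shared across platforms — run with the composite path off, run with it on, compare within per-dtype tolerances. A standalone sketch of that check (the tolerance values below are illustrative assumptions, not the suite's `TOLERANCE` table):

```python
import numpy as np

TOLERANCE = {
    "float16": {"rtol": 1e-3, "atol": 1e-3},  # assumed values for illustration
    "float32": {"rtol": 1e-6, "atol": 1e-6},
}

def check_close(cinn_res, dy_res, dtype):
    # Compare the prim+CINN result against the eager baseline within
    # dtype-dependent bounds, as the updated tests now do on every platform.
    np.testing.assert_allclose(
        cinn_res,
        dy_res,
        rtol=TOLERANCE[dtype]["rtol"],
        atol=TOLERANCE[dtype]["atol"],
    )

check_close(np.float32([1.0000001]), np.float32([1.0]), "float32")
```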
diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_cinn_prim_mean.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_cinn_prim_mean.py
index ff433f439e056e..ae2de19c8721d4 100644
--- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_cinn_prim_mean.py
+++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_cinn_prim_mean.py
@@ -12,7 +12,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-import platform
 import unittest

 import numpy as np
@@ -185,31 +184,24 @@ def check_prim(self, net, use_prim):
         self.assertTrue('reduce_mean' not in fwd_ops)

     def test_cinn_prim(self):
-        plat = platform.system()
-        if plat == "Linux":
-            for shape in self.shapes:
-                for dtype in self.dtypes:
-                    # mean-kernel on cpu not support float16
-                    if (
-                        paddle.device.get_device() == "cpu"
-                        and dtype == "float16"
-                    ):
-                        print("need pass this case")
-                        continue
-                    data = generate_data(shape, dtype)
-                    data_t = paddle.to_tensor(data)
-                    data_t.stop_gradient = False
-                    dy_res = self.train(use_prim=False, data=data_t)
-                    cinn_res = self.train(use_prim=True, data=data_t)
-
-                    np.testing.assert_allclose(
-                        cinn_res,
-                        dy_res,
-                        rtol=TOLERANCE[dtype]['rtol'],
-                        atol=TOLERANCE[dtype]['atol'],
-                    )
-        else:
-            pass
+        for shape in self.shapes:
+            for dtype in self.dtypes:
+                # mean-kernel on cpu not support float16
+                if paddle.device.get_device() == "cpu" and dtype == "float16":
+                    print("need pass this case")
+                    continue
+                data = generate_data(shape, dtype)
+                data_t = paddle.to_tensor(data)
+                data_t.stop_gradient = False
+                dy_res = self.train(use_prim=False, data=data_t)
+                cinn_res = self.train(use_prim=True, data=data_t)
+
+                np.testing.assert_allclose(
+                    cinn_res,
+                    dy_res,
+                    rtol=TOLERANCE[dtype]['rtol'],
+                    atol=TOLERANCE[dtype]['atol'],
+                )


 if __name__ == '__main__':
diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_dict.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_dict.py
index 2a9b8157dea90c..6a92f0cc5e39c9 100644
--- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_dict.py
+++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_dict.py
@@ -84,12 +84,12 @@ def __init__(self, batch_size=64, hidden_size=16, output_size=16):
     def forward(self, input, max_len=4):
         input = fluid.dygraph.to_variable(input)
         cache = {
-            "k": fluid.layers.fill_constant(
+            "k": paddle.tensor.fill_constant(
                 shape=[self.batch_size, self.output_size],
                 dtype='float32',
                 value=0,
             ),
-            "v": fluid.layers.fill_constant(
+            "v": paddle.tensor.fill_constant(
                 shape=[self.batch_size, self.output_size],
                 dtype='float32',
                 value=0,
diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_error.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_error.py
index 074d45f9ccb41a..525dd766eeb38f 100644
--- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_error.py
+++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_error.py
@@ -25,7 +25,7 @@


 def inner_func():
-    fluid.layers.fill_constant(shape=[1, 2], value=9, dtype="int")
+    paddle.tensor.fill_constant(shape=[1, 2], value=9, dtype="int")
     return
@@ -50,7 +50,7 @@ def func_error_in_compile_time_2(x):
 @paddle.jit.to_static
 def func_error_in_runtime(x):
     x = fluid.dygraph.to_variable(x)
-    two = fluid.layers.fill_constant(shape=[1], value=2, dtype="int32")
+    two = paddle.tensor.fill_constant(shape=[1], value=2, dtype="int32")
     x = paddle.reshape(x, shape=[1, two])
     return x
@@ -77,7 +77,7 @@ def __init__(self, fc_size=20):

     def forward(self, x):
         y = self._linear(x)
-        z = fluid.layers.fill_constant(shape=[1, 2], value=9, dtype="int")
+        z = paddle.tensor.fill_constant(shape=[1, 2], value=9, dtype="int")
         out = paddle.mean(y[z])
         return out
@@ -101,7 +101,7 @@ def test_func(self):
 @paddle.jit.to_static
 def func_error_in_runtime_with_empty_line(x):
     x = fluid.dygraph.to_variable(x)
-    two = fluid.layers.fill_constant(shape=[1], value=2, dtype="int32")
+    two = paddle.tensor.fill_constant(shape=[1], value=2, dtype="int32")
     x = paddle.reshape(x, shape=[1, two])
@@ -261,7 +261,7 @@ def set_message(self):
             'inner_func()',
             'File "{}", line 28, in inner_func'.format(self.filepath),
             'def inner_func():',
-            'fluid.layers.fill_constant(shape=[1, 2], value=9, dtype="int")',
+            'paddle.tensor.fill_constant(shape=[1, 2], value=9, dtype="int")',
             '<--- HERE',
             'return',
         ]
@@ -340,7 +340,7 @@ def set_message(self):
                 self.filepath
             ),
             'x = fluid.dygraph.to_variable(x)',
-            'two = fluid.layers.fill_constant(shape=[1], value=2, dtype="int32")',
+            'two = paddle.tensor.fill_constant(shape=[1], value=2, dtype="int32")',
             'x = paddle.reshape(x, shape=[1, two])',
             '<--- HERE',
             'return x',
@@ -356,7 +356,7 @@ def set_message(self):
             'File "{}", line 106, in func_error_in_runtime_with_empty_line'.format(
                 self.filepath
             ),
-            'two = fluid.layers.fill_constant(shape=[1], value=2, dtype="int32")',
+            'two = paddle.tensor.fill_constant(shape=[1], value=2, dtype="int32")',
             'x = paddle.reshape(x, shape=[1, two])',
             '<--- HERE',
             'return x',
@@ -379,7 +379,7 @@ def set_message(self):
             'File "{}", line 80, in forward'.format(self.filepath),
             'def forward(self, x):',
             'y = self._linear(x)',
-            'z = fluid.layers.fill_constant(shape=[1, 2], value=9, dtype="int")',
+            'z = paddle.tensor.fill_constant(shape=[1, 2], value=9, dtype="int")',
             '<--- HERE',
             'out = paddle.mean(y[z])',
             'return out',
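Note for reviewers: the error-reporting fixtures above deliberately pass `dtype="int"`, which Paddle rejects while the program is being built, so the test can assert the traceback points at the offending line. A standalone reproduction (the exact exception type is hedged here; the suite only relies on an error being raised at this call):

```python
import paddle

try:
    paddle.tensor.fill_constant(shape=[1, 2], value=9, dtype="int")
except Exception as e:  # "int" is not a supported dtype string
    print("rejected at construction time:", type(e).__name__)
```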
diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_for_enumerate.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_for_enumerate.py
index e7dcfb684914d8..cf2ae90423b14c 100644
--- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_for_enumerate.py
+++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_for_enumerate.py
@@ -26,7 +26,7 @@
 # 0. for in range var.numpy()[0]
 @paddle.jit.to_static
 def for_in_range(x):
-    z = fluid.layers.fill_constant([1], 'int32', 0)
+    z = paddle.tensor.fill_constant([1], 'int32', 0)
     x = fluid.dygraph.to_variable(x)
     for i in range(x.numpy()[0]):
         z = z + i
@@ -36,7 +36,7 @@
 # 1. for iter list
 @paddle.jit.to_static
 def for_iter_list(x_array):
-    z = fluid.layers.fill_constant([1], 'int32', 0)
+    z = paddle.tensor.fill_constant([1], 'int32', 0)
     for x in x_array:
         z = z + x
     return z
@@ -45,7 +45,7 @@
 # 2. for enumerate list
 @paddle.jit.to_static
 def for_enumerate_list(x_array):
-    z = fluid.layers.fill_constant([1], 'int32', 0)
+    z = paddle.tensor.fill_constant([1], 'int32', 0)
     for i, x in enumerate(x_array):
         z = z + x + i
     return z
@@ -54,7 +54,7 @@
 # 3. for iter var.numpy()
 @paddle.jit.to_static
 def for_iter_var_numpy(x_array):
-    z = fluid.layers.fill_constant([1], 'int32', 0)
+    z = paddle.tensor.fill_constant([1], 'int32', 0)
     x_array = fluid.dygraph.to_variable(x_array)
     for x in x_array.numpy():
         z = z + x
@@ -64,8 +64,8 @@
 # 4. for enumerate var.numpy()
 @paddle.jit.to_static
 def for_enumerate_var_numpy(x_array):
-    y = fluid.layers.fill_constant([1], 'int32', 0)
-    z = fluid.layers.fill_constant([1], 'int32', 0)
+    y = paddle.tensor.fill_constant([1], 'int32', 0)
+    z = paddle.tensor.fill_constant([1], 'int32', 0)
     x_array = fluid.dygraph.to_variable(x_array)
     for i, x in enumerate(x_array.numpy()):
         y = y + i
@@ -76,8 +76,8 @@
 # 5. for enumerate var.numpy() with start
 @paddle.jit.to_static
 def for_enumerate_var_numpy_with_start(x_array):
-    y = fluid.layers.fill_constant([1], 'int32', 0)
-    z = fluid.layers.fill_constant([1], 'int32', 0)
+    y = paddle.tensor.fill_constant([1], 'int32', 0)
+    z = paddle.tensor.fill_constant([1], 'int32', 0)
     x_array = fluid.dygraph.to_variable(x_array)
     for i, x in enumerate(x_array.numpy(), 1):
         y = y + i
@@ -88,7 +88,7 @@
 # 6. for in range with break
 @paddle.jit.to_static
 def for_in_range_with_break(x):
-    z = fluid.layers.fill_constant([1], 'int32', 0)
+    z = paddle.tensor.fill_constant([1], 'int32', 0)
     x = fluid.dygraph.to_variable(x)
     for i in range(x.numpy()[0]):
         z = z + i
@@ -100,8 +100,8 @@
 # 7. for enumerate var.numpy() with break
 @paddle.jit.to_static
 def for_enumerate_var_numpy_with_break(x_array):
-    y = fluid.layers.fill_constant([1], 'int32', 0)
-    z = fluid.layers.fill_constant([1], 'int32', 0)
+    y = paddle.tensor.fill_constant([1], 'int32', 0)
+    z = paddle.tensor.fill_constant([1], 'int32', 0)
     x_array = fluid.dygraph.to_variable(x_array)
     for i, x in enumerate(x_array.numpy()):
         y = y + i
@@ -114,8 +114,8 @@
 # 8. for enumerate var.numpy() with continue
 @paddle.jit.to_static
 def for_enumerate_var_numpy_with_continue(x_array):
-    y = fluid.layers.fill_constant([1], 'int32', 0)
-    z = fluid.layers.fill_constant([1], 'int32', 0)
+    y = paddle.tensor.fill_constant([1], 'int32', 0)
+    z = paddle.tensor.fill_constant([1], 'int32', 0)
     x_array = fluid.dygraph.to_variable(x_array)
     for i, x in enumerate(x_array.numpy()):
         y = y + i
@@ -128,8 +128,8 @@
 # 9. for enumerate var.numpy() with start & break
 @paddle.jit.to_static
 def for_enumerate_var_numpy_with_start_break(x_array):
-    y = fluid.layers.fill_constant([1], 'int32', 0)
-    z = fluid.layers.fill_constant([1], 'int32', 0)
+    y = paddle.tensor.fill_constant([1], 'int32', 0)
+    z = paddle.tensor.fill_constant([1], 'int32', 0)
     x_array = fluid.dygraph.to_variable(x_array)
     for i, x in enumerate(x_array.numpy(), 1):
         y = y + i
@@ -142,8 +142,8 @@
 # 10. for enumerate var.numpy() with start & continue
 @paddle.jit.to_static
 def for_enumerate_var_numpy_with_start_continue(x_array):
-    y = fluid.layers.fill_constant([1], 'int32', 0)
-    z = fluid.layers.fill_constant([1], 'int32', 0)
+    y = paddle.tensor.fill_constant([1], 'int32', 0)
+    z = paddle.tensor.fill_constant([1], 'int32', 0)
     x_array = fluid.dygraph.to_variable(x_array)
     for i, x in enumerate(x_array.numpy(), 1):
         y = y + i
@@ -156,7 +156,7 @@
 # 11. for iter var
 @paddle.jit.to_static
 def for_iter_var(x_array):
-    z = fluid.layers.fill_constant([1], 'int32', 0)
+    z = paddle.tensor.fill_constant([1], 'int32', 0)
     x_array = fluid.dygraph.to_variable(x_array)

     for x in x_array:
@@ -167,8 +167,8 @@
 # 12. for enumerate var
 @paddle.jit.to_static
 def for_enumerate_var(x_array):
-    y = fluid.layers.fill_constant([1], 'int32', 0)
-    z = fluid.layers.fill_constant([1], 'int32', 0)
+    y = paddle.tensor.fill_constant([1], 'int32', 0)
+    z = paddle.tensor.fill_constant([1], 'int32', 0)
     x_array = fluid.dygraph.to_variable(x_array)
     for i, x in enumerate(x_array):
         y = y + i
@@ -181,12 +181,12 @@
 def for_iter_var_list(x):
     # 1. prepare data, ref test_list.py
     x = fluid.dygraph.to_variable(x)
-    iter_num = fluid.layers.fill_constant(shape=[1], value=5, dtype="int32")
+    iter_num = paddle.tensor.fill_constant(shape=[1], value=5, dtype="int32")
     a = []
     for i in range(iter_num):
         a.append(x + i)
     # 2. iter list[var]
-    y = fluid.layers.fill_constant([1], 'int32', 0)
+    y = paddle.tensor.fill_constant([1], 'int32', 0)
     for x in a:
         y = y + x
     return y
@@ -197,13 +197,13 @@
 def for_enumerate_var_list(x):
     # 1. prepare data, ref test_list.py
     x = fluid.dygraph.to_variable(x)
-    iter_num = fluid.layers.fill_constant(shape=[1], value=5, dtype="int32")
+    iter_num = paddle.tensor.fill_constant(shape=[1], value=5, dtype="int32")
     a = []
     for i in range(iter_num):
         a.append(x + i)
     # 2. iter list[var]
-    y = fluid.layers.fill_constant([1], 'int32', 0)
-    z = fluid.layers.fill_constant([1], 'int32', 0)
+    y = paddle.tensor.fill_constant([1], 'int32', 0)
+    z = paddle.tensor.fill_constant([1], 'int32', 0)
     for i, x in enumerate(a):
         y = y + i
         z = z + x
@@ -213,7 +213,7 @@
 # 15. for enumerate list[var] with a nested for range
 @paddle.jit.to_static
 def for_enumerate_var_with_nested_range(x_array):
-    x = fluid.layers.fill_constant([1], 'int32', 0)
+    x = paddle.tensor.fill_constant([1], 'int32', 0)
     x_array = fluid.dygraph.to_variable(x_array)
     for i, num in enumerate(x_array):
         for idx in range(num):
@@ -224,7 +224,7 @@
 # 16. for iter var[idx]
 @paddle.jit.to_static
 def for_iter_var_idx(x_array):
-    z = fluid.layers.fill_constant([1], 'int32', 0)
+    z = paddle.tensor.fill_constant([1], 'int32', 0)
     x_array = fluid.dygraph.to_variable(x_array)

     for x in x_array[0:]:
@@ -306,7 +306,7 @@ def forward(self, x):
 # 21. for original list
 @paddle.jit.to_static
 def for_original_list():
-    z = fluid.layers.fill_constant([1], 'int32', 0)
+    z = paddle.tensor.fill_constant([1], 'int32', 0)
     for x in [1, 2, 3]:
         z = z + x
     return z
@@ -315,7 +315,7 @@
 # 22. for original tuple
 @paddle.jit.to_static
 def for_original_tuple():
-    z = fluid.layers.fill_constant([1], 'int32', 0)
+    z = paddle.tensor.fill_constant([1], 'int32', 0)
     for x in (1, 2, 3):
         z = z + x
     return z
diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_ifelse.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_ifelse.py
index d400f15285f800..b1774dacb6a933 100644
--- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_ifelse.py
+++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_ifelse.py
@@ -158,8 +158,8 @@ def body(i, ten, y):
         i += 1
         return [i, ten, y]

-    i = fluid.layers.fill_constant(shape=[1], dtype='int64', value=0)
-    ten = fluid.layers.fill_constant(shape=[1], dtype='int64', value=10)
+    i = paddle.tensor.fill_constant(shape=[1], dtype='int64', value=0)
+    ten = paddle.tensor.fill_constant(shape=[1], dtype='int64', value=10)
     i, ten, y = paddle.static.nn.while_loop(cond, body, [i, ten, y])
     return y[0]
@@ -180,7 +180,7 @@ def add_fn(x):
     def map_func(func, tensor_list):
         return [func(x) for x in tensor_list]

-    i = fluid.layers.fill_constant(shape=[1], dtype='int64', value=0)
+    i = paddle.tensor.fill_constant(shape=[1], dtype='int64', value=0)
     # It will be converted into `layers.cond` as followed.
     # map_func(lambda x: paddle.static.nn.cond(i==1, lambda: x, lambda: add_fn(x), y)
     # `if (Tensor) == 1` is supported in dygraph.
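Note for reviewers: the hunk above touches a `paddle.static.nn.while_loop` call, so here is a self-contained static-graph sketch of its cond/body protocol (names and values are illustrative, not from the test):

```python
import paddle

paddle.enable_static()

def cond(i, ten):
    # Loop continues while this bool tensor is true.
    return i < ten

def body(i, ten):
    # Must return loop variables in the same order as the input list.
    return [i + 1, ten]

i = paddle.tensor.fill_constant(shape=[1], dtype='int64', value=0)
ten = paddle.tensor.fill_constant(shape=[1], dtype='int64', value=10)
i, ten = paddle.static.nn.while_loop(cond, body, [i, ten])

exe = paddle.static.Executor(paddle.CPUPlace())
exe.run(paddle.static.default_startup_program())
(res,) = exe.run(paddle.static.default_main_program(), fetch_list=[i])
print(res)  # [10]
```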
diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_len.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_len.py
index 218e3ed4326ad5..b032454461139f 100644
--- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_len.py
+++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_len.py
@@ -35,7 +35,7 @@ def len_with_tensor(x):

 def len_with_lod_tensor_array(x):
     x = fluid.dygraph.to_variable(x)

-    i = fluid.layers.fill_constant(shape=[1], dtype='int64', value=0)
+    i = paddle.tensor.fill_constant(shape=[1], dtype='int64', value=0)
     arr = paddle.tensor.array_write(x, i=i)
     arr_len = len(arr)
diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_list.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_list.py
index dd5e3f3adde8b1..4ca7e125fc76b3 100644
--- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_list.py
+++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_list.py
@@ -42,7 +42,7 @@ def test_list_append_in_if(x):
         a.append(x)
     else:
         a.append(
-            fluid.layers.fill_constant(shape=[1, 2], value=9, dtype="int64")
+            paddle.tensor.fill_constant(shape=[1, 2], value=9, dtype="int64")
         )
     # TODO(Aurelius84): Currently, run_program_op doesn't support output LoDTensorArray.
     return a[0]
@@ -51,7 +51,7 @@
 def test_list_append_in_for_loop(x, iter_num):
     x = fluid.dygraph.to_variable(x)
     # Use `fill_constant` so that static analysis can analyze the type of iter_num is Tensor
-    iter_num = fluid.layers.fill_constant(
+    iter_num = paddle.tensor.fill_constant(
         shape=[1], value=iter_num, dtype="int32"
     )  # TODO(liym27): Delete it if the type of parameter iter_num can be resolved
     a = []
@@ -88,7 +88,7 @@ def test_list_append_in_for_loop_with_concat(x, iter_num):
     x = fluid.dygraph.to_variable(x)
     a = []
     # Use `fill_constant` so that static analysis can analyze the type of iter_num is Tensor
-    iter_num = fluid.layers.fill_constant(
+    iter_num = paddle.tensor.fill_constant(
         shape=[1], value=iter_num, dtype="int32"
     )  # TODO(liym27): Delete it if the type of parameter iter_num can be resolved
     for i in range(iter_num):
@@ -99,7 +99,7 @@
 def test_list_append_in_while_loop(x, iter_num):
     x = fluid.dygraph.to_variable(x)
-    iter_num = fluid.layers.fill_constant(
+    iter_num = paddle.tensor.fill_constant(
         shape=[1], value=iter_num, dtype="int32"
     )
     a = []
@@ -112,7 +112,7 @@
 def test_list_append_in_while_loop_with_stack(x, iter_num):
     x = fluid.dygraph.to_variable(x)
-    iter_num = fluid.layers.fill_constant(
+    iter_num = paddle.tensor.fill_constant(
         shape=[1], value=iter_num, dtype="int32"
     )
     a = []
@@ -159,11 +159,11 @@ def test_list_pop_in_if(x):
     if x.numpy()[0] > 0:
         a.append(x)
         b.append(x + 1)
-        a.append(fluid.layers.fill_constant(shape=[1], value=1, dtype="int64"))
+        a.append(paddle.tensor.fill_constant(shape=[1], value=1, dtype="int64"))
     else:
         a.append(x + 1)
         b.append(x - 1)
-        a.append(fluid.layers.fill_constant(shape=[2], value=2, dtype="int64"))
+        a.append(paddle.tensor.fill_constant(shape=[2], value=2, dtype="int64"))
     item1 = a.pop(1)
     return item1, b[-1]
@@ -171,7 +171,7 @@
 def test_list_pop_in_for_loop(x, iter_num):
     x = fluid.dygraph.to_variable(x)
     # Use `fill_constant` so that static analysis can analyze the type of iter_num is Tensor
-    iter_num = fluid.layers.fill_constant(
+    iter_num = paddle.tensor.fill_constant(
         shape=[1], value=iter_num, dtype="int32"
     )  # TODO(liym27): Delete it if the type of parameter iter_num can be resolved
@@ -189,7 +189,7 @@ def test_list_pop_in_while_loop(x, iter_num):
     x = fluid.dygraph.to_variable(x)
-    iter_num = fluid.layers.fill_constant(
+    iter_num = paddle.tensor.fill_constant(
         shape=[1], value=iter_num, dtype="int32"
     )
     a = []
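Note for reviewers: the `iter_num`-as-Tensor trick used throughout test_list.py is what forces dy2static to lower a Python loop (and the list it mutates) into a static while_loop over a tensor array. A standalone sketch of the pattern (illustrative, not from the file):

```python
import numpy as np
import paddle

@paddle.jit.to_static
def append_in_loop(x):
    # A Tensor loop bound makes the range loop Tensor-dependent.
    iter_num = paddle.tensor.fill_constant(shape=[1], value=3, dtype="int32")
    a = []
    for i in range(iter_num):
        a.append(x + i)
    return a[0]

out = append_in_loop(paddle.to_tensor(np.zeros([2], dtype="float32")))
print(out.numpy())  # [0. 0.]
```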
diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_loop.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_loop.py
index 5c84da8e621be9..5f9d59daf09529 100644
--- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_loop.py
+++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_loop.py
@@ -89,7 +89,7 @@ def for_loop_dyfunc(max_len):

 def for_loop_dyfunc2(max_len):
     # Test case: a variable is used and created in loop, but used before created
-    x = fluid.layers.fill_constant(shape=[1, 2], dtype="int32", value=1)
+    x = paddle.tensor.fill_constant(shape=[1, 2], dtype="int32", value=1)

     for i in range(max_len):
         if i > 1:
@@ -97,7 +97,7 @@
             a = 1
     q, _ = x.shape  # test var x.shape only used but not created in loop
-    ret = fluid.layers.fill_constant(shape=[1], dtype="int32", value=s + q)
+    ret = paddle.tensor.fill_constant(shape=[1], dtype="int32", value=s + q)
     return ret
@@ -189,7 +189,7 @@ def __init__(self):
     foo = Foo()

     # Use `to_variable` so that static analysis can analyze the type of X is Tensor
-    max_len = fluid.layers.fill_constant(
+    max_len = paddle.tensor.fill_constant(
         shape=[1], value=max_len, dtype="int32"
     )
@@ -206,8 +206,8 @@ def var_create_in_for_loop(max_len):

 def nested_for_loop_dyfunc():
-    two = fluid.layers.fill_constant(shape=[1], value=2, dtype="int32")
-    three = fluid.layers.fill_constant(shape=[1], value=3, dtype="int32")
+    two = paddle.tensor.fill_constant(shape=[1], value=2, dtype="int32")
+    three = paddle.tensor.fill_constant(shape=[1], value=3, dtype="int32")
     for j in range(two):
         for i in range(10):
             a = 2 + j
diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_resnet.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_resnet.py
index 407e11349c2de1..fed94b40030208 100644
--- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_resnet.py
+++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_resnet.py
@@ -14,7 +14,6 @@

 import math
 import os
-import platform
 import tempfile
 import time
 import unittest
@@ -442,22 +441,18 @@ def test_resnet_composite_backward(self):
         )

     def test_resnet_composite_forward_backward(self):
-        plat = platform.system()
-        if plat == "Linux":
-            core._set_prim_all_enabled(True)
-            static_loss = self.train(to_static=True)
-            core._set_prim_all_enabled(False)
-            dygraph_loss = self.train(to_static=True)
-            np.testing.assert_allclose(
-                static_loss,
-                dygraph_loss,
-                rtol=1e-02,
-                err_msg='static_loss: {} \n dygraph_loss: {}'.format(
-                    static_loss, dygraph_loss
-                ),
-            )
-        else:
-            pass
+        core._set_prim_all_enabled(True)
+        static_loss = self.train(to_static=True)
+        core._set_prim_all_enabled(False)
+        dygraph_loss = self.train(to_static=True)
+        np.testing.assert_allclose(
+            static_loss,
+            dygraph_loss,
+            rtol=1e-02,
+            err_msg='static_loss: {} \n dygraph_loss: {}'.format(
+                static_loss, dygraph_loss
+            ),
+        )

     def test_in_static_mode_mkldnn(self):
         fluid.set_flags({'FLAGS_use_mkldnn': True})
diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_return.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_return.py
index 48c60795c4c38b..1b16e79a8c1dfc 100644
--- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_return.py
+++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_return.py
@@ -69,7 +69,7 @@ def test_return_if_else(x):
 @to_static
 def test_return_in_while(x):
     x = fluid.dygraph.to_variable(x)
-    i = fluid.layers.fill_constant(shape=[1], dtype='int32', value=0)
+    i = paddle.tensor.fill_constant(shape=[1], dtype='int32', value=0)
     while i < 10:
         i += 1
         if i > 5:
diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_tensor_shape.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_tensor_shape.py
index 116a2d7c68dc75..4e4e876c14a929 100644
--- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_tensor_shape.py
+++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_tensor_shape.py
@@ -109,11 +109,11 @@ def dyfunc_with_if_1(x):
     # `res.shape[0]` is transformed into
     # `paddle.jit.dy2static.convert_var_shape(res)[0]`
     if res.shape[0] > 1:
-        res = fluid.layers.fill_constant(
+        res = paddle.tensor.fill_constant(
             value=2, shape=x.shape, dtype="int32"
         )
     else:
-        res = fluid.layers.fill_constant(
+        res = paddle.tensor.fill_constant(
             value=3, shape=x.shape, dtype="int32"
         )
     return res
@@ -125,14 +125,14 @@ def dyfunc_with_if_2(x):
     if len(x.shape) < 1:
         res = x
     else:
-        res = fluid.layers.fill_constant(value=8, shape=x.shape, dtype="int32")
+        res = paddle.tensor.fill_constant(value=8, shape=x.shape, dtype="int32")

     return res


 def dyfunc_with_for_1(x):
     x = fluid.dygraph.to_variable(x)
-    res = fluid.layers.fill_constant(value=0, shape=[1], dtype="int32")
+    res = paddle.tensor.fill_constant(value=0, shape=[1], dtype="int32")
     # `x.shape[0]` is transformed into `paddle.jit.dy2static.convert_var_shape(x)[0]`
     for i in range(x.shape[0]):
         res += 1
@@ -142,7 +142,7 @@
 def dyfunc_with_for_2(x):
     x = fluid.dygraph.to_variable(x)
     x_shape_0 = x.shape[0]
-    res = fluid.layers.fill_constant(value=0, shape=[1], dtype="int32")
+    res = paddle.tensor.fill_constant(value=0, shape=[1], dtype="int32")

     # `x_shape_0` is transformed into `paddle.jit.dy2static.convert_var_shape(x)[0]`
     for i in range(x_shape_0):
@@ -152,7 +152,7 @@
 def dyfunc_with_for_3(x):
     x = fluid.dygraph.to_variable(x)
-    res = fluid.layers.fill_constant(value=0, shape=[1], dtype="int32")
+    res = paddle.tensor.fill_constant(value=0, shape=[1], dtype="int32")
     # `len(x.shape)` is not transformed.
     for i in range(len(x.shape)):
         res += 1
@@ -162,7 +162,7 @@
 def dyfunc_with_while_1(x):
     x = fluid.dygraph.to_variable(x)
-    res = fluid.layers.fill_constant(value=0, shape=[1], dtype="int32")
+    res = paddle.tensor.fill_constant(value=0, shape=[1], dtype="int32")
     # `x.shape[0]` is transformed into `paddle.jit.dy2static.convert_var_shape(x)[0]`
     i = 1
     while i < x.shape[0]:
@@ -174,7 +174,7 @@
 def dyfunc_with_while_2(x):
     x = fluid.dygraph.to_variable(x)
     x_shape_0 = x.shape[0]
-    res = fluid.layers.fill_constant(value=0, shape=[1], dtype="int32")
+    res = paddle.tensor.fill_constant(value=0, shape=[1], dtype="int32")
     i = 1
     # `x_shape_0` is transformed into `paddle.jit.dy2static.convert_var_shape(x)[0]`
     while i < x_shape_0:
@@ -186,7 +186,7 @@
 def dyfunc_with_while_3(x):
     x = fluid.dygraph.to_variable(x)
     x_shape = x.shape
-    res = fluid.layers.fill_constant(value=0, shape=[1], dtype="int32")
+    res = paddle.tensor.fill_constant(value=0, shape=[1], dtype="int32")
     i = 1
     # `len(x.shape)` is not transformed.
@@ -601,7 +601,7 @@ def dyfunc_with_static_convert_var_shape(x):
     else:
         # Test for correctly to find `batch_size__static_convert_var_shape_suffix_0` in
         # deeply nested scope.
-        res = fluid.layers.fill_constant(
+        res = paddle.tensor.fill_constant(
            value=8, shape=[batch_size], dtype="int32"
         )
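Note for reviewers: the tensor-shape fixtures all revolve around loops bounded by `x.shape[...]`, which dy2static rewrites via `convert_var_shape` when the dimension is dynamic. A minimal standalone sketch of the pattern (illustrative, not from the file):

```python
import numpy as np
import paddle

@paddle.jit.to_static
def count_rows(x):
    res = paddle.tensor.fill_constant(value=0, shape=[1], dtype="int32")
    # `x.shape[0]` becomes a shape read that dy2static can make Tensor-dependent.
    for _ in range(x.shape[0]):
        res += 1
    return res

out = count_rows(paddle.to_tensor(np.zeros([4, 3], dtype="float32")))
print(out.numpy())  # [4]
```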
diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/transformer_dygraph_model.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/transformer_dygraph_model.py
index e0ff492388e99f..22e2b68c330a42 100644
--- a/python/paddle/fluid/tests/unittests/dygraph_to_static/transformer_dygraph_model.py
+++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/transformer_dygraph_model.py
@@ -16,7 +16,6 @@

 import paddle
 import paddle.fluid as fluid
-import paddle.fluid.layers as layers
 import paddle.nn.functional as F
 from paddle.fluid.dygraph import Layer, to_variable
 from paddle.jit.api import dygraph_to_static_func
@@ -796,7 +795,7 @@ def gather(input, indices, batch_pos):
         # constant number
         inf = float(1.0 * 1e7)
         max_len = (enc_output.shape[1] + 20) if max_len is None else max_len
-        vocab_size_tensor = layers.fill_constant(
+        vocab_size_tensor = paddle.tensor.fill_constant(
             shape=[1], dtype="int64", value=self.trg_vocab_size
         )
         end_token_tensor = to_variable(
@@ -824,7 +823,7 @@ def gather(input, indices, batch_pos):
             np.full([batch_size, beam_size], 0, dtype="bool")
         )

-        trg_word = layers.fill_constant(
+        trg_word = paddle.tensor.fill_constant(
             shape=[batch_size * beam_size, 1], dtype="int64", value=bos_id
         )
@@ -838,12 +837,12 @@ def gather(input, indices, batch_pos):
         # init states (caches) for transformer, need to be updated according to selected beam
         caches = [
             {
-                "k": layers.fill_constant(
+                "k": paddle.tensor.fill_constant(
                     shape=[batch_size, beam_size, self.n_head, 0, self.d_key],
                     dtype=enc_output.dtype,
                     value=0,
                 ),
-                "v": layers.fill_constant(
+                "v": paddle.tensor.fill_constant(
                     shape=[batch_size, beam_size, self.n_head, 0, self.d_value],
                     dtype=enc_output.dtype,
                     value=0,
@@ -853,7 +852,7 @@ def gather(input, indices, batch_pos):

         for i in range(paddle.to_tensor(max_len)):
-            trg_pos = layers.fill_constant(
+            trg_pos = paddle.tensor.fill_constant(
                 shape=trg_word.shape, dtype="int64", value=i
             )
             caches = paddle.utils.map_structure(
diff --git a/python/paddle/fluid/tests/unittests/ipu/test_clip_op_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_clip_op_ipu.py
index 8297806179a6a2..b3c27b784f632c 100644
--- a/python/paddle/fluid/tests/unittests/ipu/test_clip_op_ipu.py
+++ b/python/paddle/fluid/tests/unittests/ipu/test_clip_op_ipu.py
@@ -89,10 +89,10 @@ def build_model(self):
             name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32'
         )

-        min = paddle.fluid.layers.fill_constant(
+        min = paddle.tensor.fill_constant(
             name="min", shape=[1], dtype='float32', value=0.1
         )
-        max = paddle.fluid.layers.fill_constant(
+        max = paddle.tensor.fill_constant(
             name="max", shape=[1], dtype='float32', value=3.4
         )
         x = paddle.clip(x, min=min, max=max)
@@ -106,7 +106,7 @@ def build_model(self):
             name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32'
         )

-        min = paddle.fluid.layers.fill_constant(
+        min = paddle.tensor.fill_constant(
             name="min", shape=[1], dtype='float32', value=0.1
         )
         x = paddle.clip(x, min=min)
@@ -120,7 +120,7 @@ def build_model(self):
             name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32'
         )

-        max = paddle.fluid.layers.fill_constant(
+        max = paddle.tensor.fill_constant(
             name="max", shape=[1], dtype='float32', value=3.4
         )
         x = paddle.clip(x, max=max)
@@ -134,7 +134,7 @@ def build_model(self):
             name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32'
         )

-        min = paddle.fluid.layers.fill_constant(
+        min = paddle.tensor.fill_constant(
             name="min", shape=[1], dtype='float32', value=0.1
         )
         x = paddle.clip(x, min=min, max=3.4)
@@ -148,7 +148,7 @@ def build_model(self):
             name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32'
         )

-        max = paddle.fluid.layers.fill_constant(
+        max = paddle.tensor.fill_constant(
             name="max", shape=[1], dtype='float32', value=3.4
         )
         x = paddle.clip(x, min=0.1, max=max)
@@ -186,10 +186,10 @@ def build_model(self):
         x = paddle.static.data(
             name=self.feed_list[0], shape=self.feed_shape[0], dtype='int32'
         )
-        min = paddle.fluid.layers.fill_constant(
+        min = paddle.tensor.fill_constant(
             name="min", shape=[1], dtype='int32', value=1
         )
-        max = paddle.fluid.layers.fill_constant(
+        max = paddle.tensor.fill_constant(
             name="max", shape=[1], dtype='int32', value=3
         )
         x = paddle.clip(x, min=min, max=max)
diff --git a/python/paddle/fluid/tests/unittests/ipu/test_expand_op_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_expand_op_ipu.py
index 062814a11cff55..9d46263b6b2591 100644
--- a/python/paddle/fluid/tests/unittests/ipu/test_expand_op_ipu.py
+++ b/python/paddle/fluid/tests/unittests/ipu/test_expand_op_ipu.py
@@ -80,7 +80,7 @@ def build_model(self):
         x = paddle.static.data(
             name=self.feed_list[0], shape=self.feed_shape[0], dtype="float32"
         )
-        expand_times = paddle.fluid.layers.fill_constant(
+        expand_times = paddle.tensor.fill_constant(
             shape=[len(self.feed_shape[0])], dtype="int32", value=2
         )
         out = paddle.expand(x, expand_times, **self.attrs)
diff --git a/python/paddle/fluid/tests/unittests/ipu/test_expand_v2_op_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_expand_v2_op_ipu.py
index e29f633075246b..6dfad5aa5d55fb 100644
--- a/python/paddle/fluid/tests/unittests/ipu/test_expand_v2_op_ipu.py
+++ b/python/paddle/fluid/tests/unittests/ipu/test_expand_v2_op_ipu.py
@@ -100,7 +100,7 @@ def build_model(self):
             'dtype': 'int32',
             'value': 3,
         }
-        y = paddle.fluid.layers.fill_constant(**self.attrs)
+        y = paddle.tensor.fill_constant(**self.attrs)
         out = paddle.expand(x, shape=y)
         self.fetch_list = [out.name]
diff --git a/python/paddle/fluid/tests/unittests/ipu/test_fill_constant_op_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_fill_constant_op_ipu.py
index f1e2c79cc6ba37..4a4e2c4da79649 100644
--- a/python/paddle/fluid/tests/unittests/ipu/test_fill_constant_op_ipu.py
+++ b/python/paddle/fluid/tests/unittests/ipu/test_fill_constant_op_ipu.py
@@ -46,7 +46,7 @@ def set_op_attrs(self):

     @IPUOpTest.static_graph
     def build_model(self):
-        x = paddle.fluid.layers.fill_constant(**self.attrs)
+        x = paddle.tensor.fill_constant(**self.attrs)
         out = paddle.add(x, x)
         self.fetch_list = [out.name]
diff --git a/python/paddle/fluid/tests/unittests/ipu/test_pad_op_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_pad_op_ipu.py
index 702f2b4ea1c722..22af217f66ee43 100644
--- a/python/paddle/fluid/tests/unittests/ipu/test_pad_op_ipu.py
+++ b/python/paddle/fluid/tests/unittests/ipu/test_pad_op_ipu.py
@@ -73,7 +73,7 @@ def build_model(self):
             'dtype': 'int32',
             'value': 2,
         }
-        y = paddle.fluid.layers.fill_constant(**const_attrs)
+        y = paddle.tensor.fill_constant(**const_attrs)
         pad = paddle.nn.functional.pad(x, pad=y)
         self.fetch_list = [pad.name]
diff --git a/python/paddle/fluid/tests/unittests/ipu/test_topk_op_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_topk_op_ipu.py
index 6a302a7cb5a899..031389859afb0a 100644
--- a/python/paddle/fluid/tests/unittests/ipu/test_topk_op_ipu.py
+++ b/python/paddle/fluid/tests/unittests/ipu/test_topk_op_ipu.py
@@ -57,7 +57,7 @@ def build_model(self):
             topk_values, topk_indices = self.op(x, **self.attrs)
         else:
             # !important, popart cannot accept non const tensor
-            K_t = paddle.fluid.layers.fill_constant(
+            K_t = paddle.tensor.fill_constant(
                 shape=[1], dtype='int32', value=self.k, name="in_2"
             )
             topk_values, topk_indices = self.op(x, K_t, **self.attrs)
diff --git a/python/paddle/fluid/tests/unittests/ipu/test_yolo_box_op_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_yolo_box_op_ipu.py
index 40c56af9228332..c79cf4ec49205a 100644
--- a/python/paddle/fluid/tests/unittests/ipu/test_yolo_box_op_ipu.py
+++ b/python/paddle/fluid/tests/unittests/ipu/test_yolo_box_op_ipu.py
@@ -64,7 +64,7 @@ def build_model(self):
             'dtype': 'int32',
             'value': 6,
         }
-        img_size = paddle.fluid.layers.fill_constant(**attrs)
+        img_size = paddle.tensor.fill_constant(**attrs)
         out = paddle.vision.ops.yolo_box(x=x, img_size=img_size, **self.attrs)
         self.fetch_list = [x.name for x in out]
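Note for reviewers: the IPU tests materialize scalar operands (k for top-k, clip bounds, image sizes) with `fill_constant` because, as the comment in the hunk says, PopART only accepts constant tensors there. A static-graph sketch of the top-k variant (standalone; the test wires this through its own `self.op`/`self.attrs` plumbing):

```python
import paddle

paddle.enable_static()

x = paddle.static.data(name="x", shape=[8], dtype="float32")
# k as a constant tensor rather than a Python int:
k_t = paddle.tensor.fill_constant(shape=[1], dtype="int32", value=3)
values, indices = paddle.topk(x, k_t)
```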
diff --git a/python/paddle/fluid/tests/unittests/ir/test_ir_fusion_group_pass.py b/python/paddle/fluid/tests/unittests/ir/test_ir_fusion_group_pass.py
index a12fb83ee5be9c..f0a121e5fcbeb2 100644
--- a/python/paddle/fluid/tests/unittests/ir/test_ir_fusion_group_pass.py
+++ b/python/paddle/fluid/tests/unittests/ir/test_ir_fusion_group_pass.py
@@ -20,7 +20,6 @@
 import paddle
 import paddle.fluid as fluid
 import paddle.fluid.core as core
-import paddle.fluid.layers as layers


 class FusionGroupPassTest(PassTest):
@@ -86,7 +85,7 @@ def build_program(self, dtype):
         with fluid.program_guard(self.main_program, self.startup_program):
             self.feed_vars = self._prepare_feed_vars([32, 64], dtype, 5, False)

-            one = layers.fill_constant(shape=[1], dtype=dtype, value=1.0)
+            one = paddle.tensor.fill_constant(shape=[1], dtype=dtype, value=1.0)
             tmp_0 = one * self.feed_vars[0]
             # subgraph with 9 op nodes
             tmp_1 = tmp_0 * paddle.nn.functional.sigmoid(
@@ -142,7 +141,9 @@ def build_program(self, dtype):
             tmp_0 = self.feed_vars[0] * self.feed_vars[1]
             tmp_0.stop_gradient = False
             tmp_1 = paddle.cast(tmp_0, dtype="float16")
-            zero = layers.fill_constant(shape=[128], dtype="float16", value=0)
+            zero = paddle.tensor.fill_constant(
+                shape=[128], dtype="float16", value=0
+            )
             # TODO(xreki): fix precision problem when using softmax of float16.
             # tmp_2 = layers.softmax(tmp_1)
             tmp_2 = paddle.add(tmp_1, zero)
@@ -212,7 +213,9 @@ def build_program(self, dtype):
             tmp_0 = paddle.add(self.feed_vars[0], self.feed_vars[1])
             tmp_0.stop_gradient = False

-            tmp_1 = layers.fill_constant(shape=[2, 2], dtype=dtype, value=2.0)
+            tmp_1 = paddle.tensor.fill_constant(
+                shape=[2, 2], dtype=dtype, value=2.0
+            )
             tmp_2 = paddle.scale(
                 tmp_1, scale=3.0, bias=1.0, bias_after_scale=True
             )
diff --git a/python/paddle/fluid/tests/unittests/ir/test_ir_subgraph_python_interface.py b/python/paddle/fluid/tests/unittests/ir/test_ir_subgraph_python_interface.py
index 6e0bb08c926b31..1936f895e805be 100644
--- a/python/paddle/fluid/tests/unittests/ir/test_ir_subgraph_python_interface.py
+++ b/python/paddle/fluid/tests/unittests/ir/test_ir_subgraph_python_interface.py
@@ -16,7 +16,6 @@

 import paddle
 import paddle.fluid as fluid
-import paddle.fluid.layers as layers
 from paddle.fluid import core
 from paddle.fluid.framework import IrGraph, Program, program_guard
 from paddle.fluid.tests.unittests.op_test import OpTestTool
@@ -55,8 +54,12 @@ def false_func():
             return linear_fc(5)

         with program_guard(main_program, startup_program):
-            x = layers.fill_constant(shape=[1], dtype='float32', value=0.1)
-            y = layers.fill_constant(shape=[1], dtype='float32', value=0.23)
+            x = paddle.tensor.fill_constant(
+                shape=[1], dtype='float32', value=0.1
+            )
+            y = paddle.tensor.fill_constant(
+                shape=[1], dtype='float32', value=0.23
+            )
             pred = paddle.less_than(y, x)
             out = paddle.static.nn.cond(pred, true_func, false_func)
diff --git a/python/paddle/fluid/tests/unittests/mlu/test_expand_v2_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_expand_v2_op_mlu.py
index c2a3dbfa20944c..90de2e1e182510 100644
--- a/python/paddle/fluid/tests/unittests/mlu/test_expand_v2_op_mlu.py
+++ b/python/paddle/fluid/tests/unittests/mlu/test_expand_v2_op_mlu.py
@@ -235,7 +235,7 @@ def test_api(self):
             name='x', shape=[12, 14], dtype="float32"
         )

-        positive_2 = fluid.layers.fill_constant([1], "int32", 12)
+        positive_2 = paddle.tensor.fill_constant([1], "int32", 12)
         expand_shape = paddle.static.data(
             name="expand_shape",
             shape=[2],
diff --git a/python/paddle/fluid/tests/unittests/mlu/test_fill_constant_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_fill_constant_op_mlu.py
index 6d67b6fc8f095a..ed03ac0d3ad667 100644
--- a/python/paddle/fluid/tests/unittests/mlu/test_fill_constant_op_mlu.py
+++ b/python/paddle/fluid/tests/unittests/mlu/test_fill_constant_op_mlu.py
@@ -268,8 +268,8 @@ def test_check_output(self):

 class TestFillConstantAPI(unittest.TestCase):
     def test_api(self):
-        positive_2_int32 = fluid.layers.fill_constant([1], "int32", 2)
-        positive_2_int64 = fluid.layers.fill_constant([1], "int64", 2)
+        positive_2_int32 = paddle.tensor.fill_constant([1], "int32", 2)
+        positive_2_int64 = paddle.tensor.fill_constant([1], "int64", 2)

         shape_tensor_int32 = fluid.data(
             name="shape_tensor_int32", shape=[2], dtype="int32"
@@ -278,41 +278,41 @@ def test_api(self):
             name="shape_tensor_int64", shape=[2], dtype="int64"
         )

-        out_1 = fluid.layers.fill_constant(
+        out_1 = paddle.tensor.fill_constant(
             shape=[1, 2], dtype="float32", value=1.1
         )

-        out_2 = fluid.layers.fill_constant(
+        out_2 = paddle.tensor.fill_constant(
             shape=[1, positive_2_int32], dtype="float32", value=1.1
         )

-        out_3 = fluid.layers.fill_constant(
+        out_3 = paddle.tensor.fill_constant(
             shape=[1, positive_2_int64], dtype="float32", value=1.1
         )

-        out_4 = fluid.layers.fill_constant(
+        out_4 = paddle.tensor.fill_constant(
             shape=shape_tensor_int32, dtype="float32", value=1.1
         )

-        out_5 = fluid.layers.fill_constant(
+        out_5 = paddle.tensor.fill_constant(
             shape=shape_tensor_int64, dtype="float32", value=1.1
         )

-        out_6 = fluid.layers.fill_constant(
+        out_6 = paddle.tensor.fill_constant(
             shape=shape_tensor_int64, dtype=np.float32, value=1.1
         )

-        val1 = fluid.layers.fill_constant(
+        val1 = paddle.tensor.fill_constant(
             shape=[1], dtype=np.float32, value=1.1
         )
-        val2 = fluid.layers.fill_constant(
+        val2 = paddle.tensor.fill_constant(
             shape=[1], dtype=np.float64, value=1.1
         )
-        out_7 = fluid.layers.fill_constant(
+        out_7 = paddle.tensor.fill_constant(
             shape=shape_tensor_int64, dtype=np.float32, value=val1
         )

-        out_8 = fluid.layers.fill_constant(
+        out_8 = paddle.tensor.fill_constant(
             shape=shape_tensor_int64, dtype=np.float32, value=val2
         )

@@ -345,16 +345,16 @@ def test_api(self):
             shape = fluid.dygraph.to_variable(data1)
             val = fluid.dygraph.to_variable(data2)
             value = fluid.dygraph.to_variable(data3)
-            res1 = fluid.layers.fill_constant(
+            res1 = paddle.tensor.fill_constant(
                 shape=[1, 2], dtype='float32', value=1.1
             )
-            res2 = fluid.layers.fill_constant(
+            res2 = paddle.tensor.fill_constant(
                 shape=shape, dtype='float32', value=1.1
             )
-            res3 = fluid.layers.fill_constant(
+            res3 = paddle.tensor.fill_constant(
                 shape=shape, dtype='float32', value=val
             )
-            res4 = fluid.layers.fill_constant(
+            res4 = paddle.tensor.fill_constant(
                 shape=shape, dtype='int32', value=value
             )
             assert np.array_equal(
@@ -372,17 +372,17 @@ def test_api(self):

     def test_nan(self):
         with fluid.dygraph.guard():
-            res = fluid.layers.fill_constant([1], 'float32', np.nan)
+            res = paddle.tensor.fill_constant([1], 'float32', np.nan)
             self.assertTrue(np.isnan(res.numpy().item(0)))

     def test_inf(self):
         with fluid.dygraph.guard():
-            res = fluid.layers.fill_constant([1], 'float32', np.inf)
+            res = paddle.tensor.fill_constant([1], 'float32', np.inf)
             self.assertTrue(np.isinf(res.numpy().item(0)))

     def test_ninf(self):
         with fluid.dygraph.guard():
-            res = fluid.layers.fill_constant([1], 'float32', np.NINF)
+            res = paddle.tensor.fill_constant([1], 'float32', np.NINF)
             self.assertTrue(np.isinf(res.numpy().item(0)))
             self.assertEqual(np.NINF, res.numpy().item(0))

@@ -434,13 +434,13 @@ def test_errors(self):
             # The argument shape's type of fill_constant_op must be list, tuple or Variable.
             def test_shape_type():
-                fluid.layers.fill_constant(shape=1, dtype="float32", value=1)
+                paddle.tensor.fill_constant(shape=1, dtype="float32", value=1)

             self.assertRaises(TypeError, test_shape_type)

             # The argument shape's size of fill_constant_op must not be 0.
             def test_shape_size():
-                fluid.layers.fill_constant(shape=[], dtype="float32", value=1)
+                paddle.tensor.fill_constant(shape=[], dtype="float32", value=1)

             self.assertRaises(AssertionError, test_shape_size)

@@ -449,7 +449,7 @@ def test_shape_tensor_dtype():
                 shape = fluid.data(
                     name="shape_tensor", shape=[2], dtype="float32"
                 )
-                fluid.layers.fill_constant(
+                paddle.tensor.fill_constant(
                     shape=shape, dtype="float32", value=1
                 )
@@ -459,7 +459,7 @@ def test_shape_tensor_list_dtype():
                 shape = fluid.data(
                     name="shape_tensor_list", shape=[1], dtype="bool"
                 )
-                fluid.layers.fill_constant(
+                paddle.tensor.fill_constant(
                     shape=[shape, 2], dtype="float32", value=1
                 )
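Note for reviewers: the API test above also documents `fill_constant`'s flexible inputs — `shape` may be a list, a tuple, or an integer tensor, and `value` may be a scalar or a single-element tensor. A standalone dygraph sketch of the tensor-shaped variant:

```python
import numpy as np
import paddle

shape = paddle.to_tensor(np.array([1, 2], dtype="int64"))
val = paddle.to_tensor(np.array([1.1], dtype="float32"))

a = paddle.tensor.fill_constant(shape=[1, 2], dtype="float32", value=1.1)
b = paddle.tensor.fill_constant(shape=shape, dtype="float32", value=val)
assert np.array_equal(a.numpy(), b.numpy())
```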
diff --git a/python/paddle/fluid/tests/unittests/mlu/test_strided_slice_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_strided_slice_op_mlu.py
index 07a9179c5a8b11..f078edb566dcff 100644
--- a/python/paddle/fluid/tests/unittests/mlu/test_strided_slice_op_mlu.py
+++ b/python/paddle/fluid/tests/unittests/mlu/test_strided_slice_op_mlu.py
@@ -525,8 +525,8 @@ def test_check_grad_normal(self):
 class TestStridedSliceAPI(unittest.TestCase):
     def test_1(self):
         input = np.random.random([3, 4, 5, 6]).astype("float32")
-        minus_1 = fluid.layers.fill_constant([1], "int32", -1)
-        minus_3 = fluid.layers.fill_constant([1], "int32", -3)
+        minus_1 = paddle.tensor.fill_constant([1], "int32", -1)
+        minus_3 = paddle.tensor.fill_constant([1], "int32", -3)
         starts = paddle.static.data(
             name='starts', shape=[3], dtype='int32'
         )
diff --git a/python/paddle/fluid/tests/unittests/mlu/test_tile_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_tile_op_mlu.py
index c3f4b8a755558f..2898f832c53c61 100644
--- a/python/paddle/fluid/tests/unittests/mlu/test_tile_op_mlu.py
+++ b/python/paddle/fluid/tests/unittests/mlu/test_tile_op_mlu.py
@@ -234,7 +234,7 @@ def test_api(self):
         repeat_times = [2, 2]
         x1 = paddle.static.data(name='x1', shape=[-1, 4], dtype="int32")
         out = paddle.tile(x1, repeat_times)
-        positive_2 = fluid.layers.fill_constant([1], dtype="int32", value=2)
+        positive_2 = paddle.tensor.fill_constant([1], dtype="int32", value=2)
         out2 = paddle.tile(x1, repeat_times=[positive_2, 2])
diff --git a/python/paddle/fluid/tests/unittests/npu/test_concat_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_concat_op_npu.py
index b1d1a69b256647..241145e362f77c 100644
--- a/python/paddle/fluid/tests/unittests/npu/test_concat_op_npu.py
+++ b/python/paddle/fluid/tests/unittests/npu/test_concat_op_npu.py
@@ -171,7 +171,7 @@ def set_program(self, use_fluid_api):
             with fluid.program_guard(self.program):
                 input = paddle.assign(self.x)
                 tensor_array = paddle.tensor.create_array(dtype='float32')
-                zero = fluid.layers.fill_constant(
+                zero = paddle.tensor.fill_constant(
                     shape=[1], value=0, dtype="int64"
                 )
diff --git a/python/paddle/fluid/tests/unittests/npu/test_expand_v2_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_expand_v2_op_npu.py
index 2176f67047e8b4..1380e8cfeae617 100755
--- a/python/paddle/fluid/tests/unittests/npu/test_expand_v2_op_npu.py
+++ b/python/paddle/fluid/tests/unittests/npu/test_expand_v2_op_npu.py
@@ -270,7 +270,7 @@ def test_static(self):
                 dtype="float32",
             )

-            positive_2 = fluid.layers.fill_constant([1], "int32", 12)
+            positive_2 = paddle.tensor.fill_constant([1], "int32", 12)
             expand_shape = paddle.static.data(
                 name="expand_shape",
                 shape=[2],
diff --git a/python/paddle/fluid/tests/unittests/npu/test_stack_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_stack_op_npu.py
index aa6de910838996..a6d65a6eccc9a1 100644
--- a/python/paddle/fluid/tests/unittests/npu/test_stack_op_npu.py
+++ b/python/paddle/fluid/tests/unittests/npu/test_stack_op_npu.py
@@ -139,7 +139,7 @@ def set_program(self):
         with fluid.program_guard(self.program):
             input = paddle.assign(self.x)
             tensor_array = paddle.tensor.create_array(dtype='float32')
-            zero = fluid.layers.fill_constant(shape=[1], value=0, dtype="int64")
+            zero = paddle.tensor.fill_constant(shape=[1], value=0, dtype="int64")

             for i in range(self.iter_num):
                 paddle.tensor.array_write(input, zero + i, tensor_array)
@@ -177,7 +177,7 @@ def set_program(self):
         with fluid.program_guard(self.program):
             input = paddle.assign(self.x)
             tensor_array = paddle.tensor.create_array(dtype='float32')
-            zero = fluid.layers.fill_constant(shape=[1], value=0, dtype="int64")
+            zero = paddle.tensor.fill_constant(shape=[1], value=0, dtype="int64")

             for i in range(self.iter_num):
                 paddle.tensor.array_write(input, zero + i, tensor_array)
diff --git a/python/paddle/fluid/tests/unittests/npu/test_strided_slice_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_strided_slice_op_npu.py
index 1bee4627e16ac1..95cbef265b8628 100755
--- a/python/paddle/fluid/tests/unittests/npu/test_strided_slice_op_npu.py
+++ b/python/paddle/fluid/tests/unittests/npu/test_strided_slice_op_npu.py
@@ -578,8 +578,8 @@ def test_check_grad_normal(self):
 class TestStridedSliceAPI(unittest.TestCase):
     def test_1(self):
         input = np.random.random([3, 4, 5, 6]).astype("float64")
-        minus_1 = fluid.layers.fill_constant([1], "int32", -1)
-        minus_3 = fluid.layers.fill_constant([1], "int32", -3)
+        minus_1 = paddle.tensor.fill_constant([1], "int32", -1)
+        minus_3 = paddle.tensor.fill_constant([1], "int32", -3)
         starts = paddle.static.data(
             name='starts', shape=[3], dtype='int32'
         )
diff --git a/python/paddle/fluid/tests/unittests/npu/test_while_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_while_op_npu.py
index 78356e7168df78..fef934a6f93a75 100644
--- a/python/paddle/fluid/tests/unittests/npu/test_while_op_npu.py
+++ b/python/paddle/fluid/tests/unittests/npu/test_while_op_npu.py
@@ -50,15 +50,15 @@ def simple_net(self):
         i = layers.zeros(shape=[1], dtype='int32')
         i = paddle.cast(i, 'int64')
         i.stop_gradient = True
-        array_len = layers.fill_constant(shape=[1], dtype='int32', value=5)
+        array_len = paddle.tensor.fill_constant(shape=[1], dtype='int32', value=5)
         array_len = paddle.cast(array_len, 'int64')
         array_len.stop_gradient = True
         cond = paddle.ones(shape=[1], dtype='int32')
         cond = paddle.cast(cond, 'bool')
-        j = layers.fill_constant(shape=[1], dtype='int32', value=1)
+        j = paddle.tensor.fill_constant(shape=[1], dtype='int32', value=1)
         j = paddle.cast(j, 'int64')
         j.stop_gradient = True
-        array_len2 = layers.fill_constant(shape=[1], dtype='int32', value=3)
+        array_len2 = paddle.tensor.fill_constant(shape=[1], dtype='int32', value=3)
         array_len2 = paddle.cast(array_len2, 'int64')
         array_len2.stop_gradient = True
         cond2 = paddle.logical_or(x=j, y=array_len2)
diff --git a/python/paddle/fluid/tests/unittests/prim/composite_ops/test_composite_relu_custom_vjp.py b/python/paddle/fluid/tests/unittests/prim/composite_ops/test_composite_relu_custom_vjp.py
new file mode 100644
index 00000000000000..8113ddee89a29c
--- /dev/null
+++ b/python/paddle/fluid/tests/unittests/prim/composite_ops/test_composite_relu_custom_vjp.py
@@ -0,0 +1,122 @@
+# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import unittest
+
+import numpy as np
+from utils import TOLERANCE
+
+import paddle
+import paddle.nn.functional as F
+from paddle.fluid import core
+
+
+def generate_data(shape, dtype="float32"):
+    np_data = np.random.random(shape).astype(dtype)
+    return np_data
+
+
+class Attr:
+    def __init__(self) -> None:
+        self.dtype = None
+        self.shape = None
+
+    def set_dtype(self, dtype) -> None:
+        self.dtype = dtype
+        return
+
+    def set_shape(self, shape) -> None:
+        self.shape = shape
+        return
+
+    def get_rtol(self, flag):
+        rtol = TOLERANCE[self.dtype][flag].get("rtol")
+        return rtol
+
+    def get_atol(self, flag):
+        atol = TOLERANCE[self.dtype][flag].get("atol")
+        return atol
+
+
+attrs = Attr()
+
+
+def fn(x):
+    return F.relu(x)
+
+
+def expect_grad(inputs):
+    paddle.disable_static()
+    inputs.stop_gradient = False
+    res = fn(inputs)
+
+    gradients = paddle.grad(res, inputs)
+    return gradients
+
+
+class TestCompositeReluPrimBackward(unittest.TestCase):
+    "test composite relu and prim backward"
+
+    def setUp(self):
+        core._set_prim_backward_enabled(True)
+        self.dtypes = ["float16", "float32", "float64"]
+        self.shapes = [[2, 3, 4], [2, 3]]
+
+    def cal_composite_grad(self, inputs):
+        paddle.enable_static()
+        core._set_prim_all_enabled(True)
+        startup_program = paddle.static.Program()
+        main_program = paddle.static.Program()
+        with paddle.static.program_guard(main_program, startup_program):
+            x = paddle.static.data(
+                'x', shape=inputs.shape, dtype=str(inputs.dtype)
+            )
+            x.stop_gradient = False
+            y = fn(x)
+            blocks = main_program.blocks
+            z = paddle.static.gradients([y], x)
+            paddle.incubate.autograd.primapi.to_prim(blocks)
+
+        exe = paddle.static.Executor()
+        exe.run(startup_program)
+        res = exe.run(main_program, feed={'x': inputs}, fetch_list=[z])
+        paddle.disable_static()
+        core._set_prim_all_enabled(False)
+        return res
+
+    def compare_backward(self):
+        np_data = generate_data(attrs.shape)
+        tensor_data = paddle.to_tensor(np_data)
+
+        expect = expect_grad(tensor_data)[0].numpy()
+        actual = self.cal_composite_grad(np_data)[0]
+
+        assert expect.dtype == actual.dtype
+        np.testing.assert_allclose(
+            expect,
+            actual,
+            rtol=attrs.get_rtol("prim_backward"),
+            atol=attrs.get_atol("prim_backward"),
+        )
+
+    def test_prim_backward(self):
+        for j in self.dtypes:
+            for t in self.shapes:
+                attrs.set_dtype(j)
+                attrs.set_shape(t)
+                self.compare_backward()
+
+
+if __name__ == '__main__':
+    unittest.main()
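Note for reviewers: the new test's eager baseline boils down to `paddle.grad` over relu, shown standalone here — relu's gradient is the 0/1 mask of positive inputs:

```python
import numpy as np
import paddle

x = paddle.to_tensor(np.array([-1.0, 2.0, 3.0], dtype="float32"))
x.stop_gradient = False
y = paddle.nn.functional.relu(x)
(dx,) = paddle.grad(y, x)  # grad_outputs defaults to ones
print(dx.numpy())  # [0. 1. 1.]
```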
diff --git a/python/paddle/fluid/tests/unittests/prim/model/test_resnet_prim_cinn.py b/python/paddle/fluid/tests/unittests/prim/model/test_resnet_prim_cinn.py
index 32e83c4b2abe7b..3d5fec6ed2ed79 100644
--- a/python/paddle/fluid/tests/unittests/prim/model/test_resnet_prim_cinn.py
+++ b/python/paddle/fluid/tests/unittests/prim/model/test_resnet_prim_cinn.py
@@ -12,7 +12,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import platform
 import time
 import unittest
 
@@ -63,9 +62,7 @@ def train(to_static, enable_prim, enable_cinn):
     np.random.seed(SEED)
     paddle.seed(SEED)
     paddle.framework.random._manual_program_seed(SEED)
-    fluid.core._set_prim_all_enabled(
-        enable_prim and platform.system() == 'Linux'
-    )
+    fluid.core._set_prim_all_enabled(enable_prim)
 
     train_reader = paddle.batch(
         reader_decorator(paddle.dataset.flowers.train(use_xmap=False)),
diff --git a/python/paddle/fluid/tests/unittests/prim/prim/flags/test_prim_flags_case.py b/python/paddle/fluid/tests/unittests/prim/prim/flags/test_prim_flags_case.py
index 309959747e0647..b2e2ad05ea439e 100644
--- a/python/paddle/fluid/tests/unittests/prim/prim/flags/test_prim_flags_case.py
+++ b/python/paddle/fluid/tests/unittests/prim/prim/flags/test_prim_flags_case.py
@@ -13,7 +13,6 @@
 # limitations under the License.
 
 import os
-import platform
 import unittest
 
 import paddle
@@ -95,78 +94,50 @@ def test_cinn_prim_all(self):
         self.reset_env_flag()
         os.environ["FLAGS_prim_all"] = "True"
         self.flag = "cinn_prim_all"
-        plat = platform.system()
-        if plat == "Linux":
-            _ = self.train(use_cinn=True)
-        else:
-            pass
+        _ = self.train(use_cinn=True)
 
     def test_prim_all(self):
         """prim forward + prim backward"""
         self.reset_env_flag()
         os.environ["FLAGS_prim_all"] = "True"
         self.flag = "prim_all"
-        plat = platform.system()
-        if plat == "Linux":
-            _ = self.train(use_cinn=False)
-        else:
-            pass
+        _ = self.train(use_cinn=False)
 
     def test_cinn_prim_forward(self):
         """cinn + prim forward"""
         self.reset_env_flag()
         os.environ["FLAGS_prim_forward"] = "True"
         self.flag = "cinn_prim_forward"
-        plat = platform.system()
-        if plat == "Linux":
-            _ = self.train(use_cinn=True)
-        else:
-            pass
+        _ = self.train(use_cinn=True)
 
     def test_prim_forward(self):
         """only prim forward"""
         self.reset_env_flag()
         os.environ["FLAGS_prim_forward"] = "True"
         self.flag = "prim_forward"
-        plat = platform.system()
-        if plat == "Linux":
-            _ = self.train(use_cinn=False)
-        else:
-            pass
+        _ = self.train(use_cinn=False)
 
     def test_cinn_prim_backward(self):
         """cinn + prim_backward"""
         self.reset_env_flag()
         os.environ["FLAGS_prim_backward"] = "True"
         self.flag = "cinn_prim_backward"
-        plat = platform.system()
-        if plat == "Linux":
-            _ = self.train(use_cinn=True)
-        else:
-            pass
+        _ = self.train(use_cinn=True)
 
     def test_prim_backward(self):
         """only prim backward"""
         self.reset_env_flag()
         os.environ["FLAGS_prim_backward"] = "True"
         self.flag = "prim_backward"
-        plat = platform.system()
-        if plat == "Linux":
-            _ = self.train(use_cinn=False)
-        else:
-            pass
+        _ = self.train(use_cinn=False)
 
     def test_cinn(self):
         """only cinn"""
         self.reset_env_flag()
         self.flag = "cinn"
-        plat = platform.system()
-        if plat == "Linux":
-            _ = self.train(use_cinn=True)
-        else:
-            pass
+        _ = self.train(use_cinn=True)
 
 
 if __name__ == '__main__':
diff --git a/python/paddle/fluid/tests/unittests/standalone_executor/test_standalone_controlflow.py b/python/paddle/fluid/tests/unittests/standalone_executor/test_standalone_controlflow.py
index 000f1b7d4b8561..069240823d8fd5 100644
--- a/python/paddle/fluid/tests/unittests/standalone_executor/test_standalone_controlflow.py
+++ b/python/paddle/fluid/tests/unittests/standalone_executor/test_standalone_controlflow.py
@@ -17,7 +17,6 @@
 import numpy as np
 
 import paddle
-import paddle.fluid.layers as layers
 from paddle.fluid import core, framework
 from paddle.fluid.framework import Program, program_guard
 
@@ -42,20 +41,26 @@ def _get_feed(self):
     def build_program(self):
         def true_func():
-            return layers.fill_constant(
+            return
paddle.tensor.fill_constant( shape=[1, 2], dtype='int32', value=1 - ), layers.fill_constant(shape=[2, 3], dtype='bool', value=True) + ), paddle.tensor.fill_constant( + shape=[2, 3], dtype='bool', value=True + ) def false_func(): - return layers.fill_constant( + return paddle.tensor.fill_constant( shape=[3, 4], dtype='float32', value=3 - ), layers.fill_constant(shape=[4, 5], dtype='int64', value=2) + ), paddle.tensor.fill_constant(shape=[4, 5], dtype='int64', value=2) main_program = Program() startup_program = Program() with program_guard(main_program, startup_program): - x = layers.fill_constant(shape=[1], dtype='float32', value=0.1) - y = layers.fill_constant(shape=[1], dtype='float32', value=0.23) + x = paddle.tensor.fill_constant( + shape=[1], dtype='float32', value=0.1 + ) + y = paddle.tensor.fill_constant( + shape=[1], dtype='float32', value=0.23 + ) pred = paddle.less_than(x, y) out = paddle.static.nn.cond(pred, true_func, false_func) # out is a tuple containing 2 tensors diff --git a/python/paddle/fluid/tests/unittests/standalone_executor/test_standalone_executor.py b/python/paddle/fluid/tests/unittests/standalone_executor/test_standalone_executor.py index 86060d6bf2de12..c208044722f52a 100644 --- a/python/paddle/fluid/tests/unittests/standalone_executor/test_standalone_executor.py +++ b/python/paddle/fluid/tests/unittests/standalone_executor/test_standalone_executor.py @@ -395,7 +395,7 @@ class TestInplaceApiWithDataTransform(unittest.TestCase): def test_increment(self): if paddle.fluid.core.is_compiled_with_cuda(): with paddle.fluid.device_guard("gpu:0"): - x = paddle.fluid.layers.fill_constant([1], "float32", 0) + x = paddle.tensor.fill_constant([1], "float32", 0) with paddle.fluid.device_guard("cpu"): x = paddle.increment(x) exe = paddle.static.Executor(paddle.CUDAPlace(0)) diff --git a/python/paddle/fluid/tests/unittests/test_activation_op.py b/python/paddle/fluid/tests/unittests/test_activation_op.py index 9eae74a17cd03c..154e3c33f12365 100644 --- a/python/paddle/fluid/tests/unittests/test_activation_op.py +++ b/python/paddle/fluid/tests/unittests/test_activation_op.py @@ -3045,7 +3045,7 @@ def test_api(self): ) factor_1 = 2.0 - factor_2 = fluid.layers.fill_constant([1], "float32", 3.0) + factor_2 = paddle.tensor.fill_constant([1], "float32", 3.0) out_1 = paddle.pow(x, factor_1) out_2 = paddle.pow(x, factor_2) out_4 = paddle.pow(x, factor_1, name='pow_res') diff --git a/python/paddle/fluid/tests/unittests/test_assert_op.py b/python/paddle/fluid/tests/unittests/test_assert_op.py index f62ad38d459d1a..48a9369dce2264 100644 --- a/python/paddle/fluid/tests/unittests/test_assert_op.py +++ b/python/paddle/fluid/tests/unittests/test_assert_op.py @@ -16,7 +16,6 @@ import paddle import paddle.fluid as fluid -import paddle.fluid.layers as layers from paddle.static.nn.control_flow import Assert @@ -31,7 +30,7 @@ def run_network(self, net_func): def test_assert_true(self): def net_func(): - condition = layers.fill_constant( + condition = paddle.tensor.fill_constant( shape=[1], dtype='bool', value=True ) Assert(condition, []) @@ -40,7 +39,7 @@ def net_func(): def test_assert_false(self): def net_func(): - condition = layers.fill_constant( + condition = paddle.tensor.fill_constant( shape=[1], dtype='bool', value=False ) Assert(condition) @@ -50,7 +49,7 @@ def net_func(): def test_assert_cond_numel_error(self): def net_func(): - condition = layers.fill_constant( + condition = paddle.tensor.fill_constant( shape=[1, 2], dtype='bool', value=True ) Assert(condition, []) @@ -60,8 +59,10 @@ def 
net_func(): def test_assert_print_data(self): def net_func(): - zero = layers.fill_constant(shape=[1], dtype='int64', value=0) - one = layers.fill_constant(shape=[1], dtype='int64', value=1) + zero = paddle.tensor.fill_constant( + shape=[1], dtype='int64', value=0 + ) + one = paddle.tensor.fill_constant(shape=[1], dtype='int64', value=1) condition = paddle.less_than(one, zero) # False Assert(condition, [zero, one]) @@ -71,7 +72,9 @@ def net_func(): def test_assert_summary(self): def net_func(): - x = layers.fill_constant(shape=[10], dtype='float32', value=2.0) + x = paddle.tensor.fill_constant( + shape=[10], dtype='float32', value=2.0 + ) condition = paddle.max(x) < 1.0 Assert(condition, (x,), 5) @@ -81,7 +84,9 @@ def net_func(): def test_assert_summary_greater_than_size(self): def net_func(): - x = layers.fill_constant(shape=[2, 3], dtype='float32', value=2.0) + x = paddle.tensor.fill_constant( + shape=[2, 3], dtype='float32', value=2.0 + ) condition = paddle.max(x) < 1.0 Assert(condition, [x], 10, name="test") diff --git a/python/paddle/fluid/tests/unittests/test_assign_op.py b/python/paddle/fluid/tests/unittests/test_assign_op.py index e53f0b8a119fe6..b3e79d26520022 100644 --- a/python/paddle/fluid/tests/unittests/test_assign_op.py +++ b/python/paddle/fluid/tests/unittests/test_assign_op.py @@ -76,11 +76,11 @@ def test_assign_LoDTensorArray(self): with program_guard(main_program): x = fluid.data(name='x', shape=[100, 10], dtype='float32') x.stop_gradient = False - y = fluid.layers.fill_constant( + y = paddle.tensor.fill_constant( shape=[100, 10], dtype='float32', value=1 ) z = paddle.add(x=x, y=y) - i = fluid.layers.fill_constant(shape=[1], dtype='int64', value=0) + i = paddle.tensor.fill_constant(shape=[1], dtype='int64', value=0) init_array = paddle.tensor.array_write(x=z, i=i) array = paddle.assign(init_array) sums = paddle.tensor.array_read(array=init_array, i=i) @@ -129,11 +129,11 @@ def test_assign_LoDTensorArray(self): with program_guard(main_program): x = fluid.data(name='x', shape=[100, 10], dtype='float32') x.stop_gradient = False - y = fluid.layers.fill_constant( + y = paddle.tensor.fill_constant( shape=[100, 10], dtype='float32', value=1 ) z = paddle.add(x=x, y=y) - i = fluid.layers.fill_constant(shape=[1], dtype='int64', value=0) + i = paddle.tensor.fill_constant(shape=[1], dtype='int64', value=0) init_array = paddle.tensor.array_write(x=z, i=i) array = paddle.assign(init_array) sums = paddle.tensor.array_read(array=init_array, i=i) diff --git a/python/paddle/fluid/tests/unittests/test_broadcast_to_op.py b/python/paddle/fluid/tests/unittests/test_broadcast_to_op.py index 18daf2059f2410..7d723ed2d070e9 100644 --- a/python/paddle/fluid/tests/unittests/test_broadcast_to_op.py +++ b/python/paddle/fluid/tests/unittests/test_broadcast_to_op.py @@ -44,7 +44,7 @@ def test_api(self): input = np.random.random([12, 14]).astype("float32") x = paddle.static.data(name='x', shape=[12, 14], dtype="float32") - positive_2 = fluid.layers.fill_constant([1], "int32", 12) + positive_2 = paddle.tensor.fill_constant([1], "int32", 12) expand_shape = paddle.static.data( name="expand_shape", shape=[2], @@ -81,7 +81,7 @@ def test_api_fp16_gpu(self): name="x", shape=[12, 14], dtype="float16" ) - positive_2 = paddle.fluid.layers.fill_constant([1], "int32", 12) + positive_2 = paddle.tensor.fill_constant([1], "int32", 12) expand_shape = paddle.static.data( name="expand_shape", shape=[2], diff --git a/python/paddle/fluid/tests/unittests/test_case.py b/python/paddle/fluid/tests/unittests/test_case.py index 
675b51cf0a0535..d0c6d2f9837708 100644 --- a/python/paddle/fluid/tests/unittests/test_case.py +++ b/python/paddle/fluid/tests/unittests/test_case.py @@ -20,7 +20,6 @@ import paddle import paddle.fluid as fluid import paddle.fluid.core as core -import paddle.fluid.layers as layers import paddle.fluid.optimizer as optimizer from paddle.fluid.backward import append_backward from paddle.fluid.framework import Program, program_guard @@ -31,20 +30,32 @@ class TestAPICase(unittest.TestCase): def test_return_single_var(self): def fn_1(): - return layers.fill_constant(shape=[4, 2], dtype='int32', value=1) + return paddle.tensor.fill_constant( + shape=[4, 2], dtype='int32', value=1 + ) def fn_2(): - return layers.fill_constant(shape=[4, 2], dtype='int32', value=2) + return paddle.tensor.fill_constant( + shape=[4, 2], dtype='int32', value=2 + ) def fn_3(): - return layers.fill_constant(shape=[4, 3], dtype='int32', value=3) + return paddle.tensor.fill_constant( + shape=[4, 3], dtype='int32', value=3 + ) main_program = Program() startup_program = Program() with program_guard(main_program, startup_program): - x = layers.fill_constant(shape=[1], dtype='float32', value=0.3) - y = layers.fill_constant(shape=[1], dtype='float32', value=0.1) - z = layers.fill_constant(shape=[1], dtype='float32', value=0.2) + x = paddle.tensor.fill_constant( + shape=[1], dtype='float32', value=0.3 + ) + y = paddle.tensor.fill_constant( + shape=[1], dtype='float32', value=0.1 + ) + z = paddle.tensor.fill_constant( + shape=[1], dtype='float32', value=0.2 + ) pred_2 = paddle.less_than(x, y) # false: 0.3 < 0.1 pred_1 = paddle.less_than(z, x) # true: 0.2 < 0.3 @@ -244,26 +255,32 @@ def fn_3(): def test_return_var_tuple(self): def fn_1(): - return layers.fill_constant( + return paddle.tensor.fill_constant( shape=[1, 2], dtype='int32', value=1 - ), layers.fill_constant(shape=[2, 3], dtype='float32', value=2) + ), paddle.tensor.fill_constant( + shape=[2, 3], dtype='float32', value=2 + ) def fn_2(): - return layers.fill_constant( + return paddle.tensor.fill_constant( shape=[3, 4], dtype='int32', value=3 - ), layers.fill_constant(shape=[4, 5], dtype='float32', value=4) + ), paddle.tensor.fill_constant( + shape=[4, 5], dtype='float32', value=4 + ) def fn_3(): - return layers.fill_constant( + return paddle.tensor.fill_constant( shape=[5], dtype='int32', value=5 - ), layers.fill_constant(shape=[5, 6], dtype='float32', value=6) + ), paddle.tensor.fill_constant( + shape=[5, 6], dtype='float32', value=6 + ) main_program = Program() startup_program = Program() with program_guard(main_program, startup_program): - x = layers.fill_constant(shape=[1], dtype='float32', value=1) - y = layers.fill_constant(shape=[1], dtype='float32', value=1) - z = layers.fill_constant(shape=[1], dtype='float32', value=3) + x = paddle.tensor.fill_constant(shape=[1], dtype='float32', value=1) + y = paddle.tensor.fill_constant(shape=[1], dtype='float32', value=1) + z = paddle.tensor.fill_constant(shape=[1], dtype='float32', value=3) pred_1 = paddle.equal(x, y) # true pred_2 = paddle.equal(x, z) # false @@ -291,14 +308,18 @@ def fn_3(): class TestAPICase_Nested(unittest.TestCase): def test_nested_case(self): def fn_1(x=1): - var_5 = layers.fill_constant(shape=[1], dtype='int32', value=5) - var_6 = layers.fill_constant(shape=[1], dtype='int32', value=6) + var_5 = paddle.tensor.fill_constant( + shape=[1], dtype='int32', value=5 + ) + var_6 = paddle.tensor.fill_constant( + shape=[1], dtype='int32', value=6 + ) out = paddle.static.nn.control_flow.case( pred_fn_pairs=[ ( 
var_5 < var_6, partial( - layers.fill_constant, + paddle.tensor.fill_constant, shape=[1], dtype='int32', value=x, @@ -307,7 +328,7 @@ def fn_1(x=1): ( var_5 == var_6, partial( - layers.fill_constant, + paddle.tensor.fill_constant, shape=[2], dtype='int32', value=x, @@ -318,15 +339,19 @@ def fn_1(x=1): return out def fn_2(x=2): - var_5 = layers.fill_constant(shape=[1], dtype='int32', value=5) - var_6 = layers.fill_constant(shape=[1], dtype='int32', value=6) + var_5 = paddle.tensor.fill_constant( + shape=[1], dtype='int32', value=5 + ) + var_6 = paddle.tensor.fill_constant( + shape=[1], dtype='int32', value=6 + ) out = paddle.static.nn.control_flow.case( pred_fn_pairs=[ (var_5 < var_6, partial(fn_1, x=x)), ( var_5 == var_6, partial( - layers.fill_constant, + paddle.tensor.fill_constant, shape=[2], dtype='int32', value=x, @@ -337,15 +362,19 @@ def fn_2(x=2): return out def fn_3(): - var_5 = layers.fill_constant(shape=[1], dtype='int32', value=5) - var_6 = layers.fill_constant(shape=[1], dtype='int32', value=6) + var_5 = paddle.tensor.fill_constant( + shape=[1], dtype='int32', value=5 + ) + var_6 = paddle.tensor.fill_constant( + shape=[1], dtype='int32', value=6 + ) out = paddle.static.nn.control_flow.case( pred_fn_pairs=[ (var_5 < var_6, partial(fn_2, x=3)), ( var_5 == var_6, partial( - layers.fill_constant, + paddle.tensor.fill_constant, shape=[2], dtype='int32', value=7, @@ -358,9 +387,15 @@ def fn_3(): main_program = Program() startup_program = Program() with program_guard(main_program, startup_program): - x = layers.fill_constant(shape=[1], dtype='float32', value=0.3) - y = layers.fill_constant(shape=[1], dtype='float32', value=0.1) - z = layers.fill_constant(shape=[1], dtype='float32', value=0.2) + x = paddle.tensor.fill_constant( + shape=[1], dtype='float32', value=0.3 + ) + y = paddle.tensor.fill_constant( + shape=[1], dtype='float32', value=0.1 + ) + z = paddle.tensor.fill_constant( + shape=[1], dtype='float32', value=0.2 + ) pred_2 = paddle.less_than(x, y) # false: 0.3 < 0.1 pred_1 = paddle.less_than(z, x) # true: 0.2 < 0.3 @@ -496,13 +531,19 @@ def fn_3(): class TestAPICase_Error(unittest.TestCase): def test_error(self): def fn_1(): - return layers.fill_constant(shape=[4, 2], dtype='int32', value=1) + return paddle.tensor.fill_constant( + shape=[4, 2], dtype='int32', value=1 + ) main_program = Program() startup_program = Program() with program_guard(main_program, startup_program): - x = layers.fill_constant(shape=[1], dtype='float32', value=0.23) - z = layers.fill_constant(shape=[1], dtype='float32', value=0.2) + x = paddle.tensor.fill_constant( + shape=[1], dtype='float32', value=0.23 + ) + z = paddle.tensor.fill_constant( + shape=[1], dtype='float32', value=0.2 + ) pred_1 = paddle.less_than(z, x) # true # The type of 'pred_fn_pairs' in case must be list or tuple @@ -570,7 +611,7 @@ def test_optimizer_in_case(self): switch_id = fluid.data(name='switch_id', shape=[1], dtype='int32') - one = layers.fill_constant(shape=[1], dtype='int32', value=1) + one = paddle.tensor.fill_constant(shape=[1], dtype='int32', value=1) adam = optimizer.Adam(learning_rate=0.001) adagrad = optimizer.Adagrad(learning_rate=0.001) diff --git a/python/paddle/fluid/tests/unittests/test_concat_op.py b/python/paddle/fluid/tests/unittests/test_concat_op.py index 78a8f194abc3f8..4dcb4610a1fd62 100644 --- a/python/paddle/fluid/tests/unittests/test_concat_op.py +++ b/python/paddle/fluid/tests/unittests/test_concat_op.py @@ -347,8 +347,8 @@ def test_fluid_api(self): input_3 = np.random.random([2, 2, 4, 
5]).astype("int32") x_2 = fluid.data(shape=[2, 1, 4, 5], dtype='int32', name='x_2') x_3 = fluid.data(shape=[2, 2, 4, 5], dtype='int32', name='x_3') - positive_1_int32 = fluid.layers.fill_constant([1], "int32", 1) - positive_1_int64 = fluid.layers.fill_constant([1], "int64", 1) + positive_1_int32 = paddle.tensor.fill_constant([1], "int32", 1) + positive_1_int64 = paddle.tensor.fill_constant([1], "int64", 1) out_1 = paddle.concat([x_2, x_3], axis=1) out_2 = paddle.concat([x_2, x_3], axis=positive_1_int32) out_3 = paddle.concat([x_2, x_3], axis=positive_1_int64) @@ -374,9 +374,9 @@ def test_api(self): input_3 = np.random.random([2, 2, 4, 5]).astype("int32") x_2 = fluid.data(shape=[2, 1, 4, 5], dtype='int32', name='x_2') x_3 = fluid.data(shape=[2, 2, 4, 5], dtype='int32', name='x_3') - positive_1_int32 = paddle.fluid.layers.fill_constant([1], "int32", 1) - positive_1_int64 = paddle.fluid.layers.fill_constant([1], "int64", 1) - negative_int64 = paddle.fluid.layers.fill_constant([1], "int64", -3) + positive_1_int32 = paddle.tensor.fill_constant([1], "int32", 1) + positive_1_int64 = paddle.tensor.fill_constant([1], "int64", 1) + negative_int64 = paddle.tensor.fill_constant([1], "int64", -3) out_1 = paddle.concat(x=[x_2, x_3], axis=1) out_2 = paddle.concat(x=[x_2, x_3], axis=positive_1_int32) out_3 = paddle.concat(x=[x_2, x_3], axis=positive_1_int64) @@ -464,7 +464,7 @@ def set_program(self, use_fluid_api): with fluid.program_guard(self.program): input = paddle.assign(self.x) tensor_array = paddle.tensor.create_array(dtype='float32') - zero = fluid.layers.fill_constant( + zero = paddle.tensor.fill_constant( shape=[1], value=0, dtype="int64" ) diff --git a/python/paddle/fluid/tests/unittests/test_cond.py b/python/paddle/fluid/tests/unittests/test_cond.py index d6a2b54e90d83d..b5fb0a50f181fd 100644 --- a/python/paddle/fluid/tests/unittests/test_cond.py +++ b/python/paddle/fluid/tests/unittests/test_cond.py @@ -21,7 +21,6 @@ import paddle.fluid as fluid import paddle.fluid.core as core import paddle.fluid.framework as framework -import paddle.fluid.layers as layers from paddle.fluid.backward import append_backward from paddle.fluid.framework import Program, program_guard @@ -42,16 +41,24 @@ def test_return_single_var(self): paddle.enable_static() def true_func(): - return layers.fill_constant(shape=[2, 3], dtype='int32', value=2) + return paddle.tensor.fill_constant( + shape=[2, 3], dtype='int32', value=2 + ) def false_func(): - return layers.fill_constant(shape=[3, 2], dtype='int32', value=-1) + return paddle.tensor.fill_constant( + shape=[3, 2], dtype='int32', value=-1 + ) main_program = Program() startup_program = Program() with program_guard(main_program, startup_program): - x = layers.fill_constant(shape=[1], dtype='float32', value=0.1) - y = layers.fill_constant(shape=[1], dtype='float32', value=0.23) + x = paddle.tensor.fill_constant( + shape=[1], dtype='float32', value=0.1 + ) + y = paddle.tensor.fill_constant( + shape=[1], dtype='float32', value=0.23 + ) pred = paddle.less_than(y, x) out = paddle.static.nn.cond(pred, true_func, false_func) # out is one tensor @@ -217,19 +224,23 @@ def test_return_var_tuple(self): paddle.enable_static() def true_func(): - return layers.fill_constant( + return paddle.tensor.fill_constant( shape=[1, 2], dtype='int32', value=1 - ), layers.fill_constant(shape=[2, 3], dtype='bool', value=True) + ), paddle.tensor.fill_constant( + shape=[2, 3], dtype='bool', value=True + ) def false_func(): - return layers.fill_constant( + return paddle.tensor.fill_constant( 
shape=[3, 4], dtype='float32', value=3 - ), layers.fill_constant(shape=[4, 5], dtype='int64', value=2) + ), paddle.tensor.fill_constant(shape=[4, 5], dtype='int64', value=2) main_program = Program() startup_program = Program() with program_guard(main_program, startup_program): - pred = layers.fill_constant(shape=[1], dtype='bool', value=True) + pred = paddle.tensor.fill_constant( + shape=[1], dtype='bool', value=True + ) out = paddle.static.nn.cond(pred, true_func, false_func) # out is a tuple containing 2 tensors @@ -271,7 +282,9 @@ def false_func(a, i): main_program = Program() startup_program = Program() with program_guard(main_program, startup_program): - a = layers.fill_constant(shape=[3, 2, 1], dtype='int32', value=7) + a = paddle.tensor.fill_constant( + shape=[3, 2, 1], dtype='int32', value=7 + ) i = fluid.data(name="i", shape=[1], dtype='int32') pred = (i % 2) == 0 a = paddle.static.nn.cond( @@ -346,12 +359,14 @@ def func_return_none(): return None def func_return_one_tensor(): - return layers.fill_constant(shape=[2, 7], dtype='int32', value=3) + return paddle.tensor.fill_constant( + shape=[2, 7], dtype='int32', value=3 + ) def func_return_two_tensors(): - return layers.fill_constant( + return paddle.tensor.fill_constant( shape=[3, 1], dtype='int32', value=7 - ), layers.fill_constant(shape=[3, 1], dtype='int32', value=8) + ), paddle.tensor.fill_constant(shape=[3, 1], dtype='int32', value=8) main_program = Program() startup_program = Program() @@ -398,11 +413,11 @@ def test_extremely_simple_net_with_op_in_condition(self): main_program = fluid.Program() startup_program = fluid.Program() with fluid.program_guard(main_program, startup_program): - a = fluid.layers.fill_constant( + a = paddle.tensor.fill_constant( shape=[1], dtype='float32', value=1.23 ) a.stop_gradient = False - b = fluid.layers.fill_constant( + b = paddle.tensor.fill_constant( shape=[1], dtype='float32', value=1.25 ) b.stop_gradient = False @@ -567,11 +582,11 @@ def test_cond_op_in_condition(self): startup_program = fluid.Program() with fluid.program_guard(main_program, startup_program): - a = fluid.layers.fill_constant( + a = paddle.tensor.fill_constant( shape=[1], dtype='float32', value=1.23 ) a.stop_gradient = False - b = fluid.layers.fill_constant( + b = paddle.tensor.fill_constant( shape=[1], dtype='float32', value=1.24 ) b.stop_gradient = False diff --git a/python/paddle/fluid/tests/unittests/test_conditional_block.py b/python/paddle/fluid/tests/unittests/test_conditional_block.py index 7e4224a768d4c5..c62243af78138c 100644 --- a/python/paddle/fluid/tests/unittests/test_conditional_block.py +++ b/python/paddle/fluid/tests/unittests/test_conditional_block.py @@ -19,7 +19,6 @@ import paddle import paddle.fluid as fluid import paddle.fluid.core as core -import paddle.fluid.layers as layers from paddle.fluid.backward import append_backward from paddle.fluid.executor import Executor from paddle.fluid.layers.control_flow import ConditionalBlock @@ -67,7 +66,7 @@ def test_infer_shape(self): step_scope = global_block.create_var( type=core.VarDesc.VarType.STEP_SCOPES ) - cond_var = layers.fill_constant( + cond_var = paddle.tensor.fill_constant( shape=[1], dtype='bool', value=False ) diff --git a/python/paddle/fluid/tests/unittests/test_dist_fleet_minimize.py b/python/paddle/fluid/tests/unittests/test_dist_fleet_minimize.py index ba6aec796dbc1e..668ae8a04a118b 100644 --- a/python/paddle/fluid/tests/unittests/test_dist_fleet_minimize.py +++ b/python/paddle/fluid/tests/unittests/test_dist_fleet_minimize.py @@ -41,7 +41,7 @@ 
def get_acc(cos_q_nt, cos_q_pt, batch_size): cond_3 = paddle.sum(cond) acc = paddle.divide( cond_3, - fluid.layers.fill_constant( + paddle.tensor.fill_constant( shape=[1], value=batch_size * 1.0, dtype='float64' ), name="simnet_acc", diff --git a/python/paddle/fluid/tests/unittests/test_dist_fleet_ps.py b/python/paddle/fluid/tests/unittests/test_dist_fleet_ps.py index 3487e5f9894656..e785375aea55d8 100644 --- a/python/paddle/fluid/tests/unittests/test_dist_fleet_ps.py +++ b/python/paddle/fluid/tests/unittests/test_dist_fleet_ps.py @@ -41,7 +41,7 @@ def get_acc(cos_q_nt, cos_q_pt, batch_size): cond_3 = paddle.sum(cond) acc = paddle.divide( cond_3, - fluid.layers.fill_constant( + paddle.tensor.fill_constant( shape=[1], value=batch_size * 1.0, dtype='float64' ), name="simnet_acc", diff --git a/python/paddle/fluid/tests/unittests/test_dist_fleet_ps11.py b/python/paddle/fluid/tests/unittests/test_dist_fleet_ps11.py index eb064ba5459b4e..45d74ee8347c41 100755 --- a/python/paddle/fluid/tests/unittests/test_dist_fleet_ps11.py +++ b/python/paddle/fluid/tests/unittests/test_dist_fleet_ps11.py @@ -41,7 +41,7 @@ def get_acc(cos_q_nt, cos_q_pt, batch_size): cond_3 = paddle.sum(cond) acc = paddle.divide( cond_3, - fluid.layers.fill_constant( + paddle.tensor.fill_constant( shape=[1], value=batch_size * 1.0, dtype='float64' ), name="simnet_acc", diff --git a/python/paddle/fluid/tests/unittests/test_dist_fleet_ps12.py b/python/paddle/fluid/tests/unittests/test_dist_fleet_ps12.py index 46029341c2cc7a..749d07af339fdf 100644 --- a/python/paddle/fluid/tests/unittests/test_dist_fleet_ps12.py +++ b/python/paddle/fluid/tests/unittests/test_dist_fleet_ps12.py @@ -44,7 +44,7 @@ def get_acc(cos_q_nt, cos_q_pt, batch_size): cond_3 = paddle.sum(cond) acc = paddle.divide( cond_3, - fluid.layers.fill_constant( + paddle.tensor.fill_constant( shape=[1], value=batch_size * 1.0, dtype='float64' ), name="simnet_acc", diff --git a/python/paddle/fluid/tests/unittests/test_dist_fleet_ps13.py b/python/paddle/fluid/tests/unittests/test_dist_fleet_ps13.py index 4bdb71272d19d3..95e911a42ec626 100644 --- a/python/paddle/fluid/tests/unittests/test_dist_fleet_ps13.py +++ b/python/paddle/fluid/tests/unittests/test_dist_fleet_ps13.py @@ -45,7 +45,7 @@ def get_acc(cos_q_nt, cos_q_pt, batch_size): cond_3 = paddle.sum(cond) acc = paddle.divide( cond_3, - fluid.layers.fill_constant( + paddle.tensor.fill_constant( shape=[1], value=batch_size * 1.0, dtype='float64' ), name="simnet_acc", diff --git a/python/paddle/fluid/tests/unittests/test_dist_fleet_ps2.py b/python/paddle/fluid/tests/unittests/test_dist_fleet_ps2.py index f5c0f11e856d00..3515bfc610acc5 100644 --- a/python/paddle/fluid/tests/unittests/test_dist_fleet_ps2.py +++ b/python/paddle/fluid/tests/unittests/test_dist_fleet_ps2.py @@ -44,7 +44,7 @@ def get_acc(cos_q_nt, cos_q_pt, batch_size): cond_3 = paddle.sum(cond) acc = paddle.divide( cond_3, - fluid.layers.fill_constant( + paddle.tensor.fill_constant( shape=[1], value=batch_size * 1.0, dtype='float64' ), name="simnet_acc", diff --git a/python/paddle/fluid/tests/unittests/test_dist_fleet_ps3.py b/python/paddle/fluid/tests/unittests/test_dist_fleet_ps3.py index ebdcbe4bb8f6fd..f05cfb45f02cca 100644 --- a/python/paddle/fluid/tests/unittests/test_dist_fleet_ps3.py +++ b/python/paddle/fluid/tests/unittests/test_dist_fleet_ps3.py @@ -41,7 +41,7 @@ def get_acc(cos_q_nt, cos_q_pt, batch_size): cond_3 = paddle.sum(cond) acc = paddle.divide( cond_3, - fluid.layers.fill_constant( + paddle.tensor.fill_constant( shape=[1], value=batch_size * 
1.0, dtype='float64' ), name="simnet_acc", diff --git a/python/paddle/fluid/tests/unittests/test_dist_fleet_ps4.py b/python/paddle/fluid/tests/unittests/test_dist_fleet_ps4.py index ac911a7418df46..0dad2dcfd04467 100644 --- a/python/paddle/fluid/tests/unittests/test_dist_fleet_ps4.py +++ b/python/paddle/fluid/tests/unittests/test_dist_fleet_ps4.py @@ -41,7 +41,7 @@ def get_acc(cos_q_nt, cos_q_pt, batch_size): cond_3 = paddle.sum(cond) acc = paddle.divide( cond_3, - fluid.layers.fill_constant( + paddle.tensor.fill_constant( shape=[1], value=batch_size * 1.0, dtype='float64' ), name="simnet_acc", diff --git a/python/paddle/fluid/tests/unittests/test_dist_fleet_ps5.py b/python/paddle/fluid/tests/unittests/test_dist_fleet_ps5.py index 994d0943e5989d..b92275ed2cb767 100644 --- a/python/paddle/fluid/tests/unittests/test_dist_fleet_ps5.py +++ b/python/paddle/fluid/tests/unittests/test_dist_fleet_ps5.py @@ -41,7 +41,7 @@ def get_acc(cos_q_nt, cos_q_pt, batch_size): cond_3 = paddle.sum(cond) acc = paddle.divide( cond_3, - fluid.layers.fill_constant( + paddle.tensor.fill_constant( shape=[1], value=batch_size * 1.0, dtype='float64' ), name="simnet_acc", diff --git a/python/paddle/fluid/tests/unittests/test_dist_fleet_ps6.py b/python/paddle/fluid/tests/unittests/test_dist_fleet_ps6.py index 5d97ac944c2563..a1e98542750e4f 100644 --- a/python/paddle/fluid/tests/unittests/test_dist_fleet_ps6.py +++ b/python/paddle/fluid/tests/unittests/test_dist_fleet_ps6.py @@ -41,7 +41,7 @@ def get_acc(cos_q_nt, cos_q_pt, batch_size): cond_3 = paddle.sum(cond) acc = paddle.divide( cond_3, - fluid.layers.fill_constant( + paddle.tensor.fill_constant( shape=[1], value=batch_size * 1.0, dtype='float64' ), name="simnet_acc", diff --git a/python/paddle/fluid/tests/unittests/test_dist_fleet_spmt.py b/python/paddle/fluid/tests/unittests/test_dist_fleet_spmt.py index 61b91e08a33af9..beac8248d7add0 100644 --- a/python/paddle/fluid/tests/unittests/test_dist_fleet_spmt.py +++ b/python/paddle/fluid/tests/unittests/test_dist_fleet_spmt.py @@ -39,7 +39,7 @@ def get_acc(cos_q_nt, cos_q_pt, batch_size): cond_3 = paddle.sum(cond) acc = paddle.divide( cond_3, - fluid.layers.fill_constant( + paddle.tensor.fill_constant( shape=[1], value=batch_size * 1.0, dtype='float64' ), name="simnet_acc", diff --git a/python/paddle/fluid/tests/unittests/test_dynamic_rnn_stop_gradient.py b/python/paddle/fluid/tests/unittests/test_dynamic_rnn_stop_gradient.py index 2d3fcf1b617b2d..9ea90d152dad9c 100644 --- a/python/paddle/fluid/tests/unittests/test_dynamic_rnn_stop_gradient.py +++ b/python/paddle/fluid/tests/unittests/test_dynamic_rnn_stop_gradient.py @@ -18,7 +18,6 @@ import paddle import paddle.fluid as fluid -import paddle.fluid.layers as layers from paddle.tensor.manipulation import tensor_array_to_tensor paddle.enable_static() @@ -33,10 +32,10 @@ def build_and_run_program(place, batch_size, beam_size, stop_gradient=False): np.random.rand(batch_size, beam_size, 32).astype("float32") ) indices = fluid.data(shape=[None, beam_size], dtype="int64", name="indices") - step_idx = layers.fill_constant( + step_idx = paddle.tensor.fill_constant( shape=[1], dtype="int64", value=0, force_cpu=True ) - max_len = layers.fill_constant( + max_len = paddle.tensor.fill_constant( shape=[1], dtype="int64", value=10, force_cpu=True ) cond = paddle.less_than(x=step_idx, y=max_len) diff --git a/python/paddle/fluid/tests/unittests/test_eager_deletion_while_op.py b/python/paddle/fluid/tests/unittests/test_eager_deletion_while_op.py index 3153257670737b..8fdb4621410057 100644 
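The parameter-server tests above all make the same mechanical substitution. A standalone sketch (not part of any test file) showing that `paddle.tensor.fill_constant` accepts the same arguments the old `fluid.layers` spelling did, including the `force_cpu` flag used by the dynamic-RNN test above:

```python
import paddle

paddle.enable_static()

main = paddle.static.Program()
with paddle.static.program_guard(main):
    # Same keyword signature as the old fluid.layers.fill_constant call:
    # a [1]-shaped float64 tensor holding batch_size * 1.0.
    denom = paddle.tensor.fill_constant(
        shape=[1], value=32 * 1.0, dtype='float64'
    )
    # force_cpu pins the constant to host memory.
    step_idx = paddle.tensor.fill_constant(
        shape=[1], dtype='int64', value=0, force_cpu=True
    )
```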
--- a/python/paddle/fluid/tests/unittests/test_eager_deletion_while_op.py +++ b/python/paddle/fluid/tests/unittests/test_eager_deletion_while_op.py @@ -73,14 +73,18 @@ def run_main(self, place): i = layers.zeros(shape=[1], dtype='int64') i.stop_gradient = True - array_len = layers.fill_constant(shape=[1], dtype='int64', value=1) + array_len = paddle.tensor.fill_constant( + shape=[1], dtype='int64', value=1 + ) array_len.stop_gradient = True cond = paddle.less_than(x=i, y=array_len) - j = layers.fill_constant(shape=[1], dtype='int64', value=1) + j = paddle.tensor.fill_constant(shape=[1], dtype='int64', value=1) j.stop_gradient = True - array_len2 = layers.fill_constant(shape=[1], dtype='int64', value=3) + array_len2 = paddle.tensor.fill_constant( + shape=[1], dtype='int64', value=3 + ) array_len2.stop_gradient = True cond2 = paddle.less_than(x=j, y=array_len2) diff --git a/python/paddle/fluid/tests/unittests/test_empty_op.py b/python/paddle/fluid/tests/unittests/test_empty_op.py index 871fa1ff146f3b..d13b7f53d366e4 100644 --- a/python/paddle/fluid/tests/unittests/test_empty_op.py +++ b/python/paddle/fluid/tests/unittests/test_empty_op.py @@ -239,8 +239,8 @@ def test_dygraph_api_attr(self): def test_static_graph(self): dtype = 'float64' - positive_2_int32 = fluid.layers.fill_constant([1], "int32", 3) - positive_2_int64 = fluid.layers.fill_constant([1], "int64", 3) + positive_2_int32 = paddle.tensor.fill_constant([1], "int32", 3) + positive_2_int64 = paddle.tensor.fill_constant([1], "int64", 3) shape_tensor_int32 = fluid.data( name="shape_tensor_int32", shape=[2], dtype="int32" diff --git a/python/paddle/fluid/tests/unittests/test_expand_v2_op.py b/python/paddle/fluid/tests/unittests/test_expand_v2_op.py index cd85120b7de4af..e26002eee3713a 100644 --- a/python/paddle/fluid/tests/unittests/test_expand_v2_op.py +++ b/python/paddle/fluid/tests/unittests/test_expand_v2_op.py @@ -217,7 +217,7 @@ def test_api(self): input = np.random.random([12, 14]).astype("float32") x = paddle.static.data(name='x', shape=[12, 14], dtype="float32") - positive_2 = fluid.layers.fill_constant([1], "int32", 12) + positive_2 = paddle.tensor.fill_constant([1], "int32", 12) expand_shape = paddle.static.data( name="expand_shape", shape=[2], diff --git a/python/paddle/fluid/tests/unittests/test_fill_constant_op.py b/python/paddle/fluid/tests/unittests/test_fill_constant_op.py index 8f8703093b3877..56a703cf08c748 100644 --- a/python/paddle/fluid/tests/unittests/test_fill_constant_op.py +++ b/python/paddle/fluid/tests/unittests/test_fill_constant_op.py @@ -282,8 +282,8 @@ def test_check_output(self): class TestFillConstantAPI(unittest.TestCase): def test_api(self): - positive_2_int32 = fluid.layers.fill_constant([1], "int32", 2) - positive_2_int64 = fluid.layers.fill_constant([1], "int64", 2) + positive_2_int32 = paddle.tensor.fill_constant([1], "int32", 2) + positive_2_int64 = paddle.tensor.fill_constant([1], "int64", 2) shape_tensor_int32 = fluid.data( name="shape_tensor_int32", shape=[2], dtype="int32" @@ -292,41 +292,41 @@ def test_api(self): name="shape_tensor_int64", shape=[2], dtype="int64" ) - out_1 = fluid.layers.fill_constant( + out_1 = paddle.tensor.fill_constant( shape=[1, 2], dtype="float32", value=1.1 ) - out_2 = fluid.layers.fill_constant( + out_2 = paddle.tensor.fill_constant( shape=[1, positive_2_int32], dtype="float32", value=1.1 ) - out_3 = fluid.layers.fill_constant( + out_3 = paddle.tensor.fill_constant( shape=[1, positive_2_int64], dtype="float32", value=1.1 ) - out_4 = fluid.layers.fill_constant( + 
out_4 = paddle.tensor.fill_constant( shape=shape_tensor_int32, dtype="float32", value=1.1 ) - out_5 = fluid.layers.fill_constant( + out_5 = paddle.tensor.fill_constant( shape=shape_tensor_int64, dtype="float32", value=1.1 ) - out_6 = fluid.layers.fill_constant( + out_6 = paddle.tensor.fill_constant( shape=shape_tensor_int64, dtype=np.float32, value=1.1 ) - val1 = fluid.layers.fill_constant( + val1 = paddle.tensor.fill_constant( shape=[1], dtype=np.float32, value=1.1 ) - val2 = fluid.layers.fill_constant( + val2 = paddle.tensor.fill_constant( shape=[1], dtype=np.float64, value=1.1 ) - out_7 = fluid.layers.fill_constant( + out_7 = paddle.tensor.fill_constant( shape=shape_tensor_int64, dtype=np.float32, value=val1 ) - out_8 = fluid.layers.fill_constant( + out_8 = paddle.tensor.fill_constant( shape=shape_tensor_int64, dtype=np.float32, value=val2 ) @@ -359,16 +359,16 @@ def test_api(self): shape = fluid.dygraph.to_variable(data1) val = fluid.dygraph.to_variable(data2) value = fluid.dygraph.to_variable(data3) - res1 = fluid.layers.fill_constant( + res1 = paddle.tensor.fill_constant( shape=[1, 2], dtype='float32', value=1.1 ) - res2 = fluid.layers.fill_constant( + res2 = paddle.tensor.fill_constant( shape=shape, dtype='float32', value=1.1 ) - res3 = fluid.layers.fill_constant( + res3 = paddle.tensor.fill_constant( shape=shape, dtype='float32', value=val ) - res4 = fluid.layers.fill_constant( + res4 = paddle.tensor.fill_constant( shape=shape, dtype='int32', value=value ) assert np.array_equal( @@ -386,17 +386,17 @@ def test_api(self): def test_nan(self): with fluid.dygraph.guard(): - res = fluid.layers.fill_constant([1], 'float32', np.nan) + res = paddle.tensor.fill_constant([1], 'float32', np.nan) self.assertTrue(np.isnan(res.numpy().item(0))) def test_inf(self): with fluid.dygraph.guard(): - res = fluid.layers.fill_constant([1], 'float32', np.inf) + res = paddle.tensor.fill_constant([1], 'float32', np.inf) self.assertTrue(np.isinf(res.numpy().item(0))) def test_ninf(self): with fluid.dygraph.guard(): - res = fluid.layers.fill_constant([1], 'float32', np.NINF) + res = paddle.tensor.fill_constant([1], 'float32', np.NINF) self.assertTrue(np.isinf(res.numpy().item(0))) self.assertEqual(np.NINF, res.numpy().item(0)) @@ -408,7 +408,7 @@ def test_errors(self): x1 = paddle.static.data(name='x1', shape=[-1, 1], dtype="int16") self.assertRaises( TypeError, - fluid.layers.fill_constant, + paddle.tensor.fill_constant, shape=[1], value=5, dtype='uint4', @@ -416,7 +416,7 @@ def test_errors(self): self.assertRaises( TypeError, - fluid.layers.fill_constant, + paddle.tensor.fill_constant, shape=[1.1], value=5, dtype='float32', @@ -429,7 +429,7 @@ def test_errors(self): self.assertRaises( TypeError, - fluid.layers.fill_constant, + paddle.tensor.fill_constant, shape=[1], value=5, dtype='float64', @@ -439,7 +439,7 @@ def test_errors(self): x3 = np.random.randn(100, 100).astype('int32') self.assertRaises( TypeError, - fluid.layers.fill_constant, + paddle.tensor.fill_constant, shape=[100, 100], value=5, dtype='float64', @@ -448,7 +448,7 @@ def test_errors(self): # The argument shape's type of fill_constant_op must be list, tuple or Variable. 
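The fill_constant cases above feed the op its shape in three forms: a plain int list, a list mixing ints with 1-D tensors, and a shape tensor. A small runnable sketch, under the same static-graph setup, of the first two forms agreeing:

```python
import numpy as np
import paddle

paddle.enable_static()

main = paddle.static.Program()
with paddle.static.program_guard(main):
    dim = paddle.tensor.fill_constant([1], "int32", 2)
    # Ints only, and a mixed list with a 1-D tensor: both mean shape [1, 2].
    a = paddle.tensor.fill_constant(shape=[1, 2], dtype="float32", value=1.1)
    b = paddle.tensor.fill_constant(shape=[1, dim], dtype="float32", value=1.1)

exe = paddle.static.Executor()
res_a, res_b = exe.run(main, fetch_list=[a, b])
np.testing.assert_allclose(res_a, res_b)
```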
def test_shape_type(): - fluid.layers.fill_constant(shape=1, dtype="float32", value=1) + paddle.tensor.fill_constant(shape=1, dtype="float32", value=1) self.assertRaises(TypeError, test_shape_type) @@ -457,7 +457,7 @@ def test_shape_tensor_dtype(): shape = fluid.data( name="shape_tensor", shape=[2], dtype="float32" ) - fluid.layers.fill_constant( + paddle.tensor.fill_constant( shape=shape, dtype="float32", value=1 ) @@ -467,7 +467,7 @@ def test_shape_tensor_list_dtype(): shape = fluid.data( name="shape_tensor_list", shape=[1], dtype="bool" ) - fluid.layers.fill_constant( + paddle.tensor.fill_constant( shape=[shape, 2], dtype="float32", value=1 ) diff --git a/python/paddle/fluid/tests/unittests/test_full_op.py b/python/paddle/fluid/tests/unittests/test_full_op.py index 162dcae7841359..670cf2acb7675c 100644 --- a/python/paddle/fluid/tests/unittests/test_full_op.py +++ b/python/paddle/fluid/tests/unittests/test_full_op.py @@ -24,9 +24,9 @@ # Test python API class TestFullAPI(unittest.TestCase): def test_api(self): - positive_2_int32 = fluid.layers.fill_constant([1], "int32", 2) + positive_2_int32 = paddle.tensor.fill_constant([1], "int32", 2) - positive_2_int64 = fluid.layers.fill_constant([1], "int64", 2) + positive_2_int64 = paddle.tensor.fill_constant([1], "int64", 2) shape_tensor_int32 = fluid.data( name="shape_tensor_int32", shape=[2], dtype="int32" ) @@ -57,7 +57,9 @@ def test_api(self): shape=shape_tensor_int64, dtype=np.float32, fill_value=1.1 ) - val = fluid.layers.fill_constant(shape=[1], dtype=np.float32, value=1.1) + val = paddle.tensor.fill_constant( + shape=[1], dtype=np.float32, value=1.1 + ) out_7 = paddle.full( shape=shape_tensor_int64, dtype=np.float32, fill_value=val ) @@ -82,9 +84,11 @@ def test_api(self): def test_api_eager(self): with fluid.dygraph.base.guard(): - positive_2_int32 = fluid.layers.fill_constant([1], "int32", 2) - positive_2_int64 = fluid.layers.fill_constant([1], "int64", 2) - positive_4_int64 = fluid.layers.fill_constant([1], "int64", 4, True) + positive_2_int32 = paddle.tensor.fill_constant([1], "int32", 2) + positive_2_int64 = paddle.tensor.fill_constant([1], "int64", 2) + positive_4_int64 = paddle.tensor.fill_constant( + [1], "int64", 4, True + ) out_1 = paddle.full(shape=[1, 2], dtype="float32", fill_value=1.1) @@ -106,7 +110,7 @@ def test_api_eager(self): out_6 = paddle.full(shape=[1, 2], dtype=np.float32, fill_value=1.1) - val = fluid.layers.fill_constant( + val = paddle.tensor.fill_constant( shape=[1], dtype=np.float32, value=1.1 ) out_7 = paddle.full(shape=[1, 2], dtype=np.float32, fill_value=val) diff --git a/python/paddle/fluid/tests/unittests/test_fused_multi_transformer_op.py b/python/paddle/fluid/tests/unittests/test_fused_multi_transformer_op.py index 8d5ecb0b55bce9..d6a7f73a9259f0 100644 --- a/python/paddle/fluid/tests/unittests/test_fused_multi_transformer_op.py +++ b/python/paddle/fluid/tests/unittests/test_fused_multi_transformer_op.py @@ -21,7 +21,6 @@ import paddle import paddle.nn.functional as F from paddle import tensor -from paddle.fluid import layers from paddle.fluid.framework import default_main_program from paddle.incubate.nn import FusedMultiTransformer from paddle.incubate.nn.functional import fused_multi_transformer @@ -862,7 +861,7 @@ def GetFusedMultiTransformerOutStatic(self): assert self.query_length == self.cache_length cache_kv[:] = 0 else: - time_step = layers.fill_constant( + time_step = paddle.tensor.fill_constant( shape=[1], dtype="int32", value=0, force_cpu=True ) time_step_feed = self.cache_length @@ -947,7 
+946,7 @@ def GetFusedMultiTransformerOutStatic(self): for i in range(self.layers): if self.has_cache_kv: cache_kvs.append( - layers.fill_constant( + paddle.tensor.fill_constant( shape=cache_kv.shape, dtype=cache_kv.dtype, value=0 ) ) @@ -955,13 +954,13 @@ def GetFusedMultiTransformerOutStatic(self): if self.has_pre_cache: cache_kvs.append( - layers.fill_constant( + paddle.tensor.fill_constant( shape=cache_kv.shape, dtype=cache_kv.dtype, value=0 ) ) cache_kvs_feed.append(cache_kv) pre_caches.append( - layers.fill_constant( + paddle.tensor.fill_constant( shape=self.pre_cache_kv.shape, dtype=self.pre_cache_kv.dtype, value=0, diff --git a/python/paddle/fluid/tests/unittests/test_gaussian_random_op.py b/python/paddle/fluid/tests/unittests/test_gaussian_random_op.py index f12e7964ca1800..18b08620f96388 100644 --- a/python/paddle/fluid/tests/unittests/test_gaussian_random_op.py +++ b/python/paddle/fluid/tests/unittests/test_gaussian_random_op.py @@ -209,9 +209,9 @@ def init_data(self): # Test python API class TestGaussianRandomAPI(unittest.TestCase): def test_api(self): - positive_2_int32 = fluid.layers.fill_constant([1], "int32", 2000) + positive_2_int32 = paddle.tensor.fill_constant([1], "int32", 2000) - positive_2_int64 = fluid.layers.fill_constant([1], "int64", 500) + positive_2_int64 = paddle.tensor.fill_constant([1], "int64", 500) shape_tensor_int32 = fluid.data( name="shape_tensor_int32", shape=[2], dtype="int32" ) diff --git a/python/paddle/fluid/tests/unittests/test_histogram_op.py b/python/paddle/fluid/tests/unittests/test_histogram_op.py index dc52df4226a297..5d4ae29ba0cb69 100644 --- a/python/paddle/fluid/tests/unittests/test_histogram_op.py +++ b/python/paddle/fluid/tests/unittests/test_histogram_op.py @@ -82,7 +82,7 @@ def test_bins_error(self): """Test bins should be greater than or equal to 1.""" def net_func(): - input_value = paddle.fluid.layers.fill_constant( + input_value = paddle.tensor.fill_constant( shape=[3, 4], dtype='float32', value=3.0 ) paddle.histogram(input=input_value, bins=-1, min=1, max=5) @@ -94,7 +94,7 @@ def test_min_max_error(self): """Test max must be larger or equal to min.""" def net_func(): - input_value = paddle.fluid.layers.fill_constant( + input_value = paddle.tensor.fill_constant( shape=[3, 4], dtype='float32', value=3.0 ) paddle.histogram(input=input_value, bins=1, min=5, max=1) @@ -106,7 +106,7 @@ def test_min_max_range_error(self): """Test range of min, max is not finite""" def net_func(): - input_value = paddle.fluid.layers.fill_constant( + input_value = paddle.tensor.fill_constant( shape=[3, 4], dtype='float32', value=3.0 ) paddle.histogram(input=input_value, bins=1, min=-np.inf, max=5) diff --git a/python/paddle/fluid/tests/unittests/test_imperative_gan.py b/python/paddle/fluid/tests/unittests/test_imperative_gan.py index 34806a8305a724..dee791a0387b21 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_gan.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_gan.py @@ -77,7 +77,7 @@ def test_gan_float32(self): d_loss_real = paddle.mean( paddle.nn.functional.binary_cross_entropy_with_logits( logit=d_real, - label=fluid.layers.fill_constant( + label=paddle.tensor.fill_constant( shape=[2, 1], dtype='float32', value=1.0 ), ) @@ -87,7 +87,7 @@ def test_gan_float32(self): d_loss_fake = paddle.mean( paddle.nn.functional.binary_cross_entropy_with_logits( logit=d_fake, - label=fluid.layers.fill_constant( + label=paddle.tensor.fill_constant( shape=[2, 1], dtype='float32', value=0.0 ), ) @@ -108,7 +108,7 @@ def test_gan_float32(self): 
g_loss = paddle.mean( paddle.nn.functional.binary_cross_entropy_with_logits( logit=d_fake, - label=fluid.layers.fill_constant( + label=paddle.tensor.fill_constant( shape=[2, 1], dtype='float32', value=1.0 ), ) diff --git a/python/paddle/fluid/tests/unittests/test_imperative_layer_apply.py b/python/paddle/fluid/tests/unittests/test_imperative_layer_apply.py index 5e0b30ada9048b..945fc395bf3a1b 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_layer_apply.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_layer_apply.py @@ -53,20 +53,20 @@ def forward(self, inputs): def init_weights(layer): if type(layer) == nn.Linear: - new_weight = paddle.fluid.layers.fill_constant( + new_weight = paddle.tensor.fill_constant( layer.weight.shape, layer.weight.dtype, value=0.9 ) layer.weight.set_value(new_weight) - new_bias = paddle.fluid.layers.fill_constant( + new_bias = paddle.tensor.fill_constant( layer.bias.shape, layer.bias.dtype, value=-0.1 ) layer.bias.set_value(new_bias) elif type(layer) == nn.Conv2D: - new_weight = paddle.fluid.layers.fill_constant( + new_weight = paddle.tensor.fill_constant( layer.weight.shape, layer.weight.dtype, value=0.7 ) layer.weight.set_value(new_weight) - new_bias = paddle.fluid.layers.fill_constant( + new_bias = paddle.tensor.fill_constant( layer.bias.shape, layer.bias.dtype, value=-0.2 ) layer.bias.set_value(new_bias) diff --git a/python/paddle/fluid/tests/unittests/test_imperative_ocr_attention_model.py b/python/paddle/fluid/tests/unittests/test_imperative_ocr_attention_model.py index c9134a6e33ebaf..b2e0331c32ac25 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_ocr_attention_model.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_ocr_attention_model.py @@ -212,7 +212,7 @@ def __init__( ) h_0 = to_variable(h_0) else: - h_0 = fluid.layers.fill_constant( + h_0 = paddle.tensor.fill_constant( shape=[Config.batch_size, rnn_hidden_size], dtype='float32', value=0, diff --git a/python/paddle/fluid/tests/unittests/test_increment.py b/python/paddle/fluid/tests/unittests/test_increment.py index 18901aab4ccdaf..f2427ae2e541ec 100755 --- a/python/paddle/fluid/tests/unittests/test_increment.py +++ b/python/paddle/fluid/tests/unittests/test_increment.py @@ -23,7 +23,7 @@ class TestIncrement(unittest.TestCase): def test_api(self): with fluid.program_guard(fluid.Program(), fluid.Program()): - input = fluid.layers.fill_constant( + input = paddle.tensor.fill_constant( shape=[1], dtype='int64', value=5 ) expected_result = np.array([8], dtype='int64') @@ -45,7 +45,7 @@ def test_increment(self): if fluid.core.is_compiled_with_cuda(): paddle.enable_static() with paddle.fluid.device_guard("gpu:0"): - x = paddle.fluid.layers.fill_constant([1], "float32", 0) + x = paddle.tensor.fill_constant([1], "float32", 0) with paddle.fluid.device_guard("cpu"): x = paddle.increment(x) exe = paddle.static.Executor(paddle.CUDAPlace(0)) diff --git a/python/paddle/fluid/tests/unittests/test_input_spec.py b/python/paddle/fluid/tests/unittests/test_input_spec.py index 2cffae070a38d7..dad821438afb86 100644 --- a/python/paddle/fluid/tests/unittests/test_input_spec.py +++ b/python/paddle/fluid/tests/unittests/test_input_spec.py @@ -19,7 +19,6 @@ import numpy as np import paddle -import paddle.fluid as fluid from paddle.fluid import core from paddle.fluid.framework import convert_np_dtype_to_dtype_ from paddle.jit.dy2static.utils import _compatible_non_tensor_spec @@ -35,7 +34,9 @@ def test_default(self): self.assertIsNone(tensor_spec.name) def 
test_from_tensor(self): - x_bool = fluid.layers.fill_constant(shape=[1], dtype='bool', value=True) + x_bool = paddle.tensor.fill_constant( + shape=[1], dtype='bool', value=True + ) bool_spec = InputSpec.from_tensor(x_bool) self.assertEqual(bool_spec.dtype, x_bool.dtype) self.assertEqual(list(bool_spec.shape), list(x_bool.shape)) diff --git a/python/paddle/fluid/tests/unittests/test_lambv2_op.py b/python/paddle/fluid/tests/unittests/test_lambv2_op.py index f7aa3f6fbd92af..f193eea69f2f9c 100644 --- a/python/paddle/fluid/tests/unittests/test_lambv2_op.py +++ b/python/paddle/fluid/tests/unittests/test_lambv2_op.py @@ -18,7 +18,6 @@ import paddle import paddle.fluid as fluid -import paddle.fluid.layers as layers from paddle.fluid import core from paddle.fluid.dygraph.base import switch_to_static_graph @@ -39,13 +38,13 @@ def _append_optimize_op(self, block, param_and_grad): self._beta2_pow_acc_str, param_and_grad[0] ) - beta_1 = layers.fill_constant( + beta_1 = paddle.tensor.fill_constant( dtype='float32', shape=[1], value=self._beta1, name='lamb_beta_1' ) - beta_2 = layers.fill_constant( + beta_2 = paddle.tensor.fill_constant( dtype='float32', shape=[1], value=self._beta2, name='lamb_beta_2' ) - epsilon = layers.fill_constant( + epsilon = paddle.tensor.fill_constant( dtype='float32', shape=[1], value=self._epsilon, name='epsilon' ) diff --git a/python/paddle/fluid/tests/unittests/test_layers.py b/python/paddle/fluid/tests/unittests/test_layers.py index 9eb04490bff989..ce4e71242a5c36 100644 --- a/python/paddle/fluid/tests/unittests/test_layers.py +++ b/python/paddle/fluid/tests/unittests/test_layers.py @@ -1030,8 +1030,10 @@ def test_conv3d_transpose(self): def test_while_loop(self): with self.static_graph(): - i = layers.fill_constant(shape=[1], dtype='int64', value=0) - ten = layers.fill_constant(shape=[1], dtype='int64', value=10) + i = paddle.tensor.fill_constant(shape=[1], dtype='int64', value=0) + ten = paddle.tensor.fill_constant( + shape=[1], dtype='int64', value=10 + ) def cond(i): return paddle.less_than(i, ten) @@ -1043,8 +1045,10 @@ def body(i): static_ret = self.get_static_graph_result(feed={}, fetch_list=out) with self.dynamic_graph(): - i = layers.fill_constant(shape=[1], dtype='int64', value=0) - ten = layers.fill_constant(shape=[1], dtype='int64', value=10) + i = paddle.tensor.fill_constant(shape=[1], dtype='int64', value=0) + ten = paddle.tensor.fill_constant( + shape=[1], dtype='int64', value=10 + ) def cond1(i): return paddle.less_than(i, ten) @@ -1054,7 +1058,9 @@ def body1(i): dy_ret = paddle.static.nn.while_loop(cond1, body1, [i]) with self.assertRaises(ValueError): - j = layers.fill_constant(shape=[1], dtype='int64', value=0) + j = paddle.tensor.fill_constant( + shape=[1], dtype='int64', value=0 + ) def body2(i): return i + 1, i + 2 @@ -1170,10 +1176,10 @@ def greater_equal_branch(a, b): return paddle.subtract(a, b) with self.static_graph(): - a = fluid.layers.fill_constant( + a = paddle.tensor.fill_constant( shape=[1], dtype='float32', value=0.1 ) - b = fluid.layers.fill_constant( + b = paddle.tensor.fill_constant( shape=[1], dtype='float32', value=0.23 ) out = paddle.static.nn.cond( @@ -1215,18 +1221,30 @@ def greater_equal_branch(a, b): def test_case(self): def fn_1(): - return layers.fill_constant(shape=[1, 2], dtype='float32', value=1) + return paddle.tensor.fill_constant( + shape=[1, 2], dtype='float32', value=1 + ) def fn_2(): - return layers.fill_constant(shape=[2, 2], dtype='int32', value=2) + return paddle.tensor.fill_constant( + shape=[2, 2], dtype='int32', 
value=2 + ) def fn_3(): - return layers.fill_constant(shape=[3], dtype='int32', value=3) + return paddle.tensor.fill_constant( + shape=[3], dtype='int32', value=3 + ) with self.static_graph(): - x = layers.fill_constant(shape=[1], dtype='float32', value=0.3) - y = layers.fill_constant(shape=[1], dtype='float32', value=0.1) - z = layers.fill_constant(shape=[1], dtype='float32', value=0.2) + x = paddle.tensor.fill_constant( + shape=[1], dtype='float32', value=0.3 + ) + y = paddle.tensor.fill_constant( + shape=[1], dtype='float32', value=0.1 + ) + z = paddle.tensor.fill_constant( + shape=[1], dtype='float32', value=0.2 + ) pred_1 = paddle.less_than(z, x) # true: 0.2 < 0.3 pred_2 = paddle.less_than(x, y) # false: 0.3 < 0.1 @@ -1248,9 +1266,15 @@ def fn_3(): static_res1, static_res2 = exe.run(fetch_list=[out_1, out_2]) with self.dynamic_graph(): - x = layers.fill_constant(shape=[1], dtype='float32', value=0.3) - y = layers.fill_constant(shape=[1], dtype='float32', value=0.1) - z = layers.fill_constant(shape=[1], dtype='float32', value=0.2) + x = paddle.tensor.fill_constant( + shape=[1], dtype='float32', value=0.3 + ) + y = paddle.tensor.fill_constant( + shape=[1], dtype='float32', value=0.1 + ) + z = paddle.tensor.fill_constant( + shape=[1], dtype='float32', value=0.2 + ) pred_1 = paddle.less_than(z, x) # true: 0.2 < 0.3 pred_2 = paddle.less_than(x, y) # false: 0.3 < 0.1 @@ -1270,17 +1294,27 @@ def fn_3(): def test_switch_case(self): def fn_1(): - return layers.fill_constant(shape=[1, 2], dtype='float32', value=1) + return paddle.tensor.fill_constant( + shape=[1, 2], dtype='float32', value=1 + ) def fn_2(): - return layers.fill_constant(shape=[2, 2], dtype='int32', value=2) + return paddle.tensor.fill_constant( + shape=[2, 2], dtype='int32', value=2 + ) def fn_3(): - return layers.fill_constant(shape=[3], dtype='int32', value=3) + return paddle.tensor.fill_constant( + shape=[3], dtype='int32', value=3 + ) with self.static_graph(): - index_1 = layers.fill_constant(shape=[1], dtype='int32', value=1) - index_2 = layers.fill_constant(shape=[1], dtype='int32', value=2) + index_1 = paddle.tensor.fill_constant( + shape=[1], dtype='int32', value=1 + ) + index_2 = paddle.tensor.fill_constant( + shape=[1], dtype='int32', value=2 + ) out_1 = paddle.static.nn.switch_case( branch_index=index_1, @@ -1308,8 +1342,12 @@ def fn_3(): ) with self.dynamic_graph(): - index_1 = layers.fill_constant(shape=[1], dtype='int32', value=1) - index_2 = layers.fill_constant(shape=[1], dtype='int32', value=2) + index_1 = paddle.tensor.fill_constant( + shape=[1], dtype='int32', value=1 + ) + index_2 = paddle.tensor.fill_constant( + shape=[1], dtype='int32', value=2 + ) out_1 = paddle.static.nn.switch_case( branch_index=index_1, @@ -1987,9 +2025,15 @@ def make_range(self): paddle.arange(0, 10, 2, 'int32') paddle.arange(0.1, 10.0, 0.2, 'float32') paddle.arange(0.1, 10.0, 0.2, 'float64') - start = layers.fill_constant(shape=[1], value=0.1, dtype="float32") - end = layers.fill_constant(shape=[1], value=10.0, dtype="float32") - step = layers.fill_constant(shape=[1], value=0.2, dtype="float32") + start = paddle.tensor.fill_constant( + shape=[1], value=0.1, dtype="float32" + ) + end = paddle.tensor.fill_constant( + shape=[1], value=10.0, dtype="float32" + ) + step = paddle.tensor.fill_constant( + shape=[1], value=0.2, dtype="float32" + ) y = paddle.arange(start, end, step, 'float64') return y @@ -2088,7 +2132,7 @@ def test_stridedslice(self): def test_fill_constant_batch_size_like(self): with self.static_graph(): - like = 
fluid.layers.fill_constant( + like = paddle.tensor.fill_constant( shape=[1, 200], value=10, dtype='int64' ) out = layers.fill_constant_batch_size_like( diff --git a/python/paddle/fluid/tests/unittests/test_lod_array_length_op.py b/python/paddle/fluid/tests/unittests/test_lod_array_length_op.py index d9a1237d3b4402..990a5ffc9fea79 100644 --- a/python/paddle/fluid/tests/unittests/test_lod_array_length_op.py +++ b/python/paddle/fluid/tests/unittests/test_lod_array_length_op.py @@ -26,7 +26,7 @@ class TestLoDArrayLength(unittest.TestCase): def test_array_length(self): tmp = layers.zeros(shape=[10], dtype='int32') - i = layers.fill_constant(shape=[1], dtype='int64', value=10) + i = paddle.tensor.fill_constant(shape=[1], dtype='int64', value=10) arr = paddle.tensor.array_write(tmp, i=i) arr_len = paddle.tensor.array_length(arr) cpu = core.CPUPlace() diff --git a/python/paddle/fluid/tests/unittests/test_optimizer_in_control_flow.py b/python/paddle/fluid/tests/unittests/test_optimizer_in_control_flow.py index 58bb6192f9e336..90b990f636d786 100644 --- a/python/paddle/fluid/tests/unittests/test_optimizer_in_control_flow.py +++ b/python/paddle/fluid/tests/unittests/test_optimizer_in_control_flow.py @@ -19,7 +19,6 @@ import paddle import paddle.fluid as fluid import paddle.fluid.core as core -import paddle.fluid.layers as layers import paddle.fluid.optimizer as optimizer from paddle.fluid.framework import Program, program_guard @@ -98,7 +97,7 @@ def fn_2(opt, avg_loss=None, pred=None, label=None): sgd = optimizer.SGD(learning_rate=LR) id = fluid.data('id', [1], 'int32') - two = layers.fill_constant([1], 'int32', 2) + two = paddle.tensor.fill_constant([1], 'int32', 2) mod_two = paddle.remainder(id, two) == 0 if loss_in_switch: diff --git a/python/paddle/fluid/tests/unittests/test_profiler.py b/python/paddle/fluid/tests/unittests/test_profiler.py index c176af4ad9ef86..e30f77c725d519 100644 --- a/python/paddle/fluid/tests/unittests/test_profiler.py +++ b/python/paddle/fluid/tests/unittests/test_profiler.py @@ -45,7 +45,7 @@ def build_program(self, compile_program=True): counter = fluid.layers.zeros( shape=[1], dtype='int64', force_cpu=True ) - until = layers.fill_constant([1], dtype='int64', value=10) + until = paddle.tensor.fill_constant([1], dtype='int64', value=10) data_arr = paddle.tensor.array_write(hidden1, i) cond = paddle.less_than(x=counter, y=until) while_op = paddle.static.nn.control_flow.While(cond=cond) diff --git a/python/paddle/fluid/tests/unittests/test_program_code.py b/python/paddle/fluid/tests/unittests/test_program_code.py index 3ecd2619c15fcf..673fd5655b8529 100644 --- a/python/paddle/fluid/tests/unittests/test_program_code.py +++ b/python/paddle/fluid/tests/unittests/test_program_code.py @@ -16,7 +16,6 @@ import paddle import paddle.fluid as fluid -import paddle.fluid.layers as layers class TestProgramToReadableCode(unittest.TestCase): @@ -37,14 +36,22 @@ def setUp(self): def append_cond_op(self, program): def true_func(): - return layers.fill_constant(shape=[2, 3], dtype='int32', value=2) + return paddle.tensor.fill_constant( + shape=[2, 3], dtype='int32', value=2 + ) def false_func(): - return layers.fill_constant(shape=[3, 2], dtype='int32', value=-1) + return paddle.tensor.fill_constant( + shape=[3, 2], dtype='int32', value=-1 + ) with fluid.program_guard(program): - x = layers.fill_constant(shape=[1], dtype='float32', value=0.1) - y = layers.fill_constant(shape=[1], dtype='float32', value=0.23) + x = paddle.tensor.fill_constant( + shape=[1], dtype='float32', value=0.1 + ) + y = 
diff --git a/python/paddle/fluid/tests/unittests/test_program_prune_backward.py b/python/paddle/fluid/tests/unittests/test_program_prune_backward.py
index 885c8fa829aa98..7693f621397275 100755
--- a/python/paddle/fluid/tests/unittests/test_program_prune_backward.py
+++ b/python/paddle/fluid/tests/unittests/test_program_prune_backward.py
@@ -72,7 +72,7 @@ def loss2(pred, label):
         avg_loss = paddle.mean(loss, name='mean_softmax_loss')
         return avg_loss
 
-    two = fluid.layers.fill_constant([1], 'int32', 2)
+    two = paddle.tensor.fill_constant([1], 'int32', 2)
     pred = two == 0
     avg_loss = paddle.static.nn.case(
         [(pred, lambda: loss1(prediction, label))],
@@ -106,7 +106,7 @@ def loss2(opt, pred, label, with_optimize):
         return avg_loss
 
     sgd = fluid.optimizer.SGD(learning_rate=0.1)
-    two = fluid.layers.fill_constant([1], 'int32', 2)
+    two = paddle.tensor.fill_constant([1], 'int32', 2)
     pred = two == 0
     avg_loss = paddle.static.nn.case(
         [(pred, lambda: loss1(sgd, prediction, label, with_optimize))],
diff --git a/python/paddle/fluid/tests/unittests/test_rand_op.py b/python/paddle/fluid/tests/unittests/test_rand_op.py
index f4daf2b7ec4de4..9f59c3d39085d6 100644
--- a/python/paddle/fluid/tests/unittests/test_rand_op.py
+++ b/python/paddle/fluid/tests/unittests/test_rand_op.py
@@ -42,8 +42,8 @@ def test_Variable():
         self.assertRaises(TypeError, test_Variable)
 
         def test_dtype():
-            dim_1 = fluid.layers.fill_constant([1], "int64", 3)
-            dim_2 = fluid.layers.fill_constant([1], "int32", 5)
+            dim_1 = paddle.tensor.fill_constant([1], "int64", 3)
+            dim_2 = paddle.tensor.fill_constant([1], "int32", 5)
             rand(shape=[dim_1, dim_2], dtype='int32')
 
         self.assertRaises(TypeError, test_dtype)
@@ -64,8 +64,8 @@ def run_net(self, use_cuda=False):
             result_0 = rand([3, 4])
             result_1 = rand([3, 4], 'float64')
 
-            dim_1 = fluid.layers.fill_constant([1], "int64", 3)
-            dim_2 = fluid.layers.fill_constant([1], "int32", 5)
+            dim_1 = paddle.tensor.fill_constant([1], "int64", 3)
+            dim_2 = paddle.tensor.fill_constant([1], "int32", 5)
             result_2 = rand(shape=[dim_1, dim_2])
 
             var_shape = fluid.data(name='var_shape', shape=[2], dtype="int64")
@@ -104,8 +104,8 @@ def run_net(self, use_cuda=False):
             rand([3, 4], 'float64')
 
-            dim_1 = fluid.layers.fill_constant([1], "int64", 3)
-            dim_2 = fluid.layers.fill_constant([1], "int32", 5)
+            dim_1 = paddle.tensor.fill_constant([1], "int64", 3)
+            dim_2 = paddle.tensor.fill_constant([1], "int32", 5)
             rand(shape=[dim_1, dim_2])
 
             var_shape = fluid.dygraph.to_variable(np.array([3, 4]))
diff --git a/python/paddle/fluid/tests/unittests/test_randint_op.py b/python/paddle/fluid/tests/unittests/test_randint_op.py
index 9cf21bcee7c697..73ee7b07b46075 100644
--- a/python/paddle/fluid/tests/unittests/test_randint_op.py
+++ b/python/paddle/fluid/tests/unittests/test_randint_op.py
@@ -126,8 +126,8 @@ def test_api(self):
             low=-100, high=100, shape=(32, 32, 3), dtype='int64'
         )
         # shape is a tensorlist and dtype is 'float32'
-        dim_1 = paddle.fluid.layers.fill_constant([1], "int64", 32)
-        dim_2 = paddle.fluid.layers.fill_constant([1], "int32", 50)
+        dim_1 = paddle.tensor.fill_constant([1], "int64", 32)
+        dim_2 = paddle.tensor.fill_constant([1], "int32", 50)
         out4 = paddle.randint(
             low=-100, high=100, shape=[dim_1, 5, dim_2], dtype='int32'
         )
diff --git a/python/paddle/fluid/tests/unittests/test_randn_op.py b/python/paddle/fluid/tests/unittests/test_randn_op.py
index 8c9d89bd4037a2..2fc9126ad059df 100644
--- a/python/paddle/fluid/tests/unittests/test_randn_op.py
+++ b/python/paddle/fluid/tests/unittests/test_randn_op.py
@@ -30,8 +30,8 @@ def test_api(self):
         x1 = paddle.randn(shape, 'float32')
         x2 = paddle.randn(shape, 'float64')
 
-        dim_1 = paddle.fluid.layers.fill_constant([1], "int64", 20)
-        dim_2 = paddle.fluid.layers.fill_constant([1], "int32", 50)
+        dim_1 = paddle.tensor.fill_constant([1], "int64", 20)
+        dim_2 = paddle.tensor.fill_constant([1], "int32", 50)
         x3 = paddle.randn([dim_1, dim_2, 784])
 
         var_shape = paddle.static.data('X', [2], 'int32')
@@ -66,8 +66,8 @@ def test_api(self):
         x1 = paddle.randn(shape, 'float32')
         x2 = paddle.randn(shape, 'float64')
 
-        dim_1 = paddle.fluid.layers.fill_constant([1], "int64", 20)
-        dim_2 = paddle.fluid.layers.fill_constant([1], "int32", 50)
+        dim_1 = paddle.tensor.fill_constant([1], "int64", 20)
+        dim_2 = paddle.tensor.fill_constant([1], "int32", 50)
         x3 = paddle.randn(shape=[dim_1, dim_2, 784])
 
         var_shape = paddle.to_tensor(np.array(shape))
diff --git a/python/paddle/fluid/tests/unittests/test_reshape_op.py b/python/paddle/fluid/tests/unittests/test_reshape_op.py
index 40c007a3af1da2..b83d7250def5cb 100755
--- a/python/paddle/fluid/tests/unittests/test_reshape_op.py
+++ b/python/paddle/fluid/tests/unittests/test_reshape_op.py
@@ -327,7 +327,7 @@ def test_check_grad(self):
 # Test python API
 class TestReshapeAPI(unittest.TestCase):
     def _set_paddle_api(self):
-        self.fill_constant = paddle.fluid.layers.fill_constant
+        self.fill_constant = paddle.tensor.fill_constant
         self.data = paddle.static.data
         self.to_tensor = paddle.to_tensor
         self._executed_api()
diff --git a/python/paddle/fluid/tests/unittests/test_retain_graph.py b/python/paddle/fluid/tests/unittests/test_retain_graph.py
index eb6ab6c1e2487d..4cc4d959adb210 100644
--- a/python/paddle/fluid/tests/unittests/test_retain_graph.py
+++ b/python/paddle/fluid/tests/unittests/test_retain_graph.py
@@ -80,7 +80,7 @@ def cal_gradient_penalty(
         fake_AB = paddle.concat((real_data.detach(), interpolatesv), 1)
         disc_interpolates = netD(fake_AB)
 
-        outs = paddle.fluid.layers.fill_constant(
+        outs = paddle.tensor.fill_constant(
             disc_interpolates.shape, disc_interpolates.dtype, 1.0
         )
         gradients = paddle.grad(
@@ -125,7 +125,7 @@ def run_retain(self, need_retain):
         fake_AB = paddle.concat((realA, fakeB), 1)
         G_pred_fake = d(fake_AB.detach())
-        false_target = paddle.fluid.layers.fill_constant(
+        false_target = paddle.tensor.fill_constant(
             G_pred_fake.shape, 'float32', 0.0
         )
@@ -140,7 +140,7 @@ def run_retain(self, need_retain):
         optim_g.clear_gradients()
         fake_AB = paddle.concat((realA, fakeB), 1)
         G_pred_fake = d(fake_AB)
-        true_target = paddle.fluid.layers.fill_constant(
+        true_target = paddle.tensor.fill_constant(
             G_pred_fake.shape, 'float32', 1.0
         )
         loss_g = l1_criterion(fakeB, realB) + gan_criterion(
diff --git a/python/paddle/fluid/tests/unittests/test_set_value_op.py b/python/paddle/fluid/tests/unittests/test_set_value_op.py
index 4adc9aa8bf7edc..914bdba872e260 100644
--- a/python/paddle/fluid/tests/unittests/test_set_value_op.py
+++ b/python/paddle/fluid/tests/unittests/test_set_value_op.py
@@ -1306,19 +1306,13 @@ def test_static_graph(self):
         numel = lambda input_shape: reduce(lambda x, y: x * y, input_shape)
 
         def op1(x):
-            value = paddle.fluid.layers.fill_constant([1], "float32", 1)
+            value = paddle.tensor.fill_constant([1], "float32", 1)
             # test stop_gradient
             value.stop_gradient = True
             x.stop_gradient = False
-            start = paddle.fluid.layers.fill_constant(
-                [1], "int32", 5, force_cpu=True
-            )
-            end = paddle.fluid.layers.fill_constant(
-                [1], "int32", 0, force_cpu=True
-            )
-            step = paddle.fluid.layers.fill_constant(
-                [1], "int32", -2, force_cpu=True
-            )
+            start = paddle.tensor.fill_constant([1], "int32", 5, force_cpu=True)
+            end = paddle.tensor.fill_constant([1], "int32", 0, force_cpu=True)
+            step = paddle.tensor.fill_constant([1], "int32", -2, force_cpu=True)
 
             inputs = {
                 'Input': x,
@@ -1347,7 +1341,7 @@ def op1(x):
             return y, value
 
         def op2(x):
-            value = paddle.fluid.layers.fill_constant([1, 3, 2], "float32", 1)
+            value = paddle.tensor.fill_constant([1, 3, 2], "float32", 1)
             # test stop_gradient
             value.stop_gradient = False
             x.stop_gradient = False
@@ -1372,18 +1366,12 @@ def op2(x):
             return y, value
 
         def op3(x):
-            value = paddle.fluid.layers.fill_constant([1], "float32", 1)
+            value = paddle.tensor.fill_constant([1], "float32", 1)
             x.stop_gradient = True
             value.stop_gradient = False
-            start = paddle.fluid.layers.fill_constant(
-                [1], "int32", 0, force_cpu=True
-            )
-            end = paddle.fluid.layers.fill_constant(
-                [1], "int32", 5, force_cpu=True
-            )
-            step = paddle.fluid.layers.fill_constant(
-                [1], "int32", 3, force_cpu=True
-            )
+            start = paddle.tensor.fill_constant([1], "int32", 0, force_cpu=True)
+            end = paddle.tensor.fill_constant([1], "int32", 5, force_cpu=True)
+            step = paddle.tensor.fill_constant([1], "int32", 3, force_cpu=True)
 
             inputs = {
                 'Input': x,
diff --git a/python/paddle/fluid/tests/unittests/test_sgd_op.py b/python/paddle/fluid/tests/unittests/test_sgd_op.py
index f9d6b729ce43ca..7557f7375e140b 100644
--- a/python/paddle/fluid/tests/unittests/test_sgd_op.py
+++ b/python/paddle/fluid/tests/unittests/test_sgd_op.py
@@ -196,8 +196,8 @@ def test_sparse_parameter_sgd(self):
 class TestSGDOpWithLargeInput(unittest.TestCase):
     def runTest(self):
         paddle.enable_static()
-        data = fluid.layers.fill_constant(shape=[1], value=128, dtype='int64')
-        label = fluid.layers.fill_constant(
+        data = paddle.tensor.fill_constant(shape=[1], value=128, dtype='int64')
+        label = paddle.tensor.fill_constant(
             shape=[1, 150], value=0.5, dtype='float32'
         )
         emb = paddle.static.nn.embedding(
diff --git a/python/paddle/fluid/tests/unittests/test_slice_op.py b/python/paddle/fluid/tests/unittests/test_slice_op.py
index 472fc2d82398ea..7b095894185742 100644
--- a/python/paddle/fluid/tests/unittests/test_slice_op.py
+++ b/python/paddle/fluid/tests/unittests/test_slice_op.py
@@ -582,8 +582,8 @@ def test_check_grad_normal(self):
 class TestSliceAPI(unittest.TestCase):
     def test_1(self):
         input = np.random.random([3, 4, 5, 6]).astype("float64")
-        minus_1 = fluid.layers.fill_constant([1], "int32", -1)
-        minus_3 = fluid.layers.fill_constant([1], "int64", -3)
+        minus_1 = paddle.tensor.fill_constant([1], "int32", -1)
+        minus_3 = paddle.tensor.fill_constant([1], "int64", -3)
         starts = paddle.static.data(
             name='starts', shape=[1, 3], dtype="float32"
         )
@@ -597,7 +597,7 @@ def test_1(self):
         )
 
         # value_int64 is greater than 2147483647 which is the max of int32
-        value_int64 = fluid.layers.fill_constant([1], "int64", 2147483648)
+        value_int64 = paddle.tensor.fill_constant([1], "int64", 2147483648)
 
         out_1 = paddle.slice(
             x, axes=[0, 1, 2], starts=[-3, 0, 2], ends=[value_int64, 100, -1]
@@ -739,7 +739,7 @@ def set_program_and_run(self, main_program, case_num):
                 slice_arr, axis=self.axis, use_stack=True
             )
         elif case_num == 3:
-            value_int64 = fluid.layers.fill_constant(
+            value_int64 = paddle.tensor.fill_constant(
                 [1], "int64", 2147483648
             )
             self.sliced_arr = slice_arr = arr[self.start : value_int64]
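For context, these slice tests use fill_constant to build 1-D integer tensors that feed `paddle.slice` bounds; a hedged sketch of that pattern (shapes and names illustrative only):

    import paddle

    paddle.enable_static()
    x = paddle.static.data(name='x', shape=[3, 4, 5, 6], dtype='float64')
    # a 1-element int64 tensor can carry an end index above the int32 max
    value_int64 = paddle.tensor.fill_constant([1], "int64", 2147483648)
    out = paddle.slice(x, axes=[0], starts=[-3], ends=[value_int64])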
diff --git a/python/paddle/fluid/tests/unittests/test_split_op.py b/python/paddle/fluid/tests/unittests/test_split_op.py
index 5208fa32883906..e1987e9b2ea90d 100644
--- a/python/paddle/fluid/tests/unittests/test_split_op.py
+++ b/python/paddle/fluid/tests/unittests/test_split_op.py
@@ -281,9 +281,9 @@ def test_check_grad(self):
 class TestSplitAPI(unittest.TestCase):
     def test_api(self):
         input_1 = np.random.random([4, 5, 6]).astype("int32")
-        positive_1_int32 = fluid.layers.fill_constant([1], "int32", 1)
-        positive_1_int64 = fluid.layers.fill_constant([1], "int64", 1)
-        positive_2_int64 = fluid.layers.fill_constant([1], "int64", 2)
+        positive_1_int32 = paddle.tensor.fill_constant([1], "int32", 1)
+        positive_1_int64 = paddle.tensor.fill_constant([1], "int64", 1)
+        positive_2_int64 = paddle.tensor.fill_constant([1], "int64", 2)
         x_1 = fluid.data(shape=[4, 5, 6], dtype='int32', name='x_1')
         x_2 = fluid.data(shape=[4, 5, None], dtype='int32', name='x_2')
diff --git a/python/paddle/fluid/tests/unittests/test_stack_op.py b/python/paddle/fluid/tests/unittests/test_stack_op.py
index a74439ebd3b4fe..b065b603a66ca1 100644
--- a/python/paddle/fluid/tests/unittests/test_stack_op.py
+++ b/python/paddle/fluid/tests/unittests/test_stack_op.py
@@ -176,7 +176,9 @@ def set_program(self):
         with fluid.program_guard(self.program):
             input = paddle.assign(self.x)
             tensor_array = paddle.tensor.create_array(dtype='float32')
-            zero = fluid.layers.fill_constant(shape=[1], value=0, dtype="int64")
+            zero = paddle.tensor.fill_constant(
+                shape=[1], value=0, dtype="int64"
+            )
 
             for i in range(self.iter_num):
                 paddle.tensor.array_write(input, zero + i, tensor_array)
@@ -214,7 +216,9 @@ def set_program(self):
         with fluid.program_guard(self.program):
             input = paddle.assign(self.x)
             tensor_array = paddle.tensor.create_array(dtype='float32')
-            zero = fluid.layers.fill_constant(shape=[1], value=0, dtype="int64")
+            zero = paddle.tensor.fill_constant(
+                shape=[1], value=0, dtype="int64"
+            )
 
             for i in range(self.iter_num):
                 paddle.tensor.array_write(input, zero + i, tensor_array)
diff --git a/python/paddle/fluid/tests/unittests/test_strided_slice_op.py b/python/paddle/fluid/tests/unittests/test_strided_slice_op.py
index 05a2631d775d0f..dc6b396dd2a41e 100644
--- a/python/paddle/fluid/tests/unittests/test_strided_slice_op.py
+++ b/python/paddle/fluid/tests/unittests/test_strided_slice_op.py
@@ -548,8 +548,8 @@ def test_check_grad_normal(self):
 class TestStridedSliceAPI(unittest.TestCase):
     def test_1(self):
         input = np.random.random([3, 4, 5, 6]).astype("float64")
-        minus_1 = fluid.layers.fill_constant([1], "int32", -1)
-        minus_3 = fluid.layers.fill_constant([1], "int32", -3)
+        minus_1 = paddle.tensor.fill_constant([1], "int32", -1)
+        minus_3 = paddle.tensor.fill_constant([1], "int32", -3)
         starts = paddle.static.data(name='starts', shape=[3], dtype='int32')
         ends = paddle.static.data(name='ends', shape=[3], dtype='int32')
         strides = paddle.static.data(name='strides', shape=[3], dtype='int32')
diff --git a/python/paddle/fluid/tests/unittests/test_sum_op.py b/python/paddle/fluid/tests/unittests/test_sum_op.py
index 2f9a811fcbcda0..a0c0d7757e5eaa 100644
--- a/python/paddle/fluid/tests/unittests/test_sum_op.py
+++ b/python/paddle/fluid/tests/unittests/test_sum_op.py
@@ -362,10 +362,10 @@ def test_check_grad(self):
 class API_Test_Add_n(unittest.TestCase):
     def test_api(self):
         with fluid.program_guard(fluid.Program(), fluid.Program()):
-            input0 = fluid.layers.fill_constant(
+            input0 = paddle.tensor.fill_constant(
                 shape=[2, 3], dtype='int64', value=5
             )
-            input1 = fluid.layers.fill_constant(
+            input1 = paddle.tensor.fill_constant(
                 shape=[2, 3], dtype='int64', value=3
             )
             expected_result = np.empty((2, 3))
diff --git a/python/paddle/fluid/tests/unittests/test_switch.py b/python/paddle/fluid/tests/unittests/test_switch.py
index 7e91c77c56806d..5ae054d51ab814 100644
--- a/python/paddle/fluid/tests/unittests/test_switch.py
+++ b/python/paddle/fluid/tests/unittests/test_switch.py
@@ -24,11 +24,19 @@
 class TestSwitch(unittest.TestCase):
     def check_switch(self, value):
-        x = layers.fill_constant(shape=[1], dtype='float32', value=value)
-        zero_var = layers.fill_constant(shape=[1], dtype='float32', value=0.0)
-        one_var = layers.fill_constant(shape=[1], dtype='float32', value=1.0)
-        two_var = layers.fill_constant(shape=[1], dtype='float32', value=2.0)
-        three_var = layers.fill_constant(shape=[1], dtype='float32', value=3.0)
+        x = paddle.tensor.fill_constant(shape=[1], dtype='float32', value=value)
+        zero_var = paddle.tensor.fill_constant(
+            shape=[1], dtype='float32', value=0.0
+        )
+        one_var = paddle.tensor.fill_constant(
+            shape=[1], dtype='float32', value=1.0
+        )
+        two_var = paddle.tensor.fill_constant(
+            shape=[1], dtype='float32', value=2.0
+        )
+        three_var = paddle.tensor.fill_constant(
+            shape=[1], dtype='float32', value=3.0
+        )
 
         result = paddle.static.create_global_var(
             shape=[1], value=-1.0, dtype='float32', persistable=True
@@ -66,8 +74,10 @@ def test_error(self):
         main_program = framework.Program()
         startup_program = framework.Program()
         with framework.program_guard(main_program, startup_program):
-            cond = layers.fill_constant(shape=[1], dtype='float32', value=0.0)
-            zero_var = layers.fill_constant(
+            cond = paddle.tensor.fill_constant(
+                shape=[1], dtype='float32', value=0.0
+            )
+            zero_var = paddle.tensor.fill_constant(
                 shape=[1], dtype='float32', value=0.0
             )
diff --git a/python/paddle/fluid/tests/unittests/test_switch_case.py b/python/paddle/fluid/tests/unittests/test_switch_case.py
index 3fad3bdfd0c0db..5a1e8fb451b63d 100644
--- a/python/paddle/fluid/tests/unittests/test_switch_case.py
+++ b/python/paddle/fluid/tests/unittests/test_switch_case.py
@@ -20,7 +20,6 @@
 import paddle
 import paddle.fluid as fluid
 import paddle.fluid.core as core
-import paddle.fluid.layers as layers
 from paddle.fluid.backward import append_backward
 from paddle.fluid.framework import Program, program_guard
@@ -30,20 +29,32 @@ class TestAPISwitchCase(unittest.TestCase):
     def test_return_single_var(self):
         def fn_1():
-            return layers.fill_constant(shape=[4, 2], dtype='int32', value=1)
+            return paddle.tensor.fill_constant(
+                shape=[4, 2], dtype='int32', value=1
+            )
 
         def fn_2():
-            return layers.fill_constant(shape=[4, 2], dtype='int32', value=2)
+            return paddle.tensor.fill_constant(
+                shape=[4, 2], dtype='int32', value=2
+            )
 
         def fn_3():
-            return layers.fill_constant(shape=[4, 3], dtype='int32', value=3)
+            return paddle.tensor.fill_constant(
+                shape=[4, 3], dtype='int32', value=3
+            )
 
         main_program = Program()
         startup_program = Program()
         with program_guard(main_program, startup_program):
-            index_1 = layers.fill_constant(shape=[1], dtype='int32', value=1)
-            index_2 = layers.fill_constant(shape=[1], dtype='int32', value=2)
-            index_5 = layers.fill_constant(shape=[1], dtype='int32', value=5)
+            index_1 = paddle.tensor.fill_constant(
+                shape=[1], dtype='int32', value=1
+            )
+            index_2 = paddle.tensor.fill_constant(
+                shape=[1], dtype='int32', value=2
+            )
+            index_5 = paddle.tensor.fill_constant(
+                shape=[1], dtype='int32', value=5
+            )
 
             # call fn_1
             out_0 = paddle.static.nn.switch_case(
@@ -322,24 +333,32 @@ def fn_3():
     def test_return_var_tuple(self):
         def fn_1():
-            return layers.fill_constant(
+            return paddle.tensor.fill_constant(
                 shape=[1, 2], dtype='int32', value=1
-            ), layers.fill_constant(shape=[2, 3], dtype='float32', value=2)
+            ), paddle.tensor.fill_constant(
+                shape=[2, 3], dtype='float32', value=2
+            )
 
         def fn_2():
-            return layers.fill_constant(
+            return paddle.tensor.fill_constant(
                 shape=[3, 4], dtype='int32', value=3
-            ), layers.fill_constant(shape=[4, 5], dtype='float32', value=4)
+            ), paddle.tensor.fill_constant(
+                shape=[4, 5], dtype='float32', value=4
+            )
 
         def fn_3():
-            return layers.fill_constant(
+            return paddle.tensor.fill_constant(
                 shape=[5], dtype='int32', value=5
-            ), layers.fill_constant(shape=[5, 6], dtype='float32', value=6)
+            ), paddle.tensor.fill_constant(
+                shape=[5, 6], dtype='float32', value=6
+            )
 
         main_program = Program()
         startup_program = Program()
         with program_guard(main_program, startup_program):
-            index_1 = layers.fill_constant(shape=[1], dtype='int32', value=1)
+            index_1 = paddle.tensor.fill_constant(
+                shape=[1], dtype='int32', value=1
+            )
 
             out = paddle.static.nn.switch_case(
                 index_1, ((1, fn_1), (2, fn_2)), fn_3
@@ -365,15 +384,21 @@ class TestAPISwitchCase_Nested(unittest.TestCase):
     def test_nested_switch_case(self):
         def fn_1(x=1):
             out = paddle.static.nn.switch_case(
-                branch_index=layers.fill_constant(
+                branch_index=paddle.tensor.fill_constant(
                     shape=[1], dtype='int32', value=x
                 ),
                 branch_fns={
                     1: partial(
-                        layers.fill_constant, shape=[1], dtype='int32', value=1
+                        paddle.tensor.fill_constant,
+                        shape=[1],
+                        dtype='int32',
+                        value=1,
                     ),
                     x: partial(
-                        layers.fill_constant, shape=[2], dtype='int32', value=x
+                        paddle.tensor.fill_constant,
+                        shape=[2],
+                        dtype='int32',
+                        value=x,
                     ),
                 },
             )
@@ -381,12 +406,12 @@ def fn_1(x=1):
 
         def fn_2(x=2):
             out = paddle.static.nn.switch_case(
-                branch_index=layers.fill_constant(
+                branch_index=paddle.tensor.fill_constant(
                     shape=[1], dtype='int32', value=2
                 ),
                 branch_fns={
                     1: partial(
-                        layers.fill_constant,
+                        paddle.tensor.fill_constant,
                         shape=[4, 3],
                         dtype='int32',
                         value=1,
@@ -398,12 +423,12 @@ def fn_2(x=2):
 
         def fn_3():
             out = paddle.static.nn.switch_case(
-                branch_index=layers.fill_constant(
+                branch_index=paddle.tensor.fill_constant(
                     shape=[1], dtype='int32', value=3
                 ),
                 branch_fns={
                     1: partial(
-                        layers.fill_constant,
+                        paddle.tensor.fill_constant,
                         shape=[4, 3],
                         dtype='int32',
                         value=1,
@@ -417,8 +442,12 @@ def fn_3():
         startup_program = Program()
         with program_guard(main_program, startup_program):
             index_1 = fluid.data(name="index_1", shape=[1], dtype='uint8')
-            index_2 = layers.fill_constant(shape=[1], dtype='int32', value=2)
-            index_3 = layers.fill_constant(shape=[1], dtype='int64', value=3)
+            index_2 = paddle.tensor.fill_constant(
+                shape=[1], dtype='int32', value=2
+            )
+            index_3 = paddle.tensor.fill_constant(
+                shape=[1], dtype='int64', value=3
+            )
 
             out_1 = paddle.static.nn.switch_case(
                 branch_index=index_1, branch_fns={1: fn_1, 2: fn_2, 3: fn_3}
@@ -566,21 +595,27 @@ def fn_3():
 class TestAPISwitchCase_Error(unittest.TestCase):
     def test_error(self):
         def fn_1():
-            return layers.fill_constant(shape=[4, 2], dtype='int32', value=1)
+            return paddle.tensor.fill_constant(
+                shape=[4, 2], dtype='int32', value=1
+            )
 
         def fn_2():
-            return layers.fill_constant(shape=[4, 2], dtype='int32', value=2)
+            return paddle.tensor.fill_constant(
+                shape=[4, 2], dtype='int32', value=2
+            )
         def fn_3():
-            return layers.fill_constant(shape=[4, 3], dtype='int32', value=3)
+            return paddle.tensor.fill_constant(
+                shape=[4, 3], dtype='int32', value=3
+            )
 
         main_program = Program()
         startup_program = Program()
         with program_guard(main_program, startup_program):
-            key_float32 = layers.fill_constant(
+            key_float32 = paddle.tensor.fill_constant(
                 shape=[1], dtype='float32', value=0.23
             )
-            key_int32 = layers.fill_constant(
+            key_int32 = paddle.tensor.fill_constant(
                 shape=[1], dtype='int32', value=0.23
             )
diff --git a/python/paddle/fluid/tests/unittests/test_tensor_array_to_tensor.py b/python/paddle/fluid/tests/unittests/test_tensor_array_to_tensor.py
index 998a4744e9e671..bc4f77eaa1a5c2 100644
--- a/python/paddle/fluid/tests/unittests/test_tensor_array_to_tensor.py
+++ b/python/paddle/fluid/tests/unittests/test_tensor_array_to_tensor.py
@@ -195,7 +195,7 @@ def set_program(self):
         self.program = fluid.Program()
         with fluid.program_guard(self.program):
             self.array = array = paddle.tensor.create_array(dtype='float32')
-            idx = fluid.layers.fill_constant(shape=[1], dtype="int64", value=0)
+            idx = paddle.tensor.fill_constant(shape=[1], dtype="int64", value=0)
             for i, x in enumerate(self.inputs):
                 x = paddle.assign(x)
                 paddle.tensor.array_write(x, idx + i, array)
@@ -238,7 +238,7 @@ def _test_case(self, inp1, inp2):
             x0.stop_gradient = False
             x1 = paddle.assign(inp2)
             x1.stop_gradient = False
-            i = fluid.layers.fill_constant(shape=[1], dtype="int64", value=0)
+            i = paddle.tensor.fill_constant(shape=[1], dtype="int64", value=0)
             array = paddle.tensor.create_array(dtype='float32')
             paddle.tensor.array_write(x0, i, array)
             paddle.tensor.array_write(x1, i + 1, array)
@@ -273,9 +273,13 @@ def test_case(self):
     def test_while_loop_case(self):
         with fluid.dygraph.guard():
-            zero = fluid.layers.fill_constant(shape=[1], dtype='int64', value=0)
-            i = fluid.layers.fill_constant(shape=[1], dtype='int64', value=1)
-            ten = fluid.layers.fill_constant(shape=[1], dtype='int64', value=10)
+            zero = paddle.tensor.fill_constant(
+                shape=[1], dtype='int64', value=0
+            )
+            i = paddle.tensor.fill_constant(shape=[1], dtype='int64', value=1)
+            ten = paddle.tensor.fill_constant(
+                shape=[1], dtype='int64', value=10
+            )
             array = paddle.tensor.create_array(dtype='float32')
             inp0 = np.random.rand(2, 3, 4).astype("float32")
             x0 = paddle.assign(inp0)
@@ -294,7 +298,9 @@ def body(i, end, array):
             )
             self.assertTrue(paddle.tensor.array_length(array), 10)
 
-            last = fluid.layers.fill_constant(shape=[1], dtype='int64', value=9)
+            last = paddle.tensor.fill_constant(
+                shape=[1], dtype='int64', value=9
+            )
             np.testing.assert_array_equal(
                 paddle.tensor.array_read(array, last).numpy(), inp0
             )
diff --git a/python/paddle/fluid/tests/unittests/test_tile_op.py b/python/paddle/fluid/tests/unittests/test_tile_op.py
index 59d962d9dd887c..846b0d21a0d2ce 100644
--- a/python/paddle/fluid/tests/unittests/test_tile_op.py
+++ b/python/paddle/fluid/tests/unittests/test_tile_op.py
@@ -302,7 +302,9 @@ def test_api(self):
             repeat_times = [2, 2]
             x1 = paddle.static.data(name='x1', shape=[-1, 4], dtype="int32")
             out = paddle.tile(x1, repeat_times)
-            positive_2 = fluid.layers.fill_constant([1], dtype="int32", value=2)
+            positive_2 = paddle.tensor.fill_constant(
+                [1], dtype="int32", value=2
+            )
             out2 = paddle.tile(x1, repeat_times=[positive_2, 2])
diff --git a/python/paddle/fluid/tests/unittests/test_uniform_random_op.py b/python/paddle/fluid/tests/unittests/test_uniform_random_op.py
index 3965425c7820d6..5ecb6d4b2c6c9b 100644
--- a/python/paddle/fluid/tests/unittests/test_uniform_random_op.py
+++ b/python/paddle/fluid/tests/unittests/test_uniform_random_op.py
@@ -329,7 +329,7 @@ def test_attr_tensor_API(self):
         startup_program = fluid.Program()
         train_program = fluid.Program()
         with fluid.program_guard(train_program, startup_program):
-            dim_tensor = fluid.layers.fill_constant([1], "int64", 3)
+            dim_tensor = paddle.tensor.fill_constant([1], "int64", 3)
             ret = paddle.uniform([1, dim_tensor, 2])
 
             place = fluid.CPUPlace()
@@ -344,8 +344,8 @@ def test_attr_tensorlist_int32_API(self):
         startup_program = fluid.Program()
         train_program = fluid.Program()
         with fluid.program_guard(train_program, startup_program):
-            dim_1 = fluid.layers.fill_constant([1], "int64", 3)
-            dim_2 = fluid.layers.fill_constant([1], "int32", 2)
+            dim_1 = paddle.tensor.fill_constant([1], "int64", 3)
+            dim_2 = paddle.tensor.fill_constant([1], "int32", 2)
             ret = paddle.uniform([1, dim_1, dim_2])
 
             place = fluid.CPUPlace()
diff --git a/python/paddle/fluid/tests/unittests/test_unsqueeze2_op.py b/python/paddle/fluid/tests/unittests/test_unsqueeze2_op.py
index 1af97f52e3590b..c5f7083e3f03b8 100755
--- a/python/paddle/fluid/tests/unittests/test_unsqueeze2_op.py
+++ b/python/paddle/fluid/tests/unittests/test_unsqueeze2_op.py
@@ -18,7 +18,6 @@
 from op_test import OpTest
 
 import paddle
-import paddle.fluid as fluid
 
 paddle.enable_static()
@@ -250,8 +249,8 @@ def executed_api(self):
     def test_api(self):
         input = np.random.random([3, 2, 5]).astype("float64")
         x = paddle.static.data(name='x', shape=[3, 2, 5], dtype="float64")
-        positive_3_int32 = fluid.layers.fill_constant([1], "int32", 3)
-        positive_1_int64 = fluid.layers.fill_constant([1], "int64", 1)
+        positive_3_int32 = paddle.tensor.fill_constant([1], "int32", 3)
+        positive_1_int64 = paddle.tensor.fill_constant([1], "int64", 1)
         axes_tensor_int32 = paddle.static.data(
             name='axes_tensor_int32', shape=[3], dtype="int32"
        )
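The while_loop tests below use fill_constant tensors as loop counters and bounds; a self-contained sketch of the pattern, under the same assumptions as above:

    import paddle

    paddle.enable_static()

    def cond(i, ten):
        return i < ten

    def body(i, ten):
        return i + 1, ten

    i = paddle.tensor.fill_constant(shape=[1], dtype='int64', value=0)
    ten = paddle.tensor.fill_constant(shape=[1], dtype='int64', value=10)
    i, ten = paddle.static.nn.while_loop(cond, body, [i, ten])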
diff --git a/python/paddle/fluid/tests/unittests/test_while_loop_op.py b/python/paddle/fluid/tests/unittests/test_while_loop_op.py
index a87861497abf0c..714e541f581acc 100644
--- a/python/paddle/fluid/tests/unittests/test_while_loop_op.py
+++ b/python/paddle/fluid/tests/unittests/test_while_loop_op.py
@@ -38,9 +38,11 @@ def body(i):
         main_program = Program()
         startup_program = Program()
         with program_guard(main_program, startup_program):
-            i = layers.fill_constant(shape=[1], dtype='int64', value=0)
-            one = layers.fill_constant(shape=[1], dtype='int64', value=1)
-            ten = layers.fill_constant(shape=[1], dtype='int64', value=10)
+            i = paddle.tensor.fill_constant(shape=[1], dtype='int64', value=0)
+            one = paddle.tensor.fill_constant(shape=[1], dtype='int64', value=1)
+            ten = paddle.tensor.fill_constant(
+                shape=[1], dtype='int64', value=10
+            )
             out = paddle.static.nn.while_loop(cond, body, (i,))
 
         place = (
@@ -67,9 +69,13 @@ def body(i, mem):
         startup_program = Program()
         with program_guard(main_program, startup_program):
             i = layers.zeros(shape=[1], dtype='int64')
-            ten = layers.fill_constant(shape=[1], dtype='int64', value=10)
+            ten = paddle.tensor.fill_constant(
+                shape=[1], dtype='int64', value=10
+            )
             mem = fluid.data(name='mem', shape=[10], dtype='float32')
-            one = layers.fill_constant(shape=[10], dtype='float32', value=1)
+            one = paddle.tensor.fill_constant(
+                shape=[10], dtype='float32', value=1
+            )
             out = paddle.static.nn.while_loop(cond, body, [i, mem])
 
         data = np.random.rand(10).astype('float32')
@@ -108,16 +114,22 @@ def body(i, ten, test_dict, test_list, test_list_dict):
         startup_program = Program()
         with program_guard(main_program, startup_program):
             i = layers.zeros(shape=[1], dtype='int64')
-            ten = layers.fill_constant(shape=[1], dtype='int64', value=10)
-            test_data = layers.fill_constant(shape=[1], dtype='int64', value=0)
+            ten = paddle.tensor.fill_constant(
+                shape=[1], dtype='int64', value=10
+            )
+            test_data = paddle.tensor.fill_constant(
+                shape=[1], dtype='int64', value=0
+            )
             test_dict = {"test_key": test_data}
             test_list = [
-                layers.fill_constant(shape=[1, 2], dtype='int64', value=0)
+                paddle.tensor.fill_constant(
+                    shape=[1, 2], dtype='int64', value=0
+                )
             ]
             test_list_dict = [
                 {
-                    "test_key": layers.fill_constant(
+                    "test_key": paddle.tensor.fill_constant(
                         shape=[1], dtype='float32', value=0
                     )
                 }
             ]
@@ -195,9 +207,15 @@ def internal_body(j, init, sums):
             j = layers.zeros(shape=[1], dtype='int64')
             init = fluid.data(name='init', shape=[3, 3], dtype='float32')
             sums = fluid.data(name='sums', shape=[3, 3], dtype='float32')
-            loop_len1 = layers.fill_constant(shape=[1], dtype='int64', value=2)
-            loop_len2 = layers.fill_constant(shape=[1], dtype='int64', value=3)
-            ones = layers.fill_constant(shape=[3, 3], dtype='float32', value=1)
+            loop_len1 = paddle.tensor.fill_constant(
+                shape=[1], dtype='int64', value=2
+            )
+            loop_len2 = paddle.tensor.fill_constant(
+                shape=[1], dtype='int64', value=3
+            )
+            ones = paddle.tensor.fill_constant(
+                shape=[3, 3], dtype='float32', value=1
+            )
 
             out = paddle.static.nn.while_loop(
                 external_cond, external_body, [i, j, init, sums]
@@ -238,8 +256,12 @@ def body(i, x):
         with fluid.program_guard(main_program, startup_program):
             i = fluid.data(name='i', shape=[1], dtype='float32')
             i.stop_gradient = False
-            eleven = layers.fill_constant(shape=[1], dtype='float32', value=11)
-            one = layers.fill_constant(shape=[1], dtype='float32', value=1)
+            eleven = paddle.tensor.fill_constant(
+                shape=[1], dtype='float32', value=11
+            )
+            one = paddle.tensor.fill_constant(
+                shape=[1], dtype='float32', value=1
+            )
             x = fluid.data(name='x', shape=[1], dtype='float32')
             x.stop_gradient = False
@@ -360,10 +382,14 @@ def internal_body(j, x, mem_array):
             paddle.tensor.array_write(d2, i, array=data_array)
             i = layers.zeros(shape=[1], dtype='int64')
             i.stop_gradient = True
-            array_len = layers.fill_constant(shape=[1], dtype='int64', value=1)
-            j = layers.fill_constant(shape=[1], dtype='int64', value=1)
+            array_len = paddle.tensor.fill_constant(
+                shape=[1], dtype='int64', value=1
+            )
+            j = paddle.tensor.fill_constant(shape=[1], dtype='int64', value=1)
             j.stop_gradient = True
-            array_len2 = layers.fill_constant(shape=[1], dtype='int64', value=3)
+            array_len2 = paddle.tensor.fill_constant(
+                shape=[1], dtype='int64', value=3
+            )
 
             out = paddle.static.nn.while_loop(
                 external_cond, external_body, [i, j, x, mem_array]
@@ -422,10 +448,14 @@ def fn_add_one():
         main_program = Program()
         startup_program = Program()
         with fluid.program_guard(main_program, startup_program):
-            i = layers.fill_constant(shape=[1], dtype='int64', value=1)
-            ten = layers.fill_constant(shape=[1], dtype='int64', value=10)
-            three = layers.fill_constant(shape=[1], dtype='int64', value=3)
-            one = layers.fill_constant(shape=[1], dtype='int64', value=1)
+            i = paddle.tensor.fill_constant(shape=[1], dtype='int64', value=1)
+            ten = paddle.tensor.fill_constant(
+                shape=[1], dtype='int64', value=10
+            )
+            three = paddle.tensor.fill_constant(
+                shape=[1], dtype='int64', value=3
+            )
+            one = paddle.tensor.fill_constant(shape=[1], dtype='int64', value=1)
             out = paddle.static.nn.while_loop(cond, body, [i])
 
         place = (
@@ -471,7 +501,7 @@ def cond_returns_with_mutable_dict(i, test_dict):
             return i > 0
 
         def body_returns_with_mutable_dict(i, test_dict):
-            test_dict['new_key'] = layers.fill_constant(
+            test_dict['new_key'] = paddle.tensor.fill_constant(
                 shape=[1], dtype='int64', value=1
             )
             return paddle.increment(i), test_dict
@@ -481,18 +511,28 @@ def cond_returns_with_mutable_list(i, test_list):
 
         def body_returns_with_mutable_list(i, test_list):
             test_list.append(
-                layers.fill_constant(shape=[1], dtype='int64', value=1)
+                paddle.tensor.fill_constant(shape=[1], dtype='int64', value=1)
             )
             return paddle.increment(i), test_list
 
         main_program = Program()
         startup_program = Program()
         with program_guard(main_program, startup_program):
-            data = layers.fill_constant(shape=[1], dtype='int64', value=1)
-            data_1d = layers.fill_constant(shape=[1], dtype='int64', value=1)
-            data_2d = layers.fill_constant(shape=[2, 2], dtype='int64', value=1)
-            ten = layers.fill_constant(shape=[1], dtype='int64', value=10)
-            ten_2d = layers.fill_constant(shape=[2, 2], dtype='int64', value=10)
+            data = paddle.tensor.fill_constant(
+                shape=[1], dtype='int64', value=1
+            )
+            data_1d = paddle.tensor.fill_constant(
+                shape=[1], dtype='int64', value=1
+            )
+            data_2d = paddle.tensor.fill_constant(
+                shape=[2, 2], dtype='int64', value=1
+            )
+            ten = paddle.tensor.fill_constant(
+                shape=[1], dtype='int64', value=10
+            )
+            ten_2d = paddle.tensor.fill_constant(
+                shape=[2, 2], dtype='int64', value=10
+            )
 
             # The type of `cond` in Op(while_loop) must be callable
             def type_error_cond():
@@ -567,7 +607,7 @@ def value_error_body_returns_error_type():
             # The length of `output_vars` with mutable value should keep same with `loop_vars`
             def value_error_body_returns_with_mutable_dict():
                 test_dict = {
-                    "int_constant": layers.fill_constant(
+                    "int_constant": paddle.tensor.fill_constant(
                         shape=[2, 2], dtype='int64', value=1
                     )
                 }
@@ -583,7 +623,9 @@ def value_error_body_returns_with_mutable_dict():
             def value_error_body_returns_with_mutable_list():
                 test_list = [
-                    layers.fill_constant(shape=[2, 2], dtype='int64', value=1)
+                    paddle.tensor.fill_constant(
+                        shape=[2, 2], dtype='int64', value=1
+                    )
                 ]
                 out = paddle.static.nn.while_loop(
                     cond_returns_with_mutable_list,
@@ -610,9 +652,9 @@ def body(z, i):
         startup_program = Program()
         with program_guard(main_program, startup_program):
             x = paddle.static.data(name='x', shape=[-1, 5], dtype='int32')
-            z = fluid.layers.fill_constant([1], 'int32', 0)
+            z = paddle.tensor.fill_constant([1], 'int32', 0)
             x_shape = paddle.shape(x)
-            i = fluid.layers.fill_constant([1], 'int32', 0)
+            i = paddle.tensor.fill_constant([1], 'int32', 0)
             z, _ = paddle.static.nn.while_loop(cond, body, [z, i])
 
         place = (
diff --git a/python/paddle/fluid/tests/unittests/test_while_op.py b/python/paddle/fluid/tests/unittests/test_while_op.py
index ede0df752a3347..104e9358ee4919 100644
--- a/python/paddle/fluid/tests/unittests/test_while_op.py
+++ b/python/paddle/fluid/tests/unittests/test_while_op.py
@@ -42,12 +42,16 @@ def simple_net(self):
             paddle.tensor.array_write(d2, i, array=data_array)
             i = layers.zeros(shape=[1], dtype='int64')
             i.stop_gradient = True
-            array_len = layers.fill_constant(shape=[1], dtype='int64', value=1)
+            array_len = paddle.tensor.fill_constant(
+                shape=[1], dtype='int64', value=1
+            )
             array_len.stop_gradient = True
             cond = paddle.less_than(x=i, y=array_len)
-            j = layers.fill_constant(shape=[1], dtype='int64', value=1)
+            j = paddle.tensor.fill_constant(shape=[1], dtype='int64', value=1)
             j.stop_gradient = True
-            array_len2 = layers.fill_constant(shape=[1], dtype='int64', value=3)
+            array_len2 = paddle.tensor.fill_constant(
+                shape=[1], dtype='int64', value=3
+            )
             array_len2.stop_gradient = True
             cond2 = paddle.less_than(x=j, y=array_len2)
             while_op = paddle.static.nn.control_flow.While(cond=cond)
@@ -113,7 +117,9 @@ def test_simple_net_forward(self):
     def test_exceptions(self):
         i = layers.zeros(shape=[2], dtype='int64')
-        array_len = layers.fill_constant(shape=[2], dtype='int64', value=1)
+        array_len = paddle.tensor.fill_constant(
+            shape=[2], dtype='int64', value=1
+        )
         cond = paddle.less_than(x=i, y=array_len)
         with self.assertRaises(TypeError):
             paddle.static.nn.control_flow.While(cond=cond)
@@ -151,8 +157,8 @@ def body_func(i, ten, batch_info, origin_seq):
         y.desc.set_need_check_feed(False)
         temp = paddle.concat([x, y], axis=-1)
 
-        i = layers.fill_constant(shape=[1], value=0, dtype='int32')
-        num = layers.fill_constant(shape=[1], value=5, dtype='int32')
+        i = paddle.tensor.fill_constant(shape=[1], value=0, dtype='int32')
+        num = paddle.tensor.fill_constant(shape=[1], value=5, dtype='int32')
 
         i, ten, shuffle_temp, y = paddle.static.nn.while_loop(
             cond, body_func, [i, num, temp, y]
diff --git a/python/paddle/fluid/tests/unittests/xpu/test_assign_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_assign_op_xpu.py
index b1f1cef64a0c12..650658d77e549b 100644
--- a/python/paddle/fluid/tests/unittests/xpu/test_assign_op_xpu.py
+++ b/python/paddle/fluid/tests/unittests/xpu/test_assign_op_xpu.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
+# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/fluid/tests/unittests/xpu/test_expand_v2_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_expand_v2_op_xpu.py
index d5b18f37181357..29ae86b2a03802 100644
--- a/python/paddle/fluid/tests/unittests/xpu/test_expand_v2_op_xpu.py
+++ b/python/paddle/fluid/tests/unittests/xpu/test_expand_v2_op_xpu.py
@@ -206,7 +206,7 @@ def test_static(self):
                 dtype="float32",
             )
 
-            positive_2 = fluid.layers.fill_constant([1], "int32", 12)
+            positive_2 = paddle.tensor.fill_constant([1], "int32", 12)
             expand_shape = paddle.static.data(
                 name="expand_shape",
                 shape=[2],
diff --git a/python/paddle/fluid/tests/unittests/xpu/test_gaussian_random_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_gaussian_random_op_xpu.py
index 2a5f81f58c1e7f..7280a4e80f0d14 100644
--- a/python/paddle/fluid/tests/unittests/xpu/test_gaussian_random_op_xpu.py
+++ b/python/paddle/fluid/tests/unittests/xpu/test_gaussian_random_op_xpu.py
@@ -182,9 +182,9 @@ def init_data(self):
 # Test python API
 class TestGaussianRandomAPI(unittest.TestCase):
     def test_api(self):
-        positive_2_int32 = fluid.layers.fill_constant([1], "int32", 2000)
+        positive_2_int32 = paddle.tensor.fill_constant([1], "int32", 2000)
 
-        positive_2_int64 = fluid.layers.fill_constant([1], "int64", 500)
+        positive_2_int64 = paddle.tensor.fill_constant([1], "int64", 500)
         shape_tensor_int32 = fluid.data(
             name="shape_tensor_int32", shape=[2], dtype="int32"
         )
diff --git a/python/paddle/fluid/tests/unittests/xpu/test_set_value_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_set_value_op_xpu.py
index 0a3eb065d0283a..d8094e7ad71922 100644
--- a/python/paddle/fluid/tests/unittests/xpu/test_set_value_op_xpu.py
+++ b/python/paddle/fluid/tests/unittests/xpu/test_set_value_op_xpu.py
@@ -1255,17 +1255,17 @@ def test_static_graph(self):
         numel = lambda input_shape: reduce(lambda x, y: x * y, input_shape)
 
         def op1(x):
-            value = paddle.fluid.layers.fill_constant([1], "float32", 1)
+            value = paddle.tensor.fill_constant([1], "float32", 1)
             # test stop_gradient
             value.stop_gradient = True
             x.stop_gradient = False
-            start = paddle.fluid.layers.fill_constant(
+            start = paddle.tensor.fill_constant(
                 [1], "int32", 5, force_cpu=True
             )
-            end = paddle.fluid.layers.fill_constant(
+            end = paddle.tensor.fill_constant(
                 [1], "int32", 0, force_cpu=True
             )
-            step = paddle.fluid.layers.fill_constant(
+            step = paddle.tensor.fill_constant(
                 [1], "int32", -2, force_cpu=True
             )
@@ -1296,9 +1296,7 @@ def op1(x):
             return y, value
 
         def op2(x):
-            value = paddle.fluid.layers.fill_constant(
-                [1, 3, 2], "float32", 1
-            )
+            value = paddle.tensor.fill_constant([1, 3, 2], "float32", 1)
             # test stop_gradient
             value.stop_gradient = False
             x.stop_gradient = False
@@ -1326,16 +1324,16 @@ def op2(x):
             return y, value
 
         def op3(x):
-            value = paddle.fluid.layers.fill_constant([1], "float32", 1)
+            value = paddle.tensor.fill_constant([1], "float32", 1)
             x.stop_gradient = True
             value.stop_gradient = False
-            start = paddle.fluid.layers.fill_constant(
+            start = paddle.tensor.fill_constant(
                 [1], "int32", 0, force_cpu=True
             )
-            end = paddle.fluid.layers.fill_constant(
+            end = paddle.tensor.fill_constant(
                 [1], "int32", 5, force_cpu=True
             )
-            step = paddle.fluid.layers.fill_constant(
+            step = paddle.tensor.fill_constant(
                 [1], "int32", 3, force_cpu=True
             )
diff --git a/python/paddle/fluid/tests/unittests/xpu/test_sgd_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_sgd_op_xpu.py
index b859c06b7e1635..12190514ba021f 100644
--- a/python/paddle/fluid/tests/unittests/xpu/test_sgd_op_xpu.py
+++ b/python/paddle/fluid/tests/unittests/xpu/test_sgd_op_xpu.py
@@ -68,8 +68,8 @@ def conf(self):
 class TestSGDOpWithLargeInput(unittest.TestCase):
     def runTest(self):
-        data = fluid.layers.fill_constant(shape=[1], value=128, dtype='int64')
-        label = fluid.layers.fill_constant(
+        data = paddle.tensor.fill_constant(shape=[1], value=128, dtype='int64')
+        label = paddle.tensor.fill_constant(
             shape=[1, 150], value=0.5, dtype='float32'
         )
         emb = paddle.static.nn.embedding(
diff --git a/python/paddle/fluid/tests/unittests/xpu/test_sum_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_sum_op_xpu.py
index 971ad5b5415c01..285d7cbfb007d1 100644
--- a/python/paddle/fluid/tests/unittests/xpu/test_sum_op_xpu.py
+++ b/python/paddle/fluid/tests/unittests/xpu/test_sum_op_xpu.py
@@ -104,10 +104,10 @@ def test_w_is_selected_rows(self):
 class API_Test_Add_n(unittest.TestCase):
     def test_api(self):
         with fluid.program_guard(fluid.Program(), fluid.Program()):
-            input0 = fluid.layers.fill_constant(
+            input0 = paddle.tensor.fill_constant(
                 shape=[2, 3], dtype='int64', value=5
             )
-            input1 = fluid.layers.fill_constant(
+            input1 = paddle.tensor.fill_constant(
                 shape=[2, 3], dtype='int64', value=3
             )
             expected_result = np.empty((2, 3))
diff --git a/python/paddle/fluid/tests/unittests/xpu/test_while_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_while_op_xpu.py
index 2441eea76c074a..a148d10c80538a 100644
--- a/python/paddle/fluid/tests/unittests/xpu/test_while_op_xpu.py
+++ b/python/paddle/fluid/tests/unittests/xpu/test_while_op_xpu.py
@@ -41,12 +41,16 @@ def simple_net(self):
             paddle.tensor.array_write(d2, i, array=data_array)
             i = layers.zeros(shape=[1], dtype='int64')
             i.stop_gradient = True
-            array_len = layers.fill_constant(shape=[1], dtype='int64', value=1)
+            array_len = paddle.tensor.fill_constant(
+                shape=[1], dtype='int64', value=1
+            )
             array_len.stop_gradient = True
             cond = paddle.less_than(x=i, y=array_len)
-            j = layers.fill_constant(shape=[1], dtype='int64', value=1)
+            j = paddle.tensor.fill_constant(shape=[1], dtype='int64', value=1)
             j.stop_gradient = True
-            array_len2 = layers.fill_constant(shape=[1], dtype='int64', value=3)
+            array_len2 = paddle.tensor.fill_constant(
+                shape=[1], dtype='int64', value=3
+            )
             array_len2.stop_gradient = True
             cond2 = paddle.less_than(x=j, y=array_len2)
             while_op = paddle.static.nn.control_flow.While(cond=cond)
@@ -112,7 +116,9 @@ def test_simple_net_forward(self):
     def test_exceptions(self):
         i = layers.zeros(shape=[2], dtype='int64')
-        array_len = layers.fill_constant(shape=[2], dtype='int64', value=1)
+        array_len = paddle.tensor.fill_constant(
+            shape=[2], dtype='int64', value=1
+        )
         cond = paddle.less_than(x=i, y=array_len)
         with self.assertRaises(TypeError):
             paddle.static.nn.control_flow.While(cond=cond)
diff --git a/python/paddle/incubate/autograd/composite_rules.py b/python/paddle/incubate/autograd/composite_rules.py
index 02a88b155bba81..c479416357dbeb 100644
--- a/python/paddle/incubate/autograd/composite_rules.py
+++ b/python/paddle/incubate/autograd/composite_rules.py
@@ -97,6 +97,7 @@ def composite_batchnorm(
     batch_mean = zeros(run_mean.shape, run_mean.dtype)
     batch_var = zeros(run_var.shape, run_var.dtype)
     if not use_run_stat:
+        batch_mean = mean(x, reduce_axes, keepdim=True)
         temp = mean(x * x, reduce_axes, keepdim=True)
         batch_var = temp - batch_mean * batch_mean
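The composite_batchnorm fix matters because batch_var is derived from batch_mean via E[x*x] - E[x]^2; without recomputing batch_mean inside the branch, the zero-initialized placeholder above would be used. A numpy sketch of the identity being relied on (illustrative only):

    import numpy as np

    x = np.random.rand(4, 3).astype('float32')
    batch_mean = x.mean(axis=0, keepdims=True)
    batch_var = (x * x).mean(axis=0, keepdims=True) - batch_mean * batch_mean
    assert np.allclose(batch_var, x.var(axis=0, keepdims=True), atol=1e-6)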
diff --git a/python/paddle/incubate/autograd/primitives.py b/python/paddle/incubate/autograd/primitives.py
index 152d920681be57..547d8a61768a64 100644
--- a/python/paddle/incubate/autograd/primitives.py
+++ b/python/paddle/incubate/autograd/primitives.py
@@ -11,7 +11,6 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-from paddle.fluid.layers.tensor import fill_constant  # noqa: F401
 from paddle.tensor import abs  # noqa: F401
 from paddle.tensor import acos  # noqa: F401
 from paddle.tensor import acosh  # noqa: F401
@@ -33,6 +32,7 @@
 from paddle.tensor import erfinv  # noqa: F401
 from paddle.tensor import exp  # noqa: F401
 from paddle.tensor import expm1  # noqa: F401
+from paddle.tensor import fill_constant  # noqa: F401
 from paddle.tensor import full  # noqa: F401
 from paddle.tensor import gather  # noqa: F401
 from paddle.tensor import greater_equal  # noqa: F401
diff --git a/python/paddle/jit/api.py b/python/paddle/jit/api.py
index c0fd1892cf1ddc..e90677538aecd4 100644
--- a/python/paddle/jit/api.py
+++ b/python/paddle/jit/api.py
@@ -126,6 +126,7 @@ def _dygraph_to_static_func_(dygraph_func):
     Examples:
         .. code-block:: python
 
+            import paddle
             import paddle.fluid as fluid
             import numpy as np
             from paddle.jit.api import dygraph_to_static_func
@@ -139,7 +140,7 @@ def func(x):
 
                 return x_v
 
-            x = fluid.layers.fill_constant(shape=[3, 3], value=0, dtype='float64')
+            x = paddle.full(shape=[3, 3], fill_value=0, dtype='float64')
             x_v = func(x)
             exe = fluid.Executor(fluid.CPUPlace())
diff --git a/python/paddle/jit/dy2static/convert_operators.py b/python/paddle/jit/dy2static/convert_operators.py
index 1f3b6d5657cf79..2e9d9a13a735a0 100644
--- a/python/paddle/jit/dy2static/convert_operators.py
+++ b/python/paddle/jit/dy2static/convert_operators.py
@@ -18,7 +18,7 @@
 from paddle.fluid.data_feeder import convert_dtype
 from paddle.fluid.dygraph.base import _convert_into_variable
 from paddle.fluid.framework import Variable, core
-from paddle.fluid.layers import Print, control_flow, fill_constant
+from paddle.fluid.layers import Print, control_flow
 from paddle.fluid.layers.control_flow import while_loop
 
 from .utils import (
@@ -798,6 +798,8 @@ def body(i, new_array):
     if idx < 0:
         idx = idx + arr_len
     else:
+        from paddle.tensor import fill_constant
+
         idx = fill_constant(shape=[1], dtype="int64", value=idx)
 
     pop_item = paddle.tensor.array_read(array, idx)
diff --git a/python/paddle/jit/dy2static/origin_info.py b/python/paddle/jit/dy2static/origin_info.py
index 8d6b01a04e481c..ba917d666373d2 100644
--- a/python/paddle/jit/dy2static/origin_info.py
+++ b/python/paddle/jit/dy2static/origin_info.py
@@ -286,7 +286,7 @@ def get_new_op_callstack(callstack):
     An example of callstack:
 
       File "path1/to/file.py", line 10, in func_1
-        y = fluid.layers.fill_constant(x, shape=[1], dtype="int32")
+        y = paddle.tensor.fill_constant(x, shape=[1], dtype="int32")
      File "path2/to/file.py", line 740, in fill_constant
        stop_gradient=True)
      File "path3/to/file.py", line 43, in append_op
diff --git a/python/paddle/jit/dy2static/utils.py b/python/paddle/jit/dy2static/utils.py
index 28c0a0cfd16454..52e0672e5cdea1 100644
--- a/python/paddle/jit/dy2static/utils.py
+++ b/python/paddle/jit/dy2static/utils.py
@@ -262,7 +262,7 @@ def make_hashable(x, error_msg=None):
 # NOTE(Aurelius84): Consider the following paddle inner API as common case to
 # apply @to_static code transformation as usual. Because they contains
 # user-defined layer, like paddle.distributed.auto_parallel.helper.ProxyLayer.
-AS_NOT_INNER_FUNC_LIST = set()
+AS_NOT_INNER_FUNC_LIST = {"paddle.nn.layer.container.Sequential"}
 
 
 def as_not_paddle_func(path):
@@ -293,6 +293,8 @@ def in_white_list(module, func_name):
         if inspect.ismethod(func):
             func_name = func.__self__.__class__.__name__
             func = func.__func__
+        elif hasattr(func, '__class__'):  # for nn.Sequential
+            func_name = func.__class__.__name__
 
         m = inspect.getmodule(func)
         flag = m is not None and m.__name__.startswith(PADDLE_MODULE_PREFIX)
diff --git a/python/paddle/nn/functional/common.py b/python/paddle/nn/functional/common.py
index f18a7749791e7b..7fb0293f9b5221 100644
--- a/python/paddle/nn/functional/common.py
+++ b/python/paddle/nn/functional/common.py
@@ -18,7 +18,6 @@
 from paddle import _C_ops, _legacy_C_ops
 from paddle.common_ops_import import Variable, default_main_program
 from paddle.fluid.layer_helper import LayerHelper
-from paddle.fluid.layers.tensor import fill_constant
 from paddle.framework import core, in_dynamic_mode
 from paddle.tensor.creation import full
 
@@ -529,7 +528,7 @@ def _is_list_or_turple_(data):
                     temp_out = helper.create_variable_for_type_inference(
                         'int32'
                     )
-                    fill_constant(
+                    paddle.tensor.fill_constant(
                         [1], 'int32', dim, force_cpu=True, out=temp_out
                     )
                     new_size_tensor.append(temp_out)
diff --git a/python/paddle/static/nn/metric.py b/python/paddle/static/nn/metric.py
index bcb3cfc130fcd0..b2d10abaf87b55 100644
--- a/python/paddle/static/nn/metric.py
+++ b/python/paddle/static/nn/metric.py
@@ -14,12 +14,11 @@
 """
 All layers just related to metric.
 """
-
+import paddle
 from paddle import _legacy_C_ops
 from paddle.fluid.data_feeder import check_variable_and_dtype
 from paddle.fluid.framework import Variable, _non_static_mode, _varbase_creator
 from paddle.fluid.layer_helper import LayerHelper
-from paddle.fluid.layers import tensor
 from paddle.nn.initializer import ConstantInitializer
 
 __all__ = []
@@ -226,7 +225,7 @@ def auc(
     helper = LayerHelper("auc", **locals())
     if ins_tag_weight is None:
-        ins_tag_weight = tensor.fill_constant(
+        ins_tag_weight = paddle.tensor.fill_constant(
             shape=[1, 1], dtype="float32", value=1.0
         )
     check_variable_and_dtype(input, 'input', ['float32', 'float64'], 'auc')
diff --git a/python/paddle/tensor/__init__.py b/python/paddle/tensor/__init__.py
index 29afaf38ca3480..896cf84f2606b1 100755
--- a/python/paddle/tensor/__init__.py
+++ b/python/paddle/tensor/__init__.py
@@ -26,6 +26,7 @@
 from .creation import diagflat  # noqa: F401
 from .creation import eye  # noqa: F401
 from .creation import linspace  # noqa: F401
+from .creation import fill_constant  # noqa: F401
 from .creation import ones  # noqa: F401
 from .creation import ones_like  # noqa: F401
 from .creation import zeros  # noqa: F401
diff --git a/python/paddle/tensor/creation.py b/python/paddle/tensor/creation.py
index 2c14e1b8a096a0..d06f873050dd57 100644
--- a/python/paddle/tensor/creation.py
+++ b/python/paddle/tensor/creation.py
@@ -22,7 +22,6 @@
 import paddle
 from paddle import _C_ops
-from paddle.common_ops_import import fill_constant
 
 from ..fluid.data_feeder import (
     check_dtype,
@@ -857,6 +856,90 @@ def full_like(x, fill_value, dtype=None, name=None):
     return out
 
 
+def fill_constant(shape, dtype, value, force_cpu=False, out=None, name=None):
+    if in_dygraph_mode():
+        place = _current_expected_place()
+        if force_cpu:
+            place = core.CPUPlace()
+        if isinstance(shape, (list, tuple)):
+            shape = paddle.utils.convert_shape_to_list(shape)
+
+        if not isinstance(dtype, core.VarDesc.VarType):
+            dtype = convert_np_dtype_to_dtype_(dtype)
+
+        if out is None:
+            out = _C_ops.full(shape, float(value), dtype, place)
+            out.stop_gradient = True
+            return out
+
+        if out is not None:
+            # The final state mode supports a non-None `out`.
+            _C_ops.full_(out, shape, float(value), dtype, place)
+            out.stop_gradient = True
+            return out
+    else:
+        attrs = {'force_cpu': force_cpu}
+        dtype = convert_dtype(dtype)
+        if not isinstance(value, Variable):
+            if dtype in ['uint8', 'int16', 'int32', 'int64']:
+                attrs['str_value'] = str(int(value))
+                attrs['value'] = int(value)
+            else:
+                attrs['str_value'] = str(float(value))
+                attrs['value'] = float(value)
+
+        helper = LayerHelper("fill_constant", **locals())
+        inputs = {}
+        if isinstance(value, Variable):
+            if convert_dtype(value.dtype) != dtype:
+                value = paddle.cast(value, dtype)
+            inputs['ValueTensor'] = value
+
+        paddle.utils.check_shape(shape)
+        check_dtype(
+            dtype,
+            'dtype',
+            [
+                'bool',
+                'float16',
+                'float32',
+                'float64',
+                'uint8',
+                'int16',
+                'int32',
+                'int64',
+                'complex64',
+                'complex128',
+                'uint16',
+            ],
+            'fill_constant',
+        )
+        check_type(shape, 'shape', (Variable, list, tuple), 'fill_constant')
+
+        if out is not None:
+            check_variable_and_dtype(
+                out, 'out', [convert_dtype(dtype)], 'fill_constant'
+            )
+
+        helper = LayerHelper("fill_constant", **locals())
+        paddle.utils.get_shape_tensor_inputs(
+            inputs=inputs, attrs=attrs, shape=shape, op_type='fill_constant'
+        )
+
+        if out is None:
+            out = helper.create_variable_for_type_inference(dtype=dtype)
+            attrs['dtype'] = out.dtype
+        helper.append_op(
+            type='fill_constant',
+            inputs=inputs,
+            outputs={'Out': [out]},
+            attrs=attrs,
+            stop_gradient=True,
+        )
+        out.stop_gradient = True
+        return out
+
+
 def ones(shape, dtype=None, name=None):
     """
     Create a Tensor of specified :attr:`shape` and :attr:`dtype` and fill it with 1.
diff --git a/python/paddle/tensor/manipulation.py b/python/paddle/tensor/manipulation.py
index 848d73d80f7ea6..935f6fbe7bf3aa 100644
--- a/python/paddle/tensor/manipulation.py
+++ b/python/paddle/tensor/manipulation.py
@@ -18,15 +18,16 @@
 
 import paddle
 from paddle import _C_ops
+from paddle.tensor import fill_constant
 from paddle.utils.inplace_utils import inplace_apis_in_dygraph_only
 
-from ..common_ops_import import Variable, fill_constant
 from ..fluid.data_feeder import (
     check_dtype,
     check_type,
     check_variable_and_dtype,
     convert_dtype,
 )
+from ..fluid.framework import Variable
 from ..framework import (
     LayerHelper,
     convert_np_dtype_to_dtype_,
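A short sketch of how the relocated helper behaves, based on the definition above (dygraph returns a filled stop-gradient tensor; static mode appends a fill_constant op, optionally writing into `out`):

    import paddle

    # dynamic graph
    t = paddle.tensor.fill_constant(shape=[2, 3], dtype='float32', value=1.5)
    assert t.stop_gradient

    # static graph
    paddle.enable_static()
    main = paddle.static.Program()
    with paddle.static.program_guard(main):
        c = paddle.tensor.fill_constant(shape=[1], dtype='int64', value=10)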
diff --git a/python/paddle/utils/cpp_extension/cpp_extension.py b/python/paddle/utils/cpp_extension/cpp_extension.py
index d82c242c075158..48ca3836f651c6 100644
--- a/python/paddle/utils/cpp_extension/cpp_extension.py
+++ b/python/paddle/utils/cpp_extension/cpp_extension.py
@@ -804,6 +804,7 @@ def load(
     extra_cuda_cflags=None,
     extra_ldflags=None,
     extra_include_paths=None,
+    extra_library_paths=None,
     build_directory=None,
     verbose=False,
 ):
@@ -879,10 +880,13 @@ def load(
         extra_include_paths(list[str], optional): Specify additional include path used
             to search header files. By default all basic headers are included
             implicitly from ``site-package/paddle/include`` . Default is None.
+        extra_library_paths(list[str], optional): Specify additional library path used
+            to search library files. By default all basic libraries are included
+            implicitly from ``site-packages/paddle/libs`` . Default is None.
         build_directory(str, optional): Specify root directory path to put shared library
             file. If set None, it will use ``PADDLE_EXTENSION_DIR`` from os.environ.
             Use ``paddle.utils.cpp_extension.get_build_directory()`` to see the location.
             Default is None.
-        verbose(bool, optional): whether to verbose compiled log information. Default is False
+        verbose(bool, optional): whether to verbose compiled log information. Default is False.
 
     Returns:
         Module: A callable python module contains all CustomOp Layer APIs.
@@ -931,6 +935,7 @@ def load(
         file_path,
         build_base_dir,
         extra_include_paths,
+        extra_library_paths,
         extra_cxx_cflags,
         extra_cuda_cflags,
         extra_ldflags,
diff --git a/python/paddle/utils/cpp_extension/extension_utils.py b/python/paddle/utils/cpp_extension/extension_utils.py
index 13650374be4ab5..0d956c1f459bda 100644
--- a/python/paddle/utils/cpp_extension/extension_utils.py
+++ b/python/paddle/utils/cpp_extension/extension_utils.py
@@ -1160,6 +1160,7 @@ def _write_setup_file(
     file_path,
     build_dir,
     include_dirs,
+    library_dirs,
    extra_cxx_cflags,
    extra_cuda_cflags,
    link_args,
@@ -1181,6 +1182,7 @@ def _write_setup_file(
         {prefix}Extension(
             sources={sources},
             include_dirs={include_dirs},
+            library_dirs={library_dirs},
             extra_compile_args={{'cxx':{extra_cxx_cflags}, 'nvcc':{extra_cuda_cflags}}},
             extra_link_args={extra_link_args})],
         cmdclass={{"build_ext" : BuildExtension.with_options(
@@ -1199,6 +1201,7 @@ def _write_setup_file(
         prefix='CUDA' if with_cuda else 'Cpp',
         sources=list2str(sources),
         include_dirs=list2str(include_dirs),
+        library_dirs=list2str(library_dirs),
         extra_cxx_cflags=list2str(extra_cxx_cflags),
         extra_cuda_cflags=list2str(extra_cuda_cflags),
         extra_link_args=list2str(link_args),
diff --git a/python/paddle/utils/layers_utils.py b/python/paddle/utils/layers_utils.py
index 4ff9deaa6fbe2e..98b9089d83f06c 100644
--- a/python/paddle/utils/layers_utils.py
+++ b/python/paddle/utils/layers_utils.py
@@ -380,7 +380,7 @@ def _contain_var(list_or_tuple):
 
 def get_shape_tensor_inputs(inputs, attrs, shape, op_type):
-    from ..fluid.layers.tensor import fill_constant
+    from paddle.tensor import fill_constant
 
     def _get_attr_shape(list_shape):
         attr_shape = []
@@ -435,7 +435,7 @@ def _convert_to_tensor_list(old_list, dtype="int32"):
     """
     Converts all elements of a list to Variable.
     """
-    from ..fluid.layers.tensor import fill_constant
+    from paddle.tensor import fill_constant
 
     new_list_tensor = []
     for ele in old_list:
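A hedged usage sketch for the new extra_library_paths argument (paths and names hypothetical); the value is threaded through _write_setup_file into the generated Extension(library_dirs=...):

    from paddle.utils.cpp_extension import load

    custom_op = load(
        name='custom_relu',
        sources=['custom_relu.cc'],
        extra_library_paths=['/opt/mylibs'],
        verbose=True,
    )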