From 0dee5c9d8ab5c48f7afd33f4ccefbfa900ee4768 Mon Sep 17 00:00:00 2001
From: Richard Zou
Date: Mon, 22 Jul 2024 14:50:13 -0400
Subject: [PATCH] Update outdated custom ops tutorials to point to the new landing page (#2953)

* Update outdated custom ops tutorials to point to the new landing page

* Also turns on verification for the python custom ops tutorials.

* Update intermediate_source/torch_export_tutorial.py

---------

Co-authored-by: Svetlana Karslioglu
---
 .jenkins/validate_tutorials_built.py         |  1 -
 advanced_source/cpp_custom_ops.rst           |  2 +-
 advanced_source/cpp_extension.rst            |  6 +++++-
 advanced_source/custom_ops_landing_page.rst  | 13 +++++++------
 advanced_source/dispatcher.rst               |  5 +++++
 advanced_source/python_custom_ops.py         |  2 +-
 advanced_source/torch_script_custom_ops.rst  |  5 +++++
 intermediate_source/torch_export_tutorial.py | 14 ++++----------
 8 files changed, 28 insertions(+), 20 deletions(-)

diff --git a/.jenkins/validate_tutorials_built.py b/.jenkins/validate_tutorials_built.py
index 119dd079d9..67fe182287 100644
--- a/.jenkins/validate_tutorials_built.py
+++ b/.jenkins/validate_tutorials_built.py
@@ -29,7 +29,6 @@
     "intermediate_source/fx_conv_bn_fuser",
     "intermediate_source/_torch_export_nightly_tutorial", # does not work on release
     "advanced_source/super_resolution_with_onnxruntime",
-    "advanced_source/python_custom_ops", # https://github.com/pytorch/pytorch/issues/127443
     "advanced_source/usb_semisup_learn", # fails with CUDA OOM error, should try on a different worker
     "prototype_source/fx_graph_mode_ptq_dynamic",
     "prototype_source/vmap_recipe",
diff --git a/advanced_source/cpp_custom_ops.rst b/advanced_source/cpp_custom_ops.rst
index fa56a0cc21..435ff088bc 100644
--- a/advanced_source/cpp_custom_ops.rst
+++ b/advanced_source/cpp_custom_ops.rst
@@ -417,4 +417,4 @@ Conclusion
 In this tutorial, we went over the recommended approach to integrating
 Custom C++ and CUDA operators with PyTorch. The ``TORCH_LIBRARY/torch.library``
 APIs are fairly low-level. For more information about how to use the API, see
-`The Custom Operators Manual `_.
+`The Custom Operators Manual `_.
diff --git a/advanced_source/cpp_extension.rst b/advanced_source/cpp_extension.rst
index cb0e990797..96cbb9f5cc 100644
--- a/advanced_source/cpp_extension.rst
+++ b/advanced_source/cpp_extension.rst
@@ -2,6 +2,10 @@ Custom C++ and CUDA Extensions
 ==============================
 **Author**: `Peter Goldsborough `_
 
+.. warning::
+
+   This tutorial is deprecated as of PyTorch 2.4. Please see :ref:`custom-ops-landing-page`
+   for the newest up-to-date guides on extending PyTorch with Custom C++/CUDA Extensions.
 
 PyTorch provides a plethora of operations related to neural networks, arbitrary
 tensor algebra, data wrangling and other purposes. However, you may still find
@@ -225,7 +229,7 @@ Instead of:
 
 Currently open issue for nvcc bug `here
 `_. Complete workaround code example `here
-`_.
+`_.
 
 Forward Pass
 ************
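For readers redirected away from the deprecated C++ extension tutorial, the replacement workflow lives behind the :ref:`custom-ops-landing-page` reference added above. A minimal sketch of that workflow, assuming PyTorch >= 2.4 (the ``my_ext::my_relu`` name is invented for illustration, and ``torch.relu`` stands in for a real compiled C++/CUDA binding)::

    import torch

    # Hypothetical operator; a real extension would call its compiled
    # C++/CUDA binding here instead of torch.relu.
    @torch.library.custom_op("my_ext::my_relu", mutates_args=())
    def my_relu(x: torch.Tensor) -> torch.Tensor:
        return torch.relu(x)

    # FakeTensor support so torch.compile / torch.export can reason about
    # output shapes without running the real kernel.
    @my_relu.register_fake
    def _(x):
        return torch.empty_like(x)

    print(my_relu(torch.randn(4)))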
diff --git a/advanced_source/custom_ops_landing_page.rst b/advanced_source/custom_ops_landing_page.rst
index ebb238ef63..0f21ee2c4c 100644
--- a/advanced_source/custom_ops_landing_page.rst
+++ b/advanced_source/custom_ops_landing_page.rst
@@ -1,7 +1,7 @@
 .. _custom-ops-landing-page:
 
-PyTorch Custom Operators Landing Page
-=====================================
+PyTorch Custom Operators
+===========================
 
 PyTorch offers a large library of operators that work on Tensors (e.g. ``torch.add``,
 ``torch.sum``, etc). However, you may wish to bring a new custom operation to PyTorch
@@ -10,8 +10,7 @@ In order to do so, you must register the custom operation with PyTorch via the P
 `torch.library docs `_ or C++ ``TORCH_LIBRARY`` APIs.
 
-TL;DR
------
+
 
 Authoring a custom operator from Python
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -19,10 +18,11 @@ Authoring a custom operator from Python
 Please see :ref:`python-custom-ops-tutorial`.
 
 You may wish to author a custom operator from Python (as opposed to C++) if:
+
 - you have a Python function you want PyTorch to treat as an opaque callable, especially with
-respect to ``torch.compile`` and ``torch.export``.
+  respect to ``torch.compile`` and ``torch.export``.
 - you have some Python bindings to C++/CUDA kernels and want those to compose with PyTorch
-subsystems (like ``torch.compile`` or ``torch.autograd``)
+  subsystems (like ``torch.compile`` or ``torch.autograd``)
 
 Integrating custom C++ and/or CUDA code with PyTorch
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -30,6 +30,7 @@ Integrating custom C++ and/or CUDA code with PyTorch
 Please see :ref:`cpp-custom-ops-tutorial`.
 
 You may wish to author a custom operator from C++ (as opposed to Python) if:
+
 - you have custom C++ and/or CUDA code.
 - you plan to use this code with ``AOTInductor`` to do Python-less inference.
diff --git a/advanced_source/dispatcher.rst b/advanced_source/dispatcher.rst
index 0b5fd3c8af..4b03803c15 100644
--- a/advanced_source/dispatcher.rst
+++ b/advanced_source/dispatcher.rst
@@ -1,6 +1,11 @@
 Registering a Dispatched Operator in C++
 ========================================
 
+.. warning::
+
+   This tutorial is deprecated as of PyTorch 2.4. Please see :ref:`custom-ops-landing-page`
+   for the newest up-to-date guides on extending PyTorch with Custom Operators.
+
 The dispatcher is an internal component of PyTorch which is responsible
 for figuring out what code should actually get run when you call a function
 like ``torch::add``. This can be nontrivial, because PyTorch operations need
diff --git a/advanced_source/python_custom_ops.py b/advanced_source/python_custom_ops.py
index 9111e1f43f..1e429b76b3 100644
--- a/advanced_source/python_custom_ops.py
+++ b/advanced_source/python_custom_ops.py
@@ -260,5 +260,5 @@ def f(x):
 # For more detailed information, see:
 #
 # - `the torch.library documentation `_
-# - `the Custom Operators Manual `_
+# - `the Custom Operators Manual `_
 #
diff --git a/advanced_source/torch_script_custom_ops.rst b/advanced_source/torch_script_custom_ops.rst
index 55497d5def..0a0e6e2bd7 100644
--- a/advanced_source/torch_script_custom_ops.rst
+++ b/advanced_source/torch_script_custom_ops.rst
@@ -1,6 +1,11 @@
 Extending TorchScript with Custom C++ Operators
 ===============================================
 
+.. warning::
+
+   This tutorial is deprecated as of PyTorch 2.4. Please see :ref:`custom-ops-landing-page`
+   for the newest up-to-date guides on PyTorch Custom Operators.
+
 The PyTorch 1.0 release introduced a new programming model to PyTorch called
 `TorchScript `_. TorchScript is a subset of the Python
 programming language which can be parsed, compiled and
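The landing-page hunk above lists "a Python function you want PyTorch to treat as an opaque callable" as the first reason to author an operator in Python. A hedged sketch of what that looks like in practice, assuming PyTorch >= 2.4 (NumPy stands in for arbitrary opaque code, and the ``mylib::numpy_sin`` name is invented for this example)::

    import numpy as np
    import torch

    # Registered as a custom op, the NumPy round-trip below is opaque to
    # torch.compile; unregistered, fullgraph=True would graph-break on it.
    @torch.library.custom_op("mylib::numpy_sin", mutates_args=())
    def numpy_sin(x: torch.Tensor) -> torch.Tensor:
        return torch.from_numpy(np.sin(x.numpy()))

    @numpy_sin.register_fake
    def _(x):
        return torch.empty_like(x)

    @torch.compile(fullgraph=True)
    def f(x):
        return numpy_sin(x)

    print(f(torch.randn(3)))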
diff --git a/intermediate_source/torch_export_tutorial.py b/intermediate_source/torch_export_tutorial.py
index 98016833c4..dc5e226f86 100644
--- a/intermediate_source/torch_export_tutorial.py
+++ b/intermediate_source/torch_export_tutorial.py
@@ -544,17 +544,11 @@ def suggested_fixes():
 #
 # Currently, the steps to register a custom op for use by ``torch.export`` are:
 #
-# - Define the custom op using ``torch.library`` (`reference `__)
+# - Define the custom op using ``torch.library`` (`reference `__)
 #   as with any other custom op
 
-from torch.library import Library, impl, impl_abstract
-
-m = Library("my_custom_library", "DEF")
-
-m.define("custom_op(Tensor input) -> Tensor")
-
-@impl(m, "custom_op", "CompositeExplicitAutograd")
-def custom_op(x):
+@torch.library.custom_op("my_custom_library::custom_op", mutates_args={})
+def custom_op(x: torch.Tensor) -> torch.Tensor:
     print("custom_op called!")
     return torch.relu(x)
 
@@ -562,7 +556,7 @@ def custom_op(x):
 
 # - Define a ``"Meta"`` implementation of the custom op that returns an empty
 # tensor with the same shape as the expected output
 
-@impl_abstract("my_custom_library::custom_op")
+@custom_op.register_fake
 def custom_op_meta(x):
     return torch.empty_like(x)
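An end-to-end usage sketch for the rewritten snippet, assuming the ``custom_op`` definition and its ``register_fake`` implementation from the hunk above have been executed under PyTorch >= 2.4::

    import torch

    class M(torch.nn.Module):
        def forward(self, x):
            # Dispatch through the registered operator handle.
            return torch.ops.my_custom_library.custom_op(x)

    # torch.export traces with fake tensors, so shape reasoning goes
    # through the register_fake implementation, not the real kernel.
    ep = torch.export.export(M(), (torch.randn(3, 3),))
    print(ep.graph)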