From 90ed2df1a77c5bc4a6c5fceed4b8ed61867f7bf8 Mon Sep 17 00:00:00 2001
From: "Yu, Guangye"
Date: Tue, 26 Nov 2024 16:03:44 +0000
Subject: [PATCH] [2/N] Refine beginner tutorial by accelerator api

---
 beginner_source/basics/quickstart_tutorial.py | 14 ++++----------
 beginner_source/basics/tensorqs_tutorial.py   | 16 ++++++++--------
 2 files changed, 12 insertions(+), 18 deletions(-)

diff --git a/beginner_source/basics/quickstart_tutorial.py b/beginner_source/basics/quickstart_tutorial.py
index df7628081b..5cce8dcfe9 100644
--- a/beginner_source/basics/quickstart_tutorial.py
+++ b/beginner_source/basics/quickstart_tutorial.py
@@ -84,16 +84,10 @@
 # To define a neural network in PyTorch, we create a class that inherits
 # from `nn.Module <https://pytorch.org/docs/stable/generated/torch.nn.Module.html>`_. We define the layers of the network
 # in the ``__init__`` function and specify how data will pass through the network in the ``forward`` function. To accelerate
-# operations in the neural network, we move it to the GPU or MPS if available.
-
-# Get cpu, gpu or mps device for training.
-device = (
-    "cuda"
-    if torch.cuda.is_available()
-    else "mps"
-    if torch.backends.mps.is_available()
-    else "cpu"
-)
+# operations in the neural network, we move it to the `accelerator <https://pytorch.org/docs/stable/torch.html#accelerators>`__
+# such as CUDA, MPS, MTIA, or XPU. If the current accelerator is available, we will use it. Otherwise, we use the CPU.
+
+device = torch.accelerator.current_accelerator().type if torch.accelerator.is_available() else "cpu"
 print(f"Using {device} device")
 
 # Define model
diff --git a/beginner_source/basics/tensorqs_tutorial.py b/beginner_source/basics/tensorqs_tutorial.py
index 70a966d9f8..30e05cb10d 100644
--- a/beginner_source/basics/tensorqs_tutorial.py
+++ b/beginner_source/basics/tensorqs_tutorial.py
@@ -99,20 +99,20 @@
 # Operations on Tensors
 # ~~~~~~~~~~~~~~~~~~~~~~~
 #
-# Over 100 tensor operations, including arithmetic, linear algebra, matrix manipulation (transposing,
+# Over 1200 tensor operations, including arithmetic, linear algebra, matrix manipulation (transposing,
 # indexing, slicing), sampling and more are
 # comprehensively described `here <https://pytorch.org/docs/stable/torch.html>`__.
 #
-# Each of these operations can be run on the GPU (at typically higher speeds than on a
-# CPU). If you’re using Colab, allocate a GPU by going to Runtime > Change runtime type > GPU.
+# Each of these operations can be run on the CPU and `Accelerator <https://pytorch.org/docs/stable/torch.html#accelerators>`__
+# such as CUDA, MPS, MTIA, or XPU. If you’re using Colab, allocate an accelerator by going to Runtime > Change runtime type > GPU.
 #
-# By default, tensors are created on the CPU. We need to explicitly move tensors to the GPU using
-# ``.to`` method (after checking for GPU availability). Keep in mind that copying large tensors
+# By default, tensors are created on the CPU. We need to explicitly move tensors to the accelerator using
+# the ``.to`` method (after checking for accelerator availability). Keep in mind that copying large tensors
 # across devices can be expensive in terms of time and memory!
 
-# We move our tensor to the GPU if available
-if torch.cuda.is_available():
-    tensor = tensor.to("cuda")
+# We move our tensor to the current accelerator if available
+if torch.accelerator.is_available():
+    tensor = tensor.to(torch.accelerator.current_accelerator())
 
 ######################################################################
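
For reference, the device-selection pattern this patch introduces can be exercised on its own. Below is a minimal sketch, assuming a PyTorch build that provides the torch.accelerator module (the API the patch itself relies on); on older releases you would still need the removed torch.cuda / torch.backends.mps checks.

import torch

# Pick the current accelerator type (e.g. "cuda", "mps", "mtia", "xpu")
# if one is available; otherwise fall back to the CPU. This mirrors the
# one-liner added to quickstart_tutorial.py above.
device = torch.accelerator.current_accelerator().type if torch.accelerator.is_available() else "cpu"
print(f"Using {device} device")

# Tensors are created on the CPU by default; moving one to the
# accelerator is an explicit (and potentially expensive) device copy,
# as in the tensorqs_tutorial.py hunk above.
tensor = torch.ones(4, 4)
if torch.accelerator.is_available():
    tensor = tensor.to(torch.accelerator.current_accelerator())
print(tensor.device)

Because torch.accelerator.current_accelerator() returns a torch.device, the same code path covers CUDA, MPS, MTIA, and XPU without per-backend branching, which is what lets the patch delete the chained torch.cuda / torch.backends.mps availability checks.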