Merge branch 'master' into tohtana/fix_offload_states_assert
loadams authored Dec 14, 2024
2 parents aa743e4 + fc7c070 commit b1bbbe3
Showing 1 changed file with 8 additions and 10 deletions.
18 changes: 8 additions & 10 deletions accelerator/real_accelerator.py
@@ -125,10 +125,9 @@ def get_accelerator():
     if accelerator_name is None:
         try:
             import intel_extension_for_pytorch as ipex
+
             if ipex._C._has_xpu():
                 accelerator_name = "xpu"
-            else:
-                accelerator_name = "cpu"
         except ImportError as e:
             pass
     if accelerator_name is None:
@@ -162,7 +161,6 @@ def get_accelerator():
         except ImportError as e:
             pass
     if accelerator_name is None:
-        # borrow this log from PR#5084
         try:
             import torch
 
@@ -174,16 +172,16 @@ def get_accelerator():
             # For reference: https://github.com/microsoft/DeepSpeed/pull/6810
             if torch.cuda.device_count() > 0 and torch.cuda.is_available(): #ignore-cuda
                 accelerator_name = "cuda"
-            else:
-                if accel_logger is not None:
-                    accel_logger.warn(
-                        "Setting accelerator to CPU. If you have GPU or other accelerator, we were unable to detect it."
-                    )
-                accelerator_name = "cpu"
         except (RuntimeError, ImportError) as e:
             # TODO need a more decent way to detect which accelerator to use, consider using nvidia-smi command for detection
-            accelerator_name = "cuda"
             pass
+    if accelerator_name is None:
+        # borrow this log from PR#5084
+        if accel_logger is not None:
+            accel_logger.warn(
+                "Setting accelerator to CPU. If you have GPU or other accelerator, we were unable to detect it.")
+        # cpu added as catch-all when accelerator detection fails
+        accelerator_name = "cpu"
 
     ds_set_method = "auto detect"
 