Merge branch 'master' into alvieirajr-swapping
loadams authored Apr 26, 2024
2 parents 1f5384f + 4c15ad9 commit b08b723
Showing 3 changed files with 10 additions and 2 deletions.
4 changes: 4 additions & 0 deletions .github/workflows/nv-ds-chat.yml
@@ -10,6 +10,10 @@ on:
         required: false
         default: 'master'
         type: string
+  pull_request:
+    paths:
+      - "deepspeed/runtime/zero/stage_1_and_2.py"
+      - "deepspeed/runtime/zero/stage3.py"
 
 concurrency:
   group: ${{ github.workflow }}-${{ github.ref }}
4 changes: 4 additions & 0 deletions build_win.bat
@@ -1,6 +1,10 @@
 @echo off
 
 set DS_BUILD_AIO=0
+set DS_BUILD_CUTLASS_OPS=0
+set DS_BUILD_EVOFORMER_ATTN=0
+set DS_BUILD_FP_QUANTIZER=0
+set DS_BUILD_RAGGED_DEVICE_OPS=0
 set DS_BUILD_SPARSE_ATTN=0
 
 echo Administrative permissions required. Detecting permissions...
4 changes: 2 additions & 2 deletions deepspeed/runtime/zero/stage3.py
@@ -15,7 +15,7 @@
 from deepspeed.utils import logger
 from deepspeed.runtime.fp16.loss_scaler import CreateLossScaler
 from deepspeed.runtime.comm.coalesced_collectives import reduce_scatter_coalesced, all_to_all_quant_reduce
-from deepspeed.runtime.utils import inf, is_model_parallel_parameter, get_only_unique_item
+from deepspeed.runtime.utils import inf, get_global_norm, is_model_parallel_parameter, get_only_unique_item
 from deepspeed.runtime.zero.partition_parameters import *
 from deepspeed.runtime.zero.config import ZeroStageEnum
 from deepspeed.runtime.zero.offload_config import OffloadDeviceEnum
@@ -2027,7 +2027,7 @@ def step(self, closure=None):
             return
 
         norm_groups = self._get_norm_groups()
-        scaled_global_grad_norm = torch.norm(torch.stack(norm_groups))
+        scaled_global_grad_norm = get_global_norm(norm_list=norm_groups)
 
         # Stash unscaled gradient norm
         self._global_grad_norm = scaled_global_grad_norm / self.loss_scale
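For context on the hunk above: both expressions reduce the per-parameter-group gradient norms to a single global L2 norm, i.e. the square root of the sum of the squared group norms, assuming get_global_norm(norm_list=...) produces the same value as torch.norm(torch.stack(...)); any difference between the two would lie in edge-case handling rather than in this basic reduction. A minimal standalone sketch of that aggregation follows; the helper name and the loss-scale value are illustrative, not DeepSpeed API.

import torch

def global_norm_from_group_norms(norm_groups):
    # Illustrative helper: combine per-group gradient norms into one
    # global L2 norm, sqrt(norm_1^2 + norm_2^2 + ... + norm_k^2).
    return torch.norm(torch.stack(norm_groups))

# Three parameter groups with gradient norms 3, 4 and 12.
norm_groups = [torch.tensor(3.0), torch.tensor(4.0), torch.tensor(12.0)]
scaled_global_grad_norm = global_norm_from_group_norms(norm_groups)  # sqrt(9 + 16 + 144) = 13.0

# As in step(): divide out the loss scale to stash the unscaled norm.
loss_scale = 1024.0  # assumed value, for illustration only
global_grad_norm = scaled_global_grad_norm / loss_scale
print(scaled_global_grad_norm.item(), global_grad_norm.item())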
