From 1ac8e318bb8ee895b8eaf848c81d967add187508 Mon Sep 17 00:00:00 2001
From: Shashank Rajput <144760128+ShashankMosaicML@users.noreply.github.com>
Date: Wed, 14 Aug 2024 23:53:16 -0700
Subject: [PATCH] Update configuration_mpt.py

---
 llmfoundry/models/mpt/configuration_mpt.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/llmfoundry/models/mpt/configuration_mpt.py b/llmfoundry/models/mpt/configuration_mpt.py
index 02eace638b..759f347e89 100644
--- a/llmfoundry/models/mpt/configuration_mpt.py
+++ b/llmfoundry/models/mpt/configuration_mpt.py
@@ -333,7 +333,7 @@ def _validate_config(self) -> None:
             'attn_impl'
         ] == 'flash' and not is_flash_v2_installed(v2_version='v2.3.0',):
             raise NotImplementedError(
-                'sliding window attention only implemented with for torch attention or flash attention (v2.3.0 or higher).',
+                'sliding window attention only implemented for torch attention and flash attention (v2.3.0 or higher).',
             )
         if self.embedding_fraction > 1 or self.embedding_fraction <= 0:
             raise ValueError(
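
For context, below is a minimal, self-contained sketch of the validation branch this patch touches. The real check lives in MPTConfig._validate_config in llmfoundry/models/mpt/configuration_mpt.py; the standalone function and the is_flash_v2_installed stub here are hypothetical stand-ins, not the library's actual API surface.

def is_flash_v2_installed(v2_version: str = '2.0.0') -> bool:
    # Hypothetical stand-in: the real llmfoundry helper inspects the
    # installed flash-attn package version. Hardcoded False here so the
    # error path below is exercised.
    return False


def validate_sliding_window(attn_config: dict) -> None:
    # Sliding window attention is supported only with the 'torch'
    # implementation or with flash attention >= v2.3.0; otherwise raise
    # with the message as corrected by this patch.
    if attn_config['sliding_window_size'] != -1 and attn_config[
        'attn_impl'
    ] == 'flash' and not is_flash_v2_installed(v2_version='v2.3.0'):
        raise NotImplementedError(
            'sliding window attention only implemented for torch attention and flash attention (v2.3.0 or higher).',
        )


# Usage: this config triggers the error when flash-attn v2.3.0+ is absent.
try:
    validate_sliding_window({
        'sliding_window_size': 4096,
        'attn_impl': 'flash',
    })
except NotImplementedError as e:
    print(e)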