diff --git a/llmfoundry/utils/builders.py b/llmfoundry/utils/builders.py
index 631d25bc60..dcffbf6caa 100644
--- a/llmfoundry/utils/builders.py
+++ b/llmfoundry/utils/builders.py
@@ -38,6 +38,7 @@
 )
 from llmfoundry.utils.config_utils import to_dict_container, to_list_container
 from llmfoundry.utils.registry_utils import construct_from_registry
+from llmfoundry.utils.warnings import experimental_function
 
 log = logging.getLogger(__name__)
 
@@ -705,6 +706,7 @@ def _validate_cfg(icl_cfg: dict[str, Any]):
     return evaluators, logger_keys
 
 
+@experimental_function('tp_strategy')
 def build_tp_strategy(
     name: str,
     model: ComposerModel,
diff --git a/tests/models/utils/test_tp_strategy.py b/tests/models/utils/test_tp_strategy.py
index 073a8ff782..19bad8abfd 100644
--- a/tests/models/utils/test_tp_strategy.py
+++ b/tests/models/utils/test_tp_strategy.py
@@ -21,6 +21,9 @@
 
 
 @pytest.mark.gpu
+@pytest.mark.filterwarnings(
+    'ignore:tp_strategy is experimental and may change with future versions.'
+)
 def test_ffn_tp_strategy_layer_plan():
     # Actual layer plan from tp_strategy=fnn
     tp_config = {
@@ -128,3 +131,7 @@ def test_no_tp_with_moes():
         match='Tensor Parallelism is not currently supported for MoE models.',
     ):
         process_init_device(model_cfg, fsdp_cfg, tp_cfg)
+
+
+# if __name__ == '__main__':
+#     test_ffn_tp_strategy_layer_plan()
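
For context, the sketch below shows how a decorator like `experimental_function` typically emits the warning that the test above silences with `pytest.mark.filterwarnings`. This is only an assumed, minimal reimplementation for illustration; the actual decorator lives in `llmfoundry.utils.warnings` and may differ, and `build_tp_strategy_stub` is a hypothetical stand-in for the real `build_tp_strategy`.

```python
# Minimal sketch (assumption, not the llmfoundry implementation) of an
# "experimental feature" decorator that warns on every call.
import functools
import warnings
from typing import Any, Callable


def experimental_function(feature_name: str) -> Callable:
    """Mark a function as experimental by emitting a UserWarning when called."""

    def decorator(func: Callable) -> Callable:

        @functools.wraps(func)
        def wrapper(*args: Any, **kwargs: Any) -> Any:
            # Message format mirrors the string filtered in the test above.
            warnings.warn(
                f'{feature_name} is experimental and may change with future versions.',
                UserWarning,
            )
            return func(*args, **kwargs)

        return wrapper

    return decorator


@experimental_function('tp_strategy')
def build_tp_strategy_stub() -> None:
    """Hypothetical stand-in for build_tp_strategy, used only to show the warning."""


if __name__ == '__main__':
    # Calling the decorated function raises the experimental-feature warning,
    # which is why the GPU test adds a matching filterwarnings marker.
    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter('always')
        build_tp_strategy_stub()
    assert 'tp_strategy is experimental' in str(caught[0].message)
```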