diff --git a/src/transformers/training_args.py b/src/transformers/training_args.py
index cdf6325c4b4ae6..4dd69ca72ef908 100644
--- a/src/transformers/training_args.py
+++ b/src/transformers/training_args.py
@@ -482,6 +482,11 @@ class TrainingArguments:
                 - sync_module_states (`bool`, *optional*, defaults to `True`)
                     If `"True"`, each individually wrapped FSDP unit will broadcast module parameters from rank 0
                     to ensure they are the same across all ranks after initialization
+                - cpu_ram_efficient_loading (`bool`, *optional*, defaults to `False`)
+                    If `"True"`, only the first process loads the pretrained model checkpoint while all other processes
+                    have empty weights. When this setting is `"True"`, `sync_module_states` must also be `"True"`;
+                    otherwise, all processes except the main process would have random weights, leading to unexpected
+                    behaviour during training.
                 - activation_checkpointing (`bool`, *optional*, defaults to `False`):
                     If `"True"`, activation checkpointing is a technique to reduce memory usage by clearing activations of
                     certain layers and recomputing them during a backward pass. Effectively, this trades extra
@@ -1768,6 +1773,7 @@ def __post_init__(self):
             os.environ[f"{prefix}BACKWARD_PREFETCH"] = prefetch_policy.upper()
             os.environ[f"{prefix}FORWARD_PREFETCH"] = self.fsdp_config.get("forward_prefetch", "false")
             os.environ[f"{prefix}SYNC_MODULE_STATES"] = self.fsdp_config.get("sync_module_states", "true")
+            os.environ[f"{prefix}CPU_RAM_EFFICIENT_LOADING"] = self.fsdp_config.get("cpu_ram_efficient_loading", "false")
             os.environ[f"{prefix}USE_ORIG_PARAMS"] = self.fsdp_config.get("use_orig_params", "true")
 
         if is_accelerate_available():
diff --git a/tests/fsdp/test_fsdp.py b/tests/fsdp/test_fsdp.py
index aeb232fd9e8e0b..d9dea012f13bf8 100644
--- a/tests/fsdp/test_fsdp.py
+++ b/tests/fsdp/test_fsdp.py
@@ -144,6 +144,7 @@ def setUp(self):
             "limit_all_gathers": "False",
             "use_orig_params": "True",
             "sync_module_states": "True",
+            "cpu_ram_efficient_loading": "True",
             "activation_checkpointing": "False",
             "min_num_params": 1,
         }
@@ -208,6 +209,7 @@ def test_fsdp_config_transformers_auto_wrap(self, sharding_strategy, dtype):
         self.assertEqual(os.environ[f"{prefix}FORWARD_PREFETCH"], fsdp_config["forward_prefetch"])
         self.assertEqual(os.environ[f"{prefix}USE_ORIG_PARAMS"], fsdp_config["use_orig_params"])
         self.assertEqual(os.environ[f"{prefix}SYNC_MODULE_STATES"], fsdp_config["sync_module_states"])
+        self.assertEqual(os.environ[f"{prefix}CPU_RAM_EFFICIENT_LOADING"], fsdp_config["cpu_ram_efficient_loading"])
         self.assertEqual(os.environ.get("ACCELERATE_USE_FSDP", "false"), "true")
 
     @parameterized.expand(params, name_func=_parameterized_custom_name_func)
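
Usage sketch (not part of the patch): one way a user might opt into the new flag through `TrainingArguments`, using the string-valued config style from the test fixture above. The output directory and `fsdp` value are illustrative placeholders.

from transformers import TrainingArguments

# Enable CPU-RAM-efficient checkpoint loading; per the docstring added in this
# patch, "cpu_ram_efficient_loading" requires "sync_module_states" to stay "True"
# so non-main ranks receive the broadcast weights.
training_args = TrainingArguments(
    output_dir="./fsdp_output",  # placeholder path
    fsdp="full_shard",           # illustrative FSDP mode
    fsdp_config={
        "sync_module_states": "True",
        "cpu_ram_efficient_loading": "True",  # flag added in this patch
        "use_orig_params": "True",
    },
)
# __post_init__ then exports FSDP_CPU_RAM_EFFICIENT_LOADING alongside the other
# FSDP_* environment variables for Accelerate to pick up.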