diff --git a/src/transformers/trainer.py b/src/transformers/trainer.py
index e1871eaf8c3583..e585520cf4ac8e 100755
--- a/src/transformers/trainer.py
+++ b/src/transformers/trainer.py
@@ -2175,7 +2175,7 @@ def _inner_training_loop(
         grad_norm: Optional[float] = None
         self.control = self.callback_handler.on_train_begin(args, self.state, self.control)
 
-        if args.sanity_evaluation:
+        if args.eval_on_start:
             self._evaluate(trial, ignore_keys_for_eval, skip_scheduler=True)
 
         total_batched_samples = 0
diff --git a/src/transformers/training_args.py b/src/transformers/training_args.py
index b506e4b1507320..0b1f5bc9d1313c 100644
--- a/src/transformers/training_args.py
+++ b/src/transformers/training_args.py
@@ -772,8 +772,8 @@ class TrainingArguments:
             that takes a boolean argument `compute_result`, which when passed `True`, will trigger the final global
             summary statistics from the batch-level summary statistics you've accumulated over the evaluation set.
-        sanity_evaluation(`bool`, *optional*, defaults to `False`):
-            Whether or not to perform a sanity check to ensure that the validation steps works correctly. It will be performed before the training.
+        eval_on_start(`bool`, *optional*, defaults to `False`):
+            Whether to perform an evaluation step (sanity check) before training to ensure the validation step works correctly.
     """
 
     framework = "pt"
@@ -1457,7 +1457,7 @@ class TrainingArguments:
         metadata={"help": "Break eval metrics calculation into batches to save memory."},
     )
 
-    sanity_evaluation: bool = field(
+    eval_on_start: bool = field(
        default=False,
        metadata={
            "help": "Whether to run through the entire `evaluation` step at the very beginning of training as a sanity check."
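For context, here is a minimal sketch of how the renamed flag would be used from user code, assuming a `transformers` version that includes this change; the output directory and evaluation settings below are placeholder values, not part of the patch:

```python
from transformers import TrainingArguments

# Minimal sketch: after this rename, the flag is `eval_on_start`
# (previously `sanity_evaluation`). All values here are illustrative.
args = TrainingArguments(
    output_dir="out",        # hypothetical output directory
    eval_strategy="steps",   # evaluate periodically during training
    eval_steps=500,
    eval_on_start=True,      # run one full evaluation pass before training starts, as a sanity check
)
```

When these arguments are passed to `Trainer`, the first hunk above is the code path that fires: `self._evaluate(...)` runs once right after `on_train_begin`, before any training step, so a broken evaluation loop surfaces immediately instead of after the first epoch.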