diff --git a/examples/flax/image-captioning/run_image_captioning_flax.py b/examples/flax/image-captioning/run_image_captioning_flax.py
index 859a006dbddc00..94ee5106d4a91d 100644
--- a/examples/flax/image-captioning/run_image_captioning_flax.py
+++ b/examples/flax/image-captioning/run_image_captioning_flax.py
@@ -853,7 +853,7 @@ def blockwise_data_loader(
             yield batch

     # Metric
-    metric = evaluate.load("rouge")
+    metric = evaluate.load("rouge", cache_dir=model_args.cache_dir)

     def postprocess_text(preds, labels):
         preds = [pred.strip() for pred in preds]
diff --git a/examples/flax/question-answering/run_qa.py b/examples/flax/question-answering/run_qa.py
index d08e7f01fd5165..fdba1c3ba49fdb 100644
--- a/examples/flax/question-answering/run_qa.py
+++ b/examples/flax/question-answering/run_qa.py
@@ -807,7 +807,9 @@ def post_processing_function(examples, features, predictions, stage="eval"):
         references = [{"id": ex["id"], "answers": ex[answer_column_name]} for ex in examples]
         return EvalPrediction(predictions=formatted_predictions, label_ids=references)

-    metric = evaluate.load("squad_v2" if data_args.version_2_with_negative else "squad")
+    metric = evaluate.load(
+        "squad_v2" if data_args.version_2_with_negative else "squad", cache_dir=model_args.cache_dir
+    )

     def compute_metrics(p: EvalPrediction):
         return metric.compute(predictions=p.predictions, references=p.label_ids)
diff --git a/examples/flax/speech-recognition/run_flax_speech_recognition_seq2seq.py b/examples/flax/speech-recognition/run_flax_speech_recognition_seq2seq.py
index ec7be4bc5535a5..5172bcb0beba0f 100644
--- a/examples/flax/speech-recognition/run_flax_speech_recognition_seq2seq.py
+++ b/examples/flax/speech-recognition/run_flax_speech_recognition_seq2seq.py
@@ -577,7 +577,7 @@ def is_audio_in_length_range(length):
         return

     # 8. Load Metric
-    metric = evaluate.load("wer")
+    metric = evaluate.load("wer", cache_dir=model_args.cache_dir)

     def compute_metrics(preds, labels):
         # replace padded labels by the padding token
diff --git a/examples/flax/summarization/run_summarization_flax.py b/examples/flax/summarization/run_summarization_flax.py
index f39882362e2678..9bb60ade084328 100644
--- a/examples/flax/summarization/run_summarization_flax.py
+++ b/examples/flax/summarization/run_summarization_flax.py
@@ -710,7 +710,7 @@ def preprocess_function(examples):
         )

     # Metric
-    metric = evaluate.load("rouge")
+    metric = evaluate.load("rouge", cache_dir=model_args.cache_dir)

     def postprocess_text(preds, labels):
         preds = [pred.strip() for pred in preds]
diff --git a/examples/flax/text-classification/run_flax_glue.py b/examples/flax/text-classification/run_flax_glue.py
index 823eed2459a1bc..9c51c828363515 100755
--- a/examples/flax/text-classification/run_flax_glue.py
+++ b/examples/flax/text-classification/run_flax_glue.py
@@ -599,9 +599,9 @@ def eval_step(state, batch):
     p_eval_step = jax.pmap(eval_step, axis_name="batch")

     if data_args.task_name is not None:
-        metric = evaluate.load("glue", data_args.task_name)
+        metric = evaluate.load("glue", data_args.task_name, cache_dir=model_args.cache_dir)
     else:
-        metric = evaluate.load("accuracy")
+        metric = evaluate.load("accuracy", cache_dir=model_args.cache_dir)

     logger.info(f"===== Starting training ({num_epochs} epochs) =====")
     train_time = 0
diff --git a/examples/flax/token-classification/run_flax_ner.py b/examples/flax/token-classification/run_flax_ner.py
index d5ae59d9b1ec65..ac14b5c2854702 100644
--- a/examples/flax/token-classification/run_flax_ner.py
+++ b/examples/flax/token-classification/run_flax_ner.py
@@ -676,7 +676,7 @@ def eval_step(state, batch):

     p_eval_step = jax.pmap(eval_step, axis_name="batch")

-    metric = evaluate.load("seqeval")
+    metric = evaluate.load("seqeval", cache_dir=model_args.cache_dir)

     def get_labels(y_pred, y_true):
         # Transform predictions and references tensos to numpy arrays
diff --git a/examples/pytorch/audio-classification/run_audio_classification.py b/examples/pytorch/audio-classification/run_audio_classification.py
index 900bf4950c242c..da31bd0ec29687 100644
--- a/examples/pytorch/audio-classification/run_audio_classification.py
+++ b/examples/pytorch/audio-classification/run_audio_classification.py
@@ -349,7 +349,7 @@ def val_transforms(batch):
         id2label[str(i)] = label

     # Load the accuracy metric from the datasets package
-    metric = evaluate.load("accuracy")
+    metric = evaluate.load("accuracy", cache_dir=model_args.cache_dir)

     # Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with
     # `predictions` and `label_ids` fields) and has to return a dictionary string to float.
diff --git a/examples/pytorch/image-classification/run_image_classification.py b/examples/pytorch/image-classification/run_image_classification.py
index 95ffdbf04ed61b..07942aa7e242e8 100755
--- a/examples/pytorch/image-classification/run_image_classification.py
+++ b/examples/pytorch/image-classification/run_image_classification.py
@@ -287,7 +287,7 @@ def main():
         id2label[str(i)] = label

     # Load the accuracy metric from the datasets package
-    metric = evaluate.load("accuracy")
+    metric = evaluate.load("accuracy", cache_dir=model_args.cache_dir)

     # Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
     # predictions and label_ids field) and has to return a dictionary string to float.
diff --git a/examples/pytorch/image-classification/run_image_classification_no_trainer.py b/examples/pytorch/image-classification/run_image_classification_no_trainer.py
index a9e0758ee7c24f..186bbfd507540d 100644
--- a/examples/pytorch/image-classification/run_image_classification_no_trainer.py
+++ b/examples/pytorch/image-classification/run_image_classification_no_trainer.py
@@ -282,7 +282,6 @@ def main():
         dataset = load_dataset(
             "imagefolder",
             data_files=data_files,
-            cache_dir=args.cache_dir,
             task="image-classification",
         )
         # See more about loading custom images at
diff --git a/examples/pytorch/language-modeling/run_clm.py b/examples/pytorch/language-modeling/run_clm.py
index 8521f2e8746d92..e9e44af3a37aac 100755
--- a/examples/pytorch/language-modeling/run_clm.py
+++ b/examples/pytorch/language-modeling/run_clm.py
@@ -583,7 +583,7 @@ def preprocess_logits_for_metrics(logits, labels):
                 logits = logits[0]
             return logits.argmax(dim=-1)

-        metric = evaluate.load("accuracy")
+        metric = evaluate.load("accuracy", cache_dir=model_args.cache_dir)

         def compute_metrics(eval_preds):
             preds, labels = eval_preds
diff --git a/examples/pytorch/language-modeling/run_mlm.py b/examples/pytorch/language-modeling/run_mlm.py
index 98739ec62eb91b..87898963fe89f9 100755
--- a/examples/pytorch/language-modeling/run_mlm.py
+++ b/examples/pytorch/language-modeling/run_mlm.py
@@ -590,7 +590,7 @@ def preprocess_logits_for_metrics(logits, labels):
                 logits = logits[0]
             return logits.argmax(dim=-1)

-        metric = evaluate.load("accuracy")
+        metric = evaluate.load("accuracy", cache_dir=model_args.cache_dir)

         def compute_metrics(eval_preds):
             preds, labels = eval_preds
diff --git a/examples/pytorch/question-answering/run_qa.py b/examples/pytorch/question-answering/run_qa.py
index a7153287b00c94..b134d95765c538 100755
--- a/examples/pytorch/question-answering/run_qa.py
+++ b/examples/pytorch/question-answering/run_qa.py
@@ -627,7 +627,9 @@ def post_processing_function(examples, features, predictions, stage="eval"):
         references = [{"id": str(ex["id"]), "answers": ex[answer_column_name]} for ex in examples]
         return EvalPrediction(predictions=formatted_predictions, label_ids=references)

-    metric = evaluate.load("squad_v2" if data_args.version_2_with_negative else "squad")
+    metric = evaluate.load(
+        "squad_v2" if data_args.version_2_with_negative else "squad", cache_dir=model_args.cache_dir
+    )

     def compute_metrics(p: EvalPrediction):
         return metric.compute(predictions=p.predictions, references=p.label_ids)
diff --git a/examples/pytorch/question-answering/run_qa_beam_search.py b/examples/pytorch/question-answering/run_qa_beam_search.py
index 7eeca98a967ab5..23a2231e9acc3d 100755
--- a/examples/pytorch/question-answering/run_qa_beam_search.py
+++ b/examples/pytorch/question-answering/run_qa_beam_search.py
@@ -647,7 +647,9 @@ def post_processing_function(examples, features, predictions, stage="eval"):
         references = [{"id": ex["id"], "answers": ex[answer_column_name]} for ex in examples]
         return EvalPrediction(predictions=formatted_predictions, label_ids=references)

-    metric = evaluate.load("squad_v2" if data_args.version_2_with_negative else "squad")
+    metric = evaluate.load(
+        "squad_v2" if data_args.version_2_with_negative else "squad", cache_dir=model_args.cache_dir
+    )

     def compute_metrics(p: EvalPrediction):
         return metric.compute(predictions=p.predictions, references=p.label_ids)
diff --git a/examples/pytorch/question-answering/run_seq2seq_qa.py b/examples/pytorch/question-answering/run_seq2seq_qa.py
index 42788b6886e0c3..92ba31efdd8312 100644
--- a/examples/pytorch/question-answering/run_seq2seq_qa.py
+++ b/examples/pytorch/question-answering/run_seq2seq_qa.py
@@ -631,7 +631,9 @@ def preprocess_validation_function(examples):
         pad_to_multiple_of=8 if training_args.fp16 else None,
     )

-    metric = evaluate.load("squad_v2" if data_args.version_2_with_negative else "squad")
+    metric = evaluate.load(
+        "squad_v2" if data_args.version_2_with_negative else "squad", cache_dir=model_args.cache_dir
+    )

     def compute_metrics(p: EvalPrediction):
         return metric.compute(predictions=p.predictions, references=p.label_ids)
diff --git a/examples/pytorch/semantic-segmentation/run_semantic_segmentation.py b/examples/pytorch/semantic-segmentation/run_semantic_segmentation.py
index 4c9c16254fd1df..5b12a98c7e0a68 100644
--- a/examples/pytorch/semantic-segmentation/run_semantic_segmentation.py
+++ b/examples/pytorch/semantic-segmentation/run_semantic_segmentation.py
@@ -366,7 +366,7 @@ def main():
     label2id = {v: str(k) for k, v in id2label.items()}

     # Load the mean IoU metric from the datasets package
-    metric = evaluate.load("mean_iou")
+    metric = evaluate.load("mean_iou", cache_dir=model_args.cache_dir)

     # Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
     # predictions and label_ids field) and has to return a dictionary string to float.
diff --git a/examples/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py b/examples/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py
index a3c045b49f07b2..99e24de7312229 100644
--- a/examples/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py
+++ b/examples/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py
@@ -530,7 +530,7 @@ def preprocess_val(example_batch):
     args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)

     # Instantiate metric
-    metric = evaluate.load("mean_iou")
+    metric = evaluate.load("mean_iou", cache_dir=args.cache_dir)

     # We need to initialize the trackers we use, and also store our configuration.
     # The trackers initializes automatically on the main process.
diff --git a/examples/pytorch/speech-recognition/run_speech_recognition_ctc.py b/examples/pytorch/speech-recognition/run_speech_recognition_ctc.py
index 47c08fc5f9453f..1c658904e71e30 100755
--- a/examples/pytorch/speech-recognition/run_speech_recognition_ctc.py
+++ b/examples/pytorch/speech-recognition/run_speech_recognition_ctc.py
@@ -680,7 +680,7 @@ def is_audio_in_length_range(length):
     # instantiate a data collator and the trainer

     # Define evaluation metrics during training, *i.e.* word error rate, character error rate
-    eval_metrics = {metric: evaluate.load(metric) for metric in data_args.eval_metrics}
+    eval_metrics = {metric: evaluate.load(metric, cache_dir=model_args.cache_dir) for metric in data_args.eval_metrics}

     # for large datasets it is advised to run the preprocessing on a
     # single machine first with ``args.preprocessing_only`` since there will mostly likely
diff --git a/examples/pytorch/speech-recognition/run_speech_recognition_ctc_adapter.py b/examples/pytorch/speech-recognition/run_speech_recognition_ctc_adapter.py
index a3d8a7b46efb3b..5708f524a3180f 100755
--- a/examples/pytorch/speech-recognition/run_speech_recognition_ctc_adapter.py
+++ b/examples/pytorch/speech-recognition/run_speech_recognition_ctc_adapter.py
@@ -702,7 +702,7 @@ def is_audio_in_length_range(length):
     # instantiate a data collator and the trainer

     # Define evaluation metrics during training, *i.e.* word error rate, character error rate
-    eval_metrics = {metric: evaluate.load(metric) for metric in data_args.eval_metrics}
+    eval_metrics = {metric: evaluate.load(metric, cache_dir=model_args.cache_dir) for metric in data_args.eval_metrics}

     # for large datasets it is advised to run the preprocessing on a
     # single machine first with ``args.preprocessing_only`` since there will mostly likely
diff --git a/examples/pytorch/speech-recognition/run_speech_recognition_seq2seq.py b/examples/pytorch/speech-recognition/run_speech_recognition_seq2seq.py
index 555ecb39a01634..9ffb48638d3672 100755
--- a/examples/pytorch/speech-recognition/run_speech_recognition_seq2seq.py
+++ b/examples/pytorch/speech-recognition/run_speech_recognition_seq2seq.py
@@ -520,7 +520,7 @@ def is_audio_in_length_range(length):
         return

     # 8. Load Metric
-    metric = evaluate.load("wer")
+    metric = evaluate.load("wer", cache_dir=model_args.cache_dir)

     def compute_metrics(pred):
         pred_ids = pred.predictions
diff --git a/examples/pytorch/summarization/run_summarization.py b/examples/pytorch/summarization/run_summarization.py
index f14783a78ff81f..fcd6c69de848d1 100755
--- a/examples/pytorch/summarization/run_summarization.py
+++ b/examples/pytorch/summarization/run_summarization.py
@@ -645,7 +645,7 @@ def preprocess_function(examples):
     )

     # Metric
-    metric = evaluate.load("rouge")
+    metric = evaluate.load("rouge", cache_dir=model_args.cache_dir)

     def postprocess_text(preds, labels):
         preds = [pred.strip() for pred in preds]
diff --git a/examples/pytorch/text-classification/run_classification.py b/examples/pytorch/text-classification/run_classification.py
index f278a5a7b46fe2..4ce7bfab3a518b 100755
--- a/examples/pytorch/text-classification/run_classification.py
+++ b/examples/pytorch/text-classification/run_classification.py
@@ -633,23 +633,23 @@ def preprocess_function(examples):

     if data_args.metric_name is not None:
         metric = (
-            evaluate.load(data_args.metric_name, config_name="multilabel")
+            evaluate.load(data_args.metric_name, config_name="multilabel", cache_dir=model_args.cache_dir)
             if is_multi_label
-            else evaluate.load(data_args.metric_name)
+            else evaluate.load(data_args.metric_name, cache_dir=model_args.cache_dir)
         )
         logger.info(f"Using metric {data_args.metric_name} for evaluation.")
     else:
         if is_regression:
-            metric = evaluate.load("mse")
+            metric = evaluate.load("mse", cache_dir=model_args.cache_dir)
             logger.info("Using mean squared error (mse) as regression score, you can use --metric_name to overwrite.")
         else:
             if is_multi_label:
-                metric = evaluate.load("f1", config_name="multilabel")
+                metric = evaluate.load("f1", config_name="multilabel", cache_dir=model_args.cache_dir)
                 logger.info(
                     "Using multilabel F1 for multi-label classification task, you can use --metric_name to overwrite."
                 )
             else:
-                metric = evaluate.load("accuracy")
+                metric = evaluate.load("accuracy", cache_dir=model_args.cache_dir)
                 logger.info("Using accuracy as classification score, you can use --metric_name to overwrite.")

     def compute_metrics(p: EvalPrediction):
diff --git a/examples/pytorch/text-classification/run_glue.py b/examples/pytorch/text-classification/run_glue.py
index 0fdeef7d18b023..8fa821c49ae89f 100755
--- a/examples/pytorch/text-classification/run_glue.py
+++ b/examples/pytorch/text-classification/run_glue.py
@@ -514,11 +514,11 @@ def preprocess_function(examples):

     # Get the metric function
     if data_args.task_name is not None:
-        metric = evaluate.load("glue", data_args.task_name)
+        metric = evaluate.load("glue", data_args.task_name, cache_dir=model_args.cache_dir)
     elif is_regression:
-        metric = evaluate.load("mse")
+        metric = evaluate.load("mse", cache_dir=model_args.cache_dir)
     else:
-        metric = evaluate.load("accuracy")
+        metric = evaluate.load("accuracy", cache_dir=model_args.cache_dir)

     # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
     # predictions and label_ids field) and has to return a dictionary string to float.
diff --git a/examples/pytorch/text-classification/run_xnli.py b/examples/pytorch/text-classification/run_xnli.py
index d7c2c3fa816336..8260645764184a 100755
--- a/examples/pytorch/text-classification/run_xnli.py
+++ b/examples/pytorch/text-classification/run_xnli.py
@@ -385,7 +385,7 @@ def preprocess_function(examples):
             )

     # Get the metric function
-    metric = evaluate.load("xnli")
+    metric = evaluate.load("xnli", cache_dir=model_args.cache_dir)

     # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
     # predictions and label_ids field) and has to return a dictionary string to float.
diff --git a/examples/pytorch/token-classification/run_ner.py b/examples/pytorch/token-classification/run_ner.py
index 318d373483a117..40028f779cc14e 100755
--- a/examples/pytorch/token-classification/run_ner.py
+++ b/examples/pytorch/token-classification/run_ner.py
@@ -539,7 +539,7 @@ def tokenize_and_align_labels(examples):
     data_collator = DataCollatorForTokenClassification(tokenizer, pad_to_multiple_of=8 if training_args.fp16 else None)

     # Metrics
-    metric = evaluate.load("seqeval")
+    metric = evaluate.load("seqeval", cache_dir=model_args.cache_dir)

     def compute_metrics(p):
         predictions, labels = p
diff --git a/examples/pytorch/translation/run_translation.py b/examples/pytorch/translation/run_translation.py
index a18c86a1ecbbe7..cb9fa48e84a747 100755
--- a/examples/pytorch/translation/run_translation.py
+++ b/examples/pytorch/translation/run_translation.py
@@ -564,7 +564,7 @@ def preprocess_function(examples):
     )

     # Metric
-    metric = evaluate.load("sacrebleu")
+    metric = evaluate.load("sacrebleu", cache_dir=model_args.cache_dir)

     def postprocess_text(preds, labels):
         preds = [pred.strip() for pred in preds]
diff --git a/examples/tensorflow/image-classification/run_image_classification.py b/examples/tensorflow/image-classification/run_image_classification.py
index dfc8bd4844128a..41cb0ffe9568c8 100644
--- a/examples/tensorflow/image-classification/run_image_classification.py
+++ b/examples/tensorflow/image-classification/run_image_classification.py
@@ -440,7 +440,7 @@ def val_transforms(example_batch):
     collate_fn = DefaultDataCollator(return_tensors="np")

     # Load the accuracy metric from the datasets package
-    metric = evaluate.load("accuracy")
+    metric = evaluate.load("accuracy", cache_dir=model_args.cache_dir)

     # Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
     # predictions and label_ids field) and has to return a dictionary string to float.
diff --git a/examples/tensorflow/question-answering/run_qa.py b/examples/tensorflow/question-answering/run_qa.py
index 70a65bed465ae3..6aaf45f00fd3b0 100755
--- a/examples/tensorflow/question-answering/run_qa.py
+++ b/examples/tensorflow/question-answering/run_qa.py
@@ -631,7 +631,9 @@ def post_processing_function(examples, features, predictions, stage="eval"):
         references = [{"id": ex["id"], "answers": ex[answer_column_name]} for ex in examples]
         return EvalPrediction(predictions=formatted_predictions, label_ids=references)

-    metric = evaluate.load("squad_v2" if data_args.version_2_with_negative else "squad")
+    metric = evaluate.load(
+        "squad_v2" if data_args.version_2_with_negative else "squad", cache_dir=model_args.cache_dir
+    )

     def compute_metrics(p: EvalPrediction):
         return metric.compute(predictions=p.predictions, references=p.label_ids)
diff --git a/examples/tensorflow/summarization/run_summarization.py b/examples/tensorflow/summarization/run_summarization.py
index c4bf4e35d2f4e9..39c8f7f89f4b89 100644
--- a/examples/tensorflow/summarization/run_summarization.py
+++ b/examples/tensorflow/summarization/run_summarization.py
@@ -627,7 +627,7 @@ def postprocess_text(preds, labels):

     # region Metric and KerasMetricCallback
     if training_args.do_eval:
-        metric = evaluate.load("rouge")
+        metric = evaluate.load("rouge", cache_dir=model_args.cache_dir)

         if data_args.val_max_target_length is None:
             data_args.val_max_target_length = data_args.max_target_length
diff --git a/examples/tensorflow/text-classification/run_glue.py b/examples/tensorflow/text-classification/run_glue.py
index 0bcaf56170a89c..3662d6aaac10a3 100644
--- a/examples/tensorflow/text-classification/run_glue.py
+++ b/examples/tensorflow/text-classification/run_glue.py
@@ -379,7 +379,7 @@ def preprocess_function(examples):
     # endregion

     # region Metric function
-    metric = evaluate.load("glue", data_args.task_name)
+    metric = evaluate.load("glue", data_args.task_name, cache_dir=model_args.cache_dir)

     def compute_metrics(preds, label_ids):
         preds = preds["logits"]
diff --git a/examples/tensorflow/token-classification/run_ner.py b/examples/tensorflow/token-classification/run_ner.py
index 31dff57862c7d5..84b2ab702a17ab 100644
--- a/examples/tensorflow/token-classification/run_ner.py
+++ b/examples/tensorflow/token-classification/run_ner.py
@@ -511,7 +511,7 @@ def tokenize_and_align_labels(examples):
     # endregion

     # Metrics
-    metric = evaluate.load("seqeval")
+    metric = evaluate.load("seqeval", cache_dir=model_args.cache_dir)

     def get_labels(y_pred, y_true):
         # Transform predictions and references tensos to numpy arrays
diff --git a/examples/tensorflow/translation/run_translation.py b/examples/tensorflow/translation/run_translation.py
index 42b96c5515bea7..b34a8624051909 100644
--- a/examples/tensorflow/translation/run_translation.py
+++ b/examples/tensorflow/translation/run_translation.py
@@ -589,7 +589,7 @@ def preprocess_function(examples):

     # region Metric and postprocessing
     if training_args.do_eval:
-        metric = evaluate.load("sacrebleu")
+        metric = evaluate.load("sacrebleu", cache_dir=model_args.cache_dir)

         if data_args.val_max_target_length is None:
             data_args.val_max_target_length = data_args.max_target_length
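
Note (not part of the patch): every hunk above applies the same change, threading the user-supplied --cache_dir value into evaluate.load() so that metric files are cached alongside the model and dataset downloads. A minimal standalone sketch of the resulting call pattern follows; the hard-coded path and the bare cache_dir variable are illustrative placeholders, since in the example scripts the value comes from model_args.cache_dir (or args.cache_dir in the no-trainer variants).

# Minimal sketch of the call pattern introduced by the patch (placeholder path).
import evaluate

cache_dir = "/tmp/hf_cache"  # hypothetical; the scripts pass the --cache_dir argument here

# With cache_dir set, evaluate keeps the metric's downloaded and temporary files
# under this directory instead of the default Hugging Face cache location.
metric = evaluate.load("accuracy", cache_dir=cache_dir)

predictions = [0, 1, 1, 0]
references = [0, 1, 0, 0]
print(metric.compute(predictions=predictions, references=references))  # {'accuracy': 0.75}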