From aec1ca3a588bc6c65f7886e3d3eaa74901a6356f Mon Sep 17 00:00:00 2001
From: jiqing-feng <107918818+jiqing-feng@users.noreply.github.com>
Date: Fri, 12 Jul 2024 05:22:26 +0800
Subject: [PATCH] [Bug Fix] fix qa pipeline tensor to numpy (#31585)

* fix qa pipeline

* fix tensor to numpy
---
 src/transformers/pipelines/question_answering.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/transformers/pipelines/question_answering.py b/src/transformers/pipelines/question_answering.py
index 4ac5d252b1139e..4397307013d8ad 100644
--- a/src/transformers/pipelines/question_answering.py
+++ b/src/transformers/pipelines/question_answering.py
@@ -118,7 +118,7 @@ def select_starts_ends(
         max_answer_len (`int`): Maximum size of the answer to extract from the model's output.
     """
     # Ensure padded tokens & question tokens cannot belong to the set of candidate answers.
-    undesired_tokens = np.abs(np.array(p_mask) - 1)
+    undesired_tokens = np.abs(p_mask.numpy() - 1)

     if attention_mask is not None:
         undesired_tokens = undesired_tokens & attention_mask
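
A minimal sketch (not part of the patch) of the conversion the one-line fix performs, assuming `p_mask` reaches `select_starts_ends` as a CPU `torch.Tensor` rather than a plain list; the mask values and shapes below are made up for illustration.

```python
# Illustrative sketch only; assumes p_mask arrives as a CPU torch.Tensor.
import numpy as np
import torch

p_mask = torch.tensor([[1, 1, 0, 0, 1]])       # 1 marks tokens that cannot be in the answer (hypothetical values)
attention_mask = np.array([[1, 1, 1, 1, 0]])   # hypothetical attention mask from the tokenizer

# As in the patched line: convert the tensor to a NumPy array explicitly,
# then keep all subsequent mask arithmetic in NumPy.
undesired_tokens = np.abs(p_mask.numpy() - 1)  # invert the mask: 1 now marks candidate answer tokens
undesired_tokens = undesired_tokens & attention_mask

print(undesired_tokens)  # [[0 0 1 1 0]]
```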