Skip to content

Commit

Permalink
Use generation_config
Browse files — browse the repository at this point in the history
  • Loading branch information
NielsRogge committed Apr 29, 2024
1 parent 72a98c5 commit 8c8c7a5
Show file tree
Hide file tree
Showing 2 changed files with 1 addition and 2 deletions.
1 change: 1 addition & 0 deletions src/transformers/models/idefics2/fine_tune_idefics2.py
Original file line number Diff line number Diff line change
Expand Up
@@ -275,6 +275,7 @@ def compute_metrics(eval_preds):
report_to="none",
eval_do_concat_batches=False,
predict_with_generate=True,
generation_config={"max_new_tokens": 200},
)

# important: we need to disable caching during training
Expand Down
2 changes: 0 additions & 2 deletions src/transformers/trainer_seq2seq.py
Original file line number Diff line number Diff line change
Expand Up
@@ -308,8 +308,6 @@ def prediction_step(
k: v for k, v in inputs.items() if k not in ("decoder_input_ids", "decoder_attention_mask")
}

# TODO fix this
gen_kwargs["max_new_tokens"] = 200
generated_tokens = self.model.generate(**generation_inputs, **gen_kwargs)

# Temporary hack to ensure the generation config is not initialized for each iteration of the evaluation loop
Expand Down

0 comments on commit 8c8c7a5

Please sign in to comment.