From c1775a67a36123ec8be7f302c6335652e83da213 Mon Sep 17 00:00:00 2001
From: Tom Aarsen
Date: Mon, 11 Nov 2024 13:02:58 +0100
Subject: [PATCH 1/3] Increment the development version

---
 pyproject.toml                    | 2 +-
 sentence_transformers/__init__.py | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/pyproject.toml b/pyproject.toml
index 19741b5ad..f6e2cf867 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
 [project]
 name = "sentence-transformers"
-version = "3.3.0.dev0"
+version = "3.4.0.dev0"
 description = "State-of-the-Art Text Embeddings"
 license = { text = "Apache 2.0" }
 readme = "README.md"
diff --git a/sentence_transformers/__init__.py b/sentence_transformers/__init__.py
index 488ecdfe9..9a4e02323 100644
--- a/sentence_transformers/__init__.py
+++ b/sentence_transformers/__init__.py
@@ -1,6 +1,6 @@
 from __future__ import annotations

-__version__ = "3.3.0.dev0"
+__version__ = "3.4.0.dev0"
 __MODEL_HUB_ORGANIZATION__ = "sentence-transformers"

 import importlib

From e28f97da71f3ab910ea9cc5c078ce758295c162e Mon Sep 17 00:00:00 2001
From: Tom Aarsen <37621491+tomaarsen@users.noreply.github.com>
Date: Fri, 15 Nov 2024 10:53:02 +0100
Subject: [PATCH 2/3] [`docs`] Fix the prompt link to the training script (#3060)

This automatic linking doesn't work in an .rst section
---
 examples/training/prompts/README.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/examples/training/prompts/README.md b/examples/training/prompts/README.md
index 81779e78a..860e0bd2e 100644
--- a/examples/training/prompts/README.md
+++ b/examples/training/prompts/README.md
@@ -96,7 +96,7 @@ Additionally, some research papers (`INSTRUCTOR `_
-* `training_nq_prompts.py `_: This script finetunes `mpnet-base `_ on 100k query-answer pairs from the `natural-questions `_ dataset using the :class:`~sentence_transformers.losses.CachedMultipleNegativesRankingLoss` loss. The model is evaluated during training using the :class:`~sentence_transformers.evaluation.NanoBEIREvaluator`.
+* `training_nq_prompts.py <https://github.com/UKPLab/sentence-transformers/blob/master/examples/training/prompts/training_nq_prompts.py>`_: This script finetunes `mpnet-base `_ on 100k query-answer pairs from the `natural-questions `_ dataset using the :class:`~sentence_transformers.losses.CachedMultipleNegativesRankingLoss` loss. The model is evaluated during training using the :class:`~sentence_transformers.evaluation.NanoBEIREvaluator`.
 This script has two variables that affect 1) whether prompts are used and 2) whether prompts are included in the pooling. I have finetuned both ``mpnet-base`` and ``bert-base-uncased`` under the various different settings, resulting in 0.66% and 0.90% relative improvements on ``NDCG@10`` at no extra cost.

From e156f38b1007e7860218c27223e1d577f3a021fe Mon Sep 17 00:00:00 2001
From: pesuchin <7578373+pesuchin@users.noreply.github.com>
Date: Fri, 15 Nov 2024 20:10:37 +0900
Subject: [PATCH 3/3] [Fix] Resolve loading private Transformer model in version 3.3.0 (#3058)

* add: Add token and local_files_only to the find_adapter_config_file arguments.

* add: Add revision to the find_adapter_config_file arguments.

* For some reason, the part I didn't fix got fixed, so I put it back in.

* Set False as the default for local_files_only

---------

Co-authored-by: ryoji.nagata
Co-authored-by: Tom Aarsen
---
 sentence_transformers/models/Transformer.py | 10 +++++++++-
 1 file changed, 9 insertions(+), 1 deletion(-)

diff --git a/sentence_transformers/models/Transformer.py b/sentence_transformers/models/Transformer.py
index 1af4dccb7..48a3bc768 100644
--- a/sentence_transformers/models/Transformer.py
+++ b/sentence_transformers/models/Transformer.py
@@ -101,7 +101,15 @@ def __init__(

     def _load_config(self, model_name_or_path: str, cache_dir: str | None, backend: str, config_args: dict[str, Any]):
         """Loads the configuration of a model"""
-        if find_adapter_config_file(model_name_or_path) is not None:
+        if (
+            find_adapter_config_file(
+                model_name_or_path,
+                token=config_args.get("token"),
+                revision=config_args.get("revision"),
+                local_files_only=config_args.get("local_files_only", False),
+            )
+            is not None
+        ):
             if not is_peft_available():
                 raise Exception(
                     "Loading a PEFT model requires installing the `peft` package. You can install it via `pip install peft`."
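For readers applying PATCH 3/3, the sketch below illustrates the user-facing behavior it restores: loading a private model from the Hugging Face Hub with an access token. The repository name and token are placeholders, and the snippet assumes the standard `SentenceTransformer` keyword arguments (`token`, `revision`, `local_files_only`), which are forwarded into the `config_args` that `_load_config` receives.

```python
from sentence_transformers import SentenceTransformer

# Minimal sketch, assuming a private Hub repository; the repo name and token
# below are placeholders. In v3.3.0 this load failed because the PEFT adapter
# probe (find_adapter_config_file) ran without credentials; with the patch,
# token, revision, and local_files_only are forwarded from config_args.
model = SentenceTransformer(
    "my-org/my-private-model",  # hypothetical private repository
    token="hf_...",             # your Hugging Face access token
    revision="main",            # optional: pin a specific revision
)

# Once the model loads, usage is unchanged.
embeddings = model.encode(["An example sentence to embed."])
print(embeddings.shape)
```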