From 090327fd352eb28a7b321a02b8295b842dfeba04 Mon Sep 17 00:00:00 2001
From: Tom Aarsen
Date: Tue, 19 Nov 2024 10:47:42 +0100
Subject: [PATCH] Remove some dead code from distillation script

---
 examples/training/distillation/model_distillation.py | 10 ----------
 1 file changed, 10 deletions(-)

diff --git a/examples/training/distillation/model_distillation.py b/examples/training/distillation/model_distillation.py
index b3b6f69dd..9f89af3d2 100644
--- a/examples/training/distillation/model_distillation.py
+++ b/examples/training/distillation/model_distillation.py
@@ -56,16 +56,6 @@
 inference_batch_size = 64
 train_batch_size = 64
 
-# We use AllNLI as a source of sentences for the distillation
-nli_dataset_path = "datasets/AllNLI.tsv.gz"
-
-# Further, we use sentences extracted from the English Wikipedia to train the distillation
-wikipedia_dataset_path = "datasets/wikipedia-en-sentences.txt.gz"
-
-# We use the STS benchmark dataset to see how much performance we loose
-sts_dataset_path = "datasets/stsbenchmark.tsv.gz"
-
-
 logging.info("Load the AllNLI dataset")
 # Load the AllNLI dataset: https://huggingface.co/datasets/sentence-transformers/all-nli
 nli_train_dataset = load_dataset("sentence-transformers/all-nli", "pair-score", split="train")
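
Context for the removal: the deleted variables point at local `datasets/*.gz` files that the script no longer reads, because it now loads its data straight from the Hugging Face Hub (see the retained `load_dataset` context line above). A minimal sketch of that loading pattern, assuming the `datasets` library is installed; the `print` inspection at the end is illustrative and not part of the patch:

```python
from datasets import load_dataset

# The script pulls AllNLI directly from the Hub, so the local
# datasets/*.tsv.gz paths removed in this patch are unreferenced dead code.
nli_train_dataset = load_dataset("sentence-transformers/all-nli", "pair-score", split="train")

# Illustrative only: look at one training pair to confirm the data loaded.
print(nli_train_dataset[0])
```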