diff --git a/flair/trainers/plugins/__init__.py b/flair/trainers/plugins/__init__.py
index be02970a0..925b30cf8 100644
--- a/flair/trainers/plugins/__init__.py
+++ b/flair/trainers/plugins/__init__.py
@@ -1,5 +1,4 @@
 from .base import BasePlugin, Pluggable, TrainerPlugin, TrainingInterrupt
-from .functional.amp import AmpPlugin
 from .functional.anneal_on_plateau import AnnealingPlugin
 from .functional.checkpoints import CheckpointPlugin
 from .functional.linear_scheduler import LinearSchedulerPlugin
@@ -11,7 +10,6 @@
 from .metric_records import MetricName, MetricRecord
 
 __all__ = [
-    "AmpPlugin",
     "AnnealingPlugin",
     "CheckpointPlugin",
     "LinearSchedulerPlugin",
diff --git a/flair/trainers/plugins/functional/anneal_on_plateau.py b/flair/trainers/plugins/functional/anneal_on_plateau.py
index 0bdf1e6ea..e0bff9a19 100644
--- a/flair/trainers/plugins/functional/anneal_on_plateau.py
+++ b/flair/trainers/plugins/functional/anneal_on_plateau.py
@@ -35,6 +35,7 @@ def __init__(
         self.anneal_factor = anneal_factor
         self.patience = patience
         self.initial_extra_patience = initial_extra_patience
+        self.scheduler: AnnealOnPlateau
 
     def store_learning_rate(self):
         optimizer = self.trainer.optimizer
@@ -117,6 +118,4 @@ def get_state(self) -> Dict[str, Any]:
             "patience": self.patience,
             "initial_extra_patience": self.initial_extra_patience,
             "anneal_with_restarts": self.anneal_with_restarts,
-            "bad_epochs": self.scheduler.num_bad_epochs,
-            "current_best": self.scheduler.best,
         }
diff --git a/test_emb.py b/test_emb.py
new file mode 100644
index 000000000..362db8562
--- /dev/null
+++ b/test_emb.py
@@ -0,0 +1,29 @@
+# Minimal repro: embed the same sentence with two separately constructed
+# context-aware embedders and compare the resulting token embeddings.
+from flair.data import Sentence
+from flair.embeddings import TransformerWordEmbeddings
+
+phrase_0 = Sentence("a uui")
+embeddings_a = TransformerWordEmbeddings(
+    "roberta-base",
+    use_context=True,
+    use_context_separator=False,
+)
+# embed() returns the list of embedded sentences
+ebd_a = embeddings_a.embed(phrase_0)
+
+phrase_1 = Sentence("a uui")
+embeddings_b = TransformerWordEmbeddings(
+    "roberta-base",
+    use_context=True,
+    use_context_separator=False,
+)
+ebd_b = embeddings_b.embed(phrase_1)
+
+# Compare the last token of each run; both runs should produce identical values.
+print(
+    "token run 0:", ebd_a[-1][-1], "\n",
+    "embedding end run 0:", ebd_a[-1][-1].embedding.tolist()[-2:], "\n",
+    "token run 1:", ebd_b[-1][-1], "\n",
+    "embedding end run 1:", ebd_b[-1][-1].embedding.tolist()[-2:],
+)