diff --git a/CHANGELOG.md b/CHANGELOG.md
index f9e5689..d58a7ee 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,4 +1,9 @@
 # Changelog
+
+## 1.1.7 / 2023-08-11
+### What’s Changed
+- Hotfix cutoff limit by @Eugene-hu in #126
+
 ## 1.1.6 / 2023-08-10
 ### What’s Changed
 - Diversity regularization by @isabella618033 in https://github.com/opentensor/validators/pull/124
diff --git a/openvalidators/__init__.py b/openvalidators/__init__.py
index 81643aa..10d715d 100644
--- a/openvalidators/__init__.py
+++ b/openvalidators/__init__.py
@@ -28,6 +28,6 @@
 from . import weights
 from . import event
 
-__version__ = "1.1.6"
+__version__ = "1.1.7"
 version_split = __version__.split(".")
 __spec_version__ = (1000 * int(version_split[0])) + (10 * int(version_split[1])) + (1 * int(version_split[2]))
diff --git a/openvalidators/reward/diversity.py b/openvalidators/reward/diversity.py
index 051b781..242f302 100644
--- a/openvalidators/reward/diversity.py
+++ b/openvalidators/reward/diversity.py
@@ -55,7 +55,7 @@ def __init__( self, device: str ):
         self.device = device
         self.tokenizer = AutoTokenizer.from_pretrained( DiversityRewardModel.diversity_model_path )
         self.model = AutoModel.from_pretrained( DiversityRewardModel.diversity_model_path ).to(self.device)
-        self.reward_bottom_k = 3
+        self.reward_bottom_k = 2
         self.history_reward_bottom_k = 2
         self.historic_embeddings = torch.tensor([]).to(self.device)
         self.history_range = (500, 15500)
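
Note on the changes above: `__spec_version__` is computed from the semantic version string shown in the diff, so the bump to 1.1.7 yields 1000*1 + 10*1 + 7 = 1017. The functional change is `reward_bottom_k` going from 3 to 2, which appears to be the "hotfix cutoff limit" from the changelog entry. The sketch below only illustrates how a bottom-k cutoff typically behaves in a diversity score (averaging each completion's k smallest pairwise dissimilarities); the function name `bottom_k_diversity_reward` and its exact formula are assumptions for illustration, not the actual logic in `openvalidators/reward/diversity.py`.

```python
import torch
import torch.nn.functional as F

# Illustrative sketch only, not the openvalidators implementation:
# score each completion by the mean of its `bottom_k` smallest per-pair
# dissimilarities, so a smaller k judges diversity by only the closest neighbours.
def bottom_k_diversity_reward(embeddings: torch.Tensor, bottom_k: int = 2) -> torch.Tensor:
    embeddings = F.normalize(embeddings, p=2, dim=1)
    similarity = embeddings @ embeddings.T             # pairwise cosine similarity, shape (n, n)
    dissimilarity = 1.0 - similarity                   # 0 = identical, 2 = opposite
    dissimilarity.fill_diagonal_(float("inf"))         # never compare a completion with itself
    closest, _ = dissimilarity.topk(bottom_k, dim=1, largest=False)  # k least diverse neighbours
    return closest.mean(dim=1)                         # low reward if close duplicates exist

# Example: two near-duplicate embeddings get a low diversity reward, the outlier a high one.
emb = torch.tensor([[1.0, 0.0], [0.99, 0.14], [0.0, 1.0]])
print(bottom_k_diversity_reward(emb, bottom_k=2))
```

Under this reading, lowering `bottom_k` means each completion is judged by fewer of its nearest neighbours, so near-duplicates are penalised more sharply while unrelated completions are largely unaffected.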