diff --git a/CHANGELOG.md b/CHANGELOG.md
index 27ad1ae..a4b745d 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,7 +1,12 @@
# Changelog
-## 1.1.4 / 2023-08-07
+## 1.1.5 / 2023-08-08
### What’s Changed
+- Adds new keywords for the task validator by @p-ferreira in #119
+- Saves historic embeddings on disk by @opentaco in #121
+- Updates relevance mechanism by @Eugene-hu in #122
+
+## 1.1.4 / 2023-08-07
- HOTFIX: create and serve the axon at startup by @robertalanm in #120
diff --git a/openvalidators/__init__.py b/openvalidators/__init__.py
index 7820940..4609769 100644
--- a/openvalidators/__init__.py
+++ b/openvalidators/__init__.py
@@ -28,6 +28,6 @@
from . import weights
from . import event
-__version__ = "1.1.4"
+__version__ = "1.1.5"
version_split = __version__.split(".")
__spec_version__ = (1000 * int(version_split[0])) + (10 * int(version_split[1])) + (1 * int(version_split[2]))
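
For reference, the `__spec_version__` arithmetic packs the semantic version into a single integer. A minimal sketch of the same packing (the helper name `spec_version` is illustrative, not part of the module):

```python
# Sketch of the packing used by __spec_version__: "1.1.5" -> 1015.
# The helper name `spec_version` is illustrative only.
def spec_version(version: str) -> int:
    major, minor, patch = (int(part) for part in version.split("."))
    return 1000 * major + 10 * minor + 1 * patch

assert spec_version("1.1.4") == 1014
assert spec_version("1.1.5") == 1015
```

Note that this packing leaves one decimal digit per minor/patch component, so it stays unambiguous only while both are below 10 (e.g. "1.0.10" and "1.1.0" would both map to 1010).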
diff --git a/openvalidators/forward.py b/openvalidators/forward.py
index 376fc8c..59589b2 100644
--- a/openvalidators/forward.py
+++ b/openvalidators/forward.py
@@ -62,7 +62,11 @@ def get_random_uids(self, k: int, exclude: List[int] = None) -> torch.LongTensor
return uids
-async def run_step(self, prompt: str, k: int, timeout: float, name: str, exclude: list = []):
+async def run_step(self, prompt: str, k: int, timeout: float, name: str, exclude: list = [], base_prompt=None):
+
+    if base_prompt is None:
+        base_prompt = prompt
+
bt.logging.debug("run_step", name)
# Record event start time.
@@ -90,7 +94,7 @@ async def run_step(self, prompt: str, k: int, timeout: float, name: str, exclude
bt.logging.trace(str(reward_fn_i.name), reward_i.tolist())
for masking_fn_i in self.masking_functions:
- mask_i = masking_fn_i.apply(prompt, responses, name).to(self.device)
+ mask_i = masking_fn_i.apply(base_prompt, responses, name).to(self.device)
rewards *= mask_i # includes diversity
if not self.config.neuron.disable_log_rewards:
event[masking_fn_i.name] = mask_i.tolist()
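
For context on why the prompt passed to the masking functions matters: each masking function scores every response, and the scores multiply into the rewards, so a response rejected by any single mask ends up with zero reward. A minimal sketch of that composition (the tensors here are illustrative):

```python
import torch

# Illustrative composition of masks with rewards, mirroring `rewards *= mask_i`.
rewards = torch.tensor([0.9, 0.7, 0.8])
masks = [
    torch.tensor([1.0, 1.0, 0.0]),  # e.g. one mask rejects the third response
    torch.tensor([1.0, 0.0, 1.0]),  # e.g. another rejects the second
]
for mask_i in masks:
    rewards = rewards * mask_i
print(rewards)  # tensor([0.9000, 0.0000, 0.0000])
```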
@@ -168,6 +172,7 @@ async def forward(self):
)
base_text = augment_event["best"]
+ base_prompt = augment_event["best"]
exclude = augment_event["uids"]
for k in range(self.config.neuron.num_followup_steps):
@@ -180,6 +185,7 @@ async def forward(self):
k=self.config.neuron.followup_sample_size,
timeout=self.config.neuron.followup_timeout,
exclude=exclude,
+            base_prompt=base_prompt,
)
exclude += followup_event["uids"]
@@ -192,6 +198,7 @@ async def forward(self):
k=self.config.neuron.answer_sample_size,
timeout=self.config.neuron.answer_timeout,
exclude=exclude,
+            base_prompt=followup_event["best"],
)
exclude += answer_event["uids"]
@@ -205,3 +212,4 @@ async def forward(self):
)
else:
base_text = base_text + "\nQuestion:" + followup_event["best"] + "\nAnswer:" + answer_event["best"]
+
\ No newline at end of file
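
Taken together, these forward() changes thread the prompt the miners actually answered into run_step, so followup responses are masked against the augmented topic and answers against the chosen question. A self-contained toy of that threading (all names here are stand-ins, not the real API):

```python
import asyncio

# Toy stand-in for run_step: echoes which prompt the masks would compare against.
async def run_step(prompt: str, base_prompt: str = None) -> dict:
    if base_prompt is None:
        base_prompt = prompt
    return {"best": f"best-of({prompt})", "masked_against": base_prompt}

async def forward() -> None:
    augment = await run_step("augment-prompt")
    base_prompt = augment["best"]
    followup = await run_step("followup-prompt", base_prompt=base_prompt)
    answer = await run_step("answer-prompt", base_prompt=followup["best"])
    print(followup["masked_against"])  # best-of(augment-prompt)
    print(answer["masked_against"])    # best-of(followup-prompt)

asyncio.run(forward())
```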
diff --git a/openvalidators/neuron.py b/openvalidators/neuron.py
index d38be22..a52da3c 100644
--- a/openvalidators/neuron.py
+++ b/openvalidators/neuron.py
@@ -208,7 +208,7 @@ def __init__(self):
RelevanceRewardModel(device=self.device) if not self.config.neuron.relevance_off
else MockRewardModel(RewardModelType.relevance.value)
)
- diversity_model = (
+ self.diversity_model = (
DiversityRewardModel(device=self.device) if not self.config.neuron.diversity_off
else MockRewardModel(RewardModelType.diversity.value)
)
@@ -217,7 +217,7 @@ def __init__(self):
else MockRewardModel(RewardModelType.nsfw.value)
)
- self.masking_functions = [self.blacklist, task_validator, relevance_model, diversity_model, nsfw_model]
+ self.masking_functions = [self.blacklist, task_validator, relevance_model, self.diversity_model, nsfw_model]
bt.logging.debug(str(self.reward_functions))
bt.logging.debug(str(self.masking_functions))
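
Storing the diversity model on `self` is what makes its `historic_embeddings` buffer reachable from `save_state`/`load_state` in utils.py. A minimal sketch of the pattern, with toy stand-ins for the real reward classes:

```python
import torch

class DiversityRewardModel:  # toy stand-in for the real class
    def __init__(self) -> None:
        self.historic_embeddings = torch.zeros(0, 1024)

class Neuron:  # toy stand-in
    def __init__(self) -> None:
        # As an attribute, the model's state is reachable from other methods;
        # a local variable in __init__ would not be.
        self.diversity_model = DiversityRewardModel()

    def save_state(self) -> dict:
        return {"historic_embeddings": self.diversity_model.historic_embeddings.to("cpu")}

print(list(Neuron().save_state()["historic_embeddings"].shape))  # [0, 1024]
```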
diff --git a/openvalidators/utils.py b/openvalidators/utils.py
index 340a020..d008f55 100644
--- a/openvalidators/utils.py
+++ b/openvalidators/utils.py
@@ -194,7 +194,10 @@ def save_state(self):
prefix="Saved model",
sufix=f"{ self.config.neuron.full_path }/model.torch",
)
+ except Exception as e:
+ bt.logging.warning(f"Failed to save model with error: {e}")
+ try:
# Save the gating model.
gating_model_linear_layer_dict = self.gating_model.linear.state_dict()
gating_model_name = self.config.gating.model_name.replace("/", "_")
@@ -205,7 +208,7 @@ def save_state(self):
wandb.log({
"step": self.step,
"block": ttl_get_block(self),
- **neuron_state_dict
+ **neuron_state_dict
})
if not self.config.wandb.off and self.config.wandb.track_gating_model:
model_artifact = wandb.Artifact(f"{gating_model_name}_gating_linear_layer", type="model")
@@ -213,12 +216,23 @@ def save_state(self):
self.wandb.log_artifact(model_artifact)
bt.logging.success(prefix="Saved gating model", sufix=f"{gating_model_file_path}")
+ except Exception as e:
+ bt.logging.warning(f"Failed to save gating model with error: {e}")
- #empty cache
- torch.cuda.empty_cache()
-
+ try:
+ # Save diversity model.
+ diversity_model_dict = {"historic_embeddings": self.diversity_model.historic_embeddings.to('cpu')}
+ diversity_model_file_path = f"{self.config.neuron.full_path}/diversity_model.pth"
+ torch.save(diversity_model_dict, diversity_model_file_path)
+ bt.logging.success(
+ prefix="Saved diversity model",
+ sufix=f"{diversity_model_file_path} {list(self.diversity_model.historic_embeddings.shape)}",
+ )
except Exception as e:
- bt.logging.warning(f"Failed to save model with error: {e}")
+ bt.logging.warning(f"Failed to save diversity model with error: {e}")
+
+ # empty cache
+ torch.cuda.empty_cache()
def load_state(self):
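
Each artifact now saves inside its own try/except, so a failure while saving the neuron or gating model no longer prevents the diversity embeddings (or the cache cleanup) from running. A minimal sketch of persisting the embeddings that way (the path and warning text are illustrative):

```python
import torch

def save_diversity_state(full_path: str, historic_embeddings: torch.Tensor) -> None:
    # Mirrors the structure above: one guarded block per artifact.
    try:
        torch.save(
            {"historic_embeddings": historic_embeddings.to("cpu")},
            f"{full_path}/diversity_model.pth",
        )
    except Exception as e:
        print(f"Failed to save diversity model with error: {e}")

save_diversity_state("/tmp", torch.randn(4, 1024))
```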
@@ -227,8 +241,9 @@ def load_state(self):
try:
state_dict = torch.load(f"{self.config.neuron.full_path}/model.torch")
# Check for nans in saved state dict
- if not torch.isnan(state_dict["neuron_weights"]).any():
- self.moving_averaged_scores = state_dict["neuron_weights"].clone().detach()
+ neuron_weights = torch.tensor(state_dict["neuron_weights"])
+ if not torch.isnan(neuron_weights).any():
+ self.moving_averaged_scores = neuron_weights.to(self.device)
self.hotkeys = state_dict["neuron_hotkeys"]
bt.logging.success(
prefix="Reloaded model",
@@ -236,3 +251,15 @@ def load_state(self):
)
except Exception as e:
bt.logging.warning(f"Failed to load model with error: {e}")
+
+ try:
+ # Load diversity model.
+ diversity_model_file_path = f"{self.config.neuron.full_path}/diversity_model.pth"
+ diversity_model_dict = torch.load(diversity_model_file_path)
+ self.diversity_model.historic_embeddings = diversity_model_dict["historic_embeddings"].to(self.device)
+ bt.logging.success(
+ prefix="Reloaded diversity model",
+ sufix=f"{diversity_model_file_path} {list(self.diversity_model.historic_embeddings.shape)}",
+ )
+ except Exception as e:
+ bt.logging.warning(f"Failed to load diversity model with error: {e}")
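
On the load side, the saved weights are coerced to a tensor, NaN-checked, and only then moved onto the configured device. Note the diff uses `torch.tensor(...)`, which emits a copy warning when the stored value is already a tensor; the sketch below substitutes `torch.as_tensor`, which handles both tensors and lists without a warning (a deliberate substitution, and the paths are illustrative):

```python
from typing import Optional

import torch

def load_weights(path: str, device: str = "cpu") -> Optional[torch.Tensor]:
    state = torch.load(path)
    weights = torch.as_tensor(state["neuron_weights"])
    if torch.isnan(weights).any():
        return None  # keep the previous moving averages on NaN corruption
    return weights.to(device)

torch.save({"neuron_weights": torch.rand(8)}, "/tmp/model.torch")
print(load_weights("/tmp/model.torch").shape)  # torch.Size([8])
```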