Exception for providing Run instead of Experiment (#127)

Raalsky authored Jul 6, 2021
1 parent da73ee7 commit 1c38958

Showing 12 changed files with 112 additions and 3 deletions.
37 changes: 37 additions & 0 deletions neptunecontrib/monitoring/exceptions.py
@@ -0,0 +1,37 @@
#
# Copyright (c) 2021, Neptune Labs Sp. z o.o.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

from neptune.exceptions import NeptuneException, STYLES


class NeptuneLegacyIncompatibilityException(NeptuneException):
    def __init__(self):
        message = """
{h1}
----NeptuneLegacyIncompatibilityException----------------------------------------
{end}
It seems you are passing a Run object to a legacy integration that expects an Experiment object.
What can I do?
- Update your code to use the updated integration:
  https://docs.neptune.ai/integrations-and-supported-tools/intro
- If you prefer to use the legacy integrations, you can find examples of how to use them here:
  https://docs-legacy.neptune.ai/integrations/index.html
{correct}Need help?{end}-> https://docs.neptune.ai/getting-started/getting-help
"""
        super().__init__(message.format(**STYLES))
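As a usage sketch (illustration only, not part of the commit): passing a new-API Run where a legacy integration expects an Experiment now fails fast with the message above instead of a confusing attribute error. This assumes the neptune.new client, where init() returns a Run object; the project name is a placeholder:

import neptune.new as neptune

from neptunecontrib.monitoring.keras import NeptuneMonitor

run = neptune.init(project='my_workspace/my_project')  # returns a Run, not a legacy Experiment
NeptuneMonitor(experiment=run)  # raises NeptuneLegacyIncompatibilityException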
3 changes: 2 additions & 1 deletion neptunecontrib/monitoring/fairness.py
@@ -21,7 +21,7 @@
import matplotlib.pyplot as plt
import seaborn as sns

from neptunecontrib.monitoring.utils import send_figure
from neptunecontrib.monitoring.utils import send_figure, expect_not_a_run


def log_fairness_classification_metrics(y_true, y_pred_class, y_pred_score, sensitive_attributes,
@@ -78,6 +78,7 @@ def log_fairness_classification_metrics(y_true, y_pred_class, y_pred_score, sens
"""
_exp = experiment if experiment else neptune
expect_not_a_run(_exp)

bias_info = {'favorable_label': favorable_label,
'unfavorable_label': unfavorable_label,
5 changes: 5 additions & 0 deletions neptunecontrib/monitoring/fastai.py
@@ -18,6 +18,8 @@

import neptune

from neptunecontrib.monitoring.utils import expect_not_a_run

if sys.version_info[0] == 3 and sys.version_info[1] >= 6:
from fastai.basic_train import LearnerCallback
else:
@@ -79,6 +81,9 @@ class NeptuneMonitor(LearnerCallback):
    def __init__(self, learn=None, experiment=None, prefix=''):
        self._exp = experiment if experiment else neptune
        self._prefix = prefix

        expect_not_a_run(self._exp)

        if learn is not None:
            super().__init__(learn)

4 changes: 4 additions & 0 deletions neptunecontrib/monitoring/keras.py
@@ -34,6 +34,8 @@
pip install tensorflow"""
raise ModuleNotFoundError(msg) # pylint:disable=undefined-variable

from neptunecontrib.monitoring.utils import expect_not_a_run


class NeptuneMonitor(Callback):
"""Logs Keras metrics to Neptune.
@@ -90,6 +92,8 @@ def __init__(self, experiment=None, prefix=''):
        self._exp = experiment if experiment else neptune
        self._prefix = prefix

        expect_not_a_run(self._exp)

    def _log_metrics(self, logs, trigger):
        if not logs:
            return
4 changes: 4 additions & 0 deletions neptunecontrib/monitoring/kerastuner.py
@@ -17,6 +17,8 @@

from kerastuner.engine.logger import Logger

from neptunecontrib.monitoring.utils import expect_not_a_run


class NeptuneLogger(Logger):
"""Logs hyperparameter optimization process to Neptune.
@@ -58,6 +60,8 @@ class NeptuneLogger(Logger):
    def __init__(self, experiment=None):
        self.exp = experiment if experiment else neptune

        expect_not_a_run(self.exp)

    def report_trial_state(self, trial_id, trial_state):
        """Gives the logger information about trial status."""

3 changes: 3 additions & 0 deletions neptunecontrib/monitoring/lightgbm.py
@@ -16,6 +16,8 @@

import neptune

from neptunecontrib.monitoring.utils import expect_not_a_run


def neptune_monitor(experiment=None, prefix=''):
"""Logs lightGBM learning curves to Neptune.
@@ -78,6 +80,7 @@ def neptune_monitor(experiment=None, prefix=''):
"""

    _exp = experiment if experiment else neptune
    expect_not_a_run(_exp)

    def callback(env):
        for name, loss_name, loss_value, _ in env.evaluation_result_list:
28 changes: 27 additions & 1 deletion neptunecontrib/monitoring/metrics.py
@@ -15,7 +15,7 @@
#
import matplotlib.pyplot as plt
import neptune
from neptunecontrib.monitoring.utils import send_figure
from neptunecontrib.monitoring.utils import send_figure, expect_not_a_run
import numpy as np
import pandas as pd
import scikitplot.metrics as plt_metrics
@@ -78,6 +78,8 @@ def log_binary_classification_metrics(y_true, y_pred, threshold=0.5, experiment=

_exp = experiment if experiment else neptune

expect_not_a_run(_exp)

log_confusion_matrix(y_true, y_pred[:, 1] > threshold, experiment=_exp, prefix=prefix)
log_classification_report(y_true, y_pred[:, 1] > threshold, experiment=_exp, prefix=prefix)
log_class_metrics(y_true, y_pred[:, 1] > threshold, experiment=_exp, prefix=prefix)
@@ -134,6 +136,8 @@ def log_confusion_matrix(y_true, y_pred_class, experiment=None, channel_name='me

_exp = experiment if experiment else neptune

expect_not_a_run(_exp)

fig, ax = plt.subplots()
_plot_confusion_matrix(y_true, y_pred_class, ax=ax)
send_figure(fig, channel_name=prefix + channel_name, experiment=_exp)
@@ -182,6 +186,8 @@ def log_classification_report(y_true, y_pred_class, experiment=None, channel_nam

_exp = experiment if experiment else neptune

expect_not_a_run(_exp)

fig = _plot_classification_report(y_true, y_pred_class)
send_figure(fig, channel_name=prefix + channel_name, experiment=_exp)
plt.close()
@@ -232,6 +238,8 @@ def log_class_metrics(y_true, y_pred_class, experiment=None, prefix=''):

_exp = experiment if experiment else neptune

expect_not_a_run(_exp)

scores = _class_metrics(y_true, y_pred_class)
for metric_name, score in scores.items():
_exp.log_metric(prefix + metric_name, score)
@@ -283,6 +291,8 @@ def log_class_metrics_by_threshold(y_true, y_pred_pos, experiment=None, channel_

_exp = experiment if experiment else neptune

expect_not_a_run(_exp)

figs = _plot_class_metrics_by_threshold(y_true, y_pred_pos)

for fig in figs:
@@ -332,6 +342,8 @@ def log_roc_auc(y_true, y_pred, experiment=None, channel_name='metric_charts', p

_exp = experiment if experiment else neptune

expect_not_a_run(_exp)

roc_auc = sk_metrics.roc_auc_score(y_true, y_pred[:, 1])
_exp.log_metric(prefix + 'roc_auc', roc_auc)

@@ -383,6 +395,8 @@ def log_precision_recall_auc(y_true, y_pred, experiment=None, channel_name='metr

_exp = experiment if experiment else neptune

expect_not_a_run(_exp)

avg_precision = sk_metrics.average_precision_score(y_true, y_pred[:, 1])
_exp.log_metric(prefix + 'avg_precision', avg_precision)

@@ -433,6 +447,8 @@ def log_brier_loss(y_true, y_pred_pos, experiment=None, prefix=''):

_exp = experiment if experiment else neptune

expect_not_a_run(_exp)

brier = sk_metrics.brier_score_loss(y_true, y_pred_pos)
_exp.log_metric(prefix + 'brier_loss', brier)

@@ -478,6 +494,8 @@ def log_log_loss(y_true, y_pred, experiment=None, prefix=''):

_exp = experiment if experiment else neptune

expect_not_a_run(_exp)

log_loss = sk_metrics.log_loss(y_true, y_pred)
_exp.log_metric(prefix + 'log_loss', log_loss)

@@ -528,6 +546,8 @@ def log_ks_statistic(y_true, y_pred, experiment=None, channel_name='metric_chart

_exp = experiment if experiment else neptune

expect_not_a_run(_exp)

res = binary_ks_curve(y_true, y_pred[:, 1])
ks_stat = res[3]
_exp.log_metric(prefix + 'ks_statistic', ks_stat)
@@ -580,6 +600,8 @@ def log_cumulative_gain(y_true, y_pred, experiment=None, channel_name='metric_ch

_exp = experiment if experiment else neptune

expect_not_a_run(_exp)

fig, ax = plt.subplots()
plt_metrics.plot_cumulative_gain(y_true, y_pred, ax=ax)
send_figure(fig, channel_name=prefix + channel_name, experiment=_exp)
@@ -628,6 +650,8 @@ def log_lift_curve(y_true, y_pred, experiment=None, channel_name='metric_charts'

_exp = experiment if experiment else neptune

expect_not_a_run(_exp)

fig, ax = plt.subplots()
plt_metrics.plot_lift_curve(y_true, y_pred, ax=ax)
send_figure(fig, channel_name=prefix + channel_name, experiment=_exp)
@@ -670,6 +694,8 @@ def log_prediction_distribution(y_true, y_pred_pos, experiment=None, channel_nam

_exp = experiment if experiment else neptune

expect_not_a_run(_exp)

fig, ax = plt.subplots()
_plot_prediction_distribution(y_true, y_pred_pos, ax=ax)
send_figure(fig, channel_name=prefix + channel_name, experiment=_exp)
5 changes: 5 additions & 0 deletions neptunecontrib/monitoring/optuna.py
@@ -18,6 +18,7 @@
import neptune

from neptunecontrib.api import log_chart, pickle_and_log_artifact
from neptunecontrib.monitoring.utils import expect_not_a_run


class NeptuneCallback:
@@ -77,6 +78,8 @@ def __init__(self, experiment=None,
self.exp = experiment if experiment else neptune
self.log_study = log_study

expect_not_a_run(self.exp)

if log_charts:

message = """log_charts argument is deprecated and will be removed in future releases.
@@ -170,6 +173,8 @@ def log_study_info(study, experiment=None,

_exp = experiment if experiment else neptune

expect_not_a_run(_exp)

_exp.log_metric('best_score', study.best_value)
_exp.set_property('best_parameters', study.best_params)

3 changes: 3 additions & 0 deletions neptunecontrib/monitoring/sklearn.py
@@ -30,6 +30,7 @@

from neptunecontrib.api.table import log_csv
from neptunecontrib.api.utils import log_pickle
from neptunecontrib.monitoring.utils import expect_not_a_run


def log_regressor_summary(regressor, X_train, X_test, y_train, y_test,
@@ -1228,6 +1229,8 @@ def log_silhouette_chart(model, X, experiment=None, **kwargs):

def _validate_experiment(experiment):
    if experiment is not None:
        expect_not_a_run(experiment)

        if not isinstance(experiment, neptune.experiments.Experiment):
            raise ValueError('Passed experiment is not a Neptune experiment. Create one by using "create_experiment()"')
    else:
13 changes: 12 additions & 1 deletion neptunecontrib/monitoring/skopt.py
@@ -21,7 +21,7 @@
import skopt.plots as sk_plots
from skopt.utils import dump

from neptunecontrib.monitoring.utils import axes2fig
from neptunecontrib.monitoring.utils import axes2fig, expect_not_a_run


class NeptuneCallback:
@@ -55,6 +55,9 @@ class NeptuneCallback:

    def __init__(self, experiment=None, log_checkpoint=True):
        self._exp = experiment if experiment else neptune

        expect_not_a_run(self._exp)

        self.log_checkpoint = log_checkpoint
        self._iteration = 0

@@ -114,6 +117,8 @@ def log_results(results, experiment=None, log_plots=True, log_pickle=True):
"""
_exp = experiment if experiment else neptune

expect_not_a_run(_exp)

_log_best_score(results, _exp)
_log_best_parameters(results, _exp)

@@ -135,6 +140,7 @@ def NeptuneMonitor(*args, **kwargs):


def _log_best_parameters(results, experiment):
    expect_not_a_run(experiment)
    named_params = ([(dimension.name, param) for dimension, param in zip(results.space, results.x)])
    experiment.set_property('best_parameters', str(named_params))

@@ -144,25 +150,29 @@ def _log_best_score(results, experiment):


def _log_plot_convergence(results, experiment, name='diagnostics'):
    expect_not_a_run(experiment)
    fig, ax = plt.subplots()
    sk_plots.plot_convergence(results, ax=ax)
    experiment.log_image(name, fig)


def _log_plot_regret(results, experiment, name='diagnostics'):
    expect_not_a_run(experiment)
    fig, ax = plt.subplots()
    sk_plots.plot_regret(results, ax=ax)
    experiment.log_image(name, fig)


def _log_plot_evaluations(results, experiment, name='diagnostics'):
    expect_not_a_run(experiment)
    fig = plt.figure(figsize=(16, 12))
    fig = axes2fig(sk_plots.plot_evaluations(results, bins=10), fig=fig)
    experiment.log_image(name, fig)


def _log_plot_objective(results, experiment, name='diagnostics'):
    try:
        expect_not_a_run(experiment)
        fig = plt.figure(figsize=(16, 12))
        fig = axes2fig(sk_plots.plot_objective(results), fig=fig)
        experiment.log_image(name, fig)
@@ -171,6 +181,7 @@ def _log_plot_objective(results, experiment, name='diagnostics'):


def _log_results_object(results, experiment=None):
    expect_not_a_run(experiment)
    experiment.log_artifact(_export_results_object(results), 'results.pkl')


6 changes: 6 additions & 0 deletions neptunecontrib/monitoring/utils.py
@@ -21,6 +21,7 @@
import neptune

from neptunecontrib.api import pickle_and_log_artifact
from neptunecontrib.monitoring.exceptions import NeptuneLegacyIncompatibilityException


def axes2fig(axes, fig=None):
@@ -83,3 +84,8 @@ def pickle_and_send_artifact(obj, filename, experiment=None):
warnings.warn(message)

pickle_and_log_artifact(obj, filename, experiment)


def expect_not_a_run(experiment):
    if type(experiment).__name__ == 'Run':
        raise NeptuneLegacyIncompatibilityException()
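For a quick check of the duck-typed guard above (illustration only, not part of the commit): expect_not_a_run inspects only the class name, so any object whose type is named Run triggers the exception, and neptunecontrib never has to import the new client. The stand-in class below is hypothetical:

from neptunecontrib.monitoring.exceptions import NeptuneLegacyIncompatibilityException
from neptunecontrib.monitoring.utils import expect_not_a_run


class Run:  # stand-in sharing the class name of neptune.new's run handle
    pass


try:
    expect_not_a_run(Run())
except NeptuneLegacyIncompatibilityException as exc:
    print(exc)  # prints the styled migration message defined in exceptions.py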