From e15e401bfa959b8a92a268586bdd17f06e65b4e2 Mon Sep 17 00:00:00 2001 From: Hubert Jaworski Date: Fri, 19 Feb 2021 09:58:22 +0100 Subject: [PATCH] Allow user to disable visualizations (#125) * Allow user to disable visualizations * Review changes --- neptunecontrib/monitoring/sklearn.py | 49 +++++++++++++++++++++------- 1 file changed, 37 insertions(+), 12 deletions(-) diff --git a/neptunecontrib/monitoring/sklearn.py b/neptunecontrib/monitoring/sklearn.py index 65a8b56..c1e4ca0 100644 --- a/neptunecontrib/monitoring/sklearn.py +++ b/neptunecontrib/monitoring/sklearn.py @@ -33,7 +33,7 @@ def log_regressor_summary(regressor, X_train, X_test, y_train, y_test, - model_name=None, nrows=1000, experiment=None): + model_name=None, nrows=1000, experiment=None, log_charts=True): """Log sklearn regressor summary. This method automatically logs all regressor parameters, pickled estimator (model), @@ -65,6 +65,17 @@ def log_regressor_summary(regressor, X_train, X_test, y_train, y_test, experiment (:obj:`neptune.experiments.Experiment`, optional, default is ``None``): | Neptune ``Experiment`` object to control to which experiment you log the data. | If ``None``, log to currently active, and most recent experiment. + log_charts (bool, optional, default is ``True``): + | If True, calculate and send chart visualizations. + | + | NOTE: calculating visualizations is potentially expensive depending on input data and regressor, and + | may take some time to finish. + | + | This is equivalent to calling log_learning_curve_chart, log_feature_importance_chart, + | log_residuals_chart, log_prediction_error_chart, log_cooks_distance_chart functions from this module.
+ | + | If not all visualizations are needed, it's recommended to set this parameter to ``False`` and call + | only the desired log functions. Returns: ``None`` @@ -94,15 +105,16 @@ def log_regressor_summary(regressor, X_train, X_test, y_train, y_test, log_scores(regressor, X_test, y_test, y_pred=y_pred, name='test', experiment=exp) # visualizations - log_learning_curve_chart(regressor, X_train, y_train, experiment=exp) - log_feature_importance_chart(regressor, X_train, y_train, experiment=exp) - log_residuals_chart(regressor, X_train, X_test, y_train, y_test, experiment=exp) - log_prediction_error_chart(regressor, X_train, X_test, y_train, y_test, experiment=exp) - log_cooks_distance_chart(regressor, X_train, y_train, experiment=exp) + if log_charts: + log_learning_curve_chart(regressor, X_train, y_train, experiment=exp) + log_feature_importance_chart(regressor, X_train, y_train, experiment=exp) + log_residuals_chart(regressor, X_train, X_test, y_train, y_test, experiment=exp) + log_prediction_error_chart(regressor, X_train, X_test, y_train, y_test, experiment=exp) + log_cooks_distance_chart(regressor, X_train, y_train, experiment=exp) def log_classifier_summary(classifier, X_train, X_test, y_train, y_test, - model_name=None, nrows=1000, experiment=None): + model_name=None, nrows=1000, experiment=None, log_charts=True): """Log sklearn classifier summary. This method automatically logs all classifier parameters, pickled estimator (model), @@ -134,6 +146,18 @@ def log_classifier_summary(classifier, X_train, X_test, y_train, y_test, experiment (:obj:`neptune.experiments.Experiment`, optional, default is ``None``): | Neptune ``Experiment`` object to control to which experiment you log the data. | If ``None``, log to currently active, and most recent experiment. + log_charts (bool, optional, default is ``True``): + | If True, calculate and send chart visualizations.
+ | + | NOTE: calculating visualizations is potentially expensive depending on input data and classifier, and + | may take some time to finish. + | + | This is equivalent to calling log_classification_report_chart, log_confusion_matrix_chart, + | log_roc_auc_chart, log_precision_recall_chart, log_class_prediction_error_chart functions from this + | module. + | + | If not all visualizations are needed, it's recommended to set this parameter to ``False`` and call + | only the desired log functions. Returns: ``None`` @@ -164,11 +188,12 @@ def log_classifier_summary(classifier, X_train, X_test, y_train, y_test, log_scores(classifier, X_test, y_test, y_pred=y_pred, name='test', experiment=exp) # visualizations - log_classification_report_chart(classifier, X_train, X_test, y_train, y_test, experiment=exp) - log_confusion_matrix_chart(classifier, X_train, X_test, y_train, y_test, experiment=exp) - log_roc_auc_chart(classifier, X_train, X_test, y_train, y_test, experiment=exp) - log_precision_recall_chart(classifier, X_test, y_test, experiment=exp) - log_class_prediction_error_chart(classifier, X_train, X_test, y_train, y_test, experiment=exp) + if log_charts: + log_classification_report_chart(classifier, X_train, X_test, y_train, y_test, experiment=exp) + log_confusion_matrix_chart(classifier, X_train, X_test, y_train, y_test, experiment=exp) + log_roc_auc_chart(classifier, X_train, X_test, y_train, y_test, experiment=exp) + log_precision_recall_chart(classifier, X_test, y_test, experiment=exp) + log_class_prediction_error_chart(classifier, X_train, X_test, y_train, y_test, experiment=exp) def log_estimator_params(estimator, experiment=None):