From f82e9213ad20ed8c3c46f9243d98b5c00e4361e7 Mon Sep 17 00:00:00 2001
From: Jakub
Date: Fri, 13 Sep 2019 11:54:47 +0200
Subject: [PATCH] fixed naming and docstrings (#52)

* bumped versions

* fixed varname
---
 docs/conf.py                          |  2 +-
 neptunecontrib/monitoring/fairness.py | 12 ++++++------
 setup.py                              |  2 +-
 3 files changed, 8 insertions(+), 8 deletions(-)

diff --git a/docs/conf.py b/docs/conf.py
index 2955ada..ba80a64 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -49,7 +49,7 @@
 # The short X.Y version
 version = '0.13'
 # The full version, including alpha/beta/rc tags
-release = '0.13.1'
+release = '0.13.2'
 
 
 # -- General configuration ---------------------------------------------------
diff --git a/neptunecontrib/monitoring/fairness.py b/neptunecontrib/monitoring/fairness.py
index f36a98e..40995c5 100644
--- a/neptunecontrib/monitoring/fairness.py
+++ b/neptunecontrib/monitoring/fairness.py
@@ -24,7 +24,7 @@
 from neptunecontrib.monitoring.utils import send_figure
 
 
-def log_fairness_classification_metrics(y_true, y_pred_class, sensitive,
+def log_fairness_classification_metrics(y_true, y_pred_class, sensitive_attributes,
                                         favorable_label, unfavorable_label,
                                         privileged_groups, unprivileged_groups,
                                         experiment=None, prefix=''):
@@ -47,7 +47,7 @@ def log_fairness_classification_metrics(y_true, y_pred_class, sensitive,
     Args:
         y_true (array-like, shape (n_samples)): Ground truth (correct) target values.
         y_pred_class (array-like, shape (n_samples)): Class predictions with values 0 or 1.
-        sensitive (pandas.DataFrame, shape (n_samples, k)): datafame containing only sensitive columns.
+        sensitive_attributes (pandas.DataFrame, shape (n_samples, k)): datafame containing only sensitive columns.
         favorable_label (str or int): label that is favorable, brings positive value to a person being classified.
         unfavorable_label (str or int): label that is unfavorable, brings positive value to a person being classified.
         privileged_groups (dict): dictionary with column names and list of values for those columns that
@@ -66,7 +66,7 @@ def log_fairness_classification_metrics(y_true, y_pred_class, sensitive,
            neptune.init()
 
            with neptune.create_experiment():
-               log_fairness_classification_metrics(y_test, y_test_pred_class, test['race'],
+               log_fairness_classification_metrics(y_test, y_test_pred_class, test[['race']],
                                                    favorable_label='granted_parole',
                                                    unfavorable_label='not_granted_parole',
                                                    privileged_groups={'race':['Caucasian']},
@@ -80,15 +80,15 @@ def log_fairness_classification_metrics(y_true, y_pred_class, sensitive,
     bias_info = {'favorable_label': favorable_label,
                  'unfavorable_label': unfavorable_label,
-                 'protected_columns': sensitive.columns.tolist()}
+                 'protected_columns': sensitive_attributes.columns.tolist()}
 
     privileged_info = _fmt_priveleged_info(privileged_groups, unprivileged_groups)
 
     data = pd.DataFrame()
     data['ground_truth'] = y_true.values
     data['prediction'] = y_pred_class.values
 
-    for col in sensitive.columns:
-        data[col] = sensitive[col].values
+    for col in sensitive_attributes.columns:
+        data[col] = sensitive_attributes[col].values
 
     ground_truth_test = _make_dataset(data, 'ground_truth', **bias_info, **privileged_info)
     prediction_test = _make_dataset(data, 'prediction', **bias_info, **privileged_info)
diff --git a/setup.py b/setup.py
index 055f94b..216380b 100644
--- a/setup.py
+++ b/setup.py
@@ -20,7 +20,7 @@ def main():
 
     setup(
         name='neptune-contrib',
-        version='0.13.1',
+        version='0.13.2',
         description='Neptune Python library contributions',
         author='neptune.ml',
         author_email='contact@neptune.ml',