
pytest warnings fix
MichaelFu512 committed May 22, 2024
1 parent 14854eb commit e807ba8
Showing 9 changed files with 30 additions and 24 deletions.
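Background: newer pytest releases deprecate and then remove pytest.warns(None) as a way of asserting that no warning was raised, which is why every test below switches to the standard-library recorder warnings.catch_warnings(record=True). A minimal sketch of the replacement pattern follows; the might_warn helper and the simplefilter("always") call are illustrative additions for this sketch, not code from the commit.

import warnings


def might_warn():
    # Illustrative stand-in for the evalml calls exercised in the tests below.
    return 42


def test_no_warnings_emitted():
    # Old style: with pytest.warns(None) as record:  (deprecated, later removed)
    # New style: record warnings with the standard library instead.
    with warnings.catch_warnings(record=True) as record:
        # Re-emit every warning so the emptiness check is not affected by
        # filters or by warnings already shown earlier in the session.
        warnings.simplefilter("always")
        might_warn()
    assert len(record) == 0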
6 changes: 3 additions & 3 deletions evalml/tests/automl_tests/test_automl.py
@@ -3846,13 +3846,13 @@ def test_score_batch_before_fitting_yields_error_nan_scores(
def test_high_cv_check_no_warning_for_divide_by_zero(X_y_binary, dummy_binary_pipeline):
X, y = X_y_binary
automl = AutoMLSearch(X_train=X, y_train=y, problem_type="binary")
with pytest.warns(None) as warnings:
with warnings.catch_warnings(record=True) as automl_warnings:
# mean is 0 but std is not
automl._check_for_high_variance(
dummy_binary_pipeline,
cv_scores=[0.0, 1.0, -1.0],
)
assert len(warnings) == 0
assert len(automl_warnings) == 0


@pytest.mark.parametrize(
@@ -4367,7 +4367,7 @@ def dummy_mock_get_preprocessing_components(*args, **kwargs):
mock_get_preprocessing_components.side_effect = (
dummy_mock_get_preprocessing_components
)
with pytest.warns(None) as warnings_logged:
with warnings.catch_warnings(record=True) as warnings_logged:
automl = AutoMLSearch(
X_train=X,
y_train=y,
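A note on the rename from warnings to automl_warnings above: test_automl.py evidently already imports the warnings module at module scope (no import is added in this file), and assigning to a name anywhere inside a function makes that name local to the whole function. Keeping "as warnings" would therefore turn the warnings.catch_warnings(...) lookup on the same line into an UnboundLocalError; the same reasoning applies to the warnings_deprecated rename further down. A short sketch of the failure mode, with illustrative function names that are not evalml code:

import warnings


def broken():
    # "as warnings" makes warnings a local variable for the entire function
    # body, so the module lookup on this very line raises UnboundLocalError
    # as soon as broken() is called.
    with warnings.catch_warnings(record=True) as warnings:
        pass


def fixed():
    # Binding the recorded list to a different name leaves the module alone.
    with warnings.catch_warnings(record=True) as automl_warnings:
        warnings.simplefilter("always")
        warnings.warn("example", UserWarning)
    assert len(automl_warnings) == 1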
6 changes: 3 additions & 3 deletions evalml/tests/automl_tests/test_pipeline_search_plots.py
@@ -1,7 +1,7 @@
import warnings
from unittest.mock import MagicMock, patch

import pandas as pd
import pytest

from evalml.automl.pipeline_search_plots import SearchIterationPlot

@@ -53,12 +53,12 @@ def test_jupyter(import_check, jupyter_check):
mock_data = MagicMock()

jupyter_check.return_value = True
with pytest.warns(None) as graph_valid:
with warnings.catch_warnings(record=True) as graph_valid:
SearchIterationPlot(mock_data.results, mock_data.objective)
assert len(graph_valid) == 0
import_check.assert_called_with("ipywidgets", warning=True)

jupyter_check.return_value = False
with pytest.warns(None) as graph_valid:
with warnings.catch_warnings(record=True) as graph_valid:
SearchIterationPlot(mock_data.results, mock_data.objective)
assert len(graph_valid) == 0
13 changes: 7 additions & 6 deletions evalml/tests/model_understanding_tests/prediction_explanations_tests/test_force_plots.py
@@ -1,3 +1,4 @@
import warnings
from itertools import product
from unittest.mock import patch

@@ -79,7 +80,7 @@ def test_force_plot_binary(
else:
# Code chunk to test where initjs is called if jupyter is recognized
jupyter_check.return_value = False
with pytest.warns(None) as graph_valid:
with warnings.catch_warnings(record=True) as graph_valid:
results = graph_force_plot(
pipeline,
rows_to_explain=rows_to_explain,
@@ -88,11 +89,11 @@ def test_force_plot_binary(
matplotlib=False,
)
assert not initjs.called
warnings = set([str(gv) for gv in graph_valid.list])
assert all(["DeprecationWarning" in w for w in warnings])
warnings_deprecated = set([str(gv) for gv in graph_valid.list])
assert all(["DeprecationWarning" in w for w in warnings_deprecated])

jupyter_check.return_value = True
with pytest.warns(None) as graph_valid:
with warnings.catch_warnings(record=True) as graph_valid:
results = graph_force_plot(
pipeline,
rows_to_explain=rows_to_explain,
@@ -101,8 +102,8 @@ def test_force_plot_binary(
matplotlib=False,
)
assert initjs.called
warnings = set([str(gv) for gv in graph_valid.list])
assert all(["DeprecationWarning" in w for w in warnings])
warnings_deprecated = set([str(gv) for gv in graph_valid.list])
assert all(["DeprecationWarning" in w for w in warnings_deprecated])

# Should have a result per row to explain.
assert len(results) == len(rows_to_explain)
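A note on the assertions in this file: with warnings.catch_warnings(record=True) the recorded entries are warnings.WarningMessage objects, so besides string-matching their text representation (as the test above does), the category can be checked directly. An illustrative sketch, not code from this commit:

import warnings


def test_only_deprecation_warnings_recorded():
    with warnings.catch_warnings(record=True) as recorded:
        warnings.simplefilter("always")
        # Stand-in for the shap calls made by graph_force_plot.
        warnings.warn("force plots are deprecated", DeprecationWarning)

    # Each entry exposes .category and .message.
    assert all(issubclass(w.category, DeprecationWarning) for w in recorded)
    messages = {str(w.message) for w in recorded}
    assert all("deprecated" in m for m in messages)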
8 changes: 4 additions & 4 deletions evalml/tests/model_understanding_tests/test_metrics.py
@@ -611,22 +611,22 @@ def test_jupyter_graph_check(
y = y.ww.iloc[:20]
logistic_regression_binary_pipeline.fit(X, y)
jupyter_check.return_value = False
with pytest.warns(None) as graph_valid:
with warnings.catch_warnings(record=True) as graph_valid:
graph_confusion_matrix(y, y)
assert len(graph_valid) == 0

jupyter_check.return_value = True
with pytest.warns(None) as graph_valid:
with warnings.catch_warnings(record=True) as graph_valid:
rs = get_random_state(42)
y_pred_proba = y * rs.random(y.shape)
graph_precision_recall_curve(y, y_pred_proba)
assert len(graph_valid) == 0
import_check.assert_called_with("ipywidgets", warning=True)
with pytest.warns(None) as graph_valid:
with warnings.catch_warnings(record=True) as graph_valid:
graph_confusion_matrix(y, y)
assert len(graph_valid) == 0
import_check.assert_called_with("ipywidgets", warning=True)
with pytest.warns(None) as graph_valid:
with warnings.catch_warnings(record=True) as graph_valid:
rs = get_random_state(42)
y_pred_proba = y * rs.random(y.shape)
graph_roc_curve(y, y_pred_proba)
3 changes: 2 additions & 1 deletion evalml/tests/model_understanding_tests/test_partial_dependence.py
@@ -1,5 +1,6 @@
import collections
import re
import warnings
from unittest.mock import patch

import featuretools as ft
@@ -2465,7 +2466,7 @@ def test_partial_dependence_jupyter_graph_check(
logistic_regression_binary_pipeline.fit(X, y)

jupyter_check.return_value = True
with pytest.warns(None) as graph_valid:
with warnings.catch_warnings(record=True) as graph_valid:
graph_partial_dependence(
logistic_regression_binary_pipeline,
X,
5 changes: 3 additions & 2 deletions evalml/tests/model_understanding_tests/test_permutation_importance.py
@@ -1,3 +1,4 @@
import warnings
from unittest.mock import PropertyMock, patch

import numpy as np
@@ -923,7 +924,7 @@ def test_jupyter_graph_check(
y = y.ww.iloc[:20]
logistic_regression_binary_pipeline.fit(X, y)
jupyter_check.return_value = False
with pytest.warns(None) as graph_valid:
with warnings.catch_warnings(record=True) as graph_valid:
graph_permutation_importance(
logistic_regression_binary_pipeline,
X,
@@ -933,7 +934,7 @@ def test_jupyter_graph_check(
assert len(graph_valid) == 0

jupyter_check.return_value = True
with pytest.warns(None) as graph_valid:
with warnings.catch_warnings(record=True) as graph_valid:
graph_permutation_importance(
logistic_regression_binary_pipeline,
X,
5 changes: 3 additions & 2 deletions evalml/tests/model_understanding_tests/test_visualizations.py
@@ -1,4 +1,5 @@
import os
import warnings
from collections import OrderedDict
from unittest.mock import patch

@@ -236,7 +237,7 @@ def test_jupyter_graph_check(
false_negative=-2,
)
jupyter_check.return_value = True
with pytest.warns(None) as graph_valid:
with warnings.catch_warnings(record=True) as graph_valid:
graph_binary_objective_vs_threshold(
logistic_regression_binary_pipeline,
X,
@@ -248,7 +249,7 @@ def test_jupyter_graph_check(
import_check.assert_called_with("ipywidgets", warning=True)

Xr, yr = X_y_regression
with pytest.warns(None) as graph_valid:
with warnings.catch_warnings(record=True) as graph_valid:
rs = get_random_state(42)
y_preds = yr * rs.random(yr.shape)
graph_prediction_vs_actual(yr, y_preds)
3 changes: 2 additions & 1 deletion evalml/tests/objective_tests/test_standard_metrics.py
@@ -1,3 +1,4 @@
import warnings
from itertools import product

import numpy as np
@@ -708,7 +709,7 @@ def test_mse_linear_model():
def test_mcc_catches_warnings():
y_true = [1, 0, 1, 1]
y_predicted = [0, 0, 0, 0]
with pytest.warns(None) as record:
with warnings.catch_warnings(record=True) as record:
MCCBinary().objective_function(y_true, y_predicted)
MCCMulticlass().objective_function(y_true, y_predicted)
assert len(record) == 0
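One general caveat about the zero-warning assertions in this commit, applicable to all of the assert len(...) == 0 checks and not only to test_mcc_catches_warnings: catch_warnings(record=True) records under whatever warning filters are active at that point, so a category the active configuration ignores is never recorded and the check passes vacuously. Adding simplefilter("always") inside the block makes the emptiness check independent of the surrounding configuration; that call and the quiet_objective stand-in below are illustrations, not part of this commit.

import warnings


def quiet_objective(y_true, y_predicted):
    # Illustrative stand-in for MCCBinary().objective_function(...).
    return 0.0


def test_objective_emits_no_warnings():
    with warnings.catch_warnings(record=True) as record:
        # Record everything, including categories the default filters ignore.
        warnings.simplefilter("always")
        quiet_objective([1, 0, 1, 1], [0, 0, 0, 0])
    assert len(record) == 0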
5 changes: 3 additions & 2 deletions evalml/tests/pipeline_tests/test_graphs.py
@@ -1,4 +1,5 @@
import os
import warnings
from unittest.mock import patch

import numpy as np
@@ -157,12 +158,12 @@ def test_jupyter_graph_check(import_check, jupyter_check, X_y_binary, test_pipel
clf = test_pipeline
clf.fit(X, y)
jupyter_check.return_value = False
with pytest.warns(None) as graph_valid:
with warnings.catch_warnings(record=True) as graph_valid:
clf.graph_feature_importance()
assert len(graph_valid) == 0

jupyter_check.return_value = True
with pytest.warns(None) as graph_valid:
with warnings.catch_warnings(record=True) as graph_valid:
clf.graph_feature_importance()
import_check.assert_called_with("ipywidgets", warning=True)

