Automated Latest Dependency Updates #4459

Merged · 6 commits · Aug 23, 2024
8 changes: 5 additions & 3 deletions docs/source/user_guide/automl.ipynb
@@ -266,9 +266,6 @@
"metadata": {},
"outputs": [],
"source": [
"from evalml.pipelines import MulticlassClassificationPipeline\n",
"\n",
"\n",
"automl_custom = evalml.automl.AutoMLSearch(\n",
" X_train=X_train,\n",
" y_train=y_train,\n",
@@ -368,6 +365,11 @@
"metadata": {},
"outputs": [],
"source": [
"import logging\n",
"\n",
"logger = logging.getLogger(__name__)\n",
"\n",
"\n",
"# error_callback example; this is implemented in the evalml library\n",
"def raise_error_callback(exception, traceback, automl, **kwargs):\n",
" \"\"\"Raises the exception thrown by the AutoMLSearch object. Also logs the exception as an error.\"\"\"\n",
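The error_callback cell above is cut off by the diff view. As context, here is a minimal sketch of what the completed cell might look like; the logging calls approximate evalml's own raise_error_callback (in evalml.automl.callbacks) and are not copied from this PR.

import logging

logger = logging.getLogger(__name__)


# error_callback example; this mirrors the callback implemented in the evalml library.
def raise_error_callback(exception, traceback, automl, **kwargs):
    """Raises the exception thrown by the AutoMLSearch object. Also logs the exception as an error."""
    logger.error(f"AutoML search raised a fatal exception: {str(exception)}")
    logger.error("\n".join(traceback))
    raise exception

In the notebook this function is then passed to the search via the error_callback parameter of AutoMLSearch, so any pipeline error stops the run instead of being silently recorded.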
1 change: 0 additions & 1 deletion docs/source/user_guide/data_check_actions.ipynb
@@ -32,7 +32,6 @@
"outputs": [],
"source": [
"import woodwork as ww\n",
"import pandas as pd\n",
"from evalml import AutoMLSearch\n",
"from evalml.demos import load_fraud\n",
"from evalml.preprocessing import split_data"
2 changes: 1 addition & 1 deletion docs/source/user_guide/data_checks.ipynb
@@ -688,7 +688,7 @@
"metadata": {},
"outputs": [],
"source": [
"from evalml.data_checks import NoVarianceDataCheck, DataCheckWarning\n",
"from evalml.data_checks import NoVarianceDataCheck, DataCheckError\n",
"\n",
"X = pd.DataFrame(\n",
" {\n",
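For context on the import swap above: NoVarianceDataCheck flags constant columns, and the docs cell now works with error-level messages. A toy sketch of running the check (the toy data and exact message format are assumptions; validate's return format has varied across evalml releases):

import pandas as pd
from evalml.data_checks import NoVarianceDataCheck

# One feature is constant, so the check should flag it.
X = pd.DataFrame({"no_variance": [1, 1, 1, 1], "feature": [1, 2, 3, 4]})
y = pd.Series([0, 1, 0, 1])

no_variance_check = NoVarianceDataCheck()
print(no_variance_check.validate(X, y))  # expect an error-level message about "no_variance"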
2 changes: 0 additions & 2 deletions docs/source/user_guide/model_understanding.ipynb
@@ -928,8 +928,6 @@
"metadata": {},
"outputs": [],
"source": [
"import shap\n",
"\n",
"from evalml.model_understanding.force_plots import graph_force_plot\n",
"\n",
"rows_to_explain = [0] # Should be a list of integer indices of the rows to explain.\n",
2 changes: 0 additions & 2 deletions docs/source/user_guide/pipelines.ipynb
@@ -586,9 +586,7 @@
"source": [
"from evalml.pipelines.utils import generate_pipeline_code\n",
"from evalml.pipelines import BinaryClassificationPipeline\n",
"import pandas as pd\n",
"from evalml.utils import infer_feature_types\n",
"from skopt.space import Integer\n",
"\n",
"\n",
"class MyDropNullColumns(Transformer):\n",
2 changes: 1 addition & 1 deletion docs/source/user_guide/timeseries.ipynb
@@ -496,7 +496,7 @@
"outputs": [],
"source": [
"stl = STLDecomposer()\n",
"assert stl.period == None\n",
"assert stl.period is None\n",
"stl.fit(X_stl, y_stl)\n",
"print(stl.period)"
]
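The one-line change above replaces an equality test with an identity test. A quick illustration of why "is None" is the safer idiom (and the one linters flag under E711): "== None" defers to __eq__, which NumPy and pandas objects overload element-wise.

import numpy as np

arr = np.array([1, 2, 3])

print(arr == None)  # array([False, False, False]): an array, not a single truth value
print(arr is None)  # False: an unambiguous identity check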
22 changes: 11 additions & 11 deletions evalml/tests/dependency_update_check/latest_dependency_versions.txt
@@ -1,30 +1,30 @@
-black==24.4.2
+black==24.8.0
catboost==1.2.5
category-encoders==2.5.1.post0
click==8.1.7
cloudpickle==3.0.0
colorama==0.4.6
-dask==2024.6.2
-dask-expr==1.1.6
-distributed==2024.6.2
+dask==2024.8.0
+dask-expr==1.1.10
+distributed==2024.8.0
featuretools==1.31.0
graphviz==0.20.3
-holidays==0.52
+holidays==0.55
imbalanced-learn==0.12.3
-ipywidgets==8.1.3
+ipywidgets==8.1.5
kaleido==0.2.1
-lightgbm==4.4.0
+lightgbm==4.5.0
lime==0.2.0.1
-matplotlib==3.9.1
+matplotlib==3.9.2
matplotlib-inline==0.1.7
networkx==3.2.1
nlp-primitives==2.13.0
numpy==1.26.4
packaging==24.1
pandas==2.0.3
-plotly==5.22.0
+plotly==5.23.0
pmdarima==2.0.4
-pyzmq==26.0.3
+pyzmq==26.2.0
scikit-learn==1.4.2
scikit-optimize==0.10.2
scipy==1.13.1
@@ -35,4 +35,4 @@ statsmodels==0.14.2
texttable==1.7.0
tomli==2.0.1
woodwork==0.31.0
-xgboost==2.1.0
+xgboost==2.1.1
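The pinned list above is what the automated job regenerates. Purely as an illustration (this is not evalml's actual check, and the filename argument is an assumption), a consumer of such a file might parse the "name==version" pins and compare them with what is currently installed:

from importlib.metadata import PackageNotFoundError, version


def read_pinned_versions(path="latest_dependency_versions.txt"):
    # Collect {package: pinned_version} from lines like "black==24.8.0".
    pins = {}
    with open(path) as f:
        for line in f:
            line = line.strip()
            if line and "==" in line:
                name, _, pinned = line.partition("==")
                pins[name] = pinned
    return pins


def report_mismatches(pins):
    # Print any package whose installed version differs from its pin.
    for name, pinned in pins.items():
        try:
            installed = version(name)
        except PackageNotFoundError:
            print(f"{name}: not installed (pinned {pinned})")
            continue
        if installed != pinned:
            print(f"{name}: installed {installed}, pinned {pinned}")


if __name__ == "__main__":
    report_mismatches(read_pinned_versions())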
evalml/tests/model_understanding_tests/test_partial_dependence.py
@@ -2881,11 +2881,14 @@
index="index",
make_index=True,
)
-    X_fm, features = ft.dfs(
-        entityset=es,
-        target_dataframe_name="X",
-        trans_primitives=["LSA"],
-    )
+    try:
+        X_fm, features = ft.dfs(
+            entityset=es,
+            target_dataframe_name="X",
+            trans_primitives=["LSA"],
+        )
+    except ValueError:
+        pytest.xfail("NLTK bug within nlp-primitives")

(Codecov flagged the added lines 2884-2885 and 2890-2891 as not covered by tests.)

dfs_transformer = DFSTransformer(features=features)
pipeline = RegressionPipeline(
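The new try/except above relies on calling pytest.xfail() imperatively: it aborts the current test immediately and records it as an expected failure, which is useful when test setup itself can raise because of an upstream bug. A minimal stand-alone illustration of the pattern (the ValueError here just simulates the nlp-primitives/NLTK failure):

import pytest


def test_setup_that_may_hit_upstream_bug():
    try:
        raise ValueError("simulated nlp-primitives failure")  # stand-in for ft.dfs(...)
    except ValueError:
        pytest.xfail("NLTK bug within nlp-primitives")
    assert True  # never reached when the simulated bug triggers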
4 changes: 2 additions & 2 deletions pyproject.toml
@@ -77,7 +77,7 @@ dependencies = [

[project.optional-dependencies]
test = [
"pytest >= 7.1.2",
"pytest >= 7.1.2, < 8.3.0",
"pytest-xdist >= 2.1.0",
"pytest-timeout >= 1.4.2",
"pytest-cov >= 2.10.1",
@@ -171,7 +171,7 @@ src = ["evalml"]
"evalml/tests/**" = ["D"]
"evalml/utils/**" = ["D"]
".github/**" = ["D"]
"docs/**" = ["D"]
"docs/**" = ["D", "I001", "E402", "F811", "F401"]

[tool.ruff.lint.isort]
known-first-party = ["evalml"]
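For reference, the extra ruff codes now ignored under docs/** are I001 (unsorted or unformatted import block), E402 (module-level import not at the top of the file), F811 (redefinition of an unused name), and F401 (imported but unused). A contrived snippet of the kind of notebook code those ignores tolerate; it is illustrative only and not taken from the docs:

import evalml  # F401: imported but never used in this cell

print("narrative output between imports")

import pandas as pd  # E402: import not at the top; F811 would fire if it re-imported a name bound above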