Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account-related emails.

Already on GitHub? Sign in to your account

Require Python 3.11, update ruff and pyproject.toml #678

Merged
merged 4 commits into the base branch (branch names lost in page extraction)
Dec 12, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion .github/workflows/testing.yml
Original file line number Diff line number Diff line change
Expand Up @@ -17,7 +17,7 @@ jobs:
strategy:
fail-fast: false
matrix:
python-version: ["3.10", "3.11", "3.12" ]
python-version: ["3.11", "3.12" ]
os: [ ubuntu-latest ]
include:
- python-version: "3.12"
Expand Down
2 changes: 1 addition & 1 deletion .pre-commit-config.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,7 @@ repos:
- id: trailing-whitespace

- repo: https://github.com/astral-sh/ruff-pre-commit
rev: v0.7.1
rev: v0.8.2
hooks:
- id: ruff
args: [ --fix ]
Expand Down
45 changes: 22 additions & 23 deletions pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,7 @@ authors = [
{ name = "Equinor ASA", email = "[email protected]" },
]
description = "Forward models and workflows for Ert."
requires-python = ">=3.8"
requires-python = ">=3.11"
readme = "README.md"
license = { text = "GPL-3.0" }
classifiers = [
Expand All @@ -18,17 +18,15 @@ classifiers = [
"License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)",
"Natural Language :: English",
"Programming Language :: Python",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"Programming Language :: Python :: 3.11",
"Programming Language :: Python :: 3.12",
"Topic :: Scientific/Engineering",
"Topic :: Software Development :: Libraries :: Python Modules",
]
dynamic = ["version"]
dependencies = [
"ert>=11.1.0-b3",
"cvxpy",
"ert>=12.0.0rc0",
"fmu-ensemble>1.6.5",
"importlib_metadata",
"importlib_resources",
Expand All @@ -42,26 +40,25 @@ dependencies = [
"segyio",
"xlrd",
"xtgeo>=2.15",
"cvxpy",
]

[project.urls]
repository = "https://github.com/equinor/semeio"

[project.entry-points."ert"]
semeio_forward_models = "semeio.hook_implementations.forward_models"
CsvExport2Job = "semeio.workflows.csv_export2.csv_export2"
AhmAnalysisJob = "semeio.workflows.ahm_analysis.ahmanalysis"
CsvExport2Job = "semeio.workflows.csv_export2.csv_export2"
semeio_forward_models = "semeio.hook_implementations.forward_models"

[project.entry-points."console_scripts"]
csv_export2 = "semeio.workflows.csv_export2.csv_export2:cli"
overburden_timeshift = "semeio.forward_models.scripts.overburden_timeshift:main_entry_point"
design2params = "semeio.forward_models.scripts.design2params:main_entry_point"
gendata_rft = "semeio.forward_models.scripts.gendata_rft:main_entry_point"
design_kw = "semeio.forward_models.scripts.design_kw:main_entry_point"
fm_pyscal = "semeio.forward_models.scripts.fm_pyscal:main_entry_point"
replace_string = "semeio.forward_models.scripts.replace_string:main_entry_point"
fmudesign = "semeio.fmudesign.fmudesignrunner:main"
gendata_rft = "semeio.forward_models.scripts.gendata_rft:main_entry_point"
overburden_timeshift = "semeio.forward_models.scripts.overburden_timeshift:main_entry_point"
replace_string = "semeio.forward_models.scripts.replace_string:main_entry_point"

[tool.setuptools_scm]
write_to = "src/semeio/version.py"
Expand Down Expand Up @@ -90,16 +87,16 @@ style = [
types = [
"mypy",
"pandas-stubs",
"types-setuptools",
"types-PyYAML",
"types-openpyxl",
"types-setuptools",
]

[tool.pytest.ini_options]
addopts = "-ra --strict-markers"
markers = [
"ert_integration: Involves running the ert application explicitly",
"equinor_test: Requires presences of test data in specified location",
"ert_integration: Involves running the ert application explicitly",
"integration_test: Not a unit test",
]

Expand All @@ -110,20 +107,22 @@ extend-exclude = ["tests/legacy_test_data"]

[tool.ruff.lint]
select = [
"W", # pycodestyle
"I", # isort
"B", # flake-8-bugbear
"SIM", # flake-8-simplify
"F", # pyflakes
"C4", # flake8-comprehensions
"PL", # pylint
"F", # pyflakes
"I", # isort
"NPY", # numpy specific rules
"PL", # pylint
"SIM", # flake-8-simplify
"UP", # pyupgrade
"W", # pycodestyle
]
ignore = ["PLW2901", # redefined-loop-name
"PLR2004", # magic-value-comparison
"PLR0915", # too-many-statements
"PLR0912", # too-many-branches
"PLR0911", # too-many-return-statements
ignore = [
"PLR0911", # too-many-return-statements
"PLR0912", # too-many-branches
"PLR0915", # too-many-statements
"PLR2004", # magic-value-comparison
"PLW2901", # redefined-loop-name
]

[tool.ruff.lint.pylint]
Expand Down
5 changes: 2 additions & 3 deletions src/semeio/_docs_utils/_json_schema_2_rst.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,4 @@
from copy import deepcopy
from typing import List, Optional, Union


def _insert_ref(schema: dict, defs: dict) -> dict:
Expand Down Expand Up @@ -66,8 +65,8 @@ def _create_docs(schema: dict) -> str:


def _make_documentation(
schema: Union[list, dict, str],
required: Optional[List[str]] = None,
schema: list | dict | str,
required: list[str] | None = None,
level: int = 0,
preface: str = "",
element_seperator: str = "\n\n",
Expand Down
74 changes: 33 additions & 41 deletions src/semeio/fmudesign/_excel2dict.py
Original file line number Diff line number Diff line change
Expand Up @@ -83,9 +83,7 @@ def _find_geninput_sheetname(input_filename):

if len(general_input_sheet) > 1:
raise ValueError(
"More than one sheet with general input. Sheetnames are {} ".format(
general_input_sheet
)
f"More than one sheet with general input. Sheetnames are {general_input_sheet} "
)

if not general_input_sheet:
Expand Down Expand Up @@ -120,9 +118,7 @@ def _find_onebyone_defaults_sheet(input_filename):
default_values_sheet.append(sheet)
if len(default_values_sheet) > 1:
raise ValueError(
"More than one sheet with default values. Sheetnames are {} ".format(
default_values_sheet
)
f"More than one sheet with default values. Sheetnames are {default_values_sheet} "
)

if len(default_values_sheet) == []:
Expand Down Expand Up @@ -175,9 +171,9 @@ def _check_designinput(dsgn_input):
if _has_value(row.sensname):
if row.sensname in sensitivity_names:
raise ValueError(
"sensname '{}' was found on more than one row in designinput "
f"sensname '{row.sensname}' was found on more than one row in designinput "
"sheet. Two sensitivities can not share the same sensname. "
"Please correct this and rerun".format(row.sensname)
"Please correct this and rerun"
)
sensitivity_names.append(row.sensname)

Expand All @@ -189,10 +185,10 @@ def _check_for_mixed_sensitivities(sens_name, sens_group):
types = sens_group.groupby("type", sort=False)
if len(types) > 1:
raise ValueError(
"The sensitivity with sensname '{}' in designinput sheet contains more "
f"The sensitivity with sensname '{sens_name}' in designinput sheet contains more "
"than one sensitivity type. For each sensname all parameters must be "
"specified using the same type (seed, scenario, dist, ref, background, "
"extern)".format(sens_name)
"extern)"
)


Expand Down Expand Up @@ -393,11 +389,9 @@ def _read_defaultvalues(filename, sheetname):
for row in default_df.itertuples():
if str(row[0]) in default_dict:
print(
"WARNING: The default value '{}' "
"is listed twice in the sheet '{}'. "
"Only the first entry will be used in output file".format(
row[0], sheetname
)
f"WARNING: The default value '{row[0]}' "
f"is listed twice in the sheet '{sheetname}'. "
"Only the first entry will be used in output file"
)
else:
default_dict[str(row[0])] = row[1]
Expand Down Expand Up @@ -432,9 +426,9 @@ def _read_dependencies(filename, sheetname, from_parameter):
depend_dict["to_params"][key] = depend_df[key].tolist()
else:
raise ValueError(
"Parameter {} specified to have derived parameters, "
"but the sheet specifying the dependencies {} does "
"not contain the input parameter. ".format(from_parameter, sheetname)
f"Parameter {from_parameter} specified to have derived parameters, "
f"but the sheet specifying the dependencies {sheetname} does "
"not contain the input parameter. "
)
return depend_dict

Expand Down Expand Up @@ -479,25 +473,25 @@ def _read_background(inp_filename, bck_sheet):
)
if not _has_value(row.dist_param1):
raise ValueError(
"Parameter {} has been input "
f"Parameter {row.param_name} has been input "
"in background sheet but with empty "
"first distribution parameter ".format(row.param_name)
"first distribution parameter "
)
if not _has_value(row.dist_param2) and _has_value(row.dist_param3):
raise ValueError(
"Parameter {} has been input in "
f"Parameter {row.param_name} has been input in "
"background sheet with "
'value for "dist_param3" while '
'"dist_param2" is empty. This is not '
"allowed".format(row.param_name)
"allowed"
)
if not _has_value(row.dist_param3) and _has_value(row.dist_param4):
raise ValueError(
"Parameter {} has been input in "
f"Parameter {row.param_name} has been input in "
"background sheet with "
'value for "dist_param4" while '
'"dist_param3" is empty. This is not '
"allowed".format(row.param_name)
"allowed"
)
distparams = [
item
Expand Down Expand Up @@ -545,15 +539,15 @@ def _read_scenario_sensitivity(sensgroup):
for row in sensgroup.itertuples():
if not _has_value(row.param_name):
raise ValueError(
"Scenario sensitivity {} specified "
f"Scenario sensitivity {row.sensname} specified "
"where one line has empty parameter "
"name ".format(row.sensname)
"name "
)
if not _has_value(row.value1):
raise ValueError(
"Parameter {} har been input "
f"Parameter {row.param_name} har been input "
'as type "scenario" but with empty '
"value in value1 column ".format(row.param_name)
"value in value1 column "
)
casedict1[str(row.param_name)] = row.value1

Expand Down Expand Up @@ -596,19 +590,17 @@ def _read_constants(sensgroup):
for row in sensgroup.itertuples():
if not _has_value(row.dist_param1):
raise ValueError(
"Parameter name {} has been input "
f"Parameter name {row.param_name} has been input "
'in a sensitivity of type "seed". \n'
"If {} was meant to be the name of "
f"If {row.param_name} was meant to be the name of "
"the seed parameter, this is "
"unfortunately not allowed. "
"The seed parameter name is standardised "
"to RMS_SEED and should not be specified.\n "
"If you instead meant to specify a constant "
"value for another parameter in the seed "
'sensitivity, please remember "const" in '
'dist_name and a value in "dist_param1". '.format(
row.param_name, row.param_name
)
'dist_name and a value in "dist_param1". '
)
distparams = row.dist_param1
paramdict[str(row.param_name)] = [str(row.dist_name), distparams]
Expand All @@ -631,29 +623,29 @@ def _read_dist_sensitivity(sensgroup):
for row in sensgroup.itertuples():
if not _has_value(row.param_name):
raise ValueError(
"Dist sensitivity {} specified "
f"Dist sensitivity {row.sensname} specified "
"where one line has empty parameter "
"name ".format(row.sensname)
"name "
)
if not _has_value(row.dist_param1):
raise ValueError(
"Parameter {} has been input "
f"Parameter {row.param_name} has been input "
'as type "dist" but with empty '
"first distribution parameter ".format(row.param_name)
"first distribution parameter "
)
if not _has_value(row.dist_param2) and _has_value(row.dist_param3):
raise ValueError(
"Parameter {} has been input with "
f"Parameter {row.param_name} has been input with "
'value for "dist_param3" while '
'"dist_param2" is empty. This is not '
"allowed".format(row.param_name)
"allowed"
)
if not _has_value(row.dist_param3) and _has_value(row.dist_param4):
raise ValueError(
"Parameter {} has been input with "
f"Parameter {row.param_name} has been input with "
'value for "dist_param4" while '
'"dist_param3" is empty. This is not '
"allowed".format(row.param_name)
"allowed"
)
distparams = [
item
Expand Down
4 changes: 2 additions & 2 deletions src/semeio/fmudesign/_tornado_onebyone.py
Original file line number Diff line number Diff line change
Expand Up @@ -188,7 +188,7 @@ def calc_tornadoinput(
avg1 = 0
print(
"Warning: Number of ok realizations is 0 in"
"sensitivity {} case1".format(sensname)
f"sensitivity {sensname} case1"
)

if designsummary.loc[sensno]["senstype"] == "mc":
Expand Down Expand Up @@ -227,7 +227,7 @@ def calc_tornadoinput(
avg2 = 0
print(
"Warning: Number of ok realizations is 0 in"
"sensitivity {} case2".format(sensname)
f"sensitivity {sensname} case2"
)
subset2name = designsummary.loc[sensno]["casename2"]
else:
Expand Down
Loading
Loading