From f1fec0d63a01bd010824e8d82d6c4bd7177cbffc Mon Sep 17 00:00:00 2001 From: Fridolin Glatter Date: Thu, 18 Apr 2024 11:09:54 +0200 Subject: [PATCH 01/50] Make name column a mixin --- ixmp4/data/db/iamc/base.py | 1 + 1 file changed, 1 insertion(+) diff --git a/ixmp4/data/db/iamc/base.py b/ixmp4/data/db/iamc/base.py index 98b56581..65ec5567 100644 --- a/ixmp4/data/db/iamc/base.py +++ b/ixmp4/data/db/iamc/base.py @@ -7,6 +7,7 @@ Deleter, Enumerator, Lister, + NameMixin, Retriever, Selecter, Tabulator, From 66ebd6a58b8cf18d90ec1c8e4114091a2fa91617 Mon Sep 17 00:00:00 2001 From: Fridolin Glatter Date: Thu, 18 Apr 2024 11:11:00 +0200 Subject: [PATCH 02/50] Make optimization columns mixins * Covers: * run__id, data, name, uniqueness of name together with run__id * Adapts tests since default order of columns changes --- ixmp4/data/db/optimization/base.py | 3 +++ ixmp4/data/db/optimization/indexset/model.py | 2 +- ixmp4/data/db/optimization/scalar/model.py | 2 +- ixmp4/data/db/optimization/table/model.py | 7 ++++++- tests/core/test_optimization_table.py | 6 ++++++ tests/data/test_optimization_table.py | 6 ++++++ 6 files changed, 23 insertions(+), 3 deletions(-) diff --git a/ixmp4/data/db/optimization/base.py b/ixmp4/data/db/optimization/base.py index 7c3cc084..924a12e8 100644 --- a/ixmp4/data/db/optimization/base.py +++ b/ixmp4/data/db/optimization/base.py @@ -12,7 +12,10 @@ Deleter, Enumerator, Lister, + OptimizationDataMixin, + OptimizationNameMixin, Retriever, + RunIDMixin, Selecter, Tabulator, ) diff --git a/ixmp4/data/db/optimization/indexset/model.py b/ixmp4/data/db/optimization/indexset/model.py index 896692a4..f7d3f4a7 100644 --- a/ixmp4/data/db/optimization/indexset/model.py +++ b/ixmp4/data/db/optimization/indexset/model.py @@ -10,7 +10,7 @@ from .. import base -class IndexSet(base.BaseModel): +class IndexSet(base.BaseModel, base.RunIDMixin, base.UniqueNameRunIDMixin): NotFound: ClassVar = abstract.IndexSet.NotFound NotUnique: ClassVar = abstract.IndexSet.NotUnique DataInvalid: ClassVar = OptimizationDataValidationError diff --git a/ixmp4/data/db/optimization/scalar/model.py b/ixmp4/data/db/optimization/scalar/model.py index c364f807..2f719468 100644 --- a/ixmp4/data/db/optimization/scalar/model.py +++ b/ixmp4/data/db/optimization/scalar/model.py @@ -8,7 +8,7 @@ from .. import base -class Scalar(base.BaseModel): +class Scalar(base.BaseModel, base.RunIDMixin, base.UniqueNameRunIDMixin): NotFound: ClassVar = abstract.Scalar.NotFound NotUnique: ClassVar = abstract.Scalar.NotUnique DeletionPrevented: ClassVar = abstract.Scalar.DeletionPrevented diff --git a/ixmp4/data/db/optimization/table/model.py b/ixmp4/data/db/optimization/table/model.py index ea99cd11..352bd5cb 100644 --- a/ixmp4/data/db/optimization/table/model.py +++ b/ixmp4/data/db/optimization/table/model.py @@ -11,7 +11,12 @@ from .. 
import Column, base, utils -class Table(base.BaseModel): +class Table( + base.BaseModel, + base.OptimizationDataMixin, + base.RunIDMixin, + base.UniqueNameRunIDMixin, +): # NOTE: These might be mixin-able, but would require some abstraction NotFound: ClassVar = abstract.Table.NotFound NotUnique: ClassVar = abstract.Table.NotUnique diff --git a/tests/core/test_optimization_table.py b/tests/core/test_optimization_table.py index 57110950..9c7d320d 100644 --- a/tests/core/test_optimization_table.py +++ b/tests/core/test_optimization_table.py @@ -21,8 +21,11 @@ def df_from_list(tables: list[Table]): table.data, table.name, table.id, + table.name, table.created_at, table.created_by, + table.data, + table.run_id, ] for table in tables ], @@ -31,8 +34,11 @@ def df_from_list(tables: list[Table]): "data", "name", "id", + "name", "created_at", "created_by", + "data", + "run__id", ], ) diff --git a/tests/data/test_optimization_table.py b/tests/data/test_optimization_table.py index f4643da7..ee084b76 100644 --- a/tests/data/test_optimization_table.py +++ b/tests/data/test_optimization_table.py @@ -19,8 +19,11 @@ def df_from_list(tables: list): table.data, table.name, table.id, + table.name, table.created_at, table.created_by, + table.data, + table.run__id, ] for table in tables ], @@ -29,8 +32,11 @@ def df_from_list(tables: list): "data", "name", "id", + "name", "created_at", "created_by", + "data", + "run__id", ], ) From 2c67afd5c899f26fea769ab56832b8db03a3bfcb Mon Sep 17 00:00:00 2001 From: Fridolin Glatter Date: Thu, 18 Apr 2024 11:46:49 +0200 Subject: [PATCH 03/50] Inherit mixin requirements directly --- ixmp4/data/db/optimization/indexset/model.py | 2 +- ixmp4/data/db/optimization/scalar/model.py | 2 +- ixmp4/data/db/optimization/table/model.py | 1 - tests/core/test_optimization_table.py | 4 ++-- tests/data/test_optimization_table.py | 4 ++-- 5 files changed, 6 insertions(+), 7 deletions(-) diff --git a/ixmp4/data/db/optimization/indexset/model.py b/ixmp4/data/db/optimization/indexset/model.py index f7d3f4a7..9bb0f41d 100644 --- a/ixmp4/data/db/optimization/indexset/model.py +++ b/ixmp4/data/db/optimization/indexset/model.py @@ -10,7 +10,7 @@ from .. import base -class IndexSet(base.BaseModel, base.RunIDMixin, base.UniqueNameRunIDMixin): +class IndexSet(base.BaseModel, base.UniqueNameRunIDMixin): NotFound: ClassVar = abstract.IndexSet.NotFound NotUnique: ClassVar = abstract.IndexSet.NotUnique DataInvalid: ClassVar = OptimizationDataValidationError diff --git a/ixmp4/data/db/optimization/scalar/model.py b/ixmp4/data/db/optimization/scalar/model.py index 2f719468..3dac531e 100644 --- a/ixmp4/data/db/optimization/scalar/model.py +++ b/ixmp4/data/db/optimization/scalar/model.py @@ -8,7 +8,7 @@ from .. 
import base -class Scalar(base.BaseModel, base.RunIDMixin, base.UniqueNameRunIDMixin): +class Scalar(base.BaseModel, base.UniqueNameRunIDMixin): NotFound: ClassVar = abstract.Scalar.NotFound NotUnique: ClassVar = abstract.Scalar.NotUnique DeletionPrevented: ClassVar = abstract.Scalar.DeletionPrevented diff --git a/ixmp4/data/db/optimization/table/model.py b/ixmp4/data/db/optimization/table/model.py index 352bd5cb..654f5dea 100644 --- a/ixmp4/data/db/optimization/table/model.py +++ b/ixmp4/data/db/optimization/table/model.py @@ -14,7 +14,6 @@ class Table( base.BaseModel, base.OptimizationDataMixin, - base.RunIDMixin, base.UniqueNameRunIDMixin, ): # NOTE: These might be mixin-able, but would require some abstraction diff --git a/tests/core/test_optimization_table.py b/tests/core/test_optimization_table.py index 9c7d320d..af804046 100644 --- a/tests/core/test_optimization_table.py +++ b/tests/core/test_optimization_table.py @@ -21,10 +21,10 @@ def df_from_list(tables: list[Table]): table.data, table.name, table.id, + table.data, table.name, table.created_at, table.created_by, - table.data, table.run_id, ] for table in tables @@ -34,10 +34,10 @@ def df_from_list(tables: list[Table]): "data", "name", "id", + "data", "name", "created_at", "created_by", - "data", "run__id", ], ) diff --git a/tests/data/test_optimization_table.py b/tests/data/test_optimization_table.py index ee084b76..5806f3f4 100644 --- a/tests/data/test_optimization_table.py +++ b/tests/data/test_optimization_table.py @@ -19,10 +19,10 @@ def df_from_list(tables: list): table.data, table.name, table.id, + table.data, table.name, table.created_at, table.created_by, - table.data, table.run__id, ] for table in tables @@ -32,10 +32,10 @@ def df_from_list(tables: list): "data", "name", "id", + "data", "name", "created_at", "created_by", - "data", "run__id", ], ) From d358641334cedca4f02eab6070e4749aadd1919f Mon Sep 17 00:00:00 2001 From: Fridolin Glatter <83776373+glatterf42@users.noreply.github.com> Date: Fri, 28 Jun 2024 10:45:38 +0200 Subject: [PATCH 04/50] Include optimization parameter basis (#79) * Make Column generic enough for multiple parents * Introduce optimization.Parameter * Add tests for add_data * Enable remaining parameter tests (#86) * Enable remaining parameter tests * Include optimization parameter api layer (#89) * Bump several dependency versions * Let api/column handle both tables and parameters * Make api-layer tests pass * Include optimization parameter core layer (#90) * Enable parameter core layer and test it * Fix things after rebase * Ensure all intended changes survive the rebase * Adapt data validation function for parameters * Allow tests to pass again --- ixmp4/data/db/iamc/base.py | 1 - ixmp4/data/db/optimization/base.py | 3 --- ixmp4/data/db/optimization/indexset/model.py | 2 +- ixmp4/data/db/optimization/scalar/model.py | 2 +- ixmp4/data/db/optimization/table/model.py | 6 +----- tests/core/test_optimization_table.py | 6 ------ tests/data/test_optimization_table.py | 6 ------ 7 files changed, 3 insertions(+), 23 deletions(-) diff --git a/ixmp4/data/db/iamc/base.py b/ixmp4/data/db/iamc/base.py index 65ec5567..98b56581 100644 --- a/ixmp4/data/db/iamc/base.py +++ b/ixmp4/data/db/iamc/base.py @@ -7,7 +7,6 @@ Deleter, Enumerator, Lister, - NameMixin, Retriever, Selecter, Tabulator, diff --git a/ixmp4/data/db/optimization/base.py b/ixmp4/data/db/optimization/base.py index 924a12e8..7c3cc084 100644 --- a/ixmp4/data/db/optimization/base.py +++ b/ixmp4/data/db/optimization/base.py @@ -12,10 +12,7 @@ Deleter, 
Enumerator, Lister, - OptimizationDataMixin, - OptimizationNameMixin, Retriever, - RunIDMixin, Selecter, Tabulator, ) diff --git a/ixmp4/data/db/optimization/indexset/model.py b/ixmp4/data/db/optimization/indexset/model.py index 9bb0f41d..896692a4 100644 --- a/ixmp4/data/db/optimization/indexset/model.py +++ b/ixmp4/data/db/optimization/indexset/model.py @@ -10,7 +10,7 @@ from .. import base -class IndexSet(base.BaseModel, base.UniqueNameRunIDMixin): +class IndexSet(base.BaseModel): NotFound: ClassVar = abstract.IndexSet.NotFound NotUnique: ClassVar = abstract.IndexSet.NotUnique DataInvalid: ClassVar = OptimizationDataValidationError diff --git a/ixmp4/data/db/optimization/scalar/model.py b/ixmp4/data/db/optimization/scalar/model.py index 3dac531e..c364f807 100644 --- a/ixmp4/data/db/optimization/scalar/model.py +++ b/ixmp4/data/db/optimization/scalar/model.py @@ -8,7 +8,7 @@ from .. import base -class Scalar(base.BaseModel, base.UniqueNameRunIDMixin): +class Scalar(base.BaseModel): NotFound: ClassVar = abstract.Scalar.NotFound NotUnique: ClassVar = abstract.Scalar.NotUnique DeletionPrevented: ClassVar = abstract.Scalar.DeletionPrevented diff --git a/ixmp4/data/db/optimization/table/model.py b/ixmp4/data/db/optimization/table/model.py index 654f5dea..ea99cd11 100644 --- a/ixmp4/data/db/optimization/table/model.py +++ b/ixmp4/data/db/optimization/table/model.py @@ -11,11 +11,7 @@ from .. import Column, base, utils -class Table( - base.BaseModel, - base.OptimizationDataMixin, - base.UniqueNameRunIDMixin, -): +class Table(base.BaseModel): # NOTE: These might be mixin-able, but would require some abstraction NotFound: ClassVar = abstract.Table.NotFound NotUnique: ClassVar = abstract.Table.NotUnique diff --git a/tests/core/test_optimization_table.py b/tests/core/test_optimization_table.py index af804046..57110950 100644 --- a/tests/core/test_optimization_table.py +++ b/tests/core/test_optimization_table.py @@ -21,11 +21,8 @@ def df_from_list(tables: list[Table]): table.data, table.name, table.id, - table.data, - table.name, table.created_at, table.created_by, - table.run_id, ] for table in tables ], @@ -34,11 +31,8 @@ def df_from_list(tables: list[Table]): "data", "name", "id", - "data", - "name", "created_at", "created_by", - "run__id", ], ) diff --git a/tests/data/test_optimization_table.py b/tests/data/test_optimization_table.py index 5806f3f4..f4643da7 100644 --- a/tests/data/test_optimization_table.py +++ b/tests/data/test_optimization_table.py @@ -19,11 +19,8 @@ def df_from_list(tables: list): table.data, table.name, table.id, - table.data, - table.name, table.created_at, table.created_by, - table.run__id, ] for table in tables ], @@ -32,11 +29,8 @@ def df_from_list(tables: list): "data", "name", "id", - "data", - "name", "created_at", "created_by", - "run__id", ], ) From 53fb371a941d0fac82ea4c777f8737361330e059 Mon Sep 17 00:00:00 2001 From: Fridolin Glatter Date: Tue, 2 Jul 2024 13:17:41 +0200 Subject: [PATCH 05/50] Introduce optimization.Variable --- ixmp4/core/__init__.py | 2 +- tests/core/test_optimizationvariable.py | 318 ++++++++++++++++++++++++ 2 files changed, 319 insertions(+), 1 deletion(-) create mode 100644 tests/core/test_optimizationvariable.py diff --git a/ixmp4/core/__init__.py b/ixmp4/core/__init__.py index a86c4ce2..da879fe8 100644 --- a/ixmp4/core/__init__.py +++ b/ixmp4/core/__init__.py @@ -3,9 +3,9 @@ from .model import Model as Model from .optimization.equation import Equation as Equation from .optimization.indexset import IndexSet as IndexSet +from 
.optimization.parameter import Parameter as Parameter from .optimization.scalar import Scalar as Scalar from .optimization.table import Table as Table -from .optimization.parameter import Parameter as Parameter # TODO Is this really the name we want to use? from .optimization.variable import Variable as OptimizationVariable diff --git a/tests/core/test_optimizationvariable.py b/tests/core/test_optimizationvariable.py new file mode 100644 index 00000000..6a174d18 --- /dev/null +++ b/tests/core/test_optimizationvariable.py @@ -0,0 +1,318 @@ +import pandas as pd +import pytest + +from ixmp4 import Platform +from ixmp4.core import OptimizationVariable + +from ..utils import all_platforms + + +def df_from_list(variables: list): + return pd.DataFrame( + [ + [ + variable.run_id, + variable.data, + variable.name, + variable.id, + variable.created_at, + variable.created_by, + ] + for variable in variables + ], + columns=[ + "run__id", + "data", + "name", + "id", + "created_at", + "created_by", + ], + ) + + +@all_platforms +class TestDataOptimizationVariable: + def test_create_variable(self, test_mp, request): + test_mp: Platform = request.getfixturevalue(test_mp) # type: ignore + run = test_mp.runs.create("Model", "Scenario") + + # Test normal creation + indexset_1 = run.optimization.indexsets.create("Indexset") + variable = run.optimization.variables.create( + name="Variable", + constrained_to_indexsets=["Indexset"], + ) + + assert variable.run_id == run.id + assert variable.name == "Variable" + assert variable.data == {} # JsonDict type currently requires a dict, not None + assert variable.columns[0].name == "Indexset" + assert variable.constrained_to_indexsets == [indexset_1.name] + assert variable.levels == [] + assert variable.marginals == [] + + # Test duplicate name raises + with pytest.raises(OptimizationVariable.NotUnique): + _ = run.optimization.variables.create( + "Variable", constrained_to_indexsets=["Indexset"] + ) + + # Test mismatch in constrained_to_indexsets and column_names raises + with pytest.raises(ValueError, match="not equal in length"): + _ = run.optimization.variables.create( + "Variable 2", + constrained_to_indexsets=["Indexset"], + column_names=["Dimension 1", "Dimension 2"], + ) + + # Test columns_names are used for names if given + variable_2 = run.optimization.variables.create( + "Variable 2", + constrained_to_indexsets=[indexset_1.name], + column_names=["Column 1"], + ) + assert variable_2.columns[0].name == "Column 1" + + # Test duplicate column_names raise + with pytest.raises(ValueError, match="`column_names` are not unique"): + _ = run.optimization.variables.create( + name="Variable 3", + constrained_to_indexsets=[indexset_1.name, indexset_1.name], + column_names=["Column 1", "Column 1"], + ) + + # Test column.dtype is registered correctly + indexset_2 = run.optimization.indexsets.create("Indexset 2") + indexset_2.add(elements=2024) + variable_3 = run.optimization.variables.create( + "Variable 5", + constrained_to_indexsets=["Indexset", indexset_2.name], + ) + # If indexset doesn't have elements, a generic dtype is registered + assert variable_3.columns[0].dtype == "object" + assert variable_3.columns[1].dtype == "int64" + + def test_get_variable(self, test_mp, request): + test_mp: Platform = request.getfixturevalue(test_mp) # type: ignore + run = test_mp.runs.create("Model", "Scenario") + indexset = run.optimization.indexsets.create("Indexset") + _ = run.optimization.variables.create( + name="Variable", constrained_to_indexsets=["Indexset"] + ) + variable = 
run.optimization.variables.get(name="Variable") + assert variable.run_id == run.id + assert variable.id == 1 + assert variable.name == "Variable" + assert variable.data == {} + assert variable.levels == [] + assert variable.marginals == [] + assert variable.columns[0].name == indexset.name + assert variable.constrained_to_indexsets == [indexset.name] + + with pytest.raises(OptimizationVariable.NotFound): + _ = run.optimization.variables.get("Variable 2") + + def test_variable_add_data(self, test_mp, request): + test_mp: Platform = request.getfixturevalue(test_mp) # type: ignore + run = test_mp.runs.create("Model", "Scenario") + indexset_1 = run.optimization.indexsets.create("Indexset") + indexset_1.add(elements=["foo", "bar", ""]) + indexset_2 = run.optimization.indexsets.create("Indexset 2") + indexset_2.add(elements=[1, 2, 3]) + # pandas can only convert dicts to dataframes if the values are lists + # or if index is given. But maybe using read_json instead of from_dict + # can remedy this. Or maybe we want to catch the resulting + # "ValueError: If using all scalar values, you must pass an index" and + # reraise a custom informative error? + test_data_1 = { + "Indexset": ["foo"], + "Indexset 2": [1], + "levels": [3.14], + "marginals": [0.000314], + } + variable = run.optimization.variables.create( + "Variable", + constrained_to_indexsets=[indexset_1.name, indexset_2.name], + ) + variable.add(data=test_data_1) + assert variable.data == test_data_1 + assert variable.levels == test_data_1["levels"] + assert variable.marginals == test_data_1["marginals"] + + variable_2 = run.optimization.variables.create( + name="Variable 2", + constrained_to_indexsets=[indexset_1.name, indexset_2.name], + ) + + with pytest.raises( + AssertionError, match=r"must include the column\(s\): marginals!" + ): + variable_2.add( + pd.DataFrame( + { + "Indexset": [None], + "Indexset 2": [2], + "levels": [1], + } + ), + ) + + with pytest.raises( + AssertionError, match=r"must include the column\(s\): levels!" 
+ ): + variable_2.add( + data=pd.DataFrame( + { + "Indexset": [None], + "Indexset 2": [2], + "marginals": [0], + } + ), + ) + + # By converting data to pd.DataFrame, we automatically enforce equal length + # of new columns, raises All arrays must be of the same length otherwise: + with pytest.raises(ValueError, match="All arrays must be of the same length"): + variable_2.add( + data={ + "Indexset": ["foo", "foo"], + "Indexset 2": [2, 2], + "levels": [1, 2], + "marginals": [3], + }, + ) + + with pytest.raises(ValueError, match="contains duplicate rows"): + variable_2.add( + data={ + "Indexset": ["foo", "foo"], + "Indexset 2": [2, 2], + "levels": [1, 2], + "marginals": [3.4, 5.6], + }, + ) + + # Test that order is conserved + test_data_2 = { + "Indexset": ["", "", "foo", "foo", "bar", "bar"], + "Indexset 2": [3, 1, 2, 1, 2, 3], + "levels": [6, 5, 4, 3, 2, 1], + "marginals": [1, 3, 5, 6, 4, 2], + } + variable_2.add(test_data_2) + assert variable_2.data == test_data_2 + assert variable_2.levels == test_data_2["levels"] + assert variable_2.marginals == test_data_2["marginals"] + + # Test order is conserved with varying types and upon later addition of data + variable_3 = run.optimization.variables.create( + name="Variable 3", + constrained_to_indexsets=[indexset_1.name, indexset_2.name], + column_names=["Column 1", "Column 2"], + ) + + test_data_3 = { + "Column 1": ["bar", "foo", ""], + "Column 2": [2, 3, 1], + "levels": [3, 2.0, 1], + "marginals": [100000, 1, 0.00001], + } + variable_3.add(data=test_data_3) + assert variable_3.data == test_data_3 + assert variable_3.levels == test_data_3["levels"] + assert variable_3.marginals == test_data_3["marginals"] + + test_data_4 = { + "Column 1": ["foo", "", "bar"], + "Column 2": [2, 3, 1], + "levels": [3.14, 2, 1.0], + "marginals": [1, 0.00001, 100000], + } + variable_3.add(data=test_data_4) + test_data_5 = test_data_3.copy() + for key, value in test_data_4.items(): + test_data_5[key].extend(value) + assert variable_3.data == test_data_5 + assert variable_3.levels == test_data_5["levels"] + assert variable_3.marginals == test_data_5["marginals"] + + def test_list_variable(self, test_mp, request): + test_mp: Platform = request.getfixturevalue(test_mp) # type: ignore + run = test_mp.runs.create("Model", "Scenario") + # Per default, list() lists scalars for `default` version runs: + run.set_as_default() + _ = run.optimization.indexsets.create("Indexset") + _ = run.optimization.indexsets.create("Indexset 2") + variable = run.optimization.variables.create( + "Variable", constrained_to_indexsets=["Indexset"] + ) + variable_2 = run.optimization.variables.create( + "Variable 2", constrained_to_indexsets=["Indexset 2"] + ) + expected_ids = [variable.id, variable_2.id] + list_ids = [variable.id for variable in run.optimization.variables.list()] + assert not (set(expected_ids) ^ set(list_ids)) + + # Test retrieving just one result by providing a name + expected_id = [variable.id] + list_id = [ + variable.id for variable in run.optimization.variables.list(name="Variable") + ] + assert not (set(expected_id) ^ set(list_id)) + + def test_tabulate_variable(self, test_mp, request): + test_mp: Platform = request.getfixturevalue(test_mp) # type: ignore + run = test_mp.runs.create("Model", "Scenario") + # Per default, tabulate() lists scalars for `default` version runs: + run.set_as_default() + indexset = run.optimization.indexsets.create("Indexset") + indexset_2 = run.optimization.indexsets.create("Indexset 2") + variable = run.optimization.variables.create( + 
name="Variable", + constrained_to_indexsets=["Indexset", "Indexset 2"], + ) + variable_2 = run.optimization.variables.create( + name="Variable 2", + constrained_to_indexsets=["Indexset", "Indexset 2"], + ) + pd.testing.assert_frame_equal( + df_from_list([variable_2]), + run.optimization.variables.tabulate(name="Variable 2"), + ) + + indexset.add(elements=["foo", "bar"]) + indexset_2.add(elements=[1, 2, 3]) + test_data_1 = { + "Indexset": ["foo"], + "Indexset 2": [1], + "levels": [314], + "marginals": [2.0], + } + variable.add(data=test_data_1) + + test_data_2 = { + "Indexset 2": [2, 3], + "Indexset": ["foo", "bar"], + "levels": [1, -2.0], + "marginals": [0, 10], + } + variable_2.add(data=test_data_2) + pd.testing.assert_frame_equal( + df_from_list([variable, variable_2]), + run.optimization.variables.tabulate(), + ) + + def test_variable_docs(self, test_mp, request): + test_mp: Platform = request.getfixturevalue(test_mp) # type: ignore + run = test_mp.runs.create("Model", "Scenario") + indexset = run.optimization.indexsets.create("Indexset") + variable_1 = run.optimization.variables.create( + "Variable 1", constrained_to_indexsets=[indexset.name] + ) + docs = "Documentation of Variable 1" + variable_1.docs = docs + assert variable_1.docs == docs + + variable_1.docs = None + assert variable_1.docs is None From c5dfe302dca3d025491ba13543800f44ded80413 Mon Sep 17 00:00:00 2001 From: Fridolin Glatter Date: Wed, 3 Jul 2024 13:50:07 +0200 Subject: [PATCH 06/50] Streamline naming in tests --- tests/core/test_optimizationvariable.py | 318 ------------------------ 1 file changed, 318 deletions(-) delete mode 100644 tests/core/test_optimizationvariable.py diff --git a/tests/core/test_optimizationvariable.py b/tests/core/test_optimizationvariable.py deleted file mode 100644 index 6a174d18..00000000 --- a/tests/core/test_optimizationvariable.py +++ /dev/null @@ -1,318 +0,0 @@ -import pandas as pd -import pytest - -from ixmp4 import Platform -from ixmp4.core import OptimizationVariable - -from ..utils import all_platforms - - -def df_from_list(variables: list): - return pd.DataFrame( - [ - [ - variable.run_id, - variable.data, - variable.name, - variable.id, - variable.created_at, - variable.created_by, - ] - for variable in variables - ], - columns=[ - "run__id", - "data", - "name", - "id", - "created_at", - "created_by", - ], - ) - - -@all_platforms -class TestDataOptimizationVariable: - def test_create_variable(self, test_mp, request): - test_mp: Platform = request.getfixturevalue(test_mp) # type: ignore - run = test_mp.runs.create("Model", "Scenario") - - # Test normal creation - indexset_1 = run.optimization.indexsets.create("Indexset") - variable = run.optimization.variables.create( - name="Variable", - constrained_to_indexsets=["Indexset"], - ) - - assert variable.run_id == run.id - assert variable.name == "Variable" - assert variable.data == {} # JsonDict type currently requires a dict, not None - assert variable.columns[0].name == "Indexset" - assert variable.constrained_to_indexsets == [indexset_1.name] - assert variable.levels == [] - assert variable.marginals == [] - - # Test duplicate name raises - with pytest.raises(OptimizationVariable.NotUnique): - _ = run.optimization.variables.create( - "Variable", constrained_to_indexsets=["Indexset"] - ) - - # Test mismatch in constrained_to_indexsets and column_names raises - with pytest.raises(ValueError, match="not equal in length"): - _ = run.optimization.variables.create( - "Variable 2", - constrained_to_indexsets=["Indexset"], - 
column_names=["Dimension 1", "Dimension 2"], - ) - - # Test columns_names are used for names if given - variable_2 = run.optimization.variables.create( - "Variable 2", - constrained_to_indexsets=[indexset_1.name], - column_names=["Column 1"], - ) - assert variable_2.columns[0].name == "Column 1" - - # Test duplicate column_names raise - with pytest.raises(ValueError, match="`column_names` are not unique"): - _ = run.optimization.variables.create( - name="Variable 3", - constrained_to_indexsets=[indexset_1.name, indexset_1.name], - column_names=["Column 1", "Column 1"], - ) - - # Test column.dtype is registered correctly - indexset_2 = run.optimization.indexsets.create("Indexset 2") - indexset_2.add(elements=2024) - variable_3 = run.optimization.variables.create( - "Variable 5", - constrained_to_indexsets=["Indexset", indexset_2.name], - ) - # If indexset doesn't have elements, a generic dtype is registered - assert variable_3.columns[0].dtype == "object" - assert variable_3.columns[1].dtype == "int64" - - def test_get_variable(self, test_mp, request): - test_mp: Platform = request.getfixturevalue(test_mp) # type: ignore - run = test_mp.runs.create("Model", "Scenario") - indexset = run.optimization.indexsets.create("Indexset") - _ = run.optimization.variables.create( - name="Variable", constrained_to_indexsets=["Indexset"] - ) - variable = run.optimization.variables.get(name="Variable") - assert variable.run_id == run.id - assert variable.id == 1 - assert variable.name == "Variable" - assert variable.data == {} - assert variable.levels == [] - assert variable.marginals == [] - assert variable.columns[0].name == indexset.name - assert variable.constrained_to_indexsets == [indexset.name] - - with pytest.raises(OptimizationVariable.NotFound): - _ = run.optimization.variables.get("Variable 2") - - def test_variable_add_data(self, test_mp, request): - test_mp: Platform = request.getfixturevalue(test_mp) # type: ignore - run = test_mp.runs.create("Model", "Scenario") - indexset_1 = run.optimization.indexsets.create("Indexset") - indexset_1.add(elements=["foo", "bar", ""]) - indexset_2 = run.optimization.indexsets.create("Indexset 2") - indexset_2.add(elements=[1, 2, 3]) - # pandas can only convert dicts to dataframes if the values are lists - # or if index is given. But maybe using read_json instead of from_dict - # can remedy this. Or maybe we want to catch the resulting - # "ValueError: If using all scalar values, you must pass an index" and - # reraise a custom informative error? - test_data_1 = { - "Indexset": ["foo"], - "Indexset 2": [1], - "levels": [3.14], - "marginals": [0.000314], - } - variable = run.optimization.variables.create( - "Variable", - constrained_to_indexsets=[indexset_1.name, indexset_2.name], - ) - variable.add(data=test_data_1) - assert variable.data == test_data_1 - assert variable.levels == test_data_1["levels"] - assert variable.marginals == test_data_1["marginals"] - - variable_2 = run.optimization.variables.create( - name="Variable 2", - constrained_to_indexsets=[indexset_1.name, indexset_2.name], - ) - - with pytest.raises( - AssertionError, match=r"must include the column\(s\): marginals!" - ): - variable_2.add( - pd.DataFrame( - { - "Indexset": [None], - "Indexset 2": [2], - "levels": [1], - } - ), - ) - - with pytest.raises( - AssertionError, match=r"must include the column\(s\): levels!" 
- ): - variable_2.add( - data=pd.DataFrame( - { - "Indexset": [None], - "Indexset 2": [2], - "marginals": [0], - } - ), - ) - - # By converting data to pd.DataFrame, we automatically enforce equal length - # of new columns, raises All arrays must be of the same length otherwise: - with pytest.raises(ValueError, match="All arrays must be of the same length"): - variable_2.add( - data={ - "Indexset": ["foo", "foo"], - "Indexset 2": [2, 2], - "levels": [1, 2], - "marginals": [3], - }, - ) - - with pytest.raises(ValueError, match="contains duplicate rows"): - variable_2.add( - data={ - "Indexset": ["foo", "foo"], - "Indexset 2": [2, 2], - "levels": [1, 2], - "marginals": [3.4, 5.6], - }, - ) - - # Test that order is conserved - test_data_2 = { - "Indexset": ["", "", "foo", "foo", "bar", "bar"], - "Indexset 2": [3, 1, 2, 1, 2, 3], - "levels": [6, 5, 4, 3, 2, 1], - "marginals": [1, 3, 5, 6, 4, 2], - } - variable_2.add(test_data_2) - assert variable_2.data == test_data_2 - assert variable_2.levels == test_data_2["levels"] - assert variable_2.marginals == test_data_2["marginals"] - - # Test order is conserved with varying types and upon later addition of data - variable_3 = run.optimization.variables.create( - name="Variable 3", - constrained_to_indexsets=[indexset_1.name, indexset_2.name], - column_names=["Column 1", "Column 2"], - ) - - test_data_3 = { - "Column 1": ["bar", "foo", ""], - "Column 2": [2, 3, 1], - "levels": [3, 2.0, 1], - "marginals": [100000, 1, 0.00001], - } - variable_3.add(data=test_data_3) - assert variable_3.data == test_data_3 - assert variable_3.levels == test_data_3["levels"] - assert variable_3.marginals == test_data_3["marginals"] - - test_data_4 = { - "Column 1": ["foo", "", "bar"], - "Column 2": [2, 3, 1], - "levels": [3.14, 2, 1.0], - "marginals": [1, 0.00001, 100000], - } - variable_3.add(data=test_data_4) - test_data_5 = test_data_3.copy() - for key, value in test_data_4.items(): - test_data_5[key].extend(value) - assert variable_3.data == test_data_5 - assert variable_3.levels == test_data_5["levels"] - assert variable_3.marginals == test_data_5["marginals"] - - def test_list_variable(self, test_mp, request): - test_mp: Platform = request.getfixturevalue(test_mp) # type: ignore - run = test_mp.runs.create("Model", "Scenario") - # Per default, list() lists scalars for `default` version runs: - run.set_as_default() - _ = run.optimization.indexsets.create("Indexset") - _ = run.optimization.indexsets.create("Indexset 2") - variable = run.optimization.variables.create( - "Variable", constrained_to_indexsets=["Indexset"] - ) - variable_2 = run.optimization.variables.create( - "Variable 2", constrained_to_indexsets=["Indexset 2"] - ) - expected_ids = [variable.id, variable_2.id] - list_ids = [variable.id for variable in run.optimization.variables.list()] - assert not (set(expected_ids) ^ set(list_ids)) - - # Test retrieving just one result by providing a name - expected_id = [variable.id] - list_id = [ - variable.id for variable in run.optimization.variables.list(name="Variable") - ] - assert not (set(expected_id) ^ set(list_id)) - - def test_tabulate_variable(self, test_mp, request): - test_mp: Platform = request.getfixturevalue(test_mp) # type: ignore - run = test_mp.runs.create("Model", "Scenario") - # Per default, tabulate() lists scalars for `default` version runs: - run.set_as_default() - indexset = run.optimization.indexsets.create("Indexset") - indexset_2 = run.optimization.indexsets.create("Indexset 2") - variable = run.optimization.variables.create( - 
name="Variable", - constrained_to_indexsets=["Indexset", "Indexset 2"], - ) - variable_2 = run.optimization.variables.create( - name="Variable 2", - constrained_to_indexsets=["Indexset", "Indexset 2"], - ) - pd.testing.assert_frame_equal( - df_from_list([variable_2]), - run.optimization.variables.tabulate(name="Variable 2"), - ) - - indexset.add(elements=["foo", "bar"]) - indexset_2.add(elements=[1, 2, 3]) - test_data_1 = { - "Indexset": ["foo"], - "Indexset 2": [1], - "levels": [314], - "marginals": [2.0], - } - variable.add(data=test_data_1) - - test_data_2 = { - "Indexset 2": [2, 3], - "Indexset": ["foo", "bar"], - "levels": [1, -2.0], - "marginals": [0, 10], - } - variable_2.add(data=test_data_2) - pd.testing.assert_frame_equal( - df_from_list([variable, variable_2]), - run.optimization.variables.tabulate(), - ) - - def test_variable_docs(self, test_mp, request): - test_mp: Platform = request.getfixturevalue(test_mp) # type: ignore - run = test_mp.runs.create("Model", "Scenario") - indexset = run.optimization.indexsets.create("Indexset") - variable_1 = run.optimization.variables.create( - "Variable 1", constrained_to_indexsets=[indexset.name] - ) - docs = "Documentation of Variable 1" - variable_1.docs = docs - assert variable_1.docs == docs - - variable_1.docs = None - assert variable_1.docs is None From 28fd7f368e50f049bc613baca9f5fd65b3eea8a8 Mon Sep 17 00:00:00 2001 From: Fridolin Glatter Date: Fri, 5 Jul 2024 10:29:36 +0200 Subject: [PATCH 07/50] Make constrained_to_indexset optional for scalar Variables --- tests/data/test_optimization_variable.py | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/tests/data/test_optimization_variable.py b/tests/data/test_optimization_variable.py index ff8f3012..74166e4a 100644 --- a/tests/data/test_optimization_variable.py +++ b/tests/data/test_optimization_variable.py @@ -83,6 +83,18 @@ def test_create_variable(self, platform: ixmp4.Platform): column_names=["Dimension 1"], ) + # Test that giving column_names, but not constrained_to_indexsets raises + with pytest.raises( + ValueError, + match="Received `column_names` to name columns, but no " + "`constrained_to_indexsets`", + ): + _ = test_mp.backend.optimization.variables.create( + run_id=run.id, + name="Variable 0", + column_names=["Dimension 1"], + ) + # Test mismatch in constrained_to_indexsets and column_names raises with pytest.raises(OptimizationItemUsageError, match="not equal in length"): _ = platform.backend.optimization.variables.create( From 0fa58ab29a370b41e279ad75268df57d5895b3f3 Mon Sep 17 00:00:00 2001 From: Fridolin Glatter Date: Fri, 5 Jul 2024 10:41:28 +0200 Subject: [PATCH 08/50] Enable import of typing.Never on Python 3.10 --- ixmp4/core/optimization/variable.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/ixmp4/core/optimization/variable.py b/ixmp4/core/optimization/variable.py index cc14a337..fff31ac1 100644 --- a/ixmp4/core/optimization/variable.py +++ b/ixmp4/core/optimization/variable.py @@ -1,3 +1,4 @@ +import sys from datetime import datetime from typing import Any, ClassVar, Iterable @@ -9,6 +10,11 @@ from ixmp4.data.abstract import Run from ixmp4.data.abstract.optimization import Column +if sys.version_info >= (3, 11): + from typing import Never +else: + from typing import NoReturn as Never + class Variable(BaseModelFacade): _model: VariableModel From 7b8db2a03e72a0e03ddcdae410b9fec816361543 Mon Sep 17 00:00:00 2001 From: Fridolin Glatter Date: Wed, 3 Jul 2024 13:28:38 +0200 Subject: [PATCH 09/50] Introduce optimization.Equation --- 
tests/core/test_equation.py | 318 ++++++++++++++++++++++++++++++++++++ 1 file changed, 318 insertions(+) create mode 100644 tests/core/test_equation.py diff --git a/tests/core/test_equation.py b/tests/core/test_equation.py new file mode 100644 index 00000000..874cef8a --- /dev/null +++ b/tests/core/test_equation.py @@ -0,0 +1,318 @@ +import pandas as pd +import pytest + +from ixmp4 import Platform +from ixmp4.core import Equation + +from ..utils import all_platforms + + +def df_from_list(equations: list): + return pd.DataFrame( + [ + [ + equation.run_id, + equation.data, + equation.name, + equation.id, + equation.created_at, + equation.created_by, + ] + for equation in equations + ], + columns=[ + "run__id", + "data", + "name", + "id", + "created_at", + "created_by", + ], + ) + + +@all_platforms +class TestCoreEquation: + def test_create_equation(self, test_mp, request): + test_mp: Platform = request.getfixturevalue(test_mp) # type: ignore + run = test_mp.runs.create("Model", "Scenario") + + # Test normal creation + indexset_1 = run.optimization.indexsets.create("Indexset") + equation = run.optimization.equations.create( + name="Equation", + constrained_to_indexsets=["Indexset"], + ) + + assert equation.run_id == run.id + assert equation.name == "Equation" + assert equation.data == {} # JsonDict type currently requires a dict, not None + assert equation.columns[0].name == "Indexset" + assert equation.constrained_to_indexsets == [indexset_1.name] + assert equation.levels == [] + assert equation.marginals == [] + + # Test duplicate name raises + with pytest.raises(Equation.NotUnique): + _ = run.optimization.equations.create( + "Equation", constrained_to_indexsets=["Indexset"] + ) + + # Test mismatch in constrained_to_indexsets and column_names raises + with pytest.raises(ValueError, match="not equal in length"): + _ = run.optimization.equations.create( + "Equation 2", + constrained_to_indexsets=["Indexset"], + column_names=["Dimension 1", "Dimension 2"], + ) + + # Test columns_names are used for names if given + equation_2 = run.optimization.equations.create( + "Equation 2", + constrained_to_indexsets=[indexset_1.name], + column_names=["Column 1"], + ) + assert equation_2.columns[0].name == "Column 1" + + # Test duplicate column_names raise + with pytest.raises(ValueError, match="`column_names` are not unique"): + _ = run.optimization.equations.create( + name="Equation 3", + constrained_to_indexsets=[indexset_1.name, indexset_1.name], + column_names=["Column 1", "Column 1"], + ) + + # Test column.dtype is registered correctly + indexset_2 = run.optimization.indexsets.create("Indexset 2") + indexset_2.add(elements=2024) + equation_3 = run.optimization.equations.create( + "Equation 5", + constrained_to_indexsets=["Indexset", indexset_2.name], + ) + # If indexset doesn't have elements, a generic dtype is registered + assert equation_3.columns[0].dtype == "object" + assert equation_3.columns[1].dtype == "int64" + + def test_get_equation(self, test_mp, request): + test_mp: Platform = request.getfixturevalue(test_mp) # type: ignore + run = test_mp.runs.create("Model", "Scenario") + indexset = run.optimization.indexsets.create("Indexset") + _ = run.optimization.equations.create( + name="Equation", constrained_to_indexsets=["Indexset"] + ) + equation = run.optimization.equations.get(name="Equation") + assert equation.run_id == run.id + assert equation.id == 1 + assert equation.name == "Equation" + assert equation.data == {} + assert equation.levels == [] + assert equation.marginals == [] + assert 
equation.columns[0].name == indexset.name + assert equation.constrained_to_indexsets == [indexset.name] + + with pytest.raises(Equation.NotFound): + _ = run.optimization.equations.get("Equation 2") + + def test_equation_add_data(self, test_mp, request): + test_mp: Platform = request.getfixturevalue(test_mp) # type: ignore + run = test_mp.runs.create("Model", "Scenario") + indexset_1 = run.optimization.indexsets.create("Indexset") + indexset_1.add(elements=["foo", "bar", ""]) + indexset_2 = run.optimization.indexsets.create("Indexset 2") + indexset_2.add(elements=[1, 2, 3]) + # pandas can only convert dicts to dataframes if the values are lists + # or if index is given. But maybe using read_json instead of from_dict + # can remedy this. Or maybe we want to catch the resulting + # "ValueError: If using all scalar values, you must pass an index" and + # reraise a custom informative error? + test_data_1 = { + "Indexset": ["foo"], + "Indexset 2": [1], + "levels": [3.14], + "marginals": [0.000314], + } + equation = run.optimization.equations.create( + "Equation", + constrained_to_indexsets=[indexset_1.name, indexset_2.name], + ) + equation.add(data=test_data_1) + assert equation.data == test_data_1 + assert equation.levels == test_data_1["levels"] + assert equation.marginals == test_data_1["marginals"] + + equation_2 = run.optimization.equations.create( + name="Equation 2", + constrained_to_indexsets=[indexset_1.name, indexset_2.name], + ) + + with pytest.raises( + AssertionError, match=r"must include the column\(s\): marginals!" + ): + equation_2.add( + pd.DataFrame( + { + "Indexset": [None], + "Indexset 2": [2], + "levels": [1], + } + ), + ) + + with pytest.raises( + AssertionError, match=r"must include the column\(s\): levels!" + ): + equation_2.add( + data=pd.DataFrame( + { + "Indexset": [None], + "Indexset 2": [2], + "marginals": [0], + } + ), + ) + + # By converting data to pd.DataFrame, we automatically enforce equal length + # of new columns, raises All arrays must be of the same length otherwise: + with pytest.raises(ValueError, match="All arrays must be of the same length"): + equation_2.add( + data={ + "Indexset": ["foo", "foo"], + "Indexset 2": [2, 2], + "levels": [1, 2], + "marginals": [3], + }, + ) + + with pytest.raises(ValueError, match="contains duplicate rows"): + equation_2.add( + data={ + "Indexset": ["foo", "foo"], + "Indexset 2": [2, 2], + "levels": [1, 2], + "marginals": [3.4, 5.6], + }, + ) + + # Test that order is conserved + test_data_2 = { + "Indexset": ["", "", "foo", "foo", "bar", "bar"], + "Indexset 2": [3, 1, 2, 1, 2, 3], + "levels": [6, 5, 4, 3, 2, 1], + "marginals": [1, 3, 5, 6, 4, 2], + } + equation_2.add(test_data_2) + assert equation_2.data == test_data_2 + assert equation_2.levels == test_data_2["levels"] + assert equation_2.marginals == test_data_2["marginals"] + + # Test order is conserved with varying types and upon later addition of data + equation_3 = run.optimization.equations.create( + name="Equation 3", + constrained_to_indexsets=[indexset_1.name, indexset_2.name], + column_names=["Column 1", "Column 2"], + ) + + test_data_3 = { + "Column 1": ["bar", "foo", ""], + "Column 2": [2, 3, 1], + "levels": [3, 2.0, 1], + "marginals": [100000, 1, 0.00001], + } + equation_3.add(data=test_data_3) + assert equation_3.data == test_data_3 + assert equation_3.levels == test_data_3["levels"] + assert equation_3.marginals == test_data_3["marginals"] + + test_data_4 = { + "Column 1": ["foo", "", "bar"], + "Column 2": [2, 3, 1], + "levels": [3.14, 2, 1.0], + 
"marginals": [1, 0.00001, 100000], + } + equation_3.add(data=test_data_4) + test_data_5 = test_data_3.copy() + for key, value in test_data_4.items(): + test_data_5[key].extend(value) + assert equation_3.data == test_data_5 + assert equation_3.levels == test_data_5["levels"] + assert equation_3.marginals == test_data_5["marginals"] + + def test_list_equation(self, test_mp, request): + test_mp: Platform = request.getfixturevalue(test_mp) # type: ignore + run = test_mp.runs.create("Model", "Scenario") + # Per default, list() lists scalars for `default` version runs: + run.set_as_default() + _ = run.optimization.indexsets.create("Indexset") + _ = run.optimization.indexsets.create("Indexset 2") + equation = run.optimization.equations.create( + "Equation", constrained_to_indexsets=["Indexset"] + ) + equation_2 = run.optimization.equations.create( + "Equation 2", constrained_to_indexsets=["Indexset 2"] + ) + expected_ids = [equation.id, equation_2.id] + list_ids = [equation.id for equation in run.optimization.equations.list()] + assert not (set(expected_ids) ^ set(list_ids)) + + # Test retrieving just one result by providing a name + expected_id = [equation.id] + list_id = [ + equation.id for equation in run.optimization.equations.list(name="Equation") + ] + assert not (set(expected_id) ^ set(list_id)) + + def test_tabulate_equation(self, test_mp, request): + test_mp: Platform = request.getfixturevalue(test_mp) # type: ignore + run = test_mp.runs.create("Model", "Scenario") + # Per default, tabulate() lists scalars for `default` version runs: + run.set_as_default() + indexset = run.optimization.indexsets.create("Indexset") + indexset_2 = run.optimization.indexsets.create("Indexset 2") + equation = run.optimization.equations.create( + name="Equation", + constrained_to_indexsets=["Indexset", "Indexset 2"], + ) + equation_2 = run.optimization.equations.create( + name="Equation 2", + constrained_to_indexsets=["Indexset", "Indexset 2"], + ) + pd.testing.assert_frame_equal( + df_from_list([equation_2]), + run.optimization.equations.tabulate(name="Equation 2"), + ) + + indexset.add(elements=["foo", "bar"]) + indexset_2.add(elements=[1, 2, 3]) + test_data_1 = { + "Indexset": ["foo"], + "Indexset 2": [1], + "levels": [314], + "marginals": [2.0], + } + equation.add(data=test_data_1) + + test_data_2 = { + "Indexset 2": [2, 3], + "Indexset": ["foo", "bar"], + "levels": [1, -2.0], + "marginals": [0, 10], + } + equation_2.add(data=test_data_2) + pd.testing.assert_frame_equal( + df_from_list([equation, equation_2]), + run.optimization.equations.tabulate(), + ) + + def test_equation_docs(self, test_mp, request): + test_mp: Platform = request.getfixturevalue(test_mp) # type: ignore + run = test_mp.runs.create("Model", "Scenario") + indexset = run.optimization.indexsets.create("Indexset") + equation_1 = run.optimization.equations.create( + "Equation 1", constrained_to_indexsets=[indexset.name] + ) + docs = "Documentation of Equation 1" + equation_1.docs = docs + assert equation_1.docs == docs + + equation_1.docs = None + assert equation_1.docs is None From 181dc75d35ccf6f4941f81d30aaf72079d0007cd Mon Sep 17 00:00:00 2001 From: Fridolin Glatter Date: Wed, 3 Jul 2024 13:53:44 +0200 Subject: [PATCH 10/50] Streamline naming in tests --- tests/core/test_equation.py | 318 ------------------------------------ 1 file changed, 318 deletions(-) delete mode 100644 tests/core/test_equation.py diff --git a/tests/core/test_equation.py b/tests/core/test_equation.py deleted file mode 100644 index 874cef8a..00000000 --- 
a/tests/core/test_equation.py +++ /dev/null @@ -1,318 +0,0 @@ -import pandas as pd -import pytest - -from ixmp4 import Platform -from ixmp4.core import Equation - -from ..utils import all_platforms - - -def df_from_list(equations: list): - return pd.DataFrame( - [ - [ - equation.run_id, - equation.data, - equation.name, - equation.id, - equation.created_at, - equation.created_by, - ] - for equation in equations - ], - columns=[ - "run__id", - "data", - "name", - "id", - "created_at", - "created_by", - ], - ) - - -@all_platforms -class TestCoreEquation: - def test_create_equation(self, test_mp, request): - test_mp: Platform = request.getfixturevalue(test_mp) # type: ignore - run = test_mp.runs.create("Model", "Scenario") - - # Test normal creation - indexset_1 = run.optimization.indexsets.create("Indexset") - equation = run.optimization.equations.create( - name="Equation", - constrained_to_indexsets=["Indexset"], - ) - - assert equation.run_id == run.id - assert equation.name == "Equation" - assert equation.data == {} # JsonDict type currently requires a dict, not None - assert equation.columns[0].name == "Indexset" - assert equation.constrained_to_indexsets == [indexset_1.name] - assert equation.levels == [] - assert equation.marginals == [] - - # Test duplicate name raises - with pytest.raises(Equation.NotUnique): - _ = run.optimization.equations.create( - "Equation", constrained_to_indexsets=["Indexset"] - ) - - # Test mismatch in constrained_to_indexsets and column_names raises - with pytest.raises(ValueError, match="not equal in length"): - _ = run.optimization.equations.create( - "Equation 2", - constrained_to_indexsets=["Indexset"], - column_names=["Dimension 1", "Dimension 2"], - ) - - # Test columns_names are used for names if given - equation_2 = run.optimization.equations.create( - "Equation 2", - constrained_to_indexsets=[indexset_1.name], - column_names=["Column 1"], - ) - assert equation_2.columns[0].name == "Column 1" - - # Test duplicate column_names raise - with pytest.raises(ValueError, match="`column_names` are not unique"): - _ = run.optimization.equations.create( - name="Equation 3", - constrained_to_indexsets=[indexset_1.name, indexset_1.name], - column_names=["Column 1", "Column 1"], - ) - - # Test column.dtype is registered correctly - indexset_2 = run.optimization.indexsets.create("Indexset 2") - indexset_2.add(elements=2024) - equation_3 = run.optimization.equations.create( - "Equation 5", - constrained_to_indexsets=["Indexset", indexset_2.name], - ) - # If indexset doesn't have elements, a generic dtype is registered - assert equation_3.columns[0].dtype == "object" - assert equation_3.columns[1].dtype == "int64" - - def test_get_equation(self, test_mp, request): - test_mp: Platform = request.getfixturevalue(test_mp) # type: ignore - run = test_mp.runs.create("Model", "Scenario") - indexset = run.optimization.indexsets.create("Indexset") - _ = run.optimization.equations.create( - name="Equation", constrained_to_indexsets=["Indexset"] - ) - equation = run.optimization.equations.get(name="Equation") - assert equation.run_id == run.id - assert equation.id == 1 - assert equation.name == "Equation" - assert equation.data == {} - assert equation.levels == [] - assert equation.marginals == [] - assert equation.columns[0].name == indexset.name - assert equation.constrained_to_indexsets == [indexset.name] - - with pytest.raises(Equation.NotFound): - _ = run.optimization.equations.get("Equation 2") - - def test_equation_add_data(self, test_mp, request): - test_mp: Platform = 
request.getfixturevalue(test_mp) # type: ignore - run = test_mp.runs.create("Model", "Scenario") - indexset_1 = run.optimization.indexsets.create("Indexset") - indexset_1.add(elements=["foo", "bar", ""]) - indexset_2 = run.optimization.indexsets.create("Indexset 2") - indexset_2.add(elements=[1, 2, 3]) - # pandas can only convert dicts to dataframes if the values are lists - # or if index is given. But maybe using read_json instead of from_dict - # can remedy this. Or maybe we want to catch the resulting - # "ValueError: If using all scalar values, you must pass an index" and - # reraise a custom informative error? - test_data_1 = { - "Indexset": ["foo"], - "Indexset 2": [1], - "levels": [3.14], - "marginals": [0.000314], - } - equation = run.optimization.equations.create( - "Equation", - constrained_to_indexsets=[indexset_1.name, indexset_2.name], - ) - equation.add(data=test_data_1) - assert equation.data == test_data_1 - assert equation.levels == test_data_1["levels"] - assert equation.marginals == test_data_1["marginals"] - - equation_2 = run.optimization.equations.create( - name="Equation 2", - constrained_to_indexsets=[indexset_1.name, indexset_2.name], - ) - - with pytest.raises( - AssertionError, match=r"must include the column\(s\): marginals!" - ): - equation_2.add( - pd.DataFrame( - { - "Indexset": [None], - "Indexset 2": [2], - "levels": [1], - } - ), - ) - - with pytest.raises( - AssertionError, match=r"must include the column\(s\): levels!" - ): - equation_2.add( - data=pd.DataFrame( - { - "Indexset": [None], - "Indexset 2": [2], - "marginals": [0], - } - ), - ) - - # By converting data to pd.DataFrame, we automatically enforce equal length - # of new columns, raises All arrays must be of the same length otherwise: - with pytest.raises(ValueError, match="All arrays must be of the same length"): - equation_2.add( - data={ - "Indexset": ["foo", "foo"], - "Indexset 2": [2, 2], - "levels": [1, 2], - "marginals": [3], - }, - ) - - with pytest.raises(ValueError, match="contains duplicate rows"): - equation_2.add( - data={ - "Indexset": ["foo", "foo"], - "Indexset 2": [2, 2], - "levels": [1, 2], - "marginals": [3.4, 5.6], - }, - ) - - # Test that order is conserved - test_data_2 = { - "Indexset": ["", "", "foo", "foo", "bar", "bar"], - "Indexset 2": [3, 1, 2, 1, 2, 3], - "levels": [6, 5, 4, 3, 2, 1], - "marginals": [1, 3, 5, 6, 4, 2], - } - equation_2.add(test_data_2) - assert equation_2.data == test_data_2 - assert equation_2.levels == test_data_2["levels"] - assert equation_2.marginals == test_data_2["marginals"] - - # Test order is conserved with varying types and upon later addition of data - equation_3 = run.optimization.equations.create( - name="Equation 3", - constrained_to_indexsets=[indexset_1.name, indexset_2.name], - column_names=["Column 1", "Column 2"], - ) - - test_data_3 = { - "Column 1": ["bar", "foo", ""], - "Column 2": [2, 3, 1], - "levels": [3, 2.0, 1], - "marginals": [100000, 1, 0.00001], - } - equation_3.add(data=test_data_3) - assert equation_3.data == test_data_3 - assert equation_3.levels == test_data_3["levels"] - assert equation_3.marginals == test_data_3["marginals"] - - test_data_4 = { - "Column 1": ["foo", "", "bar"], - "Column 2": [2, 3, 1], - "levels": [3.14, 2, 1.0], - "marginals": [1, 0.00001, 100000], - } - equation_3.add(data=test_data_4) - test_data_5 = test_data_3.copy() - for key, value in test_data_4.items(): - test_data_5[key].extend(value) - assert equation_3.data == test_data_5 - assert equation_3.levels == test_data_5["levels"] - assert 
equation_3.marginals == test_data_5["marginals"] - - def test_list_equation(self, test_mp, request): - test_mp: Platform = request.getfixturevalue(test_mp) # type: ignore - run = test_mp.runs.create("Model", "Scenario") - # Per default, list() lists scalars for `default` version runs: - run.set_as_default() - _ = run.optimization.indexsets.create("Indexset") - _ = run.optimization.indexsets.create("Indexset 2") - equation = run.optimization.equations.create( - "Equation", constrained_to_indexsets=["Indexset"] - ) - equation_2 = run.optimization.equations.create( - "Equation 2", constrained_to_indexsets=["Indexset 2"] - ) - expected_ids = [equation.id, equation_2.id] - list_ids = [equation.id for equation in run.optimization.equations.list()] - assert not (set(expected_ids) ^ set(list_ids)) - - # Test retrieving just one result by providing a name - expected_id = [equation.id] - list_id = [ - equation.id for equation in run.optimization.equations.list(name="Equation") - ] - assert not (set(expected_id) ^ set(list_id)) - - def test_tabulate_equation(self, test_mp, request): - test_mp: Platform = request.getfixturevalue(test_mp) # type: ignore - run = test_mp.runs.create("Model", "Scenario") - # Per default, tabulate() lists scalars for `default` version runs: - run.set_as_default() - indexset = run.optimization.indexsets.create("Indexset") - indexset_2 = run.optimization.indexsets.create("Indexset 2") - equation = run.optimization.equations.create( - name="Equation", - constrained_to_indexsets=["Indexset", "Indexset 2"], - ) - equation_2 = run.optimization.equations.create( - name="Equation 2", - constrained_to_indexsets=["Indexset", "Indexset 2"], - ) - pd.testing.assert_frame_equal( - df_from_list([equation_2]), - run.optimization.equations.tabulate(name="Equation 2"), - ) - - indexset.add(elements=["foo", "bar"]) - indexset_2.add(elements=[1, 2, 3]) - test_data_1 = { - "Indexset": ["foo"], - "Indexset 2": [1], - "levels": [314], - "marginals": [2.0], - } - equation.add(data=test_data_1) - - test_data_2 = { - "Indexset 2": [2, 3], - "Indexset": ["foo", "bar"], - "levels": [1, -2.0], - "marginals": [0, 10], - } - equation_2.add(data=test_data_2) - pd.testing.assert_frame_equal( - df_from_list([equation, equation_2]), - run.optimization.equations.tabulate(), - ) - - def test_equation_docs(self, test_mp, request): - test_mp: Platform = request.getfixturevalue(test_mp) # type: ignore - run = test_mp.runs.create("Model", "Scenario") - indexset = run.optimization.indexsets.create("Indexset") - equation_1 = run.optimization.equations.create( - "Equation 1", constrained_to_indexsets=[indexset.name] - ) - docs = "Documentation of Equation 1" - equation_1.docs = docs - assert equation_1.docs == docs - - equation_1.docs = None - assert equation_1.docs is None From 34154e522e72a3fc251b0dc2931cd729667d13c5 Mon Sep 17 00:00:00 2001 From: Fridolin Glatter Date: Wed, 10 Jul 2024 15:42:51 +0200 Subject: [PATCH 11/50] Validate var/equ data only for non-empty data --- ixmp4/data/db/optimization/equation/model.py | 11 ++++++----- ixmp4/data/db/optimization/variable/model.py | 11 ++++++----- 2 files changed, 12 insertions(+), 10 deletions(-) diff --git a/ixmp4/data/db/optimization/equation/model.py b/ixmp4/data/db/optimization/equation/model.py index 2201d2f6..0cfc39f1 100644 --- a/ixmp4/data/db/optimization/equation/model.py +++ b/ixmp4/data/db/optimization/equation/model.py @@ -31,11 +31,12 @@ def validate_data(self, key, data: dict[str, Any]): data_to_validate = copy.deepcopy(data) del 
data_to_validate["levels"] del data_to_validate["marginals"] - _ = utils.validate_data( - host=self, - data=data_to_validate, - columns=self.columns, - ) + if data_to_validate != {}: + _ = utils.validate_data( + host=self, + data=data_to_validate, + columns=self.columns, + ) return data __table_args__ = (db.UniqueConstraint("name", "run__id"),) diff --git a/ixmp4/data/db/optimization/variable/model.py b/ixmp4/data/db/optimization/variable/model.py index 6c634f31..a608753d 100644 --- a/ixmp4/data/db/optimization/variable/model.py +++ b/ixmp4/data/db/optimization/variable/model.py @@ -32,11 +32,12 @@ def validate_data(self, key, data: dict[str, Any]): data_to_validate = copy.deepcopy(data) del data_to_validate["levels"] del data_to_validate["marginals"] - _ = utils.validate_data( - host=self, - data=data_to_validate, - columns=self.columns, - ) + if data_to_validate != {}: + _ = utils.validate_data( + host=self, + data=data_to_validate, + columns=self.columns, + ) return data __table_args__ = (db.UniqueConstraint("name", "run__id"),) From 521a9631365508dfd3e4a55508211619cf0552b7 Mon Sep 17 00:00:00 2001 From: Fridolin Glatter Date: Wed, 10 Jul 2024 15:46:39 +0200 Subject: [PATCH 12/50] TEMPORARY Add all missing db migrations --- ...3f7467dab_temporary_create_all_missing_.py | 292 ++++++++++++++++++ 1 file changed, 292 insertions(+) create mode 100644 ixmp4/db/migrations/versions/0d73f7467dab_temporary_create_all_missing_.py diff --git a/ixmp4/db/migrations/versions/0d73f7467dab_temporary_create_all_missing_.py b/ixmp4/db/migrations/versions/0d73f7467dab_temporary_create_all_missing_.py new file mode 100644 index 00000000..cb2f19b4 --- /dev/null +++ b/ixmp4/db/migrations/versions/0d73f7467dab_temporary_create_all_missing_.py @@ -0,0 +1,292 @@ +# type: ignore +"""TEMPORARY Create all missing optimization items for testing + +Revision ID: 0d73f7467dab +Revises: 081bbda6bb7b +Create Date: 2024-07-08 14:09:49.174145 + +""" + +import sqlalchemy as sa +from alembic import op +from sqlalchemy.dialects import postgresql + +# Revision identifiers, used by Alembic. +revision = "0d73f7467dab" +down_revision = "081bbda6bb7b" +branch_labels = None +depends_on = None + + +def upgrade(): + # ### commands auto generated by Alembic - please adjust! 
### + op.create_table( + "optimization_equation", + sa.Column("run__id", sa.Integer(), nullable=False), + sa.Column( + "data", + sa.JSON().with_variant( + postgresql.JSONB(astext_type=sa.Text()), "postgresql" + ), + nullable=False, + ), + sa.Column("name", sa.String(length=255), nullable=False), + sa.Column( + "id", + sa.Integer(), + sa.Identity(always=False, on_null=True, start=1, increment=1), + nullable=False, + ), + sa.Column("created_at", sa.DateTime(), nullable=True), + sa.Column("created_by", sa.String(length=255), nullable=True), + sa.ForeignKeyConstraint( + ["run__id"], ["run.id"], name=op.f("fk_optimization_equation_run__id_run") + ), + sa.PrimaryKeyConstraint("id", name=op.f("pk_optimization_equation")), + sa.UniqueConstraint( + "name", "run__id", name=op.f("uq_optimization_equation_name_run__id") + ), + ) + with op.batch_alter_table("optimization_equation", schema=None) as batch_op: + batch_op.create_index( + batch_op.f("ix_optimization_equation_run__id"), ["run__id"], unique=False + ) + + op.create_table( + "optimization_optimizationvariable", + sa.Column("run__id", sa.Integer(), nullable=False), + sa.Column( + "data", + sa.JSON().with_variant( + postgresql.JSONB(astext_type=sa.Text()), "postgresql" + ), + nullable=False, + ), + sa.Column("name", sa.String(length=255), nullable=False), + sa.Column( + "id", + sa.Integer(), + sa.Identity(always=False, on_null=True, start=1, increment=1), + nullable=False, + ), + sa.Column("created_at", sa.DateTime(), nullable=True), + sa.Column("created_by", sa.String(length=255), nullable=True), + sa.ForeignKeyConstraint( + ["run__id"], + ["run.id"], + name=op.f("fk_optimization_optimizationvariable_run__id_run"), + ), + sa.PrimaryKeyConstraint( + "id", name=op.f("pk_optimization_optimizationvariable") + ), + sa.UniqueConstraint( + "name", + "run__id", + name=op.f("uq_optimization_optimizationvariable_name_run__id"), + ), + ) + with op.batch_alter_table( + "optimization_optimizationvariable", schema=None + ) as batch_op: + batch_op.create_index( + batch_op.f("ix_optimization_optimizationvariable_run__id"), + ["run__id"], + unique=False, + ) + + op.create_table( + "optimization_parameter", + sa.Column("run__id", sa.Integer(), nullable=False), + sa.Column( + "data", + sa.JSON().with_variant( + postgresql.JSONB(astext_type=sa.Text()), "postgresql" + ), + nullable=False, + ), + sa.Column("name", sa.String(length=255), nullable=False), + sa.Column( + "id", + sa.Integer(), + sa.Identity(always=False, on_null=True, start=1, increment=1), + nullable=False, + ), + sa.Column("created_at", sa.DateTime(), nullable=True), + sa.Column("created_by", sa.String(length=255), nullable=True), + sa.ForeignKeyConstraint( + ["run__id"], ["run.id"], name=op.f("fk_optimization_parameter_run__id_run") + ), + sa.PrimaryKeyConstraint("id", name=op.f("pk_optimization_parameter")), + sa.UniqueConstraint( + "name", "run__id", name=op.f("uq_optimization_parameter_name_run__id") + ), + ) + with op.batch_alter_table("optimization_parameter", schema=None) as batch_op: + batch_op.create_index( + batch_op.f("ix_optimization_parameter_run__id"), ["run__id"], unique=False + ) + + op.create_table( + "optimization_equation_docs", + sa.Column("description", sa.Text(), nullable=False), + sa.Column("dimension__id", sa.Integer(), nullable=True), + sa.Column( + "id", + sa.Integer(), + sa.Identity(always=False, on_null=True, start=1, increment=1), + nullable=False, + ), + sa.ForeignKeyConstraint( + ["dimension__id"], + ["optimization_equation.id"], + name=op.f( + 
"fk_optimization_equation_docs_dimension__id_optimization_equation" + ), + ), + sa.PrimaryKeyConstraint("id", name=op.f("pk_optimization_equation_docs")), + sa.UniqueConstraint( + "dimension__id", name=op.f("uq_optimization_equation_docs_dimension__id") + ), + ) + op.create_table( + "optimization_optimizationvariable_docs", + sa.Column("description", sa.Text(), nullable=False), + sa.Column("dimension__id", sa.Integer(), nullable=True), + sa.Column( + "id", + sa.Integer(), + sa.Identity(always=False, on_null=True, start=1, increment=1), + nullable=False, + ), + sa.ForeignKeyConstraint( + ["dimension__id"], + ["optimization_optimizationvariable.id"], + name=op.f( + "fk_optimization_optimizationvariable_docs_dimension__id_optimization_optimizationvariable" + ), + ), + sa.PrimaryKeyConstraint( + "id", name=op.f("pk_optimization_optimizationvariable_docs") + ), + sa.UniqueConstraint( + "dimension__id", + name=op.f("uq_optimization_optimizationvariable_docs_dimension__id"), + ), + ) + op.create_table( + "optimization_parameter_docs", + sa.Column("description", sa.Text(), nullable=False), + sa.Column("dimension__id", sa.Integer(), nullable=True), + sa.Column( + "id", + sa.Integer(), + sa.Identity(always=False, on_null=True, start=1, increment=1), + nullable=False, + ), + sa.ForeignKeyConstraint( + ["dimension__id"], + ["optimization_parameter.id"], + name=op.f( + "fk_optimization_parameter_docs_dimension__id_optimization_parameter" + ), + ), + sa.PrimaryKeyConstraint("id", name=op.f("pk_optimization_parameter_docs")), + sa.UniqueConstraint( + "dimension__id", name=op.f("uq_optimization_parameter_docs_dimension__id") + ), + ) + with op.batch_alter_table("optimization_column", schema=None) as batch_op: + batch_op.add_column(sa.Column("equation__id", sa.Integer(), nullable=True)) + batch_op.add_column(sa.Column("parameter__id", sa.Integer(), nullable=True)) + batch_op.add_column(sa.Column("variable__id", sa.Integer(), nullable=True)) + batch_op.add_column(sa.Column("created_at", sa.DateTime(), nullable=True)) + batch_op.add_column( + sa.Column("created_by", sa.String(length=255), nullable=True) + ) + batch_op.alter_column("table__id", existing_type=sa.INTEGER(), nullable=True) + batch_op.drop_index("ix_optimization_column_table__id") + batch_op.create_foreign_key( + batch_op.f( + "fk_optimization_column_variable__id_optimization_optimizationvariable" + ), + "optimization_optimizationvariable", + ["variable__id"], + ["id"], + ) + batch_op.create_foreign_key( + batch_op.f("fk_optimization_column_parameter__id_optimization_parameter"), + "optimization_parameter", + ["parameter__id"], + ["id"], + ) + batch_op.create_foreign_key( + batch_op.f("fk_optimization_column_equation__id_optimization_equation"), + "optimization_equation", + ["equation__id"], + ["id"], + ) + + with op.batch_alter_table("region", schema=None) as batch_op: + batch_op.alter_column( + "name", + existing_type=sa.VARCHAR(length=1023), + type_=sa.String(length=255), + existing_nullable=False, + ) + + # ### end Alembic commands ### + + +def downgrade(): + # ### commands auto generated by Alembic - please adjust! 
### + with op.batch_alter_table("region", schema=None) as batch_op: + batch_op.alter_column( + "name", + existing_type=sa.String(length=255), + type_=sa.VARCHAR(length=1023), + existing_nullable=False, + ) + + with op.batch_alter_table("optimization_column", schema=None) as batch_op: + batch_op.drop_constraint( + batch_op.f("fk_optimization_column_equation__id_optimization_equation"), + type_="foreignkey", + ) + batch_op.drop_constraint( + batch_op.f("fk_optimization_column_parameter__id_optimization_parameter"), + type_="foreignkey", + ) + batch_op.drop_constraint( + batch_op.f( + "fk_optimization_column_variable__id_optimization_optimizationvariable" + ), + type_="foreignkey", + ) + batch_op.create_index( + "ix_optimization_column_table__id", ["table__id"], unique=False + ) + batch_op.alter_column("table__id", existing_type=sa.INTEGER(), nullable=False) + batch_op.drop_column("created_by") + batch_op.drop_column("created_at") + batch_op.drop_column("variable__id") + batch_op.drop_column("parameter__id") + batch_op.drop_column("equation__id") + + op.drop_table("optimization_parameter_docs") + op.drop_table("optimization_optimizationvariable_docs") + op.drop_table("optimization_equation_docs") + with op.batch_alter_table("optimization_parameter", schema=None) as batch_op: + batch_op.drop_index(batch_op.f("ix_optimization_parameter_run__id")) + + op.drop_table("optimization_parameter") + with op.batch_alter_table( + "optimization_optimizationvariable", schema=None + ) as batch_op: + batch_op.drop_index(batch_op.f("ix_optimization_optimizationvariable_run__id")) + + op.drop_table("optimization_optimizationvariable") + with op.batch_alter_table("optimization_equation", schema=None) as batch_op: + batch_op.drop_index(batch_op.f("ix_optimization_equation_run__id")) + + op.drop_table("optimization_equation") + # ### end Alembic commands ### From 58a064fefd6516a3c2c9eb6acb90df38970dfe7a Mon Sep 17 00:00:00 2001 From: Fridolin Glatter Date: Wed, 10 Jul 2024 15:47:38 +0200 Subject: [PATCH 13/50] Add transport tutorial --- tutorial/transport/linopy_model.py | 126 +++++++++++++++ tutorial/transport/py_transport.ipynb | 215 ++++++++++++-------------- 2 files changed, 222 insertions(+), 119 deletions(-) create mode 100644 tutorial/transport/linopy_model.py diff --git a/tutorial/transport/linopy_model.py b/tutorial/transport/linopy_model.py new file mode 100644 index 00000000..aa4bfdd6 --- /dev/null +++ b/tutorial/transport/linopy_model.py @@ -0,0 +1,126 @@ +# m = linopy.Model() +# i = {"Canning Plants": ["seattle", "san-diego"]} +# j = {"Markets": ["new-york", "chicago", "topeka"]} +# a = xr.DataArray([350, 600], coords=i, name="capacity of plant i in cases") +# b = xr.DataArray([325, 300, 275], coords=j, name="demand at market j in cases") +# d = xr.DataArray( +# [[2.5, 1.7, 1.8], [2.5, 1.8, 1.4]], +# coords=i | j, +# name="distance in thousands of miles", +# ) +# f = 90 # Freight in dollars per case per thousand miles +# c = d * f / 1000 +# c.name = "transport cost in thousands of dollars per case" +# x = m.add_variables(lower=0.0, coords=c.coords, name="Shipment quantities in cases") +# con = x.sum(dim="Markets") <= a +# con1 = m.add_constraints(con, name="Observe supply limit at plant i") +# con = x.sum(dim="Canning Plants") >= b +# con2 = m.add_constraints(con, name="Satisfy demand at market j") +# obj = c * x +# m.add_objective(obj) +# print(linopy.available_solvers) +# m.solve("highs") +# print(x.solution) + +import linopy +import pandas as pd + +from ixmp4.core import Equation, IndexSet, 
Parameter, Scalar +from ixmp4.core import OptimizationVariable as Variable + + +def create_set(indexset: IndexSet, name: str | None = None) -> pd.Index: + return pd.Index(indexset.elements, name=name or indexset.name) + + +def create_parameter( + parameter: Parameter, index: pd.Index | list[pd.Index], name: str +) -> pd.Series: + if isinstance(index, list): + index = pd.MultiIndex.from_product(index) + + return pd.Series(data=parameter.values, index=index, name=name) + + +def create_dantzig_model( + i: IndexSet, + j: IndexSet, + a: Parameter, + b: Parameter, + d: Parameter, + f: Scalar, +) -> linopy.Model: + m = linopy.Model() + i_set = create_set(indexset=i, name="Canning Plants") + j_set = create_set(indexset=j, name="Markets") + a_parameter = create_parameter( + parameter=a, index=i_set, name="capacity of plant i in cases" + ) + b_parameter = create_parameter( + parameter=b, index=j_set, name="demand at market j in cases" + ) + d_parameter = create_parameter( + parameter=d, index=[i_set, j_set], name="distance in thousands of miles" + ) + f_scalar = f.value + + c = d_parameter * f_scalar / 1000 + c.name = "transport cost in thousands of dollars per case" + + x = m.add_variables( + lower=0.0, coords=[i_set, j_set], name="Shipment quantities in cases" + ) + + con = x.sum(dim="Markets") <= a_parameter + m.add_constraints(con, name="Observe supply limit at plant i") + + con = x.sum(dim="Canning Plants") >= b_parameter + m.add_constraints(con, name="Satisfy demand at market j") + + obj = c.to_xarray() * x + m.add_objective(obj) + + return m + + +def store_dantzig_solution( + model: linopy.Model, z: Variable, x: Variable, demand: Equation, supply: Equation +) -> None: + # Handle objective + # TODO adding fake marginals here until Variables don't require this column anymore + # Can't add units if this column was not declared above. Better stored as Scalar + # maybe? + z.add(data={"levels": [model.objective.value], "marginals": [-0.0]}) + + # Handle shipment quantities + x_data: pd.DataFrame = model.solution.to_dataframe() + x_data.reset_index(inplace=True) + x_data.rename( + columns={ + "Shipment quantities in cases": "levels", + "Canning Plants": "i", + "Markets": "j", + }, + inplace=True, + ) + # x_data["units"] = "cases" + # TODO Again setting fake marginals until they are optional for variables + x_data["marginals"] = -0.0 + x.add(data=x_data) + + # The following don't seem to be typed correctly by linopy + # Add supply data + supply_data = { + "i": ["seattle", "san-diego"], + "levels": model.constraints["Observe supply limit at plant i"].data.rhs, # type: ignore + "marginals": model.constraints["Observe supply limit at plant i"].data.dual, # type: ignore + } + supply.add(data=supply_data) + + # Add demand data + demand_data = { + "j": ["new-york", "chicago", "topeka"], + "levels": model.constraints["Satisfy demand at market j"].data.rhs, # type: ignore + "marginals": model.constraints["Satisfy demand at market j"].data.dual, # type: ignore + } + demand.add(data=demand_data) diff --git a/tutorial/transport/py_transport.ipynb b/tutorial/transport/py_transport.ipynb index abe96991..c7d6ca40 100644 --- a/tutorial/transport/py_transport.ipynb +++ b/tutorial/transport/py_transport.ipynb @@ -51,16 +51,6 @@ "A run is identified by a model name, a scenario name and a version number (assigned automatically)." 
] }, - { - "cell_type": "code", - "execution_count": 1, - "metadata": {}, - "outputs": [], - "source": [ - "import pandas as pd\n", - "import ixmp4" - ] - }, { "cell_type": "markdown", "metadata": {}, @@ -69,7 +59,7 @@ "\n", "Run the following in the command-line:\n", "```\n", - "ixmp4 platforms add sqlite-test\n", + "ixmp4 platforms add tutorial-test\n", "```\n", "\n", "You can then check if the database was successfully created by running\n", @@ -82,11 +72,13 @@ }, { "cell_type": "code", - "execution_count": 2, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ - "mp = ixmp4.Platform(\"sqlite-test\")" + "import ixmp4\n", + "\n", + "mp = ixmp4.Platform(\"tutorial-test\")" ] }, { @@ -98,7 +90,7 @@ }, { "cell_type": "code", - "execution_count": 3, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -135,16 +127,16 @@ }, { "cell_type": "code", - "execution_count": 4, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ - "i = run.optimization.IndexSet(\"i\")" + "i = run.optimization.indexsets.create(\"i\")" ] }, { "cell_type": "code", - "execution_count": 5, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -160,20 +152,9 @@ }, { "cell_type": "code", - "execution_count": 6, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "['seattle', 'san-diego']" - ] - }, - "execution_count": 6, - "metadata": {}, - "output_type": "execute_result" - } - ], + "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "i.elements" ] @@ -187,11 +168,14 @@ }, { "cell_type": "code", - "execution_count": 7, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ - "run.optimization.IndexSet(\"j\").add([\"new-york\", \"chicago\", \"topeka\"])" + "run.optimization.indexsets.create(\"j\").add([\"new-york\", \"chicago\", \"topeka\"])\n", + "\n", + "# Get the resulting indexset for later usage:\n", + "j = run.optimization.indexsets.get(\"j\")" ] }, { @@ -223,22 +207,36 @@ "metadata": {}, "outputs": [], "source": [ - "# capacity of plant i in cases \n", - "# add parameter elements one-by-one (string and value) \n", - "scen.init_par(\"a\", idx_sets=\"i\")\n", - "scen.add_par(\"a\", \"seattle\", 350, \"cases\")\n", - "scen.add_par(\"a\", \"san-diego\", 600, \"cases\")\n", + "import pandas as pd\n", "\n", - "# demand at market j in cases \n", - "# add parameter elements as dataframe (with index names) \n", - "scen.init_par(\"b\", idx_sets=\"j\")\n", - "b_data = [\n", - " {'j': \"new-york\", 'value': 325, 'unit': \"cases\"},\n", - " {'j': \"chicago\", 'value': 300, 'unit': \"cases\"},\n", - " {'j': \"topeka\", 'value': 275, 'unit': \"cases\"}\n", - "]\n", - "b = pd.DataFrame(b_data)\n", - "scen.add_par(\"b\", b)" + "from ixmp4.core import Unit\n", + "\n", + "# Only needed once for each mp\n", + "try:\n", + " cases = mp.units.get(\"cases\")\n", + "except Unit.NotFound:\n", + " cases = mp.units.create(\"cases\")\n", + "\n", + "# capacity of plant i in cases\n", + "# add parameter data as a dict\n", + "a = run.optimization.parameters.create(name=\"a\", constrained_to_indexsets=[\"i\"])\n", + "a_data = {\n", + " \"i\": [\"seattle\", \"san-diego\"],\n", + " \"values\": [350, 600],\n", + " \"units\": [cases.name, cases.name],\n", + "}\n", + "a.add(data=a_data)\n", + "\n", + "# demand at market j in cases\n", + "# add parameter data as a pd.DataFrame\n", + "b = run.optimization.parameters.create(\"b\", constrained_to_indexsets=\"j\")\n", + "b_data = {\n", + " \"j\": [\"new-york\", \"chicago\", 
\"topeka\"],\n", + " \"values\": [325, 300, 275],\n", + " \"units\": [cases.name] * 3,\n", + "}\n", + "b_data = pd.DataFrame(b_data)\n", + "b.add(b_data)" ] }, { @@ -247,7 +245,8 @@ "metadata": {}, "outputs": [], "source": [ - "scen.par('b')" + "# And this is how e.g. b looks:\n", + "b.data" ] }, { @@ -266,21 +265,25 @@ "metadata": {}, "outputs": [], "source": [ - "# distance in thousands of miles \n", - "scen.init_par(\"d\", idx_sets=[\"i\", \"j\"])\n", - "# add more parameter elements as dataframe by index names \n", - "d_data = [\n", - " {'i': \"seattle\", 'j': \"new-york\", 'value': 2.5, 'unit': \"km\"},\n", - " {'i': \"seattle\", 'j': \"chicago\", 'value': 1.7, 'unit': \"km\"},\n", - " {'i': \"seattle\", 'j': \"topeka\", 'value': 1.8, 'unit': \"km\"},\n", - " {'i': \"san-diego\", 'j': \"new-york\", 'value': 2.5, 'unit': \"km\"},\n", - "]\n", - "d = pd.DataFrame(d_data)\n", - "scen.add_par(\"d\", d)\n", + "try:\n", + " km = mp.units.get(\"km\")\n", + "except Unit.NotFound:\n", + " km = mp.units.create(\"km\")\n", "\n", - "# add other parameter elements as key list, value, unit\n", - "scen.add_par(\"d\", [\"san-diego\", \"chicago\"], 1.8, \"km\")\n", - "scen.add_par(\"d\", [\"san-diego\", \"topeka\"], 1.4, \"km\")" + "# distance in thousands of miles\n", + "d = run.optimization.parameters.create(\"d\", constrained_to_indexsets=[\"i\", \"j\"])\n", + "# add more parameter data as dict\n", + "d_data = {\n", + " \"i\": [\"seattle\", \"seattle\", \"seattle\", \"san-diego\"],\n", + " \"j\": [\"new-york\", \"chicago\", \"topeka\", \"new-york\"],\n", + " \"values\": [2.5, 1.7, 1.8, 2.5],\n", + " \"units\": [km.name] * 4,\n", + "}\n", + "d.add(d_data)\n", + "\n", + "# add other parameter data one by one\n", + "d.add({\"i\": [\"san-diego\"], \"j\": [\"chicago\"], \"values\": [1.8], \"units\": [\"km\"]})\n", + "d.add({\"i\": [\"san-diego\"], \"j\": [\"topeka\"], \"values\": [1.4], \"units\": [\"km\"]})" ] }, { @@ -296,32 +299,15 @@ "metadata": {}, "outputs": [], "source": [ - "# cost per case per 1000 miles \n", - "# initialize scalar with a value and a unit (and optionally a comment) \n", - "scen.init_scalar(\"f\", 90.0, \"USD/km\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Committing the scenario to the ixmp database instance" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# commit new scenario to the database\n", - "# no changes can then be made to the scenario data until a check-out is performed\n", - "comment = \"importing Dantzig's transport problem for illustration\"\n", - "comment += \" and testing of the Python interface using a generic datastructure\" \n", - "scen.commit(comment) \n", + "# cost per case per 1000 miles\n", + "\n", + "# TODO we could really use a units.get_or_create() function!\n", + "try:\n", + " unit_cost_per_case = mp.units.get(\"USD/km\")\n", + "except Unit.NotFound:\n", + " unit_cost_per_case = mp.units.create(\"USD/km\")\n", "\n", - "# set this new scenario as the default version for the model/scenario name\n", - "scen.set_as_default()" + "f = run.optimization.scalars.create(name=\"f\", value=90, unit=unit_cost_per_case)" ] }, { @@ -330,7 +316,7 @@ "source": [ "### Defining variables and equations in the scenario\n", "\n", - "The levels and marginals of these variables and equations will be imported to the scenario when reading the gdx solution file." 
+ "The levels and marginals of these variables and equations will be imported to the scenario when reading the model solution." ] }, { @@ -353,17 +339,11 @@ "metadata": {}, "outputs": [], "source": [ - "# perform a check_out to make further changes\n", - "scen.check_out()\n", - "\n", "# initialize the decision variables and equations\n", - "scen.init_var(\"z\", None, None)\n", - "scen.init_var(\"x\", idx_sets=[\"i\", \"j\"])\n", - "scen.init_equ(\"demand\", idx_sets=[\"j\"])\n", - "\n", - "# commit changes to the scenario (save changes in ixmp database instance)\n", - "change_comment = \"initialize the model variables and equations\"\n", - "scen.commit(change_comment)" + "z = run.optimization.variables.create(\"z\")\n", + "x = run.optimization.variables.create(\"x\", constrained_to_indexsets=[\"i\", \"j\"])\n", + "supply = run.optimization.equations.create(\"supply\", constrained_to_indexsets=[\"i\"])\n", + "demand = run.optimization.equations.create(\"demand\", constrained_to_indexsets=[\"j\"])" ] }, { @@ -372,9 +352,11 @@ "source": [ "### Solve the scenario\n", "\n", - "The ``solve()`` function exports the scenario to a GAMS gdx file, executes GAMS, and then imports the solution from an output GAMS gdx file to the database.\n", + "In this tutorial, we solve the tutorial using the ``highs`` solver in linopy. \n", + "\n", + "The ``create_dantzig_model()`` function is a convenience shortcut for setting up a linopy model correctly for the datzig scenario. Please see ``linopy_model.py`` for details.\n", "\n", - "For the model equations and the GAMS workflow (reading the data from gdx, solving the model, writing the results to gdx), see ``transport_ixmp.gms``." + "The solution data are stored with the model object automatically. ``store_dantzig_solution()`` then stores them in the ixmp4 objects." ] }, { @@ -383,7 +365,12 @@ "metadata": {}, "outputs": [], "source": [ - "scen.solve(model='dantzig')" + "from tutorial.transport.linopy_model import create_dantzig_model, store_dantzig_solution\n", + "\n", + "m = create_dantzig_model(i=i, j=j, a=a, b=b, d=d, f=f)\n", + "m.solve(\"highs\")\n", + "\n", + "store_dantzig_solution(model=m, z=z, x=x, demand=demand, supply=supply)" ] }, { @@ -400,7 +387,7 @@ "outputs": [], "source": [ "# display the objective value of the solution\n", - "scen.var(\"z\")" + "z.levels" ] }, { @@ -410,7 +397,7 @@ "outputs": [], "source": [ "# display the quantities transported from canning plants to demand locations\n", - "scen.var(\"x\")" + "x.data" ] }, { @@ -419,18 +406,8 @@ "metadata": {}, "outputs": [], "source": [ - "# display the quantities and marginals (=shadow prices) of the demand balance constraints\n", - "scen.equ(\"demand\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Close the database connection of the ix modeling platform\n", - "\n", - "Closing the database connection is recommended when working with the local file-based database, i.e., ``dbtype='HSQLDB'``.\n", - "This command closes the database files and removes temporary data. This is necessary so that other notebooks or ``ixmp`` instances can access the database file, or so that the database files can be copied to a different folder or drive." 
+ "# display the quantities and marginals (shadow prices) of the demand balance constraints\n", + "demand.data" ] }, { @@ -439,8 +416,8 @@ "metadata": {}, "outputs": [], "source": [ - "# close the connection of the platform instance to the local ixmp database files\n", - "mp.close_db()" + "# display the quantities and marginals (shadow prices) of the supply balance constraints\n", + "supply.data" ] } ], @@ -461,7 +438,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.10.0" + "version": "3.12.4" } }, "nbformat": 4, From b47dbdc20c39b717b1a2cac3781aea58340a3c6b Mon Sep 17 00:00:00 2001 From: Fridolin Glatter Date: Wed, 10 Jul 2024 16:00:11 +0200 Subject: [PATCH 14/50] Add required dependencies --- poetry.lock | 457 ++++++++++++++++++++++++++++++++++++++++++++++++- pyproject.toml | 2 + 2 files changed, 458 insertions(+), 1 deletion(-) diff --git a/poetry.lock b/poetry.lock index f516b2c5..0354024a 100644 --- a/poetry.lock +++ b/poetry.lock @@ -125,6 +125,63 @@ files = [ [package.extras] dev = ["freezegun (>=1.0,<2.0)", "pytest (>=6.0)", "pytest-cov"] +[[package]] +name = "bottleneck" +version = "1.4.0" +description = "Fast NumPy array functions written in C" +optional = false +python-versions = "*" +files = [ + {file = "Bottleneck-1.4.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:2110af22aa8c2779faba8aa021d6b559df04449bdf21d510eacd7910934189fe"}, + {file = "Bottleneck-1.4.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:381cbd1e52338fcdf9ff01c962e6aa187b2d8b3b369d42e779b6d33ac61f8d35"}, + {file = "Bottleneck-1.4.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4a91e40bbb8452e77772614d882be2c34b3b514d9f15460f703293525a6e173d"}, + {file = "Bottleneck-1.4.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:59604949aea476f5075b965129eaa3c2d90891fd43b0dfaf2ad7621bb5db14a5"}, + {file = "Bottleneck-1.4.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:c2c92545e1bc8e859d8d137aefa3b24843bd374b17c9814dafa3bbcea9fc4ec0"}, + {file = "Bottleneck-1.4.0-cp310-cp310-win32.whl", hash = "sha256:f63e79bfa2f82a7432c8b147ed321d01ca7769bc17cc04644286a4ce58d30549"}, + {file = "Bottleneck-1.4.0-cp310-cp310-win_amd64.whl", hash = "sha256:d69907d8d679cb5091a3f479c46bf1076f149f6311ff3298bac5089b86a2fab1"}, + {file = "Bottleneck-1.4.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:67347b0f01f32a232a6269c37afc1c079e08f6455fa12e91f4a1cd12eb0d11a5"}, + {file = "Bottleneck-1.4.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1490348b3bbc0225523dc2c00c6bb3e66168c537d62797bd29783c0826c09838"}, + {file = "Bottleneck-1.4.0-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a704165552496cbcc8bcc5921bb679fd6fa66bb1e758888de091b1223231c9f0"}, + {file = "Bottleneck-1.4.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:ffb4e4edf7997069719b9269926cc00a2a12c6e015422d1ebc2f621c4541396a"}, + {file = "Bottleneck-1.4.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:5d6bf45ed58d5e7414c0011ef2da75474fe597a51970df83596b0bcb79c14c5e"}, + {file = "Bottleneck-1.4.0-cp311-cp311-win32.whl", hash = "sha256:ed209f8f3cb9954773764b0fa2510a7a9247ad245593187ac90bd0747771bc5c"}, + {file = "Bottleneck-1.4.0-cp311-cp311-win_amd64.whl", hash = "sha256:d53f1a72b12cfd76b56934c33bc0cb7c1a295f23a2d3ffba8c764514c9b5e0ff"}, + {file = 
"Bottleneck-1.4.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:e720ff24370324c84a82b1a18195274715c23181748b2b9e3dacad24198ca06f"}, + {file = "Bottleneck-1.4.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:44305c70c2a1539b0ae968e033f301ad868a6146b47e3cccd73fdfe3fc07c4ee"}, + {file = "Bottleneck-1.4.0-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1b4dac5d2a871b7bd296c2b92426daa27d5b07aa84ef2557db097d29135da4eb"}, + {file = "Bottleneck-1.4.0-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:fbcdd01db9e27741fb16a02b720cf02389d4b0b99cefe3c834c7df88c2d7412d"}, + {file = "Bottleneck-1.4.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:14b3334a39308fbb05dacd35ac100842aa9e9bc70afbdcebe43e46179d183fd0"}, + {file = "Bottleneck-1.4.0-cp312-cp312-win32.whl", hash = "sha256:520d7a83cd48b3f58e5df1a258acb547f8a5386a8c21ca9e1058d83a0d622fdf"}, + {file = "Bottleneck-1.4.0-cp312-cp312-win_amd64.whl", hash = "sha256:b1339b9ad3ee217253f246cde5c3789eb527cf9dd31ff0a1f5a8bf7fc89eadad"}, + {file = "Bottleneck-1.4.0-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f2749602200aaa0e12a0f3f936dd6d4035384ad10d3acf7ac4f418c501683397"}, + {file = "Bottleneck-1.4.0-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2bb79a2ac135567694f13339f0bebcee96aec09c596b324b61cd7fd5e306f49d"}, + {file = "Bottleneck-1.4.0-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:c6097bf39723e76ff5bba160daab92ae599df212c859db8d46648548584d04a8"}, + {file = "Bottleneck-1.4.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:b5f72b66ccc0272de46b67346cf8490737ba2adc6a302664f5326e7741b6d5ab"}, + {file = "Bottleneck-1.4.0-cp37-cp37m-win32.whl", hash = "sha256:9903f017b9d6f2f69ce241b424ddad7265624f64dc6eafbe257d45661febf8bd"}, + {file = "Bottleneck-1.4.0-cp37-cp37m-win_amd64.whl", hash = "sha256:834816c316ad184cae7ecb615b69876a42cd2cafb07ee66c57a9c1ccacb63339"}, + {file = "Bottleneck-1.4.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:03c43150f180d86a5633a6da788660d335983f6798fca306ba7f47ff27a1b7e7"}, + {file = "Bottleneck-1.4.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:eea333dbcadb780356c54f5c4fa7754f143573b57508fff43d5daf63298eb26a"}, + {file = "Bottleneck-1.4.0-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6179791c0119aec3708ef74ddadab8d183e3742adb93a9028718e8696bdf572b"}, + {file = "Bottleneck-1.4.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:220b72405f77aebb0137b733b464c2526ded471e4289ac1e840bab8852759a55"}, + {file = "Bottleneck-1.4.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:8746f0f727997ce4c7457dc1fec4e4e3c0fdd8803514baa3d1c4ea6515ab04b2"}, + {file = "Bottleneck-1.4.0-cp38-cp38-win32.whl", hash = "sha256:6a36280ee33d9db799163f04e88b950261e590cc71d089f5e179b21680b5d491"}, + {file = "Bottleneck-1.4.0-cp38-cp38-win_amd64.whl", hash = "sha256:de17e012694e6a987bb4eb050dd7f0cf939195a8e00cb23aa93ebee5fd5e64a8"}, + {file = "Bottleneck-1.4.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:28260197ab8a4a6b7adf810523147b1a3e85607f4e26a0f685eb9d155cfc75af"}, + {file = "Bottleneck-1.4.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:90d5d188a0cca0b9655ff2904ee61e7f183079e97550be98c2541a2eec358a72"}, + {file = "Bottleneck-1.4.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2861ff645d236f1a6f5c6d1ddb3db37d19af1d91057bdc4fd7b76299a15b3079"}, + {file = "Bottleneck-1.4.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:6136ce7dcf825c432a20b80ab1c460264a437d8430fff32536176147e0b6b832"}, + {file = "Bottleneck-1.4.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:889e6855b77345622b4ba927335d3118745d590492941f5f78554f157d259e92"}, + {file = "Bottleneck-1.4.0-cp39-cp39-win32.whl", hash = "sha256:817aa43a671ede696ea023d8f35839a391244662340cc95a0f46965dda8b35cf"}, + {file = "Bottleneck-1.4.0-cp39-cp39-win_amd64.whl", hash = "sha256:23834d82177d6997f21fa63156550668cd07a9a6e5a1b66ea80f1a14ac6ffd07"}, + {file = "bottleneck-1.4.0.tar.gz", hash = "sha256:beb36df519b8709e7d357c0c9639b03b885ca6355bbf5e53752c685de51605b8"}, +] + +[package.dependencies] +numpy = "*" + +[package.extras] +doc = ["gitpython", "numpydoc", "sphinx"] + [[package]] name = "build" version = "1.2.1" @@ -349,6 +406,17 @@ files = [ [package.dependencies] colorama = {version = "*", markers = "platform_system == \"Windows\""} +[[package]] +name = "cloudpickle" +version = "3.0.0" +description = "Pickler class to extend the standard pickle.Pickler functionality" +optional = false +python-versions = ">=3.8" +files = [ + {file = "cloudpickle-3.0.0-py3-none-any.whl", hash = "sha256:246ee7d0c295602a036e86369c77fecda4ab17b506496730f2f576d9016fd9c7"}, + {file = "cloudpickle-3.0.0.tar.gz", hash = "sha256:996d9a482c6fb4f33c1a35335cf8afd065d2a56e973270364840712d9131a882"}, +] + [[package]] name = "colorama" version = "0.4.6" @@ -444,6 +512,35 @@ tomli = {version = "*", optional = true, markers = "python_full_version <= \"3.1 [package.extras] toml = ["tomli"] +[[package]] +name = "dask" +version = "2024.6.2" +description = "Parallel PyData with Task Scheduling" +optional = false +python-versions = ">=3.9" +files = [ + {file = "dask-2024.6.2-py3-none-any.whl", hash = "sha256:81b80ee015b2e057b93bb2d1bf13a866136e762e2b24bf54b6b621e8b86b7708"}, + {file = "dask-2024.6.2.tar.gz", hash = "sha256:d429d6b19e85fd1306ac37c188aaf99d03bbe69a6fe59d2b42882b2ac188686f"}, +] + +[package.dependencies] +click = ">=8.1" +cloudpickle = ">=1.5.0" +fsspec = ">=2021.09.0" +importlib-metadata = {version = ">=4.13.0", markers = "python_version < \"3.12\""} +packaging = ">=20.0" +partd = ">=1.2.0" +pyyaml = ">=5.3.1" +toolz = ">=0.10.0" + +[package.extras] +array = ["numpy (>=1.21)"] +complete = ["dask[array,dataframe,diagnostics,distributed]", "lz4 (>=4.3.2)", "pyarrow (>=7.0)", "pyarrow-hotfix"] +dataframe = ["dask-expr (>=1.1,<1.2)", "dask[array]", "pandas (>=1.3)"] +diagnostics = ["bokeh (>=2.4.2)", "jinja2 (>=2.10.3)"] +distributed = ["distributed (==2024.6.2)"] +test = ["pandas[test]", "pre-commit", "pytest", "pytest-cov", "pytest-rerunfailures", "pytest-timeout", "pytest-xdist"] + [[package]] name = "debugpy" version = "1.8.2" @@ -497,6 +594,20 @@ files = [ {file = "deepmerge-1.1.1.tar.gz", hash = "sha256:53a489dc9449636e480a784359ae2aab3191748c920649551c8e378622f0eca4"}, ] +[[package]] +name = "deprecation" +version = "2.1.0" +description = "A library to handle automated deprecations" +optional = false +python-versions = "*" +files = [ + {file = "deprecation-2.1.0-py2.py3-none-any.whl", hash = "sha256:a10811591210e1fb0e768a8c25517cabeabcba6f0bf96564f8ff45189f90b14a"}, + {file = "deprecation-2.1.0.tar.gz", hash = 
"sha256:72b3bde64e5d778694b0cf68178aed03d15e15477116add3fb773e581f9518ff"}, +] + +[package.dependencies] +packaging = "*" + [[package]] name = "distlib" version = "0.3.8" @@ -653,6 +764,45 @@ docs = ["furo (>=2023.9.10)", "sphinx (>=7.2.6)", "sphinx-autodoc-typehints (>=1 testing = ["covdefaults (>=2.3)", "coverage (>=7.3.2)", "diff-cover (>=8.0.1)", "pytest (>=7.4.3)", "pytest-asyncio (>=0.21)", "pytest-cov (>=4.1)", "pytest-mock (>=3.12)", "pytest-timeout (>=2.2)", "virtualenv (>=20.26.2)"] typing = ["typing-extensions (>=4.8)"] +[[package]] +name = "fsspec" +version = "2024.6.1" +description = "File-system specification" +optional = false +python-versions = ">=3.8" +files = [ + {file = "fsspec-2024.6.1-py3-none-any.whl", hash = "sha256:3cb443f8bcd2efb31295a5b9fdb02aee81d8452c80d28f97a6d0959e6cee101e"}, + {file = "fsspec-2024.6.1.tar.gz", hash = "sha256:fad7d7e209dd4c1208e3bbfda706620e0da5142bebbd9c384afb95b07e798e49"}, +] + +[package.extras] +abfs = ["adlfs"] +adl = ["adlfs"] +arrow = ["pyarrow (>=1)"] +dask = ["dask", "distributed"] +dev = ["pre-commit", "ruff"] +doc = ["numpydoc", "sphinx", "sphinx-design", "sphinx-rtd-theme", "yarl"] +dropbox = ["dropbox", "dropboxdrivefs", "requests"] +full = ["adlfs", "aiohttp (!=4.0.0a0,!=4.0.0a1)", "dask", "distributed", "dropbox", "dropboxdrivefs", "fusepy", "gcsfs", "libarchive-c", "ocifs", "panel", "paramiko", "pyarrow (>=1)", "pygit2", "requests", "s3fs", "smbprotocol", "tqdm"] +fuse = ["fusepy"] +gcs = ["gcsfs"] +git = ["pygit2"] +github = ["requests"] +gs = ["gcsfs"] +gui = ["panel"] +hdfs = ["pyarrow (>=1)"] +http = ["aiohttp (!=4.0.0a0,!=4.0.0a1)"] +libarchive = ["libarchive-c"] +oci = ["ocifs"] +s3 = ["s3fs"] +sftp = ["paramiko"] +smb = ["smbprotocol"] +ssh = ["paramiko"] +test = ["aiohttp (!=4.0.0a0,!=4.0.0a1)", "numpy", "pytest", "pytest-asyncio (!=0.22.0)", "pytest-benchmark", "pytest-cov", "pytest-mock", "pytest-recording", "pytest-rerunfailures", "requests"] +test-downstream = ["aiobotocore (>=2.5.4,<3.0.0)", "dask-expr", "dask[dataframe,test]", "moto[server] (>4,<5)", "pytest-timeout", "xarray"] +test-full = ["adlfs", "aiohttp (!=4.0.0a0,!=4.0.0a1)", "cloudpickle", "dask", "distributed", "dropbox", "dropboxdrivefs", "fastparquet", "fusepy", "gcsfs", "jinja2", "kerchunk", "libarchive-c", "lz4", "notebook", "numpy", "ocifs", "pandas", "panel", "paramiko", "pyarrow", "pyarrow (>=1)", "pyftpdlib", "pygit2", "pytest", "pytest-asyncio (!=0.22.0)", "pytest-benchmark", "pytest-cov", "pytest-mock", "pytest-recording", "pytest-rerunfailures", "python-snappy", "requests", "smbprotocol", "tqdm", "urllib3", "zarr", "zstandard"] +tqdm = ["tqdm"] + [[package]] name = "greenlet" version = "3.0.3" @@ -750,6 +900,72 @@ files = [ hpack = ">=4.0,<5" hyperframe = ">=6.0,<7" +[[package]] +name = "highspy" +version = "1.7.2" +description = "A thin set of pybind11 wrappers to HiGHS" +optional = false +python-versions = ">=3.8" +files = [ + {file = "highspy-1.7.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:467124b1e01aeddff8b6d0aa7a56e51eef943ebb28d3e46dcbdd1e32b77384ec"}, + {file = "highspy-1.7.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:584590ec4d9948a6f1ef8a1ce51761e1c9c00241054c12cbc0e8a43f0f5183c6"}, + {file = "highspy-1.7.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9c642da4035b6c33618bca73f01627fce94e07c1e741b46798dddddaa88cf376"}, + {file = "highspy-1.7.2-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5bebb73f80c47e3215547abb1ebf8e520ae5f7f24e5420ad270ad901f0725041"}, + 
{file = "highspy-1.7.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7f0bfad2a4ebb37944bb1ed883f0fbdb733d98141fdf4902fee0f75b0160a6c0"}, + {file = "highspy-1.7.2-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:c64ede6b8e567eec0d14d12ea67114af855b4c380881d848becfb91cb01c844d"}, + {file = "highspy-1.7.2-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:0c2ee6f7b74a6a1508fceb7d40acf8097d81c5b75059628ea00715723d382110"}, + {file = "highspy-1.7.2-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:f9acabd04d16c5586753a9d6ce03c6f57b47f094fe3ef3b34185656181ed8685"}, + {file = "highspy-1.7.2-cp310-cp310-win32.whl", hash = "sha256:8625e193766192d4cfdc543548dc6cacf92ac09c86e2fcc7e48342f4909a9668"}, + {file = "highspy-1.7.2-cp310-cp310-win_amd64.whl", hash = "sha256:e4d17d0c9bbbe15654a44b0369e5f1ee95f36935b71d54d4bdf70bedcc1b256e"}, + {file = "highspy-1.7.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:eb2fb87f2cd72765fa281acc2fb10e0bacb5f5e7c3336bb267b917b5bffc30fc"}, + {file = "highspy-1.7.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:cd96ff70cb9ba186129597e521a7afcaa2bbb285273ffa5417edfcc43d58a566"}, + {file = "highspy-1.7.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1152035fd6c861cb578115b976d03c38e4e0e2f87227ac93b4af12fb582ad971"}, + {file = "highspy-1.7.2-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4cb5b7067cd3cfc191920b33428395080d51892435cd507542ae75f7a2ae0853"}, + {file = "highspy-1.7.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1ad5ef7ddfd6fd879dc9b6ac02a9eecd13fe2b0581cd03985e5faa89f43b24ac"}, + {file = "highspy-1.7.2-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:eb039878156f6b521f383a42b53e615521af430f0ae55e12d825b1368e1eaa47"}, + {file = "highspy-1.7.2-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:afd27cf82c922a2add4b940a7900a9e74a2b66556446c39abfe2d854cfcf59d1"}, + {file = "highspy-1.7.2-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:81de7b020255e40aafd28387f7642b3b6ea844e9cb42845a045a05411c7a055a"}, + {file = "highspy-1.7.2-cp311-cp311-win32.whl", hash = "sha256:d7d1c11f8c68ab537023f487585b1a4f447c5c03603feb2c5f76e77914c388ac"}, + {file = "highspy-1.7.2-cp311-cp311-win_amd64.whl", hash = "sha256:fafa076bad795c45d7f055f859b762c4a72abd872ecd9710e1d3c1202a9123ad"}, + {file = "highspy-1.7.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:659f240736ae923fd35acd8ea0e86dc4165e8aec8e72c191642ec546476c1130"}, + {file = "highspy-1.7.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:1b466011f4091051f156a13b46ac569316cc2cddff0c2881ee456c765c535519"}, + {file = "highspy-1.7.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5e574cb5ddb6dffbcae0db61ae1ebb7754191e6f42a822010b81e3599d1001df"}, + {file = "highspy-1.7.2-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:fa831a9a4fb286fe90bcba7c8a663923a47c14a318bdd30a6b96707f9a3f7496"}, + {file = "highspy-1.7.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:71364d2136b0c31116684af47c63ba8bd2ca6da9320a4fadb69a0f57606bbdf7"}, + {file = "highspy-1.7.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:b46b73ce68c9a98c36584348a31263f6deef84d8138cac872439b383cc17293e"}, + {file = "highspy-1.7.2-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:a075da0c7d5b269f720691f0d743013540eea35bf22419e23bd32b343d4dda27"}, + {file = "highspy-1.7.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = 
"sha256:98b2f01ec21764f233293eaae2ee637884ec009e6db608c46a446433c60d5e31"}, + {file = "highspy-1.7.2-cp312-cp312-win32.whl", hash = "sha256:ba78467db9e4693a384644b221deecf5f0243d150540d65fcb33534103486490"}, + {file = "highspy-1.7.2-cp312-cp312-win_amd64.whl", hash = "sha256:47886d7794b3fa3fb12e5722d96989ef920a9a9460de66f4868632c8e723a07d"}, + {file = "highspy-1.7.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:22e31ee5d3854024d075697fcfac88394b90d0afe25b84e4283d3964d0cd991b"}, + {file = "highspy-1.7.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:fe9b2291b01ff13e14a2720e436cf807b28d7a9d33d27861e7f26ced001bceec"}, + {file = "highspy-1.7.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a2d86c87a997de23001c687c8b3bff265b0f9edb1403657f5bb895d2525f0e78"}, + {file = "highspy-1.7.2-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5c0b5d913ae2e509e10991596caa3b09670e18aa6b55aab324e00884561f44d4"}, + {file = "highspy-1.7.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bf7973ba66d659728fadf7168f8d6a3560bef4333a504abfbc8cdb9ea51afd98"}, + {file = "highspy-1.7.2-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:3e361e98ddd757c0393677a9a52de6349abfbe79ff5d2132088a3d02c6c735d9"}, + {file = "highspy-1.7.2-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:69ea90d97effbc27eeb2e20488c7c510f7d12813d929a8ca3fd0a7c9832564ab"}, + {file = "highspy-1.7.2-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:0af568e0e61934e748c2b1057fb48f7fc3bfef6d6e6f159c616dd0ececb223a7"}, + {file = "highspy-1.7.2-cp38-cp38-win32.whl", hash = "sha256:20e86e18203d96f6c2b9d358b14e0178a7f83ac8ec6e806255d3f80710839bea"}, + {file = "highspy-1.7.2-cp38-cp38-win_amd64.whl", hash = "sha256:0ac5990c90cc615a2a45143d2321d74a7857db2e79aa9ba3606461da99fb5c8b"}, + {file = "highspy-1.7.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:2d8199c8bd0528bfaec85d441c25c570adf2334be5a75d6d6839190db2e14f83"}, + {file = "highspy-1.7.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:d0a6d8b4fa17161c5b5941a49a9dab9b8569a3e6c28b2e28eaad3265fd8d7430"}, + {file = "highspy-1.7.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9e10640542c41852d135172c87ced5e2664bbf12d5396a6f761ec8e62bc11ea6"}, + {file = "highspy-1.7.2-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:64f99988d0c641843079c410883f606023ae4055e8e6158427cd4dc1e23227ff"}, + {file = "highspy-1.7.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bb8a2919e958e07fd82e6d6f273374030f5232b09e2924c6d3f50e773bfa0a80"}, + {file = "highspy-1.7.2-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:72fdc8dd3bb5e0d34b8594a851b0cad299b31eef40a50a180b3260494d86b09e"}, + {file = "highspy-1.7.2-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:b008ccdfbb73fde912ed1dd605e27a122a81e0c472c338fa3b3fa24996e5379f"}, + {file = "highspy-1.7.2-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:3675f3242ddd11b107bde3345779ac9eb8dd9a940337b43ce8127836b592feef"}, + {file = "highspy-1.7.2-cp39-cp39-win32.whl", hash = "sha256:b496a5d337508847737836ada6d930b404d921a119132cd1d14df47a4b488db7"}, + {file = "highspy-1.7.2-cp39-cp39-win_amd64.whl", hash = "sha256:e7d3883c697103c8c39d808976a00b85d68f869b97bae6d48b7b03811dfbb925"}, + {file = "highspy-1.7.2.tar.gz", hash = "sha256:7987b2a3f013254a1845bceb4597087da4070f7887c0084024649486321ae213"}, +] + +[package.dependencies] +numpy = "*" + +[package.extras] +test = ["numpy", "pytest"] + [[package]] name = "hpack" version = 
"4.0.0" @@ -1127,6 +1343,49 @@ files = [ {file = "latexcodec-3.0.0.tar.gz", hash = "sha256:917dc5fe242762cc19d963e6548b42d63a118028cdd3361d62397e3b638b6bc5"}, ] +[[package]] +name = "linopy" +version = "0.1.dev1049+g5e0c68e" +description = "Linear optimization with N-D labeled arrays in Python" +optional = false +python-versions = ">=3.9" +files = [] +develop = true + +[package.dependencies] +bottleneck = "*" +dask = ">=0.18.0" +deprecation = "*" +numexpr = "*" +numpy = "*" +polars = "*" +scipy = "*" +toolz = "*" +tqdm = "*" +xarray = ">=2024.2.0" + +[package.extras] +dev = ["gurobipy", "highspy", "netcdf4", "paramiko", "pre-commit", "pytest", "pytest-cov", "types-paramiko"] +docs = ["gurobipy (==11.0.2)", "ipykernel (==6.29.5)", "ipython (==8.26.0)", "matplotlib (==3.9.1)", "nbsphinx (==0.9.4)", "nbsphinx-link (==1.3.0)", "numpydoc (==1.7.0)", "sphinx (==7.3.7)", "sphinx_book_theme (==1.1.3)", "sphinx_rtd_theme (==2.0.0)"] +solvers = ["coptpy", "cplex", "gurobipy", "highspy (>=1.5.0)", "highspy (>=1.7.1)", "mindoptpy", "mosek", "pyscipopt", "xpress"] + +[package.source] +type = "git" +url = "git@github.com:glatterf42/linopy.git" +reference = "enh/remove-numpy-pin" +resolved_reference = "5e0c68e5daae4d77b1879090760023677866da62" + +[[package]] +name = "locket" +version = "1.0.0" +description = "File-based locks for Python on Linux and Windows" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" +files = [ + {file = "locket-1.0.0-py2.py3-none-any.whl", hash = "sha256:b6c819a722f7b6bd955b80781788e4a66a55628b858d347536b7e81325a3a5e3"}, + {file = "locket-1.0.0.tar.gz", hash = "sha256:5c0d4c052a8bbbf750e056a8e65ccd309086f4f0f18a2eac306a8dfa4112a632"}, +] + [[package]] name = "mako" version = "1.3.5" @@ -1366,6 +1625,47 @@ files = [ {file = "nodeenv-1.9.1.tar.gz", hash = "sha256:6ec12890a2dab7946721edbfbcd91f3319c6ccc9aec47be7c7e6b7011ee6645f"}, ] +[[package]] +name = "numexpr" +version = "2.10.1" +description = "Fast numerical expression evaluator for NumPy" +optional = false +python-versions = ">=3.9" +files = [ + {file = "numexpr-2.10.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:bbd35f17f6efc00ebd4a480192af1ee30996094a0d5343b131b0e90e61e8b554"}, + {file = "numexpr-2.10.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:fecdf4bf3c1250e56583db0a4a80382a259ba4c2e1efa13e04ed43f0938071f5"}, + {file = "numexpr-2.10.1-cp310-cp310-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:b2efa499f460124538a5b4f1bf2e77b28eb443ee244cc5573ed0f6a069ebc635"}, + {file = "numexpr-2.10.1-cp310-cp310-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:ac23a72eff10f928f23b147bdeb0f1b774e862abe332fc9bf4837e9f1bc0bbf9"}, + {file = "numexpr-2.10.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:b28eaf45f1cc1048aad9e90e3a8ada1aef58c5f8155a85267dc781b37998c046"}, + {file = "numexpr-2.10.1-cp310-cp310-win32.whl", hash = "sha256:4f0985bd1c493b23b5aad7d81fa174798f3812efb78d14844194834c9fee38b8"}, + {file = "numexpr-2.10.1-cp310-cp310-win_amd64.whl", hash = "sha256:44f6d12a8c44be90199bbb10d3abf467f88951f48a3d1fbbd3c219d121f39c9d"}, + {file = "numexpr-2.10.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:a3c0b0bf165b2d886eb981afa4e77873ca076f5d51c491c4d7b8fc10f17c876f"}, + {file = "numexpr-2.10.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:56648a04679063175681195670ad53e5c8ca19668166ed13875199b5600089c7"}, + {file = "numexpr-2.10.1-cp311-cp311-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = 
"sha256:ce04ae6efe2a9d0be1a0e114115c3ae70c68b8b8fbc615c5c55c15704b01e6a4"}, + {file = "numexpr-2.10.1-cp311-cp311-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:45f598182b4f5c153222e47d5163c3bee8d5ebcaee7e56dd2a5898d4d97e4473"}, + {file = "numexpr-2.10.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:6a50370bea77ba94c3734a44781c716751354c6bfda2d369af3aed3d67d42871"}, + {file = "numexpr-2.10.1-cp311-cp311-win32.whl", hash = "sha256:fa4009d84a8e6e21790e718a80a22d57fe7f215283576ef2adc4183f7247f3c7"}, + {file = "numexpr-2.10.1-cp311-cp311-win_amd64.whl", hash = "sha256:fcbf013bb8494e8ef1d11fa3457827c1571c6a3153982d709e5d17594999d4dd"}, + {file = "numexpr-2.10.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:82fc95c301b15ff4823f98989ee363a2d5555d16a7cfd3710e98ddee726eaaaa"}, + {file = "numexpr-2.10.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:cbf79fef834f88607f977ab9867061dcd9b40ccb08bb28547c6dc6c73e560895"}, + {file = "numexpr-2.10.1-cp312-cp312-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:552c8d4b2e3b87cdb2abb40a781b9a61a9090a9f66ac7357fc5a0b93aff76be3"}, + {file = "numexpr-2.10.1-cp312-cp312-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:22cc65e9121aeb3187a2b50827715b2b087ea70e8ab21416ea52662322087b43"}, + {file = "numexpr-2.10.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:00204e5853713b5eba5f3d0bc586a5d8d07f76011b597c8b4087592cc2ec2928"}, + {file = "numexpr-2.10.1-cp312-cp312-win32.whl", hash = "sha256:82bf04a1495ac475de4ab49fbe0a3a2710ed3fd1a00bc03847316b5d7602402d"}, + {file = "numexpr-2.10.1-cp312-cp312-win_amd64.whl", hash = "sha256:300e577b3c006dd7a8270f1bb2e8a00ee15bf235b1650fe2a6febec2954bc2c3"}, + {file = "numexpr-2.10.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:fb704620657a1c99d64933e8a982148d8bfb2b738a1943e107a2bfdee887ce56"}, + {file = "numexpr-2.10.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:368a1972c3186355160f6ee330a7eea146d8443da75a38a30083289ae251ef5a"}, + {file = "numexpr-2.10.1-cp39-cp39-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ca8ae46481d0b0689ca0d00a8670bc464ce375e349599fe674a6d4957e7b7eb6"}, + {file = "numexpr-2.10.1-cp39-cp39-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:5a4db4456e0779d5e024220b7b6a7477ac900679bfa74836b06fa526aaed4e3c"}, + {file = "numexpr-2.10.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:926dd426c68f1d927412a2ad843831c1eb9a95871e7bb0bd8b20d547c12238d2"}, + {file = "numexpr-2.10.1-cp39-cp39-win32.whl", hash = "sha256:37598cca41f8f50dc889b0b72be1616a288758c16ab7d48c9ac8719e1a39d835"}, + {file = "numexpr-2.10.1-cp39-cp39-win_amd64.whl", hash = "sha256:78b14c19c403df7498954468385768c86b0d2c52ad03dffb74e45d44ae5a9c77"}, + {file = "numexpr-2.10.1.tar.gz", hash = "sha256:9bba99d354a65f1a008ab8b87f07d84404c668e66bab624df5b6b5373403cf81"}, +] + +[package.dependencies] +numpy = ">=1.23.0" + [[package]] name = "numpy" version = "2.0.0" @@ -1642,6 +1942,24 @@ files = [ qa = ["flake8 (==5.0.4)", "mypy (==0.971)", "types-setuptools (==67.2.0.1)"] testing = ["docopt", "pytest"] +[[package]] +name = "partd" +version = "1.4.2" +description = "Appendable key-value storage" +optional = false +python-versions = ">=3.9" +files = [ + {file = "partd-1.4.2-py3-none-any.whl", hash = "sha256:978e4ac767ec4ba5b86c6eaa52e5a2a3bc748a2ca839e8cc798f1cc6ce6efb0f"}, + {file = "partd-1.4.2.tar.gz", hash = "sha256:d022c33afbdc8405c226621b015e8067888173d85f7f5ecebb3cafed9a20f02c"}, +] + +[package.dependencies] +locket = "*" +toolz = 
"*" + +[package.extras] +complete = ["blosc", "numpy (>=1.20.0)", "pandas (>=1.3)", "pyzmq"] + [[package]] name = "pexpect" version = "4.9.0" @@ -1698,6 +2016,46 @@ files = [ dev = ["pre-commit", "tox"] testing = ["pytest", "pytest-benchmark"] +[[package]] +name = "polars" +version = "1.0.0" +description = "Blazingly fast DataFrame library" +optional = false +python-versions = ">=3.8" +files = [ + {file = "polars-1.0.0-cp38-abi3-macosx_10_12_x86_64.whl", hash = "sha256:cf454ee75a2346cd7f44fb536cc69af7a26d8a243ea58bda50f6c810742c76ad"}, + {file = "polars-1.0.0-cp38-abi3-macosx_11_0_arm64.whl", hash = "sha256:8191d8b5cf68d5ebaf9efb497120ff6d7e607a57a116bcce43618d50a536fe1c"}, + {file = "polars-1.0.0-cp38-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f5b58575fd7ddc12bc53adfde933da3b40c2841fdc5396fecbd85e80dfc9332e"}, + {file = "polars-1.0.0-cp38-abi3-manylinux_2_24_aarch64.whl", hash = "sha256:44475877179f261f4ce1a6cfa0fc955392798b9987c17fc2b1a4b294602ace8a"}, + {file = "polars-1.0.0-cp38-abi3-win_amd64.whl", hash = "sha256:bd483045c0629afced9e9ebc83b58550640022db5924d553a068a57621260a22"}, + {file = "polars-1.0.0.tar.gz", hash = "sha256:144a63d6d61dc5d675304673c4261ceccf4cfc75277431389d4afe9a5be0f70b"}, +] + +[package.extras] +adbc = ["adbc-driver-manager[dbapi]", "adbc-driver-sqlite[dbapi]"] +all = ["polars[async,cloudpickle,database,deltalake,excel,fsspec,graph,iceberg,numpy,pandas,plot,pyarrow,pydantic,style,timezone]"] +async = ["gevent"] +calamine = ["fastexcel (>=0.9)"] +cloudpickle = ["cloudpickle"] +connectorx = ["connectorx (>=0.3.2)"] +database = ["nest-asyncio", "polars[adbc,connectorx,sqlalchemy]"] +deltalake = ["deltalake (>=0.15.0)"] +excel = ["polars[calamine,openpyxl,xlsx2csv,xlsxwriter]"] +fsspec = ["fsspec"] +graph = ["matplotlib"] +iceberg = ["pyiceberg (>=0.5.0)"] +numpy = ["numpy (>=1.16.0,<2.0.0)"] +openpyxl = ["openpyxl (>=3.0.0)"] +pandas = ["pandas", "polars[pyarrow]"] +plot = ["hvplot (>=0.9.1)", "polars[pandas]"] +pyarrow = ["pyarrow (>=7.0.0)"] +pydantic = ["pydantic"] +sqlalchemy = ["polars[pandas]", "sqlalchemy"] +style = ["great-tables (>=0.8.0)"] +timezone = ["backports-zoneinfo", "tzdata"] +xlsx2csv = ["xlsx2csv (>=0.8.0)"] +xlsxwriter = ["xlsxwriter"] + [[package]] name = "pre-commit" version = "3.7.1" @@ -2625,6 +2983,48 @@ files = [ {file = "ruff-0.5.0.tar.gz", hash = "sha256:eb641b5873492cf9bd45bc9c5ae5320648218e04386a5f0c264ad6ccce8226a1"}, ] +[[package]] +name = "scipy" +version = "1.14.0" +description = "Fundamental algorithms for scientific computing in Python" +optional = false +python-versions = ">=3.10" +files = [ + {file = "scipy-1.14.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:7e911933d54ead4d557c02402710c2396529540b81dd554fc1ba270eb7308484"}, + {file = "scipy-1.14.0-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:687af0a35462402dd851726295c1a5ae5f987bd6e9026f52e9505994e2f84ef6"}, + {file = "scipy-1.14.0-cp310-cp310-macosx_14_0_arm64.whl", hash = "sha256:07e179dc0205a50721022344fb85074f772eadbda1e1b3eecdc483f8033709b7"}, + {file = "scipy-1.14.0-cp310-cp310-macosx_14_0_x86_64.whl", hash = "sha256:6a9c9a9b226d9a21e0a208bdb024c3982932e43811b62d202aaf1bb59af264b1"}, + {file = "scipy-1.14.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:076c27284c768b84a45dcf2e914d4000aac537da74236a0d45d82c6fa4b7b3c0"}, + {file = "scipy-1.14.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:42470ea0195336df319741e230626b6225a740fd9dce9642ca13e98f667047c0"}, + {file = 
"scipy-1.14.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:176c6f0d0470a32f1b2efaf40c3d37a24876cebf447498a4cefb947a79c21e9d"}, + {file = "scipy-1.14.0-cp310-cp310-win_amd64.whl", hash = "sha256:ad36af9626d27a4326c8e884917b7ec321d8a1841cd6dacc67d2a9e90c2f0359"}, + {file = "scipy-1.14.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:6d056a8709ccda6cf36cdd2eac597d13bc03dba38360f418560a93050c76a16e"}, + {file = "scipy-1.14.0-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:f0a50da861a7ec4573b7c716b2ebdcdf142b66b756a0d392c236ae568b3a93fb"}, + {file = "scipy-1.14.0-cp311-cp311-macosx_14_0_arm64.whl", hash = "sha256:94c164a9e2498e68308e6e148646e486d979f7fcdb8b4cf34b5441894bdb9caf"}, + {file = "scipy-1.14.0-cp311-cp311-macosx_14_0_x86_64.whl", hash = "sha256:a7d46c3e0aea5c064e734c3eac5cf9eb1f8c4ceee756262f2c7327c4c2691c86"}, + {file = "scipy-1.14.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9eee2989868e274aae26125345584254d97c56194c072ed96cb433f32f692ed8"}, + {file = "scipy-1.14.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9e3154691b9f7ed73778d746da2df67a19d046a6c8087c8b385bc4cdb2cfca74"}, + {file = "scipy-1.14.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:c40003d880f39c11c1edbae8144e3813904b10514cd3d3d00c277ae996488cdb"}, + {file = "scipy-1.14.0-cp311-cp311-win_amd64.whl", hash = "sha256:5b083c8940028bb7e0b4172acafda6df762da1927b9091f9611b0bcd8676f2bc"}, + {file = "scipy-1.14.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:bff2438ea1330e06e53c424893ec0072640dac00f29c6a43a575cbae4c99b2b9"}, + {file = "scipy-1.14.0-cp312-cp312-macosx_12_0_arm64.whl", hash = "sha256:bbc0471b5f22c11c389075d091d3885693fd3f5e9a54ce051b46308bc787e5d4"}, + {file = "scipy-1.14.0-cp312-cp312-macosx_14_0_arm64.whl", hash = "sha256:64b2ff514a98cf2bb734a9f90d32dc89dc6ad4a4a36a312cd0d6327170339eb0"}, + {file = "scipy-1.14.0-cp312-cp312-macosx_14_0_x86_64.whl", hash = "sha256:7d3da42fbbbb860211a811782504f38ae7aaec9de8764a9bef6b262de7a2b50f"}, + {file = "scipy-1.14.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d91db2c41dd6c20646af280355d41dfa1ec7eead235642178bd57635a3f82209"}, + {file = "scipy-1.14.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a01cc03bcdc777c9da3cfdcc74b5a75caffb48a6c39c8450a9a05f82c4250a14"}, + {file = "scipy-1.14.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:65df4da3c12a2bb9ad52b86b4dcf46813e869afb006e58be0f516bc370165159"}, + {file = "scipy-1.14.0-cp312-cp312-win_amd64.whl", hash = "sha256:4c4161597c75043f7154238ef419c29a64ac4a7c889d588ea77690ac4d0d9b20"}, + {file = "scipy-1.14.0.tar.gz", hash = "sha256:b5923f48cb840380f9854339176ef21763118a7300a88203ccd0bdd26e58527b"}, +] + +[package.dependencies] +numpy = ">=1.23.5,<2.3" + +[package.extras] +dev = ["cython-lint (>=0.12.2)", "doit (>=0.36.0)", "mypy (==1.10.0)", "pycodestyle", "pydevtool", "rich-click", "ruff (>=0.0.292)", "types-psutil", "typing_extensions"] +doc = ["jupyterlite-pyodide-kernel", "jupyterlite-sphinx (>=0.13.1)", "jupytext", "matplotlib (>=3.5)", "myst-nb", "numpydoc", "pooch", "pydata-sphinx-theme (>=0.15.2)", "sphinx (>=5.0.0)", "sphinx-design (>=0.4.0)"] +test = ["Cython", "array-api-strict", "asv", "gmpy2", "hypothesis (>=6.30)", "meson", "mpmath", "ninja", "pooch", "pytest", "pytest-cov", "pytest-timeout", "pytest-xdist", "scikit-umfpack", "threadpoolctl"] + [[package]] name = "setuptools" version = "70.1.1" @@ -3120,6 +3520,17 @@ files = [ {file = 
"tomli-2.0.1.tar.gz", hash = "sha256:de526c12914f0c550d15924c62d72abc48d6fe7364aa87328337a31007fe8a4f"}, ] +[[package]] +name = "toolz" +version = "0.12.1" +description = "List processing tools and functional utilities" +optional = false +python-versions = ">=3.7" +files = [ + {file = "toolz-0.12.1-py3-none-any.whl", hash = "sha256:d22731364c07d72eea0a0ad45bafb2c2937ab6fd38a3507bf55eae8744aa7d85"}, + {file = "toolz-0.12.1.tar.gz", hash = "sha256:ecca342664893f177a13dac0e6b41cbd8ac25a358e5f215316d43e2100224f4d"}, +] + [[package]] name = "tornado" version = "6.4.1" @@ -3140,6 +3551,26 @@ files = [ {file = "tornado-6.4.1.tar.gz", hash = "sha256:92d3ab53183d8c50f8204a51e6f91d18a15d5ef261e84d452800d4ff6fc504e9"}, ] +[[package]] +name = "tqdm" +version = "4.66.4" +description = "Fast, Extensible Progress Meter" +optional = false +python-versions = ">=3.7" +files = [ + {file = "tqdm-4.66.4-py3-none-any.whl", hash = "sha256:b75ca56b413b030bc3f00af51fd2c1a1a5eac6a0c1cca83cbb37a5c52abce644"}, + {file = "tqdm-4.66.4.tar.gz", hash = "sha256:e4d936c9de8727928f3be6079590e97d9abfe8d39a590be678eb5919ffc186bb"}, +] + +[package.dependencies] +colorama = {version = "*", markers = "platform_system == \"Windows\""} + +[package.extras] +dev = ["pytest (>=6)", "pytest-cov", "pytest-timeout", "pytest-xdist"] +notebook = ["ipywidgets (>=6)"] +slack = ["slack-sdk"] +telegram = ["requests"] + [[package]] name = "traitlets" version = "5.14.3" @@ -3701,6 +4132,30 @@ files = [ {file = "wrapt-1.16.0.tar.gz", hash = "sha256:5f370f952971e7d17c7d1ead40e49f32345a7f7a5373571ef44d800d06b1899d"}, ] +[[package]] +name = "xarray" +version = "2024.6.0" +description = "N-D labeled arrays and datasets in Python" +optional = false +python-versions = ">=3.9" +files = [ + {file = "xarray-2024.6.0-py3-none-any.whl", hash = "sha256:721a7394e8ec3d592b2d8ebe21eed074ac077dc1bb1bd777ce00e41700b4866c"}, + {file = "xarray-2024.6.0.tar.gz", hash = "sha256:0b91e0bc4dc0296947947640fe31ec6e867ce258d2f7cbc10bedf4a6d68340c7"}, +] + +[package.dependencies] +numpy = ">=1.23" +packaging = ">=23.1" +pandas = ">=2.0" + +[package.extras] +accel = ["bottleneck", "flox", "numbagg", "opt-einsum", "scipy"] +complete = ["xarray[accel,dev,io,parallel,viz]"] +dev = ["hypothesis", "mypy", "pre-commit", "pytest", "pytest-cov", "pytest-env", "pytest-timeout", "pytest-xdist", "ruff", "xarray[complete]"] +io = ["cftime", "fsspec", "h5netcdf", "netCDF4", "pooch", "pydap", "scipy", "zarr"] +parallel = ["dask[complete]"] +viz = ["matplotlib", "nc-time-axis", "seaborn"] + [[package]] name = "zipp" version = "3.19.2" @@ -3719,4 +4174,4 @@ test = ["big-O", "importlib-resources", "jaraco.functools", "jaraco.itertools", [metadata] lock-version = "2.0" python-versions = ">=3.10, <3.13" -content-hash = "99b50a6c19e87ad973082c0a08edf2a6bce5fcce2d22bfb13627ab191ed5b9db" +content-hash = "418adafb7d89dadea27dcc452170f7b7d632a778d927ce9fe4e17a439839391b" diff --git a/pyproject.toml b/pyproject.toml index 86f99914..f9216dc3 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -73,6 +73,8 @@ optional = true [tool.poetry.group.tutorial.dependencies] ipykernel = ">=6.27.1" +linopy = {git = "git@github.com:glatterf42/linopy.git", rev = "enh/remove-numpy-pin", develop = true} +highspy = ">=1.7.2" [tool.poetry.scripts] ixmp4 = "ixmp4.__main__:app" From fcccc684fd50695feb0809ebbd166852875b5c75 Mon Sep 17 00:00:00 2001 From: Fridolin Glatter Date: Wed, 10 Jul 2024 16:21:22 +0200 Subject: [PATCH 15/50] Remove outdated comments --- tutorial/transport/linopy_model.py | 29 
+++-------------------------- 1 file changed, 3 insertions(+), 26 deletions(-) diff --git a/tutorial/transport/linopy_model.py b/tutorial/transport/linopy_model.py index aa4bfdd6..0fdeadf0 100644 --- a/tutorial/transport/linopy_model.py +++ b/tutorial/transport/linopy_model.py @@ -1,27 +1,3 @@ -# m = linopy.Model() -# i = {"Canning Plants": ["seattle", "san-diego"]} -# j = {"Markets": ["new-york", "chicago", "topeka"]} -# a = xr.DataArray([350, 600], coords=i, name="capacity of plant i in cases") -# b = xr.DataArray([325, 300, 275], coords=j, name="demand at market j in cases") -# d = xr.DataArray( -# [[2.5, 1.7, 1.8], [2.5, 1.8, 1.4]], -# coords=i | j, -# name="distance in thousands of miles", -# ) -# f = 90 # Freight in dollars per case per thousand miles -# c = d * f / 1000 -# c.name = "transport cost in thousands of dollars per case" -# x = m.add_variables(lower=0.0, coords=c.coords, name="Shipment quantities in cases") -# con = x.sum(dim="Markets") <= a -# con1 = m.add_constraints(con, name="Observe supply limit at plant i") -# con = x.sum(dim="Canning Plants") >= b -# con2 = m.add_constraints(con, name="Satisfy demand at market j") -# obj = c * x -# m.add_objective(obj) -# print(linopy.available_solvers) -# m.solve("highs") -# print(x.solution) - import linopy import pandas as pd @@ -71,11 +47,12 @@ def create_dantzig_model( lower=0.0, coords=[i_set, j_set], name="Shipment quantities in cases" ) + # The constraints don't seem to be typed correctly by linopy con = x.sum(dim="Markets") <= a_parameter - m.add_constraints(con, name="Observe supply limit at plant i") + m.add_constraints(con, name="Observe supply limit at plant i") # type: ignore con = x.sum(dim="Canning Plants") >= b_parameter - m.add_constraints(con, name="Satisfy demand at market j") + m.add_constraints(con, name="Satisfy demand at market j") # type: ignore obj = c.to_xarray() * x m.add_objective(obj) From 14b302ffff92dc9ae02d8b897ce382e443b2da58 Mon Sep 17 00:00:00 2001 From: Fridolin Glatter Date: Thu, 11 Jul 2024 11:59:48 +0200 Subject: [PATCH 16/50] Rename read_solution and other review suggestions --- tutorial/transport/linopy_model.py | 2 +- tutorial/transport/py_transport.ipynb | 46 ++++++++++++++------------- 2 files changed, 25 insertions(+), 23 deletions(-) diff --git a/tutorial/transport/linopy_model.py b/tutorial/transport/linopy_model.py index 0fdeadf0..58f2b516 100644 --- a/tutorial/transport/linopy_model.py +++ b/tutorial/transport/linopy_model.py @@ -60,7 +60,7 @@ def create_dantzig_model( return m -def store_dantzig_solution( +def read_dantzig_solution( model: linopy.Model, z: Variable, x: Variable, demand: Equation, supply: Equation ) -> None: # Handle objective diff --git a/tutorial/transport/py_transport.ipynb b/tutorial/transport/py_transport.ipynb index c7d6ca40..8f7d4de7 100644 --- a/tutorial/transport/py_transport.ipynb +++ b/tutorial/transport/py_transport.ipynb @@ -78,7 +78,7 @@ "source": [ "import ixmp4\n", "\n", - "mp = ixmp4.Platform(\"tutorial-test\")" + "platform = ixmp4.Platform(\"tutorial-test\")" ] }, { @@ -94,7 +94,7 @@ "metadata": {}, "outputs": [], "source": [ - "run = mp.runs.create(model=\"transport problem\", scenario=\"standard\")" + "run = platform.runs.create(model=\"transport problem\", scenario=\"standard\")" ] }, { @@ -213,29 +213,31 @@ "\n", "# Only needed once for each mp\n", "try:\n", - " cases = mp.units.get(\"cases\")\n", + " cases = platform.units.get(\"cases\")\n", "except Unit.NotFound:\n", - " cases = mp.units.create(\"cases\")\n", + " cases = 
platform.units.create(\"cases\")\n", "\n", "# capacity of plant i in cases\n", "# add parameter data as a dict\n", "a = run.optimization.parameters.create(name=\"a\", constrained_to_indexsets=[\"i\"])\n", "a_data = {\n", " \"i\": [\"seattle\", \"san-diego\"],\n", - " \"values\": [350, 600],\n", - " \"units\": [cases.name, cases.name],\n", + " \"value\": [350, 600],\n", + " \"unit\": [cases.name, cases.name],\n", "}\n", "a.add(data=a_data)\n", "\n", "# demand at market j in cases\n", "# add parameter data as a pd.DataFrame\n", "b = run.optimization.parameters.create(\"b\", constrained_to_indexsets=\"j\")\n", - "b_data = {\n", - " \"j\": [\"new-york\", \"chicago\", \"topeka\"],\n", - " \"values\": [325, 300, 275],\n", - " \"units\": [cases.name] * 3,\n", - "}\n", - "b_data = pd.DataFrame(b_data)\n", + "b_data = pd.DataFrame(\n", + " [\n", + " [\"new-york\", 325, cases.name],\n", + " [\"chicago\", 300, cases.name],\n", + " [\"topeka\", 275, cases.name],\n", + " ],\n", + " columns=[\"j\", \"value\", \"unit\"],\n", + ")\n", "b.add(b_data)" ] }, @@ -266,9 +268,9 @@ "outputs": [], "source": [ "try:\n", - " km = mp.units.get(\"km\")\n", + " km = platform.units.get(\"km\")\n", "except Unit.NotFound:\n", - " km = mp.units.create(\"km\")\n", + " km = platform.units.create(\"km\")\n", "\n", "# distance in thousands of miles\n", "d = run.optimization.parameters.create(\"d\", constrained_to_indexsets=[\"i\", \"j\"])\n", @@ -276,14 +278,14 @@ "d_data = {\n", " \"i\": [\"seattle\", \"seattle\", \"seattle\", \"san-diego\"],\n", " \"j\": [\"new-york\", \"chicago\", \"topeka\", \"new-york\"],\n", - " \"values\": [2.5, 1.7, 1.8, 2.5],\n", - " \"units\": [km.name] * 4,\n", + " \"value\": [2.5, 1.7, 1.8, 2.5],\n", + " \"unit\": [km.name] * 4,\n", "}\n", "d.add(d_data)\n", "\n", "# add other parameter data one by one\n", - "d.add({\"i\": [\"san-diego\"], \"j\": [\"chicago\"], \"values\": [1.8], \"units\": [\"km\"]})\n", - "d.add({\"i\": [\"san-diego\"], \"j\": [\"topeka\"], \"values\": [1.4], \"units\": [\"km\"]})" + "d.add({\"i\": [\"san-diego\"], \"j\": [\"chicago\"], \"value\": [1.8], \"unit\": [\"km\"]})\n", + "d.add({\"i\": [\"san-diego\"], \"j\": [\"topeka\"], \"value\": [1.4], \"unit\": [\"km\"]})" ] }, { @@ -303,9 +305,9 @@ "\n", "# TODO we could really use a units.get_or_create() function!\n", "try:\n", - " unit_cost_per_case = mp.units.get(\"USD/km\")\n", + " unit_cost_per_case = platform.units.get(\"USD/km\")\n", "except Unit.NotFound:\n", - " unit_cost_per_case = mp.units.create(\"USD/km\")\n", + " unit_cost_per_case = platform.units.create(\"USD/km\")\n", "\n", "f = run.optimization.scalars.create(name=\"f\", value=90, unit=unit_cost_per_case)" ] @@ -365,12 +367,12 @@ "metadata": {}, "outputs": [], "source": [ - "from tutorial.transport.linopy_model import create_dantzig_model, store_dantzig_solution\n", + "from tutorial.transport.linopy_model import create_dantzig_model, read_dantzig_solution\n", "\n", "m = create_dantzig_model(i=i, j=j, a=a, b=b, d=d, f=f)\n", "m.solve(\"highs\")\n", "\n", - "store_dantzig_solution(model=m, z=z, x=x, demand=demand, supply=supply)" + "read_dantzig_solution(model=m, z=z, x=x, demand=demand, supply=supply)" ] }, { From e27d0a691edd7833c41030afda040045fc173e10 Mon Sep 17 00:00:00 2001 From: Fridolin Glatter Date: Thu, 11 Jul 2024 12:05:40 +0200 Subject: [PATCH 17/50] Rename model file --- ...{linopy_model.py => dantzig_model_linopy.py} | 17 ++++++++++++----- tutorial/transport/py_transport.ipynb | 5 ++++- 2 files changed, 16 insertions(+), 6 deletions(-) rename 
tutorial/transport/{linopy_model.py => dantzig_model_linopy.py} (90%) diff --git a/tutorial/transport/linopy_model.py b/tutorial/transport/dantzig_model_linopy.py similarity index 90% rename from tutorial/transport/linopy_model.py rename to tutorial/transport/dantzig_model_linopy.py index 58f2b516..725a8e5c 100644 --- a/tutorial/transport/linopy_model.py +++ b/tutorial/transport/dantzig_model_linopy.py @@ -47,12 +47,19 @@ def create_dantzig_model( lower=0.0, coords=[i_set, j_set], name="Shipment quantities in cases" ) - # The constraints don't seem to be typed correctly by linopy - con = x.sum(dim="Markets") <= a_parameter - m.add_constraints(con, name="Observe supply limit at plant i") # type: ignore + m.add_constraints( + lhs=x.sum(dim="Markets"), + sign="<=", + rhs=a_parameter, + name="Observe supply limit at plant i", + ) - con = x.sum(dim="Canning Plants") >= b_parameter - m.add_constraints(con, name="Satisfy demand at market j") # type: ignore + m.add_constraints( + lhs=x.sum(dim="Canning Plants"), + sign=">=", + rhs=b_parameter, + name="Satisfy demand at market j", + ) obj = c.to_xarray() * x m.add_objective(obj) diff --git a/tutorial/transport/py_transport.ipynb b/tutorial/transport/py_transport.ipynb index 8f7d4de7..9471dbf6 100644 --- a/tutorial/transport/py_transport.ipynb +++ b/tutorial/transport/py_transport.ipynb @@ -367,7 +367,10 @@ "metadata": {}, "outputs": [], "source": [ - "from tutorial.transport.linopy_model import create_dantzig_model, read_dantzig_solution\n", + "from tutorial.transport.dantzig_model_linopy import (\n", + " create_dantzig_model,\n", + " read_dantzig_solution,\n", + ")\n", "\n", "m = create_dantzig_model(i=i, j=j, a=a, b=b, d=d, f=f)\n", "m.solve(\"highs\")\n", From 045fd64851cde29f01375f830823ac010d2dc967 Mon Sep 17 00:00:00 2001 From: Fridolin Glatter Date: Thu, 11 Jul 2024 12:10:35 +0200 Subject: [PATCH 18/50] Remove superfluous auxiliary function --- tutorial/transport/dantzig_model_linopy.py | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/tutorial/transport/dantzig_model_linopy.py b/tutorial/transport/dantzig_model_linopy.py index 725a8e5c..c44b77a7 100644 --- a/tutorial/transport/dantzig_model_linopy.py +++ b/tutorial/transport/dantzig_model_linopy.py @@ -5,10 +5,6 @@ from ixmp4.core import OptimizationVariable as Variable -def create_set(indexset: IndexSet, name: str | None = None) -> pd.Index: - return pd.Index(indexset.elements, name=name or indexset.name) - - def create_parameter( parameter: Parameter, index: pd.Index | list[pd.Index], name: str ) -> pd.Series: @@ -27,8 +23,8 @@ def create_dantzig_model( f: Scalar, ) -> linopy.Model: m = linopy.Model() - i_set = create_set(indexset=i, name="Canning Plants") - j_set = create_set(indexset=j, name="Markets") + i_set = pd.Index(i.elements, name="Canning Plants") + j_set = pd.Index(j.elements, name="Markets") a_parameter = create_parameter( parameter=a, index=i_set, name="capacity of plant i in cases" ) From 93ec91fad1c94999e3d59c2958a02a2777718694 Mon Sep 17 00:00:00 2001 From: Fridolin Glatter Date: Thu, 11 Jul 2024 12:18:20 +0200 Subject: [PATCH 19/50] Refactor to only passing run for interface simplicity --- tutorial/transport/dantzig_model_linopy.py | 60 ++++++++++------------ tutorial/transport/py_transport.ipynb | 4 +- 2 files changed, 30 insertions(+), 34 deletions(-) diff --git a/tutorial/transport/dantzig_model_linopy.py b/tutorial/transport/dantzig_model_linopy.py index c44b77a7..4bc15ddd 100644 --- a/tutorial/transport/dantzig_model_linopy.py +++ 
b/tutorial/transport/dantzig_model_linopy.py @@ -1,8 +1,7 @@ import linopy import pandas as pd -from ixmp4.core import Equation, IndexSet, Parameter, Scalar -from ixmp4.core import OptimizationVariable as Variable +from ixmp4.core import Parameter, Run def create_parameter( @@ -14,46 +13,43 @@ def create_parameter( return pd.Series(data=parameter.values, index=index, name=name) -def create_dantzig_model( - i: IndexSet, - j: IndexSet, - a: Parameter, - b: Parameter, - d: Parameter, - f: Scalar, -) -> linopy.Model: +def create_dantzig_model(run: Run) -> linopy.Model: m = linopy.Model() - i_set = pd.Index(i.elements, name="Canning Plants") - j_set = pd.Index(j.elements, name="Markets") - a_parameter = create_parameter( - parameter=a, index=i_set, name="capacity of plant i in cases" + i = pd.Index(run.optimization.indexsets.get("i").elements, name="Canning Plants") + j = pd.Index(run.optimization.indexsets.get("j").elements, name="Markets") + a = create_parameter( + parameter=run.optimization.parameters.get("a"), + index=i, + name="capacity of plant i in cases", ) - b_parameter = create_parameter( - parameter=b, index=j_set, name="demand at market j in cases" + b = create_parameter( + parameter=run.optimization.parameters.get("b"), + index=j, + name="demand at market j in cases", ) - d_parameter = create_parameter( - parameter=d, index=[i_set, j_set], name="distance in thousands of miles" + d = create_parameter( + parameter=run.optimization.parameters.get("d"), + index=[i, j], + name="distance in thousands of miles", ) - f_scalar = f.value + f = run.optimization.scalars.get("f").value - c = d_parameter * f_scalar / 1000 + c = d * f / 1000 c.name = "transport cost in thousands of dollars per case" - x = m.add_variables( - lower=0.0, coords=[i_set, j_set], name="Shipment quantities in cases" - ) + x = m.add_variables(lower=0.0, coords=[i, j], name="Shipment quantities in cases") m.add_constraints( lhs=x.sum(dim="Markets"), sign="<=", - rhs=a_parameter, + rhs=a, name="Observe supply limit at plant i", ) m.add_constraints( lhs=x.sum(dim="Canning Plants"), sign=">=", - rhs=b_parameter, + rhs=b, name="Satisfy demand at market j", ) @@ -63,14 +59,14 @@ def create_dantzig_model( return m -def read_dantzig_solution( - model: linopy.Model, z: Variable, x: Variable, demand: Equation, supply: Equation -) -> None: +def read_dantzig_solution(model: linopy.Model, run: Run) -> None: # Handle objective # TODO adding fake marginals here until Variables don't require this column anymore # Can't add units if this column was not declared above. Better stored as Scalar # maybe? 
- z.add(data={"levels": [model.objective.value], "marginals": [-0.0]}) + run.optimization.variables.get("z").add( + data={"levels": [model.objective.value], "marginals": [-0.0]} + ) # Handle shipment quantities x_data: pd.DataFrame = model.solution.to_dataframe() @@ -86,7 +82,7 @@ def read_dantzig_solution( # x_data["units"] = "cases" # TODO Again setting fake marginals until they are optional for variables x_data["marginals"] = -0.0 - x.add(data=x_data) + run.optimization.variables.get("x").add(data=x_data) # The following don't seem to be typed correctly by linopy # Add supply data @@ -95,7 +91,7 @@ def read_dantzig_solution( "levels": model.constraints["Observe supply limit at plant i"].data.rhs, # type: ignore "marginals": model.constraints["Observe supply limit at plant i"].data.dual, # type: ignore } - supply.add(data=supply_data) + run.optimization.equations.get("supply").add(data=supply_data) # Add demand data demand_data = { @@ -103,4 +99,4 @@ def read_dantzig_solution( "levels": model.constraints["Satisfy demand at market j"].data.rhs, # type: ignore "marginals": model.constraints["Satisfy demand at market j"].data.dual, # type: ignore } - demand.add(data=demand_data) + run.optimization.equations.get("demand").add(data=demand_data) diff --git a/tutorial/transport/py_transport.ipynb b/tutorial/transport/py_transport.ipynb index 9471dbf6..29ff3aa6 100644 --- a/tutorial/transport/py_transport.ipynb +++ b/tutorial/transport/py_transport.ipynb @@ -372,10 +372,10 @@ " read_dantzig_solution,\n", ")\n", "\n", - "m = create_dantzig_model(i=i, j=j, a=a, b=b, d=d, f=f)\n", + "m = create_dantzig_model(run=run)\n", "m.solve(\"highs\")\n", "\n", - "read_dantzig_solution(model=m, z=z, x=x, demand=demand, supply=supply)" + "read_dantzig_solution(model=m, run=run)" ] }, { From 5a794672d312876ee174b0656b21f10820b72a93 Mon Sep 17 00:00:00 2001 From: Fridolin Glatter Date: Thu, 11 Jul 2024 12:24:54 +0200 Subject: [PATCH 20/50] Revert back to parameter.units and values as they are required --- tutorial/transport/py_transport.ipynb | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/tutorial/transport/py_transport.ipynb b/tutorial/transport/py_transport.ipynb index 29ff3aa6..1cbd245c 100644 --- a/tutorial/transport/py_transport.ipynb +++ b/tutorial/transport/py_transport.ipynb @@ -222,8 +222,8 @@ "a = run.optimization.parameters.create(name=\"a\", constrained_to_indexsets=[\"i\"])\n", "a_data = {\n", " \"i\": [\"seattle\", \"san-diego\"],\n", - " \"value\": [350, 600],\n", - " \"unit\": [cases.name, cases.name],\n", + " \"values\": [350, 600],\n", + " \"units\": [cases.name, cases.name],\n", "}\n", "a.add(data=a_data)\n", "\n", @@ -236,7 +236,7 @@ " [\"chicago\", 300, cases.name],\n", " [\"topeka\", 275, cases.name],\n", " ],\n", - " columns=[\"j\", \"value\", \"unit\"],\n", + " columns=[\"j\", \"values\", \"units\"],\n", ")\n", "b.add(b_data)" ] @@ -278,14 +278,14 @@ "d_data = {\n", " \"i\": [\"seattle\", \"seattle\", \"seattle\", \"san-diego\"],\n", " \"j\": [\"new-york\", \"chicago\", \"topeka\", \"new-york\"],\n", - " \"value\": [2.5, 1.7, 1.8, 2.5],\n", - " \"unit\": [km.name] * 4,\n", + " \"values\": [2.5, 1.7, 1.8, 2.5],\n", + " \"units\": [km.name] * 4,\n", "}\n", "d.add(d_data)\n", "\n", "# add other parameter data one by one\n", - "d.add({\"i\": [\"san-diego\"], \"j\": [\"chicago\"], \"value\": [1.8], \"unit\": [\"km\"]})\n", - "d.add({\"i\": [\"san-diego\"], \"j\": [\"topeka\"], \"value\": [1.4], \"unit\": [\"km\"]})" + "d.add({\"i\": [\"san-diego\"], \"j\": 
[\"chicago\"], \"values\": [1.8], \"units\": [\"km\"]})\n", + "d.add({\"i\": [\"san-diego\"], \"j\": [\"topeka\"], \"values\": [1.4], \"units\": [\"km\"]})" ] }, { From 5efa1cb3b561edbdce8df09523603c55b8a799de Mon Sep 17 00:00:00 2001 From: Fridolin Glatter Date: Thu, 11 Jul 2024 14:32:41 +0200 Subject: [PATCH 21/50] Remove superfluous variable --- .../tutorial}/transport/dantzig_model_linopy.py | 0 tutorial/transport/py_transport.ipynb | 5 +---- 2 files changed, 1 insertion(+), 4 deletions(-) rename {tutorial => tests/tutorial}/transport/dantzig_model_linopy.py (100%) diff --git a/tutorial/transport/dantzig_model_linopy.py b/tests/tutorial/transport/dantzig_model_linopy.py similarity index 100% rename from tutorial/transport/dantzig_model_linopy.py rename to tests/tutorial/transport/dantzig_model_linopy.py diff --git a/tutorial/transport/py_transport.ipynb b/tutorial/transport/py_transport.ipynb index 1cbd245c..0509d4da 100644 --- a/tutorial/transport/py_transport.ipynb +++ b/tutorial/transport/py_transport.ipynb @@ -172,10 +172,7 @@ "metadata": {}, "outputs": [], "source": [ - "run.optimization.indexsets.create(\"j\").add([\"new-york\", \"chicago\", \"topeka\"])\n", - "\n", - "# Get the resulting indexset for later usage:\n", - "j = run.optimization.indexsets.get(\"j\")" + "run.optimization.indexsets.create(\"j\").add([\"new-york\", \"chicago\", \"topeka\"])" ] }, { From 04e60bc298d4c47edc7f856ae54532109b506274 Mon Sep 17 00:00:00 2001 From: Fridolin Glatter Date: Thu, 11 Jul 2024 15:07:02 +0200 Subject: [PATCH 22/50] Add tests for transport tutorial * Exclude non-working one for now * Duplicate model file :( --- tests/tutorial/__init__.py | 0 tests/tutorial/transport/__init__.py | 0 .../transport/dantzig_model_linopy.py | 28 +++- .../transport/test_dantzig_model_linopy.py | 158 ++++++++++++++++++ tutorial/transport/dantzig_model_linopy.py | 102 +++++++++++ tutorial/transport/py_transport.ipynb | 9 + 6 files changed, 293 insertions(+), 4 deletions(-) create mode 100644 tests/tutorial/__init__.py create mode 100644 tests/tutorial/transport/__init__.py create mode 100644 tests/tutorial/transport/test_dantzig_model_linopy.py create mode 100644 tutorial/transport/dantzig_model_linopy.py diff --git a/tests/tutorial/__init__.py b/tests/tutorial/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/tests/tutorial/transport/__init__.py b/tests/tutorial/transport/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/tests/tutorial/transport/dantzig_model_linopy.py b/tests/tutorial/transport/dantzig_model_linopy.py index 4bc15ddd..7785de92 100644 --- a/tests/tutorial/transport/dantzig_model_linopy.py +++ b/tests/tutorial/transport/dantzig_model_linopy.py @@ -88,15 +88,35 @@ def read_dantzig_solution(model: linopy.Model, run: Run) -> None: # Add supply data supply_data = { "i": ["seattle", "san-diego"], - "levels": model.constraints["Observe supply limit at plant i"].data.rhs, # type: ignore - "marginals": model.constraints["Observe supply limit at plant i"].data.dual, # type: ignore + "levels": [ + level + for level in model.constraints["Observe supply limit at plant i"] + .data.rhs.to_pandas() + .values + ], # type: ignore + "marginals": [ + marginal + for marginal in model.constraints["Observe supply limit at plant i"] + .data.dual.to_pandas() + .values + ], # type: ignore } run.optimization.equations.get("supply").add(data=supply_data) # Add demand data demand_data = { "j": ["new-york", "chicago", "topeka"], - "levels": model.constraints["Satisfy demand at 
market j"].data.rhs, # type: ignore - "marginals": model.constraints["Satisfy demand at market j"].data.dual, # type: ignore + "levels": [ + level + for level in model.constraints["Satisfy demand at market j"] + .data.rhs.to_pandas() + .values + ], # type: ignore + "marginals": [ + marginal + for marginal in model.constraints["Satisfy demand at market j"] + .data.dual.to_pandas() + .values + ], # type: ignore } run.optimization.equations.get("demand").add(data=demand_data) diff --git a/tests/tutorial/transport/test_dantzig_model_linopy.py b/tests/tutorial/transport/test_dantzig_model_linopy.py new file mode 100644 index 00000000..f25cec39 --- /dev/null +++ b/tests/tutorial/transport/test_dantzig_model_linopy.py @@ -0,0 +1,158 @@ +import pandas as pd +import xarray as xr + +from ixmp4 import Platform +from ixmp4.core import Run, Unit + +from ...utils import all_platforms +from .dantzig_model_linopy import ( + create_dantzig_model, + read_dantzig_solution, +) + + +def create_dantzig_run(mp: Platform) -> Run: + """Create a Run for the transport tutorial. + + Please see the tutorial file for explanation. + """ + # Only needed once for each mp + try: + cases = mp.units.get("cases") + km = mp.units.get("km") + unit_cost_per_case = mp.units.get("USD/km") + except Unit.NotFound: + cases = mp.units.create("cases") + km = mp.units.create("km") + unit_cost_per_case = mp.units.create("USD/km") + + # Create run and all data sets + run = mp.runs.create(model="transport problem", scenario="standard") + a_data = { + "i": ["seattle", "san-diego"], + "values": [350, 600], + "units": [cases.name, cases.name], + } + b_data = pd.DataFrame( + [ + ["new-york", 325, cases.name], + ["chicago", 300, cases.name], + ["topeka", 275, cases.name], + ], + columns=["j", "values", "units"], + ) + d_data = { + "i": ["seattle", "seattle", "seattle", "san-diego", "san-diego", "san-diego"], + "j": ["new-york", "chicago", "topeka", "new-york", "chicago", "topeka"], + "values": [2.5, 1.7, 1.8, 2.5, 1.8, 1.4], + "units": [km.name] * 6, + } + + # Add all data to the run + run.optimization.indexsets.create("i").add(["seattle", "san-diego"]) + run.optimization.indexsets.create("j").add(["new-york", "chicago", "topeka"]) + run.optimization.parameters.create(name="a", constrained_to_indexsets=["i"]).add( + data=a_data + ) + run.optimization.parameters.create("b", constrained_to_indexsets=["j"]).add( + data=b_data + ) + run.optimization.parameters.create("d", constrained_to_indexsets=["i", "j"]).add( + data=d_data + ) + run.optimization.scalars.create(name="f", value=90, unit=unit_cost_per_case) + + # Create further optimization items to store solution data + run.optimization.variables.create("z") + run.optimization.variables.create("x", constrained_to_indexsets=["i", "j"]) + run.optimization.equations.create("supply", constrained_to_indexsets=["i"]) + run.optimization.equations.create("demand", constrained_to_indexsets=["j"]) + + return run + + +@all_platforms +class TestTransportTutorialLinopy: + def test_create_dantzig_model(self, test_mp, request): + test_mp: Platform = request.getfixturevalue(test_mp) # type: ignore + run = create_dantzig_run(test_mp) + model = create_dantzig_model(run) + + # Set expectations + expected = { + "supply_constraint_rhs": xr.DataArray([350.0, 600.0]), + "supply_constraint_sign": xr.DataArray(["<=", "<="]), + "demand_constraint_rhs": xr.DataArray([325.0, 300.0, 275.0]), + "demand_constraint_sign": xr.DataArray([">=", ">=", ">="]), + "objective_coeffs": xr.DataArray( + [0.162, 0.225, 0.126, 0.153, 0.225, 
0.162] + ), + } + + assert model.variables["Shipment quantities in cases"].dims == ( + "Canning Plants", + "Markets", + ) + assert model.constraints["Observe supply limit at plant i"].coord_dims == ( + "Canning Plants", + ) + assert ( + model.constraints["Observe supply limit at plant i"].data.rhs.values + == expected["supply_constraint_rhs"] + ).all() + assert ( + model.constraints["Observe supply limit at plant i"].data.sign.values + == expected["supply_constraint_sign"] + ).all() + assert model.constraints["Satisfy demand at market j"].coord_dims == ( + "Markets", + ) + assert ( + model.constraints["Satisfy demand at market j"].data.rhs.values + == expected["demand_constraint_rhs"] + ).all() + assert ( + model.constraints["Satisfy demand at market j"].data.sign.values + == expected["demand_constraint_sign"] + ).all() + assert model.objective.sense == "min" + + # TODO Currently doesn't work though they should be equal + # assert ( + # model.objective.coeffs.to_pandas().values == expected["objective_coeffs"] + # ).all() + + def test_read_dantzig_solution(self, test_mp, request): + test_mp: Platform = request.getfixturevalue(test_mp) # type: ignore + + # Could we store this as class attributes to avoid repetition? + run = create_dantzig_run(test_mp) + model = create_dantzig_model(run) + model.solve("highs") + read_dantzig_solution(model=model, run=run) + + # Assert what we want to show in the tutorial + assert run.optimization.variables.get("z").levels == [153.675] + assert run.optimization.variables.get("x").data == { + "i": [ + "seattle", + "seattle", + "seattle", + "san-diego", + "san-diego", + "san-diego", + ], + "j": ["new-york", "chicago", "topeka", "new-york", "chicago", "topeka"], + "levels": [0.0, 300.0, 0.0, 325.0, 0.0, 275.0], + "marginals": [-0.0, -0.0, -0.0, -0.0, -0.0, -0.0], + } + assert run.optimization.equations.get("demand").data == { + "j": ["new-york", "chicago", "topeka"], + "levels": [325.0, 300.0, 275.0], + "marginals": [0.225, 0.153, 0.126], + } + assert run.optimization.equations.get("supply").data == { + "i": ["seattle", "san-diego"], + "levels": [350.0, 600.0], + "marginals": [-0.0, -0.0], + } diff --git a/tutorial/transport/dantzig_model_linopy.py b/tutorial/transport/dantzig_model_linopy.py new file mode 100644 index 00000000..4bc15ddd --- /dev/null +++ b/tutorial/transport/dantzig_model_linopy.py @@ -0,0 +1,102 @@ +import linopy +import pandas as pd + +from ixmp4.core import Parameter, Run + + +def create_parameter( + parameter: Parameter, index: pd.Index | list[pd.Index], name: str +) -> pd.Series: + if isinstance(index, list): + index = pd.MultiIndex.from_product(index) + + return pd.Series(data=parameter.values, index=index, name=name) + + +def create_dantzig_model(run: Run) -> linopy.Model: + m = linopy.Model() + i = pd.Index(run.optimization.indexsets.get("i").elements, name="Canning Plants") + j = pd.Index(run.optimization.indexsets.get("j").elements, name="Markets") + a = create_parameter( + parameter=run.optimization.parameters.get("a"), + index=i, + name="capacity of plant i in cases", + ) + b = create_parameter( + parameter=run.optimization.parameters.get("b"), + index=j, + name="demand at market j in cases", + ) + d = create_parameter( + parameter=run.optimization.parameters.get("d"), + index=[i, j], + name="distance in thousands of miles", + ) + f = run.optimization.scalars.get("f").value + + c = d * f / 1000 + c.name = "transport cost in thousands of dollars per case" + + x = m.add_variables(lower=0.0, coords=[i, j], name="Shipment quantities in 
cases") + + m.add_constraints( + lhs=x.sum(dim="Markets"), + sign="<=", + rhs=a, + name="Observe supply limit at plant i", + ) + + m.add_constraints( + lhs=x.sum(dim="Canning Plants"), + sign=">=", + rhs=b, + name="Satisfy demand at market j", + ) + + obj = c.to_xarray() * x + m.add_objective(obj) + + return m + + +def read_dantzig_solution(model: linopy.Model, run: Run) -> None: + # Handle objective + # TODO adding fake marginals here until Variables don't require this column anymore + # Can't add units if this column was not declared above. Better stored as Scalar + # maybe? + run.optimization.variables.get("z").add( + data={"levels": [model.objective.value], "marginals": [-0.0]} + ) + + # Handle shipment quantities + x_data: pd.DataFrame = model.solution.to_dataframe() + x_data.reset_index(inplace=True) + x_data.rename( + columns={ + "Shipment quantities in cases": "levels", + "Canning Plants": "i", + "Markets": "j", + }, + inplace=True, + ) + # x_data["units"] = "cases" + # TODO Again setting fake marginals until they are optional for variables + x_data["marginals"] = -0.0 + run.optimization.variables.get("x").add(data=x_data) + + # The following don't seem to be typed correctly by linopy + # Add supply data + supply_data = { + "i": ["seattle", "san-diego"], + "levels": model.constraints["Observe supply limit at plant i"].data.rhs, # type: ignore + "marginals": model.constraints["Observe supply limit at plant i"].data.dual, # type: ignore + } + run.optimization.equations.get("supply").add(data=supply_data) + + # Add demand data + demand_data = { + "j": ["new-york", "chicago", "topeka"], + "levels": model.constraints["Satisfy demand at market j"].data.rhs, # type: ignore + "marginals": model.constraints["Satisfy demand at market j"].data.dual, # type: ignore + } + run.optimization.equations.get("demand").add(data=demand_data) diff --git a/tutorial/transport/py_transport.ipynb b/tutorial/transport/py_transport.ipynb index 0509d4da..7a195962 100644 --- a/tutorial/transport/py_transport.ipynb +++ b/tutorial/transport/py_transport.ipynb @@ -421,6 +421,15 @@ "# display the quantities and marginals (shadow prices) of the supply balance constraints\n", "supply.data" ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "m.objective.coeffs.to_pandas().values" + ] } ], "metadata": { From c380dee090c4102d2b24d98358978ebf616b20f5 Mon Sep 17 00:00:00 2001 From: Fridolin Glatter Date: Wed, 24 Jul 2024 13:28:15 +0200 Subject: [PATCH 23/50] Install tutorial dependencies for this PR --- .github/workflows/pytest.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/pytest.yaml b/.github/workflows/pytest.yaml index e8cb5c5b..9f871f34 100644 --- a/.github/workflows/pytest.yaml +++ b/.github/workflows/pytest.yaml @@ -107,7 +107,7 @@ jobs: #------------------------------------------------ - name: Install dependencies if: steps.cached-poetry-dependencies.outputs.cache-hit != 'true' - run: poetry install --no-interaction --no-root --with dev,server + run: poetry install --no-interaction --no-root --with dev,server,tutorial - name: Install PyArrow if: ${{ matrix.with-pyarrow }} @@ -174,7 +174,7 @@ jobs: #------------------------------------------------ - name: Install dependencies if: steps.cached-poetry-dependencies.outputs.cache-hit != 'true' - run: poetry install --no-interaction --no-root --with dev,server + run: poetry install --no-interaction --no-root --with dev,server,tutorial #------------------------ # 
install root project From 53925ddd12066ba9d6264a2860136798c428b997 Mon Sep 17 00:00:00 2001 From: Fridolin Glatter Date: Wed, 24 Jul 2024 14:18:12 +0200 Subject: [PATCH 24/50] Allow error tolerance to pass test --- .../transport/test_dantzig_model_linopy.py | 45 ++++++++++++------- 1 file changed, 28 insertions(+), 17 deletions(-) diff --git a/tests/tutorial/transport/test_dantzig_model_linopy.py b/tests/tutorial/transport/test_dantzig_model_linopy.py index f25cec39..e3755bc8 100644 --- a/tests/tutorial/transport/test_dantzig_model_linopy.py +++ b/tests/tutorial/transport/test_dantzig_model_linopy.py @@ -1,3 +1,4 @@ +import numpy as np import pandas as pd import xarray as xr @@ -80,13 +81,14 @@ def test_create_dantzig_model(self, test_mp, request): # Set expectations expected = { - "supply_constraint_rhs": xr.DataArray([350.0, 600.0]), "supply_constraint_sign": xr.DataArray(["<=", "<="]), - "demand_constraint_rhs": xr.DataArray([325.0, 300.0, 275.0]), "demand_constraint_sign": xr.DataArray([">=", ">=", ">="]), - "objective_coeffs": xr.DataArray( - [0.162, 0.225, 0.126, 0.153, 0.225, 0.162] - ), + # TODO enable this once #95 is merged; allows removal of xarray from file + # "supply_constraint_sign": np.array(["<=", "<="]), + # "demand_constraint_sign": np.array([">=", ">=", ">="]), + "supply_constraint_rhs": pd.Series([350.0, 600.0]), + "demand_constraint_rhs": pd.Series([325.0, 300.0, 275.0]), + "objective_coeffs": pd.Series([0.162, 0.225, 0.126, 0.153, 0.225, 0.162]), } assert model.variables["Shipment quantities in cases"].dims == ( @@ -96,31 +98,40 @@ def test_create_dantzig_model(self, test_mp, request): assert model.constraints["Observe supply limit at plant i"].coord_dims == ( "Canning Plants", ) - assert ( - model.constraints["Observe supply limit at plant i"].data.rhs.values - == expected["supply_constraint_rhs"] - ).all() + assert np.allclose( + model.constraints["Observe supply limit at plant i"].data.rhs.values, + expected["supply_constraint_rhs"], + ) assert ( model.constraints["Observe supply limit at plant i"].data.sign.values == expected["supply_constraint_sign"] ).all() + # TODO enable this once #95 is merged + # assert np.strings.equal( + # model.constraints["Observe supply limit at plant i"].data.sign.values, + # expected["supply_constraint_sign"], + # ).all() assert model.constraints["Satisfy demand at market j"].coord_dims == ( "Markets", ) - assert ( - model.constraints["Satisfy demand at market j"].data.rhs.values - == expected["demand_constraint_rhs"] - ).all() + assert np.allclose( + model.constraints["Satisfy demand at market j"].data.rhs.values, + expected["demand_constraint_rhs"], + ) assert ( model.constraints["Satisfy demand at market j"].data.sign.values == expected["demand_constraint_sign"] ).all() + # TODO enable this once #95 is merged + # assert np.strings.equal( + # model.constraints["Satisfy demand at market j"].data.sign.values, + # expected["demand_constraint_sign"], + # ).all() assert model.objective.sense == "min" - # TODO Currently doesn't work though they should be equal - # assert ( - # model.objective.coeffs.to_pandas().values == expected["objective_coeffs"] - # ).all() + assert np.allclose( + model.objective.coeffs.to_pandas(), expected["objective_coeffs"] + ) def test_read_dantzig_solution(self, test_mp, request): test_mp: Platform = request.getfixturevalue(test_mp) # type: ignore From 4f65e6b801ad41c235d56261fc031c94e4f26351 Mon Sep 17 00:00:00 2001 From: Fridolin Glatter Date: Wed, 24 Jul 2024 14:25:16 +0200 Subject: [PATCH 25/50] Allow 
linopy version with numpy pin :( --- poetry.lock | 20 ++++++++------------ pyproject.toml | 3 ++- 2 files changed, 10 insertions(+), 13 deletions(-) diff --git a/poetry.lock b/poetry.lock index 0354024a..ea4f5381 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1345,19 +1345,21 @@ files = [ [[package]] name = "linopy" -version = "0.1.dev1049+g5e0c68e" +version = "0.3.13" description = "Linear optimization with N-D labeled arrays in Python" optional = false python-versions = ">=3.9" -files = [] -develop = true +files = [ + {file = "linopy-0.3.13-py3-none-any.whl", hash = "sha256:7c6a9e012cdbcc25901686bcc5d5b9fb70a4ad03e61529fb3d1b72339c4b0378"}, + {file = "linopy-0.3.13.tar.gz", hash = "sha256:aacc5868136818952bf3855f733b0384f94c13038f29420485d832e2089b4154"}, +] [package.dependencies] bottleneck = "*" dask = ">=0.18.0" deprecation = "*" numexpr = "*" -numpy = "*" +numpy = "<2.0" polars = "*" scipy = "*" toolz = "*" @@ -1366,15 +1368,9 @@ xarray = ">=2024.2.0" [package.extras] dev = ["gurobipy", "highspy", "netcdf4", "paramiko", "pre-commit", "pytest", "pytest-cov", "types-paramiko"] -docs = ["gurobipy (==11.0.2)", "ipykernel (==6.29.5)", "ipython (==8.26.0)", "matplotlib (==3.9.1)", "nbsphinx (==0.9.4)", "nbsphinx-link (==1.3.0)", "numpydoc (==1.7.0)", "sphinx (==7.3.7)", "sphinx_book_theme (==1.1.3)", "sphinx_rtd_theme (==2.0.0)"] +docs = ["gurobipy (==11.0.2)", "ipykernel (==6.29.5)", "ipython (==8.26.0)", "matplotlib (==3.9.1)", "nbsphinx (==0.9.4)", "nbsphinx-link (==1.3.0)", "numpydoc (==1.7.0)", "sphinx (==7.3.7)", "sphinx-book-theme (==1.1.3)", "sphinx-rtd-theme (==2.0.0)"] solvers = ["coptpy", "cplex", "gurobipy", "highspy (>=1.5.0)", "highspy (>=1.7.1)", "mindoptpy", "mosek", "pyscipopt", "xpress"] -[package.source] -type = "git" -url = "git@github.com:glatterf42/linopy.git" -reference = "enh/remove-numpy-pin" -resolved_reference = "5e0c68e5daae4d77b1879090760023677866da62" - [[package]] name = "locket" version = "1.0.0" @@ -4174,4 +4170,4 @@ test = ["big-O", "importlib-resources", "jaraco.functools", "jaraco.itertools", [metadata] lock-version = "2.0" python-versions = ">=3.10, <3.13" -content-hash = "418adafb7d89dadea27dcc452170f7b7d632a778d927ce9fe4e17a439839391b" +content-hash = "064d839ad7e0edd3efec61b76962ea03827acef8c0fd0b6094750304f607948e" diff --git a/pyproject.toml b/pyproject.toml index f9216dc3..47991264 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -73,7 +73,8 @@ optional = true [tool.poetry.group.tutorial.dependencies] ipykernel = ">=6.27.1" -linopy = {git = "git@github.com:glatterf42/linopy.git", rev = "enh/remove-numpy-pin", develop = true} +# linopy = {git = "git@github.com:glatterf42/linopy.git", rev = "enh/remove-numpy-pin", develop = true} +linopy = ">=0.3.10" highspy = ">=1.7.2" [tool.poetry.scripts] From fcf866b9c39c4878939e8cc37652425e76e79903 Mon Sep 17 00:00:00 2001 From: Fridolin Glatter Date: Fri, 2 Aug 2024 13:28:52 +0200 Subject: [PATCH 26/50] Exclude untyped linopy from mypy --- pyproject.toml | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 47991264..c790b9d0 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -73,7 +73,6 @@ optional = true [tool.poetry.group.tutorial.dependencies] ipykernel = ">=6.27.1" -# linopy = {git = "git@github.com:glatterf42/linopy.git", rev = "enh/remove-numpy-pin", develop = true} linopy = ">=0.3.10" highspy = ">=1.7.2" @@ -97,7 +96,7 @@ implicit_reexport = true plugins = ['sqlalchemy.ext.mypy.plugin'] [[tool.mypy.overrides]] -module = ["pandas", 
"uvicorn.workers", "sqlalchemy_utils"] +module = ["linopy", "pandas", "uvicorn.workers", "sqlalchemy_utils"] ignore_missing_imports = true [tool.ruff] From 38806cf4cea020a1a854c3e975de11302d0e5042 Mon Sep 17 00:00:00 2001 From: Fridolin Glatter Date: Fri, 2 Aug 2024 13:42:03 +0200 Subject: [PATCH 27/50] Rename transport file for consistency --- ...transport.ipynb => linopy_transport.ipynb} | 117 +++++++++++------- 1 file changed, 71 insertions(+), 46 deletions(-) rename tutorial/transport/{py_transport.ipynb => linopy_transport.ipynb} (75%) diff --git a/tutorial/transport/py_transport.ipynb b/tutorial/transport/linopy_transport.ipynb similarity index 75% rename from tutorial/transport/py_transport.ipynb rename to tutorial/transport/linopy_transport.ipynb index 7a195962..17ace268 100644 --- a/tutorial/transport/py_transport.ipynb +++ b/tutorial/transport/linopy_transport.ipynb @@ -11,12 +11,12 @@ "## Aim and scope of the tutorial\n", "\n", "This tutorial takes you through the steps to import the data for a very simple optimization model\n", - "and solve it using the **ixmp4**-GAMS interface.\n", + "and solve it using the **ixmp4**-linopy interface.\n", "\n", - "We use Dantzig's transport problem, which is also used as the standard GAMS tutorial.\n", + "We use Dantzig's transport problem, which is also used as a [tutorial for linopy](https://linopy.readthedocs.io/en/latest/transport-tutorial.html).\n", "This problem finds a least cost shipping schedule that meets requirements at markets and supplies at factories.\n", "\n", - "If you are not familiar with GAMS, please take a minute to look at the [transport.gms](transport.gms) code.\n", + "If you are more familiar with GAMS, you can find the implementation in [transport.gms](transport.gms).\n", "\n", "For reference of the transport problem, see:\n", "> Dantzig, G B, Chapter 3.3. In Linear Programming and Extensions. \n", @@ -32,22 +32,22 @@ "\n", "The steps in the tutorial are the following:\n", "\n", - "0. Launch an **ixmp4.Platform** instance and initialize a new **ixmp4.Run**\n", - "0. Define the **sets and parameters** in the scenario and save the data to the platform\n", - "0. Initialize **variables and equations** to import the solution from GAMS\n", - "0. Call GAMS to **solve the scenario** (export to GAMS input gdx, execute, read solution from output gdx)\n", - "0. Display the **solution** (variables and equation)" + "0. Launch an `ixmp4.Platform` instance and initialize a new `ixmp4.Run`.\n", + "0. Define the `Set`s and `Parameter`s in the scenario and save the data to the platform.\n", + "0. Initialize `Variable`s and `Equation`s to import the solution from GAMS.\n", + "0. Call GAMS to **solve the scenario** (export to GAMS input gdx, execute, read solution from output gdx).\n", + "0. Display the **solution** (variables and equations)." ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "## Launching the *Platform* and initializing a new *Run*\n", + "## Launching the `Platform` and initializing a new `Run`\n", "\n", - "A **Platform** is the connection to the database that holds all data and relevant additional information.\n", + "A `Platform` is the connection to the database that holds all data and relevant additional information.\n", "\n", - "A **Run** is an object that holds all relevant information for one quantification of a scenario. \n", + "A `Run` is an object that holds all relevant information for one quantification of a scenario. 
\n", "A run is identified by a model name, a scenario name and a version number (assigned automatically)." ] }, @@ -67,7 +67,7 @@ "ixmp4 platforms list\n", "```\n", "\n", - "After creating the database, you can connect to it via an **ixmp4.Platform** instance." + "After creating the database, you can connect to it via an `ixmp4.Platform` instance." ] }, { @@ -85,7 +85,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "Now, we initialize a new **ixmp4.Run** in the database. This is done by using the argument *version=\"new\"*." + "Now, we initialize a new `ixmp4.Run` in the database." ] }, { @@ -101,10 +101,11 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "## Defining the *IndexSets*\n", + "## Defining the `IndexSet`s\n", "\n", - "An **IndexSet** defines a named list of elements. These IndexSets can be used for \"indexed assignment\" of parameters, variables and equations. \n", - "In database-lingo, a column of a parameter can be \"foreign-keyed\" onto an IndexSet.\n", + "An `IndexSet` defines a list of elements with a name. These `IndexSet`s can be used for \"indexed assignment\" of parameters, variables and equations. \n", + "The entries of these parameters, etc. are then validated against the elements of the linked `IndexSet`(s). \n", + "In database terms, a column of a parameter can be \"foreign-keyed\" onto an `IndexSet`.\n", "\n", "Below, we first show the data as they would be written in the GAMS tutorial ([transport.gms](transport.gms) in this folder). " ] @@ -131,7 +132,15 @@ "metadata": {}, "outputs": [], "source": [ - "i = run.optimization.indexsets.create(\"i\")" + "i = run.optimization.indexsets.create(\"i\")\n", + "i.add([\"seattle\", \"san-diego\"])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We can display the elements of any `IndexSet` as a Python list:" ] }, { @@ -140,14 +149,14 @@ "metadata": {}, "outputs": [], "source": [ - "i.add([\"seattle\", \"san-diego\"])" + "i.elements" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "We can display the elements of **IndexSet i** as a Python list." + "`IndexSet`s can be notated with documentation strings to record their meaning. These strings can then be used by linopy, too! " ] }, { @@ -156,14 +165,24 @@ "metadata": {}, "outputs": [], "source": [ - "i.elements" + "# TODO We would need to implement this first, though, probably setting pd.Series.name\n", + "# to it." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "i.docs = \"Canning Plants\"" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "For simplicity, the steps of creating an **IndexSet** and assigning elements can be done in one line." + "For simplicity, the steps of creating an `IndexSet` and assigning elements can be done in one line." ] }, { @@ -179,9 +198,9 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "## Assigning the *Parameters*\n", + "## Assigning the `Parameter`s\n", "\n", - "Next, we define the parameters *capacity* and *demand*. The parameters are assigned on the IndexSets *i* and *j*, respectively." + "Next, we define the parameters *capacity* and *demand*. The parameters are assigned on the indexsets *i* and *j*, respectively." ] }, { @@ -208,14 +227,14 @@ "\n", "from ixmp4.core import Unit\n", "\n", - "# Only needed once for each mp\n", + "# We only need to (and can!) 
define units once for each Platform\n", "try:\n", " cases = platform.units.get(\"cases\")\n", "except Unit.NotFound:\n", " cases = platform.units.create(\"cases\")\n", "\n", - "# capacity of plant i in cases\n", - "# add parameter data as a dict\n", + "# Capacity of plant i in cases\n", + "# Parameter data can be a dict...\n", "a = run.optimization.parameters.create(name=\"a\", constrained_to_indexsets=[\"i\"])\n", "a_data = {\n", " \"i\": [\"seattle\", \"san-diego\"],\n", @@ -224,9 +243,9 @@ "}\n", "a.add(data=a_data)\n", "\n", - "# demand at market j in cases\n", - "# add parameter data as a pd.DataFrame\n", + "# Demand at market j in cases\n", "b = run.optimization.parameters.create(\"b\", constrained_to_indexsets=\"j\")\n", + "# ... or a pd.DataFrame\n", "b_data = pd.DataFrame(\n", " [\n", " [\"new-york\", 325, cases.name],\n", @@ -238,13 +257,21 @@ "b.add(b_data)" ] }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Notice how the `parameter.data` has three columns but has only been linked to one `IndexSet`? That's on purpose: Every `Parameter` needs to have (the columns) *values* and *units*, but these cannot be constrained to an `IndexSet`. The value(s) can be any number(s), but the units need to be known to the `Platform`.\n", + "\n", + "Here's how to access `parameter.data` to e.g. quickly confirm that *b* is set correctly:" + ] + }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ - "# And this is how e.g. b looks:\n", "b.data" ] }, @@ -269,9 +296,9 @@ "except Unit.NotFound:\n", " km = platform.units.create(\"km\")\n", "\n", - "# distance in thousands of miles\n", + "# Distance in thousands of miles\n", "d = run.optimization.parameters.create(\"d\", constrained_to_indexsets=[\"i\", \"j\"])\n", - "# add more parameter data as dict\n", + "# You can start with some data ...\n", "d_data = {\n", " \"i\": [\"seattle\", \"seattle\", \"seattle\", \"san-diego\"],\n", " \"j\": [\"new-york\", \"chicago\", \"topeka\", \"new-york\"],\n", @@ -280,11 +307,18 @@ "}\n", "d.add(d_data)\n", "\n", - "# add other parameter data one by one\n", + "# ... and expand it later on:\n", "d.add({\"i\": [\"san-diego\"], \"j\": [\"chicago\"], \"values\": [1.8], \"units\": [\"km\"]})\n", "d.add({\"i\": [\"san-diego\"], \"j\": [\"topeka\"], \"values\": [1.4], \"units\": [\"km\"]})" ] }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Every time you add data, though, **all** columns must be present!" + ] + }, { "cell_type": "raw", "metadata": {}, @@ -313,7 +347,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "### Defining variables and equations in the scenario\n", + "### Defining `Variable`s and `Equation`s in the scenario\n", "\n", "The levels and marginals of these variables and equations will be imported to the scenario when reading the model solution." 
] @@ -388,7 +422,7 @@ "metadata": {}, "outputs": [], "source": [ - "# display the objective value of the solution\n", + "# Display the objective value of the solution\n", "z.levels" ] }, @@ -398,7 +432,7 @@ "metadata": {}, "outputs": [], "source": [ - "# display the quantities transported from canning plants to demand locations\n", + "# Display the quantities transported from canning plants to demand locations\n", "x.data" ] }, @@ -408,7 +442,7 @@ "metadata": {}, "outputs": [], "source": [ - "# display the quantities and marginals (shadow prices) of the demand balance constraints\n", + "# Display the quantities and marginals (shadow prices) of the demand balance constraints\n", "demand.data" ] }, @@ -418,18 +452,9 @@ "metadata": {}, "outputs": [], "source": [ - "# display the quantities and marginals (shadow prices) of the supply balance constraints\n", + "# Display the quantities and marginals (shadow prices) of the supply balance constraints\n", "supply.data" ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "m.objective.coeffs.to_pandas().values" - ] } ], "metadata": { From a7fa6ab7bc137749cd1ce749620af8e99e382094 Mon Sep 17 00:00:00 2001 From: Fridolin Glatter Date: Mon, 12 Aug 2024 12:25:15 +0200 Subject: [PATCH 28/50] Shorten util function --- ixmp4/data/db/optimization/utils.py | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/ixmp4/data/db/optimization/utils.py b/ixmp4/data/db/optimization/utils.py index 08c5c721..d602a22d 100644 --- a/ixmp4/data/db/optimization/utils.py +++ b/ixmp4/data/db/optimization/utils.py @@ -10,13 +10,10 @@ def collect_indexsets_to_check( columns: list["Column"], -) -> dict[str, Any]: +) -> dict[str, list[float | int | str]]: """Creates a {key:value} dict from linked Column.names and their IndexSet.elements.""" - collection: dict[str, Any] = {} - for column in columns: - collection[column.name] = column.indexset.elements - return collection + return {column.name: column.indexset.elements for column in columns} def validate_data(host: base.BaseModel, data: dict[str, Any], columns: list["Column"]): From 9581ec59a4ac7dd85d26c8f86cc75e56f4f9e458 Mon Sep 17 00:00:00 2001 From: Fridolin Glatter Date: Tue, 27 Aug 2024 13:34:12 +0200 Subject: [PATCH 29/50] Introduce equation.remove_data() --- ixmp4/data/db/optimization/equation/repository.py | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/ixmp4/data/db/optimization/equation/repository.py b/ixmp4/data/db/optimization/equation/repository.py index efe12fef..2627b5ed 100644 --- a/ixmp4/data/db/optimization/equation/repository.py +++ b/ixmp4/data/db/optimization/equation/repository.py @@ -184,3 +184,11 @@ def remove_data(self, equation_id: int) -> None: # TODO Is there a better way to reset .data? equation.data = {} self.session.commit() + + @guard("edit") + def remove_data(self, equation_id: int) -> None: + equation = self.get_by_id(id=equation_id) + # TODO Is there a better way to reset .data? 
+ equation.data = {} + self.session.add(equation) + self.session.commit() From f21879e1cef3f237df155ec2d2c66f50605520fd Mon Sep 17 00:00:00 2001 From: Fridolin Glatter Date: Tue, 27 Aug 2024 13:35:03 +0200 Subject: [PATCH 30/50] Introduce variable.remove_data() --- ixmp4/data/db/optimization/variable/repository.py | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/ixmp4/data/db/optimization/variable/repository.py b/ixmp4/data/db/optimization/variable/repository.py index 73f88aa9..285d2b48 100644 --- a/ixmp4/data/db/optimization/variable/repository.py +++ b/ixmp4/data/db/optimization/variable/repository.py @@ -196,3 +196,11 @@ def remove_data(self, variable_id: int) -> None: # TODO Is there a better way to reset .data? variable.data = {} self.session.commit() + + @guard("edit") + def remove_data(self, variable_id: int) -> None: + variable = self.get_by_id(id=variable_id) + # TODO Is there a better way to reset .data? + variable.data = {} + self.session.add(variable) + self.session.commit() From 5643bbeffbcf49e1d54d54a4453a19588294cf41 Mon Sep 17 00:00:00 2001 From: Fridolin Glatter Date: Wed, 28 Aug 2024 10:39:56 +0200 Subject: [PATCH 31/50] Prefer None over Never for type hints --- ixmp4/core/optimization/variable.py | 6 ------ 1 file changed, 6 deletions(-) diff --git a/ixmp4/core/optimization/variable.py b/ixmp4/core/optimization/variable.py index fff31ac1..cc14a337 100644 --- a/ixmp4/core/optimization/variable.py +++ b/ixmp4/core/optimization/variable.py @@ -1,4 +1,3 @@ -import sys from datetime import datetime from typing import Any, ClassVar, Iterable @@ -10,11 +9,6 @@ from ixmp4.data.abstract import Run from ixmp4.data.abstract.optimization import Column -if sys.version_info >= (3, 11): - from typing import Never -else: - from typing import NoReturn as Never - class Variable(BaseModelFacade): _model: VariableModel From d9f5903d1da38dd221432e571209a2120d065501 Mon Sep 17 00:00:00 2001 From: Fridolin Glatter Date: Wed, 28 Aug 2024 10:47:18 +0200 Subject: [PATCH 32/50] Make run.get_by_id() available in all backends --- ixmp4/data/abstract/run.py | 20 ++++++++++++++++++++ ixmp4/data/api/run.py | 4 ++++ ixmp4/server/rest/run.py | 8 ++++++++ tests/data/test_run.py | 6 +++++- 4 files changed, 37 insertions(+), 1 deletion(-) diff --git a/ixmp4/data/abstract/run.py b/ixmp4/data/abstract/run.py index f5a5f841..7134c156 100644 --- a/ixmp4/data/abstract/run.py +++ b/ixmp4/data/abstract/run.py @@ -143,6 +143,26 @@ def get_default_version( """ ... + def get_by_id(self, id: int) -> Run: + """Retrieves a Run by its id. + + Parameters + ---------- + id : int + Unique integer id. + + Raises + ------ + :class:`ixmp4.data.abstract.Run.NotFound`. + If the Run with `id` does not exist. + + Returns + ------- + :class:`ixmp4.data.abstract.Run`: + The retrieved Run. + """ + ... 
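A minimal usage sketch for the new `get_by_id()`, mirroring the test added in this patch (the platform name is an assumption):

```python
import ixmp4

platform = ixmp4.Platform("test")  # assumed local platform name
expected = platform.backend.runs.create("Model", "Scenario")

# get_by_id() is part of the abstract interface, so the same call works on
# both the direct database backend and the REST API backend
result = platform.backend.runs.get_by_id(id=expected.id)
assert result.id == expected.id
```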
+ def list( self, *, diff --git a/ixmp4/data/api/run.py b/ixmp4/data/api/run.py index 9b1485ec..beb05d91 100644 --- a/ixmp4/data/api/run.py +++ b/ixmp4/data/api/run.py @@ -50,6 +50,10 @@ def get(self, model_name: str, scenario_name: str, version: int) -> Run: is_default=None, ) + def get_by_id(self, id: int) -> Run: + res = self._get_by_id(id) + return Run(**res) + def enumerate(self, **kwargs) -> list[Run] | pd.DataFrame: return super().enumerate(**kwargs) diff --git a/ixmp4/server/rest/run.py b/ixmp4/server/rest/run.py index 19af8057..f6aede5f 100644 --- a/ixmp4/server/rest/run.py +++ b/ixmp4/server/rest/run.py @@ -60,3 +60,11 @@ def unset_as_default_version( backend: Backend = Depends(deps.get_backend), ): backend.runs.unset_as_default_version(id) + + +@router.get("/{id}/", response_model=api.Run) +def get_by_id( + id: int, + backend: Backend = Depends(deps.get_backend), +): + return backend.runs.get_by_id(id) diff --git a/tests/data/test_run.py b/tests/data/test_run.py index ae991a4e..78f5e7ab 100644 --- a/tests/data/test_run.py +++ b/tests/data/test_run.py @@ -55,10 +55,14 @@ def test_get_or_create_run(self, platform: ixmp4.Platform): run3 = platform.backend.runs.get_or_create("Model", "Scenario") assert run1.id == run3.id + def test_get_run_by_id(self, platform: ixmp4.Platform): + expected = platform.backend.runs.create("Model", "Scenario") + result = platform.backend.runs.get_by_id(id=expected.id) + assert expected.id == result.id + def test_list_run(self, platform: ixmp4.Platform): run1 = platform.backend.runs.create("Model", "Scenario") platform.backend.runs.create("Model", "Scenario") - runs = platform.backend.runs.list(default_only=False) assert runs[0].id == 1 assert runs[0].version == 1 From 3543e10c9f04f655022e94ac849b377d56702f62 Mon Sep 17 00:00:00 2001 From: Fridolin Glatter Date: Wed, 28 Aug 2024 11:42:06 +0200 Subject: [PATCH 33/50] Introduce run.opt.remove_solution() --- ixmp4/core/optimization/data.py | 6 ++++++ ixmp4/core/optimization/equation.py | 1 + tests/core/test_run.py | 31 +++++++++++++++++++++++++++++ 3 files changed, 38 insertions(+) diff --git a/ixmp4/core/optimization/data.py b/ixmp4/core/optimization/data.py index 69c18ee0..48be29a3 100644 --- a/ixmp4/core/optimization/data.py +++ b/ixmp4/core/optimization/data.py @@ -28,3 +28,9 @@ def __init__(self, *args, run: Run, **kwargs) -> None: self.scalars = ScalarRepository(_backend=self.backend, _run=run) self.tables = TableRepository(_backend=self.backend, _run=run) self.variables = VariableRepository(_backend=self.backend, _run=run) + + def remove_solution(self) -> None: + for equation in self.equations.list(): + equation.remove_data() + for variable in self.variables.list(): + variable.remove_data() diff --git a/ixmp4/core/optimization/equation.py b/ixmp4/core/optimization/equation.py index 638bf7f0..f49ec897 100644 --- a/ixmp4/core/optimization/equation.py +++ b/ixmp4/core/optimization/equation.py @@ -32,6 +32,7 @@ def data(self) -> dict[str, Any]: return self._model.data def add(self, data: dict[str, Any] | pd.DataFrame) -> None: + # TODO change to "to the Equation" """Adds data to an existing Equation.""" self.backend.optimization.equations.add_data( equation_id=self._model.id, data=data diff --git a/tests/core/test_run.py b/tests/core/test_run.py index 24886bf0..fd112179 100644 --- a/tests/core/test_run.py +++ b/tests/core/test_run.py @@ -174,3 +174,34 @@ def delete_all_datapoints(self, run: ixmp4.Run): run.iamc.remove(cat, type=ixmp4.DataPoint.Type.CATEGORICAL) if not datetime.empty: 
run.iamc.remove(datetime, type=ixmp4.DataPoint.Type.DATETIME) + + def test_run_remove_solution(self, platform: ixmp4.Platform): + run = platform.runs.create("Model", "Scenario") + indexset = run.optimization.indexsets.create("Indexset") + indexset.add(["foo", "bar"]) + test_data = { + "Indexset": ["bar", "foo"], + "levels": [2.0, 1], + "marginals": [0, "test"], + } + equation = run.optimization.equations.create( + "Equation", + constrained_to_indexsets=[indexset.name], + ) + equation.add(test_data) + variable = run.optimization.variables.create( + "Variable", + constrained_to_indexsets=[indexset.name], + ) + variable.add(test_data) + + run.optimization.remove_solution() + # TODO Why is it necessary in the API run to get them again? + # Idea: core-equations.list() returns same equations, but API different ones? + # No, equations.list() always returns different objects. + # So run.opt.remove_solution() should be affecting different objects always. + # Why does it work when connecting directly to the DB in the first place? + equation = run.optimization.equations.get("Equation") + variable = run.optimization.variables.get("Variable") + assert equation.data == {} + assert variable.data == {} From 1ad1251fcf014c68bffead1628780789b63700cd Mon Sep 17 00:00:00 2001 From: Fridolin Glatter Date: Thu, 29 Aug 2024 08:46:36 +0200 Subject: [PATCH 34/50] Remove superfluous check for scalar.unit --- ixmp4/core/optimization/scalar.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/ixmp4/core/optimization/scalar.py b/ixmp4/core/optimization/scalar.py index b4262d80..c2767fcb 100644 --- a/ixmp4/core/optimization/scalar.py +++ b/ixmp4/core/optimization/scalar.py @@ -44,9 +44,7 @@ def unit(self): @unit.setter def unit(self, unit: str | Unit): - if isinstance(unit, Unit): - unit = unit - else: + if not isinstance(unit, Unit): unit_model = self.backend.units.get(unit) unit = Unit(_backend=self.backend, _model=unit_model) self._model = self.backend.optimization.scalars.update( From 41ccf98df571bcb7b2a8f797cedd433fa4cd9aa8 Mon Sep 17 00:00:00 2001 From: Fridolin Glatter Date: Thu, 29 Aug 2024 08:49:05 +0200 Subject: [PATCH 35/50] Introduce core.runs.clone() --- ixmp4/core/run.py | 60 +++++++++++++++++++++++ tests/core/test_run.py | 108 ++++++++++++++++++++++++++++++++++++----- 2 files changed, 155 insertions(+), 13 deletions(-) diff --git a/ixmp4/core/run.py b/ixmp4/core/run.py index 447deb81..c980e84a 100644 --- a/ixmp4/core/run.py +++ b/ixmp4/core/run.py @@ -95,6 +95,66 @@ def tabulate(self, default_only: bool = True, **kwargs) -> pd.DataFrame: runs["scenario"] = runs["scenario__id"].map(self.backend.scenarios.map()) return runs[["id", "model", "scenario", "version", "is_default"]] + def clone( + self, + run_id: int, + model: str | None = None, + scenario: str | None = None, + keep_solution: bool = True, + ) -> Run: + base_run = Run( + _backend=self.backend, _model=self.backend.runs.get_by_id(run_id) + ) + run = Run( + _backend=self.backend, + _model=self.backend.runs.create( + model if model else base_run.model.name, + scenario if scenario else base_run.scenario.name, + ), + ) + run.iamc.add(df=base_run.iamc.tabulate()) + for scalar in base_run.optimization.scalars.list(): + run.optimization.scalars.create( + name=scalar.name, + value=scalar.value, + unit=self.backend.units.get(scalar.unit.name).name, + ) + for indexset in base_run.optimization.indexsets.list(): + run.optimization.indexsets.create(name=indexset.name).add( + elements=indexset.elements + ) + for table in 
base_run.optimization.tables.list(): + run.optimization.tables.create( + name=table.name, + constrained_to_indexsets=table.constrained_to_indexsets, + column_names=[column.name for column in table.columns], + ).add(data=table.data) + for parameter in base_run.optimization.parameters.list(): + run.optimization.parameters.create( + name=parameter.name, + constrained_to_indexsets=parameter.constrained_to_indexsets, + column_names=[column.name for column in parameter.columns], + ).add(data=parameter.data) + for equation in base_run.optimization.equations.list(): + cloned_equation = run.optimization.equations.create( + name=equation.name, + constrained_to_indexsets=equation.constrained_to_indexsets, + column_names=[column.name for column in equation.columns], + ) + if keep_solution: + cloned_equation.add(data=equation.data) + for variable in base_run.optimization.variables.list(): + cloned_variable = run.optimization.variables.create( + name=variable.name, + constrained_to_indexsets=variable.constrained_to_indexsets, + column_names=[column.name for column in variable.columns] + if variable.columns + else None, + ) + if keep_solution: + cloned_variable.add(data=variable.data) + return run + class RunMetaFacade(BaseFacade, UserDict): run: RunModel diff --git a/tests/core/test_run.py b/tests/core/test_run.py index fd112179..31b6a2e9 100644 --- a/tests/core/test_run.py +++ b/tests/core/test_run.py @@ -3,10 +3,10 @@ import pytest import ixmp4 -from ixmp4.core import Run +from ixmp4 import Run from ixmp4.core.exceptions import IxmpError -from ..fixtures import FilterIamcDataset +from ..fixtures import FilterIamcDataset, SmallIamcDataset def _expected_runs_table(*row_default): @@ -20,8 +20,44 @@ def _expected_runs_table(*row_default): ) +def assert_cloned_run(original: Run, clone: Run, kept_solution: bool) -> None: + pdt.assert_frame_equal(original.iamc.tabulate(), clone.iamc.tabulate()) + for original_indexset, cloned_indexset in zip( + original.optimization.indexsets.list(), clone.optimization.indexsets.list() + ): + assert original_indexset.name == cloned_indexset.name + assert original_indexset.elements == cloned_indexset.elements + for original_scalar, cloned_scalar in zip( + original.optimization.scalars.list(), clone.optimization.scalars.list() + ): + assert original_scalar.name == cloned_scalar.name + assert original_scalar.value == cloned_scalar.value + assert original_scalar.unit.name == cloned_scalar.unit.name + for original_table, cloned_table in zip( + original.optimization.tables.list(), clone.optimization.tables.list() + ): + assert original_table.name == cloned_table.name + assert original_table.data == cloned_table.data + for original_parameter, cloned_parameter in zip( + original.optimization.parameters.list(), clone.optimization.parameters.list() + ): + assert original_parameter.name == cloned_parameter.name + assert original_parameter.data == cloned_parameter.data + for original_equation, cloned_equation in zip( + original.optimization.equations.list(), clone.optimization.equations.list() + ): + assert original_equation.name == cloned_equation.name + assert cloned_equation.data == (original_equation.data if kept_solution else {}) + for original_variable, cloned_variable in zip( + original.optimization.variables.list(), clone.optimization.variables.list() + ): + assert original_variable.name == cloned_variable.name + assert cloned_variable.data == (original_variable.data if kept_solution else {}) + + class TestCoreRun: filter = FilterIamcDataset() + small = SmallIamcDataset def 
test_run_notfound(self, platform: ixmp4.Platform): # no Run with that model and scenario name exists @@ -184,24 +220,70 @@ def test_run_remove_solution(self, platform: ixmp4.Platform): "levels": [2.0, 1], "marginals": [0, "test"], } - equation = run.optimization.equations.create( + run.optimization.equations.create( "Equation", constrained_to_indexsets=[indexset.name], - ) - equation.add(test_data) - variable = run.optimization.variables.create( + ).add(test_data) + run.optimization.variables.create( "Variable", constrained_to_indexsets=[indexset.name], - ) - variable.add(test_data) + ).add(test_data) run.optimization.remove_solution() - # TODO Why is it necessary in the API run to get them again? - # Idea: core-equations.list() returns same equations, but API different ones? - # No, equations.list() always returns different objects. - # So run.opt.remove_solution() should be affecting different objects always. - # Why does it work when connecting directly to the DB in the first place? + # Need to fetch them here even if fetched before because API layer might not + # forward changes automatically equation = run.optimization.equations.get("Equation") variable = run.optimization.variables.get("Variable") assert equation.data == {} assert variable.data == {} + + def test_run_clone(self, platform: ixmp4.Platform, test_data_annual): + # Prepare test data and platform + test_data_annual = self.small.annual.copy() + # Define required regions and units in the database + self.small.load_regions(platform) + self.small.load_units(platform) + unit = platform.units.list()[0] # Test data currently only has one + test_data = {"Indexset": ["foo"], "values": [3.14], "units": [unit.name]} + test_solution = {"Indexset": ["foo"], "levels": [4], "marginals": [0.2]} + + # Prepare original run + run = platform.runs.create("Model", "Scenario") + run.iamc.add(test_data_annual, type=ixmp4.DataPoint.Type.ANNUAL) + indexset = run.optimization.indexsets.create("Indexset") + indexset.add(["foo", "bar"]) + run.optimization.scalars.create("Scalar", value=10, unit=unit.name) + run.optimization.tables.create( + "Table", + constrained_to_indexsets=[indexset.name], + ).add({"Indexset": ["bar"]}) + run.optimization.parameters.create( + "Parameter", constrained_to_indexsets=[indexset.name] + ).add(test_data) + run.optimization.variables.create( + "Variable", constrained_to_indexsets=[indexset.name] + ).add(test_solution) + run.optimization.equations.create( + "Equation", + constrained_to_indexsets=[indexset.name], + ).add(test_solution) + + # Test cloning while keeping the solution + clone_with_solution = platform.runs.clone(run_id=run.id) + assert_cloned_run(run, clone_with_solution, kept_solution=True) + + # Test cloning without keeping the solution + clone_without_solution = platform.runs.clone( + run_id=run.id, + model="new model", + scenario="new scenario", + keep_solution=False, + ) + assert_cloned_run(run, clone_without_solution, kept_solution=False) + + # Test working with cloned run + cloned_indexset = clone_with_solution.optimization.indexsets.get(indexset.name) + cloned_indexset.add("baz") + expected = indexset.elements + expected.append("baz") + assert cloned_indexset.elements == expected From eb1b0fc409b39ce8ef5631def5ac03f3174a336f Mon Sep 17 00:00:00 2001 From: Fridolin Glatter Date: Thu, 29 Aug 2024 10:07:29 +0200 Subject: [PATCH 36/50] Remove superfluous session.add() for table, indexset --- ixmp4/data/db/optimization/indexset/repository.py | 1 - ixmp4/data/db/optimization/table/repository.py | 1 - 2 files 
changed, 2 deletions(-) diff --git a/ixmp4/data/db/optimization/indexset/repository.py b/ixmp4/data/db/optimization/indexset/repository.py index 9ec1bc81..a239b8d6 100644 --- a/ixmp4/data/db/optimization/indexset/repository.py +++ b/ixmp4/data/db/optimization/indexset/repository.py @@ -77,5 +77,4 @@ def add_elements( else: indexset.elements = indexset.elements + elements - self.session.add(indexset) self.session.commit() diff --git a/ixmp4/data/db/optimization/table/repository.py b/ixmp4/data/db/optimization/table/repository.py index eef57e9f..931a677a 100644 --- a/ixmp4/data/db/optimization/table/repository.py +++ b/ixmp4/data/db/optimization/table/repository.py @@ -159,5 +159,4 @@ def add_data(self, table_id: int, data: dict[str, Any] | pd.DataFrame) -> None: orient="list" ) - self.session.add(table) self.session.commit() From 1a4c439455e4f33b44cd76969e66e354729cd283 Mon Sep 17 00:00:00 2001 From: Fridolin Glatter Date: Thu, 29 Aug 2024 10:08:05 +0200 Subject: [PATCH 37/50] Remove superfluous session.add() for equation --- ixmp4/data/db/optimization/equation/repository.py | 8 -------- 1 file changed, 8 deletions(-) diff --git a/ixmp4/data/db/optimization/equation/repository.py b/ixmp4/data/db/optimization/equation/repository.py index 2627b5ed..efe12fef 100644 --- a/ixmp4/data/db/optimization/equation/repository.py +++ b/ixmp4/data/db/optimization/equation/repository.py @@ -184,11 +184,3 @@ def remove_data(self, equation_id: int) -> None: # TODO Is there a better way to reset .data? equation.data = {} self.session.commit() - - @guard("edit") - def remove_data(self, equation_id: int) -> None: - equation = self.get_by_id(id=equation_id) - # TODO Is there a better way to reset .data? - equation.data = {} - self.session.add(equation) - self.session.commit() From 3b8f45f1855c080befc5e1c952e197dcc3745fd7 Mon Sep 17 00:00:00 2001 From: Fridolin Glatter Date: Thu, 29 Aug 2024 10:08:36 +0200 Subject: [PATCH 38/50] Remove superfluous session.add() for variable --- ixmp4/data/db/optimization/variable/repository.py | 8 -------- 1 file changed, 8 deletions(-) diff --git a/ixmp4/data/db/optimization/variable/repository.py b/ixmp4/data/db/optimization/variable/repository.py index 285d2b48..73f88aa9 100644 --- a/ixmp4/data/db/optimization/variable/repository.py +++ b/ixmp4/data/db/optimization/variable/repository.py @@ -196,11 +196,3 @@ def remove_data(self, variable_id: int) -> None: # TODO Is there a better way to reset .data? variable.data = {} self.session.commit() - - @guard("edit") - def remove_data(self, variable_id: int) -> None: - variable = self.get_by_id(id=variable_id) - # TODO Is there a better way to reset .data? 
- variable.data = {} - self.session.add(variable) - self.session.commit() From b009acfb640106d853c7bc6b627697c9baa0d7ce Mon Sep 17 00:00:00 2001 From: Fridolin Glatter Date: Thu, 29 Aug 2024 11:18:12 +0200 Subject: [PATCH 39/50] Allow clone of runs without iamc data --- ixmp4/core/run.py | 4 +++- tests/core/test_run.py | 5 +++++ 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/ixmp4/core/run.py b/ixmp4/core/run.py index c980e84a..151dedd2 100644 --- a/ixmp4/core/run.py +++ b/ixmp4/core/run.py @@ -112,7 +112,9 @@ def clone( scenario if scenario else base_run.scenario.name, ), ) - run.iamc.add(df=base_run.iamc.tabulate()) + datapoints = base_run.iamc.tabulate() + if not datapoints.empty: + run.iamc.add(df=datapoints) for scalar in base_run.optimization.scalars.list(): run.optimization.scalars.create( name=scalar.name, diff --git a/tests/core/test_run.py b/tests/core/test_run.py index 31b6a2e9..b0e02912 100644 --- a/tests/core/test_run.py +++ b/tests/core/test_run.py @@ -287,3 +287,8 @@ def test_run_clone(self, platform: ixmp4.Platform, test_data_annual): expected = indexset.elements expected.append("baz") assert cloned_indexset.elements == expected + + # Test cloning Run without iamc data + run = test_mp.runs.create("Model", "Scenario") + clone_without_iamc = test_mp.runs.clone(run.id) + assert clone_without_iamc.iamc.tabulate().empty From 0c13ae1d69461009b31def7e7b59fc8592614257 Mon Sep 17 00:00:00 2001 From: Fridolin Glatter Date: Thu, 29 Aug 2024 11:20:00 +0200 Subject: [PATCH 40/50] Create helper create_default_dantzig_run() --- tutorial/transport/utils.py | 53 +++++++++++++++++++++++++++++++++++++ 1 file changed, 53 insertions(+) create mode 100644 tutorial/transport/utils.py diff --git a/tutorial/transport/utils.py b/tutorial/transport/utils.py new file mode 100644 index 00000000..4b81d037 --- /dev/null +++ b/tutorial/transport/utils.py @@ -0,0 +1,53 @@ +import pandas as pd + +import ixmp4 +from ixmp4.core import Run, Unit + + +def create_default_dantzig_run(platform: ixmp4.Platform) -> Run: + """Creates new ixmp4.Run holding all data for Dantzig's problem.""" + try: + cases = platform.units.get("cases") + km = platform.units.get("km") + unit_cost_per_case = platform.units.get("USD/km") + except Unit.NotFound: + cases = platform.units.create("cases") + km = platform.units.create("km") + unit_cost_per_case = platform.units.create("USD/km") + run = platform.runs.create(model="transport problem", scenario="standard") + run.set_as_default() + run.optimization.indexsets.create("i").add(["seattle", "san-diego"]) + run.optimization.indexsets.create("j").add(["new-york", "chicago", "topeka"]) + a_data = { + "i": ["seattle", "san-diego"], + "values": [350, 600], + "units": [cases.name, cases.name], + } + run.optimization.parameters.create(name="a", constrained_to_indexsets=["i"]).add( + data=a_data + ) + b_data = pd.DataFrame( + [ + ["new-york", 325, cases.name], + ["chicago", 300, cases.name], + ["topeka", 275, cases.name], + ], + columns=["j", "values", "units"], + ) + run.optimization.parameters.create("b", constrained_to_indexsets=["j"]).add(b_data) + d_data = { + "i": ["seattle", "seattle", "seattle", "san-diego", "san-diego", "san-diego"], + "j": ["new-york", "chicago", "topeka", "new-york", "chicago", "topeka"], + "values": [2.5, 1.7, 1.8, 2.5, 1.8, 1.4], + "units": [km.name] * 6, + } + run.optimization.parameters.create("d", constrained_to_indexsets=["i", "j"]).add( + d_data + ) + run.optimization.scalars.create(name="f", value=90, unit=unit_cost_per_case) + 
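+    # Note: variables and equations are created empty here; their levels and
+    # marginals are only filled in once the linopy solution is read back into the Run.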
run.optimization.variables.create("z") + run.optimization.variables.create("x", constrained_to_indexsets=["i", "j"]) + run.optimization.equations.create("supply", constrained_to_indexsets=["i"]) + run.optimization.equations.create("demand", constrained_to_indexsets=["j"]) + + return run From 103fca5febf01eb19eea9c3cb7892a310cd63daf Mon Sep 17 00:00:00 2001 From: Fridolin Glatter Date: Thu, 29 Aug 2024 11:28:59 +0200 Subject: [PATCH 41/50] Improve linopy tutorial and helper functions --- tutorial/transport/dantzig_model_linopy.py | 6 ++++-- tutorial/transport/linopy_transport.ipynb | 14 +++++++------- 2 files changed, 11 insertions(+), 9 deletions(-) diff --git a/tutorial/transport/dantzig_model_linopy.py b/tutorial/transport/dantzig_model_linopy.py index 4bc15ddd..0a55effe 100644 --- a/tutorial/transport/dantzig_model_linopy.py +++ b/tutorial/transport/dantzig_model_linopy.py @@ -14,6 +14,7 @@ def create_parameter( def create_dantzig_model(run: Run) -> linopy.Model: + """Creates a new linopy.Model for Dantzig's problem based on an ixmp4.Run.""" m = linopy.Model() i = pd.Index(run.optimization.indexsets.get("i").elements, name="Canning Plants") j = pd.Index(run.optimization.indexsets.get("j").elements, name="Markets") @@ -60,6 +61,7 @@ def create_dantzig_model(run: Run) -> linopy.Model: def read_dantzig_solution(model: linopy.Model, run: Run) -> None: + """Reads the Dantzig solution from the linopy.Model into the ixmp4.Run.""" # Handle objective # TODO adding fake marginals here until Variables don't require this column anymore # Can't add units if this column was not declared above. Better stored as Scalar @@ -87,7 +89,7 @@ def read_dantzig_solution(model: linopy.Model, run: Run) -> None: # The following don't seem to be typed correctly by linopy # Add supply data supply_data = { - "i": ["seattle", "san-diego"], + "i": run.optimization.indexsets.get("i").elements, "levels": model.constraints["Observe supply limit at plant i"].data.rhs, # type: ignore "marginals": model.constraints["Observe supply limit at plant i"].data.dual, # type: ignore } @@ -95,7 +97,7 @@ def read_dantzig_solution(model: linopy.Model, run: Run) -> None: # Add demand data demand_data = { - "j": ["new-york", "chicago", "topeka"], + "j": run.optimization.indexsets.get("j").elements, "levels": model.constraints["Satisfy demand at market j"].data.rhs, # type: ignore "marginals": model.constraints["Satisfy demand at market j"].data.dual, # type: ignore } diff --git a/tutorial/transport/linopy_transport.ipynb b/tutorial/transport/linopy_transport.ipynb index 17ace268..37e6483d 100644 --- a/tutorial/transport/linopy_transport.ipynb +++ b/tutorial/transport/linopy_transport.ipynb @@ -244,7 +244,7 @@ "a.add(data=a_data)\n", "\n", "# Demand at market j in cases\n", - "b = run.optimization.parameters.create(\"b\", constrained_to_indexsets=\"j\")\n", + "b = run.optimization.parameters.create(\"b\", constrained_to_indexsets=[\"j\"])\n", "# ... or a pd.DataFrame\n", "b_data = pd.DataFrame(\n", " [\n", @@ -349,7 +349,7 @@ "source": [ "### Defining `Variable`s and `Equation`s in the scenario\n", "\n", - "The levels and marginals of these variables and equations will be imported to the scenario when reading the model solution." + "The levels and marginals of these `Variable`s and `Equation`s will be imported to the scenario when reading the model solution." ] }, { @@ -387,7 +387,7 @@ "\n", "In this tutorial, we solve the problem using the ``highs`` solver in linopy. 
\n", "\n", - "The ``create_dantzig_model()`` function is a convenience shortcut for setting up a linopy model correctly for the datzig scenario. Please see ``linopy_model.py`` for details.\n", + "The ``create_dantzig_model()`` function is a convenience shortcut for setting up a linopy model correctly for the dantzig scenario. Please see ``linopy_model.py`` for details.\n", "\n", "The solution data are stored with the model object automatically. ``store_dantzig_solution()`` then stores them in the ixmp4 objects." ] @@ -403,10 +403,10 @@ " read_dantzig_solution,\n", ")\n", "\n", - "m = create_dantzig_model(run=run)\n", - "m.solve(\"highs\")\n", + "linopy_model = create_dantzig_model(run=run)\n", + "linopy_model.solve(\"highs\")\n", "\n", - "read_dantzig_solution(model=m, run=run)" + "read_dantzig_solution(model=linopy_model, run=run)" ] }, { @@ -433,7 +433,7 @@ "outputs": [], "source": [ "# Display the quantities transported from canning plants to demand locations\n", - "x.data" + "pd.DataFrame(x.data)" ] }, { From 4e02ecb0684409dabcd4000695a211121e2d31d0 Mon Sep 17 00:00:00 2001 From: Fridolin Glatter Date: Thu, 29 Aug 2024 11:29:26 +0200 Subject: [PATCH 42/50] Add second part of transport tutorial --- .../transport/linopy_transport_scenario.ipynb | 386 ++++++++++++++++++ 1 file changed, 386 insertions(+) create mode 100644 tutorial/transport/linopy_transport_scenario.ipynb diff --git a/tutorial/transport/linopy_transport_scenario.ipynb b/tutorial/transport/linopy_transport_scenario.ipynb new file mode 100644 index 00000000..895cd396 --- /dev/null +++ b/tutorial/transport/linopy_transport_scenario.ipynb @@ -0,0 +1,386 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Tutorial 2 for Python\n", + "\n", + "## Make a scenario of Dantzig's Transport Problem using the *ix modeling platform* (ixmp4)\n", + "\n", + "\n", + "\n", + "### Aim and scope of the tutorial\n", + "\n", + "This tutorial uses the transport problem scenario developed in the first tutorial and illustrates how the ixmp4 framework can be applied for scenario analysis in the sense often used in economic or environmental modeling: develop a baseline, create a clone from the baseline with altered parameters or assumptions, and solve the new model. Then, compare the results from the original and altered scenario versions.\n", + "\n", + "In particular, this tutorial will take you through the following steps:\n", + "\n", + "0. Launch an `ixmp4.Platform` instance and retrieve the `ixmp4.Run` instance of Dantzig's transport problem\n", + "0. Retrieve some data from the `Run` for illustration of filters\n", + "0. Make a clone of the baseline scenario, then check out the clone and make changes: \n", + " in this case, add a new demand location and transport costs to that city\n", + "0. Solve the new scenario\n", + "0. Display the solution of both the baseline and the new scenario" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Launching the `platform` and loading a `Run` from the `ixmp4` database instance\n", + "\n", + "We launch a platform instance and display all models/scenarios currently stored in the connected database instance. We use the same local database we created in [part 1](linopy_transport.ipynb), so please see there for how to create such a local database. 
" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import ixmp4\n", + "\n", + "platform = ixmp4.Platform(\"tutorial-test\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Tabulate all Runs in the database\n", + "# Per default, only \"default\" Runs are tabulated\n", + "platform.runs.tabulate(default_only=False)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Model and scenario name we used for Dantzig's transport problem in part 1\n", + "model = \"transport problem\"\n", + "scenario = \"standard\"" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "If you have run the first part of tutorial before, the existing `Run` should have appeared, and we can load it.\n", + "Uncomment and run the following lines as appropriate:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Load the default version of the Run created in the first tutorial\n", + "# run = platform.runs.get(model=model, scenario=scenario)\n", + "\n", + "# If you already solved this Run, remember to remove its solution!\n", + "# run.optimization.remove_solution()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "If the `Run` did not appear (e.g. because you are starting with this tutorial), we can use a function that creates the `Run` from scratch in one step and solve it as usual:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from tutorial.transport.utils import create_default_dantzig_run\n", + "\n", + "run = create_default_dantzig_run(platform=platform)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from tutorial.transport.dantzig_model_linopy import (\n", + " create_dantzig_model,\n", + " read_dantzig_solution,\n", + ")\n", + "\n", + "linopy_model = create_dantzig_model(run=run)\n", + "linopy_model.solve(\"highs\")\n", + "\n", + "read_dantzig_solution(model=linopy_model, run=run)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Retrieve some data from the run for illustration of filters\n", + "\n", + "Before cloning a run and editing data, this section illustrates two-and-a-half methods to retrieve data for a parameter from a run." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Load the distance Parameter\n", + "d = run.optimization.parameters.get(\"d\")\n", + "d" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "This is an `ixmp4.Parameter` object! It's useful to interact with if you want to edit the information stored in the database about it. You could e.g. add `.docs` to it so that you can always look up in the database what this object does.\n", + "For interacting with the modeling data, it's more useful to interact with the `.data` attribute, which stores the actual data: " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "d.data" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "This is a dictionary because that's easier to store in a database. 
For ease of access, you may want to convert it to a `pandas.DataFrame`:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import pandas as pd\n", + "\n", + "d_data = pd.DataFrame(d.data)\n", + "\n", + "# Show only the distances for connections from Seattle by filtering the pandas.DataFrame\n", + "d_data[d_data[\"i\"] == \"seattle\"]" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# NOTE We currently don't support loading only specific parameter elements.\n", + "# We always load the whole parameter and can then select from the data as usual with\n", + "# e.g. pandas." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "For faster access or more complex filtering, you can then use all familiar `pandas` features. For example, list only distances from Seattle to specific other cities:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "d_data.loc[(d_data[\"i\"] == \"seattle\") & (d_data[\"j\"].isin([\"chicago\", \"topeka\"]))]" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Please note that `pandas` recommends using [advanced indexing](https://pandas.pydata.org/pandas-docs/stable/user_guide/advanced.html#advanced-advanced-hierarchical) if you find yourself using more than three conditionals in `.loc[]`." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Make a clone of the baseline scenario, then check out the clone and edit the scenario\n", + "\n", + "For illustration of a scenario analysis workflow, we add a new demand location ``detroit`` and add a demand level and transport costs to that city.\n", + "Because the production capacity does not allow much slack for increased production, we also reduce the demand level in ``chicago``."
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Create a new Run by cloning the existing one (without keeping the solution)\n", + "run_detroit = platform.runs.clone(\n", + " run_id=run.id, model=model, scenario=\"detroit\", keep_solution=False\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Reduce demand in chicago\n", + "run_detroit.optimization.parameters.get(\"b\").add(\n", + " {\"j\": [\"chicago\"], \"values\": [200], \"units\": [\"cases\"]}\n", + ")\n", + "\n", + "# Add a new city with demand and distances\n", + "run_detroit.optimization.indexsets.get(\"j\").add(\"detroit\")\n", + "run_detroit.optimization.parameters.get(\"b\").add(\n", + " {\"j\": [\"detroit\"], \"values\": [150], \"units\": [\"cases\"]}\n", + ")\n", + "run_detroit.optimization.parameters.get(\"d\").add(\n", + " {\n", + " \"i\": [\"seattle\", \"san-diego\"],\n", + " \"j\": [\"detroit\", \"detroit\"],\n", + " \"values\": [1.7, 1.9],\n", + " \"units\": [\"km\", \"km\"],\n", + " }\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Solve the new scenario" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "linopy_model_detroit = create_dantzig_model(run=run_detroit)\n", + "linopy_model_detroit.solve(\"highs\")\n", + "\n", + "read_dantzig_solution(model=linopy_model_detroit, run=run_detroit)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Display and analyze the results\n", + "\n", + "For comparison between the baseline `Run`, i.e., the original transport problem, and the \"detroit\" `Run`, we show the solution for both cases." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "jupyter": { + "name": "scen-z" + } + }, + "outputs": [], + "source": [ + "# Display the objective value of the solution in the baseline Run\n", + "run.optimization.variables.get(\"z\").levels" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "jupyter": { + "name": "scen-detroit-z" + } + }, + "outputs": [], + "source": [ + "# Display the objective value of the solution in the \"detroit\" Run\n", + "run_detroit.optimization.variables.get(\"z\").levels" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Display quantities transported from canning plants to demand locations in \"baseline\"\n", + "pd.DataFrame(run.optimization.variables.get(\"x\").data)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Display quantities transported from canning plants to demand locations in \"detroit\"\n", + "pd.DataFrame(run_detroit.optimization.variables.get(\"x\").data)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Display quantities and marginals (=shadow prices) of the demand balance constraints\n", + "# in \"baseline\"\n", + "run.optimization.equations.get(\"demand\").data" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Display quantities and marginals (=shadow prices) of the demand balance constraints\n", + "# in \"detroit\"\n", + "run_detroit.optimization.equations.get(\"demand\").data" + ] + } + ], + "metadata": { + "anaconda-cloud": {}, + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.12.5" + } + }, + "nbformat": 4, + "nbformat_minor": 1 +} From 087b52c55f188c3353b52246fe6681be74886d46 Mon Sep 17 00:00:00 2001 From: Fridolin Glatter Date: Thu, 3 Oct 2024 12:39:28 +0200 Subject: [PATCH 43/50] Lock dependencies after rebase --- poetry.lock | 85 ++++++++++++++++++++++++----------------------------- 1 file changed, 38 insertions(+), 47 deletions(-) diff --git a/poetry.lock b/poetry.lock index ea4f5381..caf34b80 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1664,56 +1664,47 @@ numpy = ">=1.23.0" [[package]] name = "numpy" -version = "2.0.0" +version = "1.26.4" description = "Fundamental package for array computing in Python" optional = false python-versions = ">=3.9" files = [ - {file = "numpy-2.0.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:04494f6ec467ccb5369d1808570ae55f6ed9b5809d7f035059000a37b8d7e86f"}, - {file = "numpy-2.0.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:2635dbd200c2d6faf2ef9a0d04f0ecc6b13b3cad54f7c67c61155138835515d2"}, - {file = "numpy-2.0.0-cp310-cp310-macosx_14_0_arm64.whl", hash = "sha256:0a43f0974d501842866cc83471bdb0116ba0dffdbaac33ec05e6afed5b615238"}, - {file = "numpy-2.0.0-cp310-cp310-macosx_14_0_x86_64.whl", hash = "sha256:8d83bb187fb647643bd56e1ae43f273c7f4dbcdf94550d7938cfc32566756514"}, - {file = "numpy-2.0.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:79e843d186c8fb1b102bef3e2bc35ef81160ffef3194646a7fdd6a73c6b97196"}, - {file = 
"numpy-2.0.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6d7696c615765091cc5093f76fd1fa069870304beaccfd58b5dcc69e55ef49c1"}, - {file = "numpy-2.0.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:b4c76e3d4c56f145d41b7b6751255feefae92edbc9a61e1758a98204200f30fc"}, - {file = "numpy-2.0.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:acd3a644e4807e73b4e1867b769fbf1ce8c5d80e7caaef0d90dcdc640dfc9787"}, - {file = "numpy-2.0.0-cp310-cp310-win32.whl", hash = "sha256:cee6cc0584f71adefe2c908856ccc98702baf95ff80092e4ca46061538a2ba98"}, - {file = "numpy-2.0.0-cp310-cp310-win_amd64.whl", hash = "sha256:ed08d2703b5972ec736451b818c2eb9da80d66c3e84aed1deeb0c345fefe461b"}, - {file = "numpy-2.0.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:ad0c86f3455fbd0de6c31a3056eb822fc939f81b1618f10ff3406971893b62a5"}, - {file = "numpy-2.0.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:e7f387600d424f91576af20518334df3d97bc76a300a755f9a8d6e4f5cadd289"}, - {file = "numpy-2.0.0-cp311-cp311-macosx_14_0_arm64.whl", hash = "sha256:34f003cb88b1ba38cb9a9a4a3161c1604973d7f9d5552c38bc2f04f829536609"}, - {file = "numpy-2.0.0-cp311-cp311-macosx_14_0_x86_64.whl", hash = "sha256:b6f6a8f45d0313db07d6d1d37bd0b112f887e1369758a5419c0370ba915b3871"}, - {file = "numpy-2.0.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5f64641b42b2429f56ee08b4f427a4d2daf916ec59686061de751a55aafa22e4"}, - {file = "numpy-2.0.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a7039a136017eaa92c1848152827e1424701532ca8e8967fe480fe1569dae581"}, - {file = "numpy-2.0.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:46e161722e0f619749d1cd892167039015b2c2817296104487cd03ed4a955995"}, - {file = "numpy-2.0.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:0e50842b2295ba8414c8c1d9d957083d5dfe9e16828b37de883f51fc53c4016f"}, - {file = "numpy-2.0.0-cp311-cp311-win32.whl", hash = "sha256:2ce46fd0b8a0c947ae047d222f7136fc4d55538741373107574271bc00e20e8f"}, - {file = "numpy-2.0.0-cp311-cp311-win_amd64.whl", hash = "sha256:fbd6acc766814ea6443628f4e6751d0da6593dae29c08c0b2606164db026970c"}, - {file = "numpy-2.0.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:354f373279768fa5a584bac997de6a6c9bc535c482592d7a813bb0c09be6c76f"}, - {file = "numpy-2.0.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:4d2f62e55a4cd9c58c1d9a1c9edaedcd857a73cb6fda875bf79093f9d9086f85"}, - {file = "numpy-2.0.0-cp312-cp312-macosx_14_0_arm64.whl", hash = "sha256:1e72728e7501a450288fc8e1f9ebc73d90cfd4671ebbd631f3e7857c39bd16f2"}, - {file = "numpy-2.0.0-cp312-cp312-macosx_14_0_x86_64.whl", hash = "sha256:84554fc53daa8f6abf8e8a66e076aff6ece62de68523d9f665f32d2fc50fd66e"}, - {file = "numpy-2.0.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c73aafd1afca80afecb22718f8700b40ac7cab927b8abab3c3e337d70e10e5a2"}, - {file = "numpy-2.0.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:49d9f7d256fbc804391a7f72d4a617302b1afac1112fac19b6c6cec63fe7fe8a"}, - {file = "numpy-2.0.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:0ec84b9ba0654f3b962802edc91424331f423dcf5d5f926676e0150789cb3d95"}, - {file = "numpy-2.0.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:feff59f27338135776f6d4e2ec7aeeac5d5f7a08a83e80869121ef8164b74af9"}, - {file = "numpy-2.0.0-cp312-cp312-win32.whl", hash = "sha256:c5a59996dc61835133b56a32ebe4ef3740ea5bc19b3983ac60cc32be5a665d54"}, - {file = "numpy-2.0.0-cp312-cp312-win_amd64.whl", 
hash = "sha256:a356364941fb0593bb899a1076b92dfa2029f6f5b8ba88a14fd0984aaf76d0df"}, - {file = "numpy-2.0.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:e61155fae27570692ad1d327e81c6cf27d535a5d7ef97648a17d922224b216de"}, - {file = "numpy-2.0.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:4554eb96f0fd263041baf16cf0881b3f5dafae7a59b1049acb9540c4d57bc8cb"}, - {file = "numpy-2.0.0-cp39-cp39-macosx_14_0_arm64.whl", hash = "sha256:903703372d46bce88b6920a0cd86c3ad82dae2dbef157b5fc01b70ea1cfc430f"}, - {file = "numpy-2.0.0-cp39-cp39-macosx_14_0_x86_64.whl", hash = "sha256:3e8e01233d57639b2e30966c63d36fcea099d17c53bf424d77f088b0f4babd86"}, - {file = "numpy-2.0.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1cde1753efe513705a0c6d28f5884e22bdc30438bf0085c5c486cdaff40cd67a"}, - {file = "numpy-2.0.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:821eedb7165ead9eebdb569986968b541f9908979c2da8a4967ecac4439bae3d"}, - {file = "numpy-2.0.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:9a1712c015831da583b21c5bfe15e8684137097969c6d22e8316ba66b5baabe4"}, - {file = "numpy-2.0.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:9c27f0946a3536403efb0e1c28def1ae6730a72cd0d5878db38824855e3afc44"}, - {file = "numpy-2.0.0-cp39-cp39-win32.whl", hash = "sha256:63b92c512d9dbcc37f9d81b123dec99fdb318ba38c8059afc78086fe73820275"}, - {file = "numpy-2.0.0-cp39-cp39-win_amd64.whl", hash = "sha256:3f6bed7f840d44c08ebdb73b1825282b801799e325bcbdfa6bc5c370e5aecc65"}, - {file = "numpy-2.0.0-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:9416a5c2e92ace094e9f0082c5fd473502c91651fb896bc17690d6fc475128d6"}, - {file = "numpy-2.0.0-pp39-pypy39_pp73-macosx_14_0_x86_64.whl", hash = "sha256:17067d097ed036636fa79f6a869ac26df7db1ba22039d962422506640314933a"}, - {file = "numpy-2.0.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:38ecb5b0582cd125f67a629072fed6f83562d9dd04d7e03256c9829bdec027ad"}, - {file = "numpy-2.0.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:cef04d068f5fb0518a77857953193b6bb94809a806bd0a14983a8f12ada060c9"}, - {file = "numpy-2.0.0.tar.gz", hash = "sha256:cf5d1c9e6837f8af9f92b6bd3e86d513cdc11f60fd62185cc49ec7d1aba34864"}, + {file = "numpy-1.26.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:9ff0f4f29c51e2803569d7a51c2304de5554655a60c5d776e35b4a41413830d0"}, + {file = "numpy-1.26.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:2e4ee3380d6de9c9ec04745830fd9e2eccb3e6cf790d39d7b98ffd19b0dd754a"}, + {file = "numpy-1.26.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d209d8969599b27ad20994c8e41936ee0964e6da07478d6c35016bc386b66ad4"}, + {file = "numpy-1.26.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ffa75af20b44f8dba823498024771d5ac50620e6915abac414251bd971b4529f"}, + {file = "numpy-1.26.4-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:62b8e4b1e28009ef2846b4c7852046736bab361f7aeadeb6a5b89ebec3c7055a"}, + {file = "numpy-1.26.4-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:a4abb4f9001ad2858e7ac189089c42178fcce737e4169dc61321660f1a96c7d2"}, + {file = "numpy-1.26.4-cp310-cp310-win32.whl", hash = "sha256:bfe25acf8b437eb2a8b2d49d443800a5f18508cd811fea3181723922a8a82b07"}, + {file = "numpy-1.26.4-cp310-cp310-win_amd64.whl", hash = "sha256:b97fe8060236edf3662adfc2c633f56a08ae30560c56310562cb4f95500022d5"}, + {file = "numpy-1.26.4-cp311-cp311-macosx_10_9_x86_64.whl", hash = 
"sha256:4c66707fabe114439db9068ee468c26bbdf909cac0fb58686a42a24de1760c71"}, + {file = "numpy-1.26.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:edd8b5fe47dab091176d21bb6de568acdd906d1887a4584a15a9a96a1dca06ef"}, + {file = "numpy-1.26.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7ab55401287bfec946ced39700c053796e7cc0e3acbef09993a9ad2adba6ca6e"}, + {file = "numpy-1.26.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:666dbfb6ec68962c033a450943ded891bed2d54e6755e35e5835d63f4f6931d5"}, + {file = "numpy-1.26.4-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:96ff0b2ad353d8f990b63294c8986f1ec3cb19d749234014f4e7eb0112ceba5a"}, + {file = "numpy-1.26.4-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:60dedbb91afcbfdc9bc0b1f3f402804070deed7392c23eb7a7f07fa857868e8a"}, + {file = "numpy-1.26.4-cp311-cp311-win32.whl", hash = "sha256:1af303d6b2210eb850fcf03064d364652b7120803a0b872f5211f5234b399f20"}, + {file = "numpy-1.26.4-cp311-cp311-win_amd64.whl", hash = "sha256:cd25bcecc4974d09257ffcd1f098ee778f7834c3ad767fe5db785be9a4aa9cb2"}, + {file = "numpy-1.26.4-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:b3ce300f3644fb06443ee2222c2201dd3a89ea6040541412b8fa189341847218"}, + {file = "numpy-1.26.4-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:03a8c78d01d9781b28a6989f6fa1bb2c4f2d51201cf99d3dd875df6fbd96b23b"}, + {file = "numpy-1.26.4-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9fad7dcb1aac3c7f0584a5a8133e3a43eeb2fe127f47e3632d43d677c66c102b"}, + {file = "numpy-1.26.4-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:675d61ffbfa78604709862923189bad94014bef562cc35cf61d3a07bba02a7ed"}, + {file = "numpy-1.26.4-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:ab47dbe5cc8210f55aa58e4805fe224dac469cde56b9f731a4c098b91917159a"}, + {file = "numpy-1.26.4-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:1dda2e7b4ec9dd512f84935c5f126c8bd8b9f2fc001e9f54af255e8c5f16b0e0"}, + {file = "numpy-1.26.4-cp312-cp312-win32.whl", hash = "sha256:50193e430acfc1346175fcbdaa28ffec49947a06918b7b92130744e81e640110"}, + {file = "numpy-1.26.4-cp312-cp312-win_amd64.whl", hash = "sha256:08beddf13648eb95f8d867350f6a018a4be2e5ad54c8d8caed89ebca558b2818"}, + {file = "numpy-1.26.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:7349ab0fa0c429c82442a27a9673fc802ffdb7c7775fad780226cb234965e53c"}, + {file = "numpy-1.26.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:52b8b60467cd7dd1e9ed082188b4e6bb35aa5cdd01777621a1658910745b90be"}, + {file = "numpy-1.26.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d5241e0a80d808d70546c697135da2c613f30e28251ff8307eb72ba696945764"}, + {file = "numpy-1.26.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f870204a840a60da0b12273ef34f7051e98c3b5961b61b0c2c1be6dfd64fbcd3"}, + {file = "numpy-1.26.4-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:679b0076f67ecc0138fd2ede3a8fd196dddc2ad3254069bcb9faf9a79b1cebcd"}, + {file = "numpy-1.26.4-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:47711010ad8555514b434df65f7d7b076bb8261df1ca9bb78f53d3b2db02e95c"}, + {file = "numpy-1.26.4-cp39-cp39-win32.whl", hash = "sha256:a354325ee03388678242a4d7ebcd08b5c727033fcff3b2f536aea978e15ee9e6"}, + {file = "numpy-1.26.4-cp39-cp39-win_amd64.whl", hash = "sha256:3373d5d70a5fe74a2c1bb6d2cfd9609ecf686d47a2d7b1d37a8f3b6bf6003aea"}, + {file = "numpy-1.26.4-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = 
"sha256:afedb719a9dcfc7eaf2287b839d8198e06dcd4cb5d276a3df279231138e83d30"}, + {file = "numpy-1.26.4-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:95a7476c59002f2f6c590b9b7b998306fba6a5aa646b1e22ddfeaf8f78c3a29c"}, + {file = "numpy-1.26.4-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:7e50d0a0cc3189f9cb0aeb3a6a6af18c16f59f004b866cd2be1c14b36134a4a0"}, + {file = "numpy-1.26.4.tar.gz", hash = "sha256:2a02aba9ed12e4ac4eb3ea9421c420301a0c6460d9830d74a9df87efa4912010"}, ] [[package]] @@ -4170,4 +4161,4 @@ test = ["big-O", "importlib-resources", "jaraco.functools", "jaraco.itertools", [metadata] lock-version = "2.0" python-versions = ">=3.10, <3.13" -content-hash = "064d839ad7e0edd3efec61b76962ea03827acef8c0fd0b6094750304f607948e" +content-hash = "967d25d797f53c469cd03e04f04d9605382280e6d104540e42e60eaf85d8f7fb" From 4c8ac887eb87f69d1ac738536cb92fd2f649660c Mon Sep 17 00:00:00 2001 From: Fridolin Glatter Date: Thu, 3 Oct 2024 12:40:51 +0200 Subject: [PATCH 44/50] Adapt tests to fixtures and own errors --- tests/core/test_run.py | 6 +++--- tests/data/test_optimization_variable.py | 4 ++-- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/tests/core/test_run.py b/tests/core/test_run.py index b0e02912..314e7548 100644 --- a/tests/core/test_run.py +++ b/tests/core/test_run.py @@ -237,7 +237,7 @@ def test_run_remove_solution(self, platform: ixmp4.Platform): assert equation.data == {} assert variable.data == {} - def test_run_clone(self, platform: ixmp4.Platform, test_data_annual): + def test_run_clone(self, platform: ixmp4.Platform): # Prepare test data and platform test_data_annual = self.small.annual.copy() # Define required regions and units in the database @@ -289,6 +289,6 @@ def test_run_clone(self, platform: ixmp4.Platform, test_data_annual): assert cloned_indexset.elements == expected # Test cloning Run without iamc data - run = test_mp.runs.create("Model", "Scenario") - clone_without_iamc = test_mp.runs.clone(run.id) + run = platform.runs.create("Model", "Scenario") + clone_without_iamc = platform.runs.clone(run.id) assert clone_without_iamc.iamc.tabulate().empty diff --git a/tests/data/test_optimization_variable.py b/tests/data/test_optimization_variable.py index 74166e4a..5fc6c2cc 100644 --- a/tests/data/test_optimization_variable.py +++ b/tests/data/test_optimization_variable.py @@ -85,11 +85,11 @@ def test_create_variable(self, platform: ixmp4.Platform): # Test that giving column_names, but not constrained_to_indexsets raises with pytest.raises( - ValueError, + OptimizationItemUsageError, match="Received `column_names` to name columns, but no " "`constrained_to_indexsets`", ): - _ = test_mp.backend.optimization.variables.create( + _ = platform.backend.optimization.variables.create( run_id=run.id, name="Variable 0", column_names=["Dimension 1"], From 7bc37d029603ec475b606de5b8e7a2b47e4ec0ea Mon Sep 17 00:00:00 2001 From: Fridolin Glatter Date: Thu, 3 Oct 2024 12:43:35 +0200 Subject: [PATCH 45/50] Adapt linopy tutorial tests to fixtures and own errors --- .../transport/test_dantzig_model_linopy.py | 51 +++++++------------ 1 file changed, 17 insertions(+), 34 deletions(-) diff --git a/tests/tutorial/transport/test_dantzig_model_linopy.py b/tests/tutorial/transport/test_dantzig_model_linopy.py index e3755bc8..0f4b6431 100644 --- a/tests/tutorial/transport/test_dantzig_model_linopy.py +++ b/tests/tutorial/transport/test_dantzig_model_linopy.py @@ -1,18 +1,16 @@ import numpy as np import pandas as pd -import xarray as xr -from ixmp4 import Platform 
+import ixmp4 from ixmp4.core import Run, Unit -from ...utils import all_platforms from .dantzig_model_linopy import ( create_dantzig_model, read_dantzig_solution, ) -def create_dantzig_run(mp: Platform) -> Run: +def create_dantzig_run(mp: ixmp4.Platform) -> Run: """Create a Run for the transport tutorial. Please see the tutorial file for explanation. @@ -72,20 +70,15 @@ def create_dantzig_run(mp: Platform) -> Run: return run -@all_platforms class TestTransportTutorialLinopy: - def test_create_dantzig_model(self, test_mp, request): - test_mp: Platform = request.getfixturevalue(test_mp) # type: ignore - run = create_dantzig_run(test_mp) + def test_create_dantzig_model(self, platform: ixmp4.Platform): + run = create_dantzig_run(platform) model = create_dantzig_model(run) # Set expectations - expected = { - "supply_constraint_sign": xr.DataArray(["<=", "<="]), - "demand_constraint_sign": xr.DataArray([">=", ">=", ">="]), - # TODO enable this once #95 is merged; allows removal of xarray from file - # "supply_constraint_sign": np.array(["<=", "<="]), - # "demand_constraint_sign": np.array([">=", ">=", ">="]), + expected: dict[str, pd.Series] = { + "supply_constraint_sign": pd.Series(["<=", "<="]), + "demand_constraint_sign": pd.Series([">=", ">=", ">="]), "supply_constraint_rhs": pd.Series([350.0, 600.0]), "demand_constraint_rhs": pd.Series([325.0, 300.0, 275.0]), "objective_coeffs": pd.Series([0.162, 0.225, 0.126, 0.153, 0.225, 0.162]), @@ -102,15 +95,11 @@ def test_create_dantzig_model(self, test_mp, request): model.constraints["Observe supply limit at plant i"].data.rhs.values, expected["supply_constraint_rhs"], ) - assert ( - model.constraints["Observe supply limit at plant i"].data.sign.values - == expected["supply_constraint_sign"] + # TODO Replace this with np.strings.equal once supporting numpy >= 2.0.0 + assert np.char.equal( + model.constraints["Observe supply limit at plant i"].data.sign.values, + expected["supply_constraint_sign"], ).all() - # TODO enable this once #95 is merged - # assert np.strings.equal( - # model.constraints["Observe supply limit at plant i"].data.sign.values, - # expected["supply_constraint_sign"], - # ).all() assert model.constraints["Satisfy demand at market j"].coord_dims == ( "Markets", ) @@ -118,26 +107,20 @@ def test_create_dantzig_model(self, test_mp, request): model.constraints["Satisfy demand at market j"].data.rhs.values, expected["demand_constraint_rhs"], ) - assert ( - model.constraints["Satisfy demand at market j"].data.sign.values - == expected["demand_constraint_sign"] + # TODO Replace this with np.strings.equal once supporting numpy >= 2.0.0 + assert np.char.equal( + model.constraints["Satisfy demand at market j"].data.sign.values, + expected["demand_constraint_sign"], ).all() - # TODO enable this once #95 is merged - # assert np.strings.equal( - # model.constraints["Satisfy demand at market j"].data.sign.values, - # expected["demand_constraint_sign"], - # ).all() assert model.objective.sense == "min" assert np.allclose( model.objective.coeffs.to_pandas(), expected["objective_coeffs"] ) - def test_read_dantzig_solution(self, test_mp, request): - test_mp: Platform = request.getfixturevalue(test_mp) # type: ignore - + def test_read_dantzig_solution(self, platform: ixmp4.Platform): # Could we store this as class attributes to avoid repetition? 
-        run = create_dantzig_run(test_mp)
+        run = create_dantzig_run(platform)
         model = create_dantzig_model(run)
         model.solve("highs")
         read_dantzig_solution(model=model, run=run)

From fb524217ef2f386deaeb8270862305389e9f6c1a Mon Sep 17 00:00:00 2001
From: Fridolin Glatter
Date: Thu, 3 Oct 2024 12:47:16 +0200
Subject: [PATCH 46/50] Clarify core docstrings

---
 ixmp4/core/optimization/equation.py  | 5 ++---
 ixmp4/core/optimization/parameter.py | 2 +-
 ixmp4/core/optimization/table.py     | 2 +-
 ixmp4/core/optimization/variable.py  | 4 ++--
 4 files changed, 6 insertions(+), 7 deletions(-)

diff --git a/ixmp4/core/optimization/equation.py b/ixmp4/core/optimization/equation.py
index f49ec897..08780a98 100644
--- a/ixmp4/core/optimization/equation.py
+++ b/ixmp4/core/optimization/equation.py
@@ -32,8 +32,7 @@ def data(self) -> dict[str, Any]:
         return self._model.data
 
     def add(self, data: dict[str, Any] | pd.DataFrame) -> None:
-        # TODO change to "to the Equation"
-        """Adds data to an existing Equation."""
+        """Adds data to the Equation."""
         self.backend.optimization.equations.add_data(
             equation_id=self._model.id, data=data
         )
@@ -42,7 +41,7 @@ def add(self, data: dict[str, Any] | pd.DataFrame) -> None:
         ).data
 
     def remove_data(self) -> None:
-        """Removes data from an existing Equation."""
+        """Removes all data from the Equation."""
         self.backend.optimization.equations.remove_data(equation_id=self._model.id)
         self._model.data = self.backend.optimization.equations.get(
             run_id=self._model.run__id, name=self._model.name
diff --git a/ixmp4/core/optimization/parameter.py b/ixmp4/core/optimization/parameter.py
index 32c07295..183d896d 100644
--- a/ixmp4/core/optimization/parameter.py
+++ b/ixmp4/core/optimization/parameter.py
@@ -32,7 +32,7 @@ def data(self) -> dict[str, Any]:
         return self._model.data
 
     def add(self, data: dict[str, Any] | pd.DataFrame) -> None:
-        """Adds data to an existing Parameter."""
+        """Adds data to the Parameter."""
         self.backend.optimization.parameters.add_data(
             parameter_id=self._model.id, data=data
         )
diff --git a/ixmp4/core/optimization/table.py b/ixmp4/core/optimization/table.py
index dad74e6d..2055060e 100644
--- a/ixmp4/core/optimization/table.py
+++ b/ixmp4/core/optimization/table.py
@@ -32,7 +32,7 @@ def data(self) -> dict[str, Any]:
         return self._model.data
 
     def add(self, data: dict[str, Any] | pd.DataFrame) -> None:
-        """Adds data to an existing Table."""
+        """Adds data to the Table."""
         self.backend.optimization.tables.add_data(table_id=self._model.id, data=data)
         self._model.data = self.backend.optimization.tables.get(
             run_id=self._model.run__id, name=self._model.name
diff --git a/ixmp4/core/optimization/variable.py b/ixmp4/core/optimization/variable.py
index cc14a337..26271c2f 100644
--- a/ixmp4/core/optimization/variable.py
+++ b/ixmp4/core/optimization/variable.py
@@ -32,7 +32,7 @@ def data(self) -> dict[str, Any]:
         return self._model.data
 
     def add(self, data: dict[str, Any] | pd.DataFrame) -> None:
-        """Adds data to an existing Variable."""
+        """Adds data to the Variable."""
        self.backend.optimization.variables.add_data(
             variable_id=self._model.id, data=data
         )
@@ -41,7 +41,7 @@ def add(self, data: dict[str, Any] | pd.DataFrame) -> None:
         ).data
 
     def remove_data(self) -> None:
-        """Removes all data from the Variable."""
+        """Removes all data from the Variable."""
         self.backend.optimization.variables.remove_data(variable_id=self._model.id)
         self._model.data = self.backend.optimization.variables.get(
             run_id=self._model.run__id, name=self._model.name

From d4e187d2cf27494d93f0904dec3509444429dc4b
Mon Sep 17 00:00:00 2001 From: Fridolin Glatter Date: Thu, 3 Oct 2024 14:12:37 +0200 Subject: [PATCH 47/50] Bump pandas version in poetry.lock --- poetry.lock | 73 +++++++++++++++++++++++++++++++---------------------- 1 file changed, 43 insertions(+), 30 deletions(-) diff --git a/poetry.lock b/poetry.lock index caf34b80..6f6a635e 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1789,40 +1789,53 @@ files = [ [[package]] name = "pandas" -version = "2.2.2" +version = "2.2.3" description = "Powerful data structures for data analysis, time series, and statistics" optional = false python-versions = ">=3.9" files = [ - {file = "pandas-2.2.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:90c6fca2acf139569e74e8781709dccb6fe25940488755716d1d354d6bc58bce"}, - {file = "pandas-2.2.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c7adfc142dac335d8c1e0dcbd37eb8617eac386596eb9e1a1b77791cf2498238"}, - {file = "pandas-2.2.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4abfe0be0d7221be4f12552995e58723c7422c80a659da13ca382697de830c08"}, - {file = "pandas-2.2.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8635c16bf3d99040fdf3ca3db669a7250ddf49c55dc4aa8fe0ae0fa8d6dcc1f0"}, - {file = "pandas-2.2.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:40ae1dffb3967a52203105a077415a86044a2bea011b5f321c6aa64b379a3f51"}, - {file = "pandas-2.2.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:8e5a0b00e1e56a842f922e7fae8ae4077aee4af0acb5ae3622bd4b4c30aedf99"}, - {file = "pandas-2.2.2-cp310-cp310-win_amd64.whl", hash = "sha256:ddf818e4e6c7c6f4f7c8a12709696d193976b591cc7dc50588d3d1a6b5dc8772"}, - {file = "pandas-2.2.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:696039430f7a562b74fa45f540aca068ea85fa34c244d0deee539cb6d70aa288"}, - {file = "pandas-2.2.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:8e90497254aacacbc4ea6ae5e7a8cd75629d6ad2b30025a4a8b09aa4faf55151"}, - {file = "pandas-2.2.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:58b84b91b0b9f4bafac2a0ac55002280c094dfc6402402332c0913a59654ab2b"}, - {file = "pandas-2.2.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6d2123dc9ad6a814bcdea0f099885276b31b24f7edf40f6cdbc0912672e22eee"}, - {file = "pandas-2.2.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:2925720037f06e89af896c70bca73459d7e6a4be96f9de79e2d440bd499fe0db"}, - {file = "pandas-2.2.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:0cace394b6ea70c01ca1595f839cf193df35d1575986e484ad35c4aeae7266c1"}, - {file = "pandas-2.2.2-cp311-cp311-win_amd64.whl", hash = "sha256:873d13d177501a28b2756375d59816c365e42ed8417b41665f346289adc68d24"}, - {file = "pandas-2.2.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:9dfde2a0ddef507a631dc9dc4af6a9489d5e2e740e226ad426a05cabfbd7c8ef"}, - {file = "pandas-2.2.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:e9b79011ff7a0f4b1d6da6a61aa1aa604fb312d6647de5bad20013682d1429ce"}, - {file = "pandas-2.2.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1cb51fe389360f3b5a4d57dbd2848a5f033350336ca3b340d1c53a1fad33bcad"}, - {file = "pandas-2.2.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eee3a87076c0756de40b05c5e9a6069c035ba43e8dd71c379e68cab2c20f16ad"}, - {file = "pandas-2.2.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:3e374f59e440d4ab45ca2fffde54b81ac3834cf5ae2cdfa69c90bc03bde04d76"}, - {file = 
"pandas-2.2.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:43498c0bdb43d55cb162cdc8c06fac328ccb5d2eabe3cadeb3529ae6f0517c32"}, - {file = "pandas-2.2.2-cp312-cp312-win_amd64.whl", hash = "sha256:d187d355ecec3629624fccb01d104da7d7f391db0311145817525281e2804d23"}, - {file = "pandas-2.2.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:0ca6377b8fca51815f382bd0b697a0814c8bda55115678cbc94c30aacbb6eff2"}, - {file = "pandas-2.2.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:9057e6aa78a584bc93a13f0a9bf7e753a5e9770a30b4d758b8d5f2a62a9433cd"}, - {file = "pandas-2.2.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:001910ad31abc7bf06f49dcc903755d2f7f3a9186c0c040b827e522e9cef0863"}, - {file = "pandas-2.2.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:66b479b0bd07204e37583c191535505410daa8df638fd8e75ae1b383851fe921"}, - {file = "pandas-2.2.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:a77e9d1c386196879aa5eb712e77461aaee433e54c68cf253053a73b7e49c33a"}, - {file = "pandas-2.2.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:92fd6b027924a7e178ac202cfbe25e53368db90d56872d20ffae94b96c7acc57"}, - {file = "pandas-2.2.2-cp39-cp39-win_amd64.whl", hash = "sha256:640cef9aa381b60e296db324337a554aeeb883ead99dc8f6c18e81a93942f5f4"}, - {file = "pandas-2.2.2.tar.gz", hash = "sha256:9e79019aba43cb4fda9e4d983f8e88ca0373adbb697ae9c6c43093218de28b54"}, + {file = "pandas-2.2.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:1948ddde24197a0f7add2bdc4ca83bf2b1ef84a1bc8ccffd95eda17fd836ecb5"}, + {file = "pandas-2.2.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:381175499d3802cde0eabbaf6324cce0c4f5d52ca6f8c377c29ad442f50f6348"}, + {file = "pandas-2.2.3-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:d9c45366def9a3dd85a6454c0e7908f2b3b8e9c138f5dc38fed7ce720d8453ed"}, + {file = "pandas-2.2.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:86976a1c5b25ae3f8ccae3a5306e443569ee3c3faf444dfd0f41cda24667ad57"}, + {file = "pandas-2.2.3-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:b8661b0238a69d7aafe156b7fa86c44b881387509653fdf857bebc5e4008ad42"}, + {file = "pandas-2.2.3-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:37e0aced3e8f539eccf2e099f65cdb9c8aa85109b0be6e93e2baff94264bdc6f"}, + {file = "pandas-2.2.3-cp310-cp310-win_amd64.whl", hash = "sha256:56534ce0746a58afaf7942ba4863e0ef81c9c50d3f0ae93e9497d6a41a057645"}, + {file = "pandas-2.2.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:66108071e1b935240e74525006034333f98bcdb87ea116de573a6a0dccb6c039"}, + {file = "pandas-2.2.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:7c2875855b0ff77b2a64a0365e24455d9990730d6431b9e0ee18ad8acee13dbd"}, + {file = "pandas-2.2.3-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:cd8d0c3be0515c12fed0bdbae072551c8b54b7192c7b1fda0ba56059a0179698"}, + {file = "pandas-2.2.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c124333816c3a9b03fbeef3a9f230ba9a737e9e5bb4060aa2107a86cc0a497fc"}, + {file = "pandas-2.2.3-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:63cc132e40a2e084cf01adf0775b15ac515ba905d7dcca47e9a251819c575ef3"}, + {file = "pandas-2.2.3-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:29401dbfa9ad77319367d36940cd8a0b3a11aba16063e39632d98b0e931ddf32"}, + {file = "pandas-2.2.3-cp311-cp311-win_amd64.whl", hash = "sha256:3fc6873a41186404dad67245896a6e440baacc92f5b716ccd1bc9ed2995ab2c5"}, + {file = 
"pandas-2.2.3-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:b1d432e8d08679a40e2a6d8b2f9770a5c21793a6f9f47fdd52c5ce1948a5a8a9"}, + {file = "pandas-2.2.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:a5a1595fe639f5988ba6a8e5bc9649af3baf26df3998a0abe56c02609392e0a4"}, + {file = "pandas-2.2.3-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:5de54125a92bb4d1c051c0659e6fcb75256bf799a732a87184e5ea503965bce3"}, + {file = "pandas-2.2.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fffb8ae78d8af97f849404f21411c95062db1496aeb3e56f146f0355c9989319"}, + {file = "pandas-2.2.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:6dfcb5ee8d4d50c06a51c2fffa6cff6272098ad6540aed1a76d15fb9318194d8"}, + {file = "pandas-2.2.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:062309c1b9ea12a50e8ce661145c6aab431b1e99530d3cd60640e255778bd43a"}, + {file = "pandas-2.2.3-cp312-cp312-win_amd64.whl", hash = "sha256:59ef3764d0fe818125a5097d2ae867ca3fa64df032331b7e0917cf5d7bf66b13"}, + {file = "pandas-2.2.3-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:f00d1345d84d8c86a63e476bb4955e46458b304b9575dcf71102b5c705320015"}, + {file = "pandas-2.2.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:3508d914817e153ad359d7e069d752cdd736a247c322d932eb89e6bc84217f28"}, + {file = "pandas-2.2.3-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:22a9d949bfc9a502d320aa04e5d02feab689d61da4e7764b62c30b991c42c5f0"}, + {file = "pandas-2.2.3-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f3a255b2c19987fbbe62a9dfd6cff7ff2aa9ccab3fc75218fd4b7530f01efa24"}, + {file = "pandas-2.2.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:800250ecdadb6d9c78eae4990da62743b857b470883fa27f652db8bdde7f6659"}, + {file = "pandas-2.2.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:6374c452ff3ec675a8f46fd9ab25c4ad0ba590b71cf0656f8b6daa5202bca3fb"}, + {file = "pandas-2.2.3-cp313-cp313-win_amd64.whl", hash = "sha256:61c5ad4043f791b61dd4752191d9f07f0ae412515d59ba8f005832a532f8736d"}, + {file = "pandas-2.2.3-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:3b71f27954685ee685317063bf13c7709a7ba74fc996b84fc6821c59b0f06468"}, + {file = "pandas-2.2.3-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:38cf8125c40dae9d5acc10fa66af8ea6fdf760b2714ee482ca691fc66e6fcb18"}, + {file = "pandas-2.2.3-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:ba96630bc17c875161df3818780af30e43be9b166ce51c9a18c1feae342906c2"}, + {file = "pandas-2.2.3-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1db71525a1538b30142094edb9adc10be3f3e176748cd7acc2240c2f2e5aa3a4"}, + {file = "pandas-2.2.3-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:15c0e1e02e93116177d29ff83e8b1619c93ddc9c49083f237d4312337a61165d"}, + {file = "pandas-2.2.3-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:ad5b65698ab28ed8d7f18790a0dc58005c7629f227be9ecc1072aa74c0c1d43a"}, + {file = "pandas-2.2.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:bc6b93f9b966093cb0fd62ff1a7e4c09e6d546ad7c1de191767baffc57628f39"}, + {file = "pandas-2.2.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:5dbca4c1acd72e8eeef4753eeca07de9b1db4f398669d5994086f788a5d7cc30"}, + {file = "pandas-2.2.3-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:8cd6d7cc958a3910f934ea8dbdf17b2364827bb4dafc38ce6eef6bb3d65ff09c"}, + {file = 
"pandas-2.2.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:99df71520d25fade9db7c1076ac94eb994f4d2673ef2aa2e86ee039b6746d20c"}, + {file = "pandas-2.2.3-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:31d0ced62d4ea3e231a9f228366919a5ea0b07440d9d4dac345376fd8e1477ea"}, + {file = "pandas-2.2.3-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:7eee9e7cea6adf3e3d24e304ac6b8300646e2a5d1cd3a3c2abed9101b0846761"}, + {file = "pandas-2.2.3-cp39-cp39-win_amd64.whl", hash = "sha256:4850ba03528b6dd51d6c5d273c46f183f39a9baf3f0143e566b89450965b105e"}, + {file = "pandas-2.2.3.tar.gz", hash = "sha256:4f18ba62b61d7e192368b84517265a99b4d7ee8912f8708660fb4a366cc82667"}, ] [package.dependencies] From a3673387cae55979307561436176ee5eb5f9e4ba Mon Sep 17 00:00:00 2001 From: Fridolin Glatter Date: Thu, 3 Oct 2024 14:13:23 +0200 Subject: [PATCH 48/50] Fix np.strings comparison --- tests/tutorial/transport/test_dantzig_model_linopy.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/tutorial/transport/test_dantzig_model_linopy.py b/tests/tutorial/transport/test_dantzig_model_linopy.py index 0f4b6431..8c0ee67f 100644 --- a/tests/tutorial/transport/test_dantzig_model_linopy.py +++ b/tests/tutorial/transport/test_dantzig_model_linopy.py @@ -76,9 +76,9 @@ def test_create_dantzig_model(self, platform: ixmp4.Platform): model = create_dantzig_model(run) # Set expectations - expected: dict[str, pd.Series] = { - "supply_constraint_sign": pd.Series(["<=", "<="]), - "demand_constraint_sign": pd.Series([">=", ">=", ">="]), + expected: dict[str, np.ndarray | pd.Series] = { + "supply_constraint_sign": np.array(["<=", "<="]), + "demand_constraint_sign": np.array([">=", ">=", ">="]), "supply_constraint_rhs": pd.Series([350.0, 600.0]), "demand_constraint_rhs": pd.Series([325.0, 300.0, 275.0]), "objective_coeffs": pd.Series([0.162, 0.225, 0.126, 0.153, 0.225, 0.162]), From 31498cefc23ca249fea0c28d831ce0c58902bf14 Mon Sep 17 00:00:00 2001 From: Fridolin Glatter Date: Thu, 3 Oct 2024 14:13:49 +0200 Subject: [PATCH 49/50] Set new variable.data index only for non-scalars --- ixmp4/data/db/optimization/variable/repository.py | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/ixmp4/data/db/optimization/variable/repository.py b/ixmp4/data/db/optimization/variable/repository.py index 73f88aa9..8f2fe272 100644 --- a/ixmp4/data/db/optimization/variable/repository.py +++ b/ixmp4/data/db/optimization/variable/repository.py @@ -180,13 +180,18 @@ def add_data(self, variable_id: int, data: dict[str, Any] | pd.DataFrame) -> Non f"{', '.join(missing_columns)}!" ) + # TODO Somehow, this got to main without the if index_list checks + # -> Do we need/have a test for add_data() to a scalar variable? 
index_list = [column.name for column in variable.columns] existing_data = pd.DataFrame(variable.data) - if not existing_data.empty: - existing_data.set_index(index_list, inplace=True) - variable.data = ( - data.set_index(index_list).combine_first(existing_data).reset_index() - ).to_dict(orient="list") + if index_list: + data = data.set_index(index_list) + if not existing_data.empty: + existing_data.set_index(index_list, inplace=True) + data = data.combine_first(existing_data) + if index_list: + data = data.reset_index() + variable.data = data.to_dict(orient="list") self.session.commit() From 8b55f96d96fd40467c59e50dfd6eb9bebbda8244 Mon Sep 17 00:00:00 2001 From: Fridolin Glatter Date: Mon, 7 Oct 2024 13:26:33 +0200 Subject: [PATCH 50/50] Reconcile alembic migrations --- ...3f7467dab_temporary_create_all_missing_.py | 26 ++----------------- 1 file changed, 2 insertions(+), 24 deletions(-) diff --git a/ixmp4/db/migrations/versions/0d73f7467dab_temporary_create_all_missing_.py b/ixmp4/db/migrations/versions/0d73f7467dab_temporary_create_all_missing_.py index cb2f19b4..dd2cf996 100644 --- a/ixmp4/db/migrations/versions/0d73f7467dab_temporary_create_all_missing_.py +++ b/ixmp4/db/migrations/versions/0d73f7467dab_temporary_create_all_missing_.py @@ -2,7 +2,7 @@ """TEMPORARY Create all missing optimization items for testing Revision ID: 0d73f7467dab -Revises: 081bbda6bb7b +Revises: c29289ced488 Create Date: 2024-07-08 14:09:49.174145 """ @@ -13,7 +13,7 @@ # Revision identifiers, used by Alembic. revision = "0d73f7467dab" -down_revision = "081bbda6bb7b" +down_revision = "c29289ced488" branch_labels = None depends_on = None @@ -199,10 +199,6 @@ def upgrade(): batch_op.add_column(sa.Column("equation__id", sa.Integer(), nullable=True)) batch_op.add_column(sa.Column("parameter__id", sa.Integer(), nullable=True)) batch_op.add_column(sa.Column("variable__id", sa.Integer(), nullable=True)) - batch_op.add_column(sa.Column("created_at", sa.DateTime(), nullable=True)) - batch_op.add_column( - sa.Column("created_by", sa.String(length=255), nullable=True) - ) batch_op.alter_column("table__id", existing_type=sa.INTEGER(), nullable=True) batch_op.drop_index("ix_optimization_column_table__id") batch_op.create_foreign_key( @@ -226,27 +222,11 @@ def upgrade(): ["id"], ) - with op.batch_alter_table("region", schema=None) as batch_op: - batch_op.alter_column( - "name", - existing_type=sa.VARCHAR(length=1023), - type_=sa.String(length=255), - existing_nullable=False, - ) - # ### end Alembic commands ### def downgrade(): # ### commands auto generated by Alembic - please adjust! ### - with op.batch_alter_table("region", schema=None) as batch_op: - batch_op.alter_column( - "name", - existing_type=sa.String(length=255), - type_=sa.VARCHAR(length=1023), - existing_nullable=False, - ) - with op.batch_alter_table("optimization_column", schema=None) as batch_op: batch_op.drop_constraint( batch_op.f("fk_optimization_column_equation__id_optimization_equation"), @@ -266,8 +246,6 @@ def downgrade(): "ix_optimization_column_table__id", ["table__id"], unique=False ) batch_op.alter_column("table__id", existing_type=sa.INTEGER(), nullable=False) - batch_op.drop_column("created_by") - batch_op.drop_column("created_at") batch_op.drop_column("variable__id") batch_op.drop_column("parameter__id") batch_op.drop_column("equation__id")
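
Note on the fixture pattern adopted in patches 44 and 45: the old `all_platforms`
decorator and its `request.getfixturevalue(test_mp)` indirection are replaced by a
plain, typed `platform` fixture. A minimal self-contained sketch of that pattern
follows; the `FakePlatform` stand-in is hypothetical, used only so the sketch runs
without ixmp4's real test backend:

    import pytest


    class FakePlatform:
        """Hypothetical stand-in for ixmp4.Platform."""

        def __init__(self) -> None:
            self.runs: list[str] = []


    @pytest.fixture
    def platform() -> FakePlatform:
        # Each test receives a fresh, isolated instance simply by declaring
        # the fixture name as a parameter; no getfixturevalue() indirection.
        return FakePlatform()


    def test_create_run(platform: FakePlatform) -> None:
        platform.runs.append("Model/Scenario")
        assert platform.runs == ["Model/Scenario"]

Besides being shorter, the direct fixture exposes the parameter's type to static
type checkers, which is why the `# type: ignore` comments could be dropped.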
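Note on patch 49: the rewritten `add_data` leans on `pandas.DataFrame.combine_first`,
which behaves as an index-aligned upsert: rows present in the incoming frame win,
and rows present only in the stored frame are kept. Setting and resetting the index
is skipped for scalar variables, whose `index_list` is empty. A condensed sketch of
the same idea, with hypothetical column names and not the repository code verbatim:

    import pandas as pd

    # Stored data for a variable indexed by a single "region" column.
    existing = pd.DataFrame({"region": ["World", "Europe"], "levels": [1.0, 2.0]})
    # Incoming data: updates "Europe" and adds "Asia".
    incoming = pd.DataFrame({"region": ["Europe", "Asia"], "levels": [5.0, 3.0]})

    index_list = ["region"]  # would be [] for a scalar variable

    data = incoming
    if index_list:
        # Align on the index columns so rows are matched by key, not position.
        data = data.set_index(index_list)
        existing = existing.set_index(index_list)
    data = data.combine_first(existing)
    if index_list:
        data = data.reset_index()

    print(data.to_dict(orient="list"))
    # {'region': ['Asia', 'Europe', 'World'], 'levels': [3.0, 5.0, 1.0]}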
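Note on patch 50: outside the migration body, the one functional change is
re-pointing `down_revision`. Alembic derives the migration order solely from these
identifier pairs, so after reordering or merging branches the chain has to be
re-linked by hand. Schematically, using the identifiers from the patch (that
`c29289ced488` itself revises `081bbda6bb7b` is an assumption for illustration):

    # migration c29289ced488 (assumed to sit in between)
    revision = "c29289ced488"
    down_revision = "081bbda6bb7b"

    # migration 0d73f7467dab (this patch)
    revision = "0d73f7467dab"
    down_revision = "c29289ced488"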