Skip to content

Commit

Permalink
Merge pull request #504 from assume-framework/improve_output_role
Browse files Browse the repository at this point in the history
improve output writing speed
  • Loading branch information
maurerle authored Nov 29, 2024
2 parents 1e1b932 + d21a4e5 commit fd8963d
Show file tree
Hide file tree
Showing 10 changed files with 247 additions and 189 deletions.
382 changes: 220 additions & 162 deletions assume/common/outputs.py

Large diffs are not rendered by default.

6 changes: 3 additions & 3 deletions assume/markets/base_market.py
Original file line number Diff line number Diff line change
Expand Up @@ -702,7 +702,7 @@ async def store_order_book(self, orderbook: Orderbook):
if db_addr:
message = {
"context": "write_results",
"type": "store_order_book",
"type": "market_orders",
"market_id": self.marketconfig.market_id,
"data": orderbook,
}
Expand All @@ -724,7 +724,7 @@ async def store_market_results(self, market_meta):
if db_addr:
message = {
"context": "write_results",
"type": "store_market_results",
"type": "market_meta",
"market_id": self.marketconfig.market_id,
"data": market_meta,
}
Expand All @@ -746,7 +746,7 @@ async def store_flows(self, flows: dict[tuple, float]):
if db_addr:
message = {
"context": "write_results",
"type": "store_flows",
"type": "grid_flows",
"market_id": self.marketconfig.market_id,
"data": flows,
}
Expand Down
2 changes: 1 addition & 1 deletion assume/scenario/loader_csv.py
Original file line number Diff line number Diff line change
Expand Up @@ -878,7 +878,7 @@ def run_learning(
# initialize policies already here to set the obs_dim and act_dim in the learning role
actors_and_critics = None
world.learning_role.initialize_policy(actors_and_critics=actors_and_critics)
world.output_role.del_similar_runs()
world.output_role.delete_similar_runs()

# check if we already stored policies for this simulation
save_path = world.learning_config["trained_policies_save_path"]
Expand Down
2 changes: 1 addition & 1 deletion assume/units/demand.py
Original file line number Diff line number Diff line change
Expand Up @@ -101,7 +101,7 @@ def calculate_min_max_power(
Returns:
tuple[pandas.Series, pandas.Series]: The bid volume as both the minimum and maximum power output of the unit.
"""

# end includes the end of the last product, to get the last products' start time we deduct the frequency once
end_excl = end - self.index.freq
bid_volume = (self.volume - self.outputs[product_type]).loc[start:end_excl]
Expand Down
2 changes: 1 addition & 1 deletion assume/units/powerplant.py
Original file line number Diff line number Diff line change
Expand Up @@ -260,7 +260,7 @@ def calculate_min_max_power(
Note:
The calculation does not include ramping constraints and can be used for arbitrary start times in the future.
"""
# end includes the end of the last product, to get the last products' start time we deduct the frequency once
# end includes the end of the last product, to get the last products' start time we deduct the frequency once
end_excl = end - self.index.freq

base_load = self.outputs["energy"].loc[start:end_excl]
Expand Down
1 change: 1 addition & 0 deletions docs/source/release_notes.rst
Original file line number Diff line number Diff line change
Expand Up @@ -20,6 +20,7 @@ Upcoming Release
pandas Series. The `FastSeries` class retains a close resemblance to the pandas Series, including core
functionalities like indexing, slicing, and arithmetic operations. This ensures seamless integration,
allowing users to work with the new class without requiring significant code adaptation.
- **Performance Optimization:** Output role handles dict data directly and only converts to DataFrame on Database write.

**Bugfixes:**
- **Tutorials**: General fixes of the tutorials, to align with updated functionalities of Assume
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -261,7 +261,7 @@
" # initialize policies already here to set the obs_dim and act_dim in the learning role\n",
" actors_and_critics = None\n",
" world.learning_role.initialize_policy(actors_and_critics=actors_and_critics)\n",
" world.output_role.del_similar_runs()\n",
" world.output_role.delete_similar_runs()\n",
"\n",
" # check if we already stored policies for this simulation\n",
" save_path = world.learning_config[\"trained_policies_save_path\"]\n",
Expand Down
5 changes: 2 additions & 3 deletions examples/notebooks/04_reinforcement_learning_example.ipynb
Original file line number Diff line number Diff line change
Expand Up @@ -1073,7 +1073,6 @@
"\n",
" duration = (end - start) / timedelta(hours=1)\n",
"\n",
"\n",
" # calculate profit as income - running_cost from this event\n",
" order_profit = order[\"accepted_price\"] * order[\"accepted_volume\"] * duration\n",
" order_cost = marginal_cost * order[\"accepted_volume\"] * duration\n",
Expand Down Expand Up @@ -2091,6 +2090,7 @@
"\n",
" return bids\n",
"\n",
"\n",
"# we define the class again and inherit from the initial class just to add the additional method to the original class\n",
"# this is a workaround to have different methods of the class in different cells\n",
"# which is good for the purpose of this tutorial\n",
Expand Down Expand Up @@ -2125,7 +2125,7 @@
" for order in orderbook:\n",
" start = order[\"start_time\"]\n",
" end = order[\"end_time\"]\n",
" \n",
"\n",
" # end includes the end of the last product, to get the last products' start time we deduct the frequency once\n",
" end_excl = end - unit.index.freq\n",
"\n",
Expand All @@ -2136,7 +2136,6 @@
"\n",
" duration = (end - start) / timedelta(hours=1)\n",
"\n",
"\n",
" # calculate profit as income - running_cost from this event\n",
" order_profit = order[\"accepted_price\"] * order[\"accepted_volume\"] * duration\n",
" order_cost = marginal_cost * order[\"accepted_volume\"] * duration\n",
Expand Down
2 changes: 1 addition & 1 deletion examples/notebooks/09_example_Sim_and_xRL.ipynb
Original file line number Diff line number Diff line change
Expand Up @@ -529,7 +529,7 @@
" # initialize policies already here to set the obs_dim and act_dim in the learning role\n",
" actors_and_critics = None\n",
" world.learning_role.initialize_policy(actors_and_critics=actors_and_critics)\n",
" world.output_role.del_similar_runs()\n",
" world.output_role.delete_similar_runs()\n",
"\n",
" # check if we already stored policies for this simulation\n",
" save_path = world.learning_config[\"trained_policies_save_path\"]\n",
Expand Down
32 changes: 16 additions & 16 deletions tests/test_outputs.py
Original file line number Diff line number Diff line change
Expand Up @@ -19,16 +19,16 @@ def test_output_market_orders():
start = datetime(2020, 1, 1)
end = datetime(2020, 1, 2)
output_writer = WriteOutput("test_sim", start, end, engine)
assert len(output_writer.write_dfs.keys()) == 0
assert len(output_writer.write_buffers.keys()) == 0
meta = {"sender_id": None}
content = {
"context": "write_results",
"type": "store_order_book",
"type": "market_orders",
"sender": "CRM_pos",
"data": [],
}
output_writer.handle_output_message(content, meta)
assert len(output_writer.write_dfs["market_orders"]) == 0
assert len(output_writer.write_buffers["market_orders"]) == 0

orderbook = [
{
Expand Down Expand Up @@ -67,24 +67,24 @@ def test_output_market_orders():

content = {
"context": "write_results",
"type": "store_order_book",
"type": "market_orders",
"sender": "CRM_pos",
"data": orderbook,
}
output_writer.handle_output_message(content, meta)
assert len(output_writer.write_dfs["market_orders"]) == 1
assert len(output_writer.write_buffers["market_orders"]) == 1


def test_output_market_results():
engine = create_engine(DB_URI)
start = datetime(2020, 1, 1)
end = datetime(2020, 1, 2)
output_writer = WriteOutput("test_sim", start, end, engine)
assert len(output_writer.write_dfs.keys()) == 0
assert len(output_writer.write_buffers.keys()) == 0
meta = {"sender_id": None}
content = {
"context": "write_results",
"type": "store_market_results",
"type": "market_meta",
"sender": "CRM_pos",
"data": [
{
Expand All @@ -105,36 +105,36 @@ def test_output_market_results():
],
}
output_writer.handle_output_message(content, meta)
assert len(output_writer.write_dfs["market_meta"]) == 1, "market_meta"
assert len(output_writer.write_buffers["market_meta"]) == 1, "market_meta"


def test_output_market_dispatch():
engine = create_engine(DB_URI)
start = datetime(2020, 1, 1)
end = datetime(2020, 1, 2)
output_writer = WriteOutput("test_sim", start, end, engine)
assert len(output_writer.write_dfs.keys()) == 0
assert len(output_writer.write_buffers.keys()) == 0
meta = {"sender_id": None}
content = {"context": "write_results", "type": "market_dispatch", "data": []}
output_writer.handle_output_message(content, meta)
# empty dfs are discarded
assert len(output_writer.write_dfs["market_dispatch"]) == 0, "market_dispatch"
assert len(output_writer.write_buffers["market_dispatch"]) == 0, "market_dispatch"

content = {
"context": "write_results",
"type": "market_dispatch",
"data": [[start, 90, "EOM", "TestUnit"]],
}
output_writer.handle_output_message(content, meta)
assert len(output_writer.write_dfs["market_dispatch"]) == 1, "market_dispatch"
assert len(output_writer.write_buffers["market_dispatch"]) == 1, "market_dispatch"


def test_output_unit_dispatch():
engine = create_engine(DB_URI)
start = datetime(2020, 1, 1)
end = datetime(2020, 1, 2)
output_writer = WriteOutput("test_sim", start, end, engine)
assert len(output_writer.write_dfs.keys()) == 0
assert len(output_writer.write_buffers.keys()) == 0
meta = {"sender_id": None}
content = {
"context": "write_results",
Expand All @@ -150,21 +150,21 @@ def test_output_unit_dispatch():
}

output_writer.handle_output_message(content, meta)
assert len(output_writer.write_dfs["unit_dispatch"]) == 1, "unit_dispatch"
assert len(output_writer.write_buffers["unit_dispatch"]) == 1, "unit_dispatch"


def test_output_write_flows():
engine = create_engine(DB_URI)
start = datetime(2020, 1, 1)
end = datetime(2020, 1, 2)
output_writer = WriteOutput("test_sim", start, end, engine)
assert len(output_writer.write_dfs.keys()) == 0
assert len(output_writer.write_buffers.keys()) == 0
meta = {"sender_id": None}
content = {
"context": "write_results",
"type": "store_flows",
"type": "grid_flows",
"data": {(datetime(2019, 1, 1, 0, 0), "north_south_example"): 0.0},
}

output_writer.handle_output_message(content, meta)
assert len(output_writer.write_dfs["flows"]) == 1, "flows"
assert len(output_writer.write_buffers["grid_flows"]) == 1, "grid_flows"

0 comments on commit fd8963d

Please sign in to comment.