
Commit

Merge branch 'main' into fix-empty-market-results-warning
nick-harder authored Dec 12, 2024
2 parents 45f9773 + 448425c commit 70dfe08
Showing 5 changed files with 47 additions and 76 deletions.
9 changes: 5 additions & 4 deletions assume/common/outputs.py
@@ -93,10 +93,6 @@ def __init__(
if episode.isdigit():
self.episode = int(episode)

# check if episode=0 and delete all similar runs
if self.episode == 0:
self.delete_similar_runs()

# construct all timeframe under which hourly values are written to excel and db
self.start = start
self.end = end
@@ -202,6 +198,11 @@ def on_ready(self):
self.db = create_engine(self.db_uri)
if self.db is not None:
self.delete_db_scenario(self.simulation_id)

# check if episode equals 1 and delete all similar runs
if self.episode == 1:
self.delete_similar_runs()

if self.save_frequency_hours is not None:
recurrency_task = rr.rrule(
freq=rr.HOURLY,
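
The cleanup of earlier runs with a similar name moves out of __init__ (where it fired for episode == 0) and into on_ready, where it now fires for episode == 1, matching the 1-based episode counting used in loader_csv.py below (the defaults there change from 0 to 1). A minimal standalone sketch of that numbering, not ASSUME code, with should_delete_similar_runs as a purely hypothetical helper:

    # Illustration only: episodes are counted from 1, so only the first
    # episode should clear the results of similar earlier runs.
    def should_delete_similar_runs(episode: int) -> bool:
        return episode == 1

    # run_learning iterates range(1, training_episodes + 1)
    training_episodes = 3
    for episode in range(1, training_episodes + 1):
        print(episode, should_delete_similar_runs(episode))  # 1 True, 2 False, 3 False
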
59 changes: 15 additions & 44 deletions assume/scenario/loader_csv.py
@@ -536,8 +536,8 @@ def setup_world(
study_case: str,
perform_evaluation: bool = False,
terminate_learning: bool = False,
episode: int = 0,
eval_episode: int = 0,
episode: int = 1,
eval_episode: int = 1,
) -> None:
"""
Load a scenario from a given path.
@@ -550,8 +550,8 @@ def setup_world(
study_case (str): The specific study case within the scenario to be loaded.
perform_evaluation (bool, optional): A flag indicating whether evaluation should be performed. Defaults to False.
terminate_learning (bool, optional): An automatically set flag indicating that we terminated the learning process now, either because we reach the end of the episode iteration or because we triggered an early stopping.
episode (int, optional): The episode number for learning. Defaults to 0.
eval_episode (int, optional): The episode number for evaluation. Defaults to 0.
episode (int, optional): The episode number for learning. Defaults to 1.
eval_episode (int, optional): The episode number for evaluation. Defaults to 1.
Raises:
ValueError: If the specified scenario or study case is not found in the provided inputs.
@@ -726,10 +726,6 @@ def load_scenario_folder(
inputs_path: str,
scenario: str,
study_case: str,
perform_evaluation: bool = False,
terminate_learning: bool = False,
episode: int = 1,
eval_episode: int = 1,
):
"""
Load a scenario from a given path.
@@ -741,29 +737,12 @@
inputs_path (str): The path to the folder containing input files necessary for the scenario.
scenario (str): The name of the scenario to be loaded.
study_case (str): The specific study case within the scenario to be loaded.
perform_evaluation (bool, optional): A flag indicating whether evaluation should be performed. Defaults to False.
terminate_learning (bool, optional): An automatically set flag indicating that we terminated the learning process now, either because we reach the end of the episode iteration or because we triggered an early stopping.
episode (int, optional): The episode number for learning. Defaults to 0.
eval_episode (int, optional): The episode number for evaluation. Defaults to 0.
Raises:
ValueError: If the specified scenario or study case is not found in the provided inputs.
Example:
>>> load_scenario_folder(
world=world,
inputs_path="/path/to/inputs",
scenario="scenario_name",
study_case="study_case_name",
perform_evaluation=False,
episode=1,
eval_episode=1,
trained_policies_save_path="",
)
Notes:
- The function sets up the world environment based on the provided inputs and configuration files.
- If `perform_evaluation` is set to True, the function performs evaluation using the specified evaluation episode number.
- The function utilizes the specified inputs to configure the simulation environment, including market parameters, unit operators, and forecasting data.
- After calling this function, the world environment is prepared for further simulation and analysis.
@@ -776,10 +755,6 @@ def load_scenario_folder(
world=world,
scenario_data=scenario_data,
study_case=study_case,
perform_evaluation=perform_evaluation,
terminate_learning=terminate_learning,
episode=episode,
eval_episode=eval_episode,
)
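
With the learning-related keyword arguments removed, load_scenario_folder now only needs the world, the inputs path, the scenario and the study case; episode handling stays inside setup_world and run_learning. A usage sketch rather than a runnable script, mirroring the docstring example deleted above (the path and names are placeholders, and world is assumed to be a World instance created earlier):

    from assume.scenario.loader_csv import load_scenario_folder

    load_scenario_folder(
        world=world,                     # an existing assume World instance
        inputs_path="/path/to/inputs",   # placeholder path
        scenario="scenario_name",        # placeholder scenario name
        study_case="study_case_name",    # placeholder study case
    )
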


@@ -878,7 +853,6 @@ def run_learning(
# initialize policies already here to set the obs_dim and act_dim in the learning role
actors_and_critics = None
world.learning_role.initialize_policy(actors_and_critics=actors_and_critics)
world.output_role.delete_similar_runs()

# check if we already stored policies for this simulation
save_path = world.learning_config["trained_policies_save_path"]
@@ -928,14 +902,12 @@ def run_learning(
range(1, world.learning_role.training_episodes + 1),
desc="Training Episodes",
):
# TODO normally, loading twice should not create issues, somehow a scheduling issue is raised currently
if episode != 1:
setup_world(
world=world,
scenario_data=scenario_data,
study_case=study_case,
episode=episode,
)
setup_world(
world=world,
scenario_data=scenario_data,
study_case=study_case,
episode=episode,
)

# -----------------------------------------
# Give the newly initialized learning role the needed information across episodes
@@ -993,13 +965,12 @@ def run_learning(

world.reset()

# if at end of simulation save last policies
if episode == (world.learning_role.training_episodes):
world.learning_role.rl_algorithm.save_params(
directory=f"{world.learning_role.trained_policies_save_path}/last_policies"
)
# save the last policies at the end of the training
world.learning_role.rl_algorithm.save_params(
directory=f"{world.learning_role.trained_policies_save_path}/last_policies"
)

# container shutdown implicitly with new initialisation
# container shutdown implicitly with new initialisation
logger.info("################")
logger.info("Training finished, Start evaluation run")
world.export_csv_path = temp_csv_path
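
Taken together, the loader changes mean setup_world now runs at the start of every training episode (the old episode != 1 guard and its TODO are gone) and the final policies are saved once at the end of training, without comparing the episode counter against training_episodes. A toy illustration of the revised control flow, not ASSUME code; the per-episode work is reduced to log entries:

    # Toy sketch: the world is rebuilt for every episode, including the first,
    # and the final save happens once rather than behind an episode check.
    def run_training(training_episodes: int) -> list[str]:
        log = []
        for episode in range(1, training_episodes + 1):
            log.append(f"setup_world(episode={episode})")  # no longer skipped for episode 1
            log.append(f"run episode {episode}")
            log.append("world.reset()")
        log.append("save_params(<trained_policies_save_path>/last_policies)")  # unconditional
        return log

    print("\n".join(run_training(3)))
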
13 changes: 8 additions & 5 deletions assume_cli/cli.py
@@ -127,12 +127,13 @@ def cli(args=None):
warnings.filterwarnings("ignore", "coroutine.*?was never awaited.*")
logging.getLogger("asyncio").setLevel("FATAL")

try:
# import package after argcomplete.autocomplete
# to improve autocompletion speed
from assume import World
from assume.scenario.loader_csv import load_scenario_folder, run_learning
# import package after argcomplete.autocomplete
# to improve autocompletion speed
from assume import World
from assume.common.exceptions import AssumeException
from assume.scenario.loader_csv import load_scenario_folder, run_learning

try:
os.makedirs("./examples/local_db", exist_ok=True)

if args.parallel:
@@ -169,6 +170,8 @@

except KeyboardInterrupt:
pass
except AssumeException as e:
logging.error(f"Stopping: {e}")
except Exception:
logging.exception("Simulation aborted")

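
In the CLI, the imports move out of the try block (they still come after argcomplete.autocomplete, as the comment explains, to keep shell completion fast), and a dedicated except branch reports AssumeException failures as a single error line while unexpected exceptions keep the full traceback. A minimal sketch of that handler ordering, using a stand-in class instead of the real assume.common.exceptions.AssumeException:

    import logging

    class AssumeException(Exception):
        """Stand-in for assume.common.exceptions.AssumeException (illustration only)."""

    def run_simulation(job) -> None:
        try:
            job()
        except KeyboardInterrupt:
            pass  # manual abort, nothing to report
        except AssumeException as e:
            # expected, scenario-level failures get one clean error message
            logging.error(f"Stopping: {e}")
        except Exception:
            # anything unexpected still gets the full traceback
            logging.exception("Simulation aborted")

    def failing_job():
        raise AssumeException("example failure")

    run_simulation(failing_job)  # logs "Stopping: example failure"
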
(fourth changed file: a notebook whose name is not shown in this view)
@@ -311,13 +311,12 @@
" range(1, world.learning_role.training_episodes + 1),\n",
" desc=\"Training Episodes\",\n",
" ):\n",
" if episode != 1:\n",
" setup_world(\n",
" world=world,\n",
" scenario_data=scenario_data,\n",
" study_case=study_case,\n",
" episode=episode,\n",
" )\n",
" setup_world(\n",
" world=world,\n",
" scenario_data=scenario_data,\n",
" study_case=study_case,\n",
" episode=episode,\n",
" )\n",
"\n",
" # Give the newly initialized learning role the needed information across episodes\n",
" world.learning_role.load_inter_episodic_data(inter_episodic_data)\n",
@@ -370,16 +369,15 @@
"\n",
" world.reset()\n",
"\n",
" # -----------------------------------------\n",
" # 4 - Terminate Learning and Save policies\n",
" # -----------------------------------------\n",
" # 4 - Terminate Learning and Save policies\n",
"\n",
" # if at end of simulation save last policies\n",
" if episode == (world.learning_role.training_episodes):\n",
" world.learning_role.rl_algorithm.save_params(\n",
" directory=f\"{world.learning_role.trained_policies_save_path}/last_policies\"\n",
" )\n",
" # if at end of simulation save last policies\n",
" world.learning_role.rl_algorithm.save_params(\n",
" directory=f\"{world.learning_role.trained_policies_save_path}/last_policies\"\n",
" )\n",
"\n",
" # container shutdown implicitly with new initialisation\n",
" # container shutdown implicitly with new initialisation\n",
" logger.info(\"################\")\n",
" logger.info(\"Training finished, Start evaluation run\")\n",
" world.export_csv_path = temp_csv_path\n",
14 changes: 6 additions & 8 deletions examples/notebooks/09_example_Sim_and_xRL.ipynb
@@ -578,14 +578,12 @@
" range(1, world.learning_role.training_episodes + 1),\n",
" desc=\"Training Episodes\",\n",
" ):\n",
" # TODO normally, loading twice should not create issues, somehow a scheduling issue is raised currently\n",
" if episode != 1:\n",
" setup_world(\n",
" world=world,\n",
" scenario_data=scenario_data,\n",
" study_case=study_case,\n",
" episode=episode,\n",
" )\n",
" setup_world(\n",
" world=world,\n",
" scenario_data=scenario_data,\n",
" study_case=study_case,\n",
" episode=episode,\n",
" )\n",
"\n",
" # -----------------------------------------\n",
" # Give the newly initialized learning role the needed information across episodes\n",
