From c19bd9fd9c25754a5b0171f536943f6c0f57bef4 Mon Sep 17 00:00:00 2001 From: Dilan Pathirana Date: Thu, 30 Nov 2023 14:13:46 +0100 Subject: [PATCH 01/10] fix famos termination; remove other switching methods --- petab_select/candidate_space.py | 242 +++++++++--------- petab_select/constants.py | 16 +- petab_select/ui.py | 31 ++- test/candidate_space/test_candidate_space.py | 248 +++++++++---------- 4 files changed, 277 insertions(+), 260 deletions(-) diff --git a/petab_select/candidate_space.py b/petab_select/candidate_space.py index 8aaa9e04..73aa17bc 100644 --- a/petab_select/candidate_space.py +++ b/petab_select/candidate_space.py @@ -560,100 +560,100 @@ class BackwardCandidateSpace(ForwardCandidateSpace): direction = -1 -class BidirectionalCandidateSpace(ForwardCandidateSpace): - """The bidirectional method class. - - Attributes: - method_history: - The history of models that were found at each search. - A list of dictionaries, where each dictionary contains keys for the `METHOD` - and the list of `MODELS`. - """ - - # TODO refactor method to inherit from governing_method if not specified - # by constructor argument -- remove from here. - method = Method.BIDIRECTIONAL - governing_method = Method.BIDIRECTIONAL - retry_model_space_search_if_no_models = True - - def __init__( - self, - *args, - initial_method: Method = Method.FORWARD, - **kwargs, - ): - super().__init__(*args, **kwargs) - - # FIXME cannot access from CLI - # FIXME probably fine to replace `self.initial_method` - # with `self.method` here. i.e.: - # 1. change `method` to `Method.FORWARD - # 2. change signature to `initial_method: Method = None` - # 3. change code here to `if initial_method is not None: self.method = initial_method` - self.initial_method = initial_method - - self.method_history: List[Dict[str, Union[Method, List[Model]]]] = [] - - def update_method(self, method: Method): - if method == Method.FORWARD: - self.direction = 1 - elif method == Method.BACKWARD: - self.direction = -1 - else: - raise NotImplementedError( - f'Bidirectional direction must be either `Method.FORWARD` or `Method.BACKWARD`, not {method}.' - ) - - self.method = method - - def switch_method(self): - if self.method == Method.FORWARD: - method = Method.BACKWARD - elif self.method == Method.BACKWARD: - method = Method.FORWARD - - self.update_method(method=method) - - def setup_before_model_subspaces_search(self): - # If previous search found no models, then switch method. - previous_search = ( - None if not self.method_history else self.method_history[-1] - ) - if previous_search is None: - self.update_method(self.initial_method) - return - - self.update_method(previous_search[METHOD]) - if not previous_search[MODELS]: - self.switch_method() - self.retry_model_space_search_if_no_models = False - - def setup_after_model_subspaces_search(self): - current_search = { - METHOD: self.method, - MODELS: self.models, - } - self.method_history.append(current_search) - self.method = self.governing_method - - def wrap_search_subspaces(self, search_subspaces): - def wrapper(): - def iterate(): - self.setup_before_model_subspaces_search() - search_subspaces() - self.setup_after_model_subspaces_search() - - # Repeat until models are found or switching doesn't help. - iterate() - while ( - not self.models and self.retry_model_space_search_if_no_models - ): - iterate() - - # Reset flag for next time. 
- self.retry_model_space_search_if_no_models = True - - return wrapper +#class BidirectionalCandidateSpace(ForwardCandidateSpace): +# """The bidirectional method class. +# +# Attributes: +# method_history: +# The history of models that were found at each search. +# A list of dictionaries, where each dictionary contains keys for the `METHOD` +# and the list of `MODELS`. +# """ +# +# # TODO refactor method to inherit from governing_method if not specified +# # by constructor argument -- remove from here. +# method = Method.BIDIRECTIONAL +# governing_method = Method.BIDIRECTIONAL +# retry_model_space_search_if_no_models = True +# +# def __init__( +# self, +# *args, +# initial_method: Method = Method.FORWARD, +# **kwargs, +# ): +# super().__init__(*args, **kwargs) +# +# # FIXME cannot access from CLI +# # FIXME probably fine to replace `self.initial_method` +# # with `self.method` here. i.e.: +# # 1. change `method` to `Method.FORWARD +# # 2. change signature to `initial_method: Method = None` +# # 3. change code here to `if initial_method is not None: self.method = initial_method` +# self.initial_method = initial_method +# +# self.method_history: List[Dict[str, Union[Method, List[Model]]]] = [] +# +# def update_method(self, method: Method): +# if method == Method.FORWARD: +# self.direction = 1 +# elif method == Method.BACKWARD: +# self.direction = -1 +# else: +# raise NotImplementedError( +# f'Bidirectional direction must be either `Method.FORWARD` or `Method.BACKWARD`, not {method}.' +# ) +# +# self.method = method +# +# def switch_method(self): +# if self.method == Method.FORWARD: +# method = Method.BACKWARD +# elif self.method == Method.BACKWARD: +# method = Method.FORWARD +# +# self.update_method(method=method) +# +# def setup_before_model_subspaces_search(self): +# # If previous search found no models, then switch method. +# previous_search = ( +# None if not self.method_history else self.method_history[-1] +# ) +# if previous_search is None: +# self.update_method(self.initial_method) +# return +# +# self.update_method(previous_search[METHOD]) +# if not previous_search[MODELS]: +# self.switch_method() +# self.retry_model_space_search_if_no_models = False +# +# def setup_after_model_subspaces_search(self): +# current_search = { +# METHOD: self.method, +# MODELS: self.models, +# } +# self.method_history.append(current_search) +# self.method = self.governing_method +# +# def wrap_search_subspaces(self, search_subspaces): +# def wrapper(): +# def iterate(): +# self.setup_before_model_subspaces_search() +# search_subspaces() +# self.setup_after_model_subspaces_search() +# +# # Repeat until models are found or switching doesn't help. +# iterate() +# while ( +# not self.models and self.retry_model_space_search_if_no_models +# ): +# iterate() +# +# # Reset flag for next time. 
+# self.retry_model_space_search_if_no_models = True +# +# return wrapper class FamosCandidateSpace(CandidateSpace): @@ -1256,31 +1256,31 @@ def wrapper(): return wrapper -# TODO rewrite so BidirectionalCandidateSpace inherits from ForwardAndBackwardCandidateSpace -# instead -class ForwardAndBackwardCandidateSpace(BidirectionalCandidateSpace): - method = Method.FORWARD_AND_BACKWARD - governing_method = Method.FORWARD_AND_BACKWARD - retry_model_space_search_if_no_models = False - - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs, initial_method=None) - - def wrap_search_subspaces(self, search_subspaces): - def wrapper(): - for method in [Method.FORWARD, Method.BACKWARD]: - self.update_method(method=method) - search_subspaces() - self.setup_after_model_subspaces_search() - - return wrapper - - # Disable unused interface - setup_before_model_subspaces_search = None - switch_method = None - - def setup_after_model_space_search(self): - pass +## TODO rewrite so BidirectionalCandidateSpace inherits from ForwardAndBackwardCandidateSpace +## instead +#class ForwardAndBackwardCandidateSpace(BidirectionalCandidateSpace): +# method = Method.FORWARD_AND_BACKWARD +# governing_method = Method.FORWARD_AND_BACKWARD +# retry_model_space_search_if_no_models = False +# +# def __init__(self, *args, **kwargs): +# super().__init__(*args, **kwargs, initial_method=None) +# +# def wrap_search_subspaces(self, search_subspaces): +# def wrapper(): +# for method in [Method.FORWARD, Method.BACKWARD]: +# self.update_method(method=method) +# search_subspaces() +# self.setup_after_model_subspaces_search() +# +# return wrapper +# +# # Disable unused interface +# setup_before_model_subspaces_search = None +# switch_method = None +# +# def setup_after_model_space_search(self): +# pass class LateralCandidateSpace(CandidateSpace): @@ -1362,10 +1362,10 @@ def _consider_method(self, model): candidate_space_classes = [ ForwardCandidateSpace, BackwardCandidateSpace, - BidirectionalCandidateSpace, + #BidirectionalCandidateSpace, LateralCandidateSpace, BruteForceCandidateSpace, - ForwardAndBackwardCandidateSpace, + #ForwardAndBackwardCandidateSpace, FamosCandidateSpace, ] diff --git a/petab_select/constants.py b/petab_select/constants.py index 0a7bee3e..4570f98a 100644 --- a/petab_select/constants.py +++ b/petab_select/constants.py @@ -119,11 +119,11 @@ class Method(str, Enum): """String literals for model selection methods.""" BACKWARD = 'backward' - BIDIRECTIONAL = 'bidirectional' + #BIDIRECTIONAL = 'bidirectional' BRUTE_FORCE = 'brute_force' FAMOS = 'famos' FORWARD = 'forward' - FORWARD_AND_BACKWARD = 'forward_and_backward' + #FORWARD_AND_BACKWARD = 'forward_and_backward' LATERAL = 'lateral' MOST_DISTANT = 'most_distant' @@ -142,17 +142,17 @@ class Criterion(str, Enum): # Methods that move through model space by taking steps away from some model. STEPWISE_METHODS = [ Method.BACKWARD, - Method.BIDIRECTIONAL, + #Method.BIDIRECTIONAL, Method.FORWARD, - Method.FORWARD_AND_BACKWARD, + #Method.FORWARD_AND_BACKWARD, Method.LATERAL, ] # Methods that require an initial model. 
INITIAL_MODEL_METHODS = [ Method.BACKWARD, - Method.BIDIRECTIONAL, + #Method.BIDIRECTIONAL, Method.FORWARD, - Method.FORWARD_AND_BACKWARD, + #Method.FORWARD_AND_BACKWARD, Method.LATERAL, ] @@ -160,7 +160,7 @@ class Criterion(str, Enum): VIRTUAL_INITIAL_MODEL = 'virtual_initial_model' VIRTUAL_INITIAL_MODEL_METHODS = [ Method.BACKWARD, - Method.BIDIRECTIONAL, + #Method.BIDIRECTIONAL, Method.FORWARD, - Method.FORWARD_AND_BACKWARD, + #Method.FORWARD_AND_BACKWARD, ] diff --git a/petab_select/ui.py b/petab_select/ui.py index bdb34b82..778e363a 100644 --- a/petab_select/ui.py +++ b/petab_select/ui.py @@ -134,15 +134,32 @@ def candidates( problem.model_space.exclude_model_hashes( model_hashes=excluded_model_hashes ) - if do_search: + while do_search: problem.model_space.search(candidate_space, limit=limit_sent) - write_summary_tsv( - problem=problem, - candidate_space=candidate_space, - previous_predecessor_model=candidate_space.previous_predecessor_model, - predecessor_model=predecessor_model, - ) + write_summary_tsv( + problem=problem, + candidate_space=candidate_space, + previous_predecessor_model=candidate_space.previous_predecessor_model, + predecessor_model=predecessor_model, + ) + + if candidate_space.models: + break + + # No models were found. Repeat the search with the same candidate space, + # if the candidate space is able to switch methods. + # N.B.: candidate spaces that switch methods must raise `StopIteration` + # when they stop switching. + if candidate_space.governing_method == Method.FAMOS: + try: + candidate_space.update_after_calibration( + calibrated_models=calibrated_models, + newly_calibrated_models={}, + criterion=criterion, + ) + except StopIteration: + break candidate_space.previous_predecessor_model = predecessor_model diff --git a/test/candidate_space/test_candidate_space.py b/test/candidate_space/test_candidate_space.py index 9a993ef6..6b63e9e9 100644 --- a/test/candidate_space/test_candidate_space.py +++ b/test/candidate_space/test_candidate_space.py @@ -109,127 +109,127 @@ def model_space(calibrated_model_space) -> pd.DataFrame: return model_space -def test_bidirectional( - model_space, calibrated_model_space, ordered_model_parameterizations -): - criterion = Criterion.AIC - model_id_length = one( - set([len(model_id) for model_id in calibrated_model_space]) - ) - - candidate_space = BidirectionalCandidateSpace() - calibrated_models = [] - - # Perform bidirectional search until no more models are found. - search_iterations = 0 - while True: - new_calibrated_models = [] - search_iterations += 1 - - # Get models. - model_space.search(candidate_space) - - # Calibrate models. - for model in candidate_space.models: - model_id = model.model_subspace_id[-model_id_length:] - model.set_criterion( - criterion=criterion, value=calibrated_model_space[model_id] - ) - new_calibrated_models.append(model) - - # End if no more models were found. - if not new_calibrated_models: - break - - # Get next predecessor model as best of new models. - best_new_model = None - for model in new_calibrated_models: - if best_new_model is None: - best_new_model = model - continue - if default_compare( - model0=best_new_model, model1=model, criterion=criterion - ): - best_new_model = model - - # Set next predecessor and exclusions. - calibrated_model_hashes = [ - model.get_hash() for model in calibrated_models - ] - candidate_space.reset( - predecessor_model=best_new_model, - exclusions=calibrated_model_hashes, - ) - - # exclude calibrated model hashes from model space too? 
- model_space.exclude_model_hashes(model_hashes=calibrated_model_hashes) - - # Check that expected models are found at each iteration. - ( - good_model_parameterizations_ascending, - bad_model_parameterizations, - ) = ordered_model_parameterizations - search_iteration = 0 - for history_item in candidate_space.method_history: - models = history_item[MODELS] - if not models: - continue - model_parameterizations = [ - model.model_subspace_id[-5:] for model in models - ] - - good_model_parameterization = good_model_parameterizations_ascending[ - search_iteration - ] - # The expected good model was found. - assert good_model_parameterization in model_parameterizations - model_parameterizations.remove(good_model_parameterization) - - if search_iteration == 0: - # All parameterizations have been correctly identified and removed. - assert not model_parameterizations - search_iteration += 1 - continue - - previous_model_parameterization = ( - good_model_parameterizations_ascending[search_iteration - 1] - ) - - # The expected bad model is also found. - # If a bad model is the same dimension and also represents a similar stepwise move away from the previous - # model parameterization, it should also be in the parameterizations. - for bad_model_parameterization in bad_model_parameterizations: - # Skip if different dimensions - if sum(map(int, bad_model_parameterization)) != sum( - map(int, good_model_parameterization) - ): - continue - # Skip if different distances from previous model parameterization - if sum( - [ - a != b - for a, b in zip( - bad_model_parameterization, - previous_model_parameterization, - ) - ] - ) != sum( - [ - a != b - for a, b in zip( - good_model_parameterization, - previous_model_parameterization, - ) - ] - ): - continue - assert bad_model_parameterization in model_parameterizations - model_parameterizations.remove(bad_model_parameterization) - - # All parameterizations have been correctly identified and removed. - assert not model_parameterizations - search_iteration += 1 - - # End test if all good models were found in the correct order. - if search_iteration >= len(good_model_parameterizations_ascending): - break +#def test_bidirectional( +# model_space, calibrated_model_space, ordered_model_parameterizations +#): +# criterion = Criterion.AIC +# model_id_length = one( +# set([len(model_id) for model_id in calibrated_model_space]) +# ) +# +# candidate_space = BidirectionalCandidateSpace() +# calibrated_models = [] +# +# # Perform bidirectional search until no more models are found. +# search_iterations = 0 +# while True: +# new_calibrated_models = [] +# search_iterations += 1 +# +# # Get models. +# model_space.search(candidate_space) +# +# # Calibrate models. +# for model in candidate_space.models: +# model_id = model.model_subspace_id[-model_id_length:] +# model.set_criterion( +# criterion=criterion, value=calibrated_model_space[model_id] +# ) +# new_calibrated_models.append(model) +# +# # End if no more models were found. +# if not new_calibrated_models: +# break +# +# # Get next predecessor model as best of new models. +# best_new_model = None +# for model in new_calibrated_models: +# if best_new_model is None: +# best_new_model = model +# continue +# if default_compare( +# model0=best_new_model, model1=model, criterion=criterion +# ): +# best_new_model = model +# +# # Set next predecessor and exclusions. 
+# calibrated_model_hashes = [ +# model.get_hash() for model in calibrated_models +# ] +# candidate_space.reset( +# predecessor_model=best_new_model, +# exclusions=calibrated_model_hashes, +# ) +# +# # exclude calibrated model hashes from model space too? +# model_space.exclude_model_hashes(model_hashes=calibrated_model_hashes) +# +# # Check that expected models are found at each iteration. +# ( +# good_model_parameterizations_ascending, +# bad_model_parameterizations, +# ) = ordered_model_parameterizations +# search_iteration = 0 +# for history_item in candidate_space.method_history: +# models = history_item[MODELS] +# if not models: +# continue +# model_parameterizations = [ +# model.model_subspace_id[-5:] for model in models +# ] +# +# good_model_parameterization = good_model_parameterizations_ascending[ +# search_iteration +# ] +# # The expected good model was found. +# assert good_model_parameterization in model_parameterizations +# model_parameterizations.remove(good_model_parameterization) +# +# if search_iteration == 0: +# # All parameterizations have been correctly identified and removed. +# assert not model_parameterizations +# search_iteration += 1 +# continue +# +# previous_model_parameterization = ( +# good_model_parameterizations_ascending[search_iteration - 1] +# ) +# +# # The expected bad model is also found. +# # If a bad model is the same dimension and also represents a similar stepwise move away from the previous +# # model parameterization, it should also be in the parameterizations. +# for bad_model_parameterization in bad_model_parameterizations: +# # Skip if different dimensions +# if sum(map(int, bad_model_parameterization)) != sum( +# map(int, good_model_parameterization) +# ): +# continue +# # Skip if different distances from previous model parameterization +# if sum( +# [ +# a != b +# for a, b in zip( +# bad_model_parameterization, +# previous_model_parameterization, +# ) +# ] +# ) != sum( +# [ +# a != b +# for a, b in zip( +# good_model_parameterization, +# previous_model_parameterization, +# ) +# ] +# ): +# continue +# assert bad_model_parameterization in model_parameterizations +# model_parameterizations.remove(bad_model_parameterization) +# +# # All parameterizations have been correctly identified and removed. +# assert not model_parameterizations +# search_iteration += 1 +# +# # End test if all good models were found in the correct order. +# if search_iteration >= len(good_model_parameterizations_ascending): +# break From efa17a5729a973eb2523274a17e0fd76e3f25384 Mon Sep 17 00:00:00 2001 From: Dilan Pathirana Date: Thu, 30 Nov 2023 14:21:14 +0100 Subject: [PATCH 02/10] remove switching methods --- petab_select/candidate_space.py | 123 ------------------ petab_select/constants.py | 28 ----- test/candidate_space/test_candidate_space.py | 126 ------------------- 3 files changed, 277 deletions(-) diff --git a/petab_select/candidate_space.py b/petab_select/candidate_space.py index 73aa17bc..7fe62bba 100644 --- a/petab_select/candidate_space.py +++ b/petab_select/candidate_space.py @@ -560,102 +560,6 @@ class BackwardCandidateSpace(ForwardCandidateSpace): direction = -1 -#class BidirectionalCandidateSpace(ForwardCandidateSpace): -# """The bidirectional method class. -# -# Attributes: -# method_history: -# The history of models that were found at each search. -# A list of dictionaries, where each dictionary contains keys for the `METHOD` -# and the list of `MODELS`. 
-# """ -# -# # TODO refactor method to inherit from governing_method if not specified -# # by constructor argument -- remove from here. -# method = Method.BIDIRECTIONAL -# governing_method = Method.BIDIRECTIONAL -# retry_model_space_search_if_no_models = True -# -# def __init__( -# self, -# *args, -# initial_method: Method = Method.FORWARD, -# **kwargs, -# ): -# super().__init__(*args, **kwargs) -# -# # FIXME cannot access from CLI -# # FIXME probably fine to replace `self.initial_method` -# # with `self.method` here. i.e.: -# # 1. change `method` to `Method.FORWARD -# # 2. change signature to `initial_method: Method = None` -# # 3. change code here to `if initial_method is not None: self.method = initial_method` -# self.initial_method = initial_method -# -# self.method_history: List[Dict[str, Union[Method, List[Model]]]] = [] -# -# def update_method(self, method: Method): -# if method == Method.FORWARD: -# self.direction = 1 -# elif method == Method.BACKWARD: -# self.direction = -1 -# else: -# raise NotImplementedError( -# f'Bidirectional direction must be either `Method.FORWARD` or `Method.BACKWARD`, not {method}.' -# ) -# -# self.method = method -# -# def switch_method(self): -# if self.method == Method.FORWARD: -# method = Method.BACKWARD -# elif self.method == Method.BACKWARD: -# method = Method.FORWARD -# -# self.update_method(method=method) -# -# def setup_before_model_subspaces_search(self): -# # If previous search found no models, then switch method. -# previous_search = ( -# None if not self.method_history else self.method_history[-1] -# ) -# if previous_search is None: -# self.update_method(self.initial_method) -# return -# -# self.update_method(previous_search[METHOD]) -# if not previous_search[MODELS]: -# self.switch_method() -# self.retry_model_space_search_if_no_models = False -# -# def setup_after_model_subspaces_search(self): -# current_search = { -# METHOD: self.method, -# MODELS: self.models, -# } -# self.method_history.append(current_search) -# self.method = self.governing_method -# -# def wrap_search_subspaces(self, search_subspaces): -# def wrapper(): -# def iterate(): -# self.setup_before_model_subspaces_search() -# search_subspaces() -# self.setup_after_model_subspaces_search() -# -# # Repeat until models are found or switching doesn't help. -# iterate() -# while ( -# not self.models and self.retry_model_space_search_if_no_models -# ): -# iterate() -# -# # Reset flag for next time. -# self.retry_model_space_search_if_no_models = True -# -# return wrapper - - class FamosCandidateSpace(CandidateSpace): """The FAMoS method class. 
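Editor's note: with the bidirectional and forward-and-backward candidate spaces gone, FAMoS is the only remaining method that switches search direction on its own. A quick illustrative check of the methods left after this series, plus a hypothetical shim for old configurations; neither snippet is part of the package, and it assumes `Method` is importable from `petab_select.constants` as in the diffs above.

    from petab_select.constants import Method

    # Selection methods remaining after BIDIRECTIONAL and FORWARD_AND_BACKWARD
    # were removed; FAMoS is the only one that changes direction during a search.
    print(sorted(m.value for m in Method))
    # ['backward', 'brute_force', 'famos', 'forward', 'lateral', 'most_distant']

    # Hypothetical shim (not part of petab_select): old configurations that
    # still request the removed switching methods could be pointed at FAMoS.
    def resolve_method(name: str) -> Method:
        if name in ('bidirectional', 'forward_and_backward'):
            return Method.FAMOS
        return Method(name)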
@@ -1256,33 +1160,6 @@ def wrapper(): return wrapper -## TODO rewrite so BidirectionalCandidateSpace inherits from ForwardAndBackwardCandidateSpace -## instead -#class ForwardAndBackwardCandidateSpace(BidirectionalCandidateSpace): -# method = Method.FORWARD_AND_BACKWARD -# governing_method = Method.FORWARD_AND_BACKWARD -# retry_model_space_search_if_no_models = False -# -# def __init__(self, *args, **kwargs): -# super().__init__(*args, **kwargs, initial_method=None) -# -# def wrap_search_subspaces(self, search_subspaces): -# def wrapper(): -# for method in [Method.FORWARD, Method.BACKWARD]: -# self.update_method(method=method) -# search_subspaces() -# self.setup_after_model_subspaces_search() -# -# return wrapper -# -# # Disable unused interface -# setup_before_model_subspaces_search = None -# switch_method = None -# -# def setup_after_model_space_search(self): -# pass - - class LateralCandidateSpace(CandidateSpace): """Find models with the same number of estimated parameters.""" diff --git a/petab_select/constants.py b/petab_select/constants.py index 4570f98a..1cd74ff3 100644 --- a/petab_select/constants.py +++ b/petab_select/constants.py @@ -48,12 +48,6 @@ # COMPARED_MODEL_ID = 'compared_'+MODEL_ID YAML_FILENAME = 'yaml' -# FORWARD = 'forward' -# BACKWARD = 'backward' -# BIDIRECTIONAL = 'bidirectional' -# LATERAL = 'lateral' - - # DISTANCES = { # FORWARD: { # 'l1': 1, @@ -70,20 +64,6 @@ # } CRITERIA = 'criteria' -# FIXME remove, change all uses to Enum below -# AIC = 'AIC' -# AICC = 'AICc' -# BIC = 'BIC' -# AKAIKE_INFORMATION_CRITERION = AIC -# CORRECTED_AKAIKE_INFORMATION_CRITERION = AICC -# BAYESIAN_INFORMATION_CRITERION = BIC -# LH = 'LH' -# LLH = 'LLH' -# NLLH = 'NLLH' -# LIKELIHOOD = LH -# LOG_LIKELIHOOD = LLH -# NEGATIVE_LOG_LIKELIHOOD = NLLH - PARAMETERS = 'parameters' # PARAMETER_ESTIMATE = 'parameter_estimate' @@ -119,11 +99,9 @@ class Method(str, Enum): """String literals for model selection methods.""" BACKWARD = 'backward' - #BIDIRECTIONAL = 'bidirectional' BRUTE_FORCE = 'brute_force' FAMOS = 'famos' FORWARD = 'forward' - #FORWARD_AND_BACKWARD = 'forward_and_backward' LATERAL = 'lateral' MOST_DISTANT = 'most_distant' @@ -142,17 +120,13 @@ class Criterion(str, Enum): # Methods that move through model space by taking steps away from some model. STEPWISE_METHODS = [ Method.BACKWARD, - #Method.BIDIRECTIONAL, Method.FORWARD, - #Method.FORWARD_AND_BACKWARD, Method.LATERAL, ] # Methods that require an initial model. 
INITIAL_MODEL_METHODS = [ Method.BACKWARD, - #Method.BIDIRECTIONAL, Method.FORWARD, - #Method.FORWARD_AND_BACKWARD, Method.LATERAL, ] @@ -160,7 +134,5 @@ class Criterion(str, Enum): VIRTUAL_INITIAL_MODEL = 'virtual_initial_model' VIRTUAL_INITIAL_MODEL_METHODS = [ Method.BACKWARD, - #Method.BIDIRECTIONAL, Method.FORWARD, - #Method.FORWARD_AND_BACKWARD, ] diff --git a/test/candidate_space/test_candidate_space.py b/test/candidate_space/test_candidate_space.py index 6b63e9e9..4512dbc0 100644 --- a/test/candidate_space/test_candidate_space.py +++ b/test/candidate_space/test_candidate_space.py @@ -107,129 +107,3 @@ def model_space(calibrated_model_space) -> pd.DataFrame: df = get_model_space_df(df) model_space = ModelSpace.from_df(df) return model_space - - -#def test_bidirectional( -# model_space, calibrated_model_space, ordered_model_parameterizations -#): -# criterion = Criterion.AIC -# model_id_length = one( -# set([len(model_id) for model_id in calibrated_model_space]) -# ) -# -# candidate_space = BidirectionalCandidateSpace() -# calibrated_models = [] -# -# # Perform bidirectional search until no more models are found. -# search_iterations = 0 -# while True: -# new_calibrated_models = [] -# search_iterations += 1 -# -# # Get models. -# model_space.search(candidate_space) -# -# # Calibrate models. -# for model in candidate_space.models: -# model_id = model.model_subspace_id[-model_id_length:] -# model.set_criterion( -# criterion=criterion, value=calibrated_model_space[model_id] -# ) -# new_calibrated_models.append(model) -# -# # End if no more models were found. -# if not new_calibrated_models: -# break -# -# # Get next predecessor model as best of new models. -# best_new_model = None -# for model in new_calibrated_models: -# if best_new_model is None: -# best_new_model = model -# continue -# if default_compare( -# model0=best_new_model, model1=model, criterion=criterion -# ): -# best_new_model = model -# -# # Set next predecessor and exclusions. -# calibrated_model_hashes = [ -# model.get_hash() for model in calibrated_models -# ] -# candidate_space.reset( -# predecessor_model=best_new_model, -# exclusions=calibrated_model_hashes, -# ) -# -# # exclude calibrated model hashes from model space too? -# model_space.exclude_model_hashes(model_hashes=calibrated_model_hashes) -# -# # Check that expected models are found at each iteration. -# ( -# good_model_parameterizations_ascending, -# bad_model_parameterizations, -# ) = ordered_model_parameterizations -# search_iteration = 0 -# for history_item in candidate_space.method_history: -# models = history_item[MODELS] -# if not models: -# continue -# model_parameterizations = [ -# model.model_subspace_id[-5:] for model in models -# ] -# -# good_model_parameterization = good_model_parameterizations_ascending[ -# search_iteration -# ] -# # The expected good model was found. -# assert good_model_parameterization in model_parameterizations -# model_parameterizations.remove(good_model_parameterization) -# -# if search_iteration == 0: -# # All parameterizations have been correctly identified and removed. -# assert not model_parameterizations -# search_iteration += 1 -# continue -# -# previous_model_parameterization = ( -# good_model_parameterizations_ascending[search_iteration - 1] -# ) -# -# # The expected bad model is also found. -# # If a bad model is the same dimension and also represents a similar stepwise move away from the previous -# # model parameterization, it should also be in the parameterizations. 
-# for bad_model_parameterization in bad_model_parameterizations: -# # Skip if different dimensions -# if sum(map(int, bad_model_parameterization)) != sum( -# map(int, good_model_parameterization) -# ): -# continue -# # Skip if different distances from previous model parameterization -# if sum( -# [ -# a != b -# for a, b in zip( -# bad_model_parameterization, -# previous_model_parameterization, -# ) -# ] -# ) != sum( -# [ -# a != b -# for a, b in zip( -# good_model_parameterization, -# previous_model_parameterization, -# ) -# ] -# ): -# continue -# assert bad_model_parameterization in model_parameterizations -# model_parameterizations.remove(bad_model_parameterization) -# -# # All parameterizations have been correctly identified and removed. -# assert not model_parameterizations -# search_iteration += 1 -# -# # End test if all good models were found in the correct order. -# if search_iteration >= len(good_model_parameterizations_ascending): -# break From 63042b93acd529114851cbaa5c6c56032e5a8412 Mon Sep 17 00:00:00 2001 From: Dilan Pathirana Date: Thu, 30 Nov 2023 14:23:53 +0100 Subject: [PATCH 03/10] tidy --- petab_select/candidate_space.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/petab_select/candidate_space.py b/petab_select/candidate_space.py index 7fe62bba..4f875fb7 100644 --- a/petab_select/candidate_space.py +++ b/petab_select/candidate_space.py @@ -1239,10 +1239,8 @@ def _consider_method(self, model): candidate_space_classes = [ ForwardCandidateSpace, BackwardCandidateSpace, - #BidirectionalCandidateSpace, LateralCandidateSpace, BruteForceCandidateSpace, - #ForwardAndBackwardCandidateSpace, FamosCandidateSpace, ] From d6beada0af4c04daa052e1445ab112e7e08d18c7 Mon Sep 17 00:00:00 2001 From: Dilan Pathirana Date: Thu, 30 Nov 2023 15:09:16 +0100 Subject: [PATCH 04/10] remove test --- test/candidate_space/test_candidate_space.py | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/test/candidate_space/test_candidate_space.py b/test/candidate_space/test_candidate_space.py index 4512dbc0..8c0d2351 100644 --- a/test/candidate_space/test_candidate_space.py +++ b/test/candidate_space/test_candidate_space.py @@ -5,9 +5,12 @@ from more_itertools import one import petab_select -from petab_select.candidate_space import ( # BackwardCandidateSpace,; BruteForceCandidateSpace,; ForwardCandidateSpace,; ForwardAndBackwardCandidateSpace,; LateralCandidateSpace, - BidirectionalCandidateSpace, -) +# from petab_select.candidate_space import ( +# BackwardCandidateSpace, +# BruteForceCandidateSpace, +# ForwardCandidateSpace, +# LateralCandidateSpace, +# ) from petab_select.constants import ( ESTIMATE, MODEL_SUBSPACE_ID, From 36b495d48312aabdf61b01845901ce63f29bbd5d Mon Sep 17 00:00:00 2001 From: Doresic Date: Thu, 30 Nov 2023 17:29:09 +0100 Subject: [PATCH 05/10] Small docstring change --- petab_select/candidate_space.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/petab_select/candidate_space.py b/petab_select/candidate_space.py index 4f875fb7..05bbdc62 100644 --- a/petab_select/candidate_space.py +++ b/petab_select/candidate_space.py @@ -806,7 +806,7 @@ def update_from_newly_calibrated_models( ) -> bool: """Update ``self.best_models`` with the latest ``newly_calibrated_models`` and determine if there was a new best model. If so, return - ``True``. ``False`` otherwise.""" + ``False``. 
``True`` otherwise.""" go_into_switch_method = True for newly_calibrated_model in newly_calibrated_models.values(): From 253cbced8bfb94ce5d8097c7dda4f9df10adda57 Mon Sep 17 00:00:00 2001 From: Dilan Pathirana Date: Fri, 1 Dec 2023 11:46:06 +0100 Subject: [PATCH 06/10] exclusions type error --- petab_select/candidate_space.py | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/petab_select/candidate_space.py b/petab_select/candidate_space.py index 05bbdc62..149fc295 100644 --- a/petab_select/candidate_space.py +++ b/petab_select/candidate_space.py @@ -791,7 +791,7 @@ def update_after_calibration( logging.info("Switching method") self.switch_method(calibrated_models=calibrated_models) self.switch_inner_candidate_space( - calibrated_models=calibrated_models, + exclusions=list(calibrated_models), ) logging.info( "Method switched to ", self.inner_candidate_space.method @@ -1021,10 +1021,14 @@ def update_method(self, method: Method): def switch_inner_candidate_space( self, - calibrated_models: Dict[str, Model], + exclusions: List[str], ): - """Switch ``self.inner_candidate_space`` to the candidate space of - the current self.method.""" + """Switch the inner candidate space to match the current method. + + Args: + exclusions: + Hashes of excluded models. + """ # if self.method != Method.MOST_DISTANT: self.inner_candidate_space = self.inner_candidate_spaces[self.method] @@ -1032,7 +1036,7 @@ def switch_inner_candidate_space( # calibrated models self.inner_candidate_space.reset( predecessor_model=self.predecessor_model, - exclusions=calibrated_models, + exclusions=exclusions, ) def jump_to_most_distant( From c703e24a29a52661c4da04387c800bb791ac4628 Mon Sep 17 00:00:00 2001 From: Dilan Pathirana Date: Fri, 1 Dec 2023 11:51:05 +0100 Subject: [PATCH 07/10] black --- test/candidate_space/test_candidate_space.py | 1 + 1 file changed, 1 insertion(+) diff --git a/test/candidate_space/test_candidate_space.py b/test/candidate_space/test_candidate_space.py index 8c0d2351..523f42d7 100644 --- a/test/candidate_space/test_candidate_space.py +++ b/test/candidate_space/test_candidate_space.py @@ -5,6 +5,7 @@ from more_itertools import one import petab_select + # from petab_select.candidate_space import ( # BackwardCandidateSpace, # BruteForceCandidateSpace, From d60eaad5cdcd18f19d2e67dd77abd16d9468ed8d Mon Sep 17 00:00:00 2001 From: Dilan Pathirana Date: Sat, 16 Dec 2023 22:48:05 +0100 Subject: [PATCH 08/10] use numpy to support log(0) --- petab_select/criteria.py | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/petab_select/criteria.py b/petab_select/criteria.py index 6e6db4b0..ceea1774 100644 --- a/petab_select/criteria.py +++ b/petab_select/criteria.py @@ -1,7 +1,6 @@ """Implementations of model selection criteria.""" -import math - +import numpy as np import petab from petab.C import OBJECTIVE_PRIOR_PARAMETERS, OBJECTIVE_PRIOR_TYPE @@ -88,7 +87,7 @@ def get_llh(self) -> float: """Get the log-likelihood.""" llh = self.model.get_criterion(Criterion.LLH, compute=False) if llh is None: - llh = math.log(self.get_lh()) + llh = np.log(self.get_lh()) return llh def get_lh(self) -> float: @@ -100,9 +99,9 @@ def get_lh(self) -> float: if lh is not None: return lh elif llh is not None: - return math.exp(llh) + return np.exp(llh) elif nllh is not None: - return math.exp(-1 * nllh) + return np.exp(-1 * nllh) raise ValueError( 'Please supply the likelihood (LH) or a compatible transformation. Compatible transformations: log(LH), -log(LH).' 
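Editor's note on why this commit moves from `math` to `numpy`: `math.log(0)` raises a `ValueError`, whereas `np.log(0)` evaluates to `-inf` (with a runtime warning), so a zero likelihood, such as the dummy value set for uncalibrated predecessor models later in this series, still yields a usable log-likelihood that any calibrated model will beat. A small illustration, not part of the patch:

    import math
    import numpy as np

    try:
        math.log(0.0)
    except ValueError:
        pass  # math.log(0.0) raises "math domain error"

    llh = np.log(0.0)  # -inf, with a RuntimeWarning instead of an exception
    assert llh == -np.inf
    assert np.log(1e-300) > llh  # any positive likelihood compares as better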
@@ -230,4 +229,4 @@ def calculate_bic( Returns: The BIC value. """ - return n_estimated * math.log(n_measurements + n_priors) + 2 * nllh + return n_estimated * np.log(n_measurements + n_priors) + 2 * nllh From 87f024372fc7ebd5e72f4811f144f7c4f1b3b208 Mon Sep 17 00:00:00 2001 From: Dilan Pathirana Date: Sat, 16 Dec 2023 22:55:14 +0100 Subject: [PATCH 09/10] support returning `None` as model criterion if unavailable --- petab_select/model.py | 34 +++++++++++++++++++++++++++------- 1 file changed, 27 insertions(+), 7 deletions(-) diff --git a/petab_select/model.py b/petab_select/model.py index e3dd4792..09462d2a 100644 --- a/petab_select/model.py +++ b/petab_select/model.py @@ -222,6 +222,7 @@ def get_criterion( self, criterion: Criterion, compute: bool = True, + raise_on_failure: bool = True, ) -> Union[TYPE_CRITERION, None]: """Get a criterion value for the model. @@ -233,34 +234,53 @@ def get_criterion( attributes. For example, if the ``'AIC'`` criterion is requested, this can be computed from a predetermined model likelihood and its number of estimated parameters. + raise_on_failure: + Whether to raise a `ValueError` if the criterion could not be + computed. If `False`, `None` is returned. Returns: The criterion value, or `None` if it is not available. TODO check for previous use of this method before `.get` was used """ if criterion not in self.criteria and compute: - self.compute_criterion(criterion=criterion) + self.compute_criterion( + criterion=criterion, + raise_on_failure=raise_on_failure, + ) # value = self.criterion_computer(criterion=id) # self.set_criterion(id=id, value=value) return self.criteria.get(criterion, None) - def compute_criterion(self, criterion: Criterion) -> TYPE_CRITERION: + def compute_criterion( + self, + criterion: Criterion, + raise_on_failure: bool = True, + ) -> TYPE_CRITERION: """Compute a criterion value for the model. The value will also be stored, which will overwrite any previously stored value for the criterion. Args: - id: - The ID of the criterion (e.g. ``petab_select.constants.Criterion.AIC``). + criterion: + The ID of the criterion (e.g. :obj:`petab_select.constants.Criterion.AIC`). + raise_on_failure: + Whether to raise a `ValueError` if the criterion could not be + computed. If `False`, `None` is returned. Returns: The criterion value. """ - criterion_value = self.criterion_computer(criterion) - self.set_criterion(criterion, criterion_value) - return criterion_value + try: + criterion_value = self.criterion_computer(criterion) + self.set_criterion(criterion, criterion_value) + result = criterion_value + except ValueError: + if raise_on_failure: + raise + result = None + return result def set_estimated_parameters( self, From c88363547633780bfb32d834d4af09aed0e64c85 Mon Sep 17 00:00:00 2001 From: Dilan Pathirana Date: Sat, 16 Dec 2023 22:57:14 +0100 Subject: [PATCH 10/10] not clean, but support automatic calibration of uncalibrated predecessor models --- petab_select/ui.py | 22 +++++++++++++++++++++- 1 file changed, 21 insertions(+), 1 deletion(-) diff --git a/petab_select/ui.py b/petab_select/ui.py index 778e363a..4d14ebd3 100644 --- a/petab_select/ui.py +++ b/petab_select/ui.py @@ -1,3 +1,4 @@ +import copy from pathlib import Path from typing import Dict, List, Optional, Union @@ -76,10 +77,29 @@ def candidates( candidate_space.exclude_hashes(calibrated_models) # Set the predecessor model to the previous predecessor model. 
+    predecessor_model = candidate_space.previous_predecessor_model
+
+    # If the predecessor model has not yet been calibrated, then calibrate it.
+    if (
+        predecessor_model.get_criterion(
+            criterion,
+            raise_on_failure=False,
+        )
+        is None
+    ):
+        candidate_space.models = [copy.deepcopy(predecessor_model)]
+        # Dummy zero likelihood, which the predecessor model will
+        # improve on after it's actually calibrated.
+        predecessor_model.set_criterion(Criterion.LH, 0.0)
+        return candidate_space
+
+    # Exclude the calibrated predecessor model.
+    if not candidate_space.excluded(predecessor_model):
+        candidate_space.exclude(predecessor_model)
+
     # Set the new predecessor_model from the initial model or
     # by calling ui.best to find the best model to jump to if
     # this is not the first step of the search.
-    predecessor_model = candidate_space.previous_predecessor_model
     if newly_calibrated_models:
         predecessor_model = problem.get_best(
             newly_calibrated_models.values(),
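Editor's note: taken together, these commits change the contract of `candidates()`. The search now repeats until candidate models are found or the candidate space (FAMoS) raises `StopIteration`, and an uncalibrated predecessor model is returned as the single candidate (with a dummy zero likelihood) so that the calling tool calibrates it before the search proper continues. A rough sketch of how a calibration tool might drive this loop; the `calibrate()` helper is hypothetical, `Problem.new_candidate_space()` is assumed to exist, and the exact `candidates()` signature may differ from what is shown in the diffs above.

    import petab_select
    from petab_select.constants import Criterion

    problem = petab_select.Problem.from_yaml('petab_select_problem.yaml')
    candidate_space = problem.new_candidate_space()  # assumption: helper exists

    calibrated_models = {}
    newly_calibrated_models = {}
    while True:
        candidate_space = petab_select.ui.candidates(
            problem=problem,
            candidate_space=candidate_space,
            calibrated_models=calibrated_models,
            newly_calibrated_models=newly_calibrated_models,
            criterion=Criterion.AIC,
        )
        if not candidate_space.models:
            # FAMoS terminated (StopIteration) or the model space is exhausted.
            break
        # With this patch, an uncalibrated predecessor model shows up here as
        # the sole candidate and is calibrated like any other candidate model.
        for model in candidate_space.models:
            calibrate(model)  # hypothetical: calibrates and sets Criterion.AIC
        newly_calibrated_models = {
            model.get_hash(): model for model in candidate_space.models
        }
        calibrated_models.update(newly_calibrated_models)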