From 6b48debff4658c9f030e036ff738d424f5ca3c7a Mon Sep 17 00:00:00 2001
From: Kris Thielemans
Date: Wed, 10 Jul 2024 23:28:22 +0100
Subject: [PATCH 1/4] provide example implementing OSEM

---
 main_OSEM.py | 102 +++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 102 insertions(+)
 create mode 100644 main_OSEM.py

diff --git a/main_OSEM.py b/main_OSEM.py
new file mode 100644
index 0000000..8345961
--- /dev/null
+++ b/main_OSEM.py
@@ -0,0 +1,102 @@
+"""Main file to modify for submissions.
+
+Once renamed or symlinked as `main.py`, it will be used by `petric.py` as follows:
+
+>>> from main import Submission, submission_callbacks
+>>> from petric import data, metrics
+>>> algorithm = Submission(data)
+>>> algorithm.run(np.inf, callbacks=metrics + submission_callbacks)
+"""
+from cil.optimisation.algorithms import Algorithm
+from cil.optimisation.utilities import callbacks
+from petric import Dataset
+from sirf.contrib.partitioner.partitioner import partition_indices
+import sirf.STIR as STIR
+
+class MaxIteration(callbacks.Callback):
+    """
+    The organisers will call `Submission(data).run(np.inf)`, i.e. run for infinite iterations (until timeout).
+    This callback forces stopping after `max_iteration` instead.
+    """
+    def __init__(self, max_iteration: int, verbose: int = 1):
+        super().__init__(verbose)
+        self.max_iteration = max_iteration
+
+    def __call__(self, algorithm: Algorithm):
+        if algorithm.iteration >= self.max_iteration:
+            raise StopIteration
+
+
+class Submission(Algorithm):
+    """
+    This class provides an implementation of the OSEM algorithm (marginally modified).
+    Of course, OSEM does not converge to the MAP solution for this Challenge
+    (you will have to use data.prior for that), but hopefully this shows you how to
+    implement an algorithm from bare bones.
+    In this implementation, we have chosen NOT to use the sirf.STIR Poisson objective
+    function and its functionality. This is illustrated in the BSREM examples on
+    https://github.com/SyneRBI/SIRF-Contribs/tree/master/src/Python/sirf/contrib/BSREM
+
+    Note that in this implementation, we use a particular feature of OSEM (and in fact,
+    of the Poisson log-likelihood), namely that the multiplicative term cancels in the quotient
+    of measured and estimated data (thanks to our convention for the additive_term).
+    It turns out you only need it for the "sensitivity" terms.
+    """
+    def __init__(self, data: Dataset, num_subsets: int = 7, update_objective_interval: int = 10, **kwargs):
+        """
+        Initialisation function, setting up data & (hyper)parameters.
+        NB: in practice, `num_subsets` should likely be determined from the data.
+        This is just an example. Try to modify and improve it!
+        """
+        self.acquisition_models = []
+        self.prompts_subsets = []
+        self.subset_sensitivities = []
+        self.subset = 0
+        self.x = data.OSEM_image.clone()
+
+        # find views in each subset
+        # (note that SIRF can currently only do subsets over views)
+        views = data.mult_factors.dimensions()[2]
+        partitions_idxs = partition_indices(num_subsets, list(range(views)), stagger=True)
+
+        # for each subset: find data, create acq_model, and create subset_sensitivity (backproj of 1)
+        for i in range(num_subsets):
+            prompts_subset = data.acquired_data.get_subset(partitions_idxs[i])
+            additive_term_subset = data.additive_term.get_subset(partitions_idxs[i])
+            multiplicative_factors_subset = data.mult_factors.get_subset(partitions_idxs[i])
+
+            acquisition_model_subset = STIR.AcquisitionModelUsingParallelproj()
+            acquisition_model_subset.set_additive_term(additive_term_subset)
+            acquisition_model_subset.set_up(prompts_subset, self.x)
+
+            subset_sensitivity = acquisition_model_subset.backward(multiplicative_factors_subset)
+            # add a small number to avoid NaN in division
+            subset_sensitivity += subset_sensitivity.max() * 1e-6
+
+            self.acquisition_models.append(acquisition_model_subset)
+            self.prompts_subsets.append(prompts_subset)
+            self.subset_sensitivities.append(subset_sensitivity)
+
+        super().__init__(update_objective_interval=update_objective_interval, **kwargs)
+        self.configured = True  # required by Algorithm
+
+    def update(self):
+        # compute forward projection for the denominator
+        # add a small number to avoid NaN in division, as OSEM can lead to 0/0 or worse.
+        # (Theoretically, MLEM cannot, but it might nevertheless due to numerical issues)
+        denom = self.acquisition_models[self.subset].forward(self.x) + .0001
+        # divide measured data by estimate (ignoring mult_factors!)
+        quotient = self.prompts_subsets[self.subset] / denom
+
+        # update image with quotient of the back projection (without mult_factors!) and the subset_sensitivity
+        self.x *= self.acquisition_models[self.subset].backward(quotient) / self.subset_sensitivities[self.subset]
+        self.subset = (self.subset + 1) % len(self.prompts_subsets)
+
+    def update_objective(self):
+        # should do some stuff here to have a correct TensorBoard display, but as the
+        # objective function value is not part of the challenge, we will take an ugly short-cut.
+        # If you need it, you can implement it as a sum over subsets of something like
+        # prompts * log(acq_model.forward(self.x)) - self.x * subset_sensitivity
+        return 0
+
+submission_callbacks = [MaxIteration(660)]

From c8146b54eb128f7050d0a565ebdcad632a758d22 Mon Sep 17 00:00:00 2001
From: Casper da Costa-Luis
Date: Thu, 11 Jul 2024 12:35:39 +0100
Subject: [PATCH 2/4] misc tidy

---
 main_OSEM.py | 44 ++++++++++++++++++++------------------------
 1 file changed, 20 insertions(+), 24 deletions(-)

diff --git a/main_OSEM.py b/main_OSEM.py
index 8345961..3b05579 100644
--- a/main_OSEM.py
+++ b/main_OSEM.py
@@ -7,11 +7,12 @@
 >>> algorithm = Submission(data)
 >>> algorithm.run(np.inf, callbacks=metrics + submission_callbacks)
 """
+import sirf.STIR as STIR
 from cil.optimisation.algorithms import Algorithm
 from cil.optimisation.utilities import callbacks
 from petric import Dataset
 from sirf.contrib.partitioner.partitioner import partition_indices
-import sirf.STIR as STIR
+

 class MaxIteration(callbacks.Callback):
     """
@@ -29,18 +30,12 @@ def __call__(self, algorithm: Algorithm):

 class Submission(Algorithm):
     """
-    This class provides an implementation of the OSEM algorithm (marginally modified).
-    Of course, OSEM does not converge to the MAP solution for this Challenge
-    (you will have to use data.prior for that), but hopefully this shows you how to
-    implement an algorithm from bare bones.
-    In this implementation, we have chosen NOT to use the sirf.STIR Poisson objective
-    function and its functionality. This is illustrated in the BSREM examples on
-    https://github.com/SyneRBI/SIRF-Contribs/tree/master/src/Python/sirf/contrib/BSREM
-
-    Note that in this implementation, we use a particular feature of OSEM (and in fact,
-    of the Poisson log-likelihood), namely that the multiplicative term cancels in the quotient
-    of measured and estimated data (thanks to our convention for the additive_term).
-    It turns out you only need it for the "sensitivity" terms.
+    OSEM algorithm example.
+    NB: In OSEM Poisson log-likelihood, the multiplicative term cancels in the quotient of measured & estimated data
+    (so is rewritten here for efficiency).
+    NB: OSEM does not use `data.prior` and thus does not converge to the MAP reference used in PETRIC.
+    NB: this example does not use the `sirf.STIR` Poisson objective function.
+    NB: see https://github.com/SyneRBI/SIRF-Contribs/tree/master/src/Python/sirf/contrib/BSREM
     """
     def __init__(self, data: Dataset, num_subsets: int = 7, update_objective_interval: int = 10, **kwargs):
         """
@@ -49,11 +44,11 @@ def __init__(self, data: Dataset, num_subsets: int = 7, update_objective_interva
         This is just an example. Try to modify and improve it!
         """
         self.acquisition_models = []
-        self.prompts_subsets = []
-        self.subset_sensitivities = []
+        self.prompts = []
+        self.sensitivities = []
         self.subset = 0
         self.x = data.OSEM_image.clone()
-
+
         # find views in each subset
         # (note that SIRF can currently only do subsets over views)
         views = data.mult_factors.dimensions()[2]
@@ -64,7 +59,7 @@ def __init__(self, data: Dataset, num_subsets: int = 7, update_objective_interva
             prompts_subset = data.acquired_data.get_subset(partitions_idxs[i])
             additive_term_subset = data.additive_term.get_subset(partitions_idxs[i])
             multiplicative_factors_subset = data.mult_factors.get_subset(partitions_idxs[i])
-
+
             acquisition_model_subset = STIR.AcquisitionModelUsingParallelproj()
             acquisition_model_subset.set_additive_term(additive_term_subset)
             acquisition_model_subset.set_up(prompts_subset, self.x)
@@ -74,8 +69,8 @@ def __init__(self, data: Dataset, num_subsets: int = 7, update_objective_interva
             subset_sensitivity += subset_sensitivity.max() * 1e-6

             self.acquisition_models.append(acquisition_model_subset)
-            self.prompts_subsets.append(prompts_subset)
-            self.subset_sensitivities.append(subset_sensitivity)
+            self.prompts.append(prompts_subset)
+            self.sensitivities.append(subset_sensitivity)

         super().__init__(update_objective_interval=update_objective_interval, **kwargs)
         self.configured = True  # required by Algorithm
@@ -86,17 +81,18 @@ def update(self):
         # compute forward projection for the denominator
         # add a small number to avoid NaN in division, as OSEM can lead to 0/0 or worse.
         # (Theoretically, MLEM cannot, but it might nevertheless due to numerical issues)
         denom = self.acquisition_models[self.subset].forward(self.x) + .0001
         # divide measured data by estimate (ignoring mult_factors!)
-        quotient = self.prompts_subsets[self.subset] / denom
+        quotient = self.prompts[self.subset] / denom

-        # update image with quotient of the back projection (without mult_factors!) and the subset_sensitivity
-        self.x *= self.acquisition_models[self.subset].backward(quotient) / self.subset_sensitivities[self.subset]
-        self.subset = (self.subset + 1) % len(self.prompts_subsets)
+        # update image with quotient of the backprojection (without mult_factors!) and the sensitivity
+        self.x *= self.acquisition_models[self.subset].backward(quotient) / self.sensitivities[self.subset]
+        self.subset = (self.subset + 1) % len(self.prompts)

     def update_objective(self):
         # should do some stuff here to have a correct TensorBoard display, but as the
         # objective function value is not part of the challenge, we will take an ugly short-cut.
         # If you need it, you can implement it as a sum over subsets of something like
-        # prompts * log(acq_model.forward(self.x)) - self.x * subset_sensitivity
+        # prompts * log(acq_model.forward(self.x)) - self.x * sensitivity
         return 0
+
 submission_callbacks = [MaxIteration(660)]

From e56e20d378c512f3b8cd359213fd4679ab4471b4 Mon Sep 17 00:00:00 2001
From: Kris Thielemans
Date: Thu, 11 Jul 2024 14:27:26 +0100
Subject: [PATCH 3/4] update docstring

---
 main_OSEM.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/main_OSEM.py b/main_OSEM.py
index 3b05579..15e2aec 100644
--- a/main_OSEM.py
+++ b/main_OSEM.py
@@ -31,8 +31,8 @@ def __call__(self, algorithm: Algorithm):
 class Submission(Algorithm):
     """
     OSEM algorithm example.
-    NB: In OSEM Poisson log-likelihood, the multiplicative term cancels in the quotient of measured & estimated data
-    (so is rewritten here for efficiency).
+    NB: In OSEM, the multiplicative term cancels in the back-projection of the quotient of measured & estimated data
+    (so this is used here for efficiency). Note that a similar optimisation can be used for all algorithms using the Poisson log-likelihood.
     NB: OSEM does not use `data.prior` and thus does not converge to the MAP reference used in PETRIC.
     NB: this example does not use the `sirf.STIR` Poisson objective function.
     NB: see https://github.com/SyneRBI/SIRF-Contribs/tree/master/src/Python/sirf/contrib/BSREM

From 835f55560383717802ecd243d1dccc096f7bb70d Mon Sep 17 00:00:00 2001
From: Casper da Costa-Luis
Date: Thu, 11 Jul 2024 14:45:24 +0100
Subject: [PATCH 4/4] update docstring

---
 main_OSEM.py | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/main_OSEM.py b/main_OSEM.py
index 15e2aec..85bcf2e 100644
--- a/main_OSEM.py
+++ b/main_OSEM.py
@@ -88,10 +88,10 @@ def update(self):
         self.subset = (self.subset + 1) % len(self.prompts)

     def update_objective(self):
-        # should do some stuff here to have a correct TensorBoard display, but as the
-        # objective function value is not part of the challenge, we will take an ugly short-cut.
-        # If you need it, you can implement it as a sum over subsets of something like
-        # prompts * log(acq_model.forward(self.x)) - self.x * sensitivity
+        """
+        NB: The objective value is not required by OSEM or by PETRIC, so this returns `0`.
+        NB: In theory it should be `sum(prompts * log(acq_model.forward(self.x)) - self.x * sensitivity)` across all subsets.
+        """
         return 0
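
For reference, the cancellation these docstrings rely on is easy to check outside SIRF. The sketch below is an illustration only, not part of the patches: a dense NumPy matrix stands in for the subset projector, and it assumes the data convention stated in the class docstring, i.e. measured mean = mult_factors * (P @ x + additive_term).

    import numpy as np

    rng = np.random.default_rng(0)
    P = rng.uniform(0.1, 1.0, (16, 8))   # toy projector for one subset
    n = rng.uniform(0.5, 2.0, 16)        # mult_factors (detector efficiencies)
    a = rng.uniform(0.0, 0.1, 16)        # additive_term, so that mean = n * (P @ x + a)
    x = np.ones(8)                       # current image estimate
    y = rng.poisson(n * (P @ x + a)).astype(float)  # measured prompts

    sensitivity = P.T @ n                # "subset_sensitivity": backprojection of mult_factors
    # quotient and backprojection WITHOUT mult_factors, as in update():
    x_short = x * (P.T @ (y / (P @ x + a))) / sensitivity
    # the full model, keeping mult_factors everywhere:
    x_full = x * (P.T @ (n * y / (n * (P @ x + a)))) / sensitivity
    assert np.allclose(x_short, x_full)  # n cancels inside the backprojection

(The patches additionally add a small epsilon to the denominator before dividing; that guard is omitted here because the toy denominator is strictly positive.)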
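
Similarly, if a real objective value is wanted for the TensorBoard display, one way to flesh out `update_objective` along the lines of the final docstring is sketched below. This is an untested sketch, not part of the patches: it assumes `numpy` is imported as `np`, uses only the attributes defined in `__init__` plus the SIRF `as_array()` accessor, drops the constant terms of the Poisson log-likelihood, and costs one extra forward projection per subset per call (so `update_objective_interval` should stay large).

    def update_objective(self):
        # Poisson log-likelihood up to constants, summed over subsets:
        #   sum_s [ y_s . log(forward_s(x)) - x . sensitivity_s ]
        total = 0.
        for acq_model, prompts, sensitivity in zip(self.acquisition_models, self.prompts,
                                                   self.sensitivities):
            ybar = acq_model.forward(self.x).as_array() + 1e-4  # epsilon avoids log(0), as in update()
            total += float((prompts.as_array() * np.log(ybar)).sum())
            total -= float((self.x.as_array() * sensitivity.as_array()).sum())
        return total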