Skip to content

Commit

Permalink
misc tidy
Browse files Browse the repository at this point in the history
  • Loading branch information
casperdcl committed Jul 11, 2024
1 parent 1da64d8 commit cf1faf8
Showing 1 changed file with 20 additions and 24 deletions.
44 changes: 20 additions & 24 deletions main_OSEM.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,11 +7,12 @@
>>> algorithm = Submission(data)
>>> algorithm.run(np.inf, callbacks=metrics + submission_callbacks)
"""
import sirf.STIR as STIR
from cil.optimisation.algorithms import Algorithm
from cil.optimisation.utilities import callbacks
from petric import Dataset
from sirf.contrib.partitioner.partitioner import partition_indices
import sirf.STIR as STIR


class MaxIteration(callbacks.Callback):
"""
Expand All @@ -29,18 +30,12 @@ def __call__(self, algorithm: Algorithm):

class Submission(Algorithm):
"""
This class provides an implementation of the OSEM algorithm (marginally modified).
Of course, OSEM does not converge to the MAP solution for this Challenge
(you will have to use data.prior for that), but hopefully this shows you how to
implement an algorithm from bare bones.
In this implementation, we have chosen NOT to use the sirf.STIR Poisson objective
function and its functionality. This is illustrated in the BSREM examples on
https://github.com/SyneRBI/SIRF-Contribs/tree/master/src/Python/sirf/contrib/BSREM
Note that in this implementation, we use a particular feature of OSEM (and in fact,
of the Poisson log-likelihood), that the multiplicative term cancels in the quotient
of measured and estimated data (thanks to our convention for the additive_term).
It turns out you only need it for the "sensitivity" terms.
OSEM algorithm example.
NB: In OSEM Poisson log-likelihood, the multiplicative term cancels in the quotient of measured & estimated data
(so is rewritten here for efficiency).
NB: OSEM does not use `data.prior` and thus does not converge to the MAP reference used in PETRIC.
NB: this example does not use the `sirf.STIR` Poisson objective function.
NB: see https://github.com/SyneRBI/SIRF-Contribs/tree/master/src/Python/sirf/contrib/BSREM
"""
def __init__(self, data: Dataset, num_subsets: int = 7, update_objective_interval: int = 10, **kwargs):
"""
Expand All @@ -49,11 +44,11 @@ def __init__(self, data: Dataset, num_subsets: int = 7, update_objective_interva
This is just an example. Try to modify and improve it!
"""
self.acquisition_models = []
self.prompts_subsets = []
self.subset_sensitivities = []
self.prompts = []
self.sensitivities = []
self.subset = 0
self.x = data.OSEM_image.clone()

# find views in each subset
# (note that SIRF can currently only do subsets over views)
views = data.mult_factors.dimensions()[2]
Expand All @@ -64,7 +59,7 @@ def __init__(self, data: Dataset, num_subsets: int = 7, update_objective_interva
prompts_subset = data.acquired_data.get_subset(partitions_idxs[i])
additive_term_subset = data.additive_term.get_subset(partitions_idxs[i])
multiplicative_factors_subset = data.mult_factors.get_subset(partitions_idxs[i])

acquisition_model_subset = STIR.AcquisitionModelUsingParallelproj()
acquisition_model_subset.set_additive_term(additive_term_subset)
acquisition_model_subset.set_up(prompts_subset, self.x)
Expand All @@ -74,8 +69,8 @@ def __init__(self, data: Dataset, num_subsets: int = 7, update_objective_interva
subset_sensitivity += subset_sensitivity.max() * 1e-6

self.acquisition_models.append(acquisition_model_subset)
self.prompts_subsets.append(prompts_subset)
self.subset_sensitivities.append(subset_sensitivity)
self.prompts.append(prompts_subset)
self.sensitivities.append(subset_sensitivity)

super().__init__(update_objective_interval=update_objective_interval, **kwargs)
self.configured = True # required by Algorithm
def update(self):
    """Perform one OSEM sub-iteration on the current subset, then advance to the next subset.

    Updates ``self.x`` in place with the classic multiplicative OSEM step:
    x <- x * backward(prompts / forward(x)) / sensitivity.
    Note the mult_factors cancel in the quotient of measured and estimated data,
    so they are deliberately omitted here (they are baked into the sensitivities).
    """
    # Small constant added to the forward projection to avoid division by zero.
    # (Theoretically, MLEM cannot produce zero estimates, but it might nevertheless
    # due to numerical issues.)
    denom = self.acquisition_models[self.subset].forward(self.x) + .0001
    # divide measured data by estimate (ignoring mult_factors!)
    quotient = self.prompts[self.subset] / denom

    # update image with quotient of the backprojection (without mult_factors!) and the sensitivity
    self.x *= self.acquisition_models[self.subset].backward(quotient) / self.sensitivities[self.subset]
    # cycle to the next subset for the following call
    self.subset = (self.subset + 1) % len(self.prompts)

def update_objective(self):
    """Return a placeholder objective-function value (always 0).

    A correct value would be needed for a meaningful TensorBoard display, but the
    objective function value is not part of the challenge, so we take an ugly
    short-cut. If you need it, you can implement it as a sum over subsets of
    something like::

        prompts * log(acq_model.forward(self.x)) - self.x * sensitivity
    """
    return 0


submission_callbacks = [MaxIteration(660)]

0 comments on commit cf1faf8

Please sign in to comment.