
Commit

small changes to syntax (sapyb) in main, README updated with algorithm name in title
samdporter committed Sep 30, 2024
1 parent ae909b0 commit 4d971b0
Showing 2 changed files with 9 additions and 14 deletions.
4 changes: 2 additions & 2 deletions README.md
@@ -1,6 +1,6 @@
# PETRIC: PET Rapid Image reconstruction Challenge

-## Stochastic Optimisation with Subsets (SOS)
+## Stochastic Optimisation with Subsets (SOS) - SAGA

Authors: Sam Porter

@@ -27,4 +27,4 @@ The algorithm is warm started with 5 iterations of preconditioned gradient descent

Acknowledgements:
- Thanks to Margaret Duff, Casper da Costa-Luis and Edoardo Pasca for all their help
-- This algorithm lies heavily on the stochastic implementations in CIL so thanks to all those who've helped there, as well
+- This relies heavily on the stochastic implementations in CIL so thanks to all those who've helped there, as well
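The new title names the algorithm: SAGA, a variance-reduced stochastic gradient method that keeps a table of the most recently computed gradient for each subset. A minimal sketch of one SAGA update (illustrative names only; the submission itself builds on CIL's stochastic function classes):

```python
def saga_step(x, subset_grads, grad_table, table_avg, step, sampler):
    """One SAGA update: refresh one stored subset gradient and step along
    the variance-reduced gradient estimate (a sketch, not the repo code)."""
    j = sampler()                       # draw a subset index
    g_new = subset_grads[j](x)          # fresh gradient of subset j at x
    # unbiased, variance-reduced estimate: debias the fresh gradient
    # with the stored one and add the table average
    v = g_new - grad_table[j] + table_avg
    # keep the running average and the table consistent (mutates grad_table)
    table_avg = table_avg + (g_new - grad_table[j]) / len(grad_table)
    grad_table[j] = g_new
    return x - step * v, table_avg
```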
19 changes: 7 additions & 12 deletions main.py
@@ -59,7 +59,7 @@ def __init__(self, acq_models, freeze_iter = np.inf, epsilon=1e-6):

self.epsilon = epsilon
self.freeze_iter = freeze_iter
- self.x_freeze = None
+ self.freeze = None

for i,el in enumerate(acq_models):
if i == 0:
Expand All @@ -72,13 +72,12 @@ def __init__(self, acq_models, freeze_iter = np.inf, epsilon=1e-6):
self.s_sum_inv += s_inv

def apply(self, algorithm, gradient, out=None):

if algorithm.iteration < self.freeze_iter:
- ret = gradient * ((algorithm.solution.copy() * self.s_sum_inv) + self.epsilon)
+ ret = gradient * ((algorithm.solution * self.s_sum_inv) + self.epsilon)
else:
- if self.x_freeze is None:
-     self.x_freeze = (algorithm.solution.copy() * self.s_sum_inv) + self.epsilon
- ret = gradient * self.x_freeze
+ if self.freeze is None:
+     self.freeze = ((algorithm.solution * self.s_sum_inv) + self.epsilon)
+ ret = gradient * self.freeze
if out is not None:
out.fill(ret)
else:
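Beyond the rename `x_freeze` → `freeze`, the change drops a redundant `.copy()` (the multiplication already allocates a new container). For clarity, a standalone sketch of the same frozen-EM-preconditioner logic, with illustrative names:

```python
import numpy as np

class FrozenEMPreconditioner:
    """Sketch: scale the gradient by (x * s_sum_inv + epsilon); after
    freeze_iter iterations, reuse the scaling captured at the freeze point."""
    def __init__(self, s_sum_inv, freeze_iter=np.inf, epsilon=1e-6):
        self.s_sum_inv = s_sum_inv    # elementwise 1 / sum_j (A_j^T 1)
        self.freeze_iter = freeze_iter
        self.epsilon = epsilon
        self.freeze = None

    def apply(self, iteration, x, gradient):
        if iteration < self.freeze_iter:
            return gradient * (x * self.s_sum_inv + self.epsilon)
        if self.freeze is None:       # first call past the freeze point
            self.freeze = x * self.s_sum_inv + self.epsilon
        return gradient * self.freeze
```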
@@ -135,7 +134,8 @@ def get_step_size(self, algorithm):
# Armijo step size search
for _ in range(self.max_iter):
# Proximal step
- x_new = algorithm.g.proximal(algorithm.solution.copy() - step_size * precond_grad, step_size)
+ x_new = algorithm.solution.copy().sapyb(1, precond_grad, -step_size)
+ algorithm.g.proximal(x_new, step_size, out=x_new)
f_x_new = algorithm.f(x_new) + algorithm.g(x_new)
print("f_x_new: ", f_x_new)
# Armijo condition check
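The `sapyb` of the commit title is CIL/SIRF's fused "a times self plus b times y": `x.sapyb(a, y, b)` returns `a*x + b*y` in one pass, avoiding the chain of temporaries that operator arithmetic creates. The rewritten step is equivalent to the old one-liner:

```python
# before: each operator allocates an intermediate container
x_new = algorithm.g.proximal(algorithm.solution.copy() - step_size * precond_grad, step_size)

# after: fused gradient step, then an in-place proximal written back into x_new
x_new = algorithm.solution.copy().sapyb(1, precond_grad, -step_size)  # = solution - step_size * precond_grad
algorithm.g.proximal(x_new, step_size, out=x_new)
```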
@@ -151,7 +151,6 @@ def get_step_size(self, algorithm):
# Update the internal state with the new step size as the minimum of the current and previous step sizes
self.step_size = min(step_size, self.step_size)

- self.initial_step_size = step_size / self.beta # Adjust initial step size for next search
if self.counter < self.steps:
self.counter += 1
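For context, this loop is a standard backtracking (Armijo) line search: shrink the step by `beta` until the composite objective at the proximal trial point decreases sufficiently. A generic sketch assuming a CIL-like interface (`proximal`, `norm`); the exact acceptance test is outside the visible hunk:

```python
def backtracking_step(f, g, x, grad, step0, beta=0.5, tol=1e-4, max_iter=20):
    """Sketch: shrink the step until a sufficient-decrease test passes."""
    f_old = f(x) + g(x)
    step = step0
    for _ in range(max_iter):
        x_new = g.proximal(x - step * grad, step)   # proximal trial point
        # one common sufficient-decrease condition for proximal gradient steps
        if f(x_new) + g(x_new) <= f_old - (tol / step) * (x_new - x).norm() ** 2:
            break
        step *= beta                                # backtrack and retry
    return step, x_new
```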

@@ -225,7 +224,6 @@ def __init__(self, data: Dataset, update_objective_interval=10):
"""
Initialisation function, setting up data & (hyper)parameters.
"""

# Very simple heuristic to determine the number of subsets
self.num_subsets = calculate_subsets(data.acquired_data, min_counts_per_subset=2**20, max_num_subsets=16)
update_interval = self.num_subsets
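`calculate_subsets` is defined outside this diff; a plausible reading of its parameters (purely illustrative, not the repository's implementation) is to use as many subsets as the count statistics allow, capped at `max_num_subsets`:

```python
def calculate_subsets(acquired_data, min_counts_per_subset=2**20, max_num_subsets=16):
    """Illustrative guess: the most subsets that still leave at least
    min_counts_per_subset counts in each subset."""
    total_counts = float(acquired_data.sum())   # assumes a .sum() method
    by_counts = int(total_counts // min_counts_per_subset)
    return max(1, min(max_num_subsets, by_counts))
```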
@@ -242,12 +240,9 @@ def __init__(self, data: Dataset, update_objective_interval=10):

data.prior.set_penalisation_factor(data.prior.get_penalisation_factor() / len(obj_funs))
data.prior.set_up(data.OSEM_image)

- grad = data.OSEM_image.get_uniform_copy(0)

for f, d in zip(obj_funs, data_subs): # add prior to every objective function
f.set_prior(data.prior)
- grad -= f.gradient(data.OSEM_image)

sampler = Sampler.random_without_replacement(len(obj_funs))
f = -FullGradientInitialiserFunction(obj_funs, sampler=sampler, init_steps=5)
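Dividing the penalisation factor by `len(obj_funs)` before attaching the same prior to every subset objective keeps the total regularisation unchanged: the sum over subset objectives then weights the prior exactly once. A quick sanity check with made-up numbers:

```python
num_subsets = 8
beta = 0.04                           # hypothetical original penalisation factor
beta_per_subset = beta / num_subsets  # what each subset objective receives
# summing the prior term over all subset objectives recovers the original weight
assert abs(num_subsets * beta_per_subset - beta) < 1e-12
```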
