diff --git a/README.md b/README.md
index 149a9e6..dd1d48e 100644
--- a/README.md
+++ b/README.md
@@ -1,6 +1,6 @@
 # PETRIC: PET Rapid Image reconstruction Challenge
 
-## Stochastic Optimisation with Subsets (SOS)
+## Stochastic Optimisation with Subsets (SOS) - SAGA
 
 Authors: Sam Porter
 
@@ -27,4 +27,4 @@ The algorithm is warm started with 5 iterations of preconditioned gradient desce
 Acknowledgements:
 
 - Thanks to Margaret Duff, Casper da Costa-Luis and Edoardo Pasca for all their help
-- This algorithm lies heavily on the stochastic implementations in CIL so thanks to all those wo've helped there, as well
\ No newline at end of file
+- This relies heavily on the stochastic implementations in CIL, so thanks to all those who've helped there as well
\ No newline at end of file
diff --git a/main.py b/main.py
index 9a2e79b..d82432e 100644
--- a/main.py
+++ b/main.py
@@ -59,7 +59,7 @@ def __init__(self, acq_models, freeze_iter = np.inf, epsilon=1e-6):
 
         self.epsilon = epsilon
         self.freeze_iter = freeze_iter
-        self.x_freeze = None
+        self.freeze = None
 
         for i,el in enumerate(acq_models):
             if i == 0:
@@ -72,13 +72,12 @@ def __init__(self, acq_models, freeze_iter = np.inf, epsilon=1e-6):
                 self.s_sum_inv += s_inv
 
     def apply(self, algorithm, gradient, out=None):
-
         if algorithm.iteration < self.freeze_iter:
-            ret = gradient * ((algorithm.solution.copy() * self.s_sum_inv) + self.epsilon)
+            ret = gradient * ((algorithm.solution * self.s_sum_inv) + self.epsilon)
         else:
-            if self.x_freeze is None:
-                self.x_freeze = (algorithm.solution.copy() * self.s_sum_inv) + self.epsilon
-            ret = gradient * self.x_freeze
+            if self.freeze is None:
+                self.freeze = ((algorithm.solution * self.s_sum_inv) + self.epsilon)
+            ret = gradient * self.freeze
         if out is not None:
             out.fill(ret)
         else:
@@ -135,7 +134,8 @@ def get_step_size(self, algorithm):
         # Armijo step size search
         for _ in range(self.max_iter):
             # Proximal step
-            x_new = algorithm.g.proximal(algorithm.solution.copy() - step_size * precond_grad, step_size)
+            x_new = algorithm.solution.copy().sapyb(1, precond_grad, -step_size)
+            algorithm.g.proximal(x_new, step_size, out=x_new)
             f_x_new = algorithm.f(x_new) + algorithm.g(x_new)
             print("f_x_new: ", f_x_new)
             # Armijo condition check
@@ -151,7 +151,6 @@ def get_step_size(self, algorithm):
 
         # Update the internal state with the new step size as the minimum of the current and previous step sizes
         self.step_size = min(step_size, self.step_size)
-        self.initial_step_size = step_size / self.beta  # Adjust initial step size for next search
 
         if self.counter < self.steps:
             self.counter += 1
@@ -225,7 +224,6 @@ def __init__(self, data: Dataset, update_objective_interval=10):
         """
         Initialisation function, setting up data & (hyper)parameters.
         """
-        # Very simple heuristic to determine the number of subsets
         self.num_subsets = calculate_subsets(data.acquired_data, min_counts_per_subset=2**20, max_num_subsets=16)
         update_interval = self.num_subsets
 
@@ -242,12 +240,9 @@ def __init__(self, data: Dataset, update_objective_interval=10):
 
         data.prior.set_penalisation_factor(data.prior.get_penalisation_factor() / len(obj_funs))
         data.prior.set_up(data.OSEM_image)
-
-        grad = data.OSEM_image.get_uniform_copy(0)
         for f, d in zip(obj_funs, data_subs):
             # add prior to every objective function
             f.set_prior(data.prior)
-            grad -= f.gradient(data.OSEM_image)
 
         sampler = Sampler.random_without_replacement(len(obj_funs))
         f = -FullGradientInitialiserFunction(obj_funs, sampler=sampler, init_steps=5)