Skip to content

Commit

Permalink
Changes from Gemma's review
Browse files Browse the repository at this point in the history
  • Loading branch information
MargaretDuff committed Nov 22, 2024
1 parent 0b71bc9 commit c8ee858
Show file tree
Hide file tree
Showing 2 changed files with 21 additions and 11 deletions.
6 changes: 4 additions & 2 deletions CHANGELOG.md
Original file line number Diff line number Diff line change
Expand Up @@ -3,8 +3,12 @@
- Fix bug with 'median' and 'mean' methods in Masker averaging over the wrong axes.
- Enhancements:
- Removed multiple exits from numba implementation of KullbackLeibler divergence (#1901)
- Updated the `SPDHG` algorithm to take a stochastic `Sampler` and to more easily set step sizes (#1644)
- Dependencies:
 - Added scikit-image to the CIL-Demos conda install command, as it is needed for the new Callbacks notebook.
- Changes that break backwards compatibility:
- Deprecated `norms` and `prob` in the `SPDHG` algorithm to be set in the `BlockOperator` and `Sampler` respectively (#1644)


* 24.2.0
- New Features:
Expand Down Expand Up @@ -58,7 +62,6 @@
- Added documentation for the Partitioner to `framework.rst` (#1828)
- Added CIL vs SIRF tests comparing preconditioned ISTA in CIL and MLEM in SIRF (#1823)
- Update to CCPi-Regularisation toolkit v24.0.1 (#1868)
- Updated the `SPDHG` algorithm to take a stochastic `Sampler` and to more easily set step sizes (#1644)
- Bug fixes:
 - Gradient descent `update_objective` called twice on the initial point (#1789)
- ProjectionMap operator bug fix in adjoint and added documentation (#1743)
Expand All @@ -68,7 +71,6 @@
- Update dataexample remote data download to work with windows and use zenodo_get for data download (#1774)
- Changes that break backwards compatibility:
- Merged the files `BlockGeometry.py` and `BlockDataContainer.py` in `framework` to one file `block.py`. Please use `from cil.framework import BlockGeometry, BlockDataContainer` as before (#1799)
- Deprecated `norms` and `prob` in the `SPDHG` algorithm to be set in the `BlockOperator` and `Sampler` respectively (#1644)
- Bug fix in `FGP_TV` function to set the default behaviour not to enforce non-negativity (#1826).

* 24.0.0
Expand Down
26 changes: 17 additions & 9 deletions Wrappers/Python/test/test_algorithms.py
Original file line number Diff line number Diff line change
Expand Up @@ -70,7 +70,6 @@
# Fast Gradient Projection algorithm for Total Variation(TV)
from cil.optimisation.functions import TotalVariation

from cil.plugins.ccpi_regularisation.functions import FGP_TV
import logging
from testclass import CCPiTestClass
from utils import has_astra
Expand Down Expand Up @@ -1178,9 +1177,10 @@ def setUp(self):
self.F = BlockFunction(*[L2NormSquared(b=partitioned_data[i])
for i in range(self.subsets)])
alpha = 0.025
self.G = alpha * FGP_TV()
self.G = alpha * IndicatorBox(lower=0)

def test_SPDHG_defaults_and_setters(self):
#Test SPDHG init with default values
gamma = 1.
rho = .99
spdhg = SPDHG(f=self.F, g=self.G, operator=self.A)
Expand All @@ -1198,6 +1198,7 @@ def test_SPDHG_defaults_and_setters(self):
spdhg.x.as_array(), self.A.domain_geometry().allocate(0).as_array())
self.assertEqual(spdhg.update_objective_interval, 1)

#Test SPDHG setters - "from ratio"
gamma = 3.7
rho = 5.6
spdhg.set_step_sizes_from_ratio(gamma, rho)
Expand All @@ -1206,6 +1207,7 @@ def test_SPDHG_defaults_and_setters(self):
self.assertEqual(spdhg.tau, min([pi*rho / (si * ni**2) for pi, ni,
si in zip(spdhg._prob_weights, spdhg._norms, spdhg.sigma)]))

#Test SPDHG setters - set_step_sizes default values for sigma and tau
gamma = 1.
rho = .99
spdhg.set_step_sizes()
Expand All @@ -1214,21 +1216,25 @@ def test_SPDHG_defaults_and_setters(self):
self.assertEqual(spdhg.tau, min([rho*pi / (si * ni**2) for pi, ni,
si in zip(spdhg._prob_weights, spdhg._norms, spdhg.sigma)]))

#Test SPDHG setters - set_step_sizes with sigma and tau
spdhg.set_step_sizes(sigma=[1]*self.subsets, tau=100)
self.assertListEqual(spdhg.sigma, [1]*self.subsets)
self.assertEqual(spdhg.tau, 100)

#Test SPDHG setters - set_step_sizes with sigma
spdhg.set_step_sizes(sigma=[1]*self.subsets, tau=None)
self.assertListEqual(spdhg.sigma, [1]*self.subsets)
self.assertEqual(spdhg.tau, min([(rho*pi / (si * ni**2)) for pi, ni,
si in zip(spdhg._prob_weights, spdhg._norms, spdhg.sigma)]))

#Test SPDHG setters - set_step_sizes with tau
spdhg.set_step_sizes(sigma=None, tau=100)
self.assertListEqual(spdhg.sigma, [
gamma * rho*pi / (spdhg.tau*ni**2) for ni, pi in zip(spdhg._norms, spdhg._prob_weights)])
self.assertEqual(spdhg.tau, 100)

def test_spdhg_non_default_init(self):
#Test SPDHG init with non-default values
spdhg = SPDHG(f=self.F, g=self.G, operator=self.A, sampler=Sampler.random_with_replacement(10, list(np.arange(1, 11)/55.)),
initial=self.A.domain_geometry().allocate(1), update_objective_interval=10)

Expand All @@ -1237,8 +1243,9 @@ def test_spdhg_non_default_init(self):
spdhg.x.as_array(), self.A.domain_geometry().allocate(1).as_array())
self.assertEqual(spdhg.update_objective_interval, 10)

#Test SPDHG setters - prob_weights and a sampler gives an error
with self.assertRaises(ValueError):
spdhg = SPDHG(f=self.F, g=self.G, operator=self.A, _norms=[1]*len(self.A), prob_weights=[1/(self.subsets-1)]*(
spdhg = SPDHG(f=self.F, g=self.G, operator=self.A, prob_weights=[1/(self.subsets-1)]*(
self.subsets-1)+[0], sampler=Sampler.random_with_replacement(10, list(np.arange(1, 11)/55.)))

spdhg = SPDHG(f=self.F, g=self.G, operator=self.A, prob_weights=[1/(self.subsets-1)]*(
Expand Down Expand Up @@ -1266,17 +1273,16 @@ def test_spdhg_deprecated_vargs(self):
1/(self.subsets-1)]*(self.subsets-1)+[0])

with self.assertRaises(ValueError):
spdhg = SPDHG(f=self.F, g=self.G, operator=self.A, _norms=[1]*len(self.A), prob=[1/(self.subsets-1)]*(
spdhg = SPDHG(f=self.F, g=self.G, operator=self.A, prob=[1/(self.subsets-1)]*(
self.subsets-1)+[0], sampler=Sampler.random_with_replacement(10, list(np.arange(1, 11)/55.)))


with self.assertRaises(ValueError):
spdhg = SPDHG(f=self.F, g=self.G, operator=self.A, _norms=[1]*len(self.A), prob=[1/(self.subsets-1)]*(
spdhg = SPDHG(f=self.F, g=self.G, operator=self.A, prob=[1/(self.subsets-1)]*(
self.subsets-1)+[0], prob_weights= [1/(self.subsets-1)]*(self.subsets-1)+[0])

with self.assertRaises(ValueError):
spdhg = SPDHG(f=self.F, g=self.G, operator=self.A, sfdsdf=3, _norms=[
1]*len(self.A), sampler=Sampler.random_with_replacement(10, list(np.arange(1, 11)/55.)))
spdhg = SPDHG(f=self.F, g=self.G, operator=self.A, sfdsdf=3, sampler=Sampler.random_with_replacement(10, list(np.arange(1, 11)/55.)))


def test_spdhg_set_norms(self):
Expand Down Expand Up @@ -1309,6 +1315,8 @@ def test_spdhg_check_convergence(self):
spdhg.set_step_sizes(sigma=None, tau=100)
self.assertTrue(spdhg.check_convergence())

@unittest.skipUnless(has_astra, "cil-astra not available")

def test_SPDHG_num_subsets_1(self):
data = dataexample.SIMPLE_PHANTOM_2D.get(size=(10, 10))

Expand Down Expand Up @@ -1340,7 +1348,7 @@ def test_SPDHG_num_subsets_1(self):
A_pdhg = IdentityOperator(partitioned_data[0].geometry)

alpha = 0.025
G = alpha * FGP_TV()
G = alpha * IndicatorBox(lower=0)

spdhg = SPDHG(f=F, g=G, operator=A, update_objective_interval=10)

Expand Down Expand Up @@ -1477,7 +1485,7 @@ def test_SPDHG_vs_PDHG_explicit(self):
prob = [1/(2*subsets)]*(len(K)-1) + [1/2]
spdhg = SPDHG(f=F, g=G, operator=K,
update_objective_interval=200, prob=prob)
spdhg.run(20 * subsets)
spdhg.run(25 * subsets)

# %% 'explicit' PDHG, scalar step-sizes
op1 = alpha * GradientOperator(ig)
Expand Down

0 comments on commit c8ee858

Please sign in to comment.