diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 8739fb8..5129820 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -24,7 +24,7 @@ jobs: run: | python -m pip install --upgrade pip python -m pip install flake8 pytest pytest-cov hypothesis - if [ -f requirements.txt ]; then pip install -r requirements.txt; fi + if [ -f requirements_dev.txt ]; then pip install -r requirements_dev.txt; fi - name: Lint with flake8 run: | # stop the build if there are Python syntax errors or undefined names diff --git a/README.md b/README.md index 9afbeac..5ff5756 100644 --- a/README.md +++ b/README.md @@ -3,8 +3,6 @@ [![DOI](https://zenodo.org/badge/555583385.svg)](https://zenodo.org/badge/latestdoi/555583385) [![PyPI version](https://badge.fury.io/py/regularizepsf.svg)](https://badge.fury.io/py/regularizepsf) -**UNDER DEVELOPMENT** - A package for manipulating and correcting variable point spread functions. Below is an example of correcting model data using the package. An initial image of a simplified starfield (a) is synthetically observed with a slowly @@ -12,8 +10,6 @@ varying PSF (b), then regularized with this technique (c). The final image visua the initial image with the target PSF (d). The panels are gamma-corrected to highlight the periphery of the model PSFs. ![Example result image](model_example.png) - - ## Getting started `pip install regularizepsf` and then follow along with the [Quickstart section](https://punch-mission.github.io/regularizepsf/quickstart.html). @@ -24,6 +20,9 @@ We encourage all contributions. If you have a problem with the code or would lik ## License See LICENSE for the MIT license +## Need help? +Please contact Marcus Hughes at [marcus.hughes@swri.org](mailto:marcus.hughes@swri.org). + ## Citation Please cite the associated paper if you use this technique: @@ -47,6 +46,3 @@ Please cite the associated paper if you use this technique: } ``` - -## Contact -Please contact Marcus Hughes at [marcus.hughes@swri.org](mailto:marcus.hughes@swri.org) for any questions. diff --git a/docs/_config.yml b/docs/_config.yml index dfad294..967e811 100644 --- a/docs/_config.yml +++ b/docs/_config.yml @@ -1,13 +1,14 @@ # Book settings # Learn more at https://jupyterbook.org/customize/config.html -title: regularizepsf +title: regularizePSF author: PUNCH Science Operations Center # Force re-execution of notebooks on each build. # See https://jupyterbook.org/content/execute.html execute: execute_notebooks: force + timeout: 100 # Define the name of the latex output file for PDF builds latex: @@ -20,10 +21,26 @@ bibtex_bibfiles: # Information about where the book exists on the web repository: - url: https://github.com/executablebooks/jupyter-book # Online location of your book + url: https://github.com/punch-mission/regularizepsf # Online location of your book path_to_book: docs # Optional path to your book, relative to the repository root branch: main # Which branch of the repository should be used when creating links (optional) +parse: + # default extensions to enable in the myst parser. 
+ # See https://myst-parser.readthedocs.io/en/latest/using/syntax-optional.html + myst_enable_extensions: + # - amsmath + - colon_fence + # - deflist + - dollarmath + - html_admonition + # - html_image + - linkify + # - replacements + # - smartquotes + - substitution + myst_url_schemes: [mailto, http, https] # URI schemes that will be recognised as external URLs in Markdown links + # Add GitHub buttons to your book # See https://jupyterbook.org/customize/config.html#add-a-link-to-your-repository html: @@ -36,7 +53,22 @@ sphinx: - 'sphinx.ext.napoleon' - 'sphinx.ext.viewcode' - 'sphinx.ext.autosummary' + - 'sphinx.ext.inheritance_diagram' + - 'autoapi.extension' + - "myst_nb" +# - 'sphinx_codeautolink' config: + html_show_copyright: false add_module_names: False - autosummary_generate: True - + autoapi_dirs: + - "../regularizepsf" + autoapi_root: "api" + autoapi_add_toctree_entry: false + autoapi_keep_files: false + autoapi_options: + - "members" + - "undoc-members" + - "show-inheritance" + - "special-members" + - "imported-members" + autoapi_python_class_content: "both" \ No newline at end of file diff --git a/docs/_toc.yml b/docs/_toc.yml index 0eb977a..0130a1d 100644 --- a/docs/_toc.yml +++ b/docs/_toc.yml @@ -5,7 +5,9 @@ format: jb-book root: intro chapters: - file: quickstart -- file: array - file: functional - file: theory -- file: cite \ No newline at end of file +- file: cite +- file: help +- file: api/regularizepsf/index + title: API diff --git a/docs/array.md b/docs/array.md deleted file mode 100644 index 51966cc..0000000 --- a/docs/array.md +++ /dev/null @@ -1 +0,0 @@ -# Correct using `ArrayCorrector` \ No newline at end of file diff --git a/docs/cite.md b/docs/cite.md index cf24046..c2dbbfc 100644 --- a/docs/cite.md +++ b/docs/cite.md @@ -1,23 +1,35 @@ # How to cite -Use the following Bibtex citation: +To cite the paper: ``` -@misc{https://doi.org/10.48550/arxiv.2212.02594, - doi = {10.48550/ARXIV.2212.02594}, - - url = {https://arxiv.org/abs/2212.02594}, - - author = {Hughes, J. M. and DeForest, C. E. and Seaton, D. B.}, - - keywords = {Instrumentation and Methods for Astrophysics (astro-ph.IM), FOS: Physical sciences, FOS: Physical sciences}, - - title = {Coma Off It: Removing Variable Point Spread Functions from Astronomical Images}, - - publisher = {arXiv}, - - year = {2022}, - - copyright = {arXiv.org perpetual, non-exclusive license} +@ARTICLE{2022arXiv221202594H, + author = {{Hughes}, J.~M. and {DeForest}, C.~E. 
and {Seaton}, D.~B.},
+        title = "{Coma Off It: Removing Variable Point Spread Functions from Astronomical Images}",
+      journal = {arXiv e-prints},
+     keywords = {Astrophysics - Instrumentation and Methods for Astrophysics},
+         year = 2022,
+        month = dec,
+          eid = {arXiv:2212.02594},
+        pages = {arXiv:2212.02594},
+archivePrefix = {arXiv},
+       eprint = {2212.02594},
+ primaryClass = {astro-ph.IM},
+       adsurl = {https://ui.adsabs.harvard.edu/abs/2022arXiv221202594H},
+      adsnote = {Provided by the SAO/NASA Astrophysics Data System}
 }
+```
+
+To cite the software:
+```
+@software{marcus_hughes_2022_7392394,
+  author       = {Marcus Hughes},
+  title        = {punch-mission/regularizepsf: v0.0.3},
+  month        = dec,
+  year         = 2022,
+  publisher    = {Zenodo},
+  version      = {0.0.3},
+  doi          = {10.5281/zenodo.7392394},
+  url          = {https://doi.org/10.5281/zenodo.7392394}
+}
 ```
\ No newline at end of file
diff --git a/docs/data/DASH.fits b/docs/data/DASH.fits
new file mode 100644
index 0000000..1ad44cb
Binary files /dev/null and b/docs/data/DASH.fits differ
diff --git a/docs/functional.md b/docs/functional.md
index c203b61..d8117cb 100644
--- a/docs/functional.md
+++ b/docs/functional.md
@@ -1 +1,113 @@
-# Correct using `FunctionalCorrector`
\ No newline at end of file
+# Correct using `FunctionalCorrector`
+A functional corrector is defined by a set of equations instead of image arrays.
+A functional corrector can be helpful when you know the form of the PSF, because you can define it directly.
+
+```{note}
+To correct an image, a `FunctionalCorrector` will be converted to an `ArrayCorrector`.
+If you plan to save a model for many corrections, you may find it more convenient to manually convert to an `ArrayCorrector`
+and then save the `ArrayCorrector` instead. This is because a `FunctionalCorrector` merely pickles the functions using
+`dill` while `ArrayCorrector` saves using HDF5. For more details, see the [Saving a corrector](save-corrector) section.
+```
+
+## `simple_psf`: the starting point
+Every model begins with a `simple_psf`. It requires the first two arguments to be the `x` and `y` coordinates.
+These will often be passed in as arrays, so your function should operate in a vectorized manner and be able to output an
+array as well.
+
+```py
+import numpy as np
+
+from regularizepsf import simple_psf
+
+@simple_psf
+def a_basic_example(x, y):
+    return x + y
+```
+You can always evaluate your PSF at a single point to determine its value:
+```py
+print(a_basic_example(101, 204))
+```
+Or you can evaluate it at a variety of `x` and `y` coordinates using `numpy` arrays:
+```py
+print(a_basic_example(np.arange(100), np.arange(100)))
+```
+
+You can also supply additional arguments, provided they have default values. We will see in the next section
+how to use a `varied_psf` to make them vary across the field-of-view (FOV) of the image.
+
+```py
+@simple_psf
+def with_default_arguments(x, y, width=100):
+    return x + y + width
+```
+
+## `varied_psf`: a more realistic model
+The purpose of this package is to correct images with variable PSFs. Thus, we need a way to encode how the
+PSF varies across the FOV. That's where `varied_psf` helps. The decorator requires a `simple_psf` as an argument.
+We call this the base PSF.
+Then, the function takes `x` and `y` as parameters and returns a dictionary specifying how the defaulted parameters of the base PSF vary.
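+In other words, for every image coordinate the function returns keyword arguments (such as `{"width": 50}`) that override the base PSF's default parameter values at that location.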
+
+For example,
+```py
+from regularizepsf import simple_psf, varied_psf
+
+@simple_psf
+def base_psf(x, y, width=100):
+    return x + y + width
+
+@varied_psf(base_psf)
+def complicated_psf(x, y):
+    return {"width": x/(y+1)}
+```
+
+Now the PSF's width will vary across the image, taking the value of `x` divided by `y+1`. (We add one to avoid division
+by zero errors.)
+
+## Making a `FunctionalCorrector`
+Using these functionally defined examples, we can then create a `FunctionalCorrector` to correct an image.
+
+```py
+from regularizepsf import FunctionalCorrector
+
+my_simple_corrector = FunctionalCorrector(base_psf)
+my_complicated_corrector = FunctionalCorrector(complicated_psf)
+```
+
+As discussed in the [Quickstart](quickstart.md), we often wish to correct our PSF to a uniform output by applying a
+target PSF. We can provide that too!
+
+```py
+@simple_psf
+def target_psf(x, y):
+    return np.ones_like(x)
+
+my_corrector = FunctionalCorrector(complicated_psf, target_psf)
+```
+
+## Correcting an image
+Correcting an image is now trivial. We just load the image and correct it with a specified patch size, 256 in this case.
+```python
+from astropy.io import fits
+
+with fits.open("path_to_image.fits") as hdul:
+    data = hdul[0].data.astype(float)
+
+my_corrector.correct_image(data, 256)
+```
+
+```{note}
+If you're planning to do many corrections, you might first convert to an `ArrayCorrector`. The `FunctionalCorrector`'s
+`correct_image` method performs this conversion internally and would repeat it for each image.
+```
+
+You can evaluate to an `ArrayCorrector` as shown below. The arguments are the `x` coordinates, the `y` coordinates, and the `size` of the patches.
+```python
+new_corrector = my_corrector.evaluate_to_array_form(np.arange(256), np.arange(256), 256)
+```
+
+(save-corrector)=
+## Saving a corrector
+We can save a corrector in either its `FunctionalCorrector` form or its `ArrayCorrector` form.
+```python
+my_corrector.save("functional.psf")
+new_corrector.save("array.psf")
+```
\ No newline at end of file
diff --git a/docs/help.md b/docs/help.md
new file mode 100644
index 0000000..687a7a9
--- /dev/null
+++ b/docs/help.md
@@ -0,0 +1,7 @@
+# Getting help
+
+If you notice a bug, please open an issue on GitHub.
+
+If you need help using this code, you can open an issue or contact Marcus Hughes directly at `marcus.hughes@swri.org`.
+We want to make this code as user-friendly as possible. If you're encountering an issue, it's likely someone else is too,
+and you can help everyone by speaking up. Thanks for your support!
\ No newline at end of file
diff --git a/docs/images/dash.png b/docs/images/dash.png
new file mode 100644
index 0000000..5ccfff0
Binary files /dev/null and b/docs/images/dash.png differ
diff --git a/docs/intro.md b/docs/intro.md
index fac60e7..c65822c 100644
--- a/docs/intro.md
+++ b/docs/intro.md
@@ -1,13 +1,15 @@
-# Welcome to regularizepsf
+# Welcome
 
 `regularizepsf` is a Python package (with Cython speed improvements) for determining and correcting point spread functions in astronomical images.
-It was originally developed for the [PUNCH](https://punch.space.swri.edu/) mission and is documented in an upcoming
-Astrophysical Journal paper. For now, see [the arXiv listing](https://arxiv.org/abs/2212.02594).
+It was originally developed for the [PUNCH](https://punch.space.swri.edu/) mission and is documented in a forthcoming
+Astrophysical Journal paper (see the [arXiv version](https://arxiv.org/abs/2212.02594) until publication).
+
+Below is an example of correcting model data using the package.
+An initial image of a simplified starfield (a) is synthetically observed with a slowly varying PSF (b),
+then regularized with this technique (c). The final image visually matches a direct convolution of the initial image
+with the target PSF (d). The panels are gamma-corrected to highlight the periphery of the model PSFs.
 
-Below is an example of correcting model data using the package. An initial image of a simplified starfield (a) is synthetically observed with a slowly
-varying PSF (b), then regularized with this technique (c). The final image visually matches a direct convolution of
-the initial image with the target PSF (d). The panels are gamma-corrected to highlight the periphery of the model PSFs.
 ```{image} images/model.png
 :alt: example of correction
 :width: 800px
diff --git a/docs/quickstart.md b/docs/quickstart.md
index b650b29..29569e0 100644
--- a/docs/quickstart.md
+++ b/docs/quickstart.md
@@ -1,37 +1,129 @@
+---
+jupytext:
+  formats: md:myst
+  text_representation:
+    extension: .md
+    format_name: myst
+kernelspec:
+  display_name: Python
+  language: python
+  name: python3
+---
+
 # Quickstart
 
 ## Installing
-Installation should be as simple as `pip install regularizepsf`.
+Open a terminal and run `pip install regularizepsf`.
+
+## Overview of the technique
+The basic premise of this technique is to model the point spread function of an imager using stars as our point sources.
+Then, we calculate the inverse PSF and apply it by multiplying in Fourier space, which is equivalent to convolving in image space. Instead
+of applying the inverse PSF directly, we also include a "target PSF" model to make the resulting corrected
+stars uniform. This target is typically a Gaussian, as shown in the example below. See [theory](theory.md) for more details.
+
+## Demo: correcting a DASH image
+This tutorial uses an image from the Demonstration Airglow-Subtracted Heliospheric
+imager (DASH) as an example. For details on DASH, see Section 4.2 of the [accompanying paper](https://ui.adsabs.harvard.edu/abs/2022arXiv221202594H).
+Here is a visualization of the result we're aiming to produce.
 
-## Extracting a simple PSF model
-Before you can correct an image, you need a PSF model for it.
-You can either define a functional model or an array model. A functional
+```{figure} images/dash.png
+---
+alt: DASH correction example
+width: 800px
+align: center
+---
+Image patches in a DASH image before and after PSF regularization. Left: Stars in each region exhibit a varying PSF before correction. Right: After application of a transfer PSF, the stars are more uniform across the image. The panels are gamma-corrected to highlight the periphery of the
+original and regularized PSFs.
+```
+### Defining input parameters
+Before you can correct an image, you need a PSF model for the system that collected the image.
+You can either define a **functional model** or an **array model**. A functional
 model is defined by an equation whereas an array model uses the data directly as the model.
 For most purposes, we recommend an array model because deriving the correct functional form
-can be tricky.
+can be tricky. Thus, this quickstart tutorial walks through the array model form.
+First, we need to define both the `psf_size` and the `patch_size`. This technique breaks the image into
+overlapping square neighborhoods of length `patch_size` on each side. In this case, a `patch_size` of 256 indicates we use a `256x256` neighborhood size. However, we know for
+this instrument that the PSF is more compact than this, i.e. a star will not contribute light over the full neighborhood.
+For computational efficiency reasons, we thus use a `psf_size` of `32x32`. For each star, a box centered on the star
+of size `32x32` pixels is extracted. These are averaged to create the PSF model. The region outside the `32x32` box but
+within the `256x256` pixel window is filled with the median of the image.
 
-## Correcting an image with a PSF ArrayCorrector
-
+Finally, we need to set the target PSF model's parameters. In this case, we will simply use a
+symmetric Gaussian and thus have one parameter: the full-width-half-maximum (FWHM). We set this at 3.25 pixels.
 ```py
-from regularizepsf.fitter import CoordinatePatchCollection
-patch_size, psf_size = 256, 32
-image_fn = "path/to/image.fits"
+import numpy as np
+from astropy.io import fits
+from regularizepsf import CoordinatePatchCollection, simple_psf
 
-averaged = CoordinatePatchCollection.find_stars_and_average([image_fn],
-                                                            psf_size,
-                                                            patch_size)
+# Define the parameters and image to use
+psf_size = 32
+patch_size = 256
+target_fwhm = 3.25
+image_fn = "data/DASH.fits"
+```
+
+### Building the target PSF
+We first create a target PSF model by defining
+a Python function that has inputs of `x` and `y` and outputs the value of the PSF at `(x,y)`.
+This is a functional model and is thus decorated with the `simple_psf` decorator. We evaluate the model over the full
+patch size.
+```py
+# Define the target PSF
+center = patch_size / 2
+sigma = target_fwhm / 2.355
 
 @simple_psf
-def target(x, y, x0=patch_size / 2, y0=patch_size / 2, sigma_x=3.25 / 2.355, sigma_y=3.25 / 2.355):
+def target(x, y, x0=center, y0=center, sigma_x=sigma, sigma_y=sigma):
     return np.exp(-(np.square(x - x0) / (2 * np.square(sigma_x))
                     + np.square(y - y0) / (2 * np.square(sigma_y))))
 
-evaluation_dictionary = {(identifier.x, identifer.y): patch for identifier, patch in averaged.items()}
 target_evaluation = target(*np.meshgrid(np.arange(patch_size), np.arange(patch_size)))
-array_corrector = ArrayCorrector(evaluation_dictionary, target_evaluation)
+```
+
+### Building the ArrayCorrector
+Everything else is handled in the `find_stars_and_average` method on the `CoordinatePatchCollection`.
+It takes a list of images, the `psf_size`, and the `patch_size`. The function's signature is:
+
+```{eval-rst}
+.. autofunction:: regularizepsf.fitter.CoordinatePatchCollection.find_stars_and_average
+```
+
+We convert from a `CoordinatePatchCollection` object
+to an `ArrayCorrector` using the `to_array_corrector` method. It requires an evaluated target PSF, which we constructed
+in the previous section.
+```py
+# Extract all the stars from that image and create a PSF model with a target PSF
+array_corrector = CoordinatePatchCollection.find_stars_and_average([image_fn],
+                                                                   psf_size,
+                                                                   patch_size).to_array_corrector(target_evaluation)
+```
+
+### Performing the correction
+Now we have all the components. It's time to correct the image! We open it as a `numpy` array and then use the
+`array_corrector` to correct it.
+
+```{warning}
+The image data type *must be* float. Otherwise, you will receive a somewhat cryptic error about dtypes not matching.
+The data must also match the endianness of your computer. This can be a problem because many computers are little-endian while FITS
+images are stored big-endian.
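+Calling `.astype(float)` on the data, as in the snippet below, returns a native-endian float array and resolves both issues.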
+``` + +```py +# Load the image for correcting with fits.open(image_fn) as hdul: - data = hdul[0].data + data = hdul[0].data.astype(float) +# See the corrected result! corrected = array_corrector.correct_image(data, alpha=2.0, epsilon=0.3) +``` + +The signature of `correct_image` is: +```{eval-rst} +.. autofunction:: regularizepsf.corrector.ArrayCorrector.correct_image +``` + +You can save the `ArrayCorrector` for future usage instead of having to derive it for each image. This is done by simply: +```py +array_corrector.save("path_to_save.psf") ``` \ No newline at end of file diff --git a/docs/quickstart_example.py b/docs/quickstart_example.py new file mode 100644 index 0000000..03f33c0 --- /dev/null +++ b/docs/quickstart_example.py @@ -0,0 +1,32 @@ +import numpy as np +from astropy.io import fits +from regularizepsf import CoordinatePatchCollection, simple_psf + +# Define the parameters and image to use +psf_size = 32 +patch_size = 256 +target_fwhm = 3.25 +image_fn = "data/DASH.fits" + +# Define the target PSF +@simple_psf +def target(x, y, + x0=patch_size / 2, y0=patch_size / 2, + sigma_x= target_fwhm / 2.355, sigma_y= target_fwhm / 2.355): + return np.exp(-(np.square(x - x0) / (2 * np.square(sigma_x)) + + np.square(y - y0) / (2 * np.square(sigma_y)))) + +target_evaluation = target(*np.meshgrid(np.arange(patch_size), np.arange(patch_size))) + +# Extract all the stars from that image and create a PSF model with a target PSF +array_corrector = CoordinatePatchCollection.find_stars_and_average([image_fn], + psf_size, + patch_size).to_array_corrector(target_evaluation) + +# Load the image for correcting +with fits.open(image_fn) as hdul: + data = hdul[0].data.astype(float) + +print("ready to correct") +# See the corrected result! +corrected = array_corrector.correct_image(data, alpha=2.0, epsilon=0.3) diff --git a/docs/references.bib b/docs/references.bib index 783ec6a..e69de29 100644 --- a/docs/references.bib +++ b/docs/references.bib @@ -1,56 +0,0 @@ ---- ---- - -@inproceedings{holdgraf_evidence_2014, - address = {Brisbane, Australia, Australia}, - title = {Evidence for {Predictive} {Coding} in {Human} {Auditory} {Cortex}}, - booktitle = {International {Conference} on {Cognitive} {Neuroscience}}, - publisher = {Frontiers in Neuroscience}, - author = {Holdgraf, Christopher Ramsay and de Heer, Wendy and Pasley, Brian N. and Knight, Robert T.}, - year = {2014} -} - -@article{holdgraf_rapid_2016, - title = {Rapid tuning shifts in human auditory cortex enhance speech intelligibility}, - volume = {7}, - issn = {2041-1723}, - url = {http://www.nature.com/doifinder/10.1038/ncomms13654}, - doi = {10.1038/ncomms13654}, - number = {May}, - journal = {Nature Communications}, - author = {Holdgraf, Christopher Ramsay and de Heer, Wendy and Pasley, Brian N. and Rieger, Jochem W. and Crone, Nathan and Lin, Jack J. and Knight, Robert T. and Theunissen, Frédéric E.}, - year = {2016}, - pages = {13654}, - file = {Holdgraf et al. - 2016 - Rapid tuning shifts in human auditory cortex enhance speech intelligibility.pdf:C\:\\Users\\chold\\Zotero\\storage\\MDQP3JWE\\Holdgraf et al. - 2016 - Rapid tuning shifts in human auditory cortex enhance speech intelligibility.pdf:application/pdf} -} - -@inproceedings{holdgraf_portable_2017, - title = {Portable learning environments for hands-on computational instruction using container-and cloud-based technology to teach data science}, - volume = {Part F1287}, - isbn = {978-1-4503-5272-7}, - doi = {10.1145/3093338.3093370}, - abstract = {© 2017 ACM. 
There is an increasing interest in learning outside of the traditional classroom setting. This is especially true for topics covering computational tools and data science, as both are challenging to incorporate in the standard curriculum. These atypical learning environments offer new opportunities for teaching, particularly when it comes to combining conceptual knowledge with hands-on experience/expertise with methods and skills. Advances in cloud computing and containerized environments provide an attractive opportunity to improve the effciency and ease with which students can learn. This manuscript details recent advances towards using commonly-Available cloud computing services and advanced cyberinfrastructure support for improving the learning experience in bootcamp-style events. We cover the benets (and challenges) of using a server hosted remotely instead of relying on student laptops, discuss the technology that was used in order to make this possible, and give suggestions for how others could implement and improve upon this model for pedagogy and reproducibility.}, - booktitle = {{ACM} {International} {Conference} {Proceeding} {Series}}, - author = {Holdgraf, Christopher Ramsay and Culich, A. and Rokem, A. and Deniz, F. and Alegro, M. and Ushizima, D.}, - year = {2017}, - keywords = {Teaching, Bootcamps, Cloud computing, Data science, Docker, Pedagogy} -} - -@article{holdgraf_encoding_2017, - title = {Encoding and decoding models in cognitive electrophysiology}, - volume = {11}, - issn = {16625137}, - doi = {10.3389/fnsys.2017.00061}, - abstract = {© 2017 Holdgraf, Rieger, Micheli, Martin, Knight and Theunissen. Cognitive neuroscience has seen rapid growth in the size and complexity of data recorded from the human brain as well as in the computational tools available to analyze this data. This data explosion has resulted in an increased use of multivariate, model-based methods for asking neuroscience questions, allowing scientists to investigate multiple hypotheses with a single dataset, to use complex, time-varying stimuli, and to study the human brain under more naturalistic conditions. These tools come in the form of “Encoding” models, in which stimulus features are used to model brain activity, and “Decoding” models, in which neural features are used to generated a stimulus output. Here we review the current state of encoding and decoding models in cognitive electrophysiology and provide a practical guide toward conducting experiments and analyses in this emerging field. Our examples focus on using linear models in the study of human language and audition. We show how to calculate auditory receptive fields from natural sounds as well as how to decode neural recordings to predict speech. The paper aims to be a useful tutorial to these approaches, and a practical introduction to using machine learning and applied statistics to build models of neural activity. The data analytic approaches we discuss may also be applied to other sensory modalities, motor systems, and cognitive systems, and we cover some examples in these areas. In addition, a collection of Jupyter notebooks is publicly available as a complement to the material covered in this paper, providing code examples and tutorials for predictive modeling in python. The aimis to provide a practical understanding of predictivemodeling of human brain data and to propose best-practices in conducting these analyses.}, - journal = {Frontiers in Systems Neuroscience}, - author = {Holdgraf, Christopher Ramsay and Rieger, J.W. 
and Micheli, C. and Martin, S. and Knight, R.T. and Theunissen, F.E.},
-	year = {2017},
-	keywords = {Decoding models, Encoding models, Electrocorticography (ECoG), Electrophysiology/evoked potentials, Machine learning applied to neuroscience, Natural stimuli, Predictive modeling, Tutorials}
-}
-
-@book{ruby,
-	title = {The Ruby Programming Language},
-	author = {Flanagan, David and Matsumoto, Yukihiro},
-	year = {2008},
-	publisher = {O'Reilly Media}
-}
diff --git a/docs/requirements.txt b/docs/requirements.txt
index 7e821e4..a707b3c 100644
--- a/docs/requirements.txt
+++ b/docs/requirements.txt
@@ -1,3 +1,7 @@
 jupyter-book
 matplotlib
 numpy
+regularizepsf
+sphinx-autoapi
+sphinx-codeautolink
+jupyter
\ No newline at end of file
diff --git a/docs/theory.md b/docs/theory.md
index faa02fc..5162f5f 100644
--- a/docs/theory.md
+++ b/docs/theory.md
@@ -1 +1,5 @@
-# Mathematical Theory
\ No newline at end of file
+# Mathematical Theory
+
+An introduction to the theory is provided in the [Quickstart](quickstart.md).
+
+For details on the mathematical theory, see [Section 2 of the paper](https://arxiv.org/pdf/2212.02594.pdf).
\ No newline at end of file
diff --git a/regularizepsf/__init__.py b/regularizepsf/__init__.py
index f27fc5d..42b696d 100644
--- a/regularizepsf/__init__.py
+++ b/regularizepsf/__init__.py
@@ -1,2 +1,3 @@
 from regularizepsf.psf import simple_psf, varied_psf
-from regularizepsf.corrector import FunctionalCorrector, ArrayCorrector
+from regularizepsf.corrector import FunctionalCorrector, ArrayCorrector, calculate_covering
+from regularizepsf.fitter import CoordinatePatchCollection
diff --git a/regularizepsf/corrector.py b/regularizepsf/corrector.py
index f84ae6a..ce6f8be 100644
--- a/regularizepsf/corrector.py
+++ b/regularizepsf/corrector.py
@@ -21,7 +21,7 @@ def save(self, path: str | Path) -> None:
         Parameters
         ----------
         path : str or `pathlib.Path`
-            where to save the model, suggested extension is ".corr"
+            where to save the model, suggested extension is ".psf"
 
         Returns
         -------
@@ -36,7 +36,7 @@ def load(cls, path: str | Path) -> CorrectorABC:
         Parameters
         ----------
         path : str or `pathlib.Path`
-            where to load the model from, suggested extension is ".corr"
+            where to load the model from, suggested extension is ".psf"
 
         Returns
         -------
@@ -44,7 +44,7 @@ def load(cls, path: str | Path) -> CorrectorABC:
         """
 
     @abc.abstractmethod
-    def correct_image(self, image: np.ndarray, size: int = None,
+    def correct_image(self, image: np.ndarray, size: int,
                       alpha: float = 0.5, epsilon: float = 0.05,
                       use_gpu: bool = False) -> np.ndarray:
         """PSF correct an image according to the model
@@ -65,10 +65,6 @@ def correct_image(self, image: np.ndarray, size: int,
         -------
         np.ndarray
             a image that has been PSF corrected
-
-        Notes
-        -----
-        # TODO: add notes
         """
 
 
@@ -128,10 +124,13 @@ def evaluate_to_array_form(self, x: np.ndarray, y: np.ndarray, size: int) -> Arr
             for yy in y:
                 evaluations[(xx, yy)] = self._psf(image_x, image_y)
 
-        target_evaluation = self._target_model(image_x, image_y)
+        if self._target_model:
+            target_evaluation = self._target_model(image_x, image_y)
+        else:
+            target_evaluation = np.ones((size, size))
         return ArrayCorrector(evaluations, target_evaluation)
 
-    def correct_image(self, image: np.ndarray, size: int = None,
+    def correct_image(self, image: np.ndarray, size: int,
                       alpha: float = 0.5, epsilon: float = 0.05,
                       use_gpu: bool = False) -> np.ndarray:
         corners = calculate_covering(image.shape, size)
         array_corrector = self.evaluate_to_array_form(corners[:, 0], corners[:, 1], size)
@@ -163,11 +162,12 @@ def __init__(self, evaluations: dict[Any, np.ndarray], target_evaluation: np.nda
         """
         self._evaluation_points: list[Any] = list(evaluations.keys())
 
+        if not isinstance(evaluations[self._evaluation_points[0]], np.ndarray):
+            raise ValueError(f"Individual evaluations must be numpy arrays. "
+                             f"Found {type(evaluations[self._evaluation_points[0]])}.")
         if len(evaluations[self._evaluation_points[0]].shape) != 2:
             raise InvalidSizeError(f"PSF evaluations must be 2-D numpy arrays.")
         self._size = evaluations[self._evaluation_points[0]].shape[0]
-        if self._size <= 0:
-            raise InvalidSizeError(f"Found size of {self._size}. Must be >= 1")
         if self._size % 2 != 0:
             raise InvalidSizeError(f"Size must be even. Found {self._size}")
 
@@ -211,7 +211,7 @@ def load(cls, path):
 
 
 def calculate_covering(image_shape: tuple[int, int], size: int) -> np.ndarray:
-    """Determines the grid of patches to sum over.
+    """Determines the grid of overlapping neighborhood patches.
 
     Parameters
     ----------
diff --git a/regularizepsf/fitter.py b/regularizepsf/fitter.py
index fdd36ea..1e1b8be 100644
--- a/regularizepsf/fitter.py
+++ b/regularizepsf/fitter.py
@@ -8,17 +8,16 @@
 import numpy as np
 import deepdish as dd
-from lmfit import Parameters, minimize, report_fit
+from lmfit import Parameters, minimize
 from lmfit.minimizer import MinimizerResult
 import sep
-from photutils.detection import DAOStarFinder
 from astropy.io import fits
 from scipy.interpolate import RectBivariateSpline
-from skimage.transform import resize, downscale_local_mean
+from skimage.transform import downscale_local_mean
 
-from regularizepsf.psf import SimplePSF, VariedPSF, PointSpreadFunctionABC
+from regularizepsf.psf import SimplePSF, PointSpreadFunctionABC
 from regularizepsf.exceptions import InvalidSizeError
-from regularizepsf.corrector import calculate_covering
+from regularizepsf.corrector import calculate_covering, ArrayCorrector
 
 
 class PatchCollectionABC(metaclass=abc.ABCMeta):
@@ -226,9 +225,7 @@ def _fit_lmfit(self, base_psf: SimplePSF, initial_guesses: dict[str, Real]) -> d
 
 
 class CoordinatePatchCollection(PatchCollectionABC):
-    """A representation of a PatchCollection that operates on pixel coordinates from a set of images
-
-    """
+    """A representation of a PatchCollection that operates on pixel coordinates from a set of images"""
 
     @classmethod
     def extract(cls, images: list[np.ndarray],
                 coordinates: list[CoordinateIdentifier],
@@ -250,14 +247,45 @@ def extract(cls, images: list[np.ndarray],
     def find_stars_and_average(cls, image_paths: list[str],
                                psf_size: int,
                                patch_size: int,
-                               scale: int = 1,
+                               interpolation_scale: int = 1,
                                average_mode: str = "median",
                                star_threshold: int = 3,
                                hdu_choice=0):
+        """Loads a series of images, finds stars in each, and builds a CoordinatePatchCollection with averaged stars
+
+        Parameters
+        ----------
+        image_paths : List[str]
+            location of FITS files to load
+        psf_size : int
+            size of the PSF model to use
+        patch_size : int
+            square size that each PSF model applies to
+        interpolation_scale : int
+            if >1, the images are first scaled by this factor. This results in stars being aligned at a subpixel scale.
+        average_mode : str
+            "median" or "mean" determines how patches are combined
+        star_threshold : int
+            SEP's threshold for finding stars.
See `threshold` in https://sep.readthedocs.io/en/v1.1.x/api/sep.extract.html#sep-extract + hdu_choice : int + Which HDU from each image will be used, default of 0 is most common but could be 1 for compressed images + + Returns + ------- + CoordinatePatchCollection + An averaged star model built from the provided images + + Notes + ------ + Using an `interpolation_scale` other than 1 for large images can dramatically slow down the execution. + """ + # Load the first image to determine the image shape, assumed to be the same for all images with fits.open(image_paths[0]) as hdul: image_shape = hdul[hdu_choice].data.shape + # the output collection to return this_collection = cls(dict()) + # for each image do the magic for i, image_path in enumerate(image_paths): with fits.open(image_path) as hdul: image = hdul[hdu_choice].data.astype(float) @@ -265,46 +293,55 @@ def find_stars_and_average(cls, image_paths: list[str], raise ValueError(f"Images must all be the same shape. Found both {image_shape} and {image.shape}.") # if the image should be scaled then, do the scaling before anything else - if scale != 1: + if interpolation_scale != 1: interpolator = RectBivariateSpline(np.arange(image.shape[0]), np.arange(image.shape[1]), image) - image = interpolator(np.linspace(0, image.shape[0], image.shape[0]*scale), - np.linspace(0, image.shape[1], image.shape[1]*scale)) + image = interpolator(np.linspace(0, image.shape[0], image.shape[0] * interpolation_scale), + np.linspace(0, image.shape[1], image.shape[1] * interpolation_scale)) + # find stars using SEP background = sep.Background(image) image_background_removed = image - background image_star_coords = sep.extract(image_background_removed, star_threshold, err=background.globalrms) - coordinates = [CoordinateIdentifier(i, int(y - psf_size * scale / 2), int(x - psf_size * scale / 2)) + coordinates = [CoordinateIdentifier(i, + int(y - psf_size * interpolation_scale / 2), + int(x - psf_size * interpolation_scale / 2)) for x, y in zip(image_star_coords['x'], image_star_coords['y'])] # pad in case someone selects a region on the edge of the image - padding_shape = ((psf_size * scale, psf_size * scale), (psf_size * scale, psf_size * scale)) + padding_shape = ((psf_size * interpolation_scale, psf_size * interpolation_scale), + (psf_size * interpolation_scale, psf_size * interpolation_scale)) padded_image = np.pad(image, padding_shape, mode='constant', constant_values=np.median(image)) for coordinate in coordinates: - patch = padded_image[coordinate.x + scale * psf_size:coordinate.x + 2 * scale * psf_size, - coordinate.y + scale * psf_size:coordinate.y + 2 * scale * psf_size] + patch = padded_image[coordinate.x+interpolation_scale*psf_size: + coordinate.x+2*interpolation_scale*psf_size, + coordinate.y + interpolation_scale * psf_size: + coordinate.y + 2 * interpolation_scale * psf_size] this_collection.add(coordinate, patch) - corners = calculate_covering((image_shape[0]*scale, image_shape[1]*scale), patch_size*scale) - averaged = this_collection.average(corners, patch_size*scale, psf_size*scale, mode=average_mode) + corners = calculate_covering((image_shape[0] * interpolation_scale, image_shape[1] * interpolation_scale), + patch_size * interpolation_scale) + averaged = this_collection.average(corners, patch_size * interpolation_scale, psf_size * interpolation_scale, + mode=average_mode) - if scale != 1: + if interpolation_scale != 1: for coordinate, patch in averaged.items(): - averaged._patches[coordinate] = 
downscale_local_mean(averaged._patches[coordinate], (scale, scale))
+                averaged._patches[coordinate] = downscale_local_mean(averaged._patches[coordinate],
+                                                                     (interpolation_scale, interpolation_scale))
 
             averaged._size = psf_size
 
             output = CoordinatePatchCollection(dict())
             for key, patch in averaged.items():
-                output._patches[CoordinateIdentifier(key.image_index, key.x//scale, key.y//scale)] = patch
+                output._patches[CoordinateIdentifier(key.image_index, key.x // interpolation_scale, key.y // interpolation_scale)] = patch
             return output
 
     def average(self, corners: np.ndarray, step: int, size: int,
                 mode: str = "median") -> PatchCollectionABC:
-        self._validate_average_mode(mode)
-        pad_shape = self._calculate_pad_shape(size)
+        CoordinatePatchCollection._validate_average_mode(mode)
+        pad_shape = self._calculate_pad_shape(step)
 
         if mode == "mean":
             mean_stack = {tuple(corner): np.zeros((size, size)) for corner in corners}
@@ -342,11 +379,13 @@ def average(self, corners: np.ndarray, step: int, size: int,
         elif mode == "median":
             averages = {CoordinateIdentifier(None, corner[0], corner[1]):
                             np.nanmedian(median_stack[corner], axis=0)
-                        if len(median_stack[corner]) > 0 else np.zeros((size, size))
+                            if len(median_stack[corner]) > 0 else np.zeros((size, size))
                         for corner in median_stack}
         return CoordinatePatchCollection(averages)
 
-    def _validate_average_mode(self, mode: str):
+    @staticmethod
+    def _validate_average_mode(mode: str):
+        """Determine if the average_mode is a valid kind"""
         valid_modes = ['median', 'mean']
         if mode not in valid_modes:
             raise ValueError(f"Found a mode of {mode} but it must be in the list {valid_modes}.")
@@ -363,6 +402,27 @@ def _calculate_pad_shape(self, size):
         return pad_shape
 
     def fit(self, base_psf: SimplePSF, is_varied: bool = False) -> PointSpreadFunctionABC:
-        raise NotImplementedError("TODO")  # TODO: implement
+        raise NotImplementedError("TODO")
+
+    def to_array_corrector(self, target_evaluation: np.ndarray) -> ArrayCorrector:
+        """Converts a patch collection that has been averaged into an ArrayCorrector
+
+        Parameters
+        ----------
+        target_evaluation : np.ndarray
+            the evaluation of the target PSF
+
+        Returns
+        -------
+        ArrayCorrector
+            an array corrector that can be used to correct images
+        """
+        evaluation_dictionary = dict()
+        for identifier, patch in self._patches.items():
+            corrected_patch = patch.copy()
+            corrected_patch[np.isnan(corrected_patch)] = 0
+            evaluation_dictionary[(identifier.x, identifier.y)] = corrected_patch
+
+        array_corrector = ArrayCorrector(evaluation_dictionary, target_evaluation)
+        return array_corrector
diff --git a/regularizepsf/models.py b/regularizepsf/models.py
deleted file mode 100644
index 004fa96..0000000
--- a/regularizepsf/models.py
+++ /dev/null
@@ -1,13 +0,0 @@
-import numpy as np
-
-from regularizepsf import simple_psf
-
-
-@simple_psf
-def constrained_gaussian(x, y, x0=0, y0=0, sigma_x=1, sigma_y=1):
-    return np.exp(-(np.square(x-x0)/(2*np.square(sigma_x)) + np.square(y-y0)/(2*np.square(sigma_y))))
-
-
-@simple_psf
-def elliptical_gaussian(x, y, height=1, x0=0, y0=0, a=1, b=0, c=1):
-    return height*np.exp(-(a*np.square(x-x0) + 2*b*(x-x0)*(y-y0) + c*np.square(y-y0)))
diff --git a/regularizepsf/psf.py b/regularizepsf/psf.py
index 4062c7c..f0d391c 100644
--- a/regularizepsf/psf.py
+++ b/regularizepsf/psf.py
@@ -38,6 +38,7 @@ def parameters(self):
 
 class SimplePSF(PointSpreadFunctionABC):
     """Model for a simple PSF"""
+
     def __init__(self, function: Callable):
         """Creates a PSF object
 
@@ -78,6 +79,7 @@ def simple_psf(arg=None) -> SimplePSF:
 
 
 class
VariedPSF(PointSpreadFunctionABC): """Model for a PSF that varies over the field of view""" + def __init__(self, vary_function: Callable, base_psf: SimplePSF, validate_at_call: bool = True): self._vary_function = vary_function self._base_psf = base_psf @@ -128,6 +130,7 @@ def inner(__fn=None, *, check_at_call: bool = True): return VariedPSF(__fn, base_psf, validate_at_call=check_at_call) else: return partial(inner, check_at_call=check_at_call) + return inner diff --git a/requirements.txt b/requirements.txt index 2e9ce7d..e391704 100644 --- a/requirements.txt +++ b/requirements.txt @@ -5,7 +5,5 @@ lmfit==1.0.3 cython==0.29.32 astropy==5.1.1 scipy==1.9.3 -photutils==1.5.0 scikit-image==0.19.3 -pytest -pytest-runner \ No newline at end of file +sep==1.2.1 \ No newline at end of file diff --git a/requirements_dev.txt b/requirements_dev.txt new file mode 100644 index 0000000..27b4307 --- /dev/null +++ b/requirements_dev.txt @@ -0,0 +1,12 @@ +numpy==1.23.4 +dill==0.3.6 +deepdish==0.3.7 +lmfit==1.0.3 +cython==0.29.32 +astropy==5.1.1 +scipy==1.9.3 +scikit-image==0.19.3 +sep==1.2.1 +pytest +pytest-runner +hypothesis \ No newline at end of file diff --git a/setup.py b/setup.py index d8a8ec3..4bb4235 100644 --- a/setup.py +++ b/setup.py @@ -11,7 +11,7 @@ setup( name='regularizepsf', - version='0.0.2', + version='0.0.3', description='Point spread function modeling and regularization', long_description=long_description, long_description_content_type='text/markdown', @@ -21,8 +21,7 @@ author='J. Marcus Hughes', author_email='hughes.jmb@gmail.com', ext_modules=cythonize(ext_modules, annotate=True, compiler_directives={'language_level': 3}), - install_requires=["numpy", "dill", "deepdish", "lmfit", "sep", "cython", "astropy", "scipy", - "photutils", "scikit-image"], + install_requires=["numpy", "dill", "deepdish", "lmfit", "sep", "cython", "astropy", "scipy", "scikit-image"], package_data={"regularizepsf": ["helper.pyx"]}, setup_requires=["cython"], extras_require={"test": ['pytest', 'coverage', 'pytest-runner']} diff --git a/tests/data/DASH.fits b/tests/data/DASH.fits new file mode 100644 index 0000000..1ad44cb Binary files /dev/null and b/tests/data/DASH.fits differ diff --git a/tests/test_corrector.py b/tests/test_corrector.py index 14777c3..565c494 100644 --- a/tests/test_corrector.py +++ b/tests/test_corrector.py @@ -1,9 +1,13 @@ +import os.path + import pytest from pytest import fixture import numpy as np from hypothesis import given, strategies as st, settings -from regularizepsf.corrector import calculate_covering +from regularizepsf.psf import simple_psf, varied_psf +from regularizepsf.corrector import calculate_covering, ArrayCorrector, FunctionalCorrector +from regularizepsf.exceptions import InvalidSizeError, EvaluatedModelInconsistentSizeError, UnevaluatedPointError def confirm_full_four_covering(corners, img_shape, patch_size): @@ -58,3 +62,142 @@ def padded_100by100_image_psf_10_with_pattern(): # set_padded_img_section(test_img, coord[0], coord[1], 10, np.zeros((10, 10))+value) # assert np.all(test_img == padded_100by100_image_psf_10_with_pattern) + +def test_create_array_corrector(): + example = ArrayCorrector({(0, 0): np.zeros((10, 10))}, + np.zeros((10, 10))) + assert isinstance(example, ArrayCorrector) + assert example._evaluation_points == [(0, 0)] + + +def test_nonimage_array_corrector_errors(): + with pytest.raises(InvalidSizeError): + example = ArrayCorrector({(0, 0): np.zeros(10)}, np.zeros(10)) + + +def test_noneven_array_corrector_errors(): + with pytest.raises(InvalidSizeError): 
+        example = ArrayCorrector({(0, 0): np.zeros((11, 11))}, np.zeros((11, 11)))
+
+
+def test_array_corrector_with_different_size_evaluations_errors():
+    with pytest.raises(EvaluatedModelInconsistentSizeError):
+        example = ArrayCorrector({(0, 0): np.zeros((10, 10)), (1, 1): np.zeros((20, 20))},
+                                 np.zeros((20, 20)))
+
+
+def test_array_corrector_with_different_size_than_target_errors():
+    with pytest.raises(EvaluatedModelInconsistentSizeError):
+        example = ArrayCorrector({(0, 0): np.zeros((10, 10)), (1, 1): np.zeros((10, 10))},
+                                 np.zeros((20, 20)))
+
+
+@simple_psf
+def example_psf(x, y):
+    return 0
+
+
+def test_create_functional_corrector():
+    example = FunctionalCorrector(example_psf, example_psf)
+    assert example._psf == example_psf
+    assert example.is_variable is False
+    assert example._target_model == example_psf
+
+    example.save("test.psf")
+    assert os.path.isfile("test.psf")
+    loaded = example.load("test.psf")
+    assert isinstance(loaded, FunctionalCorrector)
+    os.remove("test.psf")
+
+
+def test_evaluate_to_array_form_with_invalid_size_errors():
+    @simple_psf
+    def base(x, y):
+        return np.ones_like(x)
+
+    func_corrector = FunctionalCorrector(base, None)
+    with pytest.raises(InvalidSizeError):
+        arr_corrector = func_corrector.evaluate_to_array_form(np.arange(10), np.arange(10), 11)
+
+
+def test_evaluate_to_array_form_with_ones_and_no_target():
+    @simple_psf
+    def base(x, y):
+        return np.ones_like(x)
+
+    func_corrector = FunctionalCorrector(base, None)
+    arr_corrector = func_corrector.evaluate_to_array_form(np.arange(10), np.arange(10), 10)
+    assert isinstance(arr_corrector, ArrayCorrector)
+    assert len(arr_corrector._evaluations) == 100
+    assert len(arr_corrector._evaluation_points) == 100
+    assert np.all(arr_corrector[0, 0] == 1)
+
+
+def test_evaluate_to_array_form_with_ones_and_target():
+    @simple_psf
+    def base(x, y):
+        return np.ones_like(x)
+
+    @simple_psf
+    def target(x, y):
+        return np.ones_like(x).astype(float)
+
+    func_corrector = FunctionalCorrector(base, target)
+    arr_corrector = func_corrector.evaluate_to_array_form(np.arange(10), np.arange(10), 10)
+    assert isinstance(arr_corrector, ArrayCorrector)
+    assert len(arr_corrector._evaluations) == 100
+    assert len(arr_corrector._evaluation_points) == 100
+    assert np.all(arr_corrector[0, 0] == 1)
+
+
+def test_functional_corrector_correct_image():
+    @simple_psf
+    def base(x, y):
+        return np.ones_like(x)
+
+    @simple_psf
+    def target(x, y):
+        return np.ones_like(x).astype(float)
+
+    func_corrector = FunctionalCorrector(base, target)
+    raw_image = np.ones((100, 100))
+    corrected_image = func_corrector.correct_image(raw_image, 10)
+    assert raw_image.shape == corrected_image.shape
+
+
+def test_array_corrector_without_numpy_arrays():
+    evaluations = {(1, 1): 1}
+    target = np.ones((100, 100))
+    with pytest.raises(ValueError):
+        corr = ArrayCorrector(evaluations, target)
+
+
+def test_array_corrector_correct_image_with_image_smaller_than_psf():
+    image = np.ones((10, 10))
+    evaluations = {(0, 0): np.ones((100, 100))}
+    target = np.ones((100, 100))
+    with pytest.raises(InvalidSizeError):
+        corr = ArrayCorrector(evaluations, target)
+        corr.correct_image(image, 10)
+
+
+def test_array_corrector_get_nonexistent_point():
+    evaluations = {(0, 0): np.ones((100, 100))}
+    target = np.ones((100, 100))
+    with pytest.raises(UnevaluatedPointError):
+        corr = ArrayCorrector(evaluations, target)
+        patch = corr[(1, 1)]
+
+
+def test_array_corrector_save_and_load():
+    evaluations = {(0, 0): np.ones((100, 100))}
+    target = np.ones((100, 100))
+    example =
ArrayCorrector(evaluations, target) + assert len(example._evaluations) == 1 + + example.save("test.psf") + assert os.path.isfile("test.psf") + loaded = example.load("test.psf") + assert isinstance(loaded, ArrayCorrector) + os.remove("test.psf") diff --git a/tests/test_fitter.py b/tests/test_fitter.py index a9feb28..76f171e 100644 --- a/tests/test_fitter.py +++ b/tests/test_fitter.py @@ -1,8 +1,15 @@ +import os.path +import pathlib + import pytest import numpy as np from hypothesis import given, strategies as st, settings, HealthCheck from regularizepsf.fitter import CoordinatePatchCollection, CoordinateIdentifier +from regularizepsf.exceptions import InvalidSizeError + + +TEST_DIR = pathlib.Path(__file__).parent.resolve() @pytest.fixture @@ -46,3 +53,55 @@ def test_coordinate_patch_collection_extraction_many_coordinates(coords, increme assert len(list(cpc.values())) == num_distinct_coords assert len(list(cpc.keys())) == num_distinct_coords assert len(list(cpc.items())) == num_distinct_coords + + +def test_missing_item_retrieval(): + collection = CoordinatePatchCollection({CoordinateIdentifier(0, 0, 0): np.zeros((10, 10))}) + with pytest.raises(IndexError): + item = collection[CoordinateIdentifier(1, 1, 1)] + + +def test_saving_and_loading(): + collection = CoordinatePatchCollection({CoordinateIdentifier(0, 0, 0): np.zeros((10, 10))}) + collection.save("test.psf") + assert os.path.isfile("test.psf") + loaded = CoordinatePatchCollection.load("test.psf") + assert isinstance(loaded, CoordinatePatchCollection) + os.remove("test.psf") + + +def test_coordinate_patch_average(): + collection = CoordinatePatchCollection({CoordinateIdentifier(0, 0, 0): np.zeros((10, 10)), + CoordinateIdentifier(0, 0, 0): np.ones((10, 10))*2}) + averaged_collection = collection.average(np.array([[0, 0]]), 10, 10, mode='median') + assert averaged_collection[CoordinateIdentifier(None, 0, 0)][1, 1] == 1 + + +def test_calculate_pad_shape(): + collection = CoordinatePatchCollection({CoordinateIdentifier(0, 0, 0): np.zeros((10, 10))}) + assert collection._size == 10 + assert collection._calculate_pad_shape(20) == ((5, 5), (5, 5)) + + +def test_negative_pad_shape_errors(): + collection = CoordinatePatchCollection({CoordinateIdentifier(0, 0, 0): np.zeros((10, 10))}) + with pytest.raises(InvalidSizeError): + collection._calculate_pad_shape(1) + + +def test_odd_pad_shape_errors(): + collection = CoordinatePatchCollection({CoordinateIdentifier(0, 0, 0): np.zeros((10, 10))}) + with pytest.raises(InvalidSizeError): + collection._calculate_pad_shape(11) + + +def test_validate_average_mode(): + with pytest.raises(ValueError): + CoordinatePatchCollection._validate_average_mode("nonexistent_method") + + +def test_find_stars_and_average(): + img_path = str(TEST_DIR / "data/DASH.fits") + example = CoordinatePatchCollection.find_stars_and_average([img_path], 32, 100) + assert isinstance(example, CoordinatePatchCollection) + diff --git a/tests/test_models.py b/tests/test_models.py deleted file mode 100644 index a79d2cd..0000000 --- a/tests/test_models.py +++ /dev/null @@ -1,12 +0,0 @@ -from regularizepsf.models import constrained_gaussian, elliptical_gaussian -from regularizepsf.psf import SimplePSF - - -def test_constrained_gaussian(): - assert isinstance(constrained_gaussian, SimplePSF) - assert constrained_gaussian(0, 0) == 1 - - -def test_elliptical_gaussain(): - assert isinstance(elliptical_gaussian, SimplePSF) - assert elliptical_gaussian(0, 0) == 1 \ No newline at end of file diff --git a/tests/test_psf.py b/tests/test_psf.py index 
95bde27..571a5c8 100644 --- a/tests/test_psf.py +++ b/tests/test_psf.py @@ -1,7 +1,7 @@ import pytest from regularizepsf.psf import simple_psf, varied_psf, SimplePSF, VariedPSF -from regularizepsf.exceptions import PSFParameterValidationError +from regularizepsf.exceptions import PSFParameterValidationError, VariedPSFParameterMismatchError def test_simple_psf_valid(): @@ -95,3 +95,28 @@ def test_varied_psf_called_naked(): def func(x, y): return {"sigma": 0.1} + +def test_varied_psf_parameters_not_match_base_errors(): + @simple_psf + def base(x, y, m): + return x + y + + with pytest.raises(VariedPSFParameterMismatchError): + @varied_psf(base) + def varied(x, y): + return {"n": 0, "m": 30} + + +def test_varied_psf_parameters_match_except_at_call_errors(): + @simple_psf + def base(x, y, m): + return x + y + + with pytest.raises(VariedPSFParameterMismatchError): + @varied_psf(base) + def varied(x, y): + if x == 0 and y == 0: + return {"m": 30} + else: + return {"n": 100, "m": 30} + varied(10, 10)