diff --git a/.github/workflows/README.md b/.github/workflows/README.md
new file mode 100644
index 00000000..32a4250f
--- /dev/null
+++ b/.github/workflows/README.md
@@ -0,0 +1,8 @@
+# MPoL GitHub Actions Workflows
+
+We use GitHub Actions to continuously integrate and deploy the MPoL codebase. This document summarizes the intended functionality of each of the workflows.
+
+* `verify-tests-and-docs.yml` runs the unit tests and (if successful) builds (but does not deploy) the documentation, to ascertain whether the codebase is in a working state (defined as passing tests on all non-experimental Python versions and a successful documentation build). This workflow is intended to run on every commit to the `main` branch as well as every commit to open pull requests.
+* `docs-build-deploy.yml` builds and deploys the documentation to [GitHub Pages](https://mpol-dev.github.io/MPoL/). This workflow is intended to run on every commit to the `main` branch, to ensure that the currently deployed documentation matches the current state of the source code. Note that if you are merging a PR, you may find that the docs are built twice: once as part of `verify-tests-and-docs.yml` and again as part of `docs-build-deploy.yml`. This duplication is OK, since the two workflows together support both small changes committed directly to `main` and changes introduced through branches and PRs.
+* `pre-release.yml` tries to install the package on Linux, macOS, and Windows using all supported Python versions. As the name suggests, this workflow is designed to run in anticipation of a release; it is triggered by marking a release as a "pre-release" on GitHub.
+* `release.yml` runs when a release is published on GitHub. Note that `pre-release.yml` is not a prerequisite and need not have run (or passed) first, but it is a good idea to exercise it manually by drafting a pre-release beforehand.
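For orientation, the `on:` trigger blocks that implement this scheduling look roughly like the following condensed sketch. This is illustrative only; the authoritative configuration is in the workflow diffs below, and details such as branch filters live in the individual files.

```yaml
# verify-tests-and-docs.yml: run on commits to main and on pull request commits
on:
  push:
    branches:
      - main
  pull_request:
---
# pre-release.yml: run when a release is marked as a pre-release on GitHub
on:
  release:
    types:
      - prereleased
```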
\ No newline at end of file diff --git a/.github/workflows/gh_docs.yml b/.github/workflows/docs-build-deploy.yml similarity index 50% rename from .github/workflows/gh_docs.yml rename to .github/workflows/docs-build-deploy.yml index 71b9a8f5..3e44c134 100644 --- a/.github/workflows/gh_docs.yml +++ b/.github/workflows/docs-build-deploy.yml @@ -1,4 +1,4 @@ -name: gh-pages docs +name: build and deploy docs on: push: @@ -7,13 +7,13 @@ on: jobs: build: - runs-on: ubuntu-20.04 + runs-on: ubuntu-24.04 steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Set up Python - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 with: - python-version: '3.10' + python-version: '3.12' - name: Install doc deps run: | pip install .'[dev]' @@ -21,28 +21,16 @@ jobs: run: | sudo apt-get install pandoc - name: Set up node - uses: actions/setup-node@v2 + uses: actions/setup-node@v4 - name: Install mermaid.js dependency run: | npm install @mermaid-js/mermaid-cli - - name: Cache/Restore the .mpol folder cache - uses: actions/cache@v3 - env: - cache-name: cache-mpol-dls - with: - # files are stored in .mpol - path: ~/.mpol - # the "key" is the hash of the download script - key: ${{ hashFiles('docs/download_external_files.py') }} - - name: Download large files - run: | - python3 docs/download_external_files.py - name: Build the docs run: | make -C docs clean make -C docs html MERMAID_PATH="../node_modules/.bin/" - name: Deploy - uses: peaceiris/actions-gh-pages@v3 + uses: peaceiris/actions-gh-pages@v4 with: github_token: ${{ secrets.GITHUB_TOKEN }} publish_dir: ./docs/_build/html diff --git a/.github/workflows/docs_build.yml b/.github/workflows/docs_build.yml deleted file mode 100644 index c5990b19..00000000 --- a/.github/workflows/docs_build.yml +++ /dev/null @@ -1,57 +0,0 @@ -name: docs test - -# Run this workflow when a review is requested on a PR that targets the main -# branch, or the PR is closed -on: - pull_request: - types: [review_requested, closed] - pull_request_review: - types: [submitted, dismissed] - -# Prevent multiple PRs from building/deploying the docs at the same time -concurrency: - group: ${{ github.workflow }} - -# test that the docs build -# (but don't deploy to gh-pages) -jobs: - build_docs: - runs-on: ubuntu-20.04 - strategy: - max-parallel: 4 - matrix: - python-version: ["3.10"] - steps: - - uses: actions/checkout@v3 - - name: Set up Python - uses: actions/setup-python@v4 - with: - python-version: ${{ matrix.python-version }} - # cache the Python environment, including installed dependencies - # (speeds up tests more than caching pip cache) - - name: Cache/Restore the Python env - uses: actions/cache@v3 - env: - cache-name: cache-python${{ matrix.python-version }}-env - with: - path: ${{ env.pythonLocation }} - key: ${{ env.pythonLocation }}-${{ hashFiles('setup.py') }} - - name: Install doc dependencies - run: | - pip install .[dev] - - name: Install Pandoc dependency - run: | - sudo apt-get install pandoc - - name: Cache/Restore the .mpol folder cache - uses: actions/cache@v3 - env: - cache-name: cache-mpol-dls - with: - # files are stored in .mpol - path: ~/.mpol - # the "key" is the hash of the download script - key: ${{ hashFiles('docs/download_external_files.py') }} - - name: Build the docs - run: | - make -C docs clean - make -C docs html diff --git a/.github/workflows/pre-release.yml b/.github/workflows/pre-release.yml index 41185d95..79aab848 100644 --- a/.github/workflows/pre-release.yml +++ b/.github/workflows/pre-release.yml @@ -6,43 +6,17 @@ on: - 
prereleased jobs: - dl_files: - runs-on: ubuntu-20.04 - steps: - - uses: actions/checkout@v3 - - name: Set up Python - uses: actions/setup-python@v4 - with: - python-version: "3.10" - - name: Install package deps - run: | - pip install .[dev] - - name: Cache/Restore the .mpol folder cache - uses: actions/cache@v3 - env: - cache-name: cache-mpol-dls - with: - # files are stored in .mpol - path: ~/.mpol - # the "key" is the hash of the download script - key: ${{ hashFiles('docs/download_external_files.py') }} - - name: Download large files - run: | - python3 docs/download_external_files.py - tests: - needs: dl_files # don't bother running if we didn't succeed getting the files runs-on: ${{ matrix.os }} strategy: fail-fast: false - max-parallel: 4 matrix: python-version: ["3.10", "3.11", "3.12"] - os: [ubuntu-20.04, macOS-latest, windows-latest] + os: [ubuntu-latest, macOS-latest, windows-latest] steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Set up Python ${{ matrix.python-version }} - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 with: python-version: ${{ matrix.python-version }} - name: Install dependencies @@ -54,22 +28,6 @@ jobs: - name: Install test deps run: | pip install .[test] - - name: Lint with flake8 - run: | - pip install flake8 - # stop the build if there are Python syntax errors or undefined names - flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics - # exit-zero treats all errors as warnings. The GitHub editor is 127 chars wide - flake8 . --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics - - name: Cache/Restore the .mpol folder cache - uses: actions/cache@v3 - env: - cache-name: cache-mpol-dls - with: - # files are stored in .mpol - path: ~/.mpol - # the "key" is the hash of the download script - key: ${{ hashFiles('docs/download_external_files.py') }} - name: Run tests with coverage run: | pytest --cov=mpol diff --git a/.github/workflows/package.yml b/.github/workflows/release.yml similarity index 80% rename from .github/workflows/package.yml rename to .github/workflows/release.yml index 0b7b9fea..07af6e0d 100644 --- a/.github/workflows/package.yml +++ b/.github/workflows/release.yml @@ -1,4 +1,4 @@ -name: build and upload pip +name: build and upload to PyPI on: release: @@ -7,14 +7,13 @@ on: jobs: deploy: - runs-on: ubuntu-20.04 - + runs-on: ubuntu-24.04 steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Set up Python - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 with: - python-version: 3.10 + python-version: "3.12" - name: Install dependencies run: | pip install --upgrade pip diff --git a/.github/workflows/tests.yml b/.github/workflows/verify-tests-and-docs.yml similarity index 66% rename from .github/workflows/tests.yml rename to .github/workflows/verify-tests-and-docs.yml index 3bbc0a7a..cce16bbc 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/verify-tests-and-docs.yml @@ -1,4 +1,4 @@ -name: package test +name: verify tests pass and docs build on: push: @@ -41,3 +41,21 @@ jobs: - name: Run tests with coverage run: | pytest --cov=mpol + docs_build: + runs-on: ubuntu-24.04 + steps: + - uses: actions/checkout@v4 + - name: Set up Python + uses: actions/setup-python@v5 + with: + python-version: "3.12" + - name: Install doc dependencies + run: | + pip install .[dev] + - name: Install Pandoc dependency + run: | + sudo apt-get install pandoc + - name: Build the docs + run: | + make -C docs clean + make -C docs html diff --git 
a/docs/download_external_files.py b/docs/download_external_files.py deleted file mode 100644 index e5882862..00000000 --- a/docs/download_external_files.py +++ /dev/null @@ -1,18 +0,0 @@ -from astropy.utils.data import download_file -from mpol.__init__ import zenodo_record - -slug = "https://zenodo.org/record/{:d}/files/{:}" - -fnames = [ - "logo_cube.noise.npz", - "HD143006_continuum.npz", - "logo_cube.tclean.fits", -] - -for fname in fnames: - url = slug.format(zenodo_record, fname) - download_file( - url, - cache=True, - pkgname="mpol", - ) diff --git a/src/mpol/tests.mplstyle b/src/mpol/tests.mplstyle new file mode 100644 index 00000000..429e682b --- /dev/null +++ b/src/mpol/tests.mplstyle @@ -0,0 +1,4 @@ +image.cmap: inferno +figure.figsize: 7.1, 5.0 +figure.autolayout: True +savefig.dpi: 300 \ No newline at end of file diff --git a/test/conftest.py b/test/conftest.py index fa151838..24dd326b 100644 --- a/test/conftest.py +++ b/test/conftest.py @@ -8,6 +8,9 @@ from mpol import coordinates, fourier, gridding, images, utils from mpol.__init__ import zenodo_record +import matplotlib.pyplot as plt +plt.style.use("mpol.tests") + # private variables to this module _npz_path = files("mpol.data").joinpath("mock_data.npz") _nchan = 4 diff --git a/test/images_test.py b/test/images_test.py index b60b14ec..7fd2e98b 100644 --- a/test/images_test.py +++ b/test/images_test.py @@ -4,168 +4,99 @@ import torch from astropy.io import fits from mpol import coordinates, images, plot, utils +from plot_utils import imshow_two -def test_single_chan(): - coords = coordinates.GridCoords(cell_size=0.015, npix=800) - im = images.ImageCube(coords=coords) - assert im.nchan == 1 +def test_BaseCube_map(coords, tmp_path): + # create a mock cube that includes negative values + nchan = 1 + mean = torch.full((nchan, coords.npix, coords.npix), fill_value=-0.5) + std = torch.full((nchan, coords.npix, coords.npix), fill_value=0.5) + bcube = torch.normal(mean=mean, std=std) + blayer = images.BaseCube(coords=coords, nchan=nchan, base_cube=bcube) -def test_basecube_grad(): + # the default softplus function should map everything to positive values + blayer_output = blayer() + + imshow_two( + tmp_path / "BaseCube_mapped.png", + [bcube, blayer_output], + title=["BaseCube input", "BaseCube output"], + xlabel=["pixel"], + ylabel=["pixel"], + ) + + assert torch.all(blayer_output >= 0) + + +def test_instantiate_ImageCube(): coords = coordinates.GridCoords(cell_size=0.015, npix=800) - bcube = images.BaseCube(coords=coords) - loss = torch.sum(bcube()) - loss.backward() + im = images.ImageCube(coords=coords) + assert im.nchan == 1 -def test_imagecube_grad(coords): +def test_ImageCube_apply_grad(coords): bcube = images.BaseCube(coords=coords) - # try passing through ImageLayer imagecube = images.ImageCube(coords=coords) - - # send things through this layer loss = torch.sum(imagecube(bcube())) - loss.backward() -# test for proper fits scale -def test_imagecube_tofits(coords, tmp_path): - # creating base cube +def test_to_FITS_pixel_scale(coords, tmp_path): + """Test whether the FITS scale was written correctly.""" bcube = images.BaseCube(coords=coords) - - # try passing through ImageLayer imagecube = images.ImageCube(coords=coords) - - # sending the basecube through the imagecube imagecube(bcube()) - # creating output fits file with name 'test_cube_fits_file39.fits' - # file will be deleted after testing + # write FITS to file imagecube.to_FITS(fname=tmp_path / "test_cube_fits_file39.fits", overwrite=True) - # inputting the header from 
the previously created fits file + # read file and check pixel scale is correct fits_header = fits.open(tmp_path / "test_cube_fits_file39.fits")[0].header assert (fits_header["CDELT1"] and fits_header["CDELT2"]) == pytest.approx( coords.cell_size / 3600 ) -def test_basecube_imagecube(coords, tmp_path): +def test_HannConvCube(coords, tmp_path): # create a mock cube that includes negative values nchan = 1 - mean = torch.full( - (nchan, coords.npix, coords.npix), fill_value=-0.5) - std = torch.full( - (nchan, coords.npix, coords.npix), fill_value=0.5) - - # tensor - base_cube = torch.normal(mean=mean, std=std) - - # layer - basecube = images.BaseCube(coords=coords, nchan=nchan, base_cube=base_cube) - - # the default softplus function should map everything to positive values - output = basecube() - - fig, ax = plt.subplots(ncols=2, nrows=1) - - im = ax[0].imshow( - np.squeeze(base_cube.detach().numpy()), origin="lower", interpolation="none" - ) - plt.colorbar(im, ax=ax[0]) - ax[0].set_title("input") - - im = ax[1].imshow( - np.squeeze(output.detach().numpy()), origin="lower", interpolation="none" - ) - plt.colorbar(im, ax=ax[1]) - ax[1].set_title("mapped") - - fig.savefig(tmp_path / "basecube_mapped.png", dpi=300) - - # try passing through ImageLayer - imagecube = images.ImageCube(coords=coords, nchan=nchan) - - # send things through this layer - imagecube(basecube()) - - fig, ax = plt.subplots(ncols=1) - im = ax.imshow( - np.squeeze(imagecube.sky_cube.detach().numpy()), - extent=imagecube.coords.img_ext, - origin="lower", - interpolation="none", - ) - fig.savefig(tmp_path / "imagecube.png", dpi=300) - - plt.close("all") - - -def test_base_cube_conv_cube(coords, tmp_path): - # test whether the HannConvCube functions appropriately - - # create a mock cube that includes negative values - nchan = 1 - mean = torch.full( - (nchan, coords.npix, coords.npix), fill_value=-0.5) - std = torch.full( - (nchan, coords.npix, coords.npix), fill_value=0.5) + mean = torch.full((nchan, coords.npix, coords.npix), fill_value=-0.5) + std = torch.full((nchan, coords.npix, coords.npix), fill_value=0.5) # The HannConvCube expects to function on a pre-packed ImageCube, - # so in order to get the plots looking correct on this test image, - # we need to faff around with packing - - # tensor test_cube = torch.normal(mean=mean, std=std) test_cube_packed = utils.sky_cube_to_packed_cube(test_cube) - # layer conv_layer = images.HannConvCube(nchan=nchan) conv_output_packed = conv_layer(test_cube_packed) conv_output = utils.packed_cube_to_sky_cube(conv_output_packed) - fig, ax = plt.subplots(ncols=2, nrows=1) - - im = ax[0].imshow( - np.squeeze(test_cube.detach().numpy()), origin="lower", interpolation="none" - ) - plt.colorbar(im, ax=ax[0]) - ax[0].set_title("input") - - im = ax[1].imshow( - np.squeeze(conv_output.detach().numpy()), origin="lower", interpolation="none" + imshow_two( + tmp_path / "convcube.png", + [test_cube, conv_output], + title=["input", "convolved"], + xlabel=["pixel"], + ylabel=["pixel"], ) - plt.colorbar(im, ax=ax[1]) - ax[1].set_title("convolved") - - fig.savefig(tmp_path / "convcube.png", dpi=300) - - plt.close("all") -def test_multi_chan_conv(coords, tmp_path): - # create a mock channel cube that includes negative values - # and make sure that the HannConvCube works across channels - +def test_HannConvCube_multi_chan(coords): + """Make sure HannConvCube functions with multi-channeled input""" nchan = 10 - mean = torch.full( - (nchan, coords.npix, coords.npix), fill_value=-0.5) - std = torch.full( - 
(nchan, coords.npix, coords.npix), fill_value=0.5) + mean = torch.full((nchan, coords.npix, coords.npix), fill_value=-0.5) + std = torch.full((nchan, coords.npix, coords.npix), fill_value=0.5) - # tensor test_cube = torch.normal(mean=mean, std=std) - # layer conv_layer = images.HannConvCube(nchan=nchan) - conv_layer(test_cube) -def test_image_flux(coords): +def test_flux(coords): + """Make sure we can read the flux attribute.""" nchan = 20 bcube = images.BaseCube(coords=coords, nchan=nchan) im = images.ImageCube(coords=coords, nchan=nchan) @@ -180,16 +111,15 @@ def test_plot_test_img(packed_cube, coords, tmp_path): # put back to sky sky_cube = utils.packed_cube_to_sky_cube(packed_cube) - im = ax.imshow( - sky_cube[chan], extent=coords.img_ext, origin="lower", cmap="inferno" - ) + im = ax.imshow(sky_cube[chan], extent=coords.img_ext, origin="lower") plt.colorbar(im) - fig.savefig(tmp_path / "sky_cube.png", dpi=300) + fig.savefig(tmp_path / "sky_cube.png") plt.close("all") -def test_taper(coords, tmp_path): - for r in np.arange(0.0, 0.2, step=0.02): + +def test_uv_gaussian_taper(coords, tmp_path): + for r in np.arange(0.0, 0.2, step=0.04): fig, ax = plt.subplots(ncols=1) taper_2D = images.uv_gaussian_taper(coords, r, r, 0.0) @@ -205,179 +135,165 @@ def test_taper(coords, tmp_path): ) plt.colorbar(im, ax=ax) - fig.savefig(tmp_path / f"taper{r:.2f}.png", dpi=300) + fig.savefig(tmp_path / f"taper{r:.2f}.png") plt.close("all") -def test_gaussian_kernel(coords, tmp_path): + +def test_GaussConvImage_kernel(coords, tmp_path): rs = np.array([0.02, 0.06, 0.10]) nchan = 3 - fig, ax = plt.subplots(nrows=len(rs), ncols=nchan, figsize=(10,10)) - for i,r in enumerate(rs): + fig, ax = plt.subplots(nrows=len(rs), ncols=nchan, figsize=(10, 10)) + for i, r in enumerate(rs): layer = images.GaussConvImage(coords, nchan=nchan, FWHM_maj=r, FWHM_min=0.5 * r) weight = layer.m.weight.detach().numpy() for j in range(nchan): - im = ax[i,j].imshow(weight[j,0], interpolation="none", origin="lower") - plt.colorbar(im, ax=ax[i,j]) + im = ax[i, j].imshow(weight[j, 0], interpolation="none", origin="lower") + plt.colorbar(im, ax=ax[i, j]) - fig.savefig(tmp_path / "filter.png", dpi=300) + fig.savefig(tmp_path / "filter.png") plt.close("all") -def test_gaussian_kernel_rotate(coords, tmp_path): + +def test_GaussConvImage_kernel_rotate(coords, tmp_path): r = 0.04 - Omegas = [0, 20, 40] # degrees + Omegas = [0, 20, 40] # degrees nchan = 3 fig, ax = plt.subplots(nrows=len(Omegas), ncols=nchan, figsize=(10, 10)) for i, Omega in enumerate(Omegas): - layer = images.GaussConvImage(coords, nchan=nchan, FWHM_maj=r, FWHM_min=0.5 * r, Omega=Omega) + layer = images.GaussConvImage( + coords, nchan=nchan, FWHM_maj=r, FWHM_min=0.5 * r, Omega=Omega + ) weight = layer.m.weight.detach().numpy() for j in range(nchan): - im = ax[i, j].imshow(weight[j, 0], interpolation="none",origin="lower") + im = ax[i, j].imshow(weight[j, 0], interpolation="none", origin="lower") plt.colorbar(im, ax=ax[i, j]) - fig.savefig(tmp_path / "filter.png", dpi=300) + fig.savefig(tmp_path / "filter.png") plt.close("all") -def test_GaussConvImage(sky_cube, coords, tmp_path): - # show only the first channel +@pytest.mark.parametrize("FWHM", [0.02, 0.06, 0.1]) +def test_GaussConvImage(sky_cube, coords, tmp_path, FWHM): chan = 0 nchan = sky_cube.size()[0] - for r in np.arange(0.02, 0.11, step=0.04): + layer = images.GaussConvImage(coords, nchan=nchan, FWHM_maj=FWHM, FWHM_min=FWHM) + c_sky = layer(sky_cube) - layer = images.GaussConvImage(coords, nchan=nchan, FWHM_maj=r, 
FWHM_min=r) + imgs = [sky_cube[chan], c_sky[chan]] + fluxes = [coords.cell_size**2 * torch.sum(img).item() for img in imgs] + title = [f"tot flux: {flux:.3f} Jy" for flux in fluxes] - print("Kernel size", layer.m.weight.size()) - - fig, ax = plt.subplots(ncols=2) - - im = ax[0].imshow( - sky_cube[chan], extent=coords.img_ext, origin="lower", cmap="inferno" - ) - flux = coords.cell_size**2 * torch.sum(sky_cube[chan]) - ax[0].set_title(f"tot flux: {flux:.3f} Jy") - plt.colorbar(im, ax=ax[0]) - - c_sky = layer(sky_cube) - im = ax[1].imshow( - c_sky[chan], extent=coords.img_ext, origin="lower", cmap="inferno" - ) - flux = coords.cell_size**2 * torch.sum(c_sky[chan]) - ax[1].set_title(f"tot flux: {flux:.3f} Jy") + imshow_two( + tmp_path / f"convolved_{FWHM:.2f}.png", + imgs, + sky=True, + suptitle=f"Image Plane Gauss Convolution FWHM={FWHM}", + title=title, + extent=[coords.img_ext], + ) - plt.colorbar(im, ax=ax[1]) - fig.savefig(tmp_path / f"convolved_{r:.2f}.png", dpi=300) + assert pytest.approx(fluxes[0]) == fluxes[1] - plt.close("all") -def test_GaussConvImage_rotate(sky_cube, coords, tmp_path): - # show only the first channel +@pytest.mark.parametrize("Omega", [0, 15, 30, 45]) +def test_GaussConvImage_rotate(sky_cube, coords, tmp_path, Omega): chan = 0 nchan = sky_cube.size()[0] - for Omega in [0, 20, 40]: - layer = images.GaussConvImage(coords, nchan=nchan, FWHM_maj=0.16, FWHM_min=0.06, Omega=Omega) - - fig, ax = plt.subplots(ncols=2) - - im = ax[0].imshow( - sky_cube[chan], extent=coords.img_ext, origin="lower", cmap="inferno" - ) - flux = coords.cell_size**2 * torch.sum(sky_cube[chan]) - ax[0].set_title(f"tot flux: {flux:.3f} Jy") - plt.colorbar(im, ax=ax[0]) + FWHM_maj = 0.10 + FWHM_min = 0.05 - c_sky = layer(sky_cube) - im = ax[1].imshow( - c_sky[chan], extent=coords.img_ext, origin="lower", cmap="inferno" - ) - flux = coords.cell_size**2 * torch.sum(c_sky[chan]) - ax[1].set_title(f"tot flux: {flux:.3f} Jy") + layer = images.GaussConvImage( + coords, nchan=nchan, FWHM_maj=FWHM_maj, FWHM_min=FWHM_min, Omega=Omega + ) + c_sky = layer(sky_cube) + + imgs = [sky_cube[chan], c_sky[chan]] + fluxes = [coords.cell_size**2 * torch.sum(img).item() for img in imgs] + title = [f"tot flux: {flux:.3f} Jy" for flux in fluxes] + + imshow_two( + tmp_path / f"convolved_{Omega:.0f}_deg.png", + imgs, + sky=True, + suptitle=r"Image Plane Gauss Convolution: $\Omega$=" + + f'{Omega}, {FWHM_maj}", {FWHM_min}"', + title=title, + extent=[coords.img_ext], + ) - plt.colorbar(im, ax=ax[1]) - fig.savefig(tmp_path / f"convolved_{Omega:.2f}.png", dpi=300) + assert pytest.approx(fluxes[0], abs=4e-7) == fluxes[1] - plt.close("all") -def test_GaussFourier(packed_cube, coords, tmp_path): +@pytest.mark.parametrize("FWHM", [0.02, 0.1, 0.2, 0.3, 0.5]) +def test_GaussConvFourier(packed_cube, coords, tmp_path, FWHM): chan = 0 + sky_cube = utils.packed_cube_to_sky_cube(packed_cube) - for FWHM in np.linspace(0.02, 0.5, num=10): - fig, ax = plt.subplots(ncols=2) - # put back to sky - sky_cube = utils.packed_cube_to_sky_cube(packed_cube) - im = ax[0].imshow( - sky_cube[chan], extent=coords.img_ext, origin="lower", cmap="inferno" - ) - flux = coords.cell_size**2 * torch.sum(sky_cube[chan]) - ax[0].set_title(f"tot flux: {flux:.3f} Jy") - plt.colorbar(im, ax=ax[0]) - - # set base resolution - layer = images.GaussConvFourier(coords, FWHM, FWHM) - c = layer(packed_cube) - # put back to sky - c_sky = utils.packed_cube_to_sky_cube(c) - flux = coords.cell_size**2 * torch.sum(c_sky[chan]) - im = ax[1].imshow( - 
c_sky[chan].detach().numpy(), - extent=coords.img_ext, - origin="lower", - cmap="inferno", - ) - ax[1].set_title(f"tot flux: {flux:.3f} Jy") + layer = images.GaussConvFourier(coords, FWHM, FWHM) + c = layer(packed_cube) + c_sky = utils.packed_cube_to_sky_cube(c) - plt.colorbar(im, ax=ax[1]) - fig.savefig(tmp_path / "convolved_FWHM_{:.2f}.png".format(FWHM), dpi=300) + imgs = [sky_cube[chan], c_sky[chan]] + fluxes = [coords.cell_size**2 * torch.sum(img).item() for img in imgs] + title = [f"tot flux: {flux:.3f} Jy" for flux in fluxes] + + imshow_two( + tmp_path / "convolved_FWHM_{:.2f}.png".format(FWHM), + imgs, + sky=True, + suptitle=f"Fourier Plane Gauss Convolution: FWHM={FWHM}", + title=title, + extent=[coords.img_ext], + ) - plt.close("all") + assert pytest.approx(fluxes[0], abs=4e-7) == fluxes[1] -def test_GaussFourier_rotate(packed_cube, coords, tmp_path): - chan = 0 +@pytest.mark.parametrize("Omega", [0, 15, 30, 45]) +def test_GaussConvFourier_rotate(packed_cube, coords, tmp_path, Omega): + chan = 0 sky_cube = utils.packed_cube_to_sky_cube(packed_cube) - for Omega in [0, 20, 40]: - layer = images.GaussConvFourier( - coords, FWHM_maj=0.16, FWHM_min=0.06, Omega=Omega - ) - - fig, ax = plt.subplots(ncols=2) - - im = ax[0].imshow( - sky_cube[chan], extent=coords.img_ext, origin="lower", cmap="inferno" - ) - flux = coords.cell_size**2 * torch.sum(sky_cube[chan]) - ax[0].set_title(f"tot flux: {flux:.3f} Jy") - plt.colorbar(im, ax=ax[0]) + FWHM_maj = 0.10 + FWHM_min = 0.05 + layer = images.GaussConvFourier( + coords, FWHM_maj=FWHM_maj, FWHM_min=FWHM_min, Omega=Omega + ) - c_sky = layer(sky_cube) - im = ax[1].imshow( - c_sky[chan], extent=coords.img_ext, origin="lower", cmap="inferno" - ) - flux = coords.cell_size**2 * torch.sum(c_sky[chan]) - ax[1].set_title(f"tot flux: {flux:.3f} Jy") + c = layer(packed_cube) + c_sky = utils.packed_cube_to_sky_cube(c) - plt.colorbar(im, ax=ax[1]) - fig.savefig(tmp_path / f"convolved_{Omega:.2f}.png", dpi=300) + imgs = [sky_cube[chan], c_sky[chan]] + fluxes = [coords.cell_size**2 * torch.sum(img).item() for img in imgs] + title = [f"tot flux: {flux:.3f} Jy" for flux in fluxes] + + imshow_two( + tmp_path / f"convolved_{Omega:.0f}_deg.png", + imgs, + sky=True, + suptitle=r"Fourier Plane Gauss Convolution: $\Omega$=" + + f'{Omega}, {FWHM_maj}", {FWHM_min}"', + title=title, + extent=[coords.img_ext], + ) - plt.close("all") + assert pytest.approx(fluxes[0], abs=4e-7) == fluxes[1] -def test_GaussFourier_point(coords, tmp_path): +def test_GaussConvFourier_point(coords, tmp_path): FWHM = 0.5 # create an image with a point source in the center sky_cube = torch.zeros((1, coords.npix, coords.npix)) - cpix = coords.npix//2 - sky_cube[0,cpix,cpix] = 1.0 + cpix = coords.npix // 2 + sky_cube[0, cpix, cpix] = 1.0 fig, ax = plt.subplots(ncols=2, sharex=True, sharey=True) # put back to sky - im = ax[0].imshow( - sky_cube[0], extent=coords.img_ext, origin="lower", cmap="inferno" - ) + im = ax[0].imshow(sky_cube[0], extent=coords.img_ext, origin="lower") flux = coords.cell_size**2 * torch.sum(sky_cube[0]) ax[0].set_title(f"tot flux: {flux:.3f} Jy") plt.colorbar(im, ax=ax[0]) @@ -401,6 +317,6 @@ def test_GaussFourier_point(coords, tmp_path): ax[1].set_ylim(-r, r) plt.colorbar(im, ax=ax[1]) - fig.savefig(tmp_path / "point_source_FWHM_{:.2f}.png".format(FWHM), dpi=300) + fig.savefig(tmp_path / "point_source_FWHM_{:.2f}.png".format(FWHM)) plt.close("all") diff --git a/test/plot_utils.py b/test/plot_utils.py new file mode 100644 index 00000000..39294f7a --- /dev/null +++ 
b/test/plot_utils.py
@@ -0,0 +1,144 @@
+import matplotlib as mpl
+import matplotlib.pyplot as plt
+import torch
+import numpy as np
+
+
+def extend_list(l, num=2):
+    """
+    Duplicate or truncate a list to ``num`` items.
+
+    l: list
+        the list of items to potentially duplicate or truncate.
+    num: int
+        the final length of the list
+
+    Returns
+    -------
+    list
+        Length num list of items.
+
+    Examples
+    --------
+    >>> extend_list(["L Plot", "R Plot"])
+    ["L Plot", "R Plot"]
+    >>> extend_list(["Plot"])  # both L and R will have "Plot"
+    ["Plot", "Plot"]
+    >>> extend_list(["L Plot", "R Plot", "Z Plot"])  # "Z Plot" is ignored
+    ["L Plot", "R Plot"]
+    """
+    if len(l) == 1:
+        return num * l
+    else:
+        return l[:num]
+
+def extend_kwargs(kwargs):
+    """
+    This is a helper routine for imshow_two, designed to flexibly consume a variety
+    of options for each of the two plots.
+
+    kwargs: dict
+        the kwargs dict provided from the function call
+
+    Returns
+    -------
+    None
+        ``kwargs`` is modified in place so that each value is a length-2 list.
+    """
+
+    for key, item in kwargs.items():
+        kwargs[key] = extend_list(item)
+
+def imshow_two(path, imgs, sky=False, suptitle=None, **kwargs):
+    """Plot two images side by side, with colorbars.
+
+    Parameters
+    ----------
+    path : string
+        path and filename to save figure
+    imgs : list
+        length-2 list of images to plot. Arguments are designed to be very permissive. If an image is a PyTorch tensor, the routine converts it to numpy and then calls numpy.squeeze.
+    sky: bool
+        If True, treat images as sky plots and label with offset arcseconds.
+    title: list
+        if provided, list of strings corresponding to the title for each subplot. If only one is provided, it is used for both subplots.
+    xlabel: list
+        if provided, list of strings to use as the x-label of each subplot.
+    ylabel: list
+        if provided, list of strings to use as the y-label of each subplot.
+
+    Returns
+    -------
+    None
+    """
+
+    xx = 7.5  # in
+    rmargin = 0.8
+    lmargin = 0.8
+    tmargin = 0.3 if suptitle is None else 0.5
+    bmargin = 0.5
+    middle_sep = 1.3
+    ax_width = (xx - rmargin - lmargin - middle_sep) / 2
+    ax_height = ax_width
+    cax_width = 0.1
+    cax_sep = 0.15
+    cax_height = ax_height
+    yy = bmargin + ax_height + tmargin
+
+    with mpl.rc_context({'figure.autolayout': False}):
+        fig = plt.figure(figsize=(xx, yy))
+
+        ax = []
+        cax = []
+
+        extend_kwargs(kwargs)
+
+        if "extent" not in kwargs:
+            kwargs["extent"] = [None, None]
+
+        for i in [0, 1]:
+            a = fig.add_axes(
+                [
+                    (lmargin + i * (ax_width + middle_sep)) / xx,
+                    bmargin / yy,
+                    ax_width / xx,
+                    ax_height / yy,
+                ]
+            )
+            ax.append(a)
+
+            ca = fig.add_axes(
+                (
+                    [
+                        (lmargin + (i + 1) * ax_width + i * middle_sep + cax_sep) / xx,
+                        bmargin / yy,
+                        cax_width / xx,
+                        cax_height / yy,
+                    ]
+                )
+            )
+            cax.append(ca)
+
+            img = imgs[i]
+            img = img.detach().numpy() if torch.is_tensor(img) else img
+
+            im = a.imshow(np.squeeze(img), origin="lower", interpolation="none", extent=kwargs["extent"][i])
+            plt.colorbar(im, cax=ca)
+
+            if "title" in kwargs:
+                a.set_title(kwargs["title"][i])
+
+            if sky:
+                a.set_xlabel(r"$\Delta \alpha\ \cos \delta\;[{}^{\prime\prime}]$")
+                a.set_ylabel(r"$\Delta \delta\;[{}^{\prime\prime}]$")
+            else:
+                if "xlabel" in kwargs:
+                    a.set_xlabel(kwargs["xlabel"][i])
+
+                if "ylabel" in kwargs:
+                    a.set_ylabel(kwargs["ylabel"][i])
+
+        if suptitle is not None:
+            fig.suptitle(suptitle)
+        fig.savefig(path)
+        plt.close("all")
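For reference, a minimal, hypothetical sketch of the call pattern `imshow_two` is designed for. The tensors and filename here are illustrative; the keyword handling mirrors the calls added to `test/images_test.py` above.

```python
import torch
from plot_utils import imshow_two  # assumes test/ is importable, as during a pytest run

# two single-channel (nchan, npix, npix) images: a random draw and a
# positive-valued mapping of it, loosely following test_BaseCube_map
cube = torch.normal(mean=torch.zeros(1, 64, 64), std=torch.ones(1, 64, 64))
mapped = torch.nn.functional.softplus(cube)

imshow_two(
    "demo_two_panel.png",
    [cube, mapped],             # tensors are detached, converted to numpy, and squeezed
    title=["input", "mapped"],  # one title per panel
    xlabel=["pixel"],           # a single entry is duplicated across both panels
    ylabel=["pixel"],
)
```

Because `extend_kwargs` mutates `kwargs` in place, every keyword option must be passed as a list, even when the same value applies to both panels.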