From 0d4fc0c42d9a000326ecfc0461ba689130c797a2 Mon Sep 17 00:00:00 2001 From: Ian Czekala Date: Sat, 11 May 2024 20:52:32 +0100 Subject: [PATCH 01/15] lowered GaussConvCube threshold. --- src/mpol/images.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/mpol/images.py b/src/mpol/images.py index 9e6b1310..c6d48bf9 100644 --- a/src/mpol/images.py +++ b/src/mpol/images.py @@ -366,7 +366,7 @@ def __init__( # store taper to register so it transfers to GPU self.register_buffer("taper_2D", torch.tensor(taper_2D, dtype=torch.float32)) - def forward(self, packed_cube): + def forward(self, packed_cube, thresh=1e-6): r""" Convolve a packed_cube image with a 2D Gaussian PSF. Operation is carried out in the Fourier domain using a Gaussian taper. @@ -400,7 +400,6 @@ def forward(self, packed_cube): convolved_packed_cube = torch.fft.ifftn(tapered_vis, dim=(1, 2)) # assert imaginaries are effectively zero, otherwise something went wrong - thresh = 1e-7 assert ( torch.max(convolved_packed_cube.imag) < thresh ), "Round-tripped image contains max imaginary value {:} > {:} threshold, something may be amiss.".format( From 8612192a90f751805a633261c98331357ec1d2e0 Mon Sep 17 00:00:00 2001 From: Ian Czekala Date: Thu, 7 Nov 2024 07:30:12 +0000 Subject: [PATCH 02/15] refactored plotting routine for nufft accuracy. --- test/fourier_test.py | 250 ++++++++++++++----------------------------- 1 file changed, 81 insertions(+), 169 deletions(-) diff --git a/test/fourier_test.py b/test/fourier_test.py index e3ef6932..5d95d6e3 100644 --- a/test/fourier_test.py +++ b/test/fourier_test.py @@ -4,13 +4,8 @@ from mpol import fourier, utils from pytest import approx - -def test_fourier_cube(coords, tmp_path): - # test image packing - # test whether we get the same Fourier Transform using the FFT as we could - # calculate analytically - - kw = { +# parameters for analytic Gaussian used for all tests +gauss_kw = { "a": 1, "delta_x": 0.02, # arcsec "delta_y": -0.01, @@ -19,8 +14,14 @@ def test_fourier_cube(coords, tmp_path): "Omega": 20, # degrees } +def test_fourier_cube(coords, tmp_path): + # test image packing + # test whether we get the same Fourier Transform using the FFT as we could + # calculate analytically + + img_packed = utils.sky_gaussian_arcsec( - coords.packed_x_centers_2D, coords.packed_y_centers_2D, **kw + coords.packed_x_centers_2D, coords.packed_y_centers_2D, **gauss_kw ) # calculated the packed FFT using the FourierLayer @@ -31,7 +32,7 @@ def test_fourier_cube(coords, tmp_path): # calculate the analytical FFT fourier_packed_an = utils.fourier_gaussian_lambda_arcsec( - coords.packed_u_centers_2D, coords.packed_v_centers_2D, **kw + coords.packed_u_centers_2D, coords.packed_v_centers_2D, **gauss_kw ) ikw = {"origin": "lower"} @@ -70,17 +71,8 @@ def test_fourier_cube(coords, tmp_path): def test_fourier_cube_grad(coords): # Test that we can calculate a gradient on a loss function using the Fourier layer - kw = { - "a": 1, - "delta_x": 0.02, # arcsec - "delta_y": -0.01, - "sigma_x": 0.02, - "sigma_y": 0.01, - "Omega": 20, # degrees - } - img_packed = utils.sky_gaussian_arcsec( - coords.packed_x_centers_2D, coords.packed_y_centers_2D, **kw + coords.packed_x_centers_2D, coords.packed_y_centers_2D, **gauss_kw ) # calculated the packed FFT using the FourierLayer @@ -218,54 +210,15 @@ def test_nufft_cached_predict_GPU(coords, baselines_1D): np.zeros((nchan, len(uu)), dtype=np.complex128) ) +def plot_nufft_comparison(uu, vv, an_output, num_output, path): + """Plot and save a figure comparing the analytic and numerical FT points. + """ -def test_nufft_accuracy_single_chan(coords, baselines_1D, tmp_path): - # create a single-channel ImageCube using a function we know the true FT analytically - # use NuFFT to FT and sample that image - # assert that the NuFFT samples and the analytic FT samples are close - - # load some data - uu, vv = baselines_1D - nchan = 1 - - # create a NuFFT layer to perform interpolations to these points - layer = fourier.NuFFT(coords=coords, nchan=nchan) - - # a sky Gaussian - kw = { - "a": 1, - "delta_x": 0.02, # arcsec - "delta_y": -0.01, - "sigma_x": 0.02, - "sigma_y": 0.01, - "Omega": 20, # degrees - } - - img_packed = utils.sky_gaussian_arcsec( - coords.packed_x_centers_2D, coords.packed_y_centers_2D, **kw - ) - img_packed_tensor = torch.tensor(img_packed[np.newaxis, :, :], requires_grad=True) - - # use the NuFFT to predict the values of the cube at the u,v locations - num_output = layer(img_packed_tensor, uu, vv)[0] # take the channel dim out - - # calculate the values analytically - an_output = utils.fourier_gaussian_lambda_arcsec(uu, vv, **kw) + qq = utils.torch2npy(torch.hypot(uu, vv)) * 1e-6 - # find max difference diff = num_output - an_output - max_diff = torch.max(torch.abs(diff)) - max = torch.max(torch.abs(num_output)) - print(max_diff, max) - - # collapse the function into 1D by doing q - qq = utils.torch2npy(torch.hypot(uu, vv)) - - # convert to numpy for plotting - num_output = utils.torch2npy(num_output) - diff = utils.torch2npy(diff) - - fig, ax = plt.subplots(nrows=4, sharex=True) + + fig, ax = plt.subplots(nrows=4, sharex=True, figsize=(7,5)) ax[0].scatter(qq, an_output.real, s=3, label="analytic") ax[0].scatter(qq, num_output.real, s=1, label="NuFFT") ax[0].set_ylabel("Real") @@ -280,12 +233,43 @@ def test_nufft_accuracy_single_chan(coords, baselines_1D, tmp_path): ax[3].scatter(qq, diff.imag, s=1, c="k") ax[3].set_ylabel("diff Imag") - ax[3].set_xlabel(r"$q$ [k lambda]") + ax[3].set_xlabel(r"$q$ [M$\lambda$]") + fig.subplots_adjust(hspace=0.2, left=0.15, right=0.85, top=0.92) fig.suptitle("NuFFT Accuracy compared to analytic") - fig.savefig(tmp_path / "nufft_comparison.png", dpi=300) + fig.savefig(path, dpi=300) + - # should be < 2.5e-6, based on plot + +def test_nufft_accuracy_single_chan(coords, baselines_1D, tmp_path): + """Create a single-channel ImageCube using an analytic function for which we know + the true FT. + Then use the NuFFT to FT and sample that image. + Plot both and their difference. + Assert that the NuFFT samples and the analytic FT samples are close. + """ + + uu, vv = baselines_1D + + # NuFFT layer to perform interpolations to these points + layer = fourier.NuFFT(coords=coords, nchan=1) + + img_packed = utils.sky_gaussian_arcsec( + coords.packed_x_centers_2D, coords.packed_y_centers_2D, **gauss_kw + ) + img_packed_tensor = torch.tensor(img_packed[np.newaxis, :, :], requires_grad=True) + + # use the NuFFT to predict the values of the cube at the u,v locations + num_output = layer(img_packed_tensor, uu, vv)[0] # take the channel dim out + num_output = utils.torch2npy(num_output) + + # calculate the values analytically + an_output = utils.fourier_gaussian_lambda_arcsec(uu, vv, **gauss_kw) + an_output = utils.torch2npy(an_output) + + plot_nufft_comparison(uu, vv, an_output, num_output, tmp_path / "nufft_comparison.png") + + # threshold based on visual inspection of plot assert num_output == approx(an_output, abs=2.5e-6) @@ -296,70 +280,30 @@ def test_nufft_cached_accuracy_single_chan(coords, baselines_1D, tmp_path): # load some data uu, vv = baselines_1D - nchan = 1 # create a NuFFT layer to perform interpolations to these points - layer = fourier.NuFFTCached(coords=coords, nchan=nchan, uu=uu, vv=vv) - - # a sky Gaussian - kw = { - "a": 1, - "delta_x": 0.02, # arcsec - "delta_y": -0.01, - "sigma_x": 0.02, - "sigma_y": 0.01, - "Omega": 20, # degrees - } + layer = fourier.NuFFTCached(coords=coords, nchan=1, uu=uu, vv=vv) img_packed = utils.sky_gaussian_arcsec( - coords.packed_x_centers_2D, coords.packed_y_centers_2D, **kw + coords.packed_x_centers_2D, coords.packed_y_centers_2D, **gauss_kw ) img_packed_tensor = torch.tensor(img_packed[np.newaxis, :, :], requires_grad=True, dtype=torch.float32) # use the NuFFT to predict the values of the cube at the u,v locations num_output = layer(img_packed_tensor)[0] # take the channel dim out - - # calculate the values analytically - an_output = utils.fourier_gaussian_lambda_arcsec(uu, vv, **kw) - - # find max difference - diff = num_output - an_output - max_diff = torch.max(torch.abs(diff)) - max = torch.max(torch.abs(num_output)) - print(max_diff, max) - - # collapse the function into 1D by doing q - qq = utils.torch2npy(torch.hypot(uu, vv)) - - # convert to numpy for plotting num_output = utils.torch2npy(num_output) - diff = utils.torch2npy(diff) - - fig, ax = plt.subplots(nrows=4, sharex=True) - ax[0].scatter(qq, an_output.real, s=3, label="analytic") - ax[0].scatter(qq, num_output.real, s=1, label="NuFFT") - ax[0].set_ylabel("Real") - ax[0].legend() - ax[1].scatter(qq, diff.real, s=1, c="k") - ax[1].set_ylabel("diff Real") - - ax[2].scatter(qq, an_output.imag, s=3) - ax[2].scatter(qq, num_output.imag, s=1) - ax[2].set_ylabel("Imag") - - ax[3].scatter(qq, diff.imag, s=1, c="k") - ax[3].set_ylabel("diff Imag") - ax[3].set_xlabel(r"$q$ [k lambda]") + # calculate the values analytically + an_output = utils.fourier_gaussian_lambda_arcsec(uu, vv, **gauss_kw) + an_output = utils.torch2npy(an_output) - fig.suptitle("NuFFT Accuracy compared to analytic") - fig.savefig(tmp_path / "nufft_cached_comparison.png", dpi=300) + plot_nufft_comparison(uu, vv, an_output, num_output, tmp_path / "nufft_cached_comparison.png") - # should be < 2e-8, based on plot + # threshold based on visual inspection of plot assert num_output == approx(an_output, abs=2e-8) -def test_nufft_cached_accuracy_coil_broadcast(coords, baselines_1D): +def test_nufft_cached_accuracy_coil_broadcast(coords, baselines_1D, tmp_path): # create a multi-channel ImageCube using a function we know the true FT analytically # use NuFFT to FT and sample that image # assert that the NuFFT samples and the analytic FT samples are close @@ -373,18 +317,8 @@ def test_nufft_cached_accuracy_coil_broadcast(coords, baselines_1D): # this should use the coil dimension of NuFFT to do the broadcasting layer = fourier.NuFFTCached(coords=coords, nchan=nchan, uu=uu, vv=vv) - # a sky Gaussian - kw = { - "a": 1, - "delta_x": 0.02, # arcsec - "delta_y": -0.01, - "sigma_x": 0.02, - "sigma_y": 0.01, - "Omega": 20, # degrees - } - img_packed = utils.sky_gaussian_arcsec( - coords.packed_x_centers_2D, coords.packed_y_centers_2D, **kw + coords.packed_x_centers_2D, coords.packed_y_centers_2D, **gauss_kw ) # broadcast to 5 channels -- the image will be the same for each @@ -394,14 +328,23 @@ def test_nufft_cached_accuracy_coil_broadcast(coords, baselines_1D): ) # use the NuFFT to predict the values of the cube at the u,v locations - num_output = layer(img_packed_tensor).detach().numpy() + num_output = layer(img_packed_tensor) + num_output = utils.torch2npy(num_output) + + # plot a single channel, to check + ichan = 1 # calculate the values analytically, for a single channel - an_output = utils.fourier_gaussian_lambda_arcsec(uu, vv, **kw) + an_output = utils.fourier_gaussian_lambda_arcsec(uu, vv, **gauss_kw) + an_output = utils.torch2npy(an_output) + + plot_nufft_comparison( + uu, vv, an_output, num_output[ichan], tmp_path / "nufft_cached_comparison.png" + ) # loop through each channel and assert that things are the same for i in range(nchan): - # should be < 2e-8, based on plot for single-channel + # threshold based on visual inspection of plot for single channel assert num_output[i] == approx(an_output, abs=2e-8) @@ -421,18 +364,8 @@ def test_nufft_cached_accuracy_batch_broadcast(coords, baselines_2D_t, tmp_path) coords=coords, nchan=nchan, uu=uu, vv=vv, sparse_matrices=False ) - # a sky Gaussian - kw = { - "a": 1, - "delta_x": 0.02, # arcsec - "delta_y": -0.01, - "sigma_x": 0.02, - "sigma_y": 0.01, - "Omega": 20, # degrees - } - img_packed = utils.sky_gaussian_arcsec( - coords.packed_x_centers_2D, coords.packed_y_centers_2D, **kw + coords.packed_x_centers_2D, coords.packed_y_centers_2D, **gauss_kw ) # broadcast to all channels -- the image will be the same for each @@ -443,44 +376,23 @@ def test_nufft_cached_accuracy_batch_broadcast(coords, baselines_2D_t, tmp_path) # use the NuFFT to predict the values of the cube at the u,v locations num_output = layer(img_packed_tensor) + num_output = utils.torch2npy(num_output) # plot a single channel, to check ichan = 1 - an_output = utils.fourier_gaussian_lambda_arcsec(uu[ichan], vv[ichan], **kw) - - diff = num_output[ichan] - an_output + an_output = utils.fourier_gaussian_lambda_arcsec(uu[ichan], vv[ichan], **gauss_kw) + an_output = utils.torch2npy(an_output) - # convert for plotting - qq = utils.torch2npy(torch.hypot(uu[ichan], vv[ichan])) - num_output = utils.torch2npy(num_output) - diff = utils.torch2npy(diff) - - fig, ax = plt.subplots(nrows=4, sharex=True) - ax[0].scatter(qq, an_output.real, s=3, label="analytic") - ax[0].scatter(qq, num_output[ichan].real, s=1, label="NuFFT") - ax[0].set_ylabel("Real") - ax[0].legend() - - ax[1].scatter(qq, diff.real, s=1, c="k") - ax[1].set_ylabel("diff Real") - - ax[2].scatter(qq, an_output.imag, s=3) - ax[2].scatter(qq, num_output[ichan].imag, s=1) - ax[2].set_ylabel("Imag") - - ax[3].scatter(qq, diff.imag, s=1, c="k") - ax[3].set_ylabel("diff Imag") - ax[3].set_xlabel(r"$q$ [k lambda]") - - fig.suptitle("NuFFT Accuracy compared to analytic") - fig.savefig(tmp_path / "nufft_comparison.png", dpi=300) + plot_nufft_comparison( + uu[ichan], vv[ichan], an_output, num_output[ichan], tmp_path / "nufft_cached_comparison.png" + ) # loop through each channel and assert that things are the same for i in range(nchan): # calculate the values analytically for this channel - an_output = utils.fourier_gaussian_lambda_arcsec(uu[i], vv[i], **kw) + an_output = utils.fourier_gaussian_lambda_arcsec(uu[i], vv[i], **gauss_kw) # using table-based interpolation, so the accuracy bar is lower - # should be < 3e-6, based on plot for single-channel + # threshold based on visual inspection of plot of single channel assert num_output[i] == approx(an_output, abs=3e-6) From 31453f8a3ec29d159e72b39d52eea6c0fee20190 Mon Sep 17 00:00:00 2001 From: Ian Czekala Date: Thu, 7 Nov 2024 08:08:22 +0000 Subject: [PATCH 03/15] bumping python version. --- .github/workflows/tests.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 358f569e..657fe2d6 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -13,7 +13,7 @@ jobs: - name: Set up Python uses: actions/setup-python@v4 with: - python-version: "3.10" + python-version: "3.13" - name: Install dependencies needed to download files # we're just installing mpol here to reference the zenodo record number # in __init__. below we'll reinstall for the tests. @@ -40,7 +40,7 @@ jobs: strategy: max-parallel: 4 matrix: - python-version: ["3.10", "3.11"] + python-version: ["3.10", "3.11", "3.12", "3.13"] steps: - uses: actions/checkout@v3 - name: Set up Python ${{ matrix.python-version }} From e1e26ecfcfa63b041115099d49bba120c848f82e Mon Sep 17 00:00:00 2001 From: Ian Czekala Date: Thu, 7 Nov 2024 08:10:42 +0000 Subject: [PATCH 04/15] try dl on py3.12 --- .github/workflows/tests.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 657fe2d6..05b6d5be 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -13,7 +13,7 @@ jobs: - name: Set up Python uses: actions/setup-python@v4 with: - python-version: "3.13" + python-version: "3.12" - name: Install dependencies needed to download files # we're just installing mpol here to reference the zenodo record number # in __init__. below we'll reinstall for the tests. From c02d56a0109e26324da50e5e2465b0f420b432eb Mon Sep 17 00:00:00 2001 From: Ian Czekala Date: Thu, 7 Nov 2024 08:14:09 +0000 Subject: [PATCH 05/15] not ready for 3.13 --- .github/workflows/tests.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 05b6d5be..55d02c06 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -40,7 +40,7 @@ jobs: strategy: max-parallel: 4 matrix: - python-version: ["3.10", "3.11", "3.12", "3.13"] + python-version: ["3.10", "3.11", "3.12"] steps: - uses: actions/checkout@v3 - name: Set up Python ${{ matrix.python-version }} From 47d1328c6923494a5df63ee971762c57c80a4a29 Mon Sep 17 00:00:00 2001 From: Ian Czekala Date: Sun, 1 Dec 2024 21:37:57 +0000 Subject: [PATCH 06/15] commenting out failing http test --- test/input_output_test.py | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/test/input_output_test.py b/test/input_output_test.py index 31821534..78f6370f 100644 --- a/test/input_output_test.py +++ b/test/input_output_test.py @@ -3,14 +3,14 @@ from mpol.input_output import ProcessFitsImage -def test_ProcessFitsImage(): - # get a .fits file produced with casa - fname = download_file( - "https://zenodo.org/record/4711811/files/logo_cube.tclean.fits", - cache=True, - show_progress=True, - pkgname="mpol", - ) +# def test_ProcessFitsImage(): +# # get a .fits file produced with casa +# fname = download_file( +# "https://zenodo.org/record/4711811/files/logo_cube.tclean.fits", +# cache=True, +# show_progress=True, +# pkgname="mpol", +# ) - fits_image = ProcessFitsImage(fname) - clean_im, clean_im_ext, clean_beam = fits_image.get_image(beam=True) +# fits_image = ProcessFitsImage(fname) +# clean_im, clean_im_ext, clean_beam = fits_image.get_image(beam=True) From 8c733b252b36aaa8b32c00c64ba4961b75d7d16f Mon Sep 17 00:00:00 2001 From: Ian Czekala Date: Sun, 1 Dec 2024 21:40:35 +0000 Subject: [PATCH 07/15] see if we can get away with removing dl files. --- .github/workflows/tests.yml | 38 ------------------------------------- 1 file changed, 38 deletions(-) diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 55d02c06..6a111039 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -6,36 +6,7 @@ on: pull_request: jobs: - dl_files: - runs-on: ubuntu-20.04 - steps: - - uses: actions/checkout@v3 - - name: Set up Python - uses: actions/setup-python@v4 - with: - python-version: "3.12" - - name: Install dependencies needed to download files - # we're just installing mpol here to reference the zenodo record number - # in __init__. below we'll reinstall for the tests. - run: | - pip install astropy - pip install frank - pip install . - - name: Cache/Restore the .mpol folder cache - uses: actions/cache@v3 - env: - cache-name: cache-mpol-dls - with: - # files are stored in .mpol - path: ~/.mpol - # the "key" is the hash of the download script - key: ${{ hashFiles('docs/download_external_files.py') }} - - name: Download large files - run: | - python3 docs/download_external_files.py - tests: - needs: dl_files # don't bother running if we didn't succeed getting the files runs-on: ubuntu-20.04 strategy: max-parallel: 4 @@ -71,15 +42,6 @@ jobs: - name: Check types with MyPy run: | mypy src/mpol --pretty - - name: Cache/Restore the .mpol folder cache - uses: actions/cache@v3 - env: - cache-name: cache-mpol-dls - with: - # files are stored in .mpol - path: ~/.mpol - # the "key" is the hash of the download script - key: ${{ hashFiles('docs/download_external_files.py') }} - name: Run tests with coverage run: | pytest --cov=mpol From f339b3f80663a731a46d765499c6d33f4732b5d2 Mon Sep 17 00:00:00 2001 From: Ian Czekala Date: Sun, 1 Dec 2024 21:44:53 +0000 Subject: [PATCH 08/15] remove unused import ruff fail. --- test/input_output_test.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/test/input_output_test.py b/test/input_output_test.py index 78f6370f..3beb71dd 100644 --- a/test/input_output_test.py +++ b/test/input_output_test.py @@ -1,6 +1,6 @@ -from astropy.utils.data import download_file -from mpol.input_output import ProcessFitsImage +# from astropy.utils.data import download_file +# from mpol.input_output import ProcessFitsImage # def test_ProcessFitsImage(): From 61dbd82d9e7681f06b091f89320bade9bd5ad900 Mon Sep 17 00:00:00 2001 From: Ian Czekala Date: Mon, 2 Dec 2024 06:43:53 +0000 Subject: [PATCH 09/15] bumping checkout v and ubuntu --- .github/workflows/tests.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 6a111039..5bacceb9 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -7,13 +7,13 @@ on: jobs: tests: - runs-on: ubuntu-20.04 + runs-on: ubuntu-24.04 strategy: max-parallel: 4 matrix: python-version: ["3.10", "3.11", "3.12"] steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Set up Python ${{ matrix.python-version }} uses: actions/setup-python@v4 with: From f0729b04eaf0c9ab5c36602362c1a1b3d5e7211b Mon Sep 17 00:00:00 2001 From: Ian Czekala Date: Mon, 2 Dec 2024 06:55:10 +0000 Subject: [PATCH 10/15] use updated python action. --- .github/workflows/tests.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 5bacceb9..4dde257d 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -15,7 +15,7 @@ jobs: steps: - uses: actions/checkout@v4 - name: Set up Python ${{ matrix.python-version }} - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 with: python-version: ${{ matrix.python-version }} # cache the Python environment, including installed dependencies From a17f398ff351fbbfb07ec0b3bb99e5dbcdc6e560 Mon Sep 17 00:00:00 2001 From: Ian Czekala Date: Mon, 2 Dec 2024 07:05:27 +0000 Subject: [PATCH 11/15] bumping cache version, too. --- .github/workflows/tests.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 4dde257d..41d5ba5c 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -21,7 +21,7 @@ jobs: # cache the Python environment, including installed dependencies # (unique to each python-version; speeds up tests more than caching pip cache) - name: Cache/Restore the Python env - uses: actions/cache@v3 + uses: actions/cache@v4 env: cache-name: cache-python${{ matrix.python-version }}-env with: From cc0822ae300c2109e38fbaaf8756185c6162250b Mon Sep 17 00:00:00 2001 From: Ian Czekala Date: Mon, 2 Dec 2024 07:16:13 +0000 Subject: [PATCH 12/15] removing cache to see if we can fix ssl. --- .github/workflows/tests.yml | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 41d5ba5c..d1f3c1e5 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -20,13 +20,13 @@ jobs: python-version: ${{ matrix.python-version }} # cache the Python environment, including installed dependencies # (unique to each python-version; speeds up tests more than caching pip cache) - - name: Cache/Restore the Python env - uses: actions/cache@v4 - env: - cache-name: cache-python${{ matrix.python-version }}-env - with: - path: ${{ env.pythonLocation }} - key: ${{ env.pythonLocation }}-${{ hashFiles('setup.py') }} + # - name: Cache/Restore the Python env + # uses: actions/cache@v4 + # env: + # cache-name: cache-python${{ matrix.python-version }}-env + # with: + # path: ${{ env.pythonLocation }} + # key: ${{ env.pythonLocation }}-${{ hashFiles('setup.py') }} - name: Install pip run: | pip install --upgrade pip From b083d5220263cf5d8e37b22cb6608e70948a0331 Mon Sep 17 00:00:00 2001 From: Ian Czekala Date: Mon, 2 Dec 2024 07:41:49 +0000 Subject: [PATCH 13/15] added in 3.13 --- .github/workflows/tests.yml | 11 +---------- 1 file changed, 1 insertion(+), 10 deletions(-) diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index d1f3c1e5..149df380 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -11,22 +11,13 @@ jobs: strategy: max-parallel: 4 matrix: - python-version: ["3.10", "3.11", "3.12"] + python-version: ["3.10", "3.11", "3.12", "3.13"] steps: - uses: actions/checkout@v4 - name: Set up Python ${{ matrix.python-version }} uses: actions/setup-python@v5 with: python-version: ${{ matrix.python-version }} - # cache the Python environment, including installed dependencies - # (unique to each python-version; speeds up tests more than caching pip cache) - # - name: Cache/Restore the Python env - # uses: actions/cache@v4 - # env: - # cache-name: cache-python${{ matrix.python-version }}-env - # with: - # path: ${{ env.pythonLocation }} - # key: ${{ env.pythonLocation }}-${{ hashFiles('setup.py') }} - name: Install pip run: | pip install --upgrade pip From eb1365430d7148a35a0e958fa2a67788761811c1 Mon Sep 17 00:00:00 2001 From: Ian Czekala Date: Mon, 2 Dec 2024 09:57:42 +0000 Subject: [PATCH 14/15] make 3.13 experimental. --- .github/workflows/tests.yml | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 149df380..dc359650 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -8,10 +8,15 @@ on: jobs: tests: runs-on: ubuntu-24.04 + continue-on-error: ${{ matrix.experimental }} strategy: - max-parallel: 4 + fail-fast: true matrix: - python-version: ["3.10", "3.11", "3.12", "3.13"] + python-version: ["3.10", "3.11", "3.12"] + experimental: [false] + include: + - version: "3.13" + experimental: true steps: - uses: actions/checkout@v4 - name: Set up Python ${{ matrix.python-version }} From caf9f2af6b6bb5118c52a0e1a7090a63f8d6b03f Mon Sep 17 00:00:00 2001 From: Ian Czekala Date: Mon, 2 Dec 2024 10:03:42 +0000 Subject: [PATCH 15/15] updating to python-version. --- .github/workflows/tests.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index dc359650..3bbc0a7a 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -15,7 +15,7 @@ jobs: python-version: ["3.10", "3.11", "3.12"] experimental: [false] include: - - version: "3.13" + - python-version: "3.13" experimental: true steps: - uses: actions/checkout@v4