
Commit

Merge pull request #9 from jmbhughes/make-ruff-pass
Make ruff pass
jmbhughes authored Mar 1, 2024
2 parents 25a5b3b + 61492e3 commit b3ca891
Showing 32 changed files with 3,477 additions and 1,783 deletions.
14 changes: 6 additions & 8 deletions .github/workflows/CI.yml
@@ -33,11 +33,9 @@ jobs:
- name: Test with pytest
run: |
pip install .
python setup.py pytest --addopts "--cov ."
# - name: Upload coverage to Codecov
# uses: codecov/codecov-action@v4
# env:
# CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
# with:
# fail_ci_if_error: true
# verbose: true
coverage run --branch -m pytest tests
- name: Upload coverage reports to Codecov
uses: codecov/[email protected]
with:
token: ${{ secrets.CODECOV_TOKEN }}
slug: jmbhughes/overlappogram
2 changes: 2 additions & 0 deletions README.md
@@ -7,6 +7,8 @@
Overlappogram is a Python package for inverting overlappogram observations of the Sun, for example MaGIXS or ECCCO observations.

This code was originally written by Dyana Beabout.

## How to Use

`python run_multiion_inversion.py ./path/to/image.fits ./path/to/config.toml`
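
For orientation, the programmatic flow behind that command is roughly the following sketch. This is a minimal, hypothetical example assembled from the `Inversion`, `initialize_input_data`, and `multiprocessing_invert` calls visible in `experiment_scripts/ECCCO_multiion_inversion.py` later in this diff; the file paths and output prefix below are placeholders, not files shipped with the package.

```python
import numpy as np
from sklearn.linear_model import ElasticNet

from overlappogram.elasticnet_model import ElasticNetModel
from overlappogram.inversion_field_angles import Inversion

# Hypothetical inputs -- substitute your own response cube, overlappogram
# image, and sample-weights file.
cube_file = "data/response_cube.fits"
image_file = "data/overlappogram_image.fits"
weights_file = "data/sample_weights.fits"

# Build the inversion over a grid of log temperatures (the same grid the
# experiment script uses: logT = 5.7 ... 7.7 in steps of 0.1).
inversion = Inversion(
    rsp_func_cube_file=cube_file,
    rsp_dep_name="logt",
    rsp_dep_list=np.round(np.arange(57, 78, 1) / 10.0, decimals=1),
    solution_fov_width=2,
    smooth_over="dependence",
    field_angle_range=[-2160, 2160],
)

# Arguments are (summed image, mask image, sample weights image).
inversion.initialize_input_data(image_file, None, weights_file)

# Wrap a scikit-learn ElasticNet regressor and run the inversion.
enet_model = ElasticNet(
    alpha=5,
    l1_ratio=0.1,
    max_iter=100_000,
    precompute=True,
    positive=True,
    fit_intercept=False,
    selection="cyclic",
)
inversion.multiprocessing_invert(
    ElasticNetModel(enet_model),
    "output/",
    output_file_prefix="example",
    output_file_postfix="x2_1.0_5_wpsf",
    detector_row_range=[450, 455],
    score=False,
)
```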
2 changes: 1 addition & 1 deletion configs/old/img_norm.toml
@@ -30,7 +30,7 @@ rhos = [1.0]
# Inversion Time = 75.56018900871277 # my no shared memory thread pool executor with 11 max workers
# Inversion Time = 77.73930597305298 # my process pool executor with copy on write for global with 11 max workers
# Inversion Time = 88.20389604568481 # same as above with numpy having 2 threads
# Inversion Time = 42.56954908370972 # same as above wtih numpy having 1 thread and selection='cyclic'
# Inversion Time = 42.56954908370972 # same as above with numpy having 1 thread and selection='cyclic'

# changing alpha can dramatically slow things
# set alpha = 0.1 then Inversion Time = 1208.5097889900208
17 changes: 8 additions & 9 deletions docs/conf.py
@@ -6,23 +6,22 @@
# -- Project information -----------------------------------------------------
# https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information

project = 'overlappogram'
copyright = '2024, Dyana Beabout, J. Marcus Hughes'
author = 'Dyana Beabout, J. Marcus Hughes'
release = '0.0.1'
project = "overlappogram"
copyright = "2024, Dyana Beabout, J. Marcus Hughes"
author = "Dyana Beabout, J. Marcus Hughes"
release = "0.0.1"

# -- General configuration ---------------------------------------------------
# https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration

extensions = []

templates_path = ['_templates']
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']

templates_path = ["_templates"]
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]


# -- Options for HTML output -------------------------------------------------
# https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-html-output

html_theme = 'alabaster'
html_static_path = ['_static']
html_theme = "alabaster"
html_static_path = ["_static"]
169 changes: 98 additions & 71 deletions experiment_scripts/ECCCO_multiion_inversion.py
@@ -10,14 +10,14 @@

import numpy as np
from astropy.io import fits
#from overlappogram.inversion_field_angles_logts_ions import Inversion
# from overlappogram.inversion_field_angles_logts_ions import Inversion
from sklearn.linear_model import ElasticNet as enet

from magixs_data_products import MaGIXSDataProducts
from overlappogram.elasticnet_model import ElasticNetModel as model
from overlappogram.inversion_field_angles import Inversion

'''def calculate_weights(data, weights, sig_read, exp_time):
"""def calculate_weights(data, weights, sig_read, exp_time):
# Read image
image_hdul = fits.open(data)
image = image_hdul[0].data
@@ -31,90 +31,107 @@
hdu = fits.PrimaryHDU(data = sample_weights)
hdulist = fits.HDUList([hdu])
hdulist.writeto(sample_weight_file, overwrite=True)
return sample_weight_file'''

if __name__ == '__main__':

channel = 'all'
abund='feldman'
fwhm='0'
#psfs=[2,3,4,5]
#psfs=[1,2,3,4,5]
#psfs=[3,5]
psfs=[4]
return sample_weight_file"""

if __name__ == "__main__":
channel = "all"
abund = "feldman"
fwhm = "0"
# psfs=[2,3,4,5]
# psfs=[1,2,3,4,5]
# psfs=[3,5]
psfs = [4]
for psf in psfs:
mdp = MaGIXSDataProducts()
# PSA for magixs1

# Response function file path.
response_dir = "data/"

mdp=MaGIXSDataProducts()
#PSA for magixs1

# Response function file path.
response_dir ='data/'

# Response file.
#cube_file = response_dir + 'eccco_is_response_feldman_m_el_with_tables_lw_pm1230_'+str(psf)+'pix.fits'
# Response file.
# cube_file = response_dir + 'eccco_is_response_feldman_m_el_with_tables_lw_pm1230_'+str(psf)+'pix.fits'
# cube_file = response_dir +'D16Feb2024_eccco_response_feldman_m_el_with_tables_s_i_slw_coopersun.fits'
cube_file = response_dir + 'D16Feb2024_eccco_response_feldman_m_el_with_tables_s_i_lw_coopersun.fits'
#cube_file = response_dir + "response_img_normalized.fits"
#cube_file = response_dir + 'D1Aug2023_eccco_response_feldman_m_el_with_tables_lw.fits'
#cube_file = response_dir + 'D14Feb2024_eccco_response_feldman_m_el_with_tables_lw.fits'

#weight_file = response_dir + 'oawave_eccco_is_lw.txt'

#Data directory and data file
data_dir ='data/'
# summed_image = data_dir + 'eccco_lw_forwardmodel_thermal_response_psf'+str(psf)+'pix_el_decon.fits'
summed_image = data_dir+'eccco_is_lw_forwardmodel_thermal_response_psf'+str(psf)+'pix_el.fits'
#summed_img = data_dir+'forwardmodel_img_normalized.fits'
sample_weights_data = data_dir +'eccco_is_lw_forwardmodel_sample_weights_psf'+str(psf)+'pix_el.fits'
#sample_weights_data = data_dir + 'weights_img_normalized.fits'
#summed_image = data_dir+'eccco_lw_forwardmodel_thermal_response_psf'+str(psf)+'pix_el.fits'
#sample_weights_data = data_dir +'eccco_lw_forwardmodel_sample_weights_psf'+str(psf)+'pix_el.fits'

#The inversion directory is where the output will be written
inversion_dir = 'output/'

#Read in response,
cube_file = (
response_dir
+ "D16Feb2024_eccco_response_feldman_m_el_with_tables_s_i_lw_coopersun.fits"
)
# cube_file = response_dir + "response_img_normalized.fits"
# cube_file = response_dir + 'D1Aug2023_eccco_response_feldman_m_el_with_tables_lw.fits'
# cube_file = response_dir + 'D14Feb2024_eccco_response_feldman_m_el_with_tables_lw.fits'

# weight_file = response_dir + 'oawave_eccco_is_lw.txt'

# Data directory and data file
data_dir = "data/"
# summed_image = data_dir + 'eccco_lw_forwardmodel_thermal_response_psf'+str(psf)+'pix_el_decon.fits'
summed_image = (
data_dir
+ "eccco_is_lw_forwardmodel_thermal_response_psf"
+ str(psf)
+ "pix_el.fits"
)
# summed_img = data_dir+'forwardmodel_img_normalized.fits'
sample_weights_data = (
data_dir
+ "eccco_is_lw_forwardmodel_sample_weights_psf"
+ str(psf)
+ "pix_el.fits"
)
# sample_weights_data = data_dir + 'weights_img_normalized.fits'
# summed_image = data_dir+'eccco_lw_forwardmodel_thermal_response_psf'+str(psf)+'pix_el.fits'
# sample_weights_data = data_dir +'eccco_lw_forwardmodel_sample_weights_psf'+str(psf)+'pix_el.fits'

# The inversion directory is where the output will be written
inversion_dir = "output/"

# Read in response,

rsp_func_hdul = fits.open(cube_file)

solution_fov_width = 2
detector_row_range = [450, 455]
#detector_row_range = None
# detector_row_range = None
field_angle_range = [-2160, 2160]
#field_angle_range = [-1260,1260]
# field_angle_range = [-1260,1260]
# field_angle_range = None

rsp_dep_name = 'logt'
rsp_dep_name = "logt"
rsp_dep_list = np.round((np.arange(57, 78, 1) / 10.0), decimals=1)

#smooth_over = 'spatial'
smooth_over = 'dependence'

inversion = Inversion(rsp_func_cube_file=cube_file,
rsp_dep_name=rsp_dep_name, rsp_dep_list=rsp_dep_list,
solution_fov_width=solution_fov_width,smooth_over=smooth_over,field_angle_range=field_angle_range)

#inversion.initialize_input_data(summed_image)#,image_mask_file)
#sample_weights_data = calculate_weights(summed_image, weight_file, 8., 1.)
#print(sample_weights_data)
#syntax (summed image, mask image, sample weights image)
# smooth_over = 'spatial'
smooth_over = "dependence"

inversion = Inversion(
rsp_func_cube_file=cube_file,
rsp_dep_name=rsp_dep_name,
rsp_dep_list=rsp_dep_list,
solution_fov_width=solution_fov_width,
smooth_over=smooth_over,
field_angle_range=field_angle_range,
)

# inversion.initialize_input_data(summed_image)#,image_mask_file)
# sample_weights_data = calculate_weights(summed_image, weight_file, 8., 1.)
# print(sample_weights_data)
# syntax (summed image, mask image, sample weights image)
inversion.initialize_input_data(summed_image, None, sample_weights_data)

#new forweights
# new forweights
# (photon convert file name:str, sigma read:float,exptime:float)
#error_parameters=(response_dir + 'oawave_eccco_is_lw.txt',8.,1.)
# error_parameters=(response_dir + 'oawave_eccco_is_lw.txt',8.,1.)
alphas = [5]
rhos = [.1]
rhos = [0.1]
for rho in rhos:
for alpha in alphas:
enet_model = enet(alpha=alpha,
l1_ratio=rho,
max_iter=100000,
precompute=True,
positive=True,
fit_intercept=False,
selection='cyclic')
enet_model = enet(
alpha=alpha,
l1_ratio=rho,
max_iter=100000,
precompute=True,
positive=True,
fit_intercept=False,
selection="cyclic",
)
inv_model = model(enet_model)

# regressor = SGDRegressor(penalty='elasticnet', alpha=alpha, l1_ratio=rho, fit_intercept=False)
@@ -126,10 +143,20 @@
basename = os.path.splitext(os.path.basename(summed_image))[0]

start = time.time()
inversion.multiprocessing_invert(inv_model, inversion_dir, output_file_prefix=basename,
#inversion.invert(inv_model, inversion_dir, output_file_prefix=basename,
output_file_postfix='x'+str(solution_fov_width)+'_'+str(rho*10)+'_'+str(alpha)+'_wpsf' ,
detector_row_range=detector_row_range,
score=False)
inversion.multiprocessing_invert(
inv_model,
inversion_dir,
output_file_prefix=basename,
# inversion.invert(inv_model, inversion_dir, output_file_prefix=basename,
output_file_postfix="x"
+ str(solution_fov_width)
+ "_"
+ str(rho * 10)
+ "_"
+ str(alpha)
+ "_wpsf",
detector_row_range=detector_row_range,
score=False,
)
end = time.time()
print("Inversion Time =", end - start)
36 changes: 22 additions & 14 deletions experiment_scripts/compare_spectrally_pure.py
@@ -3,16 +3,19 @@
import matplotlib.pyplot as plt
import numpy as np
from astropy.io import fits
from scipy.signal import convolve2d
from scipy.ndimage import gaussian_filter

output_dir = "output/photons/plots/"
ground_truth_path = "output/photons/ground_truth_spectrally_pure_data_cube_reshaped.fits"
ground_truth_path = (
"output/photons/ground_truth_spectrally_pure_data_cube_reshaped.fits"
)
dep_ref = "data/ECCCO_speedtest_runs/eccco_sp_puremaps_psf4pix_inelectrons_cm3perspersr_with_tables.fits"
output_paths = ["output/photons/combined_ECCCO_trim_sw_lw_s_i_scaled_spectrally_pure_data_cube_x2_1.0_0.1_wpsf.fits",
"output/photons/combined_ECCCO_trim_sw_lw_s_i_scaled_spectrally_pure_data_cube_x2_1.0_0.01_wpsf.fits",
"output/photons/combined_ECCCO_trim_sw_lw_s_i_scaled_spectrally_pure_data_cube_x2_1.0_0.2_wpsf.fits",
"output/photons/combined_ECCCO_trim_sw_lw_s_i_scaled_spectrally_pure_data_cube_x2_1.0_0.005_wpsf.fits"]
output_paths = [
"output/photons/combined_ECCCO_trim_sw_lw_s_i_scaled_spectrally_pure_data_cube_x2_1.0_0.1_wpsf.fits",
"output/photons/combined_ECCCO_trim_sw_lw_s_i_scaled_spectrally_pure_data_cube_x2_1.0_0.01_wpsf.fits",
"output/photons/combined_ECCCO_trim_sw_lw_s_i_scaled_spectrally_pure_data_cube_x2_1.0_0.2_wpsf.fits",
"output/photons/combined_ECCCO_trim_sw_lw_s_i_scaled_spectrally_pure_data_cube_x2_1.0_0.005_wpsf.fits",
]

with fits.open(dep_ref) as hdul:
dep_table = hdul[2].data
@@ -31,12 +34,17 @@

# initial image comparison
fig, axs = plt.subplots(ncols=3, figsize=(30, 10))
im1 = axs[0].imshow(gt, vmin=vmin, vmax=vmax, origin='lower')
im1 = axs[0].imshow(gt, vmin=vmin, vmax=vmax, origin="lower")
axs[0].set_title("Ground truth")
axs[1].imshow(output_data[i], vmin=vmin, vmax=vmax, origin='lower')
axs[1].imshow(output_data[i], vmin=vmin, vmax=vmax, origin="lower")
axs[1].set_title("Output")
im2 = axs[2].imshow(gt - output_data[i],
origin='lower', cmap='seismic', vmin=-interval, vmax=interval)
im2 = axs[2].imshow(
gt - output_data[i],
origin="lower",
cmap="seismic",
vmin=-interval,
vmax=interval,
)
axs[2].set_title("Ground truth - output")
for ax in axs:
ax.set_aspect(0.5)
@@ -48,8 +56,8 @@

# scatter plots
fig, ax = plt.subplots()
ax.plot(gt.flatten(), output_data[i].flatten(), '.', ms=1)
ax.plot([0, np.max(ground_truth[i])], [0, np.max(ground_truth[i])], 'r-')
ax.plot(gt.flatten(), output_data[i].flatten(), ".", ms=1)
ax.plot([0, np.max(ground_truth[i])], [0, np.max(ground_truth[i])], "r-")
ax.set_title(dep_table[i][1])
ax.set_xlabel("Ground truth")
ax.set_ylabel("Unfolded recreation")
@@ -59,8 +67,8 @@
# histogram
Z, xedges, yedges = np.histogram2d(gt.flatten(), output_data[i].flatten(), 50)
fig, ax = plt.subplots()
im = ax.pcolormesh(xedges, yedges, np.log10(Z.T + 1E-3))
ax.plot([0, np.max(ground_truth[i])], [0, np.max(ground_truth[i])], 'r-')
im = ax.pcolormesh(xedges, yedges, np.log10(Z.T + 1e-3))
ax.plot([0, np.max(ground_truth[i])], [0, np.max(ground_truth[i])], "r-")
ax.set_title(dep_table[i][1])
fig.colorbar(im, ax=ax)
fig.savefig(output_dir + f"histogram_{dep_table[i][1]}_{short_path}.png")
18 changes: 0 additions & 18 deletions experiment_scripts/create_spectrally_pure_images.py

This file was deleted.
