From f3131adb5f765b734c60d410d78d20b62e1787d8 Mon Sep 17 00:00:00 2001 From: Eddie Schlafly Date: Wed, 18 Sep 2024 14:37:53 -0400 Subject: [PATCH] More doc updates (#156) * Documentation updates following RTB review. --- docs/conf.py | 7 +- docs/romanisim/bandpass.rst | 2 +- docs/romanisim/image-input.rst | 4 +- docs/romanisim/image.rst | 4 +- docs/romanisim/install.rst | 29 ++++--- docs/romanisim/l1.rst | 14 ++-- docs/romanisim/l2.rst | 12 +-- docs/romanisim/overview.rst | 22 ++--- docs/romanisim/parameters.rst | 10 ++- docs/romanisim/psf.rst | 4 +- docs/romanisim/refs.rst | 16 +++- docs/romanisim/running.rst | 11 +-- romanisim/bandpass.py | 12 +-- romanisim/catalog.py | 22 +++-- romanisim/cr.py | 29 +++---- romanisim/image.py | 76 ++++++++--------- romanisim/l1.py | 144 +++++++++++++++------------------ romanisim/nonlinearity.py | 59 ++++++++------ romanisim/parameters.py | 2 +- romanisim/persistence.py | 19 +++-- romanisim/psf.py | 20 +++-- romanisim/ramp.py | 30 +++---- romanisim/util.py | 39 +++++---- romanisim/wcs.py | 17 ++-- 24 files changed, 313 insertions(+), 291 deletions(-) diff --git a/docs/conf.py b/docs/conf.py index e175d4a8..ffe67bc0 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -3,7 +3,7 @@ from datetime import datetime from pathlib import Path -import stsci_rtd_theme +# import stsci_rtd_theme if sys.version_info < (3, 11): import tomli as tomllib else: @@ -46,6 +46,7 @@ def setup(app): "sphinx_automodapi.automodapi", "sphinx_automodapi.smart_resolver", "sphinx.ext.intersphinx", + "sphinx_rtd_theme", ] autosummary_generate = True @@ -67,7 +68,9 @@ def setup(app): html_theme_options = { "collapse_navigation": True } -html_theme_path = [stsci_rtd_theme.get_html_theme_path()] +# html_theme_path = [stsci_rtd_theme.get_html_theme_path()] +# the internet reports that this can cause problems with searching, +# which was hanging at readthedocs. 
html_domain_indices = True html_sidebars = {"**": ["globaltoc.html", "relations.html", "searchbox.html"]} html_use_index = True diff --git a/docs/romanisim/bandpass.rst b/docs/romanisim/bandpass.rst index db0ca52a..4af04d7c 100644 --- a/docs/romanisim/bandpass.rst +++ b/docs/romanisim/bandpass.rst @@ -3,7 +3,7 @@ Bandpasses The simulator can render scenes in a number of different bandpasses. The choice of bandpass affects the point spread function used, the sky backgrounds, the fluxes of sources, and the reference files requested. -At present, romanisim simply passes the choice of bandpass to other packages---to webbpsf for PSF modeling, to galsim.roman for sky background estimation, to CRDS for reference file selection, or to the catalog for the selection of appropriate fluxes. However, because catalog fluxes are specified in "maggies" (i.e., in linear fluxes on the AB scale), the simulator needs to know how to convert between a maggie and the number of photons Roman receives from a source. Accordingly, the simulator knows about the AB zero points of the Roman filters, as derived from https://roman.gsfc.nasa.gov/science/WFI_technical.html . The conversion between photons received and fluxes in AB "maggies" is determined by integrating a flat 3631 Jy AB reference spectrum over the Roman filters to determine the number of photons per second corresponding to a zeroth magnitude AB source. +At present, romanisim simply passes the choice of bandpass to other packages---to webbpsf for PSF modeling, to galsim.roman for sky background estimation, to CRDS for reference file selection, or to the catalog for the selection of appropriate fluxes. However, because catalog fluxes are specified in "maggies" (i.e., such that :math:`\mathrm{flux} = 10^{-\mathrm{AB mag} / 2.5}`), the simulator needs to know how to convert between a maggie and the number of electrons Roman receives from a source. 
Accordingly, the simulator knows about the AB zero points of the Roman filters, as derived from https://roman.gsfc.nasa.gov/science/WFI_technical.html . The conversion between electrons received and fluxes in AB "maggies" is determined by integrating a flat 3631 Jy AB reference spectrum over the Roman filters to determine the number of electrons per second corresponding to a zeroth magnitude AB source. One technical note: it is unclear what aperture is used for the bandpasses provided by Goddard. The Roman PSF formally extends to infinity and some light is received by the detector but is so far from the center of the PSF that it is not useful for flux, position, or shape measurements. Often for the purposes of computing effective area curves only light landing within a fixed aperture is counted. Presently the simulator assumes that an infinite aperture is used. This can result in an approximately 10% different flux scale than more reasonable aperture selections. diff --git a/docs/romanisim/image-input.rst b/docs/romanisim/image-input.rst index a0feb643..bc33281d 100644 --- a/docs/romanisim/image-input.rst +++ b/docs/romanisim/image-input.rst @@ -1,7 +1,7 @@ Image Input =========== -The simulator can alternatively take as input a catalog that references a separate list of images of sources. The format of the catalog is described below, and the underlying "list of images" is described by a ``RealGalaxyCatalog``; see :ref:`constructing-a-rgc`. The concept here is that users may have noiseless images of galaxies from, for example, hydro simulations, and may wish to inject those into simulated images at specific locations. Or users may have reference images from, for example, the COSMOS field, which could be used as truth images for simulations. +The simulator can alternatively take as input a catalog that references a separate list of images of sources. 
The format of the catalog is described below, and the underlying "list of images" is described by a ``RealGalaxyCatalog``; see :ref:`constructing-a-rgc`. The concept here is that users may have noiseless images of galaxies from, for example, hydrodynamical simulations, and may wish to inject those into simulated images at specific locations. Or users may have reference images from, for example, the COSMOS field, which could be used as truth images for simulations. The simulator leverages GalSim's ``RealGalaxyCatalog`` framework to support this use case. There are a handful of existing catalogs of images in the appropriate format for use with the ``RealGalaxyCatalog``---in particular, `GalSim's COSMOS reference catalog `_ . @@ -26,7 +26,7 @@ For image input, the simulator takes a catalog with the following structure:: 269.9 66.1 1 6.91 109.7 0.5 0.4 2.5e-07 269.9 66.0 0 12.04 44.2 0.6 0.1 2.7e-07 -Moreover, the table metadata must include the keyword ``real_galaxy_catalog_filename`` specifying the location of the ``RealGalaxyCatalog`` catalog file. The ``ident`` keyword then specifies the id of the galaxy image to use from the RealGalaxyCatalog, which is rendered at the specified location (ra, dec) and is optionally rotated (rotate), sheared (shear_pa, shear_ba), and dilated (dilate). Finally, total fluxes in maggies (see the catalog :doc:`docs `) in specified bandpasses are given. +Moreover, the table metadata must include the keyword ``real_galaxy_catalog_filename`` specifying the location of the ``RealGalaxyCatalog`` catalog file. The ``ident`` keyword then specifies the ID of the galaxy image to use from the RealGalaxyCatalog, which is rendered at the specified location (ra, dec) and is optionally rotated (rotate), sheared (shear_pa, shear_ba), and dilated (dilate). Finally, total fluxes in maggies (see the catalog :doc:`docs `) in specified bandpasses are given. 
The following fields must be specified for each source: diff --git a/docs/romanisim/image.rst b/docs/romanisim/image.rst index 4df992fc..e93edd10 100644 --- a/docs/romanisim/image.rst +++ b/docs/romanisim/image.rst @@ -1,7 +1,7 @@ Making images ============= -Ultimately, romanisim builds image by feeding source profiles, world coordinate system objects, and point spread functions to galsim. The ``image`` and ``l1`` modules currently implement this functionality. +Ultimately, romanisim builds images by feeding source profiles, world coordinate system objects, and point spread functions to galsim. The ``image`` and ``l1`` modules currently implement this functionality. The ``image`` module is responsible for translating a metadata object that specifies everything about the conditions of the observation into objects that the simulation can understand. The metadata object follows the metadata that real WFI images will include; see `here `_ for more information. @@ -9,7 +9,7 @@ The parsed metadata is used to make a ``counts`` image that is an idealized imag * WFI pixels have a very uncertain pedestal. * WFI pixels are sampled "up the ramp" during an observation, so a number of reads contribute to the final estimate for the rate of photons entering each pixel. -* WFI reads are averaged on the telescope into resultants; ground images see only resultants. +* WFI reads are averaged on the telescope into resultants; these resultants are downlinked to the ground, instead of the individual reads. These idealized count images are then used to either make a level 2 image or a level 1 image, which are intended to include the effects of these complications. The construction of L1 images is described :doc:`here `, and the construction of L2 images is described :doc:`here `. 
diff --git a/docs/romanisim/install.rst b/docs/romanisim/install.rst index 558186bd..58784919 100644 --- a/docs/romanisim/install.rst +++ b/docs/romanisim/install.rst @@ -7,8 +7,12 @@ To install :: and you should be largely set! -There are a few dependencies that may cause more difficulty. First, -`WebbPSF `_ requires data files to +The most frequently encountered difficulty installing romanisim is +when GalSim is unable to find FFTW. If your system does not have the +FFTW library, it should be installed before romanisim and GalSim. + +Another problematic dependency is `WebbPSF +`_, which requires data files to operate. See the `docs `_ for instructions on obtaining the relevant data files and pointing the @@ -16,24 +20,23 @@ for instructions on obtaining the relevant data files and pointing the avoided by not setting the ``--webbpsf`` argument, in which case ``romanisim`` uses the GalSim modeling of the Roman PSF. -Second, some synthetic scene generation tools use images of galaxies +Additionally, some synthetic scene generation tools use images of galaxies distributed separately from the main GalSim source. See `here `_ for information on obtaining the COSMOS galaxies for use with GalSim. The ``romanisim`` package also has a less sophisticated scene modeling toolkit, which just renders Sersic galaxies. The command line -interface to ``romanisim`` presently uses supports Sersic galaxy +interface to ``romanisim`` presently uses Sersic galaxy rendering, and so many users may not need to download the COSMOS galaxies. -Third, ``romanisim`` can work with the Roman `CRDS -`_ system. This functionality -is not available to the general community at the time of writing. +Finally, ``romanisim`` can work with the Roman `CRDS +`_ system. Using CRDS requires specifying the ``CRDS_PATH`` and -``CRDS_SERVER_URL`` variables. CRDS is not used unless the -``--usecrds`` argument is specified; do not include this argument -unless you have access to the Roman CRDS.
+``CRDS_SERVER_URL`` variables. Using CRDS brings in the latest +reference files and its use is encouraged; specify the +``--usecrds`` argument to enable it. -That said, the basic install process looks like this:: +In summary, the basic install process looks like this:: pip install romanisim # to get a specific version, use instead @@ -58,7 +61,3 @@ That said, the basic install process looks like this:: You may wish to, for example, set up a new python virtual environment before running the above, or choose a different directory for WebbPSF's data files. - -Some users report issues with the FFTW dependency of galsim on Mac Arm -systems. See galsim's installation page for hints there. In -particular it may be helpful to install FFTW before galsim and romanisim. diff --git a/docs/romanisim/l1.rst b/docs/romanisim/l1.rst index a9c0a85c..5a593927 100644 --- a/docs/romanisim/l1.rst +++ b/docs/romanisim/l1.rst @@ -1,13 +1,13 @@ Making L1 images ================ -An L1 (level 1) image is a "raw" image received from the detectors. The actual measurements made on the spacecraft consist of a number of non-destructive reads of the pixels of the H4RG detectors. These reads have independent read noise but because the pixels count the total number of photons having entered each pixel, the Poisson noise in different reads of the same pixel is correlated. +An L1 (level 1) image is a "raw" image received from the detectors. The actual measurements made on the spacecraft consist of a number of non-destructive reads of the pixels of the H4RG detectors. These reads have independent read noise but because the pixels count the total number of electrons having entered each pixel, the Poisson noise in different reads of the same pixel is correlated. Because the telescope has limited bandwidth, every read is not transferred to ground stations. 
Instead, reads are averaged into "resultants" according to a specification called a MultiAccum table, and these resultants are transferred, archived, and analyzed. These resultants make up an L1 image, which romanisim simulates. -L1 images are created using an idealized ``counts`` image described :doc:`here `, which contains the number of photons each pixel of the detector would receive absent any instrumental systematics. To transform this into an L1 image, these counts must be apportioned into reads and averaged into resultants, and instrumental effects must be added. +L1 images are created starting with an idealized ``counts`` image described :doc:`here `, which contains the number of electrons each pixel of the detector would receive in the absence of any instrumental systematics. To transform this into an L1 image, these electrons must be apportioned into reads and averaged into resultants, and instrumental effects must be added. -This process proceeds by simulating each read, drawing the appropriate number of photons from the total number of photons for each read following a binomial distribution. These photons are added to a running sum that is then averaged into a resultant according to the MultiAccum table specification. This process requires drawing random numbers from the binomial distribution for every read of every pixel, and so can take on the order of a minute, but it allows detailed simulation of the statistics of the noise in each resultant together with their correlations. It also makes it straightforward to add various instrumental effects into the simulation accurately, since these usually apply to individual reads rather than to resultants (e.g., cosmic rays affect individual reads, and their affect on a resultant depends on the read in the resultant to which they apply). +This process proceeds by simulating each read, drawing the appropriate number of electrons from the total number of electrons for each read following a binomial distribution. 
These electrons are added to a running sum that is then averaged into a resultant according to the MultiAccum table specification. This process requires drawing random numbers from the binomial distribution for every read of every pixel, and so can take on the order of a minute, but it allows detailed simulation of the statistics of the noise in each resultant together with their correlations. It also makes it straightforward to add various instrumental effects into the simulation accurately, since these usually apply to individual reads rather than to resultants (e.g., cosmic rays affect individual reads, and their effect on a resultant depends on the read in the resultant to which they apply). After apportioning counts to resultants, systematic effects are added to the resultants. Presently only read noise is added. The read noise is averaged down like :math:`1/\sqrt{N}`, where :math:`N` is the number of reads contributing to the resultant. @@ -24,7 +24,7 @@ Interpixel Capacitance Interpixel capacitance (IPC) is added following non-linearity and before read-out. Read noise remains independent among different pixels but the Poisson noise is correlated between pixels by the IPC. We simply convolve the resultants by a 3x3 kernel after apportioning counts to resultants and applying non-linearity but before adding read noise. -This is slightly different than including IPC in the PSF kernel because including IPC in the PSF kernel leaves the Poisson noise uncorrelated. +This is slightly different than including IPC in the PSF kernel because including IPC in the PSF kernel leaves the Poisson noise uncorrelated between pixels. Persistence ----------- @@ -33,15 +33,15 @@ the Fermi description of persistence implemented in GalSim, where the flux in el .. math:: P(t) = A \frac{1}{1+\exp\left(\frac{-(x-x_0)}{\delta x}\right)} \left(\frac{x}{x_0}\right)^\alpha \left(\frac{t}{1000 \mathrm{s}}\right)^\gamma \, . 
-Here :math:`P(x, t)` is the rate in electrons per second that the pixel records :math:`t` seconds following receiving a total number of electrons :math:`x`. The parameters :math:`A`, :math:`x_0`, :math:`\delta x`, :math:`\alpha`, :math:`\gamma` may vary from pixel to pixel, though are presently fixed to global constants. This equation for the rate only applies to pixels which were illuminated more than to fill more than their half-well. We follow GalSim and linearly increase the persistence from 0 to the half-well value for illuminations between 0 and half-well. +Here :math:`P(x, t)` is the rate in electrons per second that the pixel records :math:`t` seconds following receiving a total number of electrons :math:`x`. The parameters :math:`A`, :math:`x_0`, :math:`\delta x`, :math:`\alpha`, :math:`\gamma` may vary from pixel to pixel, though are presently fixed to global constants. This equation for the rate only applies to pixels that were illuminated enough to fill more than their half-well. We follow GalSim and linearly increase the persistence from 0 to the half-well value for illuminations between 0 and half-well. This persistence rate is sampled with a Poisson distribution and added to each pixel read-by-read and incorporated into the resultants in the L1 images. -Persistence-affected pixels are expected to be rare, and are tracked sparsely via a list of the indices of affected pixels, the amount of the illumination, and the times of their illumination. Pixels are dropped from persistence tracking when their persistence rate is less than one electron per 100 seconds. If the same pixel is receives large fluxes multiple times, these are treated as two independent events and the resulting persistence flux is handled by summing the persistence rates given above over each event. 
+Persistence-affected pixels are expected to be rare, and are tracked sparsely via a list of the indices of affected pixels, the amount of the illumination, and the times of their illumination. Pixels are dropped from persistence tracking when their persistence rate is less than one electron per 100 seconds. If the same pixel receives large fluxes multiple times in past exposures, these are treated as independent events and the resulting persistence flux is handled by summing the persistence rates for each past event. Cosmic rays ----------- -Cosmic rays are added to the simulation read-by-read. The cosmic ray parameters follow Wu et al. (2023). The locations of cosmic rays are chosen at random to sample the focal plane uniformly. Lengths are chosen according to a power law distribution :math:`p(l) \\sim l^{-4.33}`, with lengths between 10 and 10,000 microns. Charge deposition rates per micron are selected from a Moyal distribution located at 120 electrons per micron with a width of 50 electrons per micron. An idealized charge is computed for each pixel in a read according to the product of the deposition rate per micron and the length of the cosmic ray's path within that pixel. This idealized charge is Poisson sampled and added to the relevant pixels in a read. +Cosmic rays are added to the simulation read-by-read. The cosmic ray parameters follow Wu et al. (2023). The locations of cosmic rays are chosen at random to sample the focal plane uniformly. Lengths are chosen according to a power law distribution :math:`p(l) \propto l^{-4.33}`, with lengths between 10 and 10,000 microns. Charge deposition rates per micron are selected from a Moyal distribution located at 120 electrons per micron with a width of 50 electrons per micron. An idealized charge is computed for each pixel in a read according to the product of the deposition rate per micron and the length of the cosmic ray's path within that pixel. 
This idealized charge is Poisson sampled and added to the relevant pixels in a read. Gain ---- diff --git a/docs/romanisim/l2.rst b/docs/romanisim/l2.rst index e210699f..776d0f6a 100644 --- a/docs/romanisim/l2.rst +++ b/docs/romanisim/l2.rst @@ -1,20 +1,20 @@ L2 images ========= -L2 images are constructed from :doc:`L1 ` images, which are in turn constructed from idealized :doc:`count ` images. This means that even when constructing L2 images, one must go through the process of simulating how counts get apportioned among the various reads. It is challenging to realistically model the statistics in the noise of L2 images without going through this process. +L2 images are constructed from :doc:`L1 ` images, which are in turn constructed from idealized :doc:`count ` images. This means that even when constructing L2 images, one must go through the process of simulating how electron counts get apportioned among the various reads. It is challenging to realistically model the statistics in the noise of L2 images without going through this process. L2 images are constructed by doing "ramp fitting" on the level 1 images. Each pixel of a level 1 image is a series of "resultants", giving the measured value of that pixel averaged over a series on non-destructive reads as the exposure is being observed. A simple model for a pixel is that its flux as a function of time is simply two numbers: a pedestal and a linear ramp representing the rate at which photons are detected by the pixel. Ramp fitting turns these one-dimensional series of resultants into a "slope" image that is of interest astronomically. Due to details of the H4RG detectors, the pedestals of the ramp vary widely from exposure to exposure, and so current fitting completely throws away as non-astronomical any information in the ramp that is sensitive to the pedestal. -The package contains two algorithms for ramp fitting. 
The first uses "optimal" weighting, considering the full covariance matrix between each of the resultants stemming from read \& Poisson noise from the sources. The covariance is inverted and combined with the design matrix in the usual least-squares approach to solve for the optimal slope and pedestal measurements for each pixel. This approach is naively expensive, but because the covariance matrices for each pixel for a one-dimensional family depending only on the ratio of the flux in the pixel to the read variance, the relevant matrices can be precomputed. These are then interpolated between for each pixel and summed over to get the parameters and variances for each pixel. +The package contains two algorithms for ramp fitting. The first uses "optimal" weighting, considering the full covariance matrix between each of the resultants stemming from read \& Poisson noise from the sources. The covariance is inverted and combined with the design matrix in the usual least-squares approach to solve for the optimal slope and pedestal measurements for each pixel. This approach is naively expensive, but because the covariance matrices for each pixel form a one-dimensional family depending only on the ratio of the flux in the pixel to the read variance, the relevant matrices can be precomputed. The package then interpolates between these precomputed quantities for each pixel in order to compute the parameters and variances for each pixel. This approach does not handle cosmic rays or saturated pixels well, though for modest sized sets of resultants introducing an additional series of fits for the roughly :math:`2 n_\mathrm{resultant}` sub-ramps would be straightforward. That approach would also naturally handle saturated ramps. Even explicitly computing every possible :math:`n_\mathrm{resultant} (n_\mathrm{resultant} - 1) / 2` subramp would likely still be quite inexpensive for modestly sized ramps. -The second approach follows `Casertano+2022 `_. 
In this approach, a diagonal set of weights is used in place of the full covariance matrix. The choice of weights depend on the particular pattern of reads assigned to each resultant and the amount of flux in the ramp, allowing them to interpolate from simply differencing the first and last resultants when the flux is very large to weighting the resultants by the number of reads when the flux is zero. This approach more efficiently handles dealing with ramps that have been split by cosmic rays, and obtaining uncertainties within a few percent of the "optimal" weighting approach. For these cases, we report final ramp slopes and variances derived from the inverse variance weighted subramp slopes and variances, using the read-noise derived variances. +The second approach follows `Casertano (2022) `_. In this approach, a diagonal set of weights is used in place of the full covariance matrix. The choice of weights depends on the particular pattern of reads assigned to each resultant and the amount of flux in the ramp, allowing them to interpolate from simply differencing the first and last resultants when the flux is very large to weighting the resultants by the number of reads when the flux is zero. This approach more efficiently handles dealing with ramps that have been split by cosmic rays, and obtaining uncertainties within a few percent of the "optimal" weighting approach. For these cases, we report final ramp slopes and variances derived from the inverse variance weighted subramp slopes and variances, using the read-noise derived variances. -This is a fairly faithful representation of how level two image construction works, so there are not many additional effects to add here. But mentioning some limitations: +This is a fairly faithful representation of how level two image construction works, so there are not many additional effects to add here. 
But there are some limitations: -* We have a simplistic saturation treatment, just clipping resultants that exceed - the saturation level to the saturation level and throwing a flag. +* We have a simplistic saturation treatment, and simply clip resultants that exceed + the saturation level to the saturation level and throw a flag. L2 source injection diff --git a/docs/romanisim/overview.rst b/docs/romanisim/overview.rst index d053eee8..7fd11d20 100644 --- a/docs/romanisim/overview.rst +++ b/docs/romanisim/overview.rst @@ -4,11 +4,12 @@ Overview romanisim simulates Roman Wide-Field Imager images of astronomical scenes described by catalogs of sources. The simulation includes: -* convolution of the sources by the Roman point spread function +* convolution of the sources by the Roman point spread function (PSF) * optical distortion * sky background -* level 1 image support (3D image stack of up-the-ramp samples) -* level 2 image support (2D image after calibration & ramp fitting) +* level 1 image support (L1, 3D image stack of up-the-ramp samples) +* level 2 image support (L2, 2D image after calibration & ramp fitting) +* level 3 image support (L3, approximate coadded images) * point sources and analytic galaxy profiles * expected system throughput * dark current @@ -22,20 +23,11 @@ described by catalogs of sources. The simulation includes: The simulator is based on galsim and most of these features directly invoke the equivalents in the galsim.roman package. The chief additions of this package on top of the galsim.roman implementation are using "official" webbpsf -PSF, and distortion and reference files from the Roman CRDS (not yet -public!). This +PSFs, and many reference files from the Roman `CRDS `_. This package also implements WFI up-the-ramp sampled and averaged images like those that will be downlinked from the telescope, and the official Roman WFI file format (asdf). 
-Future expected features include: - -* frame zero effects -* L3 image simulations -* "image-based" simulation inputs (i.e., provide an input image based - on a galaxy hydro sim; romanisim applies the Roman PSF & - instrumental effects on top to produce a detailed instrumental simulation) - The best way to interact with romanisim is to make an image. Running :: romanisim-make-image out.asdf @@ -48,7 +40,7 @@ being made. A more complete invocation would be :: romanisim-make-image --catalog input.ecsv --radec 270 66 --bandpass F087 --sca 7 --date 2026 1 1 --level 1 out.asdf where ``input.ecsv`` includes a list of sources in the scene, the -telescope boresight is pointing to (r, d) = (270, 66), the desired +telescope boresight is pointing to (RA, dec) = (270, 66), the desired bandpass is F087, the sensor is WFI07, the date is Jan 1, 2026, and a level 1 image (3D cube of samples up the ramp) is requested. @@ -72,7 +64,7 @@ adds an additional top-level branch to the asdf tree with the name └─webbpsf (bool): True These fields are simply the arguments to ``romanisim-make-image``, -plus an additional ``simcatobj`` field with contains the ``x``, ``y``, +plus an additional ``simcatobj`` field which contains the ``x``, ``y``, and number of photons of each simulated source. Features not included so far: diff --git a/docs/romanisim/parameters.rst b/docs/romanisim/parameters.rst index 72549cc6..12ab1db9 100644 --- a/docs/romanisim/parameters.rst +++ b/docs/romanisim/parameters.rst @@ -5,14 +5,14 @@ The parameters module contains useful constants describing the Roman telescope. 
These include: * the default read noise when CRDS is not used -* the number of border pixels +* the number of border reference pixels (4) * the specification of the read indices going into resultants for a fiducial L1 image for a handful of MA tables -* the read time +* the read time (3.04 s) * the default saturation limit * the default IPC kernel * the definition of the reference V2/V3 location in the focal plane to - which to place the given ra/dec + which to place the given right ascenscon and declination * the default persistence parameters * the default cosmic ray parameters * the default gain when CRDS is not used @@ -20,5 +20,9 @@ telescope. These include: These values can be overridden by specifying a yaml config file on the command line to romanisim-make-image. +See the `code +`_ +for the default values of these quantities. + .. automodapi:: romanisim.parameters diff --git a/docs/romanisim/psf.rst b/docs/romanisim/psf.rst index e43caa3f..3225ec4b 100644 --- a/docs/romanisim/psf.rst +++ b/docs/romanisim/psf.rst @@ -1,11 +1,11 @@ Point Spread Function Modeling ============================== -The simulator has two mechanisms for point source modeling. The first uses the galsim implementation of the Roman point spread function; for more information, see the galsim Roman documentation. The second uses the webbpsf package to make a model of the Roman PSF. +The simulator has two mechanisms for point source modeling. The first uses the galsim implementation of the Roman point spread function (PSF); for more information, see the galsim Roman documentation. The second uses the webbpsf package to make a model of the Roman PSF. In the current implementation, the simulator uses a linearly varying, achromatic bandpass for each filter when using webbpsf. That is, the PSF does not vary depending on the spectrum of the source being rendered. However, it seems straightforward to implement either of these modes in the context of galsim, albeit at some computational expense. 
-When using the galsim PSF, galsim's "photon shooting" mode is used for efficient rendering of chromatic sources. When using webbpsf, FFTs are used to to do the convolution of the intrinsic source profile with the PSF and pixel grid of the instrument. +When using the galsim PSF, galsim's "photon shooting" mode is used for efficient rendering of chromatic sources. When using webbpsf, FFTs are used to do the convolution of the intrinsic source profile with the PSF and pixel grid of the instrument. .. automodapi:: romanisim.psf diff --git a/docs/romanisim/refs.rst b/docs/romanisim/refs.rst index fa2558b9..bad83790 100644 --- a/docs/romanisim/refs.rst +++ b/docs/romanisim/refs.rst @@ -8,6 +8,8 @@ romanisim uses reference files from `CRDS ` to simulate, the right ascension and declination of the telescope -[#boresight]_, the :doc:`bandpass `, the SCA to +[#boresight]_, the :doc:`bandpass `, the Sensor +Chip Assembly (SCA) to simulate, the level of the image to simulate (:doc:`L1 ` or :doc:`L2 `), the MA table to use, and the time of the observation. Additional arguments control some details of the simulation. The ``--usecrds`` argument indicates that reference files should be pulled -from the Roman CRDS server; this is the recommended option when CRDS -is available. The ``--webbpsf`` argument indicates that the `WebbPSF +from the Roman CRDS server, and is recommended. The ``--webbpsf`` +argument indicates that the `WebbPSF `_ package should be used to simulate the PSF; note that this presently disables chromatic PSF rendering. diff --git a/romanisim/bandpass.py b/romanisim/bandpass.py index bbdb0ec6..7e38b364 100644 --- a/romanisim/bandpass.py +++ b/romanisim/bandpass.py @@ -1,6 +1,6 @@ """Roman bandpass routines -The primary purpose of this module is to provide the number of counts +The primary purpose of this module is to provide the number of electrons per second expected for sources observed by Roman given a source with the nominal flat AB spectrum of 3631 Jy. 
The ultimate source of this information is https://roman.gsfc.nasa.gov/science/WFI_technical.html . @@ -70,7 +70,7 @@ def read_gsfc_effarea(filename=None): def compute_abflux(effarea=None): """Compute the AB zero point fluxes for each filter. - How many photons would a zeroth magnitude AB star deposit in + How many electrons would a zeroth magnitude AB star deposit in Roman's detectors in a second? Parameters @@ -81,7 +81,7 @@ def compute_abflux(effarea=None): Returns ------- dict[str] : float - lookup table of zero point fluxes for each filter (photons / s) + lookup table of zero point fluxes for each filter (electrons / s) """ if effarea is None: @@ -112,7 +112,7 @@ def get_abflux(bandpass): Returns ------- float - the zero point flux (photons / s) + the zero point flux (electrons / s) """ bandpass = galsim2roman_bandpass.get(bandpass, bandpass) @@ -128,7 +128,7 @@ def get_abflux(bandpass): def compute_count_rate(flux, bandpass, filename=None, effarea=None, wavedist=None): """Compute the count rate in a given filter, for a specified SED. - How many photons would an object with SED given by + How many electrons would an object with SED given by flux deposit in Roman's detectors in a second? Parameters @@ -147,7 +147,7 @@ def compute_count_rate(flux, bandpass, filename=None, effarea=None, wavedist=Non Returns ------- float - the total bandpass flux (photons / s) + the total bandpass flux (electrons / s) """ # Read in default Roman effective areas from Goddard, if areas not supplied if effarea is None: diff --git a/romanisim/catalog.py b/romanisim/catalog.py index 5938d85a..67c8d43c 100644 --- a/romanisim/catalog.py +++ b/romanisim/catalog.py @@ -174,8 +174,9 @@ def make_galaxies(coord, index : int power law index of magnitudes faintmag : float - faintest AB magnitude for which to generate sources - Note this magnitude is in a "fiducial" band which is not observed. + Faintest AB magnitude for which to generate sources. 
+ + Note this magnitude is in a "fiducial" band that is not observed. Actual requested bandpasses are equal to this fiducial band plus 1 mag of Gaussian noise. hlr_at_faintmag : float @@ -273,6 +274,7 @@ def make_stars(coord, implies 3/5. faintmag : float faintest AB magnitude for which to generate sources + Note this magnitude is in a "fiducial" band which is not observed. Actual requested bandpasses are equal to this fiducial band plus 1 mag of Gaussian noise. @@ -336,14 +338,14 @@ def make_stars(coord, def image_table_to_catalog(table, bandpasses): - """Read a astropy Table into a list of CatalogObjects. + """Read an astropy Table into a list of CatalogObjects. We want to read in an image catalog and make a list of CatalogObjects. The image catalog indicates that specific galaxies stored as images in a RealGalaxyCatalog should be rendered at specific locations in the images, - for example, if one had postage stamps of galaxies from a hydro sim and - wanted to render them in a Roman simulation. The table must have the - following columns: + for example, if one had postage stamps of galaxies from a hydrodynamical + simulation and wanted to render them in a Roman simulation. The table + must have the following columns: * ra : float, right ascension in degrees * dec : float, declination in degrees @@ -413,7 +415,7 @@ def make_image_catalog(image_filenames, psf, out_base_filename, inserted into output images. This function makes it easy to produce a set of files that can be used as a GalSim RealGalaxyCatalog from a list of fits input images. These input images can come from anywhere, but are - expected to come from either real imaging or from hydro simulations. + expected to come from either real imaging or from hydrodynamical simulations. This routine assumes that all images share a common PSF, which is given as the PSF argument. 
This PSF is deconvolved before reconvolving with the @@ -435,10 +437,6 @@ def make_image_catalog(image_filenames, psf, out_base_filename, output filename to use for RealGalaxyCatalog files output here pixel_scale : float pixel scale of PSF and images - - Returns - ------- - None - this routine only writes files to out_base_filename """ outdtype = [('ident', 'i4'), ('gal_filename', 'U200'), ('psf_filename', 'U200'), ('noise_file_name', 'U200'), ('gal_hdu', 'i4'), ('psf_hdu', 'i4'), @@ -469,7 +467,7 @@ def make_image_catalog(image_filenames, psf, out_base_filename, def table_to_catalog(table, bandpasses): - """Read a astropy Table into a list of CatalogObjects. + """Read an astropy Table into a list of CatalogObjects. We want to read in a catalog and make a list of CatalogObjects. The table must have the following columns: diff --git a/romanisim/cr.py b/romanisim/cr.py index b7f1ccf2..93ba97d9 100644 --- a/romanisim/cr.py +++ b/romanisim/cr.py @@ -10,16 +10,16 @@ def create_sampler(pdf, x): Parameters ---------- pdf : callable - A function or empirical set of tabulated values which can + A function or empirical set of tabulated values that can be used to call or evaluate `x`. - x : 1-d array of floats + x : 1D array of floats A grid of values where the pdf should be evaluated. Returns ------- inverse_cdf : callable Callable that gives the cumulative distribution function - which allows sampling from the `pdf` distribution within + that allows sampling from the `pdf` distribution within the bounds described by the grid `x`. """ @@ -37,16 +37,17 @@ def moyal_distribution(x, location=120, scale=50): Parameters ---------- - x : 1-d array + x : 1D array An array of dE/dx values (units: eV/micron) that forms the grid on which the Moyal distribution will be evaluated. location : float The peak location of the distribution, units of eV / micron. scale : float A width parameter for the distribution, units of eV / micron. 
+
     Returns
     -------
-    moyal : 1-d array of floats
+    moyal : 1D array of floats
         Moyal distribution (pdf) evaluated on `x` grid of points.
     """
     xs = (x - location) / scale
@@ -60,7 +61,7 @@ def power_law_distribution(x, slope=-4.33):
 
     Parameters
     ----------
-    x : 1-d array of floats
+    x : 1D array of floats
         An array of cosmic ray path lengths (units: micron).
     slope : float
         The log-log slope of the distribution, default based on
@@ -68,7 +69,7 @@ def power_law_distribution(x, slope=-4.33):
 
     Returns
     -------
-    power_law : 1-d array of floats
+    power_law : 1D array of floats
         Power-law distribution (pdf) evaluated on `x` grid of points.
     """
     power_law = np.power(x, slope)
@@ -173,10 +174,10 @@ def traverse(trail_start, trail_end, N_i=4096, N_j=4096, eps=1e-10):
     ----------
     trail_start : (float, float)
         The starting coordinates in (i, j) of the cosmic ray trail,
-        in units of pix.
+        in units of pixels.
     trail_end : (float, float)
         The ending coordinates in (i, j) of the cosmic ray trail, in
-        units of pix.
+        units of pixels.
     N_i : int
         Number of pixels along i-axis of detector
     N_j : int
@@ -187,11 +188,11 @@ def traverse(trail_start, trail_end, N_i=4096, N_j=4096, eps=1e-10):
     Returns
     -------
     ii : np.ndarray[int]
-        i-axis positions of traversed trail, in units of pix.
+        i-axis positions of traversed trail, in units of pixels.
     jj : np.ndarray[int]
-        j-axis positions of traversed trail, in units of pix.
+        j-axis positions of traversed trail, in units of pixels.
     lengths : np.ndarray[float]
-        Chord lengths for each traversed pixel, in units of pix.
+        Chord lengths for each traversed pixel, in units of pixels.
     """
 
     # increase in i-direction
@@ -287,7 +288,7 @@ def simulate_crs(
     Parameters
     ----------
-    image : 2-d array of floats
+    image : 2D array of floats
         The detector image with values in units of electrons.
     time : float
         The exposure time, units of s.
@@ -310,7 +311,7 @@ def simulate_crs( Returns ------- - image : 2-d array of floats + image : 2D array of floats The detector image, in units of electrons, updated to include all of the generated cosmic ray hits. """ diff --git a/romanisim/image.py b/romanisim/image.py index 20be1e32..1bea7114 100644 --- a/romanisim/image.py +++ b/romanisim/image.py @@ -61,19 +61,19 @@ def make_l2(resultants, read_pattern, read_noise=None, gain=None, flat=None, Parameters ---------- - resultants : np.ndarray[nresultants, nx, ny] + resultants : np.ndarray[nresultants, ny, nx] resultants array read_pattern : list[list] (int) - list of list of indices of reads entering each resultant - read_noise : np.ndarray[nx, ny] (float) + list of lists of indices of reads entering each resultant + read_noise : np.ndarray[ny, nx] (float) read_noise image to use. If None, use galsim.roman.read_noise. - flat : np.ndarray[nx, ny] (float) + flat : np.ndarray[ny, nx] (float) flat field to use linearity : romanisim.nonlinearity.NL object or None non-linearity correction to use. - darkrate : np.ndarray[nx, ny] (float) + darkrate : np.ndarray[ny, nx] (float) dark rate image to subtract from ramps (electron / s) - dq : np.ndarray[nresultants, nx, ny] (int) + dq : np.ndarray[nresultants, ny, nx] (int) DQ image corresponding to resultants Returns @@ -146,7 +146,7 @@ def make_l2(resultants, read_pattern, read_noise=None, gain=None, flat=None, def in_bounds(xx, yy, imbd, margin): - """Filter sources to those landing on an image. + """Filter sources to those landing near an image. 
Parameters ---------- @@ -155,12 +155,12 @@ def in_bounds(xx, yy, imbd, margin): imbd : galsim.Image.Bounds bounds of image margin : int - keep sources up to margin outside of bounds + keep sources within this number of pixels of the image edge Returns ------- keep : np.ndarray (bool) - whether each source lands near the image (True) or not (False) + whether each source's center lands near the image (True) or not (False) """ keep = ((xx > imbd.xmin - margin) & (xx < imbd.xmax + margin) & ( @@ -169,14 +169,14 @@ def in_bounds(xx, yy, imbd, margin): def trim_objlist(objlist, image): - """Trim a Table of objects down to those falling near an image. + """Trim objects down to those falling near an image. Objects must fall in a circle centered at the center of the image with radius 1.1 times the separation between the center and corner of the image. In contrast to in_bounds, this doesn't require the x and y coordinates of - the sources, and just uses the ra/dec directly without needing to do the - WCS transformation. + the individual sources, and just uses the source celestial coordinates + directly. Parameters ---------- @@ -211,21 +211,22 @@ def add_objects_to_image(image, objlist, xpos, ypos, psf, """Add sources to an image. Note: this includes Poisson noise when photon shooting is used - (i.e., for chromatic source profiles), and otherwise is noise free. + (i.e., for chromatic source profiles), and otherwise is noise free, unless + add_noise is set to True. Parameters ---------- image : galsim.Image Image to which to add sources with associated WCS. Updated in place. objlist : list[CatalogObject] - Objects to add to image + Objects to add to image. These may be chromatic or achromatic. 
xpos, ypos : array_like
        x & y positions of sources (pixel) at which sources should be added
     psf : galsim.Profile
         PSF for image
     flux_to_counts_factor : float or list
         physical fluxes in objlist (whether in profile SEDs or flux arrays)
-        should be multiplied by this factor to convert to total counts in the
+        should be multiplied by this factor to convert to total electrons in the
         image
     outputunit_to_electrons : array_like
         One output image unit corresponds to this many electrons. If None,
@@ -236,6 +237,9 @@ def add_objects_to_image(image, objlist, xpos, ypos, psf,
     filter_name : str
         filter to use to select appropriate flux from objlist. This is only
         used when achromatic PSFs and sources are being rendered.
+    add_noise : bool
+        if True, add Poisson noise to noiseless FFT simulated images produced
+        when achromatic profiles are used.
     rng : galsim.BaseDeviate
         random number generator to use
     seed : int
@@ -245,7 +249,7 @@ def add_objects_to_image(image, objlist, xpos, ypos, psf,
     -------
     outinfo : np.ndarray
         Array structure containing rows for each source. The columns give
-        the total number of counts from the source entering the image and
+        the total number of electrons from the source entering the image and
         the time taken to render the source.
     """
     if rng is None and seed is None:
@@ -321,14 +325,15 @@ def simulate_counts_generic(image, exptime, objlist=None, psf=None,
                             **kwargs):
     """Add some simulated counts to an image.
 
-    No Roman specific code allowed! To do this, we need to have an image
+    This routine is intended to know nothing about Roman specifically.
+    To do this, we need to have an image
     to start with with an attached WCS. We also need an exposure time
     and potentially a zpflux so we know how to translate between the catalog
-    fluxes and the counts entering the image. For chromatic rendering, this
-    role instead is played by the bandpass, though the exposure time is still
-    needed to handle that part of the conversion from flux to counts.
+ fluxes and the electrons entering the image. For chromatic rendering, this + role is instead played by the bandpass, though the exposure time is still + needed to handle that part of the conversion from flux to electrons. - Then there are a few of individual components that can be added on to + Then there are a few individual components that can be added on to an image: * objlist: a list of CatalogObjects to render, or a Table. Can be chromatic @@ -336,7 +341,7 @@ def simulate_counts_generic(image, exptime, objlist=None, psf=None, * sky: a sky background model. This is different from a dark in that it is sensitive to the flat field. * dark: a dark model. - * flat: a flat field for modulating the object and sky counts + * flat: a flat field for modulating the object and sky electrons Parameters ---------- @@ -349,11 +354,11 @@ def simulate_counts_generic(image, exptime, objlist=None, psf=None, psf : galsim.Profile PSF to use when rendering sources zpflux : float - For non-chromatic profiles, the factor converting flux to counts / s. + For non-chromatic profiles, the factor converting flux to electrons / s. sky : float or array_like - Image or constant with the counts / pix / sec from sky. + Image or constant with the electrons / pix / sec from sky. dark : float or array_like - Image or constant with the counts / pix / sec from dark current. + Image or constant with the electrons / pix / sec from dark current. flat : array_like Image giving the relative QE of different pixels. xpos, ypos : array_like (float) @@ -483,10 +488,11 @@ def simulate_counts(metadata, objlist, webbpsf=True, darkrate=None, flat=None, psf_keywords=dict()): - """Simulate total counts in a single SCA. + """Simulate total electrons in a single SCA. - This gives the total counts in an idealized instrument with no systematics; - it includes only distortion & PSF convolution. 
+ This gives the total electrons recorded in an idealized instrument with no systematics; + it includes only distortion & PSF convolution. This total includes an appropriate amount + of Poisson noise. Parameters ---------- @@ -578,10 +584,6 @@ def gather_reference_data(image_mod, usecrds=False): all CRDS files should be used, parameters.reference_data must contain only Nones. - This functionality is intended to allow users to specify different - levels via a configuration file and not have them be overwritten - by the CRDS defaults, but it's not terribly clean. - The input metadata is updated with CRDS software versions if CRDS is used. @@ -707,12 +709,13 @@ def simulate(metadata, objlist, objlist : list[CatalogObject] or Table List of objects in the field to simulate usecrds : bool - use CRDS to get distortion maps + use CRDS to get reference files webbpsf : bool use webbpsf to generate PSF level : int 0, 1 or 2, specifying level 1 or level 2 image - 0 makes a special idealized 'counts' image + 0 makes a special idealized total electrons image; these are only + intended for testing purposes and are not supported. persistence : romanisim.persistence.Persistence persistence object to use; None for no persistence crparam : dict @@ -830,7 +833,8 @@ def simulate(metadata, objlist, def make_test_catalog_and_images( seed=12345, sca=7, filters=None, nobj=1000, usecrds=True, webbpsf=True, galaxy_sample_file_name=None, **kwargs): - """This routine kicks the tires on everything in this module.""" + """This is a test routine that exercises many options but is not intended for + general use.""" log.info('Making catalog...') if filters is None: filters = ['Y106', 'J129', 'H158'] @@ -916,7 +920,7 @@ def inject_sources_into_l2(model, cat, x=None, y=None, psf=None, rng=None, gain=None, webbpsf=True): """Inject sources into an L2 image. - This routine allows sources to be injected onto an existing L2 image. + This routine allows sources to be injected into an existing L2 image. 
Source injection into an L2 image relies on knowing the objects' x and y locations, the PSF, and the image gain; if these are not provided, reasonable defaults are generated from the input model. diff --git a/romanisim/l1.py b/romanisim/l1.py index b11fd29e..f63c4231 100644 --- a/romanisim/l1.py +++ b/romanisim/l1.py @@ -1,16 +1,20 @@ -"""Convert images into L1 images, with ramps. +"""This module contains routines that convert total electron images into L1 images. -We imagine starting with an image that gives the total number of counts from -all Poisson processes (at least: sky, sources, dark current). We then need -to redistribute these counts over the resultants of an L1 image. +We imagine starting with an image that gives the total number of electrons from +all Poisson processes (at least: sky, sources, dark current). One then needs +to redistribute these electrons over the resultants of an L1 image. -The easiest thing to do, and probably where I should start, is to sample the +The remainder of this module-level discussion covers some of the philosophical +choices taken in this module and the consequences thereof. It is more +colloquial than much of the documentantion and may be ignored. + +The easiest approach is to sample the image read-by-read with a binomial draw from the total counts weighted by the -chance that each count landed in this particular time window. Then gather +chance that each count landed in this particular time window, and then gather those for each resultant and average, as done on the spacecraft. It's tempting to go straight to making the appropriate resultants. Following -Casertano (2022?), the variance in each resultant is: +Casertano (2022), the variance in each resultant is: .. math:: V = \\sigma_{read}^2/N + f \\tau @@ -28,82 +32,74 @@ where t_0 is the time of the first read in the resultant and d is the spacing of the reads. -So that gives the variance from Poisson counts in resultant. 
But how should
-we draw random numbers to get that variance and the right mean? I can
+That gives the variance from Poisson counts in the resultants. But how should
+we draw random numbers to get that variance and the right mean? One can
 separately control the mean and variance by scaling the Poisson distribution,
-but I'm not sure that's doing the right thing with the higher order moments.
+but that will not do the right thing with the higher order moments.
 
-It probably isn't that expensive to just work out all of the reads,
-individually, which will also allow more natural incorporation of
-cosmic rays down the road. So let's take that approach instead for
-the moment.
+It isn't that expensive to just work out all of the reads,
+individually, which also allows more natural incorporation of
+cosmic rays. So we take that approach.
 
-How do we want to specify an L1 image? An L1 image is defined by a
-total count image and a list of lists :math:`t_{i, j}`, where
+What is the input for constructing an L1 image? An L1 image is defined by a
+total electron image and a list of lists :math:`t_{i, j}`, where
 :math:`t_{i, j}` is the time at which the jth read in the ith
 resultant is made. We demand :math:`t_{i, j} > t_{k, l}`
 whenever i > k or whenever i = k and j > l.
 
-Things this doesn't allow neatly:
+This approach doesn't allow for some effects, for example:
 
 * jitter in telescope pointing: the rate image is the same for each
   read/resultant
 * weird non-linear systematics in darks?
 
 Some systematics need to be applied to the individual reads, rather than to
-the final image. Currently linearity, persistenc, and CRs are implemented at
-individual read time. I need to think about when in the chain
-things like IPC, etc., come in.
But it still seems correct to first generate the total -number of counts that an ideal detector would measure from sources, and then apply -these effects read-by-read as we build up the resultants---i.e., I expect the current -framework will be able to handle this without major issue. +the final image. Currently linearity, persistence, and CRs are implemented at +individual read time. This approach is not super fast. For a high latitude set of resultants, generating all of the random numbers to determine the apportionment takes -43 s on the machine I'm currently using; this will scale linearly with the +43 s on a 2020 Mac laptop; this will scale linearly with the number of reads. That's longer than the actual image production for the dummy scene I'm using (though only ~2x longer). -I don't have a good way to speed this up. Explicitly doing the Poisson +It's unclear how to speed this up. Explicitly doing the Poisson noise on each read from a rate image (rather than the apportionment of an image that already has Poisson noise) is 2x slower---generating billions of random numbers just takes a little while. -Plausibly I could figure out how to draw numbers directly from what a +Plausibly one could figure out how to draw numbers directly from what a resultant is rather than looking at each read individually. That would likely bring a ~10x speed-up. The read noise there is easy. The -poisson noise is a sum of scaled Poisson variables: +Poisson noise is a sum of scaled Poisson variables: .. math:: \\sum_{i=0, ..., N-1} (N-i) c_i \\, , where :math:`c_i` is a Poisson-distributed variable. The sum of Poisson-distributed variables is Poisson-distributed, but I wasn't -immediately able to find anything about the sum of scaled Poisson-distributed -variables. The result is clearly not Poisson-distributed, but maybe there's -some special way to sample from that directly. 
- -If we did sample from that directly, we'd still also need to get the sum -of the counts in the reads comprising the resultant. So I think you'd need -a separate draw for that, conditional on the number you got for the resultant. -Or, reversing that, you might want to draw the total number of counts first, -e.g., via the binomial distribution, and then you'd want to draw a number +able to find an easy distribution to use for the sum of scaled Poisson-distributed +quantities. + +If one did sample from that directly, we'd still also need to get the sum +of the counts in the reads comprising the resultant. So one would need +a separate draw for that, conditional on the average obtained for the resultant. +Or, reversing that, one could draw the total number of counts first, +e.g., via the binomial distribution, and then would draw the number for what the average number of counts was among the reads comprising the resultant, conditional on the total number of counts. Then .. math:: \\sum_{i=0, ..., N-1} (N-i) c_i -is some kind of statistic of the multinomial distribution. That sounds a -little more tractable? +is some kind of statistic of the multinomial distribution. That looks more tractable: .. math:: c_i \\sim \\mathrm{multinomial}(\\mathrm{total}, [1/N, ..., 1/N]) -We want to draw from :math:`\\sum (N-i) c_i`. -I think the probabilities are always :math:`1/N`, with the possible small but +We need to draw from :math:`\\sum (N-i) c_i`. +The probabilities are always :math:`1/N`, with the possible small but important exception of 'skipped' or 'dropped' reads, in which case the first read would -be more like :math:`2/(N+1)` and all the others :math:`1/(N+1)`. If the probabilities -were always 1/N, this looks vaguely like it could have a nice analytic -solution. Otherwise, I don't immediately see a route forward. So I am not -going to pursue this avenue further. +be more like :math:`2/(N+1)` and all the others :math:`1/(N+1)`. 
If the +probabilities were always 1/N, this looks vaguely like it could have a nice analytic +solution. We don't pursue this avenue further here. """ import numpy as np @@ -117,12 +113,12 @@ def validate_times(tij): - """Verify that a set of times tij for a valid resultant. + """Verify that a set of times tij is ascending. Parameters ---------- tij : list[list[float]] - a list of list of times at which each read in a resultant is performed + a list of lists of times at which each read in a resultant is performed Returns ------- @@ -139,7 +135,7 @@ def tij_to_pij(tij, remaining=False): The probabilities are those needed for sampling from a binomial distribution for each read. These are delta_t / sum(delta_t), the fraction of time in each read over the total time, when `remaining` is - False. When remaining is true, we scale these probabilities not by + False. When remaining is true, these probabilities are scaled not by the total time but by the remaining time, so that subsequent reads get subsequent reads get scaled up so that each pij is delta_t / time_remaining, and the last read always has pij = 1. @@ -147,7 +143,7 @@ def tij_to_pij(tij, remaining=False): Parameters ---------- tij : list[list[float]] - list of list of readout times for each read entering a resultant + list of lists of readout times for each read entering a resultant remaining : bool scale by remaining time rather than total time @@ -181,19 +177,13 @@ def apportion_counts_to_resultants( tstart=None, rng=None, seed=None): """Apportion counts to resultants given read times. - This finds a statistically appropriate assignment of counts to each + This finds a statistically appropriate assignment of electrons to each read composing a resultant, and averages the reads together to make the resultants. - There's an alternative approach where you have a count rate image and - need to do Poisson draws from it. That's easier, and isn't this function. 
- On some systems I've used Poisson draws have been slower than binomial - draws, so it's not clear that approach offers any advantages, either--- - though I've had mixed experience there. - We loop over the reads, each time sampling from the counts image according to the probability that a photon lands in that particular read. This - is just np.random.binomial(number of counts left, p/p_left) + is implemented via np.random.binomial(number of counts left, p/p_left) . We then average the reads together to get a resultant. @@ -207,14 +197,14 @@ def apportion_counts_to_resultants( Parameters ---------- - counts : np.ndarray[nx, ny] (int) + counts : np.ndarray[ny, nx] (int) The number of counts in each pixel from sources in the final image This final image should be a ~conceptual image of the scene observed by an idealized instrument seeing only backgrounds and sources and observing until the end of the last read; no instrumental effects are included beyond PSF & distortion. tij : list[list[float]] - list of list of readout times for each read entering a resultant + list of lists of readout times for each read entering a resultant inv_linearity : romanisim.nonlinearity.NL or None Object implementing inverse non-linearity correction crparam : dict @@ -341,11 +331,11 @@ def add_read_noise_to_resultants(resultants, tij, read_noise=None, rng=None, Parameters ---------- - resultants : np.ndarray[n_resultant, nx, ny] (float) + resultants : np.ndarray[n_resultant, ny, nx] (float) resultants array, giving each of n_resultant resultant images tij : list[list[float]] - list of list of readout times for each read entering a resultant - read_noise : float or np.ndarray[nx, ny] (float) + list of lists of readout times for each read entering a resultant + read_noise : float or np.ndarray[ny, nx] (float) read noise or read noise image for adding to resultants rng : galsim.BaseDeviate Random number generator to use @@ -357,7 +347,7 @@ def add_read_noise_to_resultants(resultants, tij, 
read_noise=None, rng=None, Returns ------- - np.ndarray[n_resultant, nx, ny] (float) + np.ndarray[n_resultant, ny, nx] (float) resultants with added read noise """ if rng is None and seed is None: @@ -402,11 +392,11 @@ def make_asdf(resultants, dq=None, filepath=None, metadata=None, persistence=Non Parameters ---------- - resultants : np.ndarray[n_resultant, nx, ny] (float) + resultants : np.ndarray[n_resultant, ny, nx] (float) resultants array, giving each of n_resultant resultant images filepath : str if not None, path of asdf file to L1 image into - dq : np.ndarray[n_resultant, nx, ny] (int) + dq : np.ndarray[n_resultant, ny, nx] (int) dq array flagging saturated / CR hit pixels Returns @@ -452,7 +442,7 @@ def read_pattern_to_tij(read_pattern): Returns ------- list[list[float]] - list of list of readout times for each read entering a resultant + list of lists of readout times for each read entering a resultant """ if isinstance(read_pattern, int): read_pattern = parameters.read_pattern[read_pattern] @@ -465,12 +455,12 @@ def add_ipc(resultants, ipc_kernel=None): Parameters ---------- - resultants : np.ndarray[n_resultant, nx, ny] - resultants describing scene + resultants : np.ndarray[n_resultant, ny, nx] + resultants describing a scene Returns ------- - np.ndarray[n_resultant, nx, ny] + np.ndarray[n_resultant, ny, nx] resultants with IPC """ # add in IPC @@ -490,28 +480,28 @@ def make_l1(counts, read_pattern, rng=None, seed=None, gain=None, inv_linearity=None, crparam=None, persistence=None, tstart=None, saturation=None): - """Make an L1 image from a counts image. + """Make an L1 image from a total electrons image. - This apportions the total counts among the different resultants and adds + This apportions the total electrons among the different resultants and adds some instrumental effects (linearity, IPC, CRs, persistence, ...). 
Parameters ---------- counts : galsim.Image - total counts delivered to each pixel + total electrons delivered to each pixel read_pattern : int or list[list] MA table number or list of lists giving indices of reads entering each resultant. - read_noise : np.ndarray[nx, ny] (float) or float + read_noise : np.ndarray[ny, nx] (float) or float Read noise entering into each read - pedestal_extra_noise : np.ndarray[nx, ny] (float) or float + pedestal_extra_noise : np.ndarray[ny, nx] (float) or float Extra noise entering into each pixel (i.e., degenerate with pedestal) rng : galsim.BaseDeviate Random number generator to use seed : int Seed for populating RNG. Only used if rng is None. gain : float or np.ndarray[float] - Gain (electrons / count) for converting counts to electrons + Gain (electrons / DN) for converting DN to electrons inv_linearity : romanisim.nonlinearity.NL or None Object describing the inverse non-linearity corrections. crparam : dict @@ -525,14 +515,14 @@ def make_l1(counts, read_pattern, Returns ------- l1, dq - l1: np.ndarray[n_resultant, nx, ny] + l1: np.ndarray[n_resultant, ny, nx] resultants image array including systematic effects - dq: np.ndarray[n_resultant, nx, ny] + dq: np.ndarray[n_resultant, ny, nx] DQ array marking saturated pixels and cosmic rays """ tij = read_pattern_to_tij(read_pattern) - log.info('Apportioning counts to resultants...') + log.info('Apportioning electrons to resultants...') resultants, dq = apportion_counts_to_resultants( counts.array, tij, inv_linearity=inv_linearity, crparam=crparam, persistence=persistence, tstart=tstart, diff --git a/romanisim/nonlinearity.py b/romanisim/nonlinearity.py index ff603cc0..8222caac 100644 --- a/romanisim/nonlinearity.py +++ b/romanisim/nonlinearity.py @@ -1,14 +1,22 @@ """Routines to handle non-linearity in simulating ramps. The approach taken here is straightforward. 
The detector is accumulating -photons, but the capacitance of the pixel varies with flux level and so -the mapping between accumulated photons and read-out digital numbers +electrons, but the capacitance of the pixel varies with flux level and so +the mapping between accumulated electrons and read-out digital numbers changes with flux level. The CRDS linearity and inverse-linearity reference files describe the mapping between linear DN and observed DN. This module -implements that mapping. When simulating an image, the photons entering +implements that mapping. When simulating an image, the electrons entering each pixel are simulated, and then before being "read out" into a buffer, -are transformed with this mapping into observed counts. These are then +are transformed with this mapping into observed electrons. These are then averaged and emitted as resultants. + +Note that there is an approximation happening here surrounding +the treatment of electrons vs. DN. During the simulation of the individual +reads, all operations, including linearity, work in electrons. Nevertheless +we apply non-linearity at this time, transforming electrons into "non-linear" +electrons using this module, which will be proportional to the final DN. Later +in the L1 simulation these "non-linear" electrons are divided by the gain to +construct final DN image. """ import numpy as np @@ -20,8 +28,8 @@ def repair_coefficients(coeffs, dq): """Fix cases of zeros and NaNs in non-linearity coefficients. This function replaces suspicious-looking non-linearity coefficients with - no-op coefficients from a non-linearity perspective; all coefficients are - zero except for the linear term, which is set to 1. + identity transformation coefficients from a non-linearity perspective; all + coefficients are zero except for the linear term, which is set to 1. 
This function doesn't try to make sure that the derivative of the correction is greater than 1, which we would expect for a non-linearity @@ -29,20 +37,20 @@ def repair_coefficients(coeffs, dq): Parameters ---------- - coeffs : np.ndarray[ncoeff, nx, ny] (float) + coeffs : np.ndarray[ncoeff, ny, nx] (float) Nonlinearity coefficients, starting with the constant term and increasing in power. - dq : np.ndarray[n_resultant, nx, ny] + dq : np.ndarray[n_resultant, ny, nx] Data Quality array Returns ------- - coeffs : np.ndarray[ncoeff, nx, ny] (float) + coeffs : np.ndarray[ncoeff, ny, nx] (float) "repaired" coefficients with NaNs and weird coefficients replaced with linear values with slopes of unity. - dq : np.ndarray[n_resultant, nx, ny] + dq : np.ndarray[n_resultant, ny, nx] DQ array marking pixels with improper non-linearity coefficients """ res = coeffs.copy() @@ -61,19 +69,18 @@ def repair_coefficients(coeffs, dq): def evaluate_nl_polynomial(counts, coeffs, reversed=False): - """Correct the observed counts for non-linearity. + """Correct the observed DN for non-linearity. - As photons arrive, they make it harder for the device to count - future photons due to classical non-linearity. This function - converts some observed counts to what would have been seen absent - non-linearity given some non-linearity corrections described by - polynomials with given coefficients. + As electrons accumulate, they make it harder for the device to count + future electrons due to classical non-linearity. This function + converts observed DN to what would have been seen absent + non-linearity, using the provided non-linearity coefficients. 
Parameters ---------- - counts : np.ndarray[nx, ny] (float) - Number of counts already in pixel - coeffs : np.ndarray[ncoeff, nx, ny] (float) + counts : np.ndarray[ny, nx] (float) + Number of DN already in pixel + coeffs : np.ndarray[ncoeff, ny, nx] (float) Coefficients of the non-linearity correction polynomials reversed : bool If True, the coefficients are in reversed order, which is the @@ -83,7 +90,7 @@ def evaluate_nl_polynomial(counts, coeffs, reversed=False): Returns ------- corrected : np.ndarray[nx, ny] (float) - The corrected number of counts + The corrected number of DN """ if reversed: cc = coeffs @@ -120,7 +127,7 @@ def __init__(self, coeffs, dq=None, gain=None): Data Quality array gain : float or np.ndarray[float] - Gain (electrons / count) for converting counts to electrons + Gain (electrons / DN) for converting DN to electrons """ if dq is None: dq = np.zeros(coeffs.shape[1:], dtype='uint32') @@ -130,7 +137,11 @@ def __init__(self, coeffs, dq=None, gain=None): self.gain = gain def apply(self, counts, electrons=False, reversed=False): - """Compute the correction of observed to true counts + """Compute the correction of DN to linearized DN. + + Alternatively, when electrons = True, rescale these to DN, + correct the DN, and scale them back to electrons using + the gain. Parameters ---------- @@ -139,7 +150,7 @@ def apply(self, counts, electrons=False, reversed=False): electrons : bool Set to True for 'counts' being in electrons, with coefficients - designed for DN. Accrdingly, the gain needs to be removed and + designed for DN. Accordingly, the gain needs to be removed and reapplied. reversed : bool @@ -150,7 +161,7 @@ def apply(self, counts, electrons=False, reversed=False): Returns ------- corrected : np.ndarray[nx, ny] (float) -¯ The corrected counts. + The corrected DN or electrons. 
""" if electrons: return self.gain * evaluate_nl_polynomial(counts / self.gain, self.coeffs, reversed) diff --git a/romanisim/parameters.py b/romanisim/parameters.py index 5728a86c..0a09b888 100644 --- a/romanisim/parameters.py +++ b/romanisim/parameters.py @@ -1,4 +1,4 @@ -"""Parameters class storing a few useful constants for Roman simulations. +"""Parameters class storing useful constants for Roman simulations. """ import numpy as np diff --git a/romanisim/persistence.py b/romanisim/persistence.py index 28f4d5bd..fce8aebd 100644 --- a/romanisim/persistence.py +++ b/romanisim/persistence.py @@ -118,7 +118,8 @@ def current(self, tnow): Returns ------- - Current in electron / s in pixels due to past persistence events. + current + current in electron / s in pixels due to past persistence events. """ if np.ndim(self.t) > 0 and len(self.t) > 0 and (tnow < np.min(self.t)): raise ValueError( @@ -161,7 +162,8 @@ def to_dict(self): Returns ------- - dictionary representing persistence object. + crdict : dict + dictionary representing persistence object. """ return dict(x=self.x, t=self.t, index=self.index, A=self.A, x0=self.x0, dx=self.dx, alpha=self.alpha, gamma=self.gamma) @@ -177,7 +179,8 @@ def from_dict(d): Returns ------- - Persistence object represented by d. + persistence : Persistence + Persistence object represented by d. """ return Persistence(**d) @@ -192,7 +195,8 @@ def read(filename): Returns ------- - Persistence object stored in filename. + persistence : Persistence + Persistence object stored in filename. """ af = asdf.open(filename) persistdict = af['romanisim']['persistence'] @@ -215,7 +219,7 @@ def fermi(x, dt, A, x0, dx, alpha, gamma): """ The Fermi model for persistence: A * (x/x0)**alpha * (t/1000.)**(-gamma) / (exp(-(x-x0)/dx) + 1) - For influence level below the half well, the persistence is linear in x. + For fluence level below the half well, the persistence is linear in x. 
Parameters ---------- @@ -236,8 +240,9 @@ def fermi(x, dt, A, x0, dx, alpha, gamma): Returns ------- - The persistence signal at the current time for the persistence-affected pixels - described by persistence. + persistence + The persistence signal at the current time for the persistence-affected + pixels described by persistence. """ scalar = np.isscalar(x) x = np.atleast_1d(x) diff --git a/romanisim/psf.py b/romanisim/psf.py index db104a85..ee869b2c 100644 --- a/romanisim/psf.py +++ b/romanisim/psf.py @@ -1,19 +1,17 @@ -"""Roman PSF interface for galsim. +"""Roman point spread function (PSF) interface for galsim. -galsim.roman has an implementation of Roman's PSF based on the aperture and -some estimates for the wavefront errors over the aperture as described by -amplitudes of various Zernicke modes. This seems like a very good approach, -but we want to add here a mode using the official PSFs coming out of +galsim.roman has an implementation of Roman's point spread function (PSF) based on +the aperture and some estimates for the wavefront errors over the aperture as +described by amplitudes of various Zernicke modes. This seems like a very good +approach, but we want to add here a mode using the official PSFs coming out of webbpsf, which takes a very similar overall approach. galsim's InterpolatedImage class makes this straightforward. Future work should consider the following: -* how do we want to deal with the dependence of the PSF on the source SED? - It's possible I can just subclass ChromaticObject and implement - evaluateAtWavelength, possibly also stealing the _shoot code from - ChromaticOpticalPSF? - +* How do we want to deal with the dependence of the PSF on the source SED? + It's possible we can just subclass ChromaticObject and implement + evaluateAtWavelength, using the _shoot code from ChromaticOpticalPSF. """ import numpy as np @@ -132,7 +130,7 @@ def make_psf(sca, filter_name, wcs=None, webbpsf=True, pix=None, """Make a PSF profile for Roman. 
Optionally supports spatially variable PSFs via interpolation between - the four corners. + the four corners of an SCA. Parameters ---------- diff --git a/romanisim/ramp.py b/romanisim/ramp.py index 6ed10bb3..3e5fe6c3 100644 --- a/romanisim/ramp.py +++ b/romanisim/ramp.py @@ -196,7 +196,7 @@ def ki_and_variance_grid(read_pattern, flux_on_readvar_pts): flux_on_readvar. The :math:`k` and corresponding covariances needed to do ramp fitting - form essentially a one dimensional family in the flux in the ramp divided + essentially form a one dimensional family in the flux in the ramp divided by the square of the read noise. This function constructs these quantities for a large number of different flux / read_noise^2 to be used in interpolation. @@ -247,10 +247,10 @@ class RampFitInterpolator: pixels, the ramp fitting parameters are just a linear combination of the resultants. The weights of this linear combination are a single parameter family in the flux in the ramp divided by the read variance. So rather than - explicitly calculating those weights for each pixel, we can up front calculate - them over a grid in the flux over the read variance, and interpolate off that - grid for each point. That can all be done in a vectorized way, allowing one - to avoid doing something like a matrix inverse for each of a 16 million pixels. + explicitly calculating those weights for each pixel, they can be calculated + up front over a grid in the flux over the read variance. Then + we can interpolate among these precomputed values to avoid needing to perform + a matrix inverse for each of the 16 million pixels. 
The tool pre-calculates the grid and interpolators it needs at initialization, and then uses the results of that calculation when invoked to get the weights @@ -364,7 +364,7 @@ def fit_ramps(self, resultants, read_noise, fluxest=None): Parameters ---------- - resultants : np.ndarray[n_resultants, nx, ny] (numeric) + resultants : np.ndarray[n_resultants, ny, nx] (numeric) Resultants to fit read_noise : float or array_like like resultants read noise in array @@ -375,10 +375,10 @@ def fit_ramps(self, resultants, read_noise, fluxest=None): Returns ------- - par : np.ndarray[nx, ny, 2] (float) + par : np.ndarray[ny, nx, 2] (float) the best fit pedestal and slope for each pixel - var : np.ndarray[nx, ny, 3, 2, 2] (float) + var : np.ndarray[ny, nx, 3, 2, 2] (float) the covariance matrix of par, for each of three noise terms: the read noise, Poisson source noise, and total noise. """ @@ -400,18 +400,18 @@ def fit_ramps(self, resultants, read_noise, fluxest=None): def resultants_to_differences(resultants): """Convert resultants to their finite differences. - This is essentially np.diff(...), but retains the first + This essentially is np.diff(...), but retains the first resultant. The resulting structure has tri-diagonal covariance, which can be a little useful. Parameters ---------- - resultants : np.ndarray[n_resultant, nx, ny] (float) + resultants : np.ndarray[n_resultant, ny, nx] (float) The resultants Returns ------- - differences : np.ndarray[n_resultant, nx, ny] (float) + differences : np.ndarray[n_resultant, ny, nx] (float) Differences of resultants """ return np.vstack([resultants[0][None, :], @@ -467,7 +467,7 @@ def simulate_many_ramps(ntrial=100, flux=100, readnoise=5, read_pattern=None): def fit_ramps_casertano(resultants, dq, read_noise, read_pattern): - """Fit ramps following Casertano+2022, including averaging partial ramps. + """Fit ramps following Casertano (2022), including averaging partial ramps. 
Ramps are broken where dq != 0, and fits are performed on each sub-ramp. Resultants containing multiple ramps have their ramp fits averaged using @@ -561,7 +561,7 @@ def fit_ramps_casertano(resultants, dq, read_noise, read_pattern): def fit_ramps_casertano_no_dq(resultants, read_noise, read_pattern): - """Fit ramps following Casertano+2022, only using full ramps. + """Fit ramps following Casertano (2022), only using full ramps. This is a simpler implementation of fit_ramps_casertano, which doesn't address the case of partial ramps broken by CRs. This case is easier @@ -579,9 +579,9 @@ def fit_ramps_casertano_no_dq(resultants, read_noise, read_pattern): Returns ------- - par : np.ndarray[nx, ny, 2] (float) + par : np.ndarray[ny, nx, 2] (float) the best fit pedestal and slope for each pixel - var : np.ndarray[nx, ny, 3, 2, 2] (float) + var : np.ndarray[ny, nx, 3, 2, 2] (float) the covariance matrix of par, for each of three noise terms: the read noise, Poisson source noise, and total noise. 
""" diff --git a/romanisim/util.py b/romanisim/util.py index 69df7afd..b7e6e046 100644 --- a/romanisim/util.py +++ b/romanisim/util.py @@ -22,7 +22,7 @@ def skycoord(celestial): Returns ------- - astropy.coordinates.SkyCoord + coord: astropy.coordinates.SkyCoord SkyCoord corresponding to celestial """ if isinstance(celestial, SkyCoord): @@ -39,11 +39,11 @@ def celestialcoord(sky): Parameters ---------- sky : astropy.coordinates.SkyCoord - astropy.coordinates.SkyCoord to transform into an galsim.CelestialCoord + astropy.coordinates.SkyCoord to transform into a galsim.CelestialCoord Returns ------- - galsim.CelestialCoord + coord: galsim.CelestialCoord CelestialCoord corresponding to skycoord """ if isinstance(sky, galsim.CelestialCoord): @@ -65,7 +65,7 @@ def scalergb(rgb, scales=None, lumrange=None): Parameters ---------- - rgb : np.ndarray[npix, npix, 3] + rgb : np.ndarray[ny, nx, 3] the RGB images to scale scales : list[float] (must contain 3 floats) rescale each image by this amount @@ -74,7 +74,7 @@ def scalergb(rgb, scales=None, lumrange=None): Returns ------- - im : np.ndarray[npix, npix, 3] + im : np.ndarray[ny, nx, 3] scaled RGB image suitable for displaying """ @@ -109,7 +109,8 @@ def random_points_in_cap(coord, radius, nobj, rng=None): Returns ------- - astropy.coordinates.SkyCoord + coords : astropy.coordinates.SkyCoord + Coordinates selected at random in cap. 
""" if rng is None: rng = galsim.UniformDeviate() @@ -138,7 +139,8 @@ def random_points_in_king(coord, rc, rt, nobj, rng=None): Returns ------- - astropy.coordinates.SkyCoord + coords : astropy.coordinates.SkyCoord + sample coordinates selected """ distances = sample_king_distances(rt, rt, nobj, rng=rng) * u.deg return random_points_at_radii(coord, distances, rng=rng) @@ -158,8 +160,8 @@ def random_points_at_radii(coord, radii, rng=None): Returns ------- - astropy.coordinates.SkyCoord - + coords : astropy.coordinates.SkyCoord + random coordinates selected """ if rng is None: rng = galsim.UniformDeviate() @@ -264,8 +266,8 @@ def add_more_metadata(metadata): def update_aperture_and_wcsinfo_metadata(metadata, gwcs): """Update aperture and wcsinfo keywords to use the aperture for this SCA. - Updates metadata in place, setting v2v3ref to be equal to the v2 and v3 of - the center of the detector, and radecref accordingly. Also updates the + Updates metadata in place, setting v2/v3_ref to be equal to the V2 and V3 of + the center of the detector, and ra/dec_ref accordingly. Also updates the aperture to refer to this SCA. No updates are performed if gwcs is not a gWCS object or if aperture and @@ -314,7 +316,7 @@ def update_aperture_and_wcsinfo_metadata(metadata, gwcs): def king_profile(r, rc, rt): - """Compute the King (1962) profile. + """Compute the King profile. Parameters ---------- @@ -327,13 +329,14 @@ def king_profile(r, rc, rt): Returns ------- + rho : np.ndarray[float] 2D number density of stars at r. """ return (1 / np.sqrt(1 + (r / rc)**2) - 1 / np.sqrt(1 + (rt / rc)**2))**2 def sample_king_distances(rc, rt, npts, rng=None): - """Sample distances from a King (1962) profile. + """Sample distances from a King profile. Parameters ---------- @@ -371,7 +374,7 @@ def decode_context_times(context, exptimes): Parameters ---------- context: numpy.ndarray - A 3D `~numpy.ndarray` of integral data type. + A 3D numpy.ndarray of integral data type. 
exptimes: list of floats Exposure times for each component image. @@ -379,7 +382,7 @@ def decode_context_times(context, exptimes): Returns ------- - total_exptimes: numpy.ndarray + total_exptimes : numpy.ndarray A 2D array of total exposure time for each pixel. """ @@ -466,7 +469,8 @@ def default_image_meta(time=None, ma_table=1, filter_name='F087', Returns ------- - Metadata dictionary corresponding to input parameters. + meta : dict + Metadata dictionary corresponding to input parameters. """ if time is None: @@ -564,7 +568,8 @@ def merge_dicts(a, b): Returns ------- - a, mutated to contain keys from b. + a : dict + a, mutated to contain keys from b. """ for key in b: if key in a and isinstance(a[key], dict) and isinstance(b[key], dict): diff --git a/romanisim/wcs.py b/romanisim/wcs.py index 0ee540e5..2941f56e 100644 --- a/romanisim/wcs.py +++ b/romanisim/wcs.py @@ -1,7 +1,7 @@ """Roman WCS interface for galsim. galsim.roman has an implementation of Roman's WCS based on some SIP -coefficients for each SCA. This is presumably plenty good, but here +coefficients for each SCA. This could presumably be adequate, but here we take the alternative approach of using the distortion functions provided in CRDS. These naturally are handled by the gWCS library, but galsim only naturally supports astropy WCSes via the ~legacy @@ -41,18 +41,19 @@ def fill_in_parameters(parameters, coord, pa_aper=0, boresight=True): Parameters ---------- parameters : dict - Metadata dictionary - Dictionaries like pointing, aperture, and wcsinfo may be modified + Metadata dictionary. 
Dictionaries like pointing, aperture, and + wcsinfo may be modified coord : astropy.coordinates.SkyCoord or galsim.CelestialCoord - world coordinates at V2 / V3 ref (boresight or center of WFI CCDs) + world coordinates at V2 / V3 ref (boresight or center of WFI SCAs) pa_aper : float - position angle (North to YIdl) at the aperture V2Ref/V3Ref + position angle (North relative to the idealized Y axis) at the aperture + V2Ref/V3Ref boresight : bool whether coord is the telescope boresight (V2 = V3 = 0) or the center of - the WFI CCD array + the WFI SCA array """ coord = util.skycoord(coord) @@ -91,7 +92,7 @@ def fill_in_parameters(parameters, coord, pa_aper=0, boresight=True): def get_wcs(image, usecrds=True, distortion=None): - """Get a WCS object for a given sca or set of CRDS parameters. + """Get a WCS object for a given SCA or set of CRDS parameters. Parameters ---------- @@ -415,7 +416,7 @@ def convert_wcs_to_gwcs(wcs): def get_mosaic_wcs(mosaic, shape=None, xpos=None, ypos=None, coord=None): - """Get a WCS object for a given sca or set of CRDS parameters. + """Get a WCS object for a given SCA or set of CRDS parameters. - if xpos, ypos, and coords are provided, then a GWCS compatible object will be created (and meta updated with it) - if not, a functional CelestialWCS is created [useful for quick computation, but GWCS needed for validation of a final simulation]