diff --git a/config/config.example.yaml b/config/config.example.yaml
index 0351a59f..06b48cad 100644
--- a/config/config.example.yaml
+++ b/config/config.example.yaml
@@ -11,16 +11,9 @@ db_path: ~/.iop4data/iop4.db # Path to iop4 sqlite database file.
astrometry_cache_path: ~/.astrometry_cache/ # Path to store the astrometry index files.
max_concurrent_threads: 4 # Number of threads / processes to use (e.g. 4).
astrometry_timeout: 20 # Timeout in minutes for astrometry solving.
-
-###################
-### RAY CLUSTER ###
-###################
-
-ray_use_cluster: False # True/False (use ray for parallelization), let it to false if you have not configured it.
-ray_cluster_address: null # Aaddress for the cluster, needs ssh keys for current user; e.g. 'user@address'.
-ray_cluster_config: null # Path to ray cluster config file, e.g. /path/to/iop4/priv.rayconfig.yaml'.
-ray_db_path: null # Path in ray cluster, e.g. '~/iop4data/iop4.db'.
-ray_datadir: null # Path in ray cluster, e.g. '~/iop4data/'.
+astrometry_allsky_allow: false # Whether to allow all-sky searches in the astrometry.net solver. If there are many all-sky
+# images and max_concurrent_threads is too high, this can cause very high memory consumption.
+astrometry_allsky_septhreshold: 25 # Threshold (arcmin) of detected pointing mismatch that triggers an all-sky search.
################
### GRAPHICS ###
diff --git a/iop4admin/modeladmins/fitfile.py b/iop4admin/modeladmins/fitfile.py
index fed84834..342ef1d7 100644
--- a/iop4admin/modeladmins/fitfile.py
+++ b/iop4admin/modeladmins/fitfile.py
@@ -23,6 +23,9 @@
logger = logging.getLogger(__name__)
class AdminFitFile(admin.ModelAdmin):
+
+ list_per_page = 25
+ list_max_show_all = 200
@admin.display(description='TELESCOPE', ordering='epoch__telescope')
def telescope(self, obj):
diff --git a/iop4admin/modeladmins/rawfit.py b/iop4admin/modeladmins/rawfit.py
index d1d751da..84dc22a6 100644
--- a/iop4admin/modeladmins/rawfit.py
+++ b/iop4admin/modeladmins/rawfit.py
@@ -18,7 +18,6 @@ class AdminRawFit(AdminFitFile):
readonly_fields = [field.name for field in RawFit._meta.fields]
search_fields = ['id', 'filename', 'epoch__telescope', 'epoch__night']
ordering = ['-epoch__night','-epoch__telescope']
- list_per_page = 25
list_filter = (
RawFitIdFilter,
RawFitTelescopeFilter,
diff --git a/iop4admin/modeladmins/reducedfit.py b/iop4admin/modeladmins/reducedfit.py
index 743e1a2b..5d778300 100644
--- a/iop4admin/modeladmins/reducedfit.py
+++ b/iop4admin/modeladmins/reducedfit.py
@@ -22,7 +22,6 @@ class AdminReducedFit(AdminFitFile):
readonly_fields = [field.name for field in ReducedFit._meta.fields]
search_fields = ['id', 'filename', 'epoch__telescope', 'epoch__night', 'sources_in_field__name']
ordering = ['-epoch__night', '-epoch__telescope', '-juliandate']
- list_per_page = 25
list_filter = (
RawFitIdFilter,
RawFitTelescopeFilter,
diff --git a/iop4admin/templates/iop4admin/view_fitdetails.html b/iop4admin/templates/iop4admin/view_fitdetails.html
index 214739ad..853d54ac 100644
--- a/iop4admin/templates/iop4admin/view_fitdetails.html
+++ b/iop4admin/templates/iop4admin/view_fitdetails.html
@@ -118,13 +118,13 @@ Reduction information:
Calibration Frames
@@ -315,6 +315,8 @@ Header {{forloop.counter }}:
(scroll down)
#img_preview_range_form label {
text-align: right;
+ display: flex;
+ align-items: center;
}
/* header div */
diff --git a/iop4lib/config.py b/iop4lib/config.py
index 660c37f5..24c79dc1 100644
--- a/iop4lib/config.py
+++ b/iop4lib/config.py
@@ -2,9 +2,24 @@
import os, pathlib, yaml, logging
import matplotlib, matplotlib.pyplot
+
+# Disable matplotlib logging except for warnings and above
+
matplotlib.pyplot.set_loglevel('warning')
+# Set journal_mode=WAL and synchronous=NORMAL for sqlite3 databases on
+# connection, to improve support for concurrent write access to the
+# database during parallel reduction
+
+from django.db.backends.signals import connection_created
+from django.dispatch.dispatcher import receiver
+@receiver(connection_created)
+def enable_wal_sqlite(sender, connection, **kwargs) -> None:
+ if connection.vendor == "sqlite":
+ with connection.cursor() as cursor:
+ cursor.execute('PRAGMA synchronous=NORMAL;')
+ cursor.execute('PRAGMA journal_mode=WAL;')
class Config(dict):
r""" Configuration class for IOP4.
diff --git a/iop4lib/db/aperphotresult.py b/iop4lib/db/aperphotresult.py
index b8d7beb7..a539ed65 100644
--- a/iop4lib/db/aperphotresult.py
+++ b/iop4lib/db/aperphotresult.py
@@ -26,6 +26,8 @@ class AperPhotResult(models.Model):
reducedfit = models.ForeignKey("ReducedFit", on_delete=models.CASCADE, related_name='aperphotresults', help_text="The ReducedFit this AperPhotResult has been computed for.")
astrosource = models.ForeignKey("AstroSource", on_delete=models.CASCADE, related_name='aperphotresults', help_text="The AstroSource this AperPhotResult has been computed for.")
aperpix = models.FloatField(null=True, blank=True)
+ r_in = models.FloatField(null=True, blank=True)
+ r_out = models.FloatField(null=True, blank=True)
pairs = models.TextField(null=True, blank=True, choices=PAIRS.choices, help_text="Whether this AperPhotResult is for the Ordinary or Extraordinary pair.")
## photometry results
@@ -46,7 +48,7 @@ class Meta:
verbose_name = 'Aperture Photometry Result'
verbose_name_plural = 'Aperture Photometry Results'
constraints = [
- models.UniqueConstraint(fields=['reducedfit', 'astrosource', 'aperpix', 'pairs'], name='unique_aperphotresult')
+ models.UniqueConstraint(fields=['reducedfit', 'astrosource', 'aperpix', 'r_in', 'r_out', 'pairs'], name='unique_aperphotresult')
]
# repr and str
diff --git a/iop4lib/db/epoch.py b/iop4lib/db/epoch.py
index f968dc15..d8f45ceb 100644
--- a/iop4lib/db/epoch.py
+++ b/iop4lib/db/epoch.py
@@ -27,7 +27,7 @@
from iop4lib.instruments import Instrument
from .fields import FlagChoices, FlagBitField
from iop4lib.utils import get_mem_parent_from_child, get_total_mem_from_child, get_mem_current, get_mem_children
-from iop4lib.utils.parallel import epoch_bulkreduce_multiprocesing, epoch_bulkreduce_ray
+from iop4lib.utils.parallel import epoch_bulkreduce_multiprocesing
# logging
@@ -478,9 +478,7 @@ def reduce_reducedfits(reduced_L, epoch=None):
"""
if len(reduced_L) > 0:
- if iop4conf.ray_use_cluster:
- epoch_bulkreduce_ray(reduced_L)
- elif iop4conf.max_concurrent_threads > 1:
+ if iop4conf.max_concurrent_threads > 1:
epoch_bulkreduce_multiprocesing(reduced_L, epoch=epoch)
else:
epoch_bulkreduce_onebyone(reduced_L, epoch=epoch)
@@ -500,17 +498,29 @@ def by_epochname(cls, epochname):
- def compute_relative_photometry(self):
+ def compute_relative_photometry(self, redf_qs=None):
+ """ Computes the relative photometry results for this epoch.
+
+ Parameters
+ ----------
+ redf_qs : QuerySet, optional
+ If provided, the relative photometry will be computed only for the ReducedFit objects in the QuerySet.
+ If not provided, the relative photometry will be computed for all ReducedFit objects in the epoch.
+ """
+
from .reducedfit import ReducedFit
- redf_qs = ReducedFit.objects.filter(epoch=self, obsmode=OBSMODES.PHOTOMETRY, flags__has=ReducedFit.FLAGS.BUILT_REDUCED).order_by('juliandate').all()
+ if redf_qs is None:
+ redf_qs = ReducedFit.objects.filter(epoch=self, obsmode=OBSMODES.PHOTOMETRY, flags__has=ReducedFit.FLAGS.BUILT_REDUCED).order_by('juliandate').all()
+ else:
+ redf_qs = redf_qs.filter(epoch=self, obsmode=OBSMODES.PHOTOMETRY, flags__has=ReducedFit.FLAGS.BUILT_REDUCED).order_by('juliandate').all()
for redf in redf_qs:
redf.compute_relative_photometry()
- def make_polarimetry_groups(self):
+ def make_polarimetry_groups(self, redf_qs=None):
"""
To reduce the polarimetry data, we need to group the reduced fits corresponding to rotated polarization angles.
@@ -525,7 +535,10 @@ def make_polarimetry_groups(self):
from .reducedfit import ReducedFit
- redf_qs = ReducedFit.objects.filter(epoch=self, obsmode=OBSMODES.POLARIMETRY, flags__has=ReducedFit.FLAGS.BUILT_REDUCED).order_by('juliandate').all()
+ if redf_qs is None:
+ redf_qs = ReducedFit.objects.filter(epoch=self, obsmode=OBSMODES.POLARIMETRY, flags__has=ReducedFit.FLAGS.BUILT_REDUCED).order_by('juliandate').all()
+ else:
+ redf_qs = redf_qs.filter(epoch=self, obsmode=OBSMODES.POLARIMETRY, flags__has=ReducedFit.FLAGS.BUILT_REDUCED).order_by('juliandate').all()
# Create a groups with the same keys (object, band, exptime)
@@ -592,23 +605,38 @@ def make_polarimetry_groups(self):
return split_groups, split_groups_keys
- def compute_relative_polarimetry(self, *args, **kwargs):
+ def compute_relative_polarimetry(self, redf_qs=None):
+ """Computes the relative polarimetry results for this epoch.
+
+ Parameters
+ ----------
+ redf_qs : QuerySet, optional
+ If provided, the relative polarimetry will be computed only for the ReducedFit objects in the QuerySet.
+ If not provided, the relative polarimetry will be computed for all ReducedFit objects in the epoch.
+ """
+
from .reducedfit import ReducedFit
- clusters_L, groupkeys_L = self.make_polarimetry_groups()
+ clusters_L, groupkeys_L = self.make_polarimetry_groups(redf_qs=redf_qs)
logger.info(f"{self}: computing relative polarimetry over {len(groupkeys_L)} polarimetry groups.")
logger.debug(f"{self}: {groupkeys_L=}")
- f = lambda x: Instrument.by_name(x[1]['instrument']).compute_relative_polarimetry(x[0], *args, **kwargs)
-
- for i, (group, keys) in enumerate(zip(clusters_L, groupkeys_L)):
- try:
- Instrument.by_name(keys['instrument']).compute_relative_polarimetry(group, *args, **kwargs)
- except Exception as e:
- logger.error(f"{self}: error computing relative polarimetry for group n {i} {keys}: {e}")
- finally:
- logger.info(f"{self}: computed relative polarimetry for group n {i} {keys}.")
+ f = lambda x: Instrument.by_name(x[1]['instrument']).compute_relative_polarimetry(x[0])
+
+ if iop4conf.max_concurrent_threads > 1:
+ # parallel
+ from iop4lib.utils.parallel import parallel_relative_polarimetry
+ parallel_relative_polarimetry(groupkeys_L, clusters_L)
+ else:
+ # one by one
+ for i, (group, keys) in enumerate(zip(clusters_L, groupkeys_L)):
+ try:
+ Instrument.by_name(keys['instrument']).compute_relative_polarimetry(group)
+ except Exception as e:
+ logger.error(f"{self}: error computing relative polarimetry for group n {i} {keys}: {e}")
+ finally:
+ logger.info(f"{self}: computed relative polarimetry for group n {i} {keys}.")
diff --git a/iop4lib/db/fitfilemodel.py b/iop4lib/db/fitfilemodel.py
index a2b83c29..60fa3863 100644
--- a/iop4lib/db/fitfilemodel.py
+++ b/iop4lib/db/fitfilemodel.py
@@ -168,7 +168,7 @@ def get_imgbytes_preview_image(self, force_rebuild=False, **kwargs):
fpath = os.path.join(self.filedpropdir, "img_preview_image.png")
if len(kwargs) == 0 and not force_rebuild:
- if os.path.isfile(fpath):
+ if os.path.isfile(fpath) and os.path.getmtime(self.filepath) < os.path.getmtime(fpath):
# logger.debug(f"Loading image preview for {self.fileloc} from disk")
with open(fpath, 'rb') as f:
return f.read()
@@ -191,6 +191,20 @@ def get_imgbytes_preview_image(self, force_rebuild=False, **kwargs):
fig = mplt.figure.Figure(figsize=(width/100, height/100), dpi=iop4conf.mplt_default_dpi)
ax = fig.subplots()
ax.imshow(imgdata, cmap=cmap, origin='lower', norm=norm)
+ try:
+ from iop4lib.db import ReducedFit, AstroSource
+ from iop4lib.enums import SRCTYPES
+ # If it is an astrometrically calibrated reduced fit, mark the target source position
+ if self.has_flag(ReducedFit.FLAGS.BUILT_REDUCED):
+ if (target_src := self.sources_in_field.exclude(srctype=SRCTYPES.CALIBRATOR).get()) is not None:
+ target_pos_px = target_src.coord.to_pixel(self.wcs1)
+ ax.axhline(y=target_pos_px[1], color='r', linestyle="--", linewidth=1)
+ ax.axvline(x=target_pos_px[0], color='r', linestyle="--", linewidth=1)
+ except Exception as e:
+ # This can fail if there is any problem with the fit calibration (e.g. in early versions the WCS
+ # was not saved under key A), but we still want to be able to explore the data in the admin.
+ logger.warning(f"Could not mark the target source position on ReducedFit {self.pk}: {e}")
+
ax.axis('off')
fig.savefig(buf, format='png', bbox_inches='tight', pad_inches=0)
fig.clf()
@@ -230,7 +244,7 @@ def get_imgbytes_preview_histogram(self, force_rebuild=False, **kwargs):
fpath = os.path.join(self.filedpropdir, "img_preview_histogram.png")
if len(kwargs) == 0 and not force_rebuild:
- if os.path.isfile(fpath):
+ if os.path.isfile(fpath) and os.path.getmtime(self.filepath) < os.path.getmtime(fpath):
#logger.debug(f"Loading preview histogram for {self.fileloc} from disk")
with open(fpath, 'rb') as f:
return f.read()
diff --git a/iop4lib/db/photopolresult.py b/iop4lib/db/photopolresult.py
index 3ba10fc2..df0c09fe 100644
--- a/iop4lib/db/photopolresult.py
+++ b/iop4lib/db/photopolresult.py
@@ -188,7 +188,7 @@ def create(cls, reducedfits, astrosource, reduction, **kwargs):
logger.debug(f'Creating DB entry photopolresult for {reducedfits=}, {astrosource=} and {reduction=}')
photopolresult = cls(**kwargs)
photopolresult.save() # we need to save it to use the manytomany field
- photopolresult.reducedfits.set(reducedfits, through_defaults={'astrosource': astrosource, 'reduction': reduction})
+ photopolresult.reducedfits.set(reducedfits, through_defaults={'astrosource': astrosource, 'reduction': reduction}, clear=True)
photopolresult.save()
else:
logger.debug(f'Db entry for photopolresult already exists for {reducedfits=}, {astrosource=} and {reduction=}, using it instead.')
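For context on the `clear=True` change above, a minimal sketch (variable names taken from this hunk) of why it matters for `through_defaults` in Django's many-to-many `set()`:

```python
defaults = {"astrosource": astrosource, "reduction": reduction}

# Default (clear=False): only the set difference is added/removed, so ReducedFits that were
# already linked keep whatever astrosource/reduction their existing through rows had.
photopolresult.reducedfits.set(reducedfits, through_defaults=defaults)

# clear=True: the relation is emptied first and rebuilt, so every through row
# is recreated with the given astrosource/reduction.
photopolresult.reducedfits.set(reducedfits, through_defaults=defaults, clear=True)
```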
@@ -292,4 +292,5 @@ def save(self, *args, **kwargs):
def reducedfits_on_change(sender, instance, action, **kwargs):
""" Updates automatically filled fields when -after- reducedfits are altered."""
if action.startswith('post_'):
- instance.update_fields()
+ if instance.reducedfits.count() > 0:
+ instance.update_fields()
diff --git a/iop4lib/db/rawfit.py b/iop4lib/db/rawfit.py
index 699a7259..97b118b3 100644
--- a/iop4lib/db/rawfit.py
+++ b/iop4lib/db/rawfit.py
@@ -282,7 +282,10 @@ def procure_local_file(self):
if self.fileexists:
if iop4conf.set_rawdata_readonly:
- os.chmod(self.filepath, stat.S_IREAD)
+ # this will remove the write permission from the file for all users
+ current_perms = os.stat(self.filepath).st_mode
+ new_perms = current_perms & (~stat.S_IWUSR) & (~stat.S_IWGRP) & (~stat.S_IWOTH)
+ os.chmod(self.filepath, new_perms)
if self.auto_classify:
self.classify()
diff --git a/iop4lib/db/reducedfit.py b/iop4lib/db/reducedfit.py
index cdcca543..aecace94 100644
--- a/iop4lib/db/reducedfit.py
+++ b/iop4lib/db/reducedfit.py
@@ -207,8 +207,8 @@ def header_hintcoord(self):
def header_hintobject(self):
return self.rawfit.header_hintobject
- def get_astrometry_position_hint(self, allsky=False, n_field_width=1.5):
- return Instrument.by_name(self.instrument).get_astrometry_position_hint(self.rawfit, allsky=allsky, n_field_width=n_field_width)
+ def get_astrometry_position_hint(self, allsky=False, n_field_width=1.5, hintsep=None):
+ return Instrument.by_name(self.instrument).get_astrometry_position_hint(self.rawfit, allsky=allsky, n_field_width=n_field_width, hintsep=hintsep)
def get_astrometry_size_hint(self):
return Instrument.by_name(self.instrument).get_astrometry_size_hint(self.rawfit)
diff --git a/iop4lib/instruments/andor_cameras.py b/iop4lib/instruments/andor_cameras.py
index 54322ad4..73c505f7 100644
--- a/iop4lib/instruments/andor_cameras.py
+++ b/iop4lib/instruments/andor_cameras.py
@@ -149,17 +149,28 @@ def get_header_hintcoord(cls, rawfit):
return hint_coord
@classmethod
- def get_astrometry_position_hint(cls, rawfit, allsky=False, n_field_width=1.5):
- """ Get the position hint from the FITS header as an astrometry.PositionHint."""
+ def get_astrometry_position_hint(cls, rawfit, allsky=False, n_field_width=1.5, hintsep=None):
+ """ Get the position hint from the FITS header as an astrometry.PositionHint.
+
+ Parameters
+ ----------
+ allsky: bool, optional
+ If True, the hint will cover the whole sky, and n_field_width and hintsep will be ignored.
+ n_field_width: float, optional
+ The search radius in units of field width. Default is 1.5.
+ hintsep: Quantity, optional
+ The search radius, as an astropy angular Quantity.
+ """
hintcoord = cls.get_header_hintcoord(rawfit)
if allsky:
- hintsep = 180.0
+ hintsep = 180.0 * u.deg
else:
- hintsep = (n_field_width * cls.field_width_arcmin*u.Unit("arcmin")).to_value(u.deg)
+ if hintsep is None:
+ hintsep = (n_field_width * cls.field_width_arcmin*u.Unit("arcmin"))
- return astrometry.PositionHint(ra_deg=hintcoord.ra.deg, dec_deg=hintcoord.dec.deg, radius_deg=hintsep)
+ return astrometry.PositionHint(ra_deg=hintcoord.ra.deg, dec_deg=hintcoord.dec.deg, radius_deg=hintsep.to_value("deg"))
@classmethod
def get_astrometry_size_hint(cls, rawfit):
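A short usage sketch of the new `hintsep` parameter (hypothetical separation value and ReducedFit instance `redf`; any angular Quantity works, it is converted to degrees internally):

```python
import astropy.units as u

# hypothetical: ask for a 30 arcmin search radius around the header coordinates
hint = redf.get_astrometry_position_hint(hintsep=30 * u.arcmin)
print(hint.ra_deg, hint.dec_deg, hint.radius_deg)  # radius_deg should be 0.5
```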
diff --git a/iop4lib/instruments/cafos.py b/iop4lib/instruments/cafos.py
index 8c52816a..3a0c5128 100644
--- a/iop4lib/instruments/cafos.py
+++ b/iop4lib/instruments/cafos.py
@@ -116,17 +116,28 @@ def get_header_hintcoord(cls, rawfit):
return hint_coord
@classmethod
- def get_astrometry_position_hint(cls, rawfit, allsky=False, n_field_width=1.5):
- """ Get the position hint from the FITS header as an astrometry.PositionHint object. """
+ def get_astrometry_position_hint(cls, rawfit, allsky=False, n_field_width=1.5, hintsep=None):
+ """ Get the position hint from the FITS header as an astrometry.PositionHint object.
+
+ Parameters
+ ----------
+ allsky: bool, optional
+ If True, the hint will cover the whole sky, and n_field_width and hintsep will be ignored.
+ n_field_width: float, optional
+ The search radius in units of field width. Default is 1.5.
+ hintsep: Quantity, optional
+ The search radius, as an astropy angular Quantity.
+ """
hintcoord = cls.get_header_hintcoord(rawfit)
if allsky:
- hintsep = 180
+ hintsep = 180.0 * u.deg
else:
- hintsep = n_field_width * u.Quantity("16 arcmin").to_value(u.deg) # 16 arcmin is the full field size of the CAFOS T2.2, our cut is smaller (6.25, 800x800, but the pointing kws might be from anywhere in the full field)
+ if hintsep is None:
+ hintsep = n_field_width * u.Quantity("16 arcmin") # 16 arcmin is the full field size of the CAFOS T2.2, our cut is smaller (6.25, 800x800, but the pointing kws might be from anywhere in the full field)
- return astrometry.PositionHint(ra_deg=hintcoord.ra.deg, dec_deg=hintcoord.dec.deg, radius_deg=hintsep)
+ return astrometry.PositionHint(ra_deg=hintcoord.ra.deg, dec_deg=hintcoord.dec.deg, radius_deg=hintsep.to_value(u.deg))
@classmethod
def get_astrometry_size_hint(cls, rawfit):
diff --git a/iop4lib/instruments/dipol.py b/iop4lib/instruments/dipol.py
index 3dc8dba1..5f46e45d 100644
--- a/iop4lib/instruments/dipol.py
+++ b/iop4lib/instruments/dipol.py
@@ -11,6 +11,7 @@
import astrometry
import numpy as np
import matplotlib as mplt
+import matplotlib.patheffects
import matplotlib.pyplot as plt
import astropy.units as u
from astropy.stats import sigma_clipped_stats
@@ -21,13 +22,14 @@
from photutils.segmentation import make_2dgaussian_kernel
from astropy.wcs.utils import fit_wcs_from_points
from astropy.coordinates import Angle, SkyCoord
+from astropy.time import Time
import itertools
import datetime
import math
import gc
# iop4lib imports
-from iop4lib.enums import *
+from iop4lib.enums import IMGTYPES, BANDS, OBSMODES, SRCTYPES, INSTRUMENTS, REDUCTIONMETHODS
from .instrument import Instrument
from iop4lib.utils import imshow_w_sources, get_candidate_rank_by_matchs, get_angle_from_history, build_wcs_centered_on, get_simbad_sources
from iop4lib.utils.sourcedetection import get_sources_daofind, get_segmentation, get_cat_sources_from_segment_map, get_bkg
@@ -216,13 +218,17 @@ def apply_masters(cls, reducedfit):
rf_data = fits.getdata(reducedfit.rawfit.filepath)
mb_data = fits.getdata(reducedfit.masterbias.filepath)[idx]
- mf_data = fits.getdata(reducedfit.masterflat.filepath)[idx]
+
+ if reducedfit.obsmode == OBSMODES.PHOTOMETRY:
+ mf_data = fits.getdata(reducedfit.masterflat.filepath)[idx]
+ else: # no flat fielding for polarimetry
+ mf_data = 1.0
if reducedfit.masterdark is not None:
md_dark = fits.getdata(reducedfit.masterdark.filepath)[idx]
else :
logger.warning(f"{reducedfit}: no masterdark found, assuming dark current = 0, is this a CCD camera and it's cold?")
- md_dark = 0
+ md_dark = 0.0
data_new = (rf_data - mb_data - md_dark*reducedfit.rawfit.exptime) / (mf_data)
@@ -304,8 +310,18 @@ def get_astrometry_size_hint(cls, rawfit: 'RawFit'):
return astrometry.SizeHint(lower_arcsec_per_pixel=0.95*cls.arcsec_per_pix, upper_arcsec_per_pixel=1.05*cls.arcsec_per_pix)
@classmethod
- def get_astrometry_position_hint(cls, rawfit: 'RawFit', allsky=False, n_field_width=1.5):
- """ Get the position hint from the FITS header as an astrometry.PositionHint."""
+ def get_astrometry_position_hint(cls, rawfit: 'RawFit', allsky=False, n_field_width=1.5, hintsep=None):
+ """ Get the position hint from the FITS header as an astrometry.PositionHint.
+
+ Parameters
+ ----------
+ allsky: bool, optional
+ If True, the hint will cover the whole sky, and n_field_width and hintsep will be ignored.
+ n_field_width: float, optional
+ The search radius in units of field width. Default is 1.5.
+ hintsep: Quantity, optional
+ The search radius, as an astropy angular Quantity.
+ """
hintcoord = cls.get_header_hintcoord(rawfit)
@@ -314,11 +330,12 @@ def get_astrometry_position_hint(cls, rawfit: 'RawFit', allsky=False, n_field_wi
return None
if allsky:
- hintsep = 180.0
+ hintsep = 180.0 * u.deg
else:
- hintsep = (n_field_width * cls.field_width_arcmin*u.Unit("arcmin")).to_value(u.deg)
+ if hintsep is None:
+ hintsep = (n_field_width * cls.field_width_arcmin*u.Unit("arcmin"))
- return astrometry.PositionHint(ra_deg=hintcoord.ra.deg, dec_deg=hintcoord.dec.deg, radius_deg=hintsep)
+ return astrometry.PositionHint(ra_deg=hintcoord.ra.deg, dec_deg=hintcoord.dec.deg, radius_deg=hintsep.to_value(u.deg))
@classmethod
def has_pairs(cls, fit_instance: 'ReducedFit' or 'RawFit') -> bool:
@@ -392,8 +409,8 @@ def build_wcs(cls, reducedfit: 'ReducedFit', summary_kwargs : dict = {'build_sum
# Gather some info to perform a good decision on which methods to use
- n_estimate = len(cls._estimate_positions_from_segments(redf=reducedfit, n_seg_threshold=1.5, centered=False))
- n_estimate_centered = len(cls._estimate_positions_from_segments(redf=reducedfit, n_seg_threshold=1.5, centered=True))
+ n_estimate = len(cls._estimate_positions_from_segments(redf=reducedfit, n_seg_threshold=1.3, npixels=64, centered=False))
+ n_estimate_centered = len(cls._estimate_positions_from_segments(redf=reducedfit, n_seg_threshold=1.3, npixels=64, centered=True))
redf_phot = ReducedFit.objects.filter(instrument=reducedfit.instrument,
sources_in_field__in=[reducedfit.header_hintobject],
obsmode=OBSMODES.PHOTOMETRY,
@@ -434,11 +451,13 @@ def _try_quad_method():
n_seg_threshold_L = [300, 200, 200, 100, 50, 25, 12, 6]
npixels_L = [128, 64]
else:
- n_seg_threshold_L = [1.0, 0.9]
+ n_seg_threshold_L = [1.1, 1.0, 0.9]
npixels_L = [64, 32]
- for npixels, n_seg_threshold in itertools.product(npixels_L, n_seg_threshold_L):
- if (build_wcs_result := cls._build_wcs_for_polarimetry_images_photo_quads(reducedfit, summary_kwargs=summary_kwargs, n_seg_threshold=n_seg_threshold, npixels=npixels)):
+ min_quad_distance_L = [4.0, 8.0]
+
+ for npixels, n_seg_threshold, min_quad_distance in itertools.product(npixels_L, n_seg_threshold_L, min_quad_distance_L):
+ if (build_wcs_result := cls._build_wcs_for_polarimetry_images_photo_quads(reducedfit, summary_kwargs=summary_kwargs, n_seg_threshold=n_seg_threshold, npixels=npixels, min_quad_distance=min_quad_distance)):
break
else:
build_wcs_result = BuildWCSResult(success=False)
@@ -450,7 +469,7 @@ def _try_catalog_method():
n_seg_threshold_L = [300, 200, 200, 100, 50, 25, 12, 6]
npixels_L = [128, 64]
else:
- n_seg_threshold_L = [1.0, 0.9, 0.8, 0.7, 0.6]
+ n_seg_threshold_L = [1.1, 1.0, 0.9, 0.8, 0.7, 0.6]
npixels_L = [64, 32]
if n_expected_calibrators > 0 or n_expected_simbad_sources > 0:
@@ -468,8 +487,10 @@ def _try_catalog_method():
method_try_order = [_try_EO_method, _try_quad_method, _try_catalog_method]
elif target_src.srctype == SRCTYPES.BLAZAR:
## reduce false positives by forcing the use of the quad method when it must work
- if redf_phot is not None and n_estimate > 6:
+ if redf_phot is not None and n_estimate > 5:
method_try_order = [_try_quad_method]
+ elif redf_phot is not None and n_estimate >= 4:
+ method_try_order = [_try_quad_method, _try_catalog_method, _try_EO_method]
else:
method_try_order = [_try_catalog_method, _try_quad_method, _try_EO_method]
@@ -525,7 +546,10 @@ def _build_shotgun_params(cls, redf: 'ReducedFit'):
@classmethod
- def _build_wcs_for_polarimetry_images_photo_quads(cls, redf: 'ReducedFit', summary_kwargs : dict = {'build_summary_images':True, 'with_simbad':True}, n_seg_threshold=1.5, npixels=32):
+ def _build_wcs_for_polarimetry_images_photo_quads(cls, redf: 'ReducedFit', summary_kwargs : dict = None, n_seg_threshold=1.5, npixels=32, min_quad_distance=4.0):
+
+ if summary_kwargs is None:
+ summary_kwargs = {'build_summary_images':True, 'with_simbad':True}
from iop4lib.db import ReducedFit
@@ -574,6 +598,8 @@ def _build_wcs_for_polarimetry_images_photo_quads(cls, redf: 'ReducedFit', summa
logger.debug(f"Using {len(sets_L[0])} sources in polarimetry field and {len(sets_L[1])} in photometry field.")
if summary_kwargs['build_summary_images']:
+ logger.debug(f"Building summary image for astrometry detected sources.")
+
fig = mplt.figure.Figure(figsize=(12,6), dpi=iop4conf.mplt_default_dpi)
axs = fig.subplots(nrows=1, ncols=2)
@@ -587,7 +613,7 @@ def _build_wcs_for_polarimetry_images_photo_quads(cls, redf: 'ReducedFit', summa
axs[0].set_title("Polarimetry field")
axs[1].set_title("Photometry field")
- fig.savefig(Path(redf.filedpropdir) / "astrometry_detected_sources.png", bbox_inches="tight")
+ fig.savefig(Path(redf_pol.filedpropdir) / "astrometry_detected_sources.png", bbox_inches="tight")
fig.clf()
# Build the quads for each field
@@ -619,12 +645,12 @@ def _build_wcs_for_polarimetry_images_photo_quads(cls, redf: 'ReducedFit', summa
# the best 5 that have less than 1px of error per quad (4 points)
- idx_selected = np.where(all_distances < 4.0)[0]
+ idx_selected = np.where(all_distances < min_quad_distance)[0]
indices_selected = all_indices[idx_selected]
distances_selected = all_distances[idx_selected]
- if np.sum(idx_selected) == 0:
- logger.error(f"No quads with distance < 4.0, minimum at {min(all_distances)=} returning success = False.")
+ if len(idx_selected) == 0:
+ logger.error(f"No quads with distance < {min_quad_distance}, minimum at {min(all_distances)=} returning success = False.")
return BuildWCSResult(success=False, wcslist=None, info={'redf_phot__pk':redf_phot.pk, 'redf_phot__fileloc':redf_phot.fileloc})
else:
idx_selected = np.argsort(distances_selected)[:5]
@@ -632,11 +658,54 @@ def _build_wcs_for_polarimetry_images_photo_quads(cls, redf: 'ReducedFit', summa
distances_selected = all_distances[idx_selected]
# get linear transforms
- logger.debug(f"Selected {len(indices_selected)} quads with distance < 4.0. I will get the one with less deviation from the median linear transform.")
+ logger.debug(f"Selected {len(indices_selected)} quads with distance < {min_quad_distance}. I will get the one with less deviation from the median linear transform.")
R_L, t_L = zip(*[find_linear_transformation(qorder(quads_1[i]), qorder(quads_2[j])) for i,j in indices_selected])
logger.debug(f"{t_L=}")
+
+ logger.debug(f"Filtering out big translations (<1020px)")
+
+ _indices_selected = indices_selected[np.array([np.linalg.norm(t) < 1020 for t in t_L])]
+
+ logger.debug(f"Filtered to {len(_indices_selected)} quads with distance < 4.0 and translation < 1020px.")
+
+ if len(_indices_selected) == 0:
+ logger.error(f"No quads with distance < {min_quad_distance} and translation < 1000px, building summary image of the 3 best quads and returning success = False.")
+
+ colors = [color for color in mplt.rcParams["axes.prop_cycle"].by_key()["color"]]
+
+ fig = mplt.figure.Figure(figsize=(12,6), dpi=iop4conf.mplt_default_dpi)
+ axs = fig.subplots(nrows=1, ncols=2)
+
+ for (i, j), color in list(zip(indices_selected, colors))[:3]:
+
+ tij = find_linear_transformation(qorder(quads_1[i]), qorder(quads_2[j]))[1]
+
+ for ax, data, quad, positions in zip(axs, [redf_pol.mdata, photdata_subframe], [quads_1[i], quads_2[j]], sets_L):
+ imshow_w_sources(data, pos1=positions, ax=ax)
+ x, y = np.array(order(quad)).T
+ ax.fill(x, y, edgecolor='k', fill=True, facecolor=mplt.colors.to_rgba(color, alpha=0.2))
+ for pi, p in enumerate(np.array((qorder(quad)))):
+ xp = p[0]
+ yp = p[1]
+ ax.text(xp, yp, f"{pi}", fontsize=16, color=color, path_effects=[mplt.patheffects.Stroke(linewidth=1, foreground='black'), mplt.patheffects.Normal()])
+
+ fig.suptitle(f"dist({i},{j})={distance(hash_func(quads_1[i]),hash_func(quads_2[j])):.3f}, norm(t) = {np.linalg.norm(tij):.0f} px", y=0.83)
+
+ axs[0].set_title("Polarimetry")
+ axs[1].set_title("Photometry")
+
+ fig.savefig(Path(redf_pol.filedpropdir) / "astrometry_matched_quads.png", bbox_inches="tight")
+ fig.clf()
+
+ return BuildWCSResult(success=False, wcslist=None, info={'redf_phot__pk':redf_phot.pk, 'redf_phot__fileloc':redf_phot.fileloc})
+ else:
+ indices_selected = _indices_selected
+
+ R_L, t_L = zip(*[find_linear_transformation(qorder(quads_1[i]), qorder(quads_2[j])) for i,j in indices_selected])
+
+
# get the closest one to the t_L mean
median_t = np.median(t_L, axis=0)
@@ -660,6 +729,8 @@ def _build_wcs_for_polarimetry_images_photo_quads(cls, redf: 'ReducedFit', summa
axs = fig.subplots(nrows=1, ncols=2)
for (i, j), color in list(zip(indices_selected, colors))[:1]:
+
+ tij = find_linear_transformation(qorder(quads_1[i]), qorder(quads_2[j]))[1]
for ax, data, quad, positions in zip(axs, [redf_pol.mdata, photdata_subframe], [quads_1[i], quads_2[j]], sets_L):
imshow_w_sources(data, pos1=positions, ax=ax)
@@ -668,14 +739,14 @@ def _build_wcs_for_polarimetry_images_photo_quads(cls, redf: 'ReducedFit', summa
for pi, p in enumerate(np.array((qorder(quad)))):
xp = p[0]
yp = p[1]
- ax.text(xp, yp, f"{pi}", fontsize=16, color="y")
+ ax.text(xp, yp, f"{pi}", fontsize=16, color=color, path_effects=[mplt.patheffects.Stroke(linewidth=1, foreground='black'), mplt.patheffects.Normal()])
- fig.suptitle(f"dist({i},{j})={distance(hash_func(quads_1[i]),hash_func(quads_2[j])):.3f}", y=0.83)
+ fig.suptitle(f"dist({i},{j})={distance(hash_func(quads_1[i]),hash_func(quads_2[j])):.3f}, norm(t) = {np.linalg.norm(tij):.0f} px", y=0.83)
axs[0].set_title("Polarimetry")
axs[1].set_title("Photometry")
- fig.savefig(Path(redf.filedpropdir) / "astrometry_matched_quads.png", bbox_inches="tight")
+ fig.savefig(Path(redf_pol.filedpropdir) / "astrometry_matched_quads.png", bbox_inches="tight")
fig.clf()
# Build the WCS
@@ -687,14 +758,16 @@ def _build_wcs_for_polarimetry_images_photo_quads(cls, redf: 'ReducedFit', summa
# get the pre wcs with the target in the center of the image
- angle_mean, angle_std = get_angle_from_history(redf, target_src)
- if 'FLIPSTAT' in redf.rawfit.header:
+ angle_mean, angle_std = get_angle_from_history(redf_pol, target_src)
+ if 'FLIPSTAT' in redf_pol.rawfit.header:
# TODO: check that indeed the mere presence of this keyword means that the image is flipped, without the need of checking the value.
# FLIPSTAT is a MaximDL thing only, but it seems that the image is flipped whenever the keyword is present, regardless of the value.
angle = - angle_mean
else:
angle = angle_mean
+ logger.debug(f"Using {angle=} for pre wcs.")
+
pre_wcs = build_wcs_centered_on((redf_pol.width//2,redf_pol.height//2), redf=redf_phot, angle=angle)
# get the pixel arrays in the polarimetry field and in the FULL photometry field to relate them
@@ -708,6 +781,7 @@ def _build_wcs_for_polarimetry_images_photo_quads(cls, redf: 'ReducedFit', summa
wcslist = [wcs1, wcs2]
if summary_kwargs['build_summary_images']:
+ logger.debug(f"Building summary image for astrometry.")
fig = mplt.figure.Figure(figsize=(6,6), dpi=iop4conf.mplt_default_dpi)
ax = fig.subplots(nrows=1, ncols=1, subplot_kw={'projection': wcslist[0]})
plot_preview_astrometry(redf_pol, with_simbad=True, has_pairs=True, wcs1=wcslist[0], wcs2=wcslist[1], ax=ax, fig=fig)
@@ -722,7 +796,7 @@ def _build_wcs_for_polarimetry_images_photo_quads(cls, redf: 'ReducedFit', summa
@classmethod
- def _build_wcs_for_polarimetry_images_catalog_matching(cls, redf: 'ReducedFit', summary_kwargs : dict = {'build_summary_images':True, 'with_simbad':True}, n_seg_threshold=1.5, npixels=64):
+ def _build_wcs_for_polarimetry_images_catalog_matching(cls, redf: 'ReducedFit', summary_kwargs : dict = None, n_seg_threshold=1.5, npixels=64):
r""" Deprecated. Build WCS for DIPOL polarimetry images by matching the found sources positions with the catalog.
.. warning::
@@ -734,6 +808,9 @@ def _build_wcs_for_polarimetry_images_catalog_matching(cls, redf: 'ReducedFit',
"""
+ if summary_kwargs is None:
+ summary_kwargs = {'build_summary_images':True, 'with_simbad':True}
+
# disp_allowed_err = 1.5*cls.disp_std
disp_allowed_err = np.array([30,30]) # most times should be much smaller (1.5*std)
# but in bad cases, this is ~1 sigma of the gaussians
@@ -989,7 +1066,7 @@ def _build_wcs_for_polarimetry_images_catalog_matching(cls, redf: 'ReducedFit',
@classmethod
- def _build_wcs_for_polarimetry_from_target_O_and_E(cls, redf: 'ReducedFit', summary_kwargs : dict = {'build_summary_images':True, 'with_simbad':True}, n_seg_threshold=3.0, npixels=64) -> BuildWCSResult:
+ def _build_wcs_for_polarimetry_from_target_O_and_E(cls, redf: 'ReducedFit', summary_kwargs : dict = None, n_seg_threshold=3.0, npixels=64) -> BuildWCSResult:
r""" Deprecated. Build WCS for DIPOL polarimetry images by matching the found sources positions with the catalog.
.. warning::
@@ -1001,6 +1078,9 @@ def _build_wcs_for_polarimetry_from_target_O_and_E(cls, redf: 'ReducedFit', summ
"""
+ if summary_kwargs is None:
+ summary_kwargs = {'build_summary_images':True, 'with_simbad':True}
+
# disp_allowed_err = 1.5*cls.disp_std
disp_allowed_err = np.array([30,30]) # most times should be much smaller (1.5*std)
# but in bad cases, this is ~1 sigma of the gaussians
@@ -1127,16 +1207,41 @@ def _build_wcs_for_polarimetry_from_target_O_and_E(cls, redf: 'ReducedFit', summ
@classmethod
def estimate_common_apertures(cls, reducedfits, reductionmethod=None, fit_boxsize=None, search_boxsize=(90,90)):
aperpix, r_in, r_out, fit_res_dict = super().estimate_common_apertures(reducedfits, reductionmethod=reductionmethod, fit_boxsize=fit_boxsize, search_boxsize=search_boxsize, fwhm_min=5.0, fwhm_max=60)
+
sigma = fit_res_dict['sigma']
+ fwhm = fit_res_dict["mean_fwhm"]
+
+ return 1.1*fwhm, 6*fwhm, 10*fwhm, fit_res_dict
+
+ @classmethod
+ def get_instrumental_polarization(cls, reducedfit) -> dict:
+ """ Returns the instrumental polarization for to be used for a given reducedfit.
+
+ The instrumental polarization is a dictionary with the following keys:
+ - Q_inst: instrumental Q Stokes parameter (0-1)
+ - dQ_inst: instrumental Q error (0-1)
+ - U_inst: instrumental U Stokes parameter (0-1)
+ - dU_inst: instrumental U error (0-1)
+ - CPA: zero-angle (deg)
+ - dCPA: zero-angle error (deg)
+ """
+
+ if reducedfit.juliandate <= Time("2023-09-28 12:00").jd: # mirror cleaning
+ CPA = 44.5
+ dCPA = 0.05
+ Q_inst = 0.05777
+ dQ_inst = 0.005
+ U_inst = -3.77095
+ dU_inst = 0.005
+ else:
+ CPA = 45.1
+ dCPA = 0.05
+ Q_inst = -0.0138 / 100
+ dQ_inst = 0.005
+ U_inst = -4.0806 / 100
+ dU_inst = 0.005
+
+ return {'Q_inst':Q_inst, 'dQ_inst':dQ_inst, 'U_inst':U_inst, 'dU_inst':dU_inst, 'CPA':CPA, 'dCPA':dCPA}
- if reducedfits[0].header_hintobject.name == "2200+420":
- r = min(1.8*sigma, 17)
- r_in = max(5*sigma, 80)
- r_out = 2*r_in
- return r, r_in, r_out, fit_res_dict
-
- return 1.8*sigma, 5*sigma, 10*sigma, fit_res_dict
-
@classmethod
def compute_relative_polarimetry(cls, polarimetry_group):
@@ -1176,7 +1281,7 @@ def compute_relative_polarimetry(cls, polarimetry_group):
return
if group_sources != set.union(*map(set, sources_in_field_qs_list)):
- logger.warning(f"Sources in field do not match for all polarimetry groups: {set.difference(*map(set, sources_in_field_qs_list))}")
+ logger.warning("Sources in field do not match for all polarimetry groups (ReducedFit %s): %s" % (",".join([str(redf.pk) for redf in polarimetry_group]), str(set.difference(*map(set, sources_in_field_qs_list)))))
## check rotation angles
@@ -1206,6 +1311,10 @@ def compute_relative_polarimetry(cls, polarimetry_group):
photopolresult_L = list()
for astrosource in group_sources:
+
+ if astrosource.calibrates.count() > 0:
+ continue
+
logger.debug(f"Computing relative polarimetry for {astrosource}.")
# if any angle is missing for some pair, it uses the equivalent angle of the other pair
@@ -1217,11 +1326,13 @@ def compute_relative_polarimetry(cls, polarimetry_group):
continue
if len(aperphotresults) != 32:
- logger.warning(f"There should be 32 aperphotresults for each astrosource in the group, there are {len(aperphotresults)}.")
+ logger.error(f"There should be 32 aperphotresults for each astrosource in the group, there are {len(aperphotresults)} for {astrosource.name}.")
+ continue
values = get_column_values(aperphotresults, ['reducedfit__rotangle', 'flux_counts', 'flux_counts_err', 'pairs'])
angles_L = list(sorted(set(values['reducedfit__rotangle'])))
+
if len(angles_L) != 16:
logger.warning(f"There should be 16 different angles, there are {len(angles_L)}.")
@@ -1231,11 +1342,19 @@ def compute_relative_polarimetry(cls, polarimetry_group):
fluxD[pair] = {}
fluxD[pair][angle] = (flux, flux_err)
- F_O = np.array([(fluxD['O'][angle][0]) for angle in angles_L])
- dF_O = np.array([(fluxD['O'][angle][1]) for angle in angles_L])
+ # DIPOL has the ordinary image to the left and the extraordinary to the right; IOP4 astrometric calibration currently works the other way around, so swap them here
- F_E = np.array([(fluxD['E'][angle][0]) for angle in angles_L])
- dF_E = np.array([(fluxD['E'][angle][1]) for angle in angles_L])
+ F_O = np.array([(fluxD['E'][angle][0]) for angle in angles_L])
+ dF_O = np.array([(fluxD['E'][angle][1]) for angle in angles_L])
+
+ F_E = np.array([(fluxD['O'][angle][0]) for angle in angles_L])
+ dF_E = np.array([(fluxD['O'][angle][1]) for angle in angles_L])
+
+ N = len(angles_L)
+
+ if astrosource.name == "2200+420":
+ F_O = np.roll(F_E, 2)
+ dF_O = np.roll(dF_E, 2)
F = (F_O - F_E) / (F_O + F_E)
dF = 2 / ( F_O + F_E )**2 * np.sqrt(F_E**2 * dF_O**2 + F_O**2 * dF_E**2)
@@ -1243,8 +1362,6 @@ def compute_relative_polarimetry(cls, polarimetry_group):
I = (F_O + F_E)
dI = np.sqrt(dF_O**2 + dF_E**2)
- N = len(angles_L)
-
# Compute both the uncorrected and corrected values
Qr_uncorr = 2/N * sum([F[i] * math.cos(math.pi/2*i) for i in range(N)])
@@ -1255,24 +1372,23 @@ def compute_relative_polarimetry(cls, polarimetry_group):
Ur_uncorr = 2/N * sum([F[i] * math.sin(math.pi/2*i) for i in range(N)])
dUr_uncorr = 2/N * math.sqrt(sum([dF[i]**2 * math.sin(math.pi/2*i)**2 for i in range(N)]))
- # Instrumental polarization
- # TODO: make this value editable and dependent on epoch date
-
- Q_inst = +0.057/100
- dQ_inst = 0
-
- U_inst = -3.77/100
- dU_inst = 0
+ instrumental_polarization = cls.get_instrumental_polarization(reducedfit=polarimetry_group[0])
+ Q_inst = instrumental_polarization['Q_inst']
+ dQ_inst = instrumental_polarization['dQ_inst']
+ U_inst = instrumental_polarization['U_inst']
+ dU_inst = instrumental_polarization['dU_inst']
+ CPA = instrumental_polarization['CPA']
+ dCPA = instrumental_polarization['dCPA']
logger.debug(f"{Q_inst=}, {dQ_inst=}")
logger.debug(f"{U_inst=}, {dU_inst=}")
- Qr = Qr_uncorr + Q_inst
+ Qr = Qr_uncorr - Q_inst
dQr = math.sqrt(dQr_uncorr**2 + dQ_inst**2)
logger.debug(f"{Qr=}, {dQr=}")
- Ur = Ur_uncorr + U_inst
+ Ur = Ur_uncorr - U_inst
dUr = math.sqrt(dUr_uncorr**2 + dU_inst**2)
logger.debug(f"{Ur=}, {dUr=}")
@@ -1283,14 +1399,15 @@ def _get_p_and_chi(Qr, Ur, dQr, dUr):
dP = 1/P * math.sqrt((Qr*dQr)**2 + (Ur*dUr)**2)
# polarization angle (degrees)
- chi = 0.5 * math.degrees(math.atan2(-Qr, Ur))
+ chi = 0.5 * math.degrees(math.atan2(Ur, Qr))
dchi = 0.5 * math.degrees( 1 / (Qr**2 + Ur**2) * math.sqrt((Qr*dUr)**2 + (Ur*dQr)**2) )
return P, chi, dP, dchi
- # linear polarization (0 to 1)
P_uncorr, chi_uncorr, dP_uncorr, dchi_uncorr = _get_p_and_chi(Qr_uncorr, Ur_uncorr, dQr_uncorr, dUr_uncorr)
P, chi, dP, dchi = _get_p_and_chi(Qr, Ur, dQr, dUr)
+ chi = chi + CPA
+ dchi = math.sqrt(dchi**2 + dCPA**2)
logger.debug(f"{P=}, {chi=}, {dP=}, {dchi=}")
diff --git a/iop4lib/instruments/instrument.py b/iop4lib/instruments/instrument.py
index c864e679..73f820e9 100644
--- a/iop4lib/instruments/instrument.py
+++ b/iop4lib/instruments/instrument.py
@@ -16,6 +16,7 @@
import itertools
import datetime
import glob
+import astrometry
# iop4lib imports
from iop4lib.enums import *
@@ -43,19 +44,19 @@ class Instrument(metaclass=ABCMeta):
@abstractmethod
def name(self):
""" The name of the instrument."""
- pass
+ raise NotImplementedError
@property
@abstractmethod
def telescope(self):
""" The telescope this instrument is mounted on."""
- pass
+ raise NotImplementedError
@property
@abstractmethod
def instrument_kw(self):
""" The keyword in the FITS header that identifies this instrument."""
- pass
+ raise NotImplementedError
# Instrument specific properties (subclasses must implement these)
@@ -63,19 +64,19 @@ def instrument_kw(self):
@abstractmethod
def field_width_arcmin(self):
""" Field width in arcmin."""
- pass
+ raise NotImplementedError
@property
@abstractmethod
def arcsec_per_pix(self):
""" Pixel size in arcseconds per pixel."""
- pass
+ raise NotImplementedError
@property
@abstractmethod
def gain_e_adu(self):
""" Gain in e-/ADU. Used to compute the error in aperture photometry."""
- pass
+ raise NotImplementedError
@property
@abstractmethod
@@ -85,7 +86,7 @@ def required_masters(self):
Cooled CCD cameras will only need `required_masters = ['masterbias', 'masterflat']` in the subclass, since dark current is close to zero.
If dark current is not negligible, set `required_masters = ['masterbias', 'masterdark', 'masterflat']` in the subclass.
"""
- pass
+ raise NotImplementedError
# Class methods (you should be using these from the Instrument class, not subclasses)
@@ -178,15 +179,15 @@ def get_header_hintobject(self, rawfit) -> 'AstroSource' or None:
@classmethod
@abstractmethod
- def get_astrometry_position_hint(cls, rawfit, allsky=False, n_field_width=1.5):
+ def get_astrometry_position_hint(cls, rawfit, allsky=False, n_field_width=1.5, hintsep=None) -> astrometry.PositionHint:
""" Get the position hint from the FITS header as an astrometry.PositionHint object. """
- pass
+ raise NotImplementedError
@classmethod
@abstractmethod
- def get_astrometry_size_hint(cls, rawfit):
+ def get_astrometry_size_hint(cls, rawfit) -> astrometry.SizeHint:
""" Get the size hint for this telescope / rawfit."""
- pass
+ raise NotImplementedError
@classmethod
@abstractmethod
@@ -194,10 +195,10 @@ def has_pairs(cls, fit_instance: 'ReducedFit' or 'RawFit') -> bool:
""" Indicates whether both ordinary and extraordinary sources are present
in the file. At the moment, this happens only for CAFOS polarimetry
"""
- pass
+ raise NotImplementedError
@classmethod
- def build_wcs(self, reducedfit: 'ReducedFit', shotgun_params_kwargs : dict = dict(), summary_kwargs : dict = {'build_summary_images':True, 'with_simbad':True}) -> 'BuildWCSResult':
+ def build_wcs(self, reducedfit: 'ReducedFit', shotgun_params_kwargs : dict = None, summary_kwargs : dict = None) -> 'BuildWCSResult':
""" Build a WCS for a reduced fit from this instrument.
By default (Instrument class), this will just call the build_wcs_params_shotgun from iop4lib.utils.astrometry.
@@ -213,13 +214,39 @@ def build_wcs(self, reducedfit: 'ReducedFit', shotgun_params_kwargs : dict = di
Whether to query and plot a few Simbad sources in the image. Might be useful to
check whether the found coordinates are correct. Default is True.
"""
+
+ if shotgun_params_kwargs is None:
+ shotgun_params_kwargs = dict()
+
+ if summary_kwargs is None:
+ summary_kwargs = {'build_summary_images':True, 'with_simbad':True}
+
from iop4lib.utils.astrometry import build_wcs_params_shotgun
from iop4lib.utils.plotting import build_astrometry_summary_images
+
+ # if there is pointing mismatch information and it was not invoked with given allsky or position_hint,
+ # fine-tune the position_hint or set allsky = True if appropriate.
+ # otherwise leave it alone.
+
if reducedfit.header_hintobject is not None and 'allsky' not in shotgun_params_kwargs and 'position_hint' not in shotgun_params_kwargs:
- if reducedfit.header_hintobject.coord.separation(reducedfit.header_hintcoord) > u.Quantity("20 arcmin"):
- logger.debug(f"{reducedfit}: large pointing mismatch detected, setting allsky = True for the position hint.")
- shotgun_params_kwargs["allsky"] = [True]
+
+ pointing_mismatch = reducedfit.header_hintobject.coord.separation(reducedfit.header_hintcoord)
+
+ # minimum of 1.5 times the field width, and at most astrometry_allsky_septhreshold
+
+ hintsep = np.clip(1.1*pointing_mismatch,
+ 1.5*Instrument.by_name(reducedfit.instrument).field_width_arcmin*u.arcmin,
+ iop4conf.astrometry_allsky_septhreshold*u.arcmin)
+ shotgun_params_kwargs["position_hint"] = [reducedfit.get_astrometry_position_hint(hintsep=hintsep)]
+
+ # if the pointing mismatch is too large, set allsky = True if configured
+
+ if pointing_mismatch.to_value('arcmin') > iop4conf.astrometry_allsky_septhreshold:
+ if iop4conf.astrometry_allsky_allow:
+ logger.debug(f"{reducedfit}: large pointing mismatch detected ({pointing_mismatch.to_value('arcmin')} arcmin), setting allsky = True for the position hint.")
+ shotgun_params_kwargs["allsky"] = [True]
+ del shotgun_params_kwargs["position_hint"]
build_wcs_result = build_wcs_params_shotgun(reducedfit, shotgun_params_kwargs, summary_kwargs=summary_kwargs)
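A small worked sketch of the hint fine-tuning above, with hypothetical numbers (a ~9 arcmin field width and the default astrometry_allsky_septhreshold of 25 arcmin):

```python
import numpy as np
import astropy.units as u

field_width = 9.2 * u.arcmin  # hypothetical instrument field width
septhreshold = 25             # iop4conf.astrometry_allsky_septhreshold (arcmin)

for pointing_mismatch in [5, 18, 40] * u.arcmin:
    hintsep = np.clip(1.1 * pointing_mismatch,
                      1.5 * field_width,
                      septhreshold * u.arcmin)
    allsky = pointing_mismatch.to_value("arcmin") > septhreshold
    print(f"{pointing_mismatch=}: {hintsep=}, {allsky=}")

# 5 arcmin  -> hintsep 13.8 arcmin (floor at 1.5x field width), no all-sky
# 18 arcmin -> hintsep 19.8 arcmin (1.1x mismatch),             no all-sky
# 40 arcmin -> hintsep 25 arcmin (capped), all-sky only if astrometry_allsky_allow is set
```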
@@ -447,6 +474,8 @@ def compute_aperture_photometry(cls, redf, aperpix, r_in, r_out):
bkg_box_size = 100
elif redf.mdata.shape[0] == 900: # dipol polarimetry
bkg_box_size = 90
+ elif redf.mdata.shape[0] == 896: # dipol polarimetry (also)
+ bkg_box_size = 112
elif redf.mdata.shape[0] == 4144: # dipol photometry
bkg_box_size = 518
else:
@@ -479,6 +508,7 @@ def compute_aperture_photometry(cls, redf, aperpix, r_in, r_out):
AperPhotResult.create(reducedfit=redf,
astrosource=astrosource,
aperpix=aperpix,
+ r_in=r_in, r_out=r_out,
pairs=pairs,
bkg_flux_counts=bkg_flux_counts, bkg_flux_counts_err=bkg_flux_counts_err,
flux_counts=flux_counts, flux_counts_err=flux_counts_err)
diff --git a/iop4lib/iop4.py b/iop4lib/iop4.py
index 49965517..485e9e5b 100644
--- a/iop4lib/iop4.py
+++ b/iop4lib/iop4.py
@@ -153,20 +153,20 @@ def retry_failed_files():
def filter_epochname_by_date(epochname_list, date_start=None, date_end=None):
if date_start is not None:
- epochname_list = [epochname for epochname in epochname_list if Epoch.epochname_to_tel_night(epochname)[1] > datetime.date.fromisoformat(date_start)]
+ epochname_list = [epochname for epochname in epochname_list if Epoch.epochname_to_tel_night(epochname)[1] >= datetime.date.fromisoformat(date_start)]
if date_end is not None:
- epochname_list = [epochname for epochname in epochname_list if Epoch.epochname_to_tel_night(epochname)[1] < datetime.date.fromisoformat(date_end)]
+ epochname_list = [epochname for epochname in epochname_list if Epoch.epochname_to_tel_night(epochname)[1] <= datetime.date.fromisoformat(date_end)]
return epochname_list
def filter_filelocs_by_date(fileloc_list, date_start=None, date_end=None):
if date_start is not None:
- fileloc_list = [fileloc for fileloc in fileloc_list if RawFit.fileloc_to_tel_night_filename(fileloc)[1] > datetime.date.fromisoformat(date_start)]
+ fileloc_list = [fileloc for fileloc in fileloc_list if RawFit.fileloc_to_tel_night_filename(fileloc)[1] >= datetime.date.fromisoformat(date_start)]
if date_end is not None:
- fileloc_list = [fileloc for fileloc in fileloc_list if RawFit.fileloc_to_tel_night_filename(fileloc)[1] < datetime.date.fromisoformat(date_end)]
+ fileloc_list = [fileloc for fileloc in fileloc_list if RawFit.fileloc_to_tel_night_filename(fileloc)[1] <= datetime.date.fromisoformat(date_end)]
return fileloc_list
@@ -210,7 +210,6 @@ def main():
# parallelization options
parser.add_argument("--nthreads", dest="nthreads", type=int, default=None, help=" Number of threads to use when possible (default: %(default)s)", required=False)
- parser.add_argument("--use-ray-cluster", dest="ray_use_cluster", action="store_true", help=" Use ray for parallelization", required=False)
# epoch processing options
parser.add_argument('--epoch-list', dest='epochname_list', nargs='+', help=' List of epochs (e.g: T090/230102 T090/230204)', required=False)
@@ -269,9 +268,6 @@ def main():
if args.nthreads is not None:
iop4conf.max_concurrent_threads = args.nthreads
- if args.ray_use_cluster:
- iop4conf.ray_use_cluster = True
-
# Epochs
epochnames_to_process = set()
diff --git a/iop4lib/telescopes/osnt150.py b/iop4lib/telescopes/osnt150.py
index 984ecac0..5936c143 100644
--- a/iop4lib/telescopes/osnt150.py
+++ b/iop4lib/telescopes/osnt150.py
@@ -42,16 +42,3 @@ class OSNT150(OSNT090, Telescope, metaclass=ABCMeta):
ftp_password = iop4conf.osn_t150_password
ftp_encoding = 'utf-8'
- # telescope specific methods
-
-
- @classmethod
- def compute_relative_photometry(cls, rawfit):
- logger.warning(f"OSNT150.compute_relative_photometry not implemented yet, using OSNT090.compute_relative_photometry {super(cls)=}")
- super(OSNT150, cls).compute_relative_photometry(rawfit)
-
- @classmethod
- def compute_relative_polarimetry(cls, polarimetry_group):
- logger.warning(f"OSNT150.compute_relative_polarimetry not implemented yet, using OSNT090.compute_relative_polarimetry {super(cls)=}")
- super(OSNT150, cls).compute_relative_polarimetry(polarimetry_group)
-
diff --git a/iop4lib/utils/__init__.py b/iop4lib/utils/__init__.py
index b9e54943..741e9a49 100644
--- a/iop4lib/utils/__init__.py
+++ b/iop4lib/utils/__init__.py
@@ -349,7 +349,12 @@ def get_angle_from_history(redf: 'ReducedFit' = None,
angle_L = list()
for calibrated_fit in calibrated_fits:
- w = WCS(calibrated_fit.header, key="A")
+ try:
+ w = WCS(calibrated_fit.header, key="A")
+ except Exception as e: # usually KeyError
+ logger.warning(f"Could not get WCS 'A' from ReducedFit {calibrated_fit.id}: {e}")
+ continue
+
# Extract the PC matrix elements
pc_11, pc_12 = w.wcs.pc[0]
@@ -362,6 +367,10 @@ def get_angle_from_history(redf: 'ReducedFit' = None,
angle = - angle
angle_L.append(angle)
+
+ if len(angle_L) == 0:
+ logger.error(f"No angle found in history for {redf.instrument} {target_src.name}")
+ return np.nan, np.nan
angle_mean = np.mean(angle_L)
angle_std = np.std(angle_L)
diff --git a/iop4lib/utils/parallel.py b/iop4lib/utils/parallel.py
index 5366eb4b..c6da61b0 100644
--- a/iop4lib/utils/parallel.py
+++ b/iop4lib/utils/parallel.py
@@ -146,7 +146,7 @@ def _epoch_bulkreduce_multiprocessing_worker(reduced_fit: 'ReducedFit'):
try:
- # Start a timer that will send SIGALRM in 20 minutes
+ # Start a timer that will send SIGALRM in iop4conf.astrometry_timeout minutes
signal.signal(signal.SIGALRM, _epoch_bulkreduce_multiprocessing_worker_timeout_handler)
signal.alarm(iop4conf.astrometry_timeout*60)
reduced_fit.build_file()
@@ -198,132 +198,18 @@ def _epoch_bulkreduce_multiprocesing_mainloop(tasks, counter, reduced_L, epoch=N
return
+# COMPUTE RELATIVE POLARIMETRY IN MULTIPROCESSING POOL
+def _parallel_relative_polarimetry_helper(keys, group):
+ from iop4lib.instruments import Instrument
+ try:
+ Instrument.by_name(keys['instrument']).compute_relative_polarimetry(group)
+ except Exception as e:
+ logger.error(f"Error for {group=}. Exception {e}: {traceback.format_exc()}")
+ finally:
+ logger.info(f"Finished computing relative polarimetry for {group=}.")
-# BULK REDUCTION IN RAY CLUSTER
-
-
-def epoch_bulkreduce_ray(reduced_L):
- """Bulk reduction of a list of ReducedFits in a Ray cluster."""
-
- import os
- import ray
- from ray.util.multiprocessing import Pool
- import socket
- from dataclasses import dataclass
- from iop4lib.db import ReducedFit, AstroSource
-
- logger.info(f"Starting bulk reduction of {len(reduced_L)} ReducedFits in Ray cluster.")
-
- logger.info("Syncing raw files and DB to Ray cluster.")
- os.system(f"ray rsync-up priv.rayconfig.yaml")
- os.system(fr"rsync -v {iop4conf.db_path} {iop4conf.ray_cluster_address}:'{iop4conf.ray_db_path}'")
- os.system(fr"rsync -va --update {iop4conf.datadir}/raw/ {iop4conf.ray_cluster_address}:'{iop4conf.ray_datadir}/raw/'")
- os.system(fr"rsync -va --update --delete {iop4conf.datadir}/masterflat/ {iop4conf.ray_cluster_address}:'{iop4conf.ray_datadir}/masterflat/'")
- os.system(fr"rsync -va --update --delete {iop4conf.datadir}/masterbias/ {iop4conf.ray_cluster_address}:'{iop4conf.ray_datadir}/masterbias/'")
-
- logger.info("Connecting to Ray cluster at localhost:25000. Remember to attach to the cluster with 'ray attach priv.rayconfig.yaml -p 25000' and start the head node with 'ray stop' and 'ray start --head --ray-client-server-port 25000 --num-cpus=128'. Additionaly worker nodes can be started with 'ray start --address:head_address:port', port is usually 6379. It might be enough to use ray priv.rayconfig.yaml --restart-only.")
- ray.init("ray://localhost:25000", ignore_reinit_error=True)
-
- def _init_func():
- import iop4lib
- iop4lib.Config(config_db=True, reconfigure=True)
- import django
- django.db.connections.close_all()
- print(f"Node {socket.gethostname()} initialized.")
-
- def _buildfile(redf_id):
- """Builds a ReducedFit file in the Ray cluster.
-
- It syncs the raw files, masterbias and master flat from local, builds the file and performs astrometric calibration remotely,
- and gets the header and summary images back.
- """
-
- from iop4lib.db import ReducedFit
-
- print(f"Node {socket.gethostname()} starting to build ReducedFit {redf_id}.")
-
- try:
- # simplified version of ReducedFit.build_file(), avoids writing to the DB
- redf = ReducedFit.objects.get(id=redf_id)
- redf.apply_masterbias()
- redf.apply_masterflat()
- redf.astrometric_calibration()
- except Exception as e:
- logger.error(f"ReducedFit {redf.id} in Ray cluster -> exception during build_file(): {e}")
- return redf_id, False, None, None
- else:
- #return [redf_id, True, None, None]
-
- res = [redf_id, True, redf.header, dict()]
-
- print(f"Node {socket.gethostname()} finished ReducedFit {redf_id}.")
-
- try:
- for fpath in glob.glob(f"{redf.filedpropdir}/astrometry_*"):
- with open(fpath, "rb") as f:
- res[3][os.path.basename(fpath)] = f.read()
- except Exception as e:
- logger.error(f"ReducedFit {redf.id} in Ray cluster -> exception during reading of astrometry plots: {e}")
- return redf_id, False, None, None
-
- return tuple(res)
-
-
- time_start = time.time()
-
- with Pool(initializer=_init_func) as pool:
-
- for i, res in enumerate(pool.imap_unordered(_buildfile, [redf.id for redf in reduced_L], chunksize=4)):
-
- redf_id, success, header, files = res
-
- time_elapsed = time.time() - time_start
- avg_pace = time_elapsed / i if i > 0 else float('inf')
-
- if success:
- logger.info(f"ReducedFit {redf_id} was successfully calibrated astrometrically by the RAY cluster.\nProgress: {i} of {len(reduced_L)} files processed in {time_elapsed:.0f} seconds ({avg_pace:.1f} s / file).")
- else:
- logger.error(f"ReducedFit {redf_id} could not be calibrated astrometrically by the RAY cluster.\nProgress: {i} of {len(reduced_L)} files processed in {time_elapsed:.0f} seconds ({avg_pace:.1f} s / file).")
-
- redf = ReducedFit.objects.get(id=redf_id)
-
- if not redf.fileexists:
- try:
- redf.apply_masters()
- except Exception as e:
- logger.error(f"{redf}: exception during .apply_masters(): {e}")
- pass
-
- if success:
-
- with fits.open(redf.filepath, 'update') as hdul:
- hdul[0].header.update(header)
-
- for filename, data in files.items():
- logger.debug(f"Writting {filename} to {redf.filedpropdir}.")
- with open(Path(redf.filedpropdir) / filename, "wb") as f:
- f.write(data)
-
- redf.sources_in_field.set(AstroSource.get_sources_in_field(fit=redf))
-
- redf.unset_flag(ReducedFit.FLAGS.ERROR_ASTROMETRY)
- redf.set_flag(ReducedFit.FLAGS.BUILT_REDUCED)
-
- else:
- redf.set_flag(ReducedFit.FLAGS.ERROR_ASTROMETRY)
- redf.unset_flag(ReducedFit.FLAGS.BUILT_REDUCED)
-
- redf.save()
-
- pool.close()
- pool.join()
- pool.terminate()
-
-
- logger.warning("Not syncing files from Ray cluster! (anyway, all results should have come back already)")
- #os.system(f"rsync -va {iop4conf.ray_cluster_address}:'~/iop4data/calibration' ~/iop4data/calibration")
+def parallel_relative_polarimetry(keys, groups):
+ with multiprocessing.Pool(iop4conf.max_concurrent_threads) as pool:
+ pool.starmap(_parallel_relative_polarimetry_helper, zip(keys, groups))
- n_in_error = ReducedFit.objects.filter(flags__has=ReducedFit.FLAGS.ERROR_ASTROMETRY).count()
- duration_hms_str = time.strftime('%H h %M min %S s', time.gmtime(time.time()-time_start))
- logger.info(f"Finished bulk reduction of {len(reduced_L)} ReducedFits in Ray cluster. {n_in_error} files could not be calibrated astrometrically. Took {duration_hms_str}.")
diff --git a/iop4lib/utils/quadmatching.py b/iop4lib/utils/quadmatching.py
index a25d36ed..7f62a8b8 100644
--- a/iop4lib/utils/quadmatching.py
+++ b/iop4lib/utils/quadmatching.py
@@ -75,6 +75,8 @@ def hash_ish_old(points):
def quad_coords_ish(A,B,C,D):
+ (A,B,C,D), idx = force_AB_maxdist([A,B,C,D])
+
P = (A+B)/2
A, B, C, D = A-P,B-P,C-P,D-P
@@ -91,7 +93,29 @@ def quad_coords_ish(A,B,C,D):
C = np.array([xc,yc])
D = np.array([xd,yd])
- return A,B,C,D
+ FX = np.array([[-1,0],[0,1]])
+ FY = np.array([[1,0],[0,-1]])
+
+ # begin track the idx
+
+ if C[0] + D[0] > 0:
+ idx[2], idx[3] = idx[3], idx[2]
+ idx[0], idx[1] = idx[1], idx[0]
+ if C[0] > D[0]:
+ idx[2], idx[3] = idx[3], idx[2]
+
+ # end track the idx
+
+ if C[0] + D[0] > 0:
+ C,D = [FX@P for P in [C,D]]
+
+ if C[1] + D[1] > 0:
+ C,D = [FY@P for P in [C,D]]
+
+ if C[0] > D[0]:
+ C,D = D,C
+
+ return (A,B,C,D), idx
def force_AB_maxdist(points):
@@ -111,47 +135,36 @@ def force_AB_maxdist(points):
i, j = farthest_i_j
+ result_idx = [i,j]
result = [points[i], points[j]]
+ result_idx.extend([k for k in range(len(points)) if k not in [i,j]])
result.extend([points[k] for k in range(len(points)) if k not in [i,j]])
- return result
+ return result, result_idx
def hash_ish(points):
- A,B,C,D = points
- A,B,C,D = force_AB_maxdist([A,B,C,D])
- A,B,C,D = quad_coords_ish(A,B,C,D)
-
- FX = np.array([[-1,0],[0,1]])
- FY = np.array([[1,0],[0,-1]])
-
- M = np.identity(2)
- if C[0] > D[0]:
- M = FX @ M
-
- if C[1] > D[1]:
- M = FY @ M
-
- C, D = [M @ P for P in [C,D]]
+ A,B,C,D = points
+ (A,B,C,D), idx = quad_coords_ish(A,B,C,D)
+ d1,d2,d3,d4 = map(np.linalg.norm, [C-A,D-C,B-D,A-B])
- d1,d2,d3,d4 = map(np.linalg.norm, [C-A,B-C,D-B,A-D])
-
return d1,d2,d3,d4
def qorder_ish(points):
- A,B,C,D = points
- A,B,C,D = force_AB_maxdist([A,B,C,D])
- Ap,Bp,Cp,Dp = quad_coords_ish(A,B,C,D)
+ (Ap,Bp,Cp,Dp), idx_quad_ord = quad_coords_ish(points[0],points[1],points[2],points[3])
+
+ pts_quad_ord = [points[i] for i in idx_quad_ord]
- if not distance(Ap,Bp)=2.4",
"pyyaml<5.4",
+ "psutil",
]
# need for setuptools_scm, so we derived version from git version control system
diff --git a/tests/test_osnt090_dipol.py b/tests/test_osnt090_dipol.py
index 896c6c4a..06ca5e67 100644
--- a/tests/test_osnt090_dipol.py
+++ b/tests/test_osnt090_dipol.py
@@ -10,6 +10,7 @@
# other imports
import os
from pytest import approx
+import numpy as np
# logging
import logging
@@ -107,4 +108,114 @@ def test_astrometric_calibration(load_test_catalog):
pos_E = src.coord.to_pixel(wcs=redf.wcs2)
assert (distance(pos_O, [684, 397]) < 50) # O position
- assert (distance(pos_E, [475, 411]) < 50) # E position
\ No newline at end of file
+ assert (distance(pos_E, [475, 411]) < 50) # E position
+
+
+
+
+def test_quad_ish():
+ r""" Test that the quad-ish matching works as expected. Procedure:
+ 1. Create N random 4-point sets
+ a) Check if quad coords are invariant under permutations of pts
+ b) Check that hash is invariant under permutations of pts
+ 2. Apply a random translation + rotation + reflection (X and/or Y) to them
+ a) Check that their hashes are equal.
+ 3. Check that the qorder_ish function works, i.e. that the order of the points is the same. We do this by:
+ a) randomly ordering transformed points
+ b) transforming them back
+ c) checking the positions are the same to the original ones, one by one.
+ """
+ from iop4lib.utils.quadmatching import hash_ish, qorder_ish, quad_coords_ish
+
+ N = 1000
+
+ # 1. Create N random 4-point sets
+
+ logger.debug("1. Creating random 4-point sets")
+
+ list_of_points = list()
+ for i in range(N):
+ list_of_points.append(np.random.rand(4,2))
+
+ # 1.a Check if quad coords are invariant under permutations of pts
+
+ logger.debug("1.a. Checking if quad coords are invariant under permutations of pts")
+
+ for points in list_of_points:
+ assert np.allclose(quad_coords_ish(*points)[0], quad_coords_ish(*points[np.random.permutation(4),:])[0])
+
+ # 1.b Check that hash is invariant under permutations of pts
+
+ logger.debug("1.b. Checking that hash is invariant under permutations of pts")
+
+ for points in list_of_points:
+ assert hash_ish(points) == approx(hash_ish(points[np.random.permutation(4),:]))
+
+ # 2. Apply a random translation + rotation + reflection to them
+
+ logger.debug("2. Applying random transformations")
+
+ list_of_points_transformed = list()
+ M_L = list()
+ t_L = list()
+ for points in list_of_points:
+
+ # translation
+ t = np.random.rand(2)
+
+ # orthogonal transformation (proper or improper rotation)
+ from scipy.linalg import qr
+ Q, R = qr(np.random.rand(2,2))
+ M = Q  # Q from the QR decomposition is a random orthogonal matrix
+ points = M@points.T + t[:,None]
+
+ list_of_points_transformed.append(points.T)
+ M_L.append(M)
+ t_L.append(t)
+
+ # 2.a Check that the hashes for them are the equal.
+
+ logger.debug("2.a. Checking that the hashes are equal points")
+
+ for p1, p2 in zip(list_of_points, list_of_points_transformed):
+ logger.debug(f"p1 = {p1}, p2 = {p2}")
+ assert hash_ish(p1) == approx(hash_ish(p2))
+
+ # 3. Check that the qorder_ish function works
+
+ logger.debug("3. Checking that the qorder_ish function works")
+
+ # 3.a) randomly ordering transformed points
+
+ logger.debug("3.a. Randomly ordering transformed points")
+
+ list_of_points_transformed_reordered = list()
+ for points in list_of_points_transformed:
+ list_of_points_transformed_reordered.append(points[np.random.permutation(4),:])
+
+ # 3.b) transforming them back (subtract the translation, apply the inverse of the orthogonal transformation)
+
+ logger.debug("3.b. Transforming them back")
+
+ list_of_points_transformed_reordered_back = list()
+ for points, M, t in zip(list_of_points_transformed_reordered, M_L, t_L):
+ points = points - np.repeat(t[None,:], 4, axis=0)
+ points = np.linalg.inv(M)@points.T
+ list_of_points_transformed_reordered_back.append(points.T)
+
+ # 3.c) checking the positions are the same to the original ones, one by one.
+
+ logger.debug("3.c. Checking the positions are the same to the original ones, one by one.")
+
+ for p1, p2 in zip(list_of_points, list_of_points_transformed_reordered_back):
+
+ p1_ordered = np.array(qorder_ish([p for p in p1])).T
+ p2_ordered = np.array(qorder_ish([p for p in p2])).T
+
+ assert np.all(p1_ordered == approx(p2_ordered))
+
+
+
+
+
+
\ No newline at end of file